citus-7.0.3 (commit e5a156ba633c8362fe5e7cecbf9ce71bde5a9b4e)

citus-7.0.3/.codecov.yml

codecov:
  notify:
    require_ci_to_pass: yes

coverage:
  precision: 2
  round: down
  range: "70...100"

  ignore:
    - "src/backend/distributed/utils/citus_outfuncs.c"
    - "src/backend/distributed/utils/citus_read.c"
    - "src/backend/distributed/utils/citus_readfuncs_95.c"
    - "src/backend/distributed/utils/ruleutils_*.c"
    - "src/include/distributed/citus_nodes.h"

  status:
    project:
      default:
        target: 87.5
        threshold: 0.5
    patch:
      default:
        target: 75
    changes: no

parsers:
  gcov:
    branch_detection:
      conditional: yes
      loop: yes
      method: no
      macro: no

comment:
  layout: "header, diff"
  behavior: default
  require_changes: no

citus-7.0.3/.editorconfig

# top-most EditorConfig file
root = true

# rules for all files
[*]
indent_style = tab # we use tabs
indent_size = 4    # of size four

citus-7.0.3/.gitattributes

* whitespace=space-before-tab,trailing-space
*.[chly] whitespace=space-before-tab,trailing-space,indent-with-non-tab,tabwidth=4
*.dsl whitespace=space-before-tab,trailing-space,tab-in-indent
*.patch -whitespace
*.pl whitespace=space-before-tab,trailing-space,tabwidth=4
*.po whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eof
*.sgml whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eol
*.x[ms]l whitespace=space-before-tab,trailing-space,tab-in-indent

# Avoid confusing ASCII underlines with leftover merge conflict markers
README conflict-marker-size=32
README.* conflict-marker-size=32

# Certain data files that contain special whitespace, and other special cases
*.data -whitespace

# Test output files that contain extra whitespace
*.out -whitespace
src/test/regress/output/*.source -whitespace

# These files are maintained or generated elsewhere. We take them as is.
configure -whitespace

# all C files (implementation and header) use our style...
*.[ch] citus-style

# except these exceptions...
src/backend/distributed/utils/citus_outfuncs.c -citus-style
src/backend/distributed/utils/ruleutils_96.c -citus-style
src/backend/distributed/utils/ruleutils_10.c -citus-style
src/include/distributed/citus_nodes.h -citus-style

citus-7.0.3/.gitignore

# Global excludes across all subdirectories
*.o
*.so
*.so.[0-9]
*.so.[0-9].[0-9]
*.sl
*.sl.[0-9]
*.sl.[0-9].[0-9]
*.dylib
*.dll
*.a
*.mo
*.pot
objfiles.txt
.deps/
*.gcno
*.gcda
*.gcov
*.gcov.out
lcov.info
coverage/
*.vcproj
*.vcxproj
win32ver.rc
*.exe
lib*dll.def
lib*.pc

# Local excludes in root directory
/config.log
/config.status
/pgsql.sln
/pgsql.sln.cache
/Debug/
/Release/
/autom4te.cache
/Makefile.global
/src/Makefile.custom

# temporary files vim creates
*.swp

citus-7.0.3/.travis.yml

sudo: required
dist: trusty
language: c
cache:
  apt: true
  directories:
    - /home/travis/postgresql
branches:
  except: [ /^open-.*$/ ]
env:
  global:
    # GitHub API token for citus-bot
    - secure: degV+qb2xHiea7E2dGk/WLvmYjq4ZsBn6ZPko+YhRcNm2GRXRaU3FqMBIecPtsEEFYaL5GwCQq/CgBf9aQxgDQ+t2CrmtGTtI9AGAbVBl//amNeJOoLe6QvrDpSQX5pUxwDLCng8cvoQK7ZxGlNCzDKiu4Ep4DUWgQVpauJkQ9nHjtSMZvUqCoI9h1lBy9Mxh7YFfHPW2PAXCqpV4VlNiIYF84UKdX3MXKLy9Yt0JBSNTWLZFp/fFw2qNwzFvN94rF3ZvFSD7Wp6CIhT6R5/6k6Zx8YQIrjWhgm6OVy1osUA8X7W79h2ISPqKqMNVJkjJ+N8S4xuQU0kfejnQ74Ie/uJiHCmbW5W2TjpL1aU3FQpPsGwR8h0rSeHhJAJzd8Ma+z8vvnnQHDyvetPBB0WgA/VMQCu8uEutyfYw2hDmB2+l2dDwkViaI7R95bReAGrpd5uNqklAXuR7yOeArz0ZZpHV0aZHGcNBxznMaZExSVZ5DVPW38UPn7Kgse8BnOWeLgnA1hJVp6CmBCtu+hKYt+atBPgRbM8IUINnKKZf/Sk6HeJIJZs662jD8/X93vFi0ZtyV2jEKJpouWw8j4vrGGsaDzTEUcyJgDqZj7tPJptM2L5B3BcFJmkGj2HO3N+LGDarJrVBBSiEjhTgx4NnLiKZnUbMx547mCRg2akk2w=
    # During pull request builds (not push builds), use custom-built PostgreSQL
    - USE_CUSTOM_PG="${TRAVIS_PULL_REQUEST_SHA}"
  matrix:
    - PGVERSION=9.6
    - PGVERSION=10
before_install:
  - git clone -b v0.6.4 --depth 1 https://github.com/citusdata/tools.git
  - sudo make -C tools install
  - setup_apt
  - curl https://install.citusdata.com/community/deb.sh | sudo bash
  - nuke_pg
install:
  - install_uncrustify
  - install_pg
  - install_custom_pg
  # download and install HLL manually, as custom builds won't satisfy deps
  - apt-get download "postgresql-${PGVERSION}-hll=2.10.2.citus-1" && sudo dpkg --force-confold --force-confdef --force-all -i *hll*.deb
before_script: citus_indent --quiet --check
script: CFLAGS=-Werror pg_travis_multi_test check
after_success:
  - sync_to_enterprise
  - bash <(curl -s https://codecov.io/bash)

citus-7.0.3/CHANGELOG.md

### citus v7.0.3 (October 16, 2017) ###

* Fixes several bugs that could cause crash
* Fixes a bug that could cause deadlock while creating reference tables
* Fixes a bug that could cause false-positives in deadlock detection
* Fixes a bug that could cause 2PC recovery not to work from MX workers
* Fixes a bug that could cause cache incoherency
* Fixes a bug that could cause maintenance daemon to skip cache invalidations
* Improves performance of transaction recovery by using correct index

### citus v7.0.2 (September 28, 2017) ###

* Updates task-tracker to limit file access

### citus v7.0.1 (September 12, 2017) ###

* Fixes a bug that could cause memory leaks in `INSERT ... SELECT` queries
* Fixes a bug that could cause incorrect execution of prepared statements
* Fixes a bug that could cause excessive memory usage during COPY
* Incorporates latest changes from core PostgreSQL code

### citus v7.0.0 (August 28, 2017) ###

* Adds support for PostgreSQL 10
* Drops support for PostgreSQL 9.5
* Adds support for multi-row `INSERT`
* Adds support for router `UPDATE` and `DELETE` queries with subqueries
* Adds infrastructure for distributed deadlock detection
* Deprecates `enable_deadlock_prevention` flag
* Adds support for partitioned tables
* Adds support for creating `UNLOGGED` tables
* Adds support for `SAVEPOINT`
* Adds UDF `citus_create_restore_point` for taking distributed snapshots
* Adds support for evaluating non-pushable `INSERT ... SELECT` queries
* Adds support for subquery pushdown on reference tables
* Adds shard pruning support for `IN` and `ANY`
* Adds support for `UPDATE` and `DELETE` commands that prune down to 0 shard
* Enhances transaction support by relaxing some transaction restrictions
* Fixes a bug causing crash if distributed table has no shards
* Fixes a bug causing crash when removing inactive node
* Fixes a bug causing failure during `COPY` on tables with dropped columns
* Fixes a bug causing failure during `DROP EXTENSION`
* Fixes a bug preventing executing `VACUUM` and `INSERT` concurrently
* Fixes a bug in prepared `INSERT` statements containing an implicit cast
* Fixes several issues related to statement cancellations and connections
* Fixes several 2PC related issues
* Removes an unnecessary dependency causing warning messages in pg_dump
* Adds internal infrastructure for follower clusters
* Adds internal infrastructure for progress tracking
* Implements various performance improvements
* Adds internal infrastructures and tests to improve development process
* Addresses various race conditions and deadlocks
* Improves and standardizes error messages

### citus v6.2.3 (July 13, 2017) ###

* Fixes a crash during execution of local CREATE INDEX CONCURRENTLY
* Fixes a bug preventing usage of quoted column names in COPY
* Fixes a bug in prepared INSERTs with implicit cast in partition column
* Relaxes locks in VACUUM to ensure concurrent execution with INSERT

### citus v6.2.2 (May 31, 2017) ###

* Fixes a common cause of deadlocks when repairing tables with foreign keys

### citus v6.2.1 (May 24, 2017) ###

* Relaxes version-check logic to avoid breaking non-distributed commands

### citus v6.2.0 (May 16, 2017) ###

* Increases SQL subquery coverage by pushing down more kinds of queries
* Adds CustomScan API support to allow read-only transactions
* Adds support for `CREATE/DROP INDEX CONCURRENTLY`
* Adds support for `ALTER TABLE ... ADD CONSTRAINT`
* Adds support for `ALTER TABLE ... RENAME COLUMN`
* Adds support for `DISABLE/ENABLE TRIGGER ALL`
* Adds support for expressions in the partition column in INSERTs
* Adds support for query parameters in combination with function evaluation
* Adds support for creating distributed tables from non-empty local tables
* Adds UDFs to get size of distributed tables
* Adds UDFs to add a new node without replicating reference tables
* Adds checks to prevent running Citus binaries with wrong metadata tables
* Improves shard pruning performance for range queries
* Improves planner performance for joins involving co-located tables
* Improves shard copy performance by creating indexes after copy
* Improves task-tracker performance by batching several status checks
* Enables router planner for queries on range partitioned table
* Changes `TRUNCATE` to drop local data only if `enable_ddl_propagation` is off
* Starts to execute DDL on coordinator before workers
* Fixes a bug causing incorrectly reading invalidated cache
* Fixes a bug related to creation of schemas in workers with incorrect owner
* Fixes a bug related to concurrent run of shard drop functions
* Fixes a bug related to `EXPLAIN ANALYZE` with DML queries
* Fixes a bug related to SQL functions in FROM clause
* Adds a GUC variable to report cross shard queries
* Fixes a bug related to partition columns without native hash function
* Adds internal infrastructures and tests to improve development process
* Addresses various race conditions and deadlocks
* Improves and standardizes error messages

### citus v6.1.2 (May 31, 2017) ###

* Fixes a common cause of deadlocks when repairing tables with foreign keys

### citus v6.1.1 (May 5, 2017) ###

* Fixes a crash caused by router executor use after connection timeouts
* Fixes a crash caused by relation cache invalidation during COPY
* Fixes bug related to DDL use within PL/pgSQL functions
* Fixes a COPY bug related to types lacking binary output functions
* Fixes a bug related to modifications with parameterized partition values
* Fixes improper value interpolation in worker sequence generation
* Guards shard pruning logic against zero-shard tables
* Fixes possible NULL pointer dereference and buffer underflow (via PVS-Studio)
* Fixes an INSERT ... SELECT bug that could push down non-partition column JOINs

### citus v6.1.0 (February 9, 2017) ###

* Implements _reference tables_, transactionally replicated to all nodes
* Adds `upgrade_to_reference_table` UDF to upgrade pre-6.1 reference tables
* Expands prepared statement support to nearly all statements
* Adds support for creating `VIEW`s which reference distributed tables
* Adds targeted `VACUUM`/`ANALYZE` support
* Adds support for the `FILTER` clause in aggregate expressions
* Adds support for function evaluation within `INSERT INTO ... SELECT`
* Adds support for creating foreign key constraints with `ALTER TABLE`
* Adds logic to choose router planner for all queries it supports
* Enhances `create_distributed_table` with parameter for explicit colocation
* Adds generally useful utility UDFs previously available as "Citus Tools"
* Adds user-facing UDFs for locking shard resources and metadata
* Refactors connection and transaction management, giving a consistent experience
* Enhances `COPY` with fully transactional semantics
* Improves support for cancellation for a number of queries and commands
* Adds `column_to_column_name` UDF to help users understand `partkey` values
* Adds `master_disable_node` UDF for temporarily disabling nodes
* Adds proper MX ("masterless") metadata propagation logic
* Adds `start_metadata_sync_to_node` UDF to propagate metadata changes to nodes
* Enhances `SERIAL` compatibility with MX tables
* Adds a `node_connection_timeout` parameter to control node connection timeouts
* Adds `enable_deadlock_prevention` setting to permit multi-node transactions
* Adds a `replication_model` setting to specify replication of new tables
* Changes the `shard_replication_factor` setting's default value to one
* Adds code to automatically set `max_prepared_transactions` if not configured
* Accelerates lookup of colocated shard placements
* Fixes a bug affecting `INSERT INTO ... SELECT` queries using constant values
* Fixes a bug by ensuring `COPY` does not mark placements inactive
* Fixes a bug affecting reads from `pg_dist_shard_placement` table
* Fixes a crash triggered by creating a foreign key without a column
* Fixes a crash related to accessing catalog tables after aborted transactions
* Fixes a bug affecting JOIN queries requiring repartitions
* Fixes a bug affecting node insertions to `pg_dist_node` table
* Fixes a crash triggered by queries with modifying common table expressions
* Fixes a bug affecting workloads with concurrent shard appends and deletions
* Addresses various race conditions and deadlocks
* Improves and standardizes error messages

### citus v6.0.1 (November 29, 2016) ###

* Fixes a bug causing failures during pg_upgrade
* Fixes a bug preventing DML queries during colocated table creation
* Fixes a bug that caused NULL parameters to be incorrectly passed as text

### citus v6.0.0 (November 7, 2016) ###

* Adds compatibility with PostgreSQL 9.6, now the recommended version
* Removes the `pg_worker_list.conf` file in favor of a `pg_dist_node` table
* Adds `master_add_node` and `master_remove_node` UDFs to manage membership
* Removes the `\stage` command and corresponding csql binary in favor of `COPY`
* Removes `copy_to_distributed_table` in favor of first-class `COPY` support
* Adds support for multiple DDL statements within a transaction
* Adds support for certain foreign key constraints
* Adds support for parallel `INSERT INTO ... SELECT` against colocated tables
* Adds support for the `TRUNCATE` command
* Adds support for `HAVING` clauses in `SELECT` queries
* Adds support for `EXCLUDE` constraints which include the partition column
* Adds support for system columns in queries (`tableoid`, `ctid`, etc.)
* Adds support for relation name extension within `INDEX` definitions
* Adds support for no-op `UPDATE`s of the partition column
* Adds several general-purpose utility UDFs to aid in Citus maintenance
* Adds `master_expire_table_cache` UDF to forcibly expire cached shards
* Parallelizes the processing of DDL commands which affect distributed tables
* Adds support for repartition jobs using composite or custom types
* Enhances object name extension to handle long names and large shard counts
* Parallelizes the `master_modify_multiple_shards` UDF
* Changes distributed table creation to error if the target table is not empty
* Changes the `pg_dist_shard.logicalrelid` column from an `oid` to `regclass`
* Adds a `placementid` column to `pg_dist_shard_placement`, replacing Oid use
* Removes the `pg_dist_shard.shardalias` distribution metadata column
* Adds `pg_dist_partition.repmodel` to track tables using streaming replication
* Adds internal infrastructure to take snapshots of distribution metadata
* Addresses the need to invalidate prepared statements on metadata changes
* Adds a `mark_tables_colocated` UDF for denoting pre-6.0 manual colocation
* Fixes a bug affecting prepared statement execution within PL/pgSQL
* Fixes a bug affecting `COPY` commands using composite types
* Fixes a bug that could cause crashes during `EXPLAIN EXECUTE`
* Separates worker and master job temporary folders
* Eliminates race condition between distributed modification and repair
* Relaxes the requirement that shard repairs also repair colocated shards
* Implements internal functions to track which tables' shards are colocated
* Adds `pg_dist_partition.colocationid` to track colocation group membership
* Extends shard copy and move operations to respect colocation settings
* Adds `pg_dist_local_group` to prepare for future MX-related changes
* Adds `create_distributed_table` to easily create shards and infer colocation

### citus v5.2.2 (November 7, 2016) ###

* Adds support for `IF NOT EXISTS` clause of `CREATE INDEX` command
* Adds support for `RETURN QUERY` and `FOR ... IN` PL/pgSQL features
* Extends the router planner to handle more queries
* Changes `COUNT` of zero-row sets to return `0` rather than an empty result
* Reduces the minimum permitted `task_tracker_delay` to a single millisecond
* Fixes a bug that caused crashes during joins with a `WHERE false` clause
* Fixes a bug triggered by unique violation errors raised in long transactions
* Fixes a bug resulting in multiple registration of transaction callbacks
* Fixes a bug which could result in stale reads of distribution metadata
* Fixes a bug preventing distributed modifications in some PL/pgSQL functions
* Fixes some code paths that could hypothetically read uninitialized memory
* Lowers log level of _waiting for activity_ messages

### citus v5.2.1 (September 6, 2016) ###

* Fixes subquery pushdown to properly extract outer join qualifiers
* Addresses possible memory leak during multi-shard transactions

### citus v5.2.0 (August 15, 2016) ###

* Drops support for PostgreSQL 9.4; PostgreSQL 9.5 is required
* Adds schema support for tables, other named objects (types, operators, etc.)
* Evaluates non-immutable functions on master in all modification commands
* Adds support for SERIAL types in non-partition columns
* Adds support for RETURNING clause in INSERT, UPDATE, and DELETE commands
* Adds support for multi-statement transactions involving a fixed set of nodes
* Full SQL support for SELECT queries which can be executed on a single worker
* Adds option to perform DDL changes using prepared transactions (2PC)
* Adds an `enable_ddl_propagation` parameter to control DDL propagation
* Accelerates shard pruning during merges
* Adds `master_modify_multiple_shards` UDF to modify many shards at once
* Adds COPY support for arrays of user-defined types
* Now supports parameterized prepared statements for certain use cases
* Extends LIMIT/OFFSET support to all executor types
* Constraint violations now fail fast rather than hitting all placements
* Makes `master_create_empty_shard` aware of shard placement policy
* Reduces unnecessary sleep during queries processed by real-time executor
* Improves task tracker executor's task cleanup logic
* Relaxes restrictions on cancellation of DDL commands
* Removes ONLY keyword from worker SELECT queries
* Error message improvements and standardization
* Moves `master_update_shard_statistics` function to `pg_catalog` schema
* Fixes a bug where hash-partitioned anti-joins could return incorrect results
* Now sets storage type correctly for foreign table-backed shards
* Fixes `master_update_shard_statistics` issue with hash-partitioned tables
* Fixes an issue related to extending table names that require escaping
* Reduces risk of row counter overflows during modifications
* Fixes a crash related to FILTER clause use in COUNT DISTINCT subqueries
* Fixes crashes related to partition columns with high attribute numbers
* Fixes certain subquery and join crashes
* Detects flex for build even if PostgreSQL was built without it
* Fixes assert-enabled crash when `all_modifications_commutative` is true

### citus v5.2.0-rc.1 (August 1, 2016) ###

* Initial 5.2.0 candidate

### citus v5.1.1 (June 17, 2016) ###

* Adds complex count distinct expression support in repartitioned subqueries
* Improves task tracker job cleanup logic, addressing a memory leak
* Fixes bug that generated incorrect results for LEFT JOIN queries
* Improves compatibility with Debian's reproducible builds project
* Fixes build issues on FreeBSD platforms

### citus v5.1.0 (May 17, 2016) ###

* Adds distributed COPY to rapidly populate distributed tables
* Adds support for using EXPLAIN on distributed queries
* Recognizes and fast-paths single-shard SELECT statements automatically
* Increases INSERT throughput via shard pruning optimizations
* Improves planner performance for joins involving tables with many shards
* Adds ability to pass columns as arguments to function calls in UPDATEs
* Introduces transaction manager for use by multi-shard commands
* Adds COUNT(DISTINCT ...) pushdown optimization for hash-partitioned tables
* Adds support for certain UNIQUE indexes on hash- or range-partitioned tables
* Deprecates \stage in favor of using COPY for append-partition tables
* Deprecates `copy_to_distributed_table` in favor of first-class COPY support
* Fixes build problems when using non-packaged PostgreSQL installs
* Fixes bug that sometimes skipped pruning when partitioned by a VARCHAR column
* Fixes bug impeding use of user-defined functions in repartitioned subqueries
* Fixes bug involving queries with equality comparisons of boolean types
* Fixes crash that prevented use alongside `pg_stat_statements`
* Fixes crash arising from SELECT queries that lack a target list
* Improves warning and error messages

### citus v5.1.0-rc.2 (May 10, 2016) ###

* Fixes test failures
* Fixes EXPLAIN output when FORMAT JSON in use

### citus v5.1.0-rc.1 (May 4, 2016) ###

* Initial 5.1.0 candidate

### citus v5.0.1 (April 15, 2016) ###

* Fixes issues on 32-bit systems

### citus v5.0.0 (March 24, 2016) ###

* Public release under AGPLv3
* PostgreSQL extension compatible with PostgreSQL 9.5 and 9.4

citus-7.0.3/CONTRIBUTING.md

# Contributing to Citus

We're happy you want to contribute! You can help us in different ways:

* Open an [issue](https://github.com/citusdata/citus/issues) with suggestions for improvements
* Fork this repository and submit a pull request

Before accepting any code contributions we ask that Citus contributors sign a Contributor License Agreement (CLA). For an explanation of why we ask this, as well as instructions for how to proceed, see the [Citus CLA](https://cla.citusdata.com).

### Getting and building

#### Mac

1. Install Xcode
2. Install packages with Homebrew

   ```bash
   brew update
   brew install git postgresql
   ```

3. Get, build, and test the code

   ```bash
   git clone https://github.com/citusdata/citus.git
   cd citus
   ./configure
   make
   make install
   cd src/test/regress
   make check
   ```

#### Debian-based Linux (Ubuntu, Debian)

1. Install build dependencies

   ```bash
   echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" | \
        sudo tee /etc/apt/sources.list.d/pgdg.list
   wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | \
        sudo apt-key add -
   sudo apt-get update
   sudo apt-get install -y postgresql-server-dev-9.6 postgresql-9.6 \
                           libedit-dev libselinux1-dev libxslt-dev \
                           libpam0g-dev git flex make libssl-dev \
                           libkrb5-dev
   ```

2. Get, build, and test the code

   ```bash
   git clone https://github.com/citusdata/citus.git
   cd citus
   ./configure
   make
   sudo make install
   cd src/test/regress
   make check
   ```

#### Red Hat-based Linux (RHEL, CentOS, Fedora)

1. Find the PostgreSQL 9.6 RPM URL for your repo at [yum.postgresql.org](http://yum.postgresql.org/repopackages.php#pg96)

2. Register its contents with Yum:

   ```bash
   sudo yum install -y
   ```

3. Install build dependencies

   ```bash
   sudo yum update -y
   sudo yum groupinstall -y 'Development Tools'
   sudo yum install -y postgresql96-devel postgresql96-server \
                       libxml2-devel libxslt-devel openssl-devel \
                       pam-devel readline-devel git
   git clone https://github.com/citusdata/citus.git
   cd citus
   PG_CONFIG=/usr/pgsql-9.6/bin/pg_config ./configure
   make
   sudo make install
   cd src/test/regress
   make check
   ```

### Following our coding conventions

Travis will automatically reject any PRs which do not follow our coding conventions; it won't even run tests!
The easiest way to ensure your PR adheres to those conventions is to use the [citus_indent](https://github.com/citusdata/tools/tree/develop/uncrustify) tool.

```bash
# Ubuntu does have uncrustify in the package manager however it's an older
# version which doesn't work with our citus-style.cfg file. We require version
# 0.60 or greater. If your package manager has a more recent version of uncrustify
# feel free to use that instead of installing from source
git clone --branch uncrustify-0.60 https://github.com/uncrustify/uncrustify.git
pushd uncrustify
./configure
sudo make install
popd

git clone https://github.com/citusdata/tools.git
pushd tools/uncrustify
make install
popd
```

Once you've done that, you can run the `citus_indent` command to recursively check and correct the style of any source files in the current directory. You can also run `make reindent` from within the Citus repo to correct the style of all source files in the repository.

citus-7.0.3/LICENSE

GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software.

A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and encouraged by the resulting cooperation. However, in the case of software used on network servers, this result may fail to come about. The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public.

The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version.
An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. 
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. 
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). 
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License. Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. 
For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see .citus-7.0.3/Makefile000066400000000000000000000025001317107136600143120ustar00rootroot00000000000000# Citus toplevel Makefile citus_subdir = . citus_top_builddir = . # Hint that configure should be run first ifeq (,$(wildcard Makefile.global)) $(error ./configure needs to be run before compiling Citus) endif include Makefile.global all: extension # build extension extension: $(citus_top_builddir)/src/include/citus_version.h $(MAKE) -C src/backend/distributed/ all install-extension: extension $(MAKE) -C src/backend/distributed/ install install-headers: extension $(MKDIR_P) '$(DESTDIR)$(includedir_server)/distributed/' # generated headers are located in the build directory $(INSTALL_DATA) $(citus_top_builddir)/src/include/citus_version.h '$(DESTDIR)$(includedir_server)/' # the rest in the source tree $(INSTALL_DATA) $(citus_abs_srcdir)/src/include/distributed/*.h '$(DESTDIR)$(includedir_server)/distributed/' clean-extension: $(MAKE) -C src/backend/distributed/ clean .PHONY: extension install-extension clean-extension # Add to generic targets install: install-extension install-headers clean: clean-extension # apply or check style reindent: cd ${citus_abs_top_srcdir} && citus_indent --quiet check-style: cd ${citus_abs_top_srcdir} && citus_indent --quiet --check .PHONY: reindent check-style # depend on install for now check: all install $(MAKE) -C src/test/regress check-full .PHONY: all check install clean citus-7.0.3/Makefile.global.in000066400000000000000000000056141317107136600161670ustar00rootroot00000000000000# -*-makefile-*- # @configure_input@ # Makefile.global.in - Makefile to be included by all submakes # # This file is converted by configure into an actual Makefile, # replacing the @varname@ placeholders by actual values. # # This files is intended to contain infrastructure needed by several # makefiles, particulary central handling of compilation flags and # rules. citus_abs_srcdir:=@abs_top_srcdir@/${citus_subdir} citus_abs_top_srcdir:=@abs_top_srcdir@ postgres_abs_srcdir:=@POSTGRES_SRCDIR@ postgres_abs_builddir:=@POSTGRES_BUILDDIR@ PG_CONFIG:=@PG_CONFIG@ PGXS:=$(shell $(PG_CONFIG) --pgxs) # Support for VPATH builds (i.e. builds from outside the source tree) vpath_build=@vpath_build@ ifeq ($(vpath_build),yes) override VPATH:=$(citus_abs_srcdir) USE_VPATH:=$(VPATH) citus_top_srcdir:=$(citus_abs_top_srcdir) override srcdir=$(VPATH) else citus_top_srcdir:=$(citus_top_builddir) endif # Citus is built using PostgreSQL's pgxs USE_PGXS=1 include $(PGXS) # Remake Makefile.global from Makefile.global.in if the latter # changed. In order to trigger this rule, the including file must # write `include $(citus_top_builddir)/Makefile.global', not some # shortcut thereof. This makes it less likely to accidentally run # with some outdated Makefile.global. # Make internally restarts whenever included Makefiles are # regenerated. 
$(citus_top_builddir)/Makefile.global: $(citus_abs_top_srcdir)/configure $(citus_top_builddir)/Makefile.global.in $(citus_top_builddir)/config.status cd @abs_top_builddir@ && ./config.status Makefile.global # Ensure configuration is generated by the most recent configure, # useful for longer existing build directories. $(citus_top_builddir)/config.status: $(citus_abs_top_srcdir)/configure $(citus_abs_top_srcdir)/src/backend/distributed/citus.control cd @abs_top_builddir@ && ./config.status --recheck && ./config.status # Regenerate configure if configure.in changed $(citus_abs_top_srcdir)/configure: $(citus_abs_top_srcdir)/configure.in cd ${citus_abs_top_srcdir} && ./autogen.sh # If specified via configure, replace the default compiler. Normally # we'll build with the one postgres was built with. But it's useful to # be able to use a different one, especially when building against # distribution packages. ifneq (@CC@,) override CC=@CC@ endif # If detected by our configure script, override the FLEX postgres # detected. That allows to compile citus against a postgres which was # built without flex available (possible because generated files are # included) ifneq (@FLEX@,) override FLEX=@FLEX@ endif # Add options passed to configure or computed therein, to CFLAGS/CPPFLAGS/... override CFLAGS += @CFLAGS@ @CITUS_CFLAGS@ override CPPFLAGS := @CPPFLAGS@ -I '${citus_abs_top_srcdir}/src/include' -I'${citus_top_builddir}/src/include' $(CPPFLAGS) override LDFLAGS += @LDFLAGS@ # optional file with user defined, additional, rules -include ${citus_abs_srcdir}/src/Makefile.custom citus-7.0.3/README.md000066400000000000000000000145201317107136600141360ustar00rootroot00000000000000![Citus Banner](/github-banner.png) [![Build Status](https://travis-ci.org/citusdata/citus.svg?branch=master)](https://travis-ci.org/citusdata/citus) [![Slack Status](http://slack.citusdata.com/badge.svg)](https://slack.citusdata.com) [![Latest Docs](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.citusdata.com/) ### What is Citus? * **Open-source** PostgreSQL extension (not a fork) * **Scalable** across multiple machines through sharding and replication * **Distributed** engine for query parallelization * **Database** designed to scale multi-tenant applications Citus is a distributed database that scales across commodity servers using transparent sharding and replication. Citus extends the underlying database rather than forking it, giving developers and enterprises the power and familiarity of a relational database. As an extension, Citus supports new PostgreSQL releases, and allows you to benefit from new features while maintaining compatibility with existing PostgreSQL tools. Citus serves many use cases. Two common ones are: 1. [Multi-tenant database](https://www.citusdata.com/blog/2016/10/03/designing-your-saas-database-for-high-scalability): Most B2B applications already have the notion of a tenant / customer / account built into their data model. Citus allows you to scale out your transactional relational database to 100K+ tenants with minimal changes to your application. 2. [Real-time analytics](https://www.citusdata.com/blog/2017/01/27/getting-started-with-github-events-data): Citus enables ingesting large volumes of data and running analytical queries on that data in human real-time. Example applications include analytic dashboards with subsecond response times and exploratory queries on unfolding events. 
To learn more, visit [citusdata.com](https://www.citusdata.com) and join the [mailing list](https://groups.google.com/forum/#!forum/citus-users) to stay on top of the latest developments. ### Getting started with Citus The fastest way to get up and running is to create a Citus Cloud account. You can also setup a local Citus cluster with Docker. #### Citus Cloud Citus Cloud runs on top of AWS as a fully managed database as a service and has development plans available for getting started. You can provision a Citus Cloud account at [https://console.citusdata.com](https://console.citusdata.com/users/sign_up) and get started with just a few clicks. #### Local Citus Cluster If you're looking to get started locally, you can follow the following steps to get up and running. 1. Install Docker Community Edition and Docker Compose * Mac: 1. [Download](https://www.docker.com/community-edition#/download) and install Docker. 2. Start Docker by clicking on the application’s icon. * Linux: ```bash curl -sSL https://get.docker.com/ | sh sudo usermod -aG docker $USER && exec sg docker newgrp `id -gn` sudo systemctl start docker sudo curl -sSL https://github.com/docker/compose/releases/download/1.11.2/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose sudo chmod +x /usr/local/bin/docker-compose ``` The above version of Docker Compose is sufficient for running Citus, or you can install the [latest version](https://github.com/docker/compose/releases/latest). 2. Pull and start the Docker images ```bash curl -sSLO https://raw.githubusercontent.com/citusdata/docker/master/docker-compose.yml docker-compose -p citus up -d ``` 3. Connect to the master database ```bash docker exec -it citus_master psql -U postgres ``` 4. Follow the [first tutorial][tutorial] instructions 5. To shut the cluster down, run ```bash docker-compose -p citus down ``` ### Talk to Contributors and Learn More
* **Documentation**: Try the Citus tutorial for a hands-on introduction or the documentation for a more comprehensive reference.
* **Google Groups**: The Citus Google Group is our place for detailed questions and discussions.
* **Slack**: Chat with us in our community Slack channel.
* **Github Issues**: We track specific bug reports and feature requests on our project issues.
* **Twitter**: Follow @citusdata for general updates and PostgreSQL scaling tips.
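Once the local cluster from the steps above is running, a quick way to see Citus in action before diving into the docs is to shard a small table from the coordinator. The snippet below is only a minimal sketch: the `events` table and its columns are hypothetical, while `create_distributed_table` is the Citus function that distributes a table on the given column.

```bash
# Create a demo table on the coordinator (table and columns are illustrative).
docker exec citus_master psql -U postgres -c \
  "CREATE TABLE events (tenant_id bigint, event_id bigint, payload jsonb);"

# Shard it across the workers by tenant_id using the Citus UDF.
docker exec citus_master psql -U postgres -c \
  "SELECT create_distributed_table('events', 'tenant_id');"
```

The tutorial linked above walks through a fuller multi-tenant example on top of the same cluster.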
### Contributing Citus is built on and of open source, and we welcome your contributions. The [CONTRIBUTING.md](CONTRIBUTING.md) file explains how to get started developing the Citus extension itself and our code quality guidelines. ### Who is Using Citus? Citus is deployed in production by many customers, ranging from technology start-ups to large enterprises. Here are some examples: * [CloudFlare](https://www.cloudflare.com/) uses Citus to provide real-time analytics on 100 TBs of data from over 4 million customer websites. [Case Study](https://blog.cloudflare.com/scaling-out-postgresql-for-cloudflare-analytics-using-citusdb/) * [MixRank](https://mixrank.com/) uses Citus to efficiently collect and analyze vast amounts of data to allow inside B2B sales teams to find new customers. [Case Study](https://www.citusdata.com/solutions/case-studies/mixrank-case-study) * [Neustar](https://www.neustar.biz/) builds and maintains scalable ad-tech infrastructure that counts billions of events per day using Citus and HyperLogLog. * [Agari](https://www.agari.com/) uses Citus to secure more than 85 percent of U.S. consumer emails on two 6-8 TB clusters. [Case Study](https://www.citusdata.com/solutions/case-studies/agari-case-study) * [Heap](https://heapanalytics.com/) uses Citus to run dynamic funnel, segmentation, and cohort queries across billions of users and tens of billions of events. [Watch Video](https://www.youtube.com/watch?v=NVl9_6J1G60&list=PLixnExCn6lRpP10ZlpJwx6AuU3XIgNWpL) ___ Copyright © 2012–2017 Citus Data, Inc. [faq]: https://www.citusdata.com/frequently-asked-questions [tutorial]: https://docs.citusdata.com/en/stable/tutorials/multi-tenant-tutorial.html citus-7.0.3/autogen.sh000077500000000000000000000003251317107136600146560ustar00rootroot00000000000000#!/bin/bash # # autogen.sh converts configure.in to configure and creates # citus_config.h.in. The resuting resulting files are checked into # the SCM, to avoid everyone needing autoconf installed. autoreconf -f citus-7.0.3/configure000077500000000000000000003726731317107136600146060ustar00rootroot00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for Citus 7.0.3. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. # # Copyright (c) 2012-2017, Citus Data, Inc. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. 
{ _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." 
if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. 
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. 
as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='Citus' PACKAGE_TARNAME='citus' PACKAGE_VERSION='7.0.3' PACKAGE_STRING='Citus 7.0.3' PACKAGE_BUGREPORT='' PACKAGE_URL='' ac_subst_vars='LTLIBOBJS LIBOBJS POSTGRES_BUILDDIR POSTGRES_SRCDIR CITUS_CFLAGS OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC vpath_build PATH PG_CONFIG FLEX AWK SED target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_coverage ' ac_precious_vars='build_alias host_alias target_alias PG_CONFIG PATH CC CFLAGS LDFLAGS LIBS CPPFLAGS' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. 
case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. 
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures Citus 7.0.3 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. 
Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/citus] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of Citus 7.0.3:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-coverage build with coverage testing instrumentation Some influential environment variables: PG_CONFIG Location to find pg_config for target PostgreSQL instalation (default PATH) PATH PATH for target PostgreSQL install pg_config CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. 
ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF Citus configure 7.0.3 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. Copyright (c) 2012-2017, Citus Data, Inc. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by Citus $as_me 7.0.3, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # we'll need sed and awk for some of the version commands { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_SED" || continue # Check for GNU ac_path_SED and select it if it is found. # Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done # CITUS_VERSION definition cat >>confdefs.h <<_ACEOF #define CITUS_VERSION "$PACKAGE_VERSION" _ACEOF # CITUS_MAJORVERSION definition CITUS_MAJORVERSION=`expr "$PACKAGE_VERSION" : '\([0-9][0-9]*\.[0-9][0-9]*\)'` cat >>confdefs.h <<_ACEOF #define CITUS_MAJORVERSION "$CITUS_MAJORVERSION" _ACEOF # CITUS_VERSION_NUM definition # awk -F is a regex on some platforms, and not on others, so make "." a tab CITUS_VERSION_NUM="`echo "$PACKAGE_VERSION" | sed 's/[A-Za-z].*$//' | tr '.' ' ' | $AWK '{printf "%d%02d%02d", $1, $2, (NF >= 3) ? $3 : 0}'`" cat >>confdefs.h <<_ACEOF #define CITUS_VERSION_NUM $CITUS_VERSION_NUM _ACEOF # CITUS_EXTENSIONVERSION definition CITUS_EXTENSIONVERSION="`grep '^default_version' $srcdir/src/backend/distributed/citus.control | cut -d\' -f2`" cat >>confdefs.h <<_ACEOF #define CITUS_EXTENSIONVERSION "$CITUS_EXTENSIONVERSION" _ACEOF # Re-check for flex. That allows to compile citus against a postgres # which was built without flex available (possible because generated # files are included) # Extract the first word of "flex", so it can be a program name with args. set dummy flex; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_FLEX+:} false; then : $as_echo_n "(cached) " >&6 else case $FLEX in [\\/]* | ?:[\\/]*) ac_cv_path_FLEX="$FLEX" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_FLEX="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi FLEX=$ac_cv_path_FLEX if test -n "$FLEX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FLEX" >&5 $as_echo "$FLEX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi # Locate pg_config binary if test -z "$PG_CONFIG"; then # Extract the first word of "pg_config", so it can be a program name with args. set dummy pg_config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $PG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PG_CONFIG="$PG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PG_CONFIG=$ac_cv_path_PG_CONFIG if test -n "$PG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PG_CONFIG" >&5 $as_echo "$PG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$PG_CONFIG"; then as_fn_error $? "Could not find pg_config. Set PG_CONFIG or PATH." "$LINENO" 5 fi # check we're building against a supported version of PostgreSQL citusac_pg_config_version=$($PG_CONFIG --version 2>/dev/null) version_num=$(echo "$citusac_pg_config_version"| $SED -e 's/^PostgreSQL \([0-9]*\)\(\.[0-9]*\)\{0,1\}\(.*\)$/\1\2/') # if PostgreSQL version starts with two digits, the major version is those digits version_num=$(echo "$version_num"| $SED -e 's/^\([0-9]\{2\}\)\(.*\)$/\1/') if test -z "$version_num"; then as_fn_error $? "Could not detect PostgreSQL version from pg_config." "$LINENO" 5 fi if test "$version_num" != '9.6' -a "$version_num" != '10'; then as_fn_error $? "Citus is not compatible with the detected PostgreSQL version ${version_num}." "$LINENO" 5 else { $as_echo "$as_me:${as_lineno-$LINENO}: building against PostgreSQL $version_num" >&5 $as_echo "$as_me: building against PostgreSQL $version_num" >&6;} fi; # Check whether we're building inside the source tree, if not, prepare # the build directory. if test "$srcdir" -ef '.' ; then vpath_build=no else vpath_build=yes $as_echo_n "preparing build tree... " >&6 citusac_abs_top_srcdir=`cd "$srcdir" && pwd` $SHELL "$citusac_abs_top_srcdir/prep_buildtree" "$citusac_abs_top_srcdir" "." \ || as_fn_error $? "failed" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 $as_echo "done" >&6; } fi # Allow to overwrite the C compiler, default to the one postgres was # compiled with. We don't want autoconf's default CFLAGS though, so save # those. SAVE_CFLAGS="$CFLAGS" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then for ac_prog in $($PG_CONFIG --cc) do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in $($PG_CONFIG --cc) do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... 
" >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. 
for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CFLAGS="$SAVE_CFLAGS" # Locate source and build directory of the postgres we're building # against. Can't rely on either still being present, but e.g. optional # test infrastructure can rely on it. POSTGRES_SRCDIR=$(grep ^abs_top_srcdir $(dirname $($PG_CONFIG --pgxs))/../Makefile.global|cut -d ' ' -f3-) POSTGRES_BUILDDIR=$(grep ^abs_top_builddir $(dirname $($PG_CONFIG --pgxs))/../Makefile.global|cut -d ' ' -f3-) # check for a number of CFLAGS that make development easier # CITUSAC_PROG_CC_CFLAGS_OPT # ----------------------- # Given a string, check if the compiler supports the string as a # command-line option. If it does, add the string to CFLAGS. # CITUSAC_PROG_CC_CFLAGS_OPT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wall" >&5 $as_echo_n "checking whether $CC supports -Wall... " >&6; } if ${citusac_cv_prog_cc_cflags__Wall+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wall" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wall=yes else citusac_cv_prog_cc_cflags__Wall=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wall" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wall" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wall" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wall" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wextra" >&5 $as_echo_n "checking whether $CC supports -Wextra... " >&6; } if ${citusac_cv_prog_cc_cflags__Wextra+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wextra" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wextra=yes else citusac_cv_prog_cc_cflags__Wextra=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wextra" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wextra" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wextra" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wextra" fi # disarm options included in the above, which are too noisy for now { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wno-unused-parameter" >&5 $as_echo_n "checking whether $CC supports -Wno-unused-parameter... " >&6; } if ${citusac_cv_prog_cc_cflags__Wno_unused_parameter+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wno-unused-parameter" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wno_unused_parameter=yes else citusac_cv_prog_cc_cflags__Wno_unused_parameter=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wno_unused_parameter" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wno_unused_parameter" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wno_unused_parameter" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wno-unused-parameter" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wno-sign-compare" >&5 $as_echo_n "checking whether $CC supports -Wno-sign-compare... " >&6; } if ${citusac_cv_prog_cc_cflags__Wno_sign_compare+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wno-sign-compare" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wno_sign_compare=yes else citusac_cv_prog_cc_cflags__Wno_sign_compare=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wno_sign_compare" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wno_sign_compare" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wno_sign_compare" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wno-sign-compare" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wno-missing-field-initializers" >&5 $as_echo_n "checking whether $CC supports -Wno-missing-field-initializers... " >&6; } if ${citusac_cv_prog_cc_cflags__Wno_missing_field_initializers+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wno-missing-field-initializers" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wno_missing_field_initializers=yes else citusac_cv_prog_cc_cflags__Wno_missing_field_initializers=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wno_missing_field_initializers" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wno_missing_field_initializers" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wno_missing_field_initializers" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wno-missing-field-initializers" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wno-clobbered" >&5 $as_echo_n "checking whether $CC supports -Wno-clobbered... " >&6; } if ${citusac_cv_prog_cc_cflags__Wno_clobbered+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wno-clobbered" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wno_clobbered=yes else citusac_cv_prog_cc_cflags__Wno_clobbered=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wno_clobbered" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wno_clobbered" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wno_clobbered" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wno-clobbered" fi # And add a few extra warnings { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wdeclaration-after-statement" >&5 $as_echo_n "checking whether $CC supports -Wdeclaration-after-statement... " >&6; } if ${citusac_cv_prog_cc_cflags__Wdeclaration_after_statement+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wdeclaration-after-statement" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wdeclaration_after_statement=yes else citusac_cv_prog_cc_cflags__Wdeclaration_after_statement=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wdeclaration_after_statement" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wdeclaration_after_statement" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wdeclaration_after_statement" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wdeclaration-after-statement" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wendif-labels" >&5 $as_echo_n "checking whether $CC supports -Wendif-labels... " >&6; } if ${citusac_cv_prog_cc_cflags__Wendif_labels+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wendif-labels" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wendif_labels=yes else citusac_cv_prog_cc_cflags__Wendif_labels=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wendif_labels" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wendif_labels" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wendif_labels" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wendif-labels" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wmissing-format-attribute" >&5 $as_echo_n "checking whether $CC supports -Wmissing-format-attribute... " >&6; } if ${citusac_cv_prog_cc_cflags__Wmissing_format_attribute+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wmissing-format-attribute" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wmissing_format_attribute=yes else citusac_cv_prog_cc_cflags__Wmissing_format_attribute=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wmissing_format_attribute" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wmissing_format_attribute" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wmissing_format_attribute" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wmissing-format-attribute" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wmissing-declarations" >&5 $as_echo_n "checking whether $CC supports -Wmissing-declarations... " >&6; } if ${citusac_cv_prog_cc_cflags__Wmissing_declarations+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wmissing-declarations" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wmissing_declarations=yes else citusac_cv_prog_cc_cflags__Wmissing_declarations=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wmissing_declarations" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wmissing_declarations" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wmissing_declarations" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wmissing-declarations" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wmissing-prototypes" >&5 $as_echo_n "checking whether $CC supports -Wmissing-prototypes... " >&6; } if ${citusac_cv_prog_cc_cflags__Wmissing_prototypes+:} false; then : $as_echo_n "(cached) " >&6 else citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS -Wmissing-prototypes" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : citusac_cv_prog_cc_cflags__Wmissing_prototypes=yes else citusac_cv_prog_cc_cflags__Wmissing_prototypes=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $citusac_cv_prog_cc_cflags__Wmissing_prototypes" >&5 $as_echo "$citusac_cv_prog_cc_cflags__Wmissing_prototypes" >&6; } if test x"$citusac_cv_prog_cc_cflags__Wmissing_prototypes" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS -Wmissing-prototypes" fi # # --enable-coverage enables generation of code coverage metrics with gcov # # Check whether --enable-coverage was given. if test "${enable_coverage+set}" = set; then : enableval=$enable_coverage; fi if test "$enable_coverage" = yes; then CITUS_CFLAGS="$CITUS_CFLAGS -fprofile-arcs -ftest-coverage" fi CITUS_CFLAGS="$CITUS_CFLAGS" POSTGRES_SRCDIR="$POSTGRES_SRCDIR" POSTGRES_BUILDDIR="$POSTGRES_BUILDDIR" ac_config_files="$ac_config_files Makefile.global" ac_config_headers="$ac_config_headers src/include/citus_config.h src/include/citus_version.h" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. 
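# A rough sketch of what the cache filter below emits, assuming a
# hypothetical cached result ac_cv_path_SED=/usr/bin/sed: the final sed pass
# rewrites that line as
#   ac_cv_path_SED=${ac_cv_path_SED=/usr/bin/sed}
# so a later run invoked with --config-cache can source the file and reuse
# the value without overriding one already set in the environment.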
( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. 
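# Usage sketch (hypothetical arguments): a call such as
#   as_fn_error 77 "C compiler cannot create executables" "$LINENO" 5
# writes "$as_me:<line>: error: ..." to fd 5 (config.log), repeats the error
# on stderr, and exits via as_fn_exit with status 77 (a 0 status is bumped to 1).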
as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by Citus $as_me 7.0.3, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... 
-h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ Citus config.status 7.0.3 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. 
## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "Makefile.global") CONFIG_FILES="$CONFIG_FILES Makefile.global" ;; "src/include/citus_config.h") CONFIG_HEADERS="$CONFIG_HEADERS src/include/citus_config.h" ;; "src/include/citus_version.h") CONFIG_HEADERS="$CONFIG_HEADERS src/include/citus_version.h" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. 
ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi citus-7.0.3/configure.in000066400000000000000000000135611317107136600151740ustar00rootroot00000000000000# Citus autoconf input script. # # Converted into an actual configure script by autogen.sh. This # conversion only has to be done when configure.in changes. To avoid # everyone needing autoconf installed, the resulting files are checked # into the SCM. 
AC_INIT([Citus], [7.0.3]) AC_COPYRIGHT([Copyright (c) 2012-2017, Citus Data, Inc.]) # we'll need sed and awk for some of the version commands AC_PROG_SED AC_PROG_AWK # CITUS_VERSION definition AC_DEFINE_UNQUOTED(CITUS_VERSION, "$PACKAGE_VERSION", [Citus version as a string]) # CITUS_MAJORVERSION definition [CITUS_MAJORVERSION=`expr "$PACKAGE_VERSION" : '\([0-9][0-9]*\.[0-9][0-9]*\)'`] AC_DEFINE_UNQUOTED(CITUS_MAJORVERSION, "$CITUS_MAJORVERSION", [Citus major version as a string]) # CITUS_VERSION_NUM definition # awk -F is a regex on some platforms, and not on others, so make "." a tab [CITUS_VERSION_NUM="`echo "$PACKAGE_VERSION" | sed 's/[A-Za-z].*$//' | tr '.' ' ' | $AWK '{printf "%d%02d%02d", $1, $2, (NF >= 3) ? $3 : 0}'`"] AC_DEFINE_UNQUOTED(CITUS_VERSION_NUM, $CITUS_VERSION_NUM, [Citus version as a number]) # CITUS_EXTENSIONVERSION definition [CITUS_EXTENSIONVERSION="`grep '^default_version' $srcdir/src/backend/distributed/citus.control | cut -d\' -f2`"] AC_DEFINE_UNQUOTED([CITUS_EXTENSIONVERSION], "$CITUS_EXTENSIONVERSION", [Extension version expected by this Citus build]) # Re-check for flex. That allows to compile citus against a postgres # which was built without flex available (possible because generated # files are included) AC_PATH_PROG([FLEX], [flex]) # Locate pg_config binary AC_ARG_VAR([PG_CONFIG], [Location to find pg_config for target PostgreSQL installation (default PATH)]) AC_ARG_VAR([PATH], [PATH for target PostgreSQL install pg_config]) if test -z "$PG_CONFIG"; then AC_PATH_PROG(PG_CONFIG, pg_config) fi if test -z "$PG_CONFIG"; then AC_MSG_ERROR([Could not find pg_config. Set PG_CONFIG or PATH.]) fi # check we're building against a supported version of PostgreSQL citusac_pg_config_version=$($PG_CONFIG --version 2>/dev/null) version_num=$(echo "$citusac_pg_config_version"| $SED -e 's/^PostgreSQL \([[0-9]]*\)\(\.[[0-9]]*\)\{0,1\}\(.*\)$/\1\2/') # if PostgreSQL version starts with two digits, the major version is those digits version_num=$(echo "$version_num"| $SED -e 's/^\([[0-9]]\{2\}\)\(.*\)$/\1/') if test -z "$version_num"; then AC_MSG_ERROR([Could not detect PostgreSQL version from pg_config.]) fi if test "$version_num" != '9.6' -a "$version_num" != '10'; then AC_MSG_ERROR([Citus is not compatible with the detected PostgreSQL version ${version_num}.]) else AC_MSG_NOTICE([building against PostgreSQL $version_num]) fi; # Check whether we're building inside the source tree, if not, prepare # the build directory. if test "$srcdir" -ef '.' ; then vpath_build=no else vpath_build=yes _AS_ECHO_N([preparing build tree... ]) citusac_abs_top_srcdir=`cd "$srcdir" && pwd` $SHELL "$citusac_abs_top_srcdir/prep_buildtree" "$citusac_abs_top_srcdir" "." \ || AC_MSG_ERROR(failed) AC_MSG_RESULT(done) fi AC_SUBST(vpath_build) # Allow to overwrite the C compiler, default to the one postgres was # compiled with. We don't want autoconf's default CFLAGS though, so save # those. SAVE_CFLAGS="$CFLAGS" AC_PROG_CC([$($PG_CONFIG --cc)]) CFLAGS="$SAVE_CFLAGS" # Locate source and build directory of the postgres we're building # against. Can't rely on either still being present, but e.g. optional # test infrastructure can rely on it.
POSTGRES_SRCDIR=$(grep ^abs_top_srcdir $(dirname $($PG_CONFIG --pgxs))/../Makefile.global|cut -d ' ' -f3-) POSTGRES_BUILDDIR=$(grep ^abs_top_builddir $(dirname $($PG_CONFIG --pgxs))/../Makefile.global|cut -d ' ' -f3-) # check for a number of CFLAGS that make development easier # CITUSAC_PROG_CC_CFLAGS_OPT # ----------------------- # Given a string, check if the compiler supports the string as a # command-line option. If it does, add the string to CFLAGS. AC_DEFUN([CITUSAC_PROG_CC_CFLAGS_OPT], [define([Ac_cachevar], [AS_TR_SH([citusac_cv_prog_cc_cflags_$1])])dnl AC_CACHE_CHECK([whether $CC supports $1], [Ac_cachevar], [citusac_save_CFLAGS=$CFLAGS CFLAGS="$citusac_save_CFLAGS $1" ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes _AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [Ac_cachevar=yes], [Ac_cachevar=no]) ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="$citusac_save_CFLAGS"]) if test x"$Ac_cachevar" = x"yes"; then CITUS_CFLAGS="$CITUS_CFLAGS $1" fi undefine([Ac_cachevar])dnl ])# CITUSAC_PROG_CC_CFLAGS_OPT CITUSAC_PROG_CC_CFLAGS_OPT([-Wall]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wextra]) # disarm options included in the above, which are too noisy for now CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-unused-parameter]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-sign-compare]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-missing-field-initializers]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wno-clobbered]) # And add a few extra warnings CITUSAC_PROG_CC_CFLAGS_OPT([-Wdeclaration-after-statement]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wendif-labels]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wmissing-format-attribute]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wmissing-declarations]) CITUSAC_PROG_CC_CFLAGS_OPT([-Wmissing-prototypes]) # # --enable-coverage enables generation of code coverage metrics with gcov # AC_ARG_ENABLE([coverage], AS_HELP_STRING([--enable-coverage], [build with coverage testing instrumentation])) if test "$enable_coverage" = yes; then CITUS_CFLAGS="$CITUS_CFLAGS -fprofile-arcs -ftest-coverage" fi AC_SUBST(CITUS_CFLAGS, "$CITUS_CFLAGS") AC_SUBST(POSTGRES_SRCDIR, "$POSTGRES_SRCDIR") AC_SUBST(POSTGRES_BUILDDIR, "$POSTGRES_BUILDDIR") AC_CONFIG_FILES([Makefile.global]) AC_CONFIG_HEADERS([src/include/citus_config.h] [src/include/citus_version.h]) AH_TOP([ /* * citus_config.h.in is generated by autoconf/autoheader and * converted into citus_config.h by configure. Include when code needs to * depend on determinations made by configure. * * Do not manually edit! 
*/ ]) AC_OUTPUT citus-7.0.3/github-banner.png000066400000000000000000000100531317107136600161070ustar00rootroot00000000000000[binary PNG image data for github-banner.png omitted] citus-7.0.3/prep_buildtree000066400000000000000000000025231317107136600156070ustar00rootroot00000000000000#! /bin/sh # # Citus copy of PostgreSQL's config/prep_buildtree # # This script prepares a Citus build tree for an out-of-tree/VPATH # build. It is intended to be run by the configure script. me=`basename $0` help="\ Usage: $me sourcetree [buildtree]" if test -z "$1"; then echo "$help" 1>&2 exit 1 elif test x"$1" = x"--help"; then echo "$help" exit 0 fi unset CDPATH sourcetree=`cd $1 && pwd` buildtree=`cd ${2:-'.'} && pwd` # We must not auto-create the subdirectories holding built documentation.
# If we did, it would interfere with installation of prebuilt docs from # the source tree, if a VPATH build is done from a distribution tarball. # See bug #5595. for item in `find "$sourcetree" -type d \( \( -name CVS -prune \) -o \( -name .git -prune \) -o -print \) | grep -v "$sourcetree/doc/src/sgml/\+"`; do subdir=`expr "$item" : "$sourcetree\(.*\)"` if test ! -d "$buildtree/$subdir"; then mkdir -p "$buildtree/$subdir" || exit 1 fi done for item in `find "$sourcetree" -not -path '*/.git/hg/*' \( -name Makefile -print -o -name GNUmakefile -print \)`; do filename=`expr "$item" : "$sourcetree\(.*\)"` if test ! -f "${item}.in"; then if cmp "$item" "$buildtree/$filename" >/dev/null 2>&1; then : ; else ln -fs "$item" "$buildtree/$filename" || exit 1 fi fi done exit 0 citus-7.0.3/src/000077500000000000000000000000001317107136600134445ustar00rootroot00000000000000citus-7.0.3/src/backend/000077500000000000000000000000001317107136600150335ustar00rootroot00000000000000citus-7.0.3/src/backend/.gitignore000066400000000000000000000000001317107136600170110ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/000077500000000000000000000000001317107136600173555ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/.gitignore000066400000000000000000000003661317107136600213520ustar00rootroot00000000000000# ==================== # = Project-Specific = # ==================== # regression test detritus /log/ /regression.diffs /regression.out /results/ /tmp_check* # ignore latest install file citus--?.?.sql citus--?.?-*.sql !citus--?.?-*--?.?-*.sql citus-7.0.3/src/backend/distributed/Makefile000066400000000000000000000174641317107136600210310ustar00rootroot00000000000000# Makefile for the Citus extension citus_subdir = src/backend/distributed citus_top_builddir = ../../.. MODULE_big = citus EXTENSION = citus EXTVERSIONS = 5.0 5.0-1 5.0-2 \ 5.1-1 5.1-2 5.1-3 5.1-4 5.1-5 5.1-6 5.1-7 5.1-8 \ 5.2-1 5.2-2 5.2-3 5.2-4 \ 6.0-1 6.0-2 6.0-3 6.0-4 6.0-5 6.0-6 6.0-7 6.0-8 6.0-9 6.0-10 6.0-11 6.0-12 6.0-13 6.0-14 6.0-15 6.0-16 6.0-17 6.0-18 \ 6.1-1 6.1-2 6.1-3 6.1-4 6.1-5 6.1-6 6.1-7 6.1-8 6.1-9 6.1-10 6.1-11 6.1-12 6.1-13 6.1-14 6.1-15 6.1-16 6.1-17 \ 6.2-1 6.2-2 6.2-3 6.2-4 \ 7.0-1 7.0-2 7.0-3 7.0-4 7.0-5 7.0-6 7.0-7 7.0-8 7.0-9 7.0-10 7.0-11 7.0-12 7.0-13 7.0-14 7.0-15 # All citus--*.sql files in the source directory DATA = $(patsubst $(citus_abs_srcdir)/%.sql,%.sql,$(wildcard $(citus_abs_srcdir)/$(EXTENSION)--*--*.sql)) # Generated files for each version DATA_built = $(foreach v,$(EXTVERSIONS),$(EXTENSION)--$(v).sql) # directories with source files SUBDIRS = . 
commands connection executor master metadata planner progress relay test transaction utils worker # That patsubst rule searches all directories listed in SUBDIRS for .c # files, and adds the corresponding .o files to OBJS OBJS += \ $(patsubst $(citus_abs_srcdir)/%.c,%.o,$(foreach dir,$(SUBDIRS), $(sort $(wildcard $(citus_abs_srcdir)/$(dir)/*.c)))) # be explicit about the default target all: # generate each version's file installation file by concatenating # previous upgrade scripts $(EXTENSION)--5.0.sql: $(EXTENSION).sql cat $^ > $@ $(EXTENSION)--5.0-1.sql: $(EXTENSION)--5.0.sql $(EXTENSION)--5.0--5.0-1.sql cat $^ > $@ $(EXTENSION)--5.0-2.sql: $(EXTENSION)--5.0-1.sql $(EXTENSION)--5.0-1--5.0-2.sql cat $^ > $@ $(EXTENSION)--5.1-1.sql: $(EXTENSION)--5.0-2.sql $(EXTENSION)--5.0-2--5.1-1.sql cat $^ > $@ $(EXTENSION)--5.1-2.sql: $(EXTENSION)--5.1-1.sql $(EXTENSION)--5.1-1--5.1-2.sql cat $^ > $@ $(EXTENSION)--5.1-3.sql: $(EXTENSION)--5.1-2.sql $(EXTENSION)--5.1-2--5.1-3.sql cat $^ > $@ $(EXTENSION)--5.1-4.sql: $(EXTENSION)--5.1-3.sql $(EXTENSION)--5.1-3--5.1-4.sql cat $^ > $@ $(EXTENSION)--5.1-5.sql: $(EXTENSION)--5.1-4.sql $(EXTENSION)--5.1-4--5.1-5.sql cat $^ > $@ $(EXTENSION)--5.1-6.sql: $(EXTENSION)--5.1-5.sql $(EXTENSION)--5.1-5--5.1-6.sql cat $^ > $@ $(EXTENSION)--5.1-7.sql: $(EXTENSION)--5.1-6.sql $(EXTENSION)--5.1-6--5.1-7.sql cat $^ > $@ $(EXTENSION)--5.1-8.sql: $(EXTENSION)--5.1-7.sql $(EXTENSION)--5.1-7--5.1-8.sql cat $^ > $@ $(EXTENSION)--5.2-1.sql: $(EXTENSION)--5.1-8.sql $(EXTENSION)--5.1-8--5.2-1.sql cat $^ > $@ $(EXTENSION)--5.2-2.sql: $(EXTENSION)--5.2-1.sql $(EXTENSION)--5.2-1--5.2-2.sql cat $^ > $@ $(EXTENSION)--5.2-3.sql: $(EXTENSION)--5.2-2.sql $(EXTENSION)--5.2-2--5.2-3.sql cat $^ > $@ $(EXTENSION)--5.2-4.sql: $(EXTENSION)--5.2-3.sql $(EXTENSION)--5.2-3--5.2-4.sql cat $^ > $@ $(EXTENSION)--6.0-1.sql: $(EXTENSION)--5.2-4.sql $(EXTENSION)--5.2-4--6.0-1.sql cat $^ > $@ $(EXTENSION)--6.0-2.sql: $(EXTENSION)--6.0-1.sql $(EXTENSION)--6.0-1--6.0-2.sql cat $^ > $@ $(EXTENSION)--6.0-3.sql: $(EXTENSION)--6.0-2.sql $(EXTENSION)--6.0-2--6.0-3.sql cat $^ > $@ $(EXTENSION)--6.0-4.sql: $(EXTENSION)--6.0-3.sql $(EXTENSION)--6.0-3--6.0-4.sql cat $^ > $@ $(EXTENSION)--6.0-5.sql: $(EXTENSION)--6.0-4.sql $(EXTENSION)--6.0-4--6.0-5.sql cat $^ > $@ $(EXTENSION)--6.0-6.sql: $(EXTENSION)--6.0-5.sql $(EXTENSION)--6.0-5--6.0-6.sql cat $^ > $@ $(EXTENSION)--6.0-7.sql: $(EXTENSION)--6.0-6.sql $(EXTENSION)--6.0-6--6.0-7.sql cat $^ > $@ $(EXTENSION)--6.0-8.sql: $(EXTENSION)--6.0-7.sql $(EXTENSION)--6.0-7--6.0-8.sql cat $^ > $@ $(EXTENSION)--6.0-9.sql: $(EXTENSION)--6.0-8.sql $(EXTENSION)--6.0-8--6.0-9.sql cat $^ > $@ $(EXTENSION)--6.0-10.sql: $(EXTENSION)--6.0-9.sql $(EXTENSION)--6.0-9--6.0-10.sql cat $^ > $@ $(EXTENSION)--6.0-11.sql: $(EXTENSION)--6.0-10.sql $(EXTENSION)--6.0-10--6.0-11.sql cat $^ > $@ $(EXTENSION)--6.0-12.sql: $(EXTENSION)--6.0-11.sql $(EXTENSION)--6.0-11--6.0-12.sql cat $^ > $@ $(EXTENSION)--6.0-13.sql: $(EXTENSION)--6.0-12.sql $(EXTENSION)--6.0-12--6.0-13.sql cat $^ > $@ $(EXTENSION)--6.0-14.sql: $(EXTENSION)--6.0-13.sql $(EXTENSION)--6.0-13--6.0-14.sql cat $^ > $@ $(EXTENSION)--6.0-15.sql: $(EXTENSION)--6.0-14.sql $(EXTENSION)--6.0-14--6.0-15.sql cat $^ > $@ $(EXTENSION)--6.0-16.sql: $(EXTENSION)--6.0-15.sql $(EXTENSION)--6.0-15--6.0-16.sql cat $^ > $@ $(EXTENSION)--6.0-17.sql: $(EXTENSION)--6.0-16.sql $(EXTENSION)--6.0-16--6.0-17.sql cat $^ > $@ $(EXTENSION)--6.0-18.sql: $(EXTENSION)--6.0-17.sql $(EXTENSION)--6.0-17--6.0-18.sql cat $^ > $@ $(EXTENSION)--6.1-1.sql: 
$(EXTENSION)--6.0-18.sql $(EXTENSION)--6.0-18--6.1-1.sql cat $^ > $@ $(EXTENSION)--6.1-2.sql: $(EXTENSION)--6.1-1.sql $(EXTENSION)--6.1-1--6.1-2.sql cat $^ > $@ $(EXTENSION)--6.1-3.sql: $(EXTENSION)--6.1-2.sql $(EXTENSION)--6.1-2--6.1-3.sql cat $^ > $@ $(EXTENSION)--6.1-4.sql: $(EXTENSION)--6.1-3.sql $(EXTENSION)--6.1-3--6.1-4.sql cat $^ > $@ $(EXTENSION)--6.1-5.sql: $(EXTENSION)--6.1-4.sql $(EXTENSION)--6.1-4--6.1-5.sql cat $^ > $@ $(EXTENSION)--6.1-6.sql: $(EXTENSION)--6.1-5.sql $(EXTENSION)--6.1-5--6.1-6.sql cat $^ > $@ $(EXTENSION)--6.1-7.sql: $(EXTENSION)--6.1-6.sql $(EXTENSION)--6.1-6--6.1-7.sql cat $^ > $@ $(EXTENSION)--6.1-8.sql: $(EXTENSION)--6.1-7.sql $(EXTENSION)--6.1-7--6.1-8.sql cat $^ > $@ $(EXTENSION)--6.1-9.sql: $(EXTENSION)--6.1-8.sql $(EXTENSION)--6.1-8--6.1-9.sql cat $^ > $@ $(EXTENSION)--6.1-10.sql: $(EXTENSION)--6.1-9.sql $(EXTENSION)--6.1-9--6.1-10.sql cat $^ > $@ $(EXTENSION)--6.1-11.sql: $(EXTENSION)--6.1-10.sql $(EXTENSION)--6.1-10--6.1-11.sql cat $^ > $@ $(EXTENSION)--6.1-12.sql: $(EXTENSION)--6.1-11.sql $(EXTENSION)--6.1-11--6.1-12.sql cat $^ > $@ $(EXTENSION)--6.1-13.sql: $(EXTENSION)--6.1-12.sql $(EXTENSION)--6.1-12--6.1-13.sql cat $^ > $@ $(EXTENSION)--6.1-14.sql: $(EXTENSION)--6.1-13.sql $(EXTENSION)--6.1-13--6.1-14.sql cat $^ > $@ $(EXTENSION)--6.1-15.sql: $(EXTENSION)--6.1-14.sql $(EXTENSION)--6.1-14--6.1-15.sql cat $^ > $@ $(EXTENSION)--6.1-16.sql: $(EXTENSION)--6.1-15.sql $(EXTENSION)--6.1-15--6.1-16.sql cat $^ > $@ $(EXTENSION)--6.1-17.sql: $(EXTENSION)--6.1-16.sql $(EXTENSION)--6.1-16--6.1-17.sql cat $^ > $@ $(EXTENSION)--6.2-1.sql: $(EXTENSION)--6.1-17.sql $(EXTENSION)--6.1-17--6.2-1.sql cat $^ > $@ $(EXTENSION)--6.2-2.sql: $(EXTENSION)--6.2-1.sql $(EXTENSION)--6.2-1--6.2-2.sql cat $^ > $@ $(EXTENSION)--6.2-3.sql: $(EXTENSION)--6.2-2.sql $(EXTENSION)--6.2-2--6.2-3.sql cat $^ > $@ $(EXTENSION)--6.2-4.sql: $(EXTENSION)--6.2-3.sql $(EXTENSION)--6.2-3--6.2-4.sql cat $^ > $@ $(EXTENSION)--7.0-1.sql: $(EXTENSION)--6.2-4.sql $(EXTENSION)--6.2-4--7.0-1.sql cat $^ > $@ $(EXTENSION)--7.0-2.sql: $(EXTENSION)--7.0-1.sql $(EXTENSION)--7.0-1--7.0-2.sql cat $^ > $@ $(EXTENSION)--7.0-3.sql: $(EXTENSION)--7.0-2.sql $(EXTENSION)--7.0-2--7.0-3.sql cat $^ > $@ $(EXTENSION)--7.0-4.sql: $(EXTENSION)--7.0-3.sql $(EXTENSION)--7.0-3--7.0-4.sql cat $^ > $@ $(EXTENSION)--7.0-5.sql: $(EXTENSION)--7.0-4.sql $(EXTENSION)--7.0-4--7.0-5.sql cat $^ > $@ $(EXTENSION)--7.0-6.sql: $(EXTENSION)--7.0-5.sql $(EXTENSION)--7.0-5--7.0-6.sql cat $^ > $@ $(EXTENSION)--7.0-7.sql: $(EXTENSION)--7.0-6.sql $(EXTENSION)--7.0-6--7.0-7.sql cat $^ > $@ $(EXTENSION)--7.0-8.sql: $(EXTENSION)--7.0-7.sql $(EXTENSION)--7.0-7--7.0-8.sql cat $^ > $@ $(EXTENSION)--7.0-9.sql: $(EXTENSION)--7.0-8.sql $(EXTENSION)--7.0-8--7.0-9.sql cat $^ > $@ $(EXTENSION)--7.0-10.sql: $(EXTENSION)--7.0-9.sql $(EXTENSION)--7.0-9--7.0-10.sql cat $^ > $@ $(EXTENSION)--7.0-11.sql: $(EXTENSION)--7.0-10.sql $(EXTENSION)--7.0-10--7.0-11.sql cat $^ > $@ $(EXTENSION)--7.0-12.sql: $(EXTENSION)--7.0-11.sql $(EXTENSION)--7.0-11--7.0-12.sql cat $^ > $@ $(EXTENSION)--7.0-13.sql: $(EXTENSION)--7.0-12.sql $(EXTENSION)--7.0-12--7.0-13.sql cat $^ > $@ $(EXTENSION)--7.0-14.sql: $(EXTENSION)--7.0-13.sql $(EXTENSION)--7.0-13--7.0-14.sql cat $^ > $@ $(EXTENSION)--7.0-15.sql: $(EXTENSION)--7.0-14.sql $(EXTENSION)--7.0-14--7.0-15.sql cat $^ > $@ NO_PGXS = 1 SHLIB_LINK = $(libpq) include $(citus_top_builddir)/Makefile.global override CPPFLAGS += -I$(libpq_srcdir) 
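# Illustrative usage (not part of the build rules; assumes the top-level
# configure run has already generated Makefile.global): a single versioned
# extension script can be rebuilt from this directory with
#
#   make citus--7.0-15.sql
#
# which, through the rule chain above, concatenates citus.sql with every
# upgrade script up to that version.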
citus-7.0.3/src/backend/distributed/citus--5.0--5.0-1.sql000066400000000000000000000004111317107136600223110ustar00rootroot00000000000000/* citus--5.0--5.0-1.sql */ ALTER FUNCTION pg_catalog.citus_drop_trigger() SECURITY DEFINER; GRANT SELECT ON pg_catalog.pg_dist_partition TO public; GRANT SELECT ON pg_catalog.pg_dist_shard TO public; GRANT SELECT ON pg_catalog.pg_dist_shard_placement TO public; citus-7.0.3/src/backend/distributed/citus--5.0-1--5.0-2.sql000066400000000000000000000005071317107136600224560ustar00rootroot00000000000000/* citus--5.0-1--5.0-2.sql */ CREATE FUNCTION master_update_shard_statistics(shard_id bigint) RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_update_shard_statistics$$; COMMENT ON FUNCTION master_update_shard_statistics(bigint) IS 'updates shard statistics and returns the updated shard size'; citus-7.0.3/src/backend/distributed/citus--5.0-2--5.1-1.sql000066400000000000000000000001311317107136600224500ustar00rootroot00000000000000/* citus--5.0-2--5.1-1.sql */ /* empty, but required to update the extension version */ citus-7.0.3/src/backend/distributed/citus--5.1-1--5.1-2.sql000066400000000000000000000004201317107136600224520ustar00rootroot00000000000000CREATE FUNCTION pg_catalog.master_modify_multiple_shards(text) RETURNS integer LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_modify_multiple_shards$$; COMMENT ON FUNCTION master_modify_multiple_shards(text) IS 'push delete and update queries to shards';citus-7.0.3/src/backend/distributed/citus--5.1-2--5.1-3.sql000066400000000000000000000006171317107136600224640ustar00rootroot00000000000000DROP FUNCTION IF EXISTS public.master_update_shard_statistics(shard_id bigint); CREATE OR REPLACE FUNCTION pg_catalog.master_update_shard_statistics(shard_id bigint) RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_update_shard_statistics$$; COMMENT ON FUNCTION master_update_shard_statistics(bigint) IS 'updates shard statistics and returns the updated shard size'; citus-7.0.3/src/backend/distributed/citus--5.1-3--5.1-4.sql000066400000000000000000000006271317107136600224670ustar00rootroot00000000000000DROP FUNCTION IF EXISTS pg_catalog.worker_apply_shard_ddl_command(bigint, text); CREATE OR REPLACE FUNCTION pg_catalog.worker_apply_shard_ddl_command(bigint, text, text) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_apply_shard_ddl_command$$; COMMENT ON FUNCTION worker_apply_shard_ddl_command(bigint, text, text) IS 'extend ddl command with shardId and apply on database'; citus-7.0.3/src/backend/distributed/citus--5.1-4--5.1-5.sql000066400000000000000000000016021317107136600224630ustar00rootroot00000000000000DROP FUNCTION IF EXISTS pg_catalog.worker_fetch_foreign_file(text, bigint, text[], integer[]); CREATE OR REPLACE FUNCTION pg_catalog.worker_fetch_foreign_file(text, text, bigint, text[], integer[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_foreign_file$$; COMMENT ON FUNCTION pg_catalog.worker_fetch_foreign_file(text, text, bigint, text[], integer[]) IS 'fetch foreign file from remote node and apply file'; DROP FUNCTION IF EXISTS pg_catalog.worker_fetch_regular_table(text, bigint, text[], integer[]); CREATE OR REPLACE FUNCTION pg_catalog.worker_fetch_regular_table(text, text, bigint, text[], integer[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_regular_table$$; COMMENT ON FUNCTION pg_catalog.worker_fetch_regular_table(text, text, bigint, text[], integer[]) IS 'fetch PostgreSQL table from remote node'; 
citus-7.0.3/src/backend/distributed/citus--5.1-5--5.1-6.sql000066400000000000000000000023021317107136600224630ustar00rootroot00000000000000CREATE OR REPLACE FUNCTION pg_catalog.worker_apply_shard_ddl_command(bigint, text) RETURNS void LANGUAGE sql AS $worker_apply_shard_ddl_command$ SELECT pg_catalog.worker_apply_shard_ddl_command($1, 'public', $2); $worker_apply_shard_ddl_command$; COMMENT ON FUNCTION worker_apply_shard_ddl_command(bigint, text) IS 'extend ddl command with shardId and apply on database'; CREATE OR REPLACE FUNCTION pg_catalog.worker_fetch_foreign_file(text, bigint, text[], integer[]) RETURNS void LANGUAGE sql AS $worker_fetch_foreign_file$ SELECT pg_catalog.worker_fetch_foreign_file('public', $1, $2, $3, $4); $worker_fetch_foreign_file$; COMMENT ON FUNCTION pg_catalog.worker_fetch_foreign_file(text, bigint, text[], integer[]) IS 'fetch foreign file from remote node and apply file'; CREATE OR REPLACE FUNCTION pg_catalog.worker_fetch_regular_table(text, bigint, text[], integer[]) RETURNS void LANGUAGE sql AS $worker_fetch_regular_table$ SELECT pg_catalog.worker_fetch_regular_table('public', $1, $2, $3, $4); $worker_fetch_regular_table$; COMMENT ON FUNCTION pg_catalog.worker_fetch_regular_table(text, bigint, text[], integer[]) IS 'fetch PostgreSQL table from remote node'; citus-7.0.3/src/backend/distributed/citus--5.1-6--5.1-7.sql000066400000000000000000000015661317107136600225000ustar00rootroot00000000000000DROP FUNCTION IF EXISTS pg_catalog.worker_fetch_foreign_file(text, text, bigint, text[], integer[]); CREATE OR REPLACE FUNCTION pg_catalog.worker_fetch_foreign_file(text, bigint, text[], integer[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_foreign_file$$; COMMENT ON FUNCTION pg_catalog.worker_fetch_foreign_file(text, bigint, text[], integer[]) IS 'fetch foreign file from remote node and apply file'; DROP FUNCTION IF EXISTS pg_catalog.worker_fetch_regular_table(text, text, bigint, text[], integer[]); CREATE OR REPLACE FUNCTION pg_catalog.worker_fetch_regular_table(text, bigint, text[], integer[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_regular_table$$; COMMENT ON FUNCTION pg_catalog.worker_fetch_regular_table(text, bigint, text[], integer[]) IS 'fetch PostgreSQL table from remote node'; citus-7.0.3/src/backend/distributed/citus--5.1-7--5.1-8.sql000066400000000000000000000044711317107136600225000ustar00rootroot00000000000000CREATE FUNCTION pg_catalog.master_drop_sequences(sequence_names text[], node_name text, node_port bigint) RETURNS bool LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_drop_sequences$$; COMMENT ON FUNCTION pg_catalog.master_drop_sequences(text[], text, bigint) IS 'drop specified sequences from a node'; REVOKE ALL ON FUNCTION pg_catalog.master_drop_sequences(text[], text, bigint) FROM PUBLIC; CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() RETURNS event_trigger LANGUAGE plpgsql SECURITY DEFINER SET search_path = pg_catalog AS $cdbdt$ DECLARE v_obj record; sequence_names text[] := '{}'; node_names text[] := '{}'; node_ports bigint[] := '{}'; node_name text; node_port bigint; BEGIN -- collect set of dropped sequences to drop on workers later SELECT array_agg(object_identity) INTO sequence_names FROM pg_event_trigger_dropped_objects() WHERE object_type = 'sequence'; -- Must accumulate set of affected nodes before deleting placements, as -- master_drop_all_shards will erase their rows, making it impossible for -- us to know where to drop sequences (which must be dropped after shards, -- since 
they have default value expressions which depend on sequences). SELECT array_agg(sp.nodename), array_agg(sp.nodeport) INTO node_names, node_ports FROM pg_event_trigger_dropped_objects() AS dobj, pg_dist_shard AS s, pg_dist_shard_placement AS sp WHERE dobj.object_type IN ('table', 'foreign table') AND dobj.objid = s.logicalrelid AND s.shardid = sp.shardid; FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP IF v_obj.object_type NOT IN ('table', 'foreign table') THEN CONTINUE; END IF; -- nothing to do if not a distributed table IF NOT EXISTS(SELECT * FROM pg_dist_partition WHERE logicalrelid = v_obj.objid) THEN CONTINUE; END IF; -- ensure all shards are dropped PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name); -- delete partition entry DELETE FROM pg_dist_partition WHERE logicalrelid = v_obj.objid; END LOOP; IF cardinality(sequence_names) = 0 THEN RETURN; END IF; FOR node_name, node_port IN SELECT DISTINCT name, port FROM unnest(node_names, node_ports) AS nodes(name, port) LOOP PERFORM master_drop_sequences(sequence_names, node_name, node_port); END LOOP; END; $cdbdt$; citus-7.0.3/src/backend/distributed/citus--5.1-8--5.2-1.sql000066400000000000000000000001311317107136600224600ustar00rootroot00000000000000/* citus--5.1-8--5.2-1.sql */ /* empty, but required to update the extension version */ citus-7.0.3/src/backend/distributed/citus--5.2-1--5.2-2.sql000066400000000000000000000012341317107136600224600ustar00rootroot00000000000000/* citus--5.2-1--5.2-2.sql */ CREATE OR REPLACE FUNCTION pg_catalog.citus_truncate_trigger() RETURNS trigger LANGUAGE plpgsql SET search_path = 'pg_catalog' AS $cdbtt$ DECLARE partitionType char; commandText text; BEGIN SELECT partmethod INTO partitionType FROM pg_dist_partition WHERE logicalrelid = TG_RELID; IF NOT FOUND THEN RETURN NEW; END IF; IF (partitionType = 'a') THEN PERFORM master_drop_all_shards(TG_RELID, TG_TABLE_SCHEMA, TG_TABLE_NAME); ELSE SELECT format('TRUNCATE TABLE %I.%I CASCADE', TG_TABLE_SCHEMA, TG_TABLE_NAME) INTO commandText; PERFORM master_modify_multiple_shards(commandText); END IF; RETURN NEW; END; $cdbtt$; citus-7.0.3/src/backend/distributed/citus--5.2-2--5.2-3.sql000066400000000000000000000002771317107136600224700ustar00rootroot00000000000000/* citus--5.2-2--5.2-3.sql */ CREATE OR REPLACE FUNCTION master_expire_table_cache(table_name regclass) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_expire_table_cache$$; citus-7.0.3/src/backend/distributed/citus--5.2-3--5.2-4.sql000066400000000000000000000003241317107136600224630ustar00rootroot00000000000000/* citus--5.2-3--5.2-4.sql */ ALTER TABLE pg_dist_partition ADD COLUMN colocationid BIGINT DEFAULT 0 NOT NULL; CREATE INDEX pg_dist_partition_colocationid_index ON pg_dist_partition using btree(colocationid); citus-7.0.3/src/backend/distributed/citus--5.2-4--6.0-1.sql000066400000000000000000000004121317107136600224560ustar00rootroot00000000000000/* citus--5.2-4--6.0-1.sql */ /* change logicalrelid type to regclass to allow implicit casts to text */ ALTER TABLE pg_catalog.pg_dist_partition ALTER COLUMN logicalrelid TYPE regclass; ALTER TABLE pg_catalog.pg_dist_shard ALTER COLUMN logicalrelid TYPE regclass; citus-7.0.3/src/backend/distributed/citus--6.0-1--6.0-2.sql000066400000000000000000000005071317107136600224600ustar00rootroot00000000000000/* citus--6.0-1--6.0-2.sql */ CREATE FUNCTION pg_catalog.shard_name(object_name regclass, shard_id bigint) RETURNS text LANGUAGE C STABLE AS 'MODULE_PATHNAME', $$shard_name$$; COMMENT ON FUNCTION 
pg_catalog.shard_name(object_name regclass, shard_id bigint) IS 'returns shard-extended version of object name'; citus-7.0.3/src/backend/distributed/citus--6.0-10--6.0-11.sql000066400000000000000000000074341317107136600226260ustar00rootroot00000000000000/* citus--6.0-10--6.0-11.sql */ SET search_path = 'pg_catalog'; CREATE SEQUENCE citus.pg_dist_colocationid_seq MINVALUE 1 MAXVALUE 4294967296; ALTER SEQUENCE citus.pg_dist_colocationid_seq SET SCHEMA pg_catalog; /* add pg_dist_colocation */ CREATE TABLE citus.pg_dist_colocation( colocationid int NOT NULL PRIMARY KEY, shardcount int NOT NULL, replicationfactor int NOT NULL, distributioncolumntype oid NOT NULL ); ALTER TABLE citus.pg_dist_colocation SET SCHEMA pg_catalog; CREATE INDEX pg_dist_colocation_configuration_index ON pg_dist_colocation USING btree(shardcount, replicationfactor, distributioncolumntype); CREATE FUNCTION create_distributed_table(table_name regclass, distribution_column text, distribution_type citus.distribution_type DEFAULT 'hash') RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$create_distributed_table$$; COMMENT ON FUNCTION create_distributed_table(table_name regclass, distribution_column text, distribution_type citus.distribution_type) IS 'creates a distributed table'; CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() RETURNS event_trigger LANGUAGE plpgsql SECURITY DEFINER SET search_path = pg_catalog AS $cdbdt$ DECLARE v_obj record; sequence_names text[] := '{}'; node_names text[] := '{}'; node_ports bigint[] := '{}'; node_name text; node_port bigint; table_colocation_id integer; BEGIN -- collect set of dropped sequences to drop on workers later SELECT array_agg(object_identity) INTO sequence_names FROM pg_event_trigger_dropped_objects() WHERE object_type = 'sequence'; -- Must accumulate set of affected nodes before deleting placements, as -- master_drop_all_shards will erase their rows, making it impossible for -- us to know where to drop sequences (which must be dropped after shards, -- since they have default value expressions which depend on sequences). 
SELECT array_agg(sp.nodename), array_agg(sp.nodeport) INTO node_names, node_ports FROM pg_event_trigger_dropped_objects() AS dobj, pg_dist_shard AS s, pg_dist_shard_placement AS sp WHERE dobj.object_type IN ('table', 'foreign table') AND dobj.objid = s.logicalrelid AND s.shardid = sp.shardid; FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP IF v_obj.object_type NOT IN ('table', 'foreign table') THEN CONTINUE; END IF; -- nothing to do if not a distributed table IF NOT EXISTS(SELECT * FROM pg_dist_partition WHERE logicalrelid = v_obj.objid) THEN CONTINUE; END IF; -- ensure all shards are dropped PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name); -- get colocation group SELECT colocationid INTO table_colocation_id FROM pg_dist_partition WHERE logicalrelid = v_obj.objid; -- delete partition entry DELETE FROM pg_dist_partition WHERE logicalrelid = v_obj.objid; -- drop colocation group if all referencing tables are dropped IF NOT EXISTS(SELECT * FROM pg_dist_partition WHERE colocationId = table_colocation_id) THEN DELETE FROM pg_dist_colocation WHERE colocationId = table_colocation_id; END IF; END LOOP; IF cardinality(sequence_names) = 0 THEN RETURN; END IF; FOR node_name, node_port IN SELECT DISTINCT name, port FROM unnest(node_names, node_ports) AS nodes(name, port) LOOP PERFORM master_drop_sequences(sequence_names, node_name, node_port); END LOOP; END; $cdbdt$; COMMENT ON FUNCTION citus_drop_trigger() IS 'perform checks and actions at the end of DROP actions'; ALTER TABLE pg_dist_partition ALTER COLUMN colocationid TYPE integer; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.0-11--6.0-12.sql000066400000000000000000000005221317107136600226170ustar00rootroot00000000000000/* citus--6.0-11--6.0-12.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION create_reference_table(table_name regclass) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$create_reference_table$$; COMMENT ON FUNCTION create_reference_table(table_name regclass) IS 'create a distributed reference table'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.0-12--6.0-13.sql000066400000000000000000000012671317107136600226300ustar00rootroot00000000000000/* citus--6.0-12--6.0-13.sql */ CREATE FUNCTION pg_catalog.worker_apply_inter_shard_ddl_command(referencing_shard bigint, referencing_schema_name text, referenced_shard bigint, referenced_schema_name text, command text) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_apply_inter_shard_ddl_command$$; COMMENT ON FUNCTION pg_catalog.worker_apply_inter_shard_ddl_command(referencing_shard bigint, referencing_schema_name text, referenced_shard bigint, referenced_schema_name text, command text) IS 'executes inter shard ddl command'; citus-7.0.3/src/backend/distributed/citus--6.0-13--6.0-14.sql000066400000000000000000000012471317107136600226300ustar00rootroot00000000000000/* citus--6.0-13--6.0-14.sql */ DO $ff$ BEGIN -- fix functions created in wrong namespace ALTER FUNCTION public.recover_prepared_transactions() SET SCHEMA pg_catalog; ALTER FUNCTION public.column_name_to_column(table_name regclass, column_name text) SET SCHEMA pg_catalog; ALTER FUNCTION public.worker_drop_distributed_table(logicalrelid Oid) SET SCHEMA pg_catalog; ALTER FUNCTION public.master_get_new_placementid() SET SCHEMA pg_catalog; ALTER FUNCTION public.master_expire_table_cache(table_name regclass) SET SCHEMA pg_catalog; -- some installations don't need this corrective, so just skip... 
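  -- (if any ALTER above raises undefined_function, this block's changes are
  --  rolled back as a whole and control passes to the handler below)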
EXCEPTION WHEN undefined_function THEN -- do nothing END $ff$; citus-7.0.3/src/backend/distributed/citus--6.0-14--6.0-15.sql000066400000000000000000000010301317107136600226200ustar00rootroot00000000000000/* citus--6.0-14--6.0-15.sql */ CREATE FUNCTION pg_catalog.master_dist_placement_cache_invalidate() RETURNS trigger LANGUAGE C AS 'MODULE_PATHNAME', $$master_dist_placement_cache_invalidate$$; COMMENT ON FUNCTION master_dist_placement_cache_invalidate() IS 'register relcache invalidation for changed placements'; CREATE TRIGGER dist_placement_cache_invalidate AFTER INSERT OR UPDATE OR DELETE ON pg_catalog.pg_dist_shard_placement FOR EACH ROW EXECUTE PROCEDURE master_dist_placement_cache_invalidate(); citus-7.0.3/src/backend/distributed/citus--6.0-15--6.0-16.sql000066400000000000000000000006701317107136600226330ustar00rootroot00000000000000/* citus--6.0-15--6.0-16.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION mark_tables_colocated(source_table_name regclass, target_table_names regclass[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$mark_tables_colocated$$; COMMENT ON FUNCTION mark_tables_colocated(source_table_name regclass, target_table_names regclass[]) IS 'mark target distributed tables as colocated with the source table'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.0-16--6.0-17.sql000066400000000000000000000033441317107136600226360ustar00rootroot00000000000000/* citus--6.0-16--6.0-17.sql */ SET search_path = 'pg_catalog'; DROP FUNCTION pg_catalog.master_copy_shard_placement(bigint, text, integer, text, integer); CREATE FUNCTION pg_catalog.master_copy_shard_placement(shard_id bigint, source_node_name text, source_node_port integer, target_node_name text, target_node_port integer, do_repair bool DEFAULT true) RETURNS void LANGUAGE C STRICT AS 'citus', $$master_copy_shard_placement$$; COMMENT ON FUNCTION pg_catalog.master_copy_shard_placement(shard_id bigint, source_node_name text, source_node_port integer, target_node_name text, target_node_port integer, do_repair bool) IS 'copy shard from remote node'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.0-17--6.0-18.sql000066400000000000000000000024101317107136600226310ustar00rootroot00000000000000/* citus--6.0-17--6.0-18.sql */ SET search_path = 'pg_catalog'; DROP FUNCTION IF EXISTS master_add_node(text, integer); CREATE FUNCTION master_add_node(nodename text, nodeport integer, OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_add_node$$; COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer) IS 'add node to the cluster'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.0-18--6.1-1.sql000066400000000000000000000006311317107136600225460ustar00rootroot00000000000000/* citus--6.0-18--6.1-1.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION start_metadata_sync_to_node(nodename text, nodeport integer) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$start_metadata_sync_to_node$$; COMMENT ON FUNCTION start_metadata_sync_to_node(nodename text, nodeport integer) IS 'sync metadata to node'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.0-2--6.0-3.sql000066400000000000000000000001711317107136600224570ustar00rootroot00000000000000/* citus--6.0-2--6.0-3.sql */ ALTER TABLE pg_catalog.pg_dist_partition ADD COLUMN repmodel "char" DEFAULT 'c' NOT NULL; 
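/*
 * Illustrative check (not part of the upgrade): the replication model of each
 * distributed table can be inspected afterwards with
 *   SELECT logicalrelid, repmodel FROM pg_catalog.pg_dist_partition;
 */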
citus-7.0.3/src/backend/distributed/citus--6.0-3--6.0-4.sql000066400000000000000000000036211317107136600224640ustar00rootroot00000000000000SET search_path = 'pg_catalog'; CREATE SEQUENCE citus.pg_dist_groupid_seq MINVALUE 1 MAXVALUE 4294967296; CREATE SEQUENCE citus.pg_dist_node_nodeid_seq MINVALUE 1 MAXVALUE 4294967296; ALTER SEQUENCE citus.pg_dist_groupid_seq SET SCHEMA pg_catalog; ALTER SEQUENCE citus.pg_dist_node_nodeid_seq SET SCHEMA pg_catalog; /* add pg_dist_node */ CREATE TABLE citus.pg_dist_node( nodeid int NOT NULL DEFAULT nextval('pg_dist_groupid_seq') PRIMARY KEY, groupid int NOT NULL DEFAULT nextval('pg_dist_node_nodeid_seq'), nodename text NOT NULL, nodeport int NOT NULL DEFAULT 5432, noderack text NOT NULL DEFAULT 'default', UNIQUE (nodename, nodeport) ); ALTER TABLE citus.pg_dist_node SET SCHEMA pg_catalog; CREATE FUNCTION master_dist_node_cache_invalidate() RETURNS trigger LANGUAGE C AS 'MODULE_PATHNAME', $$master_dist_node_cache_invalidate$$; COMMENT ON FUNCTION master_dist_node_cache_invalidate() IS 'invalidate internal cache of nodes when pg_dist_nodes changes'; CREATE TRIGGER dist_node_cache_invalidate AFTER INSERT OR UPDATE OR DELETE ON pg_catalog.pg_dist_node FOR EACH ROW EXECUTE PROCEDURE master_dist_node_cache_invalidate(); CREATE FUNCTION master_add_node(nodename text, nodeport integer) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_add_node$$; COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer) IS 'add node to the cluster'; CREATE FUNCTION master_remove_node(nodename text, nodeport integer) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_remove_node$$; COMMENT ON FUNCTION master_remove_node(nodename text, nodeport integer) IS 'remove node from the cluster'; /* this only needs to run once, now. */ CREATE FUNCTION master_initialize_node_metadata() RETURNS BOOL LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_initialize_node_metadata$$; SELECT master_initialize_node_metadata(); RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.0-4--6.0-5.sql000066400000000000000000000021731317107136600224670ustar00rootroot00000000000000/* * Replace oid column in pg_dist_shard_placement with a sequence column. */ CREATE SEQUENCE citus.pg_dist_shard_placement_placementid_seq NO CYCLE; ALTER SEQUENCE citus.pg_dist_shard_placement_placementid_seq SET SCHEMA pg_catalog; ALTER TABLE pg_catalog.pg_dist_shard_placement ADD COLUMN placementid bigint; -- keep existing oids, and update sequence to match max. 
UPDATE pg_catalog.pg_dist_shard_placement SET placementid = oid; ALTER TABLE pg_catalog.pg_dist_shard_placement ALTER COLUMN placementid SET DEFAULT nextval('pg_catalog.pg_dist_shard_placement_placementid_seq'), ALTER COLUMN placementid SET NOT NULL, SET WITHOUT OIDS; CREATE UNIQUE INDEX pg_dist_shard_placement_placementid_index ON pg_catalog.pg_dist_shard_placement using btree(placementid); SELECT setval('pg_catalog.pg_dist_shard_placement_placementid_seq', max(placementid)) FROM pg_catalog.pg_dist_shard_placement; CREATE FUNCTION master_get_new_placementid() RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_get_new_placementid$$; COMMENT ON FUNCTION master_get_new_placementid() IS 'fetch unique placementid'; citus-7.0.3/src/backend/distributed/citus--6.0-5--6.0-6.sql000066400000000000000000000011701317107136600224650ustar00rootroot00000000000000CREATE FUNCTION worker_drop_distributed_table(logicalrelid Oid) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_drop_distributed_table$$; COMMENT ON FUNCTION worker_drop_distributed_table(logicalrelid Oid) IS 'drop the clustered table and its reference from metadata tables'; CREATE FUNCTION column_name_to_column(table_name regclass, column_name text) RETURNS text LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$column_name_to_column$$; COMMENT ON FUNCTION column_name_to_column(table_name regclass, column_name text) IS 'convert a column name to its textual Var representation'; citus-7.0.3/src/backend/distributed/citus--6.0-6--6.0-7.sql000066400000000000000000000014071317107136600224720ustar00rootroot00000000000000/* citus--6.0-6--6.0-7.sql */ CREATE FUNCTION pg_catalog.get_colocated_table_array(regclass) RETURNS regclass[] AS 'citus' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION pg_catalog.master_move_shard_placement(shard_id bigint, source_node_name text, source_node_port integer, target_node_name text, target_node_port integer) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_move_shard_placement$$; COMMENT ON FUNCTION pg_catalog.master_move_shard_placement(shard_id bigint, source_node_name text, source_node_port integer, target_node_name text, target_node_port integer) IS 'move shard from remote node'; citus-7.0.3/src/backend/distributed/citus--6.0-7--6.0-8.sql000066400000000000000000000001301317107136600224640ustar00rootroot00000000000000/* * Drop shardalias from pg_dist_shard */ ALTER TABLE pg_dist_shard DROP shardalias; citus-7.0.3/src/backend/distributed/citus--6.0-8--6.0-9.sql000066400000000000000000000006611317107136600224770ustar00rootroot00000000000000/* citus--6.0-8--6.0-9.sql */ CREATE TABLE citus.pg_dist_local_group( groupid int NOT NULL PRIMARY KEY) ; /* insert the default value for being the coordinator node */ INSERT INTO citus.pg_dist_local_group VALUES (0); ALTER TABLE citus.pg_dist_local_group SET SCHEMA pg_catalog; GRANT SELECT ON pg_catalog.pg_dist_local_group TO public; ALTER TABLE pg_catalog.pg_dist_node ADD COLUMN hasmetadata bool NOT NULL DEFAULT false; citus-7.0.3/src/backend/distributed/citus--6.0-9--6.0-10.sql000066400000000000000000000013241317107136600225450ustar00rootroot00000000000000/* citus--6.0-9--6.0-10.sql */ CREATE TABLE citus.pg_dist_transaction ( groupid int NOT NULL, gid text NOT NULL ); CREATE INDEX pg_dist_transaction_group_index ON citus.pg_dist_transaction using btree(groupid); ALTER TABLE citus.pg_dist_transaction SET SCHEMA pg_catalog; ALTER TABLE pg_catalog.pg_dist_transaction ADD CONSTRAINT pg_dist_transaction_unique_constraint UNIQUE (groupid, gid); GRANT SELECT ON 
pg_catalog.pg_dist_transaction TO public; CREATE FUNCTION recover_prepared_transactions() RETURNS int LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$recover_prepared_transactions$$; COMMENT ON FUNCTION recover_prepared_transactions() IS 'recover prepared transactions started by this node'; citus-7.0.3/src/backend/distributed/citus--6.1-1--6.1-2.sql000066400000000000000000000005701317107136600224620ustar00rootroot00000000000000/* citus--6.1-1--6.1-2.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION worker_create_truncate_trigger(table_name regclass) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_create_truncate_trigger$$; COMMENT ON FUNCTION worker_create_truncate_trigger(tablename regclass) IS 'create truncate trigger for distributed table'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-10--6.1-11.sql000066400000000000000000000040311317107136600226160ustar00rootroot00000000000000/* citus--6.1-10--6.1-11.sql */ SET search_path = 'pg_catalog'; DROP FUNCTION master_drop_sequences(text[], text, bigint); CREATE FUNCTION master_drop_sequences(sequence_names text[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_drop_sequences$$; COMMENT ON FUNCTION master_drop_sequences(text[]) IS 'drop specified sequences from the cluster'; CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() RETURNS event_trigger LANGUAGE plpgsql SECURITY DEFINER SET search_path = pg_catalog AS $cdbdt$ DECLARE v_obj record; sequence_names text[] := '{}'; table_colocation_id integer; propagate_drop boolean := false; BEGIN -- collect set of dropped sequences to drop on workers later SELECT array_agg(object_identity) INTO sequence_names FROM pg_event_trigger_dropped_objects() WHERE object_type = 'sequence'; FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() JOIN pg_dist_partition ON (logicalrelid = objid) WHERE object_type IN ('table', 'foreign table') LOOP -- get colocation group SELECT colocationid INTO table_colocation_id FROM pg_dist_partition WHERE logicalrelid = v_obj.objid; -- ensure all shards are dropped PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name); PERFORM master_drop_distributed_table_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name); -- drop colocation group if all referencing tables are dropped IF NOT EXISTS(SELECT * FROM pg_dist_partition WHERE colocationId = table_colocation_id) THEN DELETE FROM pg_dist_colocation WHERE colocationId = table_colocation_id; END IF; END LOOP; IF cardinality(sequence_names) = 0 THEN RETURN; END IF; PERFORM master_drop_sequences(sequence_names); END; $cdbdt$; COMMENT ON FUNCTION citus_drop_trigger() IS 'perform checks and actions at the end of DROP actions'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-11--6.1-12.sql000066400000000000000000000006031317107136600226210ustar00rootroot00000000000000/* citus--6.1-11--6.1-12.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION upgrade_to_reference_table(table_name regclass) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$upgrade_to_reference_table$$; COMMENT ON FUNCTION upgrade_to_reference_table(table_name regclass) IS 'upgrades an existing broadcast table to a reference table'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-12--6.1-13.sql000066400000000000000000000005521317107136600226260ustar00rootroot00000000000000/* citus--6.1-12--6.1-13.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION master_disable_node(nodename text, nodeport integer) RETURNS void LANGUAGE C STRICT AS 
'MODULE_PATHNAME', $$master_disable_node$$; COMMENT ON FUNCTION master_disable_node(nodename text, nodeport integer) IS 'removes node from the cluster temporarily'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-13--6.1-14.sql000066400000000000000000000226471317107136600226410ustar00rootroot00000000000000/* citus--6.1-13--6.1-14.sql */ CREATE OR REPLACE FUNCTION pg_catalog.master_run_on_worker(worker_name text[], port integer[], command text[], parallel boolean, OUT node_name text, OUT node_port integer, OUT success boolean, OUT result text ) RETURNS SETOF record LANGUAGE C STABLE STRICT AS 'MODULE_PATHNAME', $$master_run_on_worker$$; CREATE TYPE citus.colocation_placement_type AS ( shardid1 bigint, shardid2 bigint, nodename text, nodeport bigint ); -- -- distributed_tables_colocated returns true if given tables are co-located, false otherwise. -- The function checks shard definitions, matches shard placements for given tables. -- CREATE OR REPLACE FUNCTION pg_catalog.distributed_tables_colocated(table1 regclass, table2 regclass) RETURNS bool LANGUAGE plpgsql AS $function$ DECLARE colocated_shard_count int; table1_shard_count int; table2_shard_count int; table1_placement_count int; table2_placement_count int; table1_placements citus.colocation_placement_type[]; table2_placements citus.colocation_placement_type[]; BEGIN SELECT count(*), (SELECT count(*) FROM pg_dist_shard a WHERE a.logicalrelid = table1), (SELECT count(*) FROM pg_dist_shard b WHERE b.logicalrelid = table2) INTO colocated_shard_count, table1_shard_count, table2_shard_count FROM pg_dist_shard tba JOIN pg_dist_shard tbb USING(shardminvalue, shardmaxvalue) WHERE tba.logicalrelid = table1 AND tbb.logicalrelid = table2; IF (table1_shard_count != table2_shard_count OR table1_shard_count != colocated_shard_count) THEN RETURN false; END IF; WITH colocated_shards AS ( SELECT tba.shardid as shardid1, tbb.shardid as shardid2 FROM pg_dist_shard tba JOIN pg_dist_shard tbb USING(shardminvalue, shardmaxvalue) WHERE tba.logicalrelid = table1 AND tbb.logicalrelid = table2), left_shard_placements AS ( SELECT cs.shardid1, cs.shardid2, sp.nodename, sp.nodeport FROM colocated_shards cs JOIN pg_dist_shard_placement sp ON (cs.shardid1 = sp.shardid) WHERE sp.shardstate = 1) SELECT array_agg( (lsp.shardid1, lsp.shardid2, lsp.nodename, lsp.nodeport)::citus.colocation_placement_type ORDER BY shardid1, shardid2, nodename, nodeport), count(distinct lsp.shardid1) FROM left_shard_placements lsp INTO table1_placements, table1_placement_count; WITH colocated_shards AS ( SELECT tba.shardid as shardid1, tbb.shardid as shardid2 FROM pg_dist_shard tba JOIN pg_dist_shard tbb USING(shardminvalue, shardmaxvalue) WHERE tba.logicalrelid = table1 AND tbb.logicalrelid = table2), right_shard_placements AS ( SELECT cs.shardid1, cs.shardid2, sp.nodename, sp.nodeport FROM colocated_shards cs LEFT JOIN pg_dist_shard_placement sp ON(cs.shardid2 = sp.shardid) WHERE sp.shardstate = 1) SELECT array_agg( (rsp.shardid1, rsp.shardid2, rsp.nodename, rsp.nodeport)::citus.colocation_placement_type ORDER BY shardid1, shardid2, nodename, nodeport), count(distinct rsp.shardid2) FROM right_shard_placements rsp INTO table2_placements, table2_placement_count; IF (table1_shard_count != table1_placement_count OR table1_placement_count != table2_placement_count) THEN RETURN false; END IF; IF (array_length(table1_placements, 1) != array_length(table2_placements, 1)) THEN RETURN false; END IF; FOR i IN 1..array_length(table1_placements,1) LOOP IF 
(table1_placements[i].nodename != table2_placements[i].nodename OR table1_placements[i].nodeport != table2_placements[i].nodeport) THEN RETURN false; END IF; END LOOP; RETURN true; END; $function$; CREATE OR REPLACE FUNCTION pg_catalog.run_command_on_workers(command text, parallel bool default true, OUT nodename text, OUT nodeport int, OUT success bool, OUT result text) RETURNS SETOF record LANGUAGE plpgsql AS $function$ DECLARE workers text[]; ports int[]; commands text[]; BEGIN WITH citus_workers AS ( SELECT * FROM master_get_active_worker_nodes() ORDER BY node_name, node_port) SELECT array_agg(node_name), array_agg(node_port), array_agg(command) INTO workers, ports, commands FROM citus_workers; RETURN QUERY SELECT * FROM master_run_on_worker(workers, ports, commands, parallel); END; $function$; CREATE OR REPLACE FUNCTION pg_catalog.run_command_on_placements(table_name regclass, command text, parallel bool default true, OUT nodename text, OUT nodeport int, OUT shardid bigint, OUT success bool, OUT result text) RETURNS SETOF record LANGUAGE plpgsql AS $function$ DECLARE workers text[]; ports int[]; shards bigint[]; commands text[]; BEGIN WITH citus_placements AS ( SELECT ds.logicalrelid::regclass AS tablename, ds.shardid AS shardid, shard_name(ds.logicalrelid, ds.shardid) AS shardname, dsp.nodename AS nodename, dsp.nodeport::int AS nodeport FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid) WHERE dsp.shardstate = 1 and ds.logicalrelid::regclass = table_name ORDER BY ds.logicalrelid, ds.shardid, dsp.nodename, dsp.nodeport) SELECT array_agg(cp.nodename), array_agg(cp.nodeport), array_agg(cp.shardid), array_agg(format(command, cp.shardname)) INTO workers, ports, shards, commands FROM citus_placements cp; RETURN QUERY SELECT r.node_name, r.node_port, shards[ordinality], r.success, r.result FROM master_run_on_worker(workers, ports, commands, parallel) WITH ORDINALITY r; END; $function$; CREATE OR REPLACE FUNCTION pg_catalog.run_command_on_colocated_placements( table_name1 regclass, table_name2 regclass, command text, parallel bool default true, OUT nodename text, OUT nodeport int, OUT shardid1 bigint, OUT shardid2 bigint, OUT success bool, OUT result text) RETURNS SETOF record LANGUAGE plpgsql AS $function$ DECLARE workers text[]; ports int[]; shards1 bigint[]; shards2 bigint[]; commands text[]; BEGIN IF NOT (SELECT distributed_tables_colocated(table_name1, table_name2)) THEN RAISE EXCEPTION 'tables % and % are not co-located', table_name1, table_name2; END IF; WITH active_shard_placements AS ( SELECT ds.logicalrelid, ds.shardid AS shardid, shard_name(ds.logicalrelid, ds.shardid) AS shardname, ds.shardminvalue AS shardminvalue, ds.shardmaxvalue AS shardmaxvalue, dsp.nodename AS nodename, dsp.nodeport::int AS nodeport FROM pg_dist_shard ds JOIN pg_dist_shard_placement dsp USING (shardid) WHERE dsp.shardstate = 1 and (ds.logicalrelid::regclass = table_name1 or ds.logicalrelid::regclass = table_name2) ORDER BY ds.logicalrelid, ds.shardid, dsp.nodename, dsp.nodeport), citus_colocated_placements AS ( SELECT a.logicalrelid::regclass AS tablename1, a.shardid AS shardid1, shard_name(a.logicalrelid, a.shardid) AS shardname1, b.logicalrelid::regclass AS tablename2, b.shardid AS shardid2, shard_name(b.logicalrelid, b.shardid) AS shardname2, a.nodename AS nodename, a.nodeport::int AS nodeport FROM active_shard_placements a, active_shard_placements b WHERE a.shardminvalue = b.shardminvalue AND a.shardmaxvalue = b.shardmaxvalue AND a.logicalrelid != b.logicalrelid AND a.nodename = 
b.nodename AND a.nodeport = b.nodeport AND a.logicalrelid::regclass = table_name1 AND b.logicalrelid::regclass = table_name2 ORDER BY a.logicalrelid, a.shardid, nodename, nodeport) SELECT array_agg(cp.nodename), array_agg(cp.nodeport), array_agg(cp.shardid1), array_agg(cp.shardid2), array_agg(format(command, cp.shardname1, cp.shardname2)) INTO workers, ports, shards1, shards2, commands FROM citus_colocated_placements cp; RETURN QUERY SELECT r.node_name, r.node_port, shards1[ordinality], shards2[ordinality], r.success, r.result FROM master_run_on_worker(workers, ports, commands, parallel) WITH ORDINALITY r; END; $function$; CREATE OR REPLACE FUNCTION pg_catalog.run_command_on_shards(table_name regclass, command text, parallel bool default true, OUT shardid bigint, OUT success bool, OUT result text) RETURNS SETOF record LANGUAGE plpgsql AS $function$ DECLARE workers text[]; ports int[]; shards bigint[]; commands text[]; shard_count int; BEGIN SELECT COUNT(*) INTO shard_count FROM pg_dist_shard WHERE logicalrelid = table_name; WITH citus_shards AS ( SELECT ds.logicalrelid::regclass AS tablename, ds.shardid AS shardid, shard_name(ds.logicalrelid, ds.shardid) AS shardname, array_agg(dsp.nodename) AS nodenames, array_agg(dsp.nodeport) AS nodeports FROM pg_dist_shard ds LEFT JOIN pg_dist_shard_placement dsp USING (shardid) WHERE dsp.shardstate = 1 and ds.logicalrelid::regclass = table_name GROUP BY ds.logicalrelid, ds.shardid ORDER BY ds.logicalrelid, ds.shardid) SELECT array_agg(cs.nodenames[1]), array_agg(cs.nodeports[1]), array_agg(cs.shardid), array_agg(format(command, cs.shardname)) INTO workers, ports, shards, commands FROM citus_shards cs; IF (shard_count != array_length(workers, 1)) THEN RAISE NOTICE 'some shards do not have active placements'; END IF; RETURN QUERY SELECT shards[ordinality], r.success, r.result FROM master_run_on_worker(workers, ports, commands, parallel) WITH ORDINALITY r; END; $function$; citus-7.0.3/src/backend/distributed/citus--6.1-14--6.1-15.sql000066400000000000000000000010671317107136600226340ustar00rootroot00000000000000/* citus--6.1-14--6.1-15.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION master_dist_local_group_cache_invalidate() RETURNS trigger LANGUAGE C AS 'MODULE_PATHNAME', $$master_dist_local_group_cache_invalidate$$; COMMENT ON FUNCTION master_dist_local_group_cache_invalidate() IS 'register node cache invalidation for changed rows'; CREATE TRIGGER dist_local_group_cache_invalidate AFTER UPDATE ON pg_catalog.pg_dist_local_group FOR EACH ROW EXECUTE PROCEDURE master_dist_local_group_cache_invalidate(); RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-15--6.1-16.sql000066400000000000000000000005541317107136600226360ustar00rootroot00000000000000/* citus--6.1-15--6.1-16.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION worker_apply_sequence_command(text) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_apply_sequence_command$$; COMMENT ON FUNCTION worker_apply_sequence_command(text) IS 'create a sequence which products globally unique values'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-16--6.1-17.sql000066400000000000000000000012641317107136600226370ustar00rootroot00000000000000/* citus--6.1-16--6.1-17.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION isolate_tenant_to_new_shard(table_name regclass, tenant_id "any", cascade_option text DEFAULT '') RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$isolate_tenant_to_new_shard$$; COMMENT ON FUNCTION 
isolate_tenant_to_new_shard(table_name regclass, tenant_id "any", cascade_option text) IS 'isolate a tenant to its own shard and return the new shard id'; CREATE FUNCTION worker_hash(value "any") RETURNS integer LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_hash$$; COMMENT ON FUNCTION worker_hash(value "any") IS 'calculate hashed value and return it'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-17--6.2-1.sql000066400000000000000000000005101317107136600225430ustar00rootroot00000000000000/* citus--6.1-17--6.2-1.sql */ SET search_path = 'pg_catalog'; DROP FUNCTION IF EXISTS master_get_local_first_candidate_nodes(); DROP FUNCTION IF EXISTS master_get_round_robin_candidate_nodes(); DROP FUNCTION IF EXISTS master_stage_shard_row(); DROP FUNCTION IF EXISTS master_stage_shard_placement_row(); RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-2--6.1-3.sql000066400000000000000000000006311317107136600224620ustar00rootroot00000000000000/* citus--6.1-2--6.1-3.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION stop_metadata_sync_to_node(nodename text, nodeport integer) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$stop_metadata_sync_to_node$$; COMMENT ON FUNCTION stop_metadata_sync_to_node(nodename text, nodeport integer) IS 'stop metadata sync to node'; RESET search_path;citus-7.0.3/src/backend/distributed/citus--6.1-3--6.1-4.sql000066400000000000000000000006301317107136600224630ustar00rootroot00000000000000/* citus--6.1-3--6.1-4.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION column_to_column_name(table_name regclass, column_var_text text) RETURNS text LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$column_to_column_name$$; COMMENT ON FUNCTION column_to_column_name(table_name regclass, column_var_text text) IS 'convert the textual Var representation to a column name'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-4--6.1-5.sql000066400000000000000000000013031317107136600224630ustar00rootroot00000000000000/* citus--6.1-4--6.1-5.sql */ SET search_path = 'pg_catalog'; DROP FUNCTION create_distributed_table(regclass, text, citus.distribution_type); CREATE FUNCTION create_distributed_table(table_name regclass, distribution_column text, distribution_type citus.distribution_type DEFAULT 'hash', colocate_with text DEFAULT 'default') RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$create_distributed_table$$; COMMENT ON FUNCTION create_distributed_table(table_name regclass, distribution_column text, distribution_type citus.distribution_type, colocate_with text) IS 'creates a distributed table'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-5--6.1-6.sql000066400000000000000000000004271317107136600224730ustar00rootroot00000000000000/* citus--6.1-5--6.1-6.sql */ SET search_path = 'pg_catalog'; -- we don't need this constraint any more since reference tables -- wouldn't have partition columns, which we represent as NULL ALTER TABLE pg_dist_partition ALTER COLUMN partkey DROP NOT NULL; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-6--6.1-7.sql000066400000000000000000000007321317107136600224740ustar00rootroot00000000000000/* citus--6.1-6--6.1-7.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION get_shard_id_for_distribution_column(table_name regclass, distribution_value "any" DEFAULT NULL) RETURNS bigint LANGUAGE C AS 'MODULE_PATHNAME', $$get_shard_id_for_distribution_column$$; COMMENT ON FUNCTION get_shard_id_for_distribution_column(table_name regclass, distribution_value "any") IS 'return 
shard id which belongs to given table and contains given value'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-7--6.1-8.sql000066400000000000000000000012661317107136600225010ustar00rootroot00000000000000/* citus--6.1-4--6.1-5.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION lock_shard_resources(lock_mode int, shard_id bigint[]) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$lock_shard_resources$$; COMMENT ON FUNCTION lock_shard_resources(lock_mode int, shard_id bigint[]) IS 'lock shard resource to serialise non-commutative writes'; CREATE FUNCTION lock_shard_metadata(lock_mode int, shard_id bigint[]) RETURNS VOID LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$lock_shard_metadata$$; COMMENT ON FUNCTION lock_shard_metadata(lock_mode int, shard_id bigint[]) IS 'lock shard metadata to prevent writes during metadata changes'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-8--6.1-9.sql000066400000000000000000000064651317107136600225110ustar00rootroot00000000000000/* citus--6.1-8--6.1-9.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION master_drop_distributed_table_metadata(logicalrelid regclass, schema_name text, table_name text) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_drop_distributed_table_metadata$$; COMMENT ON FUNCTION master_drop_distributed_table_metadata(logicalrelid regclass, schema_name text, table_name text) IS 'delete metadata of the distributed table'; CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() RETURNS event_trigger LANGUAGE plpgsql SECURITY DEFINER SET search_path = pg_catalog AS $cdbdt$ DECLARE v_obj record; sequence_names text[] := '{}'; node_names text[] := '{}'; node_ports bigint[] := '{}'; node_name text; node_port bigint; table_colocation_id integer; BEGIN -- collect set of dropped sequences to drop on workers later SELECT array_agg(object_identity) INTO sequence_names FROM pg_event_trigger_dropped_objects() WHERE object_type = 'sequence'; -- Must accumulate set of affected nodes before deleting placements, as -- master_drop_all_shards will erase their rows, making it impossible for -- us to know where to drop sequences (which must be dropped after shards, -- since they have default value expressions which depend on sequences). 
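    -- (Descriptive note: the join below resolves each dropped table through
    -- pg_dist_shard to its pg_dist_shard_placement rows, capturing the
    -- nodename/nodeport pairs while the placement metadata still exists.)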
SELECT array_agg(sp.nodename), array_agg(sp.nodeport) INTO node_names, node_ports FROM pg_event_trigger_dropped_objects() AS dobj, pg_dist_shard AS s, pg_dist_shard_placement AS sp WHERE dobj.object_type IN ('table', 'foreign table') AND dobj.objid = s.logicalrelid AND s.shardid = sp.shardid; FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP IF v_obj.object_type NOT IN ('table', 'foreign table') THEN CONTINUE; END IF; -- nothing to do if not a distributed table IF NOT EXISTS(SELECT * FROM pg_dist_partition WHERE logicalrelid = v_obj.objid) THEN CONTINUE; END IF; -- get colocation group SELECT colocationid INTO table_colocation_id FROM pg_dist_partition WHERE logicalrelid = v_obj.objid; -- ensure all shards are dropped PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name); PERFORM master_drop_distributed_table_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name); -- drop colocation group if all referencing tables are dropped IF NOT EXISTS(SELECT * FROM pg_dist_partition WHERE colocationId = table_colocation_id) THEN DELETE FROM pg_dist_colocation WHERE colocationId = table_colocation_id; END IF; END LOOP; IF cardinality(sequence_names) = 0 THEN RETURN; END IF; FOR node_name, node_port IN SELECT DISTINCT name, port FROM unnest(node_names, node_ports) AS nodes(name, port) LOOP PERFORM master_drop_sequences(sequence_names, node_name, node_port); END LOOP; END; $cdbdt$; COMMENT ON FUNCTION citus_drop_trigger() IS 'perform checks and actions at the end of DROP actions'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.1-9--6.1-10.sql000066400000000000000000000010031317107136600225410ustar00rootroot00000000000000/* citus--6.1-9--6.1-10.sql */ GRANT SELECT ON pg_catalog.pg_dist_node TO public; GRANT SELECT ON pg_catalog.pg_dist_colocation TO public; GRANT SELECT ON pg_catalog.pg_dist_colocationid_seq TO public; GRANT SELECT ON pg_catalog.pg_dist_groupid_seq TO public; GRANT SELECT ON pg_catalog.pg_dist_node_nodeid_seq TO public; GRANT SELECT ON pg_catalog.pg_dist_shard_placement_placementid_seq TO public; GRANT SELECT ON pg_catalog.pg_dist_shardid_seq TO public; GRANT SELECT ON pg_catalog.pg_dist_jobid_seq TO public; citus-7.0.3/src/backend/distributed/citus--6.2-1--6.2-2.sql000066400000000000000000000016431317107136600224660ustar00rootroot00000000000000/* citus--6.2-1--6.2-2.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION citus_table_size(logicalrelid regclass) RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_table_size$$; COMMENT ON FUNCTION citus_table_size(logicalrelid regclass) IS 'get disk space used by the specified table, excluding indexes'; CREATE FUNCTION citus_relation_size(logicalrelid regclass) RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_relation_size$$; COMMENT ON FUNCTION citus_relation_size(logicalrelid regclass) IS 'get disk space used by the ''main'' fork'; CREATE FUNCTION citus_total_relation_size(logicalrelid regclass) RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_total_relation_size$$; COMMENT ON FUNCTION citus_total_relation_size(logicalrelid regclass) IS 'get total disk space used by the specified table'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.2-2--6.2-3.sql000066400000000000000000000045051317107136600224700ustar00rootroot00000000000000/* citus--6.2-2--6.2-3.sql */ SET search_path = 'pg_catalog'; ALTER TABLE pg_dist_node ADD isactive bool NOT NULL DEFAULT true; DROP FUNCTION IF EXISTS master_add_node(text, integer); CREATE 
FUNCTION master_add_node(nodename text, nodeport integer, OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$master_add_node$$; COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer) IS 'add node to the cluster'; CREATE FUNCTION master_add_inactive_node(nodename text, nodeport integer, OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$master_add_inactive_node$$; COMMENT ON FUNCTION master_add_inactive_node(nodename text,nodeport integer) IS 'prepare node by adding it to pg_dist_node'; CREATE FUNCTION master_activate_node(nodename text, nodeport integer, OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$master_activate_node$$; COMMENT ON FUNCTION master_activate_node(nodename text, nodeport integer) IS 'activate a node which is in the cluster'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--6.2-3--6.2-4.sql000066400000000000000000000004761317107136600224750ustar00rootroot00000000000000/* citus--6.2-3--6.2-4.sql */ CREATE OR REPLACE FUNCTION pg_catalog.citus_truncate_trigger() RETURNS trigger LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_truncate_trigger$$; COMMENT ON FUNCTION pg_catalog.citus_truncate_trigger() IS 'trigger function called when truncating the distributed table'; citus-7.0.3/src/backend/distributed/citus--6.2-4--7.0-1.sql000066400000000000000000000001311317107136600224560ustar00rootroot00000000000000/* citus--6.2-4--7.0-1.sql */ /* empty, but required to update the extension version */ citus-7.0.3/src/backend/distributed/citus--7.0-1--7.0-2.sql000066400000000000000000000006211317107136600224570ustar00rootroot00000000000000/* citus--7.0-1--7.0-2.sql */ /* redefine shard_name as STRICT */ CREATE OR REPLACE FUNCTION pg_catalog.shard_name(object_name regclass, shard_id bigint) RETURNS text LANGUAGE C STABLE STRICT AS 'MODULE_PATHNAME', $$shard_name$$; COMMENT ON FUNCTION pg_catalog.shard_name(object_name regclass, shard_id bigint) IS 'returns schema-qualified, shard-extended identifier of object name'; citus-7.0.3/src/backend/distributed/citus--7.0-10--7.0-11.sql000066400000000000000000000015101317107136600226150ustar00rootroot00000000000000/* citus-7.0-10--7.0-11 */ SET search_path = 'pg_catalog'; CREATE OR REPLACE FUNCTION master_update_table_statistics(relation regclass) RETURNS VOID AS $$ DECLARE colocated_tables regclass[]; BEGIN SELECT get_colocated_table_array(relation) INTO colocated_tables; PERFORM master_update_shard_statistics(shardid) FROM pg_dist_shard WHERE logicalrelid = ANY (colocated_tables); END; $$ LANGUAGE 'plpgsql'; COMMENT ON FUNCTION master_update_table_statistics(regclass) IS 'updates shard statistics of the given table and its colocated tables'; CREATE OR REPLACE FUNCTION get_colocated_shard_array(bigint) RETURNS BIGINT[] LANGUAGE C STRICT AS 'citus', $$get_colocated_shard_array$$; COMMENT ON FUNCTION get_colocated_shard_array(bigint) IS 'returns the array of colocated shards of the given shard'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--7.0-11--7.0-12.sql000066400000000000000000000005141317107136600226220ustar00rootroot00000000000000/* citus--7.0-11--7.0-12.sql 
*/ CREATE OR REPLACE FUNCTION pg_catalog.citus_create_restore_point(text) RETURNS pg_lsn LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_create_restore_point$$; COMMENT ON FUNCTION pg_catalog.citus_create_restore_point(text) IS 'temporarily block writes and create a named restore point on all nodes'; citus-7.0.3/src/backend/distributed/citus--7.0-12--7.0-13.sql000066400000000000000000000027141317107136600226300ustar00rootroot00000000000000/* citus--7.0-12--7.0-13.sql */ SET search_path = 'pg_catalog'; CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() RETURNS event_trigger LANGUAGE plpgsql SECURITY DEFINER SET search_path = pg_catalog AS $cdbdt$ DECLARE v_obj record; sequence_names text[] := '{}'; table_colocation_id integer; propagate_drop boolean := false; BEGIN -- collect set of dropped sequences to drop on workers later SELECT array_agg(object_identity) INTO sequence_names FROM pg_event_trigger_dropped_objects() WHERE object_type = 'sequence'; FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() JOIN pg_dist_partition ON (logicalrelid = objid) WHERE object_type IN ('table', 'foreign table') LOOP -- get colocation group SELECT colocationid INTO table_colocation_id FROM pg_dist_partition WHERE logicalrelid = v_obj.objid; -- ensure all shards are dropped PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name); PERFORM master_drop_distributed_table_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name); END LOOP; IF cardinality(sequence_names) = 0 THEN RETURN; END IF; PERFORM master_drop_sequences(sequence_names); END; $cdbdt$; COMMENT ON FUNCTION citus_drop_trigger() IS 'perform checks and actions at the end of DROP actions'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--7.0-13--7.0-14.sql000066400000000000000000000006251317107136600226310ustar00rootroot00000000000000/* citus--7.0-13--7.0-14.sql */ SET search_path = 'pg_catalog'; CREATE OR REPLACE FUNCTION check_distributed_deadlocks() RETURNS BOOL LANGUAGE 'c' STRICT AS $$MODULE_PATHNAME$$, $$check_distributed_deadlocks$$; COMMENT ON FUNCTION check_distributed_deadlocks() IS 'does a distributed deadlock check, if a deadlock found cancels one of the participating backends and returns true '; RESET search_path; citus-7.0.3/src/backend/distributed/citus--7.0-14--7.0-15.sql000066400000000000000000000015031317107136600226270ustar00rootroot00000000000000/* citus--7.0-14--7.0-15 */ DROP FUNCTION pg_catalog.dump_local_wait_edges(int4); CREATE FUNCTION pg_catalog.dump_local_wait_edges( OUT waiting_pid int4, OUT waiting_node_id int4, OUT waiting_transaction_num int8, OUT waiting_transaction_stamp timestamptz, OUT blocking_pid int4, OUT blocking_node_id int4, OUT blocking_transaction_num int8, OUT blocking_transaction_stamp timestamptz, OUT blocking_transaction_waiting bool) RETURNS SETOF RECORD LANGUAGE C STRICT AS $$MODULE_PATHNAME$$, $$dump_local_wait_edges$$; COMMENT ON FUNCTION pg_catalog.dump_local_wait_edges() IS 'returns all local lock wait chains, that start from distributed transactions'; citus-7.0.3/src/backend/distributed/citus--7.0-2--7.0-3.sql000066400000000000000000000073161317107136600224710ustar00rootroot00000000000000/* citus--7.0-2--7.0-3.sql */ ALTER SEQUENCE pg_catalog.pg_dist_shard_placement_placementid_seq RENAME TO pg_dist_placement_placementid_seq; ALTER TABLE pg_catalog.pg_dist_shard_placement ALTER COLUMN placementid SET DEFAULT nextval('pg_catalog.pg_dist_placement_placementid_seq'); CREATE TABLE citus.pg_dist_placement ( placementid BIGINT NOT NULL default 
nextval('pg_dist_placement_placementid_seq'::regclass), shardid BIGINT NOT NULL, shardstate INT NOT NULL, shardlength BIGINT NOT NULL, groupid INT NOT NULL ); ALTER TABLE citus.pg_dist_placement SET SCHEMA pg_catalog; GRANT SELECT ON pg_catalog.pg_dist_placement TO public; CREATE INDEX pg_dist_placement_groupid_index ON pg_dist_placement USING btree(groupid); CREATE INDEX pg_dist_placement_shardid_index ON pg_dist_placement USING btree(shardid); CREATE UNIQUE INDEX pg_dist_placement_placementid_index ON pg_dist_placement USING btree(placementid); CREATE OR REPLACE FUNCTION citus.find_groupid_for_node(text, int) RETURNS int AS $$ DECLARE groupid int := (SELECT groupid FROM pg_dist_node WHERE nodename = $1 AND nodeport = $2); BEGIN IF groupid IS NULL THEN RAISE EXCEPTION 'There is no node at "%:%"', $1, $2; ELSE RETURN groupid; END IF; END; $$ LANGUAGE plpgsql; INSERT INTO pg_catalog.pg_dist_placement SELECT placementid, shardid, shardstate, shardlength, citus.find_groupid_for_node(placement.nodename, placement.nodeport::int) AS groupid FROM pg_dist_shard_placement placement; DROP TRIGGER dist_placement_cache_invalidate ON pg_catalog.pg_dist_shard_placement; CREATE TRIGGER dist_placement_cache_invalidate AFTER INSERT OR UPDATE OR DELETE ON pg_catalog.pg_dist_placement FOR EACH ROW EXECUTE PROCEDURE master_dist_placement_cache_invalidate(); -- this should be removed when noderole is added but for now it ensures the below view -- returns the correct results and that placements unambiguously belong to a view ALTER TABLE pg_catalog.pg_dist_node ADD CONSTRAINT pg_dist_node_groupid_unique UNIQUE (groupid); DROP TABLE pg_dist_shard_placement; CREATE VIEW citus.pg_dist_shard_placement AS SELECT shardid, shardstate, shardlength, nodename, nodeport, placementid -- assumes there's only one node per group FROM pg_dist_placement placement INNER JOIN pg_dist_node node ON ( placement.groupid = node.groupid ); ALTER VIEW citus.pg_dist_shard_placement SET SCHEMA pg_catalog; GRANT SELECT ON pg_catalog.pg_dist_shard_placement TO public; -- add some triggers which make it look like pg_dist_shard_placement is still a table ALTER VIEW pg_catalog.pg_dist_shard_placement ALTER placementid SET DEFAULT nextval('pg_dist_placement_placementid_seq'); CREATE OR REPLACE FUNCTION citus.pg_dist_shard_placement_trigger_func() RETURNS TRIGGER AS $$ BEGIN IF (TG_OP = 'DELETE') THEN DELETE FROM pg_dist_placement WHERE placementid = OLD.placementid; RETURN OLD; ELSIF (TG_OP = 'UPDATE') THEN UPDATE pg_dist_placement SET shardid = NEW.shardid, shardstate = NEW.shardstate, shardlength = NEW.shardlength, placementid = NEW.placementid, groupid = citus.find_groupid_for_node(NEW.nodename, NEW.nodeport) WHERE placementid = OLD.placementid; RETURN NEW; ELSIF (TG_OP = 'INSERT') THEN INSERT INTO pg_dist_placement (placementid, shardid, shardstate, shardlength, groupid) VALUES (NEW.placementid, NEW.shardid, NEW.shardstate, NEW.shardlength, citus.find_groupid_for_node(NEW.nodename, NEW.nodeport)); RETURN NEW; END IF; END; $$ LANGUAGE plpgsql; CREATE TRIGGER pg_dist_shard_placement_trigger INSTEAD OF INSERT OR UPDATE OR DELETE ON pg_dist_shard_placement FOR EACH ROW EXECUTE PROCEDURE citus.pg_dist_shard_placement_trigger_func(); citus-7.0.3/src/backend/distributed/citus--7.0-3--7.0-4.sql000066400000000000000000000032741317107136600224720ustar00rootroot00000000000000/* citus--7.0-3--7.0-4.sql */ SET search_path = 'pg_catalog'; CREATE FUNCTION assign_distributed_transaction_id(initiator_node_identifier int4, transaction_number int8, 
transaction_stamp timestamptz) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$assign_distributed_transaction_id$$; COMMENT ON FUNCTION assign_distributed_transaction_id(initiator_node_identifier int4, transaction_number int8, transaction_stamp timestamptz) IS 'Only intended for internal use, users should not call this. The function sets the distributed transaction id'; CREATE OR REPLACE FUNCTION get_current_transaction_id(OUT database_id oid, OUT process_id int, OUT initiator_node_identifier int4, OUT transaction_number int8, OUT transaction_stamp timestamptz) RETURNS RECORD LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$get_current_transaction_id$$; COMMENT ON FUNCTION get_current_transaction_id(OUT database_id oid, OUT process_id int, OUT initiator_node_identifier int4, OUT transaction_number int8, OUT transaction_stamp timestamptz) IS 'returns the current backend data including distributed transaction id'; CREATE OR REPLACE FUNCTION get_all_active_transactions(OUT database_id oid, OUT process_id int, OUT initiator_node_identifier int4, OUT transaction_number int8, OUT transaction_stamp timestamptz) RETURNS SETOF RECORD LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$get_all_active_transactions$$; COMMENT ON FUNCTION get_all_active_transactions(OUT database_id oid, OUT process_id int, OUT initiator_node_identifier int4, OUT transaction_number int8, OUT transaction_stamp timestamptz) IS 'returns distributed transaction ids of active distributed transactions'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--7.0-4--7.0-5.sql000066400000000000000000000110341317107136600224650ustar00rootroot00000000000000/* citus--7.0-4--7.0-5.sql */ SET search_path = 'pg_catalog'; CREATE TYPE pg_catalog.noderole AS ENUM ( 'primary', -- node is available and accepting writes 'secondary', -- node is available but only accepts reads 'unavailable' -- node is in recovery or otherwise not usable -- adding new values to a type inside of a transaction (such as during an ALTER EXTENSION -- citus UPDATE) isn't allowed in PG 9.6, and only allowed in PG10 if you don't use the -- new values inside of the same transaction. You might need to replace this type with a -- new one and then change the column type in pg_dist_node. 
There's a list of -- alternatives here: -- https://stackoverflow.com/questions/1771543/postgresql-updating-an-enum-type/41696273 ); ALTER TABLE pg_dist_node ADD COLUMN noderole noderole NOT NULL DEFAULT 'primary'; -- we're now allowed to have more than one node per group ALTER TABLE pg_catalog.pg_dist_node DROP CONSTRAINT pg_dist_node_groupid_unique; -- so make sure pg_dist_shard_placement only returns writable placements CREATE OR REPLACE VIEW pg_catalog.pg_dist_shard_placement AS SELECT shardid, shardstate, shardlength, nodename, nodeport, placementid FROM pg_dist_placement placement INNER JOIN pg_dist_node node ON ( placement.groupid = node.groupid AND node.noderole = 'primary' ); CREATE OR REPLACE FUNCTION citus.pg_dist_node_trigger_func() RETURNS TRIGGER AS $$ BEGIN /* AddNodeMetadata also takes out a ShareRowExclusiveLock */ LOCK TABLE pg_dist_node IN SHARE ROW EXCLUSIVE MODE; IF (TG_OP = 'INSERT') THEN IF NEW.noderole = 'primary' AND EXISTS (SELECT 1 FROM pg_dist_node WHERE groupid = NEW.groupid AND noderole = 'primary' AND nodeid <> NEW.nodeid) THEN RAISE EXCEPTION 'there cannot be two primary nodes in a group'; END IF; RETURN NEW; ELSIF (TG_OP = 'UPDATE') THEN IF NEW.noderole = 'primary' AND EXISTS (SELECT 1 FROM pg_dist_node WHERE groupid = NEW.groupid AND noderole = 'primary' AND nodeid <> NEW.nodeid) THEN RAISE EXCEPTION 'there cannot be two primary nodes in a group'; END IF; RETURN NEW; END IF; END; $$ LANGUAGE plpgsql; CREATE TRIGGER pg_dist_node_trigger BEFORE INSERT OR UPDATE ON pg_dist_node FOR EACH ROW EXECUTE PROCEDURE citus.pg_dist_node_trigger_func(); DROP FUNCTION master_add_node(text, integer); CREATE FUNCTION master_add_node(nodename text, nodeport integer, groupid integer default 0, noderole noderole default 'primary', OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool, OUT noderole noderole) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_add_node$$; COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer, groupid integer, noderole noderole) IS 'add node to the cluster'; DROP FUNCTION master_add_inactive_node(text, integer); CREATE FUNCTION master_add_inactive_node(nodename text, nodeport integer, groupid integer default 0, noderole noderole default 'primary', OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool, OUT noderole noderole) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$master_add_inactive_node$$; COMMENT ON FUNCTION master_add_inactive_node(nodename text,nodeport integer, groupid integer, noderole noderole) IS 'prepare node by adding it to pg_dist_node'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--7.0-5--7.0-6.sql000066400000000000000000000030541317107136600224720ustar00rootroot00000000000000/* citus--7.0-5--7.0-6 */ CREATE FUNCTION pg_catalog.dump_local_wait_edges( IN source_node_id int4, OUT waiting_pid int4, OUT waiting_node_id int4, OUT waiting_transaction_num int8, OUT waiting_transaction_stamp timestamptz, OUT blocking_pid int4, OUT blocking_node_id int4, OUT blocking_transaction_num int8, OUT blocking_transaction_stamp timestamptz, OUT blocking_transaction_waiting bool) RETURNS SETOF RECORD LANGUAGE 'c' STRICT AS $$MODULE_PATHNAME$$, $$dump_local_wait_edges$$; COMMENT ON FUNCTION pg_catalog.dump_local_wait_edges(int) IS 'returns a local list of blocked transactions originating from source_node_id'; CREATE 
FUNCTION pg_catalog.dump_global_wait_edges( OUT waiting_pid int4, OUT waiting_node_id int4, OUT waiting_transaction_num int8, OUT waiting_transaction_stamp timestamptz, OUT blocking_pid int4, OUT blocking_node_id int4, OUT blocking_transaction_num int8, OUT blocking_transaction_stamp timestamptz, OUT blocking_transaction_waiting bool) RETURNS SETOF RECORD LANGUAGE 'c' STRICT AS $$MODULE_PATHNAME$$, $$dump_global_wait_edges$$; COMMENT ON FUNCTION pg_catalog.dump_global_wait_edges() IS 'returns a global list of blocked transactions originating from this node'; citus-7.0.3/src/backend/distributed/citus--7.0-6--7.0-7.sql000066400000000000000000000074401317107136600224770ustar00rootroot00000000000000/* citus--7.0-6--7.0-7 */ CREATE FUNCTION citus.replace_isolation_tester_func() RETURNS void AS $$ DECLARE version integer := current_setting('server_version_num'); BEGIN IF version >= 100000 THEN ALTER FUNCTION pg_catalog.pg_isolation_test_session_is_blocked(integer, integer[]) RENAME TO old_pg_isolation_test_session_is_blocked; ALTER FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(integer, integer[]) RENAME TO pg_isolation_test_session_is_blocked; ELSE ALTER FUNCTION pg_catalog.pg_blocking_pids(integer) RENAME TO old_pg_blocking_pids; ALTER FUNCTION pg_catalog.citus_blocking_pids(integer) RENAME TO pg_blocking_pids; END IF; END; $$ LANGUAGE plpgsql; CREATE FUNCTION citus.restore_isolation_tester_func() RETURNS void AS $$ DECLARE version integer := current_setting('server_version_num'); BEGIN IF version >= 100000 THEN ALTER FUNCTION pg_catalog.pg_isolation_test_session_is_blocked(integer, integer[]) RENAME TO citus_isolation_test_session_is_blocked; ALTER FUNCTION pg_catalog.old_pg_isolation_test_session_is_blocked(integer, integer[]) RENAME TO pg_isolation_test_session_is_blocked; ELSE ALTER FUNCTION pg_catalog.pg_blocking_pids(integer) RENAME TO citus_blocking_pids; ALTER FUNCTION pg_catalog.old_pg_blocking_pids(integer) RENAME TO pg_blocking_pids; END IF; END; $$ LANGUAGE plpgsql; CREATE FUNCTION citus.refresh_isolation_tester_prepared_statement() RETURNS void AS $$ BEGIN -- isolation creates a prepared statement using the old function before tests have a -- chance to call replace_isolation_tester_func. By calling that prepared statement -- with a different search_path we force a re-parse which picks up the new function SET search_path TO 'citus'; EXECUTE 'EXECUTE isolationtester_waiting (0)'; RESET search_path; END; $$ LANGUAGE plpgsql; CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) RETURNS int4[] AS $$ DECLARE mLocalBlockingPids int4[]; mRemoteBlockingPids int4[]; mLocalTransactionNum int8; BEGIN SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; IF (array_length(mLocalBlockingPids, 1) > 0) THEN RETURN mLocalBlockingPids; END IF; -- pg says we're not blocked locally; check whether we're blocked globally. 
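    -- To do that, resolve this backend's distributed transaction number via
    -- get_all_active_transactions(), then walk the cluster-wide wait graph from
    -- dump_global_wait_edges() and map any blocking transaction numbers back to
    -- local process ids.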
SELECT transaction_number INTO mLocalTransactionNum FROM get_all_active_transactions() WHERE process_id = pBlockedPid; SELECT array_agg(process_id) INTO mRemoteBlockingPids FROM ( WITH activeTransactions AS ( SELECT process_id, transaction_number FROM get_all_active_transactions() ), blockingTransactions AS ( SELECT blocking_transaction_num AS txn_num FROM dump_global_wait_edges() WHERE waiting_transaction_num = mLocalTransactionNum ) SELECT activeTransactions.process_id FROM activeTransactions, blockingTransactions WHERE activeTransactions.transaction_number = blockingTransactions.txn_num ) AS sub; RETURN mRemoteBlockingPids; END; $$ LANGUAGE plpgsql; CREATE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) RETURNS boolean AS $$ DECLARE mBlockedTransactionNum int8; BEGIN IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN RETURN true; END IF; -- pg says we're not blocked locally; check whether we're blocked globally. SELECT transaction_number INTO mBlockedTransactionNum FROM get_all_active_transactions() WHERE process_id = pBlockedPid; RETURN EXISTS ( SELECT 1 FROM dump_global_wait_edges() WHERE waiting_transaction_num = mBlockedTransactionNum ); END; $$ LANGUAGE plpgsql; citus-7.0.3/src/backend/distributed/citus--7.0-7--7.0-8.sql000066400000000000000000000016321317107136600224760ustar00rootroot00000000000000/* citus--7.0-7--7.0-8.sql */ SET search_path = 'pg_catalog'; DROP FUNCTION master_activate_node(text, integer); CREATE FUNCTION master_activate_node(nodename text, nodeport integer, OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool, OUT noderole noderole) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$master_activate_node$$; COMMENT ON FUNCTION master_activate_node(nodename text, nodeport integer) IS 'activate a node which is in the cluster'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--7.0-8--7.0-9.sql000066400000000000000000000071141317107136600225010ustar00rootroot00000000000000/* citus-7.0-8--7.0-9 */ SET search_path = 'pg_catalog'; ALTER TABLE pg_dist_node ADD COLUMN nodecluster name NOT NULL DEFAULT 'default'; ALTER TABLE pg_dist_node ADD CONSTRAINT primaries_are_only_allowed_in_the_default_cluster CHECK (NOT (nodecluster <> 'default' AND noderole = 'primary')); DROP FUNCTION master_add_node(text, integer, integer, noderole); CREATE FUNCTION master_add_node(nodename text, nodeport integer, groupid integer default 0, noderole noderole default 'primary', nodecluster name default 'default', OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool, OUT noderole noderole, OUT nodecluster name) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_add_node$$; COMMENT ON FUNCTION master_add_node(nodename text, nodeport integer, groupid integer, noderole noderole, nodecluster name) IS 'add node to the cluster'; DROP FUNCTION master_add_inactive_node(text, integer, integer, noderole); CREATE FUNCTION master_add_inactive_node(nodename text, nodeport integer, groupid integer default 0, noderole noderole default 'primary', nodecluster name default 'default', OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool, OUT noderole noderole, OUT nodecluster name) RETURNS record LANGUAGE C STRICT AS 
'MODULE_PATHNAME',$$master_add_inactive_node$$; COMMENT ON FUNCTION master_add_inactive_node(nodename text,nodeport integer, groupid integer, noderole noderole, nodecluster name) IS 'prepare node by adding it to pg_dist_node'; DROP FUNCTION master_activate_node(text, integer); CREATE FUNCTION master_activate_node(nodename text, nodeport integer, OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool, OUT noderole noderole, OUT nodecluster name) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME',$$master_activate_node$$; COMMENT ON FUNCTION master_activate_node(nodename text, nodeport integer) IS 'activate a node which is in the cluster'; RESET search_path; citus-7.0.3/src/backend/distributed/citus--7.0-9--7.0-10.sql000066400000000000000000000024621317107136600225530ustar00rootroot00000000000000/* citus-7.0-9--7.0-10 */ SET search_path = 'pg_catalog'; CREATE FUNCTION master_add_secondary_node(nodename text, nodeport integer, primaryname text, primaryport integer, nodecluster name default 'default', OUT nodeid integer, OUT groupid integer, OUT nodename text, OUT nodeport integer, OUT noderack text, OUT hasmetadata boolean, OUT isactive bool, OUT noderole noderole, OUT nodecluster name) RETURNS record LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_add_secondary_node$$; COMMENT ON FUNCTION master_add_secondary_node(nodename text, nodeport integer, primaryname text, primaryport integer, nodecluster name) IS 'add a secondary node to the cluster'; RESET search_path; citus-7.0.3/src/backend/distributed/citus.control000066400000000000000000000002361317107136600221070ustar00rootroot00000000000000# Citus extension comment = 'Citus distributed database' default_version = '7.0-15' module_pathname = '$libdir/citus' relocatable = false schema = pg_catalog citus-7.0.3/src/backend/distributed/citus.sql000066400000000000000000000460371317107136600212370ustar00rootroot00000000000000/* citus.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION citus" to load this file. 
\quit CREATE SCHEMA citus; -- Ensure CREATE EXTENSION is not run against an old citus data -- directory, we're not compatible (due to the builtin functions/tables) DO $$ BEGIN IF EXISTS(SELECT * FROM pg_proc WHERE proname = 'worker_apply_shard_ddl_command') THEN RAISE 'cannot install citus extension in Citus 4 data directory'; END IF; END; $$; /***************************************************************************** * Citus data types *****************************************************************************/ CREATE TYPE citus.distribution_type AS ENUM ( 'hash', 'range', 'append' ); /***************************************************************************** * Citus tables & corresponding indexes *****************************************************************************/ CREATE TABLE citus.pg_dist_partition( logicalrelid Oid NOT NULL, /* type changed to regclass as of version 6.0-1 */ partmethod "char" NOT NULL, partkey text NOT NULL ); /* SELECT granted to PUBLIC in upgrade script */ CREATE UNIQUE INDEX pg_dist_partition_logical_relid_index ON citus.pg_dist_partition using btree(logicalrelid); ALTER TABLE citus.pg_dist_partition SET SCHEMA pg_catalog; CREATE TABLE citus.pg_dist_shard( logicalrelid oid NOT NULL, /* type changed to regclass as of version 6.0-1 */ shardid int8 NOT NULL, shardstorage "char" NOT NULL, shardalias text, shardminvalue text, shardmaxvalue text ); /* SELECT granted to PUBLIC in upgrade script */ CREATE UNIQUE INDEX pg_dist_shard_shardid_index ON citus.pg_dist_shard using btree(shardid); CREATE INDEX pg_dist_shard_logical_relid_index ON citus.pg_dist_shard using btree(logicalrelid); ALTER TABLE citus.pg_dist_shard SET SCHEMA pg_catalog; CREATE TABLE citus.pg_dist_shard_placement( shardid int8 NOT NULL, shardstate int4 NOT NULL, shardlength int8 NOT NULL, nodename text NOT NULL, nodeport int8 NOT NULL ) WITH oids; /* SELECT granted to PUBLIC in upgrade script */ CREATE UNIQUE INDEX pg_dist_shard_placement_oid_index ON citus.pg_dist_shard_placement using btree(oid); CREATE INDEX pg_dist_shard_placement_shardid_index ON citus.pg_dist_shard_placement using btree(shardid); CREATE INDEX pg_dist_shard_placement_nodeid_index ON citus.pg_dist_shard_placement using btree(nodename, nodeport); ALTER TABLE citus.pg_dist_shard_placement SET SCHEMA pg_catalog; /***************************************************************************** * Citus sequences *****************************************************************************/ /* * Unternal sequence to generate 64-bit shard ids. These identifiers are then * used to identify shards in the distributed database. */ CREATE SEQUENCE citus.pg_dist_shardid_seq MINVALUE 102008 NO CYCLE; ALTER SEQUENCE citus.pg_dist_shardid_seq SET SCHEMA pg_catalog; /* * internal sequence to generate 32-bit jobIds. These identifiers are then * used to identify jobs in the distributed database; and they wrap at 32-bits * to allow for slave nodes to independently execute their distributed jobs. */ CREATE SEQUENCE citus.pg_dist_jobid_seq MINVALUE 2 /* first jobId reserved for clean up jobs */ MAXVALUE 4294967296; ALTER SEQUENCE citus.pg_dist_jobid_seq SET SCHEMA pg_catalog; /***************************************************************************** * Citus functions *****************************************************************************/ /* For backward compatibility and ease of use create functions et al. 
in pg_catalog */ SET search_path = 'pg_catalog'; /* master_* functions */ CREATE FUNCTION master_get_table_metadata(relation_name text, OUT logical_relid oid, OUT part_storage_type "char", OUT part_method "char", OUT part_key text, OUT part_replica_count integer, OUT part_max_size bigint, OUT part_placement_policy integer) RETURNS record LANGUAGE C STABLE STRICT AS 'MODULE_PATHNAME', $$master_get_table_metadata$$; COMMENT ON FUNCTION master_get_table_metadata(relation_name text) IS 'fetch metadata values for the table'; CREATE FUNCTION master_get_table_ddl_events(text) RETURNS SETOF text LANGUAGE C STRICT ROWS 100 AS 'MODULE_PATHNAME', $$master_get_table_ddl_events$$; COMMENT ON FUNCTION master_get_table_ddl_events(text) IS 'fetch set of ddl statements for the table'; CREATE FUNCTION master_get_new_shardid() RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_get_new_shardid$$; COMMENT ON FUNCTION master_get_new_shardid() IS 'fetch unique shardId'; CREATE FUNCTION master_create_empty_shard(text) RETURNS bigint LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_create_empty_shard$$; COMMENT ON FUNCTION master_create_empty_shard(text) IS 'create an empty shard and shard placements for the table'; CREATE FUNCTION master_append_table_to_shard(bigint, text, text, integer) RETURNS real LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_append_table_to_shard$$; COMMENT ON FUNCTION master_append_table_to_shard(bigint, text, text, integer) IS 'append given table to all shard placements and update metadata'; CREATE FUNCTION master_drop_all_shards(logicalrelid regclass, schema_name text, table_name text) RETURNS integer LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_drop_all_shards$$; COMMENT ON FUNCTION master_drop_all_shards(regclass, text, text) IS 'drop all shards in a relation and update metadata'; CREATE FUNCTION master_apply_delete_command(text) RETURNS integer LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_apply_delete_command$$; COMMENT ON FUNCTION master_apply_delete_command(text) IS 'drop shards matching delete criteria and update metadata'; CREATE FUNCTION master_get_active_worker_nodes(OUT node_name text, OUT node_port bigint) RETURNS SETOF record LANGUAGE C STRICT ROWS 100 AS 'MODULE_PATHNAME', $$master_get_active_worker_nodes$$; COMMENT ON FUNCTION master_get_active_worker_nodes() IS 'fetch set of active worker nodes'; CREATE FUNCTION master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$master_create_distributed_table$$; COMMENT ON FUNCTION master_create_distributed_table(table_name regclass, distribution_column text, distribution_method citus.distribution_type) IS 'define the table distribution functions'; -- define shard creation function for hash-partitioned tables CREATE FUNCTION master_create_worker_shards(table_name text, shard_count integer, replication_factor integer DEFAULT 2) RETURNS void AS 'MODULE_PATHNAME' LANGUAGE C STRICT; /* task_tracker_* functions */ CREATE FUNCTION task_tracker_assign_task(bigint, integer, text) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$task_tracker_assign_task$$; COMMENT ON FUNCTION task_tracker_assign_task(bigint, integer, text) IS 'assign a task to execute'; CREATE FUNCTION task_tracker_task_status(bigint, integer) RETURNS integer LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$task_tracker_task_status$$; COMMENT ON FUNCTION task_tracker_task_status(bigint, integer) IS 'check an assigned task''s execution 
status'; CREATE FUNCTION task_tracker_cleanup_job(bigint) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$task_tracker_cleanup_job$$; COMMENT ON FUNCTION task_tracker_cleanup_job(bigint) IS 'clean up all tasks associated with a job'; /* worker_* functions */ CREATE FUNCTION worker_fetch_partition_file(bigint, integer, integer, integer, text, integer) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_partition_file$$; COMMENT ON FUNCTION worker_fetch_partition_file(bigint, integer, integer, integer, text, integer) IS 'fetch partition file from remote node'; CREATE FUNCTION worker_fetch_query_results_file(bigint, integer, integer, text, integer) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_query_results_file$$; COMMENT ON FUNCTION worker_fetch_query_results_file(bigint, integer, integer, text, integer) IS 'fetch query results file from remote node'; CREATE FUNCTION worker_fetch_foreign_file(text, bigint, text[], integer[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_foreign_file$$; COMMENT ON FUNCTION worker_fetch_foreign_file(text, bigint, text[], integer[]) IS 'fetch foreign file from remote node and apply file'; CREATE FUNCTION worker_fetch_regular_table(text, bigint, text[], integer[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_fetch_regular_table$$; COMMENT ON FUNCTION worker_fetch_regular_table(text, bigint, text[], integer[]) IS 'fetch PostgreSQL table from remote node'; CREATE FUNCTION worker_range_partition_table(bigint, integer, text, text, oid, anyarray) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_range_partition_table$$; COMMENT ON FUNCTION worker_range_partition_table(bigint, integer, text, text, oid, anyarray) IS 'range partition query results'; CREATE FUNCTION worker_hash_partition_table(bigint, integer, text, text, oid, integer) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_hash_partition_table$$; COMMENT ON FUNCTION worker_hash_partition_table(bigint, integer, text, text, oid, integer) IS 'hash partition query results'; CREATE FUNCTION worker_merge_files_into_table(bigint, integer, text[], text[]) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_merge_files_into_table$$; COMMENT ON FUNCTION worker_merge_files_into_table(bigint, integer, text[], text[]) IS 'merge files into a table'; CREATE FUNCTION worker_merge_files_and_run_query(bigint, integer, text, text) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_merge_files_and_run_query$$; COMMENT ON FUNCTION worker_merge_files_and_run_query(bigint, integer, text, text) IS 'merge files and run a reduce query on merged files'; CREATE FUNCTION worker_cleanup_job_schema_cache() RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_cleanup_job_schema_cache$$; COMMENT ON FUNCTION worker_cleanup_job_schema_cache() IS 'cleanup all job schemas in current database'; CREATE FUNCTION worker_foreign_file_path(text) RETURNS text LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_foreign_file_path$$; COMMENT ON FUNCTION worker_foreign_file_path(text) IS 'get a foreign table''s local file path'; CREATE FUNCTION worker_find_block_local_path(bigint, text[]) RETURNS text LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_find_block_local_path$$; COMMENT ON FUNCTION worker_find_block_local_path(bigint, text[]) IS 'find an HDFS block''s local file path'; CREATE FUNCTION worker_apply_shard_ddl_command(bigint, text) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', 
$$worker_apply_shard_ddl_command$$; COMMENT ON FUNCTION worker_apply_shard_ddl_command(bigint, text) IS 'extend ddl command with shardId and apply on database'; CREATE FUNCTION worker_append_table_to_shard(text, text, text, integer) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$worker_append_table_to_shard$$; COMMENT ON FUNCTION worker_append_table_to_shard(text, text, text, integer) IS 'append a regular table''s contents to the shard'; /* trigger functions */ CREATE OR REPLACE FUNCTION citus_drop_trigger() RETURNS event_trigger LANGUAGE plpgsql SET search_path = pg_catalog /* declared as SECURITY DEFINER in upgrade script */ AS $cdbdt$ DECLARE v_obj record; BEGIN FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() LOOP IF v_obj.object_type NOT IN ('table', 'foreign table') THEN CONTINUE; END IF; -- nothing to do if not a distributed table IF NOT EXISTS(SELECT * FROM pg_dist_partition WHERE logicalrelid = v_obj.objid) THEN CONTINUE; END IF; -- ensure all shards are dropped PERFORM master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name); -- delete partition entry DELETE FROM pg_dist_partition WHERE logicalrelid = v_obj.objid; END LOOP; END; $cdbdt$; COMMENT ON FUNCTION citus_drop_trigger() IS 'perform checks and actions at the end of DROP actions'; CREATE FUNCTION master_dist_partition_cache_invalidate() RETURNS trigger LANGUAGE C AS 'MODULE_PATHNAME', $$master_dist_partition_cache_invalidate$$; COMMENT ON FUNCTION master_dist_partition_cache_invalidate() IS 'register relcache invalidation for changed rows'; CREATE FUNCTION master_dist_shard_cache_invalidate() RETURNS trigger LANGUAGE C AS 'MODULE_PATHNAME', $$master_dist_shard_cache_invalidate$$; COMMENT ON FUNCTION master_dist_shard_cache_invalidate() IS 'register relcache invalidation for changed rows'; /* internal functions, not user accessible */ CREATE FUNCTION citus_extradata_container(INTERNAL) RETURNS void LANGUAGE C AS 'MODULE_PATHNAME', $$citus_extradata_container$$; COMMENT ON FUNCTION pg_catalog.citus_extradata_container(INTERNAL) IS 'placeholder function to store additional data in postgres node trees'; /***************************************************************************** * Citus triggers *****************************************************************************/ CREATE EVENT TRIGGER citus_cascade_to_partition ON SQL_DROP EXECUTE PROCEDURE citus_drop_trigger(); CREATE TRIGGER dist_partition_cache_invalidate AFTER INSERT OR UPDATE OR DELETE ON pg_catalog.pg_dist_partition FOR EACH ROW EXECUTE PROCEDURE master_dist_partition_cache_invalidate(); CREATE TRIGGER dist_shard_cache_invalidate AFTER INSERT OR UPDATE OR DELETE ON pg_catalog.pg_dist_shard FOR EACH ROW EXECUTE PROCEDURE master_dist_shard_cache_invalidate(); /***************************************************************************** * Citus aggregates *****************************************************************************/ CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray); COMMENT ON AGGREGATE array_cat_agg(anyarray) IS 'concatenate input arrays into a single array'; /* * Creates a temporary table exactly like the specified target table along with * a trigger to redirect any INSERTed rows from the proxy to the underlying * table. Users may optionally provide a sequence which will be incremented * after each row that has been successfully proxied (useful for counting rows * processed). Returns the name of the proxy table that was created. 
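 *
 * A hedged usage sketch (the table and sequence names below are hypothetical):
 *
 *   CREATE SEQUENCE rows_proxied;
 *   SELECT create_insert_proxy_for_table('page_views', 'rows_proxied');
 *   -- COPY into the returned proxy table; each row is redirected to
 *   -- page_views and rows_proxied is incremented once per proxied row.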
*/ CREATE FUNCTION create_insert_proxy_for_table(target_table regclass, sequence regclass DEFAULT NULL) RETURNS text AS $create_insert_proxy_for_table$ DECLARE temp_table_name text; attr_names text[]; attr_list text; param_list text; using_list text; insert_command text; -- templates to create dynamic functions, tables, and triggers func_tmpl CONSTANT text := $$CREATE FUNCTION pg_temp.copy_to_insert() RETURNS trigger AS $copy_to_insert$ BEGIN EXECUTE %L USING %s; PERFORM nextval(%L); RETURN NULL; END; $copy_to_insert$ LANGUAGE plpgsql;$$; table_tmpl CONSTANT text := $$CREATE TEMPORARY TABLE %I (LIKE %s INCLUDING DEFAULTS)$$; trigger_tmpl CONSTANT text := $$CREATE TRIGGER copy_to_insert BEFORE INSERT ON %s FOR EACH ROW EXECUTE PROCEDURE pg_temp.copy_to_insert()$$; BEGIN -- create name of temporary table using unqualified input table name SELECT format('%s_insert_proxy', relname) INTO STRICT temp_table_name FROM pg_class WHERE oid = target_table; -- get list of all attributes in table, we'll need shortly SELECT array_agg(attname) INTO STRICT attr_names FROM pg_attribute WHERE attrelid = target_table AND attnum > 0 AND NOT attisdropped; -- build fully specified column list and USING clause from attr. names SELECT string_agg(quote_ident(attr_name), ','), string_agg(format('NEW.%I', attr_name), ',') INTO STRICT attr_list, using_list FROM unnest(attr_names) AS attr_name; -- build ($1, $2, $3)-style VALUE list to bind parameters SELECT string_agg('$' || param_num, ',') INTO STRICT param_list FROM generate_series(1, array_length(attr_names, 1)) AS param_num; -- use the above lists to generate appropriate INSERT command insert_command = format('INSERT INTO %s (%s) VALUES (%s)', target_table, attr_list, param_list); -- use the command to make one-off trigger targeting specified table EXECUTE format(func_tmpl, insert_command, using_list, sequence); -- create a temporary table exactly like the target table... EXECUTE format(table_tmpl, temp_table_name, target_table); -- ... and install the trigger on that temporary table EXECUTE format(trigger_tmpl, quote_ident(temp_table_name)::regclass); RETURN temp_table_name; END; $create_insert_proxy_for_table$ LANGUAGE plpgsql SET search_path = 'pg_catalog'; COMMENT ON FUNCTION create_insert_proxy_for_table(regclass, regclass) IS 'create a proxy table that redirects INSERTed rows to a target table'; -- define shard repair function CREATE FUNCTION master_copy_shard_placement(shard_id bigint, source_node_name text, source_node_port integer, target_node_name text, target_node_port integer) RETURNS void AS 'MODULE_PATHNAME' LANGUAGE C STRICT; RESET search_path; citus-7.0.3/src/backend/distributed/commands/000077500000000000000000000000001317107136600211565ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/commands/create_distributed_table.c000066400000000000000000001226321317107136600263440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * create_distributed_relation.c * Routines relation to the creation of distributed relations. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/genam.h" #include "access/hash.h" #include "access/heapam.h" #include "access/htup.h" #include "access/htup_details.h" #include "access/nbtree.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/index.h" #include "catalog/pg_am.h" #include "catalog/pg_constraint_fn.h" #include "catalog/pg_enum.h" #include "catalog/pg_extension.h" #include "catalog/pg_opclass.h" #include "catalog/pg_trigger.h" #include "commands/defrem.h" #include "commands/extension.h" #include "commands/trigger.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/distribution_column.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_copy.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_utility.h" #include "distributed/pg_dist_colocation.h" #include "distributed/pg_dist_partition.h" #include "distributed/reference_table_utils.h" #include "distributed/remote_commands.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" #include "executor/executor.h" #include "executor/spi.h" #include "nodes/execnodes.h" #include "nodes/nodeFuncs.h" #include "nodes/pg_list.h" #include "parser/parse_expr.h" #include "parser/parse_node.h" #include "parser/parse_relation.h" #include "parser/parser.h" #include "tcop/pquery.h" #include "tcop/tcopprot.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/inval.h" /* Replication model to use when creating distributed tables */ int ReplicationModel = REPLICATION_MODEL_COORDINATOR; /* local function forward declarations */ static char AppropriateReplicationModel(char distributionMethod, bool viaDeprecatedAPI); static void CreateHashDistributedTableShards(Oid relationId, Oid colocatedTableId, bool localTableEmpty); static uint32 ColocationIdForNewTable(Oid relationId, Var *distributionColumn, char distributionMethod, char replicationModel, char *colocateWithTableName, bool viaDeprecatedAPI); static void EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn, char distributionMethod, uint32 colocationId, char replicationModel, bool viaDeprecatedAPI); static void EnsureTableCanBeColocatedWith(Oid relationId, char replicationModel, Oid distributionColumnType, Oid sourceRelationId); static void EnsureSchemaExistsOnAllNodes(Oid relationId); static void EnsureLocalTableEmpty(Oid relationId); static void EnsureTableNotDistributed(Oid relationId); static char LookupDistributionMethod(Oid distributionMethodOid); static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId, int16 supportFunctionNumber); static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod, bool viaDepracatedAPI); static bool LocalTableEmpty(Oid tableId); static void CopyLocalDataIntoShards(Oid relationId); static List * TupleDescColumnNameList(TupleDesc tupleDescriptor); static bool RelationUsesIdentityColumns(TupleDesc relationDesc); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(master_create_distributed_table); PG_FUNCTION_INFO_V1(create_distributed_table); 
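/*
 * For orientation, a hedged sketch of how the UDFs exported in this file are
 * typically invoked from SQL (relation and column names are hypothetical):
 *
 *   SELECT create_distributed_table('orders', 'customer_id', 'hash');
 *   SELECT create_reference_table('countries');
 *   -- deprecated API, kept for backwards compatibility:
 *   SELECT master_create_distributed_table('orders', 'customer_id', 'hash');
 */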
PG_FUNCTION_INFO_V1(create_reference_table); /* * master_create_distributed_table accepts a table, distribution column and * method and performs the corresponding catalog changes. * * Note that this UDF is deprecated and cannot create colocated tables, so we * always use INVALID_COLOCATION_ID. */ Datum master_create_distributed_table(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); text *distributionColumnText = PG_GETARG_TEXT_P(1); Oid distributionMethodOid = PG_GETARG_OID(2); char *distributionColumnName = NULL; Var *distributionColumn = NULL; char distributionMethod = 0; char *colocateWithTableName = NULL; bool viaDeprecatedAPI = true; Relation relation = NULL; CheckCitusVersion(ERROR); EnsureCoordinator(); /* * Lock target relation with an exclusive lock - there's no way to make * sense of this table until we've committed, and we don't want multiple * backends manipulating this relation. */ relation = relation_open(relationId, ExclusiveLock); /* * We should do this check here since the codes in the following lines rely * on this relation to have a supported relation kind. More extensive checks * will be performed in CreateDistributedTable. */ EnsureRelationKindSupported(relationId); distributionColumnName = text_to_cstring(distributionColumnText); distributionColumn = BuildDistributionKeyFromColumnName(relation, distributionColumnName); distributionMethod = LookupDistributionMethod(distributionMethodOid); CreateDistributedTable(relationId, distributionColumn, distributionMethod, colocateWithTableName, viaDeprecatedAPI); relation_close(relation, NoLock); PG_RETURN_VOID(); } /* * create_distributed_table gets a table name, distribution column, * distribution method and colocate_with option, then it creates a * distributed table. */ Datum create_distributed_table(PG_FUNCTION_ARGS) { Oid relationId = InvalidOid; text *distributionColumnText = NULL; Oid distributionMethodOid = InvalidOid; text *colocateWithTableNameText = NULL; Relation relation = NULL; char *distributionColumnName = NULL; Var *distributionColumn = NULL; char distributionMethod = 0; char *colocateWithTableName = NULL; bool viaDeprecatedAPI = false; CheckCitusVersion(ERROR); EnsureCoordinator(); relationId = PG_GETARG_OID(0); distributionColumnText = PG_GETARG_TEXT_P(1); distributionMethodOid = PG_GETARG_OID(2); colocateWithTableNameText = PG_GETARG_TEXT_P(3); /* * Lock target relation with an exclusive lock - there's no way to make * sense of this table until we've committed, and we don't want multiple * backends manipulating this relation. */ relation = relation_open(relationId, ExclusiveLock); /* * We should do this check here since the codes in the following lines rely * on this relation to have a supported relation kind. More extensive checks * will be performed in CreateDistributedTable. */ EnsureRelationKindSupported(relationId); distributionColumnName = text_to_cstring(distributionColumnText); distributionColumn = BuildDistributionKeyFromColumnName(relation, distributionColumnName); distributionMethod = LookupDistributionMethod(distributionMethodOid); colocateWithTableName = text_to_cstring(colocateWithTableNameText); CreateDistributedTable(relationId, distributionColumn, distributionMethod, colocateWithTableName, viaDeprecatedAPI); relation_close(relation, NoLock); PG_RETURN_VOID(); } /* * CreateReferenceTable creates a distributed table with the given relationId. The * created table has one shard and replication factor is set to the active worker * count. 
In fact, the above is the definition of a reference table in Citus. */ Datum create_reference_table(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); Relation relation = NULL; char *colocateWithTableName = NULL; List *workerNodeList = NIL; int workerCount = 0; Var *distributionColumn = NULL; bool viaDeprecatedAPI = false; EnsureCoordinator(); CheckCitusVersion(ERROR); /* * Ensure schema exists on each worker node. We cannot run this function * transactionally, since we may create shards over separate sessions and * shard creation depends on the schema being present and visible from all * sessions. */ EnsureSchemaExistsOnAllNodes(relationId); /* * Lock target relation with an exclusive lock - there's no way to make * sense of this table until we've committed, and we don't want multiple * backends manipulating this relation. */ relation = relation_open(relationId, ExclusiveLock); /* * We should do this check here since the codes in the following lines rely * on this relation to have a supported relation kind. More extensive checks * will be performed in CreateDistributedTable. */ EnsureRelationKindSupported(relationId); workerNodeList = ActivePrimaryNodeList(); workerCount = list_length(workerNodeList); /* if there are no workers, error out */ if (workerCount == 0) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot create reference table \"%s\"", relationName), errdetail("There are no active worker nodes."))); } CreateDistributedTable(relationId, distributionColumn, DISTRIBUTE_BY_NONE, colocateWithTableName, viaDeprecatedAPI); relation_close(relation, NoLock); PG_RETURN_VOID(); } /* * CreateDistributedTable creates a distributed table in the given configuration. * This function contains all necessary logic to create distributed tables. It * performs necessary checks to ensure distributing the table is safe. If it is * safe to distribute the table, this function creates distributed table metadata, * creates shards and copies local data to shards. This function also handles * partitioned tables by distributing their partitions as well. * * The viaDeprecatedAPI boolean flag is not the optimal way to implement this function, * but it helps reduce code duplication a lot. We hope to remove that flag one * day, once we deprecate master_create_distributed_table completely. */ void CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributionMethod, char *colocateWithTableName, bool viaDeprecatedAPI) { char replicationModel = REPLICATION_MODEL_INVALID; uint32 colocationId = INVALID_COLOCATION_ID; Oid colocatedTableId = InvalidOid; bool localTableEmpty = false; Relation colocatedRelation = NULL; replicationModel = AppropriateReplicationModel(distributionMethod, viaDeprecatedAPI); /* * ColocationIdForNewTable assumes caller acquires lock on relationId. In our case, * our caller already acquired lock on relationId.
*/ colocationId = ColocationIdForNewTable(relationId, distributionColumn, distributionMethod, replicationModel, colocateWithTableName, viaDeprecatedAPI); EnsureRelationCanBeDistributed(relationId, distributionColumn, distributionMethod, colocationId, replicationModel, viaDeprecatedAPI); /* we need to calculate these variables before creating distributed metadata */ localTableEmpty = LocalTableEmpty(relationId); colocatedTableId = ColocatedTableId(colocationId); /* create an entry for distributed table in pg_dist_partition */ InsertIntoPgDistPartition(relationId, distributionMethod, distributionColumn, colocationId, replicationModel); /* foreign tables does not support TRUNCATE trigger */ if (RegularTable(relationId)) { CreateTruncateTrigger(relationId); } /* * If we are using master_create_distributed_table, we don't need to continue, * because deprecated API does not supports the following features. */ if (viaDeprecatedAPI) { /* * We exit early but there is no need to close colocatedRelation. Because * if viaDeprecatedAPI is true, we never open colocatedRelation in the first * place. */ Assert(colocatedRelation == NULL); return; } /* create shards for hash distributed and reference tables */ if (distributionMethod == DISTRIBUTE_BY_HASH) { CreateHashDistributedTableShards(relationId, colocatedTableId, localTableEmpty); } else if (distributionMethod == DISTRIBUTE_BY_NONE) { CreateReferenceTableShard(relationId); } /* if this table is partitioned table, distribute its partitions too */ if (PartitionedTable(relationId)) { List *partitionList = PartitionList(relationId); ListCell *partitionCell = NULL; foreach(partitionCell, partitionList) { Oid partitionRelationId = lfirst_oid(partitionCell); CreateDistributedTable(partitionRelationId, distributionColumn, distributionMethod, colocateWithTableName, viaDeprecatedAPI); } } /* copy over data for hash distributed and reference tables */ if (distributionMethod == DISTRIBUTE_BY_HASH || distributionMethod == DISTRIBUTE_BY_NONE) { if (RegularTable(relationId)) { CopyLocalDataIntoShards(relationId); } } if (colocatedRelation != NULL) { relation_close(colocatedRelation, NoLock); } if (ShouldSyncTableMetadata(relationId)) { CreateTableMetadataOnWorkers(relationId); } } /* * AppropriateReplicationModel function decides which replication model should be * used depending on given distribution configuration and global ReplicationModel * variable. If ReplicationModel conflicts with distribution configuration, this * function errors out. 
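 *
 * A hedged illustration (hypothetical relation name): for hash-distributed
 * tables the chosen model follows the citus.replication_model GUC, e.g.
 *
 *   SET citus.replication_model TO 'streaming';
 *   SELECT create_distributed_table('events', 'tenant_id', 'hash');
 *
 * while reference tables always use 2PC and the deprecated API falls back to
 * statement-based (coordinator) replication.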
*/ static char AppropriateReplicationModel(char distributionMethod, bool viaDeprecatedAPI) { if (viaDeprecatedAPI) { if (ReplicationModel != REPLICATION_MODEL_COORDINATOR) { ereport(NOTICE, (errmsg("using statement-based replication"), errdetail("The current replication_model setting is " "'streaming', which is not supported by " "master_create_distributed_table."), errhint("Use create_distributed_table to use the streaming " "replication model."))); } return REPLICATION_MODEL_COORDINATOR; } else if (distributionMethod == DISTRIBUTE_BY_NONE) { return REPLICATION_MODEL_2PC; } else if (distributionMethod == DISTRIBUTE_BY_HASH) { return ReplicationModel; } else { if (ReplicationModel != REPLICATION_MODEL_COORDINATOR) { ereport(NOTICE, (errmsg("using statement-based replication"), errdetail("Streaming replication is supported only for " "hash-distributed tables."))); } return REPLICATION_MODEL_COORDINATOR; } /* we should not reach to this point */ return REPLICATION_MODEL_INVALID; } /* * CreateHashDistributedTableShards creates shards of given hash distributed table. */ static void CreateHashDistributedTableShards(Oid relationId, Oid colocatedTableId, bool localTableEmpty) { bool useExclusiveConnection = false; /* * Ensure schema exists on each worker node. We can not run this function * transactionally, since we may create shards over separate sessions and * shard creation depends on the schema being present and visible from all * sessions. */ EnsureSchemaExistsOnAllNodes(relationId); if (RegularTable(relationId)) { useExclusiveConnection = IsTransactionBlock() || !localTableEmpty; } if (colocatedTableId != InvalidOid) { CreateColocatedShards(relationId, colocatedTableId, useExclusiveConnection); } else { /* * This path is only reached by create_distributed_table for the distributed * tables which will not be part of an existing colocation group. Therefore, * we can directly use ShardCount and ShardReplicationFactor global variables * here. */ CreateShardsWithRoundRobinPolicy(relationId, ShardCount, ShardReplicationFactor, useExclusiveConnection); } } /* * ColocationIdForNewTable returns a colocation id for hash-distributed table * according to given configuration. If there is no such configuration, it * creates one and returns colocation id of newly the created colocation group. * For append and range distributed tables, this function errors out if * colocateWithTableName parameter is not NULL, otherwise directly returns * INVALID_COLOCATION_ID. * * This function assumes its caller take necessary lock on relationId to * prevent possible changes on it. */ static uint32 ColocationIdForNewTable(Oid relationId, Var *distributionColumn, char distributionMethod, char replicationModel, char *colocateWithTableName, bool viaDeprecatedAPI) { uint32 colocationId = INVALID_COLOCATION_ID; if (viaDeprecatedAPI) { return colocationId; } else if (distributionMethod == DISTRIBUTE_BY_APPEND || distributionMethod == DISTRIBUTE_BY_RANGE) { if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) != 0) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot distribute relation"), errdetail("Currently, colocate_with option is only supported " "for hash distributed tables."))); } return colocationId; } else if (distributionMethod == DISTRIBUTE_BY_NONE) { return CreateReferenceTableColocationId(); } else { /* * Get an exclusive lock on the colocation system catalog. Therefore, we * can be sure that there will no modifications on the colocation table * until this transaction is committed. 
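 *
 * The colocate_with option handled below accepts 'default', 'none', or an
 * existing table name. A hedged usage sketch (relation names and the named
 * argument form are illustrative assumptions):
 *
 *   SELECT create_distributed_table('line_items', 'order_id',
 *                                   colocate_with => 'orders');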
*/ Relation pgDistColocation = heap_open(DistColocationRelationId(), ExclusiveLock); Oid distributionColumnType = distributionColumn->vartype; bool createdColocationGroup = false; if (pg_strncasecmp(colocateWithTableName, "default", NAMEDATALEN) == 0) { /* check for default colocation group */ colocationId = ColocationId(ShardCount, ShardReplicationFactor, distributionColumnType); if (colocationId == INVALID_COLOCATION_ID) { colocationId = CreateColocationGroup(ShardCount, ShardReplicationFactor, distributionColumnType); createdColocationGroup = true; } } else if (pg_strncasecmp(colocateWithTableName, "none", NAMEDATALEN) == 0) { colocationId = GetNextColocationId(); createdColocationGroup = true; } else { text *colocateWithTableNameText = cstring_to_text(colocateWithTableName); Oid sourceRelationId = ResolveRelationId(colocateWithTableNameText); EnsureTableCanBeColocatedWith(relationId, replicationModel, distributionColumnType, sourceRelationId); colocationId = TableColocationId(sourceRelationId); } /* * If we created a new colocation group then we need to keep the lock to * prevent a concurrent create_distributed_table call from creating another * colocation group with the same parameters. If we're using an existing * colocation group then other transactions will use the same one. */ if (createdColocationGroup) { /* keep the exclusive lock */ heap_close(pgDistColocation, NoLock); } else { /* release the exclusive lock */ heap_close(pgDistColocation, ExclusiveLock); } } return colocationId; } /* * EnsureRelationCanBeDistributed checks whether Citus can safely distribute given * relation with the given configuration. We perform almost all safety checks for * distributing table here. If there is an unsatisfied requirement, we error out * and do not distribute the table. * * This function assumes, callers have already acquried necessary locks to ensure * there will not be any change in the given relation. */ static void EnsureRelationCanBeDistributed(Oid relationId, Var *distributionColumn, char distributionMethod, uint32 colocationId, char replicationModel, bool viaDeprecatedAPI) { Relation relation = NULL; TupleDesc relationDesc = NULL; char *relationName = NULL; Oid parentRelationId = InvalidOid; EnsureTableOwner(relationId); EnsureTableNotDistributed(relationId); EnsureLocalTableEmptyIfNecessary(relationId, distributionMethod, viaDeprecatedAPI); EnsureReplicationSettings(InvalidOid, replicationModel); /* we assume callers took necessary locks */ relation = relation_open(relationId, NoLock); relationDesc = RelationGetDescr(relation); relationName = RelationGetRelationName(relation); /* verify target relation does not use WITH (OIDS) PostgreSQL feature */ if (relationDesc->tdhasoid) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot distribute relation: %s", relationName), errdetail("Distributed relations must not specify the WITH " "(OIDS) option in their definitions."))); } /* verify target relation does not use identity columns */ if (RelationUsesIdentityColumns(relationDesc)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot distribute relation: %s", relationName), errdetail("Distributed relations must not use GENERATED " "... 
AS IDENTITY."))); } /* check for support function needed by specified partition method */ if (distributionMethod == DISTRIBUTE_BY_HASH) { Oid hashSupportFunction = SupportFunctionForColumn(distributionColumn, HASH_AM_OID, HASHPROC); if (hashSupportFunction == InvalidOid) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("could not identify a hash function for type %s", format_type_be(distributionColumn->vartype)), errdatatype(distributionColumn->vartype), errdetail("Partition column types must have a hash function " "defined to use hash partitioning."))); } } else if (distributionMethod == DISTRIBUTE_BY_RANGE) { Oid btreeSupportFunction = SupportFunctionForColumn(distributionColumn, BTREE_AM_OID, BTORDER_PROC); if (btreeSupportFunction == InvalidOid) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("could not identify a comparison function for type %s", format_type_be(distributionColumn->vartype)), errdatatype(distributionColumn->vartype), errdetail("Partition column types must have a comparison function " "defined to use range partitioning."))); } } if (PartitionTable(relationId)) { parentRelationId = PartitionParentOid(relationId); } /* partitions cannot be distributed if their parent is not distributed */ if (PartitionTable(relationId) && !IsDistributedTable(parentRelationId)) { char *relationName = get_rel_name(relationId); char *parentRelationName = get_rel_name(parentRelationId); ereport(ERROR, (errmsg("cannot distribute relation \"%s\" which is partition of " "\"%s\"", relationName, parentRelationName), errdetail("Citus does not support distributing partitions " "if their parent is not distributed table."), errhint("Distribute the partitioned table \"%s\" instead.", parentRelationName))); } /* * These checks are mostly for partitioned tables not partitions because we prevent * distributing partitions directly in the above check. However, partitions can still * reach this point because, we call CreateDistributedTable for partitions if their * parent table is distributed. 
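 *
 * A hedged sketch of the supported case (hypothetical relation names):
 * distributing the partitioned parent also distributes each partition through
 * the recursive call mentioned above.
 *
 *   CREATE TABLE events (tenant_id int, created_at date, payload text)
 *       PARTITION BY RANGE (created_at);
 *   CREATE TABLE events_2017 PARTITION OF events
 *       FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
 *   SELECT create_distributed_table('events', 'tenant_id', 'hash');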
 */ if (PartitionedTable(relationId)) { /* we cannot distribute partitioned tables with master_create_distributed_table */ if (viaDeprecatedAPI) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("distributing partitioned tables is only supported " "with the create_distributed_table UDF"))); } /* distributing partitioned tables is only supported for hash distribution */ if (distributionMethod != DISTRIBUTE_BY_HASH) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("distributing partitioned tables is only supported " "for hash-distributed tables"))); } /* we currently don't support partitioned tables for replication factor > 1 */ if (ShardReplicationFactor > 1) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("distributing partitioned tables with replication " "factor greater than 1 is not supported"))); } /* we currently don't support distributing partitioned tables as MX tables */ if (replicationModel == REPLICATION_MODEL_STREAMING) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("distributing partitioned tables which use " "streaming replication is not supported"))); } /* we don't support distributing tables with multi-level partitioning */ if (PartitionTable(relationId)) { char *relationName = get_rel_name(relationId); Oid parentRelationId = PartitionParentOid(relationId); char *parentRelationName = get_rel_name(parentRelationId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("distributing multi-level partitioned tables " "is not supported"), errdetail("Relation \"%s\" is a partitioned table itself and " "it is also a partition of relation \"%s\".", relationName, parentRelationName))); } } ErrorIfUnsupportedConstraint(relation, distributionMethod, distributionColumn, colocationId); relation_close(relation, NoLock); } /* * EnsureTableCanBeColocatedWith checks whether a given replication model and * distribution column type are suitable for distributing a table to be colocated * with the given source table. * * We only pass relationId to provide meaningful error messages.
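 *
 * A hedged failure sketch (hypothetical relations): if "clicks" is keyed on a
 * text column while "orders" is hash-distributed on an integer column, then
 *
 *   SELECT create_distributed_table('clicks', 'site_name',
 *                                   colocate_with => 'orders');
 *   -- ERROR:  cannot colocate tables orders and clicks
 *   -- DETAIL: Distribution column types don't match for orders and clicks.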
*/ static void EnsureTableCanBeColocatedWith(Oid relationId, char replicationModel, Oid distributionColumnType, Oid sourceRelationId) { DistTableCacheEntry *sourceTableEntry = DistributedTableCacheEntry(sourceRelationId); char sourceDistributionMethod = sourceTableEntry->partitionMethod; char sourceReplicationModel = sourceTableEntry->replicationModel; Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId); Oid sourceDistributionColumnType = InvalidOid; if (sourceDistributionMethod != DISTRIBUTE_BY_HASH) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot distribute relation"), errdetail("Currently, colocate_with option is only supported " "for hash distributed tables."))); } if (sourceReplicationModel != replicationModel) { char *relationName = get_rel_name(relationId); char *sourceRelationName = get_rel_name(sourceRelationId); ereport(ERROR, (errmsg("cannot colocate tables %s and %s", sourceRelationName, relationName), errdetail("Replication models don't match for %s and %s.", sourceRelationName, relationName))); } sourceDistributionColumnType = sourceDistributionColumn->vartype; if (sourceDistributionColumnType != distributionColumnType) { char *relationName = get_rel_name(relationId); char *sourceRelationName = get_rel_name(sourceRelationId); ereport(ERROR, (errmsg("cannot colocate tables %s and %s", sourceRelationName, relationName), errdetail("Distribution column types don't match for " "%s and %s.", sourceRelationName, relationName))); } } /* * EnsureSchemaExistsOnAllNodes connects to all nodes with citus extension user * and creates the schema of the given relationId. The function errors out if the * command cannot be executed in any of the worker nodes. */ static void EnsureSchemaExistsOnAllNodes(Oid relationId) { List *workerNodeList = ActivePrimaryNodeList(); ListCell *workerNodeCell = NULL; StringInfo applySchemaCreationDDL = makeStringInfo(); Oid schemaId = get_rel_namespace(relationId); const char *createSchemaDDL = CreateSchemaDDLCommand(schemaId); uint64 connectionFlag = FORCE_NEW_CONNECTION; if (createSchemaDDL == NULL) { return; } appendStringInfo(applySchemaCreationDDL, "%s", createSchemaDDL); foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlag, nodeName, nodePort, NULL, NULL); ExecuteCriticalRemoteCommand(connection, applySchemaCreationDDL->data); } } /* * EnsureLocalTableEmptyIfNecessary only checks for emptiness if only an empty * relation can be distributed in given configuration. * * In some cases, it is possible and safe to send local data to shards while * distributing the table. In those cases, we can distribute non-empty local * tables. This function checks the distributionMethod and relation kind to * see whether we need to be ensure emptiness of local table. If we need to * be sure, this function calls EnsureLocalTableEmpty function to ensure * that local table does not contain any data. 
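 *
 * A hedged sketch (hypothetical relation): a populated regular table can be
 * hash-distributed because its rows are copied into the shards afterwards,
 * whereas append- or range-distribution requires an empty table:
 *
 *   INSERT INTO raw_events VALUES (1, now());
 *   SELECT create_distributed_table('raw_events', 'event_id', 'append');
 *   -- ERROR:  cannot distribute relation "raw_events"
 *   -- DETAIL: Relation "raw_events" contains data.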
*/ static void EnsureLocalTableEmptyIfNecessary(Oid relationId, char distributionMethod, bool viaDepracatedAPI) { if (viaDepracatedAPI) { EnsureLocalTableEmpty(relationId); } else if (distributionMethod != DISTRIBUTE_BY_HASH && distributionMethod != DISTRIBUTE_BY_NONE) { EnsureLocalTableEmpty(relationId); } else if (!RegularTable(relationId)) { EnsureLocalTableEmpty(relationId); } } /* * EnsureLocalTableEmpty errors out if the local table is not empty. */ static void EnsureLocalTableEmpty(Oid relationId) { char *relationName = get_rel_name(relationId); bool localTableEmpty = LocalTableEmpty(relationId); if (!localTableEmpty) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot distribute relation \"%s\"", relationName), errdetail("Relation \"%s\" contains data.", relationName), errhint("Empty your table before distributing it."))); } } /* * EnsureTableNotDistributed errors out if the table is distributed. */ static void EnsureTableNotDistributed(Oid relationId) { char *relationName = get_rel_name(relationId); bool isDistributedTable = false; isDistributedTable = IsDistributedTable(relationId); if (isDistributedTable) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("table \"%s\" is already distributed", relationName))); } } /* * EnsureReplicationSettings checks whether the current replication factor * setting is compatible with the replication model. This function errors * out if caller tries to use streaming replication with more than one * replication factor. */ void EnsureReplicationSettings(Oid relationId, char replicationModel) { char *msgSuffix = "the streaming replication model"; char *extraHint = " or setting \"citus.replication_model\" to \"statement\""; if (relationId != InvalidOid) { msgSuffix = "tables which use the streaming replication model"; extraHint = ""; } if (replicationModel == REPLICATION_MODEL_STREAMING && ShardReplicationFactor != 1) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("replication factors above one are incompatible with %s", msgSuffix), errhint("Try again after reducing \"citus.shard_replication_" "factor\" to one%s.", extraHint))); } } /* * LookupDistributionMethod maps the oids of citus.distribution_type enum * values to pg_dist_partition.partmethod values. * * The passed in oid has to belong to a value of citus.distribution_type. */ static char LookupDistributionMethod(Oid distributionMethodOid) { HeapTuple enumTuple = NULL; Form_pg_enum enumForm = NULL; char distributionMethod = 0; const char *enumLabel = NULL; enumTuple = SearchSysCache1(ENUMOID, ObjectIdGetDatum(distributionMethodOid)); if (!HeapTupleIsValid(enumTuple)) { ereport(ERROR, (errmsg("invalid internal value for enum: %u", distributionMethodOid))); } enumForm = (Form_pg_enum) GETSTRUCT(enumTuple); enumLabel = NameStr(enumForm->enumlabel); if (strncmp(enumLabel, "append", NAMEDATALEN) == 0) { distributionMethod = DISTRIBUTE_BY_APPEND; } else if (strncmp(enumLabel, "hash", NAMEDATALEN) == 0) { distributionMethod = DISTRIBUTE_BY_HASH; } else if (strncmp(enumLabel, "range", NAMEDATALEN) == 0) { distributionMethod = DISTRIBUTE_BY_RANGE; } else { ereport(ERROR, (errmsg("invalid label for enum: %s", enumLabel))); } ReleaseSysCache(enumTuple); return distributionMethod; } /* * SupportFunctionForColumn locates a support function given a column, an access method, * and and id of a support function. 
This function returns InvalidOid if there is no * support function for the operator class family of the column, but if the data type * of the column has no default operator class whatsoever, this function errors out. */ static Oid SupportFunctionForColumn(Var *partitionColumn, Oid accessMethodId, int16 supportFunctionNumber) { Oid operatorFamilyId = InvalidOid; Oid supportFunctionOid = InvalidOid; Oid operatorClassInputType = InvalidOid; Oid columnOid = partitionColumn->vartype; Oid operatorClassId = GetDefaultOpClass(columnOid, accessMethodId); /* currently only support using the default operator class */ if (operatorClassId == InvalidOid) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("data type %s has no default operator class for specified" " partition method", format_type_be(columnOid)), errdatatype(columnOid), errdetail("Partition column types must have a default operator" " class defined."))); } operatorFamilyId = get_opclass_family(operatorClassId); operatorClassInputType = get_opclass_input_type(operatorClassId); supportFunctionOid = get_opfamily_proc(operatorFamilyId, operatorClassInputType, operatorClassInputType, supportFunctionNumber); return supportFunctionOid; } /* * LocalTableEmpty function checks whether given local table contains any row and * returns false if there is any data. This function is only for local tables and * should not be called for distributed tables. */ static bool LocalTableEmpty(Oid tableId) { Oid schemaId = get_rel_namespace(tableId); char *schemaName = get_namespace_name(schemaId); char *tableName = get_rel_name(tableId); char *tableQualifiedName = quote_qualified_identifier(schemaName, tableName); int spiConnectionResult = 0; int spiQueryResult = 0; StringInfo selectExistQueryString = makeStringInfo(); HeapTuple tuple = NULL; Datum hasDataDatum = 0; bool localTableEmpty = false; bool columnNull = false; bool readOnly = true; int rowId = 0; int attributeId = 1; AssertArg(!IsDistributedTable(tableId)); spiConnectionResult = SPI_connect(); if (spiConnectionResult != SPI_OK_CONNECT) { ereport(ERROR, (errmsg("could not connect to SPI manager"))); } appendStringInfo(selectExistQueryString, SELECT_EXIST_QUERY, tableQualifiedName); spiQueryResult = SPI_execute(selectExistQueryString->data, readOnly, 0); if (spiQueryResult != SPI_OK_SELECT) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", selectExistQueryString->data))); } /* we expect that SELECT EXISTS query will return single value in a single row */ Assert(SPI_processed == 1); tuple = SPI_tuptable->vals[rowId]; hasDataDatum = SPI_getbinval(tuple, SPI_tuptable->tupdesc, attributeId, &columnNull); localTableEmpty = !DatumGetBool(hasDataDatum); SPI_finish(); return localTableEmpty; } /* * CreateTruncateTrigger creates a truncate trigger on table identified by relationId * and assigns citus_truncate_trigger() as handler. 
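 *
 * The trigger built below is roughly equivalent to the following SQL (a
 * hedged sketch; "orders" is a hypothetical table name and the real trigger
 * is additionally marked as internal):
 *
 *   CREATE TRIGGER truncate_trigger
 *       BEFORE TRUNCATE ON orders
 *       FOR EACH STATEMENT EXECUTE PROCEDURE citus_truncate_trigger();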
*/ void CreateTruncateTrigger(Oid relationId) { CreateTrigStmt *trigger = NULL; StringInfo triggerName = makeStringInfo(); bool internal = true; appendStringInfo(triggerName, "truncate_trigger"); trigger = makeNode(CreateTrigStmt); trigger->trigname = triggerName->data; trigger->relation = NULL; trigger->funcname = SystemFuncName("citus_truncate_trigger"); trigger->args = NIL; trigger->row = false; trigger->timing = TRIGGER_TYPE_BEFORE; trigger->events = TRIGGER_TYPE_TRUNCATE; trigger->columns = NIL; trigger->whenClause = NULL; trigger->isconstraint = false; CreateTrigger(trigger, NULL, relationId, InvalidOid, InvalidOid, InvalidOid, internal); } /* * RegularTable function returns true if given table's relation kind is RELKIND_RELATION * (or RELKIND_PARTITIONED_TABLE for PG >= 10), otherwise it returns false. */ bool RegularTable(Oid relationId) { char relationKind = get_rel_relkind(relationId); #if (PG_VERSION_NUM >= 100000) if (relationKind == RELKIND_RELATION || relationKind == RELKIND_PARTITIONED_TABLE) #else if (relationKind == RELKIND_RELATION) #endif { return true; } return false; } /* * CopyLocalDataIntoShards copies data from the local table, which is hidden * after converting it to a distributed table, into the shards of the distributed * table. For partitioned tables, this functions returns without copying the data * because we call this function for both partitioned tables and its partitions. * Returning early saves us from copying data to workers twice. * * This function uses CitusCopyDestReceiver to invoke the distributed COPY logic. * We cannot use a regular COPY here since that cannot read from a table. Instead * we read from the table and pass each tuple to the CitusCopyDestReceiver which * opens a connection and starts a COPY for each shard placement that will have * data. * * We could call the planner and executor here and send the output to the * DestReceiver, but we are in a tricky spot here since Citus is already * intercepting queries on this table in the planner and executor hooks and we * want to read from the local table. To keep it simple, we perform a heap scan * directly on the table. * * Any writes on the table that are started during this operation will be handled * as distributed queries once the current transaction commits. SELECTs will * continue to read from the local table until the current transaction commits, * after which new SELECTs will be handled as distributed queries. * * After copying local data into the distributed table, the local data remains * in place and should be truncated at a later time. */ static void CopyLocalDataIntoShards(Oid distributedRelationId) { DestReceiver *copyDest = NULL; List *columnNameList = NIL; Relation distributedRelation = NULL; TupleDesc tupleDescriptor = NULL; Var *partitionColumn = NULL; int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX; bool stopOnFailure = true; EState *estate = NULL; HeapScanDesc scan = NULL; HeapTuple tuple = NULL; ExprContext *econtext = NULL; MemoryContext oldContext = NULL; TupleTableSlot *slot = NULL; uint64 rowsCopied = 0; /* take an ExclusiveLock to block all operations except SELECT */ distributedRelation = heap_open(distributedRelationId, ExclusiveLock); /* * Skip copying from partitioned tables, we will copy the data from * partition to partition's shards. */ if (PartitionedTable(distributedRelationId)) { heap_close(distributedRelation, NoLock); return; } /* * All writes have finished, make sure that we can see them by using the * latest snapshot. 
We use GetLatestSnapshot instead of * GetTransactionSnapshot since the latter would not reveal all writes * in serializable or repeatable read mode. Note that subsequent reads * from the distributed table would reveal those writes, temporarily * violating the isolation level. However, this seems preferable over * dropping the writes entirely. */ PushActiveSnapshot(GetLatestSnapshot()); /* get the table columns */ tupleDescriptor = RelationGetDescr(distributedRelation); slot = MakeSingleTupleTableSlot(tupleDescriptor); columnNameList = TupleDescColumnNameList(tupleDescriptor); /* determine the partition column in the tuple descriptor */ partitionColumn = PartitionColumn(distributedRelationId, 0); if (partitionColumn != NULL) { partitionColumnIndex = partitionColumn->varattno - 1; } /* initialise per-tuple memory context */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); econtext->ecxt_scantuple = slot; copyDest = (DestReceiver *) CreateCitusCopyDestReceiver(distributedRelationId, columnNameList, partitionColumnIndex, estate, stopOnFailure); /* initialise state for writing to shards, we'll open connections on demand */ copyDest->rStartup(copyDest, 0, tupleDescriptor); /* begin reading from local table */ scan = heap_beginscan(distributedRelation, GetActiveSnapshot(), 0, NULL); oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { /* materialize tuple and send it to a shard */ ExecStoreTuple(tuple, slot, InvalidBuffer, false); copyDest->receiveSlot(slot, copyDest); /* clear tuple memory */ ResetPerTupleExprContext(estate); /* make sure we roll back on cancellation */ CHECK_FOR_INTERRUPTS(); if (rowsCopied == 0) { ereport(NOTICE, (errmsg("Copying data from local table..."))); } rowsCopied++; if (rowsCopied % 1000000 == 0) { ereport(DEBUG1, (errmsg("Copied %ld rows", rowsCopied))); } } if (rowsCopied % 1000000 != 0) { ereport(DEBUG1, (errmsg("Copied %ld rows", rowsCopied))); } MemoryContextSwitchTo(oldContext); /* finish reading from the local table */ heap_endscan(scan); /* finish writing into the shards */ copyDest->rShutdown(copyDest); /* free memory and close the relation */ ExecDropSingleTupleTableSlot(slot); FreeExecutorState(estate); heap_close(distributedRelation, NoLock); PopActiveSnapshot(); } /* * TupleDescColumnNameList returns a list of column names for the given tuple * descriptor as plain strings. */ static List * TupleDescColumnNameList(TupleDesc tupleDescriptor) { List *columnNameList = NIL; int columnIndex = 0; for (columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++) { Form_pg_attribute currentColumn = tupleDescriptor->attrs[columnIndex]; char *columnName = NameStr(currentColumn->attname); if (currentColumn->attisdropped) { continue; } columnNameList = lappend(columnNameList, columnName); } return columnNameList; } /* * RelationUsesIdentityColumns returns whether a given relation uses the SQL * GENERATED ... AS IDENTITY features supported as of PostgreSQL 10. 
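 *
 * A hedged sketch of a rejected definition (hypothetical relation name);
 * EnsureRelationCanBeDistributed errors out for such tables:
 *
 *   CREATE TABLE items (item_id bigint GENERATED ALWAYS AS IDENTITY, name text);
 *   SELECT create_distributed_table('items', 'item_id', 'hash');
 *   -- ERROR: cannot distribute relation: items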
*/ static bool RelationUsesIdentityColumns(TupleDesc relationDesc) { #if (PG_VERSION_NUM >= 100000) int attributeIndex = 0; for (attributeIndex = 0; attributeIndex < relationDesc->natts; attributeIndex++) { Form_pg_attribute attributeForm = relationDesc->attrs[attributeIndex]; if (attributeForm->attidentity != '\0') { return true; } } #endif return false; } citus-7.0.3/src/backend/distributed/commands/drop_distributed_table.c000066400000000000000000000032671317107136600260470ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * drop_distributed_table.c * Routines related to dropping distributed relations from a trigger. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_sync.h" #include "distributed/worker_transaction.h" #include "utils/builtins.h" #include "utils/lsyscache.h" /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(master_drop_distributed_table_metadata); /* * master_drop_distributed_table_metadata removes the entry of the specified distributed * table from pg_dist_partition and drops the table from the workers if needed. */ Datum master_drop_distributed_table_metadata(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); text *schemaNameText = PG_GETARG_TEXT_P(1); text *tableNameText = PG_GETARG_TEXT_P(2); bool shouldSyncMetadata = false; char *schemaName = text_to_cstring(schemaNameText); char *tableName = text_to_cstring(tableNameText); EnsureCoordinator(); CheckCitusVersion(ERROR); CheckTableSchemaNameForDrop(relationId, &schemaName, &tableName); DeletePartitionRow(relationId); shouldSyncMetadata = ShouldSyncTableMetadata(relationId); if (shouldSyncMetadata) { char *deleteDistributionCommand = NULL; /* drop the distributed table metadata on the workers */ deleteDistributionCommand = DistributionDeleteCommand(schemaName, tableName); SendCommandToWorkers(WORKERS_WITH_METADATA, deleteDistributionCommand); } PG_RETURN_VOID(); } citus-7.0.3/src/backend/distributed/commands/multi_copy.c000066400000000000000000001707351317107136600235230ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_copy.c * This file contains implementation of COPY utility for distributed * tables. * * The CitusCopyFrom function should be called from the utility hook to process * COPY ... FROM commands on distributed tables. CitusCopyFrom parses the input * from stdin, a program, or a file, and decides to copy new rows to existing * shards or new shards based on the partition method of the distributed table. * If copy is run a worker node, CitusCopyFrom calls CopyFromWorkerNode which * parses the master node copy options and handles communication with the master * node. * * It opens a new connection for every shard placement and uses the PQputCopyData * function to copy the data. Because PQputCopyData transmits data, asynchronously, * the workers will ingest data at least partially in parallel. * * For hash-partitioned tables, if it fails to connect to a worker, the master * marks the placement for which it was trying to open a connection as inactive, * similar to the way DML statements are handled. If a failure occurs after * connecting, the transaction is rolled back on all the workers. 
Note that, * in the case of append-partitioned tables, if a fail occurs, immediately * metadata changes are rolled back on the master node, but shard placements * are left on the worker nodes. * * By default, COPY uses normal transactions on the workers. In the case of * hash or range-partitioned tables, this can cause a problem when some of the * transactions fail to commit while others have succeeded. To ensure no data * is lost, COPY can use two-phase commit, by increasing max_prepared_transactions * on the worker and setting citus.multi_shard_commit_protocol to '2pc'. The default * is '1pc'. This is not a problem for append-partitioned tables because new * shards are created and in the case of failure, metadata changes are rolled * back on the master node. * * Parsing options are processed and enforced on the node where copy command * is run, while constraints are enforced on the worker. In either case, * failure causes the whole COPY to roll back. * * Copyright (c) 2016, Citus Data, Inc. * * With contributions from Postgres Professional. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq-fe.h" #include "miscadmin.h" #include /* for htons */ #include /* for htons */ #include #include "access/htup_details.h" #include "access/htup.h" #include "access/sdir.h" #include "catalog/namespace.h" #include "catalog/pg_type.h" #include "commands/copy.h" #include "commands/defrem.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_copy.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_shard_transaction.h" #include "distributed/placement_connection.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/shard_pruning.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "tsearch/ts_locale.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" #include "utils/memutils.h" /* constant used in binary protocol */ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0"; /* use a global connection to the master node in order to skip passing it around */ static MultiConnection *masterConnection = NULL; /* Local functions forward declarations */ static void CopyFromWorkerNode(CopyStmt *copyStatement, char *completionTag); static void CopyToExistingShards(CopyStmt *copyStatement, char *completionTag); static void CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId); static char MasterPartitionMethod(RangeVar *relation); static void RemoveMasterOptions(CopyStmt *copyStatement); static void OpenCopyConnections(CopyStmt *copyStatement, ShardConnections *shardConnections, bool stopOnFailure, bool useBinaryCopyFormat); static bool CanUseBinaryCopyFormat(TupleDesc tupleDescription); static bool BinaryOutputFunctionDefined(Oid typeId); static List * MasterShardPlacementList(uint64 shardId); static List * RemoteFinalizedShardPlacementList(uint64 shardId); static void SendCopyBinaryHeaders(CopyOutState copyOutState, int64 shardId, List *connectionList); static void SendCopyBinaryFooters(CopyOutState copyOutState, int64 shardId, List *connectionList); static StringInfo ConstructCopyStatement(CopyStmt *copyStatement, int64 shardId, bool useBinaryCopyFormat); static void SendCopyDataToAll(StringInfo dataBuffer, int64 shardId, List *connectionList); static void 
SendCopyDataToPlacement(StringInfo dataBuffer, int64 shardId, MultiConnection *connection); static void EndRemoteCopy(int64 shardId, List *connectionList, bool stopOnFailure); static void ReportCopyError(MultiConnection *connection, PGresult *result); static uint32 AvailableColumnCount(TupleDesc tupleDescriptor); static int64 StartCopyToNewShard(ShardConnections *shardConnections, CopyStmt *copyStatement, bool useBinaryCopyFormat); static int64 MasterCreateEmptyShard(char *relationName); static int64 CreateEmptyShard(char *relationName); static int64 RemoteCreateEmptyShard(char *relationName); static void MasterUpdateShardStatistics(uint64 shardId); static void RemoteUpdateShardStatistics(uint64 shardId); /* Private functions copied and adapted from copy.c in PostgreSQL */ static void CopySendData(CopyOutState outputState, const void *databuf, int datasize); static void CopySendString(CopyOutState outputState, const char *str); static void CopySendChar(CopyOutState outputState, char c); static void CopySendInt32(CopyOutState outputState, int32 val); static void CopySendInt16(CopyOutState outputState, int16 val); static void CopyAttributeOutText(CopyOutState outputState, char *string); static inline void CopyFlushOutput(CopyOutState outputState, char *start, char *pointer); /* CitusCopyDestReceiver functions */ static void CitusCopyDestReceiverStartup(DestReceiver *copyDest, int operation, TupleDesc inputTupleDesc); static bool CitusCopyDestReceiverReceive(TupleTableSlot *slot, DestReceiver *copyDest); static void CitusCopyDestReceiverShutdown(DestReceiver *destReceiver); static void CitusCopyDestReceiverDestroy(DestReceiver *destReceiver); /* * CitusCopyFrom implements the COPY table_name FROM. It dispacthes the copy * statement to related subfunctions based on where the copy command is run * and the partition method of the distributed table. */ void CitusCopyFrom(CopyStmt *copyStatement, char *completionTag) { bool isCopyFromWorker = false; BeginOrContinueCoordinatedTransaction(); /* disallow COPY to/from file or program except for superusers */ if (copyStatement->filename != NULL && !superuser()) { if (copyStatement->is_program) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to COPY to or from an external program"), errhint("Anyone can COPY to stdout or from stdin. " "psql's \\copy command also works for anyone."))); } else { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to COPY to or from a file"), errhint("Anyone can COPY to stdout or from stdin. " "psql's \\copy command also works for anyone."))); } } masterConnection = NULL; /* reset, might still be set after error */ isCopyFromWorker = IsCopyFromWorker(copyStatement); if (isCopyFromWorker) { CopyFromWorkerNode(copyStatement, completionTag); } else { Oid relationId = RangeVarGetRelid(copyStatement->relation, NoLock, false); char partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod == DISTRIBUTE_BY_RANGE || partitionMethod == DISTRIBUTE_BY_NONE) { CopyToExistingShards(copyStatement, completionTag); } else if (partitionMethod == DISTRIBUTE_BY_APPEND) { CopyToNewShards(copyStatement, completionTag, relationId); } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported partition method"))); } } XactModificationLevel = XACT_MODIFICATION_DATA; } /* * IsCopyFromWorker checks if the given copy statement has the master host option. 
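 *
 * A hedged sketch of such a statement as issued on a worker (host, port, file,
 * and relation names are hypothetical and the exact option spelling is an
 * assumption; the master_* options are stripped again by RemoveMasterOptions):
 *
 *   COPY raw_events FROM '/tmp/batch.csv'
 *       WITH (format 'csv', master_host 'coordinator.example.com', master_port '5432');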
*/ bool IsCopyFromWorker(CopyStmt *copyStatement) { ListCell *optionCell = NULL; foreach(optionCell, copyStatement->options) { DefElem *defel = (DefElem *) lfirst(optionCell); if (strncmp(defel->defname, "master_host", NAMEDATALEN) == 0) { return true; } } return false; } /* * CopyFromWorkerNode implements the COPY table_name FROM ... from worker nodes * for append-partitioned tables. */ static void CopyFromWorkerNode(CopyStmt *copyStatement, char *completionTag) { NodeAddress *masterNodeAddress = MasterNodeAddress(copyStatement); char *nodeName = masterNodeAddress->nodeName; int32 nodePort = masterNodeAddress->nodePort; Oid relationId = InvalidOid; char partitionMethod = 0; char *schemaName = NULL; uint32 connectionFlags = FOR_DML; masterConnection = GetNodeConnection(connectionFlags, nodeName, nodePort); MarkRemoteTransactionCritical(masterConnection); ClaimConnectionExclusively(masterConnection); RemoteTransactionBeginIfNecessary(masterConnection); /* strip schema name for local reference */ schemaName = copyStatement->relation->schemaname; copyStatement->relation->schemaname = NULL; relationId = RangeVarGetRelid(copyStatement->relation, NoLock, false); /* put schema name back */ copyStatement->relation->schemaname = schemaName; partitionMethod = MasterPartitionMethod(copyStatement->relation); if (partitionMethod != DISTRIBUTE_BY_APPEND) { ereport(ERROR, (errmsg("copy from worker nodes is only supported " "for append-partitioned tables"))); } /* * Remove master node options from the copy statement because they are not * recognized by PostgreSQL machinery. */ RemoveMasterOptions(copyStatement); CopyToNewShards(copyStatement, completionTag, relationId); UnclaimConnection(masterConnection); masterConnection = NULL; } /* * CopyToExistingShards implements the COPY table_name FROM ... for hash or * range-partitioned tables where there are already shards into which to copy * rows. 
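 *
 * A hedged usage sketch (file and relation names are hypothetical); as the
 * file header notes, two-phase commit can be enabled to avoid partially
 * committed copies across shard placements:
 *
 *   SET citus.multi_shard_commit_protocol TO '2pc';
 *   COPY orders FROM '/tmp/orders.csv' WITH (FORMAT csv);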
*/ static void CopyToExistingShards(CopyStmt *copyStatement, char *completionTag) { Oid tableId = RangeVarGetRelid(copyStatement->relation, NoLock, false); CitusCopyDestReceiver *copyDest = NULL; DestReceiver *dest = NULL; Relation distributedRelation = NULL; Relation copiedDistributedRelation = NULL; Form_pg_class copiedDistributedRelationTuple = NULL; TupleDesc tupleDescriptor = NULL; uint32 columnCount = 0; Datum *columnValues = NULL; bool *columnNulls = NULL; int columnIndex = 0; List *columnNameList = NIL; Var *partitionColumn = NULL; int partitionColumnIndex = INVALID_PARTITION_COLUMN_INDEX; TupleTableSlot *tupleTableSlot = NULL; EState *executorState = NULL; MemoryContext executorTupleContext = NULL; ExprContext *executorExpressionContext = NULL; char partitionMethod = 0; bool stopOnFailure = false; CopyState copyState = NULL; uint64 processedRowCount = 0; ErrorContextCallback errorCallback; /* allocate column values and nulls arrays */ distributedRelation = heap_open(tableId, RowExclusiveLock); tupleDescriptor = RelationGetDescr(distributedRelation); columnCount = tupleDescriptor->natts; columnValues = palloc0(columnCount * sizeof(Datum)); columnNulls = palloc0(columnCount * sizeof(bool)); /* set up a virtual tuple table slot */ tupleTableSlot = MakeSingleTupleTableSlot(tupleDescriptor); tupleTableSlot->tts_nvalid = columnCount; tupleTableSlot->tts_values = columnValues; tupleTableSlot->tts_isnull = columnNulls; /* determine the partition column index in the tuple descriptor */ partitionColumn = PartitionColumn(tableId, 0); if (partitionColumn != NULL) { partitionColumnIndex = partitionColumn->varattno - 1; } /* build the list of column names for remote COPY statements */ for (columnIndex = 0; columnIndex < columnCount; columnIndex++) { Form_pg_attribute currentColumn = tupleDescriptor->attrs[columnIndex]; char *columnName = NameStr(currentColumn->attname); if (currentColumn->attisdropped) { continue; } columnNameList = lappend(columnNameList, columnName); } executorState = CreateExecutorState(); executorTupleContext = GetPerTupleMemoryContext(executorState); executorExpressionContext = GetPerTupleExprContext(executorState); partitionMethod = PartitionMethod(tableId); if (partitionMethod == DISTRIBUTE_BY_NONE) { stopOnFailure = true; } /* set up the destination for the COPY */ copyDest = CreateCitusCopyDestReceiver(tableId, columnNameList, partitionColumnIndex, executorState, stopOnFailure); dest = (DestReceiver *) copyDest; dest->rStartup(dest, 0, tupleDescriptor); /* * BeginCopyFrom opens all partitions of given partitioned table with relation_open * and it expects its caller to close those relations. We do not have direct access * to opened relations, thus we are changing relkind of partitioned tables so that * Postgres will treat those tables as regular relations and will not open its * partitions. * * We will make this change on copied version of distributed relation to not change * anything in relcache. */ if (PartitionedTable(tableId)) { copiedDistributedRelation = (Relation) palloc0(sizeof(RelationData)); copiedDistributedRelationTuple = (Form_pg_class) palloc(CLASS_TUPLE_SIZE); /* * There is no need to deep copy everything. We will just deep copy of the fields * we will change. 
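 * In practice that is only the pg_class tuple (rd_rel), so that relkind can be set to
 * RELKIND_RELATION without modifying the cached relation.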
*/ memcpy(copiedDistributedRelation, distributedRelation, sizeof(RelationData)); memcpy(copiedDistributedRelationTuple, distributedRelation->rd_rel, CLASS_TUPLE_SIZE); copiedDistributedRelationTuple->relkind = RELKIND_RELATION; copiedDistributedRelation->rd_rel = copiedDistributedRelationTuple; } else { /* * If we are not dealing with partitioned table, copiedDistributedRelation is same * as distributedRelation. */ copiedDistributedRelation = distributedRelation; } /* initialize copy state to read from COPY data source */ #if (PG_VERSION_NUM >= 100000) copyState = BeginCopyFrom(NULL, copiedDistributedRelation, copyStatement->filename, copyStatement->is_program, NULL, copyStatement->attlist, copyStatement->options); #else copyState = BeginCopyFrom(copiedDistributedRelation, copyStatement->filename, copyStatement->is_program, copyStatement->attlist, copyStatement->options); #endif /* set up callback to identify error line number */ errorCallback.callback = CopyFromErrorCallback; errorCallback.arg = (void *) copyState; errorCallback.previous = error_context_stack; error_context_stack = &errorCallback; while (true) { bool nextRowFound = false; MemoryContext oldContext = NULL; ResetPerTupleExprContext(executorState); oldContext = MemoryContextSwitchTo(executorTupleContext); /* parse a row from the input */ nextRowFound = NextCopyFrom(copyState, executorExpressionContext, columnValues, columnNulls, NULL); if (!nextRowFound) { MemoryContextSwitchTo(oldContext); break; } CHECK_FOR_INTERRUPTS(); MemoryContextSwitchTo(oldContext); dest->receiveSlot(tupleTableSlot, dest); processedRowCount += 1; } EndCopyFrom(copyState); /* all lines have been copied, stop showing line number in errors */ error_context_stack = errorCallback.previous; /* finish the COPY commands */ dest->rShutdown(dest); ExecDropSingleTupleTableSlot(tupleTableSlot); FreeExecutorState(executorState); heap_close(distributedRelation, NoLock); /* mark failed placements as inactive */ MarkFailedShardPlacements(); CHECK_FOR_INTERRUPTS(); if (completionTag != NULL) { snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "COPY " UINT64_FORMAT, processedRowCount); } } /* * CopyToNewShards implements the COPY table_name FROM ... for append-partitioned * tables where we create new shards into which to copy rows. 
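 * Shards are filled up to the configured maximum shard size (ShardMaxSize); once a shard
 * reaches capacity its COPY is finalized and a new shard is created for subsequent rows.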
*/ static void CopyToNewShards(CopyStmt *copyStatement, char *completionTag, Oid relationId) { FmgrInfo *columnOutputFunctions = NULL; /* allocate column values and nulls arrays */ Relation distributedRelation = heap_open(relationId, RowExclusiveLock); TupleDesc tupleDescriptor = RelationGetDescr(distributedRelation); uint32 columnCount = tupleDescriptor->natts; Datum *columnValues = palloc0(columnCount * sizeof(Datum)); bool *columnNulls = palloc0(columnCount * sizeof(bool)); EState *executorState = CreateExecutorState(); MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState); ExprContext *executorExpressionContext = GetPerTupleExprContext(executorState); const char *delimiterCharacter = "\t"; const char *nullPrintCharacter = "\\N"; ErrorContextCallback errorCallback; int64 currentShardId = INVALID_SHARD_ID; uint64 shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; uint64 copiedDataSizeInBytes = 0; uint64 processedRowCount = 0; ShardConnections *shardConnections = (ShardConnections *) palloc0(sizeof(ShardConnections)); /* initialize copy state to read from COPY data source */ #if (PG_VERSION_NUM >= 100000) CopyState copyState = BeginCopyFrom(NULL, distributedRelation, copyStatement->filename, copyStatement->is_program, NULL, copyStatement->attlist, copyStatement->options); #else CopyState copyState = BeginCopyFrom(distributedRelation, copyStatement->filename, copyStatement->is_program, copyStatement->attlist, copyStatement->options); #endif CopyOutState copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); copyOutState->delim = (char *) delimiterCharacter; copyOutState->null_print = (char *) nullPrintCharacter; copyOutState->null_print_client = (char *) nullPrintCharacter; copyOutState->binary = CanUseBinaryCopyFormat(tupleDescriptor); copyOutState->fe_msgbuf = makeStringInfo(); copyOutState->rowcontext = executorTupleContext; columnOutputFunctions = ColumnOutputFunctions(tupleDescriptor, copyOutState->binary); /* set up callback to identify error line number */ errorCallback.callback = CopyFromErrorCallback; errorCallback.arg = (void *) copyState; errorCallback.previous = error_context_stack; /* * From here on we use copyStatement as the template for the command * that we send to workers. This command does not have an attribute * list since NextCopyFrom will generate a value for all columns. */ copyStatement->attlist = NIL; while (true) { bool nextRowFound = false; MemoryContext oldContext = NULL; uint64 messageBufferSize = 0; ResetPerTupleExprContext(executorState); /* switch to tuple memory context and start showing line number in errors */ error_context_stack = &errorCallback; oldContext = MemoryContextSwitchTo(executorTupleContext); /* parse a row from the input */ nextRowFound = NextCopyFrom(copyState, executorExpressionContext, columnValues, columnNulls, NULL); if (!nextRowFound) { /* switch to regular memory context and stop showing line number in errors */ MemoryContextSwitchTo(oldContext); error_context_stack = errorCallback.previous; break; } CHECK_FOR_INTERRUPTS(); /* switch to regular memory context and stop showing line number in errors */ MemoryContextSwitchTo(oldContext); error_context_stack = errorCallback.previous; /* * If copied data size is zero, this means either this is the first * line in the copy or we just filled the previous shard up to its * capacity. Either way, we need to create a new shard and * start copying new rows into it. 
*/ if (copiedDataSizeInBytes == 0) { /* create shard and open connections to shard placements */ currentShardId = StartCopyToNewShard(shardConnections, copyStatement, copyOutState->binary); /* send copy binary headers to shard placements */ if (copyOutState->binary) { SendCopyBinaryHeaders(copyOutState, currentShardId, shardConnections->connectionList); } } /* replicate row to shard placements */ resetStringInfo(copyOutState->fe_msgbuf); AppendCopyRowData(columnValues, columnNulls, tupleDescriptor, copyOutState, columnOutputFunctions); SendCopyDataToAll(copyOutState->fe_msgbuf, currentShardId, shardConnections->connectionList); messageBufferSize = copyOutState->fe_msgbuf->len; copiedDataSizeInBytes = copiedDataSizeInBytes + messageBufferSize; /* * If we filled up this shard to its capacity, send copy binary footers * to shard placements, and update shard statistics. */ if (copiedDataSizeInBytes > shardMaxSizeInBytes) { Assert(currentShardId != INVALID_SHARD_ID); if (copyOutState->binary) { SendCopyBinaryFooters(copyOutState, currentShardId, shardConnections->connectionList); } EndRemoteCopy(currentShardId, shardConnections->connectionList, true); MasterUpdateShardStatistics(shardConnections->shardId); copiedDataSizeInBytes = 0; currentShardId = INVALID_SHARD_ID; } processedRowCount += 1; } /* * For the last shard, send copy binary footers to shard placements, * and update shard statistics. If no row is send, there is no shard * to finalize the copy command. */ if (copiedDataSizeInBytes > 0) { Assert(currentShardId != INVALID_SHARD_ID); if (copyOutState->binary) { SendCopyBinaryFooters(copyOutState, currentShardId, shardConnections->connectionList); } EndRemoteCopy(currentShardId, shardConnections->connectionList, true); MasterUpdateShardStatistics(shardConnections->shardId); } EndCopyFrom(copyState); heap_close(distributedRelation, NoLock); /* check for cancellation one last time before returning */ CHECK_FOR_INTERRUPTS(); if (completionTag != NULL) { snprintf(completionTag, COMPLETION_TAG_BUFSIZE, "COPY " UINT64_FORMAT, processedRowCount); } } /* * MasterNodeAddress gets the master node address from copy options and returns * it. Note that if the master_port is not provided, we use 5432 as the default * port. */ NodeAddress * MasterNodeAddress(CopyStmt *copyStatement) { NodeAddress *masterNodeAddress = (NodeAddress *) palloc0(sizeof(NodeAddress)); char *nodeName = NULL; /* set default port to 5432 */ int32 nodePort = 5432; ListCell *optionCell = NULL; foreach(optionCell, copyStatement->options) { DefElem *defel = (DefElem *) lfirst(optionCell); if (strncmp(defel->defname, "master_host", NAMEDATALEN) == 0) { nodeName = defGetString(defel); } else if (strncmp(defel->defname, "master_port", NAMEDATALEN) == 0) { nodePort = defGetInt32(defel); } } masterNodeAddress->nodeName = nodeName; masterNodeAddress->nodePort = nodePort; return masterNodeAddress; } /* * MasterPartitionMethod gets the partition method of the given relation from * the master node and returns it. 
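 * The partition method is obtained by running PARTITION_METHOD_QUERY over the already
 * established masterConnection.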
*/ static char MasterPartitionMethod(RangeVar *relation) { char partitionMethod = '\0'; PGresult *queryResult = NULL; bool raiseInterrupts = true; char *relationName = relation->relname; char *schemaName = relation->schemaname; char *qualifiedName = quote_qualified_identifier(schemaName, relationName); StringInfo partitionMethodCommand = makeStringInfo(); appendStringInfo(partitionMethodCommand, PARTITION_METHOD_QUERY, qualifiedName); if (!SendRemoteCommand(masterConnection, partitionMethodCommand->data)) { ReportConnectionError(masterConnection, ERROR); } queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) == PGRES_TUPLES_OK) { char *partitionMethodString = PQgetvalue((PGresult *) queryResult, 0, 0); if (partitionMethodString == NULL || (*partitionMethodString) == '\0') { ereport(ERROR, (errmsg("could not find a partition method for the " "table %s", relationName))); } partitionMethod = partitionMethodString[0]; } else { ReportResultError(masterConnection, queryResult, WARNING); ereport(ERROR, (errmsg("could not get the partition method of the " "distributed table"))); } PQclear(queryResult); queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); Assert(!queryResult); return partitionMethod; } /* * RemoveMasterOptions removes master node related copy options from the option * list of the copy statement. */ static void RemoveMasterOptions(CopyStmt *copyStatement) { List *newOptionList = NIL; ListCell *optionCell = NULL; /* walk over the list of all options */ foreach(optionCell, copyStatement->options) { DefElem *option = (DefElem *) lfirst(optionCell); /* skip master related options */ if ((strncmp(option->defname, "master_host", NAMEDATALEN) == 0) || (strncmp(option->defname, "master_port", NAMEDATALEN) == 0)) { continue; } newOptionList = lappend(newOptionList, option); } copyStatement->options = newOptionList; } /* * OpenCopyConnections opens a connection for each placement of a shard and * starts a COPY transaction if necessary. If a connection cannot be opened, * then the shard placement is marked as inactive and the COPY continues with the remaining * shard placements. 
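 * When stopOnFailure is true (as for reference tables and copy from worker nodes), a
 * connection failure raises an error instead of marking the placement inactive.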
*/ static void OpenCopyConnections(CopyStmt *copyStatement, ShardConnections *shardConnections, bool stopOnFailure, bool useBinaryCopyFormat) { List *finalizedPlacementList = NIL; int failedPlacementCount = 0; ListCell *placementCell = NULL; List *connectionList = NULL; int64 shardId = shardConnections->shardId; bool raiseInterrupts = true; MemoryContext localContext = AllocSetContextCreate(CurrentMemoryContext, "OpenCopyConnections", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); /* release finalized placement list at the end of this function */ MemoryContext oldContext = MemoryContextSwitchTo(localContext); finalizedPlacementList = MasterShardPlacementList(shardId); MemoryContextSwitchTo(oldContext); foreach(placementCell, finalizedPlacementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); char *nodeUser = CurrentUserName(); MultiConnection *connection = NULL; uint32 connectionFlags = FOR_DML | CONNECTION_PER_PLACEMENT; StringInfo copyCommand = NULL; PGresult *result = NULL; connection = GetPlacementConnection(connectionFlags, placement, nodeUser); if (PQstatus(connection->pgConn) != CONNECTION_OK) { if (stopOnFailure) { ReportConnectionError(connection, ERROR); } else { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, true); failedPlacementCount++; continue; } } /* * Errors are supposed to cause immediate aborts (i.e. we don't * want to/can't invalidate placements), mark the connection as * critical so later errors cause failures. */ MarkRemoteTransactionCritical(connection); ClaimConnectionExclusively(connection); RemoteTransactionBeginIfNecessary(connection); copyCommand = ConstructCopyStatement(copyStatement, shardConnections->shardId, useBinaryCopyFormat); if (!SendRemoteCommand(connection, copyCommand->data)) { ReportConnectionError(connection, ERROR); } result = GetRemoteCommandResult(connection, raiseInterrupts); if (PQresultStatus(result) != PGRES_COPY_IN) { ReportResultError(connection, result, ERROR); } PQclear(result); connectionList = lappend(connectionList, connection); } /* if all placements failed, error out */ if (failedPlacementCount == list_length(finalizedPlacementList)) { ereport(ERROR, (errmsg("could not connect to any active placements"))); } /* * If stopOnFailure is true, we just error out and code execution should * never reach to this point. This is the case for reference tables and * copy from worker nodes. */ Assert(!stopOnFailure || failedPlacementCount == 0); shardConnections->connectionList = connectionList; MemoryContextReset(localContext); } /* * CanUseBinaryCopyFormat iterates over columns of the relation and looks for a * column whose type is array of user-defined type or composite type. If it finds * such column, that means we cannot use binary format for COPY, because binary * format sends Oid of the types, which are generally not same in master and * worker nodes for user-defined types. If the function can not detect a binary * output function for any of the column, it returns false. 
*/ static bool CanUseBinaryCopyFormat(TupleDesc tupleDescription) { bool useBinaryCopyFormat = true; int totalColumnCount = tupleDescription->natts; int columnIndex = 0; for (columnIndex = 0; columnIndex < totalColumnCount; columnIndex++) { Form_pg_attribute currentColumn = tupleDescription->attrs[columnIndex]; Oid typeId = InvalidOid; char typeCategory = '\0'; bool typePreferred = false; bool binaryOutputFunctionDefined = false; if (currentColumn->attisdropped) { continue; } typeId = currentColumn->atttypid; /* built-in types may also don't have binary output function */ binaryOutputFunctionDefined = BinaryOutputFunctionDefined(typeId); if (!binaryOutputFunctionDefined) { useBinaryCopyFormat = false; break; } if (typeId >= FirstNormalObjectId) { get_type_category_preferred(typeId, &typeCategory, &typePreferred); if (typeCategory == TYPCATEGORY_ARRAY || typeCategory == TYPCATEGORY_COMPOSITE) { useBinaryCopyFormat = false; break; } } } return useBinaryCopyFormat; } /* * BinaryOutputFunctionDefined checks whether binary output function is defined * for the given type. */ static bool BinaryOutputFunctionDefined(Oid typeId) { Oid typeFunctionId = InvalidOid; Oid typeIoParam = InvalidOid; int16 typeLength = 0; bool typeByVal = false; char typeAlign = 0; char typeDelim = 0; get_type_io_data(typeId, IOFunc_send, &typeLength, &typeByVal, &typeAlign, &typeDelim, &typeIoParam, &typeFunctionId); if (OidIsValid(typeFunctionId)) { return true; } return false; } /* * MasterShardPlacementList dispatches the finalized shard placements call * between local or remote master node according to the master connection state. */ static List * MasterShardPlacementList(uint64 shardId) { List *finalizedPlacementList = NIL; if (masterConnection == NULL) { finalizedPlacementList = FinalizedShardPlacementList(shardId); } else { finalizedPlacementList = RemoteFinalizedShardPlacementList(shardId); } return finalizedPlacementList; } /* * RemoteFinalizedShardPlacementList gets the finalized shard placement list * for the given shard id from the remote master node. 
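 * The placements are fetched by running FINALIZED_SHARD_PLACEMENTS_QUERY over
 * masterConnection and are returned as a list of ShardPlacement structs.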
*/ static List * RemoteFinalizedShardPlacementList(uint64 shardId) { List *finalizedPlacementList = NIL; PGresult *queryResult = NULL; bool raiseInterrupts = true; StringInfo shardPlacementsCommand = makeStringInfo(); appendStringInfo(shardPlacementsCommand, FINALIZED_SHARD_PLACEMENTS_QUERY, shardId); if (!SendRemoteCommand(masterConnection, shardPlacementsCommand->data)) { ReportConnectionError(masterConnection, ERROR); } queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) == PGRES_TUPLES_OK) { int rowCount = PQntuples(queryResult); int rowIndex = 0; for (rowIndex = 0; rowIndex < rowCount; rowIndex++) { char *placementIdString = PQgetvalue(queryResult, rowIndex, 0); char *nodeName = pstrdup(PQgetvalue(queryResult, rowIndex, 1)); char *nodePortString = pstrdup(PQgetvalue(queryResult, rowIndex, 2)); uint32 nodePort = atoi(nodePortString); uint64 placementId = atoll(placementIdString); ShardPlacement *shardPlacement = (ShardPlacement *) palloc0(sizeof(ShardPlacement)); shardPlacement->placementId = placementId; shardPlacement->nodeName = nodeName; shardPlacement->nodePort = nodePort; finalizedPlacementList = lappend(finalizedPlacementList, shardPlacement); } } else { ereport(ERROR, (errmsg("could not get shard placements from the master node"))); } PQclear(queryResult); queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); Assert(!queryResult); return finalizedPlacementList; } /* Send copy binary headers to given connections */ static void SendCopyBinaryHeaders(CopyOutState copyOutState, int64 shardId, List *connectionList) { resetStringInfo(copyOutState->fe_msgbuf); AppendCopyBinaryHeaders(copyOutState); SendCopyDataToAll(copyOutState->fe_msgbuf, shardId, connectionList); } /* Send copy binary footers to given connections */ static void SendCopyBinaryFooters(CopyOutState copyOutState, int64 shardId, List *connectionList) { resetStringInfo(copyOutState->fe_msgbuf); AppendCopyBinaryFooters(copyOutState); SendCopyDataToAll(copyOutState->fe_msgbuf, shardId, connectionList); } /* * ConstructCopyStatement constructs the text of a COPY statement for a particular * shard. */ static StringInfo ConstructCopyStatement(CopyStmt *copyStatement, int64 shardId, bool useBinaryCopyFormat) { StringInfo command = makeStringInfo(); char *schemaName = copyStatement->relation->schemaname; char *relationName = copyStatement->relation->relname; char *shardName = pstrdup(relationName); char *shardQualifiedName = NULL; AppendShardIdToName(&shardName, shardId); shardQualifiedName = quote_qualified_identifier(schemaName, shardName); appendStringInfo(command, "COPY %s ", shardQualifiedName); if (copyStatement->attlist != NIL) { ListCell *columnNameCell = NULL; bool appendedFirstName = false; foreach(columnNameCell, copyStatement->attlist) { char *columnName = (char *) lfirst(columnNameCell); if (!appendedFirstName) { appendStringInfo(command, "(%s", columnName); appendedFirstName = true; } else { appendStringInfo(command, ", %s", columnName); } } appendStringInfoString(command, ") "); } appendStringInfo(command, "FROM STDIN WITH "); if (useBinaryCopyFormat) { appendStringInfoString(command, "(FORMAT BINARY)"); } else { appendStringInfoString(command, "(FORMAT TEXT)"); } return command; } /* * SendCopyDataToAll sends copy data to all connections in a list. 
*/ static void SendCopyDataToAll(StringInfo dataBuffer, int64 shardId, List *connectionList) { ListCell *connectionCell = NULL; foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); SendCopyDataToPlacement(dataBuffer, shardId, connection); } } /* * SendCopyDataToPlacement sends serialized COPY data to a specific shard placement * over the given connection. */ static void SendCopyDataToPlacement(StringInfo dataBuffer, int64 shardId, MultiConnection *connection) { if (!PutRemoteCopyData(connection, dataBuffer->data, dataBuffer->len)) { ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmsg("failed to COPY to shard %ld on %s:%d", shardId, connection->hostname, connection->port), errdetail("failed to send %d bytes %s", dataBuffer->len, dataBuffer->data))); } } /* * EndRemoteCopy ends the COPY input on all connections, and unclaims connections. * If stopOnFailure is true, then EndRemoteCopy reports an error on failure, * otherwise it reports a warning or continues. */ static void EndRemoteCopy(int64 shardId, List *connectionList, bool stopOnFailure) { ListCell *connectionCell = NULL; foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); PGresult *result = NULL; bool raiseInterrupts = true; /* end the COPY input */ if (!PutRemoteCopyEnd(connection, NULL)) { if (stopOnFailure) { ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmsg("failed to COPY to shard %ld on %s:%d", shardId, connection->hostname, connection->port))); } continue; } /* check whether there were any COPY errors */ result = GetRemoteCommandResult(connection, raiseInterrupts); if (PQresultStatus(result) != PGRES_COMMAND_OK && stopOnFailure) { ReportCopyError(connection, result); } PQclear(result); ForgetResults(connection); UnclaimConnection(connection); } } /* * ReportCopyError tries to report a useful error message for the user from * the remote COPY error messages. */ static void ReportCopyError(MultiConnection *connection, PGresult *result) { char *remoteMessage = PQresultErrorField(result, PG_DIAG_MESSAGE_PRIMARY); if (remoteMessage != NULL) { /* probably a constraint violation, show remote message and detail */ char *remoteDetail = PQresultErrorField(result, PG_DIAG_MESSAGE_DETAIL); ereport(ERROR, (errmsg("%s", remoteMessage), errdetail("%s", remoteDetail))); } else { /* trim the trailing characters */ remoteMessage = pchomp(PQerrorMessage(connection->pgConn)); ereport(ERROR, (errcode(ERRCODE_IO_ERROR), errmsg("failed to complete COPY on %s:%d", connection->hostname, connection->port), errdetail("%s", remoteMessage))); } } /* * ColumnOutputFunctions walks over a table's columns, and finds each column's * type information. The function then resolves each type's output function, * and stores and returns these output functions in an array. 
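 * Dropped columns keep a zero-initialized entry so that array indexes stay aligned with the
 * tuple descriptor; callers skip those entries.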
*/ FmgrInfo * ColumnOutputFunctions(TupleDesc rowDescriptor, bool binaryFormat) { uint32 columnCount = (uint32) rowDescriptor->natts; FmgrInfo *columnOutputFunctions = palloc0(columnCount * sizeof(FmgrInfo)); uint32 columnIndex = 0; for (columnIndex = 0; columnIndex < columnCount; columnIndex++) { FmgrInfo *currentOutputFunction = &columnOutputFunctions[columnIndex]; Form_pg_attribute currentColumn = rowDescriptor->attrs[columnIndex]; Oid columnTypeId = currentColumn->atttypid; Oid outputFunctionId = InvalidOid; bool typeVariableLength = false; if (currentColumn->attisdropped) { /* dropped column, leave the output function NULL */ continue; } else if (binaryFormat) { getTypeBinaryOutputInfo(columnTypeId, &outputFunctionId, &typeVariableLength); } else { getTypeOutputInfo(columnTypeId, &outputFunctionId, &typeVariableLength); } fmgr_info(outputFunctionId, currentOutputFunction); } return columnOutputFunctions; } /* * AppendCopyRowData serializes one row using the column output functions, * and appends the data to the row output state object's message buffer. * This function is modeled after the CopyOneRowTo() function in * commands/copy.c, but only implements a subset of that functionality. * Note that the caller of this function should reset row memory context * to not bloat memory usage. */ void AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor, CopyOutState rowOutputState, FmgrInfo *columnOutputFunctions) { uint32 totalColumnCount = (uint32) rowDescriptor->natts; uint32 availableColumnCount = AvailableColumnCount(rowDescriptor); uint32 appendedColumnCount = 0; uint32 columnIndex = 0; MemoryContext oldContext = MemoryContextSwitchTo(rowOutputState->rowcontext); if (rowOutputState->binary) { CopySendInt16(rowOutputState, availableColumnCount); } for (columnIndex = 0; columnIndex < totalColumnCount; columnIndex++) { Form_pg_attribute currentColumn = rowDescriptor->attrs[columnIndex]; Datum value = valueArray[columnIndex]; bool isNull = isNullArray[columnIndex]; bool lastColumn = false; if (currentColumn->attisdropped) { continue; } else if (rowOutputState->binary) { if (!isNull) { FmgrInfo *outputFunctionPointer = &columnOutputFunctions[columnIndex]; bytea *outputBytes = SendFunctionCall(outputFunctionPointer, value); CopySendInt32(rowOutputState, VARSIZE(outputBytes) - VARHDRSZ); CopySendData(rowOutputState, VARDATA(outputBytes), VARSIZE(outputBytes) - VARHDRSZ); } else { CopySendInt32(rowOutputState, -1); } } else { if (!isNull) { FmgrInfo *outputFunctionPointer = &columnOutputFunctions[columnIndex]; char *columnText = OutputFunctionCall(outputFunctionPointer, value); CopyAttributeOutText(rowOutputState, columnText); } else { CopySendString(rowOutputState, rowOutputState->null_print_client); } lastColumn = ((appendedColumnCount + 1) == availableColumnCount); if (!lastColumn) { CopySendChar(rowOutputState, rowOutputState->delim[0]); } } appendedColumnCount++; } if (!rowOutputState->binary) { /* append default line termination string depending on the platform */ #ifndef WIN32 CopySendChar(rowOutputState, '\n'); #else CopySendString(rowOutputState, "\r\n"); #endif } MemoryContextSwitchTo(oldContext); } /* * AvailableColumnCount returns the number of columns in a tuple descriptor, excluding * columns that were dropped. 
*/ static uint32 AvailableColumnCount(TupleDesc tupleDescriptor) { uint32 columnCount = 0; uint32 columnIndex = 0; for (columnIndex = 0; columnIndex < tupleDescriptor->natts; columnIndex++) { Form_pg_attribute currentColumn = tupleDescriptor->attrs[columnIndex]; if (!currentColumn->attisdropped) { columnCount++; } } return columnCount; } /* * AppendCopyBinaryHeaders appends binary headers to the copy buffer in * headerOutputState. */ void AppendCopyBinaryHeaders(CopyOutState headerOutputState) { const int32 zero = 0; MemoryContext oldContext = MemoryContextSwitchTo(headerOutputState->rowcontext); /* Signature */ CopySendData(headerOutputState, BinarySignature, 11); /* Flags field (no OIDs) */ CopySendInt32(headerOutputState, zero); /* No header extension */ CopySendInt32(headerOutputState, zero); MemoryContextSwitchTo(oldContext); } /* * AppendCopyBinaryFooters appends binary footers to the copy buffer in * footerOutputState. */ void AppendCopyBinaryFooters(CopyOutState footerOutputState) { int16 negative = -1; MemoryContext oldContext = MemoryContextSwitchTo(footerOutputState->rowcontext); CopySendInt16(footerOutputState, negative); MemoryContextSwitchTo(oldContext); } /* * StartCopyToNewShard creates a new shard and related shard placements and * opens connections to shard placements. */ static int64 StartCopyToNewShard(ShardConnections *shardConnections, CopyStmt *copyStatement, bool useBinaryCopyFormat) { char *relationName = copyStatement->relation->relname; char *schemaName = copyStatement->relation->schemaname; char *qualifiedName = quote_qualified_identifier(schemaName, relationName); int64 shardId = MasterCreateEmptyShard(qualifiedName); bool stopOnFailure = true; shardConnections->shardId = shardId; shardConnections->connectionList = NIL; /* connect to shards placements and start transactions */ OpenCopyConnections(copyStatement, shardConnections, stopOnFailure, useBinaryCopyFormat); return shardId; } /* * MasterCreateEmptyShard dispatches the create empty shard call between local or * remote master node according to the master connection state. */ static int64 MasterCreateEmptyShard(char *relationName) { int64 shardId = 0; if (masterConnection == NULL) { shardId = CreateEmptyShard(relationName); } else { shardId = RemoteCreateEmptyShard(relationName); } return shardId; } /* * CreateEmptyShard creates a new shard and related shard placements from the * local master node. */ static int64 CreateEmptyShard(char *relationName) { int64 shardId = 0; text *relationNameText = cstring_to_text(relationName); Datum relationNameDatum = PointerGetDatum(relationNameText); Datum shardIdDatum = DirectFunctionCall1(master_create_empty_shard, relationNameDatum); shardId = DatumGetInt64(shardIdDatum); return shardId; } /* * RemoteCreateEmptyShard creates a new shard and related shard placements from * the remote master node. 
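 * The shard is created by running CREATE_EMPTY_SHARD_QUERY over masterConnection; the
 * returned value is parsed as the new shard id.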
*/ static int64 RemoteCreateEmptyShard(char *relationName) { int64 shardId = 0; PGresult *queryResult = NULL; bool raiseInterrupts = true; StringInfo createEmptyShardCommand = makeStringInfo(); appendStringInfo(createEmptyShardCommand, CREATE_EMPTY_SHARD_QUERY, relationName); if (!SendRemoteCommand(masterConnection, createEmptyShardCommand->data)) { ReportConnectionError(masterConnection, ERROR); } queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) == PGRES_TUPLES_OK) { char *shardIdString = PQgetvalue((PGresult *) queryResult, 0, 0); char *shardIdStringEnd = NULL; shardId = strtoul(shardIdString, &shardIdStringEnd, 0); } else { ReportResultError(masterConnection, queryResult, WARNING); ereport(ERROR, (errmsg("could not create a new empty shard on the remote node"))); } PQclear(queryResult); queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); Assert(!queryResult); return shardId; } /* * MasterUpdateShardStatistics dispatches the update shard statistics call * between local or remote master node according to the master connection state. */ static void MasterUpdateShardStatistics(uint64 shardId) { if (masterConnection == NULL) { UpdateShardStatistics(shardId); } else { RemoteUpdateShardStatistics(shardId); } } /* * RemoteUpdateShardStatistics updates shard statistics on the remote master node. */ static void RemoteUpdateShardStatistics(uint64 shardId) { PGresult *queryResult = NULL; bool raiseInterrupts = true; StringInfo updateShardStatisticsCommand = makeStringInfo(); appendStringInfo(updateShardStatisticsCommand, UPDATE_SHARD_STATISTICS_QUERY, shardId); if (!SendRemoteCommand(masterConnection, updateShardStatisticsCommand->data)) { ReportConnectionError(masterConnection, ERROR); } queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); if (PQresultStatus(queryResult) != PGRES_TUPLES_OK) { ereport(ERROR, (errmsg("could not update shard statistics"))); } PQclear(queryResult); queryResult = GetRemoteCommandResult(masterConnection, raiseInterrupts); Assert(!queryResult); } /* *INDENT-OFF* */ /* Append data to the copy buffer in outputState */ static void CopySendData(CopyOutState outputState, const void *databuf, int datasize) { appendBinaryStringInfo(outputState->fe_msgbuf, databuf, datasize); } /* Append a striong to the copy buffer in outputState. */ static void CopySendString(CopyOutState outputState, const char *str) { appendBinaryStringInfo(outputState->fe_msgbuf, str, strlen(str)); } /* Append a char to the copy buffer in outputState. */ static void CopySendChar(CopyOutState outputState, char c) { appendStringInfoCharMacro(outputState->fe_msgbuf, c); } /* Append an int32 to the copy buffer in outputState. */ static void CopySendInt32(CopyOutState outputState, int32 val) { uint32 buf = htonl((uint32) val); CopySendData(outputState, &buf, sizeof(buf)); } /* Append an int16 to the copy buffer in outputState. */ static void CopySendInt16(CopyOutState outputState, int16 val) { uint16 buf = htons((uint16) val); CopySendData(outputState, &buf, sizeof(buf)); } /* * Send text representation of one column, with conversion and escaping. * * NB: This function is based on commands/copy.c and doesn't fully conform to * our coding style. The function should be kept in sync with copy.c. 
*/ static void CopyAttributeOutText(CopyOutState cstate, char *string) { char *pointer = NULL; char *start = NULL; char c = '\0'; char delimc = cstate->delim[0]; if (cstate->need_transcoding) { pointer = pg_server_to_any(string, strlen(string), cstate->file_encoding); } else { pointer = string; } /* * We have to grovel through the string searching for control characters * and instances of the delimiter character. In most cases, though, these * are infrequent. To avoid overhead from calling CopySendData once per * character, we dump out all characters between escaped characters in a * single call. The loop invariant is that the data from "start" to "pointer" * can be sent literally, but hasn't yet been. * * As all encodings here are safe, i.e. backend supported ones, we can * skip doing pg_encoding_mblen(), because in valid backend encodings, * extra bytes of a multibyte character never look like ASCII. */ start = pointer; while ((c = *pointer) != '\0') { if ((unsigned char) c < (unsigned char) 0x20) { /* * \r and \n must be escaped, the others are traditional. We * prefer to dump these using the C-like notation, rather than * a backslash and the literal character, because it makes the * dump file a bit more proof against Microsoftish data * mangling. */ switch (c) { case '\b': c = 'b'; break; case '\f': c = 'f'; break; case '\n': c = 'n'; break; case '\r': c = 'r'; break; case '\t': c = 't'; break; case '\v': c = 'v'; break; default: /* If it's the delimiter, must backslash it */ if (c == delimc) break; /* All ASCII control chars are length 1 */ pointer++; continue; /* fall to end of loop */ } /* if we get here, we need to convert the control char */ CopyFlushOutput(cstate, start, pointer); CopySendChar(cstate, '\\'); CopySendChar(cstate, c); start = ++pointer; /* do not include char in next run */ } else if (c == '\\' || c == delimc) { CopyFlushOutput(cstate, start, pointer); CopySendChar(cstate, '\\'); start = pointer++; /* we include char in next run */ } else { pointer++; } } CopyFlushOutput(cstate, start, pointer); } /* *INDENT-ON* */ /* Helper function to send pending copy output */ static inline void CopyFlushOutput(CopyOutState cstate, char *start, char *pointer) { if (pointer > start) { CopySendData(cstate, start, pointer - start); } } /* * CreateCitusCopyDestReceiver creates a DestReceiver that copies into * a distributed table. * * The caller should provide the list of column names to use in the * remote COPY statement, and the partition column index in the tuple * descriptor (*not* the column name list). 
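 * When the table has no partition column (reference tables), INVALID_PARTITION_COLUMN_INDEX
 * should be passed as the partition column index.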
*/ CitusCopyDestReceiver * CreateCitusCopyDestReceiver(Oid tableId, List *columnNameList, int partitionColumnIndex, EState *executorState, bool stopOnFailure) { CitusCopyDestReceiver *copyDest = NULL; copyDest = (CitusCopyDestReceiver *) palloc0(sizeof(CitusCopyDestReceiver)); /* set up the DestReceiver function pointers */ copyDest->pub.receiveSlot = CitusCopyDestReceiverReceive; copyDest->pub.rStartup = CitusCopyDestReceiverStartup; copyDest->pub.rShutdown = CitusCopyDestReceiverShutdown; copyDest->pub.rDestroy = CitusCopyDestReceiverDestroy; copyDest->pub.mydest = DestCopyOut; /* set up output parameters */ copyDest->distributedRelationId = tableId; copyDest->columnNameList = columnNameList; copyDest->partitionColumnIndex = partitionColumnIndex; copyDest->executorState = executorState; copyDest->stopOnFailure = stopOnFailure; copyDest->memoryContext = CurrentMemoryContext; return copyDest; } /* * CitusCopyDestReceiverStartup implements the rStartup interface of * CitusCopyDestReceiver. It opens the relation */ static void CitusCopyDestReceiverStartup(DestReceiver *dest, int operation, TupleDesc inputTupleDescriptor) { CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) dest; Oid tableId = copyDest->distributedRelationId; char *relationName = get_rel_name(tableId); Oid schemaOid = get_rel_namespace(tableId); char *schemaName = get_namespace_name(schemaOid); Relation distributedRelation = NULL; List *columnNameList = copyDest->columnNameList; List *quotedColumnNameList = NIL; ListCell *columnNameCell = NULL; char partitionMethod = '\0'; DistTableCacheEntry *cacheEntry = NULL; CopyStmt *copyStatement = NULL; List *shardIntervalList = NULL; CopyOutState copyOutState = NULL; const char *delimiterCharacter = "\t"; const char *nullPrintCharacter = "\\N"; /* look up table properties */ distributedRelation = heap_open(tableId, RowExclusiveLock); cacheEntry = DistributedTableCacheEntry(tableId); partitionMethod = cacheEntry->partitionMethod; copyDest->distributedRelation = distributedRelation; copyDest->tupleDescriptor = inputTupleDescriptor; /* we don't support copy to reference tables from workers */ if (partitionMethod == DISTRIBUTE_BY_NONE) { EnsureCoordinator(); } /* load the list of shards and verify that we have shards to copy into */ shardIntervalList = LoadShardIntervalList(tableId); if (shardIntervalList == NIL) { if (partitionMethod == DISTRIBUTE_BY_HASH) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not find any shards into which to copy"), errdetail("No shards exist for distributed table \"%s\".", relationName), errhint("Run master_create_worker_shards to create shards " "and try again."))); } else { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not find any shards into which to copy"), errdetail("No shards exist for distributed table \"%s\".", relationName))); } } /* error if any shard missing min/max values */ if (partitionMethod != DISTRIBUTE_BY_NONE && cacheEntry->hasUninitializedShardInterval) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not start copy"), errdetail("Distributed relation \"%s\" has shards " "with missing shardminvalue/shardmaxvalue.", relationName))); } /* prevent concurrent placement changes and non-commutative DML statements */ LockShardListMetadata(shardIntervalList, ShareLock); LockShardListResources(shardIntervalList, ShareLock); /* keep the table metadata to avoid looking it up for every tuple */ copyDest->tableMetadata = cacheEntry; 
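/* participate in the coordinated transaction; use 2PC when the table's replication model or the multi-shard commit protocol requires it */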
BeginOrContinueCoordinatedTransaction(); if (cacheEntry->replicationModel == REPLICATION_MODEL_2PC || MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC) { CoordinatedTransactionUse2PC(); } /* define how tuples will be serialised */ copyOutState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); copyOutState->delim = (char *) delimiterCharacter; copyOutState->null_print = (char *) nullPrintCharacter; copyOutState->null_print_client = (char *) nullPrintCharacter; copyOutState->binary = CanUseBinaryCopyFormat(inputTupleDescriptor); copyOutState->fe_msgbuf = makeStringInfo(); copyOutState->rowcontext = GetPerTupleMemoryContext(copyDest->executorState); copyDest->copyOutState = copyOutState; /* prepare output functions */ copyDest->columnOutputFunctions = ColumnOutputFunctions(inputTupleDescriptor, copyOutState->binary); /* ensure the column names are properly quoted in the COPY statement */ foreach(columnNameCell, columnNameList) { char *columnName = (char *) lfirst(columnNameCell); char *quotedColumnName = (char *) quote_identifier(columnName); quotedColumnNameList = lappend(quotedColumnNameList, quotedColumnName); } if (partitionMethod != DISTRIBUTE_BY_NONE && copyDest->partitionColumnIndex == INVALID_PARTITION_COLUMN_INDEX) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("the partition column of table %s should have a value", quote_qualified_identifier(schemaName, relationName)))); } /* define the template for the COPY statement that is sent to workers */ copyStatement = makeNode(CopyStmt); copyStatement->relation = makeRangeVar(schemaName, relationName, -1); copyStatement->query = NULL; copyStatement->attlist = quotedColumnNameList; copyStatement->is_from = true; copyStatement->is_program = false; copyStatement->filename = NULL; copyStatement->options = NIL; copyDest->copyStatement = copyStatement; copyDest->shardConnectionHash = CreateShardConnectionHash(TopTransactionContext); } /* * CitusCopyDestReceiverReceive implements the receiveSlot function of * CitusCopyDestReceiver. It takes a TupleTableSlot and sends the contents to * the appropriate shard placement(s). */ static bool CitusCopyDestReceiverReceive(TupleTableSlot *slot, DestReceiver *dest) { CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) dest; int partitionColumnIndex = copyDest->partitionColumnIndex; TupleDesc tupleDescriptor = copyDest->tupleDescriptor; CopyStmt *copyStatement = copyDest->copyStatement; HTAB *shardConnectionHash = copyDest->shardConnectionHash; CopyOutState copyOutState = copyDest->copyOutState; FmgrInfo *columnOutputFunctions = copyDest->columnOutputFunctions; bool stopOnFailure = copyDest->stopOnFailure; Datum *columnValues = NULL; bool *columnNulls = NULL; Datum partitionColumnValue = 0; ShardInterval *shardInterval = NULL; int64 shardId = 0; bool shardConnectionsFound = false; ShardConnections *shardConnections = NULL; EState *executorState = copyDest->executorState; MemoryContext executorTupleContext = GetPerTupleMemoryContext(executorState); MemoryContext oldContext = MemoryContextSwitchTo(executorTupleContext); slot_getallattrs(slot); columnValues = slot->tts_values; columnNulls = slot->tts_isnull; /* * Find the partition column value and corresponding shard interval * for non-reference tables. * Get the existing (and only a single) shard interval for the reference * tables. Note that, reference tables has NULL partition column values so * skip the check. 
*/ if (partitionColumnIndex != INVALID_PARTITION_COLUMN_INDEX) { if (columnNulls[partitionColumnIndex]) { Oid relationId = copyDest->distributedRelationId; char *relationName = get_rel_name(relationId); Oid schemaOid = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaOid); char *qualifiedTableName = quote_qualified_identifier(schemaName, relationName); ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("the partition column of table %s cannot be NULL", qualifiedTableName))); } /* find the partition column value */ partitionColumnValue = columnValues[partitionColumnIndex]; } /* * Find the shard interval and id for the partition column value for * non-reference tables. * * For reference table, this function blindly returns the tables single * shard. */ shardInterval = FindShardInterval(partitionColumnValue, copyDest->tableMetadata); if (shardInterval == NULL) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not find shard for partition column " "value"))); } shardId = shardInterval->shardId; /* connections hash is kept in memory context */ MemoryContextSwitchTo(copyDest->memoryContext); /* get existing connections to the shard placements, if any */ shardConnections = GetShardHashConnections(shardConnectionHash, shardId, &shardConnectionsFound); if (!shardConnectionsFound) { /* open connections and initiate COPY on shard placements */ OpenCopyConnections(copyStatement, shardConnections, stopOnFailure, copyOutState->binary); /* send copy binary headers to shard placements */ if (copyOutState->binary) { SendCopyBinaryHeaders(copyOutState, shardId, shardConnections->connectionList); } } /* replicate row to shard placements */ resetStringInfo(copyOutState->fe_msgbuf); AppendCopyRowData(columnValues, columnNulls, tupleDescriptor, copyOutState, columnOutputFunctions); SendCopyDataToAll(copyOutState->fe_msgbuf, shardId, shardConnections->connectionList); MemoryContextSwitchTo(oldContext); copyDest->tuplesSent++; /* * Release per tuple memory allocated in this function. If we're writing * the results of an INSERT ... SELECT then the SELECT execution will use * its own executor state and reset the per tuple expression context * separately. */ ResetPerTupleExprContext(executorState); return true; } /* * CitusCopyDestReceiverShutdown implements the rShutdown interface of * CitusCopyDestReceiver. It ends the COPY on all the open connections and closes * the relation. 
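 * For binary COPY, the binary footers are sent to each placement before the COPY input is ended.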
*/ static void CitusCopyDestReceiverShutdown(DestReceiver *destReceiver) { CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) destReceiver; HTAB *shardConnectionHash = copyDest->shardConnectionHash; List *shardConnectionsList = NIL; ListCell *shardConnectionsCell = NULL; CopyOutState copyOutState = copyDest->copyOutState; Relation distributedRelation = copyDest->distributedRelation; shardConnectionsList = ShardConnectionList(shardConnectionHash); foreach(shardConnectionsCell, shardConnectionsList) { ShardConnections *shardConnections = (ShardConnections *) lfirst( shardConnectionsCell); /* send copy binary footers to all shard placements */ if (copyOutState->binary) { SendCopyBinaryFooters(copyOutState, shardConnections->shardId, shardConnections->connectionList); } /* close the COPY input on all shard placements */ EndRemoteCopy(shardConnections->shardId, shardConnections->connectionList, true); } heap_close(distributedRelation, NoLock); } static void CitusCopyDestReceiverDestroy(DestReceiver *destReceiver) { CitusCopyDestReceiver *copyDest = (CitusCopyDestReceiver *) destReceiver; if (copyDest->copyOutState) { pfree(copyDest->copyOutState); } if (copyDest->columnOutputFunctions) { pfree(copyDest->columnOutputFunctions); } pfree(copyDest); } citus-7.0.3/src/backend/distributed/commands/transmit.c000066400000000000000000000177451317107136600232010ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * transmit.c * Routines for transmitting regular files between two nodes. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "pgstat.h" #include #include #include #include "distributed/relay_utility.h" #include "distributed/transmit.h" #include "libpq/libpq.h" #include "libpq/pqformat.h" #include "storage/fd.h" /* Local functions forward declarations */ static File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode); static void SendCopyInStart(void); static void SendCopyOutStart(void); static void SendCopyDone(void); static void SendCopyData(StringInfo fileBuffer); static bool ReceiveCopyData(StringInfo copyData); /* * RedirectCopyDataToRegularFile receives data from stdin using the standard copy * protocol. The function then creates or truncates a file with the given * filename, and appends received data to this file. */ void RedirectCopyDataToRegularFile(const char *filename) { StringInfo copyData = makeStringInfo(); bool copyDone = false; File fileDesc = -1; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); const int fileMode = (S_IRUSR | S_IWUSR); fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); SendCopyInStart(); copyDone = ReceiveCopyData(copyData); while (!copyDone) { /* if received data has contents, append to regular file */ if (copyData->len > 0) { #if (PG_VERSION_NUM >= 100000) int appended = FileWrite(fileDesc, copyData->data, copyData->len, PG_WAIT_IO); #else int appended = FileWrite(fileDesc, copyData->data, copyData->len); #endif if (appended != copyData->len) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not append to received file: %m"))); } } resetStringInfo(copyData); copyDone = ReceiveCopyData(copyData); } FreeStringInfo(copyData); FileClose(fileDesc); } /* * SendRegularFile reads data from the given file, and sends these data to * stdout using the standard copy protocol. 
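 * The file is read in 32 KB chunks, each of which is sent as a separate copy data message.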
After all file data are sent, the * function ends the copy protocol and closes the file. */ void SendRegularFile(const char *filename) { File fileDesc = -1; StringInfo fileBuffer = NULL; int readBytes = -1; const uint32 fileBufferSize = 32768; /* 32 KB */ const int fileFlags = (O_RDONLY | PG_BINARY); const int fileMode = 0; /* we currently do not check if the caller has permissions for this file */ fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); /* * We read file's contents into buffers of 32 KB. This buffer size is twice * as large as Hadoop's default buffer size, and may later be configurable. */ fileBuffer = makeStringInfo(); enlargeStringInfo(fileBuffer, fileBufferSize); SendCopyOutStart(); #if (PG_VERSION_NUM >= 100000) readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize, PG_WAIT_IO); #else readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize); #endif while (readBytes > 0) { fileBuffer->len = readBytes; SendCopyData(fileBuffer); resetStringInfo(fileBuffer); #if (PG_VERSION_NUM >= 100000) readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize, PG_WAIT_IO); #else readBytes = FileRead(fileDesc, fileBuffer->data, fileBufferSize); #endif } SendCopyDone(); FreeStringInfo(fileBuffer); FileClose(fileDesc); } /* Helper function that deallocates string info object. */ void FreeStringInfo(StringInfo stringInfo) { resetStringInfo(stringInfo); pfree(stringInfo->data); pfree(stringInfo); } /* * FileOpenForTransmit opens file with the given filename and flags. On success, * the function returns the internal file handle for the opened file. On failure * the function errors out. */ static File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode) { File fileDesc = -1; int fileStated = -1; struct stat fileStat; fileStated = stat(filename, &fileStat); if (fileStated >= 0) { if (S_ISDIR(fileStat.st_mode)) { ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is a directory", filename))); } } fileDesc = PathNameOpenFile((char *) filename, fileFlags, fileMode); if (fileDesc < 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", filename))); } return fileDesc; } /* * SendCopyInStart sends the start copy in message to initiate receiving data * from stdin. The frontend should now send copy data. */ static void SendCopyInStart(void) { StringInfoData copyInStart = { NULL, 0, 0, 0 }; const char copyFormat = 1; /* binary copy format */ int flushed = 0; pq_beginmessage(©InStart, 'G'); pq_sendbyte(©InStart, copyFormat); pq_sendint(©InStart, 0, 2); pq_endmessage(©InStart); /* flush here to ensure that FE knows it can send data */ flushed = pq_flush(); if (flushed != 0) { ereport(WARNING, (errmsg("could not flush copy start data"))); } } /* * SendCopyOutStart sends the start copy out message to initiate sending data to * stdout. After this message, the backend will continue by sending copy data. */ static void SendCopyOutStart(void) { StringInfoData copyOutStart = { NULL, 0, 0, 0 }; const char copyFormat = 1; /* binary copy format */ pq_beginmessage(©OutStart, 'H'); pq_sendbyte(©OutStart, copyFormat); pq_sendint(©OutStart, 0, 2); pq_endmessage(©OutStart); } /* Sends the copy-complete message. 
*/ static void SendCopyDone(void) { StringInfoData copyDone = { NULL, 0, 0, 0 }; int flushed = 0; pq_beginmessage(©Done, 'c'); pq_endmessage(©Done); /* flush here to signal to FE that we are done */ flushed = pq_flush(); if (flushed != 0) { ereport(WARNING, (errmsg("could not flush copy start data"))); } } /* Sends the copy data message to stdout. */ static void SendCopyData(StringInfo fileBuffer) { StringInfoData copyData = { NULL, 0, 0, 0 }; pq_beginmessage(©Data, 'd'); pq_sendbytes(©Data, fileBuffer->data, fileBuffer->len); pq_endmessage(©Data); } /* * ReceiveCopyData receives one copy data message from stdin, and writes this * message's contents into the given argument. The function then checks if the * copy protocol has been completed, and if it has, the function returns true. * If not, the function returns false indicating there are more data to read. * If the received message does not conform to the copy protocol, the function * mirrors copy.c's error behavior. */ static bool ReceiveCopyData(StringInfo copyData) { int messageType = 0; int messageCopied = 0; bool copyDone = true; const int unlimitedSize = 0; HOLD_CANCEL_INTERRUPTS(); pq_startmsgread(); messageType = pq_getbyte(); if (messageType == EOF) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("unexpected EOF on client connection"))); } /* consume the rest of message before checking for message type */ messageCopied = pq_getmessage(copyData, unlimitedSize); if (messageCopied == EOF) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("unexpected EOF on client connection"))); } RESUME_CANCEL_INTERRUPTS(); switch (messageType) { case 'd': /* CopyData */ { copyDone = false; break; } case 'c': /* CopyDone */ { copyDone = true; break; } case 'f': /* CopyFail */ { ereport(ERROR, (errcode(ERRCODE_QUERY_CANCELED), errmsg("COPY data failed: %s", pq_getmsgstring(copyData)))); break; } case 'H': /* Flush */ case 'S': /* Sync */ { /* * Ignore Flush/Sync for the convenience of client libraries (such * as libpq) that may send those without noticing that the command * they just sent was COPY. */ copyDone = false; break; } default: { ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("unexpected message type 0x%02X during COPY data", messageType))); break; } } return copyDone; } citus-7.0.3/src/backend/distributed/connection/000077500000000000000000000000001317107136600215145ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/connection/connection_management.c000066400000000000000000000446111317107136600262210ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * connection_management.c * Central management of connections and their life-cycle * * Copyright (c) 2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "libpq-fe.h" #include "miscadmin.h" #include "access/hash.h" #include "commands/dbcommands.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" #include "distributed/hash_helpers.h" #include "distributed/placement_connection.h" #include "mb/pg_wchar.h" #include "utils/hsearch.h" #include "utils/memutils.h" int NodeConnectionTimeout = 5000; HTAB *ConnectionHash = NULL; MemoryContext ConnectionContext = NULL; static uint32 ConnectionHashHash(const void *key, Size keysize); static int ConnectionHashCompare(const void *a, const void *b, Size keysize); static MultiConnection * StartConnectionEstablishment(ConnectionHashKey *key); static void AfterXactHostConnectionHandling(ConnectionHashEntry *entry, bool isCommit); static MultiConnection * FindAvailableConnection(dlist_head *connections, uint32 flags); /* * Initialize per-backend connection management infrastructure. */ void InitializeConnectionManagement(void) { HASHCTL info; uint32 hashFlags = 0; /* * Create a single context for connection and transaction related memory * management. Doing so, instead of allocating in TopMemoryContext, makes * it easier to associate used memory. */ ConnectionContext = AllocSetContextCreate(TopMemoryContext, "Connection Context", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); /* create (host,port,user,database) -> [connection] hash */ memset(&info, 0, sizeof(info)); info.keysize = sizeof(ConnectionHashKey); info.entrysize = sizeof(ConnectionHashEntry); info.hash = ConnectionHashHash; info.match = ConnectionHashCompare; info.hcxt = ConnectionContext; hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); ConnectionHash = hash_create("citus connection cache (host,port,user,database)", 64, &info, hashFlags); } /* * Perform connection management activity after the end of a transaction. Both * COMMIT and ABORT paths are handled here. * * This is called by Citus' global transaction callback. */ void AfterXactConnectionHandling(bool isCommit) { HASH_SEQ_STATUS status; ConnectionHashEntry *entry; hash_seq_init(&status, ConnectionHash); while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0) { AfterXactHostConnectionHandling(entry, isCommit); /* * NB: We leave the hash entry in place, even if there's no individual * connections in it anymore. There seems no benefit in deleting it, * and it'll save a bit of work in the next transaction. */ } } /* * GetNodeConnection() establishes a connection to remote node, using default * user and database. * * See StartNodeUserDatabaseConnection for details. */ MultiConnection * GetNodeConnection(uint32 flags, const char *hostname, int32 port) { return GetNodeUserDatabaseConnection(flags, hostname, port, NULL, NULL); } /* * StartNodeConnection initiates a connection to remote node, using default * user and database. * * See StartNodeUserDatabaseConnection for details. */ MultiConnection * StartNodeConnection(uint32 flags, const char *hostname, int32 port) { return StartNodeUserDatabaseConnection(flags, hostname, port, NULL, NULL); } /* * GetNodeUserDatabaseConnection establishes connection to remote node. * * See StartNodeUserDatabaseConnection for details. 
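 * Unlike StartNodeUserDatabaseConnection, this variant waits for connection establishment to
 * finish before returning.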
*/ MultiConnection * GetNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, const char *user, const char *database) { MultiConnection *connection; connection = StartNodeUserDatabaseConnection(flags, hostname, port, user, database); FinishConnectionEstablishment(connection); return connection; } /* * StartNodeUserDatabaseConnection() initiates a connection to a remote node. * * If user or database are NULL, the current session's defaults are used. The * following flags influence connection establishment behaviour: * - SESSION_LIFESPAN - the connection should persist after transaction end * - FORCE_NEW_CONNECTION - a new connection is required * * The returned connection has only been initiated, not fully * established. That's useful to allow parallel connection establishment. If * that's not desired use the Get* variant. */ MultiConnection * StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, const char *user, const char *database) { ConnectionHashKey key; ConnectionHashEntry *entry = NULL; MultiConnection *connection; bool found; /* do some minimal input checks */ strlcpy(key.hostname, hostname, MAX_NODE_LENGTH); if (strlen(hostname) > MAX_NODE_LENGTH) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("hostname exceeds the maximum length of %d", MAX_NODE_LENGTH))); } key.port = port; if (user) { strlcpy(key.user, user, NAMEDATALEN); } else { strlcpy(key.user, CurrentUserName(), NAMEDATALEN); } if (database) { strlcpy(key.database, database, NAMEDATALEN); } else { strlcpy(key.database, get_database_name(MyDatabaseId), NAMEDATALEN); } if (CurrentCoordinatedTransactionState == COORD_TRANS_NONE) { CurrentCoordinatedTransactionState = COORD_TRANS_IDLE; } /* * Lookup relevant hash entry. We always enter. If only a cached * connection is desired, and there's none, we'll simply leave the * connection list empty. */ entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found); if (!found) { entry->connections = MemoryContextAlloc(ConnectionContext, sizeof(dlist_head)); dlist_init(entry->connections); } /* if desired, check whether there's a usable connection */ if (!(flags & FORCE_NEW_CONNECTION)) { /* check connection cache for a connection that's not already in use */ connection = FindAvailableConnection(entry->connections, flags); if (connection) { if (flags & SESSION_LIFESPAN) { connection->sessionLifespan = true; } return connection; } } /* * Either no caching desired, or no pre-established, non-claimed, * connection present. Initiate connection establishment. */ connection = StartConnectionEstablishment(&key); dlist_push_tail(entry->connections, &connection->connectionNode); ResetShardPlacementAssociation(connection); if (flags & SESSION_LIFESPAN) { connection->sessionLifespan = true; } return connection; } /* StartNodeUserDatabaseConnection() helper */ static MultiConnection * FindAvailableConnection(dlist_head *connections, uint32 flags) { dlist_iter iter; dlist_foreach(iter, connections) { MultiConnection *connection = dlist_container(MultiConnection, connectionNode, iter.cur); /* don't return claimed connections */ if (connection->claimedExclusively) { continue; } return connection; } return NULL; } /* * Return MultiConnection associated with the libpq connection. * * Note that this is comparatively expensive. Should only be used for * backward-compatibility purposes. 
*/ MultiConnection * GetConnectionFromPGconn(struct pg_conn *pqConn) { HASH_SEQ_STATUS status; ConnectionHashEntry *entry; hash_seq_init(&status, ConnectionHash); while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0) { dlist_head *connections = entry->connections; dlist_iter iter; /* check connection cache for a connection that's not already in use */ dlist_foreach(iter, connections) { MultiConnection *connection = dlist_container(MultiConnection, connectionNode, iter.cur); if (connection->pgConn == pqConn) { hash_seq_term(&status); return connection; } } } return NULL; } /* * CloseNodeConnectionsAfterTransaction sets the sessionLifespan flag of the connections * to a particular node as false. This is mainly used when a worker leaves the cluster. */ void CloseNodeConnectionsAfterTransaction(char *nodeName, int nodePort) { HASH_SEQ_STATUS status; ConnectionHashEntry *entry; hash_seq_init(&status, ConnectionHash); while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != 0) { dlist_iter iter; dlist_head *connections = NULL; if (strcmp(entry->key.hostname, nodeName) != 0 || entry->key.port != nodePort) { continue; } connections = entry->connections; dlist_foreach(iter, connections) { MultiConnection *connection = dlist_container(MultiConnection, connectionNode, iter.cur); connection->sessionLifespan = false; } } } /* * Close a previously established connection. */ void CloseConnection(MultiConnection *connection) { ConnectionHashKey key; bool found; /* close connection */ PQfinish(connection->pgConn); connection->pgConn = NULL; strlcpy(key.hostname, connection->hostname, MAX_NODE_LENGTH); key.port = connection->port; strlcpy(key.user, connection->user, NAMEDATALEN); strlcpy(key.database, connection->database, NAMEDATALEN); hash_search(ConnectionHash, &key, HASH_FIND, &found); if (found) { /* unlink from list of open connections */ dlist_delete(&connection->connectionNode); /* same for transaction state and shard/placement machinery */ CloseRemoteTransaction(connection); CloseShardPlacementAssociation(connection); /* we leave the per-host entry alive */ pfree(connection); } else { ereport(ERROR, (errmsg("closing untracked connection"))); } } /* * Close a previously established connection. * * This function closes the MultiConnection associatated with the libpq * connection. * * Note that this is comparatively expensive. Should only be used for * backward-compatibility purposes. */ void CloseConnectionByPGconn(PGconn *pqConn) { MultiConnection *connection = GetConnectionFromPGconn(pqConn); if (connection) { CloseConnection(connection); } else { ereport(WARNING, (errmsg("could not find connection to close"))); } } /* * ShutdownConnection, if necessary cancels the currently running statement, * and then closes the underlying libpq connection. The MultiConnection * itself is left intact. * * NB: Cancelling a statement requires network IO, and currently is not * interruptible. Unfortunately libpq does not provide a non-blocking * implementation of PQcancel(), so we don't have much choice for now. */ void ShutdownConnection(MultiConnection *connection) { /* * Only cancel statement if there's currently one running, and the * connection is in an OK state. 
*/ if (PQstatus(connection->pgConn) == CONNECTION_OK && PQtransactionStatus(connection->pgConn) == PQTRANS_ACTIVE) { char errorMessage[256] = { 0 }; PGcancel *cancel = PQgetCancel(connection->pgConn); if (!PQcancel(cancel, errorMessage, sizeof(errorMessage))) { ereport(WARNING, (errmsg("could not cancel connection: %s", errorMessage))); } PQfreeCancel(cancel); } PQfinish(connection->pgConn); connection->pgConn = NULL; } /* * FinishConnectionListEstablishment is a wrapper around FinishConnectionEstablishment. * The function iterates over the multiConnectionList and finishes the connection * establishment for each multi connection. */ void FinishConnectionListEstablishment(List *multiConnectionList) { ListCell *multiConnectionCell = NULL; foreach(multiConnectionCell, multiConnectionList) { MultiConnection *multiConnection = (MultiConnection *) lfirst( multiConnectionCell); /* TODO: consider making connection establishment fully in parallel */ FinishConnectionEstablishment(multiConnection); } } /* * Synchronously finish connection establishment of an individual connection. * * TODO: Replace with variant waiting for multiple connections. */ void FinishConnectionEstablishment(MultiConnection *connection) { static int checkIntervalMS = 200; /* * Loop until connection is established, or failed (possibly just timed * out). */ while (true) { ConnStatusType status = PQstatus(connection->pgConn); PostgresPollingStatusType pollmode; if (status == CONNECTION_OK) { return; } /* FIXME: retries? */ if (status == CONNECTION_BAD) { return; } pollmode = PQconnectPoll(connection->pgConn); /* * FIXME: Do we want to add transparent retry support here? */ if (pollmode == PGRES_POLLING_FAILED) { return; } else if (pollmode == PGRES_POLLING_OK) { return; } else { Assert(pollmode == PGRES_POLLING_WRITING || pollmode == PGRES_POLLING_READING); } /* Loop, to handle poll() being interrupted by signals (EINTR) */ while (true) { struct pollfd pollFileDescriptor; int pollResult = 0; pollFileDescriptor.fd = PQsocket(connection->pgConn); if (pollmode == PGRES_POLLING_READING) { pollFileDescriptor.events = POLLIN; } else { pollFileDescriptor.events = POLLOUT; } pollFileDescriptor.revents = 0; /* * Only sleep for a limited amount of time, so we can react to * interrupts in time, even if the platform doesn't interrupt * poll() after signal arrival. */ pollResult = poll(&pollFileDescriptor, 1, checkIntervalMS); if (pollResult == 0) { /* * Timeout exceeded. Two things to do: * - check whether any interrupts arrived and handle them * - check whether establishment for connection already has * lasted for too long, stop waiting if so. */ CHECK_FOR_INTERRUPTS(); if (TimestampDifferenceExceeds(connection->connectionStart, GetCurrentTimestamp(), NodeConnectionTimeout)) { ereport(WARNING, (errmsg("could not establish connection after %u ms", NodeConnectionTimeout))); /* close connection, otherwise we take up resource on the other side */ PQfinish(connection->pgConn); connection->pgConn = NULL; break; } } else if (pollResult > 0) { /* * IO possible, continue connection establishment. We could * check for timeouts here as well, but if there's progress * there seems little point. */ break; } else if (pollResult != EINTR) { /* Retrying, signal interrupted. So check. */ CHECK_FOR_INTERRUPTS(); } else { /* * We ERROR here, instead of just returning a failed * connection, because this shouldn't happen, and indicates a * programming error somewhere, not a network etc. issue. 
*/ ereport(ERROR, (errcode_for_socket_access(), errmsg("poll() failed: %m"))); } } } } /* * ClaimConnectionExclusively signals that this connection is actively being * used. That means it'll not be, again, returned by * StartNodeUserDatabaseConnection() et al until releases with * UnclaimConnection(). */ void ClaimConnectionExclusively(MultiConnection *connection) { Assert(!connection->claimedExclusively); connection->claimedExclusively = true; } /* * UnclaimConnection signals that this connection is not being used * anymore. That means it again may be returned by * StartNodeUserDatabaseConnection() et al. */ void UnclaimConnection(MultiConnection *connection) { connection->claimedExclusively = false; } static uint32 ConnectionHashHash(const void *key, Size keysize) { ConnectionHashKey *entry = (ConnectionHashKey *) key; uint32 hash = 0; hash = string_hash(entry->hostname, NAMEDATALEN); hash = hash_combine(hash, hash_uint32(entry->port)); hash = hash_combine(hash, string_hash(entry->user, NAMEDATALEN)); hash = hash_combine(hash, string_hash(entry->database, NAMEDATALEN)); return hash; } static int ConnectionHashCompare(const void *a, const void *b, Size keysize) { ConnectionHashKey *ca = (ConnectionHashKey *) a; ConnectionHashKey *cb = (ConnectionHashKey *) b; if (strncmp(ca->hostname, cb->hostname, MAX_NODE_LENGTH) != 0 || ca->port != cb->port || strncmp(ca->user, cb->user, NAMEDATALEN) != 0 || strncmp(ca->database, cb->database, NAMEDATALEN) != 0) { return 1; } else { return 0; } } /* * Asynchronously establish connection to a remote node, but don't wait for * that to finish. DNS lookups etc. are performed synchronously though. */ static MultiConnection * StartConnectionEstablishment(ConnectionHashKey *key) { char nodePortString[12]; const char *clientEncoding = GetDatabaseEncodingName(); MultiConnection *connection = NULL; const char *keywords[] = { "host", "port", "dbname", "user", "client_encoding", "fallback_application_name", NULL }; const char *values[] = { key->hostname, nodePortString, key->database, key->user, clientEncoding, "citus", NULL }; connection = MemoryContextAllocZero(ConnectionContext, sizeof(MultiConnection)); sprintf(nodePortString, "%d", key->port); strlcpy(connection->hostname, key->hostname, MAX_NODE_LENGTH); connection->port = key->port; strlcpy(connection->database, key->database, NAMEDATALEN); strlcpy(connection->user, key->user, NAMEDATALEN); connection->pgConn = PQconnectStartParams(keywords, values, false); connection->connectionStart = GetCurrentTimestamp(); /* * To avoid issues with interrupts not getting caught all our connections * are managed in a non-blocking manner. remote_commands.c provides * wrappers emulating blocking behaviour. */ PQsetnonblocking(connection->pgConn, true); return connection; } /* * Close all remote connections if necessary anymore (i.e. not session * lifetime), or if in a failed state. */ static void AfterXactHostConnectionHandling(ConnectionHashEntry *entry, bool isCommit) { dlist_mutable_iter iter; dlist_foreach_modify(iter, entry->connections) { MultiConnection *connection = dlist_container(MultiConnection, connectionNode, iter.cur); /* * To avoid code leaking connections we warn if connections are * still claimed exclusively. We can only do so if the transaction * committed, as it's normal that code didn't have chance to clean * up after errors. 
*/ if (isCommit && connection->claimedExclusively) { ereport(WARNING, (errmsg("connection claimed exclusively at transaction commit"))); } /* * Preserve session lifespan connections if they are still healthy. */ if (!connection->sessionLifespan || PQstatus(connection->pgConn) != CONNECTION_OK || PQtransactionStatus(connection->pgConn) != PQTRANS_IDLE) { ShutdownConnection(connection); /* unlink from list */ dlist_delete(iter.cur); pfree(connection); } else { /* reset per-transaction state */ ResetRemoteTransaction(connection); ResetShardPlacementAssociation(connection); UnclaimConnection(connection); } } } citus-7.0.3/src/backend/distributed/connection/placement_connection.c000066400000000000000000001063441317107136600260570ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * placement_connection.c * Per placement connection handling. * * Copyright (c) 2016-2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/hash.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" #include "distributed/hash_helpers.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_planner.h" #include "distributed/placement_connection.h" #include "utils/hsearch.h" #include "utils/memutils.h" /* * A connection reference is used to register that a connection has been used * to read or modify either a) a shard placement as a particular user b) a * group of colocated placements (which depend on whether the reference is * from ConnectionPlacementHashEntry or ColocatedPlacementHashEntry). */ typedef struct ConnectionReference { /* * The user used to read/modify the placement. We cannot reuse connections * that were performed using a different role, since it would not have the * right permissions. */ const char *userName; /* the connection */ MultiConnection *connection; /* * Information about what the connection is used for. There can only be * one connection executing DDL/DML for a placement to avoid deadlock * issues/read-your-own-writes violations. The difference between DDL/DML * currently is only used to emit more precise error messages. */ bool hadDML; bool hadDDL; /* colocation group of the placement, if any */ uint32 colocationGroupId; uint32 representativeValue; /* membership in MultiConnection->referencedPlacements */ dlist_node connectionNode; } ConnectionReference; struct ColocatedPlacementsHashEntry; /* * Hash table mapping placements to a list of connections. * * This stores a list of connections for each placement, because multiple * connections to the same placement may exist at the same time. E.g. a * real-time executor query may reference the same placement in several * sub-tasks. * * We keep track about a connection having executed DML or DDL, since we can * only ever allow a single transaction to do either to prevent deadlocks and * consistency violations (e.g. read-your-own-writes). */ /* hash key */ typedef struct ConnectionPlacementHashKey { uint64 placementId; } ConnectionPlacementHashKey; /* hash entry */ typedef struct ConnectionPlacementHashEntry { ConnectionPlacementHashKey key; /* did any remote transactions fail? */ bool failed; /* primary connection used to access the placement */ ConnectionReference *primaryConnection; /* are any other connections reading from the placements? 
*/ bool hasSecondaryConnections; /* entry for the set of co-located placements */ struct ColocatedPlacementsHashEntry *colocatedEntry; /* membership in ConnectionShardHashEntry->placementConnections */ dlist_node shardNode; } ConnectionPlacementHashEntry; /* hash table */ static HTAB *ConnectionPlacementHash; /* * A hash-table mapping colocated placements to connections. Colocated * placements being the set of placements on a single node that represent the * same value range. This is needed because connections for colocated * placements (i.e. the corresponding placements for different colocated * distributed tables) need to share connections. Otherwise things like * foreign keys can very easily lead to unprincipled deadlocks. This means * that there can only one DML/DDL connection for a set of colocated * placements. * * A set of colocated placements is identified, besides node identifying * information, by the associated colocation group id and the placement's * 'representativeValue' which currently is the lower boundary of it's * hash-range. * * Note that this hash-table only contains entries for hash-partitioned * tables, because others so far don't support colocation. */ /* hash key */ typedef struct ColocatedPlacementsHashKey { /* to identify host - database can't differ */ char nodeName[MAX_NODE_LENGTH]; uint32 nodePort; /* colocation group, or invalid */ uint32 colocationGroupId; /* to represent the value range */ uint32 representativeValue; } ColocatedPlacementsHashKey; /* hash entry */ typedef struct ColocatedPlacementsHashEntry { ColocatedPlacementsHashKey key; /* primary connection used to access the co-located placements */ ConnectionReference *primaryConnection; /* are any other connections reading from the placements? */ bool hasSecondaryConnections; } ColocatedPlacementsHashEntry; static HTAB *ColocatedPlacementsHash; /* * Hash table mapping shard ids to placements. * * This is used to track whether placements of a shard have to be marked * invalid after a failure, or whether a coordinated transaction has to be * aborted, to avoid all placements of a shard to be marked invalid. */ /* hash key */ typedef struct ConnectionShardHashKey { uint64 shardId; } ConnectionShardHashKey; /* hash entry */ typedef struct ConnectionShardHashEntry { ConnectionShardHashKey key; dlist_head placementConnections; } ConnectionShardHashEntry; /* hash table */ static HTAB *ConnectionShardHash; static MultiConnection * FindPlacementListConnection(int flags, List *placementAccessList, const char *userName, List **placementEntryList); static ConnectionPlacementHashEntry * FindOrCreatePlacementEntry( ShardPlacement *placement); static bool CanUseExistingConnection(uint32 flags, const char *userName, ConnectionReference *placementConnection); static bool ConnectionAccessedDifferentPlacement(MultiConnection *connection, ShardPlacement *placement); static void AssociatePlacementWithShard(ConnectionPlacementHashEntry *placementEntry, ShardPlacement *placement); static bool CheckShardPlacements(ConnectionShardHashEntry *shardEntry); static uint32 ColocatedPlacementsHashHash(const void *key, Size keysize); static int ColocatedPlacementsHashCompare(const void *a, const void *b, Size keysize); /* * GetPlacementConnection establishes a connection for a placement. * * See StartPlacementConnection for details. 
 */
MultiConnection *
GetPlacementConnection(uint32 flags, ShardPlacement *placement, const char *userName)
{
	MultiConnection *connection = StartPlacementConnection(flags, placement, userName);

	FinishConnectionEstablishment(connection);

	return connection;
}


/*
 * StartPlacementConnection initiates a connection to a remote node,
 * associated with the placement and transaction.
 *
 * The connection is established for the current database. If userName is NULL
 * the current user is used, otherwise the provided one.
 *
 * See StartNodeUserDatabaseConnection for details.
 *
 * Flags have the corresponding meaning from StartNodeUserDatabaseConnection,
 * except that two additional flags have an effect:
 * - FOR_DML - signal that connection is going to be used for DML (modifications)
 * - FOR_DDL - signal that connection is going to be used for DDL
 *
 * Only one connection associated with the placement may have FOR_DML or
 * FOR_DDL set. For hash-partitioned tables only one connection for a set of
 * colocated placements may have FOR_DML/DDL set. This restriction prevents
 * deadlocks and wrong results due to in-progress transactions.
 */
MultiConnection *
StartPlacementConnection(uint32 flags, ShardPlacement *placement, const char *userName)
{
	ShardPlacementAccess *placementAccess =
		(ShardPlacementAccess *) palloc0(sizeof(ShardPlacementAccess));

	placementAccess->placement = placement;

	if (flags & FOR_DDL)
	{
		placementAccess->accessType = PLACEMENT_ACCESS_DDL;
	}
	else if (flags & FOR_DML)
	{
		placementAccess->accessType = PLACEMENT_ACCESS_DML;
	}
	else
	{
		placementAccess->accessType = PLACEMENT_ACCESS_SELECT;
	}

	return StartPlacementListConnection(flags, list_make1(placementAccess), userName);
}


/*
 * GetPlacementListConnection establishes a connection for a set of placement
 * accesses.
 *
 * See StartPlacementListConnection for details.
 */
MultiConnection *
GetPlacementListConnection(uint32 flags, List *placementAccessList, const char *userName)
{
	MultiConnection *connection = StartPlacementListConnection(flags, placementAccessList,
															   userName);

	FinishConnectionEstablishment(connection);

	return connection;
}


/*
 * StartPlacementListConnection returns a connection to a remote node suitable
 * for the given placement accesses (SELECT, DML, DDL), or throws an error if
 * no suitable connection exists and establishing a new one would cause a
 * self-deadlock or a consistency violation.
 */
MultiConnection *
StartPlacementListConnection(uint32 flags, List *placementAccessList,
							 const char *userName)
{
	char *freeUserName = NULL;
	ListCell *placementAccessCell = NULL;
	List *placementEntryList = NIL;
	ListCell *placementEntryCell = NULL;
	MultiConnection *chosenConnection = NULL;

	if (userName == NULL)
	{
		userName = freeUserName = CurrentUserName();
	}

	chosenConnection = FindPlacementListConnection(flags, placementAccessList, userName,
												   &placementEntryList);
	if (chosenConnection == NULL)
	{
		/* use the first placement from the list to extract nodename and nodeport */
		ShardPlacementAccess *placementAccess =
			(ShardPlacementAccess *) linitial(placementAccessList);
		ShardPlacement *placement = placementAccess->placement;
		char *nodeName = placement->nodeName;
		int nodePort = placement->nodePort;

		/*
		 * No suitable connection in the placement->connection mapping, get one from
		 * the node->connection pool.
*/ chosenConnection = StartNodeConnection(flags, nodeName, nodePort); if (flags & CONNECTION_PER_PLACEMENT && ConnectionAccessedDifferentPlacement(chosenConnection, placement)) { /* * Cached connection accessed a non-co-located placement in the same * table or co-location group, while the caller asked for a connection * per placement. Open a new connection instead. * * We use this for situations in which we want to use a different * connection for every placement, such as COPY. If we blindly returned * a cached conection that already modified a different, non-co-located * placement B in the same table or in a table with the same co-location * ID as the current placement, then we'd no longer able to write to * placement B later in the COPY. */ chosenConnection = StartNodeConnection(flags | FORCE_NEW_CONNECTION, nodeName, nodePort); Assert(!ConnectionAccessedDifferentPlacement(chosenConnection, placement)); } } /* * Now that a connection has been chosen, initialise or update the connection * references for all placements. */ forboth(placementAccessCell, placementAccessList, placementEntryCell, placementEntryList) { ShardPlacementAccess *placementAccess = (ShardPlacementAccess *) lfirst(placementAccessCell); ShardPlacementAccessType accessType = placementAccess->accessType; ConnectionPlacementHashEntry *placementEntry = (ConnectionPlacementHashEntry *) lfirst(placementEntryCell); ConnectionReference *placementConnection = placementEntry->primaryConnection; if (placementConnection->connection == chosenConnection) { /* using the connection that was already assigned to the placement */ } else if (placementConnection->connection == NULL) { /* placement does not have a connection assigned yet */ placementConnection->connection = chosenConnection; placementConnection->hadDDL = false; placementConnection->hadDML = false; placementConnection->userName = MemoryContextStrdup(TopTransactionContext, userName); /* record association with connection */ dlist_push_tail(&chosenConnection->referencedPlacements, &placementConnection->connectionNode); } else { /* using a different connection than the one assigned to the placement */ if (accessType != PLACEMENT_ACCESS_SELECT) { /* * We previously read from the placement, but now we're writing to * it (if we had written to the placement, we would have either chosen * the same connection, or errored out). Update the connection reference * to point to the connection used for writing. We don't need to remember * the existing connection since we won't be able to reuse it for * accessing the placement. However, we do register that it exists in * hasSecondaryConnections. */ placementConnection->connection = chosenConnection; placementConnection->userName = MemoryContextStrdup(TopTransactionContext, userName); Assert(!placementConnection->hadDDL); Assert(!placementConnection->hadDML); /* record association with connection */ dlist_push_tail(&chosenConnection->referencedPlacements, &placementConnection->connectionNode); } /* * There are now multiple connections that read from the placement * and DDL commands are forbidden. */ placementEntry->hasSecondaryConnections = true; if (placementEntry->colocatedEntry != NULL) { /* we also remember this for co-located placements */ placementEntry->colocatedEntry->hasSecondaryConnections = true; } } /* * Remember that we used the current connection for writes. 
*/ if (accessType == PLACEMENT_ACCESS_DDL) { placementConnection->hadDDL = true; } if (accessType == PLACEMENT_ACCESS_DML) { placementConnection->hadDML = true; } } if (freeUserName) { pfree(freeUserName); } return chosenConnection; } /* * FindPlacementListConnection determines whether there is a connection that must * be used to perform the given placement accesses. * * If a placement was only read in this transaction, then the same connection must * be used for DDL to prevent self-deadlock. If a placement was modified in this * transaction, then the same connection must be used for all subsequent accesses * to ensure read-your-writes consistency and prevent self-deadlock. If those * conditions cannot be met, because a connection is in use or the placements in * the placement access list were modified over multiple connections, then this * function throws an error. * * The function returns the connection that needs to be used, if such a connection * exists, and the current placement entries for all placements in the placement * access list. */ static MultiConnection * FindPlacementListConnection(int flags, List *placementAccessList, const char *userName, List **placementEntryList) { bool foundModifyingConnection = false; ListCell *placementAccessCell = NULL; MultiConnection *chosenConnection = NULL; /* * Go through all placement accesses to find a suitable connection. * * If none of the placements have been accessed in this transaction, connection * remains NULL. * * If one or more of the placements have been modified in this transaction, then * use the connection that performed the write. If placements have been written * over multiple connections or the connection is not available, error out. * * If placements have only been read in this transaction, then use the last * suitable connection found for a placement in the placementAccessList. */ foreach(placementAccessCell, placementAccessList) { ShardPlacementAccess *placementAccess = (ShardPlacementAccess *) lfirst(placementAccessCell); ShardPlacement *placement = placementAccess->placement; ShardPlacementAccessType accessType = placementAccess->accessType; ConnectionPlacementHashEntry *placementEntry = NULL; ColocatedPlacementsHashEntry *colocatedEntry = NULL; ConnectionReference *placementConnection = NULL; if (placement->shardId == INVALID_SHARD_ID) { /* * When a SELECT prunes down to 0 shard, we use a dummy placement. * In that case, we can fall back to the default connection. * * FIXME: this can be removed if we evaluate empty SELECTs locally. */ continue; } placementEntry = FindOrCreatePlacementEntry(placement); colocatedEntry = placementEntry->colocatedEntry; placementConnection = placementEntry->primaryConnection; /* note: the Asserts below are primarily for clarifying the conditions */ if (placementConnection->connection == NULL) { /* no connection has been chosen for the placement */ } else if (accessType == PLACEMENT_ACCESS_DDL && placementEntry->hasSecondaryConnections) { /* * If a placement has been read over multiple connections (typically as * a result of a reference table join) then a DDL command on the placement * would create a self-deadlock. 
 */
			Assert(placementConnection != NULL);

			ereport(ERROR,
					(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
					 errmsg("cannot perform DDL on placement %ld, which has been read over "
							"multiple connections",
							placement->placementId)));
		}
		else if (accessType == PLACEMENT_ACCESS_DDL && colocatedEntry != NULL &&
				 colocatedEntry->hasSecondaryConnections)
		{
			/*
			 * If a placement has been read over multiple (uncommitted) connections
			 * then a DDL command on a co-located placement may create a self-deadlock
			 * if there exists some relationship between the co-located placements
			 * (e.g. foreign key, partitioning).
			 */
			Assert(placementConnection != NULL);

			ereport(ERROR,
					(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
					 errmsg("cannot perform DDL on placement %ld since a co-located "
							"placement has been read over multiple connections",
							placement->placementId)));
		}
		else if (foundModifyingConnection)
		{
			/*
			 * We already found a connection that performed writes on one of the
			 * placements and must use it.
			 */
			if ((placementConnection->hadDDL || placementConnection->hadDML) &&
				placementConnection->connection != chosenConnection)
			{
				/*
				 * The current placement may have been modified over a different
				 * connection. Neither connection is guaranteed to see all uncommitted
				 * writes and therefore we cannot proceed.
				 */
				ereport(ERROR,
						(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
						 errmsg("cannot perform query with placements that were "
								"modified over multiple connections")));
			}
		}
		else if (CanUseExistingConnection(flags, userName, placementConnection))
		{
			/*
			 * There is an existing connection for the placement and we can use it.
			 */
			Assert(placementConnection != NULL);

			chosenConnection = placementConnection->connection;

			if (placementConnection->hadDDL || placementConnection->hadDML)
			{
				/* this connection performed writes, we must use it */
				foundModifyingConnection = true;
			}
		}
		else if (placementConnection->hadDDL)
		{
			/*
			 * There is an existing connection, but we cannot use it and it executed
			 * DDL. Any subsequent operation needs to be able to see the results of
			 * the DDL command and thus cannot proceed if it cannot use the connection.
			 */
			Assert(placementConnection != NULL);
			Assert(!CanUseExistingConnection(flags, userName, placementConnection));

			ereport(ERROR,
					(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
					 errmsg("cannot establish a new connection for placement %ld, since "
							"DDL has been executed on a connection that is in use",
							placement->placementId)));
		}
		else if (placementConnection->hadDML)
		{
			/*
			 * There is an existing connection, but we cannot use it and it executed
			 * DML. Any subsequent operation needs to be able to see the results of
			 * the DML command and thus cannot proceed if it cannot use the connection.
			 *
			 * Note that this is not meaningfully different from the previous case. We
			 * just produce a different error message based on whether DDL or only
			 * DML was executed.
			 */
			Assert(placementConnection != NULL);
			Assert(!CanUseExistingConnection(flags, userName, placementConnection));
			Assert(!placementConnection->hadDDL);

			ereport(ERROR,
					(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
					 errmsg("cannot establish a new connection for placement %ld, since "
							"DML has been executed on a connection that is in use",
							placement->placementId)));
		}
		else if (accessType == PLACEMENT_ACCESS_DDL)
		{
			/*
			 * There is an existing connection, but we cannot use it and we want to
			 * execute DDL. The operation on the existing connection might conflict
			 * with the DDL statement.
*/ Assert(placementConnection != NULL); Assert(!CanUseExistingConnection(flags, userName, placementConnection)); Assert(!placementConnection->hadDDL); Assert(!placementConnection->hadDML); ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), errmsg("cannot perform a parallel DDL command because multiple " "placements have been accessed over the same connection"))); } else { /* * The placement has a connection assigned to it, but it cannot be used, * most likely because it has been claimed exclusively. Fortunately, it * has only been used for reads and we're not performing a DDL command. * We can therefore use a different connection for this placement. */ Assert(placementConnection != NULL); Assert(!CanUseExistingConnection(flags, userName, placementConnection)); Assert(!placementConnection->hadDDL); Assert(!placementConnection->hadDML); Assert(accessType != PLACEMENT_ACCESS_DDL); } *placementEntryList = lappend(*placementEntryList, placementEntry); } return chosenConnection; } /* * FindOrCreatePlacementEntry finds a placement entry in either the * placement->connection hash or the co-located placements->connection hash, * or adds a new entry if the placement has not yet been accessed in the * current transaction. */ static ConnectionPlacementHashEntry * FindOrCreatePlacementEntry(ShardPlacement *placement) { ConnectionPlacementHashKey key; ConnectionPlacementHashEntry *placementEntry = NULL; bool found = false; key.placementId = placement->placementId; placementEntry = hash_search(ConnectionPlacementHash, &key, HASH_ENTER, &found); if (!found) { /* no connection has been chosen for this placement */ placementEntry->failed = false; placementEntry->primaryConnection = NULL; placementEntry->hasSecondaryConnections = false; placementEntry->colocatedEntry = NULL; if (placement->partitionMethod == DISTRIBUTE_BY_HASH || placement->partitionMethod == DISTRIBUTE_BY_NONE) { ColocatedPlacementsHashKey key; ColocatedPlacementsHashEntry *colocatedEntry = NULL; strcpy(key.nodeName, placement->nodeName); key.nodePort = placement->nodePort; key.colocationGroupId = placement->colocationGroupId; key.representativeValue = placement->representativeValue; /* look for a connection assigned to co-located placements */ colocatedEntry = hash_search(ColocatedPlacementsHash, &key, HASH_ENTER, &found); if (!found) { void *conRef = MemoryContextAllocZero(TopTransactionContext, sizeof(ConnectionReference)); ConnectionReference *connectionReference = (ConnectionReference *) conRef; /* * Store the co-location group information such that we can later * determine whether a connection accessed different placements * of the same co-location group. */ connectionReference->colocationGroupId = placement->colocationGroupId; connectionReference->representativeValue = placement->representativeValue; /* * Create a connection reference that can be used for the entire * set of co-located placements. */ colocatedEntry->primaryConnection = connectionReference; colocatedEntry->hasSecondaryConnections = false; } /* * Assign the connection reference for the set of co-located placements * to the current placement. 
*/ placementEntry->primaryConnection = colocatedEntry->primaryConnection; placementEntry->colocatedEntry = colocatedEntry; } else { void *conRef = MemoryContextAllocZero(TopTransactionContext, sizeof(ConnectionReference)); placementEntry->primaryConnection = (ConnectionReference *) conRef; } } /* record association with shard, for invalidation */ AssociatePlacementWithShard(placementEntry, placement); return placementEntry; } /* * CanUseExistingConnection is a helper function for CheckExistingConnections() * that checks whether an existing connection can be reused. */ static bool CanUseExistingConnection(uint32 flags, const char *userName, ConnectionReference *connectionReference) { MultiConnection *connection = connectionReference->connection; if (!connection) { /* if already closed connection obviously not usable */ return false; } else if (connection->claimedExclusively) { /* already used */ return false; } else if (flags & FORCE_NEW_CONNECTION) { /* no connection reuse desired */ return false; } else if (strcmp(connectionReference->userName, userName) != 0) { /* connection for different user, check for conflict */ return false; } else { return true; } } /* * ConnectionAccessedDifferentPlacement returns true if the connection accessed another * placement in the same colocation group with a different representative value, * meaning it's not strictly colocated. */ static bool ConnectionAccessedDifferentPlacement(MultiConnection *connection, ShardPlacement *placement) { dlist_iter placementIter; dlist_foreach(placementIter, &connection->referencedPlacements) { ConnectionReference *connectionReference = dlist_container(ConnectionReference, connectionNode, placementIter.cur); if (placement->colocationGroupId != INVALID_COLOCATION_ID && placement->colocationGroupId == connectionReference->colocationGroupId && placement->representativeValue != connectionReference->representativeValue) { /* non-co-located placements from the same co-location group */ return true; } } return false; } /* * AssociatePlacementWithShard records shard->placement relation in * ConnectionShardHash. * * That association is later used, in CheckForFailedPlacements, to invalidate * shard placements if necessary. */ static void AssociatePlacementWithShard(ConnectionPlacementHashEntry *placementEntry, ShardPlacement *placement) { ConnectionShardHashKey shardKey; ConnectionShardHashEntry *shardEntry = NULL; bool found = false; dlist_iter placementIter; shardKey.shardId = placement->shardId; shardEntry = hash_search(ConnectionShardHash, &shardKey, HASH_ENTER, &found); if (!found) { dlist_init(&shardEntry->placementConnections); } /* * Check if placement is already associated with shard (happens if there's * multiple connections for a placement). There'll usually only be few * placement per shard, so the price of iterating isn't large. */ dlist_foreach(placementIter, &shardEntry->placementConnections) { ConnectionPlacementHashEntry *placementEntry = dlist_container(ConnectionPlacementHashEntry, shardNode, placementIter.cur); if (placementEntry->key.placementId == placement->placementId) { return; } } /* otherwise add */ dlist_push_tail(&shardEntry->placementConnections, &placementEntry->shardNode); } /* * CloseShardPlacementAssociation handles a connection being closed before * transaction end. * * This should only be called by connection_management.c. 
 */
void
CloseShardPlacementAssociation(struct MultiConnection *connection)
{
	dlist_iter placementIter;

	/* set connection to NULL for all references to the connection */
	dlist_foreach(placementIter, &connection->referencedPlacements)
	{
		ConnectionReference *reference =
			dlist_container(ConnectionReference, connectionNode, placementIter.cur);

		reference->connection = NULL;

		/*
		 * Note that we don't reset ConnectionPlacementHashEntry's
		 * primaryConnection here, that'd be more complicated than it seems
		 * worth. That means we'll error out spuriously if a DML/DDL
		 * executing connection is closed earlier in a transaction.
		 */
	}
}


/*
 * ResetShardPlacementAssociation resets the association of connections to
 * shard placements at the end of a transaction.
 *
 * This should only be called by connection_management.c.
 */
void
ResetShardPlacementAssociation(struct MultiConnection *connection)
{
	dlist_init(&connection->referencedPlacements);
}


/*
 * ResetPlacementConnectionManagement() disassociates connections from
 * placements and shards. This will be called at the end of XACT_EVENT_COMMIT
 * and XACT_EVENT_ABORT.
 */
void
ResetPlacementConnectionManagement(void)
{
	/* Simply delete all entries */
	hash_delete_all(ConnectionPlacementHash);
	hash_delete_all(ConnectionShardHash);
	hash_delete_all(ColocatedPlacementsHash);

	/*
	 * NB: memory for ConnectionReference structs and subordinate data is
	 * deleted by virtue of being allocated in TopTransactionContext.
	 */
}


/*
 * MarkFailedShardPlacements looks through every connection in the connection shard hash
 * and marks the placements associated with failed connections invalid.
 *
 * Every shard must have at least one placement connection which did not fail. If all
 * modifying connections for a shard failed then the transaction will be aborted.
 *
 * This will be called just before commit, so we can abort before executing remote
 * commits. It should also be called after modification statements, to ensure that we
 * don't run future statements against placements which are not up to date.
 */
void
MarkFailedShardPlacements()
{
	HASH_SEQ_STATUS status;
	ConnectionShardHashEntry *shardEntry = NULL;

	hash_seq_init(&status, ConnectionShardHash);
	while ((shardEntry = (ConnectionShardHashEntry *) hash_seq_search(&status)) != 0)
	{
		if (!CheckShardPlacements(shardEntry))
		{
			ereport(ERROR,
					(errmsg("could not make changes to shard " INT64_FORMAT
							" on any node",
							shardEntry->key.shardId)));
		}
	}
}


/*
 * PostCommitMarkFailedShardPlacements marks placements invalid and checks whether
 * sufficiently many placements have failed to abort the entire coordinated
 * transaction.
 *
 * This will be called just after a coordinated commit so we can handle remote
 * transactions which failed during commit.
 *
 * When using2PC is set, at least one placement must succeed per shard. If all placements
 * fail for a shard the entire transaction is aborted. If using2PC is not set then only
 * a warning will be emitted; we cannot abort because some remote transactions might have
 * already been committed.
 */
void
PostCommitMarkFailedShardPlacements(bool using2PC)
{
	HASH_SEQ_STATUS status;
	ConnectionShardHashEntry *shardEntry = NULL;
	int successes = 0;
	int attempts = 0;
	int elevel = using2PC ? ERROR : WARNING;

	hash_seq_init(&status, ConnectionShardHash);
	while ((shardEntry = (ConnectionShardHashEntry *) hash_seq_search(&status)) != 0)
	{
		attempts++;
		if (CheckShardPlacements(shardEntry))
		{
			successes++;
		}
		else
		{
			/*
			 * Only error out if we're using 2PC. If we're not using 2PC we can't error
			 * out, otherwise we can end up with a state where some shard modifications
			 * have already committed successfully.
			 */
			ereport(elevel, (errmsg("could not commit transaction for shard " INT64_FORMAT
									" on any active node",
									shardEntry->key.shardId)));
		}
	}

	/*
	 * If no shards could be modified at all, error out. Doesn't matter whether
	 * we're post-commit - there's nothing to invalidate.
	 */
	if (attempts > 0 && successes == 0)
	{
		ereport(ERROR, (errmsg("could not commit transaction on any active node")));
	}
}


/*
 * CheckShardPlacements is a helper function for MarkFailedShardPlacements and
 * PostCommitMarkFailedShardPlacements that performs the per-shard work.
 */
static bool
CheckShardPlacements(ConnectionShardHashEntry *shardEntry)
{
	int failures = 0;
	int successes = 0;
	dlist_iter placementIter;

	dlist_foreach(placementIter, &shardEntry->placementConnections)
	{
		ConnectionPlacementHashEntry *placementEntry =
			dlist_container(ConnectionPlacementHashEntry, shardNode, placementIter.cur);
		ConnectionReference *primaryConnection = placementEntry->primaryConnection;
		MultiConnection *connection = NULL;

		/* we only consider shards that are modified */
		if (primaryConnection == NULL ||
			!(primaryConnection->hadDDL || primaryConnection->hadDML))
		{
			continue;
		}

		connection = primaryConnection->connection;

		if (!connection || connection->remoteTransaction.transactionFailed)
		{
			placementEntry->failed = true;
			failures++;
		}
		else
		{
			successes++;
		}
	}

	if (failures > 0 && successes == 0)
	{
		return false;
	}

	/* mark all failed placements invalid */
	dlist_foreach(placementIter, &shardEntry->placementConnections)
	{
		ConnectionPlacementHashEntry *placementEntry =
			dlist_container(ConnectionPlacementHashEntry, shardNode, placementIter.cur);

		if (placementEntry->failed)
		{
			uint64 shardId = shardEntry->key.shardId;
			uint64 placementId = placementEntry->key.placementId;
			GroupShardPlacement *shardPlacement =
				LoadGroupShardPlacement(shardId, placementId);

			/*
			 * We only set shard state if its current state is FILE_FINALIZED, which
			 * prevents overwriting shard state if it is already set somewhere else.
			 */
			if (shardPlacement->shardState == FILE_FINALIZED)
			{
				UpdateShardPlacementState(placementEntry->key.placementId, FILE_INACTIVE);
			}
		}
	}

	return true;
}


/*
 * InitPlacementConnectionManagement performs initialization of the
 * infrastructure in this file at server start.
*/ void InitPlacementConnectionManagement(void) { HASHCTL info; uint32 hashFlags = 0; /* create (placementId) -> [ConnectionReference] hash */ memset(&info, 0, sizeof(info)); info.keysize = sizeof(ConnectionPlacementHashKey); info.entrysize = sizeof(ConnectionPlacementHashEntry); info.hash = tag_hash; info.hcxt = ConnectionContext; hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); ConnectionPlacementHash = hash_create("citus connection cache (placementid)", 64, &info, hashFlags); /* create (colocated placement identity) -> [ConnectionReference] hash */ memset(&info, 0, sizeof(info)); info.keysize = sizeof(ColocatedPlacementsHashKey); info.entrysize = sizeof(ColocatedPlacementsHashEntry); info.hash = ColocatedPlacementsHashHash; info.match = ColocatedPlacementsHashCompare; info.hcxt = ConnectionContext; hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); ColocatedPlacementsHash = hash_create("citus connection cache (colocated placements)", 64, &info, hashFlags); /* create (shardId) -> [ConnectionShardHashEntry] hash */ memset(&info, 0, sizeof(info)); info.keysize = sizeof(ConnectionShardHashKey); info.entrysize = sizeof(ConnectionShardHashEntry); info.hash = tag_hash; info.hcxt = ConnectionContext; hashFlags = (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); ConnectionShardHash = hash_create("citus connection cache (shardid)", 64, &info, hashFlags); } static uint32 ColocatedPlacementsHashHash(const void *key, Size keysize) { ColocatedPlacementsHashKey *entry = (ColocatedPlacementsHashKey *) key; uint32 hash = 0; hash = string_hash(entry->nodeName, NAMEDATALEN); hash = hash_combine(hash, hash_uint32(entry->nodePort)); hash = hash_combine(hash, hash_uint32(entry->colocationGroupId)); hash = hash_combine(hash, hash_uint32(entry->representativeValue)); return hash; } static int ColocatedPlacementsHashCompare(const void *a, const void *b, Size keysize) { ColocatedPlacementsHashKey *ca = (ColocatedPlacementsHashKey *) a; ColocatedPlacementsHashKey *cb = (ColocatedPlacementsHashKey *) b; if (strncmp(ca->nodeName, cb->nodeName, MAX_NODE_LENGTH) != 0 || ca->nodePort != cb->nodePort || ca->colocationGroupId != cb->colocationGroupId || ca->representativeValue != cb->representativeValue) { return 1; } else { return 0; } } citus-7.0.3/src/backend/distributed/connection/remote_commands.c000066400000000000000000000547341317107136600250510ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * remote_commands.c * Helpers to make it easier to execute command on remote nodes. * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "pgstat.h" #include "libpq-fe.h" #include "distributed/connection_management.h" #include "distributed/remote_commands.h" #include "lib/stringinfo.h" #include "miscadmin.h" #include "storage/latch.h" #include "utils/palloc.h" /* GUC, determining whether statements sent to remote nodes are logged */ bool LogRemoteCommands = false; static bool FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts); static WaitEventSet * BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount, int pendingConnectionsStartIndex); /* simple helpers */ /* * IsResponseOK checks whether the result is a successful one. 
*/ bool IsResponseOK(PGresult *result) { ExecStatusType resultStatus = PQresultStatus(result); if (resultStatus == PGRES_SINGLE_TUPLE || resultStatus == PGRES_TUPLES_OK || resultStatus == PGRES_COMMAND_OK) { return true; } return false; } /* * ForgetResults clears a connection from pending activity. * * Note that this might require network IO. If that's not acceptable, use * NonblockingForgetResults(). * * ClearResults is variant of this function which can also raise errors. */ void ForgetResults(MultiConnection *connection) { while (true) { PGresult *result = NULL; const bool dontRaiseErrors = false; result = GetRemoteCommandResult(connection, dontRaiseErrors); if (result == NULL) { break; } if (PQresultStatus(result) == PGRES_COPY_IN) { PQputCopyEnd(connection->pgConn, NULL); /* TODO: mark transaction as failed, once we can. */ } PQclear(result); } } /* * ClearResults clears a connection from pending activity, * returns true if all pending commands return success. It raises * error if raiseErrors flag is set, any command fails and transaction * is marked critical. * * Note that this might require network IO. If that's not acceptable, use * NonblockingForgetResults(). */ bool ClearResults(MultiConnection *connection, bool raiseErrors) { bool success = true; while (true) { PGresult *result = GetRemoteCommandResult(connection, raiseErrors); if (result == NULL) { break; } /* * End any pending copy operation. Transaction will be marked * as failed by the following part. */ if (PQresultStatus(result) == PGRES_COPY_IN) { PQputCopyEnd(connection->pgConn, NULL); } if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); MarkRemoteTransactionFailed(connection, raiseErrors); success = false; } PQclear(result); } return success; } /* * NonblockingForgetResults clears a connection from pending activity if doing * so does not require network IO. Returns true if successful, false * otherwise. */ bool NonblockingForgetResults(MultiConnection *connection) { PGconn *pgConn = connection->pgConn; if (PQstatus(pgConn) != CONNECTION_OK) { return false; } Assert(PQisnonblocking(pgConn)); while (true) { PGresult *result = NULL; /* just in case there's a lot of results */ CHECK_FOR_INTERRUPTS(); /* * If busy, there might still be results already received and buffered * by the OS. As connection is in non-blocking mode, we can check for * that without blocking. */ if (PQisBusy(pgConn)) { if (PQflush(pgConn) == -1) { /* write failed */ return false; } if (PQconsumeInput(pgConn) == 0) { /* some low-level failure */ return false; } } /* clearing would require blocking IO, return */ if (PQisBusy(pgConn)) { return false; } result = PQgetResult(pgConn); if (PQresultStatus(result) == PGRES_COPY_IN) { /* in copy, can't reliably recover without blocking */ return false; } if (result == NULL) { return true; } PQclear(result); } pg_unreachable(); } /* * SqlStateMatchesCategory returns true if the given sql state (which may be * NULL if unknown) is in the given error category. Note that we use * ERRCODE_TO_CATEGORY macro to determine error category of the sql state and * expect the caller to use the same macro for the error category. 
*/ bool SqlStateMatchesCategory(char *sqlStateString, int category) { bool sqlStateMatchesCategory = false; int sqlState = 0; int sqlStateCategory = 0; if (sqlStateString == NULL) { return false; } sqlState = MAKE_SQLSTATE(sqlStateString[0], sqlStateString[1], sqlStateString[2], sqlStateString[3], sqlStateString[4]); sqlStateCategory = ERRCODE_TO_CATEGORY(sqlState); if (sqlStateCategory == category) { sqlStateMatchesCategory = true; } return sqlStateMatchesCategory; } /* report errors & warnings */ /* * Report libpq failure that's not associated with a result. */ void ReportConnectionError(MultiConnection *connection, int elevel) { char *nodeName = connection->hostname; int nodePort = connection->port; ereport(elevel, (errmsg("connection error: %s:%d", nodeName, nodePort), errdetail("%s", pchomp(PQerrorMessage(connection->pgConn))))); } /* * ReportResultError reports libpq failure associated with a result. */ void ReportResultError(MultiConnection *connection, PGresult *result, int elevel) { /* we release PQresult when throwing an error because the caller can't */ PG_TRY(); { char *sqlStateString = PQresultErrorField(result, PG_DIAG_SQLSTATE); char *messagePrimary = PQresultErrorField(result, PG_DIAG_MESSAGE_PRIMARY); char *messageDetail = PQresultErrorField(result, PG_DIAG_MESSAGE_DETAIL); char *messageHint = PQresultErrorField(result, PG_DIAG_MESSAGE_HINT); char *messageContext = PQresultErrorField(result, PG_DIAG_CONTEXT); char *nodeName = connection->hostname; int nodePort = connection->port; int sqlState = ERRCODE_INTERNAL_ERROR; if (sqlStateString != NULL) { sqlState = MAKE_SQLSTATE(sqlStateString[0], sqlStateString[1], sqlStateString[2], sqlStateString[3], sqlStateString[4]); } /* * If the PGresult did not contain a message, the connection may provide a * suitable top level one. At worst, this is an empty string. */ if (messagePrimary == NULL) { messagePrimary = pchomp(PQerrorMessage(connection->pgConn)); } ereport(elevel, (errcode(sqlState), errmsg("%s", messagePrimary), messageDetail ? errdetail("%s", messageDetail) : 0, messageHint ? errhint("%s", messageHint) : 0, messageContext ? errcontext("%s", messageContext) : 0, errcontext("while executing command on %s:%d", nodeName, nodePort))); } PG_CATCH(); { PQclear(result); PG_RE_THROW(); } PG_END_TRY(); } /* *INDENT-OFF* */ #if (PG_VERSION_NUM < 100000) /* * Make copy of string with all trailing newline characters removed. */ char * pchomp(const char *in) { size_t n; n = strlen(in); while (n > 0 && in[n - 1] == '\n') n--; return pnstrdup(in, n); } #endif /* *INDENT-ON* */ /* * LogRemoteCommand logs commands send to remote nodes if * citus.log_remote_commands wants us to do so. */ void LogRemoteCommand(MultiConnection *connection, const char *command) { if (!LogRemoteCommands) { return; } ereport(LOG, (errmsg("issuing %s", command), errdetail("on server %s:%d", connection->hostname, connection->port))); } /* wrappers around libpq functions, with command logging support */ /* * ExecuteCriticalRemoteCommand executes a remote command that is critical * to the transaction. If the command fails then the transaction aborts. 
*/ void ExecuteCriticalRemoteCommand(MultiConnection *connection, const char *command) { int querySent = 0; PGresult *result = NULL; bool raiseInterrupts = true; querySent = SendRemoteCommand(connection, command); if (querySent == 0) { ReportConnectionError(connection, ERROR); } result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { ReportResultError(connection, result, ERROR); } PQclear(result); ForgetResults(connection); } /* * ExecuteOptionalRemoteCommand executes a remote command. If the command fails a WARNING * is emitted but execution continues. * * could return 0, QUERY_SEND_FAILED, or RESPONSE_NOT_OKAY * result is only set if there was no error */ int ExecuteOptionalRemoteCommand(MultiConnection *connection, const char *command, PGresult **result) { int querySent = 0; PGresult *localResult = NULL; bool raiseInterrupts = true; querySent = SendRemoteCommand(connection, command); if (querySent == 0) { ReportConnectionError(connection, WARNING); return QUERY_SEND_FAILED; } localResult = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(localResult)) { ReportResultError(connection, localResult, WARNING); PQclear(localResult); ForgetResults(connection); return RESPONSE_NOT_OKAY; } *result = localResult; return 0; } /* * SendRemoteCommandParams is a PQsendQueryParams wrapper that logs remote commands, * and accepts a MultiConnection instead of a plain PGconn. It makes sure it can * send commands asynchronously without blocking (at the potential expense of * an additional memory allocation). The command string can only include a single * command since PQsendQueryParams() supports only that. */ int SendRemoteCommandParams(MultiConnection *connection, const char *command, int parameterCount, const Oid *parameterTypes, const char *const *parameterValues) { PGconn *pgConn = connection->pgConn; int rc = 0; LogRemoteCommand(connection, command); /* * Don't try to send command if connection is entirely gone * (PQisnonblocking() would crash). */ if (!pgConn) { return 0; } Assert(PQisnonblocking(pgConn)); rc = PQsendQueryParams(pgConn, command, parameterCount, parameterTypes, parameterValues, NULL, NULL, 0); return rc; } /* * SendRemoteCommand is a PQsendQuery wrapper that logs remote commands, and * accepts a MultiConnection instead of a plain PGconn. It makes sure it can * send commands asynchronously without blocking (at the potential expense of * an additional memory allocation). The command string can include multiple * commands since PQsendQuery() supports that. */ int SendRemoteCommand(MultiConnection *connection, const char *command) { PGconn *pgConn = connection->pgConn; int rc = 0; LogRemoteCommand(connection, command); /* * Don't try to send command if connection is entirely gone * (PQisnonblocking() would crash). */ if (!pgConn) { return 0; } Assert(PQisnonblocking(pgConn)); rc = PQsendQuery(pgConn, command); return rc; } /* * ReadFirstColumnAsText reads the first column of result tuples from the given * PGresult struct and returns them in a StringInfo list. 
*/ List * ReadFirstColumnAsText(PGresult *queryResult) { List *resultRowList = NIL; const int columnIndex = 0; int64 rowIndex = 0; int64 rowCount = 0; ExecStatusType status = PQresultStatus(queryResult); if (status == PGRES_TUPLES_OK) { rowCount = PQntuples(queryResult); } for (rowIndex = 0; rowIndex < rowCount; rowIndex++) { char *rowValue = PQgetvalue(queryResult, rowIndex, columnIndex); StringInfo rowValueString = makeStringInfo(); appendStringInfoString(rowValueString, rowValue); resultRowList = lappend(resultRowList, rowValueString); } return resultRowList; } /* * GetRemoteCommandResult is a wrapper around PQgetResult() that handles interrupts. * * If raiseInterrupts is true and an interrupt arrives, e.g. the query is * being cancelled, CHECK_FOR_INTERRUPTS() will be called, which then throws * an error. * * If raiseInterrupts is false and an interrupt arrives that'd otherwise raise * an error, GetRemoteCommandResult returns NULL, and the transaction is * marked as having failed. While that's not a perfect way to signal failure, * callers will usually treat that as an error, and it's easy to use. * * Handling of interrupts is important to allow queries being cancelled while * waiting on remote nodes. In a distributed deadlock scenario cancelling * might be the only way to resolve the deadlock. */ PGresult * GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts) { PGconn *pgConn = connection->pgConn; PGresult *result = NULL; /* * Short circuit tests around the more expensive parts of this * routine. This'd also trigger a return in the, unlikely, case of a * failed/nonexistant connection. */ if (!PQisBusy(pgConn)) { return PQgetResult(connection->pgConn); } if (!FinishConnectionIO(connection, raiseInterrupts)) { return NULL; } /* no IO should be necessary to get result */ Assert(!PQisBusy(pgConn)); result = PQgetResult(connection->pgConn); return result; } /* * PutRemoteCopyData is a wrapper around PQputCopyData() that handles * interrupts. * * Returns false if PQputCopyData() failed, true otherwise. */ bool PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes) { PGconn *pgConn = connection->pgConn; int copyState = 0; bool allowInterrupts = true; if (PQstatus(pgConn) != CONNECTION_OK) { return false; } Assert(PQisnonblocking(pgConn)); copyState = PQputCopyData(pgConn, buffer, nbytes); if (copyState == -1) { return false; } /* * PQputCopyData may have queued up part of the data even if it managed * to send some of it succesfully. We provide back pressure by waiting * until the socket is writable to prevent the internal libpq buffers * from growing excessively. * * In the future, we could reduce the frequency of these pushbacks to * achieve higher throughput. */ return FinishConnectionIO(connection, allowInterrupts); } /* * PutRemoteCopyEnd is a wrapper around PQputCopyEnd() that handles * interrupts. * * Returns false if PQputCopyEnd() failed, true otherwise. */ bool PutRemoteCopyEnd(MultiConnection *connection, const char *errormsg) { PGconn *pgConn = connection->pgConn; int copyState = 0; bool allowInterrupts = true; if (PQstatus(pgConn) != CONNECTION_OK) { return false; } Assert(PQisnonblocking(pgConn)); copyState = PQputCopyEnd(pgConn, errormsg); if (copyState == -1) { return false; } /* see PutRemoteCopyData() */ return FinishConnectionIO(connection, allowInterrupts); } /* * FinishConnectionIO performs pending IO for the connection, while accepting * interrupts. 
* * See GetRemoteCommandResult() for documentation of interrupt handling * behaviour. * * Returns true if IO was successfully completed, false otherwise. */ static bool FinishConnectionIO(MultiConnection *connection, bool raiseInterrupts) { PGconn *pgConn = connection->pgConn; int socket = PQsocket(pgConn); Assert(pgConn); Assert(PQisnonblocking(pgConn)); if (raiseInterrupts) { CHECK_FOR_INTERRUPTS(); } /* perform the necessary IO */ while (true) { int sendStatus = 0; int rc = 0; int waitFlags = WL_POSTMASTER_DEATH | WL_LATCH_SET; /* try to send all pending data */ sendStatus = PQflush(pgConn); /* if sending failed, there's nothing more we can do */ if (sendStatus == -1) { return false; } else if (sendStatus == 1) { waitFlags |= WL_SOCKET_WRITEABLE; } /* if reading fails, there's not much we can do */ if (PQconsumeInput(pgConn) == 0) { return false; } if (PQisBusy(pgConn)) { waitFlags |= WL_SOCKET_READABLE; } if ((waitFlags & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)) == 0) { /* no IO necessary anymore, we're done */ return true; } #if (PG_VERSION_NUM >= 100000) rc = WaitLatchOrSocket(MyLatch, waitFlags, socket, 0, PG_WAIT_EXTENSION); #else rc = WaitLatchOrSocket(MyLatch, waitFlags, socket, 0); #endif if (rc & WL_POSTMASTER_DEATH) { ereport(ERROR, (errmsg("postmaster was shut down, exiting"))); } if (rc & WL_LATCH_SET) { ResetLatch(MyLatch); /* if allowed raise errors */ if (raiseInterrupts) { CHECK_FOR_INTERRUPTS(); } /* * If raising errors allowed, or called within in a section with * interrupts held, return instead, and mark the transaction as * failed. */ if (InterruptHoldoffCount > 0 && (QueryCancelPending || ProcDiePending)) { connection->remoteTransaction.transactionFailed = true; break; } } } return false; } /* * WaitForAllConnections blocks until all connections in the list are no * longer busy, meaning the pending command has either finished or failed. */ void WaitForAllConnections(List *connectionList, bool raiseInterrupts) { int totalConnectionCount = list_length(connectionList); int pendingConnectionsStartIndex = 0; int connectionIndex = 0; ListCell *connectionCell = NULL; MultiConnection *allConnections[totalConnectionCount]; WaitEvent events[totalConnectionCount]; bool connectionReady[totalConnectionCount]; WaitEventSet *waitEventSet = NULL; /* convert connection list to an array such that we can move items around */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); allConnections[connectionIndex] = connection; connectionReady[connectionIndex] = false; connectionIndex++; } /* make an initial pass to check for failed and idle connections */ for (connectionIndex = pendingConnectionsStartIndex; connectionIndex < totalConnectionCount; connectionIndex++) { MultiConnection *connection = allConnections[connectionIndex]; if (PQstatus(connection->pgConn) == CONNECTION_BAD || !PQisBusy(connection->pgConn)) { /* connection is already done; keep non-ready connections at the end */ allConnections[connectionIndex] = allConnections[pendingConnectionsStartIndex]; pendingConnectionsStartIndex++; } } PG_TRY(); { bool rebuildWaitEventSet = true; while (pendingConnectionsStartIndex < totalConnectionCount) { int eventIndex = 0; int eventCount = 0; long timeout = -1; int pendingConnectionCount = totalConnectionCount - pendingConnectionsStartIndex; /* * We cannot disable wait events as of postgres 9.6, so we rebuild the * WaitEventSet whenever connections are ready. 
*/ if (rebuildWaitEventSet) { if (waitEventSet != NULL) { FreeWaitEventSet(waitEventSet); } waitEventSet = BuildWaitEventSet(allConnections, totalConnectionCount, pendingConnectionsStartIndex); rebuildWaitEventSet = false; } /* wait for I/O events */ #if (PG_VERSION_NUM >= 100000) eventCount = WaitEventSetWait(waitEventSet, timeout, events, pendingConnectionCount, WAIT_EVENT_CLIENT_READ); #else eventCount = WaitEventSetWait(waitEventSet, timeout, events, pendingConnectionCount); #endif /* process I/O events */ for (; eventIndex < eventCount; eventIndex++) { WaitEvent *event = &events[eventIndex]; MultiConnection *connection = NULL; bool connectionIsReady = false; if (event->events & WL_POSTMASTER_DEATH) { ereport(ERROR, (errmsg("postmaster was shut down, exiting"))); } if (event->events & WL_LATCH_SET) { ResetLatch(MyLatch); if (raiseInterrupts) { CHECK_FOR_INTERRUPTS(); } if (InterruptHoldoffCount > 0 && (QueryCancelPending || ProcDiePending)) { /* return immediately in case of cancellation */ FreeWaitEventSet(waitEventSet); return; } continue; } connection = (MultiConnection *) event->user_data; connectionIndex = event->pos + pendingConnectionsStartIndex; if (event->events & WL_SOCKET_WRITEABLE) { int sendStatus = PQflush(connection->pgConn); if (sendStatus == -1) { /* send failed, done with this connection */ connectionIsReady = true; } else if (sendStatus == 0) { /* done writing, only wait for read events */ ModifyWaitEvent(waitEventSet, connectionIndex, WL_SOCKET_READABLE, NULL); } } if (event->events & WL_SOCKET_READABLE) { int receiveStatus = PQconsumeInput(connection->pgConn); if (receiveStatus == 0) { /* receive failed, done with this connection */ connectionIsReady = true; } else if (!PQisBusy(connection->pgConn)) { /* result was received */ connectionIsReady = true; } } if (connectionIsReady) { connectionReady[connectionIndex] = true; rebuildWaitEventSet = true; } } /* move non-ready connections to the back of the array */ for (connectionIndex = pendingConnectionsStartIndex; connectionIndex < totalConnectionCount; connectionIndex++) { if (connectionReady[connectionIndex]) { allConnections[connectionIndex] = allConnections[pendingConnectionsStartIndex]; pendingConnectionsStartIndex++; } } } if (waitEventSet != NULL) { FreeWaitEventSet(waitEventSet); waitEventSet = NULL; } } PG_CATCH(); { /* make sure the epoll file descriptor is always closed */ if (waitEventSet != NULL) { FreeWaitEventSet(waitEventSet); waitEventSet = NULL; } PG_RE_THROW(); } PG_END_TRY(); } /* * BuildWaitEventSet creates a WaitEventSet for the given array of connections * which can be used to wait for any of the sockets to become read-ready, or * write-ready in case there is data to send. 
*/ static WaitEventSet * BuildWaitEventSet(MultiConnection **allConnections, int totalConnectionCount, int pendingConnectionsStartIndex) { int pendingConnectionCount = totalConnectionCount - pendingConnectionsStartIndex; WaitEventSet *waitEventSet = NULL; int connectionIndex = 0; /* allocate pending connections + 2 for the signal latch and postmaster death */ waitEventSet = CreateWaitEventSet(CurrentMemoryContext, pendingConnectionCount + 2); for (connectionIndex = pendingConnectionsStartIndex; connectionIndex < totalConnectionCount; connectionIndex++) { MultiConnection *connection = allConnections[connectionIndex]; int socket = PQsocket(connection->pgConn); int eventMask = WL_SOCKET_READABLE; int sendStatus = PQflush(connection->pgConn); if (sendStatus == 1) { /* we have data to send, wake up when the socket is ready to write */ eventMask = WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE; } AddWaitEventToSet(waitEventSet, eventMask, socket, NULL, (void *) connection); } /* * Put the wait events for the signal latch and postmaster death at the end such that * event index + pendingConnectionsStartIndex = the connection index in the array. */ AddWaitEventToSet(waitEventSet, WL_POSTMASTER_DEATH, PGINVALID_SOCKET, NULL, NULL); AddWaitEventToSet(waitEventSet, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL); return waitEventSet; } citus-7.0.3/src/backend/distributed/executor/000077500000000000000000000000001317107136600212135ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/executor/insert_select_executor.c000066400000000000000000000127401317107136600261440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * insert_select_executor.c * * Executor logic for INSERT..SELECT. * * Copyright (c) 2017, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "distributed/insert_select_executor.h" #include "distributed/insert_select_planner.h" #include "distributed/multi_copy.h" #include "distributed/multi_executor.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_planner.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "executor/executor.h" #include "nodes/execnodes.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" #include "parser/parse_coerce.h" #include "parser/parsetree.h" #include "tcop/pquery.h" #include "tcop/tcopprot.h" #include "utils/lsyscache.h" #include "utils/portal.h" #include "utils/snapmgr.h" static void ExecuteSelectIntoRelation(Oid targetRelationId, List *insertTargetList, Query *selectQuery, EState *executorState); static void ExecuteIntoDestReceiver(Query *query, ParamListInfo params, DestReceiver *dest); /* * CoordinatorInsertSelectExecScan executes an INSERT INTO distributed_table * SELECT .. query by setting up a DestReceiver that copies tuples into the * distributed table and then executing the SELECT query using that DestReceiver * as the tuple destination. 
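 *
 * For example (hypothetical schema), a statement such as
 *
 *   INSERT INTO target_table (key, value)
 *   SELECT key, max(value) FROM source_table GROUP BY key;
 *
 * where target_table is distributed, is handled here by running the SELECT
 * and pushing every produced row through a CitusCopyDestReceiver into
 * target_table.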
*/ TupleTableSlot * CoordinatorInsertSelectExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { EState *executorState = scanState->customScanState.ss.ps.state; MultiPlan *multiPlan = scanState->multiPlan; Query *selectQuery = multiPlan->insertSelectSubquery; List *insertTargetList = multiPlan->insertTargetList; Oid targetRelationId = multiPlan->targetRelationId; ereport(DEBUG1, (errmsg("Collecting INSERT ... SELECT results on coordinator"))); /* * If we are dealing with partitioned table, we also need to lock its * partitions. Here we only lock targetRelation, we acquire necessary * locks on selected tables during execution of those select queries. */ if (PartitionedTable(targetRelationId)) { LockPartitionRelations(targetRelationId, RowExclusiveLock); } ExecuteSelectIntoRelation(targetRelationId, insertTargetList, selectQuery, executorState); scanState->finishedRemoteScan = true; } resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } /* * ExecuteSelectIntoRelation executes given SELECT query and inserts the * results into the target relation, which is assumed to be a distributed * table. */ static void ExecuteSelectIntoRelation(Oid targetRelationId, List *insertTargetList, Query *selectQuery, EState *executorState) { ParamListInfo paramListInfo = executorState->es_param_list_info; ListCell *insertTargetCell = NULL; List *columnNameList = NIL; bool stopOnFailure = false; char partitionMethod = 0; Var *partitionColumn = NULL; int partitionColumnIndex = -1; CitusCopyDestReceiver *copyDest = NULL; partitionMethod = PartitionMethod(targetRelationId); if (partitionMethod == DISTRIBUTE_BY_NONE) { stopOnFailure = true; } partitionColumn = PartitionColumn(targetRelationId, 0); /* build the list of column names for the COPY statement */ foreach(insertTargetCell, insertTargetList) { TargetEntry *insertTargetEntry = (TargetEntry *) lfirst(insertTargetCell); char *columnName = insertTargetEntry->resname; /* load the column information from pg_attribute */ AttrNumber attrNumber = get_attnum(targetRelationId, columnName); /* check whether this is the partition column */ if (partitionColumn != NULL && attrNumber == partitionColumn->varattno) { Assert(partitionColumnIndex == -1); partitionColumnIndex = list_length(columnNameList); } columnNameList = lappend(columnNameList, insertTargetEntry->resname); } /* set up a DestReceiver that copies into the distributed table */ copyDest = CreateCitusCopyDestReceiver(targetRelationId, columnNameList, partitionColumnIndex, executorState, stopOnFailure); ExecuteIntoDestReceiver(selectQuery, paramListInfo, (DestReceiver *) copyDest); executorState->es_processed = copyDest->tuplesSent; XactModificationLevel = XACT_MODIFICATION_DATA; } /* * ExecuteIntoDestReceiver plans and executes a query and sends results to the given * DestReceiver. 
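 *
 * Internally this creates a portal that is hidden from pg_cursors, plans the
 * query with pg_plan_query() (the resulting plan may itself be another
 * distributed plan), and drives it to completion with PortalRun() using
 * FETCH_ALL.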
*/ static void ExecuteIntoDestReceiver(Query *query, ParamListInfo params, DestReceiver *dest) { PlannedStmt *queryPlan = NULL; Portal portal = NULL; int eflags = 0; int cursorOptions = 0; long count = FETCH_ALL; /* create a new portal for executing the query */ portal = CreateNewPortal(); /* don't display the portal in pg_cursors, it is for internal use only */ portal->visible = false; cursorOptions = CURSOR_OPT_PARALLEL_OK; /* plan the subquery, this may be another distributed query */ queryPlan = pg_plan_query(query, cursorOptions, params); PortalDefineQuery(portal, NULL, "", "SELECT", list_make1(queryPlan), NULL); PortalStart(portal, params, eflags, GetActiveSnapshot()); #if (PG_VERSION_NUM >= 100000) PortalRun(portal, count, false, true, dest, dest, NULL); #else PortalRun(portal, count, false, dest, dest, NULL); #endif PortalDrop(portal, false); } citus-7.0.3/src/backend/distributed/executor/multi_client_executor.c000066400000000000000000000570401317107136600257730ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_client_executor.c * * This file contains the libpq-specific parts of executing queries on remote * nodes. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "fmgr.h" #include "libpq-fe.h" #include "miscadmin.h" #include "commands/dbcommands.h" #include "distributed/metadata_cache.h" #include "distributed/connection_management.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_server_executor.h" #include "distributed/remote_commands.h" #include #include #include #ifdef HAVE_SYS_POLL_H #include #endif /* Local pool to track active connections */ static MultiConnection *ClientConnectionArray[MAX_CONNECTION_COUNT]; /* * The value at any position on ClientPollingStatusArray is only defined when * the corresponding ClientConnectionArray entry exists. */ static PostgresPollingStatusType ClientPollingStatusArray[MAX_CONNECTION_COUNT]; /* Local functions forward declarations */ static bool ClientConnectionReady(MultiConnection *connection, PostgresPollingStatusType pollingStatus); /* AllocateConnectionId returns a connection id from the connection pool. */ static int32 AllocateConnectionId(void) { int32 connectionId = INVALID_CONNECTION_ID; int32 connIndex = 0; /* allocate connectionId from connection pool */ for (connIndex = 0; connIndex < MAX_CONNECTION_COUNT; connIndex++) { MultiConnection *connection = ClientConnectionArray[connIndex]; if (connection == NULL) { connectionId = connIndex; break; } } return connectionId; } /* * MultiClientConnect synchronously tries to establish a connection. If it * succeeds, it returns the connection id. Otherwise, it reports connection * error and returns INVALID_CONNECTION_ID. * * nodeDatabase and userName can be NULL, in which case values from the * current session are used. 
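 *
 * Illustrative sketch of the synchronous API (hypothetical worker name and
 * query; error handling shortened):
 *
 *   int32 connectionId = MultiClientConnect("worker-1", 5432, NULL, NULL);
 *   if (connectionId != INVALID_CONNECTION_ID)
 *   {
 *       void *queryResult = NULL;
 *       int rowCount = 0;
 *       int columnCount = 0;
 *
 *       if (MultiClientExecute(connectionId, "SELECT 1", &queryResult,
 *                              &rowCount, &columnCount))
 *       {
 *           MultiClientClearResult(queryResult);
 *       }
 *
 *       MultiClientDisconnect(connectionId);
 *   }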
*/ int32 MultiClientConnect(const char *nodeName, uint32 nodePort, const char *nodeDatabase, const char *userName) { MultiConnection *connection = NULL; ConnStatusType connStatusType = CONNECTION_OK; int32 connectionId = AllocateConnectionId(); int connectionFlags = FORCE_NEW_CONNECTION; /* no cached connections for now */ if (connectionId == INVALID_CONNECTION_ID) { ereport(WARNING, (errmsg("could not allocate connection in connection pool"))); return connectionId; } if (XactModificationLevel > XACT_MODIFICATION_NONE) { ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), errmsg("cannot open new connections after the first modification " "command within a transaction"))); } /* establish synchronous connection to worker node */ connection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, userName, nodeDatabase); connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_OK) { ClientConnectionArray[connectionId] = connection; } else { ReportConnectionError(connection, WARNING); CloseConnection(connection); connectionId = INVALID_CONNECTION_ID; } return connectionId; } /* * MultiClientConnectStart asynchronously tries to establish a connection. If it * succeeds, it returns the connection id. Otherwise, it reports connection * error and returns INVALID_CONNECTION_ID. */ int32 MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeDatabase, const char *userName) { MultiConnection *connection = NULL; ConnStatusType connStatusType = CONNECTION_OK; int32 connectionId = AllocateConnectionId(); int connectionFlags = FORCE_NEW_CONNECTION; /* no cached connections for now */ if (connectionId == INVALID_CONNECTION_ID) { ereport(WARNING, (errmsg("could not allocate connection in connection pool"))); return connectionId; } if (XactModificationLevel > XACT_MODIFICATION_NONE) { ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), errmsg("cannot open new connections after the first modification " "command within a transaction"))); } /* prepare asynchronous request for worker node connection */ connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, userName, nodeDatabase); connStatusType = PQstatus(connection->pgConn); /* * If prepared, we save the connection, and set its initial polling status * to PGRES_POLLING_WRITING as specified in "Database Connection Control * Functions" section of the PostgreSQL documentation. */ if (connStatusType != CONNECTION_BAD) { ClientConnectionArray[connectionId] = connection; ClientPollingStatusArray[connectionId] = PGRES_POLLING_WRITING; } else { ReportConnectionError(connection, WARNING); CloseConnection(connection); connectionId = INVALID_CONNECTION_ID; } return connectionId; } /* MultiClientConnectPoll returns the status of client connection. 
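 *
 * Illustrative polling sketch (hypothetical node name and database; a real
 * caller, such as the real-time executor, waits on the socket between polls
 * instead of spinning):
 *
 *   int32 connectionId = MultiClientConnectStart("worker-1", 5432,
 *                                                "postgres", NULL);
 *   if (connectionId != INVALID_CONNECTION_ID)
 *   {
 *       ConnectStatus status = MultiClientConnectPoll(connectionId);
 *
 *       while (status == CLIENT_CONNECTION_BUSY ||
 *              status == CLIENT_CONNECTION_BUSY_READ ||
 *              status == CLIENT_CONNECTION_BUSY_WRITE)
 *       {
 *           status = MultiClientConnectPoll(connectionId);
 *       }
 *
 *       ... status is now CLIENT_CONNECTION_READY or CLIENT_CONNECTION_BAD ...
 *   }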
*/ ConnectStatus MultiClientConnectPoll(int32 connectionId) { MultiConnection *connection = NULL; PostgresPollingStatusType pollingStatus = PGRES_POLLING_OK; ConnectStatus connectStatus = CLIENT_INVALID_CONNECT; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); pollingStatus = ClientPollingStatusArray[connectionId]; if (pollingStatus == PGRES_POLLING_OK) { connectStatus = CLIENT_CONNECTION_READY; } else if (pollingStatus == PGRES_POLLING_READING) { bool readReady = ClientConnectionReady(connection, PGRES_POLLING_READING); if (readReady) { ClientPollingStatusArray[connectionId] = PQconnectPoll(connection->pgConn); connectStatus = CLIENT_CONNECTION_BUSY; } else { connectStatus = CLIENT_CONNECTION_BUSY_READ; } } else if (pollingStatus == PGRES_POLLING_WRITING) { bool writeReady = ClientConnectionReady(connection, PGRES_POLLING_WRITING); if (writeReady) { ClientPollingStatusArray[connectionId] = PQconnectPoll(connection->pgConn); connectStatus = CLIENT_CONNECTION_BUSY; } else { connectStatus = CLIENT_CONNECTION_BUSY_WRITE; } } else if (pollingStatus == PGRES_POLLING_FAILED) { ReportConnectionError(connection, WARNING); connectStatus = CLIENT_CONNECTION_BAD; } return connectStatus; } /* MultiClientDisconnect disconnects the connection. */ void MultiClientDisconnect(int32 connectionId) { MultiConnection *connection = NULL; const int InvalidPollingStatus = -1; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); CloseConnection(connection); ClientConnectionArray[connectionId] = NULL; ClientPollingStatusArray[connectionId] = InvalidPollingStatus; } /* * MultiClientConnectionUp checks if the connection status is up, in other words, * it is not bad. */ bool MultiClientConnectionUp(int32 connectionId) { MultiConnection *connection = NULL; ConnStatusType connStatusType = CONNECTION_OK; bool connectionUp = true; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { connectionUp = false; } return connectionUp; } /* MultiClientExecute synchronously executes a query over the given connection. */ bool MultiClientExecute(int32 connectionId, const char *query, void **queryResult, int *rowCount, int *columnCount) { bool querySent = false; bool queryOK = false; querySent = MultiClientSendQuery(connectionId, query); if (!querySent) { return false; } queryOK = MultiClientQueryResult(connectionId, queryResult, rowCount, columnCount); return queryOK; } /* MultiClientSendQuery sends the given query over the given connection. */ bool MultiClientSendQuery(int32 connectionId, const char *query) { MultiConnection *connection = NULL; bool success = true; int querySent = 0; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); querySent = PQsendQuery(connection->pgConn, query); if (querySent == 0) { char *errorMessage = pchomp(PQerrorMessage(connection->pgConn)); ereport(WARNING, (errmsg("could not send remote query \"%s\"", query), errdetail("Client error: %s", errorMessage))); success = false; } return success; } /* MultiClientCancel cancels the running query on the given connection. 
*/ bool MultiClientCancel(int32 connectionId) { MultiConnection *connection = NULL; PGcancel *cancelObject = NULL; int cancelSent = 0; bool canceled = true; char errorBuffer[STRING_BUFFER_SIZE]; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); cancelObject = PQgetCancel(connection->pgConn); cancelSent = PQcancel(cancelObject, errorBuffer, sizeof(errorBuffer)); if (cancelSent == 0) { ereport(WARNING, (errmsg("could not issue cancel request"), errdetail("Client error: %s", errorBuffer))); canceled = false; } PQfreeCancel(cancelObject); return canceled; } /* MultiClientResultStatus checks result status for an asynchronous query. */ ResultStatus MultiClientResultStatus(int32 connectionId) { MultiConnection *connection = NULL; int consumed = 0; ConnStatusType connStatusType = CONNECTION_OK; ResultStatus resultStatus = CLIENT_INVALID_RESULT_STATUS; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { ereport(WARNING, (errmsg("could not maintain connection to worker node"))); return CLIENT_RESULT_UNAVAILABLE; } /* consume input to allow status change */ consumed = PQconsumeInput(connection->pgConn); if (consumed != 0) { int connectionBusy = PQisBusy(connection->pgConn); if (connectionBusy == 0) { resultStatus = CLIENT_RESULT_READY; } else { resultStatus = CLIENT_RESULT_BUSY; } } else { ereport(WARNING, (errmsg("could not consume data from worker node"))); resultStatus = CLIENT_RESULT_UNAVAILABLE; } return resultStatus; } /* MultiClientQueryResult gets results for an asynchronous query. */ bool MultiClientQueryResult(int32 connectionId, void **queryResult, int *rowCount, int *columnCount) { MultiConnection *connection = NULL; PGresult *result = NULL; ConnStatusType connStatusType = CONNECTION_OK; ExecStatusType resultStatus = PGRES_COMMAND_OK; bool raiseInterrupts = true; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { ereport(WARNING, (errmsg("could not maintain connection to worker node"))); return false; } result = GetRemoteCommandResult(connection, raiseInterrupts); resultStatus = PQresultStatus(result); if (resultStatus == PGRES_TUPLES_OK) { (*queryResult) = (void **) result; (*rowCount) = PQntuples(result); (*columnCount) = PQnfields(result); } else { ReportResultError(connection, result, WARNING); PQclear(result); return false; } /* clear extra result objects */ ForgetResults(connection); return true; } /* * MultiClientBatchResult returns results for a "batch" of queries, meaning a * string containing multiple select statements separated by semicolons. This * function should be called multiple times to retrieve the results for all the * queries, until CLIENT_BATCH_QUERY_DONE is returned (even if a failure occurs). * If a query in the batch fails, the remaining queries will not be executed. On * success, queryResult, rowCount and columnCount will be set to the appropriate * values. After use, queryResult should be cleared using ClientClearResult. 
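 *
 * Illustrative drain loop (hypothetical; assumes the semicolon-separated batch
 * was already sent with MultiClientSendQuery()):
 *
 *   BatchQueryStatus batchStatus = CLIENT_BATCH_QUERY_CONTINUE;
 *
 *   while (batchStatus != CLIENT_BATCH_QUERY_DONE)
 *   {
 *       void *queryResult = NULL;
 *       int rowCount = 0;
 *       int columnCount = 0;
 *
 *       batchStatus = MultiClientBatchResult(connectionId, &queryResult,
 *                                            &rowCount, &columnCount);
 *       if (queryResult != NULL)
 *       {
 *           MultiClientClearResult(queryResult);
 *       }
 *   }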
*/ BatchQueryStatus MultiClientBatchResult(int32 connectionId, void **queryResult, int *rowCount, int *columnCount) { MultiConnection *connection = NULL; PGresult *result = NULL; ConnStatusType connStatusType = CONNECTION_OK; ExecStatusType resultStatus = PGRES_COMMAND_OK; BatchQueryStatus queryStatus = CLIENT_INVALID_BATCH_QUERY; bool raiseInterrupts = true; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); /* set default result */ (*queryResult) = NULL; (*rowCount) = -1; (*columnCount) = -1; connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { ereport(WARNING, (errmsg("could not maintain connection to worker node"))); return CLIENT_BATCH_QUERY_FAILED; } result = GetRemoteCommandResult(connection, raiseInterrupts); if (result == NULL) { return CLIENT_BATCH_QUERY_DONE; } resultStatus = PQresultStatus(result); if (resultStatus == PGRES_TUPLES_OK) { (*queryResult) = (void **) result; (*rowCount) = PQntuples(result); (*columnCount) = PQnfields(result); queryStatus = CLIENT_BATCH_QUERY_CONTINUE; } else if (resultStatus == PGRES_COMMAND_OK) { (*queryResult) = (void **) result; queryStatus = CLIENT_BATCH_QUERY_CONTINUE; } else { ReportResultError(connection, result, WARNING); PQclear(result); queryStatus = CLIENT_BATCH_QUERY_FAILED; } return queryStatus; } /* MultiClientGetValue returns the value of field at the given position. */ char * MultiClientGetValue(void *queryResult, int rowIndex, int columnIndex) { char *value = PQgetvalue((PGresult *) queryResult, rowIndex, columnIndex); return value; } /* MultiClientValueIsNull returns whether the value at the given position is null. */ bool MultiClientValueIsNull(void *queryResult, int rowIndex, int columnIndex) { bool isNull = PQgetisnull((PGresult *) queryResult, rowIndex, columnIndex); return isNull; } /* MultiClientClearResult free's the memory associated with a PGresult. */ void MultiClientClearResult(void *queryResult) { PQclear((PGresult *) queryResult); } /* MultiClientQueryStatus returns the query status. */ QueryStatus MultiClientQueryStatus(int32 connectionId) { MultiConnection *connection = NULL; PGresult *result = NULL; int tupleCount PG_USED_FOR_ASSERTS_ONLY = 0; bool copyResults = false; ConnStatusType connStatusType = CONNECTION_OK; ExecStatusType resultStatus = PGRES_COMMAND_OK; QueryStatus queryStatus = CLIENT_INVALID_QUERY; bool raiseInterrupts = true; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); connStatusType = PQstatus(connection->pgConn); if (connStatusType == CONNECTION_BAD) { ereport(WARNING, (errmsg("could not maintain connection to worker node"))); return CLIENT_QUERY_FAILED; } /* * We now read the result object and check its status. If the result object * isn't ready yet (the caller didn't wait for the connection to be ready), * we will block on this call. */ result = GetRemoteCommandResult(connection, raiseInterrupts); resultStatus = PQresultStatus(result); if (resultStatus == PGRES_COMMAND_OK) { queryStatus = CLIENT_QUERY_DONE; } else if (resultStatus == PGRES_TUPLES_OK) { queryStatus = CLIENT_QUERY_DONE; /* * We use the client executor to only issue a select query that returns * a void value. We therefore should not have more than one value here. 
*/ tupleCount = PQntuples(result); Assert(tupleCount <= 1); } else if (resultStatus == PGRES_COPY_OUT) { queryStatus = CLIENT_QUERY_COPY; copyResults = true; } else { queryStatus = CLIENT_QUERY_FAILED; if (resultStatus == PGRES_COPY_IN) { copyResults = true; } ReportResultError(connection, result, WARNING); } /* clear the result object */ PQclear(result); /* * When using the async query mechanism, we need to keep reading results * until we get null. The exception to this rule is the copy protocol. */ if (!copyResults) { ForgetResults(connection); } return queryStatus; } /* MultiClientCopyData copies data from the file. */ CopyStatus MultiClientCopyData(int32 connectionId, int32 fileDescriptor) { MultiConnection *connection = NULL; char *receiveBuffer = NULL; int consumed = 0; int receiveLength = 0; const int asynchronous = 1; CopyStatus copyStatus = CLIENT_INVALID_COPY; Assert(connectionId != INVALID_CONNECTION_ID); connection = ClientConnectionArray[connectionId]; Assert(connection != NULL); /* * Consume input to handle the case where previous copy operation might have * received zero bytes. */ consumed = PQconsumeInput(connection->pgConn); if (consumed == 0) { ereport(WARNING, (errmsg("could not read data from worker node"))); return CLIENT_COPY_FAILED; } /* receive copy data message in an asynchronous manner */ receiveLength = PQgetCopyData(connection->pgConn, &receiveBuffer, asynchronous); while (receiveLength > 0) { /* received copy data; append these data to file */ int appended = -1; errno = 0; appended = write(fileDescriptor, receiveBuffer, receiveLength); if (appended != receiveLength) { /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) { errno = ENOSPC; } ereport(FATAL, (errcode_for_file_access(), errmsg("could not append to copied file: %m"))); } PQfreemem(receiveBuffer); receiveLength = PQgetCopyData(connection->pgConn, &receiveBuffer, asynchronous); } /* we now check the last received length returned by copy data */ if (receiveLength == 0) { /* we cannot read more data without blocking */ copyStatus = CLIENT_COPY_MORE; } else if (receiveLength == -1) { /* received copy done message */ bool raiseInterrupts = true; PGresult *result = GetRemoteCommandResult(connection, raiseInterrupts); ExecStatusType resultStatus = PQresultStatus(result); if (resultStatus == PGRES_COMMAND_OK) { copyStatus = CLIENT_COPY_DONE; } else { copyStatus = CLIENT_COPY_FAILED; ReportResultError(connection, result, WARNING); } PQclear(result); } else if (receiveLength == -2) { /* received an error */ copyStatus = CLIENT_COPY_FAILED; ReportConnectionError(connection, WARNING); } /* if copy out completed, make sure we drain all results from libpq */ if (receiveLength < 0) { ForgetResults(connection); } return copyStatus; } /* * MultiClientCreateWaitInfo creates a WaitInfo structure, capable of keeping * track of what maxConnections connections are waiting for; to allow * efficiently waiting for all of them at once. * * Connections can be added using MultiClientRegisterWait(). All added * connections can then be waited upon together using MultiClientWait(). */ WaitInfo * MultiClientCreateWaitInfo(int maxConnections) { WaitInfo *waitInfo = palloc(sizeof(WaitInfo)); waitInfo->maxWaiters = maxConnections; waitInfo->pollfds = palloc(maxConnections * sizeof(struct pollfd)); /* initialize remaining fields */ MultiClientResetWaitInfo(waitInfo); return waitInfo; } /* MultiClientResetWaitInfo clears all pending waits from a WaitInfo. 
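 *
 * Typical wait cycle (illustrative sketch mirroring MultiRealTimeExecute();
 * the task bookkeeping is elided):
 *
 *   WaitInfo *waitInfo = MultiClientCreateWaitInfo(list_length(taskList));
 *
 *   while (tasks remain)
 *   {
 *       MultiClientResetWaitInfo(waitInfo);
 *
 *       for each task still in progress:
 *           MultiClientRegisterWait(waitInfo, executionStatus, connectionId);
 *
 *       MultiClientWait(waitInfo);
 *   }
 *
 *   MultiClientFreeWaitInfo(waitInfo);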
*/ void MultiClientResetWaitInfo(WaitInfo *waitInfo) { waitInfo->registeredWaiters = 0; waitInfo->haveReadyWaiter = false; waitInfo->haveFailedWaiter = false; } /* MultiClientFreeWaitInfo frees a resources associated with a waitInfo struct. */ void MultiClientFreeWaitInfo(WaitInfo *waitInfo) { pfree(waitInfo->pollfds); pfree(waitInfo); } /* * MultiClientRegisterWait adds a connection to be waited upon, waiting for * executionStatus. */ void MultiClientRegisterWait(WaitInfo *waitInfo, TaskExecutionStatus executionStatus, int32 connectionId) { MultiConnection *connection = NULL; struct pollfd *pollfd = NULL; Assert(waitInfo->registeredWaiters < waitInfo->maxWaiters); if (executionStatus == TASK_STATUS_READY) { waitInfo->haveReadyWaiter = true; return; } else if (executionStatus == TASK_STATUS_ERROR) { waitInfo->haveFailedWaiter = true; return; } connection = ClientConnectionArray[connectionId]; pollfd = &waitInfo->pollfds[waitInfo->registeredWaiters]; pollfd->fd = PQsocket(connection->pgConn); if (executionStatus == TASK_STATUS_SOCKET_READ) { pollfd->events = POLLERR | POLLIN; } else if (executionStatus == TASK_STATUS_SOCKET_WRITE) { pollfd->events = POLLERR | POLLOUT; } waitInfo->registeredWaiters++; } /* * MultiClientWait waits until at least one connection added with * MultiClientRegisterWait is ready to be processed again. */ void MultiClientWait(WaitInfo *waitInfo) { /* * If we had a failure, we always want to sleep for a bit, to prevent * flooding the other system, probably making the situation worse. */ if (waitInfo->haveFailedWaiter) { long sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepIntervalPerCycle); return; } /* if there are tasks that already need attention again, don't wait */ if (waitInfo->haveReadyWaiter) { return; } while (true) { /* * Wait for activity on any of the sockets. Limit the maximum time * spent waiting in one wait cycle, as insurance against edge * cases. For efficiency we don't want wake up quite as often as * citus.remote_task_check_interval, so rather arbitrarily sleep ten * times as long. */ int rc = poll(waitInfo->pollfds, waitInfo->registeredWaiters, RemoteTaskCheckInterval * 10); if (rc < 0) { /* * Signals that arrive can interrupt our poll(). In that case just * check for interrupts, and try again. Every other error is * unexpected and treated as such. */ if (errno == EAGAIN || errno == EINTR) { CHECK_FOR_INTERRUPTS(); /* maximum wait starts at max again, but that's ok, it's just a stopgap */ continue; } else { ereport(ERROR, (errcode_for_file_access(), errmsg("poll failed: %m"))); } } else if (rc == 0) { ereport(DEBUG5, (errmsg("waiting for activity on tasks took longer than %ld ms", (long) RemoteTaskCheckInterval * 10))); } /* * At least one fd changed received a readiness notification, time to * process tasks again. */ return; } } /* * ClientConnectionReady checks if the given connection is ready for non-blocking * reads or writes. This function is loosely based on pqSocketCheck() at fe-misc.c * and libpq_select() at libpqwalreceiver.c. 
*/ static bool ClientConnectionReady(MultiConnection *connection, PostgresPollingStatusType pollingStatus) { bool clientConnectionReady = false; int pollResult = 0; int fileDescriptorCount = 1; int immediateTimeout = 0; int pollEventMask = 0; struct pollfd pollFileDescriptor; if (pollingStatus == PGRES_POLLING_READING) { pollEventMask = POLLERR | POLLIN; } else if (pollingStatus == PGRES_POLLING_WRITING) { pollEventMask = POLLERR | POLLOUT; } pollFileDescriptor.fd = PQsocket(connection->pgConn); pollFileDescriptor.events = pollEventMask; pollFileDescriptor.revents = 0; pollResult = poll(&pollFileDescriptor, fileDescriptorCount, immediateTimeout); if (pollResult > 0) { clientConnectionReady = true; } else if (pollResult == 0) { clientConnectionReady = false; } else if (pollResult < 0) { if (errno == EINTR) { /* * If a signal was caught, we return false so the caller polls the * connection again. */ clientConnectionReady = false; } else { /* * poll() can set errno to EFAULT (when socket is not * contained in the calling program's address space), EBADF (invalid * file descriptor), EINVAL (invalid arguments to select or poll), * and ENOMEM (no space to allocate file descriptor tables). Out of * these, only ENOMEM is likely here, and it is a fatal error, so we * error out. */ Assert(errno == ENOMEM); ereport(ERROR, (errcode_for_socket_access(), errmsg("poll() failed: %m"))); } } return clientConnectionReady; } citus-7.0.3/src/backend/distributed/executor/multi_executor.c000066400000000000000000000345061317107136600244370ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_executor.c * * Entrypoint into distributed query execution. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/namespace.h" #include "distributed/insert_select_executor.h" #include "distributed/insert_select_planner.h" #include "distributed/multi_copy.h" #include "distributed/multi_executor.h" #include "distributed/multi_master_planner.h" #include "distributed/multi_planner.h" #include "distributed/multi_router_executor.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_resowner.h" #include "distributed/multi_server_executor.h" #include "distributed/multi_utility.h" #include "distributed/resource_lock.h" #include "distributed/worker_protocol.h" #include "executor/execdebug.h" #include "commands/copy.h" #include "nodes/makefuncs.h" #include "parser/parsetree.h" #include "storage/lmgr.h" #include "tcop/utility.h" #include "utils/snapmgr.h" #include "utils/memutils.h" /* * Define executor methods for the different executor types. 
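 *
 * Each CustomExecMethods struct below plugs one Citus executor type (real-time,
 * task-tracker, router select/modify, coordinator INSERT ... SELECT) into
 * PostgreSQL's CustomScan callback interface. The structs differ mainly in
 * their BeginCustomScan/ExecCustomScan callbacks; all of them share
 * CitusEndScan and CitusReScan for cleanup and (unsupported) rescans.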
*/ static CustomExecMethods RealTimeCustomExecMethods = { .CustomName = "RealTimeScan", .BeginCustomScan = CitusSelectBeginScan, .ExecCustomScan = RealTimeExecScan, .EndCustomScan = CitusEndScan, .ReScanCustomScan = CitusReScan, .ExplainCustomScan = CitusExplainScan }; static CustomExecMethods TaskTrackerCustomExecMethods = { .CustomName = "TaskTrackerScan", .BeginCustomScan = CitusSelectBeginScan, .ExecCustomScan = TaskTrackerExecScan, .EndCustomScan = CitusEndScan, .ReScanCustomScan = CitusReScan, .ExplainCustomScan = CitusExplainScan }; static CustomExecMethods RouterSequentialModifyCustomExecMethods = { .CustomName = "RouterSequentialModifyScan", .BeginCustomScan = CitusModifyBeginScan, .ExecCustomScan = RouterSequentialModifyExecScan, .EndCustomScan = CitusEndScan, .ReScanCustomScan = CitusReScan, .ExplainCustomScan = CitusExplainScan }; static CustomExecMethods RouterMultiModifyCustomExecMethods = { .CustomName = "RouterMultiModifyScan", .BeginCustomScan = CitusModifyBeginScan, .ExecCustomScan = RouterMultiModifyExecScan, .EndCustomScan = CitusEndScan, .ReScanCustomScan = CitusReScan, .ExplainCustomScan = CitusExplainScan }; static CustomExecMethods RouterSelectCustomExecMethods = { .CustomName = "RouterSelectScan", .BeginCustomScan = CitusSelectBeginScan, .ExecCustomScan = RouterSelectExecScan, .EndCustomScan = CitusEndScan, .ReScanCustomScan = CitusReScan, .ExplainCustomScan = CitusExplainScan }; static CustomExecMethods CoordinatorInsertSelectCustomExecMethods = { .CustomName = "CoordinatorInsertSelectScan", .BeginCustomScan = CitusSelectBeginScan, .ExecCustomScan = CoordinatorInsertSelectExecScan, .EndCustomScan = CitusEndScan, .ReScanCustomScan = CitusReScan, .ExplainCustomScan = CoordinatorInsertSelectExplainScan }; /* local function forward declarations */ static void PrepareMasterJobDirectory(Job *workerJob); static void LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob); static Relation StubRelation(TupleDesc tupleDescriptor); /* * RealTimeCreateScan creates the scan state for real-time executor queries. */ Node * RealTimeCreateScan(CustomScan *scan) { CitusScanState *scanState = palloc0(sizeof(CitusScanState)); scanState->executorType = MULTI_EXECUTOR_REAL_TIME; scanState->customScanState.ss.ps.type = T_CustomScanState; scanState->multiPlan = GetMultiPlan(scan); scanState->customScanState.methods = &RealTimeCustomExecMethods; return (Node *) scanState; } /* * TaskTrackerCreateScan creates the scan state for task-tracker executor queries. */ Node * TaskTrackerCreateScan(CustomScan *scan) { CitusScanState *scanState = palloc0(sizeof(CitusScanState)); scanState->executorType = MULTI_EXECUTOR_TASK_TRACKER; scanState->customScanState.ss.ps.type = T_CustomScanState; scanState->multiPlan = GetMultiPlan(scan); scanState->customScanState.methods = &TaskTrackerCustomExecMethods; return (Node *) scanState; } /* * RouterCreateScan creates the scan state for router executor queries. 
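 *
 * In short: a single-task job is executed with the sequential-modify or the
 * select methods depending on whether it modifies data; a job with multiple
 * tasks must be a modification and uses the multi-modify methods, unless it
 * is a multi-row INSERT, which is also executed sequentially.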
*/ Node * RouterCreateScan(CustomScan *scan) { CitusScanState *scanState = palloc0(sizeof(CitusScanState)); MultiPlan *multiPlan = NULL; Job *workerJob = NULL; List *taskList = NIL; bool isModificationQuery = false; scanState->executorType = MULTI_EXECUTOR_ROUTER; scanState->customScanState.ss.ps.type = T_CustomScanState; scanState->multiPlan = GetMultiPlan(scan); multiPlan = scanState->multiPlan; workerJob = multiPlan->workerJob; taskList = workerJob->taskList; isModificationQuery = IsModifyMultiPlan(multiPlan); /* check whether query has at most one shard */ if (list_length(taskList) <= 1) { if (isModificationQuery) { scanState->customScanState.methods = &RouterSequentialModifyCustomExecMethods; } else { scanState->customScanState.methods = &RouterSelectCustomExecMethods; } } else { Assert(isModificationQuery); if (IsMultiRowInsert(workerJob->jobQuery)) { /* * Multi-row INSERT is executed sequentially instead of using * parallel connections. */ scanState->customScanState.methods = &RouterSequentialModifyCustomExecMethods; } else { scanState->customScanState.methods = &RouterMultiModifyCustomExecMethods; } } return (Node *) scanState; } /* * CoordinatorInsertSelectCrateScan creates the scan state for executing * INSERT..SELECT into a distributed table via the coordinator. */ Node * CoordinatorInsertSelectCreateScan(CustomScan *scan) { CitusScanState *scanState = palloc0(sizeof(CitusScanState)); scanState->executorType = MULTI_EXECUTOR_COORDINATOR_INSERT_SELECT; scanState->customScanState.ss.ps.type = T_CustomScanState; scanState->multiPlan = GetMultiPlan(scan); scanState->customScanState.methods = &CoordinatorInsertSelectCustomExecMethods; return (Node *) scanState; } /* * DelayedErrorCreateScan is only called if we could not plan for the given * query. This is the case when a plan is not ready for execution because * CreateDistributedPlan() couldn't find a plan due to unresolved prepared * statement parameters, but didn't error out, because we expect custom plans * to come to our rescue. But sql (not plpgsql) functions unfortunately don't * go through a codepath supporting custom plans. Here, we error out with this * delayed error message. */ Node * DelayedErrorCreateScan(CustomScan *scan) { MultiPlan *multiPlan = GetMultiPlan(scan); /* raise the deferred error */ RaiseDeferredError(multiPlan->planningError, ERROR); return NULL; } /* * CitusSelectBeginScan is an empty function for BeginCustomScan callback. */ void CitusSelectBeginScan(CustomScanState *node, EState *estate, int eflags) { /* just an empty function */ } /* * RealTimeExecScan is a callback function which returns next tuple from a real-time * execution. In the first call, it executes distributed real-time plan and loads * results from temporary files into custom scan's tuple store. Then, it returns * tuples one by one from this tuple store. 
*/ TupleTableSlot * RealTimeExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { MultiPlan *multiPlan = scanState->multiPlan; Job *workerJob = multiPlan->workerJob; /* we are taking locks on partitions of partitioned tables */ LockPartitionsInRelationList(multiPlan->relationIdList, AccessShareLock); PrepareMasterJobDirectory(workerJob); MultiRealTimeExecute(workerJob); LoadTuplesIntoTupleStore(scanState, workerJob); scanState->finishedRemoteScan = true; } resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } /* * PrepareMasterJobDirectory creates a directory on the master node to keep job * execution results. We also register this directory for automatic cleanup on * portal delete. */ static void PrepareMasterJobDirectory(Job *workerJob) { StringInfo jobDirectoryName = MasterJobDirectoryName(workerJob->jobId); CreateDirectory(jobDirectoryName); ResourceOwnerEnlargeJobDirectories(CurrentResourceOwner); ResourceOwnerRememberJobDirectory(CurrentResourceOwner, workerJob->jobId); } /* * Load data collected by real-time or task-tracker executors into the tuplestore * of CitusScanState. For that, we first create a tuple store, and then copy the * files one-by-one into the tuple store. * * Note that in the long term it'd be a lot better if Multi*Execute() directly * filled the tuplestores, but that's a fair bit of work. */ static void LoadTuplesIntoTupleStore(CitusScanState *citusScanState, Job *workerJob) { CustomScanState customScanState = citusScanState->customScanState; List *workerTaskList = workerJob->taskList; List *copyOptions = NIL; EState *executorState = NULL; MemoryContext executorTupleContext = NULL; ExprContext *executorExpressionContext = NULL; TupleDesc tupleDescriptor = NULL; Relation stubRelation = NULL; ListCell *workerTaskCell = NULL; uint32 columnCount = 0; Datum *columnValues = NULL; bool *columnNulls = NULL; bool randomAccess = true; bool interTransactions = false; executorState = citusScanState->customScanState.ss.ps.state; executorTupleContext = GetPerTupleMemoryContext(executorState); executorExpressionContext = GetPerTupleExprContext(executorState); tupleDescriptor = customScanState.ss.ps.ps_ResultTupleSlot->tts_tupleDescriptor; stubRelation = StubRelation(tupleDescriptor); columnCount = tupleDescriptor->natts; columnValues = palloc0(columnCount * sizeof(Datum)); columnNulls = palloc0(columnCount * sizeof(bool)); Assert(citusScanState->tuplestorestate == NULL); citusScanState->tuplestorestate = tuplestore_begin_heap(randomAccess, interTransactions, work_mem); if (BinaryMasterCopyFormat) { DefElem *copyOption = NULL; #if (PG_VERSION_NUM >= 100000) int location = -1; /* "unknown" token location */ copyOption = makeDefElem("format", (Node *) makeString("binary"), location); #else copyOption = makeDefElem("format", (Node *) makeString("binary")); #endif copyOptions = lappend(copyOptions, copyOption); } foreach(workerTaskCell, workerTaskList) { Task *workerTask = (Task *) lfirst(workerTaskCell); StringInfo jobDirectoryName = NULL; StringInfo taskFilename = NULL; CopyState copyState = NULL; jobDirectoryName = MasterJobDirectoryName(workerTask->jobId); taskFilename = TaskFilename(jobDirectoryName, workerTask->taskId); #if (PG_VERSION_NUM >= 100000) copyState = BeginCopyFrom(NULL, stubRelation, taskFilename->data, false, NULL, NULL, copyOptions); #else copyState = BeginCopyFrom(stubRelation, taskFilename->data, false, NULL, copyOptions); #endif while 
(true) { MemoryContext oldContext = NULL; bool nextRowFound = false; ResetPerTupleExprContext(executorState); oldContext = MemoryContextSwitchTo(executorTupleContext); nextRowFound = NextCopyFrom(copyState, executorExpressionContext, columnValues, columnNulls, NULL); if (!nextRowFound) { MemoryContextSwitchTo(oldContext); break; } tuplestore_putvalues(citusScanState->tuplestorestate, tupleDescriptor, columnValues, columnNulls); MemoryContextSwitchTo(oldContext); } EndCopyFrom(copyState); } } /* * StubRelation creates a stub Relation from the given tuple descriptor. * To be able to use copy.c, we need a Relation descriptor. As there is no * relation corresponding to the data loaded from workers, we need to fake one. * We just need the bare minimal set of fields accessed by BeginCopyFrom(). */ static Relation StubRelation(TupleDesc tupleDescriptor) { Relation stubRelation = palloc0(sizeof(RelationData)); stubRelation->rd_att = tupleDescriptor; stubRelation->rd_rel = palloc0(sizeof(FormData_pg_class)); stubRelation->rd_rel->relkind = RELKIND_RELATION; return stubRelation; } /* * ReturnTupleFromTuplestore reads the next tuple from the tuple store of the * given Citus scan node and returns it. It returns null if all tuples are read * from the tuple store. */ TupleTableSlot * ReturnTupleFromTuplestore(CitusScanState *scanState) { Tuplestorestate *tupleStore = scanState->tuplestorestate; TupleTableSlot *resultSlot = NULL; ScanDirection scanDirection = NoMovementScanDirection; bool forwardScanDirection = true; if (tupleStore == NULL) { return NULL; } scanDirection = scanState->customScanState.ss.ps.state->es_direction; Assert(ScanDirectionIsValid(scanDirection)); if (ScanDirectionIsBackward(scanDirection)) { forwardScanDirection = false; } resultSlot = scanState->customScanState.ss.ps.ps_ResultTupleSlot; tuplestore_gettupleslot(tupleStore, forwardScanDirection, false, resultSlot); return resultSlot; } /* * TaskTrackerExecScan is a callback function which returns next tuple from a * task-tracker execution. In the first call, it executes distributed task-tracker * plan and loads results from temporary files into custom scan's tuple store. * Then, it returns tuples one by one from this tuple store. */ TupleTableSlot * TaskTrackerExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { MultiPlan *multiPlan = scanState->multiPlan; Job *workerJob = multiPlan->workerJob; /* we are taking locks on partitions of partitioned tables */ LockPartitionsInRelationList(multiPlan->relationIdList, AccessShareLock); PrepareMasterJobDirectory(workerJob); MultiTaskTrackerExecute(workerJob); LoadTuplesIntoTupleStore(scanState, workerJob); scanState->finishedRemoteScan = true; } resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } /* * CitusEndScan is used to clean up tuple store of the given custom scan state. */ void CitusEndScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; if (scanState->tuplestorestate) { tuplestore_end(scanState->tuplestorestate); scanState->tuplestorestate = NULL; } } /* * CitusReScan is just a place holder for rescan callback. Currently, we don't * support rescan given that there is not any way to reach this code path. 
*/ void CitusReScan(CustomScanState *node) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("rescan is unsupported"), errdetail("We don't expect this code path to be executed."))); } citus-7.0.3/src/backend/distributed/executor/multi_real_time_executor.c000066400000000000000000000650341317107136600264600ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_real_time_executor.c * * Routines for executing remote tasks as part of a distributed execution plan * in real-time. These routines open up a separate connection for each task they * need to execute, and therefore return their results faster. However, they can * only handle as many tasks as the number of file descriptors (connections) * available. They also can't handle execution primitives that need to write * their results to intermediate files. * * Copyright (c) 2013-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include #include #include #include "commands/dbcommands.h" #include "distributed/connection_management.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/worker_protocol.h" #include "storage/fd.h" #include "utils/timestamp.h" /* Local functions forward declarations */ static ConnectAction ManageTaskExecution(Task *task, TaskExecution *taskExecution, TaskExecutionStatus *executionStatus); static bool TaskExecutionReadyToStart(TaskExecution *taskExecution); static bool TaskExecutionCompleted(TaskExecution *taskExecution); static void CancelTaskExecutionIfActive(TaskExecution *taskExecution); static void CancelRequestIfActive(TaskExecStatus taskStatus, int connectionId); /* Worker node state hash functions */ static HTAB * WorkerHash(const char *workerHashName, List *workerNodeList); static HTAB * WorkerHashCreate(const char *workerHashName, uint32 workerHashSize); static WorkerNodeState * WorkerHashEnter(HTAB *workerHash, char *nodeName, uint32 nodePort); static WorkerNodeState * WorkerHashLookup(HTAB *workerHash, const char *nodeName, uint32 nodePort); static WorkerNodeState * LookupWorkerForTask(HTAB *workerHash, Task *task, TaskExecution *taskExecution); /* Throttling functions */ static bool WorkerConnectionsExhausted(WorkerNodeState *workerNodeState); static bool MasterConnectionsExhausted(HTAB *workerHash); static uint32 TotalOpenConnectionCount(HTAB *workerHash); static void UpdateConnectionCounter(WorkerNodeState *workerNode, ConnectAction connectAction); /* * MultiRealTimeExecute loops over the given tasks, and manages their execution * until either one task permanently fails or all tasks successfully complete. * The function opens up a connection for each task it needs to execute, and * manages these tasks' execution in real-time. 
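 *
 * The main loop repeatedly calls ManageTaskExecution() for every task,
 * registers connections that are blocked on network IO in a WaitInfo, and then
 * blocks in MultiClientWait() until one of them can make progress again. New
 * connections are throttled per worker node (WorkerConnectionsExhausted) as
 * well as in total (MasterConnectionsExhausted).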
*/ void MultiRealTimeExecute(Job *job) { List *taskList = job->taskList; List *taskExecutionList = NIL; ListCell *taskExecutionCell = NULL; ListCell *taskCell = NULL; uint32 failedTaskId = 0; bool allTasksCompleted = false; bool taskCompleted = false; bool taskFailed = false; List *workerNodeList = NIL; HTAB *workerHash = NULL; const char *workerHashName = "Worker node hash"; WaitInfo *waitInfo = MultiClientCreateWaitInfo(list_length(taskList)); workerNodeList = ActiveReadableNodeList(); workerHash = WorkerHash(workerHashName, workerNodeList); /* initialize task execution structures for remote execution */ foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); TaskExecution *taskExecution = InitTaskExecution(task, EXEC_TASK_CONNECT_START); taskExecutionList = lappend(taskExecutionList, taskExecution); } /* loop around until all tasks complete, one task fails, or user cancels */ while (!(allTasksCompleted || taskFailed || QueryCancelPending)) { uint32 taskCount = list_length(taskList); uint32 completedTaskCount = 0; /* loop around all tasks and manage them */ ListCell *taskCell = NULL; ListCell *taskExecutionCell = NULL; MultiClientResetWaitInfo(waitInfo); forboth(taskCell, taskList, taskExecutionCell, taskExecutionList) { Task *task = (Task *) lfirst(taskCell); TaskExecution *taskExecution = (TaskExecution *) lfirst(taskExecutionCell); ConnectAction connectAction = CONNECT_ACTION_NONE; WorkerNodeState *workerNodeState = NULL; TaskExecutionStatus executionStatus; workerNodeState = LookupWorkerForTask(workerHash, task, taskExecution); /* in case the task is about to start, throttle if necessary */ if (TaskExecutionReadyToStart(taskExecution) && (WorkerConnectionsExhausted(workerNodeState) || MasterConnectionsExhausted(workerHash))) { continue; } /* call the function that performs the core task execution logic */ connectAction = ManageTaskExecution(task, taskExecution, &executionStatus); /* update the connection counter for throttling */ UpdateConnectionCounter(workerNodeState, connectAction); /* * If this task failed, we need to iterate over task executions, and * manually clean out their client-side resources. Hence, we record * the failure here instead of immediately erroring out. */ taskFailed = TaskExecutionFailed(taskExecution); if (taskFailed) { failedTaskId = taskExecution->taskId; break; } taskCompleted = TaskExecutionCompleted(taskExecution); if (taskCompleted) { completedTaskCount++; } else { uint32 currentIndex = taskExecution->currentNodeIndex; int32 *connectionIdArray = taskExecution->connectionIdArray; int32 connectionId = connectionIdArray[currentIndex]; /* * If not done with the task yet, make note of what this task * and its associated connection is waiting for. */ MultiClientRegisterWait(waitInfo, executionStatus, connectionId); } } /* * Check if all tasks completed; otherwise wait as appropriate to * avoid a tight loop. That means we immediately continue if tasks are * ready to be processed further, and block when we're waiting for * network IO. */ if (completedTaskCount == taskCount) { allTasksCompleted = true; } else { MultiClientWait(waitInfo); } } MultiClientFreeWaitInfo(waitInfo); /* * We prevent cancel/die interrupts until we clean up connections to worker * nodes. Note that for the above while loop, if the user Ctrl+C's a query * and we emit a warning before looping to the beginning of the while loop, * we will get canceled away before we can hold any interrupts. 
*/ HOLD_INTERRUPTS(); /* cancel any active task executions */ taskExecutionCell = NULL; foreach(taskExecutionCell, taskExecutionList) { TaskExecution *taskExecution = (TaskExecution *) lfirst(taskExecutionCell); CancelTaskExecutionIfActive(taskExecution); } /* * If cancel might have been sent, give remote backends some time to flush * their responses. This avoids some broken pipe logs on the backend-side. * * FIXME: This shouldn't be dependent on RemoteTaskCheckInterval; they're * unrelated types of delays. */ if (taskFailed || QueryCancelPending) { long sleepInterval = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepInterval); } /* close connections and open files */ taskExecutionCell = NULL; foreach(taskExecutionCell, taskExecutionList) { TaskExecution *taskExecution = (TaskExecution *) lfirst(taskExecutionCell); CleanupTaskExecution(taskExecution); } RESUME_INTERRUPTS(); /* * If we previously broke out of the execution loop due to a task failure or * user cancellation request, we can now safely emit an error message (all * client-side resources have been cleared). */ if (taskFailed) { ereport(ERROR, (errmsg("failed to execute task %u", failedTaskId))); } else if (QueryCancelPending) { CHECK_FOR_INTERRUPTS(); } } /* * ManageTaskExecution manages all execution logic for the given task. For this, * the function starts a new "execution" on a node, and tracks this execution's * progress. On failure, the function restarts this execution on another node. * Note that this function directly manages a task's execution by opening up a * separate connection to the worker node for each execution. The function * returns a ConnectAction enum indicating whether a connection has been opened * or closed in this call. Via the executionStatus parameter this function returns * what a Task is blocked on.
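 *
 * To make the interface concrete: a hypothetical caller consumes the two
 * outputs roughly as in the sketch below (the real caller is
 * MultiRealTimeExecute above; the names here mirror it loosely and the
 * snippet is illustrative only).
 *
 *     // sketch: use the return value and the out-parameter of ManageTaskExecution
 *     TaskExecutionStatus executionStatus = TASK_STATUS_READY;
 *     ConnectAction connectAction = ManageTaskExecution(task, taskExecution,
 *                                                       &executionStatus);
 *
 *     UpdateConnectionCounter(workerNodeState, connectAction);  // throttle bookkeeping
 *
 *     if (executionStatus == TASK_STATUS_SOCKET_READ ||
 *         executionStatus == TASK_STATUS_SOCKET_WRITE)
 *     {
 *         // the task is blocked on network IO; register it so the outer loop
 *         // can sleep in MultiClientWait() instead of spinning
 *         MultiClientRegisterWait(waitInfo, executionStatus, connectionId);
 *     }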
*/ static ConnectAction ManageTaskExecution(Task *task, TaskExecution *taskExecution, TaskExecutionStatus *executionStatus) { TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; int32 *connectionIdArray = taskExecution->connectionIdArray; int32 *fileDescriptorArray = taskExecution->fileDescriptorArray; uint32 currentIndex = taskExecution->currentNodeIndex; TaskExecStatus currentStatus = taskStatusArray[currentIndex]; List *taskPlacementList = task->taskPlacementList; ShardPlacement *taskPlacement = list_nth(taskPlacementList, currentIndex); char *nodeName = taskPlacement->nodeName; uint32 nodePort = taskPlacement->nodePort; ConnectAction connectAction = CONNECT_ACTION_NONE; /* as most state transitions don't require blocking, default to not waiting */ *executionStatus = TASK_STATUS_READY; switch (currentStatus) { case EXEC_TASK_CONNECT_START: { int32 connectionId = INVALID_CONNECTION_ID; char *nodeDatabase = NULL; /* we use the same database name on the master and worker nodes */ nodeDatabase = get_database_name(MyDatabaseId); connectionId = MultiClientConnectStart(nodeName, nodePort, nodeDatabase, NULL); connectionIdArray[currentIndex] = connectionId; /* if valid, poll the connection until the connection is initiated */ if (connectionId != INVALID_CONNECTION_ID) { taskStatusArray[currentIndex] = EXEC_TASK_CONNECT_POLL; taskExecution->connectStartTime = GetCurrentTimestamp(); connectAction = CONNECT_ACTION_OPENED; } else { *executionStatus = TASK_STATUS_ERROR; AdjustStateForFailure(taskExecution); break; } break; } case EXEC_TASK_CONNECT_POLL: { int32 connectionId = connectionIdArray[currentIndex]; ConnectStatus pollStatus = MultiClientConnectPoll(connectionId); /* * If the connection is established, we reset the data fetch counter and * change our status to data fetching. */ if (pollStatus == CLIENT_CONNECTION_READY) { taskExecution->dataFetchTaskIndex = -1; taskStatusArray[currentIndex] = EXEC_FETCH_TASK_LOOP; } else if (pollStatus == CLIENT_CONNECTION_BUSY) { /* immediately retry */ taskStatusArray[currentIndex] = EXEC_TASK_CONNECT_POLL; } else if (pollStatus == CLIENT_CONNECTION_BUSY_READ) { *executionStatus = TASK_STATUS_SOCKET_READ; taskStatusArray[currentIndex] = EXEC_TASK_CONNECT_POLL; } else if (pollStatus == CLIENT_CONNECTION_BUSY_WRITE) { *executionStatus = TASK_STATUS_SOCKET_WRITE; taskStatusArray[currentIndex] = EXEC_TASK_CONNECT_POLL; } else if (pollStatus == CLIENT_CONNECTION_BAD) { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } /* now check if we have been trying to connect for too long */ if (pollStatus == CLIENT_CONNECTION_BUSY_READ || pollStatus == CLIENT_CONNECTION_BUSY_WRITE) { if (TimestampDifferenceExceeds(taskExecution->connectStartTime, GetCurrentTimestamp(), NodeConnectionTimeout)) { ereport(WARNING, (errmsg("could not establish asynchronous " "connection after %u ms", NodeConnectionTimeout))); taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } } break; } case EXEC_TASK_FAILED: { /* * On task failure, we close the connection. We also reset our execution * status assuming that we might fail on all other worker nodes and come * back to this failed node. In that case, we will retry the same fetch * and compute task(s) on this node again. 
*/ int32 connectionId = connectionIdArray[currentIndex]; MultiClientDisconnect(connectionId); connectionIdArray[currentIndex] = INVALID_CONNECTION_ID; connectAction = CONNECT_ACTION_CLOSED; taskStatusArray[currentIndex] = EXEC_TASK_CONNECT_START; /* try next worker node */ AdjustStateForFailure(taskExecution); /* * Add a delay, to avoid potentially exacerbating problems by * looping quickly */ *executionStatus = TASK_STATUS_ERROR; break; } case EXEC_FETCH_TASK_LOOP: { List *dataFetchTaskList = task->dependedTaskList; int32 dataFetchTaskCount = list_length(dataFetchTaskList); /* move to the next data fetch task */ taskExecution->dataFetchTaskIndex++; if (taskExecution->dataFetchTaskIndex < dataFetchTaskCount) { taskStatusArray[currentIndex] = EXEC_FETCH_TASK_START; } else { taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_START; } break; } case EXEC_FETCH_TASK_START: { List *dataFetchTaskList = task->dependedTaskList; int32 dataFetchTaskIndex = taskExecution->dataFetchTaskIndex; Task *dataFetchTask = (Task *) list_nth(dataFetchTaskList, dataFetchTaskIndex); char *dataFetchQuery = dataFetchTask->queryString; int32 connectionId = connectionIdArray[currentIndex]; bool querySent = MultiClientSendQuery(connectionId, dataFetchQuery); if (querySent) { taskStatusArray[currentIndex] = EXEC_FETCH_TASK_RUNNING; } else { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } break; } case EXEC_FETCH_TASK_RUNNING: { int32 connectionId = connectionIdArray[currentIndex]; ResultStatus resultStatus = MultiClientResultStatus(connectionId); QueryStatus queryStatus = CLIENT_INVALID_QUERY; /* check if query results are in progress or unavailable */ if (resultStatus == CLIENT_RESULT_BUSY) { *executionStatus = TASK_STATUS_SOCKET_READ; taskStatusArray[currentIndex] = EXEC_FETCH_TASK_RUNNING; break; } else if (resultStatus == CLIENT_RESULT_UNAVAILABLE) { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; break; } Assert(resultStatus == CLIENT_RESULT_READY); /* * If the query executed successfully, loop onto the next data fetch * task. Else if the query failed, try data fetching on another node.
*/ queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus == CLIENT_QUERY_DONE) { taskStatusArray[currentIndex] = EXEC_FETCH_TASK_LOOP; } else if (queryStatus == CLIENT_QUERY_FAILED) { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } else { ereport(FATAL, (errmsg("invalid query status: %d", queryStatus))); } break; } case EXEC_COMPUTE_TASK_START: { int32 connectionId = connectionIdArray[currentIndex]; bool querySent = false; /* construct new query to copy query results to stdout */ char *queryString = task->queryString; StringInfo computeTaskQuery = makeStringInfo(); if (BinaryMasterCopyFormat) { appendStringInfo(computeTaskQuery, COPY_QUERY_TO_STDOUT_BINARY, queryString); } else { appendStringInfo(computeTaskQuery, COPY_QUERY_TO_STDOUT_TEXT, queryString); } querySent = MultiClientSendQuery(connectionId, computeTaskQuery->data); if (querySent) { taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_RUNNING; } else { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } break; } case EXEC_COMPUTE_TASK_RUNNING: { int32 connectionId = connectionIdArray[currentIndex]; ResultStatus resultStatus = MultiClientResultStatus(connectionId); QueryStatus queryStatus = CLIENT_INVALID_QUERY; /* check if query results are in progress or unavailable */ if (resultStatus == CLIENT_RESULT_BUSY) { taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_RUNNING; *executionStatus = TASK_STATUS_SOCKET_READ; break; } else if (resultStatus == CLIENT_RESULT_UNAVAILABLE) { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; break; } Assert(resultStatus == CLIENT_RESULT_READY); /* check if our request to copy query results has been acknowledged */ queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus == CLIENT_QUERY_COPY) { StringInfo jobDirectoryName = MasterJobDirectoryName(task->jobId); StringInfo taskFilename = TaskFilename(jobDirectoryName, task->taskId); char *filename = taskFilename->data; int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); int fileMode = (S_IRUSR | S_IWUSR); int32 fileDescriptor = BasicOpenFile(filename, fileFlags, fileMode); if (fileDescriptor >= 0) { /* * All files inside the job directory get automatically cleaned * up on transaction commit or abort. 
*/ fileDescriptorArray[currentIndex] = fileDescriptor; taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_COPYING; } else { ereport(WARNING, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", filename))); taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } } else if (queryStatus == CLIENT_QUERY_FAILED) { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } else { ereport(FATAL, (errmsg("invalid query status: %d", queryStatus))); } break; } case EXEC_COMPUTE_TASK_COPYING: { int32 connectionId = connectionIdArray[currentIndex]; int32 fileDesc = fileDescriptorArray[currentIndex]; int closed = -1; /* copy data from worker node, and write to local file */ CopyStatus copyStatus = MultiClientCopyData(connectionId, fileDesc); /* if worker node will continue to send more data, keep reading */ if (copyStatus == CLIENT_COPY_MORE) { taskStatusArray[currentIndex] = EXEC_COMPUTE_TASK_COPYING; *executionStatus = TASK_STATUS_SOCKET_READ; } else if (copyStatus == CLIENT_COPY_DONE) { closed = close(fileDesc); fileDescriptorArray[currentIndex] = -1; if (closed >= 0) { taskStatusArray[currentIndex] = EXEC_TASK_DONE; /* we are done executing; we no longer need the connection */ MultiClientDisconnect(connectionId); connectionIdArray[currentIndex] = INVALID_CONNECTION_ID; connectAction = CONNECT_ACTION_CLOSED; } else { ereport(WARNING, (errcode_for_file_access(), errmsg("could not close copied file: %m"))); taskStatusArray[currentIndex] = EXEC_TASK_FAILED; } } else if (copyStatus == CLIENT_COPY_FAILED) { taskStatusArray[currentIndex] = EXEC_TASK_FAILED; closed = close(fileDesc); fileDescriptorArray[currentIndex] = -1; if (closed < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not close copy file: %m"))); } } break; } case EXEC_TASK_DONE: { /* we are done with this task's execution */ break; } default: { /* we fatal here to avoid leaking client-side resources */ ereport(FATAL, (errmsg("invalid execution status: %d", currentStatus))); break; } } return connectAction; } /* Determines if the given task is ready to start. */ static bool TaskExecutionReadyToStart(TaskExecution *taskExecution) { bool readyToStart = false; TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; uint32 currentIndex = taskExecution->currentNodeIndex; TaskExecStatus taskStatus = taskStatusArray[currentIndex]; if (taskStatus == EXEC_TASK_CONNECT_START) { readyToStart = true; } return readyToStart; } /* Determines if the given task successfully completed executing. */ static bool TaskExecutionCompleted(TaskExecution *taskExecution) { bool completed = false; uint32 nodeIndex = 0; for (nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) { TaskExecStatus taskStatus = taskExecution->taskStatusArray[nodeIndex]; if (taskStatus == EXEC_TASK_DONE) { completed = true; break; } } return completed; } /* Iterates over all open connections, and cancels any active requests. */ static void CancelTaskExecutionIfActive(TaskExecution *taskExecution) { uint32 nodeIndex = 0; for (nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) { int32 connectionId = taskExecution->connectionIdArray[nodeIndex]; if (connectionId != INVALID_CONNECTION_ID) { TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; TaskExecStatus taskStatus = taskStatusArray[nodeIndex]; CancelRequestIfActive(taskStatus, connectionId); } } } /* Helper function to cancel an ongoing request, if any. 
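 *
 * MultiClientCancel() is implemented elsewhere (multi_client_executor.c); at
 * the libpq level a cancellation request looks roughly like the standalone
 * sketch below. This is for illustration only and assumes a plain PGconn.
 *
 *     // sketch: ask the server to cancel whatever the connection is running
 *     char errorBuffer[256];
 *     PGcancel *cancel = PQgetCancel(conn);        // conn is a PGconn pointer
 *
 *     if (cancel != NULL)
 *     {
 *         if (PQcancel(cancel, errorBuffer, sizeof(errorBuffer)) == 0)
 *         {
 *             // cancellation could not be sent; errorBuffer holds the reason
 *         }
 *         PQfreeCancel(cancel);
 *     }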
*/ static void CancelRequestIfActive(TaskExecStatus taskStatus, int connectionId) { /* * We use the task status to determine if we have an active request being * processed by the worker node. If we do, we send a cancellation request. * Note that we don't cancel data fetch tasks, and allow them to complete. */ if (taskStatus == EXEC_COMPUTE_TASK_RUNNING) { ResultStatus resultStatus = MultiClientResultStatus(connectionId); if (resultStatus == CLIENT_RESULT_BUSY) { MultiClientCancel(connectionId); } } else if (taskStatus == EXEC_COMPUTE_TASK_COPYING) { MultiClientCancel(connectionId); } } /* * WorkerHash creates a worker node hash with the given name. The function * then inserts one entry for each worker node in the given worker node * list. */ static HTAB * WorkerHash(const char *workerHashName, List *workerNodeList) { uint32 workerHashSize = list_length(workerNodeList); HTAB *workerHash = WorkerHashCreate(workerHashName, workerHashSize); ListCell *workerNodeCell = NULL; foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; WorkerHashEnter(workerHash, nodeName, nodePort); } return workerHash; } /* * WorkerHashCreate allocates memory for a worker node hash, initializes an * empty hash, and returns this hash. */ static HTAB * WorkerHashCreate(const char *workerHashName, uint32 workerHashSize) { HASHCTL info; int hashFlags = 0; HTAB *workerHash = NULL; memset(&info, 0, sizeof(info)); info.keysize = WORKER_LENGTH + sizeof(uint32); info.entrysize = sizeof(WorkerNodeState); info.hash = tag_hash; info.hcxt = CurrentMemoryContext; hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); workerHash = hash_create(workerHashName, workerHashSize, &info, hashFlags); if (workerHash == NULL) { ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("could not initialize worker node hash"))); } return workerHash; } /* * WorkerHashEnter creates a new worker node entry in the given worker node * hash, and checks that the worker node entry has been properly created. */ static WorkerNodeState * WorkerHashEnter(HTAB *workerHash, char *nodeName, uint32 nodePort) { bool handleFound = false; WorkerNodeState *workerNodeState = NULL; WorkerNodeState workerNodeKey; memset(&workerNodeKey, 0, sizeof(WorkerNodeState)); strlcpy(workerNodeKey.workerName, nodeName, WORKER_LENGTH); workerNodeKey.workerPort = nodePort; workerNodeState = (WorkerNodeState *) hash_search(workerHash, (void *) &workerNodeKey, HASH_ENTER, &handleFound); if (handleFound) { ereport(WARNING, (errmsg("multiple worker node state entries for node: \"%s:%u\"", nodeName, nodePort))); } memcpy(workerNodeState, &workerNodeKey, sizeof(WorkerNodeState)); workerNodeState->openConnectionCount = 0; return workerNodeState; } /* * WorkerHashLookup looks for the worker node state that corresponds to the given * node name and port number, and returns the found worker node state if any. 
*/ static WorkerNodeState * WorkerHashLookup(HTAB *workerHash, const char *nodeName, uint32 nodePort) { bool handleFound = false; WorkerNodeState *workerNodeState = NULL; WorkerNodeState workerNodeKey; memset(&workerNodeKey, 0, sizeof(WorkerNodeState)); strlcpy(workerNodeKey.workerName, nodeName, WORKER_LENGTH); workerNodeKey.workerPort = nodePort; workerNodeState = (WorkerNodeState *) hash_search(workerHash, (void *) &workerNodeKey, HASH_FIND, &handleFound); if (workerNodeState == NULL) { ereport(ERROR, (errmsg("could not find worker node state for node \"%s:%u\"", nodeName, nodePort))); } return workerNodeState; } /* * LookupWorkerForTask looks for the worker node state of the current worker * node of a task execution. */ static WorkerNodeState * LookupWorkerForTask(HTAB *workerHash, Task *task, TaskExecution *taskExecution) { uint32 currentIndex = taskExecution->currentNodeIndex; List *taskPlacementList = task->taskPlacementList; ShardPlacement *taskPlacement = list_nth(taskPlacementList, currentIndex); char *nodeName = taskPlacement->nodeName; uint32 nodePort = taskPlacement->nodePort; WorkerNodeState *workerNodeState = WorkerHashLookup(workerHash, nodeName, nodePort); return workerNodeState; } /* * WorkerConnectionsExhausted determines if the current query has exhausted the * maximum number of open connections that can be made to a worker. */ static bool WorkerConnectionsExhausted(WorkerNodeState *workerNodeState) { bool reachedLimit = false; /* * A worker cannot accept more than max_connections connections. If we have a * small number of workers with many shards, then a single query could exhaust * max_connections unless we throttle here. We use the value of max_connections * on the master as a proxy for the worker configuration to avoid introducing a * new configuration value. */ if (workerNodeState->openConnectionCount >= MaxConnections) { reachedLimit = true; } return reachedLimit; } /* * MasterConnectionsExhausted determines if the current query has exhausted * the maximum number of connections the master process can make. */ static bool MasterConnectionsExhausted(HTAB *workerHash) { bool reachedLimit = false; uint32 maxConnectionCount = MaxMasterConnectionCount(); uint32 totalConnectionCount = TotalOpenConnectionCount(workerHash); if (totalConnectionCount >= maxConnectionCount) { reachedLimit = true; } return reachedLimit; } /* * TotalOpenConnectionCount counts the total number of open connections across all the * workers. */ static uint32 TotalOpenConnectionCount(HTAB *workerHash) { uint32 connectionCount = 0; WorkerNodeState *workerNodeState = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, workerHash); workerNodeState = (WorkerNodeState *) hash_seq_search(&status); while (workerNodeState != NULL) { connectionCount += workerNodeState->openConnectionCount; workerNodeState = (WorkerNodeState *) hash_seq_search(&status); } return connectionCount; } /* * UpdateConnectionCounter updates the connection counter for a given worker * node based on the specified connect action. 
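 *
 * Taken together with WorkerConnectionsExhausted() and
 * MasterConnectionsExhausted() above, the throttling logic amounts to the
 * following sketch. This is an illustrative condensation of how the pieces
 * are combined in MultiRealTimeExecute, not a separate code path.
 *
 *     // sketch: only start a new task when neither connection limit is reached
 *     if (TaskExecutionReadyToStart(taskExecution) &&
 *         (WorkerConnectionsExhausted(workerNodeState) ||
 *          MasterConnectionsExhausted(workerHash)))
 *     {
 *         continue;                               // skip this task for now
 *     }
 *
 *     connectAction = ManageTaskExecution(task, taskExecution, &executionStatus);
 *     UpdateConnectionCounter(workerNodeState, connectAction);  // +1 on open, -1 on close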
*/ static void UpdateConnectionCounter(WorkerNodeState *workerNode, ConnectAction connectAction) { if (connectAction == CONNECT_ACTION_OPENED) { workerNode->openConnectionCount++; } else if (connectAction == CONNECT_ACTION_CLOSED) { workerNode->openConnectionCount--; } } citus-7.0.3/src/backend/distributed/executor/multi_router_executor.c000066400000000000000000001324551317107136600260410ustar00rootroot00000000000000/* * multi_router_executor.c * * Routines for executing remote tasks as part of a distributed execution plan * with synchronous connections. The routines utilize the connection cache. * Therefore, only a single connection is opened for each worker. Also, router * executor does not require a master table and a master query. In other words, * the results that are fetched from a single worker is sent to the output console * directly. Lastly, router executor can only execute a single task. * * Copyright (c) 2012-2016, Citus Data, Inc. */ #include "postgres.h" /* IWYU pragma: keep */ #include "c.h" #include "fmgr.h" /* IWYU pragma: keep */ #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include #include "access/htup.h" #include "access/sdir.h" #include "access/transam.h" #include "access/tupdesc.h" #include "access/xact.h" #include "catalog/pg_type.h" #include "distributed/citus_clauses.h" #include "distributed/citus_ruleutils.h" #include "distributed/connection_management.h" #include "distributed/deparse_shard_query.h" #include "distributed/listutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_planner.h" #include "distributed/multi_router_executor.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_shard_transaction.h" #include "distributed/placement_connection.h" #include "distributed/relay_utility.h" #include "distributed/remote_commands.h" #include "distributed/remote_transaction.h" #include "distributed/resource_lock.h" #include "executor/execdesc.h" #include "executor/executor.h" #include "executor/instrument.h" #include "executor/tuptable.h" #include "lib/stringinfo.h" #include "nodes/execnodes.h" #include "nodes/nodes.h" #include "nodes/params.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/plannodes.h" #include "storage/ipc.h" #include "storage/lock.h" #include "tcop/dest.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/hsearch.h" #include "utils/int8.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/palloc.h" #include "utils/tuplestore.h" /* controls use of locks to enforce safe commutativity */ bool AllModificationsCommutative = false; /* we've deprecated this flag, keeping here for some time not to break existing users */ bool EnableDeadlockPrevention = true; /* functions needed during run phase */ static void AcquireMetadataLocks(List *taskList); static ShardPlacementAccess * CreatePlacementAccess(ShardPlacement *placement, ShardPlacementAccessType accessType); static void ExecuteSingleModifyTask(CitusScanState *scanState, Task *task, bool multipleTasks, bool expectResults); static void ExecuteSingleSelectTask(CitusScanState *scanState, Task *task); static List * GetModifyConnections(Task *task, bool markCritical); static void ExecuteMultipleTasks(CitusScanState *scanState, List *taskList, bool isModificationQuery, bool expectResults); static 
int64 ExecuteModifyTasks(List *taskList, bool expectResults, ParamListInfo paramListInfo, CitusScanState *scanState); static void AcquireExecutorShardLock(Task *task, CmdType commandType); static void AcquireExecutorMultiShardLocks(List *taskList); static bool RequiresConsistentSnapshot(Task *task); static void ExtractParametersFromParamListInfo(ParamListInfo paramListInfo, Oid **parameterTypes, const char ***parameterValues); static bool SendQueryInSingleRowMode(MultiConnection *connection, char *query, ParamListInfo paramListInfo); static bool StoreQueryResult(CitusScanState *scanState, MultiConnection *connection, bool failOnError, int64 *rows); static bool ConsumeQueryResult(MultiConnection *connection, bool failOnError, int64 *rows); static LOCKMODE LockModeForModifyTask(Task *task); /* * AcquireMetadataLocks acquires metadata locks on each of the anchor * shards in the task list to prevent a shard being modified while it * is being copied. */ static void AcquireMetadataLocks(List *taskList) { ListCell *taskCell = NULL; /* * Note: to avoid the overhead of additional sorting, we assume tasks * to be already sorted by shard ID such that deadlocks are avoided. * This is true for INSERT/SELECT, which is the only multi-shard * command right now. */ foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); LockShardDistributionMetadata(task->anchorShardId, ShareLock); } } /* * AcquireExecutorShardLock acquires a lock on the shard for the given task and * command type if necessary to avoid divergence between multiple replicas of * the same shard. No lock is obtained when there is only one replica. * * The function determines the appropriate lock mode based on the commutativity * rule of the command. In each case, it uses a lock mode that enforces the * commutativity rule. * * The mapping is overridden when all_modifications_commutative is set to true. * In that case, all modifications are treated as commutative, which can be used * to communicate that the application is only generating commutative * UPDATE/DELETE/UPSERT commands and exclusive locks are unnecessary. */ static void AcquireExecutorShardLock(Task *task, CmdType commandType) { LOCKMODE lockMode = NoLock; int64 shardId = task->anchorShardId; if (commandType == CMD_SELECT || list_length(task->taskPlacementList) == 1) { /* * The executor shard lock is used to maintain consistency between * replicas and therefore no lock is required for read-only queries * or in general when there is only one replica. */ lockMode = NoLock; } else if (AllModificationsCommutative) { /* * Bypass commutativity checks when citus.all_modifications_commutative * is enabled. * * A RowExclusiveLock does not conflict with itself and therefore allows * multiple commutative commands to proceed concurrently. It does * conflict with ExclusiveLock, which may still be obtained by another * session that executes an UPDATE/DELETE/UPSERT command with * citus.all_modifications_commutative disabled. */ lockMode = RowExclusiveLock; } else if (task->upsertQuery || commandType == CMD_UPDATE || commandType == CMD_DELETE) { /* * UPDATE/DELETE/UPSERT commands do not commute with other modifications * since the rows modified by one command may be affected by the outcome * of another command. * * We need to handle upsert before INSERT, because PostgreSQL models * upsert commands as INSERT with an ON CONFLICT section. * * ExclusiveLock conflicts with all lock types used by modifications * and therefore prevents other modifications from running * concurrently. 
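 *
 * Putting the commutativity rules of this function together, the chosen lock
 * mode can be summarized by the following hypothetical helper. It is an
 * illustration only and not part of the source; the names it uses (CmdType,
 * LOCKMODE, AllModificationsCommutative) are the real ones used below.
 *
 *     // sketch: lock mode per command, as described in the surrounding comments
 *     static LOCKMODE
 *     ShardLockModeSketch(CmdType commandType, int placementCount, bool upsertQuery)
 *     {
 *         if (commandType == CMD_SELECT || placementCount == 1)
 *         {
 *             return NoLock;               // nothing to keep consistent
 *         }
 *         if (AllModificationsCommutative)
 *         {
 *             return RowExclusiveLock;     // user promised commutativity
 *         }
 *         if (upsertQuery || commandType == CMD_UPDATE || commandType == CMD_DELETE)
 *         {
 *             return ExclusiveLock;        // order matters across replicas
 *         }
 *         return RowExclusiveLock;         // plain INSERTs commute
 *     }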
*/ lockMode = ExclusiveLock; } else if (commandType == CMD_INSERT) { /* * An INSERT commutes with other INSERT commands, since performing them * out-of-order only affects the table order on disk, but not the * contents. * * When a unique constraint exists, INSERTs are not strictly commutative, * but whichever INSERT comes last will error out and thus has no effect. * INSERT is not commutative with UPDATE/DELETE/UPSERT, since the * UPDATE/DELETE/UPSERT may consider the INSERT, depending on execution * order. * * A RowExclusiveLock does not conflict with itself and therefore allows * multiple INSERT commands to proceed concurrently. It conflicts with * ExclusiveLock obtained by UPDATE/DELETE/UPSERT, ensuring those do * not run concurrently with INSERT. */ lockMode = RowExclusiveLock; } else { ereport(ERROR, (errmsg("unrecognized operation code: %d", (int) commandType))); } if (shardId != INVALID_SHARD_ID && lockMode != NoLock) { LockShardResource(shardId, lockMode); } /* * If the task has a subselect, then we may need to lock the shards from which * the query selects as well to prevent the subselects from seeing different * results on different replicas. In particular this prevents INSERT.. SELECT * commands from having a different effect on different placements. */ if (RequiresConsistentSnapshot(task)) { /* * ExclusiveLock conflicts with all lock types used by modifications * and therefore prevents other modifications from running * concurrently. */ LockRelationShardResources(task->relationShardList, ExclusiveLock); } } /* * AcquireExecutorMultiShardLocks acquires shard locks needed for execution * of writes on multiple shards. In addition to honouring commutativity * rules, we currently only allow a single multi-shard command on a shard at * a time. Otherwise, concurrent multi-shard commands may take row-level * locks on the shard placements in a different order and create a distributed * deadlock. This applies even when writes are commutative and/or there is * no replication. * * 1. If citus.all_modifications_commutative is set to true, then all locks * are acquired as ShareUpdateExclusiveLock. * * 2. If citus.all_modifications_commutative is false, then only the shards * with 2 or more replicas are locked with ExclusiveLock. Otherwise, the * lock is acquired with ShareUpdateExclusiveLock. * * ShareUpdateExclusiveLock conflicts with itself such that only one * multi-shard modification at a time is allowed on a shard. It also conflicts * with ExclusiveLock, which ensures that updates/deletes/upserts are applied * in the same order on all placements. It does not conflict with * RowExclusiveLock, which is normally obtained by single-shard, commutative * writes. */ static void AcquireExecutorMultiShardLocks(List *taskList) { ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); LOCKMODE lockMode = NoLock; if (AllModificationsCommutative || list_length(task->taskPlacementList) == 1) { /* * When all writes are commutative then we only need to prevent multi-shard * commands from running concurrently with each other and with commands * that are explicitly non-commutative. When there is no replication then * we only need to prevent concurrent multi-shard commands. * * In either case, ShareUpdateExclusive has the desired effect, since * it conflicts with itself and ExclusiveLock (taken by non-commutative * writes). 
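 *
 * In short, the multi-shard rule reduces to the sketch below, an illustrative
 * condensation of the branches that follow rather than additional code:
 *
 *     // sketch: one multi-shard writer at a time per shard
 *     LOCKMODE lockMode =
 *         (AllModificationsCommutative || list_length(task->taskPlacementList) == 1)
 *         ? ShareUpdateExclusiveLock   // serialize only against other multi-shard writes
 *         : ExclusiveLock;             // replicated shard: serialize against all writes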
*/ lockMode = ShareUpdateExclusiveLock; } else { /* * When there is replication, prevent all concurrent writes to the same * shards to ensure the writes are ordered. */ lockMode = ExclusiveLock; } /* * If we are dealing with a partition we are also taking locks on parent table * to prevent deadlocks on concurrent operations on a partition and its parent. */ LockParentShardResourceIfPartition(task->anchorShardId, lockMode); LockShardResource(task->anchorShardId, lockMode); /* * If the task has a subselect, then we may need to lock the shards from which * the query selects as well to prevent the subselects from seeing different * results on different replicas. In particular this prevents INSERT..SELECT * commands from having different effects on different placements. */ if (RequiresConsistentSnapshot(task)) { /* * ExclusiveLock conflicts with all lock types used by modifications * and therefore prevents other modifications from running * concurrently. */ LockRelationShardResources(task->relationShardList, ExclusiveLock); } } } /* * RequiresConsistentSnapshot returns true if the given task need to take * the necessary locks to ensure that a subquery in the INSERT ... SELECT * query returns the same output for all task placements. */ static bool RequiresConsistentSnapshot(Task *task) { bool requiresIsolation = false; if (!task->insertSelectQuery) { /* * Only INSERT/SELECT commands currently require SELECT isolation. * Other commands do not read from other shards. */ requiresIsolation = false; } else if (list_length(task->taskPlacementList) == 1) { /* * If there is only one replica then we fully rely on PostgreSQL to * provide SELECT isolation. In this case, we do not provide isolation * across the shards, but that was never our intention. */ requiresIsolation = false; } else if (AllModificationsCommutative) { /* * An INSERT/SELECT is commutative with other writes if it excludes * any ongoing writes based on the filter conditions. Without knowing * whether this is true, we assume the user took this into account * when enabling citus.all_modifications_commutative. This option * gives users an escape from aggressive locking during INSERT/SELECT. */ requiresIsolation = false; } else { /* * If this is a non-commutative write, then we need to block ongoing * writes to make sure that the subselect returns the same result * on all placements. */ requiresIsolation = true; } return requiresIsolation; } /* * CitusModifyBeginScan first evaluates expressions in the query and then * performs shard pruning in case the partition column in an insert was * defined as a function call. * * The function also checks the validity of the given custom scan node and * gets locks on the shards involved in the task list of the distributed plan. */ void CitusModifyBeginScan(CustomScanState *node, EState *estate, int eflags) { CitusScanState *scanState = (CitusScanState *) node; MultiPlan *multiPlan = scanState->multiPlan; Job *workerJob = multiPlan->workerJob; Query *jobQuery = workerJob->jobQuery; List *taskList = workerJob->taskList; bool deferredPruning = workerJob->deferredPruning; if (workerJob->requiresMasterEvaluation) { PlanState *planState = &(scanState->customScanState.ss.ps); EState *executorState = planState->state; ExecuteMasterEvaluableFunctions(jobQuery, planState); /* * We've processed parameters in ExecuteMasterEvaluableFunctions and * don't need to send their values to workers, since they will be * represented as constants in the deparsed query. 
To avoid sending * parameter values, we set the parameter list to NULL. */ executorState->es_param_list_info = NULL; if (deferredPruning) { DeferredErrorMessage *planningError = NULL; /* need to perform shard pruning, rebuild the task list from scratch */ taskList = RouterInsertTaskList(jobQuery, &planningError); if (planningError != NULL) { RaiseDeferredError(planningError, ERROR); } workerJob->taskList = taskList; } RebuildQueryStrings(jobQuery, taskList); } /* prevent concurrent placement changes */ AcquireMetadataLocks(taskList); /* * We are taking locks on partitions of partitioned tables. These locks are * necessary for locking tables that appear in the SELECT part of the query. */ LockPartitionsInRelationList(multiPlan->relationIdList, AccessShareLock); /* modify tasks are always assigned using first-replica policy */ workerJob->taskList = FirstReplicaAssignTaskList(taskList); } /* * RouterSequentialModifyExecScan executes 0 or more modifications on a * distributed table sequentially and returns results if there are any. */ TupleTableSlot * RouterSequentialModifyExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { MultiPlan *multiPlan = scanState->multiPlan; bool hasReturning = multiPlan->hasReturning; Job *workerJob = multiPlan->workerJob; List *taskList = workerJob->taskList; ListCell *taskCell = NULL; bool multipleTasks = list_length(taskList) > 1; /* * We could naturally handle function-based transactions (i.e. those using * PL/pgSQL or similar) by checking the type of queryDesc->dest, but some * customers already use functions that touch multiple shards from within * a function, so we'll ignore functions for now. */ if (IsTransactionBlock() || multipleTasks) { BeginOrContinueCoordinatedTransaction(); } foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); ExecuteSingleModifyTask(scanState, task, multipleTasks, hasReturning); } scanState->finishedRemoteScan = true; } resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } /* * RouterMultiModifyExecScan executes a list of tasks on remote nodes, retrieves * the results and, if RETURNING is used, stores them in custom scan's tuple store. * Then, it returns tuples one by one from this tuple store. */ TupleTableSlot * RouterMultiModifyExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { MultiPlan *multiPlan = scanState->multiPlan; Job *workerJob = multiPlan->workerJob; List *taskList = workerJob->taskList; bool hasReturning = multiPlan->hasReturning; bool isModificationQuery = true; ExecuteMultipleTasks(scanState, taskList, isModificationQuery, hasReturning); scanState->finishedRemoteScan = true; } resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } /* * RouterSelectExecScan executes a single select task on the remote node, * retrieves the results and stores them in custom scan's tuple store. Then, it * returns tuples one by one from this tuple store. 
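 *
 * The surrounding executor drives this node in the usual pull-based fashion;
 * a hypothetical caller loop would look like the sketch below (the PostgreSQL
 * executor performs the equivalent of this for us, one row per call):
 *
 *     // sketch: drain the scan until it returns an empty slot
 *     for (;;)
 *     {
 *         TupleTableSlot *slot = RouterSelectExecScan(node);
 *
 *         if (TupIsNull(slot))
 *         {
 *             break;                  // tuplestore exhausted
 *         }
 *         // process one result row here
 *     }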
*/ TupleTableSlot * RouterSelectExecScan(CustomScanState *node) { CitusScanState *scanState = (CitusScanState *) node; TupleTableSlot *resultSlot = NULL; if (!scanState->finishedRemoteScan) { MultiPlan *multiPlan = scanState->multiPlan; Job *workerJob = multiPlan->workerJob; List *taskList = workerJob->taskList; /* we are taking locks on partitions of partitioned tables */ LockPartitionsInRelationList(multiPlan->relationIdList, AccessShareLock); if (list_length(taskList) > 0) { Task *task = (Task *) linitial(taskList); ExecuteSingleSelectTask(scanState, task); } scanState->finishedRemoteScan = true; } resultSlot = ReturnTupleFromTuplestore(scanState); return resultSlot; } /* * ExecuteSingleSelectTask executes the task on the remote node, retrieves the * results and stores them in a tuple store. * * If the task fails on one of the placements, the function retries it on * other placements or errors out if the query fails on all placements. */ static void ExecuteSingleSelectTask(CitusScanState *scanState, Task *task) { ParamListInfo paramListInfo = scanState->customScanState.ss.ps.state->es_param_list_info; List *taskPlacementList = task->taskPlacementList; ListCell *taskPlacementCell = NULL; char *queryString = task->queryString; List *relationShardList = task->relationShardList; /* * Try to run the query to completion on one placement. If the query fails * attempt the query on the next placement. */ foreach(taskPlacementCell, taskPlacementList) { ShardPlacement *taskPlacement = (ShardPlacement *) lfirst(taskPlacementCell); bool queryOK = false; bool dontFailOnError = false; int64 currentAffectedTupleCount = 0; int connectionFlags = SESSION_LIFESPAN; List *placementAccessList = NIL; MultiConnection *connection = NULL; if (list_length(relationShardList) > 0) { placementAccessList = BuildPlacementSelectList(taskPlacement->groupId, relationShardList); } else { /* * When the SELECT prunes down to 0 shards, just use the dummy placement. * * FIXME: it would be preferable to evaluate the SELECT locally since no * data from the workers is required. */ ShardPlacementAccess *placementAccess = CreatePlacementAccess(taskPlacement, PLACEMENT_ACCESS_SELECT); placementAccessList = list_make1(placementAccess); } connection = GetPlacementListConnection(connectionFlags, placementAccessList, NULL); queryOK = SendQueryInSingleRowMode(connection, queryString, paramListInfo); if (!queryOK) { continue; } queryOK = StoreQueryResult(scanState, connection, dontFailOnError, &currentAffectedTupleCount); if (queryOK) { return; } } ereport(ERROR, (errmsg("could not receive query results"))); } /* * BuildPlacementSelectList builds a list of SELECT placement accesses * which can be used to call StartPlacementListConnection or * GetPlacementListConnection.
*/ List * BuildPlacementSelectList(uint32 groupId, List *relationShardList) { ListCell *relationShardCell = NULL; List *placementAccessList = NIL; foreach(relationShardCell, relationShardList) { RelationShard *relationShard = (RelationShard *) lfirst(relationShardCell); ShardPlacement *placement = NULL; ShardPlacementAccess *placementAccess = NULL; placement = FindShardPlacementOnGroup(groupId, relationShard->shardId); if (placement == NULL) { ereport(ERROR, (errmsg("no active placement of shard %ld found on group %d", relationShard->shardId, groupId))); } placementAccess = CreatePlacementAccess(placement, PLACEMENT_ACCESS_SELECT); placementAccessList = lappend(placementAccessList, placementAccess); } return placementAccessList; } /* * CreatePlacementAccess returns a new ShardPlacementAccess for the given placement * and access type. */ static ShardPlacementAccess * CreatePlacementAccess(ShardPlacement *placement, ShardPlacementAccessType accessType) { ShardPlacementAccess *placementAccess = NULL; placementAccess = (ShardPlacementAccess *) palloc0(sizeof(ShardPlacementAccess)); placementAccess->placement = placement; placementAccess->accessType = accessType; return placementAccess; } /* * ExecuteSingleModifyTask executes the task on the remote node, retrieves the * results and stores them, if RETURNING is used, in a tuple store. * * If the task fails on one of the placements, the function reraises the * remote error (constraint violation in DML), marks the affected placement as * invalid (other error on some placements, via the placement connection * framework), or errors out (failed on all placements). */ static void ExecuteSingleModifyTask(CitusScanState *scanState, Task *task, bool multipleTasks, bool expectResults) { CmdType operation = scanState->multiPlan->operation; EState *executorState = scanState->customScanState.ss.ps.state; ParamListInfo paramListInfo = executorState->es_param_list_info; List *taskPlacementList = task->taskPlacementList; List *connectionList = NIL; ListCell *taskPlacementCell = NULL; ListCell *connectionCell = NULL; int64 affectedTupleCount = -1; bool resultsOK = false; bool gotResults = false; char *queryString = task->queryString; bool taskRequiresTwoPhaseCommit = (task->replicationModel == REPLICATION_MODEL_2PC); ShardInterval *shardInterval = LoadShardInterval(task->anchorShardId); Oid relationId = shardInterval->relationId; /* * Modifications for reference tables are always done using 2PC. First * ensure that distributed transaction is started. Then force the * transaction manager to use 2PC while running the task on the * placements. */ if (taskRequiresTwoPhaseCommit) { BeginOrContinueCoordinatedTransaction(); CoordinatedTransactionUse2PC(); } /* * Get connections required to execute task. This will, if necessary, * establish the connection, mark as critical (when modifying reference * table) and start a transaction (when in a transaction). */ connectionList = GetModifyConnections(task, taskRequiresTwoPhaseCommit); /* * If we are dealing with a partitioned table, we also need to lock its * partitions. 
*/ if (PartitionedTable(relationId)) { LOCKMODE lockMode = LockModeForModifyTask(task); LockPartitionRelations(relationId, lockMode); } /* prevent replicas of the same shard from diverging */ AcquireExecutorShardLock(task, operation); /* try to execute modification on all placements */ forboth(taskPlacementCell, taskPlacementList, connectionCell, connectionList) { ShardPlacement *taskPlacement = (ShardPlacement *) lfirst(taskPlacementCell); MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); bool queryOK = false; bool failOnError = false; int64 currentAffectedTupleCount = 0; if (connection->remoteTransaction.transactionFailed) { /* * If GetModifyConnections failed to send BEGIN this connection will have * been marked as failed, and should not have any more commands sent to * it! Skip it for now, at the bottom of this method we call * MarkFailedShardPlacements() to ensure future statements will not use this * placement. */ continue; } queryOK = SendQueryInSingleRowMode(connection, queryString, paramListInfo); if (!queryOK) { continue; } /* if we're running a 2PC, the query should fail on error */ failOnError = taskRequiresTwoPhaseCommit; if (multipleTasks && expectResults) { /* * If we have multiple tasks and one fails, we cannot clear * the tuple store and start over. Error out instead. */ failOnError = true; } /* * If caller is interested, store query results the first time * through. The output of the query's execution on other shards is * discarded if we run there (because it's a modification query). */ if (!gotResults && expectResults) { queryOK = StoreQueryResult(scanState, connection, failOnError, &currentAffectedTupleCount); } else { queryOK = ConsumeQueryResult(connection, failOnError, &currentAffectedTupleCount); } if (queryOK) { if ((affectedTupleCount == -1) || (affectedTupleCount == currentAffectedTupleCount)) { affectedTupleCount = currentAffectedTupleCount; } else { ereport(WARNING, (errmsg("modified "INT64_FORMAT " tuples, but expected " "to modify "INT64_FORMAT, currentAffectedTupleCount, affectedTupleCount), errdetail("modified placement on %s:%d", taskPlacement->nodeName, taskPlacement->nodePort))); } resultsOK = true; gotResults = true; } } /* if all placements failed, error out */ if (!resultsOK) { ereport(ERROR, (errmsg("could not modify any active placements"))); } /* if some placements failed, ensure future statements don't access them */ MarkFailedShardPlacements(); executorState->es_processed += affectedTupleCount; if (IsTransactionBlock()) { XactModificationLevel = XACT_MODIFICATION_DATA; } } /* * GetModifyConnections returns the list of connections required to execute * modify commands on the placements in taskPlacementList. If necessary, remote * transactions are started. * * If markCritical is true remote transactions are marked as critical.
*/ static List * GetModifyConnections(Task *task, bool markCritical) { List *taskPlacementList = task->taskPlacementList; ListCell *taskPlacementCell = NULL; List *multiConnectionList = NIL; List *relationShardList = task->relationShardList; /* first initiate connection establishment for all necessary connections */ foreach(taskPlacementCell, taskPlacementList) { ShardPlacement *taskPlacement = (ShardPlacement *) lfirst(taskPlacementCell); int connectionFlags = SESSION_LIFESPAN | FOR_DML; MultiConnection *multiConnection = NULL; List *placementAccessList = NIL; ShardPlacementAccess *placementModification = NULL; /* create placement accesses for placements that appear in a subselect */ placementAccessList = BuildPlacementSelectList(taskPlacement->groupId, relationShardList); /* create placement access for the placement that we're modifying */ placementModification = CreatePlacementAccess(taskPlacement, PLACEMENT_ACCESS_DML); placementAccessList = lappend(placementAccessList, placementModification); /* get an appropriate connection for the DML statement */ multiConnection = GetPlacementListConnection(connectionFlags, placementAccessList, NULL); /* * If we're expanding the set nodes that participate in the distributed * transaction, conform to MultiShardCommitProtocol. */ if (MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC && InCoordinatedTransaction() && XactModificationLevel == XACT_MODIFICATION_DATA) { RemoteTransaction *transaction = &multiConnection->remoteTransaction; if (transaction->transactionState == REMOTE_TRANS_INVALID) { CoordinatedTransactionUse2PC(); } } if (markCritical) { MarkRemoteTransactionCritical(multiConnection); } multiConnectionList = lappend(multiConnectionList, multiConnection); } /* then finish in parallel */ FinishConnectionListEstablishment(multiConnectionList); /* and start transactions if applicable */ RemoteTransactionsBeginIfNecessary(multiConnectionList); return multiConnectionList; } /* * ExecuteMultipleTasks executes a list of tasks on remote nodes, retrieves * the results and, if RETURNING is used, stores them in a tuple store. * * If a task fails on one of the placements, the transaction rolls back. * Otherwise, the changes are committed using 2PC when the local transaction * commits. */ static void ExecuteMultipleTasks(CitusScanState *scanState, List *taskList, bool isModificationQuery, bool expectResults) { EState *executorState = scanState->customScanState.ss.ps.state; ParamListInfo paramListInfo = executorState->es_param_list_info; int64 affectedTupleCount = -1; /* can only support modifications right now */ Assert(isModificationQuery); affectedTupleCount = ExecuteModifyTasks(taskList, expectResults, paramListInfo, scanState); executorState->es_processed = affectedTupleCount; } /* * ExecuteModifyTasksWithoutResults provides a wrapper around ExecuteModifyTasks * for calls that do not require results. In this case, the expectResults flag * is set to false and arguments related to result sets and query parameters are * NULL. This function is primarily intended to allow DDL and * master_modify_multiple_shards to use the router executor infrastructure. */ int64 ExecuteModifyTasksWithoutResults(List *taskList) { return ExecuteModifyTasks(taskList, false, NULL, NULL); } /* * ExecuteTasksSequentiallyWithoutResults basically calls ExecuteModifyTasks in * a loop in order to simulate sequential execution of a list of tasks. 
Useful * in cases where issuing commands in parallel before waiting for results could * result in deadlocks (such as CREATE INDEX CONCURRENTLY). */ void ExecuteTasksSequentiallyWithoutResults(List *taskList) { ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); List *singleTask = list_make1(task); ExecuteModifyTasksWithoutResults(singleTask); } } /* * ExecuteModifyTasks executes a list of tasks on remote nodes, and * optionally retrieves the results and stores them in a tuple store. * * If a task fails on one of the placements, the transaction rolls back. * Otherwise, the changes are committed using 2PC when the local transaction * commits. */ static int64 ExecuteModifyTasks(List *taskList, bool expectResults, ParamListInfo paramListInfo, CitusScanState *scanState) { int64 totalAffectedTupleCount = 0; ListCell *taskCell = NULL; Task *firstTask = NULL; ShardInterval *firstShardInterval = NULL; int connectionFlags = 0; List *affectedTupleCountList = NIL; HTAB *shardConnectionHash = NULL; bool tasksPending = true; int placementIndex = 0; if (taskList == NIL) { return 0; } /* * In multi shard modification, we expect that all tasks operates on the * same relation, so it is enough to acquire a lock on the first task's * anchor relation's partitions. */ firstTask = (Task *) linitial(taskList); firstShardInterval = LoadShardInterval(firstTask->anchorShardId); if (PartitionedTable(firstShardInterval->relationId)) { LOCKMODE lockMode = LockModeForModifyTask(firstTask); LockPartitionRelations(firstShardInterval->relationId, lockMode); } /* ensure that there are no concurrent modifications on the same shards */ AcquireExecutorMultiShardLocks(taskList); BeginOrContinueCoordinatedTransaction(); if (MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC || firstTask->replicationModel == REPLICATION_MODEL_2PC) { CoordinatedTransactionUse2PC(); } if (firstTask->taskType == DDL_TASK) { connectionFlags = FOR_DDL; } else { connectionFlags = FOR_DML; } /* open connection to all relevant placements, if not already open */ shardConnectionHash = OpenTransactionsForAllTasks(taskList, connectionFlags); XactModificationLevel = XACT_MODIFICATION_DATA; /* iterate over placements in rounds, to ensure in-order execution */ while (tasksPending) { int taskIndex = 0; tasksPending = false; /* send command to all shard placements with the current index in parallel */ foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); int64 shardId = task->anchorShardId; char *queryString = task->queryString; bool shardConnectionsFound = false; ShardConnections *shardConnections = NULL; List *connectionList = NIL; MultiConnection *connection = NULL; bool queryOK = false; shardConnections = GetShardHashConnections(shardConnectionHash, shardId, &shardConnectionsFound); connectionList = shardConnections->connectionList; if (placementIndex >= list_length(connectionList)) { /* no more active placements for this task */ continue; } connection = (MultiConnection *) list_nth(connectionList, placementIndex); queryOK = SendQueryInSingleRowMode(connection, queryString, paramListInfo); if (!queryOK) { ReportConnectionError(connection, ERROR); } } /* collects results from all relevant shard placements */ foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); int64 shardId = task->anchorShardId; bool shardConnectionsFound = false; ShardConnections *shardConnections = NULL; List *connectionList = NIL; MultiConnection *connection = NULL; int64 currentAffectedTupleCount = 0; bool 
failOnError = true; bool queryOK PG_USED_FOR_ASSERTS_ONLY = false; /* abort in case of cancellation */ CHECK_FOR_INTERRUPTS(); shardConnections = GetShardHashConnections(shardConnectionHash, shardId, &shardConnectionsFound); connectionList = shardConnections->connectionList; if (placementIndex >= list_length(connectionList)) { /* no more active placements for this task */ taskIndex++; continue; } connection = (MultiConnection *) list_nth(connectionList, placementIndex); /* * If caller is interested, store query results the first time * through. The output of the query's execution on other shards is * discarded if we run there (because it's a modification query). */ if (placementIndex == 0 && expectResults) { Assert(scanState != NULL); queryOK = StoreQueryResult(scanState, connection, failOnError, &currentAffectedTupleCount); } else { queryOK = ConsumeQueryResult(connection, failOnError, &currentAffectedTupleCount); } /* should have rolled back on error */ Assert(queryOK); if (placementIndex == 0) { totalAffectedTupleCount += currentAffectedTupleCount; /* keep track of the initial affected tuple count */ affectedTupleCountList = lappend_int(affectedTupleCountList, currentAffectedTupleCount); } else { /* warn the user if shard placements have diverged */ int64 previousAffectedTupleCount = list_nth_int(affectedTupleCountList, taskIndex); if (currentAffectedTupleCount != previousAffectedTupleCount) { ereport(WARNING, (errmsg("modified "INT64_FORMAT " tuples of shard " UINT64_FORMAT ", but expected to modify "INT64_FORMAT, currentAffectedTupleCount, shardId, previousAffectedTupleCount), errdetail("modified placement on %s:%d", connection->hostname, connection->port))); } } if (!tasksPending && placementIndex + 1 < list_length(connectionList)) { /* more tasks to be done after this one */ tasksPending = true; } taskIndex++; } placementIndex++; } UnclaimAllShardConnections(shardConnectionHash); CHECK_FOR_INTERRUPTS(); return totalAffectedTupleCount; } /* * SendQueryInSingleRowMode sends the given query on the connection in an * asynchronous way. The function also sets the single-row mode on the * connection so that we receive results a row at a time. */ static bool SendQueryInSingleRowMode(MultiConnection *connection, char *query, ParamListInfo paramListInfo) { int querySent = 0; int singleRowMode = 0; if (paramListInfo != NULL) { int parameterCount = paramListInfo->numParams; Oid *parameterTypes = NULL; const char **parameterValues = NULL; ExtractParametersFromParamListInfo(paramListInfo, &parameterTypes, &parameterValues); querySent = SendRemoteCommandParams(connection, query, parameterCount, parameterTypes, parameterValues); } else { querySent = SendRemoteCommand(connection, query); } if (querySent == 0) { MarkRemoteTransactionFailed(connection, false); ReportConnectionError(connection, WARNING); return false; } singleRowMode = PQsetSingleRowMode(connection->pgConn); if (singleRowMode == 0) { MarkRemoteTransactionFailed(connection, false); ReportConnectionError(connection, WARNING); return false; } return true; } /* * ExtractParametersFromParamListInfo extracts parameter types and values from * the given ParamListInfo structure, and fills parameter type and value arrays.
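 *
 * SendRemoteCommandParams() (remote_commands.c) is assumed here to be a thin
 * wrapper around libpq's PQsendQueryParams. For reference, the underlying
 * libpq call with text-format parameters looks roughly like this standalone
 * sketch (illustration only, not Citus code; the table name is made up):
 *
 *     // sketch: send a parameterized query asynchronously over libpq
 *     Oid paramTypes[1] = { 0 };               // 0 lets the server infer the type
 *     const char *paramValues[1] = { "42" };   // text-format parameter values
 *
 *     int sent = PQsendQueryParams(conn,
 *                                  "SELECT count(1) FROM my_table WHERE id = $1",
 *                                  1,           // number of parameters
 *                                  paramTypes,
 *                                  paramValues,
 *                                  NULL,        // lengths: unused for text format
 *                                  NULL,        // formats: NULL means all text
 *                                  0);          // ask for text-format results
 *     if (sent == 0)
 *     {
 *         // query could not be queued; PQerrorMessage(conn) has the details
 *     }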
*/ static void ExtractParametersFromParamListInfo(ParamListInfo paramListInfo, Oid **parameterTypes, const char ***parameterValues) { int parameterIndex = 0; int parameterCount = paramListInfo->numParams; *parameterTypes = (Oid *) palloc0(parameterCount * sizeof(Oid)); *parameterValues = (const char **) palloc0(parameterCount * sizeof(char *)); /* get parameter types and values */ for (parameterIndex = 0; parameterIndex < parameterCount; parameterIndex++) { ParamExternData *parameterData = &paramListInfo->params[parameterIndex]; Oid typeOutputFunctionId = InvalidOid; bool variableLengthType = false; /* * Use 0 for data types where the oid values can be different on * the master and worker nodes. Therefore, the worker nodes can * infer the correct oid. */ if (parameterData->ptype >= FirstNormalObjectId) { (*parameterTypes)[parameterIndex] = 0; } else { (*parameterTypes)[parameterIndex] = parameterData->ptype; } /* * If the parameter is not referenced / used (ptype == 0) and * would otherwise have errored out inside standard_planner(), * don't pass a value to the remote side, and pass text oid to prevent * undetermined data type errors on workers. */ if (parameterData->ptype == 0) { (*parameterValues)[parameterIndex] = NULL; (*parameterTypes)[parameterIndex] = TEXTOID; continue; } /* * If the parameter is NULL then we preserve its type, but * don't need to evaluate its value. */ if (parameterData->isnull) { (*parameterValues)[parameterIndex] = NULL; continue; } getTypeOutputInfo(parameterData->ptype, &typeOutputFunctionId, &variableLengthType); (*parameterValues)[parameterIndex] = OidOutputFunctionCall(typeOutputFunctionId, parameterData->value); } } /* * StoreQueryResult gets the query results from the given connection, builds * tuples from the results, and stores them in a newly created * tuple-store. If the function can't receive query results, it returns * false. Note that this function assumes the query has already been sent on * the connection.
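 *
 * At the libpq level, single-row-mode consumption follows the pattern in the
 * standalone sketch below (illustration only; StoreQueryResult layers tuple
 * conversion and error handling on top of this):
 *
 *     // sketch: read results one row at a time after the query has been sent
 *     if (PQsetSingleRowMode(conn) == 0)
 *     {
 *         // could not enable single-row mode; handle the error
 *     }
 *
 *     PGresult *result = NULL;
 *     while ((result = PQgetResult(conn)) != NULL)
 *     {
 *         ExecStatusType status = PQresultStatus(result);
 *
 *         if (status == PGRES_SINGLE_TUPLE)
 *         {
 *             // exactly one row; read it with PQgetvalue(result, 0, column)
 *         }
 *         else if (status == PGRES_TUPLES_OK)
 *         {
 *             // zero-row terminator result that ends the row stream
 *         }
 *         else
 *         {
 *             // error: inspect PQresultErrorField(result, PG_DIAG_SQLSTATE)
 *         }
 *         PQclear(result);
 *     }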
*/ static bool StoreQueryResult(CitusScanState *scanState, MultiConnection *connection, bool failOnError, int64 *rows) { TupleDesc tupleDescriptor = scanState->customScanState.ss.ps.ps_ResultTupleSlot->tts_tupleDescriptor; AttInMetadata *attributeInputMetadata = TupleDescGetAttInMetadata(tupleDescriptor); List *targetList = scanState->customScanState.ss.ps.plan->targetlist; uint32 expectedColumnCount = ExecCleanTargetListLength(targetList); char **columnArray = (char **) palloc0(expectedColumnCount * sizeof(char *)); Tuplestorestate *tupleStore = NULL; bool randomAccess = true; bool interTransactions = false; bool commandFailed = false; MemoryContext ioContext = AllocSetContextCreate(CurrentMemoryContext, "StoreQueryResult", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); *rows = 0; if (scanState->tuplestorestate == NULL) { scanState->tuplestorestate = tuplestore_begin_heap(randomAccess, interTransactions, work_mem); } else if (!failOnError) { /* might have failed query execution on another placement before */ tuplestore_clear(scanState->tuplestorestate); } tupleStore = scanState->tuplestorestate; for (;;) { uint32 rowIndex = 0; uint32 columnIndex = 0; uint32 rowCount = 0; uint32 columnCount = 0; ExecStatusType resultStatus = 0; bool doRaiseInterrupts = true; PGresult *result = GetRemoteCommandResult(connection, doRaiseInterrupts); if (result == NULL) { break; } resultStatus = PQresultStatus(result); if ((resultStatus != PGRES_SINGLE_TUPLE) && (resultStatus != PGRES_TUPLES_OK)) { char *sqlStateString = PQresultErrorField(result, PG_DIAG_SQLSTATE); int category = 0; bool isConstraintViolation = false; MarkRemoteTransactionFailed(connection, false); /* * If the error code is in constraint violation class, we want to * fail fast because we must get the same error from all shard * placements. */ category = ERRCODE_TO_CATEGORY(ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION); isConstraintViolation = SqlStateMatchesCategory(sqlStateString, category); if (isConstraintViolation || failOnError) { ReportResultError(connection, result, ERROR); } else { ReportResultError(connection, result, WARNING); } PQclear(result); commandFailed = true; /* continue, there could be other lingering results due to row mode */ continue; } rowCount = PQntuples(result); columnCount = PQnfields(result); Assert(columnCount == expectedColumnCount); for (rowIndex = 0; rowIndex < rowCount; rowIndex++) { HeapTuple heapTuple = NULL; MemoryContext oldContext = NULL; memset(columnArray, 0, columnCount * sizeof(char *)); for (columnIndex = 0; columnIndex < columnCount; columnIndex++) { if (PQgetisnull(result, rowIndex, columnIndex)) { columnArray[columnIndex] = NULL; } else { columnArray[columnIndex] = PQgetvalue(result, rowIndex, columnIndex); } } /* * Switch to a temporary memory context that we reset after each tuple. This * protects us from any memory leaks that might be present in I/O functions * called by BuildTupleFromCStrings. */ oldContext = MemoryContextSwitchTo(ioContext); heapTuple = BuildTupleFromCStrings(attributeInputMetadata, columnArray); MemoryContextSwitchTo(oldContext); tuplestore_puttuple(tupleStore, heapTuple); MemoryContextReset(ioContext); (*rows)++; } PQclear(result); } pfree(columnArray); return !commandFailed; } /* * ConsumeQueryResult gets a query result from a connection, counting the rows * and checking for errors, but otherwise discarding potentially returned * rows. Returns true if a non-error result has been returned, false if there * has been an error. 
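 *
 * For commands that return no row stream, the affected-row count comes back
 * as a string from PQcmdTuples. A minimal standalone sketch of the counting
 * logic, using strtoll from <stdlib.h> instead of the scanint8 helper used
 * below, looks like this (illustration only):
 *
 *     // sketch: add up affected rows across results
 *     int64_t totalRows = 0;
 *     ExecStatusType status = PQresultStatus(result);
 *
 *     if (status == PGRES_COMMAND_OK)
 *     {
 *         const char *affectedString = PQcmdTuples(result);   // e.g. "42", or ""
 *
 *         if (affectedString[0] != '\0')
 *         {
 *             totalRows += strtoll(affectedString, NULL, 10);
 *         }
 *     }
 *     else
 *     {
 *         totalRows += PQntuples(result);    // SELECT-like results: count tuples
 *     }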
*/ static bool ConsumeQueryResult(MultiConnection *connection, bool failOnError, int64 *rows) { bool commandFailed = false; bool gotResponse = false; *rows = 0; /* * Due to single row mode we have to do multiple GetRemoteCommandResult() * to finish processing of this query, even without RETURNING. For * single-row mode we have to loop until all rows are consumed. */ while (true) { const bool doRaiseInterrupts = true; PGresult *result = GetRemoteCommandResult(connection, doRaiseInterrupts); ExecStatusType status = PGRES_COMMAND_OK; if (result == NULL) { break; } status = PQresultStatus(result); if (status != PGRES_COMMAND_OK && status != PGRES_SINGLE_TUPLE && status != PGRES_TUPLES_OK) { char *sqlStateString = PQresultErrorField(result, PG_DIAG_SQLSTATE); int category = 0; bool isConstraintViolation = false; MarkRemoteTransactionFailed(connection, false); /* * If the error code is in constraint violation class, we want to * fail fast because we must get the same error from all shard * placements. */ category = ERRCODE_TO_CATEGORY(ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION); isConstraintViolation = SqlStateMatchesCategory(sqlStateString, category); if (isConstraintViolation || failOnError) { ReportResultError(connection, result, ERROR); } else { ReportResultError(connection, result, WARNING); } PQclear(result); commandFailed = true; /* continue, there could be other lingering results due to row mode */ continue; } if (status == PGRES_COMMAND_OK) { char *currentAffectedTupleString = PQcmdTuples(result); int64 currentAffectedTupleCount = 0; if (*currentAffectedTupleString != '\0') { scanint8(currentAffectedTupleString, false, ¤tAffectedTupleCount); Assert(currentAffectedTupleCount >= 0); } *rows += currentAffectedTupleCount; } else { *rows += PQntuples(result); } PQclear(result); gotResponse = true; } return gotResponse && !commandFailed; } /* * LockModeForRouterModifyTask returns appropriate LOCKMODE for given router * modify task. */ static LOCKMODE LockModeForModifyTask(Task *task) { LOCKMODE lockMode = NoLock; if (task->taskType == DDL_TASK) { lockMode = AccessExclusiveLock; } else if (task->taskType == MODIFY_TASK) { lockMode = RowExclusiveLock; } else { /* we do not allow any other task type in these code path */ Assert(false); } return lockMode; } citus-7.0.3/src/backend/distributed/executor/multi_server_executor.c000066400000000000000000000214561317107136600260250ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_server_executor.c * * Function definitions for distributed task execution for real-time * and task-tracker executors, and routines common to both. The common * routines are implement backend-side logic; and they trigger executions * on the client-side via function hooks that they load. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include #include "distributed/multi_client_executor.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_resowner.h" #include "distributed/multi_server_executor.h" #include "distributed/worker_protocol.h" int RemoteTaskCheckInterval = 100; /* per cycle sleep interval in millisecs */ int TaskExecutorType = MULTI_EXECUTOR_REAL_TIME; /* distributed executor type */ bool BinaryMasterCopyFormat = false; /* copy data from workers in binary format */ int MultiTaskQueryLogLevel = MULTI_TASK_QUERY_INFO_OFF; /* multi-task query log level */ /* * JobExecutorType selects the executor type for the given multiPlan using the task * executor type config value. The function then checks if the given multiPlan needs * more resources than those provided to it by other config values, and issues * warnings accordingly. If the selected executor type cannot execute the given * multiPlan, the function errors out. */ MultiExecutorType JobExecutorType(MultiPlan *multiPlan) { Job *job = multiPlan->workerJob; List *workerNodeList = NIL; int workerNodeCount = 0; int taskCount = 0; double tasksPerNode = 0.; MultiExecutorType executorType = TaskExecutorType; bool routerExecutablePlan = multiPlan->routerExecutable; /* check if can switch to router executor */ if (routerExecutablePlan) { ereport(DEBUG2, (errmsg("Plan is router executable"))); return MULTI_EXECUTOR_ROUTER; } if (multiPlan->insertSelectSubquery != NULL) { return MULTI_EXECUTOR_COORDINATOR_INSERT_SELECT; } /* if it is not a router executable plan, inform user according to the log level */ if (MultiTaskQueryLogLevel != MULTI_TASK_QUERY_INFO_OFF) { ereport(MultiTaskQueryLogLevel, (errmsg("multi-task query about to be executed"), errhint("Queries are split to multiple tasks " "if they have to be split into several" " queries on the workers."))); } Assert(multiPlan->operation == CMD_SELECT); workerNodeList = ActiveReadableNodeList(); workerNodeCount = list_length(workerNodeList); taskCount = list_length(job->taskList); tasksPerNode = taskCount / ((double) workerNodeCount); if (executorType == MULTI_EXECUTOR_REAL_TIME) { double reasonableConnectionCount = 0; int dependedJobCount = 0; /* if we need to open too many connections per worker, warn the user */ if (tasksPerNode >= MaxConnections) { ereport(WARNING, (errmsg("this query uses more connections than the " "configured max_connections limit"), errhint("Consider increasing max_connections or setting " "citus.task_executor_type to " "\"task-tracker\"."))); } /* * If we need to open too many outgoing connections, warn the user. * The real-time executor caps the number of tasks it starts by the same limit, * but we still issue this warning because it degrades performance. 
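 *
 * As an illustrative calculation (the concrete number is hypothetical): with
 * max_files_per_process set to 1000, MaxMasterConnectionCount() below allows
 * roughly (1000 - RESERVED_FD_COUNT) / 2 concurrent tasks, since every task
 * needs one connection and one result file descriptor.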
*/ reasonableConnectionCount = MaxMasterConnectionCount(); if (taskCount >= reasonableConnectionCount) { ereport(WARNING, (errmsg("this query uses more file descriptors than the " "configured max_files_per_process limit"), errhint("Consider increasing max_files_per_process or " "setting citus.task_executor_type to " "\"task-tracker\"."))); } /* if we have repartition jobs with real time executor, error out */ dependedJobCount = list_length(job->dependedJobList); if (dependedJobCount > 0) { ereport(ERROR, (errmsg("cannot use real time executor with repartition jobs"), errhint("Set citus.task_executor_type to " "\"task-tracker\"."))); } } else { /* if we have more tasks per node than what can be tracked, warn the user */ if (tasksPerNode >= MaxTrackedTasksPerNode) { ereport(WARNING, (errmsg("this query assigns more tasks per node than the " "configured max_tracked_tasks_per_node limit"))); } } return executorType; } /* * MaxMasterConnectionCount returns the number of connections a master can open. * A master cannot create more than a certain number of file descriptors (FDs). * Every task requires 2 FDs, one file and one connection. Some FDs are taken by * the VFD pool and there is currently no way to reclaim these before opening a * connection. We therefore assume some FDs to be reserved for VFDs, based on * observing a typical size of the pool on a Citus master. */ int MaxMasterConnectionCount(void) { return Max((max_files_per_process - RESERVED_FD_COUNT) / 2, 1); } /* * RemoveJobDirectory gets automatically called at portal drop (end of query) or * at transaction abort. The function removes the job directory and releases the * associated job resource from the resource manager. */ void RemoveJobDirectory(uint64 jobId) { StringInfo jobDirectoryName = MasterJobDirectoryName(jobId); RemoveDirectory(jobDirectoryName); ResourceOwnerForgetJobDirectory(CurrentResourceOwner, jobId); } /* * InitTaskExecution creates a task execution structure for the given task, and * initializes execution related fields. */ TaskExecution * InitTaskExecution(Task *task, TaskExecStatus initialTaskExecStatus) { /* each task placement (assignment) corresponds to one worker node */ uint32 nodeCount = list_length(task->taskPlacementList); uint32 nodeIndex = 0; TaskExecution *taskExecution = CitusMakeNode(TaskExecution); taskExecution->jobId = task->jobId; taskExecution->taskId = task->taskId; taskExecution->nodeCount = nodeCount; taskExecution->connectStartTime = 0; taskExecution->currentNodeIndex = 0; taskExecution->dataFetchTaskIndex = -1; taskExecution->failureCount = 0; taskExecution->taskStatusArray = palloc0(nodeCount * sizeof(TaskExecStatus)); taskExecution->transmitStatusArray = palloc0(nodeCount * sizeof(TransmitExecStatus)); taskExecution->connectionIdArray = palloc0(nodeCount * sizeof(int32)); taskExecution->fileDescriptorArray = palloc0(nodeCount * sizeof(int32)); for (nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++) { taskExecution->taskStatusArray[nodeIndex] = initialTaskExecStatus; taskExecution->transmitStatusArray[nodeIndex] = EXEC_TRANSMIT_UNASSIGNED; taskExecution->connectionIdArray[nodeIndex] = INVALID_CONNECTION_ID; taskExecution->fileDescriptorArray[nodeIndex] = -1; } return taskExecution; } /* * CleanupTaskExecution iterates over all connections and file descriptors for * the given task execution. The function first closes all open connections and * file descriptors, and then frees memory allocated for the task execution. 
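 *
 * Illustrative lifecycle sketch (hypothetical; mirrors how executions are
 * created and torn down by the executors that use this module):
 *
 *   TaskExecution *execution = InitTaskExecution(task, EXEC_TASK_UNASSIGNED);
 *   ... drive the execution to completion ...
 *   CleanupTaskExecution(execution);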
*/ void CleanupTaskExecution(TaskExecution *taskExecution) { uint32 nodeIndex = 0; for (nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) { int32 connectionId = taskExecution->connectionIdArray[nodeIndex]; int32 fileDescriptor = taskExecution->fileDescriptorArray[nodeIndex]; /* close open connection */ if (connectionId != INVALID_CONNECTION_ID) { MultiClientDisconnect(connectionId); taskExecution->connectionIdArray[nodeIndex] = INVALID_CONNECTION_ID; } /* close open file */ if (fileDescriptor >= 0) { int closed = close(fileDescriptor); taskExecution->fileDescriptorArray[nodeIndex] = -1; if (closed < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not close copy file: %m"))); } } } /* deallocate memory and reset all fields */ pfree(taskExecution->taskStatusArray); pfree(taskExecution->connectionIdArray); pfree(taskExecution->fileDescriptorArray); pfree(taskExecution); } /* Determines if the given task exceeded its failure threshold. */ bool TaskExecutionFailed(TaskExecution *taskExecution) { if (taskExecution->failureCount >= MAX_TASK_EXECUTION_FAILURES) { return true; } return false; } /* * AdjustStateForFailure increments the failure count for given task execution. * The function also determines the next worker node that should be contacted * for remote execution. */ void AdjustStateForFailure(TaskExecution *taskExecution) { int maxNodeIndex = taskExecution->nodeCount - 1; Assert(maxNodeIndex >= 0); if (taskExecution->currentNodeIndex < maxNodeIndex) { taskExecution->currentNodeIndex++; /* try next worker node */ } else { taskExecution->currentNodeIndex = 0; /* go back to the first worker node */ } taskExecution->dataFetchTaskIndex = -1; /* reset data fetch counter */ taskExecution->failureCount++; /* record failure */ } citus-7.0.3/src/backend/distributed/executor/multi_task_tracker_executor.c000066400000000000000000002634431317107136600272000ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_task_tracker_executor.c * * Routines for executing remote tasks as part of a distributed execution plan * using task trackers. These task trackers receive task assignments from this * executor, and they manage task executions on worker nodes. The use of task * trackers brings us two benefits: (a) distributed execution plans can scale * out to many tasks, as the executor no longer needs to keep a connection open * for each task, and (b) distributed execution plans can include map/reduce * execution primitives, which involve writing intermediate results to files. * * Copyright (c) 2013-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include #include #include #include "commands/dbcommands.h" #include "distributed/citus_nodes.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/pg_dist_partition.h" #include "distributed/worker_protocol.h" #include "storage/fd.h" #include "utils/builtins.h" #include "utils/hsearch.h" #include "utils/timestamp.h" int MaxAssignTaskBatchSize = 64; /* maximum number of tasks to assign per round */ int MaxTaskStatusBatchSize = 64; /* maximum number of tasks status checks per round */ /* TaskMapKey is used as a key in task hash */ typedef struct TaskMapKey { TaskType taskType; uint64 jobId; uint32 taskId; } TaskMapKey; /* * TaskMapEntry is used as entry in task hash. We need to keep a pointer * of the task in the entry. */ typedef struct TaskMapEntry { TaskMapKey key; Task *task; } TaskMapEntry; /* Local functions forward declarations to init tasks and trackers */ static List * TaskAndExecutionList(List *jobTaskList); static HTAB * TaskHashCreate(uint32 taskHashSize); static Task * TaskHashEnter(HTAB *taskHash, Task *task); static Task * TaskHashLookup(HTAB *trackerHash, TaskType taskType, uint64 jobId, uint32 taskId); static bool TopLevelTask(Task *task); static bool TransmitExecutionCompleted(TaskExecution *taskExecution); static HTAB * TrackerHash(const char *taskTrackerHashName, List *workerNodeList, char *userName); static HTAB * TrackerHashCreate(const char *taskTrackerHashName, uint32 taskTrackerHashSize); static TaskTracker * TrackerHashEnter(HTAB *taskTrackerHash, char *nodeName, uint32 nodePort); static void TrackerHashConnect(HTAB *taskTrackerHash); static TrackerStatus TrackerConnectPoll(TaskTracker *taskTracker); static TaskTracker * ResolveTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecution); static TaskTracker * ResolveMapTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecution); static TaskTracker * TrackerHashLookup(HTAB *trackerHash, const char *nodeName, uint32 nodePort); /* Local functions forward declarations to manage tasks and their assignments */ static TaskExecStatus ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, Task *task, TaskExecution *taskExecution); static TransmitExecStatus ManageTransmitExecution(TaskTracker *transmitTracker, Task *task, TaskExecution *taskExecution); static bool TaskExecutionsCompleted(List *taskList); static StringInfo MapFetchTaskQueryString(Task *mapFetchTask, Task *mapTask); static void TrackerQueueSqlTask(TaskTracker *taskTracker, Task *task); static void TrackerQueueTask(TaskTracker *taskTracker, Task *task); static StringInfo TaskAssignmentQuery(Task *task, char *queryString); static TaskStatus TrackerTaskStatus(TaskTracker *taskTracker, Task *task); static TrackerTaskState * TrackerTaskStateHashLookup(HTAB *taskStateHash, Task *task); static bool TrackerHealthy(TaskTracker *taskTracker); static void TrackerQueueFileTransmit(TaskTracker *transmitTracker, Task *task); static TrackerTaskState * TaskStateHashEnter(HTAB *taskStateHash, uint64 jobId, uint32 taskId); static int32 TransmitTrackerConnectionId(TaskTracker *transmitTracker, Task *task); /* Local functions forward declarations to manage task failovers */ static List * 
ConstrainedTaskList(List *taskAndExecutionList, Task *task); static List * ConstrainedNonMergeTaskList(List *taskAndExecutionList, Task *task); static List * UpstreamDependencyList(List *taskAndExecutionList, Task *searchedTask); static List * ConstrainedMergeTaskList(List *taskAndExecutionList, Task *task); static List * MergeTaskList(List *taskList); static void ReassignTaskList(List *taskList); static void ReassignMapFetchTaskList(List *mapFetchTaskList); static List * ShardFetchTaskList(List *taskList); /* Local functions forward declarations to manage task trackers */ static void ManageTaskTracker(TaskTracker *taskTracker); static bool TrackerConnectionUp(TaskTracker *taskTracker); static void TrackerReconnectPoll(TaskTracker *taskTracker); static List * AssignQueuedTasks(TaskTracker *taskTracker); static List * TaskStatusBatchList(TaskTracker *taskTracker); static StringInfo TaskStatusBatchQuery(List *taskList); static void ReceiveTaskStatusBatchQueryResponse(TaskTracker *taskTracker); static void ManageTransmitTracker(TaskTracker *transmitTracker); static TrackerTaskState * NextQueuedFileTransmit(HTAB *taskStateHash); /* Local functions forward declarations to clean up tasks */ static List * JobIdList(Job *job); static void TrackerCleanupResources(HTAB *taskTrackerHash, HTAB *transmitTrackerHash, List *jobIdList, List *taskList); static void TrackerHashWaitActiveRequest(HTAB *taskTrackerHash); static void TrackerHashCancelActiveRequest(HTAB *taskTrackerHash); static Task * JobCleanupTask(uint64 jobId); static void TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask); static void TrackerHashDisconnect(HTAB *taskTrackerHash); /* * MultiTaskTrackerExecute loops over given tasks, and manages their execution * until either one task permanently fails or all tasks successfully complete. * The function initializes connections to task trackers on worker nodes, and * executes tasks through assigning them to these trackers. */ void MultiTaskTrackerExecute(Job *job) { List *jobTaskList = job->taskList; List *taskAndExecutionList = NIL; ListCell *taskAndExecutionCell = NULL; uint32 taskTrackerCount = 0; uint32 topLevelTaskCount = 0; uint32 failedTaskId = 0; bool allTasksCompleted = false; bool taskFailed = false; bool taskTransmitFailed = false; bool clusterFailed = false; List *workerNodeList = NIL; HTAB *taskTrackerHash = NULL; HTAB *transmitTrackerHash = NULL; char *extensionOwner = CitusExtensionOwnerName(); const char *taskTrackerHashName = "Task Tracker Hash"; const char *transmitTrackerHashName = "Transmit Tracker Hash"; List *jobIdList = NIL; if (ReadFromSecondaries == USE_SECONDARY_NODES_ALWAYS) { ereport(ERROR, (errmsg("task tracker queries are not allowed while " "citus.use_secondary_nodes is 'always'"), errhint("try setting citus.task_executor_type TO 'real-time'"))); } /* * We walk over the task tree, and create a task execution struct for each * task. We then associate the task with its execution and get back a list. */ taskAndExecutionList = TaskAndExecutionList(jobTaskList); /* * We now count the number of "top level" tasks in the query tree. Once they * complete, we'll need to fetch these tasks' results to the master node. */ foreach(taskAndExecutionCell, taskAndExecutionList) { Task *task = (Task *) lfirst(taskAndExecutionCell); bool topLevelTask = TopLevelTask(task); if (topLevelTask) { topLevelTaskCount++; } } /* * We get the list of worker nodes, and then create two hashes to manage our * connections to these nodes. 
The first hash manages connections used for * assigning and checking the status of tasks. The second (temporary) hash * helps us in fetching results data from worker nodes to the master node. */ workerNodeList = ActivePrimaryNodeList(); taskTrackerCount = (uint32) list_length(workerNodeList); /* connect as the current user for running queries */ taskTrackerHash = TrackerHash(taskTrackerHashName, workerNodeList, NULL); /* connect as the superuser for fetching result files */ transmitTrackerHash = TrackerHash(transmitTrackerHashName, workerNodeList, extensionOwner); TrackerHashConnect(taskTrackerHash); TrackerHashConnect(transmitTrackerHash); /* loop around until all tasks complete, one task fails, or user cancels */ while (!(allTasksCompleted || taskFailed || taskTransmitFailed || clusterFailed || QueryCancelPending)) { TaskTracker *taskTracker = NULL; TaskTracker *transmitTracker = NULL; HASH_SEQ_STATUS taskStatus; HASH_SEQ_STATUS transmitStatus; uint32 completedTransmitCount = 0; uint32 healthyTrackerCount = 0; double acceptableHealthyTrackerCount = 0.0; /* first, loop around all tasks and manage them */ ListCell *taskAndExecutionCell = NULL; foreach(taskAndExecutionCell, taskAndExecutionList) { Task *task = (Task *) lfirst(taskAndExecutionCell); TaskExecution *taskExecution = task->taskExecution; TaskExecStatus taskExecutionStatus = 0; TaskTracker *execTaskTracker = ResolveTaskTracker(taskTrackerHash, task, taskExecution); TaskTracker *mapTaskTracker = ResolveMapTaskTracker(taskTrackerHash, task, taskExecution); Assert(execTaskTracker != NULL); /* call the function that performs the core task execution logic */ taskExecutionStatus = ManageTaskExecution(execTaskTracker, mapTaskTracker, task, taskExecution); /* * If task cannot execute on this task/map tracker, we fail over all * tasks in the same constraint group to the next task/map tracker. */ if (taskExecutionStatus == EXEC_TASK_TRACKER_FAILED) { List *taskList = NIL; TaskTracker *transmitTracker = NULL; /* mark task tracker as failed, in case it isn't marked already */ execTaskTracker->trackerFailureCount = MAX_TRACKER_FAILURE_COUNT; /* * We may have already started to transmit task results to the * master. When we reassign the transmits, we could leave the * transmit tracker in an invalid state. So, we fail it too. */ transmitTracker = ResolveTaskTracker(transmitTrackerHash, task, taskExecution); transmitTracker->trackerFailureCount = MAX_TRACKER_FAILURE_COUNT; taskList = ConstrainedTaskList(taskAndExecutionList, task); ReassignTaskList(taskList); } else if (taskExecutionStatus == EXEC_SOURCE_TASK_TRACKER_FAILED) { List *mapFetchTaskList = NIL; List *mapTaskList = NIL; /* first resolve the map task this map fetch task depends on */ Task *mapTask = (Task *) linitial(task->dependedTaskList); Assert(task->taskType == MAP_OUTPUT_FETCH_TASK); mapFetchTaskList = UpstreamDependencyList(taskAndExecutionList, mapTask); ReassignMapFetchTaskList(mapFetchTaskList); mapTaskList = ConstrainedTaskList(taskAndExecutionList, mapTask); ReassignTaskList(mapTaskList); } /* * If this task permanently failed, we first need to manually clean * out client-side resources for all task executions. We therefore * record the failure here instead of immediately erroring out. 
*/ taskFailed = TaskExecutionFailed(taskExecution); if (taskFailed) { failedTaskId = taskExecution->taskId; break; } } /* second, loop around "top level" tasks to fetch their results */ taskAndExecutionCell = NULL; foreach(taskAndExecutionCell, taskAndExecutionList) { Task *task = (Task *) lfirst(taskAndExecutionCell); TaskExecution *taskExecution = task->taskExecution; TransmitExecStatus transmitExecutionStatus = 0; TaskTracker *execTransmitTracker = NULL; bool transmitCompleted = false; /* * We find the tasks that appear in the top level of the query tree, * and start fetching their results to the master node. */ bool topLevelTask = TopLevelTask(task); if (!topLevelTask) { continue; } execTransmitTracker = ResolveTaskTracker(transmitTrackerHash, task, taskExecution); Assert(execTransmitTracker != NULL); /* call the function that fetches results for completed SQL tasks */ transmitExecutionStatus = ManageTransmitExecution(execTransmitTracker, task, taskExecution); /* * If we cannot transmit SQL task's results to the master, we first * force fail the corresponding task tracker. We then fail over all * tasks in the constraint group to the next task/transmit tracker. */ if (transmitExecutionStatus == EXEC_TRANSMIT_TRACKER_FAILED) { List *taskList = NIL; TaskTracker *taskTracker = NULL; taskTracker = ResolveTaskTracker(taskTrackerHash, task, taskExecution); taskTracker->trackerFailureCount = MAX_TRACKER_FAILURE_COUNT; taskList = ConstrainedTaskList(taskAndExecutionList, task); ReassignTaskList(taskList); } /* if task failed for good, record failure and break out of loop */ taskTransmitFailed = TaskExecutionFailed(taskExecution); if (taskTransmitFailed) { failedTaskId = taskExecution->taskId; break; } transmitCompleted = TransmitExecutionCompleted(taskExecution); if (transmitCompleted) { completedTransmitCount++; } } /* third, loop around task trackers and manage them */ hash_seq_init(&taskStatus, taskTrackerHash); hash_seq_init(&transmitStatus, transmitTrackerHash); taskTracker = (TaskTracker *) hash_seq_search(&taskStatus); while (taskTracker != NULL) { bool trackerHealthy = TrackerHealthy(taskTracker); if (trackerHealthy) { healthyTrackerCount++; } ManageTaskTracker(taskTracker); taskTracker = (TaskTracker *) hash_seq_search(&taskStatus); } transmitTracker = (TaskTracker *) hash_seq_search(&transmitStatus); while (transmitTracker != NULL) { ManageTransmitTracker(transmitTracker); transmitTracker = (TaskTracker *) hash_seq_search(&transmitStatus); } /* if more than half the trackers have failed, mark cluster as failed */ acceptableHealthyTrackerCount = (double) taskTrackerCount / 2.0; if (healthyTrackerCount < acceptableHealthyTrackerCount) { clusterFailed = true; } /* check if we completed execution; otherwise sleep to avoid tight loop */ if (completedTransmitCount == topLevelTaskCount) { allTasksCompleted = true; } else { long sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepIntervalPerCycle); } } /* * We prevent cancel/die interrupts until we issue cleanup requests to task * trackers and close open connections. Note that for the above while loop, * if the user Ctrl+C's a query and we emit a warning before looping to the * beginning of the while loop, we will get canceled away before we can hold * any interrupts. 
*/ HOLD_INTERRUPTS(); jobIdList = JobIdList(job); TrackerCleanupResources(taskTrackerHash, transmitTrackerHash, jobIdList, taskAndExecutionList); RESUME_INTERRUPTS(); /* * If we previously broke out of the execution loop due to a task failure or * user cancellation request, we can now safely emit an error message. */ if (taskFailed) { ereport(ERROR, (errmsg("failed to execute task %u", failedTaskId))); } else if (clusterFailed) { ereport(ERROR, (errmsg("failed to execute task %u", failedTaskId))); } else if (QueryCancelPending) { CHECK_FOR_INTERRUPTS(); } } /* * TaskAndExecutionList visits all tasks in the job tree, starting with the given * job's task list. For each visited task, the function creates a task execution * struct, associates the task execution with the task, and adds the task and its * execution to a list. The function then returns the list. */ static List * TaskAndExecutionList(List *jobTaskList) { List *taskAndExecutionList = NIL; List *taskQueue = NIL; const int topLevelTaskHashSize = 32; int taskHashSize = list_length(jobTaskList) * topLevelTaskHashSize; HTAB *taskHash = TaskHashCreate(taskHashSize); /* * We walk over the task tree using breadth-first search. For the search, we * first queue top level tasks in the task tree. */ taskQueue = list_copy(jobTaskList); while (taskQueue != NIL) { TaskExecution *taskExecution = NULL; List *dependendTaskList = NIL; ListCell *dependedTaskCell = NULL; /* pop first element from the task queue */ Task *task = (Task *) linitial(taskQueue); taskQueue = list_delete_first(taskQueue); /* create task execution and associate it with task */ taskExecution = InitTaskExecution(task, EXEC_TASK_UNASSIGNED); task->taskExecution = taskExecution; taskAndExecutionList = lappend(taskAndExecutionList, task); dependendTaskList = task->dependedTaskList; /* * Push task node's children into the task queue, if and only if * they're not already there. As task dependencies have to form a * directed-acyclic-graph and are processed in a breadth-first search * we can never re-encounter nodes we've already processed. * * While we're checking this, we can also fix the problem that * copyObject() might have duplicated nodes in the graph - if a node * isn't pushed to the graph because it is already planned to be * visited, we can simply replace it with the copy. Note that, here * we only consider dependend tasks. Since currently top level tasks * cannot be on any dependend task list, we do not check them for duplicates. * * taskHash is used to reduce the complexity of keeping track of * the tasks that are already encountered. */ foreach(dependedTaskCell, dependendTaskList) { Task *dependendTask = lfirst(dependedTaskCell); Task *dependendTaskInHash = TaskHashLookup(taskHash, dependendTask->taskType, dependendTask->jobId, dependendTask->taskId); /* * If the dependend task encountered for the first time, add it to the hash. * Also, add this task to the task queue. Note that, we do not need to * add the tasks to the queue which are already encountered, because * they are already added to the queue. */ if (!dependendTaskInHash) { dependendTaskInHash = TaskHashEnter(taskHash, dependendTask); taskQueue = lappend(taskQueue, dependendTaskInHash); } /* update dependedTaskList element to the one which is in the hash */ lfirst(dependedTaskCell) = dependendTaskInHash; } } return taskAndExecutionList; } /* * TaskHashCreate allocates memory for a task hash, initializes an * empty hash, and returns this hash. 
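 *
 * Together with TaskHashEnter() and TaskHashLookup() below, this forms a small
 * lookup API keyed on (taskType, jobId, taskId). A hedged usage sketch:
 *
 *   HTAB *taskHash = TaskHashCreate(32);
 *   TaskHashEnter(taskHash, task);
 *   Task *found = TaskHashLookup(taskHash, task->taskType, task->jobId,
 *                                task->taskId);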
*/ static HTAB * TaskHashCreate(uint32 taskHashSize) { HASHCTL info; const char *taskHashName = "Task Hash"; int hashFlags = 0; HTAB *taskHash = NULL; /* * Can't create a hashtable of size 0. Normally that shouldn't happen, but * shard pruning currently can lead to this (Job with 0 Tasks). See #833. */ if (taskHashSize == 0) { taskHashSize = 2; } memset(&info, 0, sizeof(info)); info.keysize = sizeof(TaskMapKey); info.entrysize = sizeof(TaskMapEntry); info.hash = tag_hash; info.hcxt = CurrentMemoryContext; hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); taskHash = hash_create(taskHashName, taskHashSize, &info, hashFlags); return taskHash; } /* * TaskHashEnter creates a reference to the task entry in the given task * hash. The function errors-out if the same key exists multiple times. */ static Task * TaskHashEnter(HTAB *taskHash, Task *task) { void *hashKey = NULL; TaskMapEntry *taskInTheHash = NULL; bool handleFound = false; TaskMapKey taskKey; memset(&taskKey, 0, sizeof(TaskMapKey)); taskKey.taskType = task->taskType; taskKey.jobId = task->jobId; taskKey.taskId = task->taskId; hashKey = (void *) &taskKey; taskInTheHash = (TaskMapEntry *) hash_search(taskHash, hashKey, HASH_ENTER, &handleFound); /* if same node appears twice, we error-out */ if (handleFound) { ereport(ERROR, (errmsg("multiple entries for task: \"%d:%ld:%d\"", task->taskType, task->jobId, task->taskId))); } /* save the pointer to the original task in the hash */ taskInTheHash->task = task; return task; } /* * TaskHashLookup looks for the tasks that corresponds to the given * taskType, jobId and taskId, and returns the found task, NULL otherwise. */ static Task * TaskHashLookup(HTAB *taskHash, TaskType taskType, uint64 jobId, uint32 taskId) { TaskMapEntry *taskEntry = NULL; Task *task = NULL; void *hashKey = NULL; bool handleFound = false; TaskMapKey taskKey; memset(&taskKey, 0, sizeof(TaskMapKey)); taskKey.taskType = taskType; taskKey.jobId = jobId; taskKey.taskId = taskId; hashKey = (void *) &taskKey; taskEntry = (TaskMapEntry *) hash_search(taskHash, hashKey, HASH_FIND, &handleFound); if (taskEntry != NULL) { task = taskEntry->task; } return task; } /* * TopLevelTask checks if the given task appears at the top level of the task * tree. In doing this, the function assumes the physical planner creates SQL * tasks only for the top level job. */ static bool TopLevelTask(Task *task) { bool topLevelTask = false; /* * SQL tasks can only appear at the top level in our query tree. Further, no * other task type can appear at the top level in our tree. */ if (task->taskType == SQL_TASK) { topLevelTask = true; } return topLevelTask; } /* Determines if the given transmit task successfully completed executing. */ static bool TransmitExecutionCompleted(TaskExecution *taskExecution) { bool completed = false; uint32 nodeIndex = 0; for (nodeIndex = 0; nodeIndex < taskExecution->nodeCount; nodeIndex++) { TransmitExecStatus *transmitStatusArray = taskExecution->transmitStatusArray; TransmitExecStatus transmitStatus = transmitStatusArray[nodeIndex]; if (transmitStatus == EXEC_TRANSMIT_DONE) { completed = true; break; } } return completed; } /* * TrackerHash creates a task tracker hash with the given name. The function * then inserts one task tracker entry for each node in the given worker node * list, and initializes state for each task tracker. The userName argument * indicates which user to connect as. 
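 *
 * A hedged example of how MultiTaskTrackerExecute() above uses this function
 * (the hash name here is illustrative):
 *
 *   List *workerNodeList = ActivePrimaryNodeList();
 *   HTAB *trackerHash = TrackerHash("Task Tracker Hash", workerNodeList, NULL);
 *   TrackerHashConnect(trackerHash);
 *
 * Passing NULL as userName makes the trackers connect as the current user;
 * the transmit tracker hash instead passes the extension owner's name so that
 * result files can be fetched as the superuser.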
*/ static HTAB * TrackerHash(const char *taskTrackerHashName, List *workerNodeList, char *userName) { /* create task tracker hash */ uint32 taskTrackerHashSize = list_length(workerNodeList); HTAB *taskTrackerHash = TrackerHashCreate(taskTrackerHashName, taskTrackerHashSize); ListCell *workerNodeCell = NULL; foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; TaskTracker *taskTracker = NULL; char taskStateHashName[MAXPGPATH]; HTAB *taskStateHash = NULL; uint32 taskStateCount = 32; int hashFlags = 0; HASHCTL info; /* insert task tracker into the tracker hash */ taskTracker = TrackerHashEnter(taskTrackerHash, nodeName, nodePort); /* for each task tracker, create hash to track its assigned tasks */ snprintf(taskStateHashName, MAXPGPATH, "Task Tracker \"%s:%u\" Task State Hash", nodeName, nodePort); memset(&info, 0, sizeof(info)); info.keysize = sizeof(uint64) + sizeof(uint32); info.entrysize = sizeof(TrackerTaskState); info.hash = tag_hash; info.hcxt = CurrentMemoryContext; hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); taskStateHash = hash_create(taskStateHashName, taskStateCount, &info, hashFlags); if (taskStateHash == NULL) { ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("could not initialize %s", taskStateHashName))); } taskTracker->taskStateHash = taskStateHash; taskTracker->userName = userName; } return taskTrackerHash; } /* * TrackerHashCreate allocates memory for a task tracker hash, initializes an * empty hash, and returns this hash. */ static HTAB * TrackerHashCreate(const char *taskTrackerHashName, uint32 taskTrackerHashSize) { HASHCTL info; int hashFlags = 0; HTAB *taskTrackerHash = NULL; memset(&info, 0, sizeof(info)); info.keysize = WORKER_LENGTH + sizeof(uint32); info.entrysize = sizeof(TaskTracker); info.hash = tag_hash; info.hcxt = CurrentMemoryContext; hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); taskTrackerHash = hash_create(taskTrackerHashName, taskTrackerHashSize, &info, hashFlags); if (taskTrackerHash == NULL) { ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("could not initialize task tracker hash"))); } return taskTrackerHash; } /* * TrackerHashEnter creates a new task tracker entry in the given task tracker * hash, and checks that the task tracker entry has been properly created. Note * that the caller still needs to set the tracker's task state hash field. 
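 *
 * A minimal sketch of the expected call pattern (TrackerHash() above is the
 * actual caller):
 *
 *   TaskTracker *tracker = TrackerHashEnter(taskTrackerHash, nodeName, nodePort);
 *   tracker->taskStateHash = taskStateHash;
 *   tracker->userName = userName;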
*/ static TaskTracker * TrackerHashEnter(HTAB *taskTrackerHash, char *nodeName, uint32 nodePort) { TaskTracker *taskTracker = NULL; void *hashKey = NULL; bool handleFound = false; TaskTracker taskTrackerKey; memset(&taskTrackerKey, 0, sizeof(TaskTracker)); strlcpy(taskTrackerKey.workerName, nodeName, WORKER_LENGTH); taskTrackerKey.workerPort = nodePort; hashKey = (void *) &taskTrackerKey; taskTracker = (TaskTracker *) hash_search(taskTrackerHash, hashKey, HASH_ENTER, &handleFound); /* if same node appears twice, we overwrite previous entry */ if (handleFound) { ereport(WARNING, (errmsg("multiple entries for task tracker: \"%s:%u\"", nodeName, nodePort))); } /* init task tracker object with zeroed out task tracker key */ memcpy(taskTracker, &taskTrackerKey, sizeof(TaskTracker)); taskTracker->trackerStatus = TRACKER_CONNECT_START; taskTracker->connectionId = INVALID_CONNECTION_ID; taskTracker->currentTaskIndex = -1; return taskTracker; } /* * TrackerHashConnect walks over each task tracker in the given hash and tries * to open an asynchronous connection to it. The function then returns when we * tried connecting to all task trackers and have either succeeded or failed for * each one of them. */ static void TrackerHashConnect(HTAB *taskTrackerHash) { uint32 taskTrackerCount = (uint32) hash_get_num_entries(taskTrackerHash); uint32 triedTrackerCount = 0; /* loop until we tried to connect to all task trackers */ while (triedTrackerCount < taskTrackerCount) { TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; long sleepIntervalPerCycle = 0; /* loop over the task tracker hash, and poll all trackers again */ triedTrackerCount = 0; hash_seq_init(&status, taskTrackerHash); taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { TrackerStatus trackerStatus = TrackerConnectPoll(taskTracker); if (trackerStatus == TRACKER_CONNECTED || trackerStatus == TRACKER_CONNECTION_FAILED) { triedTrackerCount++; } taskTracker = (TaskTracker *) hash_seq_search(&status); } /* sleep to avoid tight loop */ sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepIntervalPerCycle); } } /* * TrackerConnectPoll opens an asynchronous connection to the given task tracker * and polls this connection's status on every call. The function also sets task * tracker's internal state on success, and returns the most recent status for * the connection. 
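 *
 * Callers are expected to poll until a terminal status is reached, roughly as
 * TrackerHashConnect() above does (illustrative sketch only):
 *
 *   TrackerStatus status = TrackerConnectPoll(taskTracker);
 *   while (status != TRACKER_CONNECTED && status != TRACKER_CONNECTION_FAILED)
 *   {
 *       pg_usleep(RemoteTaskCheckInterval * 1000L);
 *       status = TrackerConnectPoll(taskTracker);
 *   }
 *
 * The real loop differs in that it polls every tracker in the hash per cycle
 * rather than blocking on a single tracker.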
*/ static TrackerStatus TrackerConnectPoll(TaskTracker *taskTracker) { switch (taskTracker->trackerStatus) { case TRACKER_CONNECT_START: { char *nodeName = taskTracker->workerName; uint32 nodePort = taskTracker->workerPort; char *nodeDatabase = get_database_name(MyDatabaseId); char *nodeUser = taskTracker->userName; int32 connectionId = MultiClientConnectStart(nodeName, nodePort, nodeDatabase, nodeUser); if (connectionId != INVALID_CONNECTION_ID) { taskTracker->connectionId = connectionId; taskTracker->trackerStatus = TRACKER_CONNECT_POLL; } else { taskTracker->trackerStatus = TRACKER_CONNECTION_FAILED; } break; } case TRACKER_CONNECT_POLL: { int32 connectionId = taskTracker->connectionId; ConnectStatus pollStatus = MultiClientConnectPoll(connectionId); if (pollStatus == CLIENT_CONNECTION_READY) { taskTracker->trackerStatus = TRACKER_CONNECTED; } else if (pollStatus == CLIENT_CONNECTION_BUSY || pollStatus == CLIENT_CONNECTION_BUSY_READ || pollStatus == CLIENT_CONNECTION_BUSY_WRITE) { taskTracker->trackerStatus = TRACKER_CONNECT_POLL; } else if (pollStatus == CLIENT_CONNECTION_BAD) { taskTracker->trackerStatus = TRACKER_CONNECTION_FAILED; MultiClientDisconnect(connectionId); taskTracker->connectionId = INVALID_CONNECTION_ID; } /* now check if we have been trying to connect for too long */ taskTracker->connectPollCount++; if (pollStatus == CLIENT_CONNECTION_BUSY_READ || pollStatus == CLIENT_CONNECTION_BUSY_WRITE) { uint32 maxCount = ceil(NodeConnectionTimeout * 1.0f / RemoteTaskCheckInterval); uint32 currentCount = taskTracker->connectPollCount; if (currentCount >= maxCount) { ereport(WARNING, (errmsg("could not establish asynchronous " "connection after %u ms", NodeConnectionTimeout))); taskTracker->trackerStatus = TRACKER_CONNECTION_FAILED; MultiClientDisconnect(connectionId); taskTracker->connectionId = INVALID_CONNECTION_ID; } } break; } case TRACKER_CONNECTED: case TRACKER_CONNECTION_FAILED: { /* if connected or failed to connect in previous pass, reset poll count */ taskTracker->connectPollCount = 0; break; } default: { int trackerStatus = (int) taskTracker->trackerStatus; ereport(FATAL, (errmsg("invalid task tracker status: %d", trackerStatus))); break; } } return taskTracker->trackerStatus; } /* * ResolveTaskTracker is a helper function that resolves the task tracker from * the given task and task execution. The function first finds the worker node * the given task is scheduled to, and resolves the corresponding task tracker. */ static TaskTracker * ResolveTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecution) { List *taskPlacementList = task->taskPlacementList; uint32 currentIndex = taskExecution->currentNodeIndex; ShardPlacement *taskPlacement = list_nth(taskPlacementList, currentIndex); char *nodeName = taskPlacement->nodeName; uint32 nodePort = taskPlacement->nodePort; /* look up in the tracker hash for the found node name/port */ TaskTracker *taskTracker = TrackerHashLookup(trackerHash, nodeName, nodePort); Assert(taskTracker != NULL); return taskTracker; } /* * ResolveMapTaskTracker is a helper function that finds the downstream map task * dependency from the given task, and then resolves the task tracker for this * map task. 
*/ static TaskTracker * ResolveMapTaskTracker(HTAB *trackerHash, Task *task, TaskExecution *taskExecution) { TaskTracker *mapTaskTracker = NULL; Task *mapTask = NULL; TaskExecution *mapTaskExecution = NULL; /* we only resolve source (map) task tracker for map output fetch tasks */ if (task->taskType != MAP_OUTPUT_FETCH_TASK) { return NULL; } Assert(task->dependedTaskList != NIL); mapTask = (Task *) linitial(task->dependedTaskList); mapTaskExecution = mapTask->taskExecution; mapTaskTracker = ResolveTaskTracker(trackerHash, mapTask, mapTaskExecution); Assert(mapTaskTracker != NULL); return mapTaskTracker; } /* * TrackerHashLookup looks for the task tracker that corresponds to the given * node name and port number, and returns the found task tracker if any. */ static TaskTracker * TrackerHashLookup(HTAB *trackerHash, const char *nodeName, uint32 nodePort) { TaskTracker *taskTracker = NULL; void *hashKey = NULL; bool handleFound = false; TaskTracker taskTrackerKey; memset(taskTrackerKey.workerName, 0, WORKER_LENGTH); strlcpy(taskTrackerKey.workerName, nodeName, WORKER_LENGTH); taskTrackerKey.workerPort = nodePort; hashKey = (void *) &taskTrackerKey; taskTracker = (TaskTracker *) hash_search(trackerHash, hashKey, HASH_FIND, &handleFound); if (taskTracker == NULL || !handleFound) { ereport(ERROR, (errmsg("could not find task tracker for node \"%s:%u\"", nodeName, nodePort))); } return taskTracker; } /* * ManageTaskExecution manages all execution logic for the given task. For this, * the function checks if the task's downstream dependencies have completed. If * they have, the function assigns the task to the task tracker proxy object, * and regularly checks the task's execution status. * * If the task completes, the function changes task's status. Else if the task * observes a connection related failure, the function retries the task on the * same task tracker. Else if the task tracker isn't considered as healthy, the * function signals to the caller that the task needs to be assigned to another * task tracker. */ static TaskExecStatus ManageTaskExecution(TaskTracker *taskTracker, TaskTracker *sourceTaskTracker, Task *task, TaskExecution *taskExecution) { TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; uint32 currentNodeIndex = taskExecution->currentNodeIndex; uint32 nextNodeIndex = 0; TaskExecStatus currentExecutionStatus = taskStatusArray[currentNodeIndex]; TaskExecStatus nextExecutionStatus = EXEC_TASK_INVALID_FIRST; switch (currentExecutionStatus) { case EXEC_TASK_UNASSIGNED: { bool taskExecutionsCompleted = true; TaskType taskType = TASK_TYPE_INVALID_FIRST; bool trackerHealthy = TrackerHealthy(taskTracker); if (!trackerHealthy) { nextExecutionStatus = EXEC_TASK_TRACKER_FAILED; break; } /* * We first retrieve this task's downstream dependencies, and then check * if these dependencies' executions have completed. 
*/ taskExecutionsCompleted = TaskExecutionsCompleted(task->dependedTaskList); if (!taskExecutionsCompleted) { nextExecutionStatus = EXEC_TASK_UNASSIGNED; break; } /* if map fetch task, create query string from completed map task */ taskType = task->taskType; if (taskType == MAP_OUTPUT_FETCH_TASK) { StringInfo mapFetchTaskQueryString = NULL; Task *mapTask = (Task *) linitial(task->dependedTaskList); TaskExecution *mapTaskExecution = mapTask->taskExecution; mapFetchTaskQueryString = MapFetchTaskQueryString(task, mapTask); task->queryString = mapFetchTaskQueryString->data; taskExecution->querySourceNodeIndex = mapTaskExecution->currentNodeIndex; } /* * We finally queue this task for execution. Note that we queue sql and * other tasks slightly differently. */ if (taskType == SQL_TASK) { TrackerQueueSqlTask(taskTracker, task); } else { TrackerQueueTask(taskTracker, task); } nextExecutionStatus = EXEC_TASK_QUEUED; break; } case EXEC_TASK_QUEUED: { TaskStatus remoteTaskStatus = TASK_STATUS_INVALID_FIRST; bool trackerHealthy = TrackerHealthy(taskTracker); if (!trackerHealthy) { nextExecutionStatus = EXEC_TASK_TRACKER_FAILED; break; } remoteTaskStatus = TrackerTaskStatus(taskTracker, task); if (remoteTaskStatus == TASK_SUCCEEDED) { nextExecutionStatus = EXEC_TASK_DONE; } else if (remoteTaskStatus == TASK_CLIENT_SIDE_ASSIGN_FAILED || remoteTaskStatus == TASK_CLIENT_SIDE_STATUS_FAILED) { nextExecutionStatus = EXEC_TASK_TRACKER_RETRY; } else if (remoteTaskStatus == TASK_PERMANENTLY_FAILED) { /* * If a map output fetch task failed, we assume the problem lies with * the map task (and the source task tracker it runs on). Otherwise, * we assume the task tracker crashed, and fail over to the next task * tracker. */ if (task->taskType == MAP_OUTPUT_FETCH_TASK) { nextExecutionStatus = EXEC_SOURCE_TASK_TRACKER_RETRY; } else { nextExecutionStatus = EXEC_TASK_TRACKER_FAILED; } } else { /* assume task is still in progress */ nextExecutionStatus = EXEC_TASK_QUEUED; } break; } case EXEC_TASK_TRACKER_RETRY: { bool trackerHealthy = false; bool trackerConnectionUp = false; /* * This case statement usually handles connection related issues. Some * edge cases however, like a user sending a SIGTERM to the worker node, * keep the connection open but disallow task assignments. We therefore * need to track those as intermittent tracker failures here. */ trackerConnectionUp = TrackerConnectionUp(taskTracker); if (trackerConnectionUp) { taskTracker->trackerFailureCount++; } trackerHealthy = TrackerHealthy(taskTracker); if (trackerHealthy) { TaskStatus remoteTaskStatus = TrackerTaskStatus(taskTracker, task); if (remoteTaskStatus == TASK_CLIENT_SIDE_ASSIGN_FAILED) { nextExecutionStatus = EXEC_TASK_UNASSIGNED; } else if (remoteTaskStatus == TASK_CLIENT_SIDE_STATUS_FAILED) { nextExecutionStatus = EXEC_TASK_QUEUED; } } else { nextExecutionStatus = EXEC_TASK_TRACKER_FAILED; } break; } case EXEC_SOURCE_TASK_TRACKER_RETRY: { Task *mapTask = (Task *) linitial(task->dependedTaskList); TaskExecution *mapTaskExecution = mapTask->taskExecution; uint32 sourceNodeIndex = mapTaskExecution->currentNodeIndex; bool sourceTrackerHealthy = false; Assert(sourceTaskTracker != NULL); Assert(task->taskType == MAP_OUTPUT_FETCH_TASK); /* * As this map fetch task was running, another map fetch that depends on * another map task might have failed. We would have then reassigned the * map task and potentially other map tasks in its constraint group. So * this map fetch's source node might have changed underneath us. 
If it * did, we don't want to record a failure for the new source tracker. */ if (taskExecution->querySourceNodeIndex == sourceNodeIndex) { bool sourceTrackerConnectionUp = TrackerConnectionUp(sourceTaskTracker); if (sourceTrackerConnectionUp) { sourceTaskTracker->trackerFailureCount++; } } sourceTrackerHealthy = TrackerHealthy(sourceTaskTracker); if (sourceTrackerHealthy) { /* * We change our status to unassigned. In that status, we queue an * "update map fetch task" on the task tracker, and retry fetching * the map task's output from the same source node. */ nextExecutionStatus = EXEC_TASK_UNASSIGNED; } else { nextExecutionStatus = EXEC_SOURCE_TASK_TRACKER_FAILED; } break; } case EXEC_TASK_TRACKER_FAILED: case EXEC_SOURCE_TASK_TRACKER_FAILED: { /* * These two cases exist to signal to the caller that we failed. In both * cases, the caller is responsible for reassigning task(s) and running * the appropriate recovery logic. */ nextExecutionStatus = EXEC_TASK_UNASSIGNED; break; } case EXEC_TASK_DONE: { /* we are done with this task's execution */ nextExecutionStatus = EXEC_TASK_DONE; break; } default: { /* we fatal here to avoid leaking client-side resources */ ereport(FATAL, (errmsg("invalid execution status: %d", currentExecutionStatus))); break; } } /* update task execution's status for most recent task tracker */ nextNodeIndex = taskExecution->currentNodeIndex; taskStatusArray[nextNodeIndex] = nextExecutionStatus; return nextExecutionStatus; } /* * ManageTransmitExecution manages logic to fetch the results of the given SQL * task to the master node. For this, the function checks if the given SQL task * has completed. If it has, the function starts the copy out protocol to fetch * the task's results and write them to the local filesystem. When the transmit * completes or fails, the function notes that by changing the transmit status. 
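 *
 * As a summary of the state machine below, a successful transmit typically
 * moves through
 *
 *   EXEC_TRANSMIT_UNASSIGNED -> EXEC_TRANSMIT_QUEUED ->
 *   EXEC_TRANSMIT_COPYING -> EXEC_TRANSMIT_DONE
 *
 * while EXEC_TRANSMIT_TRACKER_RETRY and EXEC_TRANSMIT_TRACKER_FAILED cover the
 * retry and failover paths.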
*/ static TransmitExecStatus ManageTransmitExecution(TaskTracker *transmitTracker, Task *task, TaskExecution *taskExecution) { int32 *fileDescriptorArray = taskExecution->fileDescriptorArray; uint32 currentNodeIndex = taskExecution->currentNodeIndex; uint32 nextNodeIndex = 0; TransmitExecStatus *transmitStatusArray = taskExecution->transmitStatusArray; TransmitExecStatus currentTransmitStatus = transmitStatusArray[currentNodeIndex]; TransmitExecStatus nextTransmitStatus = EXEC_TRANSMIT_INVALID_FIRST; Assert(task->taskType == SQL_TASK); switch (currentTransmitStatus) { case EXEC_TRANSMIT_UNASSIGNED: { TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; TaskExecStatus currentExecutionStatus = taskStatusArray[currentNodeIndex]; bool trackerHealthy = false; /* if top level task's in progress, nothing to do */ if (currentExecutionStatus != EXEC_TASK_DONE) { nextTransmitStatus = EXEC_TRANSMIT_UNASSIGNED; break; } trackerHealthy = TrackerHealthy(transmitTracker); if (!trackerHealthy) { nextTransmitStatus = EXEC_TRANSMIT_TRACKER_FAILED; break; } TrackerQueueFileTransmit(transmitTracker, task); nextTransmitStatus = EXEC_TRANSMIT_QUEUED; break; } case EXEC_TRANSMIT_QUEUED: { QueryStatus queryStatus = CLIENT_INVALID_QUERY; int32 connectionId = INVALID_CONNECTION_ID; TaskStatus taskStatus = TASK_STATUS_INVALID_FIRST; bool trackerHealthy = TrackerHealthy(transmitTracker); if (!trackerHealthy) { nextTransmitStatus = EXEC_TRANSMIT_TRACKER_FAILED; break; } taskStatus = TrackerTaskStatus(transmitTracker, task); if (taskStatus == TASK_FILE_TRANSMIT_QUEUED) { /* remain in queued status until tracker assigns this task */ nextTransmitStatus = EXEC_TRANSMIT_QUEUED; break; } else if (taskStatus == TASK_CLIENT_SIDE_TRANSMIT_FAILED) { nextTransmitStatus = EXEC_TRANSMIT_TRACKER_RETRY; break; } /* the open connection belongs to this task */ connectionId = TransmitTrackerConnectionId(transmitTracker, task); Assert(connectionId != INVALID_CONNECTION_ID); Assert(taskStatus == TASK_ASSIGNED); /* start copy protocol */ queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus == CLIENT_QUERY_COPY) { StringInfo jobDirectoryName = MasterJobDirectoryName(task->jobId); StringInfo taskFilename = TaskFilename(jobDirectoryName, task->taskId); char *filename = taskFilename->data; int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); int fileMode = (S_IRUSR | S_IWUSR); int32 fileDescriptor = BasicOpenFile(filename, fileFlags, fileMode); if (fileDescriptor >= 0) { /* * All files inside the job directory get automatically cleaned * up on transaction commit or abort. */ fileDescriptorArray[currentNodeIndex] = fileDescriptor; nextTransmitStatus = EXEC_TRANSMIT_COPYING; } else { ereport(WARNING, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", filename))); nextTransmitStatus = EXEC_TRANSMIT_TRACKER_RETRY; } } else { nextTransmitStatus = EXEC_TRANSMIT_TRACKER_RETRY; } /* * We use task tracker logic to manage file transmits as well, but that * abstraction starts to leak after we drop into the copy protocol. To * make our task tracker logic work, we need to "void" the tracker's * connection if the transmit task failed in here. 
*/ if (nextTransmitStatus == EXEC_TRANSMIT_TRACKER_RETRY) { transmitTracker->connectionBusy = false; transmitTracker->connectionBusyOnTask = NULL; } break; } case EXEC_TRANSMIT_COPYING: { int32 fileDescriptor = fileDescriptorArray[currentNodeIndex]; CopyStatus copyStatus = CLIENT_INVALID_COPY; int closed = -1; /* the open connection belongs to this task */ int32 connectionId = TransmitTrackerConnectionId(transmitTracker, task); Assert(connectionId != INVALID_CONNECTION_ID); copyStatus = MultiClientCopyData(connectionId, fileDescriptor); if (copyStatus == CLIENT_COPY_MORE) { /* worker node continues to send more data, keep reading */ nextTransmitStatus = EXEC_TRANSMIT_COPYING; break; } /* we are done copying data */ if (copyStatus == CLIENT_COPY_DONE) { closed = close(fileDescriptor); fileDescriptorArray[currentNodeIndex] = -1; if (closed >= 0) { nextTransmitStatus = EXEC_TRANSMIT_DONE; } else { ereport(WARNING, (errcode_for_file_access(), errmsg("could not close copied file: %m"))); nextTransmitStatus = EXEC_TRANSMIT_TRACKER_RETRY; } } else if (copyStatus == CLIENT_COPY_FAILED) { nextTransmitStatus = EXEC_TRANSMIT_TRACKER_RETRY; closed = close(fileDescriptor); fileDescriptorArray[currentNodeIndex] = -1; if (closed < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not close copy file: %m"))); } } /* * We use task tracker logic to manage file transmits as well, but that * abstraction leaks after we drop into the copy protocol. To make it * work, we reset transmit tracker's connection for next file transmit. */ transmitTracker->connectionBusy = false; transmitTracker->connectionBusyOnTask = NULL; break; } case EXEC_TRANSMIT_TRACKER_RETRY: { bool trackerHealthy = false; bool trackerConnectionUp = false; /* * The task tracker proxy handles connection errors. On the off chance * that our connection is still up and the transmit tracker misbehaved, * we capture this as an intermittent tracker failure. */ trackerConnectionUp = TrackerConnectionUp(transmitTracker); if (trackerConnectionUp) { transmitTracker->trackerFailureCount++; } trackerHealthy = TrackerHealthy(transmitTracker); if (trackerHealthy) { nextTransmitStatus = EXEC_TRANSMIT_UNASSIGNED; } else { nextTransmitStatus = EXEC_TRANSMIT_TRACKER_FAILED; } break; } case EXEC_TRANSMIT_TRACKER_FAILED: { /* * This case exists to signal to the caller that we failed. The caller * is now responsible for reassigning the transmit task (and downstream * SQL task dependencies) and running the appropriate recovery logic. */ nextTransmitStatus = EXEC_TRANSMIT_UNASSIGNED; break; } case EXEC_TRANSMIT_DONE: { /* we are done with fetching task results to the master node */ nextTransmitStatus = EXEC_TRANSMIT_DONE; break; } default: { /* we fatal here to avoid leaking client-side resources */ ereport(FATAL, (errmsg("invalid transmit status: %d", currentTransmitStatus))); break; } } /* update file transmit status for most recent transmit tracker */ nextNodeIndex = taskExecution->currentNodeIndex; transmitStatusArray[nextNodeIndex] = nextTransmitStatus; return nextTransmitStatus; } /* * TaskExecutionsCompleted checks if all task executions in the given task list * have completed. If they have, the function returns true. Note that this * function takes the list of tasks as an optimization over separately * extracting a list of task executions, but it should only operate on task * executions to preserve the abstraction. 
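 *
 * Hedged usage sketch (mirroring the dependency check in ManageTaskExecution()
 * above): a task stays in EXEC_TASK_UNASSIGNED until
 *
 *   TaskExecutionsCompleted(task->dependedTaskList)
 *
 * returns true, that is, until every depended task has reached EXEC_TASK_DONE.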
*/ static bool TaskExecutionsCompleted(List *taskList) { bool taskExecutionsComplete = true; ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); TaskExecution *taskExecution = task->taskExecution; uint32 nodeIndex = taskExecution->currentNodeIndex; TaskExecStatus taskStatus = taskExecution->taskStatusArray[nodeIndex]; if (taskStatus != EXEC_TASK_DONE) { taskExecutionsComplete = false; break; } } return taskExecutionsComplete; } /* * MapFetchTaskQueryString constructs the map fetch query string from the given * map output fetch task and its downstream map task dependency. The constructed * query string allows fetching the map task's partitioned output file from the * worker node it's created to the worker node that will execute the merge task. */ static StringInfo MapFetchTaskQueryString(Task *mapFetchTask, Task *mapTask) { StringInfo mapFetchQueryString = NULL; uint32 partitionFileId = mapFetchTask->partitionId; uint32 mergeTaskId = mapFetchTask->upstreamTaskId; /* find the node name/port for map task's execution */ List *mapTaskPlacementList = mapTask->taskPlacementList; TaskExecution *mapTaskExecution = mapTask->taskExecution; uint32 currentIndex = mapTaskExecution->currentNodeIndex; ShardPlacement *mapTaskPlacement = list_nth(mapTaskPlacementList, currentIndex); char *mapTaskNodeName = mapTaskPlacement->nodeName; uint32 mapTaskNodePort = mapTaskPlacement->nodePort; Assert(mapFetchTask->taskType == MAP_OUTPUT_FETCH_TASK); Assert(mapTask->taskType == MAP_TASK); mapFetchQueryString = makeStringInfo(); appendStringInfo(mapFetchQueryString, MAP_OUTPUT_FETCH_COMMAND, mapTask->jobId, mapTask->taskId, partitionFileId, mergeTaskId, /* fetch results to merge task */ mapTaskNodeName, mapTaskNodePort); return mapFetchQueryString; } /* * TrackerQueueSqlTask wraps a copy out command around the given task's query, * creates a task assignment query from this copy out command, and then queues * this assignment query in the given tracker's internal hash. The queued query * will be assigned to the remote task tracker at a later time. */ static void TrackerQueueSqlTask(TaskTracker *taskTracker, Task *task) { HTAB *taskStateHash = taskTracker->taskStateHash; TrackerTaskState *taskState = NULL; StringInfo taskAssignmentQuery = NULL; /* * We first wrap a copy out command around the original query string. This * allows for the query's results to persist on the worker node after the * query completes and for the executor to later use this persisted data. */ StringInfo jobDirectoryName = JobDirectoryName(task->jobId); StringInfo taskFilename = TaskFilename(jobDirectoryName, task->taskId); StringInfo copyQueryString = makeStringInfo(); if (BinaryMasterCopyFormat) { appendStringInfo(copyQueryString, COPY_QUERY_TO_FILE_BINARY, task->queryString, taskFilename->data); } else { appendStringInfo(copyQueryString, COPY_QUERY_TO_FILE_TEXT, task->queryString, taskFilename->data); } /* wrap a task assignment query outside the copy out query */ taskAssignmentQuery = TaskAssignmentQuery(task, copyQueryString->data); taskState = TaskStateHashEnter(taskStateHash, task->jobId, task->taskId); taskState->status = TASK_CLIENT_SIDE_QUEUED; taskState->taskAssignmentQuery = taskAssignmentQuery; } /* * TrackerQueueTask creates a task assignment query from the given task's query * string, and then queues this assignment query in the given tracker's internal * hash. The queued query will be assigned to the remote task tracker at a later * time. 
*/ static void TrackerQueueTask(TaskTracker *taskTracker, Task *task) { HTAB *taskStateHash = taskTracker->taskStateHash; TrackerTaskState *taskState = NULL; StringInfo taskAssignmentQuery = NULL; /* wrap a task assignment query outside the original query */ taskAssignmentQuery = TaskAssignmentQuery(task, task->queryString); taskState = TaskStateHashEnter(taskStateHash, task->jobId, task->taskId); taskState->status = TASK_CLIENT_SIDE_QUEUED; taskState->taskAssignmentQuery = taskAssignmentQuery; } /* * TaskAssignmentQuery escapes the given query string with quotes, and wraps * this escaped query string inside a task assignment command. This way, the * query can be assigned to the remote task tracker. */ static StringInfo TaskAssignmentQuery(Task *task, char *queryString) { StringInfo taskAssignmentQuery = NULL; /* quote the original query as a string literal */ char *escapedQueryString = quote_literal_cstr(queryString); taskAssignmentQuery = makeStringInfo(); appendStringInfo(taskAssignmentQuery, TASK_ASSIGNMENT_QUERY, task->jobId, task->taskId, escapedQueryString); return taskAssignmentQuery; } /* * TrackerTaskStatus returns the remote execution status of the given task. Note * that the task must have already been queued with the task tracker for status * checking to happen. */ static TaskStatus TrackerTaskStatus(TaskTracker *taskTracker, Task *task) { HTAB *taskStateHash = taskTracker->taskStateHash; TrackerTaskState *taskState = TrackerTaskStateHashLookup(taskStateHash, task); if (taskState == NULL) { const char *nodeName = taskTracker->workerName; uint32 nodePort = taskTracker->workerPort; ereport(ERROR, (errmsg("could not find task state for job " UINT64_FORMAT " and task %u", task->jobId, task->taskId), errdetail("Task tracker: \"%s:%u\"", nodeName, nodePort))); } return taskState->status; } /* * TrackerTaskStateHashLookup looks for the task state entry for the given task * in the task tracker's state hash. The function then returns the found task * state entry, if any. */ static TrackerTaskState * TrackerTaskStateHashLookup(HTAB *taskStateHash, Task *task) { TrackerTaskState *taskState = NULL; void *hashKey = NULL; bool handleFound = false; TrackerTaskState taskStateKey; taskStateKey.jobId = task->jobId; taskStateKey.taskId = task->taskId; hashKey = (void *) &taskStateKey; taskState = (TrackerTaskState *) hash_search(taskStateHash, hashKey, HASH_FIND, &handleFound); return taskState; } /* Checks if the given task tracker is considered as healthy. */ static bool TrackerHealthy(TaskTracker *taskTracker) { bool trackerHealthy = false; if (taskTracker->trackerFailureCount < MAX_TRACKER_FAILURE_COUNT && taskTracker->connectionFailureCount < MAX_TRACKER_FAILURE_COUNT) { trackerHealthy = true; } return trackerHealthy; } /* * TrackerQueueFileTransmit queues a file transmit request in the given task * tracker's internal hash. The queued request will be served at a later time. */ static void TrackerQueueFileTransmit(TaskTracker *transmitTracker, Task *task) { HTAB *transmitStateHash = transmitTracker->taskStateHash; TrackerTaskState *transmitState = NULL; transmitState = TaskStateHashEnter(transmitStateHash, task->jobId, task->taskId); transmitState->status = TASK_FILE_TRANSMIT_QUEUED; } /* * TaskStateHashEnter creates a new task state entry in the given task state * hash, and checks that the task entry has been properly created. 
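 *
 * The hash key is the (jobId, taskId) pair. Entering the same pair twice
 * overwrites the earlier entry, which is reported at DEBUG1 rather than
 * treated as an error.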
*/ static TrackerTaskState * TaskStateHashEnter(HTAB *taskStateHash, uint64 jobId, uint32 taskId) { TrackerTaskState *taskState = NULL; void *hashKey = NULL; bool handleFound = false; TrackerTaskState taskStateKey; taskStateKey.jobId = jobId; taskStateKey.taskId = taskId; hashKey = (void *) &taskStateKey; taskState = (TrackerTaskState *) hash_search(taskStateHash, hashKey, HASH_ENTER, &handleFound); /* if same task queued twice, we overwrite previous entry */ if (handleFound) { ereport(DEBUG1, (errmsg("multiple task state entries for job " UINT64_FORMAT " and task %u", jobId, taskId))); } /* init task state object */ taskState->status = TASK_STATUS_INVALID_FIRST; taskState->taskAssignmentQuery = NULL; return taskState; } /* * TransmitTrackerConnectionId checks if the given tracker is transmitting the * given task's results to the master node. If it is, the function returns the * connectionId used in transmitting task results. If not, the function returns * an invalid connectionId. */ static int32 TransmitTrackerConnectionId(TaskTracker *transmitTracker, Task *task) { int32 connectionId = INVALID_CONNECTION_ID; TrackerTaskState *transmitState = transmitTracker->connectionBusyOnTask; if (transmitState != NULL) { /* we are transmitting results for this particular task */ if (transmitState->jobId == task->jobId && transmitState->taskId == task->taskId) { connectionId = transmitTracker->connectionId; } } return connectionId; } /* * ConstrainedTaskList finds the given task's constraint group within the given * task and execution list. We define a constraint group as all tasks that need * to be assigned (or reassigned) to the same task tracker for query execution * to complete. At a high level, compute tasks and their data fetch dependencies * are part of the same constraint group. Also, the transitive closure of tasks * that have the same merge task dependency are part of one constraint group. */ static List * ConstrainedTaskList(List *taskAndExecutionList, Task *task) { List *constrainedTaskList = NIL; Task *constrainingTask = NULL; List *mergeTaskList = NIL; ListCell *mergeTaskCell = NULL; List *upstreamTaskList = NIL; ListCell *upstreamTaskCell = NULL; /* * We first check if this task depends on any merge tasks. If it does *not*, * the task's dependency list becomes our tiny constraint group. */ mergeTaskList = ConstrainedMergeTaskList(taskAndExecutionList, task); if (mergeTaskList == NIL) { constrainedTaskList = ConstrainedNonMergeTaskList(taskAndExecutionList, task); return constrainedTaskList; } /* we first add merge tasks and their dependencies to our constraint group */ foreach(mergeTaskCell, mergeTaskList) { Task *mergeTask = (Task *) lfirst(mergeTaskCell); List *dependedTaskList = mergeTask->dependedTaskList; constrainedTaskList = lappend(constrainedTaskList, mergeTask); constrainedTaskList = TaskListConcatUnique(constrainedTaskList, dependedTaskList); } /* * We now pick the first merge task as our constraining task, and walk over * the task list looking for any tasks that depend on the constraining merge * task. Note that finding a task's upstream dependencies necessitates that * we walk over all the tasks. If we want to optimize this later on, we can * precompute a task list that excludes map fetch tasks. 
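 *
 * The resulting constraint group thus contains the merge tasks, their map
 * output fetch dependencies, and every SQL or map task that reads from those
 * merge tasks (plus the readers' own fetch dependencies), so that a failure
 * moves the whole group to another worker together.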
*/ constrainingTask = (Task *) linitial(mergeTaskList); upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, constrainingTask); Assert(upstreamTaskList != NIL); foreach(upstreamTaskCell, upstreamTaskList) { Task *upstreamTask = (Task *) lfirst(upstreamTaskCell); List *dependedTaskList = upstreamTask->dependedTaskList; /* * We already added merge tasks to our constrained list. We therefore use * concat unique to ensure they don't get appended for a second time. */ constrainedTaskList = TaskListAppendUnique(constrainedTaskList, upstreamTask); constrainedTaskList = TaskListConcatUnique(constrainedTaskList, dependedTaskList); } return constrainedTaskList; } /* * ConstrainedNonMergeTaskList finds the constraint group for the given task, * assuming that the given task doesn't have any merge task dependencies. This * constraint group includes a compute task and its downstream data fetch task * dependencies. */ static List * ConstrainedNonMergeTaskList(List *taskAndExecutionList, Task *task) { List *constrainedTaskList = NIL; Task *upstreamTask = NULL; List *dependedTaskList = NIL; TaskType taskType = task->taskType; if (taskType == SQL_TASK || taskType == MAP_TASK) { upstreamTask = task; dependedTaskList = upstreamTask->dependedTaskList; } else if (taskType == SHARD_FETCH_TASK) { List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, task); Assert(list_length(upstreamTaskList) == 1); upstreamTask = (Task *) linitial(upstreamTaskList); dependedTaskList = upstreamTask->dependedTaskList; } Assert(upstreamTask != NULL); constrainedTaskList = list_make1(upstreamTask); constrainedTaskList = list_concat(constrainedTaskList, dependedTaskList); return constrainedTaskList; } /* * UpstreamDependencyList looks for the given task's upstream task dependencies * in the given task and execution list. For this, the function walks across all * tasks in the task list. This walk is expensive due to the number of map fetch * tasks involved; and this function should be called sparingly. */ static List * UpstreamDependencyList(List *taskAndExecutionList, Task *searchedTask) { List *upstreamTaskList = NIL; ListCell *taskAndExecutionCell = NULL; foreach(taskAndExecutionCell, taskAndExecutionList) { Task *upstreamTask = (Task *) lfirst(taskAndExecutionCell); List *dependedTaskList = upstreamTask->dependedTaskList; ListCell *dependedTaskCell = NULL; /* * The given task and its upstream dependency cannot be of the same type. * We perform this check as an optimization. This way, we can quickly * skip over upstream map fetch tasks if we aren't looking for them. */ if (upstreamTask->taskType == searchedTask->taskType) { continue; } /* * We walk over the upstream task's dependency list, and check if any of * them is the task we are looking for. */ foreach(dependedTaskCell, dependedTaskList) { Task *dependedTask = (Task *) lfirst(dependedTaskCell); if (TasksEqual(dependedTask, searchedTask)) { upstreamTaskList = lappend(upstreamTaskList, upstreamTask); } } } return upstreamTaskList; } /* * ConstrainedMergeTaskList finds any merge task dependencies for the given task. * Note that a given task may have zero, one, or two merge task dependencies. To * resolve all dependencies, the function first looks at the task's type. Then, * the function may need to find the task's parent, and resolve any merge task * dependencies from that parent task. 
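 *
 * For instance, a SQL task that joins two repartitioned inputs depends on two
 * merge tasks (one per input), a task reading a single repartitioned input
 * depends on one, and a task that only touches regular shards depends on none.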
*/ static List * ConstrainedMergeTaskList(List *taskAndExecutionList, Task *task) { List *constrainedMergeTaskList = NIL; TaskType taskType = task->taskType; /* * We find the list of constraining merge tasks for the given task. If the * given task is a SQL or map task, we simply need to find its merge task * dependencies -- if any. */ if (taskType == SQL_TASK || taskType == MAP_TASK) { constrainedMergeTaskList = MergeTaskList(task->dependedTaskList); } else if (taskType == SHARD_FETCH_TASK) { Task *upstreamTask = NULL; List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, task); /* * A shard fetch task can only have one SQL/map task parent. We now get * that parent. From the parent, we find any merge task dependencies. */ Assert(list_length(upstreamTaskList) == 1); upstreamTask = (Task *) linitial(upstreamTaskList); constrainedMergeTaskList = MergeTaskList(upstreamTask->dependedTaskList); } else if (taskType == MAP_OUTPUT_FETCH_TASK) { List *taskList = UpstreamDependencyList(taskAndExecutionList, task); Task *mergeTask = (Task *) linitial(taskList); /* * Once we resolve the merge task, we use the exact same logic as below * to find any other merge task in our constraint group. */ List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, mergeTask); Task *upstreamTask = (Task *) linitial(upstreamTaskList); constrainedMergeTaskList = MergeTaskList(upstreamTask->dependedTaskList); } else if (taskType == MERGE_TASK) { Task *upstreamTask = NULL; List *upstreamTaskList = UpstreamDependencyList(taskAndExecutionList, task); /* * A merge task can have multiple SQL/map task parents. We now get only * one of those parents. We then search if the parent depends on another * merge task besides us. */ Assert(upstreamTaskList != NIL); upstreamTask = (Task *) linitial(upstreamTaskList); constrainedMergeTaskList = MergeTaskList(upstreamTask->dependedTaskList); } return constrainedMergeTaskList; } /* * MergeTaskList walks over the given task list, finds the merge tasks in the * list, and returns the found tasks in a new list. */ static List * MergeTaskList(List *taskList) { List *mergeTaskList = NIL; ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); if (task->taskType == MERGE_TASK) { mergeTaskList = lappend(mergeTaskList, task); } } return mergeTaskList; } /* * ReassignTaskList walks over all tasks in the given task list, and reassigns * each task's execution and transmit to the next worker node. This ensures that * all tasks within the same constraint group are failed over to the next node * together. The function also increments each task's failure counter. */ static void ReassignTaskList(List *taskList) { List *completedTaskList = NIL; ListCell *taskCell = NULL; /* * As an optimization, we first find the SQL tasks whose results we already * fetched to the master node. We don't need to re-execute these SQL tasks * or their shard fetch dependencies. 
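 *
 * Those tasks are collected into completedTaskList below and removed from the
 * reassignment list, since their results are already on the master node and
 * re-running them would only repeat that work.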
*/ foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); TaskExecution *taskExecution = task->taskExecution; bool transmitCompleted = TransmitExecutionCompleted(taskExecution); if ((task->taskType == SQL_TASK) && transmitCompleted) { List *shardFetchTaskList = ShardFetchTaskList(task->dependedTaskList); completedTaskList = lappend(completedTaskList, task); completedTaskList = TaskListUnion(completedTaskList, shardFetchTaskList); } } taskList = TaskListDifference(taskList, completedTaskList); taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); TaskExecution *taskExecution = task->taskExecution; uint32 currentNodeIndex = taskExecution->currentNodeIndex; TaskExecStatus *taskStatusArray = taskExecution->taskStatusArray; TransmitExecStatus *transmitStatusArray = taskExecution->transmitStatusArray; /* * We reset current task statuses in case we fail on all other worker * nodes and come back to this one. */ taskStatusArray[currentNodeIndex] = EXEC_TASK_UNASSIGNED; transmitStatusArray[currentNodeIndex] = EXEC_TRANSMIT_UNASSIGNED; /* update node index to try next worker node */ AdjustStateForFailure(taskExecution); } } /* * ReassignMapFetchTaskList walks over tasks in the given task list, and resets * their task execution status. This ensures that all map output fetch tasks are * retried after the node executing the map task has been failed over. */ static void ReassignMapFetchTaskList(List *mapFetchTaskList) { ListCell *mapFetchTaskCell = NULL; foreach(mapFetchTaskCell, mapFetchTaskList) { Task *mapFetchTask = (Task *) lfirst(mapFetchTaskCell); TaskExecution *mapFetchTaskExecution = mapFetchTask->taskExecution; TaskExecStatus *taskStatusArray = mapFetchTaskExecution->taskStatusArray; uint32 currentNodeIndex = mapFetchTaskExecution->currentNodeIndex; /* * We reassign to same task tracker knowing that the source task tracker * (that we failed to fetch map output from) has changed. */ taskStatusArray[currentNodeIndex] = EXEC_TASK_UNASSIGNED; } } /* * ShardFetchTaskList walks over the given task list, finds the shard fetch tasks * in the list, and returns the found tasks in a new list. */ static List * ShardFetchTaskList(List *taskList) { List *shardFetchTaskList = NIL; ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); if (task->taskType == SHARD_FETCH_TASK) { shardFetchTaskList = lappend(shardFetchTaskList, task); } } return shardFetchTaskList; } /* * ManageTaskTracker manages tasks assigned to the given task tracker. For this, * the function coordinates access to the underlying connection. The function * also: (1) synchronously assigns locally queued tasks to the task tracker, (2) * issues an asynchronous task status query for one assigned task at a time, and * (3) retrieves status query results for the previously issued status query. */ static void ManageTaskTracker(TaskTracker *taskTracker) { bool trackerConnectionUp = false; bool trackerHealthy = false; trackerHealthy = TrackerHealthy(taskTracker); if (!trackerHealthy) { return; } trackerConnectionUp = TrackerConnectionUp(taskTracker); if (!trackerConnectionUp) { TrackerReconnectPoll(taskTracker); /* try an async reconnect */ return; } /* * (1) We first synchronously assign any pending new tasks. We also make * sure not to block execution on one task tracker for a long time. 
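 *
 * To bound that time, AssignQueuedTasks below assigns at most
 * MaxAssignTaskBatchSize queued tasks per call, and the status checks in
 * step (2) are similarly capped at MaxTaskStatusBatchSize tasks per round.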
*/ if (!taskTracker->connectionBusy) { List *previousTaskList = taskTracker->assignedTaskList; List *newTaskList = AssignQueuedTasks(taskTracker); taskTracker->assignedTaskList = list_concat(previousTaskList, newTaskList); } /* * (2) We find assigned tasks. We then send an asynchronous query to check * the tasks' statuses. */ if (!taskTracker->connectionBusy) { List *taskStatusBatchList = TaskStatusBatchList(taskTracker); /* if we have running tasks, check their status */ if (taskStatusBatchList) { int32 connectionId = taskTracker->connectionId; StringInfo taskStatusBatchQuery = NULL; bool querySent = false; taskStatusBatchQuery = TaskStatusBatchQuery(taskStatusBatchList); querySent = MultiClientSendQuery(connectionId, taskStatusBatchQuery->data); if (querySent) { taskTracker->connectionBusy = true; taskTracker->connectionBusyOnTaskList = taskStatusBatchList; } else { /* mark only first task in list as failed */ TrackerTaskState *taskState = (TrackerTaskState *) linitial( taskStatusBatchList); taskState->status = TASK_CLIENT_SIDE_STATUS_FAILED; list_free(taskStatusBatchList); taskTracker->connectionBusy = false; taskTracker->connectionBusyOnTaskList = NIL; } pfree(taskStatusBatchQuery); } } /* * (3) check if results are ready for previously issued task status query */ if (taskTracker->connectionBusy) { int32 connectionId = taskTracker->connectionId; ResultStatus resultStatus = CLIENT_INVALID_RESULT_STATUS; /* if connection is available, update task status accordingly */ resultStatus = MultiClientResultStatus(connectionId); if (resultStatus == CLIENT_RESULT_READY) { ReceiveTaskStatusBatchQueryResponse(taskTracker); } else if (resultStatus == CLIENT_RESULT_UNAVAILABLE) { TrackerTaskState *taskState = (TrackerTaskState *) linitial( taskTracker->connectionBusyOnTaskList); Assert(taskState != NULL); taskState->status = TASK_CLIENT_SIDE_STATUS_FAILED; } /* if connection is available, give it back to the task tracker */ if (resultStatus != CLIENT_RESULT_BUSY) { list_free(taskTracker->connectionBusyOnTaskList); taskTracker->connectionBusy = false; taskTracker->connectionBusyOnTaskList = NIL; } } } /* * TrackerConnectionUp checks the most recent connection status for the given * task tracker. The function returns true if the connection is still up. */ static bool TrackerConnectionUp(TaskTracker *taskTracker) { bool connectionUp = false; /* if we think we have a connection, check its most recent status */ if (taskTracker->trackerStatus == TRACKER_CONNECTED) { connectionUp = MultiClientConnectionUp(taskTracker->connectionId); } return connectionUp; } /* * TrackerReconnectPoll checks if we have an open connection to the given task * tracker. If not, the function opens an asynchronous connection to the task * tracker and polls this connection's status on every call. The function also * sets the task tracker's internal state. 
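 *
 * A rough sketch of the tracker status transitions handled here:
 *
 *   TRACKER_CONNECT_START -> TRACKER_CONNECT_POLL -> TRACKER_CONNECTED
 *            ^                                              |
 *            +--------- TRACKER_CONNECTION_FAILED <---------+
 *
 * where entering the failed state increments connectionFailureCount and
 * resets connectPollCount before the connection attempt is restarted.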
*/ static void TrackerReconnectPoll(TaskTracker *taskTracker) { TrackerStatus currentStatus = taskTracker->trackerStatus; if (currentStatus == TRACKER_CONNECTED) { bool connectionUp = MultiClientConnectionUp(taskTracker->connectionId); if (connectionUp) { taskTracker->trackerStatus = TRACKER_CONNECTED; } else { taskTracker->trackerStatus = TRACKER_CONNECTION_FAILED; /* we lost the connection underneath us, clean it up */ MultiClientDisconnect(taskTracker->connectionId); taskTracker->connectionId = INVALID_CONNECTION_ID; } } else if (currentStatus == TRACKER_CONNECT_START || currentStatus == TRACKER_CONNECT_POLL) { taskTracker->trackerStatus = TrackerConnectPoll(taskTracker); } else if (currentStatus == TRACKER_CONNECTION_FAILED) { taskTracker->connectionFailureCount++; taskTracker->connectPollCount = 0; taskTracker->trackerStatus = TRACKER_CONNECT_START; } } /* * AssignQueuedTasks walks over the given task tracker's task state hash, finds * queued tasks in this hash, and synchronously assigns them to the given task * tracker. The function then returns the list of newly assigned tasks. */ static List * AssignQueuedTasks(TaskTracker *taskTracker) { HTAB *taskStateHash = taskTracker->taskStateHash; List *assignedTaskList = NIL; uint32 taskAssignmentCount = 0; List *tasksToAssignList = NIL; StringInfo assignTaskBatchQuery = makeStringInfo(); int32 connectionId = taskTracker->connectionId; HASH_SEQ_STATUS status; TrackerTaskState *taskState = NULL; hash_seq_init(&status, taskStateHash); taskState = (TrackerTaskState *) hash_seq_search(&status); while (taskState != NULL) { if (taskState->status == TASK_CLIENT_SIDE_QUEUED) { StringInfo taskAssignmentQuery = taskState->taskAssignmentQuery; appendStringInfo(assignTaskBatchQuery, "%s", taskAssignmentQuery->data); tasksToAssignList = lappend(tasksToAssignList, taskState); taskAssignmentCount++; if (taskAssignmentCount >= MaxAssignTaskBatchSize) { hash_seq_term(&status); break; } } taskState = (TrackerTaskState *) hash_seq_search(&status); } if (taskAssignmentCount > 0) { void *queryResult = NULL; int rowCount = 0; int columnCount = 0; ListCell *taskCell = NULL; bool batchSuccess = MultiClientSendQuery(connectionId, assignTaskBatchQuery->data); foreach(taskCell, tasksToAssignList) { TrackerTaskState *taskState = (TrackerTaskState *) lfirst(taskCell); BatchQueryStatus queryStatus = CLIENT_INVALID_BATCH_QUERY; if (!batchSuccess) { taskState->status = TASK_CLIENT_SIDE_ASSIGN_FAILED; continue; } queryStatus = MultiClientBatchResult(connectionId, &queryResult, &rowCount, &columnCount); if (queryStatus == CLIENT_BATCH_QUERY_CONTINUE) { taskState->status = TASK_ASSIGNED; assignedTaskList = lappend(assignedTaskList, taskState); } else { taskState->status = TASK_CLIENT_SIDE_ASSIGN_FAILED; batchSuccess = false; } MultiClientClearResult(queryResult); } /* call MultiClientBatchResult one more time to finish reading results */ MultiClientBatchResult(connectionId, &queryResult, &rowCount, &columnCount); Assert(queryResult == NULL); pfree(assignTaskBatchQuery); list_free(tasksToAssignList); } return assignedTaskList; } /* * TaskStatusBatchList returns a list containing up to MaxTaskStatusBatchSize * tasks from the list of assigned tasks. When the number of tasks is greater * than the maximum, the next call of this function will continue in the * assigned task list after the last task that was added to the current list. 
* * In some cases the list may be empty even if tasks have been assigned due to * wrap-around, namely if we first generate a batch of MaxTaskStatusBatchSize, * but none of the remaining tasks in assignedTaskList are running. */ static List * TaskStatusBatchList(TaskTracker *taskTracker) { int32 assignedTaskCount = 0; int32 assignedTaskIndex = 0; List *assignedTaskList = taskTracker->assignedTaskList; List *taskStatusBatchList = NIL; ListCell *taskCell = NULL; int32 currentTaskIndex = 0; int32 lastTaskIndex = 0; assignedTaskCount = list_length(assignedTaskList); if (assignedTaskCount == 0) { return NIL; } lastTaskIndex = (assignedTaskCount - 1); currentTaskIndex = taskTracker->currentTaskIndex; if (currentTaskIndex >= lastTaskIndex) { currentTaskIndex = -1; } foreach(taskCell, assignedTaskList) { TrackerTaskState *assignedTask = (TrackerTaskState *) lfirst(taskCell); TaskStatus taskStatus = assignedTask->status; bool taskRunning = false; if (taskStatus == TASK_ASSIGNED || taskStatus == TASK_SCHEDULED || taskStatus == TASK_RUNNING || taskStatus == TASK_FAILED) { taskRunning = true; } if (taskRunning && (assignedTaskIndex > currentTaskIndex)) { taskStatusBatchList = lappend(taskStatusBatchList, assignedTask); if (list_length(taskStatusBatchList) >= MaxTaskStatusBatchSize) { break; } } assignedTaskIndex++; } /* continue where we left off next time this function is called */ taskTracker->currentTaskIndex = assignedTaskIndex; return taskStatusBatchList; } /* * TaskStatusBatchQuery builds a command string containing multiple * task_tracker_task_status queries from a TrackerTaskState list. */ static StringInfo TaskStatusBatchQuery(List *taskList) { StringInfo taskStatusBatchQuery = makeStringInfo(); ListCell *taskCell = NULL; foreach(taskCell, taskList) { TrackerTaskState *taskState = (TrackerTaskState *) lfirst(taskCell); appendStringInfo(taskStatusBatchQuery, TASK_STATUS_QUERY, taskState->jobId, taskState->taskId); } return taskStatusBatchQuery; } /* * ReceiveTaskStatusBatchQueryResponse assumes that a batch of task status * queries have been previously sent to the given task tracker, and receives * and processes the responses for these status queries. If a status check fails * only one task status is marked as failed and the remainder is considered not * executed. 
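 *
 * Each response row carries the remote task status as an integer in its first
 * column; an empty or unparseable value is conservatively mapped to
 * TASK_PERMANENTLY_FAILED by the parsing logic below.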
*/ static void ReceiveTaskStatusBatchQueryResponse(TaskTracker *taskTracker) { ListCell *taskCell = NULL; List *checkedTaskList = taskTracker->connectionBusyOnTaskList; int32 connectionId = taskTracker->connectionId; int rowCount = 0; int columnCount = 0; void *queryResult = NULL; foreach(taskCell, checkedTaskList) { TrackerTaskState *checkedTask = (TrackerTaskState *) lfirst(taskCell); TaskStatus taskStatus = TASK_STATUS_INVALID_FIRST; BatchQueryStatus queryStatus = MultiClientBatchResult(connectionId, &queryResult, &rowCount, &columnCount); if (queryStatus == CLIENT_BATCH_QUERY_CONTINUE) { char *valueString = MultiClientGetValue(queryResult, 0, 0); if (valueString == NULL || (*valueString) == '\0') { taskStatus = TASK_PERMANENTLY_FAILED; } else { char *valueStringEnd = NULL; errno = 0; taskStatus = strtoul(valueString, &valueStringEnd, 0); if (errno != 0 || (*valueStringEnd) != '\0') { /* we couldn't parse received integer */ taskStatus = TASK_PERMANENTLY_FAILED; } Assert(taskStatus > TASK_STATUS_INVALID_FIRST); Assert(taskStatus < TASK_STATUS_LAST); } } else { taskStatus = TASK_CLIENT_SIDE_STATUS_FAILED; } checkedTask->status = taskStatus; MultiClientClearResult(queryResult); if (queryStatus == CLIENT_BATCH_QUERY_FAILED) { /* remaining queries were not executed */ break; } } /* call MultiClientBatchResult one more time to finish reading results */ MultiClientBatchResult(connectionId, &queryResult, &rowCount, &columnCount); Assert(queryResult == NULL); } /* * ManageTransmitTracker manages access to the connection we opened to the worker * node. If the connection is idle, and we have file transmit requests pending, * the function picks a pending file transmit request, and starts the Copy Out * protocol to copy the file's contents. */ static void ManageTransmitTracker(TaskTracker *transmitTracker) { TrackerTaskState *transmitState = NULL; bool trackerHealthy = false; bool trackerConnectionUp = false; trackerHealthy = TrackerHealthy(transmitTracker); if (!trackerHealthy) { return; } trackerConnectionUp = TrackerConnectionUp(transmitTracker); if (!trackerConnectionUp) { TrackerReconnectPoll(transmitTracker); /* try an async reconnect */ return; } /* connection belongs to another file transmit */ if (transmitTracker->connectionBusy) { return; } transmitState = NextQueuedFileTransmit(transmitTracker->taskStateHash); if (transmitState != NULL) { bool fileTransmitStarted = false; int32 connectionId = transmitTracker->connectionId; StringInfo jobDirectoryName = JobDirectoryName(transmitState->jobId); StringInfo taskFilename = TaskFilename(jobDirectoryName, transmitState->taskId); StringInfo fileTransmitQuery = makeStringInfo(); appendStringInfo(fileTransmitQuery, TRANSMIT_REGULAR_COMMAND, taskFilename->data); fileTransmitStarted = MultiClientSendQuery(connectionId, fileTransmitQuery->data); if (fileTransmitStarted) { transmitState->status = TASK_ASSIGNED; transmitTracker->connectionBusy = true; transmitTracker->connectionBusyOnTask = transmitState; } else { transmitState->status = TASK_CLIENT_SIDE_TRANSMIT_FAILED; transmitTracker->connectionBusy = false; transmitTracker->connectionBusyOnTask = NULL; } } } /* * NextQueuedFileTransmit walks over all tasks in the given hash, and looks for * a file transmit task that has been queued, but not served yet. 
*/ static TrackerTaskState * NextQueuedFileTransmit(HTAB *taskStateHash) { HASH_SEQ_STATUS status; TrackerTaskState *taskState = NULL; hash_seq_init(&status, taskStateHash); taskState = (TrackerTaskState *) hash_seq_search(&status); while (taskState != NULL) { if (taskState->status == TASK_FILE_TRANSMIT_QUEUED) { hash_seq_term(&status); break; } taskState = (TrackerTaskState *) hash_seq_search(&status); } return taskState; } /* * JobIdList walks over all jobs in the given job tree and retrieves each job's * identifier. The function then inserts these job identifiers in a new list and * returns this list. */ static List * JobIdList(Job *job) { List *jobIdList = NIL; List *jobQueue = NIL; /* * We walk over the job tree using breadth-first search. For this, we first * queue the root node, and then start traversing our search space. */ jobQueue = list_make1(job); while (jobQueue != NIL) { uint64 *jobIdPointer = (uint64 *) palloc0(sizeof(uint64)); List *jobChildrenList = NIL; Job *job = (Job *) linitial(jobQueue); jobQueue = list_delete_first(jobQueue); (*jobIdPointer) = job->jobId; jobIdList = lappend(jobIdList, jobIdPointer); /* prevent dependedJobList being modified on list_concat() call */ jobChildrenList = list_copy(job->dependedJobList); if (jobChildrenList != NIL) { jobQueue = list_concat(jobQueue, jobChildrenList); } } return jobIdList; } /* * TrackerCleanupResources cleans up remote and local resources associated with * the query. To clean up remote resources, the function cancels ongoing transmit * tasks. It also waits for ongoing requests to the task trackers to complete * before assigning "job clean up" tasks to them. To reclaim local resources, * the function closes open file descriptors and disconnects from task trackers. */ static void TrackerCleanupResources(HTAB *taskTrackerHash, HTAB *transmitTrackerHash, List *jobIdList, List *taskList) { ListCell *taskCell = NULL; ListCell *jobIdCell = NULL; /* * We are done with query execution. We now wait for open requests to the task * trackers to complete and cancel any open requests to the transmit trackers. */ TrackerHashWaitActiveRequest(taskTrackerHash); TrackerHashCancelActiveRequest(transmitTrackerHash); /* only close open files; open connections are owned by trackers */ foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); TaskExecution *taskExecution = task->taskExecution; CleanupTaskExecution(taskExecution); task->taskExecution = NULL; } /* * For each executed job, we create a special task to clean up its resources * on worker nodes, and send this clean-up task to all task trackers. */ foreach(jobIdCell, jobIdList) { uint64 *jobIdPointer = (uint64 *) lfirst(jobIdCell); Task *jobCleanupTask = JobCleanupTask(*jobIdPointer); TrackerHashCleanupJob(taskTrackerHash, jobCleanupTask); } TrackerHashDisconnect(taskTrackerHash); TrackerHashDisconnect(transmitTrackerHash); } /* * TrackerHashWaitActiveRequest walks over task trackers in the given hash, and * checks if they have an ongoing request. If they do, the function waits for * the request to complete. If the request completes successfully, the function * frees the connection for future tasks. 
*/ static void TrackerHashWaitActiveRequest(HTAB *taskTrackerHash) { TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { bool trackerConnectionUp = TrackerConnectionUp(taskTracker); /* if we have an ongoing request, block until we have a response */ if (trackerConnectionUp && taskTracker->connectionBusy) { QueryStatus queryStatus = MultiClientQueryStatus(taskTracker->connectionId); if (queryStatus == CLIENT_QUERY_DONE) { taskTracker->connectionBusy = false; taskTracker->connectionBusyOnTask = NULL; taskTracker->connectionBusyOnTaskList = NIL; } } taskTracker = (TaskTracker *) hash_seq_search(&status); } } /* * TrackerHashCancelActiveRequest walks over task trackers in the given hash, * and checks if they have an ongoing request. If they do, the function sends a * cancel message on that connection. */ static void TrackerHashCancelActiveRequest(HTAB *taskTrackerHash) { TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { bool trackerConnectionUp = TrackerConnectionUp(taskTracker); /* if we have an ongoing request, send cancel message */ if (trackerConnectionUp && taskTracker->connectionBusy) { MultiClientCancel(taskTracker->connectionId); } taskTracker = (TaskTracker *) hash_seq_search(&status); } } /* * JobCleanupTask creates a special task to clean up all resources associated * with a given job on the worker node. The function then returns this task. */ static Task * JobCleanupTask(uint64 jobId) { Task *jobCleanupTask = NULL; StringInfo jobCleanupQuery = NULL; jobCleanupQuery = makeStringInfo(); appendStringInfo(jobCleanupQuery, JOB_CLEANUP_QUERY, jobId); jobCleanupTask = CitusMakeNode(Task); jobCleanupTask->jobId = jobId; jobCleanupTask->taskId = JOB_CLEANUP_TASK_ID; jobCleanupTask->replicationModel = REPLICATION_MODEL_INVALID; jobCleanupTask->queryString = jobCleanupQuery->data; return jobCleanupTask; } /* * TrackerHashCleanupJob walks over task trackers in the given hash, and assigns * a job cleanup task to the tracker if the tracker's connection is available. * The function then walks over task trackers to which it sent a cleanup task, * checks the request's status, and emits an appropriate status message. 
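 *
 * The cleanup command is wrapped in a regular task assignment query (using
 * the special JOB_CLEANUP_TASK_ID), and the status-check loop waits at most
 * NodeConnectionTimeout before warning that job resources may have to be
 * cleaned up manually on the affected worker.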
*/ static void TrackerHashCleanupJob(HTAB *taskTrackerHash, Task *jobCleanupTask) { uint64 jobId = jobCleanupTask->jobId; List *taskTrackerList = NIL; List *remainingTaskTrackerList = NIL; const long statusCheckInterval = 10000; /* microseconds */ bool timedOut = false; TimestampTz startTime = 0; TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); /* walk over task trackers and try to issue job clean up requests */ taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { bool trackerConnectionUp = TrackerConnectionUp(taskTracker); if (trackerConnectionUp) { bool jobCleanupQuerySent = false; /* if we have a clear connection, send cleanup job */ if (!taskTracker->connectionBusy) { StringInfo jobCleanupQuery = NULL; /* assign through task tracker to manage resource utilization */ jobCleanupQuery = TaskAssignmentQuery(jobCleanupTask, jobCleanupTask->queryString); jobCleanupQuerySent = MultiClientSendQuery(taskTracker->connectionId, jobCleanupQuery->data); } /* * If cleanup query was sent, mark that the connection is busy and * hold onto the task tracker to check status. */ if (jobCleanupQuerySent) { taskTracker->connectionBusy = true; taskTrackerList = lappend(taskTrackerList, taskTracker); } else { const char *nodeName = taskTracker->workerName; uint32 nodePort = taskTracker->workerPort; ereport(WARNING, (errmsg("could not assign cleanup query for job " UINT64_FORMAT " to node \"%s:%u\"", jobId, nodeName, nodePort))); } } taskTracker = (TaskTracker *) hash_seq_search(&status); } /* record the time when we start waiting for cleanup jobs to be sent */ startTime = GetCurrentTimestamp(); /* * Walk over task trackers to which we sent clean up requests. Perform * these checks until it times out. * * We want to determine timedOut flag after the loop start to make sure * we iterate one more time after time out occurs. This is necessary to report * warning messages for timed out cleanup jobs. 
*/ remainingTaskTrackerList = taskTrackerList; while (list_length(remainingTaskTrackerList) > 0 && !timedOut) { List *activeTackTrackerList = remainingTaskTrackerList; ListCell *activeTaskTrackerCell = NULL; TimestampTz currentTime = 0; remainingTaskTrackerList = NIL; pg_usleep(statusCheckInterval); currentTime = GetCurrentTimestamp(); timedOut = TimestampDifferenceExceeds(startTime, currentTime, NodeConnectionTimeout); foreach(activeTaskTrackerCell, activeTackTrackerList) { TaskTracker *taskTracker = (TaskTracker *) lfirst(activeTaskTrackerCell); int32 connectionId = taskTracker->connectionId; const char *nodeName = taskTracker->workerName; uint32 nodePort = taskTracker->workerPort; ResultStatus resultStatus = MultiClientResultStatus(connectionId); if (resultStatus == CLIENT_RESULT_READY) { QueryStatus queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus == CLIENT_QUERY_DONE) { ereport(DEBUG4, (errmsg("completed cleanup query for job " UINT64_FORMAT, jobId))); /* clear connection for future cleanup queries */ taskTracker->connectionBusy = false; } else if (timedOut) { ereport(WARNING, (errmsg("could not receive response for cleanup " "query status for job " UINT64_FORMAT " " "on node \"%s:%u\" with status %d", jobId, nodeName, nodePort, (int) queryStatus), errhint("Manually clean job resources on node " "\"%s:%u\" by running \"%s\" ", nodeName, nodePort, jobCleanupTask->queryString))); } else { remainingTaskTrackerList = lappend(remainingTaskTrackerList, taskTracker); } } else if (timedOut) { ereport(WARNING, (errmsg("could not receive response for cleanup query " "result for job " UINT64_FORMAT " on node " "\"%s:%u\" with status %d", jobId, nodeName, nodePort, (int) resultStatus), errhint("Manually clean job resources on node " "\"%s:%u\" by running \"%s\" ", nodeName, nodePort, jobCleanupTask->queryString))); } else { remainingTaskTrackerList = lappend(remainingTaskTrackerList, taskTracker); } } } } /* * TrackerHashDisconnect walks over task trackers in the given hash, and closes * open connections to them. */ static void TrackerHashDisconnect(HTAB *taskTrackerHash) { TaskTracker *taskTracker = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, taskTrackerHash); taskTracker = (TaskTracker *) hash_seq_search(&status); while (taskTracker != NULL) { if (taskTracker->connectionId != INVALID_CONNECTION_ID) { MultiClientDisconnect(taskTracker->connectionId); taskTracker->connectionId = INVALID_CONNECTION_ID; } taskTracker = (TaskTracker *) hash_seq_search(&status); } } citus-7.0.3/src/backend/distributed/executor/multi_utility.c000066400000000000000000003306251317107136600243050ustar00rootroot00000000000000/*------------------------------------------------------------------------- * multi_utility.c * Citus utility hook and related functionality. * * Copyright (c) 2012-2016, Citus Data, Inc. 
*------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "libpq-fe.h" #include "miscadmin.h" #include "port.h" #include #include "access/attnum.h" #include "access/heapam.h" #include "access/htup.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/tupdesc.h" #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/dependency.h" #include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_attribute.h" #include "catalog/pg_class.h" #include "citus_version.h" #include "catalog/pg_constraint.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/tablecmds.h" #include "commands/prepare.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/maintenanced.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_copy.h" #include "distributed/multi_join_order.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_planner.h" #include "distributed/multi_router_executor.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_shard_transaction.h" #include "distributed/multi_utility.h" /* IWYU pragma: keep */ #include "distributed/pg_dist_partition.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/transmit.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" #include "executor/executor.h" #include "foreign/foreign.h" #include "lib/stringinfo.h" #include "nodes/bitmapset.h" #include "nodes/nodes.h" #include "nodes/params.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "nodes/value.h" #include "parser/analyze.h" #include "storage/lmgr.h" #include "storage/lock.h" #include "tcop/dest.h" #include "tcop/utility.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/fmgroids.h" #include "utils/guc.h" #include "utils/hsearch.h" #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/palloc.h" #include "utils/rel.h" #include "utils/relcache.h" #include "utils/syscache.h" bool EnableDDLPropagation = true; /* ddl propagation is enabled */ /* * This struct defines the state for the callback for drop statements. * It is copied as it is from commands/tablecmds.c in Postgres source. 
*/ struct DropRelationCallbackState { char relkind; Oid heapOid; bool concurrent; }; /* Local functions forward declarations for deciding when to perform processing/checks */ static bool IsCitusExtensionStmt(Node *parsetree); /* Local functions forward declarations for Transmit statement */ static bool IsTransmitStmt(Node *parsetree); static void VerifyTransmitStmt(CopyStmt *copyStatement); /* Local functions forward declarations for processing distributed table commands */ static Node * ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, bool *commandMustRunAsOwner); static void ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement); static void ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement); static List * PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand); static List * PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand); static List * PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCommand); static List * PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand); static Node * WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCommand); static List * PlanAlterObjectSchemaStmt(AlterObjectSchemaStmt *alterObjectSchemaStmt, const char *alterObjectSchemaCommand); static void ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand); static bool IsSupportedDistributedVacuumStmt(Oid relationId, VacuumStmt *vacuumStmt); static List * VacuumTaskList(Oid relationId, VacuumStmt *vacuumStmt); static StringInfo DeparseVacuumStmtPrefix(VacuumStmt *vacuumStmt); static char * DeparseVacuumColumnNames(List *columnNameList); /* Local functions forward declarations for unsupported command checks */ static void ErrorIfUnstableCreateOrAlterExtensionStmt(Node *parsetree); static void ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement); static void ErrorIfUnsupportedDropIndexStmt(DropStmt *dropIndexStatement); static void ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement); static void ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement); static void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt); static void ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt); static void ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement); static bool OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId); static void ErrorIfUnsupportedRenameStmt(RenameStmt *renameStmt); static void ErrorIfUnsupportedAlterAddConstraintStmt(AlterTableStmt *alterTableStatement); static void ErrorIfUnsupportedForeignConstraint(Relation relation, char distributionMethod, Var *distributionColumn, uint32 colocationId); /* Local functions forward declarations for helper functions */ static char * ExtractNewExtensionVersion(Node *parsetree); static void CreateLocalTable(RangeVar *relation, char *nodeName, int32 nodePort); static bool IsAlterTableRenameStmt(RenameStmt *renameStmt); static bool AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement, AlterTableCmd *command); static void ExecuteDistributedDDLJob(DDLJob *ddlJob); static void ShowNoticeIfNotUsing2PC(void); static List * DDLTaskList(Oid relationId, const char *commandString); static List * CreateIndexTaskList(Oid relationId, IndexStmt *indexStmt); static List * DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt); static List * InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId, const char 
*commandString); static void RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, void *arg); static void CheckCopyPermissions(CopyStmt *copyStatement); static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist); static void PostProcessUtility(Node *parsetree); static bool warnedUserAbout2PC = false; /* * multi_ProcessUtility9x is the 9.x-compatible wrapper for Citus' main utility * hook. It simply adapts the old-style hook to call into the new-style (10+) * hook, which is what now houses all actual logic. */ void multi_ProcessUtility9x(Node *parsetree, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag) { PlannedStmt *plannedStmt = makeNode(PlannedStmt); plannedStmt->commandType = CMD_UTILITY; plannedStmt->utilityStmt = parsetree; multi_ProcessUtility(plannedStmt, queryString, context, params, NULL, dest, completionTag); } /* * CitusProcessUtility is a version-aware wrapper of ProcessUtility to account * for argument differences between the 9.x and 10+ PostgreSQL versions. */ void CitusProcessUtility(Node *node, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag) { #if (PG_VERSION_NUM >= 100000) PlannedStmt *plannedStmt = makeNode(PlannedStmt); plannedStmt->commandType = CMD_UTILITY; plannedStmt->utilityStmt = node; ProcessUtility(plannedStmt, queryString, context, params, NULL, dest, completionTag); #else ProcessUtility(node, queryString, context, params, dest, completionTag); #endif } /* * multi_ProcessUtility is the main entry hook for implementing Citus-specific * utility behavior. Its primary responsibilities are intercepting COPY and DDL * commands and augmenting the coordinator's command with corresponding tasks * to be run on worker nodes, after suitably ensuring said commands' options * are fully supported by Citus. Much of the DDL behavior is toggled by Citus' * enable_ddl_propagation GUC. In addition to DDL and COPY, utilities such as * TRUNCATE and VACUUM are also supported. */ void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, char *completionTag) { Node *parsetree = pstmt->utilityStmt; bool commandMustRunAsOwner = false; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; List *ddlJobs = NIL; bool checkExtensionVersion = false; if (IsA(parsetree, TransactionStmt)) { /* * Transaction statements (e.g. ABORT, COMMIT) can be run in aborted * transactions in which case a lot of checks cannot be done safely in * that state. Since we never need to intercept transaction statements, * skip our checks and immediately fall into standard_ProcessUtility. */ #if (PG_VERSION_NUM >= 100000) standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); #else standard_ProcessUtility(parsetree, queryString, context, params, dest, completionTag); #endif return; } checkExtensionVersion = IsCitusExtensionStmt(parsetree); if (EnableVersionChecks && checkExtensionVersion) { ErrorIfUnstableCreateOrAlterExtensionStmt(parsetree); } if (!CitusHasBeenLoaded()) { /* * Ensure that utility commands do not behave any differently until CREATE * EXTENSION is invoked. 
*/ #if (PG_VERSION_NUM >= 100000) standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); #else standard_ProcessUtility(parsetree, queryString, context, params, dest, completionTag); #endif return; } /* * TRANSMIT used to be separate command, but to avoid patching the grammar * it's no overlaid onto COPY, but with FORMAT = 'transmit' instead of the * normal FORMAT options. */ if (IsTransmitStmt(parsetree)) { CopyStmt *copyStatement = (CopyStmt *) parsetree; VerifyTransmitStmt(copyStatement); /* ->relation->relname is the target file in our overloaded COPY */ if (copyStatement->is_from) { RedirectCopyDataToRegularFile(copyStatement->relation->relname); } else { SendRegularFile(copyStatement->relation->relname); } /* Don't execute the faux copy statement */ return; } if (IsA(parsetree, CopyStmt)) { /* copy parse tree since we might scribble on it to fix the schema name */ parsetree = copyObject(parsetree); parsetree = ProcessCopyStmt((CopyStmt *) parsetree, completionTag, &commandMustRunAsOwner); if (parsetree == NULL) { return; } } /* we're mostly in DDL (and VACUUM/TRUNCATE) territory at this point... */ if (IsA(parsetree, CreateSeqStmt)) { ErrorIfUnsupportedSeqStmt((CreateSeqStmt *) parsetree); } if (IsA(parsetree, AlterSeqStmt)) { ErrorIfDistributedAlterSeqOwnedBy((AlterSeqStmt *) parsetree); } if (IsA(parsetree, TruncateStmt)) { ErrorIfUnsupportedTruncateStmt((TruncateStmt *) parsetree); } /* only generate worker DDLJobs if propagation is enabled */ if (EnableDDLPropagation) { if (IsA(parsetree, IndexStmt)) { MemoryContext oldContext = MemoryContextSwitchTo(GetMemoryChunkContext( parsetree)); /* copy parse tree since we might scribble on it to fix the schema name */ parsetree = copyObject(parsetree); MemoryContextSwitchTo(oldContext); ddlJobs = PlanIndexStmt((IndexStmt *) parsetree, queryString); } if (IsA(parsetree, DropStmt)) { DropStmt *dropStatement = (DropStmt *) parsetree; if (dropStatement->removeType == OBJECT_INDEX) { ddlJobs = PlanDropIndexStmt(dropStatement, queryString); } } if (IsA(parsetree, AlterTableStmt)) { AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree; if (alterTableStmt->relkind == OBJECT_TABLE) { ddlJobs = PlanAlterTableStmt(alterTableStmt, queryString); } } /* * ALTER TABLE ... RENAME statements have their node type as RenameStmt and * not AlterTableStmt. So, we intercept RenameStmt to tackle these commands. */ if (IsA(parsetree, RenameStmt)) { ddlJobs = PlanRenameStmt((RenameStmt *) parsetree, queryString); } /* * ALTER ... SET SCHEMA statements have their node type as AlterObjectSchemaStmt. * So, we intercept AlterObjectSchemaStmt to tackle these commands. */ if (IsA(parsetree, AlterObjectSchemaStmt)) { AlterObjectSchemaStmt *setSchemaStmt = (AlterObjectSchemaStmt *) parsetree; ddlJobs = PlanAlterObjectSchemaStmt(setSchemaStmt, queryString); } /* * ALTER TABLE ALL IN TABLESPACE statements have their node type as * AlterTableMoveAllStmt. At the moment we do not support this functionality in * the distributed environment. We warn out here. */ if (IsA(parsetree, AlterTableMoveAllStmt)) { ereport(WARNING, (errmsg("not propagating ALTER TABLE ALL IN TABLESPACE " "commands to worker nodes"), errhint("Connect to worker nodes directly to manually " "move all tables."))); } } else { /* * citus.enable_ddl_propagation is disabled, which means that PostgreSQL * should handle the DDL command on a distributed table directly, without * Citus intervening. 
The only exception is partition column drop, in * which case we error out. Advanced Citus users use this to implement their * own DDL propagation. We also use it to avoid re-propagating DDL commands * when changing MX tables on workers. Below, we also make sure that DDL * commands don't run queries that might get intercepted by Citus and error * out, specifically we skip validation in foreign keys. */ if (IsA(parsetree, AlterTableStmt)) { AlterTableStmt *alterTableStmt = (AlterTableStmt *) parsetree; if (alterTableStmt->relkind == OBJECT_TABLE) { ErrorIfAlterDropsPartitionColumn(alterTableStmt); /* * When issuing an ALTER TABLE ... ADD FOREIGN KEY command, the * the validation step should be skipped on the distributed table. * Therefore, we check whether the given ALTER TABLE statement is a * FOREIGN KEY constraint and if so disable the validation step. * Note that validation is done on the shard level when DDL * propagation is enabled. Unlike the preceeding Plan* calls, the * following eagerly executes some tasks on workers. */ parsetree = WorkerProcessAlterTableStmt(alterTableStmt, queryString); } } } /* inform the user about potential caveats */ if (IsA(parsetree, CreatedbStmt)) { ereport(NOTICE, (errmsg("Citus partially supports CREATE DATABASE for " "distributed databases"), errdetail("Citus does not propagate CREATE DATABASE " "command to workers"), errhint("You can manually create a database and its " "extensions on workers."))); } else if (IsA(parsetree, CreateRoleStmt)) { ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to worker" " nodes"), errhint("Connect to worker nodes directly to manually create all" " necessary users and roles."))); } /* * Make sure that on DROP DATABASE we terminate the background deamon * associated with it. */ if (IsA(parsetree, DropdbStmt)) { DropdbStmt *dropDbStatement = (DropdbStmt *) parsetree; char *dbname = dropDbStatement->dbname; Oid databaseOid = get_database_oid(dbname, false); StopMaintenanceDaemon(databaseOid); } /* set user if needed and go ahead and run local utility using standard hook */ if (commandMustRunAsOwner) { GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); } #if (PG_VERSION_NUM >= 100000) pstmt->utilityStmt = parsetree; standard_ProcessUtility(pstmt, queryString, context, params, queryEnv, dest, completionTag); #else standard_ProcessUtility(parsetree, queryString, context, params, dest, completionTag); #endif /* * We only process CREATE TABLE ... PARTITION OF commands in the function below * to handle the case when user creates a table as a partition of distributed table. */ if (IsA(parsetree, CreateStmt)) { CreateStmt *createStatement = (CreateStmt *) parsetree; ProcessCreateTableStmtPartitionOf(createStatement); } /* * We only process ALTER TABLE ... ATTACH PARTITION commands in the function below * and distribute the partition if necessary. 
*/ if (IsA(parsetree, AlterTableStmt)) { AlterTableStmt *alterTableStatement = (AlterTableStmt *) parsetree; ProcessAlterTableStmtAttachPartition(alterTableStatement); } /* don't run post-process code for local commands */ if (ddlJobs != NIL) { PostProcessUtility(parsetree); } if (commandMustRunAsOwner) { SetUserIdAndSecContext(savedUserId, savedSecurityContext); } /* after local command has completed, finish by executing worker DDLJobs, if any */ if (ddlJobs != NIL) { ListCell *ddlJobCell = NULL; /* * At this point, ALTER TABLE command has already run on the master, so we * are checking constraints over the table with constraints already defined * (to make the constraint check process same for ALTER TABLE and CREATE * TABLE). If constraints do not fulfill the rules we defined, they will * be removed and the table will return back to the state before the ALTER * TABLE command. */ if (IsA(parsetree, AlterTableStmt)) { AlterTableStmt *alterTableStatement = (AlterTableStmt *) parsetree; List *commandList = alterTableStatement->cmds; ListCell *commandCell = NULL; foreach(commandCell, commandList) { AlterTableCmd *command = (AlterTableCmd *) lfirst(commandCell); AlterTableType alterTableType = command->subtype; if (alterTableType == AT_AddConstraint) { Assert(list_length(commandList) == 1); ErrorIfUnsupportedAlterAddConstraintStmt(alterTableStatement); } } } foreach(ddlJobCell, ddlJobs) { DDLJob *ddlJob = (DDLJob *) lfirst(ddlJobCell); ExecuteDistributedDDLJob(ddlJob); } } /* TODO: fold VACUUM's processing into the above block */ if (IsA(parsetree, VacuumStmt)) { VacuumStmt *vacuumStmt = (VacuumStmt *) parsetree; ProcessVacuumStmt(vacuumStmt, queryString); } /* * Ensure value is valid, we can't do some checks during CREATE * EXTENSION. This is important to register some invalidation callbacks. */ CitusHasBeenLoaded(); } /* * IsCitusExtensionStmt returns whether a given utility is a CREATE or ALTER * EXTENSION statement which references the citus extension. This function * returns false for all other inputs. */ static bool IsCitusExtensionStmt(Node *parsetree) { char *extensionName = ""; if (IsA(parsetree, CreateExtensionStmt)) { extensionName = ((CreateExtensionStmt *) parsetree)->extname; } else if (IsA(parsetree, AlterExtensionStmt)) { extensionName = ((AlterExtensionStmt *) parsetree)->extname; } return (strcmp(extensionName, "citus") == 0); } /* Is the passed in statement a transmit statement? */ static bool IsTransmitStmt(Node *parsetree) { if (IsA(parsetree, CopyStmt)) { CopyStmt *copyStatement = (CopyStmt *) parsetree; ListCell *optionCell = NULL; /* Extract options from the statement node tree */ foreach(optionCell, copyStatement->options) { DefElem *defel = (DefElem *) lfirst(optionCell); if (strncmp(defel->defname, "format", NAMEDATALEN) == 0 && strncmp(defGetString(defel), "transmit", NAMEDATALEN) == 0) { return true; } } } return false; } /* * VerifyTransmitStmt checks that the passed in command is a valid transmit * statement. Raise ERROR if not. * * Note that only 'toplevel' options in the CopyStmt struct are checked, and * that verification of the target files existance is not done here. 
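 *
 * For illustration only (the job directory and task file names below are made
 * up), a transmit command that passes these checks looks roughly like:
 *
 *   COPY "base/pgsql_job_cache/job_0042/task_000007" TO STDOUT
 *       WITH (FORMAT 'transmit');
 *
 * i.e. a superuser-issued COPY whose "relation" names a path relative to and
 * below the current working directory, with data flowing over STDIN/STDOUT.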
*/ static void VerifyTransmitStmt(CopyStmt *copyStatement) { char *fileName = NULL; EnsureSuperUser(); /* do some minimal option verification */ if (copyStatement->relation == NULL || copyStatement->relation->relname == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("FORMAT 'transmit' requires a target file"))); } fileName = copyStatement->relation->relname; if (is_absolute_path(fileName)) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("absolute path not allowed")))); } else if (!path_is_relative_and_below_cwd(fileName)) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("path must be in or below the current directory")))); } if (copyStatement->filename != NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("FORMAT 'transmit' only accepts STDIN/STDOUT" " as input/output"))); } if (copyStatement->query != NULL || copyStatement->attlist != NULL || copyStatement->is_program) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("FORMAT 'transmit' does not accept query, attribute list" " or PROGRAM parameters "))); } } /* * ProcessCopyStmt handles Citus specific concerns for COPY like supporting * COPYing from distributed tables and preventing unsupported actions. The * function returns a modified COPY statement to be executed, or NULL if no * further processing is needed. * * commandMustRunAsOwner is an output parameter used to communicate to the caller whether * the copy statement should be executed using elevated privileges. If * ProcessCopyStmt that is required, a call to CheckCopyPermissions will take * care of verifying the current user's permissions before ProcessCopyStmt * returns. */ static Node * ProcessCopyStmt(CopyStmt *copyStatement, char *completionTag, bool *commandMustRunAsOwner) { *commandMustRunAsOwner = false; /* make sure variable is initialized */ /* * We check whether a distributed relation is affected. For that, we need to open the * relation. To prevent race conditions with later lookups, lock the table, and modify * the rangevar to include the schema. */ if (copyStatement->relation != NULL) { bool isDistributedRelation = false; bool isCopyFromWorker = IsCopyFromWorker(copyStatement); if (isCopyFromWorker) { RangeVar *relation = copyStatement->relation; NodeAddress *masterNodeAddress = MasterNodeAddress(copyStatement); char *nodeName = masterNodeAddress->nodeName; int32 nodePort = masterNodeAddress->nodePort; CreateLocalTable(relation, nodeName, nodePort); /* * We expect copy from worker to be on a distributed table; otherwise, * it fails in CitusCopyFrom() while checking the partition method. */ isDistributedRelation = true; } else { bool isFrom = copyStatement->is_from; Relation copiedRelation = NULL; char *schemaName = NULL; MemoryContext relationContext = NULL; /* consider using RangeVarGetRelidExtended to check perms before locking */ copiedRelation = heap_openrv(copyStatement->relation, isFrom ? 
RowExclusiveLock : AccessShareLock); isDistributedRelation = IsDistributedTable(RelationGetRelid(copiedRelation)); /* ensure future lookups hit the same relation */ schemaName = get_namespace_name(RelationGetNamespace(copiedRelation)); /* ensure we copy string into proper context */ relationContext = GetMemoryChunkContext(copyStatement->relation); schemaName = MemoryContextStrdup(relationContext, schemaName); copyStatement->relation->schemaname = schemaName; heap_close(copiedRelation, NoLock); } if (isDistributedRelation) { if (copyStatement->is_from) { /* check permissions, we're bypassing postgres' normal checks */ if (!isCopyFromWorker) { CheckCopyPermissions(copyStatement); } CitusCopyFrom(copyStatement, completionTag); return NULL; } else if (!copyStatement->is_from) { /* * The copy code only handles SELECTs in COPY ... TO on master tables, * as that can be done non-invasively. To handle COPY master_rel TO * the copy statement is replaced by a generated select statement. */ ColumnRef *allColumns = makeNode(ColumnRef); SelectStmt *selectStmt = makeNode(SelectStmt); ResTarget *selectTarget = makeNode(ResTarget); allColumns->fields = list_make1(makeNode(A_Star)); allColumns->location = -1; selectTarget->name = NULL; selectTarget->indirection = NIL; selectTarget->val = (Node *) allColumns; selectTarget->location = -1; selectStmt->targetList = list_make1(selectTarget); selectStmt->fromClause = list_make1(copyObject(copyStatement->relation)); /* replace original statement */ copyStatement = copyObject(copyStatement); copyStatement->relation = NULL; copyStatement->query = (Node *) selectStmt; } } } if (copyStatement->filename != NULL && !copyStatement->is_program) { const char *filename = copyStatement->filename; if (CacheDirectoryElement(filename)) { /* * Only superusers are allowed to copy from a file, so we have to * become superuser to execute copies to/from files used by citus' * query execution. * * XXX: This is a decidedly suboptimal solution, as that means * that triggers, input functions, etc. run with elevated * privileges. But this is better than not being able to run * queries as a normal user. */ *commandMustRunAsOwner = true; /* * Have to manually check permissions here as the COPY will be * run as a superuser. */ if (copyStatement->relation != NULL) { CheckCopyPermissions(copyStatement); } /* * Check if we have a "COPY (query) TO filename". If we do, copy * doesn't accept relative file paths. However, SQL tasks that get * assigned to worker nodes have relative paths. We therefore * convert relative paths to absolute ones here. */ if (copyStatement->relation == NULL && !copyStatement->is_from && !is_absolute_path(filename)) { copyStatement->filename = make_absolute_path(filename); } } } return (Node *) copyStatement; } /* * ProcessCreateTableStmtPartitionOf takes a CreateStmt object as a parameter but * it only processes CREATE TABLE ... PARTITION OF statements and it checks if the * user creates the table as a partition of a distributed table. In that case, * it distributes the partition as well. Since the table itself is a partition, * CreateDistributedTable will attach it to its parent table automatically after * distributing it. * * This function does nothing if PostgreSQL's version is less than 10 or the given * CreateStmt is not a CREATE TABLE ... PARTITION OF command.
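 *
 * As an illustration only (relation names are hypothetical): assuming events
 * is already distributed, the following creates events_2017, distributes it,
 * and relies on CreateDistributedTable to attach it to its parent:
 *
 *   CREATE TABLE events_2017 PARTITION OF events
 *       FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');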
*/ static void ProcessCreateTableStmtPartitionOf(CreateStmt *createStatement) { #if (PG_VERSION_NUM >= 100000) if (createStatement->inhRelations != NIL && createStatement->partbound != NULL) { RangeVar *parentRelation = linitial(createStatement->inhRelations); bool parentMissingOk = false; Oid parentRelationId = RangeVarGetRelid(parentRelation, NoLock, parentMissingOk); /* a partition can only inherit from a single parent table */ Assert(list_length(createStatement->inhRelations) == 1); Assert(parentRelationId != InvalidOid); /* * If a partition is being created and if its parent is a distributed * table, we will distribute this table as well. */ if (IsDistributedTable(parentRelationId)) { bool missingOk = false; Oid relationId = RangeVarGetRelid(createStatement->relation, NoLock, missingOk); Var *parentDistributionColumn = DistPartitionKey(parentRelationId); char parentDistributionMethod = DISTRIBUTE_BY_HASH; char *parentRelationName = get_rel_name(parentRelationId); bool viaDeprecatedAPI = false; CreateDistributedTable(relationId, parentDistributionColumn, parentDistributionMethod, parentRelationName, viaDeprecatedAPI); } } #endif } /* * ProcessAlterTableStmtAttachPartition takes an AlterTableStmt object as a parameter * but it only processes ALTER TABLE ... ATTACH PARTITION commands and * distributes the partition if necessary. There are four cases to consider: * * Parent is not distributed, partition is not distributed: We do not need to * do anything in this case. * * Parent is not distributed, partition is distributed: This can happen if the * user first distributes a table and tries to attach it to a non-distributed * table. Non-distributed tables cannot have distributed partitions, thus we * simply error out in this case. * * Parent is distributed, partition is not distributed: We should distribute * the table and attach it to its parent in workers. CreateDistributedTable * performs both of these operations. Thus, we will not propagate ALTER TABLE * ... ATTACH PARTITION command to workers. * * Parent is distributed, partition is distributed: Partition is already * distributed, we only need to attach it to its parent in workers. Attaching * operation will be performed via propagating this ALTER TABLE ... ATTACH * PARTITION command to workers. * * This function does nothing if PostgreSQL's version is less than 10 or the given * AlterTableStmt is not an ALTER TABLE ... ATTACH PARTITION command. */ static void ProcessAlterTableStmtAttachPartition(AlterTableStmt *alterTableStatement) { #if (PG_VERSION_NUM >= 100000) List *commandList = alterTableStatement->cmds; ListCell *commandCell = NULL; foreach(commandCell, commandList) { AlterTableCmd *alterTableCommand = (AlterTableCmd *) lfirst(commandCell); if (alterTableCommand->subtype == AT_AttachPartition) { Oid relationId = AlterTableLookupRelation(alterTableStatement, NoLock); PartitionCmd *partitionCommand = (PartitionCmd *) alterTableCommand->def; bool partitionMissingOk = false; Oid partitionRelationId = RangeVarGetRelid(partitionCommand->name, NoLock, partitionMissingOk); /* * If the user first distributes the table and then tries to attach it to a * non-distributed table, we error out.
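 *
 * For example (hypothetical relation names): if events_2017 has already been
 * distributed but its intended parent events is a plain local table, then
 * ALTER TABLE events ATTACH PARTITION events_2017 FOR VALUES FROM ('2017-01-01')
 * TO ('2018-01-01') falls into this branch and is rejected.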
*/ if (!IsDistributedTable(relationId) && IsDistributedTable(partitionRelationId)) { char *parentRelationName = get_rel_name(partitionRelationId); ereport(ERROR, (errmsg("non-distributed tables cannot have " "distributed partitions"), errhint("Distribute the partitioned table \"%s\" " "instead", parentRelationName))); } /* if parent of this table is distributed, distribute this table too */ if (IsDistributedTable(relationId) && !IsDistributedTable(partitionRelationId)) { Var *distributionColumn = DistPartitionKey(relationId); char distributionMethod = DISTRIBUTE_BY_HASH; char *relationName = get_rel_name(relationId); bool viaDeprecatedAPI = false; CreateDistributedTable(partitionRelationId, distributionColumn, distributionMethod, relationName, viaDeprecatedAPI); } } } #endif } /* * PlanIndexStmt determines whether a given CREATE INDEX statement involves * a distributed table. If so (and if the statement does not use unsupported * options), it modifies the input statement to ensure proper execution against * the master node table and creates a DDLJob to encapsulate information needed * during the worker node portion of DDL execution before returning that DDLJob * in a List. If no distributed table is involved, this function returns NIL. */ static List * PlanIndexStmt(IndexStmt *createIndexStatement, const char *createIndexCommand) { List *ddlJobs = NIL; /* * We first check whether a distributed relation is affected. For that, we need to * open the relation. To prevent race conditions with later lookups, lock the table, * and modify the rangevar to include the schema. */ if (createIndexStatement->relation != NULL) { Relation relation = NULL; Oid relationId = InvalidOid; bool isDistributedRelation = false; char *namespaceName = NULL; LOCKMODE lockmode = ShareLock; MemoryContext relationContext = NULL; /* * We don't support concurrently creating indexes for distributed * tables, but till this point, we don't know if it is a regular or a * distributed table. */ if (createIndexStatement->concurrent) { lockmode = ShareUpdateExclusiveLock; } /* * XXX: Consider using RangeVarGetRelidExtended with a permission * checking callback. Right now we'll acquire the lock before having * checked permissions, and will only fail when executing the actual * index statements. */ relation = heap_openrv(createIndexStatement->relation, lockmode); relationId = RelationGetRelid(relation); isDistributedRelation = IsDistributedTable(relationId); /* * Before we do any further processing, fix the schema name to make sure * that a (distributed) table with the same name does not appear on the * search path in front of the current schema. We do this even if the * table is not distributed, since a distributed table may appear on the * search path by the time postgres starts processing this statement. 
*/ namespaceName = get_namespace_name(RelationGetNamespace(relation)); /* ensure we copy string into proper context */ relationContext = GetMemoryChunkContext(createIndexStatement->relation); namespaceName = MemoryContextStrdup(relationContext, namespaceName); createIndexStatement->relation->schemaname = namespaceName; heap_close(relation, NoLock); if (isDistributedRelation) { Oid namespaceId = InvalidOid; Oid indexRelationId = InvalidOid; char *indexName = createIndexStatement->idxname; ErrorIfUnsupportedIndexStmt(createIndexStatement); namespaceId = get_namespace_oid(namespaceName, false); indexRelationId = get_relname_relid(indexName, namespaceId); /* if index does not exist, send the command to workers */ if (!OidIsValid(indexRelationId)) { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = relationId; ddlJob->concurrentIndexCmd = createIndexStatement->concurrent; ddlJob->commandString = createIndexCommand; ddlJob->taskList = CreateIndexTaskList(relationId, createIndexStatement); ddlJobs = list_make1(ddlJob); } } } return ddlJobs; } /* * PlanDropIndexStmt determines whether a given DROP INDEX statement involves * a distributed table. If so (and if the statement does not use unsupported * options), it modifies the input statement to ensure proper execution against * the master node table and creates a DDLJob to encapsulate information needed * during the worker node portion of DDL execution before returning that DDLJob * in a List. If no distributed table is involved, this function returns NIL. */ static List * PlanDropIndexStmt(DropStmt *dropIndexStatement, const char *dropIndexCommand) { List *ddlJobs = NIL; ListCell *dropObjectCell = NULL; Oid distributedIndexId = InvalidOid; Oid distributedRelationId = InvalidOid; Assert(dropIndexStatement->removeType == OBJECT_INDEX); /* check if any of the indexes being dropped belong to a distributed table */ foreach(dropObjectCell, dropIndexStatement->objects) { Oid indexId = InvalidOid; Oid relationId = InvalidOid; bool isDistributedRelation = false; struct DropRelationCallbackState state; bool missingOK = true; bool noWait = false; LOCKMODE lockmode = AccessExclusiveLock; List *objectNameList = (List *) lfirst(dropObjectCell); RangeVar *rangeVar = makeRangeVarFromNameList(objectNameList); /* * We don't support concurrently dropping indexes for distributed * tables, but till this point, we don't know if it is a regular or a * distributed table. */ if (dropIndexStatement->concurrent) { lockmode = ShareUpdateExclusiveLock; } /* * The next few statements are based on RemoveRelations() in * commands/tablecmds.c in Postgres source. */ AcceptInvalidationMessages(); state.relkind = RELKIND_INDEX; state.heapOid = InvalidOid; state.concurrent = dropIndexStatement->concurrent; indexId = RangeVarGetRelidExtended(rangeVar, lockmode, missingOK, noWait, RangeVarCallbackForDropIndex, (void *) &state); /* * If the index does not exist, we don't do anything here, and allow * postgres to throw appropriate error or notice message later. 
*/ if (!OidIsValid(indexId)) { continue; } relationId = IndexGetRelation(indexId, false); isDistributedRelation = IsDistributedTable(relationId); if (isDistributedRelation) { distributedIndexId = indexId; distributedRelationId = relationId; break; } } if (OidIsValid(distributedIndexId)) { DDLJob *ddlJob = palloc0(sizeof(DDLJob)); ErrorIfUnsupportedDropIndexStmt(dropIndexStatement); ddlJob->targetRelationId = distributedRelationId; ddlJob->concurrentIndexCmd = dropIndexStatement->concurrent; ddlJob->commandString = dropIndexCommand; ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId, dropIndexStatement); ddlJobs = list_make1(ddlJob); } return ddlJobs; } /* * PlanAlterTableStmt determines whether a given ALTER TABLE statement involves * a distributed table. If so (and if the statement does not use unsupported * options), it modifies the input statement to ensure proper execution against * the master node table and creates a DDLJob to encapsulate information needed * during the worker node portion of DDL execution before returning that DDLJob * in a List. If no distributed table is involved, this function returns NIL. */ static List * PlanAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCommand) { List *ddlJobs = NIL; DDLJob *ddlJob = NULL; LOCKMODE lockmode = 0; Oid leftRelationId = InvalidOid; Oid rightRelationId = InvalidOid; bool isDistributedRelation = false; List *commandList = NIL; ListCell *commandCell = NULL; /* first check whether a distributed relation is affected */ if (alterTableStatement->relation == NULL) { return NIL; } lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (!OidIsValid(leftRelationId)) { return NIL; } isDistributedRelation = IsDistributedTable(leftRelationId); if (!isDistributedRelation) { return NIL; } ErrorIfUnsupportedAlterTableStmt(alterTableStatement); /* * We check if there is a ADD FOREIGN CONSTRAINT command in sub commands list. * If there is we assign referenced releation id to rightRelationId and we also * set skip_validation to true to prevent PostgreSQL to verify validity of the * foreign constraint in master. Validity will be checked in workers anyway. */ commandList = alterTableStatement->cmds; foreach(commandCell, commandList) { AlterTableCmd *command = (AlterTableCmd *) lfirst(commandCell); AlterTableType alterTableType = command->subtype; if (alterTableType == AT_AddConstraint) { Constraint *constraint = (Constraint *) command->def; if (constraint->contype == CONSTR_FOREIGN) { /* * We only support ALTER TABLE ADD CONSTRAINT ... FOREIGN KEY, if it is * only subcommand of ALTER TABLE. It was already checked in * ErrorIfUnsupportedAlterTableStmt. */ Assert(list_length(commandList) <= 1); rightRelationId = RangeVarGetRelid(constraint->pktable, lockmode, alterTableStatement->missing_ok); /* * Foreign constraint validations will be done in workers. If we do not * set this flag, PostgreSQL tries to do additional checking when we drop * to standard_ProcessUtility. standard_ProcessUtility tries to open new * connections to workers to verify foreign constraints while original * transaction is in process, which causes deadlock. */ constraint->skip_validation = true; } } #if (PG_VERSION_NUM >= 100000) else if (alterTableType == AT_AttachPartition) { PartitionCmd *partitionCommand = (PartitionCmd *) command->def; /* * We only support ALTER TABLE ATTACH PARTITION, if it is only subcommand of * ALTER TABLE. 
It was already checked in ErrorIfUnsupportedAlterTableStmt. */ Assert(list_length(commandList) <= 1); rightRelationId = RangeVarGetRelid(partitionCommand->name, NoLock, false); /* * Do not generate tasks if relation is distributed and the partition * is not distributed. Because, we'll manually convert the partition into * distributed table and co-locate with its parent. */ if (!IsDistributedTable(rightRelationId)) { return NIL; } } else if (alterTableType == AT_DetachPartition) { PartitionCmd *partitionCommand = (PartitionCmd *) command->def; /* * We only support ALTER TABLE DETACH PARTITION, if it is only subcommand of * ALTER TABLE. It was already checked in ErrorIfUnsupportedAlterTableStmt. */ Assert(list_length(commandList) <= 1); rightRelationId = RangeVarGetRelid(partitionCommand->name, NoLock, false); } #endif } ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = leftRelationId; ddlJob->concurrentIndexCmd = false; ddlJob->commandString = alterTableCommand; if (rightRelationId) { /* if foreign key related, use specialized task list function ... */ ddlJob->taskList = InterShardDDLTaskList(leftRelationId, rightRelationId, alterTableCommand); } else { /* ... otherwise use standard DDL task list function */ ddlJob->taskList = DDLTaskList(leftRelationId, alterTableCommand); } ddlJobs = list_make1(ddlJob); return ddlJobs; } /* * PlanRenameStmt first determines whether a given rename statement involves * a distributed table. If so (and if it is supported, i.e. renames a column), * it creates a DDLJob to encapsulate information needed during the worker node * portion of DDL execution before returning that DDLJob in a List. If no dis- * tributed table is involved, this function returns NIL. */ static List * PlanRenameStmt(RenameStmt *renameStmt, const char *renameCommand) { Oid relationId = InvalidOid; bool isDistributedRelation = false; DDLJob *ddlJob = NULL; if (!IsAlterTableRenameStmt(renameStmt)) { return NIL; } /* * The lock levels here should be same as the ones taken in * RenameRelation(), renameatt() and RenameConstraint(). However, since all * three statements have identical lock levels, we just use a single statement. */ relationId = RangeVarGetRelid(renameStmt->relation, AccessExclusiveLock, renameStmt->missing_ok); /* * If the table does not exist, don't do anything here to allow PostgreSQL * to throw the appropriate error or notice message later. */ if (!OidIsValid(relationId)) { return NIL; } /* we have no planning to do unless the table is distributed */ isDistributedRelation = IsDistributedTable(relationId); if (!isDistributedRelation) { return NIL; } ErrorIfUnsupportedRenameStmt(renameStmt); ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = relationId; ddlJob->concurrentIndexCmd = false; ddlJob->commandString = renameCommand; ddlJob->taskList = DDLTaskList(relationId, renameCommand); return list_make1(ddlJob); } /* * WorkerProcessAlterTableStmt checks and processes the alter table statement to be * worked on the distributed table of the worker node. Currently, it only processes * ALTER TABLE ... ADD FOREIGN KEY command to skip the validation step. 
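 *
 * For example (hypothetical names), when a statement such as
 *
 *   ALTER TABLE orders ADD CONSTRAINT orders_customer_fkey
 *       FOREIGN KEY (customer_id) REFERENCES customers (customer_id);
 *
 * reaches this hook on a worker, the constraint is marked with skip_validation
 * below so that PostgreSQL does not try to validate it locally; validation is
 * performed on the shards instead.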
*/ static Node * WorkerProcessAlterTableStmt(AlterTableStmt *alterTableStatement, const char *alterTableCommand) { LOCKMODE lockmode = 0; Oid leftRelationId = InvalidOid; bool isDistributedRelation = false; List *commandList = NIL; ListCell *commandCell = NULL; /* first check whether a distributed relation is affected */ if (alterTableStatement->relation == NULL) { return (Node *) alterTableStatement; } lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (!OidIsValid(leftRelationId)) { return (Node *) alterTableStatement; } isDistributedRelation = IsDistributedTable(leftRelationId); if (!isDistributedRelation) { return (Node *) alterTableStatement; } /* * We check if there is a ADD FOREIGN CONSTRAINT command in sub commands list. * If there is we assign referenced releation id to rightRelationId and we also * set skip_validation to true to prevent PostgreSQL to verify validity of the * foreign constraint in master. Validity will be checked in workers anyway. */ commandList = alterTableStatement->cmds; foreach(commandCell, commandList) { AlterTableCmd *command = (AlterTableCmd *) lfirst(commandCell); AlterTableType alterTableType = command->subtype; if (alterTableType == AT_AddConstraint) { Constraint *constraint = (Constraint *) command->def; if (constraint->contype == CONSTR_FOREIGN) { /* foreign constraint validations will be done in shards. */ constraint->skip_validation = true; } } } return (Node *) alterTableStatement; } /* * PlanAlterObjectSchemaStmt determines whether a given ALTER ... SET SCHEMA * statement involves a distributed table and issues a warning if so. Because * we do not support distributed ALTER ... SET SCHEMA, this function always * returns NIL. */ static List * PlanAlterObjectSchemaStmt(AlterObjectSchemaStmt *alterObjectSchemaStmt, const char *alterObjectSchemaCommand) { Oid relationId = InvalidOid; bool noWait = false; if (alterObjectSchemaStmt->relation == NULL) { return NIL; } relationId = RangeVarGetRelidExtended(alterObjectSchemaStmt->relation, AccessExclusiveLock, alterObjectSchemaStmt->missing_ok, noWait, NULL, NULL); /* first check whether a distributed relation is affected */ if (!OidIsValid(relationId) || !IsDistributedTable(relationId)) { return NIL; } /* emit a warning if a distributed relation is affected */ ereport(WARNING, (errmsg("not propagating ALTER ... SET SCHEMA commands to " "worker nodes"), errhint("Connect to worker nodes directly to manually " "change schemas of affected objects."))); return NIL; } /* * ProcessVacuumStmt processes vacuum statements that may need propagation to * distributed tables. If a VACUUM or ANALYZE command references a distributed * table, it is propagated to all involved nodes; otherwise, this function will * immediately exit after some error checking. * * Unlike most other Process functions within this file, this function does not * return a modified parse node, as it is expected that the local VACUUM or * ANALYZE has already been processed. */ static void ProcessVacuumStmt(VacuumStmt *vacuumStmt, const char *vacuumCommand) { Oid relationId = InvalidOid; List *taskList = NIL; bool supportedVacuumStmt = false; if (vacuumStmt->relation != NULL) { LOCKMODE lockMode = (vacuumStmt->options & VACOPT_FULL) ? 
AccessExclusiveLock : ShareUpdateExclusiveLock; relationId = RangeVarGetRelid(vacuumStmt->relation, lockMode, false); if (relationId == InvalidOid) { return; } } supportedVacuumStmt = IsSupportedDistributedVacuumStmt(relationId, vacuumStmt); if (!supportedVacuumStmt) { return; } taskList = VacuumTaskList(relationId, vacuumStmt); /* save old commit protocol to restore at xact end */ Assert(SavedMultiShardCommitProtocol == COMMIT_PROTOCOL_BARE); SavedMultiShardCommitProtocol = MultiShardCommitProtocol; MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; ExecuteModifyTasksWithoutResults(taskList); } /* * IsSupportedDistributedVacuumStmt returns whether distributed execution of a * given VacuumStmt is supported. The provided relationId (if valid) represents * the table targeted by the provided statement. * * Returns true if the statement requires distributed execution and returns * false otherwise; however, this function will raise errors if the provided * statement needs distributed execution but contains unsupported options. */ static bool IsSupportedDistributedVacuumStmt(Oid relationId, VacuumStmt *vacuumStmt) { const char *stmtName = (vacuumStmt->options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE"; if (vacuumStmt->relation == NULL) { /* WARN and exit early for unqualified VACUUM commands */ ereport(WARNING, (errmsg("not propagating %s command to worker nodes", stmtName), errhint("Provide a specific table in order to %s " "distributed tables.", stmtName))); return false; } if (!OidIsValid(relationId) || !IsDistributedTable(relationId)) { return false; } if (!EnableDDLPropagation) { /* WARN and exit early if DDL propagation is not enabled */ ereport(WARNING, (errmsg("not propagating %s command to worker nodes", stmtName), errhint("Set citus.enable_ddl_propagation to true in order to " "send targeted %s commands to worker nodes.", stmtName))); } if (vacuumStmt->options & VACOPT_VERBOSE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("the VERBOSE option is currently unsupported in " "distributed %s commands", stmtName))); } return true; } /* * VacuumTaskList returns a list of tasks to be executed as part of processing * a VacuumStmt which targets a distributed relation. */ static List * VacuumTaskList(Oid relationId, VacuumStmt *vacuumStmt) { List *taskList = NIL; List *shardIntervalList = NIL; ListCell *shardIntervalCell = NULL; uint64 jobId = INVALID_JOB_ID; int taskId = 1; StringInfo vacuumString = DeparseVacuumStmtPrefix(vacuumStmt); const char *columnNames = DeparseVacuumColumnNames(vacuumStmt->va_cols); const int vacuumPrefixLen = vacuumString->len; Oid schemaId = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaId); char *tableName = get_rel_name(relationId); /* * We obtain ShareUpdateExclusiveLock here to not conflict with INSERT's * RowExclusiveLock. However if VACUUM FULL is used, we already obtain * AccessExclusiveLock before reaching to that point and INSERT's will be * blocked anyway. This is inline with PostgreSQL's own behaviour. 
*/ LockRelationOid(relationId, ShareUpdateExclusiveLock); shardIntervalList = LoadShardIntervalList(relationId); /* grab shard lock before getting placement list */ LockShardListMetadata(shardIntervalList, ShareLock); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; Task *task = NULL; char *shardName = pstrdup(tableName); AppendShardIdToName(&shardName, shardInterval->shardId); shardName = quote_qualified_identifier(schemaName, shardName); vacuumString->len = vacuumPrefixLen; appendStringInfoString(vacuumString, shardName); appendStringInfoString(vacuumString, columnNames); task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; task->queryString = pstrdup(vacuumString->data); task->dependedTaskList = NULL; task->replicationModel = REPLICATION_MODEL_INVALID; task->anchorShardId = shardId; task->taskPlacementList = FinalizedShardPlacementList(shardId); taskList = lappend(taskList, task); } return taskList; } /* * DeparseVacuumStmtPrefix returns a StringInfo appropriate for use as a prefix * during distributed execution of a VACUUM or ANALYZE statement. Callers may * reuse this prefix within a loop to generate shard-specific VACUUM or ANALYZE * statements. */ static StringInfo DeparseVacuumStmtPrefix(VacuumStmt *vacuumStmt) { StringInfo vacuumPrefix = makeStringInfo(); int vacuumFlags = vacuumStmt->options; const int unsupportedFlags PG_USED_FOR_ASSERTS_ONLY = ~( VACOPT_ANALYZE | VACOPT_DISABLE_PAGE_SKIPPING | VACOPT_FREEZE | VACOPT_FULL ); /* determine actual command and block out its bit */ if (vacuumFlags & VACOPT_VACUUM) { appendStringInfoString(vacuumPrefix, "VACUUM "); vacuumFlags &= ~VACOPT_VACUUM; } else { appendStringInfoString(vacuumPrefix, "ANALYZE "); vacuumFlags &= ~VACOPT_ANALYZE; } /* unsupported flags should have already been rejected */ Assert((vacuumFlags & unsupportedFlags) == 0); /* if no flags remain, exit early */ if (vacuumFlags == 0) { return vacuumPrefix; } /* otherwise, handle options */ appendStringInfoChar(vacuumPrefix, '('); if (vacuumFlags & VACOPT_ANALYZE) { appendStringInfoString(vacuumPrefix, "ANALYZE,"); } if (vacuumFlags & VACOPT_DISABLE_PAGE_SKIPPING) { appendStringInfoString(vacuumPrefix, "DISABLE_PAGE_SKIPPING,"); } if (vacuumFlags & VACOPT_FREEZE) { appendStringInfoString(vacuumPrefix, "FREEZE,"); } if (vacuumFlags & VACOPT_FULL) { appendStringInfoString(vacuumPrefix, "FULL,"); } vacuumPrefix->data[vacuumPrefix->len - 1] = ')'; appendStringInfoChar(vacuumPrefix, ' '); return vacuumPrefix; } /* * DeparseVacuumColumnNames joins the list of strings using commas as a * delimiter. The whole thing is placed in parenthesis and set off with a * single space in order to facilitate appending it to the end of any VACUUM * or ANALYZE command which uses explicit column names. If the provided list * is empty, this function returns an empty string to keep the calling code * simplest. 
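 *
 * For instance, a column list of (a, b) is rendered as " (a,b)". Combined with
 * the prefix from DeparseVacuumStmtPrefix and a shard name, a hypothetical
 * per-shard command built in VacuumTaskList then looks like:
 *
 *   ANALYZE public.events_102008 (a,b)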
*/ static char * DeparseVacuumColumnNames(List *columnNameList) { StringInfo columnNames = makeStringInfo(); ListCell *columnNameCell = NULL; if (columnNameList == NIL) { return columnNames->data; } appendStringInfoString(columnNames, " ("); foreach(columnNameCell, columnNameList) { char *columnName = strVal(lfirst(columnNameCell)); appendStringInfo(columnNames, "%s,", columnName); } columnNames->data[columnNames->len - 1] = ')'; return columnNames->data; } /* * ErrorIfUnstableCreateOrAlterExtensionStmt compares CITUS_EXTENSIONVERSION * and version given CREATE/ALTER EXTENSION statement will create/update to. If * they are not same in major or minor version numbers, this function errors * out. It ignores the schema version. */ static void ErrorIfUnstableCreateOrAlterExtensionStmt(Node *parsetree) { char *newExtensionVersion = ExtractNewExtensionVersion(parsetree); if (newExtensionVersion != NULL) { /* explicit version provided in CREATE or ALTER EXTENSION UPDATE; verify */ if (!MajorVersionsCompatible(newExtensionVersion, CITUS_EXTENSIONVERSION)) { ereport(ERROR, (errmsg("specified version incompatible with loaded " "Citus library"), errdetail("Loaded library requires %s, but %s was specified.", CITUS_MAJORVERSION, newExtensionVersion), errhint("If a newer library is present, restart the database " "and try the command again."))); } } else { /* * No version was specified, so PostgreSQL will use the default_version * from the citus.control file. */ CheckAvailableVersion(ERROR); } } /* * ExtractNewExtensionVersion returns the new extension version specified by * a CREATE or ALTER EXTENSION statement. Other inputs are not permitted. This * function returns NULL for statements with no explicit version specified. */ static char * ExtractNewExtensionVersion(Node *parsetree) { char *newVersion = NULL; List *optionsList = NIL; ListCell *optionsCell = NULL; if (IsA(parsetree, CreateExtensionStmt)) { optionsList = ((CreateExtensionStmt *) parsetree)->options; } else if (IsA(parsetree, AlterExtensionStmt)) { optionsList = ((AlterExtensionStmt *) parsetree)->options; } else { /* input must be one of the two above types */ Assert(false); } foreach(optionsCell, optionsList) { DefElem *defElement = (DefElem *) lfirst(optionsCell); if (strncmp(defElement->defname, "new_version", NAMEDATALEN) == 0) { newVersion = strVal(defElement->arg); break; } } return newVersion; } /* * ErrorIfUnsupportedIndexStmt checks if the corresponding index statement is * supported for distributed tables and errors out if it is not. 
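 *
 * For example (hypothetical names), on a table hash-distributed by tenant_id,
 *
 *   CREATE UNIQUE INDEX events_tenant_event_idx ON events (tenant_id, event_id);
 *
 * is accepted because the index includes the distribution column, whereas the
 * same unique index on event_id alone is rejected below.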
*/ static void ErrorIfUnsupportedIndexStmt(IndexStmt *createIndexStatement) { char *indexRelationName = createIndexStatement->idxname; if (indexRelationName == NULL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("creating index without a name on a distributed table is " "currently unsupported"))); } if (createIndexStatement->tableSpace != NULL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("specifying tablespaces with CREATE INDEX statements is " "currently unsupported"))); } if (createIndexStatement->unique) { RangeVar *relation = createIndexStatement->relation; bool missingOk = false; /* caller uses ShareLock for non-concurrent indexes, use the same lock here */ LOCKMODE lockMode = ShareLock; Oid relationId = RangeVarGetRelid(relation, lockMode, missingOk); Var *partitionKey = DistPartitionKey(relationId); char partitionMethod = PartitionMethod(relationId); List *indexParameterList = NIL; ListCell *indexParameterCell = NULL; bool indexContainsPartitionColumn = false; /* * Reference tables do not have partition key, and unique constraints * are allowed for them. Thus, we added a short-circuit for reference tables. */ if (partitionMethod == DISTRIBUTE_BY_NONE) { return; } if (partitionMethod == DISTRIBUTE_BY_APPEND) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("creating unique indexes on append-partitioned tables " "is currently unsupported"))); } indexParameterList = createIndexStatement->indexParams; foreach(indexParameterCell, indexParameterList) { IndexElem *indexElement = (IndexElem *) lfirst(indexParameterCell); char *columnName = indexElement->name; AttrNumber attributeNumber = InvalidAttrNumber; /* column name is null for index expressions, skip it */ if (columnName == NULL) { continue; } attributeNumber = get_attnum(relationId, columnName); if (attributeNumber == partitionKey->varattno) { indexContainsPartitionColumn = true; } } if (!indexContainsPartitionColumn) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("creating unique indexes on non-partition " "columns is currently unsupported"))); } } } /* * ErrorIfUnsupportedDropIndexStmt checks if the corresponding drop index statement is * supported for distributed tables and errors out if it is not. */ static void ErrorIfUnsupportedDropIndexStmt(DropStmt *dropIndexStatement) { Assert(dropIndexStatement->removeType == OBJECT_INDEX); if (list_length(dropIndexStatement->objects) > 1) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot drop multiple distributed objects in a " "single command"), errhint("Try dropping each object in a separate DROP " "command."))); } } /* * ErrorIfUnsupportedAlterTableStmt checks if the corresponding alter table statement * is supported for distributed tables and errors out if it is not. Currently, * only the following commands are supported. 
* * ALTER TABLE ADD|DROP COLUMN * ALTER TABLE ALTER COLUMN SET DATA TYPE * ALTER TABLE SET|DROP NOT NULL * ALTER TABLE SET|DROP DEFAULT * ALTER TABLE ADD|DROP CONSTRAINT */ static void ErrorIfUnsupportedAlterTableStmt(AlterTableStmt *alterTableStatement) { List *commandList = alterTableStatement->cmds; ListCell *commandCell = NULL; /* error out if any of the subcommands are unsupported */ foreach(commandCell, commandList) { AlterTableCmd *command = (AlterTableCmd *) lfirst(commandCell); AlterTableType alterTableType = command->subtype; switch (alterTableType) { case AT_AddColumn: { if (IsA(command->def, ColumnDef)) { ColumnDef *column = (ColumnDef *) command->def; /* * Check for SERIAL pseudo-types. The structure of this * check is copied from transformColumnDefinition. */ if (column->typeName && list_length(column->typeName->names) == 1 && !column->typeName->pct_type) { char *typeName = strVal(linitial(column->typeName->names)); if (strcmp(typeName, "smallserial") == 0 || strcmp(typeName, "serial2") == 0 || strcmp(typeName, "serial") == 0 || strcmp(typeName, "serial4") == 0 || strcmp(typeName, "bigserial") == 0 || strcmp(typeName, "serial8") == 0) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot execute ADD COLUMN commands " "involving serial pseudotypes"))); } } } break; } case AT_DropColumn: case AT_ColumnDefault: case AT_AlterColumnType: case AT_DropNotNull: { if (AlterInvolvesPartitionColumn(alterTableStatement, command)) { ereport(ERROR, (errmsg("cannot execute ALTER TABLE command " "involving partition column"))); } break; } case AT_AddConstraint: { Constraint *constraint = (Constraint *) command->def; /* we only allow constraints if they are only subcommand */ if (commandList->length > 1) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot execute ADD CONSTRAINT command with " "other subcommands"), errhint("You can issue each subcommand separately"))); } /* * We will use constraint name in each placement by extending it at * workers. Therefore we require it to be exist. 
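 *
 * For example (hypothetical names), a constraint declared as events_pkey on the
 * distributed table is expected to appear with a shard-specific suffix, e.g.
 * events_pkey_102008, on each placement.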
*/ if (constraint->conname == NULL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create constraint without a name on a " "distributed table"))); } break; } #if (PG_VERSION_NUM >= 100000) case AT_AttachPartition: { Oid relationId = AlterTableLookupRelation(alterTableStatement, NoLock); PartitionCmd *partitionCommand = (PartitionCmd *) command->def; bool missingOK = false; Oid partitionRelationId = RangeVarGetRelid(partitionCommand->name, NoLock, missingOK); /* we only allow partitioning commands if they are only subcommand */ if (commandList->length > 1) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot execute ATTACH PARTITION command " "with other subcommands"), errhint("You can issue each subcommand " "separately."))); } if (IsDistributedTable(partitionRelationId) && !TablesColocated(relationId, partitionRelationId)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("distributed tables cannot have " "non-colocated distributed tables as a " "partition "))); } break; } case AT_DetachPartition: { /* we only allow partitioning commands if they are only subcommand */ if (commandList->length > 1) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot execute DETACH PARTITION command " "with other subcommands"), errhint("You can issue each subcommand " "separately."))); } break; } #endif case AT_SetNotNull: case AT_DropConstraint: case AT_EnableTrigAll: case AT_DisableTrigAll: { /* * We will not perform any special check for ALTER TABLE DROP CONSTRAINT * , ALTER TABLE .. ALTER COLUMN .. SET NOT NULL and ALTER TABLE ENABLE/ * DISABLE TRIGGER ALL */ break; } default: { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("alter table command is currently unsupported"), errdetail("Only ADD|DROP COLUMN, SET|DROP NOT NULL, " "SET|DROP DEFAULT, ADD|DROP CONSTRAINT, " "ATTACH|DETACH PARTITION and TYPE subcommands " "are supported."))); } } } } /* * ErrorIfDropPartitionColumn checks if any subcommands of the given alter table * command is a DROP COLUMN command which drops the partition column of a distributed * table. If there is such a subcommand, this function errors out. */ static void ErrorIfAlterDropsPartitionColumn(AlterTableStmt *alterTableStatement) { LOCKMODE lockmode = 0; Oid leftRelationId = InvalidOid; bool isDistributedRelation = false; List *commandList = alterTableStatement->cmds; ListCell *commandCell = NULL; /* first check whether a distributed relation is affected */ if (alterTableStatement->relation == NULL) { return; } lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); leftRelationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (!OidIsValid(leftRelationId)) { return; } isDistributedRelation = IsDistributedTable(leftRelationId); if (!isDistributedRelation) { return; } /* then check if any of subcommands drop partition column.*/ foreach(commandCell, commandList) { AlterTableCmd *command = (AlterTableCmd *) lfirst(commandCell); AlterTableType alterTableType = command->subtype; if (alterTableType == AT_DropColumn) { if (AlterInvolvesPartitionColumn(alterTableStatement, command)) { ereport(ERROR, (errmsg("cannot execute ALTER TABLE command " "dropping partition column"))); } } } } /* * ErrorIfUnsopprtedAlterAddConstraintStmt runs the constraint checks on distributed * table using the same logic with create_distributed_table. 
*/ static void ErrorIfUnsupportedAlterAddConstraintStmt(AlterTableStmt *alterTableStatement) { LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode); char distributionMethod = PartitionMethod(relationId); Var *distributionColumn = DistPartitionKey(relationId); uint32 colocationId = TableColocationId(relationId); Relation relation = relation_open(relationId, ExclusiveLock); ErrorIfUnsupportedConstraint(relation, distributionMethod, distributionColumn, colocationId); relation_close(relation, NoLock); } /* * ErrorIfUnsupportedConstraint runs checks related to unique index / exclude * constraints. * * The function skips the uniqueness checks for reference tables (i.e., distribution * method is 'none'). * * Forbid UNIQUE, PRIMARY KEY, or EXCLUDE constraints on append partitioned * tables, since currently there is no way of enforcing uniqueness for * overlapping shards. * * Similarly, do not allow such constraints if they do not include the partition * column. This check is important for two reasons: * i. First, currently Citus does not enforce uniqueness constraints on multiple * shards. * ii. Second, INSERT INTO .. ON CONFLICT (i.e., UPSERT) queries can be executed * with no further check for constraints. */ void ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, Var *distributionColumn, uint32 colocationId) { char *relationName = NULL; List *indexOidList = NULL; ListCell *indexOidCell = NULL; /* * We first perform the check for foreign constraints. It is important to do this check * before the next check, because other types of constraints are allowed on reference * tables and we return early for those constraints thanks to the next check. Therefore, * for reference tables, we first check for foreign constraints and if they are OK, * we do not error out for other types of constraints. */ ErrorIfUnsupportedForeignConstraint(relation, distributionMethod, distributionColumn, colocationId); /* * Citus supports any kind of uniqueness constraints for reference tables * given that they only consist of a single shard and we can simply rely on * Postgres. */ if (distributionMethod == DISTRIBUTE_BY_NONE) { return; } relationName = RelationGetRelationName(relation); indexOidList = RelationGetIndexList(relation); foreach(indexOidCell, indexOidList) { Oid indexOid = lfirst_oid(indexOidCell); Relation indexDesc = index_open(indexOid, RowExclusiveLock); IndexInfo *indexInfo = NULL; AttrNumber *attributeNumberArray = NULL; bool hasDistributionColumn = false; int attributeCount = 0; int attributeIndex = 0; /* extract index key information from the index's pg_index info */ indexInfo = BuildIndexInfo(indexDesc); /* only check unique indexes and exclusion constraints. */ if (indexInfo->ii_Unique == false && indexInfo->ii_ExclusionOps == NULL) { index_close(indexDesc, NoLock); continue; } /* * Citus cannot enforce uniqueness/exclusion constraints with overlapping shards. * Thus, emit a warning for unique indexes and exclusion constraints on * append partitioned tables.
*/ if (distributionMethod == DISTRIBUTE_BY_APPEND) { ereport(WARNING, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("table \"%s\" has a UNIQUE or EXCLUDE constraint", relationName), errdetail("UNIQUE constraints, EXCLUDE constraints, " "and PRIMARY KEYs on " "append-partitioned tables cannot be enforced."), errhint("Consider using hash partitioning."))); } attributeCount = indexInfo->ii_NumIndexAttrs; attributeNumberArray = indexInfo->ii_KeyAttrNumbers; for (attributeIndex = 0; attributeIndex < attributeCount; attributeIndex++) { AttrNumber attributeNumber = attributeNumberArray[attributeIndex]; bool uniqueConstraint = false; bool exclusionConstraintWithEquality = false; if (distributionColumn->varattno != attributeNumber) { continue; } uniqueConstraint = indexInfo->ii_Unique; exclusionConstraintWithEquality = (indexInfo->ii_ExclusionOps != NULL && OperatorImplementsEquality( indexInfo->ii_ExclusionOps[ attributeIndex])); if (uniqueConstraint || exclusionConstraintWithEquality) { hasDistributionColumn = true; break; } } if (!hasDistributionColumn) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create constraint on \"%s\"", relationName), errdetail("Distributed relations cannot have UNIQUE, " "EXCLUDE, or PRIMARY KEY constraints that do not " "include the partition column (with an equality " "operator if EXCLUDE)."))); } index_close(indexDesc, NoLock); } } /* * ErrorIfUnsupportedForeignConstraint runs checks related to foreign constraints and * errors out if it is not possible to create one of the foreign constraint in distributed * environment. * * To support foreign constraints, we require that; * - Referencing and referenced tables are hash distributed. * - Referencing and referenced tables are co-located. * - Foreign constraint is defined over distribution column. * - ON DELETE/UPDATE SET NULL, ON DELETE/UPDATE SET DEFAULT and ON UPDATE CASCADE options * are not used. * - Replication factors of referencing and referenced table are 1. */ static void ErrorIfUnsupportedForeignConstraint(Relation relation, char distributionMethod, Var *distributionColumn, uint32 colocationId) { Relation pgConstraint = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; HeapTuple heapTuple = NULL; Oid referencingTableId = relation->rd_id; Oid referencedTableId = InvalidOid; uint32 referencedTableColocationId = INVALID_COLOCATION_ID; Var *referencedTablePartitionColumn = NULL; Datum referencingColumnsDatum; Datum *referencingColumnArray; int referencingColumnCount = 0; Datum referencedColumnsDatum; Datum *referencedColumnArray; int referencedColumnCount = 0; bool isNull = false; int attrIdx = 0; bool foreignConstraintOnPartitionColumn = false; bool selfReferencingTable = false; pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, relation->rd_id); scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidIndexId, true, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); bool singleReplicatedTable = true; if (constraintForm->contype != CONSTRAINT_FOREIGN) { heapTuple = systable_getnext(scanDescriptor); continue; } referencedTableId = constraintForm->confrelid; selfReferencingTable = referencingTableId == referencedTableId; /* * We do not support foreign keys for reference tables. 
Here we skip the second * part of check if the table is a self referencing table because; * - PartitionMethod only works for distributed tables and this table may not be * distributed yet. * - Since referencing and referenced tables are same, it is OK to not checking * distribution method twice. */ if (distributionMethod == DISTRIBUTE_BY_NONE || (!selfReferencingTable && PartitionMethod(referencedTableId) == DISTRIBUTE_BY_NONE)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create foreign key constraint from or to " "reference tables"))); } /* * ON DELETE SET NULL and ON DELETE SET DEFAULT is not supported. Because we do * not want to set partition column to NULL or default value. */ if (constraintForm->confdeltype == FKCONSTR_ACTION_SETNULL || constraintForm->confdeltype == FKCONSTR_ACTION_SETDEFAULT) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create foreign key constraint"), errdetail("SET NULL or SET DEFAULT is not supported" " in ON DELETE operation."))); } /* * ON UPDATE SET NULL, ON UPDATE SET DEFAULT and UPDATE CASCADE is not supported. * Because we do not want to set partition column to NULL or default value. Also * cascading update operation would require re-partitioning. Updating partition * column value is not allowed anyway even outside of foreign key concept. */ if (constraintForm->confupdtype == FKCONSTR_ACTION_SETNULL || constraintForm->confupdtype == FKCONSTR_ACTION_SETDEFAULT || constraintForm->confupdtype == FKCONSTR_ACTION_CASCADE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create foreign key constraint"), errdetail("SET NULL, SET DEFAULT or CASCADE is not" " supported in ON UPDATE operation."))); } /* * Some checks are not meaningful if foreign key references the table itself. * Therefore we will skip those checks. */ if (!selfReferencingTable) { if (!IsDistributedTable(referencedTableId)) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot create foreign key constraint"), errdetail("Referenced table must be a distributed " "table."))); } /* to enforce foreign constraints, tables must be co-located */ referencedTableColocationId = TableColocationId(referencedTableId); if (colocationId == INVALID_COLOCATION_ID || colocationId != referencedTableColocationId) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create foreign key constraint"), errdetail("Foreign key constraint can only be created" " on co-located tables."))); } /* * Partition column must exist in both referencing and referenced side of the * foreign key constraint. They also must be in same ordinal. */ referencedTablePartitionColumn = DistPartitionKey(referencedTableId); } else { /* * Partition column must exist in both referencing and referenced side of the * foreign key constraint. They also must be in same ordinal. */ referencedTablePartitionColumn = distributionColumn; } /* * Column attributes are not available in Form_pg_constraint, therefore we need * to find them in the system catalog. 
After finding them, we iterate over column * attributes together because partition column must be at the same place in both * referencing and referenced side of the foreign key constraint */ referencingColumnsDatum = SysCacheGetAttr(CONSTROID, heapTuple, Anum_pg_constraint_conkey, &isNull); referencedColumnsDatum = SysCacheGetAttr(CONSTROID, heapTuple, Anum_pg_constraint_confkey, &isNull); deconstruct_array(DatumGetArrayTypeP(referencingColumnsDatum), INT2OID, 2, true, 's', &referencingColumnArray, NULL, &referencingColumnCount); deconstruct_array(DatumGetArrayTypeP(referencedColumnsDatum), INT2OID, 2, true, 's', &referencedColumnArray, NULL, &referencedColumnCount); Assert(referencingColumnCount == referencedColumnCount); for (attrIdx = 0; attrIdx < referencingColumnCount; ++attrIdx) { AttrNumber referencingAttrNo = DatumGetInt16(referencingColumnArray[attrIdx]); AttrNumber referencedAttrNo = DatumGetInt16(referencedColumnArray[attrIdx]); if (distributionColumn->varattno == referencingAttrNo && referencedTablePartitionColumn->varattno == referencedAttrNo) { foreignConstraintOnPartitionColumn = true; } } if (!foreignConstraintOnPartitionColumn) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create foreign key constraint"), errdetail("Partition column must exist both " "referencing and referenced side of the " "foreign constraint statement and it must " "be in the same ordinal in both sides."))); } /* * We do not allow to create foreign constraints if shard replication factor is * greater than 1. Because in our current design, multiple replicas may cause * locking problems and inconsistent shard contents. We don't check the referenced * table, since referenced and referencing tables should be co-located and * colocation check has been done above. */ if (IsDistributedTable(referencingTableId)) { /* check whether ALTER TABLE command is applied over single replicated table */ if (!SingleReplicatedTable(referencingTableId)) { singleReplicatedTable = false; } } else { /* check whether creating single replicated table with foreign constraint */ if (ShardReplicationFactor > 1) { singleReplicatedTable = false; } } if (!singleReplicatedTable) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create foreign key constraint"), errdetail("Citus Community Edition currently supports " "foreign key constraints only for " "\"citus.shard_replication_factor = 1\"."), errhint("Please change \"citus.shard_replication_factor to " "1\". To learn more about using foreign keys with " "other replication factors, please contact us at " "https://citusdata.com/about/contact_us."))); } heapTuple = systable_getnext(scanDescriptor); } /* clean up scan and close system catalog */ systable_endscan(scanDescriptor); heap_close(pgConstraint, AccessShareLock); } /* * ErrorIfUnsupportedSeqStmt errors out if the provided create sequence * statement specifies a distributed table in its OWNED BY clause. 
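 *
 * For example (hypothetical names), with events already distributed,
 *
 *   CREATE SEQUENCE events_seq OWNED BY events.event_id;
 *
 * is rejected; declaring the column with a serial type before distributing the
 * table is the supported way to use sequences, as the error hint below suggests.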
*/ static void ErrorIfUnsupportedSeqStmt(CreateSeqStmt *createSeqStmt) { Oid ownedByTableId = InvalidOid; /* create is easy: just prohibit any distributed OWNED BY */ if (OptionsSpecifyOwnedBy(createSeqStmt->options, &ownedByTableId)) { if (IsDistributedTable(ownedByTableId)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create sequences that specify a distributed " "table in their OWNED BY option"), errhint("Use a sequence in a distributed table by specifying " "a serial column type before creating any shards."))); } } } /* * ErrorIfDistributedAlterSeqOwnedBy errors out if the provided alter sequence * statement attempts to change the owned by property of a distributed sequence * or attempt to change a local sequence to be owned by a distributed table. */ static void ErrorIfDistributedAlterSeqOwnedBy(AlterSeqStmt *alterSeqStmt) { Oid sequenceId = RangeVarGetRelid(alterSeqStmt->sequence, AccessShareLock, alterSeqStmt->missing_ok); bool sequenceOwned = false; Oid ownedByTableId = InvalidOid; Oid newOwnedByTableId = InvalidOid; int32 ownedByColumnId = 0; bool hasDistributedOwner = false; /* alter statement referenced nonexistent sequence; return */ if (sequenceId == InvalidOid) { return; } #if (PG_VERSION_NUM >= 100000) sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_AUTO, &ownedByTableId, &ownedByColumnId); if (!sequenceOwned) { sequenceOwned = sequenceIsOwned(sequenceId, DEPENDENCY_INTERNAL, &ownedByTableId, &ownedByColumnId); } #else sequenceOwned = sequenceIsOwned(sequenceId, &ownedByTableId, &ownedByColumnId); #endif /* see whether the sequence is already owned by a distributed table */ if (sequenceOwned) { hasDistributedOwner = IsDistributedTable(ownedByTableId); } if (OptionsSpecifyOwnedBy(alterSeqStmt->options, &newOwnedByTableId)) { /* if a distributed sequence tries to change owner, error */ if (hasDistributedOwner && ownedByTableId != newOwnedByTableId) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter OWNED BY option of a sequence " "already owned by a distributed table"))); } else if (!hasDistributedOwner && IsDistributedTable(newOwnedByTableId)) { /* and don't let local sequences get a distributed OWNED BY */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot associate an existing sequence with a " "distributed table"), errhint("Use a sequence in a distributed table by specifying " "a serial column type before creating any shards."))); } } } /* * ErrorIfUnsupportedTruncateStmt errors out if the command attempts to * truncate a distributed foreign table. */ static void ErrorIfUnsupportedTruncateStmt(TruncateStmt *truncateStatement) { List *relationList = truncateStatement->relations; ListCell *relationCell = NULL; foreach(relationCell, relationList) { RangeVar *rangeVar = (RangeVar *) lfirst(relationCell); Oid relationId = RangeVarGetRelid(rangeVar, NoLock, true); char relationKind = get_rel_relkind(relationId); if (IsDistributedTable(relationId) && relationKind == RELKIND_FOREIGN_TABLE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("truncating distributed foreign tables is " "currently unsupported"), errhint("Use master_drop_all_shards to remove " "foreign table's shards."))); } } } /* * OptionsSpecifyOwnedBy processes the options list of either a CREATE or ALTER * SEQUENCE command, extracting the first OWNED BY option it encounters. The * identifier for the specified table is placed in the Oid out parameter before * returning true. Returns false if no such option is found. 
Still returns true * for OWNED BY NONE, but leaves the out paramter set to InvalidOid. */ static bool OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId) { ListCell *optionCell = NULL; foreach(optionCell, optionList) { DefElem *defElem = (DefElem *) lfirst(optionCell); if (strcmp(defElem->defname, "owned_by") == 0) { List *ownedByNames = defGetQualifiedName(defElem); int nameCount = list_length(ownedByNames); /* if only one name is present, this is OWNED BY NONE */ if (nameCount == 1) { *ownedByTableId = InvalidOid; return true; } else { /* * Otherwise, we have a list of schema, table, column, which we * need to truncate to simply the schema and table to determine * the relevant relation identifier. */ List *relNameList = list_truncate(list_copy(ownedByNames), nameCount - 1); RangeVar *rangeVar = makeRangeVarFromNameList(relNameList); bool failOK = true; *ownedByTableId = RangeVarGetRelid(rangeVar, NoLock, failOK); return true; } } } return false; } /* * ErrorIfDistributedRenameStmt errors out if the corresponding rename statement * operates on any part of a distributed table other than a column. * * Note: This function handles only those rename statements which operate on tables. */ static void ErrorIfUnsupportedRenameStmt(RenameStmt *renameStmt) { Assert(IsAlterTableRenameStmt(renameStmt)); if (renameStmt->renameType == OBJECT_TABLE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("renaming distributed tables is currently unsupported"))); } else if (renameStmt->renameType == OBJECT_TABCONSTRAINT) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("renaming constraints belonging to distributed tables is " "currently unsupported"))); } } /* * CreateLocalTable gets DDL commands from the remote node for the given * relation. Then, it creates the local relation as temporary and on commit drop. */ static void CreateLocalTable(RangeVar *relation, char *nodeName, int32 nodePort) { List *ddlCommandList = NIL; ListCell *ddlCommandCell = NULL; char *relationName = relation->relname; char *schemaName = relation->schemaname; char *qualifiedRelationName = quote_qualified_identifier(schemaName, relationName); /* * The warning message created in TableDDLCommandList() is descriptive * enough; therefore, we just throw an error which says that we could not * run the copy operation. 
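 *
 * When commands are returned they are replayed below; as a sketch (relation
 * name and columns are illustrative), a fetched command such as
 *
 *     CREATE TABLE public.events (id bigint, payload text)
 *
 * is applied locally as a schema-less temporary relation that is dropped at
 * commit, roughly
 *
 *     CREATE TEMP TABLE events (id bigint, payload text) ON COMMIT DROP;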
*/ ddlCommandList = TableDDLCommandList(nodeName, nodePort, qualifiedRelationName); if (ddlCommandList == NIL) { ereport(ERROR, (errmsg("could not run copy from the worker node"))); } /* apply DDL commands against the local database */ foreach(ddlCommandCell, ddlCommandList) { StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell); Node *ddlCommandNode = ParseTreeNode(ddlCommand->data); bool applyDDLCommand = false; if (IsA(ddlCommandNode, CreateStmt) || IsA(ddlCommandNode, CreateForeignTableStmt)) { CreateStmt *createStatement = (CreateStmt *) ddlCommandNode; /* create the local relation as temporary and on commit drop */ createStatement->relation->relpersistence = RELPERSISTENCE_TEMP; createStatement->oncommit = ONCOMMIT_DROP; /* temporarily strip schema name */ createStatement->relation->schemaname = NULL; applyDDLCommand = true; } else if (IsA(ddlCommandNode, CreateForeignServerStmt)) { CreateForeignServerStmt *createServerStmt = (CreateForeignServerStmt *) ddlCommandNode; if (GetForeignServerByName(createServerStmt->servername, true) == NULL) { /* create server if not exists */ applyDDLCommand = true; } } else if ((IsA(ddlCommandNode, CreateExtensionStmt))) { applyDDLCommand = true; } else if ((IsA(ddlCommandNode, CreateSeqStmt))) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot copy to table with serial column from worker"), errhint("Connect to the master node to COPY to tables which " "use serial column types."))); } /* run only a selected set of DDL commands */ if (applyDDLCommand) { CitusProcessUtility(ddlCommandNode, CreateCommandTag(ddlCommandNode), PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); CommandCounterIncrement(); } } } /* * IsAlterTableRenameStmt returns whether the passed-in RenameStmt is one of * the following forms: * * - ALTER TABLE RENAME * - ALTER TABLE RENAME COLUMN * - ALTER TABLE RENAME CONSTRAINT */ static bool IsAlterTableRenameStmt(RenameStmt *renameStmt) { bool isAlterTableRenameStmt = false; if (renameStmt->renameType == OBJECT_TABLE) { isAlterTableRenameStmt = true; } else if (renameStmt->renameType == OBJECT_COLUMN && renameStmt->relationType == OBJECT_TABLE) { isAlterTableRenameStmt = true; } else if (renameStmt->renameType == OBJECT_TABCONSTRAINT) { isAlterTableRenameStmt = true; } return isAlterTableRenameStmt; } /* * AlterInvolvesPartitionColumn checks if the given alter table command * involves relation's partition column. */ static bool AlterInvolvesPartitionColumn(AlterTableStmt *alterTableStatement, AlterTableCmd *command) { bool involvesPartitionColumn = false; Var *partitionColumn = NULL; HeapTuple tuple = NULL; char *alterColumnName = command->name; LOCKMODE lockmode = AlterTableGetLockLevel(alterTableStatement->cmds); Oid relationId = AlterTableLookupRelation(alterTableStatement, lockmode); if (!OidIsValid(relationId)) { return false; } partitionColumn = DistPartitionKey(relationId); tuple = SearchSysCacheAttName(relationId, alterColumnName); if (HeapTupleIsValid(tuple)) { Form_pg_attribute targetAttr = (Form_pg_attribute) GETSTRUCT(tuple); /* reference tables do not have partition column, so allow them */ if (partitionColumn != NULL && targetAttr->attnum == partitionColumn->varattno) { involvesPartitionColumn = true; } ReleaseSysCache(tuple); } return involvesPartitionColumn; } /* * ExecuteDistributedDDLJob simply executes a provided DDLJob in a distributed trans- * action, including metadata sync if needed. 
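 * When metadata is synced, the same command string is also sent to the
 * metadata-holding workers, preceded by a command roughly of the form
 *
 *     SET citus.enable_ddl_propagation TO 'off'
 *
 * (the DISABLE_DDL_PROPAGATION string used below), so that those workers do
 * not try to propagate the DDL once more themselves.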
If the multi shard commit protocol is * in its default value of '1pc', then a notice message indicating that '2pc' might be * used for extra safety. In the commit protocol, a BEGIN is sent after connection to * each shard placement and COMMIT/ROLLBACK is handled by * CompleteShardPlacementTransactions function. */ static void ExecuteDistributedDDLJob(DDLJob *ddlJob) { bool shouldSyncMetadata = ShouldSyncTableMetadata(ddlJob->targetRelationId); EnsureCoordinator(); if (!ddlJob->concurrentIndexCmd) { ShowNoticeIfNotUsing2PC(); if (shouldSyncMetadata) { SendCommandToWorkers(WORKERS_WITH_METADATA, DISABLE_DDL_PROPAGATION); SendCommandToWorkers(WORKERS_WITH_METADATA, (char *) ddlJob->commandString); } ExecuteModifyTasksWithoutResults(ddlJob->taskList); } else { /* save old commit protocol to restore at xact end */ Assert(SavedMultiShardCommitProtocol == COMMIT_PROTOCOL_BARE); SavedMultiShardCommitProtocol = MultiShardCommitProtocol; MultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; PG_TRY(); { ExecuteTasksSequentiallyWithoutResults(ddlJob->taskList); if (shouldSyncMetadata) { List *commandList = list_make2(DISABLE_DDL_PROPAGATION, (char *) ddlJob->commandString); SendBareCommandListToWorkers(WORKERS_WITH_METADATA, commandList); } } PG_CATCH(); { ereport(ERROR, (errmsg("CONCURRENTLY-enabled index command failed"), errdetail("CONCURRENTLY-enabled index commands can fail partially, " "leaving behind an INVALID index."), errhint("Use DROP INDEX CONCURRENTLY IF EXISTS to remove the " "invalid index, then retry the original command."))); } PG_END_TRY(); } } /* * ShowNoticeIfNotUsing2PC shows a notice message about using 2PC by setting * citus.multi_shard_commit_protocol to 2PC. The notice message is shown only once in a * session */ static void ShowNoticeIfNotUsing2PC(void) { if (MultiShardCommitProtocol != COMMIT_PROTOCOL_2PC && !warnedUserAbout2PC) { ereport(NOTICE, (errmsg("using one-phase commit for distributed DDL commands"), errhint("You can enable two-phase commit for extra safety with: " "SET citus.multi_shard_commit_protocol TO '2pc'"))); warnedUserAbout2PC = true; } } /* * DDLTaskList builds a list of tasks to execute a DDL command on a * given list of shards. */ static List * DDLTaskList(Oid relationId, const char *commandString) { List *taskList = NIL; List *shardIntervalList = LoadShardIntervalList(relationId); ListCell *shardIntervalCell = NULL; Oid schemaId = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaId); char *escapedSchemaName = quote_literal_cstr(schemaName); char *escapedCommandString = quote_literal_cstr(commandString); uint64 jobId = INVALID_JOB_ID; int taskId = 1; /* lock metadata before getting placement lists */ LockShardListMetadata(shardIntervalList, ShareLock); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; StringInfo applyCommand = makeStringInfo(); Task *task = NULL; /* * If rightRelationId is not InvalidOid, instead of worker_apply_shard_ddl_command * we use worker_apply_inter_shard_ddl_command. 
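 *
 * The per-shard command built here looks roughly like (shard id, schema and
 * DDL string are illustrative):
 *
 *     SELECT worker_apply_shard_ddl_command (102008, 'public', 'CREATE INDEX ...')
 *
 * so that the worker can rewrite the DDL against the shard-extended table
 * name before executing it.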
*/ appendStringInfo(applyCommand, WORKER_APPLY_SHARD_DDL_COMMAND, shardId, escapedSchemaName, escapedCommandString); task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; task->queryString = applyCommand->data; task->replicationModel = REPLICATION_MODEL_INVALID; task->dependedTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = FinalizedShardPlacementList(shardId); taskList = lappend(taskList, task); } return taskList; } /* * CreateIndexTaskList builds a list of tasks to execute a CREATE INDEX command * against a specified distributed table. */ static List * CreateIndexTaskList(Oid relationId, IndexStmt *indexStmt) { List *taskList = NIL; List *shardIntervalList = LoadShardIntervalList(relationId); ListCell *shardIntervalCell = NULL; StringInfoData ddlString; uint64 jobId = INVALID_JOB_ID; int taskId = 1; initStringInfo(&ddlString); /* lock metadata before getting placement lists */ LockShardListMetadata(shardIntervalList, ShareLock); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; Task *task = NULL; deparse_shard_index_statement(indexStmt, relationId, shardId, &ddlString); task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; task->queryString = pstrdup(ddlString.data); task->replicationModel = REPLICATION_MODEL_INVALID; task->dependedTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = FinalizedShardPlacementList(shardId); taskList = lappend(taskList, task); resetStringInfo(&ddlString); } return taskList; } /* * DropIndexTaskList builds a list of tasks to execute a DROP INDEX command * against a specified distributed table. */ static List * DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) { List *taskList = NIL; List *shardIntervalList = LoadShardIntervalList(relationId); ListCell *shardIntervalCell = NULL; char *indexName = get_rel_name(indexId); Oid schemaId = get_rel_namespace(indexId); char *schemaName = get_namespace_name(schemaId); StringInfoData ddlString; uint64 jobId = INVALID_JOB_ID; int taskId = 1; initStringInfo(&ddlString); /* lock metadata before getting placement lists */ LockShardListMetadata(shardIntervalList, ShareLock); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; char *shardIndexName = pstrdup(indexName); Task *task = NULL; AppendShardIdToName(&shardIndexName, shardId); /* deparse shard-specific DROP INDEX command */ appendStringInfo(&ddlString, "DROP INDEX %s %s %s %s", (dropStmt->concurrent ? "CONCURRENTLY" : ""), (dropStmt->missing_ok ? "IF EXISTS" : ""), quote_qualified_identifier(schemaName, shardIndexName), (dropStmt->behavior == DROP_RESTRICT ? "RESTRICT" : "CASCADE")); task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; task->queryString = pstrdup(ddlString.data); task->replicationModel = REPLICATION_MODEL_INVALID; task->dependedTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = FinalizedShardPlacementList(shardId); taskList = lappend(taskList, task); resetStringInfo(&ddlString); } return taskList; } /* * InterShardDDLTaskList builds a list of tasks to execute a inter shard DDL command on a * shards of given list of distributed table. 
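 *
 * Each task pairs one shard of the left relation with the corresponding
 * shard of the right relation; the generated command looks roughly like
 * (shard ids, schemas and DDL string are illustrative):
 *
 *     SELECT worker_apply_inter_shard_ddl_command (102008, 'public', 102040,
 *                                                  'public', 'ALTER TABLE ... ADD CONSTRAINT ...')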
At the moment this function is used to run * foreign key and partitioning command on worker node. * * leftRelationId is the relation id of actual distributed table which given command is * applied. rightRelationId is the relation id of distributed table which given command * refers to. */ static List * InterShardDDLTaskList(Oid leftRelationId, Oid rightRelationId, const char *commandString) { List *taskList = NIL; List *leftShardList = LoadShardIntervalList(leftRelationId); ListCell *leftShardCell = NULL; Oid leftSchemaId = get_rel_namespace(leftRelationId); char *leftSchemaName = get_namespace_name(leftSchemaId); char *escapedLeftSchemaName = quote_literal_cstr(leftSchemaName); List *rightShardList = LoadShardIntervalList(rightRelationId); ListCell *rightShardCell = NULL; Oid rightSchemaId = get_rel_namespace(rightRelationId); char *rightSchemaName = get_namespace_name(rightSchemaId); char *escapedRightSchemaName = quote_literal_cstr(rightSchemaName); char *escapedCommandString = quote_literal_cstr(commandString); uint64 jobId = INVALID_JOB_ID; int taskId = 1; /* lock metadata before getting placement lists */ LockShardListMetadata(leftShardList, ShareLock); forboth(leftShardCell, leftShardList, rightShardCell, rightShardList) { ShardInterval *leftShardInterval = (ShardInterval *) lfirst(leftShardCell); uint64 leftShardId = leftShardInterval->shardId; StringInfo applyCommand = makeStringInfo(); Task *task = NULL; ShardInterval *rightShardInterval = (ShardInterval *) lfirst(rightShardCell); uint64 rightShardId = rightShardInterval->shardId; appendStringInfo(applyCommand, WORKER_APPLY_INTER_SHARD_DDL_COMMAND, leftShardId, escapedLeftSchemaName, rightShardId, escapedRightSchemaName, escapedCommandString); task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = DDL_TASK; task->queryString = applyCommand->data; task->dependedTaskList = NULL; task->replicationModel = REPLICATION_MODEL_INVALID; task->anchorShardId = leftShardId; task->taskPlacementList = FinalizedShardPlacementList(leftShardId); taskList = lappend(taskList, task); } return taskList; } /* * Before acquiring a table lock, check whether we have sufficient rights. * In the case of DROP INDEX, also try to lock the table before the index. * * This code is heavily borrowed from RangeVarCallbackForDropRelation() in * commands/tablecmds.c in Postgres source. We need this to ensure the right * order of locking while dealing with DROP INDEX statments. Because we are * exclusively using this callback for INDEX processing, the PARTITION-related * logic from PostgreSQL's similar callback has been omitted as unneeded. */ static void RangeVarCallbackForDropIndex(const RangeVar *rel, Oid relOid, Oid oldRelOid, void *arg) { /* *INDENT-OFF* */ HeapTuple tuple; struct DropRelationCallbackState *state; char relkind; Form_pg_class classform; LOCKMODE heap_lockmode; state = (struct DropRelationCallbackState *) arg; relkind = state->relkind; heap_lockmode = state->concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock; Assert(relkind == RELKIND_INDEX); /* * If we previously locked some other index's heap, and the name we're * looking up no longer refers to that relation, release the now-useless * lock. */ if (relOid != oldRelOid && OidIsValid(state->heapOid)) { UnlockRelationOid(state->heapOid, heap_lockmode); state->heapOid = InvalidOid; } /* Didn't find a relation, so no need for locking or permission checks. 
*/ if (!OidIsValid(relOid)) return; tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid)); if (!HeapTupleIsValid(tuple)) return; /* concurrently dropped, so nothing to do */ classform = (Form_pg_class) GETSTRUCT(tuple); if (classform->relkind != relkind) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is not an index", rel->relname))); /* Allow DROP to either table owner or schema owner */ if (!pg_class_ownercheck(relOid, GetUserId()) && !pg_namespace_ownercheck(classform->relnamespace, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rel->relname); if (!allowSystemTableMods && IsSystemClass(relOid, classform)) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied: \"%s\" is a system catalog", rel->relname))); ReleaseSysCache(tuple); /* * In DROP INDEX, attempt to acquire lock on the parent table before * locking the index. index_drop() will need this anyway, and since * regular queries lock tables before their indexes, we risk deadlock if * we do it the other way around. No error if we don't find a pg_index * entry, though --- the relation may have been dropped. */ if (relkind == RELKIND_INDEX && relOid != oldRelOid) { state->heapOid = IndexGetRelation(relOid, true); if (OidIsValid(state->heapOid)) LockRelationOid(state->heapOid, heap_lockmode); } /* *INDENT-ON* */ } /* * Check whether the current user has the permission to execute a COPY * statement, raise ERROR if not. In some cases we have to do this separately * from postgres' copy.c, because we have to execute the copy with elevated * privileges. * * Copied from postgres, where it's part of DoCopy(). */ static void CheckCopyPermissions(CopyStmt *copyStatement) { /* *INDENT-OFF* */ bool is_from = copyStatement->is_from; Relation rel; Oid relid; List *range_table = NIL; TupleDesc tupDesc; AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT); List *attnums; ListCell *cur; RangeTblEntry *rte; rel = heap_openrv(copyStatement->relation, is_from ? 
RowExclusiveLock : AccessShareLock); relid = RelationGetRelid(rel); rte = makeNode(RangeTblEntry); rte->rtekind = RTE_RELATION; rte->relid = relid; rte->relkind = rel->rd_rel->relkind; rte->requiredPerms = required_access; range_table = list_make1(rte); tupDesc = RelationGetDescr(rel); attnums = CopyGetAttnums(tupDesc, rel, copyStatement->attlist); foreach(cur, attnums) { int attno = lfirst_int(cur) - FirstLowInvalidHeapAttributeNumber; if (is_from) { rte->insertedCols = bms_add_member(rte->insertedCols, attno); } else { rte->selectedCols = bms_add_member(rte->selectedCols, attno); } } ExecCheckRTPerms(range_table, true); /* TODO: Perform RLS checks once supported */ heap_close(rel, NoLock); /* *INDENT-ON* */ } /* Helper for CheckCopyPermissions(), copied from postgres */ static List * CopyGetAttnums(TupleDesc tupDesc, Relation rel, List *attnamelist) { /* *INDENT-OFF* */ List *attnums = NIL; if (attnamelist == NIL) { /* Generate default column list */ Form_pg_attribute *attr = tupDesc->attrs; int attr_count = tupDesc->natts; int i; for (i = 0; i < attr_count; i++) { if (attr[i]->attisdropped) { continue; } attnums = lappend_int(attnums, i + 1); } } else { /* Validate the user-supplied list and extract attnums */ ListCell *l; foreach(l, attnamelist) { char *name = strVal(lfirst(l)); int attnum; int i; /* Lookup column name */ attnum = InvalidAttrNumber; for (i = 0; i < tupDesc->natts; i++) { if (tupDesc->attrs[i]->attisdropped) { continue; } if (namestrcmp(&(tupDesc->attrs[i]->attname), name) == 0) { attnum = tupDesc->attrs[i]->attnum; break; } } if (attnum == InvalidAttrNumber) { if (rel != NULL) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" of relation \"%s\" does not exist", name, RelationGetRelationName(rel)))); } else { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" does not exist", name))); } } /* Check for duplicates */ if (list_member_int(attnums, attnum)) { ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), errmsg("column \"%s\" specified more than once", name))); } attnums = lappend_int(attnums, attnum); } } return attnums; /* *INDENT-ON* */ } /* * PostProcessUtility performs additional tasks after a utility's local portion * has been completed. Right now, the sole use is marking new indexes invalid * if they were created using the CONCURRENTLY flag. This (non-transactional) * change provides the fallback state if an error is raised, otherwise a sub- * sequent change to valid will be committed. 
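 *
 * If the command does fail, the index is left behind in this INVALID state;
 * it can then be removed with something like
 *
 *     DROP INDEX CONCURRENTLY IF EXISTS <index name>;
 *
 * and the original command retried, as the error hint in
 * ExecuteDistributedDDLJob suggests.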
*/ static void PostProcessUtility(Node *parsetree) { IndexStmt *indexStmt = NULL; Relation relation = NULL; Oid indexRelationId = InvalidOid; Relation indexRelation = NULL; Relation pg_index = NULL; HeapTuple indexTuple = NULL; Form_pg_index indexForm = NULL; /* only IndexStmts are processed */ if (!IsA(parsetree, IndexStmt)) { return; } /* and even then only if they're CONCURRENT */ indexStmt = (IndexStmt *) parsetree; if (!indexStmt->concurrent) { return; } /* finally, this logic only applies to the coordinator */ if (!IsCoordinator()) { return; } /* commit the current transaction and start anew */ CommitTransactionCommand(); StartTransactionCommand(); /* get the affected relation and index */ relation = heap_openrv(indexStmt->relation, ShareUpdateExclusiveLock); indexRelationId = get_relname_relid(indexStmt->idxname, RelationGetNamespace(relation)); indexRelation = index_open(indexRelationId, RowExclusiveLock); /* close relations but retain locks */ heap_close(relation, NoLock); index_close(indexRelation, NoLock); /* mark index as invalid, in-place (cannot be rolled back) */ index_set_state_flags(indexRelationId, INDEX_DROP_CLEAR_VALID); /* re-open a transaction command from here on out */ CommitTransactionCommand(); StartTransactionCommand(); /* now, update index's validity in a way that can roll back */ pg_index = heap_open(IndexRelationId, RowExclusiveLock); indexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(indexRelationId)); Assert(HeapTupleIsValid(indexTuple)); /* better be present, we have lock! */ /* mark as valid, save, and update pg_index indexes */ indexForm = (Form_pg_index) GETSTRUCT(indexTuple); indexForm->indisvalid = true; CatalogTupleUpdate(pg_index, &indexTuple->t_self, indexTuple); /* clean up; index now marked valid, but ROLLBACK will mark invalid */ heap_freetuple(indexTuple); heap_close(pg_index, RowExclusiveLock); } /* * PlanGrantStmt determines whether a given GRANT/REVOKE statement involves * a distributed table. If so, it creates DDLJobs to encapsulate information * needed during the worker node portion of DDL execution before returning the * DDLJobs in a List. If no distributed table is involved, this returns NIL. * * NB: So far column level privileges are not supported. */ List * PlanGrantStmt(GrantStmt *grantStmt) { StringInfoData privsString; StringInfoData granteesString; StringInfoData targetString; StringInfoData ddlString; ListCell *granteeCell = NULL; ListCell *objectCell = NULL; bool isFirst = true; List *ddlJobs = NIL; initStringInfo(&privsString); initStringInfo(&granteesString); initStringInfo(&targetString); initStringInfo(&ddlString); /* * So far only table level grants are supported. Most other types of * grants aren't interesting anyway. 
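 *
 * For illustration (relation and role names are hypothetical), a statement
 * such as
 *
 *     GRANT SELECT ON orders TO reporting_role;
 *
 * is deparsed and propagated when 'orders' is distributed, whereas e.g.
 * GRANT USAGE ON SCHEMA public TO reporting_role falls through to plain
 * PostgreSQL handling.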
*/ if (grantStmt->targtype != ACL_TARGET_OBJECT || grantStmt->objtype != ACL_OBJECT_RELATION) { return NIL; } /* deparse the privileges */ if (grantStmt->privileges == NIL) { appendStringInfo(&privsString, "ALL"); } else { ListCell *privilegeCell = NULL; isFirst = true; foreach(privilegeCell, grantStmt->privileges) { AccessPriv *priv = lfirst(privilegeCell); if (!isFirst) { appendStringInfoString(&privsString, ", "); } isFirst = false; Assert(priv->cols == NIL); Assert(priv->priv_name != NULL); appendStringInfo(&privsString, "%s", priv->priv_name); } } /* deparse the privileges */ isFirst = true; foreach(granteeCell, grantStmt->grantees) { RoleSpec *spec = lfirst(granteeCell); if (!isFirst) { appendStringInfoString(&granteesString, ", "); } isFirst = false; if (spec->roletype == ROLESPEC_CSTRING) { appendStringInfoString(&granteesString, quote_identifier(spec->rolename)); } else if (spec->roletype == ROLESPEC_CURRENT_USER) { appendStringInfoString(&granteesString, "CURRENT_USER"); } else if (spec->roletype == ROLESPEC_SESSION_USER) { appendStringInfoString(&granteesString, "SESSION_USER"); } else if (spec->roletype == ROLESPEC_PUBLIC) { appendStringInfoString(&granteesString, "PUBLIC"); } } /* * Deparse the target objects, and issue the deparsed statements to * workers, if applicable. That's so we easily can replicate statements * only to distributed relations. */ isFirst = true; foreach(objectCell, grantStmt->objects) { RangeVar *relvar = (RangeVar *) lfirst(objectCell); Oid relOid = RangeVarGetRelid(relvar, NoLock, false); const char *grantOption = ""; DDLJob *ddlJob = NULL; if (!IsDistributedTable(relOid)) { continue; } resetStringInfo(&targetString); appendStringInfo(&targetString, "%s", generate_relation_name(relOid, NIL)); if (grantStmt->is_grant) { if (grantStmt->grant_option) { grantOption = " WITH GRANT OPTION"; } appendStringInfo(&ddlString, "GRANT %s ON %s TO %s%s", privsString.data, targetString.data, granteesString.data, grantOption); } else { if (grantStmt->grant_option) { grantOption = "GRANT OPTION FOR "; } appendStringInfo(&ddlString, "REVOKE %s%s ON %s FROM %s", grantOption, privsString.data, targetString.data, granteesString.data); } ddlJob = palloc0(sizeof(DDLJob)); ddlJob->targetRelationId = relOid; ddlJob->concurrentIndexCmd = false; ddlJob->commandString = pstrdup(ddlString.data); ddlJob->taskList = DDLTaskList(relOid, ddlString.data); ddlJobs = lappend(ddlJobs, ddlJob); resetStringInfo(&ddlString); } return ddlJobs; } citus-7.0.3/src/backend/distributed/master/000077500000000000000000000000001317107136600206505ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/master/citus_create_restore_point.c000066400000000000000000000131021317107136600264370ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_create_restore_point.c * * UDF for creating a consistent restore point across all nodes. * * Copyright (c) 2017, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq-fe.h" #include "access/xlog.h" #include "access/xlog_internal.h" #include "catalog/pg_type.h" #include "distributed/connection_management.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "nodes/pg_list.h" #include "storage/lmgr.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/pg_lsn.h" #define CREATE_RESTORE_POINT_COMMAND "SELECT pg_catalog.pg_create_restore_point($1::text)" /* local functions forward declarations */ static List * OpenConnectionsToAllNodes(void); static void BlockAllDistributedWrites(void); static void CreateRemoteRestorePoints(char *restoreName, List *connectionList); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(citus_create_restore_point); /* * citus_create_restore_point blocks writes to distributed tables and then * runs pg_create_restore_point on all nodes. This creates a consistent * restore point under the assumption that there are no other writers * than the coordinator. */ Datum citus_create_restore_point(PG_FUNCTION_ARGS) { text *restoreNameText = PG_GETARG_TEXT_P(0); char *restoreNameString = NULL; XLogRecPtr localRestorePoint = InvalidXLogRecPtr; List *connectionList = NIL; CheckCitusVersion(ERROR); EnsureSuperUser(); EnsureCoordinator(); if (RecoveryInProgress()) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), (errmsg("recovery is in progress"), errhint("WAL control functions cannot be executed during recovery.")))); } if (!XLogIsNeeded()) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("WAL level not sufficient for creating a restore point"), errhint("wal_level must be set to \"replica\" or \"logical\" at server " "start."))); } restoreNameString = text_to_cstring(restoreNameText); if (strlen(restoreNameString) >= MAXFNAMELEN) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("value too long for restore point (maximum %d characters)", MAXFNAMELEN - 1))); } /* establish connections to all nodes before taking any locks */ connectionList = OpenConnectionsToAllNodes(); /* * Send a BEGIN to bust through pgbouncer. We won't actually commit since * that takes time. Instead we just close the connections and roll back, * which doesn't undo pg_create_restore_point. */ RemoteTransactionListBegin(connectionList); /* DANGER: finish as quickly as possible after this */ BlockAllDistributedWrites(); /* do local restore point first to bail out early if something goes wrong */ localRestorePoint = XLogRestorePoint(restoreNameString); /* run pg_create_restore_point on all nodes */ CreateRemoteRestorePoints(restoreNameString, connectionList); PG_RETURN_LSN(localRestorePoint); } /* * OpenConnectionsToAllNodes opens connections to all nodes and returns the list * of connections. 
*/ static List * OpenConnectionsToAllNodes(void) { List *connectionList = NIL; List *workerNodeList = NIL; ListCell *workerNodeCell = NULL; int connectionFlags = FORCE_NEW_CONNECTION; workerNodeList = ActivePrimaryNodeList(); foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); MultiConnection *connection = NULL; connection = StartNodeConnection(connectionFlags, workerNode->workerName, workerNode->workerPort); MarkRemoteTransactionCritical(connection); connectionList = lappend(connectionList, connection); } FinishConnectionListEstablishment(connectionList); return connectionList; } /* * BlockAllDistributedWrites blocks all modifications to distributed tables * by taking ShareRowExclusive locks on all distributed tables. */ static void BlockAllDistributedWrites(void) { ListCell *distributedTableCell = NULL; List *distributedTableList = DistributedTableList(); LockRelationOid(DistNodeRelationId(), ExclusiveLock); LockRelationOid(DistPartitionRelationId(), ExclusiveLock); foreach(distributedTableCell, distributedTableList) { DistTableCacheEntry *cacheEntry = (DistTableCacheEntry *) lfirst(distributedTableCell); /* block all modifications */ LockRelationOid(cacheEntry->relationId, ShareRowExclusiveLock); } } /* * CreateRemoteRestorePoints creates a restore point via each of the * connections in the list in parallel. */ static void CreateRemoteRestorePoints(char *restoreName, List *connectionList) { ListCell *connectionCell = NULL; int parameterCount = 1; Oid parameterTypes[1] = { TEXTOID }; const char *parameterValues[1] = { restoreName }; foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); int querySent = SendRemoteCommandParams(connection, CREATE_RESTORE_POINT_COMMAND, parameterCount, parameterTypes, parameterValues); if (querySent == 0) { ReportConnectionError(connection, ERROR); } } foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); PGresult *result = GetRemoteCommandResult(connection, true); if (!IsResponseOK(result)) { ReportResultError(connection, result, ERROR); } PQclear(result); ForgetResults(connection); CloseConnection(connection); } } citus-7.0.3/src/backend/distributed/master/master_citus_tools.c000066400000000000000000000427011317107136600247420ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_citus_tools.c * UDF to run multi shard/worker queries * * This file contains functions to run commands on other worker/shards. * * Copyright (c) 2016-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/htup_details.h" #include "catalog/pg_type.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" #include "distributed/multi_server_executor.h" #include "distributed/remote_commands.h" #include "distributed/worker_protocol.h" #include "lib/stringinfo.h" #include "utils/builtins.h" #include "distributed/multi_client_executor.h" PG_FUNCTION_INFO_V1(master_run_on_worker); static int ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray, int **nodePortsArray, StringInfo **commandStringArray, bool *parallel); static void ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, StringInfo *commandStringArray, bool *statusArray, StringInfo *resultStringArray, int commmandCount); static bool GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, StringInfo queryResultString); static bool EvaluateQueryResult(MultiConnection *connection, PGresult *queryResult, StringInfo queryResultString); static void StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString); static void ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, StringInfo *commandStringArray, bool *statusArray, StringInfo *resultStringArray, int commmandCount); static bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, StringInfo queryResult); static Tuplestorestate * CreateTupleStore(TupleDesc tupleDescriptor, StringInfo *nodeNameArray, int *nodePortArray, bool *statusArray, StringInfo *resultArray, int commandCount); /* * master_run_on_worker executes queries/commands to run on specified worker and * returns success status and query/command result. Expected input is 3 arrays * containing node names, node ports, and query strings, and boolean flag to specify * parallel execution. The function then returns node_name, node_port, success, * result tuples upon completion of the query. The same user credentials are used * to connect to remote nodes. 
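 *
 * Illustrative invocation (host names and ports are hypothetical):
 *
 *     SELECT * FROM master_run_on_worker(ARRAY['10.192.0.1', '10.192.0.2'],
 *                                        ARRAY[5432, 5432],
 *                                        ARRAY['select count(*) from pg_class',
 *                                              'select count(*) from pg_class'],
 *                                        true);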
*/ Datum master_run_on_worker(PG_FUNCTION_ARGS) { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; MemoryContext per_query_ctx = NULL; MemoryContext oldcontext = NULL; TupleDesc tupleDescriptor = NULL; Tuplestorestate *tupleStore = NULL; bool parallelExecution = false; StringInfo *nodeNameArray = NULL; int *nodePortArray = NULL; StringInfo *commandStringArray = NULL; bool *statusArray = NULL; StringInfo *resultArray = NULL; int commandIndex = 0; int commandCount = 0; CheckCitusVersion(ERROR); /* check to see if caller supports us returning a tuplestore */ if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize)) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("materialize mode required, but it is not " "allowed in this context"))); } commandCount = ParseCommandParameters(fcinfo, &nodeNameArray, &nodePortArray, &commandStringArray, ¶llelExecution); per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; oldcontext = MemoryContextSwitchTo(per_query_ctx); /* get the requested return tuple description */ tupleDescriptor = CreateTupleDescCopy(rsinfo->expectedDesc); /* * Check to make sure we have correct tuple descriptor */ if (tupleDescriptor->natts != 4 || tupleDescriptor->attrs[0]->atttypid != TEXTOID || tupleDescriptor->attrs[1]->atttypid != INT4OID || tupleDescriptor->attrs[2]->atttypid != BOOLOID || tupleDescriptor->attrs[3]->atttypid != TEXTOID) { ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_DEFINITION), errmsg("query-specified return tuple and " "function return type are not compatible"))); } /* prepare storage for status and result values */ statusArray = palloc0(commandCount * sizeof(bool)); resultArray = palloc0(commandCount * sizeof(StringInfo)); for (commandIndex = 0; commandIndex < commandCount; commandIndex++) { resultArray[commandIndex] = makeStringInfo(); } if (parallelExecution) { ExecuteCommandsInParallelAndStoreResults(nodeNameArray, nodePortArray, commandStringArray, statusArray, resultArray, commandCount); } else { ExecuteCommandsAndStoreResults(nodeNameArray, nodePortArray, commandStringArray, statusArray, resultArray, commandCount); } /* let the caller know we're sending back a tuplestore */ rsinfo->returnMode = SFRM_Materialize; tupleStore = CreateTupleStore(tupleDescriptor, nodeNameArray, nodePortArray, statusArray, resultArray, commandCount); rsinfo->setResult = tupleStore; rsinfo->setDesc = tupleDescriptor; MemoryContextSwitchTo(oldcontext); PG_RETURN_VOID(); } /* ParseCommandParameters reads call parameters and fills in data structures */ static int ParseCommandParameters(FunctionCallInfo fcinfo, StringInfo **nodeNameArray, int **nodePortsArray, StringInfo **commandStringArray, bool *parallel) { ArrayType *nodeNameArrayObject = PG_GETARG_ARRAYTYPE_P(0); ArrayType *nodePortArrayObject = PG_GETARG_ARRAYTYPE_P(1); ArrayType *commandStringArrayObject = PG_GETARG_ARRAYTYPE_P(2); bool parallelExecution = PG_GETARG_BOOL(3); int nodeNameCount = ArrayObjectCount(nodeNameArrayObject); int nodePortCount = ArrayObjectCount(nodePortArrayObject); int commandStringCount = ArrayObjectCount(commandStringArrayObject); Datum *nodeNameDatumArray = DeconstructArrayObject(nodeNameArrayObject); Datum *nodePortDatumArray = DeconstructArrayObject(nodePortArrayObject); Datum *commandStringDatumArray = DeconstructArrayObject(commandStringArrayObject); int index = 0; StringInfo *nodeNames = NULL; int *nodePorts = NULL; StringInfo *commandStrings = NULL; if (nodeNameCount != nodePortCount || nodeNameCount != commandStringCount) { ereport(ERROR, 
(errcode(ERRCODE_SYNTAX_ERROR), errmsg("expected same number of node name, port, and query string"))); } nodeNames = palloc0(nodeNameCount * sizeof(StringInfo)); nodePorts = palloc0(nodeNameCount * sizeof(int)); commandStrings = palloc0(nodeNameCount * sizeof(StringInfo)); for (index = 0; index < nodeNameCount; index++) { text *nodeNameText = DatumGetTextP(nodeNameDatumArray[index]); char *nodeName = text_to_cstring(nodeNameText); int32 nodePort = DatumGetInt32(nodePortDatumArray[index]); text *commandText = DatumGetTextP(commandStringDatumArray[index]); char *commandString = text_to_cstring(commandText); nodeNames[index] = makeStringInfo(); commandStrings[index] = makeStringInfo(); appendStringInfo(nodeNames[index], "%s", nodeName); nodePorts[index] = nodePort; appendStringInfo(commandStrings[index], "%s", commandString); } *nodeNameArray = nodeNames; *nodePortsArray = nodePorts; *commandStringArray = commandStrings; *parallel = parallelExecution; return nodeNameCount; } /* * ExecuteCommandsInParellelAndStoreResults connects to each node specified in * nodeNameArray and nodePortArray, and executes command in commandStringArray * in parallel fashion. Execution success status and result is reported for * each command in statusArray and resultStringArray. Each array contains * commandCount items. */ static void ExecuteCommandsInParallelAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, StringInfo *commandStringArray, bool *statusArray, StringInfo *resultStringArray, int commmandCount) { int commandIndex = 0; MultiConnection **connectionArray = palloc0(commmandCount * sizeof(MultiConnection *)); int finishedCount = 0; /* establish connections */ for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) { char *nodeName = nodeNameArray[commandIndex]->data; int nodePort = nodePortArray[commandIndex]; int connectionFlags = FORCE_NEW_CONNECTION; MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort); StringInfo queryResultString = resultStringArray[commandIndex]; statusArray[commandIndex] = true; if (PQstatus(connection->pgConn) != CONNECTION_OK) { appendStringInfo(queryResultString, "failed to connect to %s:%d", nodeName, (int) nodePort); statusArray[commandIndex] = false; finishedCount++; } else { connectionArray[commandIndex] = connection; } } /* send queries at once */ for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) { int querySent = 0; MultiConnection *connection = connectionArray[commandIndex]; char *queryString = commandStringArray[commandIndex]->data; StringInfo queryResultString = resultStringArray[commandIndex]; /* * If we don't have a connection, nothing to send, error string should already * been filled. */ if (connection == NULL) { continue; } /* * NB: this intentionally uses PQsendQuery rather than * SendRemoteCommand as multiple commands are allowed. 
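 * (PQsendQuery uses the simple query protocol, so a single array element may
 * contain several semicolon-separated statements, e.g. the hypothetical
 * 'BEGIN; CREATE INDEX ...; COMMIT'.)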
*/ querySent = PQsendQuery(connection->pgConn, queryString); if (querySent == 0) { StoreErrorMessage(connection, queryResultString); statusArray[commandIndex] = false; CloseConnection(connection); connectionArray[commandIndex] = NULL; finishedCount++; } } /* check for query results */ while (finishedCount < commmandCount) { for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) { MultiConnection *connection = connectionArray[commandIndex]; StringInfo queryResultString = resultStringArray[commandIndex]; bool success = false; bool queryFinished = false; if (connection == NULL) { continue; } queryFinished = GetConnectionStatusAndResult(connection, &success, queryResultString); if (queryFinished) { finishedCount++; statusArray[commandIndex] = success; connectionArray[commandIndex] = NULL; CloseConnection(connection); } } CHECK_FOR_INTERRUPTS(); if (finishedCount < commmandCount) { long sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepIntervalPerCycle); } } pfree(connectionArray); } /* * GetConnectionStatusAndResult checks the active connection and returns true if * query execution is finished (either success or fail). * Query success/fail in resultStatus, and query result in queryResultString are * reported upon completion of the query. */ static bool GetConnectionStatusAndResult(MultiConnection *connection, bool *resultStatus, StringInfo queryResultString) { bool finished = true; ConnStatusType connectionStatus = PQstatus(connection->pgConn); int consumeInput = 0; PGresult *queryResult = NULL; bool success = false; *resultStatus = false; resetStringInfo(queryResultString); if (connectionStatus == CONNECTION_BAD) { appendStringInfo(queryResultString, "connection lost"); return finished; } consumeInput = PQconsumeInput(connection->pgConn); if (consumeInput == 0) { appendStringInfo(queryResultString, "query result unavailable"); return finished; } /* check later if busy */ if (PQisBusy(connection->pgConn) != 0) { finished = false; return finished; } /* query result is available at this point */ queryResult = PQgetResult(connection->pgConn); success = EvaluateQueryResult(connection, queryResult, queryResultString); PQclear(queryResult); *resultStatus = success; finished = true; return true; } /* * EvaluateQueryResult gets the query result from connection and returns * true if the query is executed successfully, false otherwise. A query result * or an error message is returned in queryResultString. The function requires * that the query returns a single column/single row result. It returns an * error otherwise. 
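 *
 * For example, a command string like 'select count(*) from pg_class'
 * satisfies this, whereas a query returning two columns or multiple rows
 * produces an error message in queryResultString instead.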
*/ static bool EvaluateQueryResult(MultiConnection *connection, PGresult *queryResult, StringInfo queryResultString) { bool success = false; ExecStatusType resultStatus = PQresultStatus(queryResult); if (resultStatus == PGRES_COMMAND_OK) { char *commandStatus = PQcmdStatus(queryResult); appendStringInfo(queryResultString, "%s", commandStatus); success = true; } else if (resultStatus == PGRES_TUPLES_OK) { int ntuples = PQntuples(queryResult); int nfields = PQnfields(queryResult); /* error if query returns more than 1 rows, or more than 1 fields */ if (nfields != 1) { appendStringInfo(queryResultString, "expected a single column in query target"); } else if (ntuples > 1) { appendStringInfo(queryResultString, "expected a single row in query result"); } else { int row = 0; int column = 0; if (!PQgetisnull(queryResult, row, column)) { char *queryResultValue = PQgetvalue(queryResult, row, column); appendStringInfo(queryResultString, "%s", queryResultValue); } success = true; } } else { StoreErrorMessage(connection, queryResultString); } return success; } /* * StoreErrorMessage gets the error message from connection and stores it * in queryResultString. It should be called only when error is present * otherwise it would return a default error message. */ static void StoreErrorMessage(MultiConnection *connection, StringInfo queryResultString) { char *errorMessage = PQerrorMessage(connection->pgConn); if (errorMessage != NULL) { char *firstNewlineIndex = NULL; /* copy the error message to a writable memory */ errorMessage = pnstrdup(errorMessage, strlen(errorMessage)); firstNewlineIndex = strchr(errorMessage, '\n'); /* trim the error message at the line break */ if (firstNewlineIndex != NULL) { *firstNewlineIndex = '\0'; } } else { /* put a default error message if no error message is reported */ errorMessage = "An error occurred while running the query"; } appendStringInfo(queryResultString, "%s", errorMessage); } /* * ExecuteCommandsAndStoreResults connects to each node specified in * nodeNameArray and nodePortArray, and executes command in commandStringArray * in sequential order. Execution success status and result is reported for * each command in statusArray and resultStringArray. Each array contains * commandCount items. */ static void ExecuteCommandsAndStoreResults(StringInfo *nodeNameArray, int *nodePortArray, StringInfo *commandStringArray, bool *statusArray, StringInfo *resultStringArray, int commmandCount) { int commandIndex = 0; for (commandIndex = 0; commandIndex < commmandCount; commandIndex++) { char *nodeName = nodeNameArray[commandIndex]->data; int32 nodePort = nodePortArray[commandIndex]; bool success = false; char *queryString = commandStringArray[commandIndex]->data; StringInfo queryResultString = resultStringArray[commandIndex]; success = ExecuteRemoteQueryOrCommand(nodeName, nodePort, queryString, queryResultString); statusArray[commandIndex] = success; CHECK_FOR_INTERRUPTS(); } } /* * ExecuteRemoteQueryOrCommand executes a query at specified remote node using * the calling user's credentials. The function returns the query status * (success/failure), and query result. The query is expected to return a single * target containing zero or one rows. 
*/ static bool ExecuteRemoteQueryOrCommand(char *nodeName, uint32 nodePort, char *queryString, StringInfo queryResultString) { int connectionFlags = FORCE_NEW_CONNECTION; MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort); bool success = false; PGresult *queryResult = NULL; bool raiseInterrupts = true; if (PQstatus(connection->pgConn) != CONNECTION_OK) { appendStringInfo(queryResultString, "failed to connect to %s:%d", nodeName, (int) nodePort); return false; } SendRemoteCommand(connection, queryString); queryResult = GetRemoteCommandResult(connection, raiseInterrupts); success = EvaluateQueryResult(connection, queryResult, queryResultString); PQclear(queryResult); /* close the connection */ CloseConnection(connection); return success; } /* CreateTupleStore prepares result tuples from individual query results */ static Tuplestorestate * CreateTupleStore(TupleDesc tupleDescriptor, StringInfo *nodeNameArray, int *nodePortArray, bool *statusArray, StringInfo *resultArray, int commandCount) { Tuplestorestate *tupleStore = tuplestore_begin_heap(true, false, work_mem); int commandIndex = 0; bool nulls[4] = { false, false, false, false }; for (commandIndex = 0; commandIndex < commandCount; commandIndex++) { Datum values[4]; HeapTuple tuple = NULL; StringInfo nodeNameString = nodeNameArray[commandIndex]; StringInfo resultString = resultArray[commandIndex]; text *nodeNameText = cstring_to_text_with_len(nodeNameString->data, nodeNameString->len); text *resultText = cstring_to_text_with_len(resultString->data, resultString->len); values[0] = PointerGetDatum(nodeNameText); values[1] = Int32GetDatum(nodePortArray[commandIndex]); values[2] = BoolGetDatum(statusArray[commandIndex]); values[3] = PointerGetDatum(resultText); tuple = heap_form_tuple(tupleDescriptor, values, nulls); tuplestore_puttuple(tupleStore, tuple); heap_freetuple(tuple); pfree(nodeNameText); pfree(resultText); } tuplestore_donestoring(tupleStore); return tupleStore; } citus-7.0.3/src/backend/distributed/master/master_create_shards.c000066400000000000000000000354321317107136600252050ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_create_shards.c * * This file contains functions to distribute a table by creating shards for it * across a set of worker nodes. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include "libpq-fe.h" #include "miscadmin.h" #include "port.h" #include #include #include #include #include #include #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "distributed/listutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/reference_table_utils.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/transaction_management.h" #include "distributed/worker_manager.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "postmaster/postmaster.h" #include "storage/fd.h" #include "storage/lmgr.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/lsyscache.h" #include "utils/palloc.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(master_create_worker_shards); /* * master_create_worker_shards is a user facing function to create worker shards * for the given relation in round robin order. */ Datum master_create_worker_shards(PG_FUNCTION_ARGS) { text *tableNameText = PG_GETARG_TEXT_P(0); int32 shardCount = PG_GETARG_INT32(1); int32 replicationFactor = PG_GETARG_INT32(2); Oid distributedTableId = ResolveRelationId(tableNameText); /* do not add any data */ bool useExclusiveConnections = false; EnsureCoordinator(); CheckCitusVersion(ERROR); CreateShardsWithRoundRobinPolicy(distributedTableId, shardCount, replicationFactor, useExclusiveConnections); PG_RETURN_VOID(); } /* * CreateShardsWithRoundRobinPolicy creates empty shards for the given table * based on the specified number of initial shards. The function first updates * metadata on the coordinator node to make this shard (and its placements) * visible. Note that the function assumes the table is hash partitioned and * calculates the min/max hash token ranges for each shard, giving them an equal * split of the hash space. Finally, function creates empty shard placements on * worker nodes. */ void CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, int32 replicationFactor, bool useExclusiveConnections) { char shardStorageType = 0; List *workerNodeList = NIL; int32 workerNodeCount = 0; uint32 placementAttemptCount = 0; uint64 hashTokenIncrement = 0; List *existingShardList = NIL; int64 shardIndex = 0; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); bool colocatedShard = false; List *insertedShardPlacements = NIL; /* make sure table is hash partitioned */ CheckHashPartitionedTable(distributedTableId); /* * In contrast to append/range partitioned tables it makes more sense to * require ownership privileges - shards for hash-partitioned tables are * only created once, not continually during ingest as for the other * partitioning types. 
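 *
 * For instance (table name is illustrative), shard creation via
 *
 *     SELECT master_create_worker_shards('events', 32, 1);
 *
 * must be issued by the owner of 'events' (or a superuser).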
*/ EnsureTableOwner(distributedTableId); /* we plan to add shards: get an exclusive lock on relation oid */ LockRelationOid(distributedTableId, ExclusiveLock); /* validate that shards haven't already been created for this table */ existingShardList = LoadShardList(distributedTableId); if (existingShardList != NIL) { char *tableName = get_rel_name(distributedTableId); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("table \"%s\" has already had shards created for it", tableName))); } /* make sure that at least one shard is specified */ if (shardCount <= 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("shard_count must be positive"))); } /* make sure that at least one replica is specified */ if (replicationFactor <= 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("replication_factor must be positive"))); } /* make sure that RF=1 if the table is streaming replicated */ if (cacheEntry->replicationModel == REPLICATION_MODEL_STREAMING && replicationFactor > 1) { char *relationName = get_rel_name(cacheEntry->relationId); ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("using replication factor %d with the streaming " "replication model is not supported", replicationFactor), errdetail("The table %s is marked as streaming replicated and " "the shard replication factor of streaming replicated " "tables must be 1.", relationName), errhint("Use replication factor 1."))); } /* calculate the split of the hash space */ hashTokenIncrement = HASH_TOKEN_COUNT / shardCount; /* don't allow concurrent node list changes that require an exclusive lock */ LockRelationOid(DistNodeRelationId(), RowShareLock); /* load and sort the worker node list for deterministic placement */ workerNodeList = ActivePrimaryNodeList(); workerNodeList = SortList(workerNodeList, CompareWorkerNodes); /* make sure we don't process cancel signals until all shards are created */ HOLD_INTERRUPTS(); workerNodeCount = list_length(workerNodeList); if (replicationFactor > workerNodeCount) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("replication_factor (%d) exceeds number of worker nodes " "(%d)", replicationFactor, workerNodeCount), errhint("Add more worker nodes or try again with a lower " "replication factor."))); } /* if we have enough nodes, add an extra placement attempt for backup */ placementAttemptCount = (uint32) replicationFactor; if (workerNodeCount > replicationFactor) { placementAttemptCount++; } /* set shard storage type according to relation type */ shardStorageType = ShardStorageType(distributedTableId); for (shardIndex = 0; shardIndex < shardCount; shardIndex++) { uint32 roundRobinNodeIndex = shardIndex % workerNodeCount; /* initialize the hash token space for this shard */ text *minHashTokenText = NULL; text *maxHashTokenText = NULL; int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement); int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1); uint64 shardId = GetNextShardId(); List *currentInsertedShardPlacements = NIL; /* if we are at the last shard, make sure the max token value is INT_MAX */ if (shardIndex == (shardCount - 1)) { shardMaxHashToken = INT32_MAX; } /* insert the shard metadata row along with its min/max values */ minHashTokenText = IntegerToText(shardMinHashToken); maxHashTokenText = IntegerToText(shardMaxHashToken); /* * Grabbing the shard metadata lock isn't technically necessary since * we already hold an exclusive lock on the partition table, but we'll * acquire it 
for the sake of completeness. As we're adding new active * placements, the mode must be exclusive. */ LockShardDistributionMetadata(shardId, ExclusiveLock); InsertShardRow(distributedTableId, shardId, shardStorageType, minHashTokenText, maxHashTokenText); currentInsertedShardPlacements = InsertShardPlacementRows(distributedTableId, shardId, workerNodeList, roundRobinNodeIndex, replicationFactor); insertedShardPlacements = list_concat(insertedShardPlacements, currentInsertedShardPlacements); } CreateShardsOnWorkers(distributedTableId, insertedShardPlacements, useExclusiveConnections, colocatedShard); if (QueryCancelPending) { ereport(WARNING, (errmsg("cancel requests are ignored during shard creation"))); QueryCancelPending = false; } RESUME_INTERRUPTS(); } /* * CreateColocatedShards creates shards for the target relation colocated with * the source relation. */ void CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool useExclusiveConnections) { char targetShardStorageType = 0; List *existingShardList = NIL; List *sourceShardIntervalList = NIL; ListCell *sourceShardCell = NULL; bool colocatedShard = true; List *insertedShardPlacements = NIL; /* make sure that tables are hash partitioned */ CheckHashPartitionedTable(targetRelationId); CheckHashPartitionedTable(sourceRelationId); /* * In contrast to append/range partitioned tables it makes more sense to * require ownership privileges - shards for hash-partitioned tables are * only created once, not continually during ingest as for the other * partitioning types. */ EnsureTableOwner(targetRelationId); /* we plan to add shards: get an exclusive lock on target relation oid */ LockRelationOid(targetRelationId, ExclusiveLock); /* we don't want source table to get dropped before we colocate with it */ LockRelationOid(sourceRelationId, AccessShareLock); /* prevent placement changes of the source relation until we colocate with them */ sourceShardIntervalList = LoadShardIntervalList(sourceRelationId); LockShardListMetadata(sourceShardIntervalList, ShareLock); /* validate that shards haven't already been created for this table */ existingShardList = LoadShardList(targetRelationId); if (existingShardList != NIL) { char *targetRelationName = get_rel_name(targetRelationId); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("table \"%s\" has already had shards created for it", targetRelationName))); } targetShardStorageType = ShardStorageType(targetRelationId); foreach(sourceShardCell, sourceShardIntervalList) { ShardInterval *sourceShardInterval = (ShardInterval *) lfirst(sourceShardCell); uint64 sourceShardId = sourceShardInterval->shardId; uint64 newShardId = GetNextShardId(); ListCell *sourceShardPlacementCell = NULL; int32 shardMinValue = DatumGetInt32(sourceShardInterval->minValue); int32 shardMaxValue = DatumGetInt32(sourceShardInterval->maxValue); text *shardMinValueText = IntegerToText(shardMinValue); text *shardMaxValueText = IntegerToText(shardMaxValue); List *sourceShardPlacementList = ShardPlacementList(sourceShardId); InsertShardRow(targetRelationId, newShardId, targetShardStorageType, shardMinValueText, shardMaxValueText); foreach(sourceShardPlacementCell, sourceShardPlacementList) { ShardPlacement *sourcePlacement = (ShardPlacement *) lfirst(sourceShardPlacementCell); uint32 groupId = sourcePlacement->groupId; const RelayFileState shardState = FILE_FINALIZED; const uint64 shardSize = 0; uint64 shardPlacementId = 0; ShardPlacement *shardPlacement = NULL; /* * Optimistically add shard placement row 
to the pg_dist_shard_placement, in case * of any error it will be rolled back. */ shardPlacementId = InsertShardPlacementRow(newShardId, INVALID_PLACEMENT_ID, shardState, shardSize, groupId); shardPlacement = LoadShardPlacement(newShardId, shardPlacementId); insertedShardPlacements = lappend(insertedShardPlacements, shardPlacement); } } CreateShardsOnWorkers(targetRelationId, insertedShardPlacements, useExclusiveConnections, colocatedShard); } /* * CreateReferenceTableShard creates a single shard for the given * distributedTableId. The created shard does not have min/max values. * Also, the shard is replicated to all active nodes in the cluster. */ void CreateReferenceTableShard(Oid distributedTableId) { char shardStorageType = 0; List *workerNodeList = NIL; int32 workerNodeCount = 0; List *existingShardList = NIL; uint64 shardId = INVALID_SHARD_ID; int workerStartIndex = 0; int replicationFactor = 0; text *shardMinValue = NULL; text *shardMaxValue = NULL; bool useExclusiveConnection = false; bool colocatedShard = false; List *insertedShardPlacements = NIL; /* * In contrast to append/range partitioned tables it makes more sense to * require ownership privileges - shards for reference tables are * only created once, not continually during ingest as for the other * partitioning types such as append and range. */ EnsureTableOwner(distributedTableId); /* we plan to add shards: get an exclusive lock on relation oid */ LockRelationOid(distributedTableId, ExclusiveLock); /* set shard storage type according to relation type */ shardStorageType = ShardStorageType(distributedTableId); /* validate that shards haven't already been created for this table */ existingShardList = LoadShardList(distributedTableId); if (existingShardList != NIL) { char *tableName = get_rel_name(distributedTableId); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("table \"%s\" has already had shards created for it", tableName))); } /* load and sort the worker node list for deterministic placement */ workerNodeList = ActivePrimaryNodeList(); workerNodeList = SortList(workerNodeList, CompareWorkerNodes); /* get the next shard id */ shardId = GetNextShardId(); /* set the replication factor equal to the number of worker nodes */ workerNodeCount = list_length(workerNodeList); replicationFactor = workerNodeCount; /* * Grabbing the shard metadata lock isn't technically necessary since * we already hold an exclusive lock on the partition table, but we'll * acquire it for the sake of completeness. As we're adding new active * placements, the mode must be exclusive. */ LockShardDistributionMetadata(shardId, ExclusiveLock); InsertShardRow(distributedTableId, shardId, shardStorageType, shardMinValue, shardMaxValue); insertedShardPlacements = InsertShardPlacementRows(distributedTableId, shardId, workerNodeList, workerStartIndex, replicationFactor); CreateShardsOnWorkers(distributedTableId, insertedShardPlacements, useExclusiveConnection, colocatedShard); } /* * CheckHashPartitionedTable looks up the partition information for the given * tableId and checks if the table is hash partitioned. If not, the function * throws an error.
*/ void CheckHashPartitionedTable(Oid distributedTableId) { char partitionType = PartitionMethod(distributedTableId); if (partitionType != DISTRIBUTE_BY_HASH) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported table partition type: %c", partitionType))); } } /* Helper function to convert an integer value to a text type */ text * IntegerToText(int32 value) { text *valueText = NULL; StringInfo valueString = makeStringInfo(); appendStringInfo(valueString, "%d", value); valueText = cstring_to_text(valueString->data); return valueText; } citus-7.0.3/src/backend/distributed/master/master_delete_protocol.c000066400000000000000000000431711317107136600255600ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_delete_protocol.c * * Routine for deleting shards in the distributed cluster. This function takes * in a delete command and deletes a shard if and only if all rows in the shard * satisfy the conditions in the delete command. * * Copyright (c) 2014-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include "libpq-fe.h" #include "miscadmin.h" #include "port.h" #include #include "access/xact.h" #include "catalog/namespace.h" #include "commands/dbcommands.h" #include "distributed/connection_management.h" #include "distributed/master_protocol.h" #include "distributed/metadata_sync.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/multi_utility.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/placement_connection.h" #include "distributed/relay_utility.h" #include "distributed/remote_commands.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" #include "lib/stringinfo.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "nodes/relation.h" #include "optimizer/clauses.h" #include "optimizer/predtest.h" #include "optimizer/restrictinfo.h" #include "storage/lock.h" #include "storage/lmgr.h" #include "tcop/tcopprot.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/lsyscache.h" /* Local functions forward declarations */ static void CheckTableCount(Query *deleteQuery); static void CheckDeleteCriteria(Node *deleteCriteria); static void CheckPartitionColumn(Oid relationId, Node *whereClause); static List * ShardsMatchingDeleteCriteria(Oid relationId, List *shardList, Node *deleteCriteria); static int DropShards(Oid relationId, char *schemaName, char *relationName, List *deletableShardIntervalList); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(master_apply_delete_command); PG_FUNCTION_INFO_V1(master_drop_all_shards); PG_FUNCTION_INFO_V1(master_drop_sequences); /* * master_apply_delete_command takes in a delete command, finds shards that * match the criteria defined in the delete command, drops the found shards from * the worker nodes, and updates the corresponding metadata on the master node. * This function drops a shard if and only if all rows in the shard satisfy * the conditions in the delete command. 
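 *
 * A minimal usage sketch (the table and column names below are illustrative
 * placeholders, not taken from this file):
 *
 *   SELECT master_apply_delete_command(
 *       'DELETE FROM github_events WHERE created_at < ''2016-01-01''');
 *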
Note that this function only accepts * conditions on the partition key and if no condition is provided then all * shards are deleted. * * We mark shard placements that we couldn't drop as to be deleted later. If a * shard satisfies the given conditions, we delete it from shard metadata table * even though related shard placements are not deleted. */ Datum master_apply_delete_command(PG_FUNCTION_ARGS) { text *queryText = PG_GETARG_TEXT_P(0); char *queryString = text_to_cstring(queryText); char *relationName = NULL; char *schemaName = NULL; Oid relationId = InvalidOid; List *shardIntervalList = NIL; List *deletableShardIntervalList = NIL; List *queryTreeList = NIL; Query *deleteQuery = NULL; Node *whereClause = NULL; Node *deleteCriteria = NULL; Node *queryTreeNode = NULL; DeleteStmt *deleteStatement = NULL; int droppedShardCount = 0; LOCKMODE lockMode = 0; char partitionMethod = 0; bool failOK = false; #if (PG_VERSION_NUM >= 100000) RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString); queryTreeNode = rawStmt->stmt; #else queryTreeNode = ParseTreeNode(queryString); #endif EnsureCoordinator(); CheckCitusVersion(ERROR); if (!IsA(queryTreeNode, DeleteStmt)) { ereport(ERROR, (errmsg("query \"%s\" is not a delete statement", queryString))); } deleteStatement = (DeleteStmt *) queryTreeNode; schemaName = deleteStatement->relation->schemaname; relationName = deleteStatement->relation->relname; /* * We take an exclusive lock while dropping shards to prevent concurrent * writes. We don't want to block SELECTs, which means queries might fail * if they access a shard that has just been dropped. */ lockMode = ExclusiveLock; relationId = RangeVarGetRelid(deleteStatement->relation, lockMode, failOK); /* schema-prefix if it is not specified already */ if (schemaName == NULL) { Oid schemaId = get_rel_namespace(relationId); schemaName = get_namespace_name(schemaId); } CheckDistributedTable(relationId); EnsureTablePermissions(relationId, ACL_DELETE); #if (PG_VERSION_NUM >= 100000) queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); #else queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0); #endif deleteQuery = (Query *) linitial(queryTreeList); CheckTableCount(deleteQuery); /* get where clause and flatten it */ whereClause = (Node *) deleteQuery->jointree->quals; deleteCriteria = eval_const_expressions(NULL, whereClause); partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_HASH) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot delete from hash distributed table with this " "command"), errdetail("Delete statements on hash-partitioned tables " "are not supported with master_apply_delete_command."), errhint("Use master_modify_multiple_shards command instead."))); } else if (partitionMethod == DISTRIBUTE_BY_NONE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot delete from distributed table"), errdetail("Delete statements on reference tables " "are not supported."))); } CheckDeleteCriteria(deleteCriteria); CheckPartitionColumn(relationId, deleteCriteria); shardIntervalList = LoadShardIntervalList(relationId); /* drop all shards if where clause is not present */ if (deleteCriteria == NULL) { deletableShardIntervalList = shardIntervalList; ereport(DEBUG2, (errmsg("dropping all shards for \"%s\"", relationName))); } else { deletableShardIntervalList = ShardsMatchingDeleteCriteria(relationId, shardIntervalList, deleteCriteria); } droppedShardCount = DropShards(relationId, 
schemaName, relationName, deletableShardIntervalList); PG_RETURN_INT32(droppedShardCount); } /* * master_drop_all_shards attempts to drop all shards for a given relation. * Unlike master_apply_delete_command, this function can be called even * if the table has already been dropped. */ Datum master_drop_all_shards(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); text *schemaNameText = PG_GETARG_TEXT_P(1); text *relationNameText = PG_GETARG_TEXT_P(2); List *shardIntervalList = NIL; int droppedShardCount = 0; char *schemaName = text_to_cstring(schemaNameText); char *relationName = text_to_cstring(relationNameText); EnsureCoordinator(); CheckCitusVersion(ERROR); CheckTableSchemaNameForDrop(relationId, &schemaName, &relationName); /* * master_drop_all_shards is typically called from the DROP TABLE trigger, * but could be called by a user directly. Make sure we have an * AccessExlusiveLock to prevent any other commands from running on this table * concurrently. */ LockRelationOid(relationId, AccessExclusiveLock); shardIntervalList = LoadShardIntervalList(relationId); droppedShardCount = DropShards(relationId, schemaName, relationName, shardIntervalList); PG_RETURN_INT32(droppedShardCount); } /* * master_drop_sequences attempts to drop a list of sequences on worker nodes. * The "IF EXISTS" clause is used to permit dropping sequences even if they may not * exist. If the commands fail on the workers, the operation is rolled back. * If ddl propagation (citus.enable_ddl_propagation) is set to off, then the function * returns without doing anything. */ Datum master_drop_sequences(PG_FUNCTION_ARGS) { ArrayType *sequenceNamesArray = PG_GETARG_ARRAYTYPE_P(0); ArrayIterator sequenceIterator = NULL; Datum sequenceText = 0; bool isNull = false; StringInfo dropSeqCommand = makeStringInfo(); bool coordinator = IsCoordinator(); CheckCitusVersion(ERROR); /* do nothing if DDL propagation is switched off or this is not the coordinator */ if (!EnableDDLPropagation || !coordinator) { PG_RETURN_VOID(); } /* iterate over sequence names to build single command to DROP them all */ sequenceIterator = array_create_iterator(sequenceNamesArray, 0, NULL); while (array_iterate(sequenceIterator, &sequenceText, &isNull)) { if (isNull) { ereport(ERROR, (errmsg("unexpected NULL sequence name"), errcode(ERRCODE_INVALID_PARAMETER_VALUE))); } /* append command portion if we haven't added any sequence names yet */ if (dropSeqCommand->len == 0) { appendStringInfoString(dropSeqCommand, "DROP SEQUENCE IF EXISTS"); } else { /* otherwise, add a comma to separate subsequent sequence names */ appendStringInfoChar(dropSeqCommand, ','); } appendStringInfo(dropSeqCommand, " %s", TextDatumGetCString(sequenceText)); } if (dropSeqCommand->len != 0) { appendStringInfoString(dropSeqCommand, " CASCADE"); SendCommandToWorkers(ALL_WORKERS, DISABLE_DDL_PROPAGATION); SendCommandToWorkers(ALL_WORKERS, dropSeqCommand->data); } PG_RETURN_VOID(); } /* * CheckTableSchemaNameForDrop errors out if the current user does not * have permission to undistribute the given relation, taking into * account that it may be called from the drop trigger. If the table exists, * the function rewrites the given table and schema name. 
*/ void CheckTableSchemaNameForDrop(Oid relationId, char **schemaName, char **tableName) { char *tempTableName = get_rel_name(relationId); if (tempTableName != NULL) { /* ensure proper values are used if the table exists */ Oid schemaId = get_rel_namespace(relationId); (*schemaName) = get_namespace_name(schemaId); (*tableName) = tempTableName; EnsureTableOwner(relationId); } else if (!superuser()) { /* table does not exist, must be called from drop trigger */ ereport(ERROR, (errmsg("cannot drop distributed table metadata as a " "non-superuser"))); } } /* * DropShards drops all given shards in a relation. The id, name and schema * for the relation are explicitly provided, since this function may be * called when the table is already dropped. * * We mark shard placements that we couldn't drop as to be deleted later, but * we do delete the shard metadadata. */ static int DropShards(Oid relationId, char *schemaName, char *relationName, List *deletableShardIntervalList) { ListCell *shardIntervalCell = NULL; int droppedShardCount = 0; BeginOrContinueCoordinatedTransaction(); /* At this point we intentionally decided to not use 2PC for reference tables */ if (MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC) { CoordinatedTransactionUse2PC(); } foreach(shardIntervalCell, deletableShardIntervalList) { List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; char *quotedShardName = NULL; char *shardRelationName = pstrdup(relationName); Assert(shardInterval->relationId == relationId); /* Build shard relation name. */ AppendShardIdToName(&shardRelationName, shardId); quotedShardName = quote_qualified_identifier(schemaName, shardRelationName); shardPlacementList = ShardPlacementList(shardId); foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell); char *workerName = shardPlacement->nodeName; uint32 workerPort = shardPlacement->nodePort; StringInfo workerDropQuery = makeStringInfo(); MultiConnection *connection = NULL; uint32 connectionFlags = FOR_DDL; char *extensionOwner = CitusExtensionOwnerName(); char storageType = shardInterval->storageType; if (storageType == SHARD_STORAGE_TABLE) { appendStringInfo(workerDropQuery, DROP_REGULAR_TABLE_COMMAND, quotedShardName); } else if (storageType == SHARD_STORAGE_COLUMNAR || storageType == SHARD_STORAGE_FOREIGN) { appendStringInfo(workerDropQuery, DROP_FOREIGN_TABLE_COMMAND, quotedShardName); } connection = GetPlacementConnection(connectionFlags, shardPlacement, extensionOwner); RemoteTransactionBeginIfNecessary(connection); if (PQstatus(connection->pgConn) != CONNECTION_OK) { uint64 placementId = shardPlacement->placementId; ereport(WARNING, (errmsg("could not connect to shard \"%s\" on node " "\"%s:%u\"", shardRelationName, workerName, workerPort), errdetail("Marking this shard placement for " "deletion"))); UpdateShardPlacementState(placementId, FILE_TO_DELETE); continue; } MarkRemoteTransactionCritical(connection); ExecuteCriticalRemoteCommand(connection, workerDropQuery->data); DeleteShardPlacementRow(shardPlacement->placementId); } DeleteShardRow(shardId); } droppedShardCount = list_length(deletableShardIntervalList); return droppedShardCount; } /* Checks that delete is only on one table. 
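 * For example, a statement such as DELETE FROM events USING other_events ...
 * (illustrative names) carries two range table entries and is rejected here.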
*/ static void CheckTableCount(Query *deleteQuery) { int rangeTableCount = list_length(deleteQuery->rtable); if (rangeTableCount > 1) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot delete from distributed table"), errdetail("Delete on multiple tables is not supported"))); } } /* Checks that delete criteria only consists of simple operator expressions. */ static void CheckDeleteCriteria(Node *deleteCriteria) { bool simpleOpExpression = true; if (deleteCriteria == NULL) { return; } if (is_opclause(deleteCriteria)) { simpleOpExpression = SimpleOpExpression((Expr *) deleteCriteria); } else if (IsA(deleteCriteria, BoolExpr)) { ListCell *opExpressionCell = NULL; BoolExpr *deleteCriteriaExpression = (BoolExpr *) deleteCriteria; List *opExpressionList = deleteCriteriaExpression->args; foreach(opExpressionCell, opExpressionList) { Expr *opExpression = (Expr *) lfirst(opExpressionCell); if (!SimpleOpExpression(opExpression)) { simpleOpExpression = false; break; } } } else { simpleOpExpression = false; } if (!simpleOpExpression) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot delete from distributed table"), errdetail("Delete query has a complex operator expression"))); } } /* * CheckPartitionColumn checks that the given where clause is based only on the * partition key of the given relation id. */ static void CheckPartitionColumn(Oid relationId, Node *whereClause) { Var *partitionColumn = DistPartitionKey(relationId); ListCell *columnCell = NULL; List *columnList = pull_var_clause_default(whereClause); foreach(columnCell, columnList) { Var *var = (Var *) lfirst(columnCell); if (var->varattno != partitionColumn->varattno) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot delete from distributed table"), errdetail("Where clause includes a column other than " "partition column"))); } } } /* * ShardsMatchingDeleteCriteria selects shards to be deleted from the shard * interval list based on the delete criteria, and returns selected shards in * another list. We add a shard to the list if and only if all rows in the shard * satisfy the delete criteria. Note that this function does not expect * deleteCriteria to be NULL. 
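 *
 * As an illustrative sketch: a shard whose interval is
 * ['2016-01-01', '2016-01-31'] is selected for deletion only when the delete
 * criteria covers the entire interval (e.g. created_at < '2016-03-01');
 * criteria such as created_at < '2016-01-15' leave the shard in place.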
*/ static List * ShardsMatchingDeleteCriteria(Oid relationId, List *shardIntervalList, Node *deleteCriteria) { List *dropShardIntervalList = NIL; List *deleteCriteriaList = NIL; ListCell *shardIntervalCell = NULL; /* build the base expression for constraint */ Index rangeTableIndex = 1; Var *partitionColumn = PartitionColumn(relationId, rangeTableIndex); Node *baseConstraint = BuildBaseConstraint(partitionColumn); Assert(deleteCriteria != NULL); deleteCriteriaList = list_make1(deleteCriteria); /* walk over shard list and check if shards can be dropped */ foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); if (shardInterval->minValueExists && shardInterval->maxValueExists) { List *restrictInfoList = NIL; bool dropShard = false; BoolExpr *andExpr = NULL; Expr *lessThanExpr = NULL; Expr *greaterThanExpr = NULL; RestrictInfo *lessThanRestrictInfo = NULL; RestrictInfo *greaterThanRestrictInfo = NULL; /* set the min/max values in the base constraint */ UpdateConstraint(baseConstraint, shardInterval); andExpr = (BoolExpr *) baseConstraint; lessThanExpr = (Expr *) linitial(andExpr->args); greaterThanExpr = (Expr *) lsecond(andExpr->args); lessThanRestrictInfo = make_simple_restrictinfo(lessThanExpr); greaterThanRestrictInfo = make_simple_restrictinfo(greaterThanExpr); restrictInfoList = lappend(restrictInfoList, lessThanRestrictInfo); restrictInfoList = lappend(restrictInfoList, greaterThanRestrictInfo); #if (PG_VERSION_NUM >= 100000) dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList, false); #else dropShard = predicate_implied_by(deleteCriteriaList, restrictInfoList); #endif if (dropShard) { dropShardIntervalList = lappend(dropShardIntervalList, shardInterval); ereport(DEBUG2, (errmsg("delete criteria includes shardId " UINT64_FORMAT, shardInterval->shardId))); } } } return dropShardIntervalList; } citus-7.0.3/src/backend/distributed/master/master_expire_table_cache.c000066400000000000000000000137121317107136600261610ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_expire_table_cache.c * UDF to refresh shard cache at workers * * This file contains master_expire_table_cache function. The function * accepts a table name and drops tables cached shards from all workers. * It does not change existing shard placement. Only drops cached copies * of shards. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "libpq-fe.h" #include "catalog/pg_class.h" #include "distributed/connection_management.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/pg_dist_shard.h" #include "distributed/remote_commands.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "utils/builtins.h" #include "utils/lsyscache.h" static List * FindAbsentShardPlacementsOnWorker(WorkerNode *workerNode, ShardInterval **shardIntervalArray, List **placementListArray, int shardCount); static void DropShardsFromWorker(WorkerNode *workerNode, Oid relationId, List *shardIntervalList); PG_FUNCTION_INFO_V1(master_expire_table_cache); /* * master_expire_table_cache drops table's caches shards in all workers. The function * expects a passed table to be a small distributed table meaning it has less than * large_table_shard_count. 
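 *
 * A minimal usage sketch (the table name is an illustrative placeholder):
 *
 *   SELECT master_expire_table_cache('small_dimension_table'::regclass);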
*/ Datum master_expire_table_cache(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); DistTableCacheEntry *cacheEntry = NULL; List *workerNodeList = NIL; ListCell *workerNodeCell = NULL; int shardCount = 0; ShardInterval **shardIntervalArray = NULL; List **placementListArray = NULL; int shardIndex = 0; CheckCitusVersion(ERROR); cacheEntry = DistributedTableCacheEntry(relationId); workerNodeList = ActivePrimaryNodeList(); shardCount = cacheEntry->shardIntervalArrayLength; shardIntervalArray = cacheEntry->sortedShardIntervalArray; if (shardCount == 0) { ereport(WARNING, (errmsg("Table has no shards, no action is taken"))); PG_RETURN_VOID(); } if (shardCount >= LargeTableShardCount) { ereport(ERROR, (errmsg("Must be called on tables smaller than %d shards", LargeTableShardCount))); } placementListArray = palloc(shardCount * sizeof(List *)); for (shardIndex = 0; shardIndex < shardCount; shardIndex++) { ShardInterval *shardInterval = shardIntervalArray[shardIndex]; placementListArray[shardIndex] = FinalizedShardPlacementList(shardInterval->shardId); } foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); List *shardDropList = FindAbsentShardPlacementsOnWorker(workerNode, shardIntervalArray, placementListArray, shardCount); DropShardsFromWorker(workerNode, relationId, shardDropList); } pfree(placementListArray); PG_RETURN_VOID(); } /* * FindAbsentShardPlacementsOnWorker compiles shard interval list of shards * that do not have registered placement at given worker node. */ List * FindAbsentShardPlacementsOnWorker(WorkerNode *workerNode, ShardInterval **shardIntervalArray, List **placementListArray, int shardCount) { List *absentShardIntervalList = NIL; int shardIndex = 0; for (shardIndex = 0; shardIndex < shardCount; shardIndex++) { ShardInterval *shardInterval = shardIntervalArray[shardIndex]; List *placementList = placementListArray[shardIndex]; ListCell *placementCell = NULL; foreach(placementCell, placementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); /* * Append shard interval to absent list if none of its placements is on * the worker. */ if (placement->nodePort == workerNode->workerPort && strncmp(placement->nodeName, workerNode->workerName, WORKER_LENGTH) == 0) { break; } else if (lnext(placementCell) == NULL) { absentShardIntervalList = lappend(absentShardIntervalList, shardInterval); } } } return absentShardIntervalList; } /* * DropShardsFromWorker drops provided shards belonging to a relation from * given worker. It does not change any metadata at the master. 
*/ static void DropShardsFromWorker(WorkerNode *workerNode, Oid relationId, List *shardIntervalList) { Oid schemaId = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaId); char *relationName = get_rel_name(relationId); char relationKind = get_rel_relkind(relationId); StringInfo workerCommand = makeStringInfo(); StringInfo shardNames = makeStringInfo(); ListCell *shardIntervalCell = NULL; MultiConnection *connection = NULL; int connectionFlag = FORCE_NEW_CONNECTION; PGresult *result = NULL; if (shardIntervalList == NIL) { return; } foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); char *shardName = pstrdup(relationName); char *quotedShardName = NULL; AppendShardIdToName(&shardName, shardInterval->shardId); quotedShardName = quote_qualified_identifier(schemaName, shardName); appendStringInfo(shardNames, "%s", quotedShardName); /* append a comma after the shard name if there are more shards */ if (lnext(shardIntervalCell) != NULL) { appendStringInfo(shardNames, ", "); } } if (RegularTable(relationId)) { appendStringInfo(workerCommand, DROP_REGULAR_TABLE_COMMAND, shardNames->data); } else if (relationKind == RELKIND_FOREIGN_TABLE) { appendStringInfo(workerCommand, DROP_FOREIGN_TABLE_COMMAND, shardNames->data); } else { ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("expire target is not a regular, foreign or partitioned " "table"))); } connection = GetNodeConnection(connectionFlag, workerNode->workerName, workerNode->workerPort); ExecuteOptionalRemoteCommand(connection, workerCommand->data, &result); } citus-7.0.3/src/backend/distributed/master/master_metadata_utility.c000066400000000000000000001215461317107136600257430ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_metadata_utility.c * Routines for reading and modifying master node's metadata. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/pg_constraint.h" #include "catalog/pg_extension.h" #include "catalog/pg_namespace.h" #include "catalog/pg_type.h" #include "commands/extension.h" #include "distributed/connection_management.h" #include "distributed/citus_nodes.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/pg_dist_colocation.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/pg_dist_placement.h" #include "distributed/relay_utility.h" #include "distributed/resource_lock.h" #include "distributed/remote_commands.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "nodes/makefuncs.h" #include "parser/scansup.h" #include "storage/lmgr.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/datum.h" #include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" #include "utils/tqual.h" /* Local functions forward declarations */ static uint64 * AllocateUint64(uint64 value); static void RecordDistributedRelationDependencies(Oid distributedRelationId, Node *distributionKey); static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDesc, HeapTuple heapTuple); static uint64 DistributedTableSize(Oid relationId, char *sizeQuery); static uint64 DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, char *sizeQuery); static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId); static StringInfo GenerateSizeQueryOnMultiplePlacements(Oid distributedRelationId, List *shardIntervalList, char *sizeQuery); static void ErrorIfNotSuitableToGetSize(Oid relationId); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(citus_table_size); PG_FUNCTION_INFO_V1(citus_total_relation_size); PG_FUNCTION_INFO_V1(citus_relation_size); /* * citus_total_relation_size accepts a table name and returns a distributed table * and its indexes' total relation size. */ Datum citus_total_relation_size(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); uint64 totalRelationSize = 0; char *tableSizeFunction = PG_TOTAL_RELATION_SIZE_FUNCTION; CheckCitusVersion(ERROR); if (CStoreTable(relationId)) { tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION; } totalRelationSize = DistributedTableSize(relationId, tableSizeFunction); PG_RETURN_INT64(totalRelationSize); } /* * citus_table_size accepts a table name and returns a distributed table's total * relation size. */ Datum citus_table_size(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); uint64 tableSize = 0; char *tableSizeFunction = PG_TABLE_SIZE_FUNCTION; CheckCitusVersion(ERROR); if (CStoreTable(relationId)) { tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION; } tableSize = DistributedTableSize(relationId, tableSizeFunction); PG_RETURN_INT64(tableSize); } /* * citus_relation_size accept a table name and returns a relation's 'main' * fork's size. 
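 *
 * Usage sketch (the table name is an illustrative placeholder):
 *
 *   SELECT pg_size_pretty(citus_relation_size('github_events'));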
*/ Datum citus_relation_size(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); uint64 relationSize = 0; char *tableSizeFunction = PG_RELATION_SIZE_FUNCTION; CheckCitusVersion(ERROR); if (CStoreTable(relationId)) { tableSizeFunction = CSTORE_TABLE_SIZE_FUNCTION; } relationSize = DistributedTableSize(relationId, tableSizeFunction); PG_RETURN_INT64(relationSize); } /* * DistributedTableSize is helper function for each kind of citus size functions. * It first checks whether the table is distributed and size query can be run on * it. Connection to each node has to be established to get the size of the table. */ static uint64 DistributedTableSize(Oid relationId, char *sizeQuery) { Relation relation = NULL; List *workerNodeList = NULL; ListCell *workerNodeCell = NULL; uint64 totalRelationSize = 0; if (XactModificationLevel == XACT_MODIFICATION_DATA) { ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), errmsg("citus size functions cannot be called in transaction" " blocks which contain multi-shard data modifications"))); } /* try to open relation, will error out if the relation does not exist */ relation = relation_open(relationId, AccessShareLock); ErrorIfNotSuitableToGetSize(relationId); workerNodeList = ActiveReadableNodeList(); foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); uint64 relationSizeOnNode = DistributedTableSizeOnWorker(workerNode, relationId, sizeQuery); totalRelationSize += relationSizeOnNode; } heap_close(relation, AccessShareLock); return totalRelationSize; } /* * DistributedTableSizeOnWorker gets the workerNode and relationId to calculate * size of that relation on the given workerNode by summing up the size of each * shard placement. */ static uint64 DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, char *sizeQuery) { StringInfo tableSizeQuery = NULL; StringInfo tableSizeStringInfo = NULL; char *workerNodeName = workerNode->workerName; uint32 workerNodePort = workerNode->workerPort; char *tableSizeString; uint64 tableSize = 0; MultiConnection *connection = NULL; uint32 connectionFlag = FORCE_NEW_CONNECTION; PGresult *result = NULL; int queryResult = 0; List *sizeList = NIL; List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId); tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(relationId, shardIntervalsOnNode, sizeQuery); connection = GetNodeConnection(connectionFlag, workerNodeName, workerNodePort); queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, &result); if (queryResult != 0) { ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("cannot get the size because of a connection error"))); } sizeList = ReadFirstColumnAsText(result); tableSizeStringInfo = (StringInfo) linitial(sizeList); tableSizeString = tableSizeStringInfo->data; tableSize = atol(tableSizeString); return tableSize; } /* * GroupShardPlacementsForTableOnGroup accepts a relationId and a group and returns a list * of GroupShardPlacement's representing all of the placements for the table which reside * on the group. 
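 *
 * (A "group" here refers to a pg_dist_node groupid, i.e. a primary node
 * together with any of its secondaries; placements are recorded per group
 * rather than per individual node.)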
*/ List * GroupShardPlacementsForTableOnGroup(Oid relationId, uint32 groupId) { DistTableCacheEntry *distTableCacheEntry = DistributedTableCacheEntry(relationId); List *resultList = NIL; int shardIndex = 0; int shardIntervalArrayLength = distTableCacheEntry->shardIntervalArrayLength; for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) { GroupShardPlacement *placementArray = distTableCacheEntry->arrayOfPlacementArrays[shardIndex]; int numberOfPlacements = distTableCacheEntry->arrayOfPlacementArrayLengths[shardIndex]; int placementIndex = 0; for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++) { GroupShardPlacement *placement = &placementArray[placementIndex]; if (placement->groupId == groupId) { resultList = lappend(resultList, placement); } } } return resultList; } /* * ShardIntervalsOnWorkerGroup accepts a WorkerNode and returns a list of the shard * intervals of the given table which are placed on the group the node is a part of. * * DO NOT modify the shard intervals returned by this function, they are not copies but * pointers. */ static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId) { DistTableCacheEntry *distTableCacheEntry = DistributedTableCacheEntry(relationId); List *shardIntervalList = NIL; int shardIndex = 0; int shardIntervalArrayLength = distTableCacheEntry->shardIntervalArrayLength; for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) { GroupShardPlacement *placementArray = distTableCacheEntry->arrayOfPlacementArrays[shardIndex]; int numberOfPlacements = distTableCacheEntry->arrayOfPlacementArrayLengths[shardIndex]; int placementIndex = 0; for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++) { GroupShardPlacement *placement = &placementArray[placementIndex]; uint64 shardId = placement->shardId; bool metadataLock = false; metadataLock = TryLockShardDistributionMetadata(shardId, ShareLock); /* if the lock is not acquired warn the user */ if (metadataLock == false) { ereport(WARNING, (errcode(ERRCODE_LOCK_NOT_AVAILABLE), errmsg("lock is not acquired, size of shard %ld " "will be ignored", shardId))); continue; } if (placement->groupId == workerNode->groupId) { ShardInterval *shardInterval = distTableCacheEntry->sortedShardIntervalArray[shardIndex]; shardIntervalList = lappend(shardIntervalList, shardInterval); } } } return shardIntervalList; } /* * GenerateSizeQueryOnMultiplePlacements generates a select size query to get * size of multiple tables from the relation with distributedRelationId. Note * that, different size functions supported by PG are also supported by this * function changing the size query given as the last parameter to function. * Format of sizeQuery is pg_*_size(%s). 
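 * With two illustrative shard names, the assembled query looks roughly like:
 *   SELECT pg_table_size('public.github_events_102008')
 *        + pg_table_size('public.github_events_102009') + 0;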
Examples of it can be found in the * master_protocol.h */ static StringInfo GenerateSizeQueryOnMultiplePlacements(Oid distributedRelationId, List *shardIntervalList, char *sizeQuery) { Oid schemaId = get_rel_namespace(distributedRelationId); char *schemaName = get_namespace_name(schemaId); StringInfo selectQuery = makeStringInfo(); ListCell *shardIntervalCell = NULL; appendStringInfo(selectQuery, "SELECT "); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; char *shardName = get_rel_name(distributedRelationId); char *shardQualifiedName = NULL; char *quotedShardName = NULL; AppendShardIdToName(&shardName, shardId); shardQualifiedName = quote_qualified_identifier(schemaName, shardName); quotedShardName = quote_literal_cstr(shardQualifiedName); appendStringInfo(selectQuery, sizeQuery, quotedShardName); appendStringInfo(selectQuery, " + "); } /* * Add 0 as a last size, it handles empty list case and makes size control checks * unnecessary which would have implemented without this line. */ appendStringInfo(selectQuery, "0;"); return selectQuery; } /* * ErrorIfNotSuitableToGetSize determines whether the table is suitable to find * its' size with internal functions. */ static void ErrorIfNotSuitableToGetSize(Oid relationId) { if (!IsDistributedTable(relationId)) { char *relationName = get_rel_name(relationId); char *escapedQueryString = quote_literal_cstr(relationName); ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("cannot calculate the size because relation %s is not " "distributed", escapedQueryString))); } if (PartitionMethod(relationId) == DISTRIBUTE_BY_HASH && !SingleReplicatedTable(relationId)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot calculate the size because replication factor " "is greater than 1"))); } } /* * TableShardReplicationFactor returns the current replication factor of the * given relation by looking into shard placements. It errors out if there * are different number of shard placements for different shards. It also * errors out if the table does not have any shards. */ uint32 TableShardReplicationFactor(Oid relationId) { uint32 replicationCount = 0; ListCell *shardCell = NULL; List *shardIntervalList = LoadShardIntervalList(relationId); foreach(shardCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardCell); uint64 shardId = shardInterval->shardId; List *shardPlacementList = ShardPlacementList(shardId); uint32 shardPlacementCount = list_length(shardPlacementList); /* * Get the replication count of the first shard in the list, and error * out if there is a shard with different replication count. 
*/ if (replicationCount == 0) { replicationCount = shardPlacementCount; } else if (replicationCount != shardPlacementCount) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot find the replication factor of the " "table %s", relationName), errdetail("The shard %ld has different shards replication " "counts from other shards.", shardId))); } } /* error out if the table does not have any shards */ if (replicationCount == 0) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot find the replication factor of the " "table %s", relationName), errdetail("The table %s does not have any shards.", relationName))); } return replicationCount; } /* * LoadShardIntervalList returns a list of shard intervals related for a given * distributed table. The function returns an empty list if no shards can be * found for the given relation. * Since LoadShardIntervalList relies on sortedShardIntervalArray, it returns * a shard interval list whose elements are sorted on shardminvalue. Shard intervals * with uninitialized shard min/max values are placed in the end of the list. */ List * LoadShardIntervalList(Oid relationId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); List *shardList = NIL; int i = 0; for (i = 0; i < cacheEntry->shardIntervalArrayLength; i++) { ShardInterval *newShardInterval = NULL; newShardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); CopyShardInterval(cacheEntry->sortedShardIntervalArray[i], newShardInterval); shardList = lappend(shardList, newShardInterval); } return shardList; } /* * ShardIntervalCount returns number of shard intervals for a given distributed table. * The function returns 0 if table is not distributed, or no shards can be found for * the given relation id. */ int ShardIntervalCount(Oid relationId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); int shardIntervalCount = 0; if (cacheEntry->isDistributedTable) { shardIntervalCount = cacheEntry->shardIntervalArrayLength; } return shardIntervalCount; } /* * LoadShardList reads list of shards for given relationId from pg_dist_shard, * and returns the list of found shardIds. * Since LoadShardList relies on sortedShardIntervalArray, it returns a shard * list whose elements are sorted on shardminvalue. Shards with uninitialized * shard min/max values are placed in the end of the list. */ List * LoadShardList(Oid relationId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); List *shardList = NIL; int i = 0; for (i = 0; i < cacheEntry->shardIntervalArrayLength; i++) { ShardInterval *currentShardInterval = cacheEntry->sortedShardIntervalArray[i]; uint64 *shardIdPointer = AllocateUint64(currentShardInterval->shardId); shardList = lappend(shardList, shardIdPointer); } return shardList; } /* Allocates eight bytes, and copies given value's contents those bytes. */ static uint64 * AllocateUint64(uint64 value) { uint64 *allocatedValue = (uint64 *) palloc0(sizeof(uint64)); Assert(sizeof(uint64) >= 8); (*allocatedValue) = value; return allocatedValue; } /* * CopyShardInterval copies fields from the specified source ShardInterval * into the fields of the provided destination ShardInterval. 
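 *
 * Callers typically allocate the destination first; a minimal sketch
 * (variable names are illustrative):
 *
 *   ShardInterval *copy = (ShardInterval *) palloc0(sizeof(ShardInterval));
 *   CopyShardInterval(sourceInterval, copy);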
*/ void CopyShardInterval(ShardInterval *srcInterval, ShardInterval *destInterval) { destInterval->type = srcInterval->type; destInterval->relationId = srcInterval->relationId; destInterval->storageType = srcInterval->storageType; destInterval->valueTypeId = srcInterval->valueTypeId; destInterval->valueTypeLen = srcInterval->valueTypeLen; destInterval->valueByVal = srcInterval->valueByVal; destInterval->minValueExists = srcInterval->minValueExists; destInterval->maxValueExists = srcInterval->maxValueExists; destInterval->shardId = srcInterval->shardId; destInterval->minValue = 0; if (destInterval->minValueExists) { destInterval->minValue = datumCopy(srcInterval->minValue, srcInterval->valueByVal, srcInterval->valueTypeLen); } destInterval->maxValue = 0; if (destInterval->maxValueExists) { destInterval->maxValue = datumCopy(srcInterval->maxValue, srcInterval->valueByVal, srcInterval->valueTypeLen); } } /* * CopyShardPlacement copies the values of the source placement into the * target placement. */ void CopyShardPlacement(ShardPlacement *srcPlacement, ShardPlacement *destPlacement) { /* first copy all by-value fields */ memcpy(destPlacement, srcPlacement, sizeof(ShardPlacement)); /* and then the fields pointing to external values */ if (srcPlacement->nodeName) { destPlacement->nodeName = pstrdup(srcPlacement->nodeName); } } /* * ShardLength finds shard placements for the given shardId, extracts the length * of a finalized shard, and returns the shard's length. This function errors * out if we cannot find any finalized shard placements for the given shardId. */ uint64 ShardLength(uint64 shardId) { uint64 shardLength = 0; List *shardPlacementList = FinalizedShardPlacementList(shardId); if (shardPlacementList == NIL) { ereport(ERROR, (errmsg("could not find length of shard " UINT64_FORMAT, shardId), errdetail("Could not find any shard placements for the shard."))); } else { ShardPlacement *shardPlacement = (ShardPlacement *) linitial(shardPlacementList); shardLength = shardPlacement->shardLength; } return shardLength; } /* * NodeGroupHasShardPlacements returns whether any active shards are placed on the group */ bool NodeGroupHasShardPlacements(uint32 groupId, bool onlyConsiderActivePlacements) { const int scanKeyCount = (onlyConsiderActivePlacements ? 2 : 1); const bool indexOK = false; bool hasFinalizedPlacements = false; HeapTuple heapTuple = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[scanKeyCount]; Relation pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_groupid, BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(groupId)); if (onlyConsiderActivePlacements) { ScanKeyInit(&scanKey[1], Anum_pg_dist_placement_shardstate, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(FILE_FINALIZED)); } scanDescriptor = systable_beginscan(pgPlacement, DistPlacementGroupidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); hasFinalizedPlacements = HeapTupleIsValid(heapTuple); systable_endscan(scanDescriptor); heap_close(pgPlacement, NoLock); return hasFinalizedPlacements; } /* * FinalizedShardPlacementList finds shard placements for the given shardId from * system catalogs, chooses placements that are in finalized state, and returns * these shard placements in a new list. 
*/ List * FinalizedShardPlacementList(uint64 shardId) { List *finalizedPlacementList = NIL; List *shardPlacementList = ShardPlacementList(shardId); ListCell *shardPlacementCell = NULL; foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell); if (shardPlacement->shardState == FILE_FINALIZED) { finalizedPlacementList = lappend(finalizedPlacementList, shardPlacement); } } return finalizedPlacementList; } /* * FinalizedShardPlacement finds a shard placement for the given shardId from * system catalog, chooses a placement that is in finalized state and returns * that shard placement. If this function cannot find a healthy shard placement * and missingOk is set to false it errors out. */ ShardPlacement * FinalizedShardPlacement(uint64 shardId, bool missingOk) { List *finalizedPlacementList = FinalizedShardPlacementList(shardId); ShardPlacement *shardPlacement = NULL; if (list_length(finalizedPlacementList) == 0) { if (!missingOk) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not find any healthy placement for shard " UINT64_FORMAT, shardId))); } return shardPlacement; } shardPlacement = (ShardPlacement *) linitial(finalizedPlacementList); return shardPlacement; } /* * BuildShardPlacementList finds shard placements for the given shardId from * system catalogs, converts these placements to their in-memory * representation, and returns the converted shard placements in a new list. * * This probably only should be called from metadata_cache.c. Resides here * because it shares code with other routines in this file. */ List * BuildShardPlacementList(ShardInterval *shardInterval) { int64 shardId = shardInterval->shardId; List *shardPlacementList = NIL; Relation pgPlacement = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; HeapTuple heapTuple = NULL; pgPlacement = heap_open(DistPlacementRelationId(), AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); scanDescriptor = systable_beginscan(pgPlacement, DistPlacementShardidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { TupleDesc tupleDescriptor = RelationGetDescr(pgPlacement); GroupShardPlacement *placement = TupleToGroupShardPlacement(tupleDescriptor, heapTuple); shardPlacementList = lappend(shardPlacementList, placement); heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); heap_close(pgPlacement, NoLock); return shardPlacementList; } /* * TupleToGroupShardPlacement takes in a heap tuple from pg_dist_placement, * and converts this tuple to in-memory struct. The function assumes the * caller already has locks on the tuple, and doesn't perform any locking. 
*/ static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDescriptor, HeapTuple heapTuple) { GroupShardPlacement *shardPlacement = NULL; bool isNull = false; Datum placementId = heap_getattr(heapTuple, Anum_pg_dist_placement_placementid, tupleDescriptor, &isNull); Datum shardId = heap_getattr(heapTuple, Anum_pg_dist_placement_shardid, tupleDescriptor, &isNull); Datum shardLength = heap_getattr(heapTuple, Anum_pg_dist_placement_shardlength, tupleDescriptor, &isNull); Datum shardState = heap_getattr(heapTuple, Anum_pg_dist_placement_shardstate, tupleDescriptor, &isNull); Datum groupId = heap_getattr(heapTuple, Anum_pg_dist_placement_groupid, tupleDescriptor, &isNull); if (HeapTupleHeaderGetNatts(heapTuple->t_data) != Natts_pg_dist_placement || HeapTupleHasNulls(heapTuple)) { ereport(ERROR, (errmsg("unexpected null in pg_dist_placement tuple"))); } shardPlacement = CitusMakeNode(GroupShardPlacement); shardPlacement->placementId = DatumGetInt64(placementId); shardPlacement->shardId = DatumGetInt64(shardId); shardPlacement->shardLength = DatumGetInt64(shardLength); shardPlacement->shardState = DatumGetUInt32(shardState); shardPlacement->groupId = DatumGetUInt32(groupId); return shardPlacement; } /* * InsertShardRow opens the shard system catalog, and inserts a new row with the * given values into that system catalog. Note that we allow the user to pass in * null min/max values in case they are creating an empty shard. */ void InsertShardRow(Oid relationId, uint64 shardId, char storageType, text *shardMinValue, text *shardMaxValue) { Relation pgDistShard = NULL; TupleDesc tupleDescriptor = NULL; HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_shard]; bool isNulls[Natts_pg_dist_shard]; /* form new shard tuple */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); values[Anum_pg_dist_shard_logicalrelid - 1] = ObjectIdGetDatum(relationId); values[Anum_pg_dist_shard_shardid - 1] = Int64GetDatum(shardId); values[Anum_pg_dist_shard_shardstorage - 1] = CharGetDatum(storageType); /* dropped shardalias column must also be set; it is still part of the tuple */ isNulls[Anum_pg_dist_shard_shardalias_DROPPED - 1] = true; /* check if shard min/max values are null */ if (shardMinValue != NULL && shardMaxValue != NULL) { values[Anum_pg_dist_shard_shardminvalue - 1] = PointerGetDatum(shardMinValue); values[Anum_pg_dist_shard_shardmaxvalue - 1] = PointerGetDatum(shardMaxValue); } else { isNulls[Anum_pg_dist_shard_shardminvalue - 1] = true; isNulls[Anum_pg_dist_shard_shardmaxvalue - 1] = true; } /* open shard relation and insert new tuple */ pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistShard); heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistShard, heapTuple); /* invalidate previous cache entry and close relation */ CitusInvalidateRelcacheByRelid(relationId); CommandCounterIncrement(); heap_close(pgDistShard, NoLock); } /* * InsertShardPlacementRow opens the shard placement system catalog, and inserts * a new row with the given values into that system catalog. If placementId is * INVALID_PLACEMENT_ID, a new placement id will be assigned.Then, returns the * placement id of the added shard placement. 
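 *
 * A caller-side sketch, mirroring the shard creation code above (values are
 * illustrative):
 *
 *   uint64 placementId = InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID,
 *                                                FILE_FINALIZED, 0, groupId);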
*/ uint64 InsertShardPlacementRow(uint64 shardId, uint64 placementId, char shardState, uint64 shardLength, uint32 groupId) { Relation pgDistPlacement = NULL; TupleDesc tupleDescriptor = NULL; HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_placement]; bool isNulls[Natts_pg_dist_placement]; /* form new shard placement tuple */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); if (placementId == INVALID_PLACEMENT_ID) { placementId = master_get_new_placementid(NULL); } values[Anum_pg_dist_placement_placementid - 1] = Int64GetDatum(placementId); values[Anum_pg_dist_placement_shardid - 1] = Int64GetDatum(shardId); values[Anum_pg_dist_placement_shardstate - 1] = CharGetDatum(shardState); values[Anum_pg_dist_placement_shardlength - 1] = Int64GetDatum(shardLength); values[Anum_pg_dist_placement_groupid - 1] = Int64GetDatum(groupId); /* open shard placement relation and insert new tuple */ pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistPlacement); heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistPlacement, heapTuple); CitusInvalidateRelcacheByShardId(shardId); CommandCounterIncrement(); heap_close(pgDistPlacement, NoLock); return placementId; } /* * InsertIntoPgDistPartition inserts a new tuple into pg_dist_partition. */ void InsertIntoPgDistPartition(Oid relationId, char distributionMethod, Var *distributionColumn, uint32 colocationId, char replicationModel) { Relation pgDistPartition = NULL; char *distributionColumnString = NULL; HeapTuple newTuple = NULL; Datum newValues[Natts_pg_dist_partition]; bool newNulls[Natts_pg_dist_partition]; /* open system catalog and insert new tuple */ pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); /* form new tuple for pg_dist_partition */ memset(newValues, 0, sizeof(newValues)); memset(newNulls, false, sizeof(newNulls)); newValues[Anum_pg_dist_partition_logicalrelid - 1] = ObjectIdGetDatum(relationId); newValues[Anum_pg_dist_partition_partmethod - 1] = CharGetDatum(distributionMethod); newValues[Anum_pg_dist_partition_colocationid - 1] = UInt32GetDatum(colocationId); newValues[Anum_pg_dist_partition_repmodel - 1] = CharGetDatum(replicationModel); /* set partkey column to NULL for reference tables */ if (distributionMethod != DISTRIBUTE_BY_NONE) { distributionColumnString = nodeToString((Node *) distributionColumn); newValues[Anum_pg_dist_partition_partkey - 1] = CStringGetTextDatum(distributionColumnString); } else { newValues[Anum_pg_dist_partition_partkey - 1] = PointerGetDatum(NULL); newNulls[Anum_pg_dist_partition_partkey - 1] = true; } newTuple = heap_form_tuple(RelationGetDescr(pgDistPartition), newValues, newNulls); /* finally insert tuple, build index entries & register cache invalidation */ CatalogTupleInsert(pgDistPartition, newTuple); CitusInvalidateRelcacheByRelid(relationId); RecordDistributedRelationDependencies(relationId, (Node *) distributionColumn); CommandCounterIncrement(); heap_close(pgDistPartition, NoLock); } /* * RecordDistributedRelationDependencies creates the dependency entries * necessary for a distributed relation in addition to the preexisting ones * for a normal relation. * * We create one dependency from the (now distributed) relation to the citus * extension to prevent the extension from being dropped while distributed * tables exist. 
Furthermore a dependency from pg_dist_partition's * distribution clause to the underlying columns is created, but it's marked * as being owned by the relation itself. That means the entire table can be * dropped, but the column itself can't. Neither can the type of the * distribution column be changed (c.f. ATExecAlterColumnType). */ static void RecordDistributedRelationDependencies(Oid distributedRelationId, Node *distributionKey) { ObjectAddress relationAddr = { 0, 0, 0 }; ObjectAddress citusExtensionAddr = { 0, 0, 0 }; relationAddr.classId = RelationRelationId; relationAddr.objectId = distributedRelationId; relationAddr.objectSubId = 0; citusExtensionAddr.classId = ExtensionRelationId; citusExtensionAddr.objectId = get_extension_oid("citus", false); citusExtensionAddr.objectSubId = 0; /* dependency from table entry to extension */ recordDependencyOn(&relationAddr, &citusExtensionAddr, DEPENDENCY_NORMAL); } /* * DeletePartitionRow removes the row from pg_dist_partition where the logicalrelid * field equals to distributedRelationId. Then, the function invalidates the * metadata cache. */ void DeletePartitionRow(Oid distributedRelationId) { Relation pgDistPartition = NULL; HeapTuple heapTuple = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributedRelationId)); scanDescriptor = systable_beginscan(pgDistPartition, InvalidOid, false, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for partition %d", distributedRelationId))); } simple_heap_delete(pgDistPartition, &heapTuple->t_self); systable_endscan(scanDescriptor); /* invalidate the cache */ CitusInvalidateRelcacheByRelid(distributedRelationId); /* increment the counter so that next command can see the row */ CommandCounterIncrement(); heap_close(pgDistPartition, NoLock); } /* * DeleteShardRow opens the shard system catalog, finds the unique row that has * the given shardId, and deletes this row. */ void DeleteShardRow(uint64 shardId) { Relation pgDistShard = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; HeapTuple heapTuple = NULL; Form_pg_dist_shard pgDistShardForm = NULL; Oid distributedRelationId = InvalidOid; pgDistShard = heap_open(DistShardRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); scanDescriptor = systable_beginscan(pgDistShard, DistShardShardidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for shard " UINT64_FORMAT, shardId))); } pgDistShardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple); distributedRelationId = pgDistShardForm->logicalrelid; simple_heap_delete(pgDistShard, &heapTuple->t_self); systable_endscan(scanDescriptor); /* invalidate previous cache entry */ CitusInvalidateRelcacheByRelid(distributedRelationId); CommandCounterIncrement(); heap_close(pgDistShard, NoLock); } /* * DeleteShardPlacementRow opens the shard placement system catalog, finds the placement * with the given placementId, and deletes it. 
*/ void DeleteShardPlacementRow(uint64 placementId) { Relation pgDistPlacement = NULL; SysScanDesc scanDescriptor = NULL; const int scanKeyCount = 1; ScanKeyData scanKey[scanKeyCount]; bool indexOK = true; HeapTuple heapTuple = NULL; TupleDesc tupleDescriptor = NULL; bool isNull = false; uint64 shardId = 0; pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistPlacement); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId)); scanDescriptor = systable_beginscan(pgDistPlacement, DistPlacementPlacementidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (heapTuple == NULL) { ereport(ERROR, (errmsg("could not find valid entry for shard placement " INT64_FORMAT, placementId))); } shardId = heap_getattr(heapTuple, Anum_pg_dist_placement_shardid, tupleDescriptor, &isNull); if (HeapTupleHeaderGetNatts(heapTuple->t_data) != Natts_pg_dist_placement || HeapTupleHasNulls(heapTuple)) { ereport(ERROR, (errmsg("unexpected null in pg_dist_placement tuple"))); } simple_heap_delete(pgDistPlacement, &heapTuple->t_self); systable_endscan(scanDescriptor); CitusInvalidateRelcacheByShardId(shardId); CommandCounterIncrement(); heap_close(pgDistPlacement, NoLock); } /* * UpdateShardPlacementState sets the shardState for the placement identified * by placementId. */ void UpdateShardPlacementState(uint64 placementId, char shardState) { Relation pgDistPlacement = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; HeapTuple heapTuple = NULL; TupleDesc tupleDescriptor = NULL; Datum values[Natts_pg_dist_placement]; bool isnull[Natts_pg_dist_placement]; bool replace[Natts_pg_dist_placement]; uint64 shardId = INVALID_SHARD_ID; bool colIsNull = false; pgDistPlacement = heap_open(DistPlacementRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistPlacement); ScanKeyInit(&scanKey[0], Anum_pg_dist_placement_placementid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(placementId)); scanDescriptor = systable_beginscan(pgDistPlacement, DistPlacementPlacementidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for shard placement " UINT64_FORMAT, placementId))); } memset(replace, 0, sizeof(replace)); values[Anum_pg_dist_placement_shardstate - 1] = CharGetDatum(shardState); isnull[Anum_pg_dist_placement_shardstate - 1] = false; replace[Anum_pg_dist_placement_shardstate - 1] = true; heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace); CatalogTupleUpdate(pgDistPlacement, &heapTuple->t_self, heapTuple); shardId = DatumGetInt64(heap_getattr(heapTuple, Anum_pg_dist_placement_shardid, tupleDescriptor, &colIsNull)); Assert(!colIsNull); CitusInvalidateRelcacheByShardId(shardId); CommandCounterIncrement(); systable_endscan(scanDescriptor); heap_close(pgDistPlacement, NoLock); } /* * UpdateColocationGroupReplicationFactor finds colocation group record for given * colocationId and updates its replication factor to given replicationFactor value. * Since we do not cache pg_dist_colocation table, we do not need to invalidate the * cache after updating replication factor. 
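 * The SQL-level effect is roughly the following (colocation id and factor are
 * illustrative values only):
 *   UPDATE pg_dist_colocation SET replicationfactor = 2 WHERE colocationid = 7;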
*/ void UpdateColocationGroupReplicationFactor(uint32 colocationId, int replicationFactor) { Relation pgDistColocation = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; HeapTuple heapTuple = NULL; HeapTuple newHeapTuple = NULL; TupleDesc tupleDescriptor = NULL; Datum values[Natts_pg_dist_colocation]; bool isnull[Natts_pg_dist_colocation]; bool replace[Natts_pg_dist_colocation]; /* we first search for colocation group by its colocation id */ pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistColocation); ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_colocationid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(colocationId)); scanDescriptor = systable_beginscan(pgDistColocation, DistColocationColocationidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("could not find valid entry for colocation group " "%d", colocationId))); } /* after we find colocation group, we update it with new values */ memset(replace, false, sizeof(replace)); memset(isnull, false, sizeof(isnull)); memset(values, 0, sizeof(values)); values[Anum_pg_dist_colocation_replicationfactor - 1] = Int32GetDatum( replicationFactor); replace[Anum_pg_dist_colocation_replicationfactor - 1] = true; newHeapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace); CatalogTupleUpdate(pgDistColocation, &newHeapTuple->t_self, newHeapTuple); CommandCounterIncrement(); heap_freetuple(newHeapTuple); systable_endscan(scanDescriptor); heap_close(pgDistColocation, NoLock); } /* * Check that the current user has `mode` permissions on relationId, error out * if not. Superusers always have such permissions. */ void EnsureTablePermissions(Oid relationId, AclMode mode) { AclResult aclresult; aclresult = pg_class_aclcheck(relationId, GetUserId(), mode); if (aclresult != ACLCHECK_OK) { aclcheck_error(aclresult, ACL_KIND_CLASS, get_rel_name(relationId)); } } /* * Check that the current user has owner rights to relationId, error out if * not. Superusers are regarded as owners. */ void EnsureTableOwner(Oid relationId) { if (!pg_class_ownercheck(relationId, GetUserId())) { aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, get_rel_name(relationId)); } } /* * EnsureSuperUser check that the current user is a superuser and errors out if not. */ void EnsureSuperUser(void) { if (!superuser()) { ereport(ERROR, (errmsg("operation is not allowed"), errhint("Run the command with a superuser."))); } } /* * Return a table's owner as a string. */ char * TableOwner(Oid relationId) { Oid userId = InvalidOid; HeapTuple tuple; tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); if (!HeapTupleIsValid(tuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation with OID %u does not exist", relationId))); } userId = ((Form_pg_class) GETSTRUCT(tuple))->relowner; ReleaseSysCache(tuple); return GetUserNameFromId(userId, false); } /* * TableReferenced function checks whether given table is referenced by another table * via foreign constraints. If it is referenced, this function returns true. To check * that, this function searches given relation at pg_constraints system catalog. However * since there is no index for the column we searched, this function performs sequential * search, therefore call this function with caution. 
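 * A catalog-level sketch of the same check (illustrative only, not what the C
 * code below executes), with my_table being a hypothetical table name:
 *   SELECT EXISTS (SELECT 1 FROM pg_constraint
 *                  WHERE confrelid = 'my_table'::regclass AND contype = 'f');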
*/ bool TableReferenced(Oid relationId) { Relation pgConstraint = NULL; HeapTuple heapTuple = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; Oid scanIndexId = InvalidOid; bool useIndex = false; pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_confrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); scanDescriptor = systable_beginscan(pgConstraint, scanIndexId, useIndex, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); if (constraintForm->contype == CONSTRAINT_FOREIGN) { systable_endscan(scanDescriptor); heap_close(pgConstraint, NoLock); return true; } heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); heap_close(pgConstraint, NoLock); return false; } citus-7.0.3/src/backend/distributed/master/master_modify_multiple_shards.c000066400000000000000000000155371317107136600271500ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_modify_multiple_shards.c * UDF to run multi shard update/delete queries * * This file contains master_modify_multiple_shards function, which takes a update * or delete query and runs it worker shards of the distributed table. The distributed * modify operation can be done within a distributed transaction and committed in * one-phase or two-phase fashion, depending on the citus.multi_shard_commit_protocol * setting. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "commands/dbcommands.h" #include "commands/event_trigger.h" #include "distributed/citus_clauses.h" #include "distributed/citus_ruleutils.h" #include "distributed/listutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_executor.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/multi_shard_transaction.h" #include "distributed/pg_dist_shard.h" #include "distributed/pg_dist_partition.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/shard_pruning.h" #include "distributed/worker_protocol.h" #include "optimizer/clauses.h" #include "optimizer/predtest.h" #include "optimizer/restrictinfo.h" #include "optimizer/var.h" #include "nodes/makefuncs.h" #include "tcop/tcopprot.h" #include "utils/builtins.h" #include "utils/datum.h" #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/memutils.h" static List * ModifyMultipleShardsTaskList(Query *query, List *shardIntervalList, Oid relationId); PG_FUNCTION_INFO_V1(master_modify_multiple_shards); /* * master_modify_multiple_shards takes in a DELETE or UPDATE query string and * pushes the query to shards. It finds shards that match the criteria defined * in the delete command, generates the same delete query string for each of the * found shards with distributed table name replaced with the shard name and * sends the queries to the workers. 
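 * A typical invocation looks like the following (the table name and predicate
 * are examples only):
 *   SELECT master_modify_multiple_shards(
 *       'DELETE FROM github_events WHERE created_at < ''2015-01-01''');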
It uses one-phase or two-phase commit * transactions depending on citus.copy_transaction_manager value. */ Datum master_modify_multiple_shards(PG_FUNCTION_ARGS) { text *queryText = PG_GETARG_TEXT_P(0); char *queryString = text_to_cstring(queryText); List *queryTreeList = NIL; Oid relationId = InvalidOid; Index tableId = 1; Query *modifyQuery = NULL; Node *queryTreeNode; List *restrictClauseList = NIL; bool failOK = false; List *prunedShardIntervalList = NIL; List *taskList = NIL; int32 affectedTupleCount = 0; #if (PG_VERSION_NUM >= 100000) RawStmt *rawStmt = (RawStmt *) ParseTreeRawStmt(queryString); queryTreeNode = rawStmt->stmt; #else queryTreeNode = ParseTreeNode(queryString); #endif EnsureCoordinator(); CheckCitusVersion(ERROR); if (IsA(queryTreeNode, DeleteStmt)) { DeleteStmt *deleteStatement = (DeleteStmt *) queryTreeNode; relationId = RangeVarGetRelid(deleteStatement->relation, NoLock, failOK); EnsureTablePermissions(relationId, ACL_DELETE); } else if (IsA(queryTreeNode, UpdateStmt)) { UpdateStmt *updateStatement = (UpdateStmt *) queryTreeNode; relationId = RangeVarGetRelid(updateStatement->relation, NoLock, failOK); EnsureTablePermissions(relationId, ACL_UPDATE); } else if (IsA(queryTreeNode, TruncateStmt)) { TruncateStmt *truncateStatement = (TruncateStmt *) queryTreeNode; List *relationList = truncateStatement->relations; RangeVar *rangeVar = NULL; if (list_length(relationList) != 1) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("master_modify_multiple_shards() can truncate only " "one table"))); } rangeVar = (RangeVar *) linitial(relationList); relationId = RangeVarGetRelid(rangeVar, NoLock, failOK); if (rangeVar->schemaname == NULL) { Oid schemaOid = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaOid); rangeVar->schemaname = schemaName; } EnsureTablePermissions(relationId, ACL_TRUNCATE); } else { ereport(ERROR, (errmsg("query \"%s\" is not a delete, update, or truncate " "statement", queryString))); } CheckDistributedTable(relationId); #if (PG_VERSION_NUM >= 100000) queryTreeList = pg_analyze_and_rewrite(rawStmt, queryString, NULL, 0, NULL); #else queryTreeList = pg_analyze_and_rewrite(queryTreeNode, queryString, NULL, 0); #endif modifyQuery = (Query *) linitial(queryTreeList); if (modifyQuery->commandType != CMD_UTILITY) { bool multiShardQuery = true; DeferredErrorMessage *error = ModifyQuerySupported(modifyQuery, multiShardQuery); if (error) { RaiseDeferredError(error, ERROR); } } /* reject queries with a returning list */ if (list_length(modifyQuery->returningList) > 0) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("master_modify_multiple_shards() does not support RETURNING"))); } ExecuteMasterEvaluableFunctions(modifyQuery, NULL); restrictClauseList = WhereClauseList(modifyQuery->jointree); prunedShardIntervalList = PruneShards(relationId, tableId, restrictClauseList); CHECK_FOR_INTERRUPTS(); taskList = ModifyMultipleShardsTaskList(modifyQuery, prunedShardIntervalList, relationId); affectedTupleCount = ExecuteModifyTasksWithoutResults(taskList); PG_RETURN_INT32(affectedTupleCount); } /* * ModifyMultipleShardsTaskList builds a list of tasks to execute a query on a * given list of shards. 
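 * Each task carries the same query deparsed against a single shard; for
 * example (names and ids made up), a DELETE on github_events becomes a DELETE
 * on github_events_102008 in the task generated for shard 102008.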
*/ static List * ModifyMultipleShardsTaskList(Query *query, List *shardIntervalList, Oid relationId) { List *taskList = NIL; ListCell *shardIntervalCell = NULL; uint64 jobId = INVALID_JOB_ID; int taskId = 1; /* lock metadata before getting placment lists */ LockShardListMetadata(shardIntervalList, ShareLock); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); Oid relationId = shardInterval->relationId; uint64 shardId = shardInterval->shardId; StringInfo shardQueryString = makeStringInfo(); Task *task = NULL; deparse_shard_query(query, relationId, shardId, shardQueryString); task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId++; task->taskType = MODIFY_TASK; task->queryString = shardQueryString->data; task->dependedTaskList = NULL; task->replicationModel = REPLICATION_MODEL_INVALID; task->anchorShardId = shardId; task->taskPlacementList = FinalizedShardPlacementList(shardId); taskList = lappend(taskList, task); } return taskList; } citus-7.0.3/src/backend/distributed/master/master_node_protocol.c000066400000000000000000000555651317107136600252550ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_node_protocol.c * Routines for requesting information from the master node for creating or * updating shards. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include "funcapi.h" #include "miscadmin.h" #include #include "access/attnum.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup.h" #include "access/htup_details.h" #include "access/skey.h" #include "access/stratnum.h" #include "access/sysattr.h" #include "access/tupdesc.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_constraint.h" #include "catalog/pg_constraint_fn.h" #include "catalog/pg_index.h" #include "catalog/pg_type.h" #include "catalog/pg_namespace.h" #include "commands/sequence.h" #include "distributed/citus_ruleutils.h" #include "distributed/listutils.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/pg_dist_shard.h" #include "distributed/worker_manager.h" #include "foreign/foreign.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/palloc.h" #include "utils/relcache.h" #include "utils/ruleutils.h" #include "utils/tqual.h" #if (PG_VERSION_NUM >= 100000) #include "utils/varlena.h" #endif /* Shard related configuration */ int ShardCount = 32; int ShardReplicationFactor = 1; /* desired replication factor for shards */ int ShardMaxSize = 1048576; /* maximum size in KB one shard can grow to */ int ShardPlacementPolicy = SHARD_PLACEMENT_ROUND_ROBIN; static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(master_get_table_metadata); PG_FUNCTION_INFO_V1(master_get_table_ddl_events); PG_FUNCTION_INFO_V1(master_get_new_shardid); PG_FUNCTION_INFO_V1(master_get_new_placementid); PG_FUNCTION_INFO_V1(master_get_active_worker_nodes); /* * master_get_table_metadata takes 
in a relation name, and returns partition * related metadata for the relation. These metadata are grouped and returned in * a tuple, and are used by the caller when creating new shards. The function * errors if given relation does not exist, or is not partitioned. */ Datum master_get_table_metadata(PG_FUNCTION_ARGS) { text *relationName = PG_GETARG_TEXT_P(0); Oid relationId = ResolveRelationId(relationName); DistTableCacheEntry *partitionEntry = NULL; char *partitionKeyString = NULL; TypeFuncClass resultTypeClass = 0; Datum partitionKeyExpr = 0; Datum partitionKey = 0; Datum metadataDatum = 0; HeapTuple metadataTuple = NULL; TupleDesc metadataDescriptor = NULL; uint64 shardMaxSizeInBytes = 0; char shardStorageType = 0; Datum values[TABLE_METADATA_FIELDS]; bool isNulls[TABLE_METADATA_FIELDS]; CheckCitusVersion(ERROR); /* find partition tuple for partitioned relation */ partitionEntry = DistributedTableCacheEntry(relationId); /* create tuple descriptor for return value */ resultTypeClass = get_call_result_type(fcinfo, NULL, &metadataDescriptor); if (resultTypeClass != TYPEFUNC_COMPOSITE) { ereport(ERROR, (errmsg("return type must be a row type"))); } /* form heap tuple for table metadata */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); partitionKeyString = partitionEntry->partitionKeyString; /* reference tables do not have partition key */ if (partitionKeyString == NULL) { partitionKey = PointerGetDatum(NULL); isNulls[3] = true; } else { /* get decompiled expression tree for partition key */ partitionKeyExpr = PointerGetDatum(cstring_to_text(partitionEntry->partitionKeyString)); partitionKey = DirectFunctionCall2(pg_get_expr, partitionKeyExpr, ObjectIdGetDatum(relationId)); } shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; /* get storage type */ shardStorageType = ShardStorageType(relationId); values[0] = ObjectIdGetDatum(relationId); values[1] = shardStorageType; values[2] = partitionEntry->partitionMethod; values[3] = partitionKey; values[4] = Int32GetDatum(ShardReplicationFactor); values[5] = Int64GetDatum(shardMaxSizeInBytes); values[6] = Int32GetDatum(ShardPlacementPolicy); metadataTuple = heap_form_tuple(metadataDescriptor, values, isNulls); metadataDatum = HeapTupleGetDatum(metadataTuple); PG_RETURN_DATUM(metadataDatum); } /* * CStoreTable returns true if the given relationId belongs to a foreign cstore * table, otherwise it returns false. */ bool CStoreTable(Oid relationId) { bool cstoreTable = false; char relationKind = get_rel_relkind(relationId); if (relationKind == RELKIND_FOREIGN_TABLE) { ForeignTable *foreignTable = GetForeignTable(relationId); ForeignServer *server = GetForeignServer(foreignTable->serverid); ForeignDataWrapper *foreignDataWrapper = GetForeignDataWrapper(server->fdwid); if (strncmp(foreignDataWrapper->fdwname, CSTORE_FDW_NAME, NAMEDATALEN) == 0) { cstoreTable = true; } } return cstoreTable; } /* * master_get_table_ddl_events takes in a relation name, and returns the set of * DDL commands needed to reconstruct the relation. The returned DDL commands * are similar in flavor to schema definitions that pgdump returns. The function * errors if given relation does not exist. */ Datum master_get_table_ddl_events(PG_FUNCTION_ARGS) { FuncCallContext *functionContext = NULL; ListCell *tableDDLEventCell = NULL; CheckCitusVersion(ERROR); /* * On the very first call to this function, we first use the given relation * name to get to the relation. 
We then recreate the list of DDL statements * issued for this relation, and save the first statement's position in the * function context. */ if (SRF_IS_FIRSTCALL()) { text *relationName = PG_GETARG_TEXT_P(0); Oid relationId = ResolveRelationId(relationName); bool includeSequenceDefaults = true; MemoryContext oldContext = NULL; List *tableDDLEventList = NIL; /* create a function context for cross-call persistence */ functionContext = SRF_FIRSTCALL_INIT(); /* switch to memory context appropriate for multiple function calls */ oldContext = MemoryContextSwitchTo(functionContext->multi_call_memory_ctx); /* allocate DDL statements, and then save position in DDL statements */ tableDDLEventList = GetTableDDLEvents(relationId, includeSequenceDefaults); tableDDLEventCell = list_head(tableDDLEventList); functionContext->user_fctx = tableDDLEventCell; MemoryContextSwitchTo(oldContext); } /* * On every call to this function, we get the current position in the * statement list. We then iterate to the next position in the list and * return the current statement, if we have not yet reached the end of * list. */ functionContext = SRF_PERCALL_SETUP(); tableDDLEventCell = (ListCell *) functionContext->user_fctx; if (tableDDLEventCell != NULL) { char *ddlStatement = (char *) lfirst(tableDDLEventCell); text *ddlStatementText = cstring_to_text(ddlStatement); functionContext->user_fctx = lnext(tableDDLEventCell); SRF_RETURN_NEXT(functionContext, PointerGetDatum(ddlStatementText)); } else { SRF_RETURN_DONE(functionContext); } } /* * master_get_new_shardid is a user facing wrapper function around GetNextShardId() * which allocates and returns a unique shardId for the shard to be created. * * NB: This can be called by any user; for now we have decided that that's * ok. We might want to restrict this to users part of a specific role or such * at some later point. */ Datum master_get_new_shardid(PG_FUNCTION_ARGS) { uint64 shardId = 0; Datum shardIdDatum = 0; EnsureCoordinator(); CheckCitusVersion(ERROR); shardId = GetNextShardId(); shardIdDatum = Int64GetDatum(shardId); PG_RETURN_DATUM(shardIdDatum); } /* * GetNextShardId allocates and returns a unique shardId for the shard to be * created. This allocation occurs both in shared memory and in write ahead * logs; writing to logs avoids the risk of having shardId collisions. * * Please note that the caller is still responsible for finalizing shard data * and the shardId with the master node. */ uint64 GetNextShardId() { text *sequenceName = cstring_to_text(SHARDID_SEQUENCE_NAME); Oid sequenceId = ResolveRelationId(sequenceName); Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; Datum shardIdDatum = 0; uint64 shardId = 0; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique shardId from sequence */ shardIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); shardId = DatumGetInt64(shardIdDatum); return shardId; } /* * master_get_new_placementid is a user facing wrapper function around * GetNextPlacementId() which allocates and returns a unique placement id for the * placement to be created. * * NB: This can be called by any user; for now we have decided that that's * ok. We might want to restrict this to users part of a specific role or such * at some later point. 
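 * A minimal usage sketch: SELECT master_get_new_placementid();
 * which simply returns the next value of the placement id sequence.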
*/ Datum master_get_new_placementid(PG_FUNCTION_ARGS) { uint64 placementId = 0; Datum placementIdDatum = 0; EnsureCoordinator(); CheckCitusVersion(ERROR); placementId = GetNextPlacementId(); placementIdDatum = Int64GetDatum(placementId); PG_RETURN_DATUM(placementIdDatum); } /* * GetNextPlacementId allocates and returns a unique placementId for * the placement to be created. This allocation occurs both in shared memory * and in write ahead logs; writing to logs avoids the risk of having shardId * collisions. * * NB: This can be called by any user; for now we have decided that that's * ok. We might want to restrict this to users part of a specific role or such * at some later point. */ uint64 GetNextPlacementId(void) { text *sequenceName = cstring_to_text(PLACEMENTID_SEQUENCE_NAME); Oid sequenceId = ResolveRelationId(sequenceName); Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; Datum placementIdDatum = 0; uint64 placementId = 0; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique placement id from sequence */ placementIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); placementId = DatumGetInt64(placementIdDatum); return placementId; } /* * master_get_active_worker_nodes returns a set of active worker host names and * port numbers in deterministic order. Currently we assume that all worker * nodes in pg_dist_node are active. */ Datum master_get_active_worker_nodes(PG_FUNCTION_ARGS) { FuncCallContext *functionContext = NULL; uint32 workerNodeIndex = 0; uint32 workerNodeCount = 0; CheckCitusVersion(ERROR); if (SRF_IS_FIRSTCALL()) { MemoryContext oldContext = NULL; List *workerNodeList = NIL; uint32 workerNodeCount = 0; TupleDesc tupleDescriptor = NULL; bool hasOid = false; /* create a function context for cross-call persistence */ functionContext = SRF_FIRSTCALL_INIT(); /* switch to memory context appropriate for multiple function calls */ oldContext = MemoryContextSwitchTo(functionContext->multi_call_memory_ctx); workerNodeList = ActiveReadableNodeList(); workerNodeCount = (uint32) list_length(workerNodeList); functionContext->user_fctx = workerNodeList; functionContext->max_calls = workerNodeCount; /* * This tuple descriptor must match the output parameters declared for * the function in pg_proc. */ tupleDescriptor = CreateTemplateTupleDesc(WORKER_NODE_FIELDS, hasOid); TupleDescInitEntry(tupleDescriptor, (AttrNumber) 1, "node_name", TEXTOID, -1, 0); TupleDescInitEntry(tupleDescriptor, (AttrNumber) 2, "node_port", INT8OID, -1, 0); functionContext->tuple_desc = BlessTupleDesc(tupleDescriptor); MemoryContextSwitchTo(oldContext); } functionContext = SRF_PERCALL_SETUP(); workerNodeIndex = functionContext->call_cntr; workerNodeCount = functionContext->max_calls; if (workerNodeIndex < workerNodeCount) { List *workerNodeList = functionContext->user_fctx; WorkerNode *workerNode = list_nth(workerNodeList, workerNodeIndex); Datum workerNodeDatum = WorkerNodeGetDatum(workerNode, functionContext->tuple_desc); SRF_RETURN_NEXT(functionContext, workerNodeDatum); } else { SRF_RETURN_DONE(functionContext); } } /* Finds the relationId from a potentially qualified relation name. 
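 * For example (illustrative names), both 'github_events' and
 * 'public.github_events' resolve to the same relation OID; an error is raised
 * if no such relation exists.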
*/ Oid ResolveRelationId(text *relationName) { List *relationNameList = NIL; RangeVar *relation = NULL; Oid relationId = InvalidOid; bool failOK = false; /* error if relation cannot be found */ /* resolve relationId from passed in schema and relation name */ relationNameList = textToQualifiedNameList(relationName); relation = makeRangeVarFromNameList(relationNameList); relationId = RangeVarGetRelid(relation, NoLock, failOK); return relationId; } /* * GetTableDDLEvents takes in a relationId, includeSequenceDefaults flag, * and returns the list of DDL commands needed to reconstruct the relation. * When the flag includeSequenceDefaults is set, the function also creates * DEFAULT clauses for columns getting their default values from a sequence. * These DDL commands are all palloced; and include the table's schema * definition, optional column storage and statistics definitions, and index * and constraint definitions. */ List * GetTableDDLEvents(Oid relationId, bool includeSequenceDefaults) { List *tableDDLEventList = NIL; List *tableCreationCommandList = NIL; List *indexAndConstraintCommandList = NIL; tableCreationCommandList = GetTableCreationCommands(relationId, includeSequenceDefaults); tableDDLEventList = list_concat(tableDDLEventList, tableCreationCommandList); indexAndConstraintCommandList = GetTableIndexAndConstraintCommands(relationId); tableDDLEventList = list_concat(tableDDLEventList, indexAndConstraintCommandList); return tableDDLEventList; } /* * GetTableCreationCommands takes in a relationId, and returns the list of DDL * commands needed to reconstruct the relation, excluding indexes and * constraints. */ List * GetTableCreationCommands(Oid relationId, bool includeSequenceDefaults) { List *tableDDLEventList = NIL; char tableType = 0; char *tableSchemaDef = NULL; char *tableColumnOptionsDef = NULL; char *createSchemaCommand = NULL; Oid schemaId = InvalidOid; /* * Set search_path to NIL so that all objects outside of pg_catalog will be * schema-prefixed. 
pg_catalog will be added automatically when we call * PushOverrideSearchPath(), since we set addCatalog to true; */ OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext); overridePath->schemas = NIL; overridePath->addCatalog = true; PushOverrideSearchPath(overridePath); /* if foreign table, fetch extension and server definitions */ tableType = get_rel_relkind(relationId); if (tableType == RELKIND_FOREIGN_TABLE) { char *extensionDef = pg_get_extensiondef_string(relationId); char *serverDef = pg_get_serverdef_string(relationId); if (extensionDef != NULL) { tableDDLEventList = lappend(tableDDLEventList, extensionDef); } tableDDLEventList = lappend(tableDDLEventList, serverDef); } /* create schema if the table is not in the default namespace (public) */ schemaId = get_rel_namespace(relationId); createSchemaCommand = CreateSchemaDDLCommand(schemaId); if (createSchemaCommand != NULL) { tableDDLEventList = lappend(tableDDLEventList, createSchemaCommand); } /* fetch table schema and column option definitions */ tableSchemaDef = pg_get_tableschemadef_string(relationId, includeSequenceDefaults); tableColumnOptionsDef = pg_get_tablecolumnoptionsdef_string(relationId); tableDDLEventList = lappend(tableDDLEventList, tableSchemaDef); if (tableColumnOptionsDef != NULL) { tableDDLEventList = lappend(tableDDLEventList, tableColumnOptionsDef); } /* revert back to original search_path */ PopOverrideSearchPath(); return tableDDLEventList; } /* * GetTableIndexAndConstraintCommands returns the list of DDL commands to * (re)create indexes and constraints for a given table. */ List * GetTableIndexAndConstraintCommands(Oid relationId) { List *indexDDLEventList = NIL; Relation pgIndex = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; HeapTuple heapTuple = NULL; /* * Set search_path to NIL so that all objects outside of pg_catalog will be * schema-prefixed. pg_catalog will be added automatically when we call * PushOverrideSearchPath(), since we set addCatalog to true; */ OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext); overridePath->schemas = NIL; overridePath->addCatalog = true; PushOverrideSearchPath(overridePath); /* open system catalog and scan all indexes that belong to this table */ pgIndex = heap_open(IndexRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_index_indrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); scanDescriptor = systable_beginscan(pgIndex, IndexIndrelidIndexId, true, /* indexOK */ NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_index indexForm = (Form_pg_index) GETSTRUCT(heapTuple); Oid indexId = indexForm->indexrelid; bool isConstraint = false; char *statementDef = NULL; /* * A primary key index is always created by a constraint statement. * A unique key index or exclusion index is created by a constraint * if and only if the index has a corresponding constraint entry in pg_depend. * Any other index form is never associated with a constraint. 
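 * (get_index_constraint() below encodes that rule: it returns the owning
 * constraint's OID, looked up through the index's pg_depend entry, or
 * InvalidOid when the index is not backed by a constraint.)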
*/ if (indexForm->indisprimary) { isConstraint = true; } else if (indexForm->indisunique || indexForm->indisexclusion) { Oid constraintId = get_index_constraint(indexId); isConstraint = OidIsValid(constraintId); } else { isConstraint = false; } /* get the corresponding constraint or index statement */ if (isConstraint) { Oid constraintId = get_index_constraint(indexId); Assert(constraintId != InvalidOid); statementDef = pg_get_constraintdef_command(constraintId); } else { statementDef = pg_get_indexdef_string(indexId); } /* append found constraint or index definition to the list */ indexDDLEventList = lappend(indexDDLEventList, statementDef); /* if table is clustered on this index, append definition to the list */ if (indexForm->indisclustered) { char *clusteredDef = pg_get_indexclusterdef_string(indexId); Assert(clusteredDef != NULL); indexDDLEventList = lappend(indexDDLEventList, clusteredDef); } heapTuple = systable_getnext(scanDescriptor); } /* clean up scan and close system catalog */ systable_endscan(scanDescriptor); heap_close(pgIndex, AccessShareLock); /* revert back to original search_path */ PopOverrideSearchPath(); return indexDDLEventList; } /* * GetTableForeignConstraints takes in a relationId, and returns the list of foreign * constraint commands needed to reconstruct foreign constraints of that table. */ List * GetTableForeignConstraintCommands(Oid relationId) { List *tableForeignConstraints = NIL; Relation pgConstraint = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; HeapTuple heapTuple = NULL; /* * Set search_path to NIL so that all objects outside of pg_catalog will be * schema-prefixed. pg_catalog will be added automatically when we call * PushOverrideSearchPath(), since we set addCatalog to true; */ OverrideSearchPath *overridePath = GetOverrideSearchPath(CurrentMemoryContext); overridePath->schemas = NIL; overridePath->addCatalog = true; PushOverrideSearchPath(overridePath); /* open system catalog and scan all constraints that belong to this table */ pgConstraint = heap_open(ConstraintRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, relationId); scanDescriptor = systable_beginscan(pgConstraint, ConstraintRelidIndexId, true, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); if (constraintForm->contype == CONSTRAINT_FOREIGN) { Oid constraintId = get_relation_constraint_oid(relationId, constraintForm->conname.data, true); char *statementDef = pg_get_constraintdef_command(constraintId); tableForeignConstraints = lappend(tableForeignConstraints, statementDef); } heapTuple = systable_getnext(scanDescriptor); } /* clean up scan and close system catalog */ systable_endscan(scanDescriptor); heap_close(pgConstraint, AccessShareLock); /* revert back to original search_path */ PopOverrideSearchPath(); return tableForeignConstraints; } /* * ShardStorageType returns the shard storage type according to relation type. 
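 * In short: regular (or partitioned) tables map to SHARD_STORAGE_TABLE,
 * cstore_fdw foreign tables to SHARD_STORAGE_COLUMNAR, other foreign tables
 * to SHARD_STORAGE_FOREIGN, and any other relation kind raises an error.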
*/ char ShardStorageType(Oid relationId) { char shardStorageType = 0; char relationType = get_rel_relkind(relationId); if (RegularTable(relationId)) { shardStorageType = SHARD_STORAGE_TABLE; } else if (relationType == RELKIND_FOREIGN_TABLE) { bool cstoreTable = CStoreTable(relationId); if (cstoreTable) { shardStorageType = SHARD_STORAGE_COLUMNAR; } else { shardStorageType = SHARD_STORAGE_FOREIGN; } } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unexpected relation type: %c", relationType))); } return shardStorageType; } /* * IsCoordinator function returns true if this node is identified as the * schema/coordinator/master node of the cluster. */ bool IsCoordinator(void) { return (GetLocalGroupId() == 0); } /* * WorkerNodeGetDatum converts the worker node passed to it into its datum * representation. To do this, the function first creates the heap tuple from * the worker node name and port. Then, the function converts the heap tuple * into a datum and returns it. */ static Datum WorkerNodeGetDatum(WorkerNode *workerNode, TupleDesc tupleDescriptor) { Datum values[WORKER_NODE_FIELDS]; bool isNulls[WORKER_NODE_FIELDS]; HeapTuple workerNodeTuple = NULL; Datum workerNodeDatum = 0; memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); values[0] = CStringGetTextDatum(workerNode->workerName); values[1] = Int64GetDatum((int64) workerNode->workerPort); workerNodeTuple = heap_form_tuple(tupleDescriptor, values, isNulls); workerNodeDatum = HeapTupleGetDatum(workerNodeTuple); return workerNodeDatum; } citus-7.0.3/src/backend/distributed/master/master_repair_shards.c000066400000000000000000000347171317107136600252310ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_repair_shards.c * * This file contains functions to repair unhealthy shard placements using data * from healthy ones. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include "miscadmin.h" #include #include "catalog/pg_class.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" #include "distributed/listutils.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_router_executor.h" #include "distributed/resource_lock.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/lsyscache.h" #include "utils/palloc.h" /* local function forward declarations */ static void RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort, char *targetNodeName, int32 targetNodePort); static void EnsureShardCanBeRepaired(int64 shardId, char *sourceNodeName, int32 sourceNodePort, char *targetNodeName, int32 targetNodePort); static List * RecreateTableDDLCommandList(Oid relationId); static List * WorkerApplyShardDDLCommandList(List *ddlCommandList, int64 shardId); /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(master_copy_shard_placement); PG_FUNCTION_INFO_V1(master_move_shard_placement); /* * master_copy_shard_placement implements a user-facing UDF to repair data from * a healthy (source) node to an inactive (target) node. 
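 * (A typical repair call, with a made-up shard id and host names:
 *   SELECT master_copy_shard_placement(102008, 'healthy-host', 5432,
 *                                      'failed-host', 5432, true);
 * the last argument is the do_repair flag read below.)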
To accomplish this it * entirely recreates the table structure before copying all data. During this * time all modifications are paused to the shard. After successful repair, the * inactive placement is marked healthy and modifications may continue. If the * repair fails at any point, this function throws an error, leaving the node * in an unhealthy state. Please note that master_copy_shard_placement copies * given shard along with its co-located shards. */ Datum master_copy_shard_placement(PG_FUNCTION_ARGS) { int64 shardId = PG_GETARG_INT64(0); text *sourceNodeNameText = PG_GETARG_TEXT_P(1); int32 sourceNodePort = PG_GETARG_INT32(2); text *targetNodeNameText = PG_GETARG_TEXT_P(3); int32 targetNodePort = PG_GETARG_INT32(4); bool doRepair = PG_GETARG_BOOL(5); char *sourceNodeName = text_to_cstring(sourceNodeNameText); char *targetNodeName = text_to_cstring(targetNodeNameText); if (!doRepair) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("master_copy_shard_placement() " "with do not repair functionality " "is only supported on Citus Enterprise"))); } EnsureCoordinator(); CheckCitusVersion(ERROR); /* RepairShardPlacement function repairs only given shard */ RepairShardPlacement(shardId, sourceNodeName, sourceNodePort, targetNodeName, targetNodePort); PG_RETURN_VOID(); } /* * master_move_shard_placement moves given shard (and its co-located shards) from one * node to the other node. */ Datum master_move_shard_placement(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("master_move_shard_placement() is only supported on " "Citus Enterprise"))); } /* * RepairShardPlacement repairs given shard from a source node to target node. * This function is not co-location aware. It only repairs given shard. */ static void RepairShardPlacement(int64 shardId, char *sourceNodeName, int32 sourceNodePort, char *targetNodeName, int32 targetNodePort) { ShardInterval *shardInterval = LoadShardInterval(shardId); Oid distributedTableId = shardInterval->relationId; char relationKind = get_rel_relkind(distributedTableId); char *tableOwner = TableOwner(shardInterval->relationId); bool missingOk = false; List *ddlCommandList = NIL; List *foreignConstraintCommandList = NIL; List *placementList = NIL; ShardPlacement *placement = NULL; EnsureTableOwner(distributedTableId); if (relationKind == RELKIND_FOREIGN_TABLE) { char *relationName = get_rel_name(distributedTableId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot repair shard"), errdetail("Table %s is a foreign table. Repairing " "shards backed by foreign tables is " "not supported.", relationName))); } /* * We plan to move the placement to the healthy state, so we need to grab a shard * metadata lock (in exclusive mode). */ LockShardDistributionMetadata(shardId, ExclusiveLock); /* * For shard repair, there should be healthy placement in source node and unhealthy * placement in the target node. 
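 * (EnsureShardCanBeRepaired() below enforces exactly that: it errors out
 * unless the source placement is FILE_FINALIZED and the target placement is
 * FILE_INACTIVE.)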
*/ EnsureShardCanBeRepaired(shardId, sourceNodeName, sourceNodePort, targetNodeName, targetNodePort); /* we generate necessary commands to recreate the shard in target node */ ddlCommandList = CopyShardCommandList(shardInterval, sourceNodeName, sourceNodePort); foreignConstraintCommandList = CopyShardForeignConstraintCommandList(shardInterval); ddlCommandList = list_concat(ddlCommandList, foreignConstraintCommandList); SendCommandListToWorkerInSingleTransaction(targetNodeName, targetNodePort, tableOwner, ddlCommandList); /* after successful repair, we update shard state as healthy*/ placementList = ShardPlacementList(shardId); placement = SearchShardPlacementInList(placementList, targetNodeName, targetNodePort, missingOk); UpdateShardPlacementState(placement->placementId, FILE_FINALIZED); } /* * EnsureShardCanBeRepaired checks if the given shard has a healthy placement in the source * node and inactive node on the target node. */ static void EnsureShardCanBeRepaired(int64 shardId, char *sourceNodeName, int32 sourceNodePort, char *targetNodeName, int32 targetNodePort) { List *shardPlacementList = ShardPlacementList(shardId); ShardPlacement *sourcePlacement = NULL; ShardPlacement *targetPlacement = NULL; bool missingSourceOk = false; bool missingTargetOk = false; sourcePlacement = SearchShardPlacementInList(shardPlacementList, sourceNodeName, sourceNodePort, missingSourceOk); if (sourcePlacement->shardState != FILE_FINALIZED) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("source placement must be in finalized state"))); } targetPlacement = SearchShardPlacementInList(shardPlacementList, targetNodeName, targetNodePort, missingTargetOk); if (targetPlacement->shardState != FILE_INACTIVE) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("target placement must be in inactive state"))); } } /* * SearchShardPlacementInList searches a provided list for a shard placement with the * specified node name and port. If missingOk is set to true, this function returns NULL * if no such placement exists in the provided list, otherwise it throws an error. */ ShardPlacement * SearchShardPlacementInList(List *shardPlacementList, char *nodeName, uint32 nodePort, bool missingOk) { ListCell *shardPlacementCell = NULL; ShardPlacement *matchingPlacement = NULL; foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *shardPlacement = lfirst(shardPlacementCell); if (strncmp(nodeName, shardPlacement->nodeName, MAX_NODE_LENGTH) == 0 && nodePort == shardPlacement->nodePort) { matchingPlacement = shardPlacement; break; } } if (matchingPlacement == NULL) { if (missingOk) { return NULL; } ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("could not find placement matching \"%s:%d\"", nodeName, nodePort), errhint("Confirm the placement still exists and try again."))); } return matchingPlacement; } /* * CopyShardCommandList generates command list to copy the given shard placement * from the source node to the target node. 
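 * The resulting list is ordered as: DDL that (re)creates the shard table, a
 * worker_append_table_to_shard() command that pulls the shard's data over
 * from the source placement, and then the index and constraint DDL; the DDL
 * statements are wrapped in worker_apply_shard_ddl_command() so they are
 * applied to the correct shard.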
*/ List * CopyShardCommandList(ShardInterval *shardInterval, char *sourceNodeName, int32 sourceNodePort) { int64 shardId = shardInterval->shardId; char *shardName = ConstructQualifiedShardName(shardInterval); List *tableRecreationCommandList = NIL; List *indexCommandList = NIL; List *copyShardToNodeCommandsList = NIL; StringInfo copyShardDataCommand = makeStringInfo(); Oid relationId = shardInterval->relationId; tableRecreationCommandList = RecreateTableDDLCommandList(relationId); tableRecreationCommandList = WorkerApplyShardDDLCommandList(tableRecreationCommandList, shardId); copyShardToNodeCommandsList = list_concat(copyShardToNodeCommandsList, tableRecreationCommandList); appendStringInfo(copyShardDataCommand, WORKER_APPEND_TABLE_TO_SHARD, quote_literal_cstr(shardName), /* table to append */ quote_literal_cstr(shardName), /* remote table name */ quote_literal_cstr(sourceNodeName), /* remote host */ sourceNodePort); /* remote port */ copyShardToNodeCommandsList = lappend(copyShardToNodeCommandsList, copyShardDataCommand->data); indexCommandList = GetTableIndexAndConstraintCommands(relationId); indexCommandList = WorkerApplyShardDDLCommandList(indexCommandList, shardId); copyShardToNodeCommandsList = list_concat(copyShardToNodeCommandsList, indexCommandList); return copyShardToNodeCommandsList; } /* * CopyShardForeignConstraintCommandList generates command list to create foreign * constraints existing in source shard after copying it to the other node. */ List * CopyShardForeignConstraintCommandList(ShardInterval *shardInterval) { List *copyShardForeignConstraintCommandList = NIL; Oid schemaId = get_rel_namespace(shardInterval->relationId); char *schemaName = get_namespace_name(schemaId); char *escapedSchemaName = quote_literal_cstr(schemaName); int shardIndex = 0; List *commandList = GetTableForeignConstraintCommands(shardInterval->relationId); ListCell *commandCell = NULL; /* we will only use shardIndex if there is a foreign constraint */ if (commandList != NIL) { shardIndex = ShardIndex(shardInterval); } foreach(commandCell, commandList) { char *command = (char *) lfirst(commandCell); char *escapedCommand = quote_literal_cstr(command); Oid referencedRelationId = InvalidOid; Oid referencedSchemaId = InvalidOid; char *referencedSchemaName = NULL; char *escapedReferencedSchemaName = NULL; uint64 referencedShardId = INVALID_SHARD_ID; StringInfo applyForeignConstraintCommand = makeStringInfo(); /* we need to parse the foreign constraint command to get referencing table id */ referencedRelationId = ForeignConstraintGetReferencedTableId(command); if (referencedRelationId == InvalidOid) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot create foreign key constraint"), errdetail("Referenced relation cannot be found."))); } referencedSchemaId = get_rel_namespace(referencedRelationId); referencedSchemaName = get_namespace_name(referencedSchemaId); escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName); referencedShardId = ColocatedShardIdInRelation(referencedRelationId, shardIndex); appendStringInfo(applyForeignConstraintCommand, WORKER_APPLY_INTER_SHARD_DDL_COMMAND, shardInterval->shardId, escapedSchemaName, referencedShardId, escapedReferencedSchemaName, escapedCommand); copyShardForeignConstraintCommandList = lappend( copyShardForeignConstraintCommandList, applyForeignConstraintCommand->data); } return copyShardForeignConstraintCommandList; } /* * ConstuctQualifiedShardName creates the fully qualified name string of the * given shard in ._ format. 
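 * (The intended shape of the result is "schema"."tablename_shardid"; for
 * example, a hypothetical public.github_events shard 102008 becomes
 * public.github_events_102008, with identifiers quoted only as needed.)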
*/ char * ConstructQualifiedShardName(ShardInterval *shardInterval) { Oid schemaId = get_rel_namespace(shardInterval->relationId); char *schemaName = get_namespace_name(schemaId); char *tableName = get_rel_name(shardInterval->relationId); char *shardName = NULL; shardName = pstrdup(tableName); AppendShardIdToName(&shardName, shardInterval->shardId); shardName = quote_qualified_identifier(schemaName, shardName); return shardName; } /* * RecreateTableDDLCommandList returns a list of DDL statements similar to that * returned by GetTableCreationCommands except that the list begins with a "DROP TABLE" * or "DROP FOREIGN TABLE" statement to facilitate idempotent recreation of a placement. */ static List * RecreateTableDDLCommandList(Oid relationId) { const char *relationName = get_rel_name(relationId); Oid relationSchemaId = get_rel_namespace(relationId); const char *relationSchemaName = get_namespace_name(relationSchemaId); const char *qualifiedRelationName = quote_qualified_identifier(relationSchemaName, relationName); StringInfo dropCommand = makeStringInfo(); List *createCommandList = NIL; List *dropCommandList = NIL; List *recreateCommandList = NIL; char relationKind = get_rel_relkind(relationId); bool includeSequenceDefaults = false; /* build appropriate DROP command based on relation kind */ if (RegularTable(relationId)) { appendStringInfo(dropCommand, DROP_REGULAR_TABLE_COMMAND, qualifiedRelationName); } else if (relationKind == RELKIND_FOREIGN_TABLE) { appendStringInfo(dropCommand, DROP_FOREIGN_TABLE_COMMAND, qualifiedRelationName); } else { ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("repair target is not a regular, foreign or partitioned " "table"))); } dropCommandList = list_make1(dropCommand->data); createCommandList = GetTableCreationCommands(relationId, includeSequenceDefaults); recreateCommandList = list_concat(dropCommandList, createCommandList); return recreateCommandList; } /* * WorkerApplyShardDDLCommandList wraps all DDL commands in ddlCommandList * in a call to worker_apply_shard_ddl_command to apply the DDL command to * the shard specified by shardId. */ static List * WorkerApplyShardDDLCommandList(List *ddlCommandList, int64 shardId) { List *applyDdlCommandList = NIL; ListCell *ddlCommandCell = NULL; foreach(ddlCommandCell, ddlCommandList) { char *ddlCommand = lfirst(ddlCommandCell); char *escapedDdlCommand = quote_literal_cstr(ddlCommand); StringInfo applyDdlCommand = makeStringInfo(); appendStringInfo(applyDdlCommand, WORKER_APPLY_SHARD_DDL_COMMAND_WITHOUT_SCHEMA, shardId, escapedDdlCommand); applyDdlCommandList = lappend(applyDdlCommandList, applyDdlCommand->data); } return applyDdlCommandList; } citus-7.0.3/src/backend/distributed/master/master_split_shards.c000066400000000000000000000050511317107136600250670ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_split_shards.c * * This file contains functions to split a shard according to a given * distribution column value. * * Copyright (c) 2014-2017, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include "catalog/pg_class.h" #include "distributed/colocation_utils.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" #include "nodes/pg_list.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/lsyscache.h" #include "utils/typcache.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(isolate_tenant_to_new_shard); PG_FUNCTION_INFO_V1(worker_hash); /* * isolate_tenant_to_new_shard isolates a tenant to its own shard by spliting * the current matching shard. */ Datum isolate_tenant_to_new_shard(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("isolate_tenant_to_new_shard() is only supported on " "Citus Enterprise"))); } /* * worker_hash returns the hashed value of the given value. */ Datum worker_hash(PG_FUNCTION_ARGS) { Datum valueDatum = PG_GETARG_DATUM(0); Datum hashedValueDatum = 0; TypeCacheEntry *typeEntry = NULL; FmgrInfo *hashFunction = NULL; Oid valueDataType = InvalidOid; CheckCitusVersion(ERROR); /* figure out hash function from the data type */ valueDataType = get_fn_expr_argtype(fcinfo->flinfo, 0); typeEntry = lookup_type_cache(valueDataType, TYPECACHE_HASH_PROC_FINFO); if (typeEntry->hash_proc_finfo.fn_oid == InvalidOid) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot find a hash function for the input type"), errhint("Cast input to a data type with a hash function."))); } hashFunction = palloc0(sizeof(FmgrInfo)); fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CurrentMemoryContext); /* calculate hash value */ hashedValueDatum = FunctionCall1(hashFunction, valueDatum); PG_RETURN_INT32(hashedValueDatum); } citus-7.0.3/src/backend/distributed/master/master_stage_protocol.c000066400000000000000000000676661317107136600254400ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_stage_protocol.c * * Routines for staging PostgreSQL table data as shards into the distributed * cluster. These user-defined functions are similar to the psql-side \stage * command, but also differ from them in that users stage data from tables and * not files, and that they can also append to existing shards. * * Copyright (c) 2013-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "miscadmin.h" #include "libpq-fe.h" #include "access/htup_details.h" #include "access/xact.h" #include "commands/tablecmds.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #if (PG_VERSION_NUM >= 100000) #include "catalog/partition.h" #endif #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" #include "distributed/multi_client_executor.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/placement_connection.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/inval.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/rel.h" #include "utils/tqual.h" /* Local functions forward declarations */ static bool WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, uint64 *shardSize, text **shardMinValue, text **shardMaxValue); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(master_create_empty_shard); PG_FUNCTION_INFO_V1(master_append_table_to_shard); PG_FUNCTION_INFO_V1(master_update_shard_statistics); /* * master_create_empty_shard creates an empty shard for the given distributed * table. The function first updates metadata on the coordinator node to make * this shard visible. Then it creates empty shard on worker node and added * shard placement row to metadata table. */ Datum master_create_empty_shard(PG_FUNCTION_ARGS) { text *relationNameText = PG_GETARG_TEXT_P(0); char *relationName = text_to_cstring(relationNameText); uint64 shardId = INVALID_SHARD_ID; uint32 attemptableNodeCount = 0; uint32 candidateNodeIndex = 0; List *candidateNodeList = NIL; text *nullMinValue = NULL; text *nullMaxValue = NULL; char partitionMethod = 0; char storageType = SHARD_STORAGE_TABLE; Oid relationId = ResolveRelationId(relationNameText); char relationKind = get_rel_relkind(relationId); char replicationModel = REPLICATION_MODEL_INVALID; CheckCitusVersion(ERROR); EnsureTablePermissions(relationId, ACL_INSERT); CheckDistributedTable(relationId); /* don't allow the table to be dropped */ LockRelationOid(relationId, AccessShareLock); /* don't allow concurrent node list changes that require an exclusive lock */ LockRelationOid(DistNodeRelationId(), RowShareLock); /* * We check whether the table is a foreign table or not. If it is, we set * storage type as foreign also. Only exception is if foreign table is a * foreign cstore table, in this case we set storage type as columnar. * * i.e. While setting storage type, columnar has priority over foreign. 
*/ if (relationKind == RELKIND_FOREIGN_TABLE) { bool cstoreTable = cstoreTable = CStoreTable(relationId); if (cstoreTable) { storageType = SHARD_STORAGE_COLUMNAR; } else { storageType = SHARD_STORAGE_FOREIGN; } } partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_HASH) { ereport(ERROR, (errmsg("relation \"%s\" is a hash partitioned table", relationName), errdetail("We currently don't support creating shards " "on hash-partitioned tables"))); } else if (partitionMethod == DISTRIBUTE_BY_NONE) { ereport(ERROR, (errmsg("relation \"%s\" is a reference table", relationName), errdetail("We currently don't support creating shards " "on reference tables"))); } replicationModel = TableReplicationModel(relationId); EnsureReplicationSettings(relationId, replicationModel); /* generate new and unique shardId from sequence */ shardId = GetNextShardId(); /* if enough live groups, add an extra candidate node as backup */ { uint32 primaryNodeCount = ActivePrimaryNodeCount(); attemptableNodeCount = ShardReplicationFactor; if (primaryNodeCount > ShardReplicationFactor) { attemptableNodeCount = ShardReplicationFactor + 1; } } /* first retrieve a list of random nodes for shard placements */ while (candidateNodeIndex < attemptableNodeCount) { WorkerNode *candidateNode = NULL; if (ShardPlacementPolicy == SHARD_PLACEMENT_LOCAL_NODE_FIRST) { candidateNode = WorkerGetLocalFirstCandidateNode(candidateNodeList); } else if (ShardPlacementPolicy == SHARD_PLACEMENT_ROUND_ROBIN) { List *workerNodeList = ActivePrimaryNodeList(); candidateNode = WorkerGetRoundRobinCandidateNode(workerNodeList, shardId, candidateNodeIndex); } else if (ShardPlacementPolicy == SHARD_PLACEMENT_RANDOM) { candidateNode = WorkerGetRandomCandidateNode(candidateNodeList); } else { ereport(ERROR, (errmsg("unrecognized shard placement policy"))); } if (candidateNode == NULL) { ereport(ERROR, (errmsg("could only find %u of %u possible nodes", candidateNodeIndex, attemptableNodeCount))); } candidateNodeList = lappend(candidateNodeList, candidateNode); candidateNodeIndex++; } InsertShardRow(relationId, shardId, storageType, nullMinValue, nullMaxValue); CreateAppendDistributedShardPlacements(relationId, shardId, candidateNodeList, ShardReplicationFactor); PG_RETURN_INT64(shardId); } /* * master_append_table_to_shard appends the given table's contents to the given * shard, and updates shard metadata on the master node. If the function fails * to append table data to all shard placements, it doesn't update any metadata * and errors out. Else if the function fails to append table data to some of * the shard placements, it marks those placements as invalid. These invalid * placements will get cleaned up during shard rebalancing. 
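 *
 * An illustrative usage sketch (the shard id, staging table, and node name
 * below are hypothetical):
 *
 *   SELECT master_append_table_to_shard(102008, 'github_events_staging',
 *                                       'worker-1', 5432);
 *
 * The return value is the shard's fill level, i.e. the updated shard size
 * divided by citus.shard_max_size, which callers can use to decide when to
 * create a fresh shard with master_create_empty_shard().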
*/ Datum master_append_table_to_shard(PG_FUNCTION_ARGS) { uint64 shardId = PG_GETARG_INT64(0); text *sourceTableNameText = PG_GETARG_TEXT_P(1); text *sourceNodeNameText = PG_GETARG_TEXT_P(2); uint32 sourceNodePort = PG_GETARG_UINT32(3); char *sourceTableName = text_to_cstring(sourceTableNameText); char *sourceNodeName = text_to_cstring(sourceNodeNameText); Oid shardSchemaOid = 0; char *shardSchemaName = NULL; char *shardTableName = NULL; char *shardQualifiedName = NULL; List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; uint64 newShardSize = 0; uint64 shardMaxSizeInBytes = 0; float4 shardFillLevel = 0.0; char partitionMethod = 0; ShardInterval *shardInterval = NULL; Oid relationId = InvalidOid; bool cstoreTable = false; char storageType = 0; CheckCitusVersion(ERROR); shardInterval = LoadShardInterval(shardId); relationId = shardInterval->relationId; cstoreTable = CStoreTable(relationId); storageType = shardInterval->storageType; EnsureTablePermissions(relationId, ACL_INSERT); if (storageType != SHARD_STORAGE_TABLE && !cstoreTable) { ereport(ERROR, (errmsg("cannot append to shardId " UINT64_FORMAT, shardId), errdetail("The underlying shard is not a regular table"))); } partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod == DISTRIBUTE_BY_NONE) { ereport(ERROR, (errmsg("cannot append to shardId " UINT64_FORMAT, shardId), errdetail("We currently don't support appending to shards " "in hash-partitioned or reference tables"))); } /* ensure that the shard placement metadata does not change during the append */ LockShardDistributionMetadata(shardId, ShareLock); /* serialize appends to the same shard */ LockShardResource(shardId, ExclusiveLock); /* get schame name of the target shard */ shardSchemaOid = get_rel_namespace(relationId); shardSchemaName = get_namespace_name(shardSchemaOid); /* Build shard table name. 
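 * The shard table name is the relation name with the shard id appended; for an
 * illustrative relation github_events and shard id 102008 the qualified name
 * would be something like public.github_events_102008.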
*/ shardTableName = get_rel_name(relationId); AppendShardIdToName(&shardTableName, shardId); shardQualifiedName = quote_qualified_identifier(shardSchemaName, shardTableName); shardPlacementList = FinalizedShardPlacementList(shardId); if (shardPlacementList == NIL) { ereport(ERROR, (errmsg("could not find any shard placements for shardId " UINT64_FORMAT, shardId), errhint("Try running master_create_empty_shard() first"))); } BeginOrContinueCoordinatedTransaction(); /* issue command to append table to each shard placement */ foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell); MultiConnection *connection = GetPlacementConnection(FOR_DML, shardPlacement, NULL); PGresult *queryResult = NULL; int executeResult = 0; StringInfo workerAppendQuery = makeStringInfo(); appendStringInfo(workerAppendQuery, WORKER_APPEND_TABLE_TO_SHARD, quote_literal_cstr(shardQualifiedName), quote_literal_cstr(sourceTableName), quote_literal_cstr(sourceNodeName), sourceNodePort); RemoteTransactionBeginIfNecessary(connection); executeResult = ExecuteOptionalRemoteCommand(connection, workerAppendQuery->data, &queryResult); PQclear(queryResult); ForgetResults(connection); if (executeResult != 0) { MarkRemoteTransactionFailed(connection, false); } } MarkFailedShardPlacements(); /* update shard statistics and get new shard size */ newShardSize = UpdateShardStatistics(shardId); /* calculate ratio of current shard size compared to shard max size */ shardMaxSizeInBytes = (int64) ShardMaxSize * 1024L; shardFillLevel = ((float4) newShardSize / (float4) shardMaxSizeInBytes); PG_RETURN_FLOAT4(shardFillLevel); } /* * master_update_shard_statistics updates metadata (shard size and shard min/max * values) of the given shard and returns the updated shard size. */ Datum master_update_shard_statistics(PG_FUNCTION_ARGS) { int64 shardId = PG_GETARG_INT64(0); uint64 shardSize = 0; CheckCitusVersion(ERROR); shardSize = UpdateShardStatistics(shardId); PG_RETURN_INT64(shardSize); } /* * CheckDistributedTable checks if the given relationId corresponds to a * distributed table. If it does not, the function errors out. */ void CheckDistributedTable(Oid relationId) { char *relationName = get_rel_name(relationId); /* check that the relationId belongs to a table */ EnsureRelationKindSupported(relationId); if (!IsDistributedTable(relationId)) { ereport(ERROR, (errmsg("relation \"%s\" is not a distributed table", relationName))); } } /* * CreateAppendDistributedShardPlacements creates shards for append distributed * tables on worker nodes. After successfully creating shard on the worker, * shard placement rows are added to the metadata. 
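 *
 * As a worked example (numbers are illustrative): with a replication factor of
 * 2 and a candidate list of three worker nodes, up to three placement attempts
 * are made, so one unreachable node does not necessarily fail the call; the
 * function only errors out if fewer than two placements could be created.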
*/ void CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, List *workerNodeList, int replicationFactor) { int attemptCount = replicationFactor; int workerNodeCount = list_length(workerNodeList); int placementsCreated = 0; int attemptNumber = 0; List *foreignConstraintCommandList = GetTableForeignConstraintCommands(relationId); char *alterTableAttachPartitionCommand = NULL; bool includeSequenceDefaults = false; List *ddlCommandList = GetTableDDLEvents(relationId, includeSequenceDefaults); uint32 connectionFlag = FOR_DDL; char *relationOwner = TableOwner(relationId); /* if we have enough nodes, add an extra placement attempt for backup */ if (workerNodeCount > replicationFactor) { attemptCount++; } for (attemptNumber = 0; attemptNumber < attemptCount; attemptNumber++) { int workerNodeIndex = attemptNumber % workerNodeCount; WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex); uint32 nodeGroupId = workerNode->groupId; char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; int shardIndex = -1; /* not used in this code path */ const RelayFileState shardState = FILE_FINALIZED; const uint64 shardSize = 0; MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlag, nodeName, nodePort, relationOwner, NULL); if (PQstatus(connection->pgConn) != CONNECTION_OK) { ereport(WARNING, (errmsg("could not connect to node \"%s:%u\"", nodeName, nodePort))); continue; } WorkerCreateShard(relationId, shardIndex, shardId, ddlCommandList, foreignConstraintCommandList, alterTableAttachPartitionCommand, connection); InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, shardState, shardSize, nodeGroupId); placementsCreated++; if (placementsCreated >= replicationFactor) { break; } } /* check if we created enough shard replicas */ if (placementsCreated < replicationFactor) { ereport(ERROR, (errmsg("could only create %u of %u of required shard replicas", placementsCreated, replicationFactor))); } } /* * InsertShardPlacementRows inserts shard placements to the metadata table on * the coordinator node. Then, returns the list of added shard placements. */ List * InsertShardPlacementRows(Oid relationId, int64 shardId, List *workerNodeList, int workerStartIndex, int replicationFactor) { int workerNodeCount = list_length(workerNodeList); int attemptNumber = 0; int placementsInserted = 0; List *insertedShardPlacements = NIL; for (attemptNumber = 0; attemptNumber < replicationFactor; attemptNumber++) { int workerNodeIndex = (workerStartIndex + attemptNumber) % workerNodeCount; WorkerNode *workerNode = (WorkerNode *) list_nth(workerNodeList, workerNodeIndex); uint32 nodeGroupId = workerNode->groupId; const RelayFileState shardState = FILE_FINALIZED; const uint64 shardSize = 0; uint64 shardPlacementId = 0; ShardPlacement *shardPlacement = NULL; shardPlacementId = InsertShardPlacementRow(shardId, INVALID_PLACEMENT_ID, shardState, shardSize, nodeGroupId); shardPlacement = LoadShardPlacement(shardId, shardPlacementId); insertedShardPlacements = lappend(insertedShardPlacements, shardPlacement); placementsInserted++; if (placementsInserted >= replicationFactor) { break; } } return insertedShardPlacements; } /* * CreateShardsOnWorkers creates shards on worker nodes given the shard placements * as a parameter. Function opens connections in transactional way. 
If the caller * needs an exclusive connection (in case of distributing local table with data * on it) or creating shards in a transaction, per placement connection is opened * for each placement. */ void CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements, bool useExclusiveConnection, bool colocatedShard) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedRelationId); char *placementOwner = TableOwner(distributedRelationId); bool includeSequenceDefaults = false; List *ddlCommandList = GetTableDDLEvents(distributedRelationId, includeSequenceDefaults); List *foreignConstraintCommandList = GetTableForeignConstraintCommands( distributedRelationId); List *claimedConnectionList = NIL; ListCell *connectionCell = NULL; ListCell *shardPlacementCell = NULL; int connectionFlags = FOR_DDL; char *alterTableAttachPartitionCommand = NULL; if (useExclusiveConnection) { connectionFlags |= CONNECTION_PER_PLACEMENT; } if (PartitionTable(distributedRelationId)) { alterTableAttachPartitionCommand = GenerateAlterTableAttachPartitionCommand(distributedRelationId); } BeginOrContinueCoordinatedTransaction(); if (MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC || cacheEntry->replicationModel == REPLICATION_MODEL_2PC) { CoordinatedTransactionUse2PC(); } foreach(shardPlacementCell, shardPlacements) { ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell); uint64 shardId = shardPlacement->shardId; ShardInterval *shardInterval = LoadShardInterval(shardId); MultiConnection *connection = NULL; int shardIndex = -1; if (colocatedShard) { shardIndex = ShardIndex(shardInterval); } connection = GetPlacementConnection(connectionFlags, shardPlacement, placementOwner); if (useExclusiveConnection) { ClaimConnectionExclusively(connection); claimedConnectionList = lappend(claimedConnectionList, connection); } RemoteTransactionBeginIfNecessary(connection); MarkRemoteTransactionCritical(connection); WorkerCreateShard(distributedRelationId, shardIndex, shardId, ddlCommandList, foreignConstraintCommandList, alterTableAttachPartitionCommand, connection); } /* * We need to unclaim all connections to make them usable again for the copy * command, otherwise copy going to open new connections to placements and * can not see uncommitted changes. */ foreach(connectionCell, claimedConnectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); UnclaimConnection(connection); } } /* * WorkerCreateShard applies DDL commands for the given shardId to create the * shard on the worker node. Commands are sent to the worker node over the * given connection. 
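 *
 * Each DDL command is wrapped in a worker_apply_shard_ddl_command() call so
 * that the worker extends object names with the shard id. A hedged sketch of
 * one such command for an illustrative shard 102008 in the public schema:
 *
 *   SELECT worker_apply_shard_ddl_command (102008, 'CREATE TABLE ...');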
*/ void WorkerCreateShard(Oid relationId, int shardIndex, uint64 shardId, List *ddlCommandList, List *foreignConstraintCommandList, char *alterTableAttachPartitionCommand, MultiConnection *connection) { Oid schemaId = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaId); char *escapedSchemaName = quote_literal_cstr(schemaName); ListCell *ddlCommandCell = NULL; ListCell *foreignConstraintCommandCell = NULL; foreach(ddlCommandCell, ddlCommandList) { char *ddlCommand = (char *) lfirst(ddlCommandCell); char *escapedDDLCommand = quote_literal_cstr(ddlCommand); StringInfo applyDDLCommand = makeStringInfo(); if (strcmp(schemaName, "public") != 0) { appendStringInfo(applyDDLCommand, WORKER_APPLY_SHARD_DDL_COMMAND, shardId, escapedSchemaName, escapedDDLCommand); } else { appendStringInfo(applyDDLCommand, WORKER_APPLY_SHARD_DDL_COMMAND_WITHOUT_SCHEMA, shardId, escapedDDLCommand); } ExecuteCriticalRemoteCommand(connection, applyDDLCommand->data); } foreach(foreignConstraintCommandCell, foreignConstraintCommandList) { char *command = (char *) lfirst(foreignConstraintCommandCell); char *escapedCommand = quote_literal_cstr(command); Oid referencedRelationId = InvalidOid; Oid referencedSchemaId = InvalidOid; char *referencedSchemaName = NULL; char *escapedReferencedSchemaName = NULL; uint64 referencedShardId = INVALID_SHARD_ID; StringInfo applyForeignConstraintCommand = makeStringInfo(); /* we need to parse the foreign constraint command to get referencing table id */ referencedRelationId = ForeignConstraintGetReferencedTableId(command); if (referencedRelationId == InvalidOid) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot create foreign key constraint"), errdetail("Referenced relation cannot be found."))); } referencedSchemaId = get_rel_namespace(referencedRelationId); referencedSchemaName = get_namespace_name(referencedSchemaId); escapedReferencedSchemaName = quote_literal_cstr(referencedSchemaName); /* * In case of self referencing shards, relation itself might not be distributed * already. Therefore we cannot use ColocatedShardIdInRelation which assumes * given relation is distributed. Besides, since we know foreign key references * itself, referencedShardId is actual shardId anyway. */ if (relationId == referencedRelationId) { referencedShardId = shardId; } else { referencedShardId = ColocatedShardIdInRelation(referencedRelationId, shardIndex); } appendStringInfo(applyForeignConstraintCommand, WORKER_APPLY_INTER_SHARD_DDL_COMMAND, shardId, escapedSchemaName, referencedShardId, escapedReferencedSchemaName, escapedCommand); ExecuteCriticalRemoteCommand(connection, applyForeignConstraintCommand->data); } /* * If the shard is created for a partition, send the command to create the * partitioning hierarcy on the shard. 
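 *
 * As a hedged illustration (relation and shard ids are hypothetical): for a
 * partition shard 102040 whose parent table's colocated shard is 102008, the
 * worker receives the original ALTER TABLE ... ATTACH PARTITION command with
 * both relation names rewritten to the corresponding shard names.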
*/ if (alterTableAttachPartitionCommand != NULL) { Oid parentRelationId = PartitionParentOid(relationId); uint64 correspondingParentShardId = InvalidOid; StringInfo applyAttachPartitionCommand = makeStringInfo(); Oid parentSchemaId = InvalidOid; char *parentSchemaName = NULL; char *escapedParentSchemaName = NULL; char *escapedCommand = NULL; Assert(PartitionTable(relationId)); parentSchemaId = get_rel_namespace(parentRelationId); parentSchemaName = get_namespace_name(parentSchemaId); escapedParentSchemaName = quote_literal_cstr(parentSchemaName); escapedCommand = quote_literal_cstr(alterTableAttachPartitionCommand); correspondingParentShardId = ColocatedShardIdInRelation(parentRelationId, shardIndex); appendStringInfo(applyAttachPartitionCommand, WORKER_APPLY_INTER_SHARD_DDL_COMMAND, correspondingParentShardId, escapedParentSchemaName, shardId, escapedSchemaName, escapedCommand); ExecuteCriticalRemoteCommand(connection, applyAttachPartitionCommand->data); } } /* * UpdateShardStatistics updates metadata (shard size and shard min/max values) * of the given shard and returns the updated shard size. */ uint64 UpdateShardStatistics(int64 shardId) { ShardInterval *shardInterval = LoadShardInterval(shardId); Oid relationId = shardInterval->relationId; char storageType = shardInterval->storageType; char partitionType = PartitionMethod(relationId); char *shardQualifiedName = NULL; List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; bool statsOK = false; uint64 shardSize = 0; text *minValue = NULL; text *maxValue = NULL; /* Build shard qualified name. */ char *shardName = get_rel_name(relationId); Oid schemaId = get_rel_namespace(relationId); char *schemaName = get_namespace_name(schemaId); AppendShardIdToName(&shardName, shardId); shardQualifiedName = quote_qualified_identifier(schemaName, shardName); shardPlacementList = FinalizedShardPlacementList(shardId); /* get shard's statistics from a shard placement */ foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); statsOK = WorkerShardStats(placement, relationId, shardQualifiedName, &shardSize, &minValue, &maxValue); if (statsOK) { break; } } /* * If for some reason we appended data to a shard, but failed to retrieve * statistics we just WARN here to avoid losing shard-state updates. Note * that this means we will return 0 as the shard fill-factor, and this shard * also won't be pruned as the statistics will be empty. If the failure was * transient, a subsequent append call will fetch the correct statistics. 
*/ if (!statsOK) { ereport(WARNING, (errmsg("could not get statistics for shard %s", shardQualifiedName), errdetail("Setting shard statistics to NULL"))); } /* make sure we don't process cancel signals */ HOLD_INTERRUPTS(); /* update metadata for each shard placement we appended to */ shardPlacementCell = NULL; foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); uint64 placementId = placement->placementId; uint32 groupId = placement->groupId; DeleteShardPlacementRow(placementId); InsertShardPlacementRow(shardId, placementId, FILE_FINALIZED, shardSize, groupId); } /* only update shard min/max values for append-partitioned tables */ if (partitionType == DISTRIBUTE_BY_APPEND) { DeleteShardRow(shardId); InsertShardRow(relationId, shardId, storageType, minValue, maxValue); } if (QueryCancelPending) { ereport(WARNING, (errmsg("cancel requests are ignored during metadata update"))); QueryCancelPending = false; } RESUME_INTERRUPTS(); return shardSize; } /* * WorkerShardStats queries the worker node, and retrieves shard statistics that * we assume have changed after new table data have been appended to the shard. */ static bool WorkerShardStats(ShardPlacement *placement, Oid relationId, char *shardName, uint64 *shardSize, text **shardMinValue, text **shardMaxValue) { char *quotedShardName = NULL; bool cstoreTable = false; StringInfo tableSizeQuery = makeStringInfo(); const uint32 unusedTableId = 1; char partitionType = PartitionMethod(relationId); Var *partitionColumn = NULL; char *partitionColumnName = NULL; StringInfo partitionValueQuery = makeStringInfo(); PGresult *queryResult = NULL; const int minValueIndex = 0; const int maxValueIndex = 1; uint64 tableSize = 0; char *tableSizeString = NULL; char *tableSizeStringEnd = NULL; bool minValueIsNull = false; bool maxValueIsNull = false; int connectionFlags = 0; int executeCommand = 0; MultiConnection *connection = GetPlacementConnection(connectionFlags, placement, NULL); *shardSize = 0; *shardMinValue = NULL; *shardMaxValue = NULL; quotedShardName = quote_literal_cstr(shardName); cstoreTable = CStoreTable(relationId); if (cstoreTable) { appendStringInfo(tableSizeQuery, SHARD_CSTORE_TABLE_SIZE_QUERY, quotedShardName); } else { appendStringInfo(tableSizeQuery, SHARD_TABLE_SIZE_QUERY, quotedShardName); } executeCommand = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, &queryResult); if (executeCommand != 0) { return false; } tableSizeString = PQgetvalue(queryResult, 0, 0); if (tableSizeString == NULL) { PQclear(queryResult); ForgetResults(connection); return false; } errno = 0; tableSize = strtoull(tableSizeString, &tableSizeStringEnd, 0); if (errno != 0 || (*tableSizeStringEnd) != '\0') { PQclear(queryResult); ForgetResults(connection); return false; } *shardSize = tableSize; PQclear(queryResult); ForgetResults(connection); if (partitionType != DISTRIBUTE_BY_APPEND) { /* we don't need min/max for non-append distributed tables */ return true; } /* fill in the partition column name and shard name in the query. 
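 * For an illustrative append-distributed table with partition column
 * created_at and shard name github_events_102008, the resulting statement is
 * roughly SELECT min(created_at), max(created_at) FROM github_events_102008.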
*/ partitionColumn = PartitionColumn(relationId, unusedTableId); partitionColumnName = get_attname(relationId, partitionColumn->varattno); appendStringInfo(partitionValueQuery, SHARD_RANGE_QUERY, partitionColumnName, partitionColumnName, shardName); executeCommand = ExecuteOptionalRemoteCommand(connection, partitionValueQuery->data, &queryResult); if (executeCommand != 0) { return false; } minValueIsNull = PQgetisnull(queryResult, 0, minValueIndex); maxValueIsNull = PQgetisnull(queryResult, 0, maxValueIndex); if (!minValueIsNull && !maxValueIsNull) { char *minValueResult = PQgetvalue(queryResult, 0, minValueIndex); char *maxValueResult = PQgetvalue(queryResult, 0, maxValueIndex); *shardMinValue = cstring_to_text(minValueResult); *shardMaxValue = cstring_to_text(maxValueResult); } PQclear(queryResult); ForgetResults(connection); return true; } /* * ForeignConstraintGetReferencedTableId parses given foreign constraint query and * extracts referenced table id from it. */ Oid ForeignConstraintGetReferencedTableId(char *queryString) { Node *queryNode = ParseTreeNode(queryString); AlterTableStmt *foreignConstraintStmt = (AlterTableStmt *) queryNode; AlterTableCmd *command = (AlterTableCmd *) linitial(foreignConstraintStmt->cmds); if (command->subtype == AT_AddConstraint) { Constraint *constraint = (Constraint *) command->def; if (constraint->contype == CONSTR_FOREIGN) { RangeVar *referencedTable = constraint->pktable; return RangeVarGetRelid(referencedTable, NoLock, foreignConstraintStmt->missing_ok); } } return InvalidOid; } citus-7.0.3/src/backend/distributed/master/master_truncate.c000066400000000000000000000043521317107136600242200ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_truncate.c * * Routine for truncating local data after a table has been distributed. * * Copyright (c) 2014-2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "commands/tablecmds.h" #include "commands/trigger.h" #include "distributed/master_metadata_utility.h" #include "distributed/multi_join_order.h" #include "distributed/multi_utility.h" #include "distributed/pg_dist_partition.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/rel.h" /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(citus_truncate_trigger); /* * citus_truncate_trigger is called as a trigger when a distributed * table is truncated. 
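 *
 * In rough terms (a sketch of the behaviour implemented below): truncating an
 * append-distributed table drops all of its shards via master_drop_all_shards(),
 * while truncating a hash-distributed or reference table forwards a
 * TRUNCATE TABLE ... CASCADE to the shard placements through
 * master_modify_multiple_shards().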
*/ Datum citus_truncate_trigger(PG_FUNCTION_ARGS) { TriggerData *triggerData = NULL; Relation truncatedRelation = NULL; Oid relationId = InvalidOid; char *relationName = NULL; Oid schemaId = InvalidOid; char *schemaName = NULL; char partitionMethod = 0; if (!CALLED_AS_TRIGGER(fcinfo)) { ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("must be called as trigger"))); } triggerData = (TriggerData *) fcinfo->context; truncatedRelation = triggerData->tg_relation; relationId = RelationGetRelid(truncatedRelation); relationName = get_rel_name(relationId); schemaId = get_rel_namespace(relationId); schemaName = get_namespace_name(schemaId); partitionMethod = PartitionMethod(relationId); if (!EnableDDLPropagation) { PG_RETURN_DATUM(PointerGetDatum(NULL)); } if (partitionMethod == DISTRIBUTE_BY_APPEND) { DirectFunctionCall3(master_drop_all_shards, ObjectIdGetDatum(relationId), CStringGetTextDatum(relationName), CStringGetTextDatum(schemaName)); } else { StringInfo truncateStatement = makeStringInfo(); char *qualifiedTableName = quote_qualified_identifier(schemaName, relationName); appendStringInfo(truncateStatement, "TRUNCATE TABLE %s CASCADE", qualifiedTableName); DirectFunctionCall1(master_modify_multiple_shards, CStringGetTextDatum(truncateStatement->data)); } PG_RETURN_DATUM(PointerGetDatum(NULL)); } citus-7.0.3/src/backend/distributed/master/pg_worker_list.conf.sample000066400000000000000000000020421317107136600260270ustar00rootroot00000000000000# ------------------------------------------ # Citus Database Worker Node Membership List # ------------------------------------------ # # This file contains list of worker node names; these names are used both for # initializing the worker nodes, and later for communicating with them. Records # in this file are in the following format: # # HOSTNAME [PORT] [RACK] # # (The uppercase items must be replaced by actual values.) # # HOSTNAME specifies the DNS resolvable host name for the worker node. In test # environments, localhost may be used to loopback to the current node. # # PORT specifies the port number to connect to at the specified host. This value # is optional; in its absence, the port configuration value is used as the # default. # # RACK specifies the host's network location for the purposes of performing rack # aware data distribution. This value is optional; in its absence, a generic # value is used as the default. # Put your actual configuration here # ---------------------------------- # # HOSTNAME [PORT] [RACK] citus-7.0.3/src/backend/distributed/master/worker_node_manager.c000066400000000000000000000317771317107136600250430ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_node_manager.c * Routines for reading worker nodes from membership file, and allocating * candidate nodes for shard placement. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "commands/dbcommands.h" #include "distributed/worker_manager.h" #include "distributed/metadata_cache.h" #include "distributed/multi_client_executor.h" #include "libpq/hba.h" #if (PG_VERSION_NUM >= 100000) #include "common/ip.h" #else #include "libpq/ip.h" #endif #include "libpq/libpq-be.h" #include "postmaster/postmaster.h" #include "storage/fd.h" #include "storage/ipc.h" #include "storage/shmem.h" #include "utils/guc.h" #include "utils/hsearch.h" #include "utils/memutils.h" /* Config variables managed via guc.c */ char *WorkerListFileName; int MaxWorkerNodesTracked = 2048; /* determines worker node hash table size */ /* Local functions forward declarations */ static WorkerNode * WorkerGetNodeWithName(const char *hostname); static char * ClientHostAddress(StringInfo remoteHostStringInfo); static List * PrimaryNodesNotInList(List *currentList); static WorkerNode * FindRandomNodeFromList(List *candidateWorkerNodeList); static bool OddNumber(uint32 number); static bool ListMember(List *currentList, WorkerNode *workerNode); /* ------------------------------------------------------------ * Worker node selection functions follow * ------------------------------------------------------------ */ /* * WorkerGetRandomCandidateNode accepts a list of WorkerNode's and returns a random * primary node which is not in that list. * * Note that the function returns null if the worker membership list does not * contain enough nodes to allocate a new worker node. */ WorkerNode * WorkerGetRandomCandidateNode(List *currentNodeList) { WorkerNode *workerNode = NULL; bool wantSameRack = false; uint32 tryCount = WORKER_RACK_TRIES; uint32 tryIndex = 0; uint32 currentNodeCount = list_length(currentNodeList); List *candidateWorkerNodeList = PrimaryNodesNotInList(currentNodeList); /* we check if the shard has already been placed on all nodes known to us */ if (list_length(candidateWorkerNodeList) == 0) { return NULL; } /* if current node list is empty, randomly pick one node and return */ if (currentNodeCount == 0) { workerNode = FindRandomNodeFromList(candidateWorkerNodeList); return workerNode; } /* * If the current list has an odd number of nodes (1, 3, 5, etc), we want to * place the shard on a different rack than the first node's rack. * Otherwise, we want to place the shard on the same rack as the first node. */ if (OddNumber(currentNodeCount)) { wantSameRack = false; } else { wantSameRack = true; } /* * We try to find a worker node that fits our rack-aware placement strategy. * If after a predefined number of tries, we still cannot find such a node, * we simply give up and return the last worker node we found. */ for (tryIndex = 0; tryIndex < tryCount; tryIndex++) { WorkerNode *firstNode = (WorkerNode *) linitial(currentNodeList); char *firstRack = firstNode->workerRack; char *workerRack = NULL; bool sameRack = false; workerNode = FindRandomNodeFromList(candidateWorkerNodeList); workerRack = workerNode->workerRack; sameRack = (strncmp(workerRack, firstRack, WORKER_LENGTH) == 0); if ((sameRack && wantSameRack) || (!sameRack && !wantSameRack)) { break; } } return workerNode; } /* * WorkerGetRoundRobinCandidateNode takes in a list of worker nodes and returns * a candidate worker node from that list. To select this node, this function * uses the round-robin policy. 
An ideal round-robin implementation requires * keeping shared state for shard placements; and we instead approximate our * implementation by relying on the ever-increasing shardId. So, the first * worker node selected will be the node at the (shardId MOD worker node count) * index and the remaining candidate nodes will be the next nodes in the list. * * Note that the function returns null if the worker membership list does not * contain enough nodes to place all replicas. */ WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList, uint64 shardId, uint32 placementIndex) { uint32 workerNodeCount = list_length(workerNodeList); WorkerNode *candidateNode = NULL; if (placementIndex < workerNodeCount) { uint32 candidateNodeIndex = (shardId + placementIndex) % workerNodeCount; candidateNode = (WorkerNode *) list_nth(workerNodeList, candidateNodeIndex); } return candidateNode; } /* * WorkerGetLocalFirstCandidateNode takes in a list of worker nodes, and then * allocates a new worker node. The allocation is performed according to the * following policy: if the list is empty, the node where the caller is connecting * from is allocated; if the list is not empty, a node is allocated according * to random policy. */ WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList) { WorkerNode *candidateNode = NULL; uint32 currentNodeCount = list_length(currentNodeList); /* choose first candidate node to be the client's host */ if (currentNodeCount == 0) { StringInfo clientHostStringInfo = makeStringInfo(); char *clientHost = NULL; char *errorMessage = ClientHostAddress(clientHostStringInfo); if (errorMessage != NULL) { ereport(ERROR, (errmsg("%s", errorMessage), errdetail("Could not find the first worker " "node for local-node-first policy."), errhint("Make sure that you are not on the " "master node."))); } /* if hostname is localhost.localdomain, change it to localhost */ clientHost = clientHostStringInfo->data; if (strncmp(clientHost, "localhost.localdomain", WORKER_LENGTH) == 0) { clientHost = pstrdup("localhost"); } candidateNode = WorkerGetNodeWithName(clientHost); if (candidateNode == NULL) { ereport(ERROR, (errmsg("could not find worker node for " "host: %s", clientHost))); } } else { /* find a candidate node different from those already selected */ candidateNode = WorkerGetRandomCandidateNode(currentNodeList); } return candidateNode; } /* * ClientHostAddress appends the connecting client's fully qualified hostname * to the given StringInfo. If there is no such connection or the connection is * over Unix domain socket, the function fills the error message and returns it. * On success, it just returns NULL. 
*/ static char * ClientHostAddress(StringInfo clientHostStringInfo) { Port *port = MyProcPort; char *clientHost = NULL; char *errorMessage = NULL; int clientHostLength = NI_MAXHOST; int flags = NI_NAMEREQD; /* require fully qualified hostname */ int nameFound = 0; if (port == NULL) { errorMessage = "cannot find tcp/ip connection to client"; return errorMessage; } switch (port->raddr.addr.ss_family) { case AF_INET: #ifdef HAVE_IPV6 case AF_INET6: #endif { break; } default: { errorMessage = "invalid address family in connection"; return errorMessage; } } clientHost = palloc0(clientHostLength); nameFound = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen, clientHost, clientHostLength, NULL, 0, flags); if (nameFound == 0) { appendStringInfo(clientHostStringInfo, "%s", clientHost); } else { StringInfo errorMessageStringInfo = makeStringInfo(); appendStringInfo(errorMessageStringInfo, "could not resolve client host: %s", gai_strerror(nameFound)); errorMessage = errorMessageStringInfo->data; return errorMessage; } return errorMessage; } /* * WorkerGetNodeWithName finds and returns a node from the membership list that * has the given hostname. The function returns null if no such node exists. */ static WorkerNode * WorkerGetNodeWithName(const char *hostname) { WorkerNode *workerNode = NULL; HASH_SEQ_STATUS status; HTAB *workerNodeHash = GetWorkerNodeHash(); hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { int nameCompare = strncmp(workerNode->workerName, hostname, WORKER_LENGTH); if (nameCompare == 0) { /* we need to terminate the scan since we break */ hash_seq_term(&status); break; } } return workerNode; } /* * ActivePrimaryNodeCount returns the number of groups with a primary in the cluster. */ uint32 ActivePrimaryNodeCount(void) { List *workerNodeList = ActivePrimaryNodeList(); uint32 liveWorkerCount = list_length(workerNodeList); return liveWorkerCount; } /* * ActiveReadableNodeCount returns the number of groups with a node we can read from. */ uint32 ActiveReadableNodeCount(void) { List *workerNodeList = ActiveReadableNodeList(); uint32 liveWorkerCount = list_length(workerNodeList); return liveWorkerCount; } /* * ActivePrimaryNodeList returns a list of all the active primary nodes in workerNodeHash */ List * ActivePrimaryNodeList(void) { List *workerNodeList = NIL; WorkerNode *workerNode = NULL; HTAB *workerNodeHash = GetWorkerNodeHash(); HASH_SEQ_STATUS status; EnsureModificationsCanRun(); hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { if (workerNode->isActive && WorkerNodeIsPrimary(workerNode)) { WorkerNode *workerNodeCopy = palloc0(sizeof(WorkerNode)); memcpy(workerNodeCopy, workerNode, sizeof(WorkerNode)); workerNodeList = lappend(workerNodeList, workerNodeCopy); } } return workerNodeList; } /* * ActiveReadableNodeList returns a list of all nodes in workerNodeHash we can read from. 
*/ List * ActiveReadableNodeList(void) { List *workerNodeList = NIL; WorkerNode *workerNode = NULL; HTAB *workerNodeHash = GetWorkerNodeHash(); HASH_SEQ_STATUS status; hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { WorkerNode *workerNodeCopy; if (!workerNode->isActive) { continue; } if (!WorkerNodeIsReadable(workerNode)) { continue; } workerNodeCopy = palloc0(sizeof(WorkerNode)); memcpy(workerNodeCopy, workerNode, sizeof(WorkerNode)); workerNodeList = lappend(workerNodeList, workerNodeCopy); } return workerNodeList; } /* * PrimaryNodesNotInList scans through the worker node hash and returns a list of all * primary nodes which are not in currentList. It runs in O(n*m) but currentList is * quite small. */ static List * PrimaryNodesNotInList(List *currentList) { List *workerNodeList = NIL; HTAB *workerNodeHash = GetWorkerNodeHash(); WorkerNode *workerNode = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { if (ListMember(currentList, workerNode)) { continue; } if (WorkerNodeIsPrimary(workerNode)) { workerNodeList = lappend(workerNodeList, workerNode); } } return workerNodeList; } /* FindRandomNodeFromList picks a random node from the list provided to it. */ static WorkerNode * FindRandomNodeFromList(List *candidateWorkerNodeList) { uint32 candidateNodeCount = list_length(candidateWorkerNodeList); /* nb, the random seed has already been set by the postmaster when starting up */ uint32 workerPosition = (random() % candidateNodeCount); WorkerNode *workerNode = (WorkerNode *) list_nth(candidateWorkerNodeList, workerPosition); return workerNode; } /* * OddNumber function returns true if given number is odd; returns false otherwise. */ static bool OddNumber(uint32 number) { bool oddNumber = ((number % 2) == 1); return oddNumber; } /* Checks if given worker node is a member of the current list. */ static bool ListMember(List *currentList, WorkerNode *workerNode) { bool listMember = false; Size keySize = WORKER_LENGTH + sizeof(uint32); ListCell *currentCell = NULL; foreach(currentCell, currentList) { WorkerNode *currentNode = (WorkerNode *) lfirst(currentCell); if (WorkerNodeCompare(workerNode, currentNode, keySize) == 0) { listMember = true; } } return listMember; } /* * CompareWorkerNodes compares two pointers to worker nodes using the exact * same logic employed by WorkerNodeCompare. */ int CompareWorkerNodes(const void *leftElement, const void *rightElement) { const void *leftWorker = *((const void **) leftElement); const void *rightWorker = *((const void **) rightElement); int compare = 0; Size ignoredKeySize = 0; compare = WorkerNodeCompare(leftWorker, rightWorker, ignoredKeySize); return compare; } /* * WorkerNodeCompare compares two worker nodes by their host name and port * number. Two nodes that only differ by their rack locations are considered to * be equal to each other. 
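 *
 * For example (host names and racks below are purely illustrative):
 * ('worker-1', 5432, 'rack-a') and ('worker-1', 5432, 'rack-b') compare as
 * equal, whereas ('worker-1', 5432) sorts before ('worker-1', 5433) because
 * ports are compared only after the names match.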
*/ int WorkerNodeCompare(const void *lhsKey, const void *rhsKey, Size keySize) { const WorkerNode *workerLhs = (const WorkerNode *) lhsKey; const WorkerNode *workerRhs = (const WorkerNode *) rhsKey; int nameCompare = 0; int portCompare = 0; nameCompare = strncmp(workerLhs->workerName, workerRhs->workerName, WORKER_LENGTH); if (nameCompare != 0) { return nameCompare; } portCompare = workerLhs->workerPort - workerRhs->workerPort; return portCompare; } citus-7.0.3/src/backend/distributed/metadata/000077500000000000000000000000001317107136600211355ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/metadata/metadata_sync.c000066400000000000000000001025351317107136600241230ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * metadata_sync.c * * Routines for synchronizing metadata to all workers. * * Copyright (c) 2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include #include #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/pg_foreign_server.h" #include "catalog/pg_namespace.h" #include "catalog/pg_type.h" #include "distributed/citus_ruleutils.h" #include "distributed/distribution_column.h" #include "distributed/listutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" #include "distributed/pg_dist_node.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" #include "foreign/foreign.h" #include "nodes/pg_list.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/syscache.h" #include "utils/tqual.h" static char * LocalGroupIdUpdateCommand(uint32 groupId); static void MarkNodeHasMetadata(char *nodeName, int32 nodePort, bool hasMetadata); static List * SequenceDDLCommandsForTable(Oid relationId); static void EnsureSupportedSequenceColumnType(Oid sequenceOid); static Oid TypeOfColumn(Oid tableId, int16 columnId); static char * TruncateTriggerCreateCommand(Oid relationId); static char * SchemaOwnerName(Oid objectId); static bool HasMetadataWorkers(void); PG_FUNCTION_INFO_V1(start_metadata_sync_to_node); PG_FUNCTION_INFO_V1(stop_metadata_sync_to_node); /* * start_metadata_sync_to_node function creates the metadata in a worker for preparing the * worker for accepting queries. The function first sets the localGroupId of the worker * so that the worker knows which tuple in pg_dist_node table represents itself. After * that, SQL statetemens for re-creating metadata of MX-eligible distributed tables are * sent to the worker. Finally, the hasmetadata column of the target node in pg_dist_node * is marked as true. 
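 *
 * An illustrative invocation (the node name is hypothetical):
 *
 *   SELECT start_metadata_sync_to_node('worker-1', 5432);
 *
 * The call must run as superuser on the coordinator, outside of an explicit
 * transaction block, and the target node must already have been added and
 * activated (e.g. via master_add_node()).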
*/ Datum start_metadata_sync_to_node(PG_FUNCTION_ARGS) { text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); char *extensionOwner = CitusExtensionOwnerName(); char *escapedNodeName = quote_literal_cstr(nodeNameString); WorkerNode *workerNode = NULL; char *localGroupIdUpdateCommand = NULL; List *recreateMetadataSnapshotCommandList = NIL; List *dropMetadataCommandList = NIL; List *createMetadataCommandList = NIL; EnsureCoordinator(); EnsureSuperUser(); EnsureModificationsCanRun(); CheckCitusVersion(ERROR); PreventTransactionChain(true, "start_metadata_sync_to_node"); workerNode = FindWorkerNode(nodeNameString, nodePort); if (workerNode == NULL) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("you cannot sync metadata to a non-existent node"), errhint("First, add the node with SELECT master_add_node" "(%s,%d)", escapedNodeName, nodePort))); } if (!workerNode->isActive) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("you cannot sync metadata to an inactive node"), errhint("First, activate the node with " "SELECT master_activate_node(%s,%d)", escapedNodeName, nodePort))); } MarkNodeHasMetadata(nodeNameString, nodePort, true); if (!WorkerNodeIsPrimary(workerNode)) { /* * If this is a secondary node we can't actually sync metadata to it; we assume * the primary node is receiving metadata. */ PG_RETURN_VOID(); } /* generate and add the local group id's update query */ localGroupIdUpdateCommand = LocalGroupIdUpdateCommand(workerNode->groupId); /* generate the queries which drop the metadata */ dropMetadataCommandList = MetadataDropCommands(); /* generate the queries which create the metadata from scratch */ createMetadataCommandList = MetadataCreateCommands(); recreateMetadataSnapshotCommandList = lappend(recreateMetadataSnapshotCommandList, localGroupIdUpdateCommand); recreateMetadataSnapshotCommandList = list_concat(recreateMetadataSnapshotCommandList, dropMetadataCommandList); recreateMetadataSnapshotCommandList = list_concat(recreateMetadataSnapshotCommandList, createMetadataCommandList); /* * Send the snapshot recreation commands in a single remote transaction and * error out in any kind of failure. Note that it is not required to send * createMetadataSnapshotCommandList in the same transaction that we send * nodeDeleteCommand and nodeInsertCommand commands below. */ SendCommandListToWorkerInSingleTransaction(nodeNameString, nodePort, extensionOwner, recreateMetadataSnapshotCommandList); PG_RETURN_VOID(); } /* * stop_metadata_sync_to_node function sets the hasmetadata column of the specified node * to false in pg_dist_node table, thus indicating that the specified worker node does not * receive DDL changes anymore and cannot be used for issuing queries. */ Datum stop_metadata_sync_to_node(PG_FUNCTION_ARGS) { text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); WorkerNode *workerNode = NULL; EnsureCoordinator(); EnsureSuperUser(); CheckCitusVersion(ERROR); workerNode = FindWorkerNode(nodeNameString, nodePort); if (workerNode == NULL) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("node (%s,%d) does not exist", nodeNameString, nodePort))); } MarkNodeHasMetadata(nodeNameString, nodePort, false); PG_RETURN_VOID(); } /* * ShouldSyncTableMetadata checks if the metadata of a distributed table should be * propagated to metadata workers, i.e. 
the table is an MX table or reference table. * Tables with streaming replication model (which means RF=1) and hash distribution are * considered as MX tables while tables with none distribution are reference tables. */ bool ShouldSyncTableMetadata(Oid relationId) { DistTableCacheEntry *tableEntry = DistributedTableCacheEntry(relationId); bool hashDistributed = (tableEntry->partitionMethod == DISTRIBUTE_BY_HASH); bool streamingReplicated = (tableEntry->replicationModel == REPLICATION_MODEL_STREAMING); bool mxTable = (streamingReplicated && hashDistributed); bool referenceTable = (tableEntry->partitionMethod == DISTRIBUTE_BY_NONE); if (mxTable || referenceTable) { return true; } else { return false; } } /* * MetadataCreateCommands returns list of queries that are * required to create the current metadata snapshot of the node that the * function is called. The metadata snapshot commands includes the * following queries: * * (i) Query that populates pg_dist_node table * (ii) Queries that create the clustered tables * (iii) Queries that populate pg_dist_partition table referenced by (ii) * (iv) Queries that populate pg_dist_shard table referenced by (iii) * (v) Queries that populate pg_dist_placement table referenced by (iv) */ List * MetadataCreateCommands(void) { List *metadataSnapshotCommandList = NIL; List *distributedTableList = DistributedTableList(); List *propagatedTableList = NIL; bool includeNodesFromOtherClusters = true; List *workerNodeList = ReadWorkerNodes(includeNodesFromOtherClusters); ListCell *distributedTableCell = NULL; char *nodeListInsertCommand = NULL; bool includeSequenceDefaults = true; /* make sure we have deterministic output for our tests */ SortList(workerNodeList, CompareWorkerNodes); /* generate insert command for pg_dist_node table */ nodeListInsertCommand = NodeListInsertCommand(workerNodeList); metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, nodeListInsertCommand); /* create the list of tables whose metadata will be created */ foreach(distributedTableCell, distributedTableList) { DistTableCacheEntry *cacheEntry = (DistTableCacheEntry *) lfirst(distributedTableCell); if (ShouldSyncTableMetadata(cacheEntry->relationId)) { propagatedTableList = lappend(propagatedTableList, cacheEntry); } } /* create the tables, but not the metadata */ foreach(distributedTableCell, propagatedTableList) { DistTableCacheEntry *cacheEntry = (DistTableCacheEntry *) lfirst(distributedTableCell); Oid relationId = cacheEntry->relationId; List *workerSequenceDDLCommands = SequenceDDLCommandsForTable(relationId); List *ddlCommandList = GetTableDDLEvents(relationId, includeSequenceDefaults); char *tableOwnerResetCommand = TableOwnerResetCommand(relationId); metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, workerSequenceDDLCommands); metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, ddlCommandList); metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, tableOwnerResetCommand); } /* construct the foreign key constraints after all tables are created */ foreach(distributedTableCell, propagatedTableList) { DistTableCacheEntry *cacheEntry = (DistTableCacheEntry *) lfirst(distributedTableCell); List *foreignConstraintCommands = GetTableForeignConstraintCommands(cacheEntry->relationId); metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, foreignConstraintCommands); } /* after all tables are created, create the metadata */ foreach(distributedTableCell, propagatedTableList) { DistTableCacheEntry *cacheEntry = 
(DistTableCacheEntry *) lfirst(distributedTableCell); List *shardIntervalList = NIL; List *shardCreateCommandList = NIL; char *metadataCommand = NULL; char *truncateTriggerCreateCommand = NULL; Oid clusteredTableId = cacheEntry->relationId; /* add the table metadata command first*/ metadataCommand = DistributionCreateCommand(cacheEntry); metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, metadataCommand); /* add the truncate trigger command after the table became distributed */ truncateTriggerCreateCommand = TruncateTriggerCreateCommand(cacheEntry->relationId); metadataSnapshotCommandList = lappend(metadataSnapshotCommandList, truncateTriggerCreateCommand); /* add the pg_dist_shard{,placement} entries */ shardIntervalList = LoadShardIntervalList(clusteredTableId); shardCreateCommandList = ShardListInsertCommand(shardIntervalList); metadataSnapshotCommandList = list_concat(metadataSnapshotCommandList, shardCreateCommandList); } return metadataSnapshotCommandList; } /* * GetDistributedTableDDLEvents returns the full set of DDL commands necessary to * create the given distributed table on a worker. The list includes setting up any * sequences, setting the owner of the table, inserting table and shard metadata, * setting the truncate trigger and foreign key constraints. */ List * GetDistributedTableDDLEvents(Oid relationId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); List *shardIntervalList = NIL; List *commandList = NIL; List *foreignConstraintCommands = NIL; List *shardMetadataInsertCommandList = NIL; List *sequenceDDLCommands = NIL; List *tableDDLCommands = NIL; char *tableOwnerResetCommand = NULL; char *metadataCommand = NULL; char *truncateTriggerCreateCommand = NULL; bool includeSequenceDefaults = true; /* commands to create sequences */ sequenceDDLCommands = SequenceDDLCommandsForTable(relationId); commandList = list_concat(commandList, sequenceDDLCommands); /* commands to create the table */ tableDDLCommands = GetTableDDLEvents(relationId, includeSequenceDefaults); commandList = list_concat(commandList, tableDDLCommands); /* command to reset the table owner */ tableOwnerResetCommand = TableOwnerResetCommand(relationId); commandList = lappend(commandList, tableOwnerResetCommand); /* command to insert pg_dist_partition entry */ metadataCommand = DistributionCreateCommand(cacheEntry); commandList = lappend(commandList, metadataCommand); /* commands to create the truncate trigger of the table */ truncateTriggerCreateCommand = TruncateTriggerCreateCommand(relationId); commandList = lappend(commandList, truncateTriggerCreateCommand); /* commands to insert pg_dist_shard & pg_dist_placement entries */ shardIntervalList = LoadShardIntervalList(relationId); shardMetadataInsertCommandList = ShardListInsertCommand(shardIntervalList); commandList = list_concat(commandList, shardMetadataInsertCommandList); /* commands to create foreign key constraints */ foreignConstraintCommands = GetTableForeignConstraintCommands(relationId); commandList = list_concat(commandList, foreignConstraintCommands); return commandList; } /* * MetadataDropCommands returns list of queries that are required to * drop all the metadata of the node that are related to clustered tables. * The drop metadata snapshot commands includes the following queries: * * (i) Queries that delete all the rows from pg_dist_node table * (ii) Queries that drop the clustered tables and remove its references from * the pg_dist_partition. 
Note that distributed relation ids are gathered * from the worker itself to prevent dropping any non-distributed tables * with the same name. * (iii) Queries that delete all the rows from pg_dist_shard table referenced by (ii) * (iv) Queries that delete all the rows from pg_dist_placement table * referenced by (iii) */ List * MetadataDropCommands(void) { List *dropSnapshotCommandList = NIL; dropSnapshotCommandList = lappend(dropSnapshotCommandList, REMOVE_ALL_CLUSTERED_TABLES_COMMAND); dropSnapshotCommandList = lappend(dropSnapshotCommandList, DELETE_ALL_NODES); return dropSnapshotCommandList; } /* * NodeListInsertCommand generates a single multi-row INSERT command that can be * executed to insert the nodes that are in workerNodeList to pg_dist_node table. */ char * NodeListInsertCommand(List *workerNodeList) { ListCell *workerNodeCell = NULL; StringInfo nodeListInsertCommand = makeStringInfo(); int workerCount = list_length(workerNodeList); int processedWorkerNodeCount = 0; Oid primaryRole = PrimaryNodeRoleId(); /* if there are no workers, return NULL */ if (workerCount == 0) { return nodeListInsertCommand->data; } if (primaryRole == InvalidOid) { ereport(ERROR, (errmsg("bad metadata, noderole does not exist"), errdetail("you should never see this, please submit " "a bug report"), errhint("run ALTER EXTENSION citus UPDATE and try again"))); } /* generate the query without any values yet */ appendStringInfo(nodeListInsertCommand, "INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, " "noderack, hasmetadata, isactive, noderole, nodecluster) VALUES "); /* iterate over the worker nodes, add the values */ foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *hasMetadataString = workerNode->hasMetadata ? "TRUE" : "FALSE"; char *isActiveString = workerNode->isActive ? "TRUE" : "FALSE"; Datum nodeRoleOidDatum = ObjectIdGetDatum(workerNode->nodeRole); Datum nodeRoleStringDatum = DirectFunctionCall1(enum_out, nodeRoleOidDatum); char *nodeRoleString = DatumGetCString(nodeRoleStringDatum); appendStringInfo(nodeListInsertCommand, "(%d, %d, %s, %d, %s, %s, %s, '%s'::noderole, %s)", workerNode->nodeId, workerNode->groupId, quote_literal_cstr(workerNode->workerName), workerNode->workerPort, quote_literal_cstr(workerNode->workerRack), hasMetadataString, isActiveString, nodeRoleString, quote_literal_cstr(workerNode->nodeCluster)); processedWorkerNodeCount++; if (processedWorkerNodeCount != workerCount) { appendStringInfo(nodeListInsertCommand, ","); } } return nodeListInsertCommand->data; } /* * DistributionCreateCommands generates a commands that can be * executed to replicate the metadata for a distributed table. 
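 *
 * A hedged example of the generated statement for an illustrative table
 * public.github_events that is hash-distributed on user_id with the streaming
 * replication model:
 *
 *   INSERT INTO pg_dist_partition
 *     (logicalrelid, partmethod, partkey, colocationid, repmodel)
 *   VALUES ('public.github_events'::regclass, 'h',
 *           column_name_to_column('public.github_events', 'user_id'), 4, 's');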
*/ char * DistributionCreateCommand(DistTableCacheEntry *cacheEntry) { StringInfo insertDistributionCommand = makeStringInfo(); Oid relationId = cacheEntry->relationId; char distributionMethod = cacheEntry->partitionMethod; char *partitionKeyString = cacheEntry->partitionKeyString; char *qualifiedRelationName = generate_qualified_relation_name(relationId); uint32 colocationId = cacheEntry->colocationId; char replicationModel = cacheEntry->replicationModel; StringInfo tablePartitionKeyString = makeStringInfo(); if (distributionMethod == DISTRIBUTE_BY_NONE) { appendStringInfo(tablePartitionKeyString, "NULL"); } else { char *partitionKeyColumnName = ColumnNameToColumn(relationId, partitionKeyString); appendStringInfo(tablePartitionKeyString, "column_name_to_column(%s,%s)", quote_literal_cstr(qualifiedRelationName), quote_literal_cstr(partitionKeyColumnName)); } appendStringInfo(insertDistributionCommand, "INSERT INTO pg_dist_partition " "(logicalrelid, partmethod, partkey, colocationid, repmodel) " "VALUES " "(%s::regclass, '%c', %s, %d, '%c')", quote_literal_cstr(qualifiedRelationName), distributionMethod, tablePartitionKeyString->data, colocationId, replicationModel); return insertDistributionCommand->data; } /* * DistributionDeleteCommand generates a command that can be executed * to drop a distributed table and its metadata on a remote node. */ char * DistributionDeleteCommand(char *schemaName, char *tableName) { char *distributedRelationName = NULL; StringInfo deleteDistributionCommand = makeStringInfo(); distributedRelationName = quote_qualified_identifier(schemaName, tableName); appendStringInfo(deleteDistributionCommand, "SELECT worker_drop_distributed_table(%s::regclass)", quote_literal_cstr(distributedRelationName)); return deleteDistributionCommand->data; } /* * TableOwnerResetCommand generates a commands that can be executed * to reset the table owner. */ char * TableOwnerResetCommand(Oid relationId) { StringInfo ownerResetCommand = makeStringInfo(); char *qualifiedRelationName = generate_qualified_relation_name(relationId); char *tableOwnerName = TableOwner(relationId); appendStringInfo(ownerResetCommand, "ALTER TABLE %s OWNER TO %s", qualifiedRelationName, quote_identifier(tableOwnerName)); return ownerResetCommand->data; } /* * ShardListInsertCommand generates a single command that can be * executed to replicate shard and shard placement metadata for the * given shard intervals. The function assumes that each shard has a * single placement, and asserts this information. 
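 *
 * A rough sketch of the output for a single illustrative shard 102008 that has
 * one placement on group 1 (all identifiers and hash bounds are hypothetical):
 *
 *   INSERT INTO pg_dist_placement
 *     (shardid, shardstate, shardlength, groupid, placementid)
 *   VALUES (102008, 1, 0, 1, 201);
 *   INSERT INTO pg_dist_shard
 *     (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue)
 *   VALUES ('public.github_events'::regclass, 102008, 't',
 *           '-2147483648', '-1073741825');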
*/ List * ShardListInsertCommand(List *shardIntervalList) { List *commandList = NIL; ListCell *shardIntervalCell = NULL; StringInfo insertPlacementCommand = makeStringInfo(); StringInfo insertShardCommand = makeStringInfo(); int shardCount = list_length(shardIntervalList); int processedShardCount = 0; /* if there are no shards, return empty list */ if (shardCount == 0) { return commandList; } /* add placements to insertPlacementCommand */ foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; List *shardPlacementList = FinalizedShardPlacementList(shardId); ListCell *shardPlacementCell = NULL; foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); if (insertPlacementCommand->len == 0) { /* generate the shard placement query without any values yet */ appendStringInfo(insertPlacementCommand, "INSERT INTO pg_dist_placement " "(shardid, shardstate, shardlength," " groupid, placementid) " "VALUES "); } else { appendStringInfo(insertPlacementCommand, ","); } appendStringInfo(insertPlacementCommand, "(%lu, 1, %lu, %d, %lu)", shardId, placement->shardLength, placement->groupId, placement->placementId); } } /* add the command to the list that we'll return */ commandList = lappend(commandList, insertPlacementCommand->data); /* now, generate the shard query without any values yet */ appendStringInfo(insertShardCommand, "INSERT INTO pg_dist_shard " "(logicalrelid, shardid, shardstorage," " shardminvalue, shardmaxvalue) " "VALUES "); /* now add shards to insertShardCommand */ foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); uint64 shardId = shardInterval->shardId; Oid distributedRelationId = shardInterval->relationId; char *qualifiedRelationName = generate_qualified_relation_name( distributedRelationId); StringInfo minHashToken = makeStringInfo(); StringInfo maxHashToken = makeStringInfo(); if (shardInterval->minValueExists) { appendStringInfo(minHashToken, "'%d'", DatumGetInt32( shardInterval->minValue)); } else { appendStringInfo(minHashToken, "NULL"); } if (shardInterval->maxValueExists) { appendStringInfo(maxHashToken, "'%d'", DatumGetInt32( shardInterval->maxValue)); } else { appendStringInfo(maxHashToken, "NULL"); } appendStringInfo(insertShardCommand, "(%s::regclass, %lu, '%c', %s, %s)", quote_literal_cstr(qualifiedRelationName), shardId, shardInterval->storageType, minHashToken->data, maxHashToken->data); processedShardCount++; if (processedShardCount != shardCount) { appendStringInfo(insertShardCommand, ","); } } /* finally add the command to the list that we'll return */ commandList = lappend(commandList, insertShardCommand->data); return commandList; } /* * ShardListDeleteCommand generates a command list that can be executed to delete * shard and shard placement metadata for the given shard. 
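 *
 * For a shard with id 102008 (illustrative), the returned commands amount to:
 *
 *   DELETE FROM pg_dist_placement WHERE shardid = 102008;
 *   DELETE FROM pg_dist_shard WHERE shardid = 102008;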
*/ List * ShardDeleteCommandList(ShardInterval *shardInterval) { uint64 shardId = shardInterval->shardId; List *commandList = NIL; StringInfo deletePlacementCommand = NULL; StringInfo deleteShardCommand = NULL; /* create command to delete shard placements */ deletePlacementCommand = makeStringInfo(); appendStringInfo(deletePlacementCommand, "DELETE FROM pg_dist_placement WHERE shardid = %lu", shardId); commandList = lappend(commandList, deletePlacementCommand->data); /* create command to delete shard */ deleteShardCommand = makeStringInfo(); appendStringInfo(deleteShardCommand, "DELETE FROM pg_dist_shard WHERE shardid = %lu", shardId); commandList = lappend(commandList, deleteShardCommand->data); return commandList; } /* * NodeDeleteCommand generate a command that can be * executed to delete the metadata for a worker node. */ char * NodeDeleteCommand(uint32 nodeId) { StringInfo nodeDeleteCommand = makeStringInfo(); appendStringInfo(nodeDeleteCommand, "DELETE FROM pg_dist_node " "WHERE nodeid = %u", nodeId); return nodeDeleteCommand->data; } /* * NodeStateUpdateCommand generates a command that can be executed to update * isactive column of a node in pg_dist_node table. */ char * NodeStateUpdateCommand(uint32 nodeId, bool isActive) { StringInfo nodeStateUpdateCommand = makeStringInfo(); char *isActiveString = isActive ? "TRUE" : "FALSE"; appendStringInfo(nodeStateUpdateCommand, "UPDATE pg_dist_node SET isactive = %s " "WHERE nodeid = %u", isActiveString, nodeId); return nodeStateUpdateCommand->data; } /* * ColocationIdUpdateCommand creates the SQL command to change the colocationId * of the table with the given name to the given colocationId in pg_dist_partition * table. */ char * ColocationIdUpdateCommand(Oid relationId, uint32 colocationId) { StringInfo command = makeStringInfo(); char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(command, "UPDATE pg_dist_partition " "SET colocationid = %d " "WHERE logicalrelid = %s::regclass", colocationId, quote_literal_cstr(qualifiedRelationName)); return command->data; } /* * PlacementUpsertCommand creates a SQL command for upserting a pg_dist_placment * entry with the given properties. In the case of a conflict on placementId, the command * updates all properties (excluding the placementId) with the given ones. */ char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, int shardState, uint64 shardLength, uint32 groupId) { StringInfo command = makeStringInfo(); appendStringInfo(command, UPSERT_PLACEMENT, shardId, shardState, shardLength, groupId, placementId); return command->data; } /* * LocalGroupIdUpdateCommand creates the SQL command required to set the local group id * of a worker and returns the command in a string. */ static char * LocalGroupIdUpdateCommand(uint32 groupId) { StringInfo updateCommand = makeStringInfo(); appendStringInfo(updateCommand, "UPDATE pg_dist_local_group SET groupid = %d", groupId); return updateCommand->data; } /* * MarkNodeHasMetadata function sets the hasmetadata column of the specified worker in * pg_dist_node to true. 
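 * More precisely, the column is set to the given hasMetadata value, so the
 * same helper can also be used to clear the flag again.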
*/ static void MarkNodeHasMetadata(char *nodeName, int32 nodePort, bool hasMetadata) { const bool indexOK = false; const int scanKeyCount = 2; Relation pgDistNode = NULL; TupleDesc tupleDescriptor = NULL; ScanKeyData scanKey[scanKeyCount]; SysScanDesc scanDescriptor = NULL; HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_node]; bool isnull[Natts_pg_dist_node]; bool replace[Natts_pg_dist_node]; pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistNode); ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename, BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName)); ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport, BTEqualStrategyNumber, F_INT8EQ, Int32GetDatum(nodePort)); scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for node \"%s:%d\"", nodeName, nodePort))); } memset(replace, 0, sizeof(replace)); values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(hasMetadata); isnull[Anum_pg_dist_node_hasmetadata - 1] = false; replace[Anum_pg_dist_node_hasmetadata - 1] = true; heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace); CatalogTupleUpdate(pgDistNode, &heapTuple->t_self, heapTuple); CitusInvalidateRelcacheByRelid(DistNodeRelationId()); CommandCounterIncrement(); systable_endscan(scanDescriptor); heap_close(pgDistNode, NoLock); } /* * SequenceDDLCommandsForTable returns a list of commands which create sequences (and * their schemas) to run on workers before creating the relation. The sequence creation * commands are wrapped with a `worker_apply_sequence_command` call, which sets the * sequence space uniquely for each worker. Notice that this function is relevant only * during metadata propagation to workers and adds nothing to the list of sequence * commands if none of the workers is marked as receiving metadata changes. */ List * SequenceDDLCommandsForTable(Oid relationId) { List *sequenceDDLList = NIL; #if (PG_VERSION_NUM >= 100000) List *ownedSequences = getOwnedSequences(relationId, InvalidAttrNumber); #else List *ownedSequences = getOwnedSequences(relationId); #endif ListCell *listCell; char *ownerName = TableOwner(relationId); foreach(listCell, ownedSequences) { Oid sequenceOid = (Oid) lfirst_oid(listCell); char *sequenceDef = pg_get_sequencedef_string(sequenceOid); char *escapedSequenceDef = quote_literal_cstr(sequenceDef); StringInfo wrappedSequenceDef = makeStringInfo(); StringInfo sequenceGrantStmt = makeStringInfo(); Oid schemaId = InvalidOid; char *createSchemaCommand = NULL; char *sequenceName = generate_qualified_relation_name(sequenceOid); EnsureSupportedSequenceColumnType(sequenceOid); /* create schema if needed */ schemaId = get_rel_namespace(sequenceOid); createSchemaCommand = CreateSchemaDDLCommand(schemaId); if (createSchemaCommand != NULL) { sequenceDDLList = lappend(sequenceDDLList, createSchemaCommand); } appendStringInfo(wrappedSequenceDef, WORKER_APPLY_SEQUENCE_COMMAND, escapedSequenceDef); appendStringInfo(sequenceGrantStmt, "ALTER SEQUENCE %s OWNER TO %s", sequenceName, quote_identifier(ownerName)); sequenceDDLList = lappend(sequenceDDLList, wrappedSequenceDef->data); sequenceDDLList = lappend(sequenceDDLList, sequenceGrantStmt->data); } return sequenceDDLList; } /* * CreateSchemaDDLCommand returns a "CREATE SCHEMA..." 
SQL string for creating the given * schema if not exists and with proper authorization. */ char * CreateSchemaDDLCommand(Oid schemaId) { char *schemaName = get_namespace_name(schemaId); StringInfo schemaNameDef = NULL; const char *ownerName = NULL; if (strncmp(schemaName, "public", NAMEDATALEN) == 0) { return NULL; } schemaNameDef = makeStringInfo(); ownerName = quote_identifier(SchemaOwnerName(schemaId)); appendStringInfo(schemaNameDef, CREATE_SCHEMA_COMMAND, schemaName, ownerName); return schemaNameDef->data; } /* * EnsureSupportedSequenceColumnType looks at the column which depends on this sequence * (which it Assert's exists) and makes sure its type is suitable for use in a disributed * manner. * * Any column which depends on a sequence (and will therefore be replicated) but which is * not a bigserial cannot be used for an mx table, because there aren't enough values to * ensure that generated numbers are globally unique. */ static void EnsureSupportedSequenceColumnType(Oid sequenceOid) { Oid tableId = InvalidOid; Oid columnType = InvalidOid; int32 columnId = 0; bool shouldSyncMetadata = false; bool hasMetadataWorkers = HasMetadataWorkers(); /* call sequenceIsOwned in order to get the tableId and columnId */ #if (PG_VERSION_NUM >= 100000) bool sequenceOwned = sequenceIsOwned(sequenceOid, DEPENDENCY_AUTO, &tableId, &columnId); if (!sequenceOwned) { sequenceOwned = sequenceIsOwned(sequenceOid, DEPENDENCY_INTERNAL, &tableId, &columnId); } Assert(sequenceOwned); #else sequenceIsOwned(sequenceOid, &tableId, &columnId); #endif shouldSyncMetadata = ShouldSyncTableMetadata(tableId); columnType = TypeOfColumn(tableId, (int16) columnId); if (columnType != INT8OID && shouldSyncMetadata && hasMetadataWorkers) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot create an mx table with a serial or smallserial " "column "), errdetail("Only bigserial is supported in mx tables."))); } } /* * TypeOfColumn returns the Oid of the type of the provided column of the provided table. */ static Oid TypeOfColumn(Oid tableId, int16 columnId) { Relation tableRelation = relation_open(tableId, NoLock); TupleDesc tupleDescriptor = RelationGetDescr(tableRelation); Form_pg_attribute attrForm = tupleDescriptor->attrs[columnId - 1]; relation_close(tableRelation, NoLock); return attrForm->atttypid; } /* * TruncateTriggerCreateCommand creates a SQL query calling worker_create_truncate_trigger * function, which creates the truncate trigger on the worker. */ static char * TruncateTriggerCreateCommand(Oid relationId) { StringInfo triggerCreateCommand = makeStringInfo(); char *tableName = generate_qualified_relation_name(relationId); appendStringInfo(triggerCreateCommand, "SELECT worker_create_truncate_trigger(%s)", quote_literal_cstr(tableName)); return triggerCreateCommand->data; } /* * SchemaOwnerName returns the name of the owner of the specified schema. */ static char * SchemaOwnerName(Oid objectId) { HeapTuple tuple = NULL; Oid ownerId = InvalidOid; char *ownerName = NULL; tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(objectId)); if (HeapTupleIsValid(tuple)) { ownerId = ((Form_pg_namespace) GETSTRUCT(tuple))->nspowner; } else { ownerId = GetUserId(); } ownerName = GetUserNameFromId(ownerId, false); ReleaseSysCache(tuple); return ownerName; } /* * HasMetadataWorkers returns true if any of the workers in the cluster has its * hasmetadata column set to true, which happens when start_metadata_sync_to_node * command is run. 
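 *
 * Conceptually this is roughly equivalent to running
 *   SELECT true FROM pg_dist_node
 *   WHERE isactive AND hasmetadata AND noderole = 'primary' LIMIT 1;
 * on the coordinator, although the check below walks the active primary
 * node list instead of issuing a query.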
*/ static bool HasMetadataWorkers(void) { List *workerNodeList = ActivePrimaryNodeList(); ListCell *workerNodeCell = NULL; foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); if (workerNode->hasMetadata) { return true; } } return false; } /* * CreateTableMetadataOnWorkers creates the list of commands needed to create the * given distributed table and sends these commands to all metadata workers i.e. workers * with hasmetadata=true. Before sending the commands, in order to prevent recursive * propagation, DDL propagation on workers are disabled with a * `SET citus.enable_ddl_propagation TO off;` command. */ void CreateTableMetadataOnWorkers(Oid relationId) { List *commandList = GetDistributedTableDDLEvents(relationId); ListCell *commandCell = NULL; /* prevent recursive propagation */ SendCommandToWorkers(WORKERS_WITH_METADATA, DISABLE_DDL_PROPAGATION); /* send the commands one by one */ foreach(commandCell, commandList) { char *command = (char *) lfirst(commandCell); SendCommandToWorkers(WORKERS_WITH_METADATA, command); } } citus-7.0.3/src/backend/distributed/planner/000077500000000000000000000000001317107136600210145ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/planner/deparse_shard_query.c000066400000000000000000000210341317107136600252110ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * deparse_shard_query.c * * This file contains functions for deparsing shard queries. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "access/heapam.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_ruleutils.h" #include "distributed/deparse_shard_query.h" #include "distributed/insert_select_planner.h" #include "distributed/metadata_cache.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "parser/parsetree.h" #include "storage/lock.h" #include "utils/lsyscache.h" #include "utils/rel.h" static void UpdateTaskQueryString(Query *query, Oid distributedTableId, RangeTblEntry *valuesRTE, Task *task); static void ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte); /* * RebuildQueryStrings deparses the job query for each task to * include execution-time changes such as function evaluation. 
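 *
 * For example, a multi-row INSERT whose VALUES clause contains now() is
 * deparsed again here after the coordinator has evaluated the function, so
 * each task's query string carries the resulting constant (and the correct
 * shard names) rather than the original expression.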
*/ void RebuildQueryStrings(Query *originalQuery, List *taskList) { ListCell *taskCell = NULL; Oid relationId = ((RangeTblEntry *) linitial(originalQuery->rtable))->relid; RangeTblEntry *valuesRTE = ExtractDistributedInsertValuesRTE(originalQuery); foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); Query *query = originalQuery; if (task->insertSelectQuery) { /* for INSERT..SELECT, adjust shard names in SELECT part */ RangeTblEntry *copiedInsertRte = NULL; RangeTblEntry *copiedSubqueryRte = NULL; Query *copiedSubquery = NULL; List *relationShardList = task->relationShardList; ShardInterval *shardInterval = LoadShardInterval(task->anchorShardId); query = copyObject(originalQuery); copiedInsertRte = ExtractInsertRangeTableEntry(query); copiedSubqueryRte = ExtractSelectRangeTableEntry(query); copiedSubquery = copiedSubqueryRte->subquery; AddShardIntervalRestrictionToSelect(copiedSubquery, shardInterval); ReorderInsertSelectTargetLists(query, copiedInsertRte, copiedSubqueryRte); /* setting an alias simplifies deparsing of RETURNING */ if (copiedInsertRte->alias == NULL) { Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL); copiedInsertRte->alias = alias; } UpdateRelationToShardNames((Node *) copiedSubquery, relationShardList); } else if (task->upsertQuery || valuesRTE != NULL) { RangeTblEntry *rangeTableEntry = NULL; /* * Always an alias in UPSERTs and multi-row INSERTs to avoid * deparsing issues (e.g. RETURNING might reference the original * table name, which has been replaced by a shard name). */ rangeTableEntry = linitial(query->rtable); if (rangeTableEntry->alias == NULL) { Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL); rangeTableEntry->alias = alias; } } ereport(DEBUG4, (errmsg("query before rebuilding: %s", task->queryString))); UpdateTaskQueryString(query, relationId, valuesRTE, task); ereport(DEBUG4, (errmsg("query after rebuilding: %s", task->queryString))); } } /* * UpdateTaskQueryString updates the query string stored within the provided * Task. If the Task has row values from a multi-row INSERT, those are injected * into the provided query (using the provided valuesRTE, which must belong to * the query) before deparse occurs (the query's full VALUES list will be * restored before this function returns). */ static void UpdateTaskQueryString(Query *query, Oid distributedTableId, RangeTblEntry *valuesRTE, Task *task) { StringInfo queryString = makeStringInfo(); List *oldValuesLists = NIL; if (valuesRTE != NULL) { Assert(valuesRTE->rtekind == RTE_VALUES); Assert(task->rowValuesLists != NULL); oldValuesLists = valuesRTE->values_lists; valuesRTE->values_lists = task->rowValuesLists; } /* * For INSERT queries, we only have one relation to update, so we can * use deparse_shard_query(). For UPDATE and DELETE queries, we may have * subqueries and joins, so we use relation shard list to update shard * names and call pg_get_query_def() directly. */ if (query->commandType == CMD_INSERT) { deparse_shard_query(query, distributedTableId, task->anchorShardId, queryString); } else { List *relationShardList = task->relationShardList; UpdateRelationToShardNames((Node *) query, relationShardList); pg_get_query_def(query, queryString); } if (valuesRTE != NULL) { valuesRTE->values_lists = oldValuesLists; } task->queryString = queryString->data; } /* * UpdateRelationToShardNames walks over the query tree and appends shard ids to * relations. It uses unique identity value to establish connection between a * shard and the range table entry. 
If the range table id is not given a * identity, than the relation is not referenced from the query, no connection * could be found between a shard and this relation. Therefore relation is replaced * by set of NULL values so that the query would work at worker without any problems. * */ bool UpdateRelationToShardNames(Node *node, List *relationShardList) { RangeTblEntry *newRte = NULL; uint64 shardId = INVALID_SHARD_ID; Oid relationId = InvalidOid; Oid schemaId = InvalidOid; char *relationName = NULL; char *schemaName = NULL; bool replaceRteWithNullValues = false; ListCell *relationShardCell = NULL; RelationShard *relationShard = NULL; if (node == NULL) { return false; } /* want to look at all RTEs, even in subqueries, CTEs and such */ if (IsA(node, Query)) { return query_tree_walker((Query *) node, UpdateRelationToShardNames, relationShardList, QTW_EXAMINE_RTES); } if (!IsA(node, RangeTblEntry)) { return expression_tree_walker(node, UpdateRelationToShardNames, relationShardList); } newRte = (RangeTblEntry *) node; if (newRte->rtekind != RTE_RELATION) { return false; } /* * Search for the restrictions associated with the RTE. There better be * some, otherwise this query wouldn't be elegible as a router query. * * FIXME: We should probably use a hashtable here, to do efficient * lookup. */ foreach(relationShardCell, relationShardList) { relationShard = (RelationShard *) lfirst(relationShardCell); if (newRte->relid == relationShard->relationId) { break; } relationShard = NULL; } replaceRteWithNullValues = relationShard == NULL || relationShard->shardId == INVALID_SHARD_ID; if (replaceRteWithNullValues) { ConvertRteToSubqueryWithEmptyResult(newRte); return false; } shardId = relationShard->shardId; relationId = relationShard->relationId; relationName = get_rel_name(relationId); AppendShardIdToName(&relationName, shardId); schemaId = get_rel_namespace(relationId); schemaName = get_namespace_name(schemaId); ModifyRangeTblExtraData(newRte, CITUS_RTE_SHARD, schemaName, relationName, NIL); return false; } /* * ConvertRteToSubqueryWithEmptyResult converts given relation RTE into * subquery RTE that returns no results. 
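 *
 * For instance, given a hypothetical relation t(a int, b text), the RTE is
 * rewritten as if the query had referenced
 *   (SELECT NULL::int AS a, NULL::text AS b WHERE false) t
 * so the deparsed query remains valid on the worker but contributes no rows.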
*/ static void ConvertRteToSubqueryWithEmptyResult(RangeTblEntry *rte) { Relation relation = heap_open(rte->relid, NoLock); TupleDesc tupleDescriptor = RelationGetDescr(relation); int columnCount = tupleDescriptor->natts; int columnIndex = 0; Query *subquery = NULL; List *targetList = NIL; FromExpr *joinTree = NULL; for (columnIndex = 0; columnIndex < columnCount; columnIndex++) { FormData_pg_attribute *attributeForm = tupleDescriptor->attrs[columnIndex]; TargetEntry *targetEntry = NULL; StringInfo resname = NULL; Const *constValue = NULL; if (attributeForm->attisdropped) { continue; } resname = makeStringInfo(); constValue = makeNullConst(attributeForm->atttypid, attributeForm->atttypmod, attributeForm->attcollation); appendStringInfo(resname, "%s", attributeForm->attname.data); targetEntry = makeNode(TargetEntry); targetEntry->expr = (Expr *) constValue; targetEntry->resno = columnIndex; targetEntry->resname = resname->data; targetList = lappend(targetList, targetEntry); } heap_close(relation, NoLock); joinTree = makeNode(FromExpr); joinTree->quals = makeBoolConst(false, false); subquery = makeNode(Query); subquery->commandType = CMD_SELECT; subquery->querySource = QSRC_ORIGINAL; subquery->canSetTag = true; subquery->targetList = targetList; subquery->jointree = joinTree; rte->rtekind = RTE_SUBQUERY; rte->subquery = subquery; rte->alias = copyObject(rte->eref); } citus-7.0.3/src/backend/distributed/planner/insert_select_planner.c000066400000000000000000001206751317107136600255550ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * insert_select_planner.c * * Planning logic for INSERT..SELECT. * * Copyright (c) 2017, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "catalog/pg_class.h" #include "distributed/citus_clauses.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/errormessage.h" #include "distributed/insert_select_planner.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/resource_lock.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/parsenodes.h" #include "optimizer/clauses.h" #include "optimizer/planner.h" #include "optimizer/restrictinfo.h" #include "optimizer/var.h" #include "parser/parsetree.h" #include "parser/parse_coerce.h" #include "parser/parse_relation.h" #include "utils/lsyscache.h" static MultiPlan * CreateDistributedInsertSelectPlan(Query *originalQuery, PlannerRestrictionContext * plannerRestrictionContext); static bool SafeToPushDownSubquery(PlannerRestrictionContext *plannerRestrictionContext, Query *originalQuery); static Task * RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInterval, RelationRestrictionContext * restrictionContext, uint32 taskIdIndex, bool allRelationsJoinedOnPartitionKey); static DeferredErrorMessage * DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte, RangeTblEntry *subqueryRte, bool allReferenceTables); static DeferredErrorMessage * MultiTaskRouterSelectQuerySupported(Query *query); static DeferredErrorMessage * InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte, 
RangeTblEntry * subqueryRte, Oid * selectPartitionColumnTableId); static MultiPlan * CreateCoordinatorInsertSelectPlan(Query *parse); static DeferredErrorMessage * CoordinatorInsertSelectSupported(Query *insertSelectQuery); static Query * WrapSubquery(Query *subquery); static void CastSelectTargetList(List *selectTargetList, Oid targetRelationId, List *insertTargetList); /* * InsertSelectIntoDistributedTable returns true when the input query is an * INSERT INTO ... SELECT kind of query and the target is a distributed * table. * * Note that the input query should be the original parsetree of * the query (i.e., not passed trough the standard planner). * * This function is inspired from getInsertSelectQuery() on * rewrite/rewriteManip.c. */ bool InsertSelectIntoDistributedTable(Query *query) { CmdType commandType = query->commandType; List *fromList = NULL; RangeTblRef *rangeTableReference = NULL; RangeTblEntry *subqueryRte = NULL; RangeTblEntry *insertRte = NULL; if (commandType != CMD_INSERT) { return false; } if (query->jointree == NULL || !IsA(query->jointree, FromExpr)) { return false; } fromList = query->jointree->fromlist; if (list_length(fromList) != 1) { return false; } rangeTableReference = linitial(fromList); if (!IsA(rangeTableReference, RangeTblRef)) { return false; } subqueryRte = rt_fetch(rangeTableReference->rtindex, query->rtable); if (subqueryRte->rtekind != RTE_SUBQUERY) { return false; } /* ensure that there is a query */ Assert(IsA(subqueryRte->subquery, Query)); insertRte = ExtractInsertRangeTableEntry(query); if (!IsDistributedTable(insertRte->relid)) { return false; } return true; } /* * CreateInsertSelectPlan tries to create a distributed plan for an * INSERT INTO distributed_table SELECT ... query by push down the * command to the workers and if that is not possible it creates a * plan for evaluating the SELECT on the coordinator. */ MultiPlan * CreateInsertSelectPlan(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext) { MultiPlan *distributedPlan = NULL; distributedPlan = CreateDistributedInsertSelectPlan(originalQuery, plannerRestrictionContext); if (distributedPlan->planningError != NULL) { RaiseDeferredError(distributedPlan->planningError, DEBUG1); /* if INSERT..SELECT cannot be distributed, pull to coordinator */ distributedPlan = CreateCoordinatorInsertSelectPlan(originalQuery); } return distributedPlan; } /* * CreateDistributedInsertSelectPlan Creates a MultiPlan for distributed * INSERT ... SELECT queries which could consists of multiple tasks. * * The function never returns NULL, it errors out if cannot create the MultiPlan. 
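 *
 * In effect the SELECT part is planned once per shard of the target table,
 * yielding up to shardCount modify tasks; shards for which the SELECT is
 * pruned away entirely simply get no task. For instance, a query such as
 *   INSERT INTO events_summary SELECT user_id, count(*) FROM events GROUP BY user_id
 * against a target table with 32 shards can produce up to 32 tasks
 * (table names and shard count here are illustrative).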
*/ static MultiPlan * CreateDistributedInsertSelectPlan(Query *originalQuery, PlannerRestrictionContext *plannerRestrictionContext) { int shardOffset = 0; List *sqlTaskList = NIL; uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */ Job *workerJob = NULL; uint64 jobId = INVALID_JOB_ID; MultiPlan *multiPlan = CitusMakeNode(MultiPlan); RangeTblEntry *insertRte = ExtractInsertRangeTableEntry(originalQuery); RangeTblEntry *subqueryRte = ExtractSelectRangeTableEntry(originalQuery); Oid targetRelationId = insertRte->relid; DistTableCacheEntry *targetCacheEntry = DistributedTableCacheEntry(targetRelationId); int shardCount = targetCacheEntry->shardIntervalArrayLength; RelationRestrictionContext *relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; bool allReferenceTables = relationRestrictionContext->allReferenceTables; bool safeToPushDownSubquery = false; multiPlan->operation = originalQuery->commandType; /* * Error semantics for INSERT ... SELECT queries are different than regular * modify queries. Thus, handle separately. */ multiPlan->planningError = DistributedInsertSelectSupported(originalQuery, insertRte, subqueryRte, allReferenceTables); if (multiPlan->planningError) { return multiPlan; } safeToPushDownSubquery = SafeToPushDownSubquery(plannerRestrictionContext, originalQuery); /* * Plan select query for each shard in the target table. Do so by replacing the * partitioning qual parameter added in multi_planner() using the current shard's * actual boundary values. Also, add the current shard's boundary values to the * top level subquery to ensure that even if the partitioning qual is not distributed * to all the tables, we never run the queries on the shards that don't match with * the current shard boundaries. Finally, perform the normal shard pruning to * decide on whether to push the query to the current shard or not. */ for (shardOffset = 0; shardOffset < shardCount; shardOffset++) { ShardInterval *targetShardInterval = targetCacheEntry->sortedShardIntervalArray[shardOffset]; Task *modifyTask = NULL; modifyTask = RouterModifyTaskForShardInterval(originalQuery, targetShardInterval, relationRestrictionContext, taskIdIndex, safeToPushDownSubquery); /* add the task if it could be created */ if (modifyTask != NULL) { modifyTask->insertSelectQuery = true; sqlTaskList = lappend(sqlTaskList, modifyTask); } ++taskIdIndex; } if (MultiTaskQueryLogLevel != MULTI_TASK_QUERY_INFO_OFF && list_length(sqlTaskList) > 1) { ereport(MultiTaskQueryLogLevel, (errmsg("multi-task query about to be executed"), errhint("Queries are split to multiple tasks " "if they have to be split into several" " queries on the workers."))); } /* Create the worker job */ workerJob = CitusMakeNode(Job); workerJob->taskList = sqlTaskList; workerJob->subqueryPushdown = false; workerJob->dependedJobList = NIL; workerJob->jobId = jobId; workerJob->jobQuery = originalQuery; workerJob->requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery); /* and finally the multi plan */ multiPlan->workerJob = workerJob; multiPlan->masterQuery = NULL; multiPlan->routerExecutable = true; multiPlan->hasReturning = false; if (list_length(originalQuery->returningList) > 0) { multiPlan->hasReturning = true; } return multiPlan; } /* * DistributedInsertSelectSupported returns NULL if the INSERT ... SELECT query * is supported, or a description why not. 
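 *
 * Two illustrative cases (table names are hypothetical):
 *
 *   -- acceptable, provided events_summary and events are colocated and the
 *   -- partition columns line up
 *   INSERT INTO events_summary (user_id, cnt)
 *   SELECT user_id, count(*) FROM events GROUP BY user_id;
 *
 *   -- rejected by this check: LIMIT is not allowed in the SELECT part
 *   INSERT INTO events_summary (user_id, cnt)
 *   SELECT user_id, count(*) FROM events GROUP BY user_id LIMIT 10;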
*/ static DeferredErrorMessage * DistributedInsertSelectSupported(Query *queryTree, RangeTblEntry *insertRte, RangeTblEntry *subqueryRte, bool allReferenceTables) { Query *subquery = NULL; Oid selectPartitionColumnTableId = InvalidOid; Oid targetRelationId = insertRte->relid; char targetPartitionMethod = PartitionMethod(targetRelationId); ListCell *rangeTableCell = NULL; DeferredErrorMessage *error = NULL; /* we only do this check for INSERT ... SELECT queries */ AssertArg(InsertSelectIntoDistributedTable(queryTree)); subquery = subqueryRte->subquery; if (!NeedsDistributedPlanning(subquery)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "distributed INSERT ... SELECT can only select from " "distributed tables", NULL, NULL); } if (GetLocalGroupId() != 0) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "distributed INSERT ... SELECT can only be performed from " "the coordinator", NULL, NULL); } /* we do not expect to see a view in modify target */ foreach(rangeTableCell, queryTree->rtable) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); if (rangeTableEntry->rtekind == RTE_RELATION && rangeTableEntry->relkind == RELKIND_VIEW) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot insert into view over distributed table", NULL, NULL); } } if (contain_volatile_functions((Node *) queryTree)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "volatile functions are not allowed in distributed " "INSERT ... SELECT queries", NULL, NULL); } /* we don't support LIMIT, OFFSET and WINDOW functions */ error = MultiTaskRouterSelectQuerySupported(subquery); if (error) { return error; } /* * If we're inserting into a reference table, all participating tables * should be reference tables as well. */ if (targetPartitionMethod == DISTRIBUTE_BY_NONE) { if (!allReferenceTables) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "only reference tables may be queried when targeting " "a reference table with distributed INSERT ... SELECT", NULL, NULL); } } else { DeferredErrorMessage *error = NULL; /* ensure that INSERT's partition column comes from SELECT's partition column */ error = InsertPartitionColumnMatchesSelect(queryTree, insertRte, subqueryRte, &selectPartitionColumnTableId); if (error) { return error; } /* * We expect partition column values come from colocated tables. Note that we * skip this check from the reference table case given that all reference tables * are already (and by default) co-located. */ if (!TablesColocated(insertRte->relid, selectPartitionColumnTableId)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "INSERT target table and the source relation of the SELECT partition " "column value must be colocated in distributed INSERT ... 
SELECT", NULL, NULL); } } return NULL; } /* * SafeToPushDownSubquery returns true if either * (i) there exists join in the query and all relations joined on their * partition keys * (ii) there exists only union set operations and all relations has * partition keys in the same ordinal position in the query */ static bool SafeToPushDownSubquery(PlannerRestrictionContext *plannerRestrictionContext, Query *originalQuery) { RelationRestrictionContext *relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; bool restrictionEquivalenceForPartitionKeys = RestrictionEquivalenceForPartitionKeys(plannerRestrictionContext); if (restrictionEquivalenceForPartitionKeys) { return true; } if (ContainsUnionSubquery(originalQuery)) { return SafeToPushdownUnionSubquery(relationRestrictionContext); } return false; } /* * RouterModifyTaskForShardInterval creates a modify task by * replacing the partitioning qual parameter added in multi_planner() * with the shardInterval's boundary value. Then perform the normal * shard pruning on the subquery. Finally, checks if the target shardInterval * has exactly same placements with the select task's available anchor * placements. * * The function errors out if the subquery is not router select query (i.e., * subqueries with non equi-joins.). */ static Task * RouterModifyTaskForShardInterval(Query *originalQuery, ShardInterval *shardInterval, RelationRestrictionContext *restrictionContext, uint32 taskIdIndex, bool safeToPushdownSubquery) { Query *copiedQuery = copyObject(originalQuery); RangeTblEntry *copiedInsertRte = ExtractInsertRangeTableEntry(copiedQuery); RangeTblEntry *copiedSubqueryRte = ExtractSelectRangeTableEntry(copiedQuery); Query *copiedSubquery = (Query *) copiedSubqueryRte->subquery; uint64 shardId = shardInterval->shardId; Oid distributedTableId = shardInterval->relationId; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); RelationRestrictionContext *copiedRestrictionContext = CopyRelationRestrictionContext(restrictionContext); StringInfo queryString = makeStringInfo(); ListCell *restrictionCell = NULL; Task *modifyTask = NULL; List *selectPlacementList = NIL; uint64 selectAnchorShardId = INVALID_SHARD_ID; List *relationShardList = NIL; uint64 jobId = INVALID_JOB_ID; List *insertShardPlacementList = NULL; List *intersectedPlacementList = NULL; bool upsertQuery = false; bool replacePrunedQueryWithDummy = false; bool allReferenceTables = restrictionContext->allReferenceTables; List *shardOpExpressions = NIL; RestrictInfo *shardRestrictionList = NULL; DeferredErrorMessage *planningError = NULL; /* grab shared metadata lock to stop concurrent placement additions */ LockShardDistributionMetadata(shardId, ShareLock); /* * Replace the partitioning qual parameter value in all baserestrictinfos. * Note that this has to be done on a copy, as the walker modifies in place. 
*/ foreach(restrictionCell, copiedRestrictionContext->relationRestrictionList) { RelationRestriction *restriction = lfirst(restrictionCell); List *originalBaseRestrictInfo = restriction->relOptInfo->baserestrictinfo; List *extendedBaseRestrictInfo = originalBaseRestrictInfo; Index rteIndex = restriction->index; if (!safeToPushdownSubquery || allReferenceTables) { continue; } shardOpExpressions = ShardIntervalOpExpressions(shardInterval, rteIndex); /* means it is a reference table and do not add any shard interval information */ if (shardOpExpressions == NIL) { continue; } shardRestrictionList = make_simple_restrictinfo((Expr *) shardOpExpressions); extendedBaseRestrictInfo = lappend(extendedBaseRestrictInfo, shardRestrictionList); restriction->relOptInfo->baserestrictinfo = extendedBaseRestrictInfo; } /* * We also need to add shard interval range to the subquery in case * the partition qual not distributed all tables such as some * subqueries in WHERE clause. * * Note that we need to add the ranges before the shard pruning to * prevent shard pruning logic (i.e, namely UpdateRelationNames()) * modifies range table entries, which makes hard to add the quals. */ if (!allReferenceTables) { AddShardIntervalRestrictionToSelect(copiedSubquery, shardInterval); } /* mark that we don't want the router planner to generate dummy hosts/queries */ replacePrunedQueryWithDummy = false; /* * Use router planner to decide on whether we can push down the query or not. * If we can, we also rely on the side-effects that all RTEs have been updated * to point to the relevant nodes and selectPlacementList is determined. */ planningError = PlanRouterQuery(copiedSubquery, copiedRestrictionContext, &selectPlacementList, &selectAnchorShardId, &relationShardList, replacePrunedQueryWithDummy); if (planningError) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning for the given " "modification"), errdetail("Select query cannot be pushed down to the worker."))); } /* ensure that we do not send queries where select is pruned away completely */ if (list_length(selectPlacementList) == 0) { ereport(DEBUG2, (errmsg("Skipping target shard interval %ld since " "SELECT query for it pruned away", shardId))); return NULL; } /* get the placements for insert target shard and its intersection with select */ insertShardPlacementList = FinalizedShardPlacementList(shardId); intersectedPlacementList = IntersectPlacementList(insertShardPlacementList, selectPlacementList); /* * If insert target does not have exactly the same placements with the select, * we sholdn't run the query. 
*/ if (list_length(insertShardPlacementList) != list_length(intersectedPlacementList)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning for the given " "modification"), errdetail("Insert query cannot be executed on all placements " "for shard %ld", shardId))); } /* this is required for correct deparsing of the query */ ReorderInsertSelectTargetLists(copiedQuery, copiedInsertRte, copiedSubqueryRte); /* set the upsert flag */ if (originalQuery->onConflict != NULL) { upsertQuery = true; } /* setting an alias simplifies deparsing of RETURNING */ if (copiedInsertRte->alias == NULL) { Alias *alias = makeAlias(CITUS_TABLE_ALIAS, NIL); copiedInsertRte->alias = alias; } /* and generate the full query string */ deparse_shard_query(copiedQuery, distributedTableId, shardInterval->shardId, queryString); ereport(DEBUG2, (errmsg("distributed statement: %s", queryString->data))); modifyTask = CreateBasicTask(jobId, taskIdIndex, MODIFY_TASK, queryString->data); modifyTask->dependedTaskList = NULL; modifyTask->anchorShardId = shardId; modifyTask->taskPlacementList = insertShardPlacementList; modifyTask->upsertQuery = upsertQuery; modifyTask->relationShardList = relationShardList; modifyTask->replicationModel = cacheEntry->replicationModel; return modifyTask; } /* * ReorderInsertSelectTargetLists reorders the target lists of INSERT/SELECT * query which is required for deparsing purposes. The reordered query is returned. * * The necessity for this function comes from the fact that ruleutils.c is not supposed * to be used on "rewritten" queries (i.e. ones that have been passed through * QueryRewrite()). Query rewriting is the process in which views and such are expanded, * and, INSERT/UPDATE targetlists are reordered to match the physical order, * defaults etc. For the details of reordeing, see transformInsertRow() and * rewriteTargetListIU(). */ Query * ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, RangeTblEntry *subqueryRte) { Query *subquery = NULL; ListCell *insertTargetEntryCell; List *newSubqueryTargetlist = NIL; List *newInsertTargetlist = NIL; int resno = 1; Index insertTableId = 1; Oid insertRelationId = InvalidOid; int subqueryTargetLength = 0; int targetEntryIndex = 0; AssertArg(InsertSelectIntoDistributedTable(originalQuery)); subquery = subqueryRte->subquery; insertRelationId = insertRte->relid; /* * We implement the following algorithm for the reoderding: * - Iterate over the INSERT target list entries * - If the target entry includes a Var, find the corresponding * SELECT target entry on the original query and update resno * - If the target entry does not include a Var (i.e., defaults * or constants), create new target entry and add that to * SELECT target list * - Create a new INSERT target entry with respect to the new * SELECT target entry created. */ foreach(insertTargetEntryCell, originalQuery->targetList) { TargetEntry *oldInsertTargetEntry = lfirst(insertTargetEntryCell); TargetEntry *newInsertTargetEntry = NULL; Var *newInsertVar = NULL; TargetEntry *newSubqueryTargetEntry = NULL; List *targetVarList = NULL; int targetVarCount = 0; AttrNumber originalAttrNo = get_attnum(insertRelationId, oldInsertTargetEntry->resname); /* see transformInsertRow() for the details */ if (IsA(oldInsertTargetEntry->expr, ArrayRef) || IsA(oldInsertTargetEntry->expr, FieldStore)) { ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg( "cannot plan distributed INSERT INTO ... 
SELECT query"), errhint("Do not use array references and field stores " "on the INSERT target list."))); } /* * It is safe to pull Var clause and ignore the coercions since that * are already going to be added on the workers implicitly. */ targetVarList = pull_var_clause((Node *) oldInsertTargetEntry->expr, PVC_RECURSE_AGGREGATES); targetVarCount = list_length(targetVarList); /* a single INSERT target entry cannot have more than one Var */ Assert(targetVarCount <= 1); if (targetVarCount == 1) { Var *oldInsertVar = (Var *) linitial(targetVarList); TargetEntry *oldSubqueryTle = list_nth(subquery->targetList, oldInsertVar->varattno - 1); newSubqueryTargetEntry = copyObject(oldSubqueryTle); newSubqueryTargetEntry->resno = resno; newSubqueryTargetlist = lappend(newSubqueryTargetlist, newSubqueryTargetEntry); } else { newSubqueryTargetEntry = makeTargetEntry(oldInsertTargetEntry->expr, resno, oldInsertTargetEntry->resname, oldInsertTargetEntry->resjunk); newSubqueryTargetlist = lappend(newSubqueryTargetlist, newSubqueryTargetEntry); } /* * The newly created select target entry cannot be a junk entry since junk * entries are not in the final target list and we're processing the * final target list entries. */ Assert(!newSubqueryTargetEntry->resjunk); newInsertVar = makeVar(insertTableId, originalAttrNo, exprType((Node *) newSubqueryTargetEntry->expr), exprTypmod((Node *) newSubqueryTargetEntry->expr), exprCollation((Node *) newSubqueryTargetEntry->expr), 0); newInsertTargetEntry = makeTargetEntry((Expr *) newInsertVar, originalAttrNo, oldInsertTargetEntry->resname, oldInsertTargetEntry->resjunk); newInsertTargetlist = lappend(newInsertTargetlist, newInsertTargetEntry); resno++; } /* * if there are any remaining target list entries (i.e., GROUP BY column not on the * target list of subquery), update the remaining resnos. */ subqueryTargetLength = list_length(subquery->targetList); for (; targetEntryIndex < subqueryTargetLength; ++targetEntryIndex) { TargetEntry *oldSubqueryTle = list_nth(subquery->targetList, targetEntryIndex); TargetEntry *newSubqueryTargetEntry = NULL; /* * Skip non-junk entries since we've already processed them above and this * loop only is intended for junk entries. */ if (!oldSubqueryTle->resjunk) { continue; } newSubqueryTargetEntry = copyObject(oldSubqueryTle); newSubqueryTargetEntry->resno = resno; newSubqueryTargetlist = lappend(newSubqueryTargetlist, newSubqueryTargetEntry); resno++; } originalQuery->targetList = newInsertTargetlist; subquery->targetList = newSubqueryTargetlist; return NULL; } /* * MultiTaskRouterSelectQuerySupported returns NULL if the query may be used * as the source for an INSERT ... SELECT or returns a description why not. */ static DeferredErrorMessage * MultiTaskRouterSelectQuerySupported(Query *query) { List *queryList = NIL; ListCell *queryCell = NULL; ExtractQueryWalker((Node *) query, &queryList); foreach(queryCell, queryList) { Query *subquery = (Query *) lfirst(queryCell); Assert(subquery->commandType == CMD_SELECT); /* pushing down rtes without relations yields (shardCount * expectedRows) */ if (subquery->rtable == NIL) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "Subqueries without relations are not allowed in " "distributed INSERT ... SELECT queries", NULL, NULL); } /* pushing down limit per shard would yield wrong results */ if (subquery->limitCount != NULL) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "LIMIT clauses are not allowed in distirbuted INSERT " "... 
SELECT queries", NULL, NULL); } /* pushing down limit offest per shard would yield wrong results */ if (subquery->limitOffset != NULL) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "OFFSET clauses are not allowed in distributed " "INSERT ... SELECT queries", NULL, NULL); } /* * We could potentially support window clauses where the data is partitioned * over distribution column. For simplicity, we currently do not support window * clauses at all. */ if (subquery->windowClause != NULL) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "window functions are not allowed in distributed " "INSERT ... SELECT queries", NULL, NULL); } if (subquery->setOperations != NULL) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "Set operations are not allowed in distributed " "INSERT ... SELECT queries", NULL, NULL); } /* * We currently do not support grouping sets since it could generate NULL * results even after the restrictions are applied to the query. A solution * would be to add the whole query into a subquery and add the restrictions * on that subquery. */ if (subquery->groupingSets != NULL) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "grouping sets are not allowed in distributed " "INSERT ... SELECT queries", NULL, NULL); } /* * We cannot support DISTINCT ON clauses since it could be on a non-partition column. * In that case, there is no way that Citus can support this. */ if (subquery->hasDistinctOn) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "DISTINCT ON clauses are not allowed in distributed " "INSERT ... SELECT queries", NULL, NULL); } } return NULL; } /* * InsertPartitionColumnMatchesSelect returns NULL the partition column in the * table targeted by INSERTed matches with the any of the SELECTed table's * partition column. Returns the error description if there's no match. * * On return without error (i.e., if partition columns match), the function * also sets selectPartitionColumnTableId. */ static DeferredErrorMessage * InsertPartitionColumnMatchesSelect(Query *query, RangeTblEntry *insertRte, RangeTblEntry *subqueryRte, Oid *selectPartitionColumnTableId) { ListCell *targetEntryCell = NULL; uint32 rangeTableId = 1; Oid insertRelationId = insertRte->relid; Var *insertPartitionColumn = PartitionColumn(insertRelationId, rangeTableId); Query *subquery = subqueryRte->subquery; bool targetTableHasPartitionColumn = false; foreach(targetEntryCell, query->targetList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); List *insertTargetEntryColumnList = pull_var_clause_default((Node *) targetEntry); Var *insertVar = NULL; AttrNumber originalAttrNo = InvalidAttrNumber; TargetEntry *subqueryTargetEntry = NULL; Expr *selectTargetExpr = NULL; Oid subqueryPartitionColumnRelationId = InvalidOid; Var *subqueryPartitionColumn = NULL; List *parentQueryList = NIL; /* * We only consider target entries that include a single column. Note that this * is slightly different than directly checking the whether the targetEntry->expr * is a var since the var could be wrapped into an implicit/explicit casting. * * Also note that we skip the target entry if it does not contain a Var, which * corresponds to columns with DEFAULT values on the target list. 
*/ if (list_length(insertTargetEntryColumnList) != 1) { continue; } insertVar = (Var *) linitial(insertTargetEntryColumnList); originalAttrNo = targetEntry->resno; /* skip processing of target table non-partition columns */ if (originalAttrNo != insertPartitionColumn->varattno) { continue; } /* INSERT query includes the partition column */ targetTableHasPartitionColumn = true; subqueryTargetEntry = list_nth(subquery->targetList, insertVar->varattno - 1); selectTargetExpr = subqueryTargetEntry->expr; parentQueryList = list_make2(query, subquery); FindReferencedTableColumn(selectTargetExpr, parentQueryList, subquery, &subqueryPartitionColumnRelationId, &subqueryPartitionColumn); /* * Corresponding (i.e., in the same ordinal position as the target table's * partition column) select target entry does not directly belong a table. * Evaluate its expression type and error out properly. */ if (subqueryPartitionColumnRelationId == InvalidOid) { char *errorDetailTemplate = "Subquery contains %s in the " "same position as the target table's " "partition column."; char *exprDescription = ""; switch (selectTargetExpr->type) { case T_Const: { exprDescription = "a constant value"; break; } case T_OpExpr: { exprDescription = "an operator"; break; } case T_FuncExpr: { FuncExpr *subqueryFunctionExpr = (FuncExpr *) selectTargetExpr; switch (subqueryFunctionExpr->funcformat) { case COERCE_EXPLICIT_CALL: { exprDescription = "a function call"; break; } case COERCE_EXPLICIT_CAST: { exprDescription = "an explicit cast"; break; } case COERCE_IMPLICIT_CAST: { exprDescription = "an implicit cast"; break; } default: { exprDescription = "a function call"; break; } } break; } case T_Aggref: { exprDescription = "an aggregation"; break; } case T_CaseExpr: { exprDescription = "a case expression"; break; } case T_CoalesceExpr: { exprDescription = "a coalesce expression"; break; } case T_RowExpr: { exprDescription = "a row expression"; break; } case T_MinMaxExpr: { exprDescription = "a min/max expression"; break; } case T_CoerceViaIO: { exprDescription = "an explicit coercion"; break; } default: { exprDescription = "an expression that is not a simple column reference"; break; } } return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed INSERT INTO ... SELECT " "because the partition columns in the source table " "and subquery do not match", psprintf(errorDetailTemplate, exprDescription), "Ensure the target table's partition column has a " "corresponding simple column reference to a distributed " "table's partition column in the subquery."); } /* * Insert target expression could only be non-var if the select target * entry does not have the same type (i.e., target column requires casting). */ if (!IsA(targetEntry->expr, Var)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed INSERT INTO ... SELECT " "because the partition columns in the source table " "and subquery do not match", "The data type of the target table's partition column " "should exactly match the data type of the " "corresponding simple column reference in the subquery.", NULL); } /* finally, check that the select target column is a partition column */ if (!IsPartitionColumn(selectTargetExpr, subquery)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed INSERT INTO ... 
SELECT " "becuase the partition columns in the source table " "and subquery do not match", "The target table's partition column should correspond " "to a partition column in the subquery.", NULL); } /* finally, check that the select target column is a partition column */ /* we can set the select relation id */ *selectPartitionColumnTableId = subqueryPartitionColumnRelationId; break; } if (!targetTableHasPartitionColumn) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed INSERT INTO ... SELECT " "because the partition columns in the source table " "and subquery do not match", "the query doesn't include the target table's " "partition column", NULL); } return NULL; } /* * CreatteCoordinatorInsertSelectPlan creates a query plan for a SELECT into a * distributed table. The query plan can also be executed on a worker in MX. */ static MultiPlan * CreateCoordinatorInsertSelectPlan(Query *parse) { Query *insertSelectQuery = copyObject(parse); Query *selectQuery = NULL; RangeTblEntry *selectRte = ExtractSelectRangeTableEntry(insertSelectQuery); RangeTblEntry *insertRte = ExtractInsertRangeTableEntry(insertSelectQuery); Oid targetRelationId = insertRte->relid; MultiPlan *multiPlan = CitusMakeNode(MultiPlan); multiPlan->operation = CMD_INSERT; multiPlan->planningError = CoordinatorInsertSelectSupported(insertSelectQuery); if (multiPlan->planningError != NULL) { return multiPlan; } selectQuery = selectRte->subquery; /* * Wrap the SELECT as a subquery if the INSERT...SELECT has CTEs or the SELECT * has top-level set operations. * * We could simply wrap all queries, but that might create a subquery that is * not supported by the logical planner. Since the logical planner also does * not support CTEs and top-level set operations, we can wrap queries containing * those without breaking anything. */ if (list_length(insertSelectQuery->cteList) > 0) { selectQuery = WrapSubquery(selectRte->subquery); /* copy CTEs from the INSERT ... SELECT statement into outer SELECT */ selectQuery->cteList = copyObject(insertSelectQuery->cteList); } else if (selectQuery->setOperations != NULL) { /* top-level set operations confuse the ReorderInsertSelectTargetLists logic */ selectQuery = WrapSubquery(selectRte->subquery); } selectRte->subquery = selectQuery; ReorderInsertSelectTargetLists(insertSelectQuery, insertRte, selectRte); /* make sure the SELECT returns the right type for copying into the table */ CastSelectTargetList(selectQuery->targetList, targetRelationId, insertSelectQuery->targetList); multiPlan->insertSelectSubquery = selectQuery; multiPlan->insertTargetList = insertSelectQuery->targetList; multiPlan->targetRelationId = targetRelationId; return multiPlan; } /* * CoordinatorInsertSelectSupported returns an error if executing an * INSERT ... SELECT command by pulling results of the SELECT to the coordinator * is unsupported because it uses RETURNING, ON CONFLICT, or an append-distributed * table. */ static DeferredErrorMessage * CoordinatorInsertSelectSupported(Query *insertSelectQuery) { RangeTblEntry *insertRte = NULL; RangeTblEntry *subqueryRte = NULL; Query *subquery = NULL; if (list_length(insertSelectQuery->returningList) > 0) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "RETURNING is not supported in INSERT ... SELECT via " "coordinator", NULL, NULL); } if (insertSelectQuery->onConflict) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "ON CONFLICT is not supported in INSERT ... 
SELECT via " "coordinator", NULL, NULL); } insertRte = ExtractInsertRangeTableEntry(insertSelectQuery); if (PartitionMethod(insertRte->relid) == DISTRIBUTE_BY_APPEND) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "INSERT ... SELECT into an append-distributed table is " "not supported", NULL, NULL); } subqueryRte = ExtractSelectRangeTableEntry(insertSelectQuery); subquery = (Query *) subqueryRte->subquery; if (NeedsDistributedPlanning(subquery) && contain_nextval_expression_walker((Node *) insertSelectQuery->targetList, NULL)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "INSERT ... SELECT cannot generate sequence values when " "selecting from a distributed table", NULL, NULL); } return NULL; } /* * WrapSubquery wraps the given query as a subquery in a newly constructed * "SELECT * FROM (...subquery...) citus_insert_select_subquery" query. */ static Query * WrapSubquery(Query *subquery) { Query *outerQuery = NULL; ParseState *pstate = make_parsestate(NULL); Alias *selectAlias = NULL; RangeTblEntry *newRangeTableEntry = NULL; RangeTblRef *newRangeTableRef = NULL; ListCell *selectTargetCell = NULL; List *newTargetList = NIL; outerQuery = makeNode(Query); outerQuery->commandType = CMD_SELECT; /* create range table entries */ selectAlias = makeAlias("citus_insert_select_subquery", NIL); newRangeTableEntry = addRangeTableEntryForSubquery(pstate, subquery, selectAlias, false, true); outerQuery->rtable = list_make1(newRangeTableEntry); /* set the FROM expression to the subquery */ newRangeTableRef = makeNode(RangeTblRef); newRangeTableRef->rtindex = 1; outerQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL); /* create a target list that matches the SELECT */ foreach(selectTargetCell, subquery->targetList) { TargetEntry *selectTargetEntry = (TargetEntry *) lfirst(selectTargetCell); Var *newSelectVar = NULL; TargetEntry *newSelectTargetEntry = NULL; /* exactly 1 entry in FROM */ int indexInRangeTable = 1; if (selectTargetEntry->resjunk) { continue; } newSelectVar = makeVar(indexInRangeTable, selectTargetEntry->resno, exprType((Node *) selectTargetEntry->expr), exprTypmod((Node *) selectTargetEntry->expr), exprCollation((Node *) selectTargetEntry->expr), 0); newSelectTargetEntry = makeTargetEntry((Expr *) newSelectVar, selectTargetEntry->resno, selectTargetEntry->resname, selectTargetEntry->resjunk); newTargetList = lappend(newTargetList, newSelectTargetEntry); } outerQuery->targetList = newTargetList; return outerQuery; } /* * CastSelectTargetList adds casts to the target entries in selectTargetList * to match the type in insertTargetList. This ensures that the results of * the SELECT will have the right type when serialised during COPY. For * example, a float that is inserted into a an int column normally has an * implicit cast, but if we send it through the COPY protocol the serialised * form would contain decimal notation, which is not valid for int. 
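 *
 * As a concrete (hypothetical) example, with 'CREATE TABLE target (value int)'
 * the statement 'INSERT INTO target SELECT 2.5::float8' works fine through the
 * regular insert path, but serialising the raw SELECT output for COPY would
 * produce '2.5', which is not a valid int literal; the cast added below makes
 * the SELECT itself return an int instead.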
*/ static void CastSelectTargetList(List *selectTargetList, Oid targetRelationId, List *insertTargetList) { ListCell *insertTargetCell = NULL; ListCell *selectTargetCell = NULL; /* add casts when the SELECT output does not directly match the table */ forboth(insertTargetCell, insertTargetList, selectTargetCell, selectTargetList) { TargetEntry *insertTargetEntry = (TargetEntry *) lfirst(insertTargetCell); TargetEntry *selectTargetEntry = (TargetEntry *) lfirst(selectTargetCell); Var *columnVar = NULL; Oid columnType = InvalidOid; int32 columnTypeMod = 0; Oid selectOutputType = InvalidOid; /* indirection is not supported, e.g. INSERT INTO table (composite_column.x) */ if (!IsA(insertTargetEntry->expr, Var)) { ereport(ERROR, (errmsg("can only handle regular columns in the target " "list"))); } columnVar = (Var *) insertTargetEntry->expr; columnType = get_atttype(targetRelationId, columnVar->varattno); columnTypeMod = get_atttypmod(targetRelationId, columnVar->varattno); selectOutputType = columnVar->vartype; /* * If the type in the target list does not match the type of the column, * we need to cast to the column type. PostgreSQL would do this * automatically during the insert, but we're passing the SELECT * output directly to COPY. */ if (columnType != selectOutputType) { Expr *selectExpression = selectTargetEntry->expr; Expr *typeCastedSelectExpr = (Expr *) coerce_to_target_type(NULL, (Node *) selectExpression, selectOutputType, columnType, columnTypeMod, COERCION_EXPLICIT, COERCE_IMPLICIT_CAST, -1); selectTargetEntry->expr = typeCastedSelectExpr; } } } citus-7.0.3/src/backend/distributed/planner/multi_explain.c000066400000000000000000000534101317107136600240350ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_explain.c * Citus explain support. * * Copyright (c) 2012-2016, Citus Data, Inc. 
*------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "commands/copy.h" #include "commands/createas.h" #include "commands/dbcommands.h" #include "commands/explain.h" #include "commands/tablecmds.h" #include "optimizer/cost.h" #include "distributed/citus_nodefuncs.h" #include "distributed/connection_management.h" #include "distributed/insert_select_planner.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_executor.h" #include "distributed/multi_explain.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_master_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/remote_commands.h" #include "distributed/placement_connection.h" #include "distributed/worker_protocol.h" #include "lib/stringinfo.h" #include "nodes/plannodes.h" #include "nodes/primnodes.h" #include "nodes/print.h" #include "optimizer/clauses.h" #include "optimizer/planner.h" #include "portability/instr_time.h" #include "tcop/dest.h" #include "tcop/tcopprot.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/json.h" #include "utils/lsyscache.h" #include "utils/snapmgr.h" /* OR-able flags for ExplainXMLTag() (explain.c) */ #define X_OPENING 0 #define X_CLOSING 1 #define X_CLOSE_IMMEDIATE 2 #define X_NOWHITESPACE 4 /* Config variables that enable printing distributed query plans */ bool ExplainDistributedQueries = true; bool ExplainAllTasks = false; /* Result for a single remote EXPLAIN command */ typedef struct RemoteExplainPlan { int placementIndex; List *explainOutputList; } RemoteExplainPlan; /* Explain functions for distributed queries */ static void ExplainJob(Job *job, ExplainState *es); static void ExplainMapMergeJob(MapMergeJob *mapMergeJob, ExplainState *es); static void ExplainTaskList(List *taskList, ExplainState *es); static RemoteExplainPlan * RemoteExplain(Task *task, ExplainState *es); static void ExplainTask(Task *task, int placementIndex, List *explainOutputList, ExplainState *es); static void ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOutputList, ExplainState *es); static StringInfo BuildRemoteExplainQuery(char *queryString, ExplainState *es); /* Static Explain functions copied from explain.c */ #if (PG_VERSION_NUM >= 100000) static void ExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv); #else static void ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params); #endif static void ExplainOpenGroup(const char *objtype, const char *labelname, bool labeled, ExplainState *es); static void ExplainCloseGroup(const char *objtype, const char *labelname, bool labeled, ExplainState *es); static void ExplainXMLTag(const char *tagname, int flags, ExplainState *es); static void ExplainJSONLineEnding(ExplainState *es); static void ExplainYAMLLineStarting(ExplainState *es); /* * CitusExplainScan is a custom scan explain callback function which is used to * print explain information of a Citus plan which includes both master and * distributed plan. 
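* In text format the output roughly looks like this: the Citus custom scan node is followed by the distributed plan, i.e. a Task Count line, a Tasks Shown line (All, One of N, or a note for re-partition queries), and for each shown task a Task entry with the worker Node and that worker's own EXPLAIN output; the exact lines are produced by ExplainJob, ExplainTask and ExplainTaskPlacement below.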
*/ void CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es) { CitusScanState *scanState = (CitusScanState *) node; MultiPlan *multiPlan = scanState->multiPlan; if (!ExplainDistributedQueries) { appendStringInfoSpaces(es->str, es->indent * 2); appendStringInfo(es->str, "explain statements for distributed queries "); appendStringInfo(es->str, "are not enabled\n"); return; } ExplainOpenGroup("Distributed Query", "Distributed Query", true, es); ExplainJob(multiPlan->workerJob, es); ExplainCloseGroup("Distributed Query", "Distributed Query", true, es); } /* * CoordinatorInsertSelectExplainScan is a custom scan explain callback function * which is used to print explain information of a Citus plan for an INSERT INTO * distributed_table SELECT ... query that is evaluated on the coordinator. */ void CoordinatorInsertSelectExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es) { CitusScanState *scanState = (CitusScanState *) node; MultiPlan *multiPlan = scanState->multiPlan; Query *query = multiPlan->insertSelectSubquery; IntoClause *into = NULL; ParamListInfo params = NULL; char *queryString = NULL; if (es->analyze) { /* avoiding double execution here is tricky, error out for now */ ereport(ERROR, (errmsg("EXPLAIN ANALYZE is currently not supported for INSERT " "... SELECT commands via the coordinator"))); } ExplainOpenGroup("Select Query", "Select Query", false, es); /* explain the inner SELECT query */ #if (PG_VERSION_NUM >= 100000) ExplainOneQuery(query, 0, into, es, queryString, params, NULL); #else ExplainOneQuery(query, into, es, queryString, params); #endif ExplainCloseGroup("Select Query", "Select Query", false, es); } /* * ExplainJob shows the EXPLAIN output for a Job in the physical plan of * a distributed query by showing the remote EXPLAIN for the first task, * or all tasks if citus.explain_all_tasks is on. */ static void ExplainJob(Job *job, ExplainState *es) { List *dependedJobList = job->dependedJobList; int dependedJobCount = list_length(dependedJobList); ListCell *dependedJobCell = NULL; List *taskList = job->taskList; int taskCount = list_length(taskList); ExplainOpenGroup("Job", "Job", true, es); ExplainPropertyInteger("Task Count", taskCount, es); if (dependedJobCount > 0) { ExplainPropertyText("Tasks Shown", "None, not supported for re-partition " "queries", es); } else if (ExplainAllTasks || taskCount <= 1) { ExplainPropertyText("Tasks Shown", "All", es); } else { StringInfo tasksShownText = makeStringInfo(); appendStringInfo(tasksShownText, "One of %d", taskCount); ExplainPropertyText("Tasks Shown", tasksShownText->data, es); } /* * We cannot fetch EXPLAIN plans for jobs that have dependencies, since the * intermediate tables have not been created. */ if (dependedJobCount == 0) { ExplainOpenGroup("Tasks", "Tasks", false, es); ExplainTaskList(taskList, es); ExplainCloseGroup("Tasks", "Tasks", false, es); } else { ExplainOpenGroup("Depended Jobs", "Depended Jobs", false, es); /* show explain output for depended jobs, if any */ foreach(dependedJobCell, dependedJobList) { Job *dependedJob = (Job *) lfirst(dependedJobCell); if (CitusIsA(dependedJob, MapMergeJob)) { ExplainMapMergeJob((MapMergeJob *) dependedJob, es); } } ExplainCloseGroup("Depended Jobs", "Depended Jobs", false, es); } ExplainCloseGroup("Job", "Job", true, es); } /* * ExplainMapMergeJob shows a very basic EXPLAIN plan for a MapMergeJob. 
It does * not yet show the EXPLAIN plan for the individual tasks, because this requires * specific logic for getting the query (which is wrapped in a UDF), and the * queries may use intermediate tables that have not been created. */ static void ExplainMapMergeJob(MapMergeJob *mapMergeJob, ExplainState *es) { List *dependedJobList = mapMergeJob->job.dependedJobList; int dependedJobCount = list_length(dependedJobList); ListCell *dependedJobCell = NULL; int mapTaskCount = list_length(mapMergeJob->mapTaskList); int mergeTaskCount = list_length(mapMergeJob->mergeTaskList); if (es->format == EXPLAIN_FORMAT_TEXT) { appendStringInfoSpaces(es->str, es->indent * 2); appendStringInfo(es->str, "-> MapMergeJob\n"); es->indent += 3; } ExplainOpenGroup("MapMergeJob", NULL, true, es); ExplainPropertyInteger("Map Task Count", mapTaskCount, es); ExplainPropertyInteger("Merge Task Count", mergeTaskCount, es); if (dependedJobCount > 0) { ExplainOpenGroup("Depended Jobs", "Depended Jobs", false, es); foreach(dependedJobCell, dependedJobList) { Job *dependedJob = (Job *) lfirst(dependedJobCell); if (CitusIsA(dependedJob, MapMergeJob)) { ExplainMapMergeJob((MapMergeJob *) dependedJob, es); } } ExplainCloseGroup("Depended Jobs", "Depended Jobs", false, es); } ExplainCloseGroup("MapMergeJob", NULL, true, es); if (es->format == EXPLAIN_FORMAT_TEXT) { es->indent -= 3; } } /* * ExplainTaskList shows the remote EXPLAIN for the first task in taskList, * or all tasks if citus.explain_all_tasks is on. */ static void ExplainTaskList(List *taskList, ExplainState *es) { ListCell *taskCell = NULL; ListCell *remoteExplainCell = NULL; List *remoteExplainList = NIL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); RemoteExplainPlan *remoteExplain = NULL; remoteExplain = RemoteExplain(task, es); remoteExplainList = lappend(remoteExplainList, remoteExplain); if (!ExplainAllTasks) { break; } } forboth(taskCell, taskList, remoteExplainCell, remoteExplainList) { Task *task = (Task *) lfirst(taskCell); RemoteExplainPlan *remoteExplain = (RemoteExplainPlan *) lfirst(remoteExplainCell); ExplainTask(task, remoteExplain->placementIndex, remoteExplain->explainOutputList, es); } } /* * RemoteExplain fetches the remote EXPLAIN output for a single * task. It tries each shard placement until one succeeds or all * have failed. */ static RemoteExplainPlan * RemoteExplain(Task *task, ExplainState *es) { StringInfo explainQuery = NULL; List *taskPlacementList = task->taskPlacementList; int placementCount = list_length(taskPlacementList); int placementIndex = 0; RemoteExplainPlan *remotePlan = NULL; remotePlan = (RemoteExplainPlan *) palloc0(sizeof(RemoteExplainPlan)); explainQuery = BuildRemoteExplainQuery(task->queryString, es); /* * Use a coordinated transaction to ensure that we open a transaction block * such that we can set a savepoint. */ BeginOrContinueCoordinatedTransaction(); for (placementIndex = 0; placementIndex < placementCount; placementIndex++) { ShardPlacement *taskPlacement = list_nth(taskPlacementList, placementIndex); MultiConnection *connection = NULL; PGresult *queryResult = NULL; int connectionFlags = 0; int executeResult = 0; remotePlan->placementIndex = placementIndex; connection = GetPlacementConnection(connectionFlags, taskPlacement, NULL); /* try other placements if we fail to connect to this one */ if (PQstatus(connection->pgConn) != CONNECTION_OK) { continue; } RemoteTransactionBeginIfNecessary(connection); /* * Start a savepoint for the explain query.
After running the explain * query, we will rollback to this savepoint. This saves us from side * effects of EXPLAIN ANALYZE on DML queries. */ ExecuteCriticalRemoteCommand(connection, "SAVEPOINT citus_explain_savepoint"); /* run explain query */ executeResult = ExecuteOptionalRemoteCommand(connection, explainQuery->data, &queryResult); if (executeResult != 0) { PQclear(queryResult); ForgetResults(connection); continue; } /* read explain query results */ remotePlan->explainOutputList = ReadFirstColumnAsText(queryResult); PQclear(queryResult); ForgetResults(connection); /* rollback to the savepoint */ ExecuteCriticalRemoteCommand(connection, "ROLLBACK TO SAVEPOINT citus_explain_savepoint"); if (remotePlan->explainOutputList != NIL) { break; } } return remotePlan; } /* * ExplainTask shows the EXPLAIN output for a single task. The output has been * fetched from the placement at index placementIndex. If explainOutputList is NIL, * then the EXPLAIN output could not be fetched from any placement. */ static void ExplainTask(Task *task, int placementIndex, List *explainOutputList, ExplainState *es) { ExplainOpenGroup("Task", NULL, true, es); if (es->format == EXPLAIN_FORMAT_TEXT) { appendStringInfoSpaces(es->str, es->indent * 2); appendStringInfo(es->str, "-> Task\n"); es->indent += 3; } if (explainOutputList != NIL) { List *taskPlacementList = task->taskPlacementList; ShardPlacement *taskPlacement = list_nth(taskPlacementList, placementIndex); ExplainTaskPlacement(taskPlacement, explainOutputList, es); } else { ExplainPropertyText("Error", "Could not get remote plan.", es); } ExplainCloseGroup("Task", NULL, true, es); if (es->format == EXPLAIN_FORMAT_TEXT) { es->indent -= 3; } } /* * ExplainTaskPlacement shows the EXPLAIN output for an individual task placement. * It corrects the indentation of the remote explain output to match the local * output.
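* For example (hypothetical placement): a placement on host worker-1, port 5432 in database postgres yields a line of the form Node: host=worker-1 port=5432 dbname=postgres, followed by the remote plan whose first line is prefixed with an arrow marker and whose subsequent lines are indented so that they line up underneath it.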
*/ static void ExplainTaskPlacement(ShardPlacement *taskPlacement, List *explainOutputList, ExplainState *es) { int savedIndentation = es->indent; StringInfo nodeAddress = makeStringInfo(); char *nodeName = taskPlacement->nodeName; uint32 nodePort = taskPlacement->nodePort; char *nodeDatabase = get_database_name(MyDatabaseId); ListCell *explainOutputCell = NULL; int rowIndex = 0; appendStringInfo(nodeAddress, "host=%s port=%d dbname=%s", nodeName, nodePort, nodeDatabase); ExplainPropertyText("Node", nodeAddress->data, es); ExplainOpenGroup("Remote Plan", "Remote Plan", false, es); if (es->format == EXPLAIN_FORMAT_JSON || es->format == EXPLAIN_FORMAT_YAML) { /* prevent appending the remote EXPLAIN on the same line */ appendStringInfoChar(es->str, '\n'); } foreach(explainOutputCell, explainOutputList) { StringInfo rowString = (StringInfo) lfirst(explainOutputCell); int rowLength = 0; char *lineStart = NULL; rowLength = strlen(rowString->data); lineStart = rowString->data; /* parse the lines in the remote EXPLAIN for proper indentation */ while (lineStart < rowString->data + rowLength) { /* find the end-of-line */ char *lineEnd = strchr(lineStart, '\n'); if (lineEnd == NULL) { /* no end-of-line, use end of row string instead */ lineEnd = rowString->data + rowLength; } /* convert line to a separate string */ *lineEnd = '\0'; /* indentation that is applied to all lines */ appendStringInfoSpaces(es->str, es->indent * 2); if (es->format == EXPLAIN_FORMAT_TEXT && rowIndex == 0) { /* indent the first line of the remote plan with an arrow */ appendStringInfoString(es->str, "-> "); es->indent += 2; } /* show line in the output */ appendStringInfo(es->str, "%s\n", lineStart); /* continue at the start of the next line */ lineStart = lineEnd + 1; } rowIndex++; } ExplainCloseGroup("Remote Plan", "Remote Plan", false, es); if (es->format == EXPLAIN_FORMAT_TEXT) { es->indent = savedIndentation; } } /* * BuildRemoteExplainQuery returns an EXPLAIN query string * to run on a worker node which explicitly contains all * the options in the explain state. */ static StringInfo BuildRemoteExplainQuery(char *queryString, ExplainState *es) { StringInfo explainQuery = makeStringInfo(); char *formatStr = NULL; switch (es->format) { case EXPLAIN_FORMAT_XML: { formatStr = "XML"; break; } case EXPLAIN_FORMAT_JSON: { formatStr = "JSON"; break; } case EXPLAIN_FORMAT_YAML: { formatStr = "YAML"; break; } default: { formatStr = "TEXT"; break; } } appendStringInfo(explainQuery, "EXPLAIN (ANALYZE %s, VERBOSE %s, " "COSTS %s, BUFFERS %s, TIMING %s, " "FORMAT %s) %s", es->analyze ? "TRUE" : "FALSE", es->verbose ? "TRUE" : "FALSE", es->costs ? "TRUE" : "FALSE", es->buffers ? "TRUE" : "FALSE", es->timing ? "TRUE" : "FALSE", formatStr, queryString); return explainQuery; } /* below are private functions copied from explain.c */ /* *INDENT-OFF* */ /* * ExplainOneQuery - * print out the execution plan for one Query * * "into" is NULL unless we are explaining the contents of a CreateTableAsStmt. 
*/ static void #if (PG_VERSION_NUM >= 100000) ExplainOneQuery(Query *query, int cursorOptions, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params, QueryEnvironment *queryEnv) #else ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, const char *queryString, ParamListInfo params) #endif { /* if an advisor plugin is present, let it manage things */ if (ExplainOneQuery_hook) #if (PG_VERSION_NUM >= 100000) (*ExplainOneQuery_hook) (query, cursorOptions, into, es, queryString, params); #else (*ExplainOneQuery_hook) (query, into, es, queryString, params); #endif else { PlannedStmt *plan; instr_time planstart, planduration; INSTR_TIME_SET_CURRENT(planstart); /* plan the query */ #if (PG_VERSION_NUM >= 100000) plan = pg_plan_query(query, cursorOptions, params); #else plan = pg_plan_query(query, into ? 0 : CURSOR_OPT_PARALLEL_OK, params); #endif INSTR_TIME_SET_CURRENT(planduration); INSTR_TIME_SUBTRACT(planduration, planstart); /* run it (if needed) and produce output */ #if (PG_VERSION_NUM >= 100000) ExplainOnePlan(plan, into, es, queryString, params, queryEnv, &planduration); #else ExplainOnePlan(plan, into, es, queryString, params, &planduration); #endif } } /* * Open a group of related objects. * * objtype is the type of the group object, labelname is its label within * a containing object (if any). * * If labeled is true, the group members will be labeled properties, * while if it's false, they'll be unlabeled objects. */ static void ExplainOpenGroup(const char *objtype, const char *labelname, bool labeled, ExplainState *es) { switch (es->format) { case EXPLAIN_FORMAT_TEXT: /* nothing to do */ break; case EXPLAIN_FORMAT_XML: ExplainXMLTag(objtype, X_OPENING, es); es->indent++; break; case EXPLAIN_FORMAT_JSON: ExplainJSONLineEnding(es); appendStringInfoSpaces(es->str, 2 * es->indent); if (labelname) { escape_json(es->str, labelname); appendStringInfoString(es->str, ": "); } appendStringInfoChar(es->str, labeled ? '{' : '['); /* * In JSON format, the grouping_stack is an integer list. 0 means * we've emitted nothing at this grouping level, 1 means we've * emitted something (and so the next item needs a comma). See * ExplainJSONLineEnding(). */ es->grouping_stack = lcons_int(0, es->grouping_stack); es->indent++; break; case EXPLAIN_FORMAT_YAML: /* * In YAML format, the grouping stack is an integer list. 0 means * we've emitted nothing at this grouping level AND this grouping * level is unlabelled and must be marked with "- ". See * ExplainYAMLLineStarting(). */ ExplainYAMLLineStarting(es); if (labelname) { appendStringInfo(es->str, "%s: ", labelname); es->grouping_stack = lcons_int(1, es->grouping_stack); } else { appendStringInfoString(es->str, "- "); es->grouping_stack = lcons_int(0, es->grouping_stack); } es->indent++; break; } } /* * Close a group of related objects. * Parameters must match the corresponding ExplainOpenGroup call. */ static void ExplainCloseGroup(const char *objtype, const char *labelname, bool labeled, ExplainState *es) { switch (es->format) { case EXPLAIN_FORMAT_TEXT: /* nothing to do */ break; case EXPLAIN_FORMAT_XML: es->indent--; ExplainXMLTag(objtype, X_CLOSING, es); break; case EXPLAIN_FORMAT_JSON: es->indent--; appendStringInfoChar(es->str, '\n'); appendStringInfoSpaces(es->str, 2 * es->indent); appendStringInfoChar(es->str, labeled ? 
'}' : ']'); es->grouping_stack = list_delete_first(es->grouping_stack); break; case EXPLAIN_FORMAT_YAML: es->indent--; es->grouping_stack = list_delete_first(es->grouping_stack); break; } } /* * Emit opening or closing XML tag. * * "flags" must contain X_OPENING, X_CLOSING, or X_CLOSE_IMMEDIATE. * Optionally, OR in X_NOWHITESPACE to suppress the whitespace we'd normally * add. * * XML tag names can't contain white space, so we replace any spaces in * "tagname" with dashes. */ static void ExplainXMLTag(const char *tagname, int flags, ExplainState *es) { const char *s; if ((flags & X_NOWHITESPACE) == 0) appendStringInfoSpaces(es->str, 2 * es->indent); appendStringInfoCharMacro(es->str, '<'); if ((flags & X_CLOSING) != 0) appendStringInfoCharMacro(es->str, '/'); for (s = tagname; *s; s++) appendStringInfoCharMacro(es->str, (*s == ' ') ? '-' : *s); if ((flags & X_CLOSE_IMMEDIATE) != 0) appendStringInfoString(es->str, " /"); appendStringInfoCharMacro(es->str, '>'); if ((flags & X_NOWHITESPACE) == 0) appendStringInfoCharMacro(es->str, '\n'); } /* * Emit a JSON line ending. * * JSON requires a comma after each property but the last. To facilitate this, * in JSON format, the text emitted for each property begins just prior to the * preceding line-break (and comma, if applicable). */ static void ExplainJSONLineEnding(ExplainState *es) { Assert(es->format == EXPLAIN_FORMAT_JSON); if (linitial_int(es->grouping_stack) != 0) appendStringInfoChar(es->str, ','); else linitial_int(es->grouping_stack) = 1; appendStringInfoChar(es->str, '\n'); } /* * Indent a YAML line. * * YAML lines are ordinarily indented by two spaces per indentation level. * The text emitted for each property begins just prior to the preceding * line-break, except for the first property in an unlabelled group, for which * it begins immediately after the "- " that introduces the group. The first * property of the group appears on the same line as the opening "- ". */ static void ExplainYAMLLineStarting(ExplainState *es) { Assert(es->format == EXPLAIN_FORMAT_YAML); if (linitial_int(es->grouping_stack) == 0) { linitial_int(es->grouping_stack) = 1; } else { appendStringInfoChar(es->str, '\n'); appendStringInfoSpaces(es->str, es->indent * 2); } } citus-7.0.3/src/backend/distributed/planner/multi_join_order.c000066400000000000000000001410611317107136600245270ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_join_order.c * * Routines for constructing the join order list using a rule-based approach. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "access/nbtree.h" #include "access/heapam.h" #include "access/htup_details.h" #include "catalog/pg_am.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/multi_physical_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/worker_protocol.h" #include "lib/stringinfo.h" #include "optimizer/var.h" #include "nodes/nodeFuncs.h" #include "utils/builtins.h" #include "utils/datum.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" /* Config variables managed via guc.c */ int LargeTableShardCount = 4; /* shard counts for a large table */ bool LogMultiJoinOrder = false; /* print join order as a debugging aid */ /* Function pointer type definition for join rule evaluation functions */ typedef JoinOrderNode *(*RuleEvalFunction) (JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType); static char *RuleNameArray[JOIN_RULE_LAST] = { 0 }; /* ordered join rule names */ static RuleEvalFunction RuleEvalFunctionArray[JOIN_RULE_LAST] = { 0 }; /* join rules */ /* Local functions forward declarations */ static JoinOrderNode * CreateFirstJoinOrderNode(FromExpr *fromExpr, List *tableEntryList); static bool JoinExprListWalker(Node *node, List **joinList); static bool ExtractLeftMostRangeTableIndex(Node *node, int *rangeTableIndex); static List * MergeShardIntervals(List *leftShardIntervalList, List *rightShardIntervalList, JoinType joinType); static bool ShardIntervalsMatch(List *leftShardIntervalList, List *rightShardIntervalList); static List * JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClauseList); static List * BestJoinOrder(List *candidateJoinOrders); static List * FewestOfJoinRuleType(List *candidateJoinOrders, JoinRuleType ruleType); static uint32 JoinRuleTypeCount(List *joinOrder, JoinRuleType ruleTypeToCount); static List * LatestLargeDataTransfer(List *candidateJoinOrders); static void PrintJoinOrderList(List *joinOrder); static uint32 LargeDataTransferLocation(List *joinOrder); static List * TableEntryListDifference(List *lhsTableList, List *rhsTableList); static TableEntry * FindTableEntry(List *tableEntryList, uint32 tableId); /* Local functions forward declarations for join evaluations */ static JoinOrderNode * EvaluateJoinRules(List *joinedTableList, JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *joinClauseList, JoinType joinType); static List * RangeTableIdList(List *tableList); static RuleEvalFunction JoinRuleEvalFunction(JoinRuleType ruleType); static char * JoinRuleName(JoinRuleType ruleType); static JoinOrderNode * BroadcastJoin(JoinOrderNode *joinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType); static JoinOrderNode * LocalJoin(JoinOrderNode *joinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType); static bool JoinOnColumns(Var *currentPartitioncolumn, Var *candidatePartitionColumn, List *joinClauseList); static JoinOrderNode * SinglePartitionJoin(JoinOrderNode *joinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType); static JoinOrderNode * DualPartitionJoin(JoinOrderNode *joinNode, TableEntry *candidateTable, List *candidateShardList, List 
*applicableJoinClauses, JoinType joinType); static JoinOrderNode * CartesianProduct(JoinOrderNode *joinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType); static JoinOrderNode * MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType joinRuleType, Var *partitionColumn, char partitionMethod); /* * FixedJoinOrderList returns a list of join order nodes for the query in the order * specified by the user. This is used to handle join trees that contain OUTER joins. * The regular JoinOrderList currently assumes that all joins are inner-joins and can * thus be arbitrarily reordered, which is not the case for OUTER joins. At some point * we should merge these two functions. */ List * FixedJoinOrderList(FromExpr *fromExpr, List *tableEntryList) { List *joinList = NIL; ListCell *joinCell = NULL; List *joinWhereClauseList = NIL; List *joinOrderList = NIL; List *joinedTableList = NIL; JoinOrderNode *firstJoinNode = NULL; JoinOrderNode *currentJoinNode = NULL; ListCell *tableEntryCell = NULL; foreach(tableEntryCell, tableEntryList) { TableEntry *rangeTableEntry = (TableEntry *) lfirst(tableEntryCell); Oid relationId = rangeTableEntry->relationId; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); if (cacheEntry->partitionMethod != DISTRIBUTE_BY_NONE && cacheEntry->hasUninitializedShardInterval) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning on this query"), errdetail("Shards of relations in outer join queries must " "have shard min/max values."))); } } /* get the FROM section as a flattened list of JoinExpr nodes */ joinList = JoinExprList(fromExpr); /* get the join clauses in the WHERE section for implicit joins */ joinWhereClauseList = JoinClauseList((List *) fromExpr->quals); /* create join node for the first table */ firstJoinNode = CreateFirstJoinOrderNode(fromExpr, tableEntryList); /* add first node to the join order */ joinOrderList = list_make1(firstJoinNode); joinedTableList = list_make1(firstJoinNode->tableEntry); currentJoinNode = firstJoinNode; foreach(joinCell, joinList) { JoinExpr *joinExpr = (JoinExpr *) lfirst(joinCell); List *onClauseList = list_copy((List *) joinExpr->quals); List *joinClauseList = list_copy((List *) joinExpr->quals); JoinType joinType = joinExpr->jointype; RangeTblRef *nextRangeTableRef = NULL; TableEntry *nextTable = NULL; JoinOrderNode *nextJoinNode = NULL; List *candidateShardList = NIL; Node *rightArg = joinExpr->rarg; /* get the table on the right hand side of the join */ if (IsA(rightArg, RangeTblRef)) { nextRangeTableRef = (RangeTblRef *) rightArg; } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning on this query"), errdetail("Subqueries in outer joins are not supported"))); } nextTable = FindTableEntry(tableEntryList, nextRangeTableRef->rtindex); if (joinType == JOIN_INNER) { /* also consider WHERE clauses for INNER joins */ joinClauseList = list_concat(joinClauseList, joinWhereClauseList); } /* get the sorted list of shards to check broadcast/local join possibility */ candidateShardList = LoadShardIntervalList(nextTable->relationId); /* find the best join rule type */ nextJoinNode = EvaluateJoinRules(joinedTableList, currentJoinNode, nextTable, candidateShardList, joinClauseList, joinType); if (nextJoinNode->joinRuleType == BROADCAST_JOIN) { if (joinType == JOIN_RIGHT || joinType == JOIN_FULL) { /* the overall interval list is now the same as the right side */ 
nextJoinNode->shardIntervalList = candidateShardList; } else if (list_length(candidateShardList) == 1) { /* the overall interval list is now the same as the left side */ nextJoinNode->shardIntervalList = currentJoinNode->shardIntervalList; } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning on this " "query"), errdetail("Cannot perform outer joins with broadcast " "joins of more than 1 shard"), errhint("Set citus.large_table_shard_count to 1"))); } } else if (nextJoinNode->joinRuleType == LOCAL_PARTITION_JOIN) { /* shard interval lists must have 1-1 matching for local joins */ bool shardIntervalsMatch = ShardIntervalsMatch(currentJoinNode->shardIntervalList, candidateShardList); if (shardIntervalsMatch) { nextJoinNode->shardIntervalList = MergeShardIntervals(currentJoinNode->shardIntervalList, candidateShardList, joinType); } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning on this " "query"), errdetail("Shards of relations in outer join queries " "must have 1-to-1 shard partitioning"))); } } else { /* re-partitioning for OUTER joins is not implemented */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot run outer join query if join is not on the " "partition column"), errdetail("Outer joins requiring repartitioning are not " "supported."))); } if (joinType != JOIN_INNER) { /* preserve non-join clauses for OUTER joins */ nextJoinNode->joinClauseList = onClauseList; } /* add next node to the join order */ joinOrderList = lappend(joinOrderList, nextJoinNode); joinedTableList = lappend(joinedTableList, nextTable); currentJoinNode = nextJoinNode; } if (LogMultiJoinOrder) { PrintJoinOrderList(joinOrderList); } return joinOrderList; } /* * CreateFirstJoinOrderNode creates the join order node for the left-most table in the * join tree. */ static JoinOrderNode * CreateFirstJoinOrderNode(FromExpr *fromExpr, List *tableEntryList) { JoinOrderNode *firstJoinNode = NULL; TableEntry *firstTable = NULL; JoinRuleType firstJoinRule = JOIN_RULE_INVALID_FIRST; Var *firstPartitionColumn = NULL; char firstPartitionMethod = '\0'; int rangeTableIndex = 0; ExtractLeftMostRangeTableIndex((Node *) fromExpr, &rangeTableIndex); firstTable = FindTableEntry(tableEntryList, rangeTableIndex); firstPartitionColumn = PartitionColumn(firstTable->relationId, firstTable->rangeTableId); firstPartitionMethod = PartitionMethod(firstTable->relationId); firstJoinNode = MakeJoinOrderNode(firstTable, firstJoinRule, firstPartitionColumn, firstPartitionMethod); firstJoinNode->shardIntervalList = LoadShardIntervalList(firstTable->relationId); return firstJoinNode; } /* * JoinExprList flattens the JoinExpr nodes in the FROM expression and translate implicit * joins to inner joins. This function does not consider (right-)nested joins. 
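* As a hypothetical example: for a query written as FROM a JOIN b ON (a.x = b.x), c the explicit a JOIN b appears in the list as-is, while c is treated as if it were inner-joined to the preceding tables, with its join condition expected to come from the WHERE clause.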
*/ List * JoinExprList(FromExpr *fromExpr) { List *joinList = NIL; List *fromList = fromExpr->fromlist; ListCell *fromCell = NULL; foreach(fromCell, fromList) { Node *nextNode = (Node *) lfirst(fromCell); if (joinList != NIL) { /* multiple nodes in from clause, add an explicit join between them */ JoinExpr *newJoinExpr = NULL; RangeTblRef *nextRangeTableRef = NULL; int nextRangeTableIndex = 0; /* find the left most range table in this node */ ExtractLeftMostRangeTableIndex((Node *) fromExpr, &nextRangeTableIndex); nextRangeTableRef = makeNode(RangeTblRef); nextRangeTableRef->rtindex = nextRangeTableIndex; /* join the previous node with nextRangeTableRef */ newJoinExpr = makeNode(JoinExpr); newJoinExpr->jointype = JOIN_INNER; newJoinExpr->rarg = (Node *) nextRangeTableRef; newJoinExpr->quals = NULL; } JoinExprListWalker(nextNode, &joinList); } return joinList; } /* * JoinExprListWalker the JoinExpr nodes in a join tree in the order in which joins are * to be executed. If there are no joins then no elements are added to joinList. */ static bool JoinExprListWalker(Node *node, List **joinList) { bool walkerResult = false; if (node == NULL) { return false; } if (IsA(node, JoinExpr)) { JoinExpr *joinExpr = (JoinExpr *) node; walkerResult = JoinExprListWalker(joinExpr->larg, joinList); (*joinList) = lappend(*joinList, joinExpr); } else { walkerResult = expression_tree_walker(node, JoinExprListWalker, joinList); } return walkerResult; } /* * ExtractLeftMostRangeTableIndex extracts the range table index of the left-most * leaf in a join tree. */ static bool ExtractLeftMostRangeTableIndex(Node *node, int *rangeTableIndex) { bool walkerResult = false; Assert(node != NULL); if (IsA(node, JoinExpr)) { JoinExpr *joinExpr = (JoinExpr *) node; walkerResult = ExtractLeftMostRangeTableIndex(joinExpr->larg, rangeTableIndex); } else if (IsA(node, RangeTblRef)) { RangeTblRef *rangeTableRef = (RangeTblRef *) node; *rangeTableIndex = rangeTableRef->rtindex; walkerResult = true; } else { walkerResult = expression_tree_walker(node, ExtractLeftMostRangeTableIndex, rangeTableIndex); } return walkerResult; } /* * MergeShardIntervals merges given shard interval lists. It assumes that both lists * have the same number of shard intervals, and each shard interval overlaps only with * a corresponding shard interval from the other shard interval list. It uses union or * intersection logic when merging two shard intervals depending on joinType. 
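* As a small worked example with hypothetical ranges: merging a left interval [0, 10] with its matching right interval [5, 15] yields [0, 15] under the union logic used for outer joins, and [5, 10] under the intersection logic used for inner joins.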
*/ static List * MergeShardIntervals(List *leftShardIntervalList, List *rightShardIntervalList, JoinType joinType) { FmgrInfo *comparisonFunction = NULL; ShardInterval *firstShardInterval = NULL; Oid typeId = InvalidOid; bool typeByValue = false; int typeLen = 0; ListCell *leftShardIntervalCell = NULL; ListCell *rightShardIntervalCell = NULL; List *mergedShardIntervalList = NIL; bool shardUnion = IS_OUTER_JOIN(joinType); Assert(list_length(leftShardIntervalList) > 0); Assert(list_length(leftShardIntervalList) == list_length(rightShardIntervalList)); firstShardInterval = (ShardInterval *) linitial(leftShardIntervalList); typeId = firstShardInterval->valueTypeId; typeByValue = firstShardInterval->valueByVal; typeLen = firstShardInterval->valueTypeLen; comparisonFunction = GetFunctionInfo(typeId, BTREE_AM_OID, BTORDER_PROC); forboth(leftShardIntervalCell, leftShardIntervalList, rightShardIntervalCell, rightShardIntervalList) { ShardInterval *currentInterval = (ShardInterval *) lfirst(leftShardIntervalCell); ShardInterval *nextInterval = (ShardInterval *) lfirst(rightShardIntervalCell); ShardInterval *newShardInterval = NULL; Datum currentMin = currentInterval->minValue; Datum currentMax = currentInterval->maxValue; newShardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); CopyShardInterval(currentInterval, newShardInterval); if (nextInterval->minValueExists) { Datum nextMin = nextInterval->minValue; Datum comparisonDatum = CompareCall2(comparisonFunction, currentMin, nextMin); int comparisonResult = DatumGetInt32(comparisonDatum); bool nextMinSmaller = comparisonResult > 0; bool nextMinLarger = comparisonResult < 0; if ((shardUnion && nextMinSmaller) || (!shardUnion && nextMinLarger)) { newShardInterval->minValue = datumCopy(nextMin, typeByValue, typeLen); } } if (nextInterval->maxValueExists) { Datum nextMax = nextInterval->maxValue; Datum comparisonDatum = CompareCall2(comparisonFunction, currentMax, nextMax); int comparisonResult = DatumGetInt32(comparisonDatum); bool nextMaxLarger = comparisonResult < 0; bool nextMaxSmaller = comparisonResult > 0; if ((shardUnion && nextMaxLarger) || (!shardUnion && nextMaxSmaller)) { newShardInterval->maxValue = datumCopy(nextMax, typeByValue, typeLen); } } mergedShardIntervalList = lappend(mergedShardIntervalList, newShardInterval); } return mergedShardIntervalList; } /* * JoinOnColumns determines whether two columns are joined by a given join clause * list. */ static bool JoinOnColumns(Var *currentColumn, Var *candidateColumn, List *joinClauseList) { ListCell *joinClauseCell = NULL; bool joinOnColumns = false; foreach(joinClauseCell, joinClauseList) { OpExpr *joinClause = (OpExpr *) lfirst(joinClauseCell); Var *leftColumn = LeftColumn(joinClause); Var *rightColumn = RightColumn(joinClause); /* check if both join columns and both partition key columns match */ if (equal(leftColumn, currentColumn) && equal(rightColumn, candidateColumn)) { joinOnColumns = true; break; } if (equal(leftColumn, candidateColumn) && equal(rightColumn, currentColumn)) { joinOnColumns = true; break; } } return joinOnColumns; } /* * ShardIntervalsMatch returns true if the provided shard interval lists have * one-to-one matching. The shard interval lists must not be empty, and their * intervals must be in ascending order of range min values. Shard interval ranges * are said to match only if (1) they have the same number of shards, (2) each shard * interval on the left side overlaps with the corresponding shard on the right side, * and (3) a shard interval on the right side does not overlap with any other shard. * The function does not compare a left shard with every right shard. It compares the * left shard with the previous and next shards of the corresponding shard to check * that they do not overlap, for optimization purposes. */ static bool ShardIntervalsMatch(List *leftShardIntervalList, List *rightShardIntervalList) { int leftShardIntervalCount = list_length(leftShardIntervalList); int rightShardIntervalCount = list_length(rightShardIntervalList); ListCell *leftShardIntervalCell = NULL; ListCell *rightShardIntervalCell = NULL; ShardInterval *previousRightInterval = NULL; /* we do not support outer join queries on tables with no shards */ if (leftShardIntervalCount == 0 || rightShardIntervalCount == 0) { return false; } if (leftShardIntervalCount != rightShardIntervalCount) { return false; } forboth(leftShardIntervalCell, leftShardIntervalList, rightShardIntervalCell, rightShardIntervalList) { ShardInterval *leftInterval = (ShardInterval *) lfirst(leftShardIntervalCell); ShardInterval *rightInterval = (ShardInterval *) lfirst(rightShardIntervalCell); ListCell *nextRightIntervalCell = NULL; bool shardIntervalsIntersect = ShardIntervalsOverlap(leftInterval, rightInterval); if (!shardIntervalsIntersect) { return false; } /* * Compare left interval with a previous right interval, they should not * intersect. */ if (previousRightInterval != NULL) { shardIntervalsIntersect = ShardIntervalsOverlap(leftInterval, previousRightInterval); if (shardIntervalsIntersect) { return false; } } /* * Compare left interval with a next right interval, they should not * intersect. */ nextRightIntervalCell = lnext(rightShardIntervalCell); if (nextRightIntervalCell != NULL) { ShardInterval *nextRightInterval = (ShardInterval *) lfirst(nextRightIntervalCell); shardIntervalsIntersect = ShardIntervalsOverlap(leftInterval, nextRightInterval); if (shardIntervalsIntersect) { return false; } } previousRightInterval = rightInterval; } return true; } /* * JoinOrderList calculates the best join order and join rules that apply given * the list of tables and join clauses. First, the function generates a set of * candidate join orders, each with a different table as its first table. Then, * the function chooses among these candidates the join order that transfers the * least amount of data across the network, and returns this join order. */ List * JoinOrderList(List *tableEntryList, List *joinClauseList) { List *bestJoinOrder = NIL; List *candidateJoinOrderList = NIL; ListCell *tableEntryCell = NULL; foreach(tableEntryCell, tableEntryList) { TableEntry *startingTable = (TableEntry *) lfirst(tableEntryCell); List *candidateJoinOrder = NIL; /* each candidate join order starts with a different table */ candidateJoinOrder = JoinOrderForTable(startingTable, tableEntryList, joinClauseList); candidateJoinOrderList = lappend(candidateJoinOrderList, candidateJoinOrder); } bestJoinOrder = BestJoinOrder(candidateJoinOrderList); /* if logging is enabled, print join order */ if (LogMultiJoinOrder) { PrintJoinOrderList(bestJoinOrder); } return bestJoinOrder; } /* * JoinOrderForTable creates a join order whose first element is the given first * table.
To determine each subsequent element in the join order, the function * then chooses the table that has the lowest ranking join rule, and with which * it can join the table to the previous table in the join order. The function * repeats this until it determines all elements in the join order list, and * returns this list. */ static List * JoinOrderForTable(TableEntry *firstTable, List *tableEntryList, List *joinClauseList) { JoinOrderNode *currentJoinNode = NULL; JoinRuleType firstJoinRule = JOIN_RULE_INVALID_FIRST; List *joinOrderList = NIL; List *joinedTableList = NIL; int joinedTableCount = 1; int totalTableCount = list_length(tableEntryList); /* create join node for the first table */ Oid firstRelationId = firstTable->relationId; uint32 firstTableId = firstTable->rangeTableId; Var *firstPartitionColumn = PartitionColumn(firstRelationId, firstTableId); char firstPartitionMethod = PartitionMethod(firstRelationId); JoinOrderNode *firstJoinNode = MakeJoinOrderNode(firstTable, firstJoinRule, firstPartitionColumn, firstPartitionMethod); /* add first node to the join order */ joinOrderList = list_make1(firstJoinNode); joinedTableList = list_make1(firstTable); currentJoinNode = firstJoinNode; /* loop until we join all remaining tables */ while (joinedTableCount < totalTableCount) { List *pendingTableList = NIL; ListCell *pendingTableCell = NULL; JoinOrderNode *nextJoinNode = NULL; TableEntry *nextJoinedTable = NULL; JoinRuleType nextJoinRuleType = JOIN_RULE_LAST; pendingTableList = TableEntryListDifference(tableEntryList, joinedTableList); /* * Iterate over all pending tables, and find the next best table to * join. The best table is the one whose join rule requires the least * amount of data transfer. */ foreach(pendingTableCell, pendingTableList) { TableEntry *pendingTable = (TableEntry *) lfirst(pendingTableCell); JoinOrderNode *pendingJoinNode = NULL; JoinRuleType pendingJoinRuleType = JOIN_RULE_LAST; JoinType joinType = JOIN_INNER; List *candidateShardList = LoadShardIntervalList(pendingTable->relationId); /* evaluate all join rules for this pending table */ pendingJoinNode = EvaluateJoinRules(joinedTableList, currentJoinNode, pendingTable, candidateShardList, joinClauseList, joinType); /* if this rule is better than previous ones, keep it */ pendingJoinRuleType = pendingJoinNode->joinRuleType; if (pendingJoinRuleType < nextJoinRuleType) { nextJoinNode = pendingJoinNode; nextJoinRuleType = pendingJoinRuleType; } } Assert(nextJoinNode != NULL); nextJoinedTable = nextJoinNode->tableEntry; /* add next node to the join order */ joinOrderList = lappend(joinOrderList, nextJoinNode); joinedTableList = lappend(joinedTableList, nextJoinedTable); currentJoinNode = nextJoinNode; joinedTableCount++; } return joinOrderList; } /* * BestJoinOrder takes in a list of candidate join orders, and determines the * best join order among these candidates. The function uses two heuristics for * this. First, the function chooses join orders that have the fewest number of * join operators that cause large data transfers. Second, the function chooses * join orders where large data transfers occur later in the execution. */ static List * BestJoinOrder(List *candidateJoinOrders) { List *bestJoinOrder = NULL; uint32 ruleTypeIndex = 0; uint32 highestValidIndex = JOIN_RULE_LAST - 1; uint32 candidateCount PG_USED_FOR_ASSERTS_ONLY = 0; /* * We start with the highest ranking rule type (cartesian product), and walk * over these rules in reverse order. 
For each rule type, we then keep join * orders that only contain the fewest number of join rules of that type. * * For example, the algorithm chooses join orders like the following: * (a) The algorithm prefers join orders with 2 cartesian products (CP) to * those that have 3 or more, if there isn't a join order with fewer CPs. * (b) Assuming that all join orders have the same number of CPs, the * algorithm prefers join orders with 2 dual partitions (DP) to those that * have 3 or more, if there isn't a join order with fewer DPs; and so * forth. */ for (ruleTypeIndex = highestValidIndex; ruleTypeIndex > 0; ruleTypeIndex--) { JoinRuleType ruleType = (JoinRuleType) ruleTypeIndex; candidateJoinOrders = FewestOfJoinRuleType(candidateJoinOrders, ruleType); } /* * If there is a tie, we pick candidate join orders where large data * transfers happen at later stages of query execution. This results in more * data being filtered via joins, selections, and projections earlier on. */ candidateJoinOrders = LatestLargeDataTransfer(candidateJoinOrders); /* we should have at least one join order left after optimizations */ candidateCount = list_length(candidateJoinOrders); Assert(candidateCount > 0); /* * If there still is a tie, we pick the join order whose relation appeared * earliest in the query's range table entry list. */ bestJoinOrder = (List *) linitial(candidateJoinOrders); return bestJoinOrder; } /* * FewestOfJoinRuleType finds join orders that have the fewest number of times * the given join rule occurs in the candidate join orders, and filters all * other join orders. For example, if four candidate join orders have a join * rule appearing 3, 5, 3, and 6 times, only two join orders that have the join * rule appearing 3 times will be returned. */ static List * FewestOfJoinRuleType(List *candidateJoinOrders, JoinRuleType ruleType) { List *fewestJoinOrders = NULL; uint32 fewestRuleCount = INT_MAX; ListCell *joinOrderCell = NULL; foreach(joinOrderCell, candidateJoinOrders) { List *joinOrder = (List *) lfirst(joinOrderCell); uint32 ruleTypeCount = JoinRuleTypeCount(joinOrder, ruleType); if (ruleTypeCount == fewestRuleCount) { fewestJoinOrders = lappend(fewestJoinOrders, joinOrder); } else if (ruleTypeCount < fewestRuleCount) { fewestJoinOrders = list_make1(joinOrder); fewestRuleCount = ruleTypeCount; } } return fewestJoinOrders; } /* Counts the number of times the given join rule occurs in the join order. */ static uint32 JoinRuleTypeCount(List *joinOrder, JoinRuleType ruleTypeToCount) { uint32 ruleTypeCount = 0; ListCell *joinOrderNodeCell = NULL; foreach(joinOrderNodeCell, joinOrder) { JoinOrderNode *joinOrderNode = (JoinOrderNode *) lfirst(joinOrderNodeCell); JoinRuleType ruleType = joinOrderNode->joinRuleType; if (ruleType == ruleTypeToCount) { ruleTypeCount++; } } return ruleTypeCount; } /* * LatestLargeDataTransfer finds and returns join orders where a large data * transfer join rule occurs as late as possible in the join order. Late large * data transfers result in more data being filtered before data gets shuffled * in the network. 
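* For example, between two hypothetical candidates where the first repartition or cartesian product appears at position 2 in one order and at position 4 in the other, the second order is kept, since more rows have already been filtered by the cheaper joins that run before the expensive one.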
*/ static List * LatestLargeDataTransfer(List *candidateJoinOrders) { List *latestJoinOrders = NIL; uint32 latestJoinLocation = 0; ListCell *joinOrderCell = NULL; foreach(joinOrderCell, candidateJoinOrders) { List *joinOrder = (List *) lfirst(joinOrderCell); uint32 joinRuleLocation = LargeDataTransferLocation(joinOrder); if (joinRuleLocation == latestJoinLocation) { latestJoinOrders = lappend(latestJoinOrders, joinOrder); } else if (joinRuleLocation > latestJoinLocation) { latestJoinOrders = list_make1(joinOrder); latestJoinLocation = joinRuleLocation; } } return latestJoinOrders; } /* * LargeDataTransferLocation finds the first location of a large data transfer * join rule, and returns that location. If the join order does not have any * large data transfer rules, the function returns one location past the end of * the join order list. */ static uint32 LargeDataTransferLocation(List *joinOrder) { uint32 joinRuleLocation = 0; ListCell *joinOrderNodeCell = NULL; foreach(joinOrderNodeCell, joinOrder) { JoinOrderNode *joinOrderNode = (JoinOrderNode *) lfirst(joinOrderNodeCell); JoinRuleType joinRuleType = joinOrderNode->joinRuleType; /* we consider the following join rules to cause large data transfers */ if (joinRuleType == SINGLE_PARTITION_JOIN || joinRuleType == DUAL_PARTITION_JOIN || joinRuleType == CARTESIAN_PRODUCT) { break; } joinRuleLocation++; } return joinRuleLocation; } /* Prints the join order list and join rules for debugging purposes. */ static void PrintJoinOrderList(List *joinOrder) { StringInfo printBuffer = makeStringInfo(); ListCell *joinOrderNodeCell = NULL; bool firstJoinNode = true; foreach(joinOrderNodeCell, joinOrder) { JoinOrderNode *joinOrderNode = (JoinOrderNode *) lfirst(joinOrderNodeCell); Oid relationId = joinOrderNode->tableEntry->relationId; char *relationName = get_rel_name(relationId); if (firstJoinNode) { appendStringInfo(printBuffer, "[ \"%s\" ]", relationName); firstJoinNode = false; } else { JoinRuleType ruleType = (JoinRuleType) joinOrderNode->joinRuleType; char *ruleName = JoinRuleName(ruleType); appendStringInfo(printBuffer, "[ %s ", ruleName); appendStringInfo(printBuffer, "\"%s\" ]", relationName); } } ereport(LOG, (errmsg("join order: %s", printBuffer->data))); } /* * TableEntryListDifference returns a list containing table entries that are in * the left-hand side table list, but not in the right-hand side table list. */ static List * TableEntryListDifference(List *lhsTableList, List *rhsTableList) { List *tableListDifference = NIL; ListCell *lhsTableCell = NULL; foreach(lhsTableCell, lhsTableList) { TableEntry *lhsTableEntry = (TableEntry *) lfirst(lhsTableCell); ListCell *rhsTableCell = NULL; bool lhsTableEntryExists = false; foreach(rhsTableCell, rhsTableList) { TableEntry *rhsTableEntry = (TableEntry *) lfirst(rhsTableCell); if ((lhsTableEntry->relationId == rhsTableEntry->relationId) && (lhsTableEntry->rangeTableId == rhsTableEntry->rangeTableId)) { lhsTableEntryExists = true; } } if (!lhsTableEntryExists) { tableListDifference = lappend(tableListDifference, lhsTableEntry); } } return tableListDifference; } /* * Finds the table entry in tableEntryList with the given range table id. 
*/ static TableEntry * FindTableEntry(List *tableEntryList, uint32 tableId) { ListCell *tableEntryCell = NULL; foreach(tableEntryCell, tableEntryList) { TableEntry *tableEntry = (TableEntry *) lfirst(tableEntryCell); if (tableEntry->rangeTableId == tableId) { return tableEntry; } } return NULL; } /* * EvaluateJoinRules takes in a list of already joined tables and a candidate * next table, evaluates different join rules between the two tables, and finds * the best join rule that applies. The function returns the applicable join * order node which includes the join rule and the partition information. */ static JoinOrderNode * EvaluateJoinRules(List *joinedTableList, JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *joinClauseList, JoinType joinType) { JoinOrderNode *nextJoinNode = NULL; uint32 candidateTableId = 0; List *joinedTableIdList = NIL; List *applicableJoinClauses = NIL; uint32 lowestValidIndex = JOIN_RULE_INVALID_FIRST + 1; uint32 highestValidIndex = JOIN_RULE_LAST - 1; uint32 ruleIndex = 0; /* * We first find all applicable join clauses between already joined tables * and the candidate table. */ joinedTableIdList = RangeTableIdList(joinedTableList); candidateTableId = candidateTable->rangeTableId; applicableJoinClauses = ApplicableJoinClauses(joinedTableIdList, candidateTableId, joinClauseList); /* we then evaluate all join rules in order */ for (ruleIndex = lowestValidIndex; ruleIndex <= highestValidIndex; ruleIndex++) { JoinRuleType ruleType = (JoinRuleType) ruleIndex; RuleEvalFunction ruleEvalFunction = JoinRuleEvalFunction(ruleType); nextJoinNode = (*ruleEvalFunction)(currentJoinNode, candidateTable, candidateShardList, applicableJoinClauses, joinType); /* break after finding the first join rule that applies */ if (nextJoinNode != NULL) { break; } } Assert(nextJoinNode != NULL); nextJoinNode->joinType = joinType; nextJoinNode->joinClauseList = applicableJoinClauses; return nextJoinNode; } /* Extracts range table identifiers from the given table list, and returns them. */ static List * RangeTableIdList(List *tableList) { List *rangeTableIdList = NIL; ListCell *tableCell = NULL; foreach(tableCell, tableList) { TableEntry *tableEntry = (TableEntry *) lfirst(tableCell); uint32 rangeTableId = tableEntry->rangeTableId; rangeTableIdList = lappend_int(rangeTableIdList, rangeTableId); } return rangeTableIdList; } /* * JoinRuleEvalFunction returns a function pointer for the rule evaluation * function; this rule evaluation function corresponds to the given rule type. * The function also initializes the rule evaluation function array in a static * code block, if the array has not been initialized. */ static RuleEvalFunction JoinRuleEvalFunction(JoinRuleType ruleType) { static bool ruleEvalFunctionsInitialized = false; RuleEvalFunction ruleEvalFunction = NULL; if (!ruleEvalFunctionsInitialized) { RuleEvalFunctionArray[BROADCAST_JOIN] = &BroadcastJoin; RuleEvalFunctionArray[LOCAL_PARTITION_JOIN] = &LocalJoin; RuleEvalFunctionArray[SINGLE_PARTITION_JOIN] = &SinglePartitionJoin; RuleEvalFunctionArray[DUAL_PARTITION_JOIN] = &DualPartitionJoin; RuleEvalFunctionArray[CARTESIAN_PRODUCT] = &CartesianProduct; ruleEvalFunctionsInitialized = true; } ruleEvalFunction = RuleEvalFunctionArray[ruleType]; Assert(ruleEvalFunction != NULL); return ruleEvalFunction; } /* Returns a string name for the given join rule type. 
*/ static char * JoinRuleName(JoinRuleType ruleType) { static bool ruleNamesInitialized = false; char *ruleName = NULL; if (!ruleNamesInitialized) { /* use strdup() to be independent of memory contexts */ RuleNameArray[BROADCAST_JOIN] = strdup("broadcast join"); RuleNameArray[LOCAL_PARTITION_JOIN] = strdup("local partition join"); RuleNameArray[SINGLE_PARTITION_JOIN] = strdup("single partition join"); RuleNameArray[DUAL_PARTITION_JOIN] = strdup("dual partition join"); RuleNameArray[CARTESIAN_PRODUCT] = strdup("cartesian product"); ruleNamesInitialized = true; } ruleName = RuleNameArray[ruleType]; Assert(ruleName != NULL); return ruleName; } /* * BroadcastJoin evaluates if the candidate table is small enough to be * broadcasted to all nodes in the system. If the table can be broadcasted, * the function simply returns a join order node that includes the current * partition key and method. Otherwise, the function returns null. */ static JoinOrderNode * BroadcastJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType) { JoinOrderNode *nextJoinNode = NULL; int candidateShardCount = list_length(candidateShardList); int leftShardCount = list_length(currentJoinNode->shardIntervalList); int applicableJoinCount = list_length(applicableJoinClauses); bool performBroadcastJoin = false; if (applicableJoinCount <= 0) { return NULL; } /* * If the table's shard count doesn't exceed the value specified in the * configuration or the table is a reference table, then we assume table * broadcasting is feasible. This assumption is valid only for inner joins. * * Left join requires candidate table to have single shard, right join requires * existing (left) table to have single shard, full outer join requires both tables * to have single shard. */ if (joinType == JOIN_INNER) { ShardInterval *initialCandidateShardInterval = NULL; char candidatePartitionMethod = '\0'; if (candidateShardCount > 0) { initialCandidateShardInterval = (ShardInterval *) linitial(candidateShardList); candidatePartitionMethod = PartitionMethod(initialCandidateShardInterval->relationId); } if (candidatePartitionMethod == DISTRIBUTE_BY_NONE || candidateShardCount < LargeTableShardCount) { performBroadcastJoin = true; } } else if ((joinType == JOIN_LEFT || joinType == JOIN_ANTI) && candidateShardCount == 1) { performBroadcastJoin = true; } else if (joinType == JOIN_RIGHT && leftShardCount == 1) { performBroadcastJoin = true; } else if (joinType == JOIN_FULL && leftShardCount == 1 && candidateShardCount == 1) { performBroadcastJoin = true; } if (performBroadcastJoin) { nextJoinNode = MakeJoinOrderNode(candidateTable, BROADCAST_JOIN, currentJoinNode->partitionColumn, currentJoinNode->partitionMethod); } return nextJoinNode; } /* * LocalJoin takes the current partition key column and the candidate table's * partition key column and the partition method for each table. The function * then evaluates if tables in the join order and the candidate table can be * joined locally, without any data transfers. If they can, the function returns * a join order node for a local join. Otherwise, the function returns null. 
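* As a hypothetical example: two hash-distributed tables joined on their distribution columns, say orders.customer_id = customers.id with both columns used as partition columns, satisfy this rule, so each pair of matching shards can be joined on the worker that stores them without any repartitioning.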
*/ static JoinOrderNode * LocalJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType) { JoinOrderNode *nextJoinNode = NULL; Oid relationId = candidateTable->relationId; uint32 tableId = candidateTable->rangeTableId; Var *candidatePartitionColumn = PartitionColumn(relationId, tableId); Var *currentPartitionColumn = currentJoinNode->partitionColumn; char candidatePartitionMethod = PartitionMethod(relationId); char currentPartitionMethod = currentJoinNode->partitionMethod; bool joinOnPartitionColumns = false; /* the partition method should be the same for a local join */ if (currentPartitionMethod != candidatePartitionMethod) { return NULL; } joinOnPartitionColumns = JoinOnColumns(currentPartitionColumn, candidatePartitionColumn, applicableJoinClauses); if (joinOnPartitionColumns) { nextJoinNode = MakeJoinOrderNode(candidateTable, LOCAL_PARTITION_JOIN, currentPartitionColumn, currentPartitionMethod); } return nextJoinNode; } /* * SinglePartitionJoin takes the current and the candidate table's partition keys * and methods. The function then evaluates if either "tables in the join order" * or the candidate table is already partitioned on a join column. If they are, * the function returns a join order node with the already partitioned column as * the next partition key. Otherwise, the function returns null. */ static JoinOrderNode * SinglePartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType) { JoinOrderNode *nextJoinNode = NULL; Var *currentPartitionColumn = currentJoinNode->partitionColumn; char currentPartitionMethod = currentJoinNode->partitionMethod; Oid relationId = candidateTable->relationId; uint32 tableId = candidateTable->rangeTableId; Var *candidatePartitionColumn = PartitionColumn(relationId, tableId); char candidatePartitionMethod = PartitionMethod(relationId); /* outer joins are not supported yet */ if (IS_OUTER_JOIN(joinType)) { return NULL; } /* * If we previously dual-hash re-partitioned the tables for a join, we * currently don't allow a single-repartition join. */ if (currentPartitionMethod == REDISTRIBUTE_BY_HASH) { return NULL; } if (currentPartitionMethod != DISTRIBUTE_BY_HASH) { OpExpr *joinClause = SinglePartitionJoinClause(currentPartitionColumn, applicableJoinClauses); if (joinClause != NULL) { nextJoinNode = MakeJoinOrderNode(candidateTable, SINGLE_PARTITION_JOIN, currentPartitionColumn, currentPartitionMethod); } } /* evaluate re-partitioning the current table only if the rule didn't apply above */ if (nextJoinNode == NULL && candidatePartitionMethod != DISTRIBUTE_BY_HASH && candidatePartitionMethod != DISTRIBUTE_BY_NONE) { OpExpr *joinClause = SinglePartitionJoinClause(candidatePartitionColumn, applicableJoinClauses); if (joinClause != NULL) { nextJoinNode = MakeJoinOrderNode(candidateTable, SINGLE_PARTITION_JOIN, candidatePartitionColumn, candidatePartitionMethod); } } return nextJoinNode; } /* * SinglePartitionJoinClause walks over the applicable join clause list, and * finds an applicable join clause for the given partition column. If no such * clause exists, the function returns NULL. 
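*
* As a hypothetical example, if the partition column passed in is
* lineitem.l_orderkey, a clause of the form l_orderkey = o_orderkey is
* returned, provided both sides have the same column type; with such a clause
* only the other relation needs to be repartitioned for the join.
*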
*/ OpExpr * SinglePartitionJoinClause(Var *partitionColumn, List *applicableJoinClauses) { OpExpr *joinClause = NULL; ListCell *applicableJoinClauseCell = NULL; foreach(applicableJoinClauseCell, applicableJoinClauses) { OpExpr *applicableJoinClause = (OpExpr *) lfirst(applicableJoinClauseCell); Var *leftColumn = LeftColumn(applicableJoinClause); Var *rightColumn = RightColumn(applicableJoinClause); /* * We first check if partition column matches either of the join columns * and if it does, we then check if the join column types match. If the * types are different, we will use different hash functions for the two * column types, and will incorrectly repartition the data. */ if (equal(leftColumn, partitionColumn) || equal(rightColumn, partitionColumn)) { if (leftColumn->vartype == rightColumn->vartype) { joinClause = applicableJoinClause; break; } else { ereport(DEBUG1, (errmsg("single partition column types do not match"))); } } } return joinClause; } /* * DualPartitionJoin evaluates if a join clause exists between "tables in the * join order" and the candidate table. If such a clause exists, both tables can * be repartitioned on the join column; and the function returns a join order * node with the join column as the next partition key. Otherwise, the function * returns null. */ static JoinOrderNode * DualPartitionJoin(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType) { JoinOrderNode *nextJoinNode = NULL; OpExpr *joinClause = DualPartitionJoinClause(applicableJoinClauses); if (joinClause) { Var *nextPartitionColumn = LeftColumn(joinClause); nextJoinNode = MakeJoinOrderNode(candidateTable, DUAL_PARTITION_JOIN, nextPartitionColumn, REDISTRIBUTE_BY_HASH); } return nextJoinNode; } /* * DualPartitionJoinClause walks over the applicable join clause list, and finds * an applicable join clause for dual re-partitioning. If no such clause exists, * the function returns NULL. */ OpExpr * DualPartitionJoinClause(List *applicableJoinClauses) { OpExpr *joinClause = NULL; ListCell *applicableJoinClauseCell = NULL; foreach(applicableJoinClauseCell, applicableJoinClauses) { OpExpr *applicableJoinClause = (OpExpr *) lfirst(applicableJoinClauseCell); Var *leftColumn = LeftColumn(applicableJoinClause); Var *rightColumn = RightColumn(applicableJoinClause); /* we only need to check that the join column types match */ if (leftColumn->vartype == rightColumn->vartype) { joinClause = applicableJoinClause; break; } else { ereport(DEBUG1, (errmsg("dual partition column types do not match"))); } } return joinClause; } /* * CartesianProduct always evaluates to true since all tables can be combined * using a cartesian product operator. This function acts as a catch-all rule, * in case none of the join rules apply. 
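*
* For instance, a query that simply lists two distributed tables with no join
* clause between them (e.g. SELECT count(1) FROM t1, t2, with hypothetical
* tables t1 and t2) ends up at this rule, since none of the preceding rules
* can find an applicable join clause.
*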
*/ static JoinOrderNode * CartesianProduct(JoinOrderNode *currentJoinNode, TableEntry *candidateTable, List *candidateShardList, List *applicableJoinClauses, JoinType joinType) { JoinOrderNode *nextJoinNode = MakeJoinOrderNode(candidateTable, CARTESIAN_PRODUCT, currentJoinNode->partitionColumn, currentJoinNode->partitionMethod); return nextJoinNode; } /* Constructs and returns a join-order node with the given arguments */ JoinOrderNode * MakeJoinOrderNode(TableEntry *tableEntry, JoinRuleType joinRuleType, Var *partitionColumn, char partitionMethod) { JoinOrderNode *joinOrderNode = palloc0(sizeof(JoinOrderNode)); joinOrderNode->tableEntry = tableEntry; joinOrderNode->joinRuleType = joinRuleType; joinOrderNode->joinType = JOIN_INNER; joinOrderNode->partitionColumn = partitionColumn; joinOrderNode->partitionMethod = partitionMethod; joinOrderNode->joinClauseList = NIL; return joinOrderNode; } /* * ApplicableJoinClauses finds all join clauses that apply between the given * left table list and the right table, and returns these found join clauses. */ List * ApplicableJoinClauses(List *leftTableIdList, uint32 rightTableId, List *joinClauseList) { ListCell *joinClauseCell = NULL; List *applicableJoinClauses = NIL; /* make sure joinClauseList contains only join clauses */ joinClauseList = JoinClauseList(joinClauseList); foreach(joinClauseCell, joinClauseList) { OpExpr *joinClause = (OpExpr *) lfirst(joinClauseCell); Var *joinLeftColumn = LeftColumn(joinClause); Var *joinRightColumn = RightColumn(joinClause); uint32 joinLeftTableId = joinLeftColumn->varno; uint32 joinRightTableId = joinRightColumn->varno; bool leftListHasJoinLeft = list_member_int(leftTableIdList, joinLeftTableId); bool leftListHasJoinRight = list_member_int(leftTableIdList, joinRightTableId); if ((leftListHasJoinLeft && (rightTableId == joinRightTableId)) || (leftListHasJoinRight && (rightTableId == joinLeftTableId))) { applicableJoinClauses = lappend(applicableJoinClauses, joinClause); } } return applicableJoinClauses; } /* Returns the left column in the given join clause. */ Var * LeftColumn(OpExpr *joinClause) { List *argumentList = joinClause->args; Node *leftArgument = (Node *) linitial(argumentList); List *varList = pull_var_clause_default(leftArgument); Var *leftColumn = NULL; Assert(list_length(varList) == 1); leftColumn = (Var *) linitial(varList); return leftColumn; } /* Returns the right column in the given join clause. */ Var * RightColumn(OpExpr *joinClause) { List *argumentList = joinClause->args; Node *rightArgument = (Node *) lsecond(argumentList); List *varList = pull_var_clause_default(rightArgument); Var *rightColumn = NULL; Assert(list_length(varList) == 1); rightColumn = (Var *) linitial(varList); return rightColumn; } /* * PartitionColumn builds the partition column for the given relation, and sets * the partition column's range table references to the given table identifier. * * Note that reference tables do not have partition column. Thus, this function * returns NULL when called for reference tables. */ Var * PartitionColumn(Oid relationId, uint32 rangeTableId) { Var *partitionKey = DistPartitionKey(relationId); Var *partitionColumn = NULL; /* short circuit for reference tables */ if (partitionKey == NULL) { return partitionColumn; } partitionColumn = partitionKey; partitionColumn->varno = rangeTableId; partitionColumn->varnoold = rangeTableId; return partitionColumn; } /* * DistPartitionKey returns the partition key column for the given relation. 
Note * that in the context of distributed join and query planning, the callers of * this function *must* set the partition key column's range table reference * (varno) to match the table's location in the query range table list. * * Note that reference tables do not have partition column. Thus, this function * returns NULL when called for reference tables. */ Var * DistPartitionKey(Oid relationId) { DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId); Node *variableNode = NULL; Var *partitionKey = NULL; /* reference tables do not have partition column */ if (partitionEntry->partitionMethod == DISTRIBUTE_BY_NONE) { return NULL; } /* now obtain partition key and build the var node */ variableNode = stringToNode(partitionEntry->partitionKeyString); partitionKey = (Var *) variableNode; Assert(IsA(variableNode, Var)); return partitionKey; } /* Returns the partition method for the given relation. */ char PartitionMethod(Oid relationId) { /* errors out if not a distributed table */ DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId); char partitionMethod = partitionEntry->partitionMethod; return partitionMethod; } /* Returns the replication model for the given relation. */ char TableReplicationModel(Oid relationId) { /* errors out if not a distributed table */ DistTableCacheEntry *partitionEntry = DistributedTableCacheEntry(relationId); char replicationModel = partitionEntry->replicationModel; return replicationModel; } citus-7.0.3/src/backend/distributed/planner/multi_logical_optimizer.c000066400000000000000000003373361317107136600261250ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_logical_optimizer.c * Routines for optimizing logical plan trees based on multi-relational * algebra. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/nbtree.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_aggregate.h" #include "catalog/pg_am.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/extension.h" #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/worker_protocol.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/print.h" #include "optimizer/clauses.h" #include "optimizer/tlist.h" #include "optimizer/var.h" #include "parser/parse_agg.h" #include "parser/parse_coerce.h" #include "parser/parse_oper.h" #include "parser/parsetree.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #if (PG_VERSION_NUM >= 100000) #include "utils/regproc.h" #endif #include "utils/rel.h" #include "utils/syscache.h" #include "utils/tqual.h" /* Config variable managed via guc.c */ int LimitClauseRowFetchCount = -1; /* number of rows to fetch from each task */ double CountDistinctErrorRate = 0.0; /* precision of count(distinct) approximate */ typedef struct MasterAggregateWalkerContext { bool repartitionSubquery; AttrNumber columnId; } MasterAggregateWalkerContext; typedef struct WorkerAggregateWalkerContext { bool repartitionSubquery; List *expressionList; bool createGroupByClause; } WorkerAggregateWalkerContext; /* Local functions forward declarations */ static MultiSelect * AndSelectNode(MultiSelect *selectNode); static MultiSelect * OrSelectNode(MultiSelect *selectNode); static List * OrSelectClauseList(List *selectClauseList); static void PushDownNodeLoop(MultiUnaryNode *currentNode); static void PullUpCollectLoop(MultiCollect *collectNode); static void AddressProjectSpecialConditions(MultiProject *projectNode); static List * ListCopyDeep(List *nodeList); static PushDownStatus CanPushDown(MultiUnaryNode *parentNode); static PullUpStatus CanPullUp(MultiUnaryNode *childNode); static PushDownStatus Commutative(MultiUnaryNode *parentNode, MultiUnaryNode *childNode); static PushDownStatus Distributive(MultiUnaryNode *parentNode, MultiBinaryNode *childNode); static PullUpStatus Factorizable(MultiBinaryNode *parentNode, MultiUnaryNode *childNode); static List * SelectClauseTableIdList(List *selectClauseList); static MultiUnaryNode * GenerateLeftNode(MultiUnaryNode *currentNode, MultiBinaryNode *binaryNode); static MultiUnaryNode * GenerateRightNode(MultiUnaryNode *currentNode, MultiBinaryNode *binaryNode); static MultiUnaryNode * GenerateNode(MultiUnaryNode *currentNode, MultiNode *childNode); static List * TableIdListColumns(List *tableIdList, List *columnList); static List * TableIdListSelectClauses(List *tableIdList, List *selectClauseList); static void PushDownBelowUnaryChild(MultiUnaryNode *currentNode, MultiUnaryNode *childNode); static void PlaceUnaryNodeChild(MultiUnaryNode *unaryNode, MultiUnaryNode *childNode); static void PlaceBinaryNodeLeftChild(MultiBinaryNode *binaryNode, MultiUnaryNode *newLeftChildNode); static void PlaceBinaryNodeRightChild(MultiBinaryNode *binaryNode, MultiUnaryNode 
*newRightChildNode); static void RemoveUnaryNode(MultiUnaryNode *unaryNode); static void PullUpUnaryNode(MultiUnaryNode *unaryNode); static void ParentSetNewChild(MultiNode *parentNode, MultiNode *oldChildNode, MultiNode *newChildNode); /* Local functions forward declarations for aggregate expressions */ static void ApplyExtendedOpNodes(MultiExtendedOp *originalNode, MultiExtendedOp *masterNode, MultiExtendedOp *workerNode); static void TransformSubqueryNode(MultiTable *subqueryNode); static MultiExtendedOp * MasterExtendedOpNode(MultiExtendedOp *originalOpNode); static Node * MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerContext); static Expr * MasterAggregateExpression(Aggref *originalAggregate, MasterAggregateWalkerContext *walkerContext); static Expr * MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType, AttrNumber *columnId); static Expr * AddTypeConversion(Node *originalAggregate, Node *newExpression); static MultiExtendedOp * WorkerExtendedOpNode(MultiExtendedOp *originalOpNode); static bool WorkerAggregateWalker(Node *node, WorkerAggregateWalkerContext *walkerContext); static List * WorkerAggregateExpressionList(Aggref *originalAggregate, WorkerAggregateWalkerContext *walkerContextry); static AggregateType GetAggregateType(Oid aggFunctionId); static Oid AggregateArgumentType(Aggref *aggregate); static Oid AggregateFunctionOid(const char *functionName, Oid inputType); static Oid TypeOid(Oid schemaId, const char *typeName); /* Local functions forward declarations for count(distinct) approximations */ static char * CountDistinctHashFunctionName(Oid argumentType); static int CountDistinctStorageSize(double approximationErrorRate); static Const * MakeIntegerConst(int32 integerValue); static Const * MakeIntegerConstInt64(int64 integerValue); /* Local functions forward declarations for aggregate expression checks */ static void ErrorIfContainsUnsupportedAggregate(MultiNode *logicalPlanNode); static void ErrorIfUnsupportedArrayAggregate(Aggref *arrayAggregateExpression); static void ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression, MultiNode *logicalPlanNode); static Var * AggregateDistinctColumn(Aggref *aggregateExpression); static bool TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode, Var *distinctColumn); static bool GroupedByColumn(List *groupClauseList, List *targetList, Var *column); /* Local functions forward declarations for limit clauses */ static Node * WorkerLimitCount(MultiExtendedOp *originalOpNode); static List * WorkerSortClauseList(MultiExtendedOp *originalOpNode); static bool CanPushDownLimitApproximate(List *sortClauseList, List *targetList); static bool HasOrderByAggregate(List *sortClauseList, List *targetList); static bool HasOrderByAverage(List *sortClauseList, List *targetList); static bool HasOrderByComplexExpression(List *sortClauseList, List *targetList); static bool HasOrderByHllType(List *sortClauseList, List *targetList); /* * MultiLogicalPlanOptimize applies multi-relational algebra optimizations on * the given logical plan tree. Specifically, the function applies four set of * optimizations in a particular order. * * First, the function splits the search node into two nodes that contain And * and Or clauses, and pushes down the node that contains And clauses. Second, * the function pushes down the project node; this node either contains columns * to return to the user, or aggregate expressions used by the aggregate node. 
* Third, the function pulls up the collect operators in the tree. Fourth, the * function finds the extended operator node, and splits this node into master * and worker extended operator nodes. */ void MultiLogicalPlanOptimize(MultiTreeRoot *multiLogicalPlan) { bool hasOrderByHllType = false; List *selectNodeList = NIL; List *projectNodeList = NIL; List *collectNodeList = NIL; List *extendedOpNodeList = NIL; List *tableNodeList = NIL; ListCell *collectNodeCell = NULL; ListCell *tableNodeCell = NULL; MultiProject *projectNode = NULL; MultiExtendedOp *extendedOpNode = NULL; MultiExtendedOp *masterExtendedOpNode = NULL; MultiExtendedOp *workerExtendedOpNode = NULL; MultiNode *logicalPlanNode = (MultiNode *) multiLogicalPlan; /* check that we can optimize aggregates in the plan */ ErrorIfContainsUnsupportedAggregate(logicalPlanNode); /* * If a select node exists, we use the idempower property to split the node * into two nodes that contain And and Or clauses. If both And and Or nodes * exist, we modify the tree in place to swap the original select node with * And and Or nodes. We then push down the And select node if it exists. */ selectNodeList = FindNodesOfType(logicalPlanNode, T_MultiSelect); if (selectNodeList != NIL) { MultiSelect *selectNode = (MultiSelect *) linitial(selectNodeList); MultiSelect *andSelectNode = AndSelectNode(selectNode); MultiSelect *orSelectNode = OrSelectNode(selectNode); if (andSelectNode != NULL && orSelectNode != NULL) { MultiNode *parentNode = ParentNode((MultiNode *) selectNode); MultiNode *childNode = ChildNode((MultiUnaryNode *) selectNode); Assert(UnaryOperator(parentNode)); SetChild((MultiUnaryNode *) parentNode, (MultiNode *) orSelectNode); SetChild((MultiUnaryNode *) orSelectNode, (MultiNode *) andSelectNode); SetChild((MultiUnaryNode *) andSelectNode, (MultiNode *) childNode); } else if (andSelectNode != NULL && orSelectNode == NULL) { andSelectNode = selectNode; /* no need to modify the tree */ } if (andSelectNode != NULL) { PushDownNodeLoop((MultiUnaryNode *) andSelectNode); } } /* push down the multi project node */ projectNodeList = FindNodesOfType(logicalPlanNode, T_MultiProject); projectNode = (MultiProject *) linitial(projectNodeList); PushDownNodeLoop((MultiUnaryNode *) projectNode); /* pull up collect nodes and merge duplicate collects */ collectNodeList = FindNodesOfType(logicalPlanNode, T_MultiCollect); foreach(collectNodeCell, collectNodeList) { MultiCollect *collectNode = (MultiCollect *) lfirst(collectNodeCell); PullUpCollectLoop(collectNode); } /* * We split the extended operator node into its equivalent master and worker * operator nodes; and if the extended operator has aggregates, we transform * aggregate functions accordingly for the master and worker operator nodes. * If we can push down the limit clause, we also add limit count and sort * clause list to the worker operator node. We then push the worker operator * node below the collect node. 
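*
* As a rough sketch with hypothetical column names: for a query such as
* SELECT key, count(value) ... ORDER BY 2 LIMIT 10, the worker operator node
* computes the per-shard counts (with the sort and limit pushed down when that
* is safe to do), and the master operator node sums the partial counts and
* applies the final ordering and limit over the collected rows.
*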
*/ extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp); extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList); masterExtendedOpNode = MasterExtendedOpNode(extendedOpNode); workerExtendedOpNode = WorkerExtendedOpNode(extendedOpNode); ApplyExtendedOpNodes(extendedOpNode, masterExtendedOpNode, workerExtendedOpNode); tableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); foreach(tableNodeCell, tableNodeList) { MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); if (tableNode->relationId == SUBQUERY_RELATION_ID) { ErrorIfContainsUnsupportedAggregate((MultiNode *) tableNode); TransformSubqueryNode(tableNode); } } /* * When enabled, count(distinct) approximation uses hll as the intermediate * data type. We currently have a mismatch between hll target entry and sort * clause's sortop oid, so we can't push an order by on the hll data type to * the worker node. We check that here and error out if necessary. */ hasOrderByHllType = HasOrderByHllType(workerExtendedOpNode->sortClauseList, workerExtendedOpNode->targetList); if (hasOrderByHllType) { ereport(ERROR, (errmsg("cannot approximate count(distinct) and order by it"), errhint("You might need to disable approximations for either " "count(distinct) or limit through configuration."))); } } /* * AndSelectNode looks for AND clauses in the given select node. If they exist, * the function returns these clauses in a new node. Otherwise, the function * returns null. */ static MultiSelect * AndSelectNode(MultiSelect *selectNode) { MultiSelect *andSelectNode = NULL; List *selectClauseList = selectNode->selectClauseList; List *orSelectClauseList = OrSelectClauseList(selectClauseList); /* AND clauses are select clauses that are not OR clauses */ List *andSelectClauseList = list_difference(selectClauseList, orSelectClauseList); if (andSelectClauseList != NIL) { andSelectNode = CitusMakeNode(MultiSelect); andSelectNode->selectClauseList = andSelectClauseList; } return andSelectNode; } /* * OrSelectNode looks for OR clauses in the given select node. If they exist, * the function returns these clauses in a new node. Otherwise, the function * returns null. */ static MultiSelect * OrSelectNode(MultiSelect *selectNode) { MultiSelect *orSelectNode = NULL; List *selectClauseList = selectNode->selectClauseList; List *orSelectClauseList = OrSelectClauseList(selectClauseList); if (orSelectClauseList != NIL) { orSelectNode = CitusMakeNode(MultiSelect); orSelectNode->selectClauseList = orSelectClauseList; } return orSelectNode; } /* * OrSelectClauseList walks over the select clause list, and returns all clauses * that have OR expressions in them. */ static List * OrSelectClauseList(List *selectClauseList) { List *orSelectClauseList = NIL; ListCell *selectClauseCell = NULL; foreach(selectClauseCell, selectClauseList) { Node *selectClause = (Node *) lfirst(selectClauseCell); bool orClause = or_clause(selectClause); if (orClause) { orSelectClauseList = lappend(orSelectClauseList, selectClause); } } return orSelectClauseList; } /* * PushDownNodeLoop pushes down the current node as far down the plan tree as * possible. For this, the function first addresses any special conditions that * may apply on the current node. Then, the function pushes down the current * node if its child node is unary. If the child is binary, the function splits * the current node into two nodes by applying generation rules, and recurses * into itself to push down these two nodes. 
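*
* In multi-relational algebra terms this is the familiar push-down of selects
* and projects: for example, a select sitting above a join is split so that
* each generated select keeps only the clauses whose tables come from the
* corresponding side of the join, and each generated node is then pushed
* further down on its own.
*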
*/ static void PushDownNodeLoop(MultiUnaryNode *currentNode) { MultiUnaryNode *projectNodeGenerated = NULL; MultiUnaryNode *leftNodeGenerated = NULL; MultiUnaryNode *rightNodeGenerated = NULL; PushDownStatus pushDownStatus = CanPushDown(currentNode); while (pushDownStatus == PUSH_DOWN_VALID || pushDownStatus == PUSH_DOWN_SPECIAL_CONDITIONS) { MultiNode *childNode = currentNode->childNode; bool unaryChild = UnaryOperator(childNode); bool binaryChild = BinaryOperator(childNode); /* * We first check if we can use the idempower property to split the * project node. We split at a partition node as it captures the * minimal set of columns needed from a partition job. After the split * we break from the loop and recursively call pushdown for the * generated project node. */ MultiNode *parentNode = ParentNode((MultiNode *) currentNode); CitusNodeTag currentNodeType = CitusNodeTag(currentNode); CitusNodeTag parentNodeType = CitusNodeTag(parentNode); if (currentNodeType == T_MultiProject && parentNodeType == T_MultiPartition) { projectNodeGenerated = GenerateNode(currentNode, childNode); PlaceUnaryNodeChild(currentNode, projectNodeGenerated); break; } /* address any special conditions before we can perform the pushdown */ if (pushDownStatus == PUSH_DOWN_SPECIAL_CONDITIONS) { MultiProject *projectNode = (MultiProject *) currentNode; Assert(currentNodeType == T_MultiProject); AddressProjectSpecialConditions(projectNode); } if (unaryChild) { MultiUnaryNode *unaryChildNode = (MultiUnaryNode *) childNode; PushDownBelowUnaryChild(currentNode, unaryChildNode); } else if (binaryChild) { MultiBinaryNode *binaryChildNode = (MultiBinaryNode *) childNode; leftNodeGenerated = GenerateLeftNode(currentNode, binaryChildNode); rightNodeGenerated = GenerateRightNode(currentNode, binaryChildNode); /* push down the generated nodes below the binary child node */ PlaceBinaryNodeLeftChild(binaryChildNode, leftNodeGenerated); PlaceBinaryNodeRightChild(binaryChildNode, rightNodeGenerated); /* * Remove the current node, and break out of the push down loop for * the current node. Then, recurse into the push down function for * the newly generated nodes. */ RemoveUnaryNode(currentNode); break; } pushDownStatus = CanPushDown(currentNode); } /* recursively perform pushdown of any nodes generated in the loop */ if (projectNodeGenerated != NULL) { PushDownNodeLoop(projectNodeGenerated); } if (leftNodeGenerated != NULL) { PushDownNodeLoop(leftNodeGenerated); } if (rightNodeGenerated != NULL) { PushDownNodeLoop(rightNodeGenerated); } } /* * PullUpCollectLoop pulls up the collect node as far up as possible in the plan * tree. The function also merges two collect nodes that are direct descendants * of each other by removing the given collect node from the tree. */ static void PullUpCollectLoop(MultiCollect *collectNode) { MultiNode *childNode = NULL; MultiUnaryNode *currentNode = (MultiUnaryNode *) collectNode; PullUpStatus pullUpStatus = CanPullUp(currentNode); while (pullUpStatus == PULL_UP_VALID) { PullUpUnaryNode(currentNode); pullUpStatus = CanPullUp(currentNode); } /* * After pulling up the collect node, if we find that our child node is also * a collect, we merge the two collect nodes together by removing this node. */ childNode = currentNode->childNode; if (CitusIsA(childNode, MultiCollect)) { RemoveUnaryNode(currentNode); } } /* * AddressProjectSpecialConditions adds columns to the project node if necessary * to make the node commutative and distributive with its child node. 
For this, * the function checks for any special conditions between the project and child * node, and determines the child node columns to add for the special conditions * to apply. The function then adds these columns to the project node. */ static void AddressProjectSpecialConditions(MultiProject *projectNode) { MultiNode *childNode = ChildNode((MultiUnaryNode *) projectNode); CitusNodeTag childNodeTag = CitusNodeTag(childNode); List *childColumnList = NIL; /* * We check if we need to include any child columns in the project node to * address the following special conditions. * * SNC1: project node must include child node's projected columns, or * SNC2: project node must include child node's partition column, or * SNC3: project node must include child node's selection columns, or * NSC1: project node must include child node's join columns. */ if (childNodeTag == T_MultiProject) { MultiProject *projectChildNode = (MultiProject *) childNode; List *projectColumnList = projectChildNode->columnList; childColumnList = ListCopyDeep(projectColumnList); } else if (childNodeTag == T_MultiPartition) { MultiPartition *partitionNode = (MultiPartition *) childNode; Var *partitionColumn = partitionNode->partitionColumn; List *partitionColumnList = list_make1(partitionColumn); childColumnList = ListCopyDeep(partitionColumnList); } else if (childNodeTag == T_MultiSelect) { MultiSelect *selectNode = (MultiSelect *) childNode; Node *selectClauseList = (Node *) selectNode->selectClauseList; List *selectList = pull_var_clause_default(selectClauseList); childColumnList = ListCopyDeep(selectList); } else if (childNodeTag == T_MultiJoin) { MultiJoin *joinNode = (MultiJoin *) childNode; Node *joinClauseList = (Node *) joinNode->joinClauseList; List *joinList = pull_var_clause_default(joinClauseList); childColumnList = ListCopyDeep(joinList); } /* * If we need to include any child columns, then find the columns that are * not already in the project column list, and add them. */ if (childColumnList != NIL) { List *projectColumnList = projectNode->columnList; List *newColumnList = list_concat_unique(projectColumnList, childColumnList); projectNode->columnList = newColumnList; } } /* Deep copies the given node list, and returns the deep copied list. */ static List * ListCopyDeep(List *nodeList) { List *nodeCopyList = NIL; ListCell *nodeCell = NULL; foreach(nodeCell, nodeList) { Node *node = (Node *) lfirst(nodeCell); Node *nodeCopy = copyObject(node); nodeCopyList = lappend(nodeCopyList, nodeCopy); } return nodeCopyList; } /* * CanPushDown determines if a particular node can be moved below its child. The * criteria for pushing down a node is determined by multi-relational algebra's * rules for commutativity and distributivity. */ static PushDownStatus CanPushDown(MultiUnaryNode *parentNode) { PushDownStatus pushDownStatus = PUSH_DOWN_INVALID_FIRST; MultiNode *childNode = parentNode->childNode; bool unaryChild = UnaryOperator(childNode); bool binaryChild = BinaryOperator(childNode); if (unaryChild) { pushDownStatus = Commutative(parentNode, (MultiUnaryNode *) childNode); } else if (binaryChild) { pushDownStatus = Distributive(parentNode, (MultiBinaryNode *) childNode); } Assert(pushDownStatus != PUSH_DOWN_INVALID_FIRST); return pushDownStatus; } /* * CanPullUp determines if a particular node can be moved above its parent. The * criteria for pulling up a node is determined by multi-relational algebra's * rules for commutativity and factorizability. 
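*
* The typical example is the collect operator: Factorizable() below reports
* that a collect under a join or cartesian product may be pulled above it,
* which is what allows PullUpCollectLoop() to lift collect nodes towards the
* root and merge adjacent ones.
*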
*/ static PullUpStatus CanPullUp(MultiUnaryNode *childNode) { PullUpStatus pullUpStatus = PULL_UP_INVALID_FIRST; MultiNode *parentNode = ParentNode((MultiNode *) childNode); bool unaryParent = UnaryOperator(parentNode); bool binaryParent = BinaryOperator(parentNode); if (unaryParent) { /* * Evaluate if parent can be pushed down below the child node, since it * is equivalent to pulling up the child above its parent. */ PushDownStatus parentPushDownStatus = PUSH_DOWN_INVALID_FIRST; parentPushDownStatus = Commutative((MultiUnaryNode *) parentNode, childNode); if (parentPushDownStatus == PUSH_DOWN_VALID) { pullUpStatus = PULL_UP_VALID; } else { pullUpStatus = PULL_UP_NOT_VALID; } } else if (binaryParent) { pullUpStatus = Factorizable((MultiBinaryNode *) parentNode, childNode); } Assert(pullUpStatus != PULL_UP_INVALID_FIRST); return pullUpStatus; } /* * Commutative returns a status which denotes whether the given parent node can * be pushed down below its child node using the commutative property. */ static PushDownStatus Commutative(MultiUnaryNode *parentNode, MultiUnaryNode *childNode) { PushDownStatus pushDownStatus = PUSH_DOWN_NOT_VALID; CitusNodeTag parentNodeTag = CitusNodeTag(parentNode); CitusNodeTag childNodeTag = CitusNodeTag(childNode); /* we cannot be commutative with non-query operators */ if (childNodeTag == T_MultiTreeRoot || childNodeTag == T_MultiTable) { return PUSH_DOWN_NOT_VALID; } /* first check for commutative operators and no special conditions */ if ((parentNodeTag == T_MultiPartition && childNodeTag == T_MultiProject) || (parentNodeTag == T_MultiPartition && childNodeTag == T_MultiPartition) || (parentNodeTag == T_MultiPartition && childNodeTag == T_MultiSelect)) { pushDownStatus = PUSH_DOWN_VALID; } if ((parentNodeTag == T_MultiCollect && childNodeTag == T_MultiProject) || (parentNodeTag == T_MultiCollect && childNodeTag == T_MultiCollect) || (parentNodeTag == T_MultiCollect && childNodeTag == T_MultiSelect)) { pushDownStatus = PUSH_DOWN_VALID; } if (parentNodeTag == T_MultiSelect) { pushDownStatus = PUSH_DOWN_VALID; } if (parentNodeTag == T_MultiProject && childNodeTag == T_MultiCollect) { pushDownStatus = PUSH_DOWN_VALID; } /* * The project node is commutative with the below operators given that * its special conditions apply. */ if ((parentNodeTag == T_MultiProject && childNodeTag == T_MultiProject) || (parentNodeTag == T_MultiProject && childNodeTag == T_MultiPartition) || (parentNodeTag == T_MultiProject && childNodeTag == T_MultiSelect) || (parentNodeTag == T_MultiProject && childNodeTag == T_MultiJoin)) { pushDownStatus = PUSH_DOWN_SPECIAL_CONDITIONS; } return pushDownStatus; } /* * Distributive returns a status which denotes whether the given parent node can * be pushed down below its binary child node using the distributive property. */ static PushDownStatus Distributive(MultiUnaryNode *parentNode, MultiBinaryNode *childNode) { PushDownStatus pushDownStatus = PUSH_DOWN_NOT_VALID; CitusNodeTag parentNodeTag = CitusNodeTag(parentNode); CitusNodeTag childNodeTag = CitusNodeTag(childNode); /* special condition checks for partition operator are not implemented */ Assert(parentNodeTag != T_MultiPartition); /* * The project node is distributive with the join operator given that its * special conditions apply. 
*/ if (parentNodeTag == T_MultiProject) { pushDownStatus = PUSH_DOWN_SPECIAL_CONDITIONS; } /* collect node is distributive without special conditions */ if ((parentNodeTag == T_MultiCollect && childNodeTag == T_MultiJoin) || (parentNodeTag == T_MultiCollect && childNodeTag == T_MultiCartesianProduct)) { pushDownStatus = PUSH_DOWN_VALID; } /* * The select node is distributive with a binary operator if all tables in * the select clauses are output by the binary child. The select clauses are * individually AND'd; and therefore this check is sufficient to implement * the NSC3 special condition in multi-relational algebra. */ if ((parentNodeTag == T_MultiSelect && childNodeTag == T_MultiJoin) || (parentNodeTag == T_MultiSelect && childNodeTag == T_MultiCartesianProduct)) { MultiSelect *selectNode = (MultiSelect *) parentNode; List *selectClauseList = selectNode->selectClauseList; List *selectTableIdList = SelectClauseTableIdList(selectClauseList); List *childTableIdList = OutputTableIdList((MultiNode *) childNode); /* find tables that are in select clause list, but not in child list */ List *diffList = list_difference_int(selectTableIdList, childTableIdList); if (diffList == NIL) { pushDownStatus = PUSH_DOWN_VALID; } } return pushDownStatus; } /* * Factorizable returns a status which denotes whether the given unary child * node can be pulled up above its binary parent node using the factorizability * property. The function currently performs this check only for collect node * types; other node types have generation rules that are not yet implemented. */ static PullUpStatus Factorizable(MultiBinaryNode *parentNode, MultiUnaryNode *childNode) { PullUpStatus pullUpStatus = PULL_UP_NOT_VALID; CitusNodeTag parentNodeTag = CitusNodeTag(parentNode); CitusNodeTag childNodeTag = CitusNodeTag(childNode); /* * The following nodes are factorizable with their parents, but we don't * have their generation rules implemented. We therefore assert here. */ Assert(childNodeTag != T_MultiProject); Assert(childNodeTag != T_MultiPartition); Assert(childNodeTag != T_MultiSelect); if ((childNodeTag == T_MultiCollect && parentNodeTag == T_MultiJoin) || (childNodeTag == T_MultiCollect && parentNodeTag == T_MultiCartesianProduct)) { pullUpStatus = PULL_UP_VALID; } return pullUpStatus; } /* * SelectClauseTableIdList finds the (range) table identifier for each select * clause in the given list, and returns these identifiers in a new list. */ static List * SelectClauseTableIdList(List *selectClauseList) { List *tableIdList = NIL; ListCell *selectClauseCell = NULL; foreach(selectClauseCell, selectClauseList) { Node *selectClause = (Node *) lfirst(selectClauseCell); List *selectColumnList = pull_var_clause_default(selectClause); Var *selectColumn = NULL; int selectColumnTableId = 0; if (list_length(selectColumnList) == 0) { /* filter is a constant, e.g. false or 1=0 */ continue; } selectColumn = (Var *) linitial(selectColumnList); selectColumnTableId = (int) selectColumn->varno; tableIdList = lappend_int(tableIdList, selectColumnTableId); } return tableIdList; } /* * GenerateLeftNode splits the current node over the binary node by applying the * generation rule for distributivity in multi-relational algebra. After the * split, the function returns the left node. 
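*
* For example, when a project node is split over a join, the generated left
* node keeps only the columns that belong to tables produced by the join's
* left subtree; GenerateRightNode() does the same for the right subtree.
*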
*/ static MultiUnaryNode * GenerateLeftNode(MultiUnaryNode *currentNode, MultiBinaryNode *binaryNode) { MultiNode *leftChildNode = binaryNode->leftChildNode; MultiUnaryNode *leftNodeGenerated = GenerateNode(currentNode, leftChildNode); return leftNodeGenerated; } /* * GenerateRightNode splits the current node over the binary node by applying * the generation rule for distributivity in multi-relational algebra. After the * split, the function returns the right node. */ static MultiUnaryNode * GenerateRightNode(MultiUnaryNode *currentNode, MultiBinaryNode *binaryNode) { MultiNode *rightChildNode = binaryNode->rightChildNode; MultiUnaryNode *rightNodeGenerated = GenerateNode(currentNode, rightChildNode); return rightNodeGenerated; } /* * GenerateNode determines the current node's type, and applies the relevant * generation node for that node type. If the current node is a project node, * the function creates a new project node with attributes that only have the * child subtree's tables. Else if the current node is a select node, the * function creates a new select node with select clauses that only belong to * the tables output by the child node's subtree. */ static MultiUnaryNode * GenerateNode(MultiUnaryNode *currentNode, MultiNode *childNode) { MultiUnaryNode *generatedNode = NULL; CitusNodeTag currentNodeType = CitusNodeTag(currentNode); List *tableIdList = OutputTableIdList(childNode); if (currentNodeType == T_MultiProject) { MultiProject *projectNode = (MultiProject *) currentNode; List *columnList = copyObject(projectNode->columnList); List *newColumnList = TableIdListColumns(tableIdList, columnList); if (newColumnList != NIL) { MultiProject *newProjectNode = CitusMakeNode(MultiProject); newProjectNode->columnList = newColumnList; generatedNode = (MultiUnaryNode *) newProjectNode; } } else if (currentNodeType == T_MultiSelect) { MultiSelect *selectNode = (MultiSelect *) currentNode; List *selectClauseList = copyObject(selectNode->selectClauseList); List *newSelectClauseList = NIL; newSelectClauseList = TableIdListSelectClauses(tableIdList, selectClauseList); if (newSelectClauseList != NIL) { MultiSelect *newSelectNode = CitusMakeNode(MultiSelect); newSelectNode->selectClauseList = newSelectClauseList; generatedNode = (MultiUnaryNode *) newSelectNode; } } return generatedNode; } /* * TableIdListColumns walks over the given column list, finds columns belonging * to the given table id list, and returns the found columns in a new list. */ static List * TableIdListColumns(List *tableIdList, List *columnList) { List *tableColumnList = NIL; ListCell *columnCell = NULL; foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); int columnTableId = (int) column->varno; bool tableListMember = list_member_int(tableIdList, columnTableId); if (tableListMember) { tableColumnList = lappend(tableColumnList, column); } } return tableColumnList; } /* * TableIdListSelectClauses walks over the given select clause list, finds the * select clauses whose column references belong to the given table list, and * returns the found clauses in a new list. */ static List * TableIdListSelectClauses(List *tableIdList, List *selectClauseList) { List *tableSelectClauseList = NIL; ListCell *selectClauseCell = NULL; foreach(selectClauseCell, selectClauseList) { Node *selectClause = (Node *) lfirst(selectClauseCell); List *selectColumnList = pull_var_clause_default(selectClause); if (list_length(selectColumnList) == 0) { /* filter is a constant, e.g. 
false or 1=0, always include it */ tableSelectClauseList = lappend(tableSelectClauseList, selectClause); } else { Var *selectColumn = (Var *) linitial(selectColumnList); int selectClauseTableId = (int) selectColumn->varno; bool tableIdListMember = list_member_int(tableIdList, selectClauseTableId); if (tableIdListMember) { tableSelectClauseList = lappend(tableSelectClauseList, selectClause); } } } return tableSelectClauseList; } /* Pushes down the current node below its unary child node. */ static void PushDownBelowUnaryChild(MultiUnaryNode *currentNode, MultiUnaryNode *childNode) { MultiNode *parentNode = ParentNode((MultiNode *) currentNode); MultiNode *childChildNode = ChildNode(childNode); /* current node's parent now points to the child node */ ParentSetNewChild(parentNode, (MultiNode *) currentNode, (MultiNode *) childNode); /* current node's child becomes its parent */ SetChild(childNode, (MultiNode *) currentNode); /* current node points to the child node's child */ SetChild(currentNode, childChildNode); } /* * PlaceUnaryNodeChild inserts the new node as a child node under the given * unary node. The function also places the previous child node under the new * child node. */ static void PlaceUnaryNodeChild(MultiUnaryNode *unaryNode, MultiUnaryNode *newChildNode) { MultiNode *oldChildNode = ChildNode(unaryNode); SetChild(unaryNode, (MultiNode *) newChildNode); SetChild(newChildNode, oldChildNode); } /* * PlaceBinaryNodeLeftChild inserts the new left child as the binary node's left * child. The function also places the previous left child below the new child * node. */ static void PlaceBinaryNodeLeftChild(MultiBinaryNode *binaryNode, MultiUnaryNode *newLeftChildNode) { if (newLeftChildNode == NULL) { return; } SetChild(newLeftChildNode, binaryNode->leftChildNode); SetLeftChild(binaryNode, (MultiNode *) newLeftChildNode); } /* * PlaceBinaryNodeRightChild inserts the new right child as the binary node's * right child. The function also places the previous right child below the new * child node. */ static void PlaceBinaryNodeRightChild(MultiBinaryNode *binaryNode, MultiUnaryNode *newRightChildNode) { if (newRightChildNode == NULL) { return; } SetChild(newRightChildNode, binaryNode->rightChildNode); SetRightChild(binaryNode, (MultiNode *) newRightChildNode); } /* Removes the given unary node from the logical plan, and frees the node. */ static void RemoveUnaryNode(MultiUnaryNode *unaryNode) { MultiNode *parentNode = ParentNode((MultiNode *) unaryNode); MultiNode *childNode = ChildNode(unaryNode); /* set parent to directly point to unary node's child */ ParentSetNewChild(parentNode, (MultiNode *) unaryNode, childNode); pfree(unaryNode); } /* Pulls up the given current node above its parent node. 
*/ static void PullUpUnaryNode(MultiUnaryNode *unaryNode) { MultiNode *parentNode = ParentNode((MultiNode *) unaryNode); bool unaryParent = UnaryOperator(parentNode); bool binaryParent = BinaryOperator(parentNode); if (unaryParent) { /* pulling up a node is the same as pushing down the node's unary parent */ MultiUnaryNode *unaryParentNode = (MultiUnaryNode *) parentNode; PushDownBelowUnaryChild(unaryParentNode, unaryNode); } else if (binaryParent) { MultiBinaryNode *binaryParentNode = (MultiBinaryNode *) parentNode; MultiNode *parentParentNode = ParentNode((MultiNode *) binaryParentNode); MultiNode *childNode = unaryNode->childNode; /* make the parent node point to the unary node's child node */ if (binaryParentNode->leftChildNode == ((MultiNode *) unaryNode)) { SetLeftChild(binaryParentNode, childNode); } else { SetRightChild(binaryParentNode, childNode); } /* make the parent parent node point to the unary node */ ParentSetNewChild(parentParentNode, parentNode, (MultiNode *) unaryNode); /* make the unary node point to the (old) parent node */ SetChild(unaryNode, parentNode); } } /* * ParentSetNewChild takes in the given parent node, and replaces the parent's * old child node with the new child node. The function needs the old child node * in case the parent is a binary node and the function needs to determine which * side of the parent node the new child node needs to go to. */ static void ParentSetNewChild(MultiNode *parentNode, MultiNode *oldChildNode, MultiNode *newChildNode) { bool unaryParent = UnaryOperator(parentNode); bool binaryParent = BinaryOperator(parentNode); if (unaryParent) { MultiUnaryNode *unaryParentNode = (MultiUnaryNode *) parentNode; SetChild(unaryParentNode, newChildNode); } else if (binaryParent) { MultiBinaryNode *binaryParentNode = (MultiBinaryNode *) parentNode; /* determine which side of the parent the old child is on */ if (binaryParentNode->leftChildNode == oldChildNode) { SetLeftChild(binaryParentNode, newChildNode); } else { SetRightChild(binaryParentNode, newChildNode); } } } /* * ApplyExtendedOpNodes replaces the original extended operator node with the * master and worker extended operator nodes. The function then pushes down the * worker node below the original node's child node. Note that for the push down * to apply, the original node's child must be a collect node. */ static void ApplyExtendedOpNodes(MultiExtendedOp *originalNode, MultiExtendedOp *masterNode, MultiExtendedOp *workerNode) { MultiNode *parentNode = ParentNode((MultiNode *) originalNode); MultiNode *collectNode = ChildNode((MultiUnaryNode *) originalNode); MultiNode *collectChildNode = ChildNode((MultiUnaryNode *) collectNode); /* original node's child must be a collect node */ Assert(CitusIsA(collectNode, MultiCollect)); Assert(UnaryOperator(parentNode)); /* swap the original aggregate node with the master extended node */ SetChild((MultiUnaryNode *) parentNode, (MultiNode *) masterNode); SetChild((MultiUnaryNode *) masterNode, (MultiNode *) collectNode); /* add the worker extended node below the collect node */ SetChild((MultiUnaryNode *) collectNode, (MultiNode *) workerNode); SetChild((MultiUnaryNode *) workerNode, (MultiNode *) collectChildNode); /* clean up the original extended operator node */ pfree(originalNode); } /* * TransformSubqueryNode splits the extended operator node under subquery * multi table node into its equivalent master and worker operator nodes, and * we transform aggregate functions accordingly for the master and worker * operator nodes. 
We create a partition node based on the first group by * column of the extended operator node and set it as the child of the master * operator node. */ static void TransformSubqueryNode(MultiTable *subqueryNode) { MultiExtendedOp *extendedOpNode = (MultiExtendedOp *) ChildNode((MultiUnaryNode *) subqueryNode); MultiNode *collectNode = ChildNode((MultiUnaryNode *) extendedOpNode); MultiNode *collectChildNode = ChildNode((MultiUnaryNode *) collectNode); MultiExtendedOp *masterExtendedOpNode = MasterExtendedOpNode(extendedOpNode); MultiExtendedOp *workerExtendedOpNode = WorkerExtendedOpNode(extendedOpNode); MultiPartition *partitionNode = CitusMakeNode(MultiPartition); List *groupClauseList = extendedOpNode->groupClauseList; List *targetEntryList = extendedOpNode->targetList; List *groupTargetEntryList = GroupTargetEntryList(groupClauseList, targetEntryList); TargetEntry *groupByTargetEntry = (TargetEntry *) linitial(groupTargetEntryList); Expr *groupByExpression = groupByTargetEntry->expr; /* * If group by is on a function expression, then we create a new column from * function expression result type. Because later while creating partition * tasks, we expect a column type to partition intermediate results. * Note that we will only need partition type. So we set column type to * result type of the function expression, and set other fields of column to * default values. */ if (IsA(groupByExpression, Var)) { partitionNode->partitionColumn = (Var *) groupByExpression; } else if (IsA(groupByExpression, FuncExpr)) { FuncExpr *functionExpression = (FuncExpr *) groupByExpression; Index tableId = 0; AttrNumber columnAttributeNumber = InvalidAttrNumber; Oid columnType = functionExpression->funcresulttype; int32 columnTypeMod = -1; Oid columnCollationOid = InvalidOid; Index columnLevelSup = 0; Var *partitionColumn = makeVar(tableId, columnAttributeNumber, columnType, columnTypeMod, columnCollationOid, columnLevelSup); partitionNode->partitionColumn = partitionColumn; } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot run this subquery"), errdetail("Currently only columns and function expressions " "are allowed in group by expression of subqueries"))); } SetChild((MultiUnaryNode *) subqueryNode, (MultiNode *) masterExtendedOpNode); SetChild((MultiUnaryNode *) masterExtendedOpNode, (MultiNode *) partitionNode); SetChild((MultiUnaryNode *) partitionNode, (MultiNode *) collectNode); SetChild((MultiUnaryNode *) collectNode, (MultiNode *) workerExtendedOpNode); SetChild((MultiUnaryNode *) workerExtendedOpNode, (MultiNode *) collectChildNode); } /* * MasterExtendedOpNode creates the master extended operator node from the given * target entries. The function walks over these target entries; and for entries * with aggregates in them, this function calls the aggregate expression mutator * function. * * Note that the function logically depends on the worker extended operator node * function. If the target entry does not contain aggregate functions, we assume * all work is done on the worker side, and create a column that references the * worker nodes' results. 
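*
* For instance, a plain group by column in the target list simply becomes a
* reference to the matching column of the worker results, whereas an entry
* such as avg(price) (a hypothetical column) is replaced by a master-side
* expression over the partial sum and count columns produced by the workers.
*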
*/ static MultiExtendedOp * MasterExtendedOpNode(MultiExtendedOp *originalOpNode) { MultiExtendedOp *masterExtendedOpNode = NULL; List *targetEntryList = originalOpNode->targetList; List *newTargetEntryList = NIL; ListCell *targetEntryCell = NULL; Node *originalHavingQual = originalOpNode->havingQual; Node *newHavingQual = NULL; MultiNode *parentNode = ParentNode((MultiNode *) originalOpNode); MultiNode *childNode = ChildNode((MultiUnaryNode *) originalOpNode); MasterAggregateWalkerContext *walkerContext = palloc0( sizeof(MasterAggregateWalkerContext)); walkerContext->columnId = 1; walkerContext->repartitionSubquery = false; if (CitusIsA(parentNode, MultiTable) && CitusIsA(childNode, MultiCollect)) { walkerContext->repartitionSubquery = true; } /* iterate over original target entries */ foreach(targetEntryCell, targetEntryList) { TargetEntry *originalTargetEntry = (TargetEntry *) lfirst(targetEntryCell); TargetEntry *newTargetEntry = copyObject(originalTargetEntry); Expr *originalExpression = originalTargetEntry->expr; Expr *newExpression = NULL; bool hasAggregates = contain_agg_clause((Node *) originalExpression); if (hasAggregates) { Node *newNode = MasterAggregateMutator((Node *) originalExpression, walkerContext); newExpression = (Expr *) newNode; } else { /* * The expression does not have any aggregates. We simply make it * reference the output generated by worker nodes. */ const uint32 masterTableId = 1; /* only one table on master node */ Var *column = makeVarFromTargetEntry(masterTableId, originalTargetEntry); column->varattno = walkerContext->columnId; column->varoattno = walkerContext->columnId; walkerContext->columnId++; newExpression = (Expr *) column; } newTargetEntry->expr = newExpression; newTargetEntryList = lappend(newTargetEntryList, newTargetEntry); } if (originalHavingQual != NULL) { newHavingQual = MasterAggregateMutator(originalHavingQual, walkerContext); } masterExtendedOpNode = CitusMakeNode(MultiExtendedOp); masterExtendedOpNode->targetList = newTargetEntryList; masterExtendedOpNode->groupClauseList = originalOpNode->groupClauseList; masterExtendedOpNode->sortClauseList = originalOpNode->sortClauseList; masterExtendedOpNode->limitCount = originalOpNode->limitCount; masterExtendedOpNode->limitOffset = originalOpNode->limitOffset; masterExtendedOpNode->havingQual = newHavingQual; return masterExtendedOpNode; } /* * MasterAggregateMutator walks over the original target entry expression, and * creates the new expression tree to execute on the master node. The function * transforms aggregates, and copies columns; and recurses into the expression * mutator function for all other expression types. * * Please note that the recursive mutator function traverses the expression tree * in depth first order. For this function to set attribute numbers correctly, * WorkerAggregateWalker() *must* walk over the expression tree in the same * depth first order. 
*/ static Node * MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerContext) { Node *newNode = NULL; if (originalNode == NULL) { return NULL; } if (IsA(originalNode, Aggref)) { Aggref *originalAggregate = (Aggref *) originalNode; Expr *newExpression = MasterAggregateExpression(originalAggregate, walkerContext); newNode = (Node *) newExpression; } else if (IsA(originalNode, Var)) { uint32 masterTableId = 1; /* one table on the master node */ Var *newColumn = copyObject((Var *) originalNode); newColumn->varno = masterTableId; newColumn->varattno = walkerContext->columnId; walkerContext->columnId++; newNode = (Node *) newColumn; } else { newNode = expression_tree_mutator(originalNode, MasterAggregateMutator, (void *) walkerContext); } return newNode; } /* * MasterAggregateExpression creates the master aggregate expression using the * original aggregate and aggregate's type information. This function handles * the average, count, and array_agg aggregates separately due to differences * in these aggregate functions' transformations. * * Note that this function has implicit knowledge of the transformations applied * for worker nodes on the original aggregate. The function uses this implicit * knowledge to create the appropriate master function with correct data types. */ static Expr * MasterAggregateExpression(Aggref *originalAggregate, MasterAggregateWalkerContext *walkerContext) { AggregateType aggregateType = GetAggregateType(originalAggregate->aggfnoid); Expr *newMasterExpression = NULL; Expr *typeConvertedExpression = NULL; const uint32 masterTableId = 1; /* one table on the master node */ const Index columnLevelsUp = 0; /* normal column */ const AttrNumber argumentId = 1; /* our aggregates have single arguments */ AggClauseCosts aggregateCosts; if (aggregateType == AGGREGATE_COUNT && originalAggregate->aggdistinct && CountDistinctErrorRate == DISABLE_DISTINCT_APPROXIMATION && walkerContext->repartitionSubquery) { Aggref *aggregate = (Aggref *) copyObject(originalAggregate); List *varList = pull_var_clause_default((Node *) aggregate); ListCell *varCell = NULL; List *uniqueVarList = NIL; int startColumnCount = walkerContext->columnId; /* determine unique vars that were placed in target list by worker */ foreach(varCell, varList) { Var *column = (Var *) lfirst(varCell); uniqueVarList = list_append_unique(uniqueVarList, copyObject(column)); } /* * Go over each var inside aggregate and update their varattno's according to * worker query target entry column index. */ foreach(varCell, varList) { Var *columnToUpdate = (Var *) lfirst(varCell); ListCell *uniqueVarCell = NULL; int columnIndex = 0; foreach(uniqueVarCell, uniqueVarList) { Var *currentVar = (Var *) lfirst(uniqueVarCell); if (equal(columnToUpdate, currentVar)) { break; } columnIndex++; } columnToUpdate->varattno = startColumnCount + columnIndex; columnToUpdate->varoattno = startColumnCount + columnIndex; } /* we added that many columns */ walkerContext->columnId += list_length(uniqueVarList); newMasterExpression = (Expr *) aggregate; } else if (aggregateType == AGGREGATE_COUNT && originalAggregate->aggdistinct && CountDistinctErrorRate != DISABLE_DISTINCT_APPROXIMATION) { /* * If enabled, we check for count(distinct) approximations before count * distincts. For this, we first compute hll_add_agg(hll_hash(column) on * worker nodes, and get hll values. We then gather hlls on the master * node, and compute hll_cardinality(hll_union_agg(hll)). 
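*
* Put differently, a query shaped like SELECT count(DISTINCT user_id) FROM t
* (hypothetical names) has each worker return hll_add_agg(hll_hash(user_id))
* for its shards, while the master applies hll_union_agg() to the collected
* hll values and hll_cardinality() to the union, yielding the approximate
* distinct count.
*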
*/ const int argCount = 1; const int defaultTypeMod = -1; TargetEntry *hllTargetEntry = NULL; Aggref *unionAggregate = NULL; FuncExpr *cardinalityExpression = NULL; /* extract schema name of hll */ Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, false); Oid hllSchemaOid = get_extension_schema(hllId); const char *hllSchemaName = get_namespace_name(hllSchemaOid); Oid unionFunctionId = FunctionOid(hllSchemaName, HLL_UNION_AGGREGATE_NAME, argCount); Oid cardinalityFunctionId = FunctionOid(hllSchemaName, HLL_CARDINALITY_FUNC_NAME, argCount); Oid cardinalityReturnType = get_func_rettype(cardinalityFunctionId); Oid hllType = TypeOid(hllSchemaOid, HLL_TYPE_NAME); Oid hllTypeCollationId = get_typcollation(hllType); Var *hllColumn = makeVar(masterTableId, walkerContext->columnId, hllType, defaultTypeMod, hllTypeCollationId, columnLevelsUp); walkerContext->columnId++; hllTargetEntry = makeTargetEntry((Expr *) hllColumn, argumentId, NULL, false); unionAggregate = makeNode(Aggref); unionAggregate->aggfnoid = unionFunctionId; unionAggregate->aggtype = hllType; unionAggregate->args = list_make1(hllTargetEntry); unionAggregate->aggkind = AGGKIND_NORMAL; unionAggregate->aggfilter = NULL; unionAggregate->aggtranstype = InvalidOid; unionAggregate->aggargtypes = list_make1_oid(unionAggregate->aggtype); unionAggregate->aggsplit = AGGSPLIT_SIMPLE; cardinalityExpression = makeNode(FuncExpr); cardinalityExpression->funcid = cardinalityFunctionId; cardinalityExpression->funcresulttype = cardinalityReturnType; cardinalityExpression->args = list_make1(unionAggregate); newMasterExpression = (Expr *) cardinalityExpression; } else if (aggregateType == AGGREGATE_AVERAGE) { /* * If the original aggregate is an average, we first compute sum(column) * and count(column) on worker nodes. Then, we compute (sum(sum(column)) * / sum(count(column))) on the master node. */ const char *sumAggregateName = AggregateNames[AGGREGATE_SUM]; const char *countAggregateName = AggregateNames[AGGREGATE_COUNT]; Oid argumentType = AggregateArgumentType(originalAggregate); Oid sumFunctionId = AggregateFunctionOid(sumAggregateName, argumentType); Oid countFunctionId = AggregateFunctionOid(countAggregateName, ANYOID); /* calculate the aggregate types that worker nodes are going to return */ Oid workerSumReturnType = get_func_rettype(sumFunctionId); Oid workerCountReturnType = get_func_rettype(countFunctionId); /* create the expression (sum(sum(column)) / sum(count(column))) */ newMasterExpression = MasterAverageExpression(workerSumReturnType, workerCountReturnType, &(walkerContext->columnId)); } else if (aggregateType == AGGREGATE_COUNT) { /* * Count aggregates are handled in two steps. First, worker nodes report * their count results. Then, the master node sums up these results.
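 * For example, count(*) stays count(*) on the workers, and the master evaluates * coalesce(<cast to bigint>(sum(<worker count column>)), 0): sum() over the workers' * bigint counts returns numeric, so the code below coerces the result back to count's * bigint return type and maps NULL (no rows) to 0.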
*/ Var *column = NULL; TargetEntry *columnTargetEntry = NULL; CoerceViaIO *coerceExpr = NULL; Const *zeroConst = NULL; List *coalesceArgs = NULL; CoalesceExpr *coalesceExpr = NULL; /* worker aggregate and original aggregate have the same return type */ Oid workerReturnType = exprType((Node *) originalAggregate); int32 workerReturnTypeMod = exprTypmod((Node *) originalAggregate); Oid workerCollationId = exprCollation((Node *) originalAggregate); const char *sumAggregateName = AggregateNames[AGGREGATE_SUM]; Oid sumFunctionId = AggregateFunctionOid(sumAggregateName, workerReturnType); Oid masterReturnType = get_func_rettype(sumFunctionId); Aggref *newMasterAggregate = copyObject(originalAggregate); newMasterAggregate->aggstar = false; newMasterAggregate->aggdistinct = NULL; newMasterAggregate->aggfnoid = sumFunctionId; newMasterAggregate->aggtype = masterReturnType; newMasterAggregate->aggfilter = NULL; newMasterAggregate->aggtranstype = InvalidOid; newMasterAggregate->aggargtypes = list_make1_oid(newMasterAggregate->aggtype); newMasterAggregate->aggsplit = AGGSPLIT_SIMPLE; column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, workerReturnTypeMod, workerCollationId, columnLevelsUp); walkerContext->columnId++; /* aggref expects its arguments to be wrapped in target entries */ columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, NULL, false); newMasterAggregate->args = list_make1(columnTargetEntry); /* cast numeric sum result to bigint (count's return type) */ coerceExpr = makeNode(CoerceViaIO); coerceExpr->arg = (Expr *) newMasterAggregate; coerceExpr->resulttype = INT8OID; coerceExpr->resultcollid = InvalidOid; coerceExpr->coerceformat = COERCE_IMPLICIT_CAST; coerceExpr->location = -1; /* convert NULL to 0 in case of no rows */ zeroConst = MakeIntegerConstInt64(0); coalesceArgs = list_make2(coerceExpr, zeroConst); coalesceExpr = makeNode(CoalesceExpr); coalesceExpr->coalescetype = INT8OID; coalesceExpr->coalescecollid = InvalidOid; coalesceExpr->args = coalesceArgs; coalesceExpr->location = -1; newMasterExpression = (Expr *) coalesceExpr; } else if (aggregateType == AGGREGATE_ARRAY_AGG) { /* * Array aggregates are handled in two steps. First, we compute array_agg() * on the worker nodes. Then, we gather the arrays on the master and * compute the array_cat_agg() aggregate on them to get the final array. 
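 * For example, if one worker returns {1,2} and another returns {3} for * array_agg(value), the master's array_cat_agg() roughly concatenates the per-worker * arrays into {1,2,3} (in the order the worker results are merged; 'value' is only an * illustrative column name).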
*/ Var *column = NULL; TargetEntry *arrayCatAggArgument = NULL; Aggref *newMasterAggregate = NULL; Oid aggregateFunctionId = InvalidOid; /* worker aggregate and original aggregate have same return type */ Oid workerReturnType = exprType((Node *) originalAggregate); int32 workerReturnTypeMod = exprTypmod((Node *) originalAggregate); Oid workerCollationId = exprCollation((Node *) originalAggregate); /* assert that we do not support array_agg() with distinct or order by */ Assert(!originalAggregate->aggorder); Assert(!originalAggregate->aggdistinct); /* array_cat_agg() takes anyarray as input */ aggregateFunctionId = AggregateFunctionOid(ARRAY_CAT_AGGREGATE_NAME, ANYARRAYOID); /* create argument for the array_cat_agg() aggregate */ column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, workerReturnTypeMod, workerCollationId, columnLevelsUp); arrayCatAggArgument = makeTargetEntry((Expr *) column, argumentId, NULL, false); walkerContext->columnId++; /* construct the master array_cat_agg() expression */ newMasterAggregate = copyObject(originalAggregate); newMasterAggregate->aggfnoid = aggregateFunctionId; newMasterAggregate->args = list_make1(arrayCatAggArgument); newMasterAggregate->aggfilter = NULL; newMasterAggregate->aggtranstype = InvalidOid; newMasterAggregate->aggargtypes = list_make1_oid(ANYARRAYOID); newMasterAggregate->aggsplit = AGGSPLIT_SIMPLE; newMasterExpression = (Expr *) newMasterAggregate; } else { /* * All other aggregates are handled as they are. These include sum, min, * and max. */ Var *column = NULL; TargetEntry *columnTargetEntry = NULL; /* worker aggregate and original aggregate have the same return type */ Oid workerReturnType = exprType((Node *) originalAggregate); int32 workerReturnTypeMod = exprTypmod((Node *) originalAggregate); Oid workerCollationId = exprCollation((Node *) originalAggregate); const char *aggregateName = AggregateNames[aggregateType]; Oid aggregateFunctionId = AggregateFunctionOid(aggregateName, workerReturnType); Oid masterReturnType = get_func_rettype(aggregateFunctionId); Aggref *newMasterAggregate = copyObject(originalAggregate); newMasterAggregate->aggdistinct = NULL; newMasterAggregate->aggfnoid = aggregateFunctionId; newMasterAggregate->aggtype = masterReturnType; newMasterAggregate->aggfilter = NULL; column = makeVar(masterTableId, walkerContext->columnId, workerReturnType, workerReturnTypeMod, workerCollationId, columnLevelsUp); walkerContext->columnId++; /* aggref expects its arguments to be wrapped in target entries */ columnTargetEntry = makeTargetEntry((Expr *) column, argumentId, NULL, false); newMasterAggregate->args = list_make1(columnTargetEntry); newMasterExpression = (Expr *) newMasterAggregate; } /* * Aggregate functions could have changed the return type. If so, we wrap * the new expression with a conversion function to make it have the same * type as the original aggregate. We need this since functions like sorting * and grouping have already been chosen based on the original type. 
*/ typeConvertedExpression = AddTypeConversion((Node *) originalAggregate, (Node *) newMasterExpression); if (typeConvertedExpression != NULL) { newMasterExpression = typeConvertedExpression; } /* Run AggRefs through cost machinery to mark required fields sanely */ memset(&aggregateCosts, 0, sizeof(aggregateCosts)); get_agg_clause_costs(NULL, (Node *) newMasterExpression, AGGSPLIT_SIMPLE, &aggregateCosts); return newMasterExpression; } /* * MasterAverageExpression creates an expression of the form (sum(column1) / * sum(column2)), where column1 is the sum of the original value, and column2 is * the count of that value. This expression allows us to evaluate the average * function over distributed data. */ static Expr * MasterAverageExpression(Oid sumAggregateType, Oid countAggregateType, AttrNumber *columnId) { const char *sumAggregateName = AggregateNames[AGGREGATE_SUM]; const uint32 masterTableId = 1; const int32 defaultTypeMod = -1; const Index defaultLevelsUp = 0; const AttrNumber argumentId = 1; Oid sumTypeCollationId = get_typcollation(sumAggregateType); Oid countTypeCollationId = get_typcollation(countAggregateType); Var *firstColumn = NULL; Var *secondColumn = NULL; TargetEntry *firstTargetEntry = NULL; TargetEntry *secondTargetEntry = NULL; Aggref *firstSum = NULL; Aggref *secondSum = NULL; List *operatorNameList = NIL; Expr *opExpr = NULL; /* create the first argument for sum(column1) */ firstColumn = makeVar(masterTableId, (*columnId), sumAggregateType, defaultTypeMod, sumTypeCollationId, defaultLevelsUp); firstTargetEntry = makeTargetEntry((Expr *) firstColumn, argumentId, NULL, false); (*columnId)++; firstSum = makeNode(Aggref); firstSum->aggfnoid = AggregateFunctionOid(sumAggregateName, sumAggregateType); firstSum->aggtype = get_func_rettype(firstSum->aggfnoid); firstSum->args = list_make1(firstTargetEntry); firstSum->aggkind = AGGKIND_NORMAL; firstSum->aggtranstype = InvalidOid; firstSum->aggargtypes = list_make1_oid(firstSum->aggtype); firstSum->aggsplit = AGGSPLIT_SIMPLE; /* create the second argument for sum(column2) */ secondColumn = makeVar(masterTableId, (*columnId), countAggregateType, defaultTypeMod, countTypeCollationId, defaultLevelsUp); secondTargetEntry = makeTargetEntry((Expr *) secondColumn, argumentId, NULL, false); (*columnId)++; secondSum = makeNode(Aggref); secondSum->aggfnoid = AggregateFunctionOid(sumAggregateName, countAggregateType); secondSum->aggtype = get_func_rettype(secondSum->aggfnoid); secondSum->args = list_make1(secondTargetEntry); secondSum->aggkind = AGGKIND_NORMAL; secondSum->aggtranstype = InvalidOid; secondSum->aggargtypes = list_make1_oid(firstSum->aggtype); secondSum->aggsplit = AGGSPLIT_SIMPLE; /* * Build the division operator between these two aggregates. This function * will convert the types of the aggregates if necessary. */ operatorNameList = list_make1(makeString(DIVISION_OPER_NAME)); #if (PG_VERSION_NUM >= 100000) opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, NULL, -1); #else opExpr = make_op(NULL, operatorNameList, (Node *) firstSum, (Node *) secondSum, -1); #endif return opExpr; } /* * AddTypeConversion checks if the given expressions generate the same types. If * they don't, the function adds a type conversion function on top of the new * expression to have it generate the same type as the original aggregate. 
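 * For example, sum() over an int4 column has return type int8 in the original query, * while the master-side sum() over the workers' int8 partial sums returns numeric; in * that case the numeric result is wrapped in a cast back to int8.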
*/ static Expr * AddTypeConversion(Node *originalAggregate, Node *newExpression) { Oid newTypeId = exprType(newExpression); Oid originalTypeId = exprType(originalAggregate); int32 originalTypeMod = exprTypmod(originalAggregate); Node *typeConvertedExpression = NULL; /* nothing to do if the two types are the same */ if (originalTypeId == newTypeId) { return NULL; } /* otherwise, add a type conversion function */ typeConvertedExpression = coerce_to_target_type(NULL, newExpression, newTypeId, originalTypeId, originalTypeMod, COERCION_EXPLICIT, COERCE_EXPLICIT_CAST, -1); Assert(typeConvertedExpression != NULL); return (Expr *) typeConvertedExpression; } /* * WorkerExtendedOpNode creates the worker extended operator node from the given * target entries. The function walks over these target entries; and for entries * with aggregates in them, this function calls the recursive aggregate walker * function to create aggregates for the worker nodes. Also, the function checks * if we can push down the limit to worker nodes; and if we can, sets the limit * count and sort clause list fields in the new operator node. It provides special * treatment for count distinct operator if it is used in repartition subqueries. * Each column in count distinct aggregate is added to target list, and group by * list of worker extended operator. */ static MultiExtendedOp * WorkerExtendedOpNode(MultiExtendedOp *originalOpNode) { MultiExtendedOp *workerExtendedOpNode = NULL; MultiNode *parentNode = ParentNode((MultiNode *) originalOpNode); MultiNode *childNode = ChildNode((MultiUnaryNode *) originalOpNode); List *targetEntryList = originalOpNode->targetList; ListCell *targetEntryCell = NULL; List *newTargetEntryList = NIL; List *groupClauseList = copyObject(originalOpNode->groupClauseList); Node *havingQual = originalOpNode->havingQual; AttrNumber targetProjectionNumber = 1; WorkerAggregateWalkerContext *walkerContext = palloc0(sizeof(WorkerAggregateWalkerContext)); Index nextSortGroupRefIndex = 0; walkerContext->repartitionSubquery = false; walkerContext->expressionList = NIL; if (CitusIsA(parentNode, MultiTable) && CitusIsA(childNode, MultiCollect)) { walkerContext->repartitionSubquery = true; /* find max of sort group ref index */ foreach(targetEntryCell, targetEntryList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); if (targetEntry->ressortgroupref > nextSortGroupRefIndex) { nextSortGroupRefIndex = targetEntry->ressortgroupref; } } /* next group ref index starts from max group ref index + 1 */ nextSortGroupRefIndex++; } /* iterate over original target entries */ foreach(targetEntryCell, targetEntryList) { TargetEntry *originalTargetEntry = (TargetEntry *) lfirst(targetEntryCell); Expr *originalExpression = originalTargetEntry->expr; List *newExpressionList = NIL; ListCell *newExpressionCell = NULL; bool hasAggregates = contain_agg_clause((Node *) originalExpression); walkerContext->expressionList = NIL; walkerContext->createGroupByClause = false; if (hasAggregates) { WorkerAggregateWalker((Node *) originalExpression, walkerContext); newExpressionList = walkerContext->expressionList; } else { newExpressionList = list_make1(originalExpression); } /* now create target entries for each new expression */ foreach(newExpressionCell, newExpressionList) { Expr *newExpression = (Expr *) lfirst(newExpressionCell); TargetEntry *newTargetEntry = copyObject(originalTargetEntry); newTargetEntry->expr = newExpression; /* * Detect new targets of type Var and add it to group clause list. 
* This case is expected only if the target entry has aggregates and * it is inside a repartitioned subquery. We create group by entry * for each Var in target list. This code does not check if this * Var was already in the target list or in group by clauses. */ if (IsA(newExpression, Var) && walkerContext->createGroupByClause) { Var *column = (Var *) newExpression; Oid lessThanOperator = InvalidOid; Oid equalsOperator = InvalidOid; bool hashable = false; SortGroupClause *groupByClause = makeNode(SortGroupClause); get_sort_group_operators(column->vartype, true, true, true, &lessThanOperator, &equalsOperator, NULL, &hashable); groupByClause->eqop = equalsOperator; groupByClause->hashable = hashable; groupByClause->nulls_first = false; groupByClause->sortop = lessThanOperator; groupByClause->tleSortGroupRef = nextSortGroupRefIndex; groupClauseList = lappend(groupClauseList, groupByClause); newTargetEntry->ressortgroupref = nextSortGroupRefIndex; nextSortGroupRefIndex++; } if (newTargetEntry->resname == NULL) { StringInfo columnNameString = makeStringInfo(); appendStringInfo(columnNameString, WORKER_COLUMN_FORMAT, targetProjectionNumber); newTargetEntry->resname = columnNameString->data; } /* force resjunk to false as we may need this on the master */ newTargetEntry->resjunk = false; newTargetEntry->resno = targetProjectionNumber; targetProjectionNumber++; newTargetEntryList = lappend(newTargetEntryList, newTargetEntry); } } /* we also need to add having expressions to worker target list */ if (havingQual != NULL) { List *newExpressionList = NIL; ListCell *newExpressionCell = NULL; /* reset walker context */ walkerContext->expressionList = NIL; walkerContext->createGroupByClause = false; WorkerAggregateWalker(havingQual, walkerContext); newExpressionList = walkerContext->expressionList; /* now create target entries for each new expression */ foreach(newExpressionCell, newExpressionList) { TargetEntry *newTargetEntry = makeNode(TargetEntry); StringInfo columnNameString = makeStringInfo(); Expr *newExpression = (Expr *) lfirst(newExpressionCell); newTargetEntry->expr = newExpression; appendStringInfo(columnNameString, WORKER_COLUMN_FORMAT, targetProjectionNumber); newTargetEntry->resname = columnNameString->data; /* force resjunk to false as we may need this on the master */ newTargetEntry->resjunk = false; newTargetEntry->resno = targetProjectionNumber; newTargetEntryList = lappend(newTargetEntryList, newTargetEntry); targetProjectionNumber++; } } workerExtendedOpNode = CitusMakeNode(MultiExtendedOp); workerExtendedOpNode->targetList = newTargetEntryList; workerExtendedOpNode->groupClauseList = groupClauseList; /* if we can push down the limit, also set related fields */ workerExtendedOpNode->limitCount = WorkerLimitCount(originalOpNode); workerExtendedOpNode->sortClauseList = WorkerSortClauseList(originalOpNode); return workerExtendedOpNode; } /* * WorkerAggregateWalker walks over the original target entry expression, and * creates the list of expression trees (potentially more than one) to execute * on the worker nodes. The function creates new expressions for aggregates and * columns; and recurses into expression_tree_walker() for all other expression * types. 
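 * For example, a target entry such as avg(price) / count(id) (illustrative column * names) contributes three worker expressions: sum(price) and count(price) for the * average, plus count(id); the division itself is evaluated later on the master.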
*/ static bool WorkerAggregateWalker(Node *node, WorkerAggregateWalkerContext *walkerContext) { bool walkerResult = false; if (node == NULL) { return false; } if (IsA(node, Aggref)) { Aggref *originalAggregate = (Aggref *) node; List *workerAggregateList = WorkerAggregateExpressionList(originalAggregate, walkerContext); walkerContext->expressionList = list_concat(walkerContext->expressionList, workerAggregateList); } else if (IsA(node, Var)) { Var *originalColumn = (Var *) node; walkerContext->expressionList = lappend(walkerContext->expressionList, originalColumn); } else { walkerResult = expression_tree_walker(node, WorkerAggregateWalker, (void *) walkerContext); } return walkerResult; } /* * WorkerAggregateExpressionList takes in the original aggregate function, and * determines the transformed aggregate functions to execute on worker nodes. * The function then returns these aggregates in a list. It also creates * group by clauses for newly added targets to be placed in the extended operator * node. */ static List * WorkerAggregateExpressionList(Aggref *originalAggregate, WorkerAggregateWalkerContext *walkerContext) { AggregateType aggregateType = GetAggregateType(originalAggregate->aggfnoid); List *workerAggregateList = NIL; AggClauseCosts aggregateCosts; if (aggregateType == AGGREGATE_COUNT && originalAggregate->aggdistinct && CountDistinctErrorRate == DISABLE_DISTINCT_APPROXIMATION && walkerContext->repartitionSubquery) { Aggref *aggregate = (Aggref *) copyObject(originalAggregate); List *columnList = pull_var_clause_default((Node *) aggregate); ListCell *columnCell = NULL; foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); workerAggregateList = list_append_unique(workerAggregateList, column); } walkerContext->createGroupByClause = true; } else if (aggregateType == AGGREGATE_COUNT && originalAggregate->aggdistinct && CountDistinctErrorRate != DISABLE_DISTINCT_APPROXIMATION) { /* * If the original aggregate is a count(distinct) approximation, we want * to compute hll_add_agg(hll_hash(var), storageSize) on worker nodes. 
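 * For example, count(distinct user_id) (illustrative column name) turns into an * hll_add_agg() aggregate whose first argument is the hll hash function chosen by * CountDistinctHashFunctionName() applied to user_id, and whose second argument is * the log2 storage size computed by CountDistinctStorageSize().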
*/ const AttrNumber firstArgumentId = 1; const AttrNumber secondArgumentId = 2; const int hashArgumentCount = 2; const int addArgumentCount = 2; TargetEntry *hashedColumnArgument = NULL; TargetEntry *storageSizeArgument = NULL; List *addAggregateArgumentList = NIL; Aggref *addAggregateFunction = NULL; /* init hll_hash() related variables */ Oid argumentType = AggregateArgumentType(originalAggregate); TargetEntry *argument = (TargetEntry *) linitial(originalAggregate->args); Expr *argumentExpression = copyObject(argument->expr); /* extract schema name of hll */ Oid hllId = get_extension_oid(HLL_EXTENSION_NAME, false); Oid hllSchemaOid = get_extension_schema(hllId); const char *hllSchemaName = get_namespace_name(hllSchemaOid); char *hashFunctionName = CountDistinctHashFunctionName(argumentType); Oid hashFunctionId = FunctionOid(hllSchemaName, hashFunctionName, hashArgumentCount); Oid hashFunctionReturnType = get_func_rettype(hashFunctionId); /* init hll_add_agg() related variables */ Oid addFunctionId = FunctionOid(hllSchemaName, HLL_ADD_AGGREGATE_NAME, addArgumentCount); Oid hllType = TypeOid(hllSchemaOid, HLL_TYPE_NAME); int logOfStorageSize = CountDistinctStorageSize(CountDistinctErrorRate); Const *logOfStorageSizeConst = MakeIntegerConst(logOfStorageSize); /* construct hll_hash() expression */ FuncExpr *hashFunction = makeNode(FuncExpr); hashFunction->funcid = hashFunctionId; hashFunction->funcresulttype = hashFunctionReturnType; hashFunction->args = list_make1(argumentExpression); /* construct hll_add_agg() expression */ hashedColumnArgument = makeTargetEntry((Expr *) hashFunction, firstArgumentId, NULL, false); storageSizeArgument = makeTargetEntry((Expr *) logOfStorageSizeConst, secondArgumentId, NULL, false); addAggregateArgumentList = list_make2(hashedColumnArgument, storageSizeArgument); addAggregateFunction = makeNode(Aggref); addAggregateFunction->aggfnoid = addFunctionId; addAggregateFunction->aggtype = hllType; addAggregateFunction->args = addAggregateArgumentList; addAggregateFunction->aggkind = AGGKIND_NORMAL; addAggregateFunction->aggfilter = (Expr *) copyObject( originalAggregate->aggfilter); workerAggregateList = lappend(workerAggregateList, addAggregateFunction); } else if (aggregateType == AGGREGATE_AVERAGE) { /* * If the original aggregate is an average, we want to compute sum(var) * and count(var) on worker nodes. */ Aggref *sumAggregate = copyObject(originalAggregate); Aggref *countAggregate = copyObject(originalAggregate); /* extract function names for sum and count */ const char *sumAggregateName = AggregateNames[AGGREGATE_SUM]; const char *countAggregateName = AggregateNames[AGGREGATE_COUNT]; /* * Find the type of the expression over which we execute the aggregate. * We then need to find the right sum function for that type. 
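 * For example, for avg() over an int4 column the worker-side sum is resolved as * sum(int4), whereas count is always resolved with ANYOID because count accepts any * argument type.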
*/ Oid argumentType = AggregateArgumentType(originalAggregate); /* find function implementing sum over the original type */ sumAggregate->aggfnoid = AggregateFunctionOid(sumAggregateName, argumentType); sumAggregate->aggtype = get_func_rettype(sumAggregate->aggfnoid); sumAggregate->aggtranstype = InvalidOid; sumAggregate->aggargtypes = list_make1_oid(argumentType); sumAggregate->aggsplit = AGGSPLIT_SIMPLE; /* count has any input type */ countAggregate->aggfnoid = AggregateFunctionOid(countAggregateName, ANYOID); countAggregate->aggtype = get_func_rettype(countAggregate->aggfnoid); countAggregate->aggtranstype = InvalidOid; countAggregate->aggargtypes = list_make1_oid(argumentType); countAggregate->aggsplit = AGGSPLIT_SIMPLE; workerAggregateList = lappend(workerAggregateList, sumAggregate); workerAggregateList = lappend(workerAggregateList, countAggregate); } else { /* * All other aggregates are sent as they are to the worker nodes. These * aggregate functions include sum, count, min, max, and array_agg. */ Aggref *workerAggregate = copyObject(originalAggregate); workerAggregateList = lappend(workerAggregateList, workerAggregate); } /* Run AggRefs through cost machinery to mark required fields sanely */ memset(&aggregateCosts, 0, sizeof(aggregateCosts)); get_agg_clause_costs(NULL, (Node *) workerAggregateList, AGGSPLIT_SIMPLE, &aggregateCosts); return workerAggregateList; } /* * GetAggregateType scans pg_catalog.pg_proc for the given aggregate oid, and * finds the aggregate's name. The function then matches the aggregate's name to * previously stored strings, and returns the appropriate aggregate type. */ static AggregateType GetAggregateType(Oid aggFunctionId) { char *aggregateProcName = NULL; uint32 aggregateCount = 0; uint32 aggregateIndex = 0; bool found = false; /* look up the function name */ aggregateProcName = get_func_name(aggFunctionId); if (aggregateProcName == NULL) { ereport(ERROR, (errmsg("cache lookup failed for function %u", aggFunctionId))); } aggregateCount = lengthof(AggregateNames); for (aggregateIndex = 0; aggregateIndex < aggregateCount; aggregateIndex++) { const char *aggregateName = AggregateNames[aggregateIndex]; if (strncmp(aggregateName, aggregateProcName, NAMEDATALEN) == 0) { found = true; break; } } if (!found) { ereport(ERROR, (errmsg("unsupported aggregate function %s", aggregateProcName))); } return aggregateIndex; } /* Extracts the type of the argument over which the aggregate is operating. */ static Oid AggregateArgumentType(Aggref *aggregate) { List *argumentList = aggregate->args; TargetEntry *argument = (TargetEntry *) linitial(argumentList); Oid returnTypeId = exprType((Node *) argument->expr); /* We currently support aggregates with only one argument; assert that. */ Assert(list_length(argumentList) == 1); return returnTypeId; } /* * AggregateFunctionOid performs a reverse lookup on aggregate function name, * and returns the corresponding aggregate function oid for the given function * name and input type. 
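 * For example, AggregateFunctionOid("sum", INT4OID) scans pg_proc for the * single-argument "sum" entry whose argument type is int4 and returns that entry's * oid.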
*/ static Oid AggregateFunctionOid(const char *functionName, Oid inputType) { Oid functionOid = InvalidOid; Relation procRelation = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; HeapTuple heapTuple = NULL; procRelation = heap_open(ProcedureRelationId, AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_proc_proname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(functionName)); scanDescriptor = systable_beginscan(procRelation, ProcedureNameArgsNspIndexId, true, NULL, scanKeyCount, scanKey); /* loop until we find the right function */ heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { Form_pg_proc procForm = (Form_pg_proc) GETSTRUCT(heapTuple); int argumentCount = procForm->pronargs; if (argumentCount == 1) { /* check if input type and found value type match */ if (procForm->proargtypes.values[0] == inputType) { functionOid = HeapTupleGetOid(heapTuple); break; } } Assert(argumentCount <= 1); heapTuple = systable_getnext(scanDescriptor); } if (functionOid == InvalidOid) { ereport(ERROR, (errmsg("no matching oid for function: %s", functionName))); } systable_endscan(scanDescriptor); heap_close(procRelation, AccessShareLock); return functionOid; } /* * FunctionOid looks for a function that has the given name and the given number * of arguments, and returns the corresponding function's oid. */ Oid FunctionOid(const char *schemaName, const char *functionName, int argumentCount) { FuncCandidateList functionList = NULL; Oid functionOid = InvalidOid; char *qualifiedFunctionName = quote_qualified_identifier(schemaName, functionName); List *qualifiedFunctionNameList = stringToQualifiedNameList(qualifiedFunctionName); List *argumentList = NIL; const bool findVariadics = false; const bool findDefaults = false; const bool missingOK = true; functionList = FuncnameGetCandidates(qualifiedFunctionNameList, argumentCount, argumentList, findVariadics, findDefaults, missingOK); if (functionList == NULL) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("function \"%s\" does not exist", functionName))); } else if (functionList->next != NULL) { ereport(ERROR, (errcode(ERRCODE_AMBIGUOUS_FUNCTION), errmsg("more than one function named \"%s\"", functionName))); } /* get function oid from function list's head */ functionOid = functionList->oid; return functionOid; } /* * TypeOid looks for a type that has the given name and schema, and returns the * corresponding type's oid. */ static Oid TypeOid(Oid schemaId, const char *typeName) { Oid typeOid; typeOid = GetSysCacheOid2(TYPENAMENSP, PointerGetDatum(typeName), ObjectIdGetDatum(schemaId)); return typeOid; } /* * CountDistinctHashFunctionName resolves the hll_hash function name to use for * the given input type, and returns this function name. */ static char * CountDistinctHashFunctionName(Oid argumentType) { char *hashFunctionName = NULL; /* resolve hash function name based on input argument type */ switch (argumentType) { case INT4OID: { hashFunctionName = pstrdup(HLL_HASH_INTEGER_FUNC_NAME); break; } case INT8OID: { hashFunctionName = pstrdup(HLL_HASH_BIGINT_FUNC_NAME); break; } case TEXTOID: case BPCHAROID: case VARCHAROID: { hashFunctionName = pstrdup(HLL_HASH_TEXT_FUNC_NAME); break; } default: { hashFunctionName = pstrdup(HLL_HASH_ANY_FUNC_NAME); break; } } return hashFunctionName; } /* * CountDistinctStorageSize takes in the desired precision for count distinct * approximations, and returns the log-base-2 of storage space needed for the * HyperLogLog algorithm. 
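 * For example, with an error rate of 0.02 the desired register count is * (1.04 / 0.02)^2 = 2704 and log2(2704) rounds to 11; the result is then clamped to * the [4, 17] range used below.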
*/ static int CountDistinctStorageSize(double approximationErrorRate) { double desiredStorageSize = pow((1.04 / approximationErrorRate), 2); double logOfDesiredStorageSize = log(desiredStorageSize) / log(2); /* keep log2(storage size) inside allowed range */ int logOfStorageSize = (int) rint(logOfDesiredStorageSize); if (logOfStorageSize < 4) { logOfStorageSize = 4; } else if (logOfStorageSize > 17) { logOfStorageSize = 17; } return logOfStorageSize; } /* Makes an integer constant node from the given value, and returns that node. */ static Const * MakeIntegerConst(int32 integerValue) { const int typeCollationId = get_typcollation(INT4OID); const int16 typeLength = get_typlen(INT4OID); const int32 typeModifier = -1; const bool typeIsNull = false; const bool typePassByValue = true; Datum integerDatum = Int32GetDatum(integerValue); Const *integerConst = makeConst(INT4OID, typeModifier, typeCollationId, typeLength, integerDatum, typeIsNull, typePassByValue); return integerConst; } /* Makes a 64-bit integer constant node from the given value, and returns that node. */ static Const * MakeIntegerConstInt64(int64 integerValue) { const int typeCollationId = get_typcollation(INT8OID); const int16 typeLength = get_typlen(INT8OID); const int32 typeModifier = -1; const bool typeIsNull = false; const bool typePassByValue = true; Datum integer64Datum = Int64GetDatum(integerValue); Const *integer64Const = makeConst(INT8OID, typeModifier, typeCollationId, typeLength, integer64Datum, typeIsNull, typePassByValue); return integer64Const; } /* * ErrorIfContainsUnsupportedAggregate extracts aggregate expressions from the * logical plan, walks over them and uses helper functions to check if we can * transform these aggregate expressions and push them down to worker nodes. * These helper functions error out if we cannot transform the aggregates. */ static void ErrorIfContainsUnsupportedAggregate(MultiNode *logicalPlanNode) { List *opNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp); MultiExtendedOp *extendedOpNode = (MultiExtendedOp *) linitial(opNodeList); List *targetList = extendedOpNode->targetList; /* * PVC_REJECT_PLACEHOLDERS is implicit if PVC_INCLUDE_PLACEHOLDERS isn't * specified. */ List *expressionList = pull_var_clause((Node *) targetList, PVC_INCLUDE_AGGREGATES); ListCell *expressionCell = NULL; foreach(expressionCell, expressionList) { Node *expression = (Node *) lfirst(expressionCell); Aggref *aggregateExpression = NULL; AggregateType aggregateType = AGGREGATE_INVALID_FIRST; /* only consider aggregate expressions */ if (!IsA(expression, Aggref)) { continue; } /* GetAggregateType errors out on unsupported aggregate types */ aggregateExpression = (Aggref *) expression; aggregateType = GetAggregateType(aggregateExpression->aggfnoid); Assert(aggregateType != AGGREGATE_INVALID_FIRST); /* * Check that we can transform the current aggregate expression. These * functions error out on unsupported array_agg and aggregate (distinct) * clauses. */ if (aggregateType == AGGREGATE_ARRAY_AGG) { ErrorIfUnsupportedArrayAggregate(aggregateExpression); } else if (aggregateExpression->aggdistinct) { ErrorIfUnsupportedAggregateDistinct(aggregateExpression, logicalPlanNode); } } } /* * ErrorIfUnsupportedArrayAggregate checks if we can transform the array aggregate * expression and push it down to the worker node. If we cannot transform the * aggregate, this function errors. 
*/ static void ErrorIfUnsupportedArrayAggregate(Aggref *arrayAggregateExpression) { /* if array_agg has order by, we error out */ if (arrayAggregateExpression->aggorder) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("array_agg with order by is unsupported"))); } /* if array_agg has distinct, we error out */ if (arrayAggregateExpression->aggdistinct) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("array_agg (distinct) is unsupported"))); } } /* * ErrorIfUnsupportedAggregateDistinct checks if we can transform the aggregate * (distinct expression) and push it down to the worker node. It handles count * (distinct) separately to check if we can use distinct approximations. If we * cannot transform the aggregate, this function errors. */ static void ErrorIfUnsupportedAggregateDistinct(Aggref *aggregateExpression, MultiNode *logicalPlanNode) { char *errorDetail = NULL; bool distinctSupported = true; List *repartitionNodeList = NIL; Var *distinctColumn = NULL; List *tableNodeList = NIL; List *extendedOpNodeList = NIL; MultiExtendedOp *extendedOpNode = NULL; AggregateType aggregateType = GetAggregateType(aggregateExpression->aggfnoid); /* check if logical plan includes a subquery */ List *subqueryMultiTableList = SubqueryMultiTableList(logicalPlanNode); if (subqueryMultiTableList != NIL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot push down this subquery"), errdetail("distinct in the outermost query is unsupported"))); } /* * We partially support count(distinct) in subqueries, other distinct aggregates in * subqueries are not supported yet. */ if (aggregateType == AGGREGATE_COUNT) { Node *aggregateArgument = (Node *) linitial(aggregateExpression->args); List *columnList = pull_var_clause_default(aggregateArgument); ListCell *columnCell = NULL; foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); if (column->varattno <= 0) { ereport(ERROR, (errmsg("cannot compute count (distinct)"), errdetail("Non-column references are not supported " "yet"))); } } } else { List *multiTableNodeList = FindNodesOfType(logicalPlanNode, T_MultiTable); ListCell *multiTableNodeCell = NULL; foreach(multiTableNodeCell, multiTableNodeList) { MultiTable *multiTable = (MultiTable *) lfirst(multiTableNodeCell); if (multiTable->relationId == SUBQUERY_RELATION_ID) { ereport(ERROR, (errmsg("cannot compute aggregate (distinct)"), errdetail("Only count(distinct) aggregate is " "supported in subqueries"))); } } } /* if we have a count(distinct), and distinct approximation is enabled */ if (aggregateType == AGGREGATE_COUNT && CountDistinctErrorRate != DISABLE_DISTINCT_APPROXIMATION) { bool missingOK = true; Oid distinctExtensionId = get_extension_oid(HLL_EXTENSION_NAME, missingOK); /* if extension for distinct approximation is loaded, we are good */ if (distinctExtensionId != InvalidOid) { return; } else { ereport(ERROR, (errmsg("cannot compute count (distinct) approximation"), errhint("You need to have the hll extension loaded."))); } } if (aggregateType == AGGREGATE_COUNT) { List *aggregateVarList = pull_var_clause_default((Node *) aggregateExpression); if (aggregateVarList == NIL) { distinctSupported = false; errorDetail = "aggregate (distinct) with no columns is unsupported"; } } repartitionNodeList = FindNodesOfType(logicalPlanNode, T_MultiPartition); if (repartitionNodeList != NIL) { distinctSupported = false; errorDetail = "aggregate (distinct) with table repartitioning is unsupported"; } tableNodeList = FindNodesOfType(logicalPlanNode, 
T_MultiTable); extendedOpNodeList = FindNodesOfType(logicalPlanNode, T_MultiExtendedOp); extendedOpNode = (MultiExtendedOp *) linitial(extendedOpNodeList); distinctColumn = AggregateDistinctColumn(aggregateExpression); if (distinctSupported && distinctColumn == NULL) { /* * If the query has a single table, and table is grouped by partition column, * then we support count distincts even distinct column can not be identified. */ distinctSupported = TablePartitioningSupportsDistinct(tableNodeList, extendedOpNode, distinctColumn); if (!distinctSupported) { errorDetail = "aggregate (distinct) on complex expressions is unsupported"; } } else if (distinctSupported) { bool supports = TablePartitioningSupportsDistinct(tableNodeList, extendedOpNode, distinctColumn); if (!supports) { distinctSupported = false; errorDetail = "table partitioning is unsuitable for aggregate (distinct)"; } } /* if current aggregate expression isn't supported, error out */ if (!distinctSupported) { if (aggregateType == AGGREGATE_COUNT) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot compute aggregate (distinct)"), errdetail("%s", errorDetail), errhint("You can load the hll extension from contrib " "packages and enable distinct approximations."))); } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot compute aggregate (distinct)"), errdetail("%s", errorDetail))); } } } /* * AggregateDistinctColumn checks if the given aggregate expression's distinct * clause is on a single column. If it is, the function finds and returns that * column. Otherwise, the function returns null. */ static Var * AggregateDistinctColumn(Aggref *aggregateExpression) { Var *aggregateColumn = NULL; int aggregateArgumentCount = 0; TargetEntry *aggregateTargetEntry = NULL; /* only consider aggregates with distincts */ if (!aggregateExpression->aggdistinct) { return NULL; } aggregateArgumentCount = list_length(aggregateExpression->args); if (aggregateArgumentCount != 1) { return NULL; } aggregateTargetEntry = (TargetEntry *) linitial(aggregateExpression->args); if (!IsA(aggregateTargetEntry->expr, Var)) { return NULL; } aggregateColumn = (Var *) aggregateTargetEntry->expr; return aggregateColumn; } /* * TablePartitioningSupportsDistinct walks over all tables in the given list and * checks that each table's partitioning method is suitable for pushing down an * aggregate (distinct) expression to worker nodes. For this, the function needs * to check that task results do not overlap with one another on the distinct * column. */ static bool TablePartitioningSupportsDistinct(List *tableNodeList, MultiExtendedOp *opNode, Var *distinctColumn) { bool distinctSupported = true; ListCell *tableNodeCell = NULL; foreach(tableNodeCell, tableNodeList) { MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); Oid relationId = tableNode->relationId; bool tableDistinctSupported = false; char partitionMethod = 0; List *shardList = NIL; if (relationId == SUBQUERY_RELATION_ID) { return true; } /* if table has one shard, task results don't overlap */ shardList = LoadShardList(relationId); if (list_length(shardList) == 1) { continue; } /* * We need to check that task results don't overlap. We can only do this * if table is range partitioned. 
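 * Hash partitioning qualifies as well (see the check below): with both range and hash * partitioning a given partition column value maps to exactly one shard, so per-shard * results should not overlap on that column.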
*/ partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_RANGE || partitionMethod == DISTRIBUTE_BY_HASH) { Var *tablePartitionColumn = tableNode->partitionColumn; bool groupedByPartitionColumn = false; /* if distinct is on table partition column, we can push it down */ if (distinctColumn != NULL && tablePartitionColumn->varno == distinctColumn->varno && tablePartitionColumn->varattno == distinctColumn->varattno) { tableDistinctSupported = true; } /* if results are grouped by partition column, we can push down */ groupedByPartitionColumn = GroupedByColumn(opNode->groupClauseList, opNode->targetList, tablePartitionColumn); if (groupedByPartitionColumn) { tableDistinctSupported = true; } } if (!tableDistinctSupported) { distinctSupported = false; break; } } return distinctSupported; } /* * GroupedByColumn walks over group clauses in the given list, and checks if any * of the group clauses is on the given column. */ static bool GroupedByColumn(List *groupClauseList, List *targetList, Var *column) { bool groupedByColumn = false; ListCell *groupClauseCell = NULL; foreach(groupClauseCell, groupClauseList) { SortGroupClause *groupClause = (SortGroupClause *) lfirst(groupClauseCell); TargetEntry *groupTargetEntry = get_sortgroupclause_tle(groupClause, targetList); Expr *groupExpression = (Expr *) groupTargetEntry->expr; if (IsA(groupExpression, Var)) { Var *groupColumn = (Var *) groupExpression; if (groupColumn->varno == column->varno && groupColumn->varattno == column->varattno) { groupedByColumn = true; break; } } } return groupedByColumn; } /* * SubqueryMultiTableList extracts multi tables in the given logical plan tree * and returns subquery multi tables in a new list. */ List * SubqueryMultiTableList(MultiNode *multiNode) { List *subqueryMultiTableList = NIL; List *multiTableNodeList = FindNodesOfType(multiNode, T_MultiTable); ListCell *multiTableNodeCell = NULL; foreach(multiTableNodeCell, multiTableNodeList) { MultiTable *multiTable = (MultiTable *) lfirst(multiTableNodeCell); Query *subquery = multiTable->subquery; if (subquery != NULL) { subqueryMultiTableList = lappend(subqueryMultiTableList, multiTable); } } return subqueryMultiTableList; } /* * GroupTargetEntryList walks over group clauses in the given list, finds * matching target entries and return them in a new list. */ List * GroupTargetEntryList(List *groupClauseList, List *targetEntryList) { List *groupTargetEntryList = NIL; ListCell *groupClauseCell = NULL; foreach(groupClauseCell, groupClauseList) { SortGroupClause *groupClause = (SortGroupClause *) lfirst(groupClauseCell); TargetEntry *groupTargetEntry = get_sortgroupclause_tle(groupClause, targetEntryList); groupTargetEntryList = lappend(groupTargetEntryList, groupTargetEntry); } return groupTargetEntryList; } /* * IsPartitionColumn returns true if the given column is a partition column. * The function uses FindReferencedTableColumn to find the original relation * id and column that the column expression refers to. It then checks whether * that column is a partition column of the relation. * * Also, the function returns always false for reference tables given that * reference tables do not have partition column. The function does not * support queries with CTEs, it would return false if columnExpression * refers to a column returned by a CTE. 
*/ bool IsPartitionColumn(Expr *columnExpression, Query *query) { bool isPartitionColumn = false; Oid relationId = InvalidOid; Var *column = NULL; FindReferencedTableColumn(columnExpression, NIL, query, &relationId, &column); if (relationId != InvalidOid && column != NULL) { Var *partitionColumn = DistPartitionKey(relationId); /* not all distributed tables have partition column */ if (partitionColumn != NULL && column->varattno == partitionColumn->varattno) { isPartitionColumn = true; } } return isPartitionColumn; } /* * FindReferencedTableColumn recursively traverses the query tree to find the actual * relation id and column that columnExpression refers to. If columnExpression is a * non-relational or computed/derived expression, the function returns InvalidOid for * relationId and NULL for column. The caller should provide the parent query list from * the top of the tree to this particular Query's parent. This argument is used to look * into CTEs that may be present in the query. */ void FindReferencedTableColumn(Expr *columnExpression, List *parentQueryList, Query *query, Oid *relationId, Var **column) { Var *candidateColumn = NULL; List *rangetableList = query->rtable; Index rangeTableEntryIndex = 0; RangeTblEntry *rangeTableEntry = NULL; Expr *strippedColumnExpression = (Expr *) strip_implicit_coercions( (Node *) columnExpression); *relationId = InvalidOid; *column = NULL; if (IsA(strippedColumnExpression, Var)) { candidateColumn = (Var *) strippedColumnExpression; } else if (IsA(strippedColumnExpression, FieldSelect)) { FieldSelect *compositeField = (FieldSelect *) strippedColumnExpression; Expr *fieldExpression = compositeField->arg; if (IsA(fieldExpression, Var)) { candidateColumn = (Var *) fieldExpression; } } if (candidateColumn == NULL) { return; } rangeTableEntryIndex = candidateColumn->varno - 1; rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex); if (rangeTableEntry->rtekind == RTE_RELATION) { *relationId = rangeTableEntry->relid; *column = candidateColumn; } else if (rangeTableEntry->rtekind == RTE_SUBQUERY) { Query *subquery = rangeTableEntry->subquery; List *targetEntryList = subquery->targetList; AttrNumber targetEntryIndex = candidateColumn->varattno - 1; TargetEntry *subqueryTargetEntry = list_nth(targetEntryList, targetEntryIndex); Expr *subColumnExpression = subqueryTargetEntry->expr; /* append current query to parent query list */ parentQueryList = lappend(parentQueryList, query); FindReferencedTableColumn(subColumnExpression, parentQueryList, subquery, relationId, column); } else if (rangeTableEntry->rtekind == RTE_JOIN) { List *joinColumnList = rangeTableEntry->joinaliasvars; AttrNumber joinColumnIndex = candidateColumn->varattno - 1; Expr *joinColumn = list_nth(joinColumnList, joinColumnIndex); /* parent query list stays the same since still in the same query boundary */ FindReferencedTableColumn(joinColumn, parentQueryList, query, relationId, column); } else if (rangeTableEntry->rtekind == RTE_CTE) { int cteParentListIndex = list_length(parentQueryList) - rangeTableEntry->ctelevelsup - 1; Query *cteParentQuery = NULL; List *cteList = NIL; ListCell *cteListCell = NULL; CommonTableExpr *cte = NULL; /* * This should have been an error case, not marking it as an error at the * moment due to usage from IsPartitionColumn. Callers of that function * do not have access to the parent query list.
*/ if (cteParentListIndex >= 0) { cteParentQuery = list_nth(parentQueryList, cteParentListIndex); cteList = cteParentQuery->cteList; } foreach(cteListCell, cteList) { CommonTableExpr *candidateCte = (CommonTableExpr *) lfirst(cteListCell); if (strcmp(candidateCte->ctename, rangeTableEntry->ctename) == 0) { cte = candidateCte; break; } } if (cte != NULL) { Query *cteQuery = (Query *) cte->ctequery; List *targetEntryList = cteQuery->targetList; AttrNumber targetEntryIndex = candidateColumn->varattno - 1; TargetEntry *targetEntry = list_nth(targetEntryList, targetEntryIndex); parentQueryList = lappend(parentQueryList, query); FindReferencedTableColumn(targetEntry->expr, parentQueryList, cteQuery, relationId, column); } } } /* * ExtractQueryWalker walks over a query, and finds all queries in the query * tree and returns these queries. Note that the function also recurses into * the subqueries in WHERE clause. */ bool ExtractQueryWalker(Node *node, List **queryList) { if (node == NULL) { return false; } if (IsA(node, Query)) { Query *query = (Query *) node; (*queryList) = lappend(*queryList, query); return query_tree_walker(query, ExtractQueryWalker, queryList, 0); } return expression_tree_walker(node, ExtractQueryWalker, queryList); } /* * LeafQuery checks if the given query is a leaf query. Leaf queries have only * simple relations in the join tree. */ bool LeafQuery(Query *queryTree) { List *rangeTableList = queryTree->rtable; List *joinTreeTableIndexList = NIL; ListCell *joinTreeTableIndexCell = NULL; bool leafQuery = true; /* * Extract all range table indexes from the join tree. Note that sub-queries * that get pulled up by PostgreSQL don't appear in this join tree. */ ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList); foreach(joinTreeTableIndexCell, joinTreeTableIndexList) { /* * Join tree's range table index starts from 1 in the query tree. But, * list indexes start from 0. */ int joinTreeTableIndex = lfirst_int(joinTreeTableIndexCell); int rangeTableListIndex = joinTreeTableIndex - 1; RangeTblEntry *rangeTableEntry = (RangeTblEntry *) list_nth(rangeTableList, rangeTableListIndex); /* * Check if the range table in the join tree is a simple relation. */ if (rangeTableEntry->rtekind != RTE_RELATION) { leafQuery = false; } } return leafQuery; } /* * PartitionColumnOpExpressionList returns operator expressions which are on * partition column in the query. This function walks over where clause list, * finds operator expressions on partition column and returns them in a new list. */ List * PartitionColumnOpExpressionList(Query *query) { List *whereClauseList = WhereClauseList(query->jointree); List *partitionColumnOpExpressionList = NIL; ListCell *whereClauseCell = NULL; foreach(whereClauseCell, whereClauseList) { Node *whereNode = (Node *) lfirst(whereClauseCell); Node *leftArgument = NULL; Node *rightArgument = NULL; Node *strippedLeftArgument = NULL; Node *strippedRightArgument = NULL; OpExpr *whereClause = NULL; List *argumentList = NIL; List *rangetableList = NIL; uint32 argumentCount = 0; Var *candidatePartitionColumn = NULL; Var *partitionColumn = NULL; Index rangeTableEntryIndex = 0; RangeTblEntry *rangeTableEntry = NULL; Oid relationId = InvalidOid; if (!IsA(whereNode, OpExpr)) { continue; } whereClause = (OpExpr *) whereNode; argumentList = whereClause->args; /* * Select clauses must have two arguments. Note that logic here use to * find select clauses is very similar to IsSelectClause(). 
But we are * not able to reuse it, because it calls pull_var_clause_default() * which in return deep down calls pull_var_clause_walker(), and this * function errors out for variable level other than 0 which is the case * for lateral joins. */ argumentCount = list_length(argumentList); if (argumentCount != 2) { continue; } leftArgument = (Node *) linitial(argumentList); rightArgument = (Node *) lsecond(argumentList); strippedLeftArgument = strip_implicit_coercions(leftArgument); strippedRightArgument = strip_implicit_coercions(rightArgument); if (IsA(strippedLeftArgument, Var) && IsA(strippedRightArgument, Const)) { candidatePartitionColumn = (Var *) strippedLeftArgument; } else if (IsA(strippedLeftArgument, Const) && IsA(strippedRightArgument, Var)) { candidatePartitionColumn = (Var *) strippedRightArgument; } else { continue; } rangetableList = query->rtable; rangeTableEntryIndex = candidatePartitionColumn->varno - 1; rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex); Assert(rangeTableEntry->rtekind == RTE_RELATION); relationId = rangeTableEntry->relid; partitionColumn = DistPartitionKey(relationId); if (partitionColumn != NULL && candidatePartitionColumn->varattno == partitionColumn->varattno) { partitionColumnOpExpressionList = lappend(partitionColumnOpExpressionList, whereClause); } } return partitionColumnOpExpressionList; } /* * ReplaceColumnsInOpExpressionList walks over the given operator expression * list and copies every one them, replaces columns with the given new column * and finally returns new copies in a new list of operator expressions. */ List * ReplaceColumnsInOpExpressionList(List *opExpressionList, Var *newColumn) { List *newOpExpressionList = NIL; ListCell *opExpressionCell = NULL; foreach(opExpressionCell, opExpressionList) { OpExpr *opExpression = (OpExpr *) lfirst(opExpressionCell); OpExpr *copyOpExpression = (OpExpr *) copyObject(opExpression); List *argumentList = copyOpExpression->args; List *newArgumentList = NIL; Node *leftArgument = (Node *) linitial(argumentList); Node *rightArgument = (Node *) lsecond(argumentList); Node *strippedLeftArgument = strip_implicit_coercions(leftArgument); Node *strippedRightArgument = strip_implicit_coercions(rightArgument); if (IsA(strippedLeftArgument, Var)) { newArgumentList = list_make2(newColumn, strippedRightArgument); } else if (IsA(strippedRightArgument, Var)) { newArgumentList = list_make2(strippedLeftArgument, newColumn); } copyOpExpression->args = newArgumentList; newOpExpressionList = lappend(newOpExpressionList, copyOpExpression); } return newOpExpressionList; } /* * WorkerLimitCount checks if the given extended node contains a limit node, and * if that node can be pushed down. For this, the function checks if this limit * count or a meaningful approximation of it can be pushed down to worker nodes. * If they can, the function returns the limit count. * * The limit push-down decision tree is as follows: * group by? * 1/ \0 * order by? (exact pd) * 1/ \0 * has order by agg? (no pd) * 1/ \0 * can approximate? (exact pd) * 1/ \0 * (approx pd) (no pd) * * When an offset is present, the offset value is added to limit because for a query * with LIMIT x OFFSET y, (x+y) records should be pulled from the workers. * * If no limit is present or can be pushed down, then WorkerLimitCount * returns null. 
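 * For example, a query with LIMIT 10 OFFSET 20 and no GROUP BY is pushed down as * LIMIT 30 on each worker, and the master then applies the original LIMIT and OFFSET * to the merged results.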
*/ static Node * WorkerLimitCount(MultiExtendedOp *originalOpNode) { Node *workerLimitNode = NULL; List *groupClauseList = originalOpNode->groupClauseList; List *sortClauseList = originalOpNode->sortClauseList; List *targetList = originalOpNode->targetList; bool hasOrderByAggregate = HasOrderByAggregate(sortClauseList, targetList); bool canPushDownLimit = false; bool canApproximate = false; /* no limit node to push down */ if (originalOpNode->limitCount == NULL) { return NULL; } /* * During subquery pushdown planning original query is used. In that case, * certain expressions such as parameters are not evaluated and converted * into Consts on the op node. */ Assert(IsA(originalOpNode->limitCount, Const)); Assert(originalOpNode->limitOffset == NULL || IsA(originalOpNode->limitOffset, Const)); /* * If we don't have group by clauses, or if we have order by clauses without * aggregates, we can push down the original limit. Else if we have order by * clauses with commutative aggregates, we can push down approximate limits. */ if (groupClauseList == NIL) { canPushDownLimit = true; } else if (sortClauseList == NIL) { canPushDownLimit = false; } else if (!hasOrderByAggregate) { canPushDownLimit = true; } else { canApproximate = CanPushDownLimitApproximate(sortClauseList, targetList); } /* create the workerLimitNode according to the decisions above */ if (canPushDownLimit) { workerLimitNode = (Node *) copyObject(originalOpNode->limitCount); } else if (canApproximate) { Const *workerLimitConst = (Const *) copyObject(originalOpNode->limitCount); int64 workerLimitCount = (int64) LimitClauseRowFetchCount; workerLimitConst->constvalue = Int64GetDatum(workerLimitCount); workerLimitNode = (Node *) workerLimitConst; } /* * If offset clause is present and limit can be pushed down (whether exactly or * approximately), add the offset value to limit on workers */ if (workerLimitNode != NULL && originalOpNode->limitOffset != NULL) { Const *workerLimitConst = (Const *) workerLimitNode; Const *workerOffsetConst = (Const *) originalOpNode->limitOffset; int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue); workerLimitCount = workerLimitCount + workerOffsetCount; workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount); } /* display debug message on limit push down */ if (workerLimitNode != NULL) { Const *workerLimitConst = (Const *) workerLimitNode; int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT, workerLimitCount))); } return workerLimitNode; } /* * WorkerSortClauseList first checks if the given extended node contains a limit * that can be pushed down. If it does, the function then checks if we need to * add any sorting and grouping clauses to the sort list we push down for the * limit. If we do, the function adds these clauses and returns them. Otherwise, * the function returns null. */ static List * WorkerSortClauseList(MultiExtendedOp *originalOpNode) { List *workerSortClauseList = NIL; List *groupClauseList = originalOpNode->groupClauseList; List *sortClauseList = originalOpNode->sortClauseList; List *targetList = originalOpNode->targetList; /* if no limit node, no need to push down sort clauses */ if (originalOpNode->limitCount == NULL) { return NIL; } /* * If we are pushing down the limit, push down any order by clauses. 
Also if * we are pushing down the limit because the order by clauses don't have any * aggregates, add group by clauses to the order by list. We do this because * rows that belong to the same grouping may appear in different "offsets" * in different task results. By ordering on the group by clause, we ensure * that query results are consistent. */ if (groupClauseList == NIL) { workerSortClauseList = originalOpNode->sortClauseList; } else if (sortClauseList != NIL) { bool orderByNonAggregates = !(HasOrderByAggregate(sortClauseList, targetList)); bool canApproximate = CanPushDownLimitApproximate(sortClauseList, targetList); if (orderByNonAggregates) { workerSortClauseList = list_copy(sortClauseList); workerSortClauseList = list_concat(workerSortClauseList, groupClauseList); } else if (canApproximate) { workerSortClauseList = originalOpNode->sortClauseList; } } return workerSortClauseList; } /* * CanPushDownLimitApproximate checks if we can push down the limit clause to * the worker nodes, and get approximate and meaningful results. We can do this * only when: (1) the user has enabled the limit approximation and (2) the query * has order by clauses that are commutative. */ static bool CanPushDownLimitApproximate(List *sortClauseList, List *targetList) { bool canApproximate = false; /* user hasn't enabled the limit approximation */ if (LimitClauseRowFetchCount == DISABLE_LIMIT_APPROXIMATION) { return false; } if (sortClauseList != NIL) { bool orderByAverage = HasOrderByAverage(sortClauseList, targetList); bool orderByComplex = HasOrderByComplexExpression(sortClauseList, targetList); /* * If we don't have any order by average or any complex expressions with * aggregates in them, we can meaningfully approximate. */ if (!orderByAverage && !orderByComplex) { canApproximate = true; } } return canApproximate; } /* * HasOrderByAggregate walks over the given order by clauses, and checks if we * have an order by an aggregate function. If we do, the function returns true. */ static bool HasOrderByAggregate(List *sortClauseList, List *targetList) { bool hasOrderByAggregate = false; ListCell *sortClauseCell = NULL; foreach(sortClauseCell, sortClauseList) { SortGroupClause *sortClause = (SortGroupClause *) lfirst(sortClauseCell); Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); bool containsAggregate = contain_agg_clause(sortExpression); if (containsAggregate) { hasOrderByAggregate = true; break; } } return hasOrderByAggregate; } /* * HasOrderByAverage walks over the given order by clauses, and checks if we * have an order by an average. If we do, the function returns true. */ static bool HasOrderByAverage(List *sortClauseList, List *targetList) { bool hasOrderByAverage = false; ListCell *sortClauseCell = NULL; foreach(sortClauseCell, sortClauseList) { SortGroupClause *sortClause = (SortGroupClause *) lfirst(sortClauseCell); Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); /* if sort expression is an aggregate, check its type */ if (IsA(sortExpression, Aggref)) { Aggref *aggregate = (Aggref *) sortExpression; AggregateType aggregateType = GetAggregateType(aggregate->aggfnoid); if (aggregateType == AGGREGATE_AVERAGE) { hasOrderByAverage = true; break; } } } return hasOrderByAverage; } /* * HasOrderByComplexExpression walks over the given order by clauses, and checks * if we have a nested expression that contains an aggregate function within it. * If we do, the function returns true. 
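* * For example (illustrative, not from the original source): 'ORDER BY sum(value)' is a * plain aggregate and is not treated as complex, whereas 'ORDER BY sum(value) / count(value)' * wraps aggregates in a larger expression and makes this function return true.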
*/ static bool HasOrderByComplexExpression(List *sortClauseList, List *targetList) { bool hasOrderByComplexExpression = false; ListCell *sortClauseCell = NULL; foreach(sortClauseCell, sortClauseList) { SortGroupClause *sortClause = (SortGroupClause *) lfirst(sortClauseCell); Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); bool nestedAggregate = false; /* simple aggregate functions are ok */ if (IsA(sortExpression, Aggref)) { continue; } nestedAggregate = contain_agg_clause(sortExpression); if (nestedAggregate) { hasOrderByComplexExpression = true; break; } } return hasOrderByComplexExpression; } /* * HasOrderByHllType walks over the given order by clauses, and checks if any of * those clauses operate on hll data type. If they do, the function returns true. */ static bool HasOrderByHllType(List *sortClauseList, List *targetList) { bool hasOrderByHllType = false; Oid hllId = InvalidOid; Oid hllSchemaOid = InvalidOid; Oid hllTypeId = InvalidOid; ListCell *sortClauseCell = NULL; /* check whether HLL is loaded */ hllId = get_extension_oid(HLL_EXTENSION_NAME, true); if (!OidIsValid(hllId)) { return hasOrderByHllType; } hllSchemaOid = get_extension_schema(hllId); hllTypeId = TypeOid(hllSchemaOid, HLL_TYPE_NAME); foreach(sortClauseCell, sortClauseList) { SortGroupClause *sortClause = (SortGroupClause *) lfirst(sortClauseCell); Node *sortExpression = get_sortgroupclause_expr(sortClause, targetList); Oid sortColumnTypeId = exprType(sortExpression); if (sortColumnTypeId == hllTypeId) { hasOrderByHllType = true; break; } } return hasOrderByHllType; } citus-7.0.3/src/backend/distributed/planner/multi_logical_planner.c000066400000000000000000003234401317107136600255310ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_logical_planner.c * * Routines for constructing a logical plan tree from the given Query tree * structure. This new logical plan is based on multi-relational algebra rules. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/heapam.h" #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_class.h" #include "commands/defrem.h" #include "distributed/citus_clauses.h" #include "distributed/colocation_utils.h" #include "distributed/metadata_cache.h" #include "distributed/insert_select_planner.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/relation_restriction_equivalence.h" #include "distributed/multi_router_planner.h" #include "distributed/worker_protocol.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/prep.h" #include "optimizer/tlist.h" #include "optimizer/var.h" #include "parser/parsetree.h" #include "utils/datum.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/rel.h" #include "utils/relcache.h" /* Config variable managed via guc.c */ bool SubqueryPushdown = false; /* is subquery pushdown enabled */ /* Struct to differentiate different qualifier types in an expression tree walker */ typedef struct QualifierWalkerContext { List *baseQualifierList; List *outerJoinQualifierList; } QualifierWalkerContext; /* Function pointer type definition for apply join rule functions */ typedef MultiNode *(*RuleApplyFunction) (MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *joinClauses); static RuleApplyFunction RuleApplyFunctionArray[JOIN_RULE_LAST] = { 0 }; /* join rules */ /* Local functions forward declarations */ static bool SingleRelationRepartitionSubquery(Query *queryTree); static DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryPushdown(Query * originalQuery, PlannerRestrictionContext * plannerRestrictionContext); static DeferredErrorMessage * DeferErrorIfUnsupportedFilters(Query *subquery); static bool EqualOpExpressionLists(List *firstOpExpressionList, List *secondOpExpressionList); static DeferredErrorMessage * DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLimit); static DeferredErrorMessage * DeferErrorIfUnsupportedUnionQuery(Query *queryTree, bool outerMostQueryHasLimit); static bool ExtractSetOperationStatmentWalker(Node *node, List **setOperationList); static DeferredErrorMessage * DeferErrorIfUnsupportedTableCombination(Query *queryTree); static bool TargetListOnPartitionColumn(Query *query, List *targetEntryList); static FieldSelect * CompositeFieldRecursive(Expr *expression, Query *query); static bool FullCompositeFieldList(List *compositeFieldList); static MultiNode * MultiPlanTree(Query *queryTree); static void ErrorIfQueryNotSupported(Query *queryTree); static bool HasUnsupportedReferenceTableJoin( PlannerRestrictionContext *plannerRestrictionContext); static bool HasUnsupportedJoinWalker(Node *node, void *context); static bool ErrorHintRequired(const char *errorHint, Query *queryTree); static DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryRepartition(Query * subqueryTree); static bool HasTablesample(Query *queryTree); static bool HasOuterJoin(Query *queryTree); static bool HasOuterJoinWalker(Node *node, void *maxJoinLevel); static bool HasComplexJoinOrder(Query *queryTree); static bool HasComplexRangeTableType(Query *queryTree); static bool RelationInfoHasReferenceTable(PlannerInfo *plannerInfo, RelOptInfo *relationInfo); static void ValidateClauseList(List 
*clauseList); static void ValidateSubqueryPushdownClauseList(List *clauseList); static bool ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext); static List * MultiTableNodeList(List *tableEntryList, List *rangeTableList); static List * AddMultiCollectNodes(List *tableNodeList); static MultiNode * MultiJoinTree(List *joinOrderList, List *collectTableList, List *joinClauseList); static MultiCollect * CollectNodeForTable(List *collectTableList, uint32 rangeTableId); static MultiSelect * MultiSelectNode(List *whereClauseList); static bool IsSelectClause(Node *clause); static bool IsSublinkClause(Node *clause); static MultiProject * MultiProjectNode(List *targetEntryList); static MultiExtendedOp * MultiExtendedOpNode(Query *queryTree); /* Local functions forward declarations for applying joins */ static MultiNode * ApplyJoinRule(MultiNode *leftNode, MultiNode *rightNode, JoinRuleType ruleType, Var *partitionColumn, JoinType joinType, List *joinClauseList); static RuleApplyFunction JoinRuleApplyFunction(JoinRuleType ruleType); static MultiNode * ApplyBroadcastJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *joinClauses); static MultiNode * ApplyLocalJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *joinClauses); static MultiNode * ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *joinClauses); static MultiNode * ApplyDualPartitionJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *joinClauses); static MultiNode * ApplyCartesianProduct(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *joinClauses); /* * Local functions forward declarations for subquery pushdown. Note that these * functions will be removed with upcoming subquery changes. */ static Node * ResolveExternalParams(Node *inputNode, ParamListInfo boundParams); static MultiNode * MultiSubqueryPlanTree(Query *originalQuery, Query *queryTree, PlannerRestrictionContext * plannerRestrictionContext); static List * SublinkList(Query *originalQuery); static bool ExtractSublinkWalker(Node *node, List **sublinkList); static MultiNode * SubqueryPushdownMultiPlanTree(Query *queryTree); static List * CreateSubqueryTargetEntryList(List *columnList); static void UpdateVarMappingsForExtendedOpNode(List *columnList, List *subqueryTargetEntryList); static MultiTable * MultiSubqueryPushdownTable(Query *subquery); /* * MultiLogicalPlanCreate takes in both the original query and its corresponding modified * query tree yielded by the standard planner. It uses helper functions to create the logical * plan and adds a root node on top of it. The original query is only used for subquery * pushdown planning. * * In order to support external parameters for the queries where planning * is done on the original query, we need to replace the external parameters * manually. To achieve that for subquery pushdown planning, we pass boundParams * to this function. We need to do that since Citus is currently unable to send * parameters to the workers during execution. * * We also pass queryTree and plannerRestrictionContext to the planner. They * are primarily used to decide whether the subquery is safe to push down. * If not, they help to produce meaningful error messages for subquery * pushdown planning.
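* * As an illustrative example (hypothetical query, not from the original source): when the * original query 'SELECT ... WHERE key = $1' is planned with a bound value of 5, * ResolveExternalParams() below replaces the $1 parameter with the constant 5 before * subquery pushdown planning continues.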
*/ MultiTreeRoot * MultiLogicalPlanCreate(Query *originalQuery, Query *queryTree, PlannerRestrictionContext *plannerRestrictionContext, ParamListInfo boundParams) { MultiNode *multiQueryNode = NULL; MultiTreeRoot *rootNode = NULL; /* * We check the existence of subqueries in FROM clause on the modified query * given that if postgres already flattened the subqueries, MultiPlanTree() * can plan corresponding distributed plan. * * We also check the existence of subqueries in WHERE clause. Note that * this check needs to be done on the original query given that * standard_planner() may replace the sublinks with anti/semi joins and * MultiPlanTree() cannot plan such queries. */ if (SubqueryEntryList(queryTree) != NIL || SublinkList(originalQuery) != NIL) { originalQuery = (Query *) ResolveExternalParams((Node *) originalQuery, boundParams); multiQueryNode = MultiSubqueryPlanTree(originalQuery, queryTree, plannerRestrictionContext); } else { multiQueryNode = MultiPlanTree(queryTree); } /* add a root node to serve as the permanent handle to the tree */ rootNode = CitusMakeNode(MultiTreeRoot); SetChild((MultiUnaryNode *) rootNode, multiQueryNode); return rootNode; } /* * ResolveExternalParams replaces the external parameters that appears * in the query with the corresponding entries in the boundParams. * * Note that this function is inspired by eval_const_expr() on Postgres. * We cannot use that function because it requires access to PlannerInfo. */ static Node * ResolveExternalParams(Node *inputNode, ParamListInfo boundParams) { /* consider resolving external parameters only when boundParams exists */ if (!boundParams) { return inputNode; } if (inputNode == NULL) { return NULL; } if (IsA(inputNode, Param)) { Param *paramToProcess = (Param *) inputNode; ParamExternData *correspondingParameterData = NULL; int numberOfParameters = boundParams->numParams; int parameterId = paramToProcess->paramid; int16 typeLength = 0; bool typeByValue = false; Datum constValue = 0; bool paramIsNull = false; int parameterIndex = 0; if (paramToProcess->paramkind != PARAM_EXTERN) { return inputNode; } if (parameterId < 0) { return inputNode; } /* parameterId starts from 1 */ parameterIndex = parameterId - 1; if (parameterIndex >= numberOfParameters) { return inputNode; } correspondingParameterData = &boundParams->params[parameterIndex]; if (!(correspondingParameterData->pflags & PARAM_FLAG_CONST)) { return inputNode; } get_typlenbyval(paramToProcess->paramtype, &typeLength, &typeByValue); paramIsNull = correspondingParameterData->isnull; if (paramIsNull) { constValue = 0; } else if (typeByValue) { constValue = correspondingParameterData->value; } else { /* * Out of paranoia ensure that datum lives long enough, * although bind params currently should always live * long enough. */ constValue = datumCopy(correspondingParameterData->value, typeByValue, typeLength); } return (Node *) makeConst(paramToProcess->paramtype, paramToProcess->paramtypmod, paramToProcess->paramcollid, typeLength, constValue, paramIsNull, typeByValue); } else if (IsA(inputNode, Query)) { return (Node *) query_tree_mutator((Query *) inputNode, ResolveExternalParams, boundParams, 0); } return expression_tree_mutator(inputNode, ResolveExternalParams, boundParams); } /* * SublinkList finds the subquery nodes in the where clause of the given query. Note * that the function should be called on the original query given that postgres * standard_planner() may convert the subqueries in WHERE clause to joins. 
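* * For instance (illustrative, hypothetical query): in 'SELECT ... WHERE a IN (SELECT b FROM * other_table)', the IN predicate appears as a SubLink in the WHERE clause qualifiers and is * collected by this function.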
*/ static List * SublinkList(Query *originalQuery) { FromExpr *joinTree = originalQuery->jointree; Node *queryQuals = NULL; List *sublinkList = NIL; if (!joinTree) { return NIL; } queryQuals = joinTree->quals; ExtractSublinkWalker(queryQuals, &sublinkList); return sublinkList; } /* * ExtractSublinkWalker walks over a quals node, and finds all sublinks * in that node. */ static bool ExtractSublinkWalker(Node *node, List **sublinkList) { bool walkerResult = false; if (node == NULL) { return false; } if (IsA(node, SubLink)) { (*sublinkList) = lappend(*sublinkList, node); } else { walkerResult = expression_tree_walker(node, ExtractSublinkWalker, sublinkList); } return walkerResult; } /* * MultiSubqueryPlanTree gets the query objects and returns logical plan * for subqueries. * * We currently have two different code paths for creating logic plan for subqueries: * (i) subquery pushdown * (ii) single relation repartition subquery * * In order to create the logical plan, we follow the algorithm below: * - If subquery pushdown planner can plan the query * - We're done, we create the multi plan tree and return * - Else * - If the query is not eligible for single table repartition subquery planning * - Throw the error that the subquery pushdown planner generated * - If it is eligible for single table repartition subquery planning * - Check for the errors for single table repartition subquery planning * - If no errors found, we're done. Create the multi plan and return * - If found errors, throw it */ static MultiNode * MultiSubqueryPlanTree(Query *originalQuery, Query *queryTree, PlannerRestrictionContext *plannerRestrictionContext) { MultiNode *multiQueryNode = NULL; DeferredErrorMessage *subqueryPushdownError = NULL; /* * This is a generic error check that applies to both subquery pushdown * and single table repartition subquery. */ ErrorIfQueryNotSupported(originalQuery); /* * In principle, we're first trying subquery pushdown planner. If it fails * to create a logical plan, continue with trying the single table * repartition subquery planning. */ subqueryPushdownError = DeferErrorIfUnsupportedSubqueryPushdown(originalQuery, plannerRestrictionContext); if (!subqueryPushdownError) { multiQueryNode = SubqueryPushdownMultiPlanTree(originalQuery); } else if (subqueryPushdownError) { bool singleRelationRepartitionSubquery = false; RangeTblEntry *subqueryRangeTableEntry = NULL; Query *subqueryTree = NULL; DeferredErrorMessage *repartitionQueryError = NULL; List *subqueryEntryList = NULL; /* * If not eligible for single relation repartition query, we should raise * subquery pushdown error. 
*/ singleRelationRepartitionSubquery = SingleRelationRepartitionSubquery(originalQuery); if (!singleRelationRepartitionSubquery) { RaiseDeferredErrorInternal(subqueryPushdownError, ERROR); } subqueryEntryList = SubqueryEntryList(queryTree); subqueryRangeTableEntry = (RangeTblEntry *) linitial(subqueryEntryList); Assert(subqueryRangeTableEntry->rtekind == RTE_SUBQUERY); subqueryTree = subqueryRangeTableEntry->subquery; repartitionQueryError = DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree); if (repartitionQueryError) { RaiseDeferredErrorInternal(repartitionQueryError, ERROR); } /* all checks have passed, safe to create the multi plan */ multiQueryNode = MultiPlanTree(queryTree); } Assert(multiQueryNode != NULL); return multiQueryNode; } /* * SingleRelationRepartitionSubquery returns true if the query is eligible for single * relation repartition subquery planning in the sense that: * - None of the levels of the subquery contains a join * - Only a single RTE_RELATION exists, which means only a single table * name is specified on the whole query * - No sublinks exist in the subquery * * Note that the caller should still call DeferErrorIfUnsupportedSubqueryRepartition() * to ensure that Citus supports the subquery. Also, this function is designed to run * on the original query. */ static bool SingleRelationRepartitionSubquery(Query *queryTree) { List *rangeTableIndexList = NULL; RangeTblEntry *rangeTableEntry = NULL; List *rangeTableList = queryTree->rtable; int rangeTableIndex = 0; /* we don't support subqueries in WHERE */ if (queryTree->hasSubLinks) { return false; } /* * Don't allow joins and set operations. If a join appears in the queryTree, the * length would be greater than 1. If only set operations exist, the length * would be 0. */ ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &rangeTableIndexList); if (list_length(rangeTableIndexList) != 1) { return false; } rangeTableIndex = linitial_int(rangeTableIndexList); rangeTableEntry = rt_fetch(rangeTableIndex, rangeTableList); if (rangeTableEntry->rtekind == RTE_RELATION) { return true; } else if (rangeTableEntry->rtekind == RTE_SUBQUERY) { Query *subqueryTree = rangeTableEntry->subquery; return SingleRelationRepartitionSubquery(subqueryTree); } return false; } /* * DeferErrorIfUnsupportedSubqueryPushdown iterates on the query's subquery * entry list and uses helper functions to check if we can push down the subquery * to worker nodes. These helper functions return a deferred error if we * cannot push down the subquery.
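* * As an illustrative example (hypothetical tables, not from the original source): subqueries * whose relations are joined with 't1.dist_key = t2.dist_key' can satisfy these checks, while * joins on columns other than the distribution key fail the distribution key equivalence * check below.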
*/ static DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryPushdown(Query *originalQuery, PlannerRestrictionContext * plannerRestrictionContext) { bool outerMostQueryHasLimit = false; ListCell *subqueryCell = NULL; List *subqueryList = NIL; DeferredErrorMessage *error = NULL; RelationRestrictionContext *relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; if (originalQuery->limitCount != NULL) { outerMostQueryHasLimit = true; } /* * We're checking two things here: * (i) If the query contains a top level union, ensure that all leaves * return the partition key at the same position * (ii) Else, check whether all relations joined on the partition key or not */ if (ContainsUnionSubquery(originalQuery)) { if (!SafeToPushdownUnionSubquery(relationRestrictionContext)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot pushdown the subquery since all leaves of " "the UNION does not include partition key at the " "same position", "Each leaf query of the UNION should return " "partition key at the same position on its " "target list.", NULL); } } else if (!RestrictionEquivalenceForPartitionKeys(plannerRestrictionContext)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot pushdown the subquery since all relations are not " "joined using distribution keys", "Each relation should be joined with at least " "one another relation using distribution keys and " "equality operator.", NULL); } else if (HasUnsupportedReferenceTableJoin(plannerRestrictionContext)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot pushdown the subquery", "There exist a reference table in the outer part of the " "outer join", NULL); } /* * We first extract all the queries that appear in the original query. Later, * we delete the original query given that error rules does not apply to the * top level query. For instance, we could support any LIMIT/ORDER BY on the * top level query. */ ExtractQueryWalker((Node *) originalQuery, &subqueryList); subqueryList = list_delete(subqueryList, originalQuery); /* iterate on the subquery list and error out accordingly */ foreach(subqueryCell, subqueryList) { Query *subquery = lfirst(subqueryCell); error = DeferErrorIfCannotPushdownSubquery(subquery, outerMostQueryHasLimit); if (error) { return error; } error = DeferErrorIfUnsupportedFilters(subquery); if (error) { return error; } } return NULL; } /* * DeferErrorIfUnsupportedFilters checks if all leaf queries in the given query have * same filter on the partition column. Note that if there are queries without * any filter on the partition column, they don't break this prerequisite. */ static DeferredErrorMessage * DeferErrorIfUnsupportedFilters(Query *subquery) { List *queryList = NIL; ListCell *queryCell = NULL; List *subqueryOpExpressionList = NIL; List *relationIdList = RelationIdList(subquery); Var *partitionColumn = NULL; Oid relationId = InvalidOid; /* * If there are no appropriate relations, we're going to error out on * DeferErrorIfCannotPushdownSubquery(). It may happen once the subquery * does not include a relation. */ if (relationIdList == NIL) { return NULL; } /* * Get relation id of any relation in the subquery and create partiton column * for this relation. We will use this column to replace columns on operator * expressions on different tables. Then we compare these operator expressions * to see if they consist of same operator and constant value. 
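* * For example (illustrative): leaf queries filtered with 'a.key = 5' and 'b.key = 5' * compare as equal after their columns are replaced with the common partition column, * whereas 'a.key = 5' and 'b.key = 6' do not and cause a deferred error below.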
*/ relationId = linitial_oid(relationIdList); partitionColumn = PartitionColumn(relationId, 0); ExtractQueryWalker((Node *) subquery, &queryList); foreach(queryCell, queryList) { Query *query = (Query *) lfirst(queryCell); List *opExpressionList = NIL; List *newOpExpressionList = NIL; bool leafQuery = LeafQuery(query); if (!leafQuery) { continue; } opExpressionList = PartitionColumnOpExpressionList(query); if (opExpressionList == NIL) { continue; } newOpExpressionList = ReplaceColumnsInOpExpressionList(opExpressionList, partitionColumn); if (subqueryOpExpressionList == NIL) { subqueryOpExpressionList = newOpExpressionList; } else { bool equalOpExpressionLists = EqualOpExpressionLists(subqueryOpExpressionList, newOpExpressionList); if (!equalOpExpressionLists) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot push down this subquery", "Currently all leaf queries need to " "have same filters on partition column", NULL); } } } return NULL; } /* * EqualOpExpressionLists checks if given two operator expression lists are * equal. */ static bool EqualOpExpressionLists(List *firstOpExpressionList, List *secondOpExpressionList) { bool equalOpExpressionLists = false; ListCell *firstOpExpressionCell = NULL; uint32 equalOpExpressionCount = 0; uint32 firstOpExpressionCount = list_length(firstOpExpressionList); uint32 secondOpExpressionCount = list_length(secondOpExpressionList); if (firstOpExpressionCount != secondOpExpressionCount) { return false; } foreach(firstOpExpressionCell, firstOpExpressionList) { OpExpr *firstOpExpression = (OpExpr *) lfirst(firstOpExpressionCell); ListCell *secondOpExpressionCell = NULL; foreach(secondOpExpressionCell, secondOpExpressionList) { OpExpr *secondOpExpression = (OpExpr *) lfirst(secondOpExpressionCell); bool equalExpressions = equal(firstOpExpression, secondOpExpression); if (equalExpressions) { equalOpExpressionCount++; continue; } } } if (equalOpExpressionCount == firstOpExpressionCount) { equalOpExpressionLists = true; } return equalOpExpressionLists; } /* * DeferErrorIfCannotPushdownSubquery checks if we can push down the given * subquery to worker nodes. If we cannot push down the subquery, this function * returns a deferred error. * * We can push down a subquery if it follows rules below: * a. If there is an aggregate, it must be grouped on partition column. * b. If there is a join, it must be between two regular tables or two subqueries. * We don't support join between a regular table and a subquery. And columns on * the join condition must be partition columns. * c. If there is a distinct clause, it must be on the partition column. * * This function is very similar to ErrorIfQueryNotSupported() in logical * planner, but we don't reuse it, because differently for subqueries we support * a subset of distinct, union and left joins. * * Note that this list of checks is not exhaustive, there can be some cases * which we let subquery to run but returned results would be wrong. Such as if * a subquery has a group by on another subquery which includes order by with * limit, we let this query to run, but results could be wrong depending on the * features of underlying tables. 
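* * As a brief illustration (hypothetical columns, not from the original source): a subquery * that aggregates with 'GROUP BY dist_key' satisfies rule (a) above, while 'GROUP BY * other_column' is deferred with a group by error.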
*/ static DeferredErrorMessage * DeferErrorIfCannotPushdownSubquery(Query *subqueryTree, bool outerMostQueryHasLimit) { bool preconditionsSatisfied = true; char *errorDetail = NULL; DeferredErrorMessage *deferredError = NULL; deferredError = DeferErrorIfUnsupportedTableCombination(subqueryTree); if (deferredError) { return deferredError; } if (subqueryTree->rtable == NIL) { preconditionsSatisfied = false; errorDetail = "Subqueries without relations are unsupported"; } if (subqueryTree->hasWindowFuncs) { preconditionsSatisfied = false; errorDetail = "Window functions are currently unsupported"; } if (subqueryTree->limitOffset) { preconditionsSatisfied = false; errorDetail = "Offset clause is currently unsupported"; } /* limit is not supported when SubqueryPushdown is not set */ if (subqueryTree->limitCount && !SubqueryPushdown) { preconditionsSatisfied = false; errorDetail = "Limit in subquery is currently unsupported"; } /* * Limit is partially supported when SubqueryPushdown is set. * The outermost query must have a limit clause. */ if (subqueryTree->limitCount && SubqueryPushdown && !outerMostQueryHasLimit) { preconditionsSatisfied = false; errorDetail = "Limit in subquery without limit in the outermost query is " "unsupported"; } if (subqueryTree->setOperations) { deferredError = DeferErrorIfUnsupportedUnionQuery(subqueryTree, outerMostQueryHasLimit); if (deferredError) { return deferredError; } } if (subqueryTree->hasRecursive) { preconditionsSatisfied = false; errorDetail = "Recursive queries are currently unsupported"; } if (subqueryTree->cteList) { preconditionsSatisfied = false; errorDetail = "Common Table Expressions are currently unsupported"; } if (subqueryTree->hasForUpdate) { preconditionsSatisfied = false; errorDetail = "For Update/Share commands are currently unsupported"; } /* group clause list must include partition column */ if (subqueryTree->groupClause) { List *groupClauseList = subqueryTree->groupClause; List *targetEntryList = subqueryTree->targetList; List *groupTargetEntryList = GroupTargetEntryList(groupClauseList, targetEntryList); bool groupOnPartitionColumn = TargetListOnPartitionColumn(subqueryTree, groupTargetEntryList); if (!groupOnPartitionColumn) { preconditionsSatisfied = false; errorDetail = "Group by list without partition column is currently " "unsupported"; } } /* we don't support aggregates without group by */ if (subqueryTree->hasAggs && (subqueryTree->groupClause == NULL)) { preconditionsSatisfied = false; errorDetail = "Aggregates without group by are currently unsupported"; } /* having clause without group by on partition column is not supported */ if (subqueryTree->havingQual && (subqueryTree->groupClause == NULL)) { preconditionsSatisfied = false; errorDetail = "Having qual without group by on partition column is " "currently unsupported"; } /* distinct clause list must include partition column */ if (subqueryTree->distinctClause) { List *distinctClauseList = subqueryTree->distinctClause; List *targetEntryList = subqueryTree->targetList; List *distinctTargetEntryList = GroupTargetEntryList(distinctClauseList, targetEntryList); bool distinctOnPartitionColumn = TargetListOnPartitionColumn(subqueryTree, distinctTargetEntryList); if (!distinctOnPartitionColumn) { preconditionsSatisfied = false; errorDetail = "Distinct on columns without partition column is " "currently unsupported"; } } /* finally check and return deferred if not satisfied */ if (!preconditionsSatisfied) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot push down this 
subquery", errorDetail, NULL); } return NULL; } /* * DeferErrorIfUnsupportedUnionQuery is a helper function for ErrorIfCannotPushdownSubquery(). * The function also errors out for set operations INTERSECT and EXCEPT. */ static DeferredErrorMessage * DeferErrorIfUnsupportedUnionQuery(Query *subqueryTree, bool outerMostQueryHasLimit) { List *setOperationStatementList = NIL; ListCell *setOperationStatmentCell = NULL; ExtractSetOperationStatmentWalker((Node *) subqueryTree->setOperations, &setOperationStatementList); foreach(setOperationStatmentCell, setOperationStatementList) { SetOperationStmt *setOperation = (SetOperationStmt *) lfirst(setOperationStatmentCell); Node *leftArg = setOperation->larg; Node *rightArg = setOperation->rarg; int leftArgRTI = 0; int rightArgRTI = 0; if (setOperation->op != SETOP_UNION) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot push down this subquery", "Intersect and Except are currently unsupported", NULL); } if (IsA(leftArg, RangeTblRef)) { Node *leftArgSubquery = NULL; leftArgRTI = ((RangeTblRef *) leftArg)->rtindex; leftArgSubquery = (Node *) rt_fetch(leftArgRTI, subqueryTree->rtable)->subquery; if (HasReferenceTable(leftArgSubquery)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot push down this subquery ", "Reference tables are not supported with union" " operator", NULL); } } if (IsA(rightArg, RangeTblRef)) { Node *rightArgSubquery = NULL; rightArgRTI = ((RangeTblRef *) rightArg)->rtindex; rightArgSubquery = (Node *) rt_fetch(rightArgRTI, subqueryTree->rtable)->subquery; if (HasReferenceTable(rightArgSubquery)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot push down this subquery", "Reference tables are not supported with union" " operator", NULL); } } } return NULL; } /* * ExtractSetOperationStatementWalker walks over a set operations statment, * and finds all set operations in the tree. */ static bool ExtractSetOperationStatmentWalker(Node *node, List **setOperationList) { bool walkerResult = false; if (node == NULL) { return false; } if (IsA(node, SetOperationStmt)) { SetOperationStmt *setOperation = (SetOperationStmt *) node; (*setOperationList) = lappend(*setOperationList, setOperation); } walkerResult = expression_tree_walker(node, ExtractSetOperationStatmentWalker, setOperationList); return walkerResult; } /* * DeferErrorIfUnsupportedTableCombination checks if the given query tree contains any * unsupported range table combinations. For this, the function walks over all * range tables in the join tree, and checks if they correspond to simple relations * or subqueries. It also checks if there is a join between a regular table and * a subquery and if join is on more than two range table entries. If any error is found, * a deferred error is returned. Else, NULL is returned. */ static DeferredErrorMessage * DeferErrorIfUnsupportedTableCombination(Query *queryTree) { List *rangeTableList = queryTree->rtable; List *joinTreeTableIndexList = NIL; ListCell *joinTreeTableIndexCell = NULL; bool unsupporteTableCombination = false; char *errorDetail = NULL; uint32 relationRangeTableCount = 0; uint32 subqueryRangeTableCount = 0; /* * Extract all range table indexes from the join tree. Note that sub-queries * that get pulled up by PostgreSQL don't appear in this join tree. */ ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList); foreach(joinTreeTableIndexCell, joinTreeTableIndexList) { /* * Join tree's range table index starts from 1 in the query tree. But, * list indexes start from 0. 
*/ int joinTreeTableIndex = lfirst_int(joinTreeTableIndexCell); int rangeTableListIndex = joinTreeTableIndex - 1; RangeTblEntry *rangeTableEntry = (RangeTblEntry *) list_nth(rangeTableList, rangeTableListIndex); /* * Check if the range table in the join tree is a simple relation or a * subquery. */ if (rangeTableEntry->rtekind == RTE_RELATION) { relationRangeTableCount++; } else if (rangeTableEntry->rtekind == RTE_SUBQUERY) { subqueryRangeTableCount++; } else { unsupporteTableCombination = true; errorDetail = "Table expressions other than simple relations and " "subqueries are currently unsupported"; break; } } /* finally check and error out if not satisfied */ if (unsupporteTableCombination) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot push down this subquery", errorDetail, NULL); } return NULL; } /* * TargetListOnPartitionColumn checks if at least one target list entry is on * partition column or the table is a reference table. */ static bool TargetListOnPartitionColumn(Query *query, List *targetEntryList) { bool targetListOnPartitionColumn = false; List *compositeFieldList = NIL; ListCell *targetEntryCell = NULL; foreach(targetEntryCell, targetEntryList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); Expr *targetExpression = targetEntry->expr; bool isPartitionColumn = IsPartitionColumn(targetExpression, query); Oid relationId = InvalidOid; Var *column = NULL; FindReferencedTableColumn(targetExpression, NIL, query, &relationId, &column); /* * If the expression belongs to reference table directly returns true. * We can assume that target list entry always on partition column of * reference tables. */ if (IsDistributedTable(relationId) && PartitionMethod(relationId) == DISTRIBUTE_BY_NONE) { targetListOnPartitionColumn = true; break; } if (isPartitionColumn) { FieldSelect *compositeField = CompositeFieldRecursive(targetExpression, query); if (compositeField) { compositeFieldList = lappend(compositeFieldList, compositeField); } else { targetListOnPartitionColumn = true; break; } } } /* check composite fields */ if (!targetListOnPartitionColumn) { bool fullCompositeFieldList = FullCompositeFieldList(compositeFieldList); if (fullCompositeFieldList) { targetListOnPartitionColumn = true; } } return targetListOnPartitionColumn; } /* * FullCompositeFieldList gets a composite field list, and checks if all fields * of composite type are used in the list. 
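* * For example (illustrative): for a composite partition column of a type with fields * (a, b), both (col).a and (col).b must appear in the list; if only (col).a is referenced, * the function returns false.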
*/ static bool FullCompositeFieldList(List *compositeFieldList) { bool fullCompositeFieldList = true; bool *compositeFieldArray = NULL; uint32 compositeFieldCount = 0; uint32 fieldIndex = 0; ListCell *fieldSelectCell = NULL; foreach(fieldSelectCell, compositeFieldList) { FieldSelect *fieldSelect = (FieldSelect *) lfirst(fieldSelectCell); uint32 compositeFieldIndex = 0; Expr *fieldExpression = fieldSelect->arg; if (!IsA(fieldExpression, Var)) { continue; } if (compositeFieldArray == NULL) { uint32 index = 0; Var *compositeColumn = (Var *) fieldExpression; Oid compositeTypeId = compositeColumn->vartype; Oid compositeRelationId = get_typ_typrelid(compositeTypeId); /* get composite type attribute count */ Relation relation = relation_open(compositeRelationId, AccessShareLock); compositeFieldCount = relation->rd_att->natts; compositeFieldArray = palloc0(compositeFieldCount * sizeof(bool)); relation_close(relation, AccessShareLock); for (index = 0; index < compositeFieldCount; index++) { compositeFieldArray[index] = false; } } compositeFieldIndex = fieldSelect->fieldnum - 1; compositeFieldArray[compositeFieldIndex] = true; } for (fieldIndex = 0; fieldIndex < compositeFieldCount; fieldIndex++) { if (!compositeFieldArray[fieldIndex]) { fullCompositeFieldList = false; } } if (compositeFieldCount == 0) { fullCompositeFieldList = false; } return fullCompositeFieldList; } /* * CompositeFieldRecursive recursively finds composite field in the query tree * referred by given expression. If expression does not refer to a composite * field, then it returns NULL. * * If expression is a field select we directly return composite field. If it is * a column is referenced from a subquery, then we recursively check that subquery * until we reach the source of that column, and find composite field. If this * column is referenced from join range table entry, then we resolve which join * column it refers and recursively use this column with the same query. */ static FieldSelect * CompositeFieldRecursive(Expr *expression, Query *query) { FieldSelect *compositeField = NULL; List *rangetableList = query->rtable; Index rangeTableEntryIndex = 0; RangeTblEntry *rangeTableEntry = NULL; Var *candidateColumn = NULL; if (IsA(expression, FieldSelect)) { compositeField = (FieldSelect *) expression; return compositeField; } if (IsA(expression, Var)) { candidateColumn = (Var *) expression; } else { return NULL; } rangeTableEntryIndex = candidateColumn->varno - 1; rangeTableEntry = list_nth(rangetableList, rangeTableEntryIndex); if (rangeTableEntry->rtekind == RTE_SUBQUERY) { Query *subquery = rangeTableEntry->subquery; List *targetEntryList = subquery->targetList; AttrNumber targetEntryIndex = candidateColumn->varattno - 1; TargetEntry *subqueryTargetEntry = list_nth(targetEntryList, targetEntryIndex); Expr *subqueryExpression = subqueryTargetEntry->expr; compositeField = CompositeFieldRecursive(subqueryExpression, subquery); } else if (rangeTableEntry->rtekind == RTE_JOIN) { List *joinColumnList = rangeTableEntry->joinaliasvars; AttrNumber joinColumnIndex = candidateColumn->varattno - 1; Expr *joinColumn = list_nth(joinColumnList, joinColumnIndex); compositeField = CompositeFieldRecursive(joinColumn, query); } return compositeField; } /* * SubqueryEntryList finds the subquery nodes in the range table entry list, and * builds a list of subquery range table entries from these subquery nodes. Range * table entry list also includes subqueries which are pulled up. 
We don't want * to add pulled up subqueries to list, so we walk over join tree indexes and * check range table entries referenced in the join tree. */ List * SubqueryEntryList(Query *queryTree) { List *rangeTableList = queryTree->rtable; List *subqueryEntryList = NIL; List *joinTreeTableIndexList = NIL; ListCell *joinTreeTableIndexCell = NULL; /* * Extract all range table indexes from the join tree. Note that here we * only walk over range table entries at this level and do not recurse into * subqueries. */ ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList); foreach(joinTreeTableIndexCell, joinTreeTableIndexList) { /* * Join tree's range table index starts from 1 in the query tree. But, * list indexes start from 0. */ int joinTreeTableIndex = lfirst_int(joinTreeTableIndexCell); int rangeTableListIndex = joinTreeTableIndex - 1; RangeTblEntry *rangeTableEntry = (RangeTblEntry *) list_nth(rangeTableList, rangeTableListIndex); if (rangeTableEntry->rtekind == RTE_SUBQUERY) { subqueryEntryList = lappend(subqueryEntryList, rangeTableEntry); } } return subqueryEntryList; } /* * MultiPlanTree takes in a parsed query tree and uses that tree to construct a * logical plan. This plan is based on multi-relational algebra. This function * creates the logical plan in several steps. * * First, the function checks if there is a subquery. If there is a subquery * it recursively creates nested multi trees. If this query has a subquery, the * function does not create any join trees and jumps to last step. * * If there is no subquery, the function calculates the join order using tables * in the query and join clauses between the tables. Second, the function * starts building the logical plan from the bottom-up, and begins with the table * and collect nodes. Third, the function builds the join tree using the join * order information and table nodes. * * In the last step, the function adds the select, project, aggregate, sort, * group, and limit nodes if they appear in the original query tree. */ static MultiNode * MultiPlanTree(Query *queryTree) { List *rangeTableList = queryTree->rtable; List *targetEntryList = queryTree->targetList; List *whereClauseList = NIL; List *joinClauseList = NIL; List *joinOrderList = NIL; List *tableEntryList = NIL; List *tableNodeList = NIL; List *collectTableList = NIL; List *subqueryEntryList = NIL; MultiNode *joinTreeNode = NULL; MultiSelect *selectNode = NULL; MultiProject *projectNode = NULL; MultiExtendedOp *extendedOpNode = NULL; MultiNode *currentTopNode = NULL; /* verify we can perform distributed planning on this query */ ErrorIfQueryNotSupported(queryTree); /* extract where clause qualifiers and verify we can plan for them */ whereClauseList = WhereClauseList(queryTree->jointree); ValidateClauseList(whereClauseList); /* * If we have a subquery, build a multi table node for the subquery and * add a collect node on top of the multi table node. 
*/ subqueryEntryList = SubqueryEntryList(queryTree); if (subqueryEntryList != NIL) { RangeTblEntry *subqueryRangeTableEntry = NULL; MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect); MultiTable *subqueryNode = NULL; MultiNode *subqueryExtendedNode = NULL; Query *subqueryTree = NULL; List *whereClauseColumnList = NIL; List *targetListColumnList = NIL; List *columnList = NIL; ListCell *columnCell = NULL; /* we only support single subquery in the entry list */ Assert(list_length(subqueryEntryList) == 1); subqueryRangeTableEntry = (RangeTblEntry *) linitial(subqueryEntryList); subqueryTree = subqueryRangeTableEntry->subquery; /* ensure if subquery satisfies preconditions */ Assert(DeferErrorIfUnsupportedSubqueryRepartition(subqueryTree) == NULL); subqueryNode = CitusMakeNode(MultiTable); subqueryNode->relationId = SUBQUERY_RELATION_ID; subqueryNode->rangeTableId = SUBQUERY_RANGE_TABLE_ID; subqueryNode->partitionColumn = NULL; subqueryNode->alias = NULL; subqueryNode->referenceNames = NULL; /* * We disregard pulled subqueries. This changes order of range table list. * We do not allow subquery joins, so we will have only one range table * entry in range table list after dropping pulled subquery. For this * reason, here we are updating columns in the most outer query for where * clause list and target list accordingly. */ Assert(list_length(subqueryEntryList) == 1); whereClauseColumnList = pull_var_clause_default((Node *) whereClauseList); targetListColumnList = pull_var_clause_default((Node *) targetEntryList); columnList = list_concat(whereClauseColumnList, targetListColumnList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); column->varno = 1; } /* recursively create child nested multitree */ subqueryExtendedNode = MultiPlanTree(subqueryTree); SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode); SetChild((MultiUnaryNode *) subqueryNode, subqueryExtendedNode); currentTopNode = (MultiNode *) subqueryCollectNode; } else { bool hasOuterJoin = false; /* * We calculate the join order using the list of tables in the query and * the join clauses between them. Note that this function owns the table * entry list's memory, and JoinOrderList() shallow copies the list's * elements. 
*/ joinClauseList = JoinClauseList(whereClauseList); tableEntryList = UsedTableEntryList(queryTree); /* build the list of multi table nodes */ tableNodeList = MultiTableNodeList(tableEntryList, rangeTableList); /* add collect nodes on top of the multi table nodes */ collectTableList = AddMultiCollectNodes(tableNodeList); hasOuterJoin = HasOuterJoin(queryTree); if (hasOuterJoin) { /* use the user-defined join order when there are outer joins */ joinOrderList = FixedJoinOrderList(queryTree->jointree, tableEntryList); } else { /* find best join order for commutative inner joins */ joinOrderList = JoinOrderList(tableEntryList, joinClauseList); } /* build join tree using the join order and collected tables */ joinTreeNode = MultiJoinTree(joinOrderList, collectTableList, joinClauseList); currentTopNode = joinTreeNode; } Assert(currentTopNode != NULL); /* build select node if the query has selection criteria */ selectNode = MultiSelectNode(whereClauseList); if (selectNode != NULL) { SetChild((MultiUnaryNode *) selectNode, currentTopNode); currentTopNode = (MultiNode *) selectNode; } /* build project node for the columns to project */ projectNode = MultiProjectNode(targetEntryList); SetChild((MultiUnaryNode *) projectNode, currentTopNode); currentTopNode = (MultiNode *) projectNode; /* * We build the extended operator node to capture aggregate functions, group * clauses, sort clauses, limit/offset clauses, and expressions. We need to * distinguish between aggregates and expressions; and we address this later * in the logical optimizer. */ extendedOpNode = MultiExtendedOpNode(queryTree); SetChild((MultiUnaryNode *) extendedOpNode, currentTopNode); currentTopNode = (MultiNode *) extendedOpNode; return currentTopNode; } /* * HasUnsupportedReferenceTableJoin returns true if there exists a outer join * between reference table and distributed tables which does not follow * the rules : * - Reference tables can not be located in the outer part of the semi join or the * anti join. Otherwise, we may have duplicate results. Although getting duplicate * results is not possible by checking the equality on the column of the reference * table and partition column of distributed table, we still keep these checks. * Because, using the reference table in the outer part of the semi join or anti * join is not very common. * - Reference tables can not be located in the outer part of the left join * (Note that PostgreSQL converts right joins to left joins. While converting * join types, innerrel and outerrel are also switched.) Otherwise we will * definitely have duplicate rows. Beside, reference tables can not be used * with full outer joins because of the same reason. 
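* * As an illustrative example (hypothetical tables): 'reference_table LEFT JOIN * distributed_table ON ...' places the reference table on the outer side and is rejected * by this check, while 'distributed_table LEFT JOIN reference_table ON ...' keeps it on * the inner side and passes.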
*/ static bool HasUnsupportedReferenceTableJoin(PlannerRestrictionContext *plannerRestrictionContext) { List *joinRestrictionList = plannerRestrictionContext->joinRestrictionContext->joinRestrictionList; ListCell *joinRestrictionCell = NULL; foreach(joinRestrictionCell, joinRestrictionList) { JoinRestriction *joinRestriction = (JoinRestriction *) lfirst( joinRestrictionCell); JoinType joinType = joinRestriction->joinType; PlannerInfo *plannerInfo = joinRestriction->plannerInfo; RelOptInfo *innerrel = joinRestriction->innerrel; RelOptInfo *outerrel = joinRestriction->outerrel; if (joinType == JOIN_SEMI || joinType == JOIN_ANTI || joinType == JOIN_LEFT) { if (RelationInfoHasReferenceTable(plannerInfo, outerrel)) { return true; } } else if (joinType == JOIN_FULL) { if (RelationInfoHasReferenceTable(plannerInfo, innerrel) || RelationInfoHasReferenceTable(plannerInfo, outerrel)) { return true; } } } return false; } /* * RelationInfoHasReferenceTable check whether the relationInfo has reference table. * Since relation ids of relationInfo indexes to the range table entry list of * planner info, planner info is also passed. */ static bool RelationInfoHasReferenceTable(PlannerInfo *plannerInfo, RelOptInfo *relationInfo) { Relids relids = bms_copy(relationInfo->relids); int relationId = -1; while ((relationId = bms_first_member(relids)) >= 0) { RangeTblEntry *rangeTableEntry = plannerInfo->simple_rte_array[relationId]; /* relationInfo has this range table entry */ if (HasReferenceTable((Node *) rangeTableEntry)) { return true; } } return false; } /* * HasReferenceTable checks whether there exist a reference table in the * given node. */ bool HasReferenceTable(Node *node) { List *relationList = NIL; ListCell *relationCell = NULL; ExtractRangeTableRelationWalkerWithRTEExpand(node, &relationList); foreach(relationCell, relationList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(relationCell); Oid relationId = rangeTableEntry->relid; if (IsDistributedTable(relationId) && PartitionMethod(relationId) == DISTRIBUTE_BY_NONE) { return true; } } return false; } /* * ErrorIfQueryNotSupported checks that we can perform distributed planning for * the given query. The checks in this function will be removed as we support * more functionality in our distributed planning. */ static void ErrorIfQueryNotSupported(Query *queryTree) { char *errorMessage = NULL; bool hasTablesample = false; bool hasUnsupportedJoin = false; bool hasComplexJoinOrder = false; bool hasComplexRangeTableType = false; bool preconditionsSatisfied = true; const char *errorHint = NULL; const char *joinHint = "Consider joining tables on partition column and have " "equal filter on joining columns."; const char *filterHint = "Consider using an equality filter on the distributed " "table's partition column."; /* * There could be Sublinks in the target list as well. To produce better * error messages we're checking sublinks in the where clause. 
*/ if (queryTree->hasSubLinks && SublinkList(queryTree) == NIL) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with subquery outside the " "FROM and WHERE clauses"; errorHint = filterHint; } if (queryTree->hasWindowFuncs) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with window functions"; errorHint = filterHint; } if (queryTree->setOperations) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with UNION, INTERSECT, or " "EXCEPT"; errorHint = filterHint; } if (queryTree->hasRecursive) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with RECURSIVE"; errorHint = filterHint; } if (queryTree->cteList) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with common table expressions"; errorHint = filterHint; } if (queryTree->hasForUpdate) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with FOR UPDATE/SHARE commands"; errorHint = filterHint; } if (queryTree->distinctClause) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with DISTINCT clause"; errorHint = filterHint; } if (queryTree->groupingSets) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with GROUPING SETS, CUBE, " "or ROLLUP"; errorHint = filterHint; } hasTablesample = HasTablesample(queryTree); if (hasTablesample) { preconditionsSatisfied = false; errorMessage = "could not run distributed query which use TABLESAMPLE"; errorHint = filterHint; } hasUnsupportedJoin = HasUnsupportedJoinWalker((Node *) queryTree->jointree, NULL); if (hasUnsupportedJoin) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with join types other than " "INNER or OUTER JOINS"; errorHint = joinHint; } hasComplexJoinOrder = HasComplexJoinOrder(queryTree); if (hasComplexJoinOrder) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with complex join orders"; errorHint = joinHint; } hasComplexRangeTableType = HasComplexRangeTableType(queryTree); if (hasComplexRangeTableType) { preconditionsSatisfied = false; errorMessage = "could not run distributed query with complex table expressions"; errorHint = filterHint; } /* finally check and error out if not satisfied */ if (!preconditionsSatisfied) { bool showHint = ErrorHintRequired(errorHint, queryTree); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("%s", errorMessage), showHint ? errhint("%s", errorHint) : 0)); } } /* HasTablesample returns tree if the query contains tablesample */ static bool HasTablesample(Query *queryTree) { List *rangeTableList = queryTree->rtable; ListCell *rangeTableEntryCell = NULL; bool hasTablesample = false; foreach(rangeTableEntryCell, rangeTableList) { RangeTblEntry *rangeTableEntry = lfirst(rangeTableEntryCell); if (rangeTableEntry->tablesample) { hasTablesample = true; break; } } return hasTablesample; } /* * HasUnsupportedJoinWalker returns tree if the query contains an unsupported * join type. We currently support inner, left, right, full and anti joins. * Semi joins are not supported. A full description of these join types is * included in nodes/nodes.h. 
*/ static bool HasUnsupportedJoinWalker(Node *node, void *context) { bool hasUnsupportedJoin = false; if (node == NULL) { return false; } if (IsA(node, JoinExpr)) { JoinExpr *joinExpr = (JoinExpr *) node; JoinType joinType = joinExpr->jointype; bool outerJoin = IS_OUTER_JOIN(joinType); if (!outerJoin && joinType != JOIN_INNER) { hasUnsupportedJoin = true; } } if (!hasUnsupportedJoin) { hasUnsupportedJoin = expression_tree_walker(node, HasUnsupportedJoinWalker, NULL); } return hasUnsupportedJoin; } /* * ErrorHintRequired returns true if an error hint should be displayed with the * query error message. The error hint is valid only for queries involving reference * and hash partitioned tables. If more than one hash distributed table is * present, we display the hint only if the tables are colocated. If the query * only has reference table(s), then it is handled by the router planner. */ static bool ErrorHintRequired(const char *errorHint, Query *queryTree) { List *rangeTableList = NIL; ListCell *rangeTableCell = NULL; List *colocationIdList = NIL; if (errorHint == NULL) { return false; } ExtractRangeTableRelationWalker((Node *) queryTree, &rangeTableList); foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(rangeTableCell); Oid relationId = rte->relid; char partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_NONE) { continue; } else if (partitionMethod == DISTRIBUTE_BY_HASH) { int colocationId = TableColocationId(relationId); colocationIdList = list_append_unique_int(colocationIdList, colocationId); } else { return false; } } /* do not display the hint if there is more than one colocation group */ if (list_length(colocationIdList) > 1) { return false; } return true; } /* * DeferErrorIfUnsupportedSubqueryRepartition checks that we can perform distributed * planning for the given subquery. If not, a deferred error is returned. The function * recursively does this check for all lower levels of the subquery. */ static DeferredErrorMessage * DeferErrorIfUnsupportedSubqueryRepartition(Query *subqueryTree) { char *errorDetail = NULL; bool preconditionsSatisfied = true; List *joinTreeTableIndexList = NIL; int rangeTableIndex = 0; RangeTblEntry *rangeTableEntry = NULL; Query *innerSubquery = NULL; if (!subqueryTree->hasAggs) { preconditionsSatisfied = false; errorDetail = "Subqueries without aggregates are not supported yet"; } if (subqueryTree->groupClause == NIL) { preconditionsSatisfied = false; errorDetail = "Subqueries without group by clause are not supported yet"; } if (subqueryTree->sortClause != NULL) { preconditionsSatisfied = false; errorDetail = "Subqueries with order by clause are not supported yet"; } if (subqueryTree->limitCount != NULL) { preconditionsSatisfied = false; errorDetail = "Subqueries with limit are not supported yet"; } if (subqueryTree->limitOffset != NULL) { preconditionsSatisfied = false; errorDetail = "Subqueries with offset are not supported yet"; } if (subqueryTree->hasSubLinks) { preconditionsSatisfied = false; errorDetail = "Subqueries other than from-clause subqueries are unsupported"; } /* finally check and return error if conditions are not satisfied */ if (!preconditionsSatisfied) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed planning on this query", errorDetail, NULL); } /* * Extract all range table indexes from the join tree. Note that sub-queries * that get pulled up by PostgreSQL don't appear in this join tree.
*/ ExtractRangeTableIndexWalker((Node *) subqueryTree->jointree, &joinTreeTableIndexList); Assert(list_length(joinTreeTableIndexList) == 1); /* continue with the inner subquery */ rangeTableIndex = linitial_int(joinTreeTableIndexList); rangeTableEntry = rt_fetch(rangeTableIndex, subqueryTree->rtable); if (rangeTableEntry->rtekind == RTE_RELATION) { return NULL; } Assert(rangeTableEntry->rtekind == RTE_SUBQUERY); innerSubquery = rangeTableEntry->subquery; /* recursively continue to the inner subqueries */ return DeferErrorIfUnsupportedSubqueryRepartition(innerSubquery); } /* * HasOuterJoin returns true if query has a outer join. */ static bool HasOuterJoin(Query *queryTree) { bool hasOuterJoin = HasOuterJoinWalker((Node *) queryTree->jointree, NULL); return hasOuterJoin; } /* * HasOuterJoinWalker returns true if the query has an outer join. The context * parameter should be NULL. */ static bool HasOuterJoinWalker(Node *node, void *context) { bool hasOuterJoin = false; if (node == NULL) { return false; } if (IsA(node, JoinExpr)) { JoinExpr *joinExpr = (JoinExpr *) node; JoinType joinType = joinExpr->jointype; if (IS_OUTER_JOIN(joinType)) { hasOuterJoin = true; } } if (!hasOuterJoin) { hasOuterJoin = expression_tree_walker(node, HasOuterJoinWalker, NULL); } return hasOuterJoin; } /* * HasComplexJoinOrder returns true if join tree is not a left-handed tree i.e. * it has a join expression in at least one right argument. */ static bool HasComplexJoinOrder(Query *queryTree) { bool hasComplexJoinOrder = false; List *joinList = NIL; ListCell *joinCell = NULL; joinList = JoinExprList(queryTree->jointree); foreach(joinCell, joinList) { JoinExpr *joinExpr = lfirst(joinCell); if (IsA(joinExpr->rarg, JoinExpr)) { hasComplexJoinOrder = true; break; } } return hasComplexJoinOrder; } /* * HasComplexRangeTableType checks if the given query tree contains any complex * range table types. For this, the function walks over all range tables in the * join tree, and checks if they correspond to simple relations or subqueries. * If they don't, the function assumes the query has complex range tables. */ static bool HasComplexRangeTableType(Query *queryTree) { List *rangeTableList = queryTree->rtable; List *joinTreeTableIndexList = NIL; ListCell *joinTreeTableIndexCell = NULL; bool hasComplexRangeTableType = false; /* * Extract all range table indexes from the join tree. Note that sub-queries * that get pulled up by PostgreSQL don't appear in this join tree. */ ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList); foreach(joinTreeTableIndexCell, joinTreeTableIndexList) { /* * Join tree's range table index starts from 1 in the query tree. But, * list indexes start from 0. */ int joinTreeTableIndex = lfirst_int(joinTreeTableIndexCell); int rangeTableListIndex = joinTreeTableIndex - 1; RangeTblEntry *rangeTableEntry = (RangeTblEntry *) list_nth(rangeTableList, rangeTableListIndex); /* * Check if the range table in the join tree is a simple relation or a * subquery. */ if (rangeTableEntry->rtekind != RTE_RELATION && rangeTableEntry->rtekind != RTE_SUBQUERY) { hasComplexRangeTableType = true; } /* * Check if the subquery range table entry includes children inheritance. * * Note that PostgreSQL flattens out simple union all queries into an * append relation, sets "inh" field of RangeTblEntry to true and deletes * set operations. Here we check this for subqueries. 
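 *
 * For example, a subquery written as "(SELECT a FROM t1 UNION ALL SELECT a
 * FROM t2) u" may be flattened into an append relation; its range table entry
 * then keeps rtekind RTE_SUBQUERY but has inh set to true. t1 and t2 are
 * hypothetical tables used only for illustration.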
*/ if (rangeTableEntry->rtekind == RTE_SUBQUERY && rangeTableEntry->inh) { hasComplexRangeTableType = true; } } return hasComplexRangeTableType; } /* * ExtractRangeTableIndexWalker walks over a join tree, and finds all range * table indexes in that tree. */ bool ExtractRangeTableIndexWalker(Node *node, List **rangeTableIndexList) { bool walkerResult = false; if (node == NULL) { return false; } if (IsA(node, RangeTblRef)) { int rangeTableIndex = ((RangeTblRef *) node)->rtindex; (*rangeTableIndexList) = lappend_int(*rangeTableIndexList, rangeTableIndex); } else { walkerResult = expression_tree_walker(node, ExtractRangeTableIndexWalker, rangeTableIndexList); } return walkerResult; } /* * WhereClauseList walks over the FROM expression in the query tree, and builds * a list of all clauses from the expression tree. The function checks for both * implicitly and explicitly defined clauses, but only selects INNER join * explicit clauses, and skips any outer-join clauses. Explicit clauses are * expressed as "SELECT ... FROM R1 INNER JOIN R2 ON R1.A = R2.A". Implicit * joins differ in that they live in the WHERE clause, and are expressed as * "SELECT ... FROM ... WHERE R1.a = R2.a". */ List * WhereClauseList(FromExpr *fromExpr) { FromExpr *fromExprCopy = copyObject(fromExpr); QualifierWalkerContext *walkerContext = palloc0(sizeof(QualifierWalkerContext)); List *whereClauseList = NIL; ExtractFromExpressionWalker((Node *) fromExprCopy, walkerContext); whereClauseList = walkerContext->baseQualifierList; return whereClauseList; } /* * QualifierList walks over the FROM expression in the query tree, and builds * a list of all qualifiers from the expression tree. The function checks for * both implicitly and explicitly defined qualifiers. Note that this function * is very similar to WhereClauseList(), but QualifierList() also includes * outer-join clauses. */ List * QualifierList(FromExpr *fromExpr) { FromExpr *fromExprCopy = copyObject(fromExpr); QualifierWalkerContext *walkerContext = palloc0(sizeof(QualifierWalkerContext)); List *qualifierList = NIL; ExtractFromExpressionWalker((Node *) fromExprCopy, walkerContext); qualifierList = list_concat(qualifierList, walkerContext->baseQualifierList); qualifierList = list_concat(qualifierList, walkerContext->outerJoinQualifierList); return qualifierList; } /* * ValidateClauseList walks over the given list of clauses, and checks that we * can recognize all the clauses. This function ensures that we do not drop an * unsupported clause type on the floor, and thus prevents erroneous results. */ static void ValidateClauseList(List *clauseList) { ListCell *clauseCell = NULL; foreach(clauseCell, clauseList) { Node *clause = (Node *) lfirst(clauseCell); /* * There could never be sublinks here given that it is handled * in subquery pushdown code-path. */ Assert(!IsSublinkClause(clause)); if (!(IsSelectClause(clause) || IsJoinClause(clause) || or_clause(clause))) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported clause type"))); } } } /* * ValidateSubqueryPushdownClauseList walks over the given list of clauses, * and checks that we can recognize all the clauses. This function ensures * that we do not drop an unsupported clause type on the floor, and thus * prevents erroneous results. * * Note that this function is slightly different than ValidateClauseList(), * additionally allowing sublinks. 
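 *
 * For example, a qualifier such as "EXISTS (SELECT 1 FROM t WHERE ...)" appears
 * as a SubLink node: it is accepted here, whereas ValidateClauseList() asserts
 * that such clauses never reach it (t is a hypothetical table).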
*/ static void ValidateSubqueryPushdownClauseList(List *clauseList) { ListCell *clauseCell = NULL; foreach(clauseCell, clauseList) { Node *clause = (Node *) lfirst(clauseCell); if (!(IsSublinkClause(clause) || IsSelectClause(clause) || IsJoinClause(clause) || or_clause(clause))) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported clause type"))); } } } /* * JoinClauseList finds the join clauses from the given where clause expression * list, and returns them. The function does not iterate into nested OR clauses * and relies on find_duplicate_ors() in the optimizer to pull up factorizable * OR clauses. */ List * JoinClauseList(List *whereClauseList) { List *joinClauseList = NIL; ListCell *whereClauseCell = NULL; foreach(whereClauseCell, whereClauseList) { Node *whereClause = (Node *) lfirst(whereClauseCell); if (IsJoinClause(whereClause)) { joinClauseList = lappend(joinClauseList, whereClause); } } return joinClauseList; } /* * ExtractFromExpressionWalker walks over a FROM expression, and finds all * implicit and explicit qualifiers in the expression. The function looks at * join and from expression nodes to find qualifiers, and returns these * qualifiers. * * Note that we don't want outer join clauses in regular outer join planning, * but we need outer join clauses in subquery pushdown prerequisite checks. * Therefore, outer join qualifiers are returned in a different list than other * qualifiers inside the given walker context. For this reason, we return two * qualifier lists. * * Note that we check if the qualifier node in join and from expression nodes * is a list node. If it is not a list node which is the case for subqueries, * then we run eval_const_expressions(), canonicalize_qual() and make_ands_implicit() * on the qualifier node and get a list of flattened implicitly AND'ed qualifier * list. Actually in the planer phase of PostgreSQL these functions also run on * subqueries but differently from the outermost query, they are run on a copy * of parse tree and changes do not get persisted as modifications to the original * query tree. * * Also this function adds SubLinks to the baseQualifierList when they appear on * the query's WHERE clause. The callers of the function should consider processing * Sublinks as well. */ static bool ExtractFromExpressionWalker(Node *node, QualifierWalkerContext *walkerContext) { bool walkerResult = false; if (node == NULL) { return false; } /* * Get qualifier lists of join and from expression nodes. Note that in the * case of subqueries, PostgreSQL can skip simplifying, flattening and * making ANDs implicit. If qualifiers node is not a list, then we run these * preprocess routines on qualifiers node. 
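 * For example, a qualifier tree of the form "a = 1 AND b = 2" is turned by
 * make_ands_implicit() into the two-element implicit-AND list {a = 1, b = 2}.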
*/ if (IsA(node, JoinExpr)) { List *joinQualifierList = NIL; JoinExpr *joinExpression = (JoinExpr *) node; Node *joinQualifiersNode = joinExpression->quals; JoinType joinType = joinExpression->jointype; if (joinQualifiersNode != NULL) { if (IsA(joinQualifiersNode, List)) { joinQualifierList = (List *) joinQualifiersNode; } else { /* this part of code only run for subqueries */ Node *joinClause = eval_const_expressions(NULL, joinQualifiersNode); joinClause = (Node *) canonicalize_qual((Expr *) joinClause); joinQualifierList = make_ands_implicit((Expr *) joinClause); } } /* return outer join clauses in a separate list */ if (joinType == JOIN_INNER) { walkerContext->baseQualifierList = list_concat(walkerContext->baseQualifierList, joinQualifierList); } else if (IS_OUTER_JOIN(joinType)) { walkerContext->outerJoinQualifierList = list_concat(walkerContext->outerJoinQualifierList, joinQualifierList); } } else if (IsA(node, FromExpr)) { List *fromQualifierList = NIL; FromExpr *fromExpression = (FromExpr *) node; Node *fromQualifiersNode = fromExpression->quals; if (fromQualifiersNode != NULL) { if (IsA(fromQualifiersNode, List)) { fromQualifierList = (List *) fromQualifiersNode; } else { /* this part of code only run for subqueries */ Node *fromClause = eval_const_expressions(NULL, fromQualifiersNode); fromClause = (Node *) canonicalize_qual((Expr *) fromClause); fromQualifierList = make_ands_implicit((Expr *) fromClause); } walkerContext->baseQualifierList = list_concat(walkerContext->baseQualifierList, fromQualifierList); } } walkerResult = expression_tree_walker(node, ExtractFromExpressionWalker, (void *) walkerContext); return walkerResult; } /* * IsJoinClause determines if the given node is a join clause according to our * criteria. Our criteria defines a join clause as an equi join operator between * two columns that belong to two different tables. */ bool IsJoinClause(Node *clause) { bool isJoinClause = false; OpExpr *operatorExpression = NULL; List *argumentList = NIL; Node *leftArgument = NULL; Node *rightArgument = NULL; List *leftColumnList = NIL; List *rightColumnList = NIL; if (!IsA(clause, OpExpr)) { return false; } operatorExpression = (OpExpr *) clause; argumentList = operatorExpression->args; /* join clauses must have two arguments */ if (list_length(argumentList) != 2) { return false; } /* get left and right side of the expression */ leftArgument = (Node *) linitial(argumentList); rightArgument = (Node *) lsecond(argumentList); leftColumnList = pull_var_clause_default(leftArgument); rightColumnList = pull_var_clause_default(rightArgument); /* each side of the expression should have only one column */ if ((list_length(leftColumnList) == 1) && (list_length(rightColumnList) == 1)) { Var *leftColumn = (Var *) linitial(leftColumnList); Var *rightColumn = (Var *) linitial(rightColumnList); bool equiJoin = false; bool joinBetweenDifferentTables = false; bool equalsOperator = OperatorImplementsEquality(operatorExpression->opno); if (equalsOperator) { equiJoin = true; } if (leftColumn->varno != rightColumn->varno) { joinBetweenDifferentTables = true; } /* codifies our logic for determining if this node is a join clause */ if (equiJoin && joinBetweenDifferentTables) { isJoinClause = true; } } return isJoinClause; } /* * TableEntryList finds the regular relation nodes in the range table entry * list, and builds a list of table entries from these regular relation nodes. 
*/ List * TableEntryList(List *rangeTableList) { List *tableEntryList = NIL; ListCell *rangeTableCell = NULL; uint32 tableId = 1; /* range table indices start at 1 */ foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); if (rangeTableEntry->rtekind == RTE_RELATION) { TableEntry *tableEntry = (TableEntry *) palloc0(sizeof(TableEntry)); tableEntry->relationId = rangeTableEntry->relid; tableEntry->rangeTableId = tableId; tableEntryList = lappend(tableEntryList, tableEntry); } /* * Increment tableId regardless so that table entry's tableId remains * congruent with column's range table reference (varno). */ tableId++; } return tableEntryList; } /* * UsedTableEntryList returns list of relation range table entries * that are referenced within the query. Unused entries due to query * flattening or re-rewriting are ignored. */ List * UsedTableEntryList(Query *query) { List *tableEntryList = NIL; List *rangeTableList = query->rtable; List *joinTreeTableIndexList = NIL; ListCell *joinTreeTableIndexCell = NULL; ExtractRangeTableIndexWalker((Node *) query->jointree, &joinTreeTableIndexList); foreach(joinTreeTableIndexCell, joinTreeTableIndexList) { int joinTreeTableIndex = lfirst_int(joinTreeTableIndexCell); RangeTblEntry *rangeTableEntry = rt_fetch(joinTreeTableIndex, rangeTableList); if (rangeTableEntry->rtekind == RTE_RELATION) { TableEntry *tableEntry = (TableEntry *) palloc0(sizeof(TableEntry)); tableEntry->relationId = rangeTableEntry->relid; tableEntry->rangeTableId = joinTreeTableIndex; tableEntryList = lappend(tableEntryList, tableEntry); } } return tableEntryList; } /* * MultiTableNodeList builds a list of MultiTable nodes from the given table * entry list. A multi table node represents one entry from the range table * list. These entries may belong to the same physical relation in the case of * self-joins. */ static List * MultiTableNodeList(List *tableEntryList, List *rangeTableList) { List *tableNodeList = NIL; ListCell *tableEntryCell = NULL; foreach(tableEntryCell, tableEntryList) { TableEntry *tableEntry = (TableEntry *) lfirst(tableEntryCell); Oid relationId = tableEntry->relationId; uint32 rangeTableId = tableEntry->rangeTableId; Var *partitionColumn = PartitionColumn(relationId, rangeTableId); RangeTblEntry *rangeTableEntry = rt_fetch(rangeTableId, rangeTableList); MultiTable *tableNode = CitusMakeNode(MultiTable); tableNode->subquery = NULL; tableNode->relationId = relationId; tableNode->rangeTableId = rangeTableId; tableNode->partitionColumn = partitionColumn; tableNode->alias = rangeTableEntry->alias; tableNode->referenceNames = rangeTableEntry->eref; tableNodeList = lappend(tableNodeList, tableNode); } return tableNodeList; } /* Adds a MultiCollect node on top of each MultiTable node in the given list. */ static List * AddMultiCollectNodes(List *tableNodeList) { List *collectTableList = NIL; ListCell *tableNodeCell = NULL; foreach(tableNodeCell, tableNodeList) { MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); MultiCollect *collectNode = CitusMakeNode(MultiCollect); SetChild((MultiUnaryNode *) collectNode, (MultiNode *) tableNode); collectTableList = lappend(collectTableList, collectNode); } return collectTableList; } /* * MultiJoinTree takes in the join order information and the list of tables, and * builds a join tree by applying the corresponding join rules. The function * builds a left deep tree, as expressed by the join order list. 
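 * For a join order over hypothetical tables (t1, t2, t3), the resulting shape
 * is Join(Join(Collect(t1), Collect(t2)), Collect(t3)).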
* * The function starts by setting the first table as the top node in the join * tree. Then, the function iterates over the list of tables, and builds a new * join node between the top of the join tree and the next table in the list. * At each iteration, the function sets the top of the join tree to the newly * built list. This results in a left deep join tree, and the function returns * this tree after every table in the list has been joined. */ static MultiNode * MultiJoinTree(List *joinOrderList, List *collectTableList, List *joinWhereClauseList) { MultiNode *currentTopNode = NULL; ListCell *joinOrderCell = NULL; bool firstJoinNode = true; foreach(joinOrderCell, joinOrderList) { JoinOrderNode *joinOrderNode = (JoinOrderNode *) lfirst(joinOrderCell); uint32 joinTableId = joinOrderNode->tableEntry->rangeTableId; MultiCollect *collectNode = CollectNodeForTable(collectTableList, joinTableId); if (firstJoinNode) { currentTopNode = (MultiNode *) collectNode; firstJoinNode = false; } else { JoinRuleType joinRuleType = joinOrderNode->joinRuleType; JoinType joinType = joinOrderNode->joinType; Var *partitionColumn = joinOrderNode->partitionColumn; MultiNode *newJoinNode = NULL; List *joinClauseList = joinOrderNode->joinClauseList; /* * Build a join node between the top of our join tree and the next * table in the join order. */ newJoinNode = ApplyJoinRule(currentTopNode, (MultiNode *) collectNode, joinRuleType, partitionColumn, joinType, joinClauseList); /* the new join node becomes the top of our join tree */ currentTopNode = newJoinNode; } } /* current top node points to the entire left deep join tree */ return currentTopNode; } /* * CollectNodeForTable finds the MultiCollect node whose MultiTable node has the * given range table identifier. Note that this function expects each collect * node in the given list to have one table node as its child. */ static MultiCollect * CollectNodeForTable(List *collectTableList, uint32 rangeTableId) { MultiCollect *collectNodeForTable = NULL; ListCell *collectTableCell = NULL; foreach(collectTableCell, collectTableList) { MultiCollect *collectNode = (MultiCollect *) lfirst(collectTableCell); List *tableIdList = OutputTableIdList((MultiNode *) collectNode); uint32 tableId = (uint32) linitial_int(tableIdList); Assert(list_length(tableIdList) == 1); if (tableId == rangeTableId) { collectNodeForTable = collectNode; break; } } Assert(collectNodeForTable != NULL); return collectNodeForTable; } /* * MultiSelectNode extracts the select clauses from the given where clause list, * and builds a MultiSelect node from these clauses. If the expression tree does * not have any select clauses, the function return null. */ static MultiSelect * MultiSelectNode(List *whereClauseList) { List *selectClauseList = NIL; MultiSelect *selectNode = NULL; ListCell *whereClauseCell = NULL; foreach(whereClauseCell, whereClauseList) { Node *whereClause = (Node *) lfirst(whereClauseCell); if (IsSelectClause(whereClause) || or_clause(whereClause)) { selectClauseList = lappend(selectClauseList, whereClause); } } if (list_length(selectClauseList) > 0) { selectNode = CitusMakeNode(MultiSelect); selectNode->selectClauseList = selectClauseList; } return selectNode; } /* * IsSelectClause determines if the given node is a select clause according to * our criteria. Our criteria defines a select clause as an expression that has * zero or more columns belonging to only one table. The function assumes that * no sublinks exists in the clause. 
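 *
 * For example, "l_quantity > 5.0" references columns of a single table and
 * counts as a select clause, whereas "l_orderkey = o_orderkey" references two
 * tables and does not (column names are illustrative only).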
*/ static bool IsSelectClause(Node *clause) { List *columnList = NIL; ListCell *columnCell = NULL; Var *firstColumn = NULL; Index firstColumnTableId = 0; bool isSelectClause = true; /* extract columns from the clause */ columnList = pull_var_clause_default(clause); if (list_length(columnList) == 0) { return true; } /* get first column's tableId */ firstColumn = (Var *) linitial(columnList); firstColumnTableId = firstColumn->varno; /* check if all columns are from the same table */ foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); if (column->varno != firstColumnTableId) { isSelectClause = false; } } return isSelectClause; } /* * IsSublinkClause determines if the given node is a sublink or subplan. */ static bool IsSublinkClause(Node *clause) { NodeTag nodeTag = nodeTag(clause); if (nodeTag == T_SubLink || nodeTag == T_SubPlan) { return true; } return false; } /* * MultiProjectNode builds the project node using the target entry information * from the query tree. The project node only encapsulates projected columns, * and does not include aggregates, group clauses, or project expressions. */ static MultiProject * MultiProjectNode(List *targetEntryList) { MultiProject *projectNode = NULL; List *uniqueColumnList = NIL; List *columnList = NIL; ListCell *columnCell = NULL; /* extract the list of columns and remove any duplicates */ columnList = pull_var_clause_default((Node *) targetEntryList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); uniqueColumnList = list_append_unique(uniqueColumnList, column); } /* create project node with list of columns to project */ projectNode = CitusMakeNode(MultiProject); projectNode->columnList = uniqueColumnList; return projectNode; } /* Builds the extended operator node using fields from the given query tree. */ static MultiExtendedOp * MultiExtendedOpNode(Query *queryTree) { MultiExtendedOp *extendedOpNode = CitusMakeNode(MultiExtendedOp); extendedOpNode->targetList = queryTree->targetList; extendedOpNode->groupClauseList = queryTree->groupClause; extendedOpNode->sortClauseList = queryTree->sortClause; extendedOpNode->limitCount = queryTree->limitCount; extendedOpNode->limitOffset = queryTree->limitOffset; extendedOpNode->havingQual = queryTree->havingQual; return extendedOpNode; } /* Helper function to return the parent node of the given node. */ MultiNode * ParentNode(MultiNode *multiNode) { MultiNode *parentNode = multiNode->parentNode; return parentNode; } /* Helper function to return the child of the given unary node. */ MultiNode * ChildNode(MultiUnaryNode *multiNode) { MultiNode *childNode = multiNode->childNode; return childNode; } /* Helper function to return the grand child of the given unary node. */ MultiNode * GrandChildNode(MultiUnaryNode *multiNode) { MultiNode *childNode = ChildNode(multiNode); MultiNode *grandChildNode = ChildNode((MultiUnaryNode *) childNode); return grandChildNode; } /* Sets the given child node as a child of the given unary parent node. */ void SetChild(MultiUnaryNode *parent, MultiNode *child) { parent->childNode = child; child->parentNode = (MultiNode *) parent; } /* Sets the given child node as a left child of the given parent node. */ void SetLeftChild(MultiBinaryNode *parent, MultiNode *leftChild) { parent->leftChildNode = leftChild; leftChild->parentNode = (MultiNode *) parent; } /* Sets the given child node as a right child of the given parent node. 
*/ void SetRightChild(MultiBinaryNode *parent, MultiNode *rightChild) { parent->rightChildNode = rightChild; rightChild->parentNode = (MultiNode *) parent; } /* Returns true if the given node is a unary operator. */ bool UnaryOperator(MultiNode *node) { bool unaryOperator = false; if (CitusIsA(node, MultiTreeRoot) || CitusIsA(node, MultiTable) || CitusIsA(node, MultiCollect) || CitusIsA(node, MultiSelect) || CitusIsA(node, MultiProject) || CitusIsA(node, MultiPartition) || CitusIsA(node, MultiExtendedOp)) { unaryOperator = true; } return unaryOperator; } /* Returns true if the given node is a binary operator. */ bool BinaryOperator(MultiNode *node) { bool binaryOperator = false; if (CitusIsA(node, MultiJoin) || CitusIsA(node, MultiCartesianProduct)) { binaryOperator = true; } return binaryOperator; } /* * OutputTableIdList finds all table identifiers that are output by the given * multi node, and returns these identifiers in a new list. */ List * OutputTableIdList(MultiNode *multiNode) { List *tableIdList = NIL; List *tableNodeList = FindNodesOfType(multiNode, T_MultiTable); ListCell *tableNodeCell = NULL; foreach(tableNodeCell, tableNodeList) { MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); int tableId = (int) tableNode->rangeTableId; if (tableId != SUBQUERY_RANGE_TABLE_ID) { tableIdList = lappend_int(tableIdList, tableId); } } return tableIdList; } /* * FindNodesOfType takes in a given logical plan tree, and recursively traverses * the tree in preorder. The function finds all nodes of requested type during * the traversal, and returns them in a list. */ List * FindNodesOfType(MultiNode *node, int type) { List *nodeList = NIL; int nodeType = T_Invalid; /* terminal condition for recursion */ if (node == NULL) { return NIL; } /* current node has expected node type */ nodeType = CitusNodeTag(node); if (nodeType == type) { nodeList = lappend(nodeList, node); } if (UnaryOperator(node)) { MultiNode *childNode = ((MultiUnaryNode *) node)->childNode; List *childNodeList = FindNodesOfType(childNode, type); nodeList = list_concat(nodeList, childNodeList); } else if (BinaryOperator(node)) { MultiNode *leftChildNode = ((MultiBinaryNode *) node)->leftChildNode; MultiNode *rightChildNode = ((MultiBinaryNode *) node)->rightChildNode; List *leftChildNodeList = FindNodesOfType(leftChildNode, type); List *rightChildNodeList = FindNodesOfType(rightChildNode, type); nodeList = list_concat(nodeList, leftChildNodeList); nodeList = list_concat(nodeList, rightChildNodeList); } return nodeList; } /* * NeedsDistributedPlanning checks if the passed in query is a query running * on a distributed table. If it is, we start distributed planning. * * For distributed relations it also assigns identifiers to the relevant RTEs. */ bool NeedsDistributedPlanning(Query *queryTree) { CmdType commandType = queryTree->commandType; List *rangeTableList = NIL; ListCell *rangeTableCell = NULL; bool hasLocalRelation = false; bool hasDistributedRelation = false; if (commandType != CMD_SELECT && commandType != CMD_INSERT && commandType != CMD_UPDATE && commandType != CMD_DELETE) { return false; } /* * We can handle INSERT INTO distributed_table SELECT ... even if the SELECT * part references local tables, so skip the remaining checks. 
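 * For example, "INSERT INTO dist_table SELECT * FROM local_table" is accepted
 * here, whereas a plain SELECT joining a distributed table with a local table
 * is rejected by the check below (table names are illustrative only).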
*/ if (InsertSelectIntoDistributedTable(queryTree)) { return true; } /* extract range table entries for simple relations only */ ExtractRangeTableRelationWalker((Node *) queryTree, &rangeTableList); foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); /* check if relation is local or distributed */ Oid relationId = rangeTableEntry->relid; if (IsDistributedTable(relationId)) { hasDistributedRelation = true; } else { hasLocalRelation = true; } } if (hasLocalRelation && hasDistributedRelation) { ereport(ERROR, (errmsg("cannot plan queries which include both local and " "distributed relations"))); } return hasDistributedRelation; } /* * ExtractRangeTableRelationWalker gathers all range table entries in a query * and filters them to preserve only those of the RTE_RELATION type. */ bool ExtractRangeTableRelationWalker(Node *node, List **rangeTableRelationList) { List *rangeTableList = NIL; ListCell *rangeTableCell = NULL; bool walkIsComplete = ExtractRangeTableEntryWalker(node, &rangeTableList); foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); if (rangeTableEntry->rtekind == RTE_RELATION && rangeTableEntry->relkind != RELKIND_VIEW) { (*rangeTableRelationList) = lappend(*rangeTableRelationList, rangeTableEntry); } } return walkIsComplete; } /* * ExtractRangeTableRelationWalkerWithRTEExpand obtains the list of relations * from the given node. Note that the difference between this function and * ExtractRangeTableRelationWalker is that this one recursively * walk into range table entries if it can. */ bool ExtractRangeTableRelationWalkerWithRTEExpand(Node *node, List **rangeTableRelationList) { bool walkIsComplete = false; if (node == NULL) { return walkIsComplete; } else if (IsA(node, RangeTblEntry)) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node; List *rangeTableList = list_make1(rangeTableEntry); if (rangeTableEntry->rtekind == RTE_RELATION) { (*rangeTableRelationList) = lappend(*rangeTableRelationList, rangeTableEntry); } else { walkIsComplete = range_table_walker(rangeTableList, ExtractRangeTableRelationWalkerWithRTEExpand, rangeTableRelationList, 0); } } else { walkIsComplete = ExtractRangeTableRelationWalker(node, rangeTableRelationList); } return walkIsComplete; } /* * ExtractRangeTableEntryWalker walks over a query tree, and finds all range * table entries. For recursing into the query tree, this function uses the * query tree walker since the expression tree walker doesn't recurse into * sub-queries. */ bool ExtractRangeTableEntryWalker(Node *node, List **rangeTableList) { bool walkIsComplete = false; if (node == NULL) { return false; } if (IsA(node, RangeTblEntry)) { RangeTblEntry *rangeTable = (RangeTblEntry *) node; (*rangeTableList) = lappend(*rangeTableList, rangeTable); } else if (IsA(node, Query)) { walkIsComplete = query_tree_walker((Query *) node, ExtractRangeTableEntryWalker, rangeTableList, QTW_EXAMINE_RTES); } else { walkIsComplete = expression_tree_walker(node, ExtractRangeTableEntryWalker, rangeTableList); } return walkIsComplete; } /* * pull_var_clause_default calls pull_var_clause with the most commonly used * arguments for distributed planning. */ List * pull_var_clause_default(Node *node) { /* * PVC_REJECT_PLACEHOLDERS is implicit if PVC_INCLUDE_PLACEHOLDERS * isn't specified. 
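 * With PVC_RECURSE_AGGREGATES, Vars nested inside aggregate calls are returned
 * individually; for example, pulling vars from "sum(price * quantity)" yields
 * the columns price and quantity rather than the Aggref node itself (the
 * expression is illustrative only).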
*/ List *columnList = pull_var_clause(node, PVC_RECURSE_AGGREGATES); return columnList; } /* * ApplyJoinRule finds the join rule application function that corresponds to * the given join rule, and calls this function to create a new join node that * joins the left and right nodes together. */ static MultiNode * ApplyJoinRule(MultiNode *leftNode, MultiNode *rightNode, JoinRuleType ruleType, Var *partitionColumn, JoinType joinType, List *joinClauseList) { RuleApplyFunction ruleApplyFunction = NULL; MultiNode *multiNode = NULL; List *applicableJoinClauses = NIL; List *leftTableIdList = OutputTableIdList(leftNode); List *rightTableIdList = OutputTableIdList(rightNode); int rightTableIdCount PG_USED_FOR_ASSERTS_ONLY = 0; uint32 rightTableId = 0; rightTableIdCount = list_length(rightTableIdList); Assert(rightTableIdCount == 1); /* find applicable join clauses between the left and right data sources */ rightTableId = (uint32) linitial_int(rightTableIdList); applicableJoinClauses = ApplicableJoinClauses(leftTableIdList, rightTableId, joinClauseList); /* call the join rule application function to create the new join node */ ruleApplyFunction = JoinRuleApplyFunction(ruleType); multiNode = (*ruleApplyFunction)(leftNode, rightNode, partitionColumn, joinType, applicableJoinClauses); if (joinType != JOIN_INNER && CitusIsA(multiNode, MultiJoin)) { MultiJoin *joinNode = (MultiJoin *) multiNode; /* preserve non-join clauses for OUTER joins */ joinNode->joinClauseList = list_copy(joinClauseList); } return multiNode; } /* * JoinRuleApplyFunction returns a function pointer for the rule application * function; this rule application function corresponds to the given rule type. * This function also initializes the rule application function array in a * static code block, if the array has not been initialized. */ static RuleApplyFunction JoinRuleApplyFunction(JoinRuleType ruleType) { static bool ruleApplyFunctionInitialized = false; RuleApplyFunction ruleApplyFunction = NULL; if (!ruleApplyFunctionInitialized) { RuleApplyFunctionArray[BROADCAST_JOIN] = &ApplyBroadcastJoin; RuleApplyFunctionArray[LOCAL_PARTITION_JOIN] = &ApplyLocalJoin; RuleApplyFunctionArray[SINGLE_PARTITION_JOIN] = &ApplySinglePartitionJoin; RuleApplyFunctionArray[DUAL_PARTITION_JOIN] = &ApplyDualPartitionJoin; RuleApplyFunctionArray[CARTESIAN_PRODUCT] = &ApplyCartesianProduct; ruleApplyFunctionInitialized = true; } ruleApplyFunction = RuleApplyFunctionArray[ruleType]; Assert(ruleApplyFunction != NULL); return ruleApplyFunction; } /* * ApplyBroadcastJoin creates a new MultiJoin node that joins the left and the * right node. The new node uses the broadcast join rule to perform the join. */ static MultiNode * ApplyBroadcastJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *applicableJoinClauses) { MultiJoin *joinNode = CitusMakeNode(MultiJoin); joinNode->joinRuleType = BROADCAST_JOIN; joinNode->joinType = joinType; joinNode->joinClauseList = applicableJoinClauses; SetLeftChild((MultiBinaryNode *) joinNode, leftNode); SetRightChild((MultiBinaryNode *) joinNode, rightNode); return (MultiNode *) joinNode; } /* * ApplyLocalJoin creates a new MultiJoin node that joins the left and the right * node. The new node uses the local join rule to perform the join. 
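 *
 * A local join is typically applicable when both inputs are partitioned on the
 * join column with matching shard intervals, so each pair of co-located shards
 * can be joined on its worker without repartitioning any data.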
*/ static MultiNode * ApplyLocalJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *applicableJoinClauses) { MultiJoin *joinNode = CitusMakeNode(MultiJoin); joinNode->joinRuleType = LOCAL_PARTITION_JOIN; joinNode->joinType = joinType; joinNode->joinClauseList = applicableJoinClauses; SetLeftChild((MultiBinaryNode *) joinNode, leftNode); SetRightChild((MultiBinaryNode *) joinNode, rightNode); return (MultiNode *) joinNode; } /* * ApplySinglePartitionJoin creates a new MultiJoin node that joins the left and * right node. The function also adds a MultiPartition node on top of the node * (left or right) that is not partitioned on the join column. */ static MultiNode * ApplySinglePartitionJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *applicableJoinClauses) { OpExpr *joinClause = NULL; Var *leftColumn = NULL; Var *rightColumn = NULL; List *rightTableIdList = NIL; uint32 rightTableId = 0; uint32 partitionTableId = partitionColumn->varno; /* create all operator structures up front */ MultiJoin *joinNode = CitusMakeNode(MultiJoin); MultiCollect *collectNode = CitusMakeNode(MultiCollect); MultiPartition *partitionNode = CitusMakeNode(MultiPartition); /* * We first find the appropriate join clause. Then, we compare the partition * column against the join clause's columns. If one of the columns matches, * we introduce a (re-)partition operator for the other column. */ joinClause = SinglePartitionJoinClause(partitionColumn, applicableJoinClauses); Assert(joinClause != NULL); leftColumn = LeftColumn(joinClause); rightColumn = RightColumn(joinClause); if (equal(partitionColumn, leftColumn)) { partitionNode->partitionColumn = rightColumn; partitionNode->splitPointTableId = partitionTableId; } else if (equal(partitionColumn, rightColumn)) { partitionNode->partitionColumn = leftColumn; partitionNode->splitPointTableId = partitionTableId; } /* determine the node the partition operator goes on top of */ rightTableIdList = OutputTableIdList(rightNode); rightTableId = (uint32) linitial_int(rightTableIdList); Assert(list_length(rightTableIdList) == 1); /* * If the right child node is partitioned on the partition key column, we * add the partition operator on the left child node; and vice versa. Then, * we add a collect operator on top of the partition operator, and always * make sure that we have at most one relation on the right-hand side. */ if (partitionTableId == rightTableId) { SetChild((MultiUnaryNode *) partitionNode, leftNode); SetChild((MultiUnaryNode *) collectNode, (MultiNode *) partitionNode); SetLeftChild((MultiBinaryNode *) joinNode, (MultiNode *) collectNode); SetRightChild((MultiBinaryNode *) joinNode, rightNode); } else { SetChild((MultiUnaryNode *) partitionNode, rightNode); SetChild((MultiUnaryNode *) collectNode, (MultiNode *) partitionNode); SetLeftChild((MultiBinaryNode *) joinNode, leftNode); SetRightChild((MultiBinaryNode *) joinNode, (MultiNode *) collectNode); } /* finally set join operator fields */ joinNode->joinRuleType = SINGLE_PARTITION_JOIN; joinNode->joinType = joinType; joinNode->joinClauseList = applicableJoinClauses; return (MultiNode *) joinNode; } /* * ApplyDualPartitionJoin creates a new MultiJoin node that joins the left and * right node. The function also adds two MultiPartition operators on top of * both nodes to repartition these nodes' data on the join clause columns. 
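 *
 * This rule typically applies when neither input is already partitioned on the
 * join column, for example two hash-distributed tables joined on columns other
 * than their distribution columns; both sides are then repartitioned on the
 * join key before the join is performed.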
*/ static MultiNode * ApplyDualPartitionJoin(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *applicableJoinClauses) { MultiJoin *joinNode = NULL; OpExpr *joinClause = NULL; MultiPartition *leftPartitionNode = NULL; MultiPartition *rightPartitionNode = NULL; MultiCollect *leftCollectNode = NULL; MultiCollect *rightCollectNode = NULL; Var *leftColumn = NULL; Var *rightColumn = NULL; List *rightTableIdList = NIL; uint32 rightTableId = 0; /* find the appropriate join clause */ joinClause = DualPartitionJoinClause(applicableJoinClauses); Assert(joinClause != NULL); leftColumn = LeftColumn(joinClause); rightColumn = RightColumn(joinClause); rightTableIdList = OutputTableIdList(rightNode); rightTableId = (uint32) linitial_int(rightTableIdList); Assert(list_length(rightTableIdList) == 1); leftPartitionNode = CitusMakeNode(MultiPartition); rightPartitionNode = CitusMakeNode(MultiPartition); /* find the partition node each join clause column belongs to */ if (leftColumn->varno == rightTableId) { leftPartitionNode->partitionColumn = rightColumn; rightPartitionNode->partitionColumn = leftColumn; } else { leftPartitionNode->partitionColumn = leftColumn; rightPartitionNode->partitionColumn = rightColumn; } /* add partition operators on top of left and right nodes */ SetChild((MultiUnaryNode *) leftPartitionNode, leftNode); SetChild((MultiUnaryNode *) rightPartitionNode, rightNode); /* add collect operators on top of the two partition operators */ leftCollectNode = CitusMakeNode(MultiCollect); rightCollectNode = CitusMakeNode(MultiCollect); SetChild((MultiUnaryNode *) leftCollectNode, (MultiNode *) leftPartitionNode); SetChild((MultiUnaryNode *) rightCollectNode, (MultiNode *) rightPartitionNode); /* add join operator on top of the two collect operators */ joinNode = CitusMakeNode(MultiJoin); joinNode->joinRuleType = DUAL_PARTITION_JOIN; joinNode->joinType = joinType; joinNode->joinClauseList = applicableJoinClauses; SetLeftChild((MultiBinaryNode *) joinNode, (MultiNode *) leftCollectNode); SetRightChild((MultiBinaryNode *) joinNode, (MultiNode *) rightCollectNode); return (MultiNode *) joinNode; } /* Creates a cartesian product node that joins the left and the right node. */ static MultiNode * ApplyCartesianProduct(MultiNode *leftNode, MultiNode *rightNode, Var *partitionColumn, JoinType joinType, List *applicableJoinClauses) { MultiCartesianProduct *cartesianNode = CitusMakeNode(MultiCartesianProduct); SetLeftChild((MultiBinaryNode *) cartesianNode, leftNode); SetRightChild((MultiBinaryNode *) cartesianNode, rightNode); return (MultiNode *) cartesianNode; } /* * SubqueryPushdownMultiTree creates logical plan for subquery pushdown logic. * Note that this logic will be changed in next iterations, so we decoupled it * from other parts of code although it causes some code duplication. * * Current subquery pushdown support in MultiTree logic requires a single range * table entry in the top most from clause. Therefore we inject an synthetic * query derived from the top level query and make it the only range table * entry for the top level query. This way we can push down any subquery joins * down to workers without invoking join order planner. 
*/ static MultiNode * SubqueryPushdownMultiPlanTree(Query *queryTree) { List *targetEntryList = queryTree->targetList; List *qualifierList = NIL; List *columnList = NIL; List *targetColumnList = NIL; MultiCollect *subqueryCollectNode = CitusMakeNode(MultiCollect); MultiTable *subqueryNode = NULL; MultiProject *projectNode = NULL; MultiExtendedOp *extendedOpNode = NULL; MultiNode *currentTopNode = NULL; Query *pushedDownQuery = NULL; List *subqueryTargetEntryList = NIL; List *havingClauseColumnList = NIL; /* verify we can perform distributed planning on this query */ ErrorIfQueryNotSupported(queryTree); /* * Extract qualifiers and verify we can plan for them. Note that since * subquery pushdown join planning is based on restriction equivalence, * checking for these qualifiers may not be necessary. */ qualifierList = QualifierList(queryTree->jointree); ValidateSubqueryPushdownClauseList(qualifierList); /* * We would be creating a new Query and pushing down top level query's * contents down to it. Join and filter clauses in higher level query would * be transferred to lower query. Therefore after this function we would * only have a single range table entry in the top level query. We need to * create a target list entry in lower query for each column reference in * upper level query's target list and having clauses. Any column reference * in the upper query will be updated to have varno=1, and varattno= * of matching target entry in pushed down query. * Consider query * SELECT s1.a, sum(s2.c) * FROM (some subquery) s1, (some subquery) s2 * WHERE s1.a = s2.a * GROUP BY s1.a * HAVING avg(s2.b); * * We want to prepare a multi tree to avoid subquery joins at top level, * therefore above query is converted to an equivalent * SELECT worker_column_0, sum(worker_column_1) * FROM ( * SELECT * s1.a AS worker_column_0, * s2.c AS worker_column_1, * s2.b AS as worker_column_2 * FROM (some subquery) s1, (some subquery) s2 * WHERE s1.a = s2.a) worker_subquery * GROUP BY worker_column_0 * HAVING avg(worker_column_2); * After this conversion MultiTree is created as follows * * MultiExtendedOpNode( * targetList : worker_column_0, sum(worker_column_1) * groupBy : worker_column_0 * having : avg(worker_column_2)) * --->MultiProject (worker_column_0, worker_column_1, worker_column_2) * --->---> MultiTable (subquery : worker_subquery) * * Master and worker queries will be created out of this MultiTree at later stages. */ /* * uniqueColumnList contains all columns returned by subquery. Subquery target * entry list, subquery range table entry's column name list are derived from * uniqueColumnList. Columns mentioned in multiProject node and multiExtendedOp * node are indexed with their respective position in uniqueColumnList. */ targetColumnList = pull_var_clause_default((Node *) targetEntryList); havingClauseColumnList = pull_var_clause_default(queryTree->havingQual); columnList = list_concat(targetColumnList, havingClauseColumnList); /* create a target entry for each unique column */ subqueryTargetEntryList = CreateSubqueryTargetEntryList(columnList); /* * Update varno/varattno fields of columns in columnList to * point to corresponding target entry in subquery target entry list. 
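 * In the example above, the Var referencing s2.c initially carries s2's range
 * table index; after the mapping it becomes (varno = 1, varattno = 2), pointing
 * at worker_column_1 in the pushed down query's target list.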
*/ UpdateVarMappingsForExtendedOpNode(columnList, subqueryTargetEntryList); /* new query only has target entries, join tree, and rtable*/ pushedDownQuery = makeNode(Query); pushedDownQuery->commandType = queryTree->commandType; pushedDownQuery->targetList = subqueryTargetEntryList; pushedDownQuery->jointree = copyObject(queryTree->jointree); pushedDownQuery->rtable = copyObject(queryTree->rtable); pushedDownQuery->setOperations = copyObject(queryTree->setOperations); pushedDownQuery->querySource = queryTree->querySource; subqueryNode = MultiSubqueryPushdownTable(pushedDownQuery); SetChild((MultiUnaryNode *) subqueryCollectNode, (MultiNode *) subqueryNode); currentTopNode = (MultiNode *) subqueryCollectNode; /* build project node for the columns to project */ projectNode = MultiProjectNode(targetEntryList); SetChild((MultiUnaryNode *) projectNode, currentTopNode); currentTopNode = (MultiNode *) projectNode; /* * We build the extended operator node to capture aggregate functions, group * clauses, sort clauses, limit/offset clauses, and expressions. We need to * distinguish between aggregates and expressions; and we address this later * in the logical optimizer. */ extendedOpNode = MultiExtendedOpNode(queryTree); /* * Postgres standard planner converts having qual node to a list of and * clauses and expects havingQual to be of type List when executing the * query later. This function is called on an original query, therefore * havingQual has not been converted yet. Perform conversion here. */ if (extendedOpNode->havingQual != NULL && !IsA(extendedOpNode->havingQual, List)) { extendedOpNode->havingQual = (Node *) make_ands_implicit((Expr *) extendedOpNode->havingQual); } /* * Postgres standard planner evaluates expressions in the LIMIT/OFFSET clauses. * Since we're using original query here, we should manually evaluate the * expression on the LIMIT and OFFSET clauses. Note that logical optimizer * expects those clauses to be already evaluated. */ extendedOpNode->limitCount = PartiallyEvaluateExpression(extendedOpNode->limitCount, NULL); extendedOpNode->limitOffset = PartiallyEvaluateExpression(extendedOpNode->limitOffset, NULL); SetChild((MultiUnaryNode *) extendedOpNode, currentTopNode); currentTopNode = (MultiNode *) extendedOpNode; return currentTopNode; } /* * CreateSubqueryTargetEntryList creates a target entry for each unique column * in the column list and returns the target entry list. */ static List * CreateSubqueryTargetEntryList(List *columnList) { AttrNumber resNo = 1; ListCell *columnCell = NULL; List *uniqueColumnList = NIL; List *subqueryTargetEntryList = NIL; foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); uniqueColumnList = list_append_unique(uniqueColumnList, copyObject(column)); } foreach(columnCell, uniqueColumnList) { Var *column = (Var *) lfirst(columnCell); TargetEntry *newTargetEntry = makeNode(TargetEntry); StringInfo columnNameString = makeStringInfo(); newTargetEntry->expr = (Expr *) copyObject(column); appendStringInfo(columnNameString, WORKER_COLUMN_FORMAT, resNo); newTargetEntry->resname = columnNameString->data; newTargetEntry->resjunk = false; newTargetEntry->resno = resNo; subqueryTargetEntryList = lappend(subqueryTargetEntryList, newTargetEntry); resNo++; } return subqueryTargetEntryList; } /* * UpdateVarMappingsForExtendedOpNode updates varno/varattno fields of columns * in columnList to point to corresponding target in subquery target entry * list. 
*/ static void UpdateVarMappingsForExtendedOpNode(List *columnList, List *subqueryTargetEntryList) { ListCell *columnCell = NULL; foreach(columnCell, columnList) { Var *columnOnTheExtendedNode = (Var *) lfirst(columnCell); ListCell *targetEntryCell = NULL; foreach(targetEntryCell, subqueryTargetEntryList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); Var *targetColumn = NULL; Assert(IsA(targetEntry->expr, Var)); targetColumn = (Var *) targetEntry->expr; if (columnOnTheExtendedNode->varno == targetColumn->varno && columnOnTheExtendedNode->varattno == targetColumn->varattno) { columnOnTheExtendedNode->varno = 1; columnOnTheExtendedNode->varattno = targetEntry->resno; break; } } } } /* * MultiSubqueryPushdownTable creates a MultiTable from the given subquery, * populates column list and returns the multitable. */ static MultiTable * MultiSubqueryPushdownTable(Query *subquery) { MultiTable *subqueryTableNode = NULL; StringInfo rteName = makeStringInfo(); List *columnNamesList = NIL; ListCell *targetEntryCell = NULL; appendStringInfo(rteName, "worker_subquery"); foreach(targetEntryCell, subquery->targetList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); columnNamesList = lappend(columnNamesList, makeString(targetEntry->resname)); } subqueryTableNode = CitusMakeNode(MultiTable); subqueryTableNode->subquery = subquery; subqueryTableNode->relationId = SUBQUERY_PUSHDOWN_RELATION_ID; subqueryTableNode->rangeTableId = SUBQUERY_RANGE_TABLE_ID; subqueryTableNode->partitionColumn = NULL; subqueryTableNode->alias = makeNode(Alias); subqueryTableNode->alias->aliasname = rteName->data; subqueryTableNode->referenceNames = makeNode(Alias); subqueryTableNode->referenceNames->aliasname = rteName->data; subqueryTableNode->referenceNames->colnames = columnNamesList; return subqueryTableNode; } /* * OperatorImplementsEquality returns true if the given opno represents an * equality operator. The function retrieves btree interpretation list for this * opno and check if BTEqualStrategyNumber strategy is present. */ bool OperatorImplementsEquality(Oid opno) { bool equalityOperator = false; List *btreeIntepretationList = get_op_btree_interpretation(opno); ListCell *btreeInterpretationCell = NULL; foreach(btreeInterpretationCell, btreeIntepretationList) { OpBtreeInterpretation *btreeIntepretation = (OpBtreeInterpretation *) lfirst(btreeInterpretationCell); if (btreeIntepretation->strategy == BTEqualStrategyNumber) { equalityOperator = true; break; } } return equalityOperator; } citus-7.0.3/src/backend/distributed/planner/multi_master_planner.c000066400000000000000000000204341317107136600254070ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_master_planner.c * Routines for building create table and select into table statements on the * master node. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "distributed/multi_master_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/worker_protocol.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/planmain.h" #include "optimizer/tlist.h" #include "optimizer/var.h" #include "utils/builtins.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/syscache.h" /* * MasterTargetList uses the given worker target list's expressions, and creates * a target target list for the master node. This master target list keeps the * temporary table's columns on the master node. */ static List * MasterTargetList(List *workerTargetList) { List *masterTargetList = NIL; const Index tableId = 1; AttrNumber columnId = 1; ListCell *workerTargetCell = NULL; foreach(workerTargetCell, workerTargetList) { TargetEntry *workerTargetEntry = (TargetEntry *) lfirst(workerTargetCell); TargetEntry *masterTargetEntry = copyObject(workerTargetEntry); Var *masterColumn = makeVarFromTargetEntry(tableId, workerTargetEntry); masterColumn->varattno = columnId; masterColumn->varoattno = columnId; columnId++; /* * The master target entry has two pieces to it. The first piece is the * target entry's expression, which we set to the newly created column. * The second piece is sort and group clauses that we implicitly copy * from the worker target entry. Note that any changes to worker target * entry's sort and group clauses will *break* us here. */ masterTargetEntry->expr = (Expr *) masterColumn; masterTargetList = lappend(masterTargetList, masterTargetEntry); } return masterTargetList; } /* * BuildAggregatePlan creates and returns an aggregate plan. This aggregate plan * builds aggreation and grouping operators (if any) that are to be executed on * the master node. */ static Agg * BuildAggregatePlan(Query *masterQuery, Plan *subPlan) { Agg *aggregatePlan = NULL; AggStrategy aggregateStrategy = AGG_PLAIN; AggClauseCosts aggregateCosts; AttrNumber *groupColumnIdArray = NULL; List *aggregateTargetList = NIL; List *groupColumnList = NIL; List *aggregateColumnList = NIL; List *havingColumnList = NIL; List *columnList = NIL; ListCell *columnCell = NULL; Node *havingQual = NULL; Oid *groupColumnOpArray = NULL; uint32 groupColumnCount = 0; const long rowEstimate = 10; /* assert that we need to build an aggregate plan */ Assert(masterQuery->hasAggs || masterQuery->groupClause); aggregateTargetList = masterQuery->targetList; havingQual = masterQuery->havingQual; /* estimate aggregate execution costs */ memset(&aggregateCosts, 0, sizeof(AggClauseCosts)); get_agg_clause_costs(NULL, (Node *) aggregateTargetList, AGGSPLIT_SIMPLE, &aggregateCosts); get_agg_clause_costs(NULL, (Node *) havingQual, AGGSPLIT_SIMPLE, &aggregateCosts); /* * For upper level plans above the sequential scan, the planner expects the * table id (varno) to be set to OUTER_VAR. 
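 * Setting varno to OUTER_VAR makes the executor resolve these columns against
 * the tuple produced by the outer subplan (here, the remote scan feeding the
 * aggregate) rather than against a base relation.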
*/ aggregateColumnList = pull_var_clause_default((Node *) aggregateTargetList); havingColumnList = pull_var_clause_default(havingQual); columnList = list_concat(aggregateColumnList, havingColumnList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); column->varno = OUTER_VAR; } groupColumnList = masterQuery->groupClause; groupColumnCount = list_length(groupColumnList); /* if we have grouping, then initialize appropriate information */ if (groupColumnCount > 0) { if (!grouping_is_hashable(groupColumnList)) { ereport(ERROR, (errmsg("grouped column list cannot be hashed"))); } /* switch to hashed aggregate strategy to allow grouping */ aggregateStrategy = AGG_HASHED; /* get column indexes that are being grouped */ groupColumnIdArray = extract_grouping_cols(groupColumnList, subPlan->targetlist); groupColumnOpArray = extract_grouping_ops(groupColumnList); } /* finally create the plan */ aggregatePlan = make_agg(aggregateTargetList, (List *) havingQual, aggregateStrategy, AGGSPLIT_SIMPLE, groupColumnCount, groupColumnIdArray, groupColumnOpArray, NIL, NIL, rowEstimate, subPlan); /* just for reproducible costs between different PostgreSQL versions */ aggregatePlan->plan.startup_cost = 0; aggregatePlan->plan.total_cost = 0; aggregatePlan->plan.plan_rows = 0; return aggregatePlan; } /* * BuildSelectStatement builds the final select statement to run on the master * node, before returning results to the user. The function first gets the custom * scan node for all results fetched to the master, and layers aggregation, sort * and limit plans on top of the scan statement if necessary. */ static PlannedStmt * BuildSelectStatement(Query *masterQuery, List *masterTargetList, CustomScan *remoteScan) { PlannedStmt *selectStatement = NULL; RangeTblEntry *customScanRangeTableEntry = NULL; Agg *aggregationPlan = NULL; Plan *topLevelPlan = NULL; ListCell *targetEntryCell = NULL; List *columnNameList = NULL; /* (1) make PlannedStmt and set basic information */ selectStatement = makeNode(PlannedStmt); selectStatement->canSetTag = true; selectStatement->relationOids = NIL; selectStatement->commandType = CMD_SELECT; /* top level select query should have only one range table entry */ Assert(list_length(masterQuery->rtable) == 1); /* compute column names for the custom range table entry */ foreach(targetEntryCell, masterTargetList) { TargetEntry *targetEntry = lfirst(targetEntryCell); columnNameList = lappend(columnNameList, makeString(targetEntry->resname)); } customScanRangeTableEntry = RemoteScanRangeTableEntry(columnNameList); /* set the single element range table list */ selectStatement->rtable = list_make1(customScanRangeTableEntry); /* (2) add an aggregation plan if needed */ if (masterQuery->hasAggs || masterQuery->groupClause) { remoteScan->scan.plan.targetlist = masterTargetList; aggregationPlan = BuildAggregatePlan(masterQuery, &remoteScan->scan.plan); topLevelPlan = (Plan *) aggregationPlan; } else { /* otherwise set the final projections on the scan plan directly */ remoteScan->scan.plan.targetlist = masterQuery->targetList; topLevelPlan = &remoteScan->scan.plan; } /* (3) add a sorting plan if needed */ if (masterQuery->sortClause) { List *sortClauseList = masterQuery->sortClause; Sort *sortPlan = make_sort_from_sortclauses(sortClauseList, topLevelPlan); /* just for reproducible costs between different PostgreSQL versions */ sortPlan->plan.startup_cost = 0; sortPlan->plan.total_cost = 0; sortPlan->plan.plan_rows = 0; topLevelPlan = (Plan *) sortPlan; } /* (4) add a limit plan if 
needed */ if (masterQuery->limitCount || masterQuery->limitOffset) { Node *limitCount = masterQuery->limitCount; Node *limitOffset = masterQuery->limitOffset; Limit *limitPlan = make_limit(topLevelPlan, limitOffset, limitCount); topLevelPlan = (Plan *) limitPlan; } /* (5) finally set our top level plan in the plan tree */ selectStatement->planTree = topLevelPlan; return selectStatement; } /* * MasterNodeSelectPlan takes in a distributed plan and a custom scan node which * wraps remote part of the plan. This function finds the master node query * structure in the multi plan, and builds the final select plan to execute on * the tuples returned by remote scan on the master node. Note that this select * plan is executed after result files are retrieved from worker nodes and * filled into the tuple store inside provided custom scan. */ PlannedStmt * MasterNodeSelectPlan(MultiPlan *multiPlan, CustomScan *remoteScan) { Query *masterQuery = multiPlan->masterQuery; PlannedStmt *masterSelectPlan = NULL; Job *workerJob = multiPlan->workerJob; List *workerTargetList = workerJob->jobQuery->targetList; List *masterTargetList = MasterTargetList(workerTargetList); masterSelectPlan = BuildSelectStatement(masterQuery, masterTargetList, remoteScan); return masterSelectPlan; } citus-7.0.3/src/backend/distributed/planner/multi_physical_planner.c000066400000000000000000005225561317107136600257440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_physical_planner.c * Routines for creating physical plans from given multi-relational algebra * trees. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "miscadmin.h" #include "access/genam.h" #include "access/hash.h" #include "access/heapam.h" #include "access/nbtree.h" #include "access/skey.h" #include "access/xlog.h" #include "catalog/pg_am.h" #include "catalog/pg_operator.h" #include "catalog/pg_type.h" #include "commands/defrem.h" #include "commands/sequence.h" #include "distributed/listutils.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/shardinterval_utils.h" #include "distributed/shard_pruning.h" #include "distributed/task_tracker.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/predtest.h" #include "optimizer/restrictinfo.h" #include "optimizer/var.h" #include "parser/parse_relation.h" #include "parser/parsetree.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/fmgroids.h" #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/memutils.h" #include "utils/rel.h" #include "utils/typcache.h" /* Policy to use when assigning tasks to worker nodes */ int TaskAssignmentPolicy = TASK_ASSIGNMENT_GREEDY; bool EnableUniqueJobIds = true; /* * OperatorCache is used for caching operator identifiers for given typeId, * 
accessMethodId and strategyNumber. It is initialized to empty list as * there are no items in the cache. */ static List *OperatorCache = NIL; /* Local functions forward declarations for job creation */ static Job * BuildJobTree(MultiTreeRoot *multiTree); static MultiNode * LeftMostNode(MultiTreeRoot *multiTree); static Oid RangePartitionJoinBaseRelationId(MultiJoin *joinNode); static MultiTable * FindTableNode(MultiNode *multiNode, int rangeTableId); static Query * BuildJobQuery(MultiNode *multiNode, List *dependedJobList); static Query * BuildReduceQuery(MultiExtendedOp *extendedOpNode, List *dependedJobList); static List * BaseRangeTableList(MultiNode *multiNode); static List * QueryTargetList(MultiNode *multiNode); static List * TargetEntryList(List *expressionList); static List * QueryGroupClauseList(MultiNode *multiNode); static List * QuerySelectClauseList(MultiNode *multiNode); static List * QueryJoinClauseList(MultiNode *multiNode); static List * QueryFromList(List *rangeTableList); static Node * QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList); static RangeTblEntry * JoinRangeTableEntry(JoinExpr *joinExpr, List *dependedJobList, List *rangeTableList); static int ExtractRangeTableId(Node *node); static void ExtractColumns(RangeTblEntry *rangeTableEntry, int rangeTableId, List *dependedJobList, List **columnNames, List **columnVars); static RangeTblEntry * DerivedRangeTableEntry(MultiNode *multiNode, List *columnNames, List *tableIdList); static List * DerivedColumnNameList(uint32 columnCount, uint64 generatingJobId); static Query * BuildSubqueryJobQuery(MultiNode *multiNode); static void UpdateColumnAttributes(Var *column, List *rangeTableList, List *dependedJobList); static Index NewTableId(Index originalTableId, List *rangeTableList); static AttrNumber NewColumnId(Index originalTableId, AttrNumber originalColumnId, RangeTblEntry *newRangeTableEntry, List *dependedJobList); static Job * JobForRangeTable(List *jobList, RangeTblEntry *rangeTableEntry); static Job * JobForTableIdList(List *jobList, List *searchedTableIdList); static List * ChildNodeList(MultiNode *multiNode); static uint64 UniqueJobId(void); static Job * BuildJob(Query *jobQuery, List *dependedJobList); static MapMergeJob * BuildMapMergeJob(Query *jobQuery, List *dependedJobList, Var *partitionKey, PartitionType partitionType, Oid baseRelationId, BoundaryNodeJobType boundaryNodeJobType); static uint32 HashPartitionCount(void); static ArrayType * SplitPointObject(ShardInterval **shardIntervalArray, uint32 shardIntervalCount); /* Local functions forward declarations for task list creation and helper functions */ static bool MultiPlanRouterExecutable(MultiPlan *multiPlan); static Job * BuildJobTreeTaskList(Job *jobTree, PlannerRestrictionContext *plannerRestrictionContext); static List * SubquerySqlTaskList(Job *job, PlannerRestrictionContext *plannerRestrictionContext); static void ErrorIfUnsupportedShardDistribution(Query *query); static bool CoPartitionedTables(Oid firstRelationId, Oid secondRelationId); static bool ShardIntervalsEqual(FmgrInfo *comparisonFunction, ShardInterval *firstInterval, ShardInterval *secondInterval); static Task * SubqueryTaskCreate(Query *originalQuery, ShardInterval *shardInterval, RelationRestrictionContext *restrictionContext, uint32 taskId); static List * SqlTaskList(Job *job); static bool DependsOnHashPartitionJob(Job *job); static uint32 AnchorRangeTableId(List *rangeTableList); static List * BaseRangeTableIdList(List *rangeTableList); static List * 
AnchorRangeTableIdList(List *rangeTableList, List *baseRangeTableIdList); static void AdjustColumnOldAttributes(List *expressionList); static List * RangeTableFragmentsList(List *rangeTableList, List *whereClauseList, List *dependedJobList); static OperatorCacheEntry * LookupOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber); static Oid GetOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber); static List * FragmentCombinationList(List *rangeTableFragmentsList, Query *jobQuery, List *dependedJobList); static JoinSequenceNode * JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *dependedJobList); static bool PartitionedOnColumn(Var *column, List *rangeTableList, List *dependedJobList); static void CheckJoinBetweenColumns(OpExpr *joinClause); static List * FindRangeTableFragmentsList(List *rangeTableFragmentsList, int taskId); static bool JoinPrunable(RangeTableFragment *leftFragment, RangeTableFragment *rightFragment); static ShardInterval * FragmentInterval(RangeTableFragment *fragment); static StringInfo FragmentIntervalString(ShardInterval *fragmentInterval); static List * DataFetchTaskList(uint64 jobId, uint32 taskIdIndex, List *fragmentList); static StringInfo NodeNameArrayString(List *workerNodeList); static StringInfo NodePortArrayString(List *workerNodeList); static StringInfo DatumArrayString(Datum *datumArray, uint32 datumCount, Oid datumTypeId); static void UpdateRangeTableAlias(List *rangeTableList, List *fragmentList); static Alias * FragmentAlias(RangeTblEntry *rangeTableEntry, RangeTableFragment *fragment); static uint64 AnchorShardId(List *fragmentList, uint32 anchorRangeTableId); static List * PruneSqlTaskDependencies(List *sqlTaskList); static List * AssignTaskList(List *sqlTaskList); static bool HasMergeTaskDependencies(List *sqlTaskList); static List * GreedyAssignTaskList(List *taskList); static Task * GreedyAssignTask(WorkerNode *workerNode, List *taskList, List *activeShardPlacementLists); static List * RoundRobinAssignTaskList(List *taskList); static List * RoundRobinReorder(Task *task, List *placementList); static List * ReorderAndAssignTaskList(List *taskList, List * (*reorderFunction)(Task *, List *)); static int CompareTasksByShardId(const void *leftElement, const void *rightElement); static List * ActiveShardPlacementLists(List *taskList); static List * ActivePlacementList(List *placementList); static List * LeftRotateList(List *list, uint32 rotateCount); static List * FindDependedMergeTaskList(Task *sqlTask); static List * AssignDualHashTaskList(List *taskList); static int CompareTasksByTaskId(const void *leftElement, const void *rightElement); static void AssignDataFetchDependencies(List *taskList); static uint32 TaskListHighestTaskId(List *taskList); static List * MapTaskList(MapMergeJob *mapMergeJob, List *filterTaskList); static char * ColumnName(Var *column, List *rangeTableList); static StringInfo SplitPointArrayString(ArrayType *splitPointObject, Oid columnType, int32 columnTypeMod); static List * MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex); static StringInfo ColumnNameArrayString(uint32 columnCount, uint64 generatingJobId); static StringInfo ColumnTypeArrayString(List *targetEntryList); static StringInfo MergeTableQueryString(uint32 taskIdIndex, List *targetEntryList); static StringInfo IntermediateTableQueryString(uint64 jobId, uint32 taskIdIndex, Query *reduceQuery); static uint32 FinalTargetEntryCount(List *targetEntryList); /* * MultiPhysicalPlanCreate is the 
entry point for physical plan generation. The * function builds the physical plan; this plan includes the list of tasks to be * executed on worker nodes, and the final query to run on the master node. */ MultiPlan * MultiPhysicalPlanCreate(MultiTreeRoot *multiTree, PlannerRestrictionContext *plannerRestrictionContext) { MultiPlan *multiPlan = NULL; Job *workerJob = NULL; Query *masterQuery = NULL; List *masterDependedJobList = NIL; /* build the worker job tree and check that we have only one job in the tree */ workerJob = BuildJobTree(multiTree); /* create the tree of executable tasks for the worker job */ workerJob = BuildJobTreeTaskList(workerJob, plannerRestrictionContext); /* build the final merge query to execute on the master */ masterDependedJobList = list_make1(workerJob); masterQuery = BuildJobQuery((MultiNode *) multiTree, masterDependedJobList); multiPlan = CitusMakeNode(MultiPlan); multiPlan->workerJob = workerJob; multiPlan->masterQuery = masterQuery; multiPlan->routerExecutable = MultiPlanRouterExecutable(multiPlan); multiPlan->operation = CMD_SELECT; return multiPlan; } /* * MultiPlanRouterExecutable returns true if the input multiPlan is * router executable. * * Note that all the multi plans that are created by router planner are * already router executable. Thus, this function should only be called * for multi plans that are not generated by router planner. */ static bool MultiPlanRouterExecutable(MultiPlan *multiPlan) { Query *masterQuery = multiPlan->masterQuery; Job *job = multiPlan->workerJob; List *workerTaskList = job->taskList; int taskCount = list_length(workerTaskList); int dependedJobCount = list_length(job->dependedJobList); bool masterQueryHasAggregates = false; if (!EnableRouterExecution) { return false; } /* router executor cannot execute SELECT queries that hit more than one shard */ if (taskCount != 1) { return false; } /* router executor cannot execute repartition jobs */ if (dependedJobCount > 0) { return false; } /* * Router executor does not run the master query. This means that aggregation and * sorting on the master query wouldn't be executed. Thus, such plans shouldn't be * qualified as router executable. */ if (masterQuery != NULL && list_length(masterQuery->sortClause) > 0) { return false; } /* * Note that the worker query having an aggregate means that the master query should * have either an aggregate or a function expression which has to be executed for * the correct results. */ masterQueryHasAggregates = job->jobQuery->hasAggs; if (masterQueryHasAggregates) { return false; } return true; } /* * BuildJobTree builds the physical job tree from the given logical plan tree. * The function walks over the logical plan from the bottom up, finds boundaries * for jobs, and creates the query structure for each job. The function also * sets dependencies between jobs, and then returns the top level worker job.
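 *
 * As an illustration of the boundaries handled below: a single- or dual-partition
 * join forms a JOIN_MAP_MERGE_JOB boundary and yields a map/merge job per
 * partitioned child, a partition node directly under the extended operator forms
 * a SUBQUERY_MAP_MERGE_JOB boundary, and a collect node that is not under a
 * partition node forms the TOP_LEVEL_WORKER_JOB boundary.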
*/ static Job * BuildJobTree(MultiTreeRoot *multiTree) { /* start building the tree from the deepest left node */ MultiNode *leftMostNode = LeftMostNode(multiTree); MultiNode *currentNode = leftMostNode; MultiNode *parentNode = ParentNode(currentNode); List *loopDependedJobList = NIL; Job *topLevelJob = NULL; while (parentNode != NULL) { CitusNodeTag currentNodeType = CitusNodeTag(currentNode); CitusNodeTag parentNodeType = CitusNodeTag(parentNode); BoundaryNodeJobType boundaryNodeJobType = JOB_INVALID_FIRST; /* we first check if this node forms the boundary for a remote job */ if (currentNodeType == T_MultiJoin) { MultiJoin *joinNode = (MultiJoin *) currentNode; if (joinNode->joinRuleType == SINGLE_PARTITION_JOIN || joinNode->joinRuleType == DUAL_PARTITION_JOIN) { boundaryNodeJobType = JOIN_MAP_MERGE_JOB; } } else if (currentNodeType == T_MultiPartition && parentNodeType == T_MultiExtendedOp) { boundaryNodeJobType = SUBQUERY_MAP_MERGE_JOB; } else if (currentNodeType == T_MultiCollect && parentNodeType != T_MultiPartition) { boundaryNodeJobType = TOP_LEVEL_WORKER_JOB; } /* * If this node is at the boundary for a repartition or top level worker * job, we build the corresponding job(s) and set their dependencies. */ if (boundaryNodeJobType == JOIN_MAP_MERGE_JOB) { MultiJoin *joinNode = (MultiJoin *) currentNode; MultiNode *leftChildNode = joinNode->binaryNode.leftChildNode; MultiNode *rightChildNode = joinNode->binaryNode.rightChildNode; PartitionType partitionType = PARTITION_INVALID_FIRST; Oid baseRelationId = InvalidOid; if (joinNode->joinRuleType == SINGLE_PARTITION_JOIN) { partitionType = RANGE_PARTITION_TYPE; baseRelationId = RangePartitionJoinBaseRelationId(joinNode); } else if (joinNode->joinRuleType == DUAL_PARTITION_JOIN) { partitionType = HASH_PARTITION_TYPE; } if (CitusIsA(leftChildNode, MultiPartition)) { MultiPartition *partitionNode = (MultiPartition *) leftChildNode; MultiNode *queryNode = GrandChildNode((MultiUnaryNode *) partitionNode); Var *partitionKey = partitionNode->partitionColumn; /* build query and partition job */ List *dependedJobList = list_copy(loopDependedJobList); Query *jobQuery = BuildJobQuery(queryNode, dependedJobList); MapMergeJob *mapMergeJob = BuildMapMergeJob(jobQuery, dependedJobList, partitionKey, partitionType, baseRelationId, JOIN_MAP_MERGE_JOB); /* reset depended job list */ loopDependedJobList = NIL; loopDependedJobList = list_make1(mapMergeJob); } if (CitusIsA(rightChildNode, MultiPartition)) { MultiPartition *partitionNode = (MultiPartition *) rightChildNode; MultiNode *queryNode = GrandChildNode((MultiUnaryNode *) partitionNode); Var *partitionKey = partitionNode->partitionColumn; /* * The right query and right partition job do not depend on any * jobs since our logical plan tree is left deep. 
*/ Query *jobQuery = BuildJobQuery(queryNode, NIL); MapMergeJob *mapMergeJob = BuildMapMergeJob(jobQuery, NIL, partitionKey, partitionType, baseRelationId, JOIN_MAP_MERGE_JOB); /* append to the depended job list for on-going dependencies */ loopDependedJobList = lappend(loopDependedJobList, mapMergeJob); } } else if (boundaryNodeJobType == SUBQUERY_MAP_MERGE_JOB) { MultiPartition *partitionNode = (MultiPartition *) currentNode; MultiNode *queryNode = GrandChildNode((MultiUnaryNode *) partitionNode); Var *partitionKey = partitionNode->partitionColumn; /* build query and partition job */ List *dependedJobList = list_copy(loopDependedJobList); Query *jobQuery = BuildJobQuery(queryNode, dependedJobList); MapMergeJob *mapMergeJob = BuildMapMergeJob(jobQuery, dependedJobList, partitionKey, HASH_PARTITION_TYPE, InvalidOid, SUBQUERY_MAP_MERGE_JOB); Query *reduceQuery = BuildReduceQuery((MultiExtendedOp *) parentNode, list_make1(mapMergeJob)); mapMergeJob->reduceQuery = reduceQuery; /* reset depended job list */ loopDependedJobList = NIL; loopDependedJobList = list_make1(mapMergeJob); } else if (boundaryNodeJobType == TOP_LEVEL_WORKER_JOB) { MultiNode *childNode = ChildNode((MultiUnaryNode *) currentNode); List *dependedJobList = list_copy(loopDependedJobList); bool subqueryPushdown = false; List *subqueryMultiTableList = SubqueryMultiTableList(childNode); int subqueryCount = list_length(subqueryMultiTableList); if (subqueryCount > 0) { subqueryPushdown = true; } /* * Build top level query. If subquery pushdown is set, we use a * slightly different version of BuildJobQuery(). They are similar * but we don't need some parts of BuildJobQuery() for subquery * pushdown such as updating column attributes etc. */ if (subqueryPushdown) { Query *topLevelQuery = BuildSubqueryJobQuery(childNode); topLevelJob = BuildJob(topLevelQuery, dependedJobList); topLevelJob->subqueryPushdown = true; } else { Query *topLevelQuery = BuildJobQuery(childNode, dependedJobList); topLevelJob = BuildJob(topLevelQuery, dependedJobList); } } /* walk up the tree */ currentNode = parentNode; parentNode = ParentNode(currentNode); } return topLevelJob; } /* * LeftMostNode finds the deepest left node in the left-deep logical plan tree. * We build the physical plan by traversing the logical plan from the bottom up; * and this function helps us find the bottom of the logical tree. */ static MultiNode * LeftMostNode(MultiTreeRoot *multiTree) { MultiNode *currentNode = (MultiNode *) multiTree; MultiNode *leftChildNode = ChildNode((MultiUnaryNode *) multiTree); while (leftChildNode != NULL) { currentNode = leftChildNode; if (UnaryOperator(currentNode)) { leftChildNode = ChildNode((MultiUnaryNode *) currentNode); } else if (BinaryOperator(currentNode)) { MultiBinaryNode *binaryNode = (MultiBinaryNode *) currentNode; leftChildNode = binaryNode->leftChildNode; } } return currentNode; } /* * RangePartitionJoinBaseRelationId finds partition node from join node, and * returns base relation id of this node. Note that this function assumes that * given join node is range partition join type.
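 *
 * The base relation is the table whose existing shard boundaries drive the
 * repartitioning: BuildMapMergeJob later derives the range split points from
 * this relation's sorted shard intervals.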
*/ static Oid RangePartitionJoinBaseRelationId(MultiJoin *joinNode) { MultiPartition *partitionNode = NULL; MultiTable *baseTable = NULL; Index baseTableId = 0; Oid baseRelationId = InvalidOid; MultiNode *leftChildNode = joinNode->binaryNode.leftChildNode; MultiNode *rightChildNode = joinNode->binaryNode.rightChildNode; if (CitusIsA(leftChildNode, MultiPartition)) { partitionNode = (MultiPartition *) leftChildNode; } else if (CitusIsA(rightChildNode, MultiPartition)) { partitionNode = (MultiPartition *) rightChildNode; } baseTableId = partitionNode->splitPointTableId; baseTable = FindTableNode((MultiNode *) joinNode, baseTableId); baseRelationId = baseTable->relationId; return baseRelationId; } /* * FindTableNode walks over the given logical plan tree, and returns the table * node that corresponds to the given range tableId. */ static MultiTable * FindTableNode(MultiNode *multiNode, int rangeTableId) { MultiTable *foundTableNode = NULL; List *tableNodeList = FindNodesOfType(multiNode, T_MultiTable); ListCell *tableNodeCell = NULL; foreach(tableNodeCell, tableNodeList) { MultiTable *tableNode = (MultiTable *) lfirst(tableNodeCell); if (tableNode->rangeTableId == rangeTableId) { foundTableNode = tableNode; break; } } Assert(foundTableNode != NULL); return foundTableNode; } /* * BuildJobQuery traverses the given logical plan tree, determines the job that * corresponds to this part of the tree, and builds the query structure for that * particular job. The function assumes that jobs, this particular job depends on, * have already been built, as their output is needed to build the query. */ static Query * BuildJobQuery(MultiNode *multiNode, List *dependedJobList) { Query *jobQuery = NULL; MultiNode *parentNode = NULL; bool updateColumnAttributes = false; List *rangeTableList = NIL; List *targetList = NIL; List *extendedOpNodeList = NIL; List *sortClauseList = NIL; List *groupClauseList = NIL; List *selectClauseList = NIL; List *columnList = NIL; Node *limitCount = NULL; Node *limitOffset = NULL; ListCell *columnCell = NULL; FromExpr *joinTree = NULL; Node *joinRoot = NULL; Node *havingQual = NULL; /* we start building jobs from below the collect node */ Assert(!CitusIsA(multiNode, MultiCollect)); /* * First check if we are building a master/worker query. If we are building * a worker query, we update the column attributes for target entries, select * and join columns. Because if underlying query includes repartition joins, * then we create multiple queries from a join. In this case, range table lists * and column lists are subject to change. * * Note that we don't do this for master queries, as column attributes for * master target entries are already set during the master/worker split. */ parentNode = ParentNode(multiNode); if (parentNode != NULL) { updateColumnAttributes = true; } /* * If we are building this query on a repartitioned subquery job then we * don't need to update column attributes. */ if (dependedJobList != NIL) { Job *job = (Job *) linitial(dependedJobList); if (CitusIsA(job, MapMergeJob)) { MapMergeJob *mapMergeJob = (MapMergeJob *) job; if (mapMergeJob->reduceQuery) { updateColumnAttributes = false; } } } /* * If we have an extended operator, then we copy the operator's target list. * Otherwise, we use the target list based on the MultiProject node at this * level in the query tree. 
*/ extendedOpNodeList = FindNodesOfType(multiNode, T_MultiExtendedOp); if (extendedOpNodeList != NIL) { MultiExtendedOp *extendedOp = (MultiExtendedOp *) linitial(extendedOpNodeList); targetList = copyObject(extendedOp->targetList); } else { targetList = QueryTargetList(multiNode); } /* build the join tree and the range table list */ rangeTableList = BaseRangeTableList(multiNode); joinRoot = QueryJoinTree(multiNode, dependedJobList, &rangeTableList); /* update the column attributes for target entries */ if (updateColumnAttributes) { ListCell *columnCell = NULL; List *columnList = pull_var_clause_default((Node *) targetList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); UpdateColumnAttributes(column, rangeTableList, dependedJobList); } } /* extract limit count/offset and sort clauses */ if (extendedOpNodeList != NIL) { MultiExtendedOp *extendedOp = (MultiExtendedOp *) linitial(extendedOpNodeList); limitCount = extendedOp->limitCount; limitOffset = extendedOp->limitOffset; sortClauseList = extendedOp->sortClauseList; havingQual = extendedOp->havingQual; } /* build group clauses */ groupClauseList = QueryGroupClauseList(multiNode); /* build the where clause list using select predicates */ selectClauseList = QuerySelectClauseList(multiNode); /* set correct column attributes for select columns */ if (updateColumnAttributes) { columnCell = NULL; columnList = pull_var_clause_default((Node *) selectClauseList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); UpdateColumnAttributes(column, rangeTableList, dependedJobList); } } /* * Build the From/Where construct. We keep the where-clause list implicitly * AND'd, since both partition and join pruning depends on the clauses being * expressed as a list. */ joinTree = makeNode(FromExpr); joinTree->quals = (Node *) list_copy(selectClauseList); joinTree->fromlist = list_make1(joinRoot); /* build the query structure for this job */ jobQuery = makeNode(Query); jobQuery->commandType = CMD_SELECT; jobQuery->querySource = QSRC_ORIGINAL; jobQuery->canSetTag = true; jobQuery->rtable = rangeTableList; jobQuery->targetList = targetList; jobQuery->jointree = joinTree; jobQuery->sortClause = sortClauseList; jobQuery->groupClause = groupClauseList; jobQuery->limitOffset = limitOffset; jobQuery->limitCount = limitCount; jobQuery->havingQual = havingQual; jobQuery->hasAggs = contain_agg_clause((Node *) targetList); return jobQuery; } /* * BuildReduceQuery traverses the given logical plan tree, determines the job that * corresponds to this part of the tree, and builds the query structure for that * particular job. The function assumes that jobs this particular job depends on * have already been built, as their output is needed to build the query. 
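 *
 * The reduce query reads from a derived (merge) table whose column names are
 * generated from MERGE_COLUMN_FORMAT and the column position, as set up in the
 * loop below; for instance, a depended job with three output columns produces
 * derived columns for positions 0, 1 and 2.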
*/ static Query * BuildReduceQuery(MultiExtendedOp *extendedOpNode, List *dependedJobList) { Query *reduceQuery = NULL; MultiNode *multiNode = (MultiNode *) extendedOpNode; List *derivedRangeTableList = NIL; List *targetList = NIL; List *whereClauseList = NIL; List *selectClauseList = NIL; List *joinClauseList = NIL; List *columnList = NIL; ListCell *columnCell = NULL; FromExpr *joinTree = NULL; List *columnNameList = NIL; RangeTblEntry *rangeTableEntry = NULL; Job *dependedJob = linitial(dependedJobList); List *dependedTargetList = dependedJob->jobQuery->targetList; uint32 columnCount = (uint32) list_length(dependedTargetList); uint32 columnIndex = 0; for (columnIndex = 0; columnIndex < columnCount; columnIndex++) { Value *columnValue = NULL; StringInfo columnNameString = makeStringInfo(); appendStringInfo(columnNameString, MERGE_COLUMN_FORMAT, columnIndex); columnValue = makeString(columnNameString->data); columnNameList = lappend(columnNameList, columnValue); } /* create a derived range table for the subtree below the collect */ rangeTableEntry = DerivedRangeTableEntry(multiNode, columnNameList, OutputTableIdList(multiNode)); rangeTableEntry->eref->colnames = columnNameList; ModifyRangeTblExtraData(rangeTableEntry, CITUS_RTE_SHARD, NULL, NULL, NULL); derivedRangeTableList = lappend(derivedRangeTableList, rangeTableEntry); targetList = copyObject(extendedOpNode->targetList); columnList = pull_var_clause_default((Node *) targetList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); Index originalTableId = column->varnoold; /* find the new table identifier */ Index newTableId = NewTableId(originalTableId, derivedRangeTableList); column->varno = newTableId; } /* build the where clause list using select and join predicates */ selectClauseList = QuerySelectClauseList((MultiNode *) extendedOpNode); joinClauseList = QueryJoinClauseList((MultiNode *) extendedOpNode); whereClauseList = list_concat(selectClauseList, joinClauseList); /* * Build the From/Where construct. We keep the where-clause list implicitly * AND'd, since both partition and join pruning depends on the clauses being * expressed as a list. */ joinTree = makeNode(FromExpr); joinTree->quals = (Node *) whereClauseList; joinTree->fromlist = QueryFromList(derivedRangeTableList); /* build the query structure for this job */ reduceQuery = makeNode(Query); reduceQuery->commandType = CMD_SELECT; reduceQuery->querySource = QSRC_ORIGINAL; reduceQuery->canSetTag = true; reduceQuery->rtable = derivedRangeTableList; reduceQuery->targetList = targetList; reduceQuery->jointree = joinTree; reduceQuery->sortClause = extendedOpNode->sortClauseList; reduceQuery->groupClause = extendedOpNode->groupClauseList; reduceQuery->limitOffset = extendedOpNode->limitOffset; reduceQuery->limitCount = extendedOpNode->limitCount; reduceQuery->havingQual = extendedOpNode->havingQual; reduceQuery->hasAggs = contain_agg_clause((Node *) targetList); return reduceQuery; } /* * BaseRangeTableList returns the list of range table entries for base tables in * the query. These base tables stand in contrast to derived tables generated by * repartition jobs. Note that this function only considers base tables relevant * to the current query, and does not visit nodes under the collect node. 
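 *
 * Tables below a collect node belong to a depended job; they appear in the
 * current query only through the derived range table entries that
 * QueryJoinTree creates for those collect nodes.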
*/ static List * BaseRangeTableList(MultiNode *multiNode) { List *baseRangeTableList = NIL; List *pendingNodeList = list_make1(multiNode); while (pendingNodeList != NIL) { MultiNode *multiNode = (MultiNode *) linitial(pendingNodeList); CitusNodeTag nodeType = CitusNodeTag(multiNode); pendingNodeList = list_delete_first(pendingNodeList); if (nodeType == T_MultiTable) { /* * We represent subqueries as MultiTables, and so for base table * entries we skip the subquery ones. */ MultiTable *multiTable = (MultiTable *) multiNode; if (multiTable->relationId != SUBQUERY_RELATION_ID && multiTable->relationId != SUBQUERY_PUSHDOWN_RELATION_ID) { RangeTblEntry *rangeTableEntry = makeNode(RangeTblEntry); rangeTableEntry->inFromCl = true; rangeTableEntry->eref = multiTable->referenceNames; rangeTableEntry->alias = multiTable->alias; rangeTableEntry->relid = multiTable->relationId; SetRangeTblExtraData(rangeTableEntry, CITUS_RTE_RELATION, NULL, NULL, list_make1_int(multiTable->rangeTableId)); baseRangeTableList = lappend(baseRangeTableList, rangeTableEntry); } } /* do not visit nodes that belong to remote queries */ if (nodeType != T_MultiCollect) { List *childNodeList = ChildNodeList(multiNode); pendingNodeList = list_concat(pendingNodeList, childNodeList); } } return baseRangeTableList; } /* * DerivedRangeTableEntry builds a range table entry for the derived table. This * derived table either represents the output of a repartition job; or the data * on worker nodes in case of the master node query. */ static RangeTblEntry * DerivedRangeTableEntry(MultiNode *multiNode, List *columnList, List *tableIdList) { RangeTblEntry *rangeTableEntry = makeNode(RangeTblEntry); rangeTableEntry->inFromCl = true; rangeTableEntry->eref = makeNode(Alias); rangeTableEntry->eref->colnames = columnList; SetRangeTblExtraData(rangeTableEntry, CITUS_RTE_REMOTE_QUERY, NULL, NULL, tableIdList); return rangeTableEntry; } /* * DerivedColumnNameList builds a column name list for derived (intermediate) * tables. These column names are then used when building the create statement * query string for derived tables. */ static List * DerivedColumnNameList(uint32 columnCount, uint64 generatingJobId) { List *columnNameList = NIL; uint32 columnIndex = 0; for (columnIndex = 0; columnIndex < columnCount; columnIndex++) { StringInfo columnName = makeStringInfo(); Value *columnValue = NULL; appendStringInfo(columnName, "intermediate_column_"); appendStringInfo(columnName, UINT64_FORMAT "_", generatingJobId); appendStringInfo(columnName, "%u", columnIndex); columnValue = makeString(columnName->data); columnNameList = lappend(columnNameList, columnValue); } return columnNameList; } /* * QueryTargetList returns the target entry list for the projected columns * needed to evaluate the operators above the given multiNode. To do this, * the function retrieves a list of all MultiProject nodes below the given * node and picks the columns from the top-most MultiProject node, as this * will be the minimal list of columns needed. Note that this function relies * on a pre-order traversal of the operator tree by the function FindNodesOfType.
*/ static List * QueryTargetList(MultiNode *multiNode) { MultiProject *topProjectNode = NULL; List *columnList = NIL; List *queryTargetList = NIL; List *projectNodeList = FindNodesOfType(multiNode, T_MultiProject); Assert(list_length(projectNodeList) > 0); topProjectNode = (MultiProject *) linitial(projectNodeList); columnList = topProjectNode->columnList; queryTargetList = TargetEntryList(columnList); Assert(queryTargetList != NIL); return queryTargetList; } /* * TargetEntryList creates a target entry for each expression in the given list, * and returns the newly created target entries in a list. */ static List * TargetEntryList(List *expressionList) { List *targetEntryList = NIL; ListCell *expressionCell = NULL; foreach(expressionCell, expressionList) { Expr *expression = (Expr *) lfirst(expressionCell); TargetEntry *targetEntry = makeTargetEntry(expression, list_length(targetEntryList) + 1, NULL, false); targetEntryList = lappend(targetEntryList, targetEntry); } return targetEntryList; } /* * QueryGroupClauseList extracts the group clause list from the logical plan. If * no grouping clauses exist, the function returns an empty list. */ static List * QueryGroupClauseList(MultiNode *multiNode) { List *groupClauseList = NIL; List *pendingNodeList = list_make1(multiNode); while (pendingNodeList != NIL) { MultiNode *multiNode = (MultiNode *) linitial(pendingNodeList); CitusNodeTag nodeType = CitusNodeTag(multiNode); pendingNodeList = list_delete_first(pendingNodeList); /* extract the group clause list from the extended operator */ if (nodeType == T_MultiExtendedOp) { MultiExtendedOp *extendedOpNode = (MultiExtendedOp *) multiNode; groupClauseList = extendedOpNode->groupClauseList; } /* add children only if this node isn't a multi collect and multi table */ if (nodeType != T_MultiCollect && nodeType != T_MultiTable) { List *childNodeList = ChildNodeList(multiNode); pendingNodeList = list_concat(pendingNodeList, childNodeList); } } return groupClauseList; } /* * QuerySelectClauseList traverses the given logical plan tree, and extracts all * select clauses from the select nodes. Note that this function does not walk * below a collect node; the clauses below the collect node apply to a remote * query, and they would have been captured by the remote job we depend upon. */ static List * QuerySelectClauseList(MultiNode *multiNode) { List *selectClauseList = NIL; List *pendingNodeList = list_make1(multiNode); while (pendingNodeList != NIL) { MultiNode *multiNode = (MultiNode *) linitial(pendingNodeList); CitusNodeTag nodeType = CitusNodeTag(multiNode); pendingNodeList = list_delete_first(pendingNodeList); /* extract select clauses from the multi select node */ if (nodeType == T_MultiSelect) { MultiSelect *selectNode = (MultiSelect *) multiNode; List *clauseList = copyObject(selectNode->selectClauseList); selectClauseList = list_concat(selectClauseList, clauseList); } /* add children only if this node isn't a multi collect */ if (nodeType != T_MultiCollect) { List *childNodeList = ChildNodeList(multiNode); pendingNodeList = list_concat(pendingNodeList, childNodeList); } } return selectClauseList; } /* * QueryJoinClauseList traverses the given logical plan tree, and extracts all * join clauses from the join nodes. Note that this function does not walk below * a collect node; the clauses below the collect node apply to another query, * and they would have been captured by the remote job we depend upon. 
*/ static List * QueryJoinClauseList(MultiNode *multiNode) { List *joinClauseList = NIL; List *pendingNodeList = list_make1(multiNode); while (pendingNodeList != NIL) { MultiNode *multiNode = (MultiNode *) linitial(pendingNodeList); CitusNodeTag nodeType = CitusNodeTag(multiNode); pendingNodeList = list_delete_first(pendingNodeList); /* extract join clauses from the multi join node */ if (nodeType == T_MultiJoin) { MultiJoin *joinNode = (MultiJoin *) multiNode; List *clauseList = copyObject(joinNode->joinClauseList); joinClauseList = list_concat(joinClauseList, clauseList); } /* add this node's children only if the node isn't a multi collect */ if (nodeType != T_MultiCollect) { List *childNodeList = ChildNodeList(multiNode); pendingNodeList = list_concat(pendingNodeList, childNodeList); } } return joinClauseList; } /* * Create a tree of JoinExpr and RangeTblRef nodes for the job query from * a given multiNode. If the tree contains MultiCollect or MultiJoin nodes, * add corresponding entries to the range table list. We need to construct * the entries at the same time as the tree to know the appropriate rtindex. */ static Node * QueryJoinTree(MultiNode *multiNode, List *dependedJobList, List **rangeTableList) { CitusNodeTag nodeType = CitusNodeTag(multiNode); switch (nodeType) { case T_MultiJoin: { MultiJoin *joinNode = (MultiJoin *) multiNode; MultiBinaryNode *binaryNode = (MultiBinaryNode *) multiNode; List *columnList = NIL; ListCell *columnCell = NULL; RangeTblEntry *rangeTableEntry = NULL; JoinExpr *joinExpr = makeNode(JoinExpr); joinExpr->jointype = joinNode->joinType; joinExpr->isNatural = false; joinExpr->larg = QueryJoinTree(binaryNode->leftChildNode, dependedJobList, rangeTableList); joinExpr->rarg = QueryJoinTree(binaryNode->rightChildNode, dependedJobList, rangeTableList); joinExpr->usingClause = NIL; joinExpr->alias = NULL; joinExpr->rtindex = list_length(*rangeTableList) + 1; /* * PostgreSQL's optimizer may mark left joins as anti-joins, when there * is an right-hand-join-key-is-null restriction, but there is no logic * in ruleutils to deparse anti-joins, so we cannot construct a task * query containing anti-joins. We therefore translate anti-joins back * into left-joins. At some point, we may also want to use different * join pruning logic for anti-joins. * * This approach would not work for anti-joins introduced via NOT EXISTS * sublinks, but currently such queries are prevented by error checks in * the logical planner. */ if (joinExpr->jointype == JOIN_ANTI) { joinExpr->jointype = JOIN_LEFT; } rangeTableEntry = JoinRangeTableEntry(joinExpr, dependedJobList, *rangeTableList); *rangeTableList = lappend(*rangeTableList, rangeTableEntry); /* fix the column attributes in ON (...) 
clauses */ columnList = pull_var_clause_default((Node *) joinNode->joinClauseList); foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); UpdateColumnAttributes(column, *rangeTableList, dependedJobList); /* adjust our column old attributes for partition pruning to work */ column->varnoold = column->varno; column->varoattno = column->varattno; } /* make AND clauses explicit after fixing them */ joinExpr->quals = (Node *) make_ands_explicit(joinNode->joinClauseList); return (Node *) joinExpr; } case T_MultiTable: { MultiTable *rangeTableNode = (MultiTable *) multiNode; MultiUnaryNode *unaryNode = (MultiUnaryNode *) multiNode; if (unaryNode->childNode != NULL) { /* MultiTable is actually a subquery, return the query tree below */ Node *childNode = QueryJoinTree(unaryNode->childNode, dependedJobList, rangeTableList); return childNode; } else { RangeTblRef *rangeTableRef = makeNode(RangeTblRef); uint32 rangeTableId = rangeTableNode->rangeTableId; rangeTableRef->rtindex = NewTableId(rangeTableId, *rangeTableList); return (Node *) rangeTableRef; } } case T_MultiCollect: { List *tableIdList = OutputTableIdList(multiNode); Job *dependedJob = JobForTableIdList(dependedJobList, tableIdList); List *dependedTargetList = dependedJob->jobQuery->targetList; /* compute column names for the derived table */ uint32 columnCount = (uint32) list_length(dependedTargetList); List *columnNameList = DerivedColumnNameList(columnCount, dependedJob->jobId); RangeTblEntry *rangeTableEntry = DerivedRangeTableEntry(multiNode, columnNameList, tableIdList); RangeTblRef *rangeTableRef = makeNode(RangeTblRef); rangeTableRef->rtindex = list_length(*rangeTableList) + 1; *rangeTableList = lappend(*rangeTableList, rangeTableEntry); return (Node *) rangeTableRef; } case T_MultiCartesianProduct: { MultiBinaryNode *binaryNode = (MultiBinaryNode *) multiNode; RangeTblEntry *rangeTableEntry = NULL; JoinExpr *joinExpr = makeNode(JoinExpr); joinExpr->jointype = JOIN_INNER; joinExpr->isNatural = false; joinExpr->larg = QueryJoinTree(binaryNode->leftChildNode, dependedJobList, rangeTableList); joinExpr->rarg = QueryJoinTree(binaryNode->rightChildNode, dependedJobList, rangeTableList); joinExpr->usingClause = NIL; joinExpr->alias = NULL; joinExpr->quals = NULL; joinExpr->rtindex = list_length(*rangeTableList) + 1; rangeTableEntry = JoinRangeTableEntry(joinExpr, dependedJobList, *rangeTableList); *rangeTableList = lappend(*rangeTableList, rangeTableEntry); return (Node *) joinExpr; } case T_MultiTreeRoot: case T_MultiSelect: case T_MultiProject: case T_MultiExtendedOp: case T_MultiPartition: { MultiUnaryNode *unaryNode = (MultiUnaryNode *) multiNode; Node *childNode = NULL; Assert(UnaryOperator(multiNode)); childNode = QueryJoinTree(unaryNode->childNode, dependedJobList, rangeTableList); return childNode; } default: { ereport(ERROR, (errmsg("unrecognized multi-node type: %d", nodeType))); } } } /* * JoinRangeTableEntry builds a range table entry for a fully initialized JoinExpr node. * The column names and vars are determined using expandRTE, analogous to * transformFromClauseItem. 
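 *
 * The joined column names and vars are simply the left-hand side columns
 * followed by the right-hand side columns, mirroring how PostgreSQL builds
 * join alias variables for a join RTE.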
*/ static RangeTblEntry * JoinRangeTableEntry(JoinExpr *joinExpr, List *dependedJobList, List *rangeTableList) { RangeTblEntry *rangeTableEntry = makeNode(RangeTblEntry); List *joinedColumnNames = NIL; List *joinedColumnVars = NIL; List *leftColumnNames = NIL; List *leftColumnVars = NIL; int leftRangeTableId = ExtractRangeTableId(joinExpr->larg); RangeTblEntry *leftRTE = rt_fetch(leftRangeTableId, rangeTableList); List *rightColumnNames = NIL; List *rightColumnVars = NIL; int rightRangeTableId = ExtractRangeTableId(joinExpr->rarg); RangeTblEntry *rightRTE = rt_fetch(rightRangeTableId, rangeTableList); rangeTableEntry->rtekind = RTE_JOIN; rangeTableEntry->relid = InvalidOid; rangeTableEntry->inFromCl = true; rangeTableEntry->alias = joinExpr->alias; rangeTableEntry->jointype = joinExpr->jointype; rangeTableEntry->subquery = NULL; rangeTableEntry->eref = makeAlias("unnamed_join", NIL); ExtractColumns(leftRTE, leftRangeTableId, dependedJobList, &leftColumnNames, &leftColumnVars); ExtractColumns(rightRTE, rightRangeTableId, dependedJobList, &rightColumnNames, &rightColumnVars); joinedColumnNames = list_concat(joinedColumnNames, leftColumnNames); joinedColumnVars = list_concat(joinedColumnVars, leftColumnVars); joinedColumnNames = list_concat(joinedColumnNames, rightColumnNames); joinedColumnVars = list_concat(joinedColumnVars, rightColumnVars); rangeTableEntry->eref->colnames = joinedColumnNames; rangeTableEntry->joinaliasvars = joinedColumnVars; return rangeTableEntry; } /* * ExtractRangeTableId gets the range table id from a node that could * either be a JoinExpr or RangeTblRef. */ static int ExtractRangeTableId(Node *node) { int rangeTableId = 0; if (IsA(node, JoinExpr)) { JoinExpr *joinExpr = (JoinExpr *) node; rangeTableId = joinExpr->rtindex; } else if (IsA(node, RangeTblRef)) { RangeTblRef *rangeTableRef = (RangeTblRef *) node; rangeTableId = rangeTableRef->rtindex; } Assert(rangeTableId > 0); return rangeTableId; } /* * ExtractColumns gets a list of column names and vars for a given range * table entry using expandRTE. Since the range table entries in a job * query are mocked RTE_FUNCTION entries, it first translates the RTE's * to a form that expandRTE can handle. */ static void ExtractColumns(RangeTblEntry *rangeTableEntry, int rangeTableId, List *dependedJobList, List **columnNames, List **columnVars) { RangeTblEntry *callingRTE = NULL; CitusRTEKind rangeTableKind = GetRangeTblKind(rangeTableEntry); if (rangeTableKind == CITUS_RTE_JOIN) { /* * For joins, we can call expandRTE directly. */ callingRTE = rangeTableEntry; } else if (rangeTableKind == CITUS_RTE_RELATION) { /* * For distributed tables, we construct a regular table RTE to call * expandRTE, which will extract columns from the distributed table * schema. */ callingRTE = makeNode(RangeTblEntry); callingRTE->rtekind = RTE_RELATION; callingRTE->eref = rangeTableEntry->eref; callingRTE->relid = rangeTableEntry->relid; } else if (rangeTableKind == CITUS_RTE_REMOTE_QUERY) { Job *dependedJob = JobForRangeTable(dependedJobList, rangeTableEntry); Query *jobQuery = dependedJob->jobQuery; /* * For re-partition jobs, we construct a subquery RTE to call expandRTE, * which will extract the columns from the target list of the job query. 
*/ callingRTE = makeNode(RangeTblEntry); callingRTE->rtekind = RTE_SUBQUERY; callingRTE->eref = rangeTableEntry->eref; callingRTE->subquery = jobQuery; } else { ereport(ERROR, (errmsg("unsupported Citus RTE kind: %d", rangeTableKind))); } expandRTE(callingRTE, rangeTableId, 0, -1, false, columnNames, columnVars); } /* * QueryFromList creates the from list construct that is used for building the * query's join tree. The function creates the from list by making a range table * reference for each entry in the given range table list. */ static List * QueryFromList(List *rangeTableList) { List *fromList = NIL; Index rangeTableIndex = 1; int rangeTableCount = list_length(rangeTableList); for (rangeTableIndex = 1; rangeTableIndex <= rangeTableCount; rangeTableIndex++) { RangeTblRef *rangeTableReference = makeNode(RangeTblRef); rangeTableReference->rtindex = rangeTableIndex; fromList = lappend(fromList, rangeTableReference); } return fromList; } /* * BuildSubqueryJobQuery traverses the given logical plan tree, finds MultiTable * which represents the subquery. It builds the query structure by adding this * subquery as it is to range table list of the query. * * Such as if user runs a query like this; * * SELECT avg(id) FROM ( * SELECT ... FROM () * ) * * then this function will build this worker query as keeping subquery as it is; * * SELECT sum(id), count(id) FROM ( * SELECT ... FROM () * ) */ static Query * BuildSubqueryJobQuery(MultiNode *multiNode) { Query *jobQuery = NULL; Query *subquery = NULL; MultiTable *multiTable = NULL; RangeTblEntry *rangeTableEntry = NULL; List *subqueryMultiTableList = NIL; List *rangeTableList = NIL; List *targetList = NIL; List *extendedOpNodeList = NIL; List *sortClauseList = NIL; List *groupClauseList = NIL; List *whereClauseList = NIL; Node *havingQual = NULL; Node *limitCount = NULL; Node *limitOffset = NULL; FromExpr *joinTree = NULL; /* we start building jobs from below the collect node */ Assert(!CitusIsA(multiNode, MultiCollect)); subqueryMultiTableList = SubqueryMultiTableList(multiNode); Assert(list_length(subqueryMultiTableList) == 1); multiTable = (MultiTable *) linitial(subqueryMultiTableList); subquery = multiTable->subquery; /* build subquery range table list */ rangeTableEntry = makeNode(RangeTblEntry); rangeTableEntry->rtekind = RTE_SUBQUERY; rangeTableEntry->inFromCl = true; rangeTableEntry->eref = multiTable->referenceNames; rangeTableEntry->alias = multiTable->alias; rangeTableEntry->subquery = subquery; rangeTableList = list_make1(rangeTableEntry); /* * If we have an extended operator, then we copy the operator's target list. * Otherwise, we use the target list based on the MultiProject node at this * level in the query tree. 
*/ extendedOpNodeList = FindNodesOfType(multiNode, T_MultiExtendedOp); if (extendedOpNodeList != NIL) { MultiExtendedOp *extendedOp = (MultiExtendedOp *) linitial(extendedOpNodeList); targetList = copyObject(extendedOp->targetList); } else { targetList = QueryTargetList(multiNode); } /* extract limit count/offset, sort and having clauses */ if (extendedOpNodeList != NIL) { MultiExtendedOp *extendedOp = (MultiExtendedOp *) linitial(extendedOpNodeList); limitCount = extendedOp->limitCount; limitOffset = extendedOp->limitOffset; sortClauseList = extendedOp->sortClauseList; havingQual = extendedOp->havingQual; } /* build group clauses */ groupClauseList = QueryGroupClauseList(multiNode); /* build the where clause list using select predicates */ whereClauseList = QuerySelectClauseList(multiNode); /* * Build the From/Where construct. We keep the where-clause list implicitly * AND'd, since both partition and join pruning depends on the clauses being * expressed as a list. */ joinTree = makeNode(FromExpr); joinTree->quals = (Node *) whereClauseList; joinTree->fromlist = QueryFromList(rangeTableList); /* build the query structure for this job */ jobQuery = makeNode(Query); jobQuery->commandType = CMD_SELECT; jobQuery->querySource = QSRC_ORIGINAL; jobQuery->canSetTag = true; jobQuery->rtable = rangeTableList; jobQuery->targetList = targetList; jobQuery->jointree = joinTree; jobQuery->sortClause = sortClauseList; jobQuery->groupClause = groupClauseList; jobQuery->limitOffset = limitOffset; jobQuery->limitCount = limitCount; jobQuery->havingQual = havingQual; jobQuery->hasAggs = contain_agg_clause((Node *) targetList) || contain_agg_clause((Node *) havingQual); return jobQuery; } /* * UpdateColumnAttributes updates the column's range table reference (varno) and * column attribute number for the range table (varattno). The function uses the * newly built range table list to update the given column's attributes. */ static void UpdateColumnAttributes(Var *column, List *rangeTableList, List *dependedJobList) { Index originalTableId = column->varnoold; AttrNumber originalColumnId = column->varoattno; /* find the new table identifier */ Index newTableId = NewTableId(originalTableId, rangeTableList); AttrNumber newColumnId = originalColumnId; /* if this is a derived table, find the new column identifier */ RangeTblEntry *newRangeTableEntry = rt_fetch(newTableId, rangeTableList); if (GetRangeTblKind(newRangeTableEntry) == CITUS_RTE_REMOTE_QUERY) { newColumnId = NewColumnId(originalTableId, originalColumnId, newRangeTableEntry, dependedJobList); } column->varno = newTableId; column->varattno = newColumnId; } /* * NewTableId determines the new tableId for the query that is currently being * built. In this query, the original tableId represents the order of the table * in the initial parse tree. When queries involve repartitioning, we re-order * tables; and the new tableId corresponds to this new table order. 
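 *
 * For example, if the rebuilt range table list has a base table carrying
 * original id 2 followed by a derived table carrying original ids 1 and 3,
 * then NewTableId(3, ...) returns 2, the position of the derived entry.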
*/ static Index NewTableId(Index originalTableId, List *rangeTableList) { Index rangeTableIndex = 1; ListCell *rangeTableCell = NULL; foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); List *originalTableIdList = NIL; bool listMember = false; ExtractRangeTblExtraData(rangeTableEntry, NULL, NULL, NULL, &originalTableIdList); listMember = list_member_int(originalTableIdList, originalTableId); if (listMember) { return rangeTableIndex; } rangeTableIndex++; } ereport(ERROR, (errmsg("Unrecognized range table id %d", (int) originalTableId))); return 0; } /* * NewColumnId determines the new columnId for the query that is currently being * built. In this query, the original columnId corresponds to the column in base * tables. When the current query is a partition job and generates intermediate * tables, the columns have a different order and the new columnId corresponds * to this order. Please note that this function assumes columnIds for depended * jobs have already been updated. */ static AttrNumber NewColumnId(Index originalTableId, AttrNumber originalColumnId, RangeTblEntry *newRangeTableEntry, List *dependedJobList) { AttrNumber newColumnId = 1; AttrNumber columnIndex = 1; Job *dependedJob = JobForRangeTable(dependedJobList, newRangeTableEntry); List *targetEntryList = dependedJob->jobQuery->targetList; ListCell *targetEntryCell = NULL; foreach(targetEntryCell, targetEntryList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); Expr *expression = targetEntry->expr; Var *column = (Var *) expression; Assert(IsA(expression, Var)); /* * Check against the *old* values for this column, as the new values * would have been updated already. */ if (column->varnoold == originalTableId && column->varoattno == originalColumnId) { newColumnId = columnIndex; break; } columnIndex++; } return newColumnId; } /* * JobForRangeTable returns the job that corresponds to the given range table * entry. The function walks over jobs in the given job list, and compares each * job's table list against the given range table entry's table list. When two * table lists match, the function returns the matching job. Note that we call * this function in practice when we need to determine which one of the jobs we * depend upon corresponds to given range table entry. */ static Job * JobForRangeTable(List *jobList, RangeTblEntry *rangeTableEntry) { Job *searchedJob = NULL; List *searchedTableIdList = NIL; CitusRTEKind rangeTableKind; ExtractRangeTblExtraData(rangeTableEntry, &rangeTableKind, NULL, NULL, &searchedTableIdList); Assert(rangeTableKind == CITUS_RTE_REMOTE_QUERY); searchedJob = JobForTableIdList(jobList, searchedTableIdList); return searchedJob; } /* * JobForTableIdList returns the job that corresponds to the given * tableIdList. The function walks over jobs in the given job list, and * compares each job's table list against the given table list. When the * two table lists match, the function returns the matching job. 
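 *
 * The comparison is order-insensitive: the searched table id list and a job's
 * table id list match when both set differences between them are empty.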
*/ static Job * JobForTableIdList(List *jobList, List *searchedTableIdList) { Job *searchedJob = NULL; ListCell *jobCell = NULL; foreach(jobCell, jobList) { Job *job = (Job *) lfirst(jobCell); List *jobRangeTableList = job->jobQuery->rtable; List *jobTableIdList = NIL; ListCell *jobRangeTableCell = NULL; List *lhsDiff = NIL; List *rhsDiff = NIL; foreach(jobRangeTableCell, jobRangeTableList) { RangeTblEntry *jobRangeTable = (RangeTblEntry *) lfirst(jobRangeTableCell); List *tableIdList = NIL; ExtractRangeTblExtraData(jobRangeTable, NULL, NULL, NULL, &tableIdList); /* copy the list since list_concat is destructive */ tableIdList = list_copy(tableIdList); jobTableIdList = list_concat(jobTableIdList, tableIdList); } /* * Check if the searched range table's tableIds and the current job's * tableIds are the same. */ lhsDiff = list_difference_int(jobTableIdList, searchedTableIdList); rhsDiff = list_difference_int(searchedTableIdList, jobTableIdList); if (lhsDiff == NIL && rhsDiff == NIL) { searchedJob = job; break; } } Assert(searchedJob != NULL); return searchedJob; } /* Returns the list of children for the given multi node. */ static List * ChildNodeList(MultiNode *multiNode) { List *childNodeList = NIL; bool unaryNode = UnaryOperator(multiNode); bool binaryNode = BinaryOperator(multiNode); /* relation table nodes don't have any children */ if (CitusIsA(multiNode, MultiTable)) { MultiTable *multiTable = (MultiTable *) multiNode; if (multiTable->relationId != SUBQUERY_RELATION_ID) { return NIL; } } if (unaryNode) { MultiUnaryNode *unaryNode = (MultiUnaryNode *) multiNode; childNodeList = list_make1(unaryNode->childNode); } else if (binaryNode) { MultiBinaryNode *binaryNode = (MultiBinaryNode *) multiNode; childNodeList = list_make2(binaryNode->leftChildNode, binaryNode->rightChildNode); } return childNodeList; } /* * UniqueJobId allocates and returns a unique jobId for the job to be executed. * * The resulting job ID is built up as: * <16-bit group ID><24-bit process ID><1-bit secondary flag><23-bit local counter> * * When citus.enable_unique_job_ids is off then only the local counter is * included to get repeatable results. */ static uint64 UniqueJobId(void) { static uint32 jobIdCounter = 0; uint64 jobId = 0; uint64 jobIdNumber = 0; uint64 processId = 0; uint64 localGroupId = 0; jobIdCounter++; if (EnableUniqueJobIds) { /* * Add the local group id information to the jobId to * prevent concurrent jobs on different groups to conflict. */ localGroupId = GetLocalGroupId() & 0xFF; jobId = jobId | (localGroupId << 48); /* * Add the current process ID to distinguish jobs by this * backends from jobs started by other backends. Process * IDs can have at most 24-bits on platforms supported by * Citus. */ processId = MyProcPid & 0xFFFFFF; jobId = jobId | (processId << 24); /* * Add an extra bit for secondaries to distinguish their * jobs from primaries. */ if (RecoveryInProgress()) { jobId = jobId | (1 << 23); } } /* * Use the remaining 23 bits to distinguish jobs by the * same backend. */ jobIdNumber = jobIdCounter & 0x1FFFFFF; jobId = jobId | jobIdNumber; return jobId; } /* Builds a job from the given job query and depended job list. */ static Job * BuildJob(Query *jobQuery, List *dependedJobList) { Job *job = CitusMakeNode(Job); job->jobId = UniqueJobId(); job->jobQuery = jobQuery; job->dependedJobList = dependedJobList; job->requiresMasterEvaluation = false; return job; } /* * BuildMapMergeJob builds a MapMerge job from the given query and depended job * list. 
The function then copies and updates the logical plan's partition * column, and uses the join rule type to determine the physical repartitioning * method to apply. */ static MapMergeJob * BuildMapMergeJob(Query *jobQuery, List *dependedJobList, Var *partitionKey, PartitionType partitionType, Oid baseRelationId, BoundaryNodeJobType boundaryNodeJobType) { MapMergeJob *mapMergeJob = NULL; List *rangeTableList = jobQuery->rtable; Var *partitionColumn = copyObject(partitionKey); /* update the logical partition key's table and column identifiers */ if (boundaryNodeJobType != SUBQUERY_MAP_MERGE_JOB) { UpdateColumnAttributes(partitionColumn, rangeTableList, dependedJobList); } mapMergeJob = CitusMakeNode(MapMergeJob); mapMergeJob->job.jobId = UniqueJobId(); mapMergeJob->job.jobQuery = jobQuery; mapMergeJob->job.dependedJobList = dependedJobList; mapMergeJob->partitionColumn = partitionColumn; mapMergeJob->sortedShardIntervalArrayLength = 0; /* * We assume dual partition join defaults to hash partitioning, and single * partition join defaults to range partitioning. In practice, the join type * should have no impact on the physical repartitioning (hash/range) method. * If join type is not set, this means this job represents a subquery, and * uses hash partitioning. */ if (partitionType == HASH_PARTITION_TYPE) { uint32 partitionCount = HashPartitionCount(); mapMergeJob->partitionType = HASH_PARTITION_TYPE; mapMergeJob->partitionCount = partitionCount; } else if (partitionType == RANGE_PARTITION_TYPE) { /* build the split point object for the table on the right-hand side */ DistTableCacheEntry *cache = DistributedTableCacheEntry(baseRelationId); bool hasUninitializedShardInterval = false; uint32 shardCount = cache->shardIntervalArrayLength; ShardInterval **sortedShardIntervalArray = cache->sortedShardIntervalArray; char basePartitionMethod PG_USED_FOR_ASSERTS_ONLY = 0; hasUninitializedShardInterval = cache->hasUninitializedShardInterval; if (hasUninitializedShardInterval) { ereport(ERROR, (errmsg("cannot range repartition shard with " "missing min/max values"))); } basePartitionMethod = PartitionMethod(baseRelationId); /* this join-type currently doesn't work for hash partitioned tables */ Assert(basePartitionMethod != DISTRIBUTE_BY_HASH); mapMergeJob->partitionType = RANGE_PARTITION_TYPE; mapMergeJob->partitionCount = shardCount; mapMergeJob->sortedShardIntervalArray = sortedShardIntervalArray; mapMergeJob->sortedShardIntervalArrayLength = shardCount; } return mapMergeJob; } /* * HashPartitionCount returns the number of partition files we create for a hash * partition task. The function follows Hadoop's method for picking the number * of reduce tasks: 0.95 or 1.75 * node count * max reduces per node. We choose * the lower constant 0.95 so that all tasks can start immediately, but round it * to 1.0 so that we have a smooth number of partition tasks. */ static uint32 HashPartitionCount(void) { uint32 groupCount = ActiveReadableNodeCount(); double maxReduceTasksPerNode = MaxRunningTasksPerNode / 2.0; uint32 partitionCount = (uint32) rint(groupCount * maxReduceTasksPerNode); return partitionCount; } /* * SplitPointObject walks over shard intervals in the given array, extracts each * shard interval's minimum value, sorts and inserts these minimum values into a * new array. This sorted array is then used by the MapMerge job. 
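 *
 * For example, range shards with minimum values 1, 1001 and 2001 produce the
 * split point array {1, 1001, 2001}, which the repartitioning tasks use as
 * partition boundaries.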
*/ static ArrayType * SplitPointObject(ShardInterval **shardIntervalArray, uint32 shardIntervalCount) { ArrayType *splitPointObject = NULL; uint32 intervalIndex = 0; Oid typeId = InvalidOid; bool typeByValue = false; char typeAlignment = 0; int16 typeLength = 0; /* allocate an array for shard min values */ uint32 minDatumCount = shardIntervalCount; Datum *minDatumArray = palloc0(minDatumCount * sizeof(Datum)); for (intervalIndex = 0; intervalIndex < shardIntervalCount; intervalIndex++) { ShardInterval *shardInterval = shardIntervalArray[intervalIndex]; minDatumArray[intervalIndex] = shardInterval->minValue; Assert(shardInterval->minValueExists); /* resolve the datum type on the first pass */ if (intervalIndex == 0) { typeId = shardInterval->valueTypeId; } } /* construct the split point object from the sorted array */ get_typlenbyvalalign(typeId, &typeLength, &typeByValue, &typeAlignment); splitPointObject = construct_array(minDatumArray, minDatumCount, typeId, typeLength, typeByValue, typeAlignment); return splitPointObject; } /* ------------------------------------------------------------ * Functions that relate to building and assigning tasks follow * ------------------------------------------------------------ */ /* * BuildJobTreeTaskList takes in the given job tree and walks over jobs in this * tree bottom up. The function then creates tasks for each job in the tree, * sets dependencies between tasks and their downstream dependencies and assigns * tasks to worker nodes. */ static Job * BuildJobTreeTaskList(Job *jobTree, PlannerRestrictionContext *plannerRestrictionContext) { List *flattenedJobList = NIL; uint32 flattenedJobCount = 0; int32 jobIndex = 0; /* * We traverse the job tree in preorder, and append each visited job to our * flattened list. This way, each job in our list appears before the jobs it * depends on. */ List *jobStack = list_make1(jobTree); while (jobStack != NIL) { Job *job = (Job *) llast(jobStack); flattenedJobList = lappend(flattenedJobList, job); /* pop top element and push its children to the stack */ jobStack = list_delete_ptr(jobStack, job); jobStack = list_union_ptr(jobStack, job->dependedJobList); } /* * We walk the job list in reverse order to visit jobs bottom up. This way, * we can create dependencies between tasks bottom up, and assign them to * worker nodes accordingly. */ flattenedJobCount = (int32) list_length(flattenedJobList); for (jobIndex = (flattenedJobCount - 1); jobIndex >= 0; jobIndex--) { Job *job = (Job *) list_nth(flattenedJobList, jobIndex); List *sqlTaskList = NIL; List *assignedSqlTaskList = NIL; ListCell *assignedSqlTaskCell = NULL; /* create sql tasks for the job, and prune redundant data fetch tasks */ if (job->subqueryPushdown) { sqlTaskList = SubquerySqlTaskList(job, plannerRestrictionContext); } else { sqlTaskList = SqlTaskList(job); } sqlTaskList = PruneSqlTaskDependencies(sqlTaskList); /* * We first assign sql and merge tasks to worker nodes. Next, we assign * sql tasks' data fetch dependencies. */ assignedSqlTaskList = AssignTaskList(sqlTaskList); AssignDataFetchDependencies(assignedSqlTaskList); /* now assign merge task's data fetch dependencies */ foreach(assignedSqlTaskCell, assignedSqlTaskList) { Task *assignedSqlTask = (Task *) lfirst(assignedSqlTaskCell); List *assignedMergeTaskList = FindDependedMergeTaskList(assignedSqlTask); AssignDataFetchDependencies(assignedMergeTaskList); } /* * If we have a MapMerge job, the map tasks in this job wrap around the * SQL tasks and their assignments. 
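 * Concretely, each map task is derived from an already assigned SQL task and
 * repartitions that task's results, the merge tasks then gather the matching
 * partitions, and both lists are attached to the MapMergeJob below instead of
 * to job->taskList.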
*/ if (CitusIsA(job, MapMergeJob)) { MapMergeJob *mapMergeJob = (MapMergeJob *) job; uint32 taskIdIndex = TaskListHighestTaskId(assignedSqlTaskList) + 1; List *mapTaskList = MapTaskList(mapMergeJob, assignedSqlTaskList); List *mergeTaskList = MergeTaskList(mapMergeJob, mapTaskList, taskIdIndex); mapMergeJob->mapTaskList = mapTaskList; mapMergeJob->mergeTaskList = mergeTaskList; } else { job->taskList = assignedSqlTaskList; } } return jobTree; } /* * SubquerySqlTaskList creates a list of SQL tasks to execute the given subquery * pushdown job. For this, it checks whether the query is router plannable per * target shard interval. For each router plannable worker query, we create a SQL * task and append the task to the task list that is going to be executed. */ static List * SubquerySqlTaskList(Job *job, PlannerRestrictionContext *plannerRestrictionContext) { Query *subquery = job->jobQuery; uint64 jobId = job->jobId; List *sqlTaskList = NIL; List *rangeTableList = NIL; ListCell *rangeTableCell = NULL; uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */ Oid relationId = 0; int shardCount = 0; int shardOffset = 0; DistTableCacheEntry *targetCacheEntry = NULL; RelationRestrictionContext *relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; /* error if shards are not co-partitioned */ ErrorIfUnsupportedShardDistribution(subquery); /* get list of all range tables in subquery tree */ ExtractRangeTableRelationWalker((Node *) subquery, &rangeTableList); /* * Find the first relation that is not a reference table. We'll use the shards * of that relation as the target shards. */ foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); DistTableCacheEntry *cacheEntry = NULL; relationId = rangeTableEntry->relid; cacheEntry = DistributedTableCacheEntry(relationId); if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE) { continue; } targetCacheEntry = DistributedTableCacheEntry(relationId); break; } /* * If we could not find such a relation, all tables are reference tables and we * can pick any of them as the anchor table. */ if (targetCacheEntry == NULL) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) linitial(rangeTableList); relationId = rangeTableEntry->relid; targetCacheEntry = DistributedTableCacheEntry(relationId); } shardCount = targetCacheEntry->shardIntervalArrayLength; for (shardOffset = 0; shardOffset < shardCount; shardOffset++) { ShardInterval *targetShardInterval = targetCacheEntry->sortedShardIntervalArray[shardOffset]; Task *subqueryTask = NULL; subqueryTask = SubqueryTaskCreate(subquery, targetShardInterval, relationRestrictionContext, taskIdIndex); /* add the task if it could be created */ if (subqueryTask != NULL) { subqueryTask->jobId = jobId; sqlTaskList = lappend(sqlTaskList, subqueryTask); ++taskIdIndex; } } return sqlTaskList; } /* * ErrorIfUnsupportedShardDistribution gets the list of relations in the given * query and checks that the two conditions below hold for them, otherwise it * errors out. * a. Every relation is distributed by range or hash. This means shards are * disjoint based on the partition column. * b. All relations have 1-to-1 shard partitioning between them. This means the * shard count for every relation is the same, and for every shard in a relation * there is exactly one shard in the other relations with the same min/max values. 
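* For example, two hash-distributed tables that each have 4 shards covering the same
* hash ranges satisfy both conditions; joining a 4-shard table with an 8-shard table,
* or mixing range- and hash-distributed tables, errors out (shard counts here are
* illustrative).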
*/ static void ErrorIfUnsupportedShardDistribution(Query *query) { Oid firstTableRelationId = InvalidOid; List *relationIdList = RelationIdList(query); List *nonReferenceRelations = NIL; ListCell *relationIdCell = NULL; uint32 relationIndex = 0; uint32 rangeDistributedRelationCount = 0; uint32 hashDistributedRelationCount = 0; foreach(relationIdCell, relationIdList) { Oid relationId = lfirst_oid(relationIdCell); char partitionMethod = PartitionMethod(relationId); if (partitionMethod == DISTRIBUTE_BY_RANGE) { rangeDistributedRelationCount++; nonReferenceRelations = lappend_oid(nonReferenceRelations, relationId); } else if (partitionMethod == DISTRIBUTE_BY_HASH) { hashDistributedRelationCount++; nonReferenceRelations = lappend_oid(nonReferenceRelations, relationId); } else if (partitionMethod == DISTRIBUTE_BY_NONE) { /* do not need to handle reference tables */ continue; } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot push down this subquery"), errdetail("Currently append partitioned relations " "are not supported"))); } } if ((rangeDistributedRelationCount > 0) && (hashDistributedRelationCount > 0)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot push down this subquery"), errdetail("A query including both range and hash " "partitioned relations are unsupported"))); } foreach(relationIdCell, nonReferenceRelations) { Oid relationId = lfirst_oid(relationIdCell); bool coPartitionedTables = false; Oid currentRelationId = relationId; /* get shard list of first relation and continue for the next relation */ if (relationIndex == 0) { firstTableRelationId = relationId; relationIndex++; continue; } /* check if this table has 1-1 shard partitioning with first table */ coPartitionedTables = CoPartitionedTables(firstTableRelationId, currentRelationId); if (!coPartitionedTables) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot push down this subquery"), errdetail("Shards of relations in subquery need to " "have 1-to-1 shard partitioning"))); } } } /* * CoPartitionedTables checks if given two distributed tables have 1-to-1 shard * partitioning. */ static bool CoPartitionedTables(Oid firstRelationId, Oid secondRelationId) { bool coPartitionedTables = true; uint32 intervalIndex = 0; DistTableCacheEntry *firstTableCache = DistributedTableCacheEntry(firstRelationId); DistTableCacheEntry *secondTableCache = DistributedTableCacheEntry(secondRelationId); ShardInterval **sortedFirstIntervalArray = firstTableCache->sortedShardIntervalArray; ShardInterval **sortedSecondIntervalArray = secondTableCache->sortedShardIntervalArray; uint32 firstListShardCount = firstTableCache->shardIntervalArrayLength; uint32 secondListShardCount = secondTableCache->shardIntervalArrayLength; FmgrInfo *comparisonFunction = firstTableCache->shardIntervalCompareFunction; if (firstListShardCount != secondListShardCount) { return false; } /* if there are not any shards just return true */ if (firstListShardCount == 0) { return true; } Assert(comparisonFunction != NULL); /* * Check if the tables have the same colocation ID - if so, we know * they're colocated. */ if (firstTableCache->colocationId != INVALID_COLOCATION_ID && firstTableCache->colocationId == secondTableCache->colocationId) { return true; } /* * If not known to be colocated check if the remaining shards are * anyway. Do so by comparing the shard interval arrays that are sorted on * interval minimum values. 
Then it compares every shard interval in order * and if any pair of shard intervals is not equal it returns false. */ for (intervalIndex = 0; intervalIndex < firstListShardCount; intervalIndex++) { ShardInterval *firstInterval = sortedFirstIntervalArray[intervalIndex]; ShardInterval *secondInterval = sortedSecondIntervalArray[intervalIndex]; bool shardIntervalsEqual = ShardIntervalsEqual(comparisonFunction, firstInterval, secondInterval); if (!shardIntervalsEqual) { coPartitionedTables = false; break; } } return coPartitionedTables; } /* * ShardIntervalsEqual checks if given shard intervals have equal min/max values. */ static bool ShardIntervalsEqual(FmgrInfo *comparisonFunction, ShardInterval *firstInterval, ShardInterval *secondInterval) { bool shardIntervalsEqual = false; Datum firstMin = 0; Datum firstMax = 0; Datum secondMin = 0; Datum secondMax = 0; firstMin = firstInterval->minValue; firstMax = firstInterval->maxValue; secondMin = secondInterval->minValue; secondMax = secondInterval->maxValue; if (firstInterval->minValueExists && firstInterval->maxValueExists && secondInterval->minValueExists && secondInterval->maxValueExists) { Datum minDatum = CompareCall2(comparisonFunction, firstMin, secondMin); Datum maxDatum = CompareCall2(comparisonFunction, firstMax, secondMax); int firstComparison = DatumGetInt32(minDatum); int secondComparison = DatumGetInt32(maxDatum); if (firstComparison == 0 && secondComparison == 0) { shardIntervalsEqual = true; } } return shardIntervalsEqual; } /* * SubqueryTaskCreate creates a SQL task by adding the target shardInterval's * boundary values as restrictions, and then performs the normal shard pruning on * the subquery via PlanRouterQuery(). * * The function errors out if the subquery is not a router-plannable select query * (e.g., subqueries with non-equi-joins). */ static Task * SubqueryTaskCreate(Query *originalQuery, ShardInterval *shardInterval, RelationRestrictionContext *restrictionContext, uint32 taskId) { Query *taskQuery = copyObject(originalQuery); uint64 shardId = shardInterval->shardId; Oid distributedTableId = shardInterval->relationId; StringInfo queryString = makeStringInfo(); ListCell *restrictionCell = NULL; Task *subqueryTask = NULL; List *selectPlacementList = NIL; uint64 selectAnchorShardId = INVALID_SHARD_ID; List *relationShardList = NIL; uint64 jobId = INVALID_JOB_ID; bool replacePrunedQueryWithDummy = false; RelationRestrictionContext *copiedRestrictionContext = CopyRelationRestrictionContext(restrictionContext); List *shardOpExpressions = NIL; RestrictInfo *shardRestrictionList = NULL; DeferredErrorMessage *planningError = NULL; /* * Add the target shard interval's boundary restrictions to all baserestrictinfos. * Note that this has to be done on a copy, as the originals are needed * per target shard interval. 
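* Concretely, each relation's restrictions are extended with the target shard's
* [min, max] boundaries (reference tables are skipped), so that the router planner
* below prunes every distributed relation down to the co-located shard.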
*/ foreach(restrictionCell, copiedRestrictionContext->relationRestrictionList) { RelationRestriction *restriction = lfirst(restrictionCell); Index rteIndex = restriction->index; List *originalBaseRestrictInfo = restriction->relOptInfo->baserestrictinfo; List *extendedBaseRestrictInfo = originalBaseRestrictInfo; shardOpExpressions = ShardIntervalOpExpressions(shardInterval, rteIndex); /* a NIL result means a reference table, so we do not add any shard interval info */ if (shardOpExpressions == NIL) { continue; } shardRestrictionList = make_simple_restrictinfo((Expr *) shardOpExpressions); extendedBaseRestrictInfo = lappend(extendedBaseRestrictInfo, shardRestrictionList); restriction->relOptInfo->baserestrictinfo = extendedBaseRestrictInfo; } /* mark that we don't want the router planner to generate dummy hosts/queries */ replacePrunedQueryWithDummy = false; /* * Use router select planner to decide on whether we can push down the query * or not. If we can, we also rely on the side-effects that all RTEs have been * updated to point to the relevant nodes and selectPlacementList is determined. */ planningError = PlanRouterQuery(taskQuery, copiedRestrictionContext, &selectPlacementList, &selectAnchorShardId, &relationShardList, replacePrunedQueryWithDummy); /* we don't expect to see this error, but keep it as a precaution for future changes */ if (planningError) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning for the given " "query"), errdetail("Select query cannot be pushed down to the worker."))); } /* ensure that we do not send queries where select is pruned away completely */ if (list_length(selectPlacementList) == 0) { ereport(DEBUG2, (errmsg("Skipping the target shard interval %ld because " "SELECT query is pruned away for the interval", shardId))); return NULL; } /* * Ands are made implicit during shard pruning, as predicate comparison and * refutation depend on it being so. We need to make them explicit again so * that the query string is generated as (...) AND (...) as opposed to * (...), (...). */ taskQuery->jointree->quals = (Node *) make_ands_explicit((List *) taskQuery->jointree->quals); /* and generate the full query string */ deparse_shard_query(taskQuery, distributedTableId, shardInterval->shardId, queryString); ereport(DEBUG4, (errmsg("distributed statement: %s", queryString->data))); subqueryTask = CreateBasicTask(jobId, taskId, SQL_TASK, queryString->data); subqueryTask->dependedTaskList = NULL; subqueryTask->anchorShardId = shardInterval->shardId; subqueryTask->taskPlacementList = selectPlacementList; subqueryTask->upsertQuery = false; subqueryTask->relationShardList = relationShardList; return subqueryTask; } /* * SqlTaskList creates a list of SQL tasks to execute the given job. For this, * the function walks over each range table in the job's range table list, gets * each range table's table fragments, and prunes unneeded table fragments. The * function then joins table fragments from different range tables, and creates * all fragment combinations. For each created combination, the function builds * a SQL task, and appends this task to a task list. 
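* For example, a join between two range-partitioned tables whose pruning leaves two
* matching fragments per table yields two fragment combinations, and hence two SQL
* tasks, each carrying its own data fetch dependencies.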
*/ static List * SqlTaskList(Job *job) { List *sqlTaskList = NIL; uint32 taskIdIndex = 1; /* 0 is reserved for invalid taskId */ uint64 jobId = job->jobId; bool anchorRangeTableBasedAssignment = false; uint32 anchorRangeTableId = 0; Node *whereClauseTree = NULL; List *rangeTableFragmentsList = NIL; List *fragmentCombinationList = NIL; ListCell *fragmentCombinationCell = NULL; Query *jobQuery = job->jobQuery; List *rangeTableList = jobQuery->rtable; List *whereClauseList = (List *) jobQuery->jointree->quals; List *dependedJobList = job->dependedJobList; /* * If we don't depend on a hash partition, then we determine the largest * table around which we build our queries. This reduces data fetching. */ bool dependsOnHashPartitionJob = DependsOnHashPartitionJob(job); if (!dependsOnHashPartitionJob) { anchorRangeTableBasedAssignment = true; anchorRangeTableId = AnchorRangeTableId(rangeTableList); Assert(anchorRangeTableId != 0); Assert(anchorRangeTableId <= list_length(rangeTableList)); } /* adjust our column old attributes for partition pruning to work */ AdjustColumnOldAttributes(whereClauseList); AdjustColumnOldAttributes(jobQuery->targetList); /* * Ands are made implicit during shard pruning, as predicate comparison and * refutation depend on it being so. We need to make them explicit again so * that the query string is generated as (...) AND (...) as opposed to * (...), (...). */ whereClauseTree = (Node *) make_ands_explicit((List *) jobQuery->jointree->quals); jobQuery->jointree->quals = whereClauseTree; /* * For each range table, we first get a list of their shards or merge tasks. * We also apply partition pruning based on the selection criteria. If all * range table fragments are pruned away, we return an empty task list. */ rangeTableFragmentsList = RangeTableFragmentsList(rangeTableList, whereClauseList, dependedJobList); if (rangeTableFragmentsList == NIL) { return NIL; } /* * We then generate fragment combinations according to how range tables join * with each other (and apply join pruning). Each fragment combination then * represents one SQL task's dependencies. 
*/ fragmentCombinationList = FragmentCombinationList(rangeTableFragmentsList, jobQuery, dependedJobList); fragmentCombinationCell = NULL; foreach(fragmentCombinationCell, fragmentCombinationList) { List *fragmentCombination = (List *) lfirst(fragmentCombinationCell); List *dataFetchTaskList = NIL; int32 dataFetchTaskCount = 0; StringInfo sqlQueryString = NULL; Task *sqlTask = NULL; Query *taskQuery = NULL; List *fragmentRangeTableList = NIL; /* create tasks to fetch fragments required for the sql task */ dataFetchTaskList = DataFetchTaskList(jobId, taskIdIndex, fragmentCombination); dataFetchTaskCount = list_length(dataFetchTaskList); taskIdIndex += dataFetchTaskCount; /* update range table entries with fragment aliases (in place) */ taskQuery = copyObject(jobQuery); fragmentRangeTableList = taskQuery->rtable; UpdateRangeTableAlias(fragmentRangeTableList, fragmentCombination); /* transform the updated task query to a SQL query string */ sqlQueryString = makeStringInfo(); pg_get_query_def(taskQuery, sqlQueryString); sqlTask = CreateBasicTask(jobId, taskIdIndex, SQL_TASK, sqlQueryString->data); sqlTask->dependedTaskList = dataFetchTaskList; /* log the query string we generated */ ereport(DEBUG4, (errmsg("generated sql query for task %d", sqlTask->taskId), errdetail("query string: \"%s\"", sqlQueryString->data))); sqlTask->anchorShardId = INVALID_SHARD_ID; if (anchorRangeTableBasedAssignment) { sqlTask->anchorShardId = AnchorShardId(fragmentCombination, anchorRangeTableId); } taskIdIndex++; sqlTaskList = lappend(sqlTaskList, sqlTask); } return sqlTaskList; } /* * DependsOnHashPartitionJob checks if the given job depends on a hash * partitioning job. */ static bool DependsOnHashPartitionJob(Job *job) { bool dependsOnHashPartitionJob = false; List *dependedJobList = job->dependedJobList; uint32 dependedJobCount = (uint32) list_length(dependedJobList); if (dependedJobCount > 0) { Job *dependedJob = (Job *) linitial(dependedJobList); if (CitusIsA(dependedJob, MapMergeJob)) { MapMergeJob *mapMergeJob = (MapMergeJob *) dependedJob; if (mapMergeJob->partitionType == HASH_PARTITION_TYPE) { dependsOnHashPartitionJob = true; } } } return dependsOnHashPartitionJob; } /* * AnchorRangeTableId determines the table around which we build our queries, * and returns this table's range table id. We refer to this table as the anchor * table, and make sure that the anchor table's shards are moved or cached only * when absolutely necessary. */ static uint32 AnchorRangeTableId(List *rangeTableList) { uint32 anchorRangeTableId = 0; uint64 maxTableSize = 0; /* * We first filter anything but ordinary tables. Then, we pick the table(s) * with the most number of shards as our anchor table. If multiple tables * have the most number of shards, we have a draw. */ List *baseTableIdList = BaseRangeTableIdList(rangeTableList); List *anchorTableIdList = AnchorRangeTableIdList(rangeTableList, baseTableIdList); ListCell *anchorTableIdCell = NULL; int anchorTableIdCount = list_length(anchorTableIdList); Assert(anchorTableIdCount > 0); if (anchorTableIdCount == 1) { anchorRangeTableId = (uint32) linitial_int(anchorTableIdList); return anchorRangeTableId; } /* * If more than one table has the most number of shards, we break the draw * by comparing table sizes and picking the table with the largest size. 
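* For example, if two tables both have 8 shards but one table's shards total 40 GB
* and the other's total 10 GB, the larger table becomes the anchor table (shard
* counts and sizes here are illustrative).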
*/ foreach(anchorTableIdCell, anchorTableIdList) { uint32 anchorTableId = (uint32) lfirst_int(anchorTableIdCell); RangeTblEntry *tableEntry = rt_fetch(anchorTableId, rangeTableList); uint64 tableSize = 0; List *shardList = LoadShardList(tableEntry->relid); ListCell *shardCell = NULL; foreach(shardCell, shardList) { uint64 *shardIdPointer = (uint64 *) lfirst(shardCell); uint64 shardId = (*shardIdPointer); uint64 shardSize = ShardLength(shardId); tableSize += shardSize; } if (tableSize > maxTableSize) { maxTableSize = tableSize; anchorRangeTableId = anchorTableId; } } if (anchorRangeTableId == 0) { /* all tables have the same shard count and size 0, pick the first */ anchorRangeTableId = (uint32) linitial_int(anchorTableIdList); } return anchorRangeTableId; } /* * BaseRangeTableIdList walks over range tables in the given range table list, * finds range tables that correspond to base (non-repartitioned) tables, and * returns these range tables' identifiers in a new list. */ static List * BaseRangeTableIdList(List *rangeTableList) { List *baseRangeTableIdList = NIL; uint32 rangeTableId = 1; ListCell *rangeTableCell = NULL; foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); if (GetRangeTblKind(rangeTableEntry) == CITUS_RTE_RELATION) { baseRangeTableIdList = lappend_int(baseRangeTableIdList, rangeTableId); } rangeTableId++; } return baseRangeTableIdList; } /* * AnchorRangeTableIdList finds ordinary table(s) with the most number of shards * and returns the corresponding range table id(s) in a list. */ static List * AnchorRangeTableIdList(List *rangeTableList, List *baseRangeTableIdList) { List *anchorTableIdList = NIL; uint32 maxShardCount = 0; ListCell *baseRangeTableIdCell = NULL; uint32 baseRangeTableCount = list_length(baseRangeTableIdList); if (baseRangeTableCount == 1) { return baseRangeTableIdList; } foreach(baseRangeTableIdCell, baseRangeTableIdList) { uint32 baseRangeTableId = (uint32) lfirst_int(baseRangeTableIdCell); RangeTblEntry *tableEntry = rt_fetch(baseRangeTableId, rangeTableList); List *shardList = LoadShardList(tableEntry->relid); uint32 shardCount = (uint32) list_length(shardList); if (shardCount > maxShardCount) { anchorTableIdList = list_make1_int(baseRangeTableId); maxShardCount = shardCount; } else if (shardCount == maxShardCount) { anchorTableIdList = lappend_int(anchorTableIdList, baseRangeTableId); } } return anchorTableIdList; } /* * AdjustColumnOldAttributes adjust the old tableId (varnoold) and old columnId * (varoattno), and sets them equal to the new values. We need this adjustment * for partition pruning where we compare these columns with partition columns * loaded from system catalogs. Since columns loaded from system catalogs always * have the same old and new values, we also need to adjust column values here. */ static void AdjustColumnOldAttributes(List *expressionList) { List *columnList = pull_var_clause_default((Node *) expressionList); ListCell *columnCell = NULL; foreach(columnCell, columnList) { Var *column = (Var *) lfirst(columnCell); column->varnoold = column->varno; column->varoattno = column->varattno; } } /* * RangeTableFragmentsList walks over range tables in the given range table list * and for each table, the function creates a list of its fragments. A fragment * in this list represents either a regular shard or a merge task. Once a list * for each range table is constructed, the function applies partition pruning * using the given where clause list. 
Then, the function appends the fragment * list for each range table to a list of lists, and returns this list of lists. */ static List * RangeTableFragmentsList(List *rangeTableList, List *whereClauseList, List *dependedJobList) { List *rangeTableFragmentsList = NIL; uint32 rangeTableIndex = 0; const uint32 fragmentSize = sizeof(RangeTableFragment); ListCell *rangeTableCell = NULL; foreach(rangeTableCell, rangeTableList) { uint32 tableId = rangeTableIndex + 1; /* tableId starts from 1 */ RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); CitusRTEKind rangeTableKind = GetRangeTblKind(rangeTableEntry); if (rangeTableKind == CITUS_RTE_RELATION) { Oid relationId = rangeTableEntry->relid; ListCell *shardIntervalCell = NULL; List *shardFragmentList = NIL; List *prunedShardIntervalList = PruneShards(relationId, tableId, whereClauseList); /* * If we prune all shards for one table, query results will be empty. * We can therefore return NIL for the task list here. */ if (prunedShardIntervalList == NIL) { return NIL; } foreach(shardIntervalCell, prunedShardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); RangeTableFragment *shardFragment = palloc0(fragmentSize); shardFragment->fragmentReference = shardInterval; shardFragment->fragmentType = CITUS_RTE_RELATION; shardFragment->rangeTableId = tableId; shardFragmentList = lappend(shardFragmentList, shardFragment); } rangeTableFragmentsList = lappend(rangeTableFragmentsList, shardFragmentList); } else if (rangeTableKind == CITUS_RTE_REMOTE_QUERY) { MapMergeJob *dependedMapMergeJob = NULL; List *mergeTaskFragmentList = NIL; List *mergeTaskList = NIL; ListCell *mergeTaskCell = NULL; Job *dependedJob = JobForRangeTable(dependedJobList, rangeTableEntry); Assert(CitusIsA(dependedJob, MapMergeJob)); dependedMapMergeJob = (MapMergeJob *) dependedJob; mergeTaskList = dependedMapMergeJob->mergeTaskList; /* if there are no tasks for the depended job, just return NIL */ if (mergeTaskList == NIL) { return NIL; } foreach(mergeTaskCell, mergeTaskList) { Task *mergeTask = (Task *) lfirst(mergeTaskCell); RangeTableFragment *mergeTaskFragment = palloc0(fragmentSize); mergeTaskFragment->fragmentReference = mergeTask; mergeTaskFragment->fragmentType = CITUS_RTE_REMOTE_QUERY; mergeTaskFragment->rangeTableId = tableId; mergeTaskFragmentList = lappend(mergeTaskFragmentList, mergeTaskFragment); } rangeTableFragmentsList = lappend(rangeTableFragmentsList, mergeTaskFragmentList); } rangeTableIndex++; } return rangeTableFragmentsList; } /* * BuildBaseConstraint builds and returns a base constraint. This constraint * implements an expression in the form of (column <= max && column >= min), * where column is the partition key, and min and max values represent a shard's * min and max values. These shard values are filled in after the constraint is * built. */ Node * BuildBaseConstraint(Var *column) { Node *baseConstraint = NULL; OpExpr *lessThanExpr = NULL; OpExpr *greaterThanExpr = NULL; /* Build these expressions with only one argument for now */ lessThanExpr = MakeOpExpression(column, BTLessEqualStrategyNumber); greaterThanExpr = MakeOpExpression(column, BTGreaterEqualStrategyNumber); /* Build base constraint as an AND of two qual conditions */ baseConstraint = make_and_qual((Node *) lessThanExpr, (Node *) greaterThanExpr); return baseConstraint; } /* * MakeOpExpression builds an operator expression node. This operator expression * implements the operator clause as defined by the variable and the strategy * number. 
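* For example, for an int4 partition column and BTLessEqualStrategyNumber, the
* resulting expression is equivalent to (column <= NULL::int4); the null constant
* is a placeholder that UpdateConstraint() later replaces with a shard's actual
* max value.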
*/ OpExpr * MakeOpExpression(Var *variable, int16 strategyNumber) { Oid typeId = variable->vartype; Oid typeModId = variable->vartypmod; Oid collationId = variable->varcollid; OperatorCacheEntry *operatorCacheEntry = NULL; Oid accessMethodId = BTREE_AM_OID; Oid operatorId = InvalidOid; Oid operatorClassInputType = InvalidOid; Const *constantValue = NULL; OpExpr *expression = NULL; char typeType = 0; operatorCacheEntry = LookupOperatorByType(typeId, accessMethodId, strategyNumber); operatorId = operatorCacheEntry->operatorId; operatorClassInputType = operatorCacheEntry->operatorClassInputType; typeType = operatorCacheEntry->typeType; /* * Relabel variable if input type of default operator class is not equal to * the variable type. Note that we don't relabel the variable if the default * operator class variable type is a pseudo-type. */ if (operatorClassInputType != typeId && typeType != TYPTYPE_PSEUDO) { variable = (Var *) makeRelabelType((Expr *) variable, operatorClassInputType, -1, collationId, COERCE_IMPLICIT_CAST); } constantValue = makeNullConst(operatorClassInputType, typeModId, collationId); /* Now make the expression with the given variable and a null constant */ expression = (OpExpr *) make_opclause(operatorId, InvalidOid, /* no result type yet */ false, /* no return set */ (Expr *) variable, (Expr *) constantValue, InvalidOid, collationId); /* Set implementing function id and result type */ expression->opfuncid = get_opcode(operatorId); expression->opresulttype = get_func_rettype(expression->opfuncid); return expression; } /* * LookupOperatorByType is a wrapper around GetOperatorByType(), * operatorClassInputType() and get_typtype() functions that uses a cache to avoid * multiple lookups of operators and its related fields within a single session by * their types, access methods and strategy numbers. * LookupOperatorByType function errors out if it cannot find corresponding * default operator class with the given parameters on the system catalogs. 
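* For example, a lookup for (INT4OID, BTREE_AM_OID, BTLessEqualStrategyNumber)
* resolves the default btree operator class for int4 and returns its "<=" operator;
* the entry is kept in CacheMemoryContext so later lookups within the session avoid
* the catalog accesses.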
*/ static OperatorCacheEntry * LookupOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber) { OperatorCacheEntry *matchingCacheEntry = NULL; ListCell *cacheEntryCell = NULL; /* search the cache */ foreach(cacheEntryCell, OperatorCache) { OperatorCacheEntry *cacheEntry = lfirst(cacheEntryCell); if ((cacheEntry->typeId == typeId) && (cacheEntry->accessMethodId == accessMethodId) && (cacheEntry->strategyNumber == strategyNumber)) { matchingCacheEntry = cacheEntry; break; } } /* if not found in the cache, call GetOperatorByType and put the result in cache */ if (matchingCacheEntry == NULL) { MemoryContext oldContext = NULL; Oid operatorClassId = GetDefaultOpClass(typeId, accessMethodId); Oid operatorId = InvalidOid; Oid operatorClassInputType = InvalidOid; char typeType = InvalidOid; if (operatorClassId == InvalidOid) { /* if operatorId is invalid, error out */ ereport(ERROR, (errmsg("cannot find default operator class for type:%d," " access method: %d", typeId, accessMethodId))); } /* fill the other fields to the cache */ operatorId = GetOperatorByType(typeId, accessMethodId, strategyNumber); operatorClassInputType = get_opclass_input_type(operatorClassId); typeType = get_typtype(operatorClassInputType); /* make sure we've initialized CacheMemoryContext */ if (CacheMemoryContext == NULL) { CreateCacheMemoryContext(); } oldContext = MemoryContextSwitchTo(CacheMemoryContext); matchingCacheEntry = palloc0(sizeof(OperatorCacheEntry)); matchingCacheEntry->typeId = typeId; matchingCacheEntry->accessMethodId = accessMethodId; matchingCacheEntry->strategyNumber = strategyNumber; matchingCacheEntry->operatorId = operatorId; matchingCacheEntry->operatorClassInputType = operatorClassInputType; matchingCacheEntry->typeType = typeType; OperatorCache = lappend(OperatorCache, matchingCacheEntry); MemoryContextSwitchTo(oldContext); } return matchingCacheEntry; } /* * GetOperatorByType returns the operator oid for the given type, access method, * and strategy number. */ static Oid GetOperatorByType(Oid typeId, Oid accessMethodId, int16 strategyNumber) { /* Get default operator class from pg_opclass */ Oid operatorClassId = GetDefaultOpClass(typeId, accessMethodId); Oid operatorFamily = get_opclass_family(operatorClassId); Oid operatorClassInputType = get_opclass_input_type(operatorClassId); /* Lookup for the operator with the desired input type in the family */ Oid operatorId = get_opfamily_member(operatorFamily, operatorClassInputType, operatorClassInputType, strategyNumber); return operatorId; } /* * SimpleOpExpression checks that given expression is a simple operator * expression. A simple operator expression is a binary operator expression with * operands of a var and a non-null constant. 
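* For example (using illustrative column names), (l_orderkey = 5) is a simple
* operator expression, whereas (l_orderkey = o_orderkey) and (l_orderkey = NULL)
* are not.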
*/ bool SimpleOpExpression(Expr *clause) { Node *leftOperand = NULL; Node *rightOperand = NULL; Const *constantClause = NULL; if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2) { leftOperand = get_leftop(clause); rightOperand = get_rightop(clause); } else { return false; /* not a binary opclause */ } /* strip coercions before doing check */ leftOperand = strip_implicit_coercions(leftOperand); rightOperand = strip_implicit_coercions(rightOperand); if (IsA(rightOperand, Const) && IsA(leftOperand, Var)) { constantClause = (Const *) rightOperand; } else if (IsA(leftOperand, Const) && IsA(rightOperand, Var)) { constantClause = (Const *) leftOperand; } else { return false; } if (constantClause->constisnull) { return false; } return true; } /* * OpExpressionContainsColumn checks if the operator expression contains the * given partition column. We assume that given operator expression is a simple * operator expression which means it is a binary operator expression with * operands of a var and a non-null constant. */ bool OpExpressionContainsColumn(OpExpr *operatorExpression, Var *partitionColumn) { Node *leftOperand = get_leftop((Expr *) operatorExpression); Node *rightOperand = get_rightop((Expr *) operatorExpression); Var *column = NULL; /* strip coercions before doing check */ leftOperand = strip_implicit_coercions(leftOperand); rightOperand = strip_implicit_coercions(rightOperand); if (IsA(leftOperand, Var)) { column = (Var *) leftOperand; } else { column = (Var *) rightOperand; } return equal(column, partitionColumn); } /* * MakeInt4Column creates a column of int4 type with invalid table id and max * attribute number. */ Var * MakeInt4Column() { Index tableId = 0; AttrNumber columnAttributeNumber = RESERVED_HASHED_COLUMN_ID; Oid columnType = INT4OID; int32 columnTypeMod = -1; Oid columnCollationOid = InvalidOid; Index columnLevelSup = 0; Var *int4Column = makeVar(tableId, columnAttributeNumber, columnType, columnTypeMod, columnCollationOid, columnLevelSup); return int4Column; } /* * MakeInt4Constant creates a new constant of int4 type and assigns the given * value as a constant value. */ Const * MakeInt4Constant(Datum constantValue) { Oid constantType = INT4OID; int32 constantTypeMode = -1; Oid constantCollationId = InvalidOid; int constantLength = sizeof(int32); bool constantIsNull = false; bool constantByValue = true; Const *int4Constant = makeConst(constantType, constantTypeMode, constantCollationId, constantLength, constantValue, constantIsNull, constantByValue); return int4Constant; } /* Updates the base constraint with the given min/max values. */ void UpdateConstraint(Node *baseConstraint, ShardInterval *shardInterval) { BoolExpr *andExpr = (BoolExpr *) baseConstraint; Node *lessThanExpr = (Node *) linitial(andExpr->args); Node *greaterThanExpr = (Node *) lsecond(andExpr->args); Node *minNode = get_rightop((Expr *) greaterThanExpr); /* right op */ Node *maxNode = get_rightop((Expr *) lessThanExpr); /* right op */ Const *minConstant = NULL; Const *maxConstant = NULL; Assert(shardInterval != NULL); Assert(shardInterval->minValueExists); Assert(shardInterval->maxValueExists); Assert(IsA(minNode, Const)); Assert(IsA(maxNode, Const)); minConstant = (Const *) minNode; maxConstant = (Const *) maxNode; minConstant->constvalue = shardInterval->minValue; maxConstant->constvalue = shardInterval->maxValue; minConstant->constisnull = false; maxConstant->constisnull = false; } /* * FragmentCombinationList first builds an ordered sequence of range tables that * join together. 
The function then iteratively adds fragments from each joined * range table, and forms fragment combinations (lists) that cover all tables. * While doing so, the function also performs join pruning to remove unnecessary * fragment pairs. Last, the function adds each fragment combination (list) to a * list, and returns this list. */ static List * FragmentCombinationList(List *rangeTableFragmentsList, Query *jobQuery, List *dependedJobList) { List *fragmentCombinationList = NIL; JoinSequenceNode *joinSequenceArray = NULL; List *fragmentCombinationQueue = NIL; List *emptyList = NIL; /* find a sequence that joins the range tables in the list */ joinSequenceArray = JoinSequenceArray(rangeTableFragmentsList, jobQuery, dependedJobList); /* * We use breadth-first search with pruning to create fragment combinations. * For this, we first queue the root node (an empty combination), and then * start traversing our search space. */ fragmentCombinationQueue = lappend(fragmentCombinationQueue, emptyList); while (fragmentCombinationQueue != NIL) { List *fragmentCombination = NIL; int32 joinSequenceIndex = 0; uint32 tableId = 0; List *tableFragments = NIL; ListCell *tableFragmentCell = NULL; int32 joiningTableId = NON_PRUNABLE_JOIN; int32 joiningTableSequenceIndex = -1; int32 rangeTableCount = 0; /* pop first element from the fragment queue */ fragmentCombination = linitial(fragmentCombinationQueue); fragmentCombinationQueue = list_delete_first(fragmentCombinationQueue); /* * If this combination covered all range tables in a join sequence, add * this combination to our result set. */ joinSequenceIndex = list_length(fragmentCombination); rangeTableCount = list_length(rangeTableFragmentsList); if (joinSequenceIndex == rangeTableCount) { fragmentCombinationList = lappend(fragmentCombinationList, fragmentCombination); continue; } /* find the next range table to add to our search space */ tableId = joinSequenceArray[joinSequenceIndex].rangeTableId; tableFragments = FindRangeTableFragmentsList(rangeTableFragmentsList, tableId); /* resolve sequence index for the previous range table we join against */ joiningTableId = joinSequenceArray[joinSequenceIndex].joiningRangeTableId; if (joiningTableId != NON_PRUNABLE_JOIN) { int32 sequenceIndex = 0; for (sequenceIndex = 0; sequenceIndex < rangeTableCount; sequenceIndex++) { JoinSequenceNode *joinSequenceNode = &joinSequenceArray[sequenceIndex]; if (joinSequenceNode->rangeTableId == joiningTableId) { joiningTableSequenceIndex = sequenceIndex; break; } } Assert(joiningTableSequenceIndex != -1); } /* * We walk over each range table fragment, and check if we can prune out * this fragment joining with the existing fragment combination. If we * can't prune away, we create a new fragment combination and add it to * our search space. 
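* For example, with two joined tables contributing fragments {a1, a2} and {b1, b2},
* the queue grows from the empty combination to {a1} and {a2}, and only the pairs
* that survive join pruning, such as {a1, b1} and {a2, b2}, are emitted as complete
* combinations (fragment names here are illustrative).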
*/ foreach(tableFragmentCell, tableFragments) { RangeTableFragment *tableFragment = lfirst(tableFragmentCell); bool joinPrunable = false; if (joiningTableId != NON_PRUNABLE_JOIN) { RangeTableFragment *joiningTableFragment = list_nth(fragmentCombination, joiningTableSequenceIndex); joinPrunable = JoinPrunable(joiningTableFragment, tableFragment); } /* if join can't be pruned, extend fragment combination and search */ if (!joinPrunable) { List *newFragmentCombination = list_copy(fragmentCombination); newFragmentCombination = lappend(newFragmentCombination, tableFragment); fragmentCombinationQueue = lappend(fragmentCombinationQueue, newFragmentCombination); } } } return fragmentCombinationList; } /* * JoinSequenceArray walks over the join nodes in the job query and constructs a join * sequence containing an entry for each joined table. The function then returns an * array of join sequence nodes, in which each node contains the id of a table in the * range table list and the id of a preceding table with which it is joined, if any. */ static JoinSequenceNode * JoinSequenceArray(List *rangeTableFragmentsList, Query *jobQuery, List *dependedJobList) { List *rangeTableList = jobQuery->rtable; uint32 rangeTableCount = (uint32) list_length(rangeTableList); uint32 sequenceNodeSize = sizeof(JoinSequenceNode); uint32 joinedTableCount = 0; List *joinedTableList = NIL; List *joinExprList = NIL; ListCell *joinExprCell = NULL; uint32 firstRangeTableId = 1; JoinSequenceNode *joinSequenceArray = palloc0(rangeTableCount * sequenceNodeSize); joinExprList = JoinExprList(jobQuery->jointree); /* pick first range table as starting table for the join sequence */ if (list_length(joinExprList) > 0) { JoinExpr *firstExpr = (JoinExpr *) linitial(joinExprList); RangeTblRef *leftTableRef = (RangeTblRef *) firstExpr->larg; firstRangeTableId = leftTableRef->rtindex; } else { /* when there are no joins, the join sequence contains a node for the table */ firstRangeTableId = 1; } joinSequenceArray[joinedTableCount].rangeTableId = firstRangeTableId; joinSequenceArray[joinedTableCount].joiningRangeTableId = NON_PRUNABLE_JOIN; joinedTableCount++; foreach(joinExprCell, joinExprList) { JoinExpr *joinExpr = (JoinExpr *) lfirst(joinExprCell); JoinType joinType = joinExpr->jointype; RangeTblRef *rightTableRef = (RangeTblRef *) joinExpr->rarg; JoinSequenceNode *nextJoinSequenceNode = NULL; uint32 nextRangeTableId = rightTableRef->rtindex; ListCell *nextJoinClauseCell = NULL; Index existingRangeTableId = 0; bool applyJoinPruning = false; List *nextJoinClauseList = make_ands_implicit((Expr *) joinExpr->quals); /* * If next join clause list is empty, the user tried a cartesian product * between tables. We don't support this functionality, and error out. */ if (nextJoinClauseList == NIL) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning on this query"), errdetail("Cartesian products are currently unsupported"))); } /* * We now determine if we can apply join pruning between existing range * tables and this new one. 
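* Pruning is only applicable when a join clause equates two simple columns of the
* same type that are the partition columns of their respective tables, for example
* a.key = b.key (illustrative names); outer joins whose inner side has a single
* fragment are excluded below so that unmatched rows are preserved.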
*/ foreach(nextJoinClauseCell, nextJoinClauseList) { OpExpr *nextJoinClause = (OpExpr *) lfirst(nextJoinClauseCell); Var *leftColumn = NULL; Var *rightColumn = NULL; Index leftRangeTableId = 0; Index rightRangeTableId = 0; bool leftPartitioned = false; bool rightPartitioned = false; if (!IsJoinClause((Node *) nextJoinClause)) { continue; } leftColumn = LeftColumn(nextJoinClause); rightColumn = RightColumn(nextJoinClause); leftRangeTableId = leftColumn->varno; rightRangeTableId = rightColumn->varno; /* * We have a table from the existing join list joining with the next * table. First resolve the existing table's range table id. */ if (leftRangeTableId == nextRangeTableId) { existingRangeTableId = rightRangeTableId; } else { existingRangeTableId = leftRangeTableId; } /* * Then, we check if we can apply join pruning between the existing * range table and this new one. For this, columns need to have the * same type and be the partition column for their respective tables. */ if (leftColumn->vartype != rightColumn->vartype) { continue; } /* * Check if this is a broadcast outer join, meaning the inner table has only * 1 shard. * * Broadcast outer join is a special case. In a left join, we want to join * every fragment on the left with the one fragment on the right to ensure * that all results from the left are included. As an optimization, we could * perform these joins with any empty set instead of an actual fragment, but * in any case they must not be pruned. */ if (IS_OUTER_JOIN(joinType)) { int innerRangeTableId = 0; List *tableFragments = NIL; int fragmentCount = 0; if (joinType == JOIN_RIGHT) { innerRangeTableId = existingRangeTableId; } else { /* * Note: For a full join the logical planner ensures a 1-1 mapping, * thus it is sufficient to check one side. */ innerRangeTableId = nextRangeTableId; } tableFragments = FindRangeTableFragmentsList(rangeTableFragmentsList, innerRangeTableId); fragmentCount = list_length(tableFragments); if (fragmentCount == 1) { continue; } } leftPartitioned = PartitionedOnColumn(leftColumn, rangeTableList, dependedJobList); rightPartitioned = PartitionedOnColumn(rightColumn, rangeTableList, dependedJobList); if (leftPartitioned && rightPartitioned) { /* make sure this join clause references only simple columns */ CheckJoinBetweenColumns(nextJoinClause); applyJoinPruning = true; break; } } /* set next joining range table's info in the join sequence */ nextJoinSequenceNode = &joinSequenceArray[joinedTableCount]; if (applyJoinPruning) { nextJoinSequenceNode->rangeTableId = nextRangeTableId; nextJoinSequenceNode->joiningRangeTableId = (int32) existingRangeTableId; } else { nextJoinSequenceNode->rangeTableId = nextRangeTableId; nextJoinSequenceNode->joiningRangeTableId = NON_PRUNABLE_JOIN; } joinedTableList = lappend_int(joinedTableList, nextRangeTableId); joinedTableCount++; } return joinSequenceArray; } /* * PartitionedOnColumn finds the given column's range table entry, and checks if * that range table is partitioned on the given column. Note that since reference * tables do not have partition columns, the function returns false when the distributed * relation is a reference table. 
*/ static bool PartitionedOnColumn(Var *column, List *rangeTableList, List *dependedJobList) { bool partitionedOnColumn = false; Index rangeTableId = column->varno; RangeTblEntry *rangeTableEntry = rt_fetch(rangeTableId, rangeTableList); CitusRTEKind rangeTableType = GetRangeTblKind(rangeTableEntry); if (rangeTableType == CITUS_RTE_RELATION) { Oid relationId = rangeTableEntry->relid; char partitionMethod = PartitionMethod(relationId); Var *partitionColumn = PartitionColumn(relationId, rangeTableId); /* reference tables do not have partition columns */ if (partitionMethod == DISTRIBUTE_BY_NONE) { partitionedOnColumn = false; return partitionedOnColumn; } if (partitionColumn->varattno == column->varattno) { partitionedOnColumn = true; } } else if (rangeTableType == CITUS_RTE_REMOTE_QUERY) { Job *job = JobForRangeTable(dependedJobList, rangeTableEntry); MapMergeJob *mapMergeJob = (MapMergeJob *) job; Var *partitionColumn = NULL; Var *remoteRelationColumn = NULL; TargetEntry *targetEntry = NULL; /* * The column's current attribute number is its location in the target * list for the table represented by the remote query. We retrieve this * value from the target list to compare against the partition column * as stored in the job. */ List *targetEntryList = job->jobQuery->targetList; int32 columnIndex = column->varattno - 1; Assert(columnIndex >= 0); Assert(columnIndex < list_length(targetEntryList)); targetEntry = (TargetEntry *) list_nth(targetEntryList, columnIndex); remoteRelationColumn = (Var *) targetEntry->expr; Assert(IsA(remoteRelationColumn, Var)); /* retrieve the partition column for the job */ partitionColumn = mapMergeJob->partitionColumn; if (partitionColumn->varattno == remoteRelationColumn->varattno) { partitionedOnColumn = true; } } return partitionedOnColumn; } /* Checks that the join clause references only simple columns. */ static void CheckJoinBetweenColumns(OpExpr *joinClause) { List *argumentList = joinClause->args; Node *leftArgument = (Node *) linitial(argumentList); Node *rightArgument = (Node *) lsecond(argumentList); Node *strippedLeftArgument = strip_implicit_coercions(leftArgument); Node *strippedRightArgument = strip_implicit_coercions(rightArgument); NodeTag leftArgumentType = nodeTag(strippedLeftArgument); NodeTag rightArgumentType = nodeTag(strippedRightArgument); if (leftArgumentType != T_Var || rightArgumentType != T_Var) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform local joins that involve expressions"), errdetail("local joins can be performed between columns only"))); } } /* * FindRangeTableFragmentsList walks over the given list of range table fragments * and returns the one with the given table id. */ static List * FindRangeTableFragmentsList(List *rangeTableFragmentsList, int tableId) { List *foundTableFragments = NIL; ListCell *rangeTableFragmentsCell = NULL; foreach(rangeTableFragmentsCell, rangeTableFragmentsList) { List *tableFragments = (List *) lfirst(rangeTableFragmentsCell); if (tableFragments != NIL) { RangeTableFragment *tableFragment = (RangeTableFragment *) linitial(tableFragments); if (tableFragment->rangeTableId == tableId) { foundTableFragments = tableFragments; break; } } } return foundTableFragments; } /* * JoinPrunable checks if a join between the given left and right fragments can * be pruned away, without performing the actual join. To do this, the function * checks if we have a hash repartition join. If we do, the function determines * pruning based on partitionIds. 
Else if we have a merge repartition join, the * function checks if the two fragments have disjoint intervals. */ static bool JoinPrunable(RangeTableFragment *leftFragment, RangeTableFragment *rightFragment) { bool joinPrunable = false; bool overlap = false; ShardInterval *leftFragmentInterval = NULL; ShardInterval *rightFragmentInterval = NULL; /* * If both range tables are remote queries, we then have a hash repartition * join. In that case, we can just prune away this join if left and right * hand side fragments have the same partitionId. */ if (leftFragment->fragmentType == CITUS_RTE_REMOTE_QUERY && rightFragment->fragmentType == CITUS_RTE_REMOTE_QUERY) { Task *leftMergeTask = (Task *) leftFragment->fragmentReference; Task *rightMergeTask = (Task *) rightFragment->fragmentReference; if (leftMergeTask->partitionId != rightMergeTask->partitionId) { ereport(DEBUG2, (errmsg("join prunable for task partitionId %u and %u", leftMergeTask->partitionId, rightMergeTask->partitionId))); return true; } else { return false; } } /* * We have a range (re)partition join. We now get shard intervals for both * fragments, and then check if these intervals overlap. */ leftFragmentInterval = FragmentInterval(leftFragment); rightFragmentInterval = FragmentInterval(rightFragment); overlap = ShardIntervalsOverlap(leftFragmentInterval, rightFragmentInterval); if (!overlap) { if (log_min_messages <= DEBUG2 || client_min_messages <= DEBUG2) { StringInfo leftString = FragmentIntervalString(leftFragmentInterval); StringInfo rightString = FragmentIntervalString(rightFragmentInterval); ereport(DEBUG2, (errmsg("join prunable for intervals %s and %s", leftString->data, rightString->data))); } joinPrunable = true; } return joinPrunable; } /* * FragmentInterval takes the given fragment, and determines the range of data * covered by this fragment. The function then returns this range (interval). */ static ShardInterval * FragmentInterval(RangeTableFragment *fragment) { ShardInterval *fragmentInterval = NULL; if (fragment->fragmentType == CITUS_RTE_RELATION) { Assert(CitusIsA(fragment->fragmentReference, ShardInterval)); fragmentInterval = (ShardInterval *) fragment->fragmentReference; } else if (fragment->fragmentType == CITUS_RTE_REMOTE_QUERY) { Task *mergeTask = NULL; Assert(CitusIsA(fragment->fragmentReference, Task)); mergeTask = (Task *) fragment->fragmentReference; fragmentInterval = mergeTask->shardInterval; } return fragmentInterval; } /* Checks if the given shard intervals have overlapping ranges. */ bool ShardIntervalsOverlap(ShardInterval *firstInterval, ShardInterval *secondInterval) { bool nonOverlap = false; DistTableCacheEntry *intervalRelation = DistributedTableCacheEntry(firstInterval->relationId); FmgrInfo *comparisonFunction = intervalRelation->shardIntervalCompareFunction; Datum firstMin = 0; Datum firstMax = 0; Datum secondMin = 0; Datum secondMax = 0; firstMin = firstInterval->minValue; firstMax = firstInterval->maxValue; secondMin = secondInterval->minValue; secondMax = secondInterval->maxValue; /* * We need to have min/max values for both intervals first. Then, we assume * two intervals i1 = [min1, max1] and i2 = [min2, max2] do not overlap if * (max1 < min2) or (max2 < min1). For details, please see the explanation * on overlapping intervals at http://www.rgrjr.com/emacs/overlap.html. 
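* For example, [1, 10] and [11, 20] do not overlap because 10 < 11, whereas [1, 10]
* and [5, 15] do overlap; only joins between non-overlapping fragments can be pruned
* (interval values here are illustrative).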
*/ if (firstInterval->minValueExists && firstInterval->maxValueExists && secondInterval->minValueExists && secondInterval->maxValueExists) { Datum firstDatum = CompareCall2(comparisonFunction, firstMax, secondMin); Datum secondDatum = CompareCall2(comparisonFunction, secondMax, firstMin); int firstComparison = DatumGetInt32(firstDatum); int secondComparison = DatumGetInt32(secondDatum); if (firstComparison < 0 || secondComparison < 0) { nonOverlap = true; } } return (!nonOverlap); } /* * FragmentIntervalString takes the given fragment interval, and converts this * interval into its string representation for use in debug messages. */ static StringInfo FragmentIntervalString(ShardInterval *fragmentInterval) { StringInfo fragmentIntervalString = NULL; Oid typeId = fragmentInterval->valueTypeId; Oid outputFunctionId = InvalidOid; bool typeVariableLength = false; FmgrInfo *outputFunction = NULL; char *minValueString = NULL; char *maxValueString = NULL; Assert(fragmentInterval->minValueExists); Assert(fragmentInterval->maxValueExists); outputFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); getTypeOutputInfo(typeId, &outputFunctionId, &typeVariableLength); fmgr_info(outputFunctionId, outputFunction); minValueString = OutputFunctionCall(outputFunction, fragmentInterval->minValue); maxValueString = OutputFunctionCall(outputFunction, fragmentInterval->maxValue); fragmentIntervalString = makeStringInfo(); appendStringInfo(fragmentIntervalString, "[%s,%s]", minValueString, maxValueString); return fragmentIntervalString; } /* * DataFetchTaskList builds a data fetch task for every shard in the given shard * list, appends these data fetch tasks into a list, and returns this list. */ static List * DataFetchTaskList(uint64 jobId, uint32 taskIdIndex, List *fragmentList) { List *dataFetchTaskList = NIL; ListCell *fragmentCell = NULL; foreach(fragmentCell, fragmentList) { RangeTableFragment *fragment = (RangeTableFragment *) lfirst(fragmentCell); if (fragment->fragmentType == CITUS_RTE_RELATION) { ShardInterval *shardInterval = fragment->fragmentReference; uint64 shardId = shardInterval->shardId; StringInfo shardFetchQueryString = ShardFetchQueryString(shardId); Task *shardFetchTask = CreateBasicTask(jobId, taskIdIndex, SHARD_FETCH_TASK, shardFetchQueryString->data); shardFetchTask->shardId = shardId; dataFetchTaskList = lappend(dataFetchTaskList, shardFetchTask); taskIdIndex++; } else if (fragment->fragmentType == CITUS_RTE_REMOTE_QUERY) { Task *mergeTask = (Task *) fragment->fragmentReference; char *undefinedQueryString = NULL; /* create merge fetch task and have it depend on the merge task */ Task *mergeFetchTask = CreateBasicTask(jobId, taskIdIndex, MERGE_FETCH_TASK, undefinedQueryString); mergeFetchTask->dependedTaskList = list_make1(mergeTask); dataFetchTaskList = lappend(dataFetchTaskList, mergeFetchTask); taskIdIndex++; } } return dataFetchTaskList; } /* * ShardFetchQueryString constructs a query string to fetch the given shard from * the shards' placements. 
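* For a regular shard the generated string is roughly of the form
* SELECT worker_fetch_regular_table('lineitem_102008', <shard length>,
* '{node-1,node-2}', '{5432,5432}'), with the foreign table variant used for foreign
* shards (table, node, and shard names here are illustrative).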
*/ StringInfo ShardFetchQueryString(uint64 shardId) { StringInfo shardFetchQuery = NULL; uint64 shardLength = ShardLength(shardId); /* construct two array strings for node names and port numbers */ List *shardPlacements = FinalizedShardPlacementList(shardId); StringInfo nodeNameArrayString = NodeNameArrayString(shardPlacements); StringInfo nodePortArrayString = NodePortArrayString(shardPlacements); /* check storage type to create the correct query string */ ShardInterval *shardInterval = LoadShardInterval(shardId); char storageType = shardInterval->storageType; char *shardSchemaName = NULL; char *shardTableName = NULL; /* construct the shard name */ Oid shardSchemaId = get_rel_namespace(shardInterval->relationId); char *tableName = get_rel_name(shardInterval->relationId); shardSchemaName = get_namespace_name(shardSchemaId); shardTableName = pstrdup(tableName); AppendShardIdToName(&shardTableName, shardId); shardFetchQuery = makeStringInfo(); if (storageType == SHARD_STORAGE_TABLE || storageType == SHARD_STORAGE_RELAY || storageType == SHARD_STORAGE_COLUMNAR) { if (strcmp(shardSchemaName, "public") != 0) { char *qualifiedTableName = quote_qualified_identifier(shardSchemaName, shardTableName); appendStringInfo(shardFetchQuery, TABLE_FETCH_COMMAND, qualifiedTableName, shardLength, nodeNameArrayString->data, nodePortArrayString->data); } else { appendStringInfo(shardFetchQuery, TABLE_FETCH_COMMAND, shardTableName, shardLength, nodeNameArrayString->data, nodePortArrayString->data); } } else if (storageType == SHARD_STORAGE_FOREIGN) { if (strcmp(shardSchemaName, "public") != 0) { char *qualifiedTableName = quote_qualified_identifier(shardSchemaName, shardTableName); appendStringInfo(shardFetchQuery, FOREIGN_FETCH_COMMAND, qualifiedTableName, shardLength, nodeNameArrayString->data, nodePortArrayString->data); } else { appendStringInfo(shardFetchQuery, FOREIGN_FETCH_COMMAND, shardTableName, shardLength, nodeNameArrayString->data, nodePortArrayString->data); } } return shardFetchQuery; } /* * NodeNameArrayString extracts the node names from the given node list, stores * these node names in an array, and returns the array's string representation. */ static StringInfo NodeNameArrayString(List *shardPlacementList) { StringInfo nodeNameArrayString = NULL; ListCell *shardPlacementCell = NULL; uint32 nodeNameCount = (uint32) list_length(shardPlacementList); Datum *nodeNameArray = palloc0(nodeNameCount * sizeof(Datum)); uint32 nodeNameIndex = 0; foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell); Datum nodeName = CStringGetDatum(shardPlacement->nodeName); nodeNameArray[nodeNameIndex] = nodeName; nodeNameIndex++; } nodeNameArrayString = DatumArrayString(nodeNameArray, nodeNameCount, CSTRINGOID); return nodeNameArrayString; } /* * NodePortArrayString extracts the node ports from the given node list, stores * these node ports in an array, and returns the array's string representation. 
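* For example, placements on ports 5432 and 5433 produce the array string
* {5432,5433}.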
*/ static StringInfo NodePortArrayString(List *shardPlacementList) { StringInfo nodePortArrayString = NULL; ListCell *shardPlacementCell = NULL; uint32 nodePortCount = (uint32) list_length(shardPlacementList); Datum *nodePortArray = palloc0(nodePortCount * sizeof(Datum)); uint32 nodePortIndex = 0; foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(shardPlacementCell); Datum nodePort = UInt32GetDatum(shardPlacement->nodePort); nodePortArray[nodePortIndex] = nodePort; nodePortIndex++; } nodePortArrayString = DatumArrayString(nodePortArray, nodePortCount, INT4OID); return nodePortArrayString; } /* Helper function to return a datum array's external string representation. */ static StringInfo DatumArrayString(Datum *datumArray, uint32 datumCount, Oid datumTypeId) { StringInfo arrayStringInfo = NULL; FmgrInfo *arrayOutFunction = NULL; ArrayType *arrayObject = NULL; Datum arrayObjectDatum = 0; Datum arrayStringDatum = 0; char *arrayString = NULL; int16 typeLength = 0; bool typeByValue = false; char typeAlignment = 0; /* construct the array object from the given array */ get_typlenbyvalalign(datumTypeId, &typeLength, &typeByValue, &typeAlignment); arrayObject = construct_array(datumArray, datumCount, datumTypeId, typeLength, typeByValue, typeAlignment); arrayObjectDatum = PointerGetDatum(arrayObject); /* convert the array object to its string representation */ arrayOutFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); fmgr_info(ARRAY_OUT_FUNC_ID, arrayOutFunction); arrayStringDatum = FunctionCall1(arrayOutFunction, arrayObjectDatum); arrayString = DatumGetCString(arrayStringDatum); arrayStringInfo = makeStringInfo(); appendStringInfo(arrayStringInfo, "%s", arrayString); return arrayStringInfo; } /* * CreateBasicTask creates a task, initializes fields that are common to each task, * and returns the created task. */ Task * CreateBasicTask(uint64 jobId, uint32 taskId, TaskType taskType, char *queryString) { Task *task = CitusMakeNode(Task); task->jobId = jobId; task->taskId = taskId; task->taskType = taskType; task->replicationModel = REPLICATION_MODEL_INVALID; task->queryString = queryString; return task; } /* * UpdateRangeTableAlias walks over each fragment in the given fragment list, * and creates an alias that represents the fragment name to be used in the * query. The function then updates the corresponding range table entry with * this alias. */ static void UpdateRangeTableAlias(List *rangeTableList, List *fragmentList) { ListCell *fragmentCell = NULL; foreach(fragmentCell, fragmentList) { RangeTableFragment *fragment = (RangeTableFragment *) lfirst(fragmentCell); Index rangeTableId = fragment->rangeTableId; RangeTblEntry *rangeTableEntry = rt_fetch(rangeTableId, rangeTableList); Alias *fragmentAlias = FragmentAlias(rangeTableEntry, fragment); rangeTableEntry->alias = fragmentAlias; } } /* * FragmentAlias creates an alias structure that captures the table fragment's * name on the worker node. Each fragment represents either a regular shard, or * a merge task. 
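* For example, shard 102008 of a table named lineitem maps to the fragment name
* lineitem_102008 (schema-qualified when the table is outside public), while a merge
* task maps to its task table inside the job's schema (names and shard id here are
* illustrative).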
*/ static Alias * FragmentAlias(RangeTblEntry *rangeTableEntry, RangeTableFragment *fragment) { Alias *alias = NULL; char *aliasName = NULL; char *schemaName = NULL; char *fragmentName = NULL; CitusRTEKind fragmentType = fragment->fragmentType; if (fragmentType == CITUS_RTE_RELATION) { ShardInterval *shardInterval = (ShardInterval *) fragment->fragmentReference; uint64 shardId = shardInterval->shardId; Oid relationId = rangeTableEntry->relid; char *relationName = get_rel_name(relationId); /* * If the table is not in the default namespace (public), we include it in * the fragment alias. */ Oid schemaId = get_rel_namespace(relationId); schemaName = get_namespace_name(schemaId); if (strncmp(schemaName, "public", NAMEDATALEN) == 0) { schemaName = NULL; } aliasName = relationName; /* * Set shard name in alias to <relation_name>_<shard_id>. */ fragmentName = pstrdup(relationName); AppendShardIdToName(&fragmentName, shardId); } else if (fragmentType == CITUS_RTE_REMOTE_QUERY) { Task *mergeTask = (Task *) fragment->fragmentReference; uint64 jobId = mergeTask->jobId; uint32 taskId = mergeTask->taskId; StringInfo jobSchemaName = JobSchemaName(jobId); StringInfo taskTableName = TaskTableName(taskId); StringInfo aliasNameString = makeStringInfo(); appendStringInfo(aliasNameString, "%s.%s", jobSchemaName->data, taskTableName->data); aliasName = aliasNameString->data; fragmentName = taskTableName->data; schemaName = jobSchemaName->data; } /* * We need to set the aliasname to relation name, as pg_get_query_def() uses * the relation name to disambiguate column names from different tables. */ alias = rangeTableEntry->alias; if (alias == NULL) { alias = makeNode(Alias); alias->aliasname = aliasName; } ModifyRangeTblExtraData(rangeTableEntry, CITUS_RTE_SHARD, schemaName, fragmentName, NIL); return alias; } /* * AnchorShardId walks over each fragment in the given fragment list, finds the * fragment that corresponds to the given anchor range tableId, and returns this * fragment's shard identifier. Note that the given tableId must correspond to a * base relation. */ static uint64 AnchorShardId(List *fragmentList, uint32 anchorRangeTableId) { uint64 anchorShardId = INVALID_SHARD_ID; ListCell *fragmentCell = NULL; foreach(fragmentCell, fragmentList) { RangeTableFragment *fragment = (RangeTableFragment *) lfirst(fragmentCell); if (fragment->rangeTableId == anchorRangeTableId) { ShardInterval *shardInterval = NULL; Assert(fragment->fragmentType == CITUS_RTE_RELATION); Assert(CitusIsA(fragment->fragmentReference, ShardInterval)); shardInterval = (ShardInterval *) fragment->fragmentReference; anchorShardId = shardInterval->shardId; break; } } Assert(anchorShardId != INVALID_SHARD_ID); return anchorShardId; } /* * PruneSqlTaskDependencies iterates over each sql task from the given sql task * list, and prunes away any data fetch tasks which are redundant or not needed * for the completion of that task. Specifically, the function prunes away data * fetch tasks for the anchor shard and any merge-fetch tasks, as the task * assignment algorithm ensures co-location of these tasks.
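* For example, a sql task anchored on shard 102008 needs no separate fetch task for that shard, because the task is always assigned to a node that already holds a placement of its anchor shard (the shard id here is illustrative).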
*/ static List * PruneSqlTaskDependencies(List *sqlTaskList) { ListCell *sqlTaskCell = NULL; foreach(sqlTaskCell, sqlTaskList) { Task *sqlTask = (Task *) lfirst(sqlTaskCell); List *dependedTaskList = sqlTask->dependedTaskList; List *prunedDependedTaskList = NIL; ListCell *dependedTaskCell = NULL; foreach(dependedTaskCell, dependedTaskList) { Task *dataFetchTask = (Task *) lfirst(dependedTaskCell); /* * If we have a shard fetch task for the anchor shard, or if we have * a merge fetch task, our task assignment algorithm makes sure that * the sql task is colocated with the anchor shard / merge task. We * can therefore prune out this data fetch task. */ if (dataFetchTask->taskType == SHARD_FETCH_TASK && dataFetchTask->shardId != sqlTask->anchorShardId) { prunedDependedTaskList = lappend(prunedDependedTaskList, dataFetchTask); } else if (dataFetchTask->taskType == MERGE_FETCH_TASK) { Task *mergeTaskReference = NULL; List *mergeFetchDependencyList = dataFetchTask->dependedTaskList; Assert(list_length(mergeFetchDependencyList) == 1); mergeTaskReference = (Task *) linitial(mergeFetchDependencyList); prunedDependedTaskList = lappend(prunedDependedTaskList, mergeTaskReference); ereport(DEBUG2, (errmsg("pruning merge fetch taskId %d", dataFetchTask->taskId), errdetail("Creating dependency on merge taskId %d", mergeTaskReference->taskId))); } } sqlTask->dependedTaskList = prunedDependedTaskList; } return sqlTaskList; } /* * MapTaskList creates a list of map tasks for the given MapMerge job. For this, * the function walks over each filter task (sql task) in the given filter task * list, and wraps this task with a map function call. The map function call * repartitions the filter task's output according to MapMerge job's parameters. */ static List * MapTaskList(MapMergeJob *mapMergeJob, List *filterTaskList) { List *mapTaskList = NIL; Query *filterQuery = mapMergeJob->job.jobQuery; List *rangeTableList = filterQuery->rtable; ListCell *filterTaskCell = NULL; Var *partitionColumn = mapMergeJob->partitionColumn; Oid partitionColumnType = partitionColumn->vartype; char *partitionColumnTypeFullName = format_type_be_qualified(partitionColumnType); int32 partitionColumnTypeMod = partitionColumn->vartypmod; char *partitionColumnName = NULL; List *groupClauseList = filterQuery->groupClause; if (groupClauseList != NIL) { List *targetEntryList = filterQuery->targetList; List *groupTargetEntryList = GroupTargetEntryList(groupClauseList, targetEntryList); TargetEntry *groupByTargetEntry = (TargetEntry *) linitial(groupTargetEntryList); partitionColumnName = groupByTargetEntry->resname; } else { partitionColumnName = ColumnName(partitionColumn, rangeTableList); } foreach(filterTaskCell, filterTaskList) { Task *filterTask = (Task *) lfirst(filterTaskCell); uint64 jobId = filterTask->jobId; uint32 taskId = filterTask->taskId; Task *mapTask = NULL; /* wrap repartition query string around filter query string */ StringInfo mapQueryString = makeStringInfo(); char *filterQueryString = filterTask->queryString; char *filterQueryEscapedText = quote_literal_cstr(filterQueryString); PartitionType partitionType = mapMergeJob->partitionType; if (partitionType == RANGE_PARTITION_TYPE) { ShardInterval **intervalArray = mapMergeJob->sortedShardIntervalArray; uint32 intervalCount = mapMergeJob->partitionCount; ArrayType *splitPointObject = SplitPointObject(intervalArray, intervalCount); StringInfo splitPointString = SplitPointArrayString(splitPointObject, partitionColumnType, partitionColumnTypeMod); appendStringInfo(mapQueryString, 
RANGE_PARTITION_COMMAND, jobId, taskId, filterQueryEscapedText, partitionColumnName, partitionColumnTypeFullName, splitPointString->data); } else { uint32 partitionCount = mapMergeJob->partitionCount; appendStringInfo(mapQueryString, HASH_PARTITION_COMMAND, jobId, taskId, filterQueryEscapedText, partitionColumnName, partitionColumnTypeFullName, partitionCount); } /* convert filter query task into map task */ mapTask = filterTask; mapTask->queryString = mapQueryString->data; mapTask->taskType = MAP_TASK; mapTaskList = lappend(mapTaskList, mapTask); } return mapTaskList; } /* * ColumnName resolves the given column's name. The given column could belong to * a regular table or to an intermediate table formed to execute a distributed * query. */ static char * ColumnName(Var *column, List *rangeTableList) { char *columnName = NULL; Index tableId = column->varno; AttrNumber columnNumber = column->varattno; RangeTblEntry *rangeTableEntry = rt_fetch(tableId, rangeTableList); CitusRTEKind rangeTableKind = GetRangeTblKind(rangeTableEntry); if (rangeTableKind == CITUS_RTE_REMOTE_QUERY) { Alias *referenceNames = rangeTableEntry->eref; List *columnNameList = referenceNames->colnames; int columnIndex = columnNumber - 1; Value *columnValue = (Value *) list_nth(columnNameList, columnIndex); columnName = strVal(columnValue); } else if (rangeTableKind == CITUS_RTE_RELATION) { Oid relationId = rangeTableEntry->relid; columnName = get_attname(relationId, columnNumber); } Assert(columnName != NULL); return columnName; } /* * SplitPointArrayString takes the array representation of the given split point * object, and converts this array (and array's typed elements) to their string * representations. */ static StringInfo SplitPointArrayString(ArrayType *splitPointObject, Oid columnType, int32 columnTypeMod) { StringInfo splitPointArrayString = NULL; Datum splitPointDatum = PointerGetDatum(splitPointObject); Oid outputFunctionId = InvalidOid; bool typeVariableLength = false; FmgrInfo *arrayOutFunction = NULL; char *arrayOutputText = NULL; char *arrayOutputEscapedText = NULL; char *arrayOutTypeName = NULL; Oid arrayOutType = get_array_type(columnType); if (arrayOutType == InvalidOid) { char *columnTypeName = format_type_be(columnType); ereport(ERROR, (errmsg("cannot range repartition table on column type %s", columnTypeName))); } arrayOutFunction = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); getTypeOutputInfo(arrayOutType, &outputFunctionId, &typeVariableLength); fmgr_info(outputFunctionId, arrayOutFunction); arrayOutputText = OutputFunctionCall(arrayOutFunction, splitPointDatum); arrayOutputEscapedText = quote_literal_cstr(arrayOutputText); /* add an explicit cast to array's string representation */ arrayOutTypeName = format_type_with_typemod(arrayOutType, columnTypeMod); splitPointArrayString = makeStringInfo(); appendStringInfo(splitPointArrayString, "%s::%s", arrayOutputEscapedText, arrayOutTypeName); return splitPointArrayString; } /* * MergeTaskList creates a list of merge tasks for the given MapMerge job. While * doing this, the function also establishes dependencies between each merge * task and its downstream map task dependencies by creating "map fetch" tasks. 
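* With M map tasks and P partitions, this results in P merge tasks and M x P map output fetch tasks overall, since each merge task depends on one fetch task per map task.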
*/ static List * MergeTaskList(MapMergeJob *mapMergeJob, List *mapTaskList, uint32 taskIdIndex) { List *mergeTaskList = NIL; uint64 jobId = mapMergeJob->job.jobId; uint32 partitionCount = mapMergeJob->partitionCount; uint32 partitionId = 0; uint32 initialPartitionId = 0; /* build column name and column type arrays (table schema) */ Query *filterQuery = mapMergeJob->job.jobQuery; List *targetEntryList = filterQuery->targetList; /* if all map tasks were pruned away, return NIL for merge tasks */ if (mapTaskList == NIL) { return NIL; } /* * XXX: We currently ignore the 0th partition bucket that range partitioning * generates. This bucket holds all values less than the minimum value or * NULLs, both of which we can currently ignore. However, when we support * range re-partitioned OUTER joins, we will need these rows for the * relation whose rows are retained in the OUTER join. */ initialPartitionId = 0; if (mapMergeJob->partitionType == RANGE_PARTITION_TYPE) { initialPartitionId = 1; partitionCount = partitionCount + 1; } /* build merge tasks and their associated "map output fetch" tasks */ for (partitionId = initialPartitionId; partitionId < partitionCount; partitionId++) { Task *mergeTask = NULL; List *mapOutputFetchTaskList = NIL; ListCell *mapTaskCell = NULL; uint32 mergeTaskId = taskIdIndex; Query *reduceQuery = mapMergeJob->reduceQuery; if (reduceQuery == NULL) { uint32 columnCount = (uint32) list_length(targetEntryList); StringInfo columnNames = ColumnNameArrayString(columnCount, jobId); StringInfo columnTypes = ColumnTypeArrayString(targetEntryList); StringInfo mergeQueryString = makeStringInfo(); appendStringInfo(mergeQueryString, MERGE_FILES_INTO_TABLE_COMMAND, jobId, taskIdIndex, columnNames->data, columnTypes->data); /* create merge task */ mergeTask = CreateBasicTask(jobId, mergeTaskId, MERGE_TASK, mergeQueryString->data); } else { StringInfo mergeTableQueryString = MergeTableQueryString(taskIdIndex, targetEntryList); char *escapedMergeTableQueryString = quote_literal_cstr(mergeTableQueryString->data); StringInfo intermediateTableQueryString = IntermediateTableQueryString(jobId, taskIdIndex, reduceQuery); char *escapedIntermediateTableQueryString = quote_literal_cstr(intermediateTableQueryString->data); StringInfo mergeAndRunQueryString = makeStringInfo(); appendStringInfo(mergeAndRunQueryString, MERGE_FILES_AND_RUN_QUERY_COMMAND, jobId, taskIdIndex, escapedMergeTableQueryString, escapedIntermediateTableQueryString); mergeTask = CreateBasicTask(jobId, mergeTaskId, MERGE_TASK, mergeAndRunQueryString->data); } mergeTask->partitionId = partitionId; taskIdIndex++; /* create tasks to fetch map outputs to this merge task */ foreach(mapTaskCell, mapTaskList) { Task *mapTask = (Task *) lfirst(mapTaskCell); /* we need node names for the query, and we'll resolve them later */ char *undefinedQueryString = NULL; Task *mapOutputFetchTask = CreateBasicTask(jobId, taskIdIndex, MAP_OUTPUT_FETCH_TASK, undefinedQueryString); mapOutputFetchTask->partitionId = partitionId; mapOutputFetchTask->upstreamTaskId = mergeTaskId; mapOutputFetchTask->dependedTaskList = list_make1(mapTask); taskIdIndex++; mapOutputFetchTaskList = lappend(mapOutputFetchTaskList, mapOutputFetchTask); } /* merge task depends on completion of fetch tasks */ mergeTask->dependedTaskList = mapOutputFetchTaskList; /* if range repartitioned, each merge task represents an interval */ if (mapMergeJob->partitionType == RANGE_PARTITION_TYPE) { int32 mergeTaskIntervalId = partitionId - 1; ShardInterval **mergeTaskIntervals = 
mapMergeJob->sortedShardIntervalArray; Assert(mergeTaskIntervalId >= 0); mergeTask->shardInterval = mergeTaskIntervals[mergeTaskIntervalId]; } mergeTaskList = lappend(mergeTaskList, mergeTask); } return mergeTaskList; } /* * ColumnNameArrayString creates a list of column names for a merged table, and * outputs this list of column names in their (array) string representation. */ static StringInfo ColumnNameArrayString(uint32 columnCount, uint64 generatingJobId) { StringInfo columnNameArrayString = NULL; Datum *columnNameArray = palloc0(columnCount * sizeof(Datum)); uint32 columnNameIndex = 0; /* build list of intermediate column names, generated by given jobId */ List *columnNameList = DerivedColumnNameList(columnCount, generatingJobId); ListCell *columnNameCell = NULL; foreach(columnNameCell, columnNameList) { Value *columnNameValue = (Value *) lfirst(columnNameCell); char *columnNameString = strVal(columnNameValue); Datum columnName = CStringGetDatum(columnNameString); columnNameArray[columnNameIndex] = columnName; columnNameIndex++; } columnNameArrayString = DatumArrayString(columnNameArray, columnCount, CSTRINGOID); return columnNameArrayString; } /* * ColumnTypeArrayString resolves a list of column types for a merged table, and * outputs this list of column types in their (array) string representation. */ static StringInfo ColumnTypeArrayString(List *targetEntryList) { StringInfo columnTypeArrayString = NULL; ListCell *targetEntryCell = NULL; uint32 columnCount = (uint32) list_length(targetEntryList); Datum *columnTypeArray = palloc0(columnCount * sizeof(Datum)); uint32 columnTypeIndex = 0; foreach(targetEntryCell, targetEntryList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); Node *columnExpression = (Node *) targetEntry->expr; Oid columnTypeId = exprType(columnExpression); int32 columnTypeMod = exprTypmod(columnExpression); char *columnTypeName = format_type_with_typemod(columnTypeId, columnTypeMod); Datum columnType = CStringGetDatum(columnTypeName); columnTypeArray[columnTypeIndex] = columnType; columnTypeIndex++; } columnTypeArrayString = DatumArrayString(columnTypeArray, columnCount, CSTRINGOID); return columnTypeArrayString; } /* * AssignTaskList assigns locations to given tasks based on dependencies between * tasks and configured task assignment policies. The function also handles the * case where multiple SQL tasks depend on the same merge task, and makes sure * that this group of multiple SQL tasks and the merge task are assigned to the * same location. */ static List * AssignTaskList(List *sqlTaskList) { List *assignedSqlTaskList = NIL; Task *firstSqlTask = NULL; bool hasAnchorShardId = false; bool hasMergeTaskDependencies = false; ListCell *sqlTaskCell = NULL; List *primarySqlTaskList = NIL; ListCell *primarySqlTaskCell = NULL; List *constrainedSqlTaskList = NIL; ListCell *constrainedSqlTaskCell = NULL; /* no tasks to assign */ if (sqlTaskList == NIL) { return NIL; } firstSqlTask = (Task *) linitial(sqlTaskList); if (firstSqlTask->anchorShardId != INVALID_SHARD_ID) { hasAnchorShardId = true; } /* * If these SQL tasks don't depend on any merge tasks, we can assign each * one independently of the other. We therefore go ahead and assign these * SQL tasks using the "anchor shard based" assignment algorithms. 
*/ hasMergeTaskDependencies = HasMergeTaskDependencies(sqlTaskList); if (!hasMergeTaskDependencies) { Assert(hasAnchorShardId); assignedSqlTaskList = AssignAnchorShardTaskList(sqlTaskList); return assignedSqlTaskList; } /* * SQL tasks can depend on merge tasks in one of two ways: (1) each SQL task * depends on merge task(s) that no other SQL task depends upon, (2) several * SQL tasks depend on the same merge task(s) and all need to be assigned to * the same worker node. To handle the second case, we first pick a primary * SQL task among those that depend on the same merge task, and assign it. */ foreach(sqlTaskCell, sqlTaskList) { Task *sqlTask = (Task *) lfirst(sqlTaskCell); List *mergeTaskList = FindDependedMergeTaskList(sqlTask); Task *firstMergeTask = (Task *) linitial(mergeTaskList); if (!firstMergeTask->assignmentConstrained) { firstMergeTask->assignmentConstrained = true; primarySqlTaskList = lappend(primarySqlTaskList, sqlTask); } } if (hasAnchorShardId) { primarySqlTaskList = AssignAnchorShardTaskList(primarySqlTaskList); } else { primarySqlTaskList = AssignDualHashTaskList(primarySqlTaskList); } /* propagate SQL task assignments to the merge tasks we depend upon */ foreach(primarySqlTaskCell, primarySqlTaskList) { Task *sqlTask = (Task *) lfirst(primarySqlTaskCell); List *mergeTaskList = FindDependedMergeTaskList(sqlTask); ListCell *mergeTaskCell = NULL; foreach(mergeTaskCell, mergeTaskList) { Task *mergeTask = (Task *) lfirst(mergeTaskCell); Assert(mergeTask->taskPlacementList == NIL); mergeTask->taskPlacementList = list_copy(sqlTask->taskPlacementList); } assignedSqlTaskList = lappend(assignedSqlTaskList, sqlTask); } /* * If we had a set of SQL tasks depending on the same merge task, we only * assigned one SQL task from that set. We call the assigned SQL task the * primary, and note that the remaining SQL tasks are constrained by the * primary's task assignment. We propagate the primary's task assignment in * each set to the remaining (constrained) tasks. */ constrainedSqlTaskList = TaskListDifference(sqlTaskList, primarySqlTaskList); foreach(constrainedSqlTaskCell, constrainedSqlTaskList) { Task *sqlTask = (Task *) lfirst(constrainedSqlTaskCell); List *mergeTaskList = FindDependedMergeTaskList(sqlTask); List *mergeTaskPlacementList = NIL; ListCell *mergeTaskCell = NULL; foreach(mergeTaskCell, mergeTaskList) { Task *mergeTask = (Task *) lfirst(mergeTaskCell); /* * If we have more than one merge task, both of them should have the * same task placement list. */ mergeTaskPlacementList = mergeTask->taskPlacementList; Assert(mergeTaskPlacementList != NIL); ereport(DEBUG3, (errmsg("propagating assignment from merge task %d " "to constrained sql task %d", mergeTask->taskId, sqlTask->taskId))); } sqlTask->taskPlacementList = list_copy(mergeTaskPlacementList); assignedSqlTaskList = lappend(assignedSqlTaskList, sqlTask); } return assignedSqlTaskList; } /* * HasMergeTaskDependencies checks if sql tasks in the given sql task list have * any dependencies on merge tasks. If they do, the function returns true. 
*/ static bool HasMergeTaskDependencies(List *sqlTaskList) { bool hasMergeTaskDependencies = false; Task *sqlTask = (Task *) linitial(sqlTaskList); List *dependedTaskList = sqlTask->dependedTaskList; ListCell *dependedTaskCell = NULL; foreach(dependedTaskCell, dependedTaskList) { Task *dependedTask = (Task *) lfirst(dependedTaskCell); if (dependedTask->taskType == MERGE_TASK) { hasMergeTaskDependencies = true; break; } } return hasMergeTaskDependencies; } /* Return true if two tasks are equal, false otherwise. */ bool TasksEqual(const Task *a, const Task *b) { Assert(CitusIsA(a, Task)); Assert(CitusIsA(b, Task)); if (a->taskType != b->taskType) { return false; } if (a->jobId != b->jobId) { return false; } if (a->taskId != b->taskId) { return false; } return true; } /* * TaskListAppendUnique returns a list that contains the elements of the * input task list and appends the input task parameter if it doesn't already * exists the list. */ List * TaskListAppendUnique(List *list, Task *task) { if (TaskListMember(list, task)) { return list; } return lappend(list, task); } /* * TaskListConcatUnique append to list1 each member of list2 that isn't * already in list1. Whether an element is already a member of the list * is determined via TaskListMember(). */ List * TaskListConcatUnique(List *list1, List *list2) { ListCell *taskCell = NULL; foreach(taskCell, list2) { Task *currentTask = (Task *) lfirst(taskCell); if (!TaskListMember(list1, currentTask)) { list1 = lappend(list1, currentTask); } } return list1; } /* Is the passed in Task a member of the list. */ bool TaskListMember(const List *taskList, const Task *task) { const ListCell *taskCell = NULL; foreach(taskCell, taskList) { if (TasksEqual((Task *) lfirst(taskCell), task)) { return true; } } return false; } /* * TaskListDifference returns a list that contains all the tasks in taskList1 * that are not in taskList2. The returned list is freshly allocated via * palloc(), but the cells themselves point to the same objects as the cells * of the input lists. */ List * TaskListDifference(const List *list1, const List *list2) { const ListCell *taskCell = NULL; List *resultList = NIL; if (list2 == NIL) { return list_copy(list1); } foreach(taskCell, list1) { if (!TaskListMember(list2, lfirst(taskCell))) { resultList = lappend(resultList, lfirst(taskCell)); } } return resultList; } /* * TaskListUnion generate the union of two tasks lists. This is calculated by * copying list1 via list_copy(), then adding to it all the members of list2 * that aren't already in list1. */ List * TaskListUnion(const List *list1, const List *list2) { const ListCell *taskCell = NULL; List *resultList = NIL; resultList = list_copy(list1); foreach(taskCell, list2) { if (!TaskListMember(resultList, lfirst(taskCell))) { resultList = lappend(resultList, lfirst(taskCell)); } } return resultList; } /* * AssignAnchorShardTaskList assigns locations to the given tasks based on the * configured task assignment policy. The distributed executor later sends these * tasks to their assigned locations for remote execution. 
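* The policy (greedy, first-replica, or round-robin) is configured through the citus.task_assignment_policy setting.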
*/ List * AssignAnchorShardTaskList(List *taskList) { List *assignedTaskList = NIL; /* choose task assignment policy based on config value */ if (TaskAssignmentPolicy == TASK_ASSIGNMENT_GREEDY) { assignedTaskList = GreedyAssignTaskList(taskList); } else if (TaskAssignmentPolicy == TASK_ASSIGNMENT_FIRST_REPLICA) { assignedTaskList = FirstReplicaAssignTaskList(taskList); } else if (TaskAssignmentPolicy == TASK_ASSIGNMENT_ROUND_ROBIN) { assignedTaskList = RoundRobinAssignTaskList(taskList); } Assert(assignedTaskList != NIL); return assignedTaskList; } /* * GreedyAssignTaskList uses a greedy algorithm similar to Hadoop's, and assigns * locations to the given tasks. The ideal assignment algorithm balances three * properties: (a) determinism, (b) even load distribution, and (c) consistency * across similar task lists. To maintain these properties, the algorithm sorts * all its input lists. */ static List * GreedyAssignTaskList(List *taskList) { List *assignedTaskList = NIL; List *activeShardPlacementLists = NIL; uint32 assignedTaskCount = 0; uint32 taskCount = list_length(taskList); /* get the worker node list and sort the list */ List *workerNodeList = ActiveReadableNodeList(); workerNodeList = SortList(workerNodeList, CompareWorkerNodes); /* * We first sort tasks by their anchor shard id. We then walk over each task * in the sorted list, get the task's anchor shard id, and look up the shard * placements (locations) for this shard id. Next, we sort the placements by * their insertion time, and append them to a new list. */ taskList = SortList(taskList, CompareTasksByShardId); activeShardPlacementLists = ActiveShardPlacementLists(taskList); while (assignedTaskCount < taskCount) { ListCell *workerNodeCell = NULL; uint32 loopStartTaskCount = assignedTaskCount; /* walk over each node and check if we can assign a task to it */ foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); Task *assignedTask = GreedyAssignTask(workerNode, taskList, activeShardPlacementLists); if (assignedTask != NULL) { assignedTaskList = lappend(assignedTaskList, assignedTask); assignedTaskCount++; } } /* if we could not assign any new tasks, avoid looping forever */ if (assignedTaskCount == loopStartTaskCount) { uint32 remainingTaskCount = taskCount - assignedTaskCount; ereport(ERROR, (errmsg("failed to assign %u task(s) to worker nodes", remainingTaskCount))); } } return assignedTaskList; } /* * GreedyAssignTask tries to assign a task to the given worker node. To do this, * the function walks over tasks' anchor shard ids, and finds the first set of * nodes the shards were replicated to. If any of these replica nodes and the * given worker node match, the corresponding task is assigned to that node. If * not, the function goes on to search the second set of replicas and so forth. * * Note that this function has side-effects; when the function assigns a new * task, it overwrites the corresponding task list pointer. 
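* Specifically, the assigned task's cell in the task list is set to NULL so that later iterations skip it.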
*/ static Task * GreedyAssignTask(WorkerNode *workerNode, List *taskList, List *activeShardPlacementLists) { Task *assignedTask = NULL; List *taskPlacementList = NIL; ShardPlacement *primaryPlacement = NULL; uint32 rotatePlacementListBy = 0; uint32 replicaIndex = 0; uint32 replicaCount = ShardReplicationFactor; const char *workerName = workerNode->workerName; const uint32 workerPort = workerNode->workerPort; while ((assignedTask == NULL) && (replicaIndex < replicaCount)) { /* walk over all tasks and try to assign one */ ListCell *taskCell = NULL; ListCell *placementListCell = NULL; forboth(taskCell, taskList, placementListCell, activeShardPlacementLists) { Task *task = (Task *) lfirst(taskCell); List *placementList = (List *) lfirst(placementListCell); ShardPlacement *placement = NULL; uint32 placementCount = 0; /* check if we already assigned this task */ if (task == NULL) { continue; } /* check if we have enough replicas */ placementCount = list_length(placementList); if (placementCount <= replicaIndex) { continue; } placement = (ShardPlacement *) list_nth(placementList, replicaIndex); if ((strncmp(placement->nodeName, workerName, WORKER_LENGTH) == 0) && (placement->nodePort == workerPort)) { /* we found a task to assign to the given worker node */ assignedTask = task; taskPlacementList = placementList; rotatePlacementListBy = replicaIndex; /* overwrite task list to signal that this task is assigned */ taskCell->data.ptr_value = NULL; break; } } /* go over the next set of shard replica placements */ replicaIndex++; } /* if we found a task placement list, rotate and assign task placements */ if (assignedTask != NULL) { taskPlacementList = LeftRotateList(taskPlacementList, rotatePlacementListBy); assignedTask->taskPlacementList = taskPlacementList; primaryPlacement = (ShardPlacement *) linitial(assignedTask->taskPlacementList); ereport(DEBUG3, (errmsg("assigned task %u to node %s:%u", assignedTask->taskId, primaryPlacement->nodeName, primaryPlacement->nodePort))); } return assignedTask; } /* * FirstReplicaAssignTaskList assigns locations to the given tasks simply by * looking at placements for a given shard. A particular task's assignments are * then ordered by the insertion order of the relevant placements rows. In other * words, a task for a specific shard is simply assigned to the first replica * for that shard. This algorithm is extremely simple and intended for use when * a customer has placed shards carefully and wants strong guarantees about * which shards will be used by what nodes (i.e. for stronger memory residency * guarantees). */ List * FirstReplicaAssignTaskList(List *taskList) { /* No additional reordering need take place for this algorithm */ List *(*reorderFunction)(Task *, List *) = NULL; taskList = ReorderAndAssignTaskList(taskList, reorderFunction); return taskList; } /* * RoundRobinAssignTaskList uses a round-robin algorithm to assign locations to * the given tasks. An ideal round-robin implementation requires keeping shared * state for task assignments; and we instead approximate our implementation by * relying on the sequentially increasing jobId. For each task, we mod its jobId * by the number of active shard placements, and ensure that we rotate between * these placements across subsequent queries. */ static List * RoundRobinAssignTaskList(List *taskList) { taskList = ReorderAndAssignTaskList(taskList, RoundRobinReorder); return taskList; } /* * RoundRobinReorder implements the core of the round-robin assignment policy. 
* It takes a task and placement list and rotates a copy of the placement list * based on the task's jobId. The rotated copy is returned. */ static List * RoundRobinReorder(Task *task, List *placementList) { uint64 jobId = task->jobId; uint32 activePlacementCount = list_length(placementList); uint32 roundRobinIndex = (jobId % activePlacementCount); placementList = LeftRotateList(placementList, roundRobinIndex); return placementList; } /* * ReorderAndAssignTaskList finds the placements for a task based on its anchor * shard id and then sorts them by insertion time. If reorderFunction is given, * it is used to reorder the placements list in a custom fashion (for instance, * by rotation or shuffling). Returns the task list with placements assigned. */ static List * ReorderAndAssignTaskList(List *taskList, List * (*reorderFunction)(Task *, List *)) { List *assignedTaskList = NIL; List *activeShardPlacementLists = NIL; ListCell *taskCell = NULL; ListCell *placementListCell = NULL; uint32 unAssignedTaskCount = 0; /* * We first sort tasks by their anchor shard id. We then sort placements for * each anchor shard by the placement's insertion time. Note that we sort * these lists just to make our policy more deterministic. */ taskList = SortList(taskList, CompareTasksByShardId); activeShardPlacementLists = ActiveShardPlacementLists(taskList); forboth(taskCell, taskList, placementListCell, activeShardPlacementLists) { Task *task = (Task *) lfirst(taskCell); List *placementList = (List *) lfirst(placementListCell); /* inactive placements are already filtered out */ uint32 activePlacementCount = list_length(placementList); if (activePlacementCount > 0) { ShardPlacement *primaryPlacement = NULL; if (reorderFunction != NULL) { placementList = reorderFunction(task, placementList); } task->taskPlacementList = placementList; primaryPlacement = (ShardPlacement *) linitial(task->taskPlacementList); ereport(DEBUG3, (errmsg("assigned task %u to node %s:%u", task->taskId, primaryPlacement->nodeName, primaryPlacement->nodePort))); assignedTaskList = lappend(assignedTaskList, task); } else { unAssignedTaskCount++; } } /* if we have unassigned tasks, error out */ if (unAssignedTaskCount > 0) { ereport(ERROR, (errmsg("failed to assign %u task(s) to worker nodes", unAssignedTaskCount))); } return assignedTaskList; } /* Helper function to compare two tasks by their anchor shardId. */ static int CompareTasksByShardId(const void *leftElement, const void *rightElement) { const Task *leftTask = *((const Task **) leftElement); const Task *rightTask = *((const Task **) rightElement); uint64 leftShardId = leftTask->anchorShardId; uint64 rightShardId = rightTask->anchorShardId; /* we compare 64-bit integers, instead of casting their difference to int */ if (leftShardId > rightShardId) { return 1; } else if (leftShardId < rightShardId) { return -1; } else { return 0; } } /* * ActiveShardPlacementLists finds the active shard placement list for each task in * the given task list, sorts each shard placement list by shard creation time, * and adds the sorted placement list into a new list of lists. The function also * ensures a one-to-one mapping between each placement list in the new list of * lists and each task in the given task list. 
*/ static List * ActiveShardPlacementLists(List *taskList) { List *shardPlacementLists = NIL; ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); uint64 anchorShardId = task->anchorShardId; List *shardPlacementList = FinalizedShardPlacementList(anchorShardId); /* filter out shard placements that reside in inactive nodes */ List *activeShardPlacementList = ActivePlacementList(shardPlacementList); /* sort shard placements by their creation time */ activeShardPlacementList = SortList(activeShardPlacementList, CompareShardPlacements); shardPlacementLists = lappend(shardPlacementLists, activeShardPlacementList); } return shardPlacementLists; } /* * CompareShardPlacements compares two shard placements by their placement IDs; * these IDs reflect the order in which the placements were inserted into * pg_dist_placement. */ int CompareShardPlacements(const void *leftElement, const void *rightElement) { const ShardPlacement *leftPlacement = *((const ShardPlacement **) leftElement); const ShardPlacement *rightPlacement = *((const ShardPlacement **) rightElement); uint64 leftPlacementId = leftPlacement->placementId; uint64 rightPlacementId = rightPlacement->placementId; if (leftPlacementId < rightPlacementId) { return -1; } else if (leftPlacementId > rightPlacementId) { return 1; } else { return 0; } } /* * ActivePlacementList walks over shard placements in the given list, and finds * the corresponding worker node for each placement. The function then checks if * that worker node is active, and if it is, appends the placement to a new list. * The function finally returns the new placement list. */ static List * ActivePlacementList(List *placementList) { List *activePlacementList = NIL; ListCell *placementCell = NULL; foreach(placementCell, placementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); WorkerNode *workerNode = NULL; /* check if the worker node for this shard placement is active */ workerNode = FindWorkerNode(placement->nodeName, placement->nodePort); if (workerNode != NULL) { activePlacementList = lappend(activePlacementList, placement); } } return activePlacementList; } /* * LeftRotateList returns a copy of the given list that has been cyclically * shifted to the left by the given rotation count. For this, the function * repeatedly moves the list's first element to the end of the list, and * then returns the newly rotated list. */ static List * LeftRotateList(List *list, uint32 rotateCount) { List *rotatedList = list_copy(list); uint32 rotateIndex = 0; for (rotateIndex = 0; rotateIndex < rotateCount; rotateIndex++) { void *firstElement = linitial(rotatedList); rotatedList = list_delete_first(rotatedList); rotatedList = lappend(rotatedList, firstElement); } return rotatedList; } /* * FindDependedMergeTaskList walks over the given task's depended task list, * finds the merge tasks in the list, and returns those found tasks in a new * list.
*/ static List * FindDependedMergeTaskList(Task *sqlTask) { List *dependedMergeTaskList = NIL; List *dependedTaskList = sqlTask->dependedTaskList; ListCell *dependedTaskCell = NULL; foreach(dependedTaskCell, dependedTaskList) { Task *dependedTask = (Task *) lfirst(dependedTaskCell); if (dependedTask->taskType == MERGE_TASK) { dependedMergeTaskList = lappend(dependedMergeTaskList, dependedTask); } } return dependedMergeTaskList; } /* * AssignDualHashTaskList uses a round-robin algorithm to assign locations to * tasks; these tasks don't have any anchor shards and instead operate on (hash * repartitioned) merged tables. */ static List * AssignDualHashTaskList(List *taskList) { List *assignedTaskList = NIL; ListCell *taskCell = NULL; Task *firstTask = (Task *) linitial(taskList); uint64 jobId = firstTask->jobId; uint32 assignedTaskIndex = 0; /* * We start assigning tasks at an index determined by the jobId. This way, * if subsequent jobs have a small number of tasks, we won't allocate the * tasks to the same worker repeatedly. */ List *workerNodeList = ActiveReadableNodeList(); uint32 workerNodeCount = (uint32) list_length(workerNodeList); uint32 beginningNodeIndex = jobId % workerNodeCount; /* sort worker node list and task list for deterministic results */ workerNodeList = SortList(workerNodeList, CompareWorkerNodes); taskList = SortList(taskList, CompareTasksByTaskId); foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); List *taskPlacementList = NIL; ShardPlacement *primaryPlacement = NULL; uint32 replicaIndex = 0; for (replicaIndex = 0; replicaIndex < ShardReplicationFactor; replicaIndex++) { uint32 assignmentOffset = beginningNodeIndex + assignedTaskIndex + replicaIndex; uint32 assignmentIndex = assignmentOffset % workerNodeCount; WorkerNode *workerNode = list_nth(workerNodeList, assignmentIndex); ShardPlacement *taskPlacement = CitusMakeNode(ShardPlacement); taskPlacement->nodeName = pstrdup(workerNode->workerName); taskPlacement->nodePort = workerNode->workerPort; taskPlacementList = lappend(taskPlacementList, taskPlacement); } task->taskPlacementList = taskPlacementList; primaryPlacement = (ShardPlacement *) linitial(task->taskPlacementList); ereport(DEBUG3, (errmsg("assigned task %u to node %s:%u", task->taskId, primaryPlacement->nodeName, primaryPlacement->nodePort))); assignedTaskList = lappend(assignedTaskList, task); assignedTaskIndex++; } return assignedTaskList; } /* Helper function to compare two tasks by their taskId. */ static int CompareTasksByTaskId(const void *leftElement, const void *rightElement) { const Task *leftTask = *((const Task **) leftElement); const Task *rightTask = *((const Task **) rightElement); uint32 leftTaskId = leftTask->taskId; uint32 rightTaskId = rightTask->taskId; int taskIdDiff = leftTaskId - rightTaskId; return taskIdDiff; } /* * AssignDataFetchDependencies walks over tasks in the given sql or merge task * list. The function then propagates worker node assignments from each sql or * merge task to the task's data fetch dependencies. 
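* As a result, each data fetch task is executed on the same worker placement(s) as the sql or merge task that consumes its output.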
*/ static void AssignDataFetchDependencies(List *taskList) { ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); List *dependedTaskList = task->dependedTaskList; ListCell *dependedTaskCell = NULL; Assert(task->taskPlacementList != NIL); Assert(task->taskType == SQL_TASK || task->taskType == MERGE_TASK); foreach(dependedTaskCell, dependedTaskList) { Task *dependedTask = (Task *) lfirst(dependedTaskCell); if (dependedTask->taskType == SHARD_FETCH_TASK || dependedTask->taskType == MAP_OUTPUT_FETCH_TASK) { dependedTask->taskPlacementList = task->taskPlacementList; } } } } /* * TaskListHighestTaskId walks over tasks in the given task list, finds the task * that has the largest taskId, and returns that taskId. * * Note: This function assumes that the depended taskId's are set before the * taskId's for the given task list. */ static uint32 TaskListHighestTaskId(List *taskList) { uint32 highestTaskId = 0; ListCell *taskCell = NULL; foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); if (task->taskId > highestTaskId) { highestTaskId = task->taskId; } } return highestTaskId; } /* * MergeTableQueryString builds a query string which creates a merge task table * within the job's schema, which should have already been created by the task * tracker protocol. */ static StringInfo MergeTableQueryString(uint32 taskIdIndex, List *targetEntryList) { StringInfo taskTableName = TaskTableName(taskIdIndex); StringInfo mergeTableQueryString = makeStringInfo(); StringInfo mergeTableName = makeStringInfo(); StringInfo columnsString = makeStringInfo(); ListCell *targetEntryCell = NULL; uint32 columnCount = 0; uint32 columnIndex = 0; appendStringInfo(mergeTableName, "%s%s", taskTableName->data, MERGE_TABLE_SUFFIX); columnCount = (uint32) list_length(targetEntryList); foreach(targetEntryCell, targetEntryList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); Node *columnExpression = (Node *) targetEntry->expr; Oid columnTypeId = exprType(columnExpression); int32 columnTypeMod = exprTypmod(columnExpression); char *columnName = NULL; char *columnType = NULL; StringInfo columnNameString = makeStringInfo(); appendStringInfo(columnNameString, MERGE_COLUMN_FORMAT, columnIndex); columnName = columnNameString->data; columnType = format_type_with_typemod(columnTypeId, columnTypeMod); appendStringInfo(columnsString, "%s %s", columnName, columnType); columnIndex++; if (columnIndex != columnCount) { appendStringInfo(columnsString, ", "); } } appendStringInfo(mergeTableQueryString, CREATE_TABLE_COMMAND, mergeTableName->data, columnsString->data); return mergeTableQueryString; } /* * IntermediateTableQueryString builds a query string which creates a task table * by running reduce query on already created merge table. 
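* The generated command takes the form of a CREATE TABLE ... AS statement whose SELECT part is the reduce query rewritten to read from the merge table.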
*/ static StringInfo IntermediateTableQueryString(uint64 jobId, uint32 taskIdIndex, Query *reduceQuery) { StringInfo taskTableName = TaskTableName(taskIdIndex); StringInfo intermediateTableQueryString = makeStringInfo(); StringInfo mergeTableName = makeStringInfo(); StringInfo columnsString = makeStringInfo(); StringInfo taskReduceQueryString = makeStringInfo(); Query *taskReduceQuery = copyObject(reduceQuery); RangeTblEntry *rangeTableEntry = NULL; Alias *referenceNames = NULL; List *columnNames = NIL; List *rangeTableList = NIL; ListCell *columnNameCell = NULL; uint32 columnCount = 0; uint32 columnIndex = 0; columnCount = FinalTargetEntryCount(reduceQuery->targetList); columnNames = DerivedColumnNameList(columnCount, jobId); foreach(columnNameCell, columnNames) { Value *columnNameValue = (Value *) lfirst(columnNameCell); char *columnName = strVal(columnNameValue); appendStringInfo(columnsString, "%s", columnName); columnIndex++; if (columnIndex != columnCount) { appendStringInfo(columnsString, ", "); } } appendStringInfo(mergeTableName, "%s%s", taskTableName->data, MERGE_TABLE_SUFFIX); rangeTableList = taskReduceQuery->rtable; rangeTableEntry = (RangeTblEntry *) linitial(rangeTableList); referenceNames = rangeTableEntry->eref; referenceNames->aliasname = mergeTableName->data; rangeTableEntry->alias = rangeTableEntry->eref; ModifyRangeTblExtraData(rangeTableEntry, GetRangeTblKind(rangeTableEntry), NULL, mergeTableName->data, NIL); pg_get_query_def(taskReduceQuery, taskReduceQueryString); appendStringInfo(intermediateTableQueryString, CREATE_TABLE_AS_COMMAND, taskTableName->data, columnsString->data, taskReduceQueryString->data); return intermediateTableQueryString; } /* * FinalTargetEntryCount returns count of target entries in the final target * entry list. */ static uint32 FinalTargetEntryCount(List *targetEntryList) { uint32 finalTargetEntryCount = 0; ListCell *targetEntryCell = NULL; foreach(targetEntryCell, targetEntryList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); if (!targetEntry->resjunk) { finalTargetEntryCount++; } } return finalTargetEntryCount; } citus-7.0.3/src/backend/distributed/planner/multi_planner.c000066400000000000000000000716601317107136600240430ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_planner.c * General Citus planner code. * * Copyright (c) 2012-2016, Citus Data, Inc. 
*------------------------------------------------------------------------- */ #include "postgres.h" #include <float.h> #include <limits.h> #include "catalog/pg_type.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_nodes.h" #include "distributed/insert_select_planner.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_planner.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_master_planner.h" #include "distributed/multi_router_planner.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "parser/parsetree.h" #include "optimizer/pathnode.h" #include "optimizer/planner.h" #include "utils/lsyscache.h" #include "utils/memutils.h" static List *plannerRestrictionContextList = NIL; /* create custom scan methods for separate executors */ static CustomScanMethods RealTimeCustomScanMethods = { "Citus Real-Time", RealTimeCreateScan }; static CustomScanMethods TaskTrackerCustomScanMethods = { "Citus Task-Tracker", TaskTrackerCreateScan }; static CustomScanMethods RouterCustomScanMethods = { "Citus Router", RouterCreateScan }; static CustomScanMethods CoordinatorInsertSelectCustomScanMethods = { "Citus INSERT ... SELECT via coordinator", CoordinatorInsertSelectCreateScan }; static CustomScanMethods DelayedErrorCustomScanMethods = { "Citus Delayed Error", DelayedErrorCreateScan }; /* local function forward declarations */ static PlannedStmt * CreateDistributedPlan(PlannedStmt *localPlan, Query *originalQuery, Query *query, ParamListInfo boundParams, PlannerRestrictionContext * plannerRestrictionContext); static void AdjustParseTree(Query *parse, bool assignRTEIdentities, bool setPartitionedTablesInherited); static void AssignRTEIdentity(RangeTblEntry *rangeTableEntry, int rteIdentifier); static PlannedStmt * FinalizePlan(PlannedStmt *localPlan, MultiPlan *multiPlan); static PlannedStmt * FinalizeNonRouterPlan(PlannedStmt *localPlan, MultiPlan *multiPlan, CustomScan *customScan); static PlannedStmt * FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan); static void CheckNodeIsDumpable(Node *node); static Node * CheckNodeCopyAndSerialization(Node *node); static List * CopyPlanParamList(List *originalPlanParamList); static PlannerRestrictionContext * CreateAndPushPlannerRestrictionContext(void); static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void); static void PopPlannerRestrictionContext(void); static bool HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams); /* Distributed planner hook */ PlannedStmt * multi_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) { PlannedStmt *result = NULL; bool needsDistributedPlanning = NeedsDistributedPlanning(parse); Query *originalQuery = NULL; PlannerRestrictionContext *plannerRestrictionContext = NULL; bool assignRTEIdentities = false; bool setPartitionedTablesInherited = false; /* * standard_planner scribbles on its input, but for deparsing we need the * unmodified form. So copy once we're sure it's a distributed query.
*/ if (needsDistributedPlanning) { originalQuery = copyObject(parse); assignRTEIdentities = true; setPartitionedTablesInherited = false; AdjustParseTree(parse, assignRTEIdentities, setPartitionedTablesInherited); } /* create a restriction context and put it at the end of the context list */ plannerRestrictionContext = CreateAndPushPlannerRestrictionContext(); PG_TRY(); { /* * First call into standard planner. This is required because the Citus * planner relies on parse tree transformations made by postgres' planner. */ result = standard_planner(parse, cursorOptions, boundParams); if (needsDistributedPlanning) { result = CreateDistributedPlan(result, originalQuery, parse, boundParams, plannerRestrictionContext); } } PG_CATCH(); { PopPlannerRestrictionContext(); PG_RE_THROW(); } PG_END_TRY(); if (needsDistributedPlanning) { assignRTEIdentities = false; setPartitionedTablesInherited = true; AdjustParseTree(parse, assignRTEIdentities, setPartitionedTablesInherited); } /* remove the context from the context list */ PopPlannerRestrictionContext(); /* * In some cases, for example parameterized SQL functions, we may miss that * there is a need for distributed planning. Such cases only become clear after * standard_planner performs some modifications on the parse tree. In such cases * we will simply error out. */ if (!needsDistributedPlanning && NeedsDistributedPlanning(parse)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform distributed planning on this " "query because parameterized queries for SQL " "functions referencing distributed tables are " "not supported"), errhint("Consider using PL/pgSQL functions instead."))); } return result; } /* * AdjustParseTree modifies the query tree by adding RTE identities to the * RTE_RELATIONs and changing the inh flag and relkind of partitioned tables. We * perform these operations to ensure PostgreSQL's standard planner behaves as * we need. * * Please note that we want to avoid modifying the query tree as much as possible * because if PostgreSQL changes the way it uses modified fields, that may break * our logic. */ static void AdjustParseTree(Query *queryTree, bool assignRTEIdentities, bool setPartitionedTablesInherited) { List *rangeTableList = NIL; ListCell *rangeTableCell = NULL; int rteIdentifier = 1; /* extract range table entries for simple relations only */ ExtractRangeTableEntryWalker((Node *) queryTree, &rangeTableList); foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); /* * To be able to track individual RTEs through PostgreSQL's query * planning, we need to be able to figure out whether an RTE is * actually a copy of another, rather than a different one. We * simply number the RTEs starting from 1. * * Note that we're only interested in RTE_RELATIONs and thus assigning * identifiers to those RTEs only. */ if (assignRTEIdentities && rangeTableEntry->rtekind == RTE_RELATION) { AssignRTEIdentity(rangeTableEntry, rteIdentifier++); } /* * We want Postgres to treat partitioned tables as regular relations * (i.e. we do not want to expand them to their partitions). To do this * we set each distributed partitioned table's inh flag to the appropriate * value before and after passing the tree to standard_planner.
*/ if (IsDistributedTable(rangeTableEntry->relid) && PartitionedTable(rangeTableEntry->relid)) { rangeTableEntry->inh = setPartitionedTablesInherited; #if (PG_VERSION_NUM >= 100000) if (setPartitionedTablesInherited) { rangeTableEntry->relkind = RELKIND_PARTITIONED_TABLE; } else { rangeTableEntry->relkind = RELKIND_RELATION; } #endif } } } /* * AssignRTEIdentity assigns the given rteIdentifier to the given range table * entry. * * To be able to track RTEs through postgres' query planning, which copies and * duplicate, and modifies them, we sometimes need to figure out whether two * RTEs are copies of the same original RTE. For that we, hackishly, use a * field normally unused in RTE_RELATION RTEs. * * The assigned identifier better be unique within a plantree. */ static void AssignRTEIdentity(RangeTblEntry *rangeTableEntry, int rteIdentifier) { Assert(rangeTableEntry->rtekind == RTE_RELATION); Assert(rangeTableEntry->values_lists == NIL); rangeTableEntry->values_lists = list_make1_int(rteIdentifier); } /* GetRTEIdentity returns the identity assigned with AssignRTEIdentity. */ int GetRTEIdentity(RangeTblEntry *rte) { Assert(rte->rtekind == RTE_RELATION); Assert(IsA(rte->values_lists, IntList)); Assert(list_length(rte->values_lists) == 1); return linitial_int(rte->values_lists); } /* * IsModifyCommand returns true if the query performs modifications, false * otherwise. */ bool IsModifyCommand(Query *query) { CmdType commandType = query->commandType; if (commandType == CMD_INSERT || commandType == CMD_UPDATE || commandType == CMD_DELETE) { return true; } return false; } /* * IsModifyMultiPlan returns true if the multi plan performs modifications, * false otherwise. */ bool IsModifyMultiPlan(MultiPlan *multiPlan) { bool isModifyMultiPlan = false; CmdType operation = multiPlan->operation; if (operation == CMD_INSERT || operation == CMD_UPDATE || operation == CMD_DELETE) { isModifyMultiPlan = true; } return isModifyMultiPlan; } /* * CreateDistributedPlan encapsulates the logic needed to transform a particular * query into a distributed plan. */ static PlannedStmt * CreateDistributedPlan(PlannedStmt *localPlan, Query *originalQuery, Query *query, ParamListInfo boundParams, PlannerRestrictionContext *plannerRestrictionContext) { MultiPlan *distributedPlan = NULL; PlannedStmt *resultPlan = NULL; bool hasUnresolvedParams = false; if (HasUnresolvedExternParamsWalker((Node *) originalQuery, boundParams)) { hasUnresolvedParams = true; } if (IsModifyCommand(query)) { EnsureModificationsCanRun(); if (InsertSelectIntoDistributedTable(originalQuery)) { distributedPlan = CreateInsertSelectPlan(originalQuery, plannerRestrictionContext); } else { /* modifications are always routed through the same planner/executor */ distributedPlan = CreateModifyPlan(originalQuery, query, plannerRestrictionContext); } Assert(distributedPlan); } else { /* * For select queries we, if router executor is enabled, first try to * plan the query as a router query. If not supported, otherwise try * the full blown plan/optimize/physical planing process needed to * produce distributed query plans. 
*/ if (EnableRouterExecution) { RelationRestrictionContext *relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; distributedPlan = CreateRouterPlan(originalQuery, query, relationRestrictionContext); /* for debugging it's useful to display why query was not router plannable */ if (distributedPlan && distributedPlan->planningError) { RaiseDeferredError(distributedPlan->planningError, DEBUG1); } } /* * Router didn't yield a plan, try the full distributed planner. As * real-time/task-tracker don't support prepared statement parameters, * skip planning in that case (we'll later trigger an error in that * case if necessary). */ if ((!distributedPlan || distributedPlan->planningError) && !hasUnresolvedParams) { MultiTreeRoot *logicalPlan = MultiLogicalPlanCreate(originalQuery, query, plannerRestrictionContext, boundParams); MultiLogicalPlanOptimize(logicalPlan); /* * This check is here to make it likely that all node types used in * Citus are dumpable. Explain can dump logical and physical plans * using the extended outfuncs infrastructure, but it's infeasible to * test most plans. MultiQueryContainerNode always serializes the * physical plan, so there's no need to check that separately. */ CheckNodeIsDumpable((Node *) logicalPlan); /* Create the physical plan */ distributedPlan = MultiPhysicalPlanCreate(logicalPlan, plannerRestrictionContext); /* distributed plan currently should always succeed or error out */ Assert(distributedPlan && distributedPlan->planningError == NULL); } } /* * If no plan was generated, prepare a generic error to be emitted. * Normally this error message will never returned to the user, as it's * usually due to unresolved prepared statement parameters - in that case * the logic below will force a custom plan (i.e. with parameters bound to * specific values) to be generated. But sql (not plpgsql) functions * unfortunately don't go through a codepath supporting custom plans - so * we still need to have an error prepared. */ if (!distributedPlan) { /* currently always should have a more specific error otherwise */ Assert(hasUnresolvedParams); distributedPlan = CitusMakeNode(MultiPlan); distributedPlan->planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "could not create distributed plan", "Possibly this is caused by the use of parameters in SQL " "functions, which is not supported in Citus.", "Consider using PL/pgSQL functions instead."); } /* * Error out if none of the planners resulted in a usable plan, unless the * error was possibly triggered by missing parameters. In that case we'll * not error out here, but instead rely on postgres' custom plan logic. * Postgres re-plans prepared statements the first five executions * (i.e. it produces custom plans), after that the cost of a generic plan * is compared with the average custom plan cost. We support otherwise * unsupported prepared statement parameters by assigning an exorbitant * cost to the unsupported query. That'll lead to the custom plan being * chosen. But for that to be possible we can't error out here, as * otherwise that logic is never reached. */ if (distributedPlan->planningError && !hasUnresolvedParams) { RaiseDeferredError(distributedPlan->planningError, ERROR); } /* create final plan by combining local plan with distributed plan */ resultPlan = FinalizePlan(localPlan, distributedPlan); /* * As explained above, force planning costs to be unrealistically high if * query planning failed (possibly) due to prepared statement parameters. 
*/ if (distributedPlan->planningError && hasUnresolvedParams) { /* * Arbitraryly high cost, but low enough that it can be added up * without overflowing by choose_custom_plan(). */ resultPlan->planTree->total_cost = FLT_MAX / 100000000; } return resultPlan; } /* * GetMultiPlan returns the associated MultiPlan for a CustomScan. */ MultiPlan * GetMultiPlan(CustomScan *customScan) { Node *node = NULL; MultiPlan *multiPlan = NULL; Assert(list_length(customScan->custom_private) == 1); node = (Node *) linitial(customScan->custom_private); Assert(CitusIsA(node, MultiPlan)); node = CheckNodeCopyAndSerialization(node); /* * When using prepared statements the same plan gets reused across * multiple statements and transactions. We make several modifications * to the MultiPlan during execution such as assigning task placements * and evaluating functions and parameters. These changes should not * persist, so we always work on a copy. */ multiPlan = (MultiPlan *) copyObject(node); return multiPlan; } /* * FinalizePlan combines local plan with distributed plan and creates a plan * which can be run by the PostgreSQL executor. */ static PlannedStmt * FinalizePlan(PlannedStmt *localPlan, MultiPlan *multiPlan) { PlannedStmt *finalPlan = NULL; CustomScan *customScan = makeNode(CustomScan); Node *multiPlanData = NULL; MultiExecutorType executorType = MULTI_EXECUTOR_INVALID_FIRST; if (!multiPlan->planningError) { executorType = JobExecutorType(multiPlan); } switch (executorType) { case MULTI_EXECUTOR_REAL_TIME: { customScan->methods = &RealTimeCustomScanMethods; break; } case MULTI_EXECUTOR_TASK_TRACKER: { customScan->methods = &TaskTrackerCustomScanMethods; break; } case MULTI_EXECUTOR_ROUTER: { customScan->methods = &RouterCustomScanMethods; break; } case MULTI_EXECUTOR_COORDINATOR_INSERT_SELECT: { customScan->methods = &CoordinatorInsertSelectCustomScanMethods; break; } default: { customScan->methods = &DelayedErrorCustomScanMethods; break; } } multiPlan->relationIdList = localPlan->relationOids; multiPlanData = (Node *) multiPlan; customScan->custom_private = list_make1(multiPlanData); customScan->flags = CUSTOMPATH_SUPPORT_BACKWARD_SCAN; if (multiPlan->masterQuery) { finalPlan = FinalizeNonRouterPlan(localPlan, multiPlan, customScan); } else { finalPlan = FinalizeRouterPlan(localPlan, customScan); } return finalPlan; } /* * FinalizeNonRouterPlan gets the distributed custom scan plan, and creates the * final master select plan on the top of this distributed plan for real-time * and task-tracker executors. */ static PlannedStmt * FinalizeNonRouterPlan(PlannedStmt *localPlan, MultiPlan *multiPlan, CustomScan *customScan) { PlannedStmt *finalPlan = NULL; finalPlan = MasterNodeSelectPlan(multiPlan, customScan); finalPlan->queryId = localPlan->queryId; finalPlan->utilityStmt = localPlan->utilityStmt; /* add original range table list for access permission checks */ finalPlan->rtable = list_concat(finalPlan->rtable, localPlan->rtable); return finalPlan; } /* * FinalizeRouterPlan gets a CustomScan node which already wrapped distributed * part of a router plan and sets it as the direct child of the router plan * because we don't run any query on master node for router executable queries. * Here, we also rebuild the column list to read from the remote scan. 
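 *
 * For orientation only, the finished router plan is conceptually shaped like
 *
 *     Custom Scan (Citus Router)
 *           (single task sent to the target shard placement)
 *
 * with a lone "remote_scan" range table entry whose columns mirror the
 * original (non-resjunk) target list built below.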
*/ static PlannedStmt * FinalizeRouterPlan(PlannedStmt *localPlan, CustomScan *customScan) { PlannedStmt *routerPlan = NULL; RangeTblEntry *remoteScanRangeTableEntry = NULL; ListCell *targetEntryCell = NULL; List *targetList = NIL; List *columnNameList = NIL; /* we will have custom scan range table entry as the first one in the list */ int customScanRangeTableIndex = 1; /* build a targetlist to read from the custom scan output */ foreach(targetEntryCell, localPlan->planTree->targetlist) { TargetEntry *targetEntry = lfirst(targetEntryCell); TargetEntry *newTargetEntry = NULL; Var *newVar = NULL; Value *columnName = NULL; Assert(IsA(targetEntry, TargetEntry)); /* * This is unlikely to be hit because we would not need resjunk stuff * at the toplevel of a router query - all things needing it have been * pushed down. */ if (targetEntry->resjunk) { continue; } /* build target entry pointing to remote scan range table entry */ newVar = makeVarFromTargetEntry(customScanRangeTableIndex, targetEntry); newTargetEntry = flatCopyTargetEntry(targetEntry); newTargetEntry->expr = (Expr *) newVar; targetList = lappend(targetList, newTargetEntry); columnName = makeString(targetEntry->resname); columnNameList = lappend(columnNameList, columnName); } customScan->scan.plan.targetlist = targetList; routerPlan = makeNode(PlannedStmt); routerPlan->planTree = (Plan *) customScan; remoteScanRangeTableEntry = RemoteScanRangeTableEntry(columnNameList); routerPlan->rtable = list_make1(remoteScanRangeTableEntry); /* add original range table list for access permission checks */ routerPlan->rtable = list_concat(routerPlan->rtable, localPlan->rtable); routerPlan->canSetTag = true; routerPlan->relationOids = NIL; routerPlan->queryId = localPlan->queryId; routerPlan->utilityStmt = localPlan->utilityStmt; routerPlan->commandType = localPlan->commandType; routerPlan->hasReturning = localPlan->hasReturning; return routerPlan; } /* * RemoteScanRangeTableEntry creates a range table entry from given column name * list to represent a remote scan. */ RangeTblEntry * RemoteScanRangeTableEntry(List *columnNameList) { RangeTblEntry *remoteScanRangeTableEntry = makeNode(RangeTblEntry); /* we use RTE_VALUES for custom scan because we can't look up relation */ remoteScanRangeTableEntry->rtekind = RTE_VALUES; remoteScanRangeTableEntry->eref = makeAlias("remote_scan", columnNameList); remoteScanRangeTableEntry->inh = false; remoteScanRangeTableEntry->inFromCl = true; return remoteScanRangeTableEntry; } /* * CheckNodeIsDumpable checks that the passed node can be dumped using * nodeToString(). As this checks is expensive, it's only active when * assertions are enabled. */ static void CheckNodeIsDumpable(Node *node) { #ifdef USE_ASSERT_CHECKING char *out = nodeToString(node); pfree(out); #endif } /* * CheckNodeCopyAndSerialization checks copy/dump/read functions * for nodes and returns copy of the input. * * It is only active when assertions are enabled, otherwise it returns * the input directly. We use this to confirm that our serialization * and copy logic produces the correct plan during regression tests. * * It does not check string equality on node dumps due to differences * in some Postgres types. 
*/ static Node * CheckNodeCopyAndSerialization(Node *node) { #ifdef USE_ASSERT_CHECKING char *out = nodeToString(node); Node *deserializedNode = (Node *) stringToNode(out); Node *nodeCopy = copyObject(deserializedNode); char *outCopy = nodeToString(nodeCopy); pfree(out); pfree(outCopy); return nodeCopy; #else return node; #endif } /* * multi_join_restriction_hook is a hook called by postgresql standard planner * to notify us about various planning information regarding joins. We use * it to learn about the joining column. */ void multi_join_restriction_hook(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinType jointype, JoinPathExtraData *extra) { PlannerRestrictionContext *plannerRestrictionContext = NULL; JoinRestrictionContext *joinRestrictionContext = NULL; JoinRestriction *joinRestriction = NULL; MemoryContext restrictionsMemoryContext = NULL; MemoryContext oldMemoryContext = NULL; List *restrictInfoList = NIL; /* * Use a memory context that's guaranteed to live long enough, could be * called in a more shorted lived one (e.g. with GEQO). */ plannerRestrictionContext = CurrentPlannerRestrictionContext(); restrictionsMemoryContext = plannerRestrictionContext->memoryContext; oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext); /* * We create a copy of restrictInfoList because it may be created in a memory * context which will be deleted when we still need it, thus we create a copy * of it in our memory context. */ restrictInfoList = copyObject(extra->restrictlist); joinRestrictionContext = plannerRestrictionContext->joinRestrictionContext; Assert(joinRestrictionContext != NULL); joinRestriction = palloc0(sizeof(JoinRestriction)); joinRestriction->joinType = jointype; joinRestriction->joinRestrictInfoList = restrictInfoList; joinRestriction->plannerInfo = root; joinRestriction->innerrel = innerrel; joinRestriction->outerrel = outerrel; joinRestrictionContext->joinRestrictionList = lappend(joinRestrictionContext->joinRestrictionList, joinRestriction); MemoryContextSwitchTo(oldMemoryContext); } /* * multi_relation_restriction_hook is a hook called by postgresql standard planner * to notify us about various planning information regarding a relation. We use * it to retrieve restrictions on relations. */ void multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo, Index index, RangeTblEntry *rte) { PlannerRestrictionContext *plannerRestrictionContext = NULL; RelationRestrictionContext *relationRestrictionContext = NULL; MemoryContext restrictionsMemoryContext = NULL; MemoryContext oldMemoryContext = NULL; RelationRestriction *relationRestriction = NULL; DistTableCacheEntry *cacheEntry = NULL; bool distributedTable = false; bool localTable = false; if (rte->rtekind != RTE_RELATION) { return; } /* * Use a memory context that's guaranteed to live long enough, could be * called in a more shorted lived one (e.g. with GEQO). 
*/ plannerRestrictionContext = CurrentPlannerRestrictionContext(); restrictionsMemoryContext = plannerRestrictionContext->memoryContext; oldMemoryContext = MemoryContextSwitchTo(restrictionsMemoryContext); distributedTable = IsDistributedTable(rte->relid); localTable = !distributedTable; relationRestriction = palloc0(sizeof(RelationRestriction)); relationRestriction->index = index; relationRestriction->relationId = rte->relid; relationRestriction->rte = rte; relationRestriction->relOptInfo = relOptInfo; relationRestriction->distributedRelation = distributedTable; relationRestriction->plannerInfo = root; relationRestriction->parentPlannerInfo = root->parent_root; relationRestriction->prunedShardIntervalList = NIL; /* see comments on GetVarFromAssignedParam() */ if (relationRestriction->parentPlannerInfo) { relationRestriction->parentPlannerParamList = CopyPlanParamList(root->parent_root->plan_params); } relationRestrictionContext = plannerRestrictionContext->relationRestrictionContext; relationRestrictionContext->hasDistributedRelation |= distributedTable; relationRestrictionContext->hasLocalRelation |= localTable; /* * We're also keeping track of whether all participant * tables are reference tables. */ if (distributedTable) { cacheEntry = DistributedTableCacheEntry(rte->relid); relationRestrictionContext->allReferenceTables &= (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE); } relationRestrictionContext->relationRestrictionList = lappend(relationRestrictionContext->relationRestrictionList, relationRestriction); MemoryContextSwitchTo(oldMemoryContext); } /* * CopyPlanParamList deep copies the input PlannerParamItem list and returns the newly * allocated list. * Note that we cannot use copyObject() function directly since there is no support for * copying PlannerParamItem structs. */ static List * CopyPlanParamList(List *originalPlanParamList) { ListCell *planParamCell = NULL; List *copiedPlanParamList = NIL; foreach(planParamCell, originalPlanParamList) { PlannerParamItem *originalParamItem = lfirst(planParamCell); PlannerParamItem *copiedParamItem = makeNode(PlannerParamItem); copiedParamItem->paramId = originalParamItem->paramId; copiedParamItem->item = copyObject(originalParamItem->item); copiedPlanParamList = lappend(copiedPlanParamList, copiedParamItem); } return copiedPlanParamList; } /* * CreateAndPushPlannerRestrictionContext creates a new relation restriction context * and a new join context, inserts it to the beginning of the * plannerRestrictionContextList. Finally, the planner restriction context is * inserted to the beginning of the plannerRestrictionContextList and it is returned. */ static PlannerRestrictionContext * CreateAndPushPlannerRestrictionContext(void) { PlannerRestrictionContext *plannerRestrictionContext = palloc0(sizeof(PlannerRestrictionContext)); plannerRestrictionContext->relationRestrictionContext = palloc0(sizeof(RelationRestrictionContext)); plannerRestrictionContext->joinRestrictionContext = palloc0(sizeof(JoinRestrictionContext)); plannerRestrictionContext->memoryContext = CurrentMemoryContext; /* we'll apply logical AND as we add tables */ plannerRestrictionContext->relationRestrictionContext->allReferenceTables = true; plannerRestrictionContextList = lcons(plannerRestrictionContext, plannerRestrictionContextList); return plannerRestrictionContext; } /* * CurrentRestrictionContext returns the the most recently added * PlannerRestrictionContext from the plannerRestrictionContextList list. 
*/ static PlannerRestrictionContext * CurrentPlannerRestrictionContext(void) { PlannerRestrictionContext *plannerRestrictionContext = NULL; Assert(plannerRestrictionContextList != NIL); plannerRestrictionContext = (PlannerRestrictionContext *) linitial(plannerRestrictionContextList); return plannerRestrictionContext; } /* * PopPlannerRestrictionContext removes the most recently added restriction contexts from * the planner restriction context list. The function assumes the list is not empty. */ static void PopPlannerRestrictionContext(void) { plannerRestrictionContextList = list_delete_first(plannerRestrictionContextList); } /* * HasUnresolvedExternParamsWalker returns true if the passed in expression * has external parameters that are not contained in boundParams, false * otherwise. */ static bool HasUnresolvedExternParamsWalker(Node *expression, ParamListInfo boundParams) { if (expression == NULL) { return false; } if (IsA(expression, Param)) { Param *param = (Param *) expression; int paramId = param->paramid; /* only care about user supplied parameters */ if (param->paramkind != PARAM_EXTERN) { return false; } /* check whether parameter is available (and valid) */ if (boundParams && paramId > 0 && paramId <= boundParams->numParams) { ParamExternData *externParam = &boundParams->params[paramId - 1]; /* give hook a chance in case parameter is dynamic */ if (!OidIsValid(externParam->ptype) && boundParams->paramFetch != NULL) { (*boundParams->paramFetch)(boundParams, paramId); } if (OidIsValid(externParam->ptype)) { return false; } } return true; } /* keep traversing */ if (IsA(expression, Query)) { return query_tree_walker((Query *) expression, HasUnresolvedExternParamsWalker, boundParams, 0); } else { return expression_tree_walker(expression, HasUnresolvedExternParamsWalker, boundParams); } } citus-7.0.3/src/backend/distributed/planner/multi_router_planner.c000066400000000000000000002245431317107136600254430ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_router_planner.c * * This file contains functions to plan single shard queries * including distributed table modifications. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include #include "access/stratnum.h" #include "access/xact.h" #include "catalog/pg_opfamily.h" #include "distributed/citus_clauses.h" #include "catalog/pg_type.h" #include "distributed/colocation_utils.h" #include "distributed/citus_nodes.h" #include "distributed/citus_nodefuncs.h" #include "distributed/deparse_shard_query.h" #include "distributed/distribution_column.h" #include "distributed/errormessage.h" #include "distributed/insert_select_planner.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/listutils.h" #include "distributed/citus_ruleutils.h" #include "distributed/relation_restriction_equivalence.h" #include "distributed/relay_utility.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/shard_pruning.h" #include "executor/execdesc.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "optimizer/clauses.h" #include "optimizer/joininfo.h" #include "optimizer/pathnode.h" #include "optimizer/paths.h" #include "optimizer/predtest.h" #include "optimizer/restrictinfo.h" #include "optimizer/var.h" #include "parser/parsetree.h" #include "parser/parse_oper.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/typcache.h" #include "catalog/pg_proc.h" #include "optimizer/planmain.h" /* intermediate value for INSERT processing */ typedef struct InsertValues { Expr *partitionValueExpr; /* partition value provided in INSERT row */ List *rowValues; /* full values list of INSERT row, possibly NIL */ int64 shardId; /* target shard for this row, possibly invalid */ Index listIndex; /* index to make our sorting stable */ } InsertValues; /* * A ModifyRoute encapsulates the the information needed to route modifications * to the appropriate shard. For a single-shard modification, only one route * is needed, but in the case of e.g. a multi-row INSERT, lists of these values * will help divide the rows by their destination shards, permitting later * shard-and-row-specific extension of the original SQL. 
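 *
 * A hypothetical sketch: for
 *
 *     INSERT INTO items (key, value) VALUES (1, 'a'), (2, 'b'), (17, 'c');
 *
 * rows whose partition values hash to the same shard are collected into one
 * ModifyRoute, so each target shard later receives exactly one shard-specific
 * INSERT containing only its own rows.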
*/ typedef struct ModifyRoute { int64 shardId; /* identifier of target shard */ List *rowValuesLists; /* for multi-row INSERTs, list of rows to be inserted */ } ModifyRoute; typedef struct WalkerState { bool containsVar; bool varArgument; bool badCoalesce; } WalkerState; bool EnableRouterExecution = true; /* planner functions forward declarations */ static MultiPlan * CreateSingleTaskRouterPlan(Query *originalQuery, Query *query, RelationRestrictionContext * restrictionContext); static bool MasterIrreducibleExpression(Node *expression, bool *varArgument, bool *badCoalesce); static bool MasterIrreducibleExpressionWalker(Node *expression, WalkerState *state); static bool MasterIrreducibleExpressionFunctionChecker(Oid func_id, void *context); static bool TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTree); static Job * RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **planningError); static void ErrorIfNoShardsExist(DistTableCacheEntry *cacheEntry); static bool CanShardPrune(Oid distributedTableId, Query *query); static Job * CreateJob(Query *query); static Task * CreateTask(TaskType taskType); static Job * RouterJob(Query *originalQuery, RelationRestrictionContext *restrictionContext, DeferredErrorMessage **planningError); static bool RelationPrunesToMultipleShards(List *relationShardList); static List * TargetShardIntervalsForRouter(Query *query, RelationRestrictionContext *restrictionContext, bool *multiShardQuery); static List * WorkersContainingAllShards(List *prunedShardIntervalsList); static void NormalizeMultiRowInsertTargetList(Query *query); static List * BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError); static List * GroupInsertValuesByShardId(List *insertValuesList); static List * ExtractInsertValuesList(Query *query, Var *partitionColumn); static bool MultiRouterPlannableQuery(Query *query, RelationRestrictionContext *restrictionContext); static DeferredErrorMessage * ErrorIfQueryHasModifyingCTE(Query *queryTree); static bool UpdateOrDeleteQuery(Query *query); static RangeTblEntry * GetUpdateOrDeleteRTE(List *rangeTableList); static bool UpdateOrDeleteRTE(RangeTblEntry *rangeTableEntry); static bool SelectsFromDistributedTable(List *rangeTableList); #if (PG_VERSION_NUM >= 100000) static List * get_all_actual_clauses(List *restrictinfo_list); #endif static int CompareInsertValuesByShardId(const void *leftElement, const void *rightElement); /* * CreateRouterPlan attempts to create a router executor plan for the given * SELECT statement. If planning fails either NULL is returned, or * ->planningError is set to a description of the failure. */ MultiPlan * CreateRouterPlan(Query *originalQuery, Query *query, RelationRestrictionContext *restrictionContext) { Assert(EnableRouterExecution); if (MultiRouterPlannableQuery(query, restrictionContext)) { return CreateSingleTaskRouterPlan(originalQuery, query, restrictionContext); } /* * TODO: Instead have MultiRouterPlannableQuery set an error describing * why router cannot support the query. */ return NULL; } /* * CreateModifyPlan attempts to create a plan the given modification * statement. If planning fails ->planningError is set to a description of * the failure. 
*/ MultiPlan * CreateModifyPlan(Query *originalQuery, Query *query, PlannerRestrictionContext *plannerRestrictionContext) { Job *job = NULL; MultiPlan *multiPlan = CitusMakeNode(MultiPlan); bool multiShardQuery = false; multiPlan->operation = query->commandType; multiPlan->planningError = ModifyQuerySupported(query, multiShardQuery); if (multiPlan->planningError != NULL) { return multiPlan; } if (UpdateOrDeleteQuery(query)) { RelationRestrictionContext *restrictionContext = plannerRestrictionContext->relationRestrictionContext; job = RouterJob(originalQuery, restrictionContext, &multiPlan->planningError); } else { job = RouterInsertJob(originalQuery, query, &multiPlan->planningError); } if (multiPlan->planningError != NULL) { return multiPlan; } ereport(DEBUG2, (errmsg("Creating router plan"))); multiPlan->workerJob = job; multiPlan->masterQuery = NULL; multiPlan->routerExecutable = true; multiPlan->hasReturning = false; if (list_length(originalQuery->returningList) > 0) { multiPlan->hasReturning = true; } return multiPlan; } /* * CreateSingleTaskRouterPlan creates a physical plan for given query. The created plan is * either a modify task that changes a single shard, or a router task that returns * query results from a single worker. Supported modify queries (insert/update/delete) * are router plannable by default. If query is not router plannable then either NULL is * returned, or the returned plan has planningError set to a description of the problem. */ static MultiPlan * CreateSingleTaskRouterPlan(Query *originalQuery, Query *query, RelationRestrictionContext *restrictionContext) { Job *job = NULL; MultiPlan *multiPlan = CitusMakeNode(MultiPlan); multiPlan->operation = query->commandType; /* FIXME: this should probably rather be inlined into CreateRouterPlan */ multiPlan->planningError = ErrorIfQueryHasModifyingCTE(query); if (multiPlan->planningError) { return multiPlan; } job = RouterJob(originalQuery, restrictionContext, &multiPlan->planningError); if (multiPlan->planningError) { /* query cannot be handled by this planner */ return NULL; } ereport(DEBUG2, (errmsg("Creating router plan"))); multiPlan->workerJob = job; multiPlan->masterQuery = NULL; multiPlan->routerExecutable = true; multiPlan->hasReturning = false; return multiPlan; } /* * ShardIntervalOpExpressions returns a list of OpExprs with exactly two * items in it. The list consists of shard interval ranges with partition columns * such as (partitionColumn >= shardMinValue) and (partitionColumn <= shardMaxValue). * * The function returns hashed columns generated by MakeInt4Column() for the hash * partitioned tables in place of partition columns. * * The function returns NIL if shard interval does not belong to a hash, * range and append distributed tables. * * NB: If you update this, also look at PrunableExpressionsWalker(). 
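 *
 * Illustration (shard bounds are hypothetical): for a range or append
 * distributed table the returned constraint is conceptually
 *
 *     (partition_column >= shard_min_value AND partition_column <= shard_max_value)
 *
 * while for hash distribution the partition column is replaced by the int4
 * column built via MakeInt4Column(), compared against the hashed bounds.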
*/ List * ShardIntervalOpExpressions(ShardInterval *shardInterval, Index rteIndex) { Oid relationId = shardInterval->relationId; char partitionMethod = PartitionMethod(shardInterval->relationId); Var *partitionColumn = NULL; Node *baseConstraint = NULL; if (partitionMethod == DISTRIBUTE_BY_HASH) { partitionColumn = MakeInt4Column(); } else if (partitionMethod == DISTRIBUTE_BY_RANGE || partitionMethod == DISTRIBUTE_BY_APPEND) { Assert(rteIndex > 0); partitionColumn = PartitionColumn(relationId, rteIndex); } else { /* do not add any shard range interval for reference tables */ return NIL; } /* build the base expression for constraint */ baseConstraint = BuildBaseConstraint(partitionColumn); /* walk over shard list and check if shards can be pruned */ if (shardInterval->minValueExists && shardInterval->maxValueExists) { UpdateConstraint(baseConstraint, shardInterval); } return list_make1(baseConstraint); } /* * AddShardIntervalRestrictionToSelect adds the following range boundaries * with the given subquery and shardInterval: * * hashfunc(partitionColumn) >= $lower_bound AND * hashfunc(partitionColumn) <= $upper_bound * * The function expects and asserts that subquery's target list contains a partition * column value. Thus, this function should never be called with reference tables. */ void AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval) { List *targetList = subqery->targetList; ListCell *targetEntryCell = NULL; Var *targetPartitionColumnVar = NULL; Oid integer4GEoperatorId = InvalidOid; Oid integer4LEoperatorId = InvalidOid; TypeCacheEntry *typeEntry = NULL; FuncExpr *hashFunctionExpr = NULL; OpExpr *greaterThanAndEqualsBoundExpr = NULL; OpExpr *lessThanAndEqualsBoundExpr = NULL; List *boundExpressionList = NIL; Expr *andedBoundExpressions = NULL; /* iterate through the target entries */ foreach(targetEntryCell, targetList) { TargetEntry *targetEntry = lfirst(targetEntryCell); if (IsPartitionColumn(targetEntry->expr, subqery) && IsA(targetEntry->expr, Var)) { targetPartitionColumnVar = (Var *) targetEntry->expr; break; } } /* we should have found target partition column */ Assert(targetPartitionColumnVar != NULL); integer4GEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID, INT4OID, BTGreaterEqualStrategyNumber); integer4LEoperatorId = get_opfamily_member(INTEGER_BTREE_FAM_OID, INT4OID, INT4OID, BTLessEqualStrategyNumber); /* ensure that we find the correct operators */ Assert(integer4GEoperatorId != InvalidOid); Assert(integer4LEoperatorId != InvalidOid); /* look up the type cache */ typeEntry = lookup_type_cache(targetPartitionColumnVar->vartype, TYPECACHE_HASH_PROC_FINFO); /* probable never possible given that the tables are already hash partitioned */ if (!OidIsValid(typeEntry->hash_proc_finfo.fn_oid)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("could not identify a hash function for type %s", format_type_be(targetPartitionColumnVar->vartype)))); } /* generate hashfunc(partCol) expression */ hashFunctionExpr = makeNode(FuncExpr); hashFunctionExpr->funcid = CitusWorkerHashFunctionId(); hashFunctionExpr->args = list_make1(targetPartitionColumnVar); /* hash functions always return INT4 */ hashFunctionExpr->funcresulttype = INT4OID; /* generate hashfunc(partCol) >= shardMinValue OpExpr */ greaterThanAndEqualsBoundExpr = (OpExpr *) make_opclause(integer4GEoperatorId, InvalidOid, false, (Expr *) hashFunctionExpr, (Expr *) MakeInt4Constant(shardInterval->minValue), targetPartitionColumnVar->varcollid, 
targetPartitionColumnVar->varcollid); /* update the operators with correct operator numbers and function ids */ greaterThanAndEqualsBoundExpr->opfuncid = get_opcode(greaterThanAndEqualsBoundExpr->opno); greaterThanAndEqualsBoundExpr->opresulttype = get_func_rettype(greaterThanAndEqualsBoundExpr->opfuncid); /* generate hashfunc(partCol) <= shardMinValue OpExpr */ lessThanAndEqualsBoundExpr = (OpExpr *) make_opclause(integer4LEoperatorId, InvalidOid, false, (Expr *) hashFunctionExpr, (Expr *) MakeInt4Constant(shardInterval->maxValue), targetPartitionColumnVar->varcollid, targetPartitionColumnVar->varcollid); /* update the operators with correct operator numbers and function ids */ lessThanAndEqualsBoundExpr->opfuncid = get_opcode(lessThanAndEqualsBoundExpr->opno); lessThanAndEqualsBoundExpr->opresulttype = get_func_rettype(lessThanAndEqualsBoundExpr->opfuncid); /* finally add the operators to a list and make them explicitly anded */ boundExpressionList = lappend(boundExpressionList, greaterThanAndEqualsBoundExpr); boundExpressionList = lappend(boundExpressionList, lessThanAndEqualsBoundExpr); andedBoundExpressions = make_ands_explicit(boundExpressionList); /* finally add the quals */ if (subqery->jointree->quals == NULL) { subqery->jointree->quals = (Node *) andedBoundExpressions; } else { subqery->jointree->quals = make_and_qual(subqery->jointree->quals, (Node *) andedBoundExpressions); } } /* * ExtractSelectRangeTableEntry returns the range table entry of the subquery. * Note that the function expects and asserts that the input query be * an INSERT...SELECT query. */ RangeTblEntry * ExtractSelectRangeTableEntry(Query *query) { List *fromList = NULL; RangeTblRef *reference = NULL; RangeTblEntry *subqueryRte = NULL; Assert(InsertSelectIntoDistributedTable(query)); /* * Since we already asserted InsertSelectIntoDistributedTable() it is safe to access * both lists */ fromList = query->jointree->fromlist; reference = linitial(fromList); subqueryRte = rt_fetch(reference->rtindex, query->rtable); return subqueryRte; } /* * ExtractInsertRangeTableEntry returns the INSERT'ed table's range table entry. * Note that the function expects and asserts that the input query be * an INSERT...SELECT query. */ RangeTblEntry * ExtractInsertRangeTableEntry(Query *query) { int resultRelation = query->resultRelation; List *rangeTableList = query->rtable; RangeTblEntry *insertRTE = NULL; insertRTE = rt_fetch(resultRelation, rangeTableList); return insertRTE; } /* * ModifyQuerySupported returns NULL if the query only contains supported * features, otherwise it returns an error description. */ DeferredErrorMessage * ModifyQuerySupported(Query *queryTree, bool multiShardQuery) { Oid distributedTableId = ExtractFirstDistributedTableId(queryTree); uint32 rangeTableId = 1; Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); bool isCoordinator = IsCoordinator(); List *rangeTableList = NIL; ListCell *rangeTableCell = NULL; uint32 queryTableCount = 0; bool specifiesPartitionValue = false; ListCell *setTargetCell = NULL; List *onConflictSet = NIL; Node *arbiterWhere = NULL; Node *onConflictWhere = NULL; CmdType commandType = queryTree->commandType; /* * Reject subqueries which are in SELECT or WHERE clause. * Queries which include subqueries in FROM clauses are rejected below. */ if (queryTree->hasSubLinks == true) { /* * We support UPDATE and DELETE with subqueries unless they are multi * shard queries. 
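 *
 * In other words (hypothetical example), a statement such as
 *
 *     UPDATE orders
 *     SET status = 'done'
 *     WHERE order_id = 42
 *       AND EXISTS (SELECT 1 FROM order_events WHERE order_id = 42);
 *
 * is not rejected by this check as long as it prunes to a single shard; the
 * multi-shard variant without the order_id filter is refused below.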
*/ if (!UpdateOrDeleteQuery(queryTree) || multiShardQuery) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed planning for the given " "modifications", "Subqueries are not supported in distributed " "modifications.", NULL); } } /* reject queries which include CommonTableExpr */ if (queryTree->cteList != NIL) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "common table expressions are not supported in distributed " "modifications", NULL, NULL); } /* extract range table entries */ ExtractRangeTableEntryWalker((Node *) queryTree, &rangeTableList); foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); bool referenceTable = false; if (rangeTableEntry->rtekind == RTE_RELATION) { /* * We are sure that the table should be distributed, therefore no need to * call IsDistributedTable() here and DistributedTableCacheEntry will * error out if the table is not distributed */ DistTableCacheEntry *distTableEntry = DistributedTableCacheEntry(rangeTableEntry->relid); if (distTableEntry->partitionMethod == DISTRIBUTE_BY_NONE) { referenceTable = true; } if (referenceTable && !isCoordinator) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed planning for the given" " modification", "Modifications to reference tables are " "supported only from the coordinator.", NULL); } queryTableCount++; /* we do not expect to see a view in modify query */ if (rangeTableEntry->relkind == RELKIND_VIEW) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot modify views over distributed tables", NULL, NULL); } } else if (rangeTableEntry->rtekind == RTE_VALUES) { /* do nothing, this type is supported */ } else { char *rangeTableEntryErrorDetail = NULL; /* * We support UPDATE and DELETE with subqueries and joins unless * they are multi shard queries. */ if (UpdateOrDeleteQuery(queryTree) && !multiShardQuery) { continue; } /* * Error out for rangeTableEntries that we do not support. * We do not explicitly specify "in FROM clause" in the error detail * for the features that we do not support at all (SUBQUERY, JOIN). * We do not need to check for RTE_CTE because all common table expressions * are rejected above with queryTree->cteList check. */ if (rangeTableEntry->rtekind == RTE_SUBQUERY) { rangeTableEntryErrorDetail = "Subqueries are not supported in" " distributed modifications."; } else if (rangeTableEntry->rtekind == RTE_JOIN) { rangeTableEntryErrorDetail = "Joins are not supported in distributed" " modifications."; } else if (rangeTableEntry->rtekind == RTE_FUNCTION) { rangeTableEntryErrorDetail = "Functions must not appear in the FROM" " clause of a distributed modifications."; } else { rangeTableEntryErrorDetail = "Unrecognized range table entry."; } return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed planning for the given " "modifications", rangeTableEntryErrorDetail, NULL); } } /* * Reject queries which involve joins. Note that UPSERTs are exceptional for this case. * Queries like "INSERT INTO table_name ON CONFLICT DO UPDATE (col) SET other_col = ''" * contains two range table entries, and we have to allow them. */ if (commandType != CMD_INSERT && queryTableCount != 1) { /* * We support UPDATE and DELETE with joins unless they are multi shard * queries. 
*/ if (!UpdateOrDeleteQuery(queryTree) || multiShardQuery) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot perform distributed planning for the given " "modification", "Joins are not supported in distributed " "modifications.", NULL); } } if (commandType == CMD_INSERT || commandType == CMD_UPDATE || commandType == CMD_DELETE) { bool hasVarArgument = false; /* A STABLE function is passed a Var argument */ bool hasBadCoalesce = false; /* CASE/COALESCE passed a mutable function */ FromExpr *joinTree = queryTree->jointree; ListCell *targetEntryCell = NULL; foreach(targetEntryCell, queryTree->targetList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); bool targetEntryPartitionColumn = false; /* reference tables do not have partition column */ if (partitionColumn == NULL) { targetEntryPartitionColumn = false; } else if (targetEntry->resno == partitionColumn->varattno) { targetEntryPartitionColumn = true; } /* skip resjunk entries: UPDATE adds some for ctid, etc. */ if (targetEntry->resjunk) { continue; } if (commandType == CMD_UPDATE && contain_volatile_functions((Node *) targetEntry->expr)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "functions used in UPDATE queries on distributed " "tables must not be VOLATILE", NULL, NULL); } if (commandType == CMD_UPDATE && targetEntryPartitionColumn && TargetEntryChangesValue(targetEntry, partitionColumn, queryTree->jointree)) { specifiesPartitionValue = true; } if (commandType == CMD_UPDATE && MasterIrreducibleExpression((Node *) targetEntry->expr, &hasVarArgument, &hasBadCoalesce)) { Assert(hasVarArgument || hasBadCoalesce); } } if (joinTree != NULL) { if (contain_volatile_functions(joinTree->quals)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "functions used in the WHERE clause of modification " "queries on distributed tables must not be VOLATILE", NULL, NULL); } else if (MasterIrreducibleExpression(joinTree->quals, &hasVarArgument, &hasBadCoalesce)) { Assert(hasVarArgument || hasBadCoalesce); } } if (hasVarArgument) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "STABLE functions used in UPDATE queries " "cannot be called with column references", NULL, NULL); } if (hasBadCoalesce) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "non-IMMUTABLE functions are not allowed in CASE or " "COALESCE statements", NULL, NULL); } if (contain_mutable_functions((Node *) queryTree->returningList)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "non-IMMUTABLE functions are not allowed in the " "RETURNING clause", NULL, NULL); } } if (commandType == CMD_INSERT && queryTree->onConflict != NULL) { onConflictSet = queryTree->onConflict->onConflictSet; arbiterWhere = queryTree->onConflict->arbiterWhere; onConflictWhere = queryTree->onConflict->onConflictWhere; } /* * onConflictSet is expanded via expand_targetlist() on the standard planner. * This ends up adding all the columns to the onConflictSet even if the user * does not explicitly state the columns in the query. * * The following loop simply allows "DO UPDATE SET part_col = table.part_col" * types of elements in the target list, which are added by expand_targetlist(). * Any other attempt to update partition column value is forbidden. 
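 *
 * A hypothetical example of the accepted shape:
 *
 *     INSERT INTO counters (key, value) VALUES (1, 10)
 *     ON CONFLICT (key) DO UPDATE SET value = counters.value + 1;
 *
 * expand_targetlist() also adds the implicit "key = counters.key" entry; the
 * loop below tolerates that, but any SET clause that could actually change
 * the partition column's value is rejected as specifying a partition value.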
*/ foreach(setTargetCell, onConflictSet) { TargetEntry *setTargetEntry = (TargetEntry *) lfirst(setTargetCell); bool setTargetEntryPartitionColumn = false; /* reference tables do not have partition column */ if (partitionColumn == NULL) { setTargetEntryPartitionColumn = false; } else if (setTargetEntry->resno == partitionColumn->varattno) { setTargetEntryPartitionColumn = true; } if (setTargetEntryPartitionColumn) { Expr *setExpr = setTargetEntry->expr; if (IsA(setExpr, Var) && ((Var *) setExpr)->varattno == partitionColumn->varattno) { specifiesPartitionValue = false; } else { specifiesPartitionValue = true; } } else { /* * Similarly, allow "DO UPDATE SET col_1 = table.col_1" types of * target list elements. Note that, the following check allows * "DO UPDATE SET col_1 = table.col_2", which is not harmful. */ if (IsA(setTargetEntry->expr, Var)) { continue; } else if (contain_mutable_functions((Node *) setTargetEntry->expr)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "functions used in the DO UPDATE SET clause of " "INSERTs on distributed tables must be marked " "IMMUTABLE", NULL, NULL); } } } /* error if either arbiter or on conflict WHERE contains a mutable function */ if (contain_mutable_functions((Node *) arbiterWhere) || contain_mutable_functions((Node *) onConflictWhere)) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "functions used in the WHERE clause of the " "ON CONFLICT clause of INSERTs on distributed " "tables must be marked IMMUTABLE", NULL, NULL); } if (specifiesPartitionValue) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "modifying the partition value of rows is not " "allowed", NULL, NULL); } return NULL; } /* * UpdateOrDeleteQuery checks if the given query is an UPDATE or DELETE command. * If it is, it returns true otherwise it returns false. */ static bool UpdateOrDeleteQuery(Query *query) { CmdType commandType = query->commandType; if (commandType == CMD_UPDATE || commandType == CMD_DELETE) { return true; } return false; } /* * If the expression contains STABLE functions which accept any parameters derived from a * Var returns true and sets varArgument. * * If the expression contains a CASE or COALESCE which invoke non-IMMUTABLE functions * returns true and sets badCoalesce. * * Assumes the expression contains no VOLATILE functions. * * Var's are allowed, but only if they are passed solely to IMMUTABLE functions * * We special-case CASE/COALESCE because those are evaluated lazily. We could evaluate * CASE/COALESCE expressions which don't reference Vars, or partially evaluate some * which do, but for now we just error out. That makes both the code and user-education * easier. */ static bool MasterIrreducibleExpression(Node *expression, bool *varArgument, bool *badCoalesce) { bool result; WalkerState data; data.containsVar = data.varArgument = data.badCoalesce = false; result = MasterIrreducibleExpressionWalker(expression, &data); *varArgument |= data.varArgument; *badCoalesce |= data.badCoalesce; return result; } static bool MasterIrreducibleExpressionWalker(Node *expression, WalkerState *state) { char volatileFlag = 0; WalkerState childState = { false, false, false }; bool containsDisallowedFunction = false; bool hasVolatileFunction PG_USED_FOR_ASSERTS_ONLY = false; if (expression == NULL) { return false; } if (IsA(expression, CoalesceExpr)) { CoalesceExpr *expr = (CoalesceExpr *) expression; if (contain_mutable_functions((Node *) (expr->args))) { state->badCoalesce = true; return true; } else { /* * There's no need to recurse. 
Since there are no STABLE functions * varArgument will never be set. */ return false; } } if (IsA(expression, CaseExpr)) { if (contain_mutable_functions(expression)) { state->badCoalesce = true; return true; } return false; } if (IsA(expression, Var)) { state->containsVar = true; return false; } /* * In order for statement replication to give us consistent results it's important * that we either disallow or evaluate on the master anything which has a volatility * category above IMMUTABLE. Newer versions of postgres might add node types which * should be checked in this function. * * Look through contain_mutable_functions_walker or future PG's equivalent for new * node types before bumping this version number to fix compilation; e.g. for any * PostgreSQL after 9.5, see check_functions_in_node. Review * MasterIrreducibleExpressionFunctionChecker for any changes in volatility * permissibility ordering. * * Once you've added them to this check, make sure you also evaluate them in the * executor! */ /* subqueries aren't allowed and should fail before control reaches this point */ Assert(!IsA(expression, Query)); hasVolatileFunction = check_functions_in_node(expression, MasterIrreducibleExpressionFunctionChecker, &volatileFlag); /* the caller should have already checked for this */ Assert(!hasVolatileFunction); Assert(volatileFlag != PROVOLATILE_VOLATILE); if (volatileFlag == PROVOLATILE_STABLE) { containsDisallowedFunction = expression_tree_walker(expression, MasterIrreducibleExpressionWalker, &childState); if (childState.containsVar) { state->varArgument = true; } state->badCoalesce |= childState.badCoalesce; state->varArgument |= childState.varArgument; return (containsDisallowedFunction || childState.containsVar); } /* keep traversing */ return expression_tree_walker(expression, MasterIrreducibleExpressionWalker, state); } /* * MasterIrreducibleExpressionFunctionChecker returns true if a provided function * oid corresponds to a volatile function. It also updates provided context if * the current volatility flag is more permissive than the provided one. It is * only called from check_functions_in_node as checker function. */ static bool MasterIrreducibleExpressionFunctionChecker(Oid func_id, void *context) { char volatileFlag = func_volatile(func_id); char *volatileContext = (char *) context; if (volatileFlag == PROVOLATILE_VOLATILE || *volatileContext == PROVOLATILE_VOLATILE) { *volatileContext = PROVOLATILE_VOLATILE; } else if (volatileFlag == PROVOLATILE_STABLE || *volatileContext == PROVOLATILE_STABLE) { *volatileContext = PROVOLATILE_STABLE; } else { *volatileContext = PROVOLATILE_IMMUTABLE; } return (volatileFlag == PROVOLATILE_VOLATILE); } /* * TargetEntryChangesValue determines whether the given target entry may * change the value in a given column, given a join tree. The result is * true unless the expression refers directly to the column, or the * expression is a value that is implied by the qualifiers of the join * tree, or the target entry sets a different column. 
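 *
 * For instance (hypothetical), in
 *
 *     UPDATE events SET user_id = 42 WHERE user_id = 42;
 *
 * the new value is implied by the WHERE clause, so the partition column is
 * not considered to change, whereas SET user_id = 43 with the same filter
 * would be treated as a partition value change.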
*/ static bool TargetEntryChangesValue(TargetEntry *targetEntry, Var *column, FromExpr *joinTree) { bool isColumnValueChanged = true; Expr *setExpr = targetEntry->expr; if (targetEntry->resno != column->varattno) { /* target entry of the form SET some_other_col = */ isColumnValueChanged = false; } else if (IsA(setExpr, Var)) { Var *newValue = (Var *) setExpr; if (newValue->varattno == column->varattno) { /* target entry of the form SET col = table.col */ isColumnValueChanged = false; } } else if (IsA(setExpr, Const)) { Const *newValue = (Const *) setExpr; List *restrictClauseList = WhereClauseList(joinTree); OpExpr *equalityExpr = MakeOpExpression(column, BTEqualStrategyNumber); Const *rightConst = (Const *) get_rightop((Expr *) equalityExpr); bool predicateIsImplied = false; rightConst->constvalue = newValue->constvalue; rightConst->constisnull = newValue->constisnull; rightConst->constbyval = newValue->constbyval; #if (PG_VERSION_NUM >= 100000) predicateIsImplied = predicate_implied_by(list_make1(equalityExpr), restrictClauseList, false); #else predicateIsImplied = predicate_implied_by(list_make1(equalityExpr), restrictClauseList); #endif if (predicateIsImplied) { /* target entry of the form SET col = WHERE col = AND ... */ isColumnValueChanged = false; } } return isColumnValueChanged; } /* * RouterInsertJob builds a Job to represent an insertion performed by * the provided query against the provided shard interval. This task contains * shard-extended deparsed SQL to be run during execution. */ static Job * RouterInsertJob(Query *originalQuery, Query *query, DeferredErrorMessage **planningError) { Oid distributedTableId = ExtractFirstDistributedTableId(query); List *taskList = NIL; Job *job = NULL; bool requiresMasterEvaluation = false; bool deferredPruning = false; bool isMultiRowInsert = IsMultiRowInsert(query); if (isMultiRowInsert) { /* add default expressions to RTE_VALUES in multi-row INSERTs */ NormalizeMultiRowInsertTargetList(originalQuery); } if (isMultiRowInsert || !CanShardPrune(distributedTableId, query)) { /* * If there is a non-constant (e.g. parameter, function call) in the partition * column of the INSERT then we defer shard pruning until the executor where * these values are known. * * XXX: We also defer pruning for multi-row INSERTs because of some current * limitations with the way multi-row INSERTs are handled. Most notably, we * don't evaluate functions in task->rowValuesList. Therefore we need to * perform function evaluation before we can run RouterInsertTaskList. */ taskList = NIL; deferredPruning = true; /* must evaluate the non-constant in the partition column */ requiresMasterEvaluation = true; } else { taskList = RouterInsertTaskList(query, planningError); if (*planningError) { return NULL; } /* determine whether there are function calls to evaluate */ requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery); } if (!requiresMasterEvaluation) { /* no functions or parameters, build the query strings upfront */ RebuildQueryStrings(originalQuery, taskList); } job = CreateJob(originalQuery); job->taskList = taskList; job->requiresMasterEvaluation = requiresMasterEvaluation; job->deferredPruning = deferredPruning; return job; } /* * CreateJob returns a new Job for the given query. 
*/ static Job * CreateJob(Query *query) { Job *job = NULL; job = CitusMakeNode(Job); job->jobId = INVALID_JOB_ID; job->jobQuery = query; job->taskList = NIL; job->dependedJobList = NIL; job->subqueryPushdown = false; job->requiresMasterEvaluation = false; job->deferredPruning = false; return job; } /* * CanShardPrune determines whether a query is ready for shard pruning * by checking whether there is a constant value in the partition column. */ static bool CanShardPrune(Oid distributedTableId, Query *query) { uint32 rangeTableId = 1; Var *partitionColumn = NULL; List *insertValuesList = NIL; ListCell *insertValuesCell = NULL; if (query->commandType != CMD_INSERT) { /* we assume UPDATE/DELETE is always prunable */ return true; } partitionColumn = PartitionColumn(distributedTableId, rangeTableId); if (partitionColumn == NULL) { /* can always do shard pruning for reference tables */ return true; } /* get full list of partition values and ensure they are all Consts */ insertValuesList = ExtractInsertValuesList(query, partitionColumn); foreach(insertValuesCell, insertValuesList) { InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell); if (!IsA(insertValues->partitionValueExpr, Const)) { /* can't do shard pruning if the partition column is not constant */ return false; } } return true; } /* * ErrorIfNoShardsExist throws an error if the given table has no shards. */ static void ErrorIfNoShardsExist(DistTableCacheEntry *cacheEntry) { int shardCount = cacheEntry->shardIntervalArrayLength; if (shardCount == 0) { Oid distributedTableId = cacheEntry->relationId; char *relationName = get_rel_name(distributedTableId); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not find any shards"), errdetail("No shards exist for distributed table \"%s\".", relationName), errhint("Run master_create_worker_shards to create shards " "and try again."))); } } /* * RouterInsertTaskList generates a list of tasks for performing an INSERT on * a distributed table via the router executor. */ List * RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError) { List *insertTaskList = NIL; List *modifyRouteList = NIL; ListCell *modifyRouteCell = NULL; Oid distributedTableId = ExtractFirstDistributedTableId(query); DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); ErrorIfNoShardsExist(cacheEntry); Assert(query->commandType == CMD_INSERT); modifyRouteList = BuildRoutesForInsert(query, planningError); if (*planningError != NULL) { return NIL; } foreach(modifyRouteCell, modifyRouteList) { ModifyRoute *modifyRoute = (ModifyRoute *) lfirst(modifyRouteCell); Task *modifyTask = CreateTask(MODIFY_TASK); modifyTask->anchorShardId = modifyRoute->shardId; modifyTask->replicationModel = cacheEntry->replicationModel; modifyTask->rowValuesLists = modifyRoute->rowValuesLists; if (query->onConflict != NULL) { modifyTask->upsertQuery = true; } insertTaskList = lappend(insertTaskList, modifyTask); } return insertTaskList; } /* * CreateTask returns a new Task with the given type. 
*/ static Task * CreateTask(TaskType taskType) { Task *task = NULL; task = CitusMakeNode(Task); task->taskType = taskType; task->jobId = INVALID_JOB_ID; task->taskId = INVALID_TASK_ID; task->queryString = NULL; task->anchorShardId = INVALID_SHARD_ID; task->taskPlacementList = NIL; task->dependedTaskList = NIL; task->partitionId = 0; task->upstreamTaskId = INVALID_TASK_ID; task->shardInterval = NULL; task->assignmentConstrained = false; task->shardId = INVALID_SHARD_ID; task->taskExecution = NULL; task->upsertQuery = false; task->replicationModel = REPLICATION_MODEL_INVALID; task->insertSelectQuery = false; task->relationShardList = NIL; return task; } /* * ExtractFirstDistributedTableId takes a given query, and finds the relationId * for the first distributed table in that query. If the function cannot find a * distributed table, it returns InvalidOid. */ Oid ExtractFirstDistributedTableId(Query *query) { List *rangeTableList = NIL; ListCell *rangeTableCell = NULL; Oid distributedTableId = InvalidOid; /* extract range table entries */ ExtractRangeTableEntryWalker((Node *) query, &rangeTableList); foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); if (IsDistributedTable(rangeTableEntry->relid)) { distributedTableId = rangeTableEntry->relid; break; } } return distributedTableId; } /* RouterJob builds a Job to represent a single shard select/update/delete query */ static Job * RouterJob(Query *originalQuery, RelationRestrictionContext *restrictionContext, DeferredErrorMessage **planningError) { Job *job = NULL; Task *task = NULL; StringInfo queryString = makeStringInfo(); uint64 shardId = INVALID_SHARD_ID; List *placementList = NIL; List *relationShardList = NIL; List *rangeTableList = NIL; bool replacePrunedQueryWithDummy = false; bool requiresMasterEvaluation = false; RangeTblEntry *updateOrDeleteRTE = NULL; /* router planner should create task even if it deosn't hit a shard at all */ replacePrunedQueryWithDummy = true; /* check if this query requires master evaluation */ requiresMasterEvaluation = RequiresMasterEvaluation(originalQuery); (*planningError) = PlanRouterQuery(originalQuery, restrictionContext, &placementList, &shardId, &relationShardList, replacePrunedQueryWithDummy); if (*planningError) { return NULL; } job = CreateJob(originalQuery); ExtractRangeTableEntryWalker((Node *) originalQuery, &rangeTableList); updateOrDeleteRTE = GetUpdateOrDeleteRTE(rangeTableList); /* * If all of the shards are pruned, we replace the relation RTE into * subquery RTE that returns no results. However, this is not useful * for UPDATE and DELETE queries. Therefore, if we detect a UPDATE or * DELETE RTE with subquery type, we just set task list to empty and return * the job. 
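 *
 * Hypothetical example: "DELETE FROM events WHERE user_id = 42 AND false"
 * prunes away every shard, so the modified RTE ends up as an empty-result
 * subquery; instead of sending a dummy query anywhere, the job below simply
 * carries an empty task list.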
*/ if (updateOrDeleteRTE != NULL && updateOrDeleteRTE->rtekind == RTE_SUBQUERY) { job->taskList = NIL; return job; } pg_get_query_def(originalQuery, queryString); if (originalQuery->commandType == CMD_SELECT) { task = CreateTask(ROUTER_TASK); } else { DistTableCacheEntry *modificationTableCacheEntry = NULL; char modificationPartitionMethod = 0; modificationTableCacheEntry = DistributedTableCacheEntry( updateOrDeleteRTE->relid); modificationPartitionMethod = modificationTableCacheEntry->partitionMethod; if (modificationPartitionMethod == DISTRIBUTE_BY_NONE && SelectsFromDistributedTable(rangeTableList)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot perform select on a distributed table " "and modify a reference table"))); } task = CreateTask(MODIFY_TASK); task->replicationModel = modificationTableCacheEntry->replicationModel; } task->queryString = queryString->data; task->anchorShardId = shardId; task->taskPlacementList = placementList; task->relationShardList = relationShardList; job->taskList = list_make1(task); job->requiresMasterEvaluation = requiresMasterEvaluation; return job; } /* * GetUpdateOrDeleteRTE walks over the given range table list, and checks if * it has an UPDATE or DELETE RTE. If it finds one, it return it immediately. */ static RangeTblEntry * GetUpdateOrDeleteRTE(List *rangeTableList) { ListCell *rangeTableCell = NULL; foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); if (UpdateOrDeleteRTE(rangeTableEntry)) { return rangeTableEntry; } } return NULL; } /* * UpdateOrDeleteRTE checks if the given range table entry is an UPDATE or * DELETE RTE by checking required permissions on it. */ static bool UpdateOrDeleteRTE(RangeTblEntry *rangeTableEntry) { if ((ACL_UPDATE & rangeTableEntry->requiredPerms) || (ACL_DELETE & rangeTableEntry->requiredPerms)) { return true; } else { return false; } } /* * SelectsFromDistributedTable checks if there is a select on a distributed * table by looking into range table entries. */ static bool SelectsFromDistributedTable(List *rangeTableList) { ListCell *rangeTableCell = NULL; foreach(rangeTableCell, rangeTableList) { RangeTblEntry *rangeTableEntry = (RangeTblEntry *) lfirst(rangeTableCell); DistTableCacheEntry *cacheEntry = NULL; if (rangeTableEntry->relid == InvalidOid) { continue; } cacheEntry = DistributedTableCacheEntry(rangeTableEntry->relid); if (cacheEntry->partitionMethod != DISTRIBUTE_BY_NONE && !UpdateOrDeleteRTE(rangeTableEntry)) { return true; } } return false; } /* * RouterQuery runs router pruning logic for SELECT, UPDATE and DELETE queries. * If there are shards present and query is routable, all RTEs have been updated * to point to the relevant shards in the originalQuery. Also, placementList is * filled with the list of worker nodes that has all the required shard placements * for the query execution. anchorShardId is set to the first pruned shardId of * the given query. Finally, relationShardList is filled with the list of * relation-to-shard mappings for the query. * * If the given query is not routable, it fills planningError with the related * DeferredErrorMessage. The caller can check this error message to see if query * is routable or not. 
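 *
 * For example (hypothetical), "UPDATE events SET value = 0" with no filter on
 * the partition column prunes to more than one shard, so planningError is
 * filled with the multi-shard error (including, for UPDATE/DELETE, the
 * master_modify_multiple_shards() hint) rather than producing a route.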
*/ DeferredErrorMessage * PlanRouterQuery(Query *originalQuery, RelationRestrictionContext *restrictionContext, List **placementList, uint64 *anchorShardId, List **relationShardList, bool replacePrunedQueryWithDummy) { bool multiShardQuery = false; List *prunedRelationShardList = NIL; DeferredErrorMessage *planningError = NULL; ListCell *prunedRelationShardListCell = NULL; List *workerList = NIL; bool shardsPresent = false; uint64 shardId = INVALID_SHARD_ID; *placementList = NIL; prunedRelationShardList = TargetShardIntervalsForRouter(originalQuery, restrictionContext, &multiShardQuery); /* * If multiShardQuery is true then it means a relation has more * than one shard left after pruning. */ if (multiShardQuery) { StringInfo errorMessage = makeStringInfo(); StringInfo errorHint = makeStringInfo(); CmdType commandType = originalQuery->commandType; const char *commandName = "SELECT"; if (commandType == CMD_UPDATE) { commandName = "UPDATE"; } else if (commandType == CMD_DELETE) { commandName = "DELETE"; } if (commandType == CMD_UPDATE || commandType == CMD_DELETE) { List *rangeTableList = NIL; RangeTblEntry *updateOrDeleteRTE = NULL; DistTableCacheEntry *updateOrDeleteTableCacheEntry = NULL; char *partitionKeyString = NULL; char *partitionColumnName = NULL; /* extract range table entries */ ExtractRangeTableEntryWalker((Node *) originalQuery, &rangeTableList); updateOrDeleteRTE = GetUpdateOrDeleteRTE(rangeTableList); updateOrDeleteTableCacheEntry = DistributedTableCacheEntry(updateOrDeleteRTE->relid); partitionKeyString = updateOrDeleteTableCacheEntry->partitionKeyString; partitionColumnName = ColumnNameToColumn(updateOrDeleteRTE->relid, partitionKeyString); appendStringInfo(errorHint, "Consider using an equality filter on " "partition column \"%s\" to target a " "single shard. If you'd like to run a " "multi-shard operation, use " "master_modify_multiple_shards().", partitionColumnName); } /* note that for SELECT queries, we never print this error message */ appendStringInfo(errorMessage, "cannot run %s command which targets multiple shards", commandName); planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, errorMessage->data, NULL, errorHint->data); return planningError; } foreach(prunedRelationShardListCell, prunedRelationShardList) { List *prunedShardList = (List *) lfirst(prunedRelationShardListCell); ShardInterval *shardInterval = NULL; RelationShard *relationShard = NULL; /* no shard is present or all shards are pruned out case will be handled later */ if (prunedShardList == NIL) { continue; } shardsPresent = true; /* all relations are now pruned down to 0 or 1 shards */ Assert(list_length(prunedShardList) <= 1); shardInterval = (ShardInterval *) linitial(prunedShardList); /* anchor shard id */ if (shardId == INVALID_SHARD_ID) { shardId = shardInterval->shardId; } /* add relation to shard mapping */ relationShard = CitusMakeNode(RelationShard); relationShard->relationId = shardInterval->relationId; relationShard->shardId = shardInterval->shardId; *relationShardList = lappend(*relationShardList, relationShard); } /* * We bail out if there are RTEs that prune multiple shards above, but * there can also be multiple RTEs that reference the same relation. */ if (RelationPrunesToMultipleShards(*relationShardList)) { planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "cannot run command which targets " "multiple shards", NULL, NULL); return planningError; } /* * Determine the worker that has all shard placements if a shard placement found. 
* If no shard placement exists and replacePrunedQueryWithDummy flag is set, we will * still run the query but the result will be empty. We create a dummy shard * placement for the first active worker. */ if (shardsPresent) { workerList = WorkersContainingAllShards(prunedRelationShardList); } else if (replacePrunedQueryWithDummy) { List *workerNodeList = ActiveReadableNodeList(); if (workerNodeList != NIL) { WorkerNode *workerNode = (WorkerNode *) linitial(workerNodeList); ShardPlacement *dummyPlacement = (ShardPlacement *) CitusMakeNode(ShardPlacement); dummyPlacement->nodeName = workerNode->workerName; dummyPlacement->nodePort = workerNode->workerPort; dummyPlacement->groupId = workerNode->groupId; workerList = lappend(workerList, dummyPlacement); } } else { /* * For INSERT ... SELECT, this query could be still a valid for some other target * shard intervals. Thus, we should return empty list if there aren't any matching * workers, so that the caller can decide what to do with this task. */ return NULL; } if (workerList == NIL) { ereport(DEBUG2, (errmsg("Found no worker with all shard placements"))); planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "found no worker with all shard placements", NULL, NULL); return planningError; } /* * If this is an UPDATE or DELETE query which requires master evaluation, * don't try update shard names, and postpone that to execution phase. */ if (!(UpdateOrDeleteQuery(originalQuery) && RequiresMasterEvaluation(originalQuery))) { UpdateRelationToShardNames((Node *) originalQuery, *relationShardList); } *placementList = workerList; *anchorShardId = shardId; return planningError; } /* * TargetShardIntervalsForRouter performs shard pruning for all referenced relations * in the query and returns list of shards per relation. Shard pruning is done based * on provided restriction context per relation. The function bails out and returns * after setting multiShardQuery to true if any of the relations pruned down to * more than one active shard. It also records pruned shard intervals in relation * restriction context to be used later on. Some queries may have contradiction * clauses like 'and false' or 'and 1=0', such queries are treated as if all of * the shards of joining relations are pruned out. */ static List * TargetShardIntervalsForRouter(Query *query, RelationRestrictionContext *restrictionContext, bool *multiShardQuery) { List *prunedRelationShardList = NIL; ListCell *restrictionCell = NULL; Assert(restrictionContext != NULL); foreach(restrictionCell, restrictionContext->relationRestrictionList) { RelationRestriction *relationRestriction = (RelationRestriction *) lfirst(restrictionCell); Oid relationId = relationRestriction->relationId; Index tableId = relationRestriction->index; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); int shardCount = cacheEntry->shardIntervalArrayLength; List *baseRestrictionList = relationRestriction->relOptInfo->baserestrictinfo; List *restrictClauseList = get_all_actual_clauses(baseRestrictionList); List *prunedShardList = NIL; List *joinInfoList = relationRestriction->relOptInfo->joininfo; List *pseudoRestrictionList = extract_actual_clauses(joinInfoList, true); bool whereFalseQuery = false; relationRestriction->prunedShardIntervalList = NIL; /* * Queries may have contradiction clauses like 'false', or '1=0' in * their filters. Such queries would have pseudo constant 'false' * inside relOptInfo->joininfo list. We treat such cases as if all * shards of the table are pruned out. 
*/ whereFalseQuery = ContainsFalseClause(pseudoRestrictionList); if (!whereFalseQuery && shardCount > 0) { prunedShardList = PruneShards(relationId, tableId, restrictClauseList); /* * Quick bail out. The query can not be router plannable if one * relation has more than one shard left after pruning. Having no * shard left is okay at this point. It will be handled at a later * stage. */ if (list_length(prunedShardList) > 1) { (*multiShardQuery) = true; return NIL; } } relationRestriction->prunedShardIntervalList = prunedShardList; prunedRelationShardList = lappend(prunedRelationShardList, prunedShardList); } return prunedRelationShardList; } /* * RelationPrunesToMultipleShards returns true if the given list of * relation-to-shard mappings contains at least two mappings with * the same relation, but different shards. */ static bool RelationPrunesToMultipleShards(List *relationShardList) { ListCell *relationShardCell = NULL; RelationShard *previousRelationShard = NULL; relationShardList = SortList(relationShardList, CompareRelationShards); foreach(relationShardCell, relationShardList) { RelationShard *relationShard = (RelationShard *) lfirst(relationShardCell); if (previousRelationShard != NULL && relationShard->relationId == previousRelationShard->relationId && relationShard->shardId != previousRelationShard->shardId) { return true; } previousRelationShard = relationShard; } return false; } /* * WorkersContainingAllShards returns list of shard placements that contain all * shard intervals provided to the function. It returns NIL if no placement exists. * The caller should check if there are any shard intervals exist for placement * check prior to calling this function. */ static List * WorkersContainingAllShards(List *prunedShardIntervalsList) { ListCell *prunedShardIntervalCell = NULL; bool firstShard = true; List *currentPlacementList = NIL; foreach(prunedShardIntervalCell, prunedShardIntervalsList) { List *shardIntervalList = (List *) lfirst(prunedShardIntervalCell); ShardInterval *shardInterval = NULL; uint64 shardId = INVALID_SHARD_ID; List *newPlacementList = NIL; if (shardIntervalList == NIL) { continue; } Assert(list_length(shardIntervalList) == 1); shardInterval = (ShardInterval *) linitial(shardIntervalList); shardId = shardInterval->shardId; /* retrieve all active shard placements for this shard */ newPlacementList = FinalizedShardPlacementList(shardId); if (firstShard) { firstShard = false; currentPlacementList = newPlacementList; } else { /* keep placements that still exists for this shard */ currentPlacementList = IntersectPlacementList(currentPlacementList, newPlacementList); } /* * Bail out if placement list becomes empty. This means there is no worker * containing all shards referecend by the query, hence we can not forward * this query directly to any worker. */ if (currentPlacementList == NIL) { break; } } return currentPlacementList; } /* * BuildRoutesForInsert returns a list of ModifyRoute objects for an INSERT * query or an empty list if the partition column value is defined as an ex- * pression that still needs to be evaluated. If any partition column value * falls within 0 or multiple (overlapping) shards, the planning error is set. * * Multi-row INSERTs are handled by grouping their rows by target shard. These * groups are returned in ascending order by shard id, ready for later deparse * to shard-specific SQL. 
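/*
 * Illustrative sketch (hypothetical, for exposition only): the case that
 * RelationPrunesToMultipleShards() above guards against. Two range table
 * entries that reference the same relation but prune to different shards,
 * e.g. a self-join without a common partition key filter, cannot be routed
 * to a single shard. The relation and shard ids below are made up.
 */
static void
ExampleSelfJoinPrunesToDifferentShards(void)
{
	RelationShard *firstEntry = CitusMakeNode(RelationShard);
	RelationShard *secondEntry = CitusMakeNode(RelationShard);
	List *relationShardList = NIL;

	firstEntry->relationId = 16384;		/* hypothetical distributed table */
	firstEntry->shardId = 102008;		/* hypothetical shard */

	secondEntry->relationId = 16384;	/* same relation ... */
	secondEntry->shardId = 102009;		/* ... but a different shard */

	relationShardList = list_make2(firstEntry, secondEntry);

	/* true, so PlanRouterQuery() reports that the command targets multiple shards */
	Assert(RelationPrunesToMultipleShards(relationShardList));
}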
*/ static List * BuildRoutesForInsert(Query *query, DeferredErrorMessage **planningError) { Oid distributedTableId = ExtractFirstDistributedTableId(query); DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); char partitionMethod = cacheEntry->partitionMethod; uint32 rangeTableId = 1; Var *partitionColumn = NULL; List *insertValuesList = NIL; List *modifyRouteList = NIL; ListCell *insertValuesCell = NULL; Assert(query->commandType == CMD_INSERT); /* reference tables can only have one shard */ if (partitionMethod == DISTRIBUTE_BY_NONE) { int shardCount = 0; List *shardIntervalList = LoadShardIntervalList(distributedTableId); RangeTblEntry *valuesRTE = NULL; ShardInterval *shardInterval = NULL; ModifyRoute *modifyRoute = NULL; shardCount = list_length(shardIntervalList); if (shardCount != 1) { ereport(ERROR, (errmsg("reference table cannot have %d shards", shardCount))); } shardInterval = linitial(shardIntervalList); modifyRoute = palloc(sizeof(ModifyRoute)); modifyRoute->shardId = shardInterval->shardId; valuesRTE = ExtractDistributedInsertValuesRTE(query); if (valuesRTE != NULL) { /* add the values list for a multi-row INSERT */ modifyRoute->rowValuesLists = valuesRTE->values_lists; } else { modifyRoute->rowValuesLists = NIL; } modifyRouteList = lappend(modifyRouteList, modifyRoute); return modifyRouteList; } partitionColumn = PartitionColumn(distributedTableId, rangeTableId); /* get full list of insert values and iterate over them to prune */ insertValuesList = ExtractInsertValuesList(query, partitionColumn); foreach(insertValuesCell, insertValuesList) { InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell); Const *partitionValueConst = NULL; List *prunedShardList = NIL; int prunedShardCount = 0; ShardInterval *targetShard = NULL; if (!IsA(insertValues->partitionValueExpr, Const)) { /* shard pruning not possible right now */ return NIL; } partitionValueConst = (Const *) insertValues->partitionValueExpr; if (partitionValueConst->constisnull) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("cannot perform an INSERT with NULL in the partition " "column"))); } if (partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod == DISTRIBUTE_BY_RANGE) { Datum partitionValue = partitionValueConst->constvalue; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry( distributedTableId); ShardInterval *shardInterval = FindShardInterval(partitionValue, cacheEntry); if (shardInterval != NULL) { prunedShardList = list_make1(shardInterval); } } else { List *restrictClauseList = NIL; Index tableId = 1; OpExpr *equalityExpr = MakeOpExpression(partitionColumn, BTEqualStrategyNumber); Node *rightOp = get_rightop((Expr *) equalityExpr); Const *rightConst = (Const *) rightOp; Assert(IsA(rightOp, Const)); rightConst->constvalue = partitionValueConst->constvalue; rightConst->constisnull = partitionValueConst->constisnull; rightConst->constbyval = partitionValueConst->constbyval; restrictClauseList = list_make1(equalityExpr); prunedShardList = PruneShards(distributedTableId, tableId, restrictClauseList); } prunedShardCount = list_length(prunedShardList); if (prunedShardCount != 1) { char *partitionKeyString = cacheEntry->partitionKeyString; char *partitionColumnName = ColumnNameToColumn(distributedTableId, partitionKeyString); StringInfo errorMessage = makeStringInfo(); StringInfo errorHint = makeStringInfo(); const char *targetCountType = NULL; if (prunedShardCount == 0) { targetCountType = "no"; } else { targetCountType = "multiple"; } if 
(prunedShardCount == 0) { appendStringInfo(errorHint, "Make sure you have created a shard which " "can receive this partition column value."); } else { appendStringInfo(errorHint, "Make sure the value for partition column " "\"%s\" falls into a single shard.", partitionColumnName); } appendStringInfo(errorMessage, "cannot run INSERT command which targets %s " "shards", targetCountType); (*planningError) = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, errorMessage->data, NULL, errorHint->data); return NIL; } targetShard = (ShardInterval *) linitial(prunedShardList); insertValues->shardId = targetShard->shardId; } modifyRouteList = GroupInsertValuesByShardId(insertValuesList); return modifyRouteList; } /* * IsMultiRowInsert returns whether the given query is a multi-row INSERT. * * It does this by determining whether the query is an INSERT that has an * RTE_VALUES. Single-row INSERTs will have their RTE_VALUES optimised away * in transformInsertStmt, and instead use the target list. */ bool IsMultiRowInsert(Query *query) { return ExtractDistributedInsertValuesRTE(query) != NULL; } /* * ExtractDistributedInsertValuesRTE does precisely that. If the provided * query is not an INSERT, or if the INSERT does not have a VALUES RTE * (i.e. it is not a multi-row INSERT), this function returns NULL. * If all those conditions are met, an RTE representing the multiple values * of a multi-row INSERT is returned. */ RangeTblEntry * ExtractDistributedInsertValuesRTE(Query *query) { ListCell *rteCell = NULL; RangeTblEntry *valuesRTE = NULL; if (query->commandType != CMD_INSERT) { return NULL; } foreach(rteCell, query->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(rteCell); if (rte->rtekind == RTE_VALUES) { valuesRTE = rte; break; } } return valuesRTE; } /* * NormalizeMultiRowInsertTargetList ensures all elements of multi-row INSERT target * lists are Vars. In multi-row INSERTs, most target list entries contain a Var * expression pointing to a position within the values_lists field of a VALUES * RTE, but non-NULL default columns are handled differently. Instead of adding * the default expression to each row, a single expression encoding the DEFAULT * appears in the target list. For consistency, we move these expressions into * values lists and replace them with an appropriately constructed Var. 
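/*
 * Illustrative sketch (hypothetical, for exposition only): how a caller
 * typically uses the single-row vs. multi-row INSERT distinction described
 * above. The helper name is made up.
 */
static void
ExampleDescribeInsertShape(Query *query)
{
	if (IsMultiRowInsert(query))
	{
		/* a VALUES RTE is present; the rows live in its values_lists field */
		RangeTblEntry *valuesRTE = ExtractDistributedInsertValuesRTE(query);
		int rowCount = list_length(valuesRTE->values_lists);

		ereport(DEBUG2, (errmsg("multi-row INSERT with %d rows", rowCount)));
	}
	else
	{
		/* the single row's values were folded into the target list */
		ereport(DEBUG2, (errmsg("single-row INSERT")));
	}
}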
*/ static void NormalizeMultiRowInsertTargetList(Query *query) { ListCell *valuesListCell = NULL; ListCell *targetEntryCell = NULL; int targetEntryNo = 0; RangeTblEntry *valuesRTE = ExtractDistributedInsertValuesRTE(query); if (valuesRTE == NULL) { return; } foreach(valuesListCell, valuesRTE->values_lists) { List *valuesList = (List *) lfirst(valuesListCell); Expr **valuesArray = (Expr **) PointerArrayFromList(valuesList); List *expandedValuesList = NIL; foreach(targetEntryCell, query->targetList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); Expr *targetExpr = targetEntry->expr; if (IsA(targetExpr, Var)) { /* expression from the VALUES section */ Var *targetListVar = (Var *) targetExpr; targetExpr = valuesArray[targetListVar->varattno - 1]; } else { /* copy the column's default expression */ targetExpr = copyObject(targetExpr); } expandedValuesList = lappend(expandedValuesList, targetExpr); } valuesListCell->data.ptr_value = (void *) expandedValuesList; } #if (PG_VERSION_NUM >= 100000) /* reset coltypes, coltypmods, colcollations and rebuild them below */ valuesRTE->coltypes = NIL; valuesRTE->coltypmods = NIL; valuesRTE->colcollations = NIL; #endif foreach(targetEntryCell, query->targetList) { TargetEntry *targetEntry = lfirst(targetEntryCell); Node *targetExprNode = (Node *) targetEntry->expr; Oid targetType = InvalidOid; int32 targetTypmod = -1; Oid targetColl = InvalidOid; Var *syntheticVar = NULL; /* RTE_VALUES comes 2nd, after destination table */ Index valuesVarno = 2; targetEntryNo++; targetType = exprType(targetExprNode); targetTypmod = exprTypmod(targetExprNode); targetColl = exprCollation(targetExprNode); #if (PG_VERSION_NUM >= 100000) valuesRTE->coltypes = lappend_oid(valuesRTE->coltypes, targetType); valuesRTE->coltypmods = lappend_int(valuesRTE->coltypmods, targetTypmod); valuesRTE->colcollations = lappend_oid(valuesRTE->colcollations, targetColl); #endif if (IsA(targetExprNode, Var)) { Var *targetVar = (Var *) targetExprNode; targetVar->varattno = targetEntryNo; continue; } /* replace the original expression with a Var referencing values_lists */ syntheticVar = makeVar(valuesVarno, targetEntryNo, targetType, targetTypmod, targetColl, 0); targetEntry->expr = (Expr *) syntheticVar; } } /* * IntersectPlacementList performs placement pruning based on matching on * nodeName:nodePort fields of shard placement data. We start pruning from all * placements of the first relation's shard. Then for each relation's shard, we * compute intersection of the new shards placement with existing placement list. * This operation could have been done using other methods, but since we do not * expect very high replication factor, iterating over a list and making string * comparisons should be sufficient. 
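/*
 * Illustrative sketch (hypothetical, for exposition only): the placement
 * intersection described above, written out for exactly two shards the way
 * WorkersContainingAllShards() applies it shard by shard. Only workers that
 * hold a finalized placement of both shards remain; an empty result means no
 * single worker can answer the query. The helper name is made up.
 */
static List *
ExampleCommonPlacementsForTwoShards(uint64 firstShardId, uint64 secondShardId)
{
	List *firstPlacementList = FinalizedShardPlacementList(firstShardId);
	List *secondPlacementList = FinalizedShardPlacementList(secondShardId);

	return IntersectPlacementList(firstPlacementList, secondPlacementList);
}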
*/ List * IntersectPlacementList(List *lhsPlacementList, List *rhsPlacementList) { ListCell *lhsPlacementCell = NULL; List *placementList = NIL; /* Keep existing placement in the list if it is also present in new placement list */ foreach(lhsPlacementCell, lhsPlacementList) { ShardPlacement *lhsPlacement = (ShardPlacement *) lfirst(lhsPlacementCell); ListCell *rhsPlacementCell = NULL; foreach(rhsPlacementCell, rhsPlacementList) { ShardPlacement *rhsPlacement = (ShardPlacement *) lfirst(rhsPlacementCell); if (rhsPlacement->nodePort == lhsPlacement->nodePort && strncmp(rhsPlacement->nodeName, lhsPlacement->nodeName, WORKER_LENGTH) == 0) { placementList = lappend(placementList, rhsPlacement); } } } return placementList; } /* * GroupInsertValuesByShardId takes care of grouping the rows from a multi-row * INSERT by target shard. At this point, all pruning has taken place and we * need only to build sets of rows for each destination. This is done by a * simple sort (by shard identifier) and gather step. The sort has the side- * effect of getting things in ascending order to avoid unnecessary deadlocks * during Task execution. */ static List * GroupInsertValuesByShardId(List *insertValuesList) { ModifyRoute *route = NULL; ListCell *insertValuesCell = NULL; List *modifyRouteList = NIL; insertValuesList = SortList(insertValuesList, CompareInsertValuesByShardId); foreach(insertValuesCell, insertValuesList) { InsertValues *insertValues = (InsertValues *) lfirst(insertValuesCell); int64 shardId = insertValues->shardId; bool foundSameShardId = false; if (route != NULL) { if (route->shardId == shardId) { foundSameShardId = true; } else { /* new shard id seen; current aggregation done; add to list */ modifyRouteList = lappend(modifyRouteList, route); } } if (foundSameShardId) { /* * Our current value has the same shard id as our aggregate object, * so append the rowValues. */ route->rowValuesLists = lappend(route->rowValuesLists, insertValues->rowValues); } else { /* we encountered a new shard id; build a new aggregate object */ route = (ModifyRoute *) palloc(sizeof(ModifyRoute)); route->shardId = insertValues->shardId; route->rowValuesLists = list_make1(insertValues->rowValues); } } /* left holding one final aggregate object; add to list */ modifyRouteList = lappend(modifyRouteList, route); return modifyRouteList; } /* * ExtractInsertValuesList extracts the partition column value for an INSERT * command and returns it within an InsertValues struct. For single-row INSERTs * this is simply a value extracted from the target list, but multi-row INSERTs * will generate a List of InsertValues, each with full row values in addition * to the partition value. If a partition value is NULL or missing altogether, * this function errors. */ static List * ExtractInsertValuesList(Query *query, Var *partitionColumn) { List *insertValuesList = NIL; TargetEntry *targetEntry = get_tle_by_resno(query->targetList, partitionColumn->varattno); if (targetEntry == NULL) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("cannot perform an INSERT without a partition column " "value"))); } /* * We've got a multi-row INSERT. PostgreSQL internally represents such * commands by linking Vars in the target list to lists of values within * a special VALUES range table entry. By extracting the right positional * expression from each list within that RTE, we will extract the partition * values for each row within the multi-row INSERT. 
*/ if (IsA(targetEntry->expr, Var)) { Var *partitionVar = (Var *) targetEntry->expr; RangeTblEntry *referencedRTE = NULL; ListCell *valuesListCell = NULL; Index ivIndex = 0; referencedRTE = rt_fetch(partitionVar->varno, query->rtable); foreach(valuesListCell, referencedRTE->values_lists) { InsertValues *insertValues = (InsertValues *) palloc(sizeof(InsertValues)); insertValues->rowValues = (List *) lfirst(valuesListCell); insertValues->partitionValueExpr = list_nth(insertValues->rowValues, (partitionVar->varattno - 1)); insertValues->shardId = INVALID_SHARD_ID; insertValues->listIndex = ivIndex; insertValuesList = lappend(insertValuesList, insertValues); ivIndex++; } } /* nothing's been found yet; this is a simple single-row INSERT */ if (insertValuesList == NIL) { InsertValues *insertValues = (InsertValues *) palloc(sizeof(InsertValues)); insertValues->rowValues = NIL; insertValues->partitionValueExpr = targetEntry->expr; insertValues->shardId = INVALID_SHARD_ID; insertValuesList = lappend(insertValuesList, insertValues); } return insertValuesList; } /* * MultiRouterPlannableQuery returns true if given query can be router plannable. * The query is router plannable if it is a modify query, or if its is a select * query issued on a hash partitioned distributed table, and it has a filter * to reduce number of shard pairs to one, and all shard pairs are located on * the same node. Router plannable checks for select queries can be turned off * by setting citus.enable_router_execution flag to false. */ static bool MultiRouterPlannableQuery(Query *query, RelationRestrictionContext *restrictionContext) { CmdType commandType = query->commandType; ListCell *relationRestrictionContextCell = NULL; if (commandType == CMD_INSERT || commandType == CMD_UPDATE || commandType == CMD_DELETE) { return true; } Assert(commandType == CMD_SELECT); if (!EnableRouterExecution) { return false; } if (query->hasForUpdate) { return false; } foreach(relationRestrictionContextCell, restrictionContext->relationRestrictionList) { RelationRestriction *relationRestriction = (RelationRestriction *) lfirst(relationRestrictionContextCell); RangeTblEntry *rte = relationRestriction->rte; if (rte->rtekind == RTE_RELATION) { /* only hash partitioned tables are supported */ Oid distributedTableId = rte->relid; char partitionMethod = PartitionMethod(distributedTableId); if (!(partitionMethod == DISTRIBUTE_BY_HASH || partitionMethod == DISTRIBUTE_BY_NONE || partitionMethod == DISTRIBUTE_BY_RANGE)) { return false; } } } return true; } /* * Copy a RelationRestrictionContext. Note that several subfields are copied * shallowly, for lack of copyObject support. * * Note that CopyRelationRestrictionContext copies the following fields per relation * context: index, relationId, distributedRelation, rte, relOptInfo->baserestrictinfo * and relOptInfo->joininfo. Also, the function shallowly copies plannerInfo and * prunedShardIntervalList which are read-only. All other parts of the relOptInfo * is also shallowly copied. 
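/*
 * Illustrative sketch (hypothetical, for exposition only): the grouping that
 * GroupInsertValuesByShardId() performs for a three-row multi-row INSERT
 * whose rows prune to two distinct shards. The shard ids are made up and the
 * row values are left empty for brevity.
 */
static void
ExampleGroupInsertRowsByShard(void)
{
	InsertValues *firstRow = (InsertValues *) palloc0(sizeof(InsertValues));
	InsertValues *secondRow = (InsertValues *) palloc0(sizeof(InsertValues));
	InsertValues *thirdRow = (InsertValues *) palloc0(sizeof(InsertValues));
	List *insertValuesList = NIL;
	List *modifyRouteList = NIL;

	firstRow->shardId = 102009;
	firstRow->listIndex = 0;

	secondRow->shardId = 102008;
	secondRow->listIndex = 1;

	thirdRow->shardId = 102008;
	thirdRow->listIndex = 2;

	insertValuesList = list_make3(firstRow, secondRow, thirdRow);
	modifyRouteList = GroupInsertValuesByShardId(insertValuesList);

	/* two routes, in ascending shard id order: 102008 (two rows), then 102009 */
	Assert(list_length(modifyRouteList) == 2);
}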
*/ RelationRestrictionContext * CopyRelationRestrictionContext(RelationRestrictionContext *oldContext) { RelationRestrictionContext *newContext = (RelationRestrictionContext *) palloc(sizeof(RelationRestrictionContext)); ListCell *relationRestrictionCell = NULL; newContext->hasDistributedRelation = oldContext->hasDistributedRelation; newContext->hasLocalRelation = oldContext->hasLocalRelation; newContext->allReferenceTables = oldContext->allReferenceTables; newContext->relationRestrictionList = NIL; foreach(relationRestrictionCell, oldContext->relationRestrictionList) { RelationRestriction *oldRestriction = (RelationRestriction *) lfirst(relationRestrictionCell); RelationRestriction *newRestriction = (RelationRestriction *) palloc0(sizeof(RelationRestriction)); newRestriction->index = oldRestriction->index; newRestriction->relationId = oldRestriction->relationId; newRestriction->distributedRelation = oldRestriction->distributedRelation; newRestriction->rte = copyObject(oldRestriction->rte); /* can't be copied, we copy (flatly) a RelOptInfo, and then decouple baserestrictinfo */ newRestriction->relOptInfo = palloc(sizeof(RelOptInfo)); memcpy(newRestriction->relOptInfo, oldRestriction->relOptInfo, sizeof(RelOptInfo)); newRestriction->relOptInfo->baserestrictinfo = copyObject(oldRestriction->relOptInfo->baserestrictinfo); newRestriction->relOptInfo->joininfo = copyObject(oldRestriction->relOptInfo->joininfo); /* not copyable, but readonly */ newRestriction->plannerInfo = oldRestriction->plannerInfo; newRestriction->prunedShardIntervalList = oldRestriction->prunedShardIntervalList; newContext->relationRestrictionList = lappend(newContext->relationRestrictionList, newRestriction); } return newContext; } /* * ErrorIfQueryHasModifyingCTE checks if the query contains modifying common table * expressions and errors out if it does. */ static DeferredErrorMessage * ErrorIfQueryHasModifyingCTE(Query *queryTree) { ListCell *cteCell = NULL; Assert(queryTree->commandType == CMD_SELECT); foreach(cteCell, queryTree->cteList) { CommonTableExpr *cte = (CommonTableExpr *) lfirst(cteCell); Query *cteQuery = (Query *) cte->ctequery; /* * Here we only check for command type of top level query. Normally there can be * nested CTE, however PostgreSQL dictates that data-modifying statements must * be at top level of CTE. Therefore it is OK to just check for top level. * Similarly, we do not need to check for subqueries. */ if (cteQuery->commandType != CMD_SELECT) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, "data-modifying statements are not supported in " "the WITH clauses of distributed queries", NULL, NULL); } } /* everything OK */ return NULL; } #if (PG_VERSION_NUM >= 100000) /* * get_all_actual_clauses * * Returns a list containing the bare clauses from 'restrictinfo_list'. * * This loses the distinction between regular and pseudoconstant clauses, * so be careful what you use it for. */ static List * get_all_actual_clauses(List *restrictinfo_list) { List *result = NIL; ListCell *l; foreach(l, restrictinfo_list) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(l); Assert(IsA(rinfo, RestrictInfo)); result = lappend(result, rinfo->clause); } return result; } #endif /* * CompareInsertValuesByShardId does what it says in the name. Used for sorting * InsertValues objects by their shard. 
*/ static int CompareInsertValuesByShardId(const void *leftElement, const void *rightElement) { InsertValues *leftValue = *((InsertValues **) leftElement); InsertValues *rightValue = *((InsertValues **) rightElement); int64 leftShardId = leftValue->shardId; int64 rightShardId = rightValue->shardId; Index leftIndex = leftValue->listIndex; Index rightIndex = rightValue->listIndex; if (leftShardId > rightShardId) { return 1; } else if (leftShardId < rightShardId) { return -1; } else { /* shard identifiers are the same, list index is secondary sort key */ if (leftIndex > rightIndex) { return 1; } else if (leftIndex < rightIndex) { return -1; } else { return 0; } } } citus-7.0.3/src/backend/distributed/planner/relation_restriction_equivalence.c000066400000000000000000001303341317107136600300070ustar00rootroot00000000000000/* * relation_restriction_equivalence.c * * This file contains functions helper functions for planning * queries with colocated tables and subqueries. * * Copyright (c) 2017-2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "distributed/multi_planner.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/pg_dist_partition.h" #include "distributed/relation_restriction_equivalence.h" #include "nodes/nodeFuncs.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "nodes/relation.h" #include "parser/parsetree.h" #include "optimizer/pathnode.h" static uint32 attributeEquivalenceId = 1; /* * AttributeEquivalenceClass * * Whenever we find an equality clause A = B, where both A and B originates from * relation attributes (i.e., not random expressions), we create an * AttributeEquivalenceClass to record this knowledge. If we later find another * equivalence B = C, we create another AttributeEquivalenceClass. Finally, we can * apply transitivity rules and generate a new AttributeEquivalenceClass which includes * A, B and C. * * Note that equality among the members are identified by the varattno and rteIdentity. */ typedef struct AttributeEquivalenceClass { uint32 equivalenceId; List *equivalentAttributes; } AttributeEquivalenceClass; /* * AttributeEquivalenceClassMember - one member expression of an * AttributeEquivalenceClass. The important thing to consider is that * the class member contains "rteIndentity" field. Note that each RTE_RELATION * is assigned a unique rteIdentity in AssignRTEIdentities() function. * * "varno" and "varattno" is directly used from a Var clause that is being added * to the attribute equivalence. Since we only use this class for relations, the member * also includes the relation id field. 
*/ typedef struct AttributeEquivalenceClassMember { Oid relationId; int rteIdentity; Index varno; AttrNumber varattno; } AttributeEquivalenceClassMember; static Var * FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex, Index *partitionKeyIndex); static bool EquivalenceListContainsRelationsEquality(List *attributeEquivalenceList, RelationRestrictionContext * restrictionContext); static List * GenerateAttributeEquivalencesForRelationRestrictions( RelationRestrictionContext *restrictionContext); static AttributeEquivalenceClass * AttributeEquivalenceClassForEquivalenceClass( EquivalenceClass *plannerEqClass, RelationRestriction *relationRestriction); static void AddToAttributeEquivalenceClass(AttributeEquivalenceClass ** attributeEquivalanceClass, PlannerInfo *root, Var *varToBeAdded); static void AddRteSubqueryToAttributeEquivalenceClass(AttributeEquivalenceClass * *attributeEquivalanceClass, RangeTblEntry * rangeTableEntry, PlannerInfo *root, Var *varToBeAdded); static Query * GetTargetSubquery(PlannerInfo *root, RangeTblEntry *rangeTableEntry, Var *varToBeAdded); static void AddUnionAllSetOperationsToAttributeEquivalenceClass( AttributeEquivalenceClass ** attributeEquivalanceClass, PlannerInfo *root, Var *varToBeAdded); static void AddUnionSetOperationsToAttributeEquivalenceClass(AttributeEquivalenceClass ** attributeEquivalenceClass, PlannerInfo *root, SetOperationStmt * setOperation, Var *varToBeAdded); static void AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass ** attrEquivalenceClass, RangeTblEntry *rangeTableEntry, Var *varToBeAdded); static Var * GetVarFromAssignedParam(List *parentPlannerParamList, Param *plannerParam); static List * GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext *joinRestrictionContext); static bool AttributeClassContainsAttributeClassMember(AttributeEquivalenceClassMember * inputMember, AttributeEquivalenceClass * attributeEquivalenceClass); static List * AddAttributeClassToAttributeClassList(List *attributeEquivalenceList, AttributeEquivalenceClass * attributeEquivalance); static bool AttributeEquivalancesAreEqual(AttributeEquivalenceClass * firstAttributeEquivalance, AttributeEquivalenceClass * secondAttributeEquivalance); static AttributeEquivalenceClass * GenerateCommonEquivalence(List * attributeEquivalenceList); static void ListConcatUniqueAttributeClassMemberLists(AttributeEquivalenceClass ** firstClass, AttributeEquivalenceClass * secondClass); static Index RelationRestrictionPartitionKeyIndex(RelationRestriction * relationRestriction); /* * SafeToPushdownUnionSubquery returns true if all the relations are returns * partition keys in the same ordinal position and there is no reference table * exists. * * Note that the function expects (and asserts) the input query to be a top * level union query defined by TopLevelUnionQuery(). * * Lastly, the function fails to produce correct output if the target lists contains * multiple partition keys on the target list such as the following: * * select count(*) from ( * select user_id, user_id from users_table * union * select 2, user_id from users_table) u; * * For the above query, although the second item in the target list make this query * safe to push down, the function would fail to return true. 
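/*
 * Illustrative sketch (hypothetical, for exposition only): how an equality
 * clause such as "lineitem.l_orderkey = orders.o_orderkey" is recorded as an
 * AttributeEquivalenceClass with two members, as described in the comments
 * above. The rte identities and attribute numbers are made up.
 */
static AttributeEquivalenceClass *
ExampleRecordJoinEquality(void)
{
	AttributeEquivalenceClass *equalityClass =
		palloc0(sizeof(AttributeEquivalenceClass));
	AttributeEquivalenceClassMember *leftMember =
		palloc0(sizeof(AttributeEquivalenceClassMember));
	AttributeEquivalenceClassMember *rightMember =
		palloc0(sizeof(AttributeEquivalenceClassMember));

	equalityClass->equivalenceId = attributeEquivalenceId++;

	leftMember->rteIdentity = 1;	/* hypothetical identity of lineitem's RTE */
	leftMember->varattno = 1;		/* hypothetical position of l_orderkey */

	rightMember->rteIdentity = 2;	/* hypothetical identity of orders' RTE */
	rightMember->varattno = 1;		/* hypothetical position of o_orderkey */

	equalityClass->equivalentAttributes = list_make2(leftMember, rightMember);

	return equalityClass;
}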
*/ bool SafeToPushdownUnionSubquery(RelationRestrictionContext *restrictionContext) { Index unionQueryPartitionKeyIndex = 0; AttributeEquivalenceClass *attributeEquivalance = palloc0(sizeof(AttributeEquivalenceClass)); ListCell *relationRestrictionCell = NULL; attributeEquivalance->equivalenceId = attributeEquivalenceId++; foreach(relationRestrictionCell, restrictionContext->relationRestrictionList) { RelationRestriction *relationRestriction = lfirst(relationRestrictionCell); Oid relationId = relationRestriction->relationId; Index partitionKeyIndex = InvalidAttrNumber; PlannerInfo *relationPlannerRoot = relationRestriction->plannerInfo; List *targetList = relationPlannerRoot->parse->targetList; List *appendRelList = relationPlannerRoot->append_rel_list; Var *varToBeAdded = NULL; TargetEntry *targetEntryToAdd = NULL; /* * Although it is not the best place to error out when facing with reference * tables, we decide to error out here. Otherwise, we need to add equality * for each reference table and it is more complex to implement. In the * future implementation all checks will be gathered to single point. */ if (PartitionMethod(relationId) == DISTRIBUTE_BY_NONE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot pushdown this query"), errdetail( "Reference tables are not allowed with set operations"))); } /* * We first check whether UNION ALLs are pulled up or not. Note that Postgres * planner creates AppendRelInfos per each UNION ALL query that is pulled up. * Then, postgres stores the related information in the append_rel_list on the * plannerInfo struct. */ if (appendRelList != NULL) { varToBeAdded = FindTranslatedVar(appendRelList, relationRestriction->relationId, relationRestriction->index, &partitionKeyIndex); /* union does not have partition key in the target list */ if (partitionKeyIndex == 0) { return false; } } else { partitionKeyIndex = RelationRestrictionPartitionKeyIndex(relationRestriction); /* union does not have partition key in the target list */ if (partitionKeyIndex == 0) { return false; } targetEntryToAdd = list_nth(targetList, partitionKeyIndex - 1); if (!IsA(targetEntryToAdd->expr, Var)) { return false; } varToBeAdded = (Var *) targetEntryToAdd->expr; } /* * If the first relation doesn't have partition key on the target * list of the query that the relation in, simply not allow to push down * the query. */ if (partitionKeyIndex == InvalidAttrNumber) { return false; } /* * We find the first relations partition key index in the target list. Later, * we check whether all the relations have partition keys in the * same position. */ if (unionQueryPartitionKeyIndex == InvalidAttrNumber) { unionQueryPartitionKeyIndex = partitionKeyIndex; } else if (unionQueryPartitionKeyIndex != partitionKeyIndex) { return false; } AddToAttributeEquivalenceClass(&attributeEquivalance, relationPlannerRoot, varToBeAdded); } return EquivalenceListContainsRelationsEquality(list_make1(attributeEquivalance), restrictionContext); } /* * FindTranslatedVar iterates on the appendRelList and tries to find a translated * child var identified by the relation id and the relation rte index. * * Note that postgres translates UNION ALL target list elements into translated_vars * list on the corresponding AppendRelInfo struct. For details, see the related * structs. * * The function returns NULL if it cannot find a translated var. 
*/ static Var * FindTranslatedVar(List *appendRelList, Oid relationOid, Index relationRteIndex, Index *partitionKeyIndex) { ListCell *appendRelCell = NULL; AppendRelInfo *targetAppendRelInfo = NULL; ListCell *translatedVarCell = NULL; AttrNumber childAttrNumber = 0; Var *relationPartitionKey = NULL; List *translaterVars = NULL; *partitionKeyIndex = 0; /* iterate on the queries that are part of UNION ALL subselects */ foreach(appendRelCell, appendRelList) { AppendRelInfo *appendRelInfo = (AppendRelInfo *) lfirst(appendRelCell); /* * We're only interested in the child rel that is equal to the * relation we're investigating. */ if (appendRelInfo->child_relid == relationRteIndex) { targetAppendRelInfo = appendRelInfo; break; } } /* we couldn't find the necessary append rel info */ if (targetAppendRelInfo == NULL) { return NULL; } relationPartitionKey = DistPartitionKey(relationOid); translaterVars = targetAppendRelInfo->translated_vars; foreach(translatedVarCell, translaterVars) { Node *targetNode = (Node *) lfirst(translatedVarCell); Var *targetVar = NULL; childAttrNumber++; if (!IsA(targetNode, Var)) { continue; } targetVar = (Var *) lfirst(translatedVarCell); if (targetVar->varno == relationRteIndex && targetVar->varattno == relationPartitionKey->varattno) { *partitionKeyIndex = childAttrNumber; return targetVar; } } return NULL; } /* * RestrictionEquivalenceForPartitionKeys aims to deduce whether each of the RTE_RELATION * is joined with at least one another RTE_RELATION on their partition keys. If each * RTE_RELATION follows the above rule, we can conclude that all RTE_RELATIONs are * joined on their partition keys. * * The function returns true if all relations are joined on their partition keys. * Otherwise, the function returns false. In order to support reference tables * with subqueries, equality between attributes of reference tables and partition * key of distributed tables are also considered. * * In order to do that, we invented a new equivalence class namely: * AttributeEquivalenceClass. In very simple words, a AttributeEquivalenceClass is * identified by an unique id and consists of a list of AttributeEquivalenceMembers. * * Each AttributeEquivalenceMember is designed to identify attributes uniquely within the * whole query. The necessity of this arise since varno attributes are defined within * a single level of a query. Instead, here we want to identify each RTE_RELATION uniquely * and try to find equality among each RTE_RELATION's partition key. * * Each equality among RTE_RELATION is saved using an AttributeEquivalenceClass where * each member attribute is identified by a AttributeEquivalenceMember. In the final * step, we try generate a common attribute equivalence class that holds as much as * AttributeEquivalenceMembers whose attributes are a partition keys. * * RestrictionEquivalenceForPartitionKeys uses both relation restrictions and join restrictions * to find as much as information that Postgres planner provides to extensions. 
For the * details of the usage, please see GenerateAttributeEquivalencesForRelationRestrictions() * and GenerateAttributeEquivalencesForJoinRestrictions() */ bool RestrictionEquivalenceForPartitionKeys(PlannerRestrictionContext * plannerRestrictionContext) { RelationRestrictionContext *restrictionContext = plannerRestrictionContext->relationRestrictionContext; JoinRestrictionContext *joinRestrictionContext = plannerRestrictionContext->joinRestrictionContext; List *relationRestrictionAttributeEquivalenceList = NIL; List *joinRestrictionAttributeEquivalenceList = NIL; List *allAttributeEquivalenceList = NIL; uint32 totalRelationCount = list_length(restrictionContext->relationRestrictionList); /* * If the query includes only one relation, we should not check the partition * column equality. Single table should not need to fetch data from other nodes * except it's own node(s). */ if (totalRelationCount == 1) { return true; } /* reset the equivalence id counter per call to prevent overflows */ attributeEquivalenceId = 1; relationRestrictionAttributeEquivalenceList = GenerateAttributeEquivalencesForRelationRestrictions(restrictionContext); joinRestrictionAttributeEquivalenceList = GenerateAttributeEquivalencesForJoinRestrictions(joinRestrictionContext); allAttributeEquivalenceList = list_concat(relationRestrictionAttributeEquivalenceList, joinRestrictionAttributeEquivalenceList); return EquivalenceListContainsRelationsEquality(allAttributeEquivalenceList, restrictionContext); } /* * EquivalenceListContainsRelationsEquality gets a list of attributed equivalence * list and a relation restriction context. The function first generates a common * equivalence class out of the attributeEquivalenceList. Later, the function checks * whether all the relations exists in the common equivalence class. * */ static bool EquivalenceListContainsRelationsEquality(List *attributeEquivalenceList, RelationRestrictionContext *restrictionContext) { AttributeEquivalenceClass *commonEquivalenceClass = NULL; ListCell *commonEqClassCell = NULL; ListCell *relationRestrictionCell = NULL; Relids commonRteIdentities = NULL; /* * In general we're trying to expand existing the equivalence classes to find a * common equivalence class. The main goal is to test whether this main class * contains all partition keys of the existing relations. */ commonEquivalenceClass = GenerateCommonEquivalence(attributeEquivalenceList); /* add the rte indexes of relations to a bitmap */ foreach(commonEqClassCell, commonEquivalenceClass->equivalentAttributes) { AttributeEquivalenceClassMember *classMember = (AttributeEquivalenceClassMember *) lfirst(commonEqClassCell); int rteIdentity = classMember->rteIdentity; commonRteIdentities = bms_add_member(commonRteIdentities, rteIdentity); } /* check whether all relations exists in the main restriction list */ foreach(relationRestrictionCell, restrictionContext->relationRestrictionList) { RelationRestriction *relationRestriction = (RelationRestriction *) lfirst(relationRestrictionCell); int rteIdentity = GetRTEIdentity(relationRestriction->rte); if (!bms_is_member(rteIdentity, commonRteIdentities)) { return false; } } return true; } /* * GenerateAttributeEquivalencesForRelationRestrictions gets a relation restriction * context and returns a list of AttributeEquivalenceClass. 
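/*
 * Illustrative sketch (hypothetical, for exposition only): how a planner
 * level caller typically consumes RestrictionEquivalenceForPartitionKeys()
 * when deciding whether a join between distributed tables can be pushed
 * down. The helper name is made up.
 */
static bool
ExampleJoinedOnPartitionKeys(PlannerRestrictionContext *plannerRestrictionContext)
{
	if (!RestrictionEquivalenceForPartitionKeys(plannerRestrictionContext))
	{
		/* at least one distributed table is not joined on its partition key */
		return false;
	}

	/* single-relation queries and partition-key joins both end up here */
	return true;
}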
* * The algorithm followed can be summarized as below: * * - Per relation restriction * - Per plannerInfo's eq_class * - Create an AttributeEquivalenceClass * - Add all Vars that appear in the plannerInfo's * eq_class to the AttributeEquivalenceClass * - While doing that, consider LATERAL vars as well. * See GetVarFromAssignedParam() for the details. Note * that we're using parentPlannerInfo while adding the * LATERAL vars given that we rely on that plannerInfo. * */ static List * GenerateAttributeEquivalencesForRelationRestrictions(RelationRestrictionContext *restrictionContext) { List *attributeEquivalenceList = NIL; ListCell *relationRestrictionCell = NULL; foreach(relationRestrictionCell, restrictionContext->relationRestrictionList) { RelationRestriction *relationRestriction = (RelationRestriction *) lfirst(relationRestrictionCell); List *equivalenceClasses = relationRestriction->plannerInfo->eq_classes; ListCell *equivalenceClassCell = NULL; foreach(equivalenceClassCell, equivalenceClasses) { EquivalenceClass *plannerEqClass = (EquivalenceClass *) lfirst(equivalenceClassCell); AttributeEquivalenceClass *attributeEquivalance = AttributeEquivalenceClassForEquivalenceClass(plannerEqClass, relationRestriction); attributeEquivalenceList = AddAttributeClassToAttributeClassList(attributeEquivalenceList, attributeEquivalance); } } return attributeEquivalenceList; } /* * AttributeEquivalenceClassForEquivalenceClass is a helper function for * GenerateAttributeEquivalencesForRelationRestrictions. The function takes an * EquivalenceClass and the relation restriction that the equivalence class * belongs to. The function returns an AttributeEquivalenceClass that is composed * of ec_members that are simple Var references. * * The function also takes case of LATERAL joins by simply replacing the PARAM_EXEC * with the corresponding expression. */ static AttributeEquivalenceClass * AttributeEquivalenceClassForEquivalenceClass(EquivalenceClass *plannerEqClass, RelationRestriction *relationRestriction) { AttributeEquivalenceClass *attributeEquivalance = palloc0(sizeof(AttributeEquivalenceClass)); ListCell *equivilanceMemberCell = NULL; PlannerInfo *plannerInfo = relationRestriction->plannerInfo; attributeEquivalance->equivalenceId = attributeEquivalenceId++; foreach(equivilanceMemberCell, plannerEqClass->ec_members) { EquivalenceMember *equivalenceMember = (EquivalenceMember *) lfirst(equivilanceMemberCell); Node *equivalenceNode = strip_implicit_coercions( (Node *) equivalenceMember->em_expr); Expr *strippedEquivalenceExpr = (Expr *) equivalenceNode; Var *expressionVar = NULL; if (IsA(strippedEquivalenceExpr, Param)) { List *parentParamList = relationRestriction->parentPlannerParamList; Param *equivalenceParam = (Param *) strippedEquivalenceExpr; expressionVar = GetVarFromAssignedParam(parentParamList, equivalenceParam); if (expressionVar) { AddToAttributeEquivalenceClass(&attributeEquivalance, relationRestriction->parentPlannerInfo, expressionVar); } } else if (IsA(strippedEquivalenceExpr, Var)) { expressionVar = (Var *) strippedEquivalenceExpr; AddToAttributeEquivalenceClass(&attributeEquivalance, plannerInfo, expressionVar); } } return attributeEquivalance; } /* * GetVarFromAssignedParam returns the Var that is assigned to the given * plannerParam if its kind is PARAM_EXEC. * * If the paramkind is not equal to PARAM_EXEC the function returns NULL. Similarly, * if there is no var that the given param is assigned to, the function returns NULL. 
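/*
 * Illustrative sketch (hypothetical, for exposition only): the parameter
 * resolution GetVarFromAssignedParam() performs. A hand-built PARAM_EXEC
 * parameter is mapped back to the lateral Var it was assigned from through
 * the parent planner's plan_params list. All values below are made up.
 */
static void
ExampleResolveLateralParam(void)
{
	PlannerParamItem *paramItem = makeNode(PlannerParamItem);
	Param *execParam = makeNode(Param);
	Var *lateralVar = makeNode(Var);
	Var *resolvedVar = NULL;

	lateralVar->varno = 1;
	lateralVar->varattno = 1;

	paramItem->paramId = 0;
	paramItem->item = (Node *) lateralVar;

	execParam->paramkind = PARAM_EXEC;
	execParam->paramid = 0;

	resolvedVar = GetVarFromAssignedParam(list_make1(paramItem), execParam);
	Assert(resolvedVar == lateralVar);
}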
* * Rationale behind this function: * * While iterating through the equivalence classes of RTE_RELATIONs, we * observe that there are PARAM type of equivalence member expressions for * the RTE_RELATIONs which actually belong to lateral vars from the other query * levels. * * We're also keeping track of the RTE_RELATION's parent_root's * plan_param list which is expected to hold the parameters that are required * for its lower level queries as it is documented: * * plan_params contains the expressions that this query level needs to * make available to a lower query level that is currently being planned. * * This function is a helper function to iterate through the parent query's * plan_params and looks for the param that the equivalence member has. The * comparison is done via the "paramid" field. Finally, if the found parameter's * item is a Var, we conclude that Postgres standard_planner replaced the Var * with the Param on assign_param_for_var() function * @src/backend/optimizer//plan/subselect.c. * */ static Var * GetVarFromAssignedParam(List *parentPlannerParamList, Param *plannerParam) { Var *assignedVar = NULL; ListCell *plannerParameterCell = NULL; Assert(plannerParam != NULL); /* we're only interested in parameters that Postgres added for execution */ if (plannerParam->paramkind != PARAM_EXEC) { return NULL; } foreach(plannerParameterCell, parentPlannerParamList) { PlannerParamItem *plannerParamItem = (PlannerParamItem *) lfirst(plannerParameterCell); if (plannerParamItem->paramId != plannerParam->paramid) { continue; } /* TODO: Should we consider PlaceHolderVar?? */ if (!IsA(plannerParamItem->item, Var)) { continue; } assignedVar = (Var *) plannerParamItem->item; break; } return assignedVar; } /* * GenerateCommonEquivalence gets a list of unrelated AttributeEquiavalanceClass * whose all members are partition keys or a column of reference table. * * With the equivalence classes, the function follows the algorithm * outlined below: * * - Add the first equivalence class to the common equivalence class * - Then, iterate on the remaining equivalence classes * - If any of the members equal to the common equivalence class * add all the members of the equivalence class to the common * class * - Start the iteration from the beginning. The reason is that * in case any of the classes we've passed is equivalent to the * newly added one. To optimize the algorithm, we utilze the * equivalence class ids and skip the ones that are already added. * - Finally, return the common equivalence class. */ static AttributeEquivalenceClass * GenerateCommonEquivalence(List *attributeEquivalenceList) { AttributeEquivalenceClass *commonEquivalenceClass = NULL; AttributeEquivalenceClass *firstEquivalenceClass = NULL; Bitmapset *addedEquivalenceIds = NULL; uint32 equivalenceListSize = list_length(attributeEquivalenceList); uint32 equivalenceClassIndex = 0; commonEquivalenceClass = palloc0(sizeof(AttributeEquivalenceClass)); commonEquivalenceClass->equivalenceId = 0; /* think more on this. 
*/ if (equivalenceListSize < 1) { return commonEquivalenceClass; } /* setup the initial state of the main equivalence class */ firstEquivalenceClass = linitial(attributeEquivalenceList); commonEquivalenceClass->equivalentAttributes = firstEquivalenceClass->equivalentAttributes; addedEquivalenceIds = bms_add_member(addedEquivalenceIds, firstEquivalenceClass->equivalenceId); for (; equivalenceClassIndex < equivalenceListSize; ++equivalenceClassIndex) { AttributeEquivalenceClass *currentEquivalenceClass = list_nth(attributeEquivalenceList, equivalenceClassIndex); ListCell *equivalenceMemberCell = NULL; /* * This is an optimization. If we already added the same equivalence class, * we could skip it since we've already added all the relevant equivalence * members. */ if (bms_is_member(currentEquivalenceClass->equivalenceId, addedEquivalenceIds)) { continue; } foreach(equivalenceMemberCell, currentEquivalenceClass->equivalentAttributes) { AttributeEquivalenceClassMember *attributeEquialanceMember = (AttributeEquivalenceClassMember *) lfirst(equivalenceMemberCell); if (AttributeClassContainsAttributeClassMember(attributeEquialanceMember, commonEquivalenceClass)) { ListConcatUniqueAttributeClassMemberLists(&commonEquivalenceClass, currentEquivalenceClass); addedEquivalenceIds = bms_add_member(addedEquivalenceIds, currentEquivalenceClass-> equivalenceId); /* * It seems inefficient to start from the beginning. * But, we should somehow restart from the beginning to test that * whether the already skipped ones are equal or not. */ equivalenceClassIndex = 0; break; } } } return commonEquivalenceClass; } /* * ListConcatUniqueAttributeClassMemberLists gets two attribute equivalence classes. It * basically concatenates attribute equivalence member lists uniquely and updates the * firstClass' member list with the list. * * Basically, the function iterates over the secondClass' member list and checks whether * it already exists in the firstClass' member list. If not, the member is added to the * firstClass. */ static void ListConcatUniqueAttributeClassMemberLists(AttributeEquivalenceClass **firstClass, AttributeEquivalenceClass *secondClass) { ListCell *equivalenceClassMemberCell = NULL; List *equivalenceMemberList = secondClass->equivalentAttributes; foreach(equivalenceClassMemberCell, equivalenceMemberList) { AttributeEquivalenceClassMember *newEqMember = (AttributeEquivalenceClassMember *) lfirst(equivalenceClassMemberCell); if (AttributeClassContainsAttributeClassMember(newEqMember, *firstClass)) { continue; } (*firstClass)->equivalentAttributes = lappend((*firstClass)->equivalentAttributes, newEqMember); } } /* * GenerateAttributeEquivalencesForJoinRestrictions gets a join restriction * context and returns a list of AttrributeEquivalenceClass. 
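/*
 * Illustrative sketch (hypothetical, for exposition only): the transitivity
 * step performed by GenerateCommonEquivalence(). Given the classes {A, B}
 * and {B, C}, the common class ends up containing A, B and C. The rte
 * identities and attribute numbers below are made up, as are the helper
 * names.
 */
static AttributeEquivalenceClassMember *
ExampleMakeMember(int rteIdentity, AttrNumber varattno)
{
	AttributeEquivalenceClassMember *member =
		palloc0(sizeof(AttributeEquivalenceClassMember));

	member->rteIdentity = rteIdentity;
	member->varattno = varattno;

	return member;
}

static void
ExampleTransitiveEquivalence(void)
{
	AttributeEquivalenceClass *firstClass = palloc0(sizeof(AttributeEquivalenceClass));
	AttributeEquivalenceClass *secondClass = palloc0(sizeof(AttributeEquivalenceClass));
	AttributeEquivalenceClass *commonClass = NULL;

	/* A = B */
	firstClass->equivalenceId = attributeEquivalenceId++;
	firstClass->equivalentAttributes =
		list_make2(ExampleMakeMember(1, 1), ExampleMakeMember(2, 1));

	/* B = C */
	secondClass->equivalenceId = attributeEquivalenceId++;
	secondClass->equivalentAttributes =
		list_make2(ExampleMakeMember(2, 1), ExampleMakeMember(3, 1));

	commonClass = GenerateCommonEquivalence(list_make2(firstClass, secondClass));

	/* A, B and C all end up in the same common equivalence class */
	Assert(list_length(commonClass->equivalentAttributes) == 3);
}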
* * The algorithm followed can be summarized as below: * * - Per join restriction * - Per RestrictInfo of the join restriction * - Check whether the join restriction is in the form of (Var1 = Var2) * - Create an AttributeEquivalenceClass * - Add both Var1 and Var2 to the AttributeEquivalenceClass */ static List * GenerateAttributeEquivalencesForJoinRestrictions(JoinRestrictionContext * joinRestrictionContext) { List *attributeEquivalenceList = NIL; ListCell *joinRestrictionCell = NULL; foreach(joinRestrictionCell, joinRestrictionContext->joinRestrictionList) { JoinRestriction *joinRestriction = (JoinRestriction *) lfirst(joinRestrictionCell); ListCell *restrictionInfoList = NULL; foreach(restrictionInfoList, joinRestriction->joinRestrictInfoList) { RestrictInfo *rinfo = (RestrictInfo *) lfirst(restrictionInfoList); OpExpr *restrictionOpExpr = NULL; Node *leftNode = NULL; Node *rightNode = NULL; Expr *strippedLeftExpr = NULL; Expr *strippedRightExpr = NULL; Var *leftVar = NULL; Var *rightVar = NULL; Expr *restrictionClause = rinfo->clause; AttributeEquivalenceClass *attributeEquivalance = NULL; if (!IsA(restrictionClause, OpExpr)) { continue; } restrictionOpExpr = (OpExpr *) restrictionClause; if (list_length(restrictionOpExpr->args) != 2) { continue; } if (!OperatorImplementsEquality(restrictionOpExpr->opno)) { continue; } leftNode = linitial(restrictionOpExpr->args); rightNode = lsecond(restrictionOpExpr->args); /* we also don't want implicit coercions */ strippedLeftExpr = (Expr *) strip_implicit_coercions((Node *) leftNode); strippedRightExpr = (Expr *) strip_implicit_coercions((Node *) rightNode); if (!(IsA(strippedLeftExpr, Var) && IsA(strippedRightExpr, Var))) { continue; } leftVar = (Var *) strippedLeftExpr; rightVar = (Var *) strippedRightExpr; attributeEquivalance = palloc0(sizeof(AttributeEquivalenceClass)); attributeEquivalance->equivalenceId = attributeEquivalenceId++; AddToAttributeEquivalenceClass(&attributeEquivalance, joinRestriction->plannerInfo, leftVar); AddToAttributeEquivalenceClass(&attributeEquivalance, joinRestriction->plannerInfo, rightVar); attributeEquivalenceList = AddAttributeClassToAttributeClassList(attributeEquivalenceList, attributeEquivalance); } } return attributeEquivalenceList; } /* * AddToAttributeEquivalenceClass is a key function for building the attribute * equivalences. The function gets a plannerInfo, var and attribute equivalence * class. It searches for the RTE_RELATION(s) that the input var belongs to and * adds the found Var(s) to the input attribute equivalence class. * * Note that the input var could come from a subquery (i.e., not directly from an * RTE_RELATION). That's the reason we recursively call the function until the * RTE_RELATION found. 
* * The algorithm could be summarized as follows: * * - If the RTE that corresponds to a relation * - Generate an AttributeEquivalenceMember and add to the input * AttributeEquivalenceClass * - If the RTE that corresponds to a subquery * - If the RTE that corresponds to a UNION ALL subquery * - Iterate on each of the appendRels (i.e., each of the UNION ALL query) * - Recursively add all children of the set operation's * corresponding target entries * - If the corresponding subquery entry is a UNION set operation * - Recursively add all children of the set operation's * corresponding target entries * - If the corresponding subquery is a regular subquery (i.e., No set operations) * - Recursively try to add the corresponding target entry to the * equivalence class */ static void AddToAttributeEquivalenceClass(AttributeEquivalenceClass **attributeEquivalanceClass, PlannerInfo *root, Var *varToBeAdded) { RangeTblEntry *rangeTableEntry = NULL; /* punt if it's a whole-row var rather than a plain column reference */ if (varToBeAdded->varattno == InvalidAttrNumber) { return; } /* we also don't want to process ctid, tableoid etc */ if (varToBeAdded->varattno < InvalidAttrNumber) { return; } rangeTableEntry = root->simple_rte_array[varToBeAdded->varno]; if (rangeTableEntry->rtekind == RTE_RELATION) { AddRteRelationToAttributeEquivalenceClass(attributeEquivalanceClass, rangeTableEntry, varToBeAdded); } else if (rangeTableEntry->rtekind == RTE_SUBQUERY) { AddRteSubqueryToAttributeEquivalenceClass(attributeEquivalanceClass, rangeTableEntry, root, varToBeAdded); } } /* * AddRteSubqueryToAttributeEquivalenceClass adds the given var to the given * attribute equivalence class. * * The main algorithm is outlined in AddToAttributeEquivalenceClass(). */ static void AddRteSubqueryToAttributeEquivalenceClass(AttributeEquivalenceClass **attributeEquivalanceClass, RangeTblEntry *rangeTableEntry, PlannerInfo *root, Var *varToBeAdded) { RelOptInfo *baseRelOptInfo = find_base_rel(root, varToBeAdded->varno); TargetEntry *subqueryTargetEntry = NULL; Query *targetSubquery = GetTargetSubquery(root, rangeTableEntry, varToBeAdded); subqueryTargetEntry = get_tle_by_resno(targetSubquery->targetList, varToBeAdded->varattno); /* if we fail to find corresponding target entry, do not proceed */ if (subqueryTargetEntry == NULL || subqueryTargetEntry->resjunk) { return; } /* we're only interested in Vars */ if (!IsA(subqueryTargetEntry->expr, Var)) { return; } varToBeAdded = (Var *) subqueryTargetEntry->expr; /* * "inh" flag is set either when inheritance or "UNION ALL" exists in the * subquery. Here we're only interested in the "UNION ALL" case. * * Else, we check one more thing: Does the subquery contain a "UNION" query. * If so, we recursively traverse all "UNION" tree and add the corresponding * target list elements to the attribute equivalence. * * Finally, if it is a regular subquery (i.e., does not contain UNION or UNION ALL), * we simply recurse to find the corresponding RTE_RELATION to add to the * equivalence class. * * Note that we're treating "UNION" and "UNION ALL" clauses differently given * that postgres planner process/plans them separately. 
*/ if (rangeTableEntry->inh) { AddUnionAllSetOperationsToAttributeEquivalenceClass(attributeEquivalanceClass, root, varToBeAdded); } else if (targetSubquery->setOperations) { AddUnionSetOperationsToAttributeEquivalenceClass(attributeEquivalanceClass, baseRelOptInfo->subroot, (SetOperationStmt *) targetSubquery->setOperations, varToBeAdded); } else if (varToBeAdded && IsA(varToBeAdded, Var) && varToBeAdded->varlevelsup == 0) { AddToAttributeEquivalenceClass(attributeEquivalanceClass, baseRelOptInfo->subroot, varToBeAdded); } } /* * GetTargetSubquery returns the corresponding subquery for the given planner root, * range table entry and the var. * * The aim of this function is to simplify extracting the subquery in case of "UNION ALL" * queries. */ static Query * GetTargetSubquery(PlannerInfo *root, RangeTblEntry *rangeTableEntry, Var *varToBeAdded) { Query *targetSubquery = NULL; /* * For subqueries other than "UNION ALL", find the corresponding targetSubquery. See * the details of how we process subqueries in the below comments. */ if (!rangeTableEntry->inh) { RelOptInfo *baseRelOptInfo = find_base_rel(root, varToBeAdded->varno); /* If the targetSubquery hasn't been planned yet, we have to punt */ if (baseRelOptInfo->subroot == NULL) { return NULL; } Assert(IsA(baseRelOptInfo->subroot, PlannerInfo)); targetSubquery = baseRelOptInfo->subroot->parse; Assert(IsA(targetSubquery, Query)); } else { targetSubquery = rangeTableEntry->subquery; } return targetSubquery; } /* * AddUnionAllSetOperationsToAttributeEquivalenceClass recursively iterates on all the * append rels, sets the varno's accordingly and adds the * var the given equivalence class. */ static void AddUnionAllSetOperationsToAttributeEquivalenceClass(AttributeEquivalenceClass ** attributeEquivalanceClass, PlannerInfo *root, Var *varToBeAdded) { List *appendRelList = root->append_rel_list; ListCell *appendRelCell = NULL; /* iterate on the queries that are part of UNION ALL subqueries */ foreach(appendRelCell, appendRelList) { AppendRelInfo *appendRelInfo = (AppendRelInfo *) lfirst(appendRelCell); /* * We're only interested in UNION ALL clauses and parent_reloid is invalid * only for UNION ALL (i.e., equals to a legitimate Oid for inheritance) */ if (appendRelInfo->parent_reloid != InvalidOid) { continue; } /* set the varno accordingly for this specific child */ varToBeAdded->varno = appendRelInfo->child_relid; AddToAttributeEquivalenceClass(attributeEquivalanceClass, root, varToBeAdded); } } /* * AddUnionSetOperationsToAttributeEquivalenceClass recursively iterates on all the * setOperations and adds each corresponding target entry to the given equivalence * class. * * Although the function silently accepts INTERSECT and EXPECT set operations, they are * rejected later in the planning. We prefer this behavior to provide better error * messages. 
*/ static void AddUnionSetOperationsToAttributeEquivalenceClass(AttributeEquivalenceClass ** attributeEquivalenceClass, PlannerInfo *root, SetOperationStmt *setOperation, Var *varToBeAdded) { List *rangeTableIndexList = NIL; ListCell *rangeTableIndexCell = NULL; ExtractRangeTableIndexWalker((Node *) setOperation, &rangeTableIndexList); foreach(rangeTableIndexCell, rangeTableIndexList) { int rangeTableIndex = lfirst_int(rangeTableIndexCell); varToBeAdded->varno = rangeTableIndex; AddToAttributeEquivalenceClass(attributeEquivalenceClass, root, varToBeAdded); } } /* * AddRteRelationToAttributeEquivalenceClass adds the given var to the given equivalence * class using the rteIdentity provided by the rangeTableEntry. Note that * rteIdentities are only assigned to RTE_RELATIONs and this function asserts * the input rte to be an RTE_RELATION. */ static void AddRteRelationToAttributeEquivalenceClass(AttributeEquivalenceClass ** attrEquivalenceClass, RangeTblEntry *rangeTableEntry, Var *varToBeAdded) { AttributeEquivalenceClassMember *attributeEqMember = NULL; Oid relationId = rangeTableEntry->relid; Var *relationPartitionKey = DistPartitionKey(relationId); Assert(rangeTableEntry->rtekind == RTE_RELATION); if (PartitionMethod(relationId) != DISTRIBUTE_BY_NONE && relationPartitionKey->varattno != varToBeAdded->varattno) { return; } attributeEqMember = palloc0(sizeof(AttributeEquivalenceClassMember)); attributeEqMember->varattno = varToBeAdded->varattno; attributeEqMember->varno = varToBeAdded->varno; attributeEqMember->rteIdentity = GetRTEIdentity(rangeTableEntry); attributeEqMember->relationId = rangeTableEntry->relid; (*attrEquivalenceClass)->equivalentAttributes = lappend((*attrEquivalenceClass)->equivalentAttributes, attributeEqMember); } /* * AttributeClassContainsAttributeClassMember returns true if it the input class member * is already exists in the attributeEquivalenceClass. An equality is identified by the * varattno and rteIdentity. */ static bool AttributeClassContainsAttributeClassMember(AttributeEquivalenceClassMember *inputMember, AttributeEquivalenceClass * attributeEquivalenceClass) { ListCell *classCell = NULL; foreach(classCell, attributeEquivalenceClass->equivalentAttributes) { AttributeEquivalenceClassMember *memberOfClass = (AttributeEquivalenceClassMember *) lfirst(classCell); if (memberOfClass->rteIdentity == inputMember->rteIdentity && memberOfClass->varattno == inputMember->varattno) { return true; } } return false; } /* * AddAttributeClassToAttributeClassList checks for certain properties of the * input attributeEquivalance before adding it to the attributeEquivalenceList. * * Firstly, the function skips adding NULL attributeEquivalance to the list. * Secondly, since an attribute equivalence class with a single member does * not contribute to our purposes, we skip such classed adding to the list. * Finally, we don't want to add an equivalence class whose exact equivalent * already exists in the list. */ static List * AddAttributeClassToAttributeClassList(List *attributeEquivalenceList, AttributeEquivalenceClass *attributeEquivalance) { List *equivalentAttributes = NULL; ListCell *attributeEquivalanceCell = NULL; if (attributeEquivalance == NULL) { return attributeEquivalenceList; } /* * Note that in some cases we allow having equivalentAttributes with zero or * one elements. For the details, see AddToAttributeEquivalenceClass(). 
*/ equivalentAttributes = attributeEquivalance->equivalentAttributes; if (list_length(equivalentAttributes) < 2) { return attributeEquivalenceList; } /* we don't want to add an attributeEquivalance which already exists */ foreach(attributeEquivalanceCell, attributeEquivalenceList) { AttributeEquivalenceClass *currentAttributeEquivalance = (AttributeEquivalenceClass *) lfirst(attributeEquivalanceCell); if (AttributeEquivalancesAreEqual(currentAttributeEquivalance, attributeEquivalance)) { return attributeEquivalenceList; } } attributeEquivalenceList = lappend(attributeEquivalenceList, attributeEquivalance); return attributeEquivalenceList; } /* * AttributeEquivalancesAreEqual returns true if both input attribute equivalence * classes contains exactly the same members. */ static bool AttributeEquivalancesAreEqual(AttributeEquivalenceClass *firstAttributeEquivalance, AttributeEquivalenceClass *secondAttributeEquivalance) { List *firstEquivalenceMemberList = firstAttributeEquivalance->equivalentAttributes; List *secondEquivalenceMemberList = secondAttributeEquivalance->equivalentAttributes; ListCell *firstAttributeEquivalanceCell = NULL; ListCell *secondAttributeEquivalanceCell = NULL; if (list_length(firstEquivalenceMemberList) != list_length( secondEquivalenceMemberList)) { return false; } foreach(firstAttributeEquivalanceCell, firstEquivalenceMemberList) { AttributeEquivalenceClassMember *firstEqMember = (AttributeEquivalenceClassMember *) lfirst(firstAttributeEquivalanceCell); bool foundAnEquivalentMember = false; foreach(secondAttributeEquivalanceCell, secondEquivalenceMemberList) { AttributeEquivalenceClassMember *secondEqMember = (AttributeEquivalenceClassMember *) lfirst( secondAttributeEquivalanceCell); if (firstEqMember->rteIdentity == secondEqMember->rteIdentity && firstEqMember->varattno == secondEqMember->varattno) { foundAnEquivalentMember = true; break; } } /* we couldn't find an equivalent member */ if (!foundAnEquivalentMember) { return false; } } return true; } /* * ContainsUnionSubquery gets a queryTree and returns true if the query * contains * - a subquery with UNION set operation * - no joins above the UNION set operation in the query tree * * Note that the function allows top level unions being wrapped into aggregations * queries and/or simple projection queries that only selects some fields from * the lower level queries. * * If there exists joins before the set operations, the function returns false. * Similarly, if the query does not contain any union set operations, the * function returns false. 
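 *
 * Two hypothetical examples: a query of the form
 * "SELECT count(*) FROM (SELECT a FROM t1 UNION SELECT a FROM t2) u"
 * returns true, whereas
 * "SELECT ... FROM (SELECT a FROM t1 UNION SELECT a FROM t2) u, t3 ..."
 * returns false because the join sits above the set operation.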
*/ bool ContainsUnionSubquery(Query *queryTree) { List *rangeTableList = queryTree->rtable; Node *setOperations = queryTree->setOperations; List *joinTreeTableIndexList = NIL; Index subqueryRteIndex = 0; uint32 joiningRangeTableCount = 0; RangeTblEntry *rangeTableEntry = NULL; Query *subqueryTree = NULL; ExtractRangeTableIndexWalker((Node *) queryTree->jointree, &joinTreeTableIndexList); joiningRangeTableCount = list_length(joinTreeTableIndexList); /* don't allow joins on top of unions */ if (joiningRangeTableCount > 1) { return false; } subqueryRteIndex = linitial_int(joinTreeTableIndexList); rangeTableEntry = rt_fetch(subqueryRteIndex, rangeTableList); if (rangeTableEntry->rtekind != RTE_SUBQUERY) { return false; } subqueryTree = rangeTableEntry->subquery; setOperations = subqueryTree->setOperations; if (setOperations != NULL) { SetOperationStmt *setOperationStatement = (SetOperationStmt *) setOperations; /* * Note that the set operation tree is traversed elsewhere for ensuring * that we only support UNIONs. */ if (setOperationStatement->op != SETOP_UNION) { return false; } return true; } return ContainsUnionSubquery(subqueryTree); } /* * RelationRestrictionPartitionKeyIndex gets a relation restriction and finds the * index that the partition key of the relation exists in the query. The query is * found in the planner info of the relation restriction. */ static Index RelationRestrictionPartitionKeyIndex(RelationRestriction *relationRestriction) { PlannerInfo *relationPlannerRoot = NULL; Query *relationPlannerParseQuery = NULL; List *relationTargetList = NIL; ListCell *targetEntryCell = NULL; Index partitionKeyTargetAttrIndex = 0; relationPlannerRoot = relationRestriction->plannerInfo; relationPlannerParseQuery = relationPlannerRoot->parse; relationTargetList = relationPlannerParseQuery->targetList; foreach(targetEntryCell, relationTargetList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); Expr *targetExpression = targetEntry->expr; partitionKeyTargetAttrIndex++; if (!targetEntry->resjunk && IsPartitionColumn(targetExpression, relationPlannerParseQuery) && IsA(targetExpression, Var)) { Var *targetColumn = (Var *) targetExpression; if (targetColumn->varno == relationRestriction->index) { return partitionKeyTargetAttrIndex; } } } return InvalidAttrNumber; } /* * RelationIdList returns list of unique relation ids in query tree. */ List * RelationIdList(Query *query) { List *rangeTableList = NIL; List *tableEntryList = NIL; List *relationIdList = NIL; ListCell *tableEntryCell = NULL; ExtractRangeTableRelationWalker((Node *) query, &rangeTableList); tableEntryList = TableEntryList(rangeTableList); foreach(tableEntryCell, tableEntryList) { TableEntry *tableEntry = (TableEntry *) lfirst(tableEntryCell); Oid relationId = tableEntry->relationId; relationIdList = list_append_unique_oid(relationIdList, relationId); } return relationIdList; } citus-7.0.3/src/backend/distributed/planner/shard_pruning.c000066400000000000000000001176401317107136600240340ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * shard_pruning.c * Shard pruning related code. * * The goal of shard pruning is to find a minimal (super)set of shards that * need to be queried to find rows matching the expression in a query. * * In PruneShards, we first compute a simplified disjunctive normal form (DNF) * of the expression as a list of pruning instances. Each pruning instance * contains all AND-ed constraints on the partition column. 
An OR expression * will result in two or more new pruning instances being added for the * subexpressions. The "parent" instance is marked isPartial and ignored * during pruning. * * We use the distributive property for constraints of the form P AND (Q OR R) * to rewrite it to (P AND Q) OR (P AND R) by copying constraints from parent * to "child" pruning instances. However, we do not distribute nested * expressions. While (P OR Q) AND (R OR S) is logically equivalent to (P AND * R) OR (P AND S) OR (Q AND R) OR (Q AND S), in our implementation it becomes * P OR Q OR R OR S. This is acceptable since this will always result in a * superset of shards. If this proves to be a issue in practice, a more * complete algorithm could be implemented. * * We then evaluate each non-partial pruning instance in the disjunction * through the following, increasingly expensive, steps: * * 1) If there is a constant equality constraint on the partition column, and * no overlapping shards exist, find the shard interval in which the * constant falls * * 2) If there is a hash range constraint on the partition column, find the * shard interval matching the range * * 3) If there are range constraints (e.g. (a > 0 AND a < 10)) on the * partition column, find the shard intervals that overlap with the range * * 4) If there are overlapping shards, exhaustively search all shards that are * not excluded by constraints * * Finally, the union of the shards found by each pruning instance is * returned. * * Copyright (c) 2014-2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "distributed/shard_pruning.h" #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" #include "distributed/metadata_cache.h" #include "distributed/multi_planner.h" #include "distributed/multi_join_order.h" #include "distributed/multi_physical_planner.h" #include "distributed/shardinterval_utils.h" #include "distributed/pg_dist_partition.h" #include "distributed/worker_protocol.h" #include "nodes/nodeFuncs.h" #include "nodes/makefuncs.h" #include "optimizer/clauses.h" #include "utils/arrayaccess.h" #include "utils/catcache.h" #include "utils/lsyscache.h" #include "utils/memutils.h" /* * A pruning instance is a set of ANDed constraints on a partition key. */ typedef struct PruningInstance { /* Does this instance contain any prunable expressions? */ bool hasValidConstraint; /* * This constraint never evaluates to true, i.e. pruning does not have to * be performed. */ bool evaluatesToFalse; /* * Constraints on the partition column value. If multiple values are * found the more restrictive one should be stored here. Even in case of * a hash-partitioned table, actual column-values are stored here, *not* * hashed values. */ Const *lessConsts; Const *lessEqualConsts; Const *equalConsts; Const *greaterEqualConsts; Const *greaterConsts; /* * Constraint using a pre-hashed column value. The constant will store the * hashed value, not the original value of the restriction. */ Const *hashedEqualConsts; /* * Types of constraints not understood. We could theoretically try more * expensive methods of pruning if any such restrictions are found. * * TODO: any actual use for this? Right now there seems little point. */ List *otherRestrictions; /* * Has this PruningInstance been added to * ClauseWalkerContext->pruningInstances? 
This is not done immediately, * but the first time a constraint (independent of us being able to handle * that constraint) is found. */ bool addedToPruningInstances; /* * When OR clauses are found, the non-ORed part (think of a < 3 AND (a > 5 * OR a > 7)) of the expression is stored in one PruningInstance which is * then copied for the ORed expressions. The original is marked as * isPartial, to avoid it being used for pruning. */ bool isPartial; } PruningInstance; /* * Partial instances that need to be finished building. This is used to * collect all ANDed restrictions, before looking into ORed expressions. */ typedef struct PendingPruningInstance { PruningInstance *instance; Node *continueAt; } PendingPruningInstance; /* * Data necessary to perform a single PruneShards(). */ typedef struct ClauseWalkerContext { Var *partitionColumn; char partitionMethod; /* ORed list of pruning targets */ List *pruningInstances; /* * Partially built PruningInstances, that need to be completed by doing a * separate PrunableExpressionsWalker() pass. */ List *pendingInstances; /* PruningInstance currently being built, all elegible constraints are added here */ PruningInstance *currentPruningInstance; /* * Information about function calls we need to perform. Re-using the same * FunctionCallInfoData, instead of using FunctionCall2Coll, is often * cheaper. */ FunctionCallInfoData compareValueFunctionCall; FunctionCallInfoData compareIntervalFunctionCall; } ClauseWalkerContext; static void PrunableExpressions(Node *originalNode, ClauseWalkerContext *context); static bool PrunableExpressionsWalker(Node *originalNode, ClauseWalkerContext *context); static void AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opClause, Var *varClause, Const *constantClause); static void AddSAOPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, ScalarArrayOpExpr * arrayOperatorExpression); static void AddHashRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opClause, Var *varClause, Const *constantClause); static void AddNewConjuction(ClauseWalkerContext *context, OpExpr *op); static PruningInstance * CopyPartialPruningInstance(PruningInstance *sourceInstance); static List * ShardArrayToList(ShardInterval **shardArray, int length); static List * DeepCopyShardIntervalList(List *originalShardIntervalList); static int PerformValueCompare(FunctionCallInfoData *compareFunctionCall, Datum a, Datum b); static int PerformCompare(FunctionCallInfoData *compareFunctionCall); static List * PruneOne(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, PruningInstance *prune); static List * PruneWithBoundaries(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, PruningInstance *prune); static List * ExhaustivePrune(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, PruningInstance *prune); static bool ExhaustivePruneOne(ShardInterval *curInterval, ClauseWalkerContext *context, PruningInstance *prune); static int UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCache, int shardCount, FunctionCallInfoData *compareFunction, bool includeMin); static int LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCache, int shardCount, FunctionCallInfoData *compareFunction, bool includeMax); /* * PruneShards returns all shards from a distributed table that cannot be * proven to be eliminated by whereClauseList. * * For reference tables, the function simply returns the single shard that the * table has. 
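 *
 * A minimal calling sketch (relationId, rangeTableId and the flattened
 * restriction clause list are assumed to be supplied by the caller; they are
 * not defined here):
 *
 *   List *prunedShardList =
 *       PruneShards(relationId, rangeTableId, restrictClauseList);
 *
 * The returned ShardIntervals are deep copies and therefore independent of
 * the metadata cache contents.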
*/ List * PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); int shardCount = cacheEntry->shardIntervalArrayLength; char partitionMethod = cacheEntry->partitionMethod; ClauseWalkerContext context = { 0 }; ListCell *pruneCell; List *prunedList = NIL; bool foundRestriction = false; /* there are no shards to return */ if (shardCount == 0) { return NIL; } /* always return empty result if WHERE clause is of the form: false (AND ..) */ if (ContainsFalseClause(whereClauseList)) { return NIL; } /* short circuit for reference tables */ if (partitionMethod == DISTRIBUTE_BY_NONE) { prunedList = ShardArrayToList(cacheEntry->sortedShardIntervalArray, cacheEntry->shardIntervalArrayLength); return DeepCopyShardIntervalList(prunedList); } context.partitionMethod = partitionMethod; context.partitionColumn = PartitionColumn(relationId, rangeTableId); context.currentPruningInstance = palloc0(sizeof(PruningInstance)); if (cacheEntry->shardIntervalCompareFunction) { /* initiate function call info once (allows comparators to cache metadata) */ InitFunctionCallInfoData(context.compareIntervalFunctionCall, cacheEntry->shardIntervalCompareFunction, 2, DEFAULT_COLLATION_OID, NULL, NULL); } else { ereport(ERROR, (errmsg("shard pruning not possible without " "a shard interval comparator"))); } if (cacheEntry->shardColumnCompareFunction) { /* initiate function call info once (allows comparators to cache metadata) */ InitFunctionCallInfoData(context.compareValueFunctionCall, cacheEntry->shardColumnCompareFunction, 2, DEFAULT_COLLATION_OID, NULL, NULL); } else { ereport(ERROR, (errmsg("shard pruning not possible without " "a partition column comparator"))); } /* Figure out what we can prune on */ PrunableExpressions((Node *) whereClauseList, &context); /* * Prune using each of the PrunableInstances we found, and OR results * together. */ foreach(pruneCell, context.pruningInstances) { PruningInstance *prune = (PruningInstance *) lfirst(pruneCell); List *pruneOneList; /* * If this is a partial instance, a fully built one has also been * added. Skip. */ if (prune->isPartial) { continue; } /* * If the current instance has no prunable expressions, we'll have to * return all shards. No point in continuing pruning in that case. */ if (!prune->hasValidConstraint) { foundRestriction = false; break; } /* * Similar to the above, if hash-partitioned and there's nothing to * prune by, we're done. */ if (context.partitionMethod == DISTRIBUTE_BY_HASH && !prune->evaluatesToFalse && !prune->equalConsts && !prune->hashedEqualConsts) { foundRestriction = false; break; } pruneOneList = PruneOne(cacheEntry, &context, prune); if (prunedList) { /* * We can use list_union_ptr, which is a lot faster than doing * comparing shards by value, because all the ShardIntervals are * guaranteed to be from * DistTableCacheEntry->sortedShardIntervalArray (thus having the * same pointer values). */ prunedList = list_union_ptr(prunedList, pruneOneList); } else { prunedList = pruneOneList; } foundRestriction = true; } /* found no valid restriction, build list of all shards */ if (!foundRestriction) { prunedList = ShardArrayToList(cacheEntry->sortedShardIntervalArray, cacheEntry->shardIntervalArrayLength); } /* * Deep copy list, so it's independent of the DistTableCacheEntry * contents. */ return DeepCopyShardIntervalList(prunedList); } /* * ContainsFalseClause returns whether the flattened where clause list * contains false as a clause. 
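 *
 * For example, a qualification that the planner has folded down to a bare
 * "false" constant (e.g. "WHERE false AND a = 1") makes this function return
 * true, in which case PruneShards() short-circuits and returns no shards.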
*/ bool ContainsFalseClause(List *whereClauseList) { bool containsFalseClause = false; ListCell *clauseCell = NULL; foreach(clauseCell, whereClauseList) { Node *clause = (Node *) lfirst(clauseCell); if (IsA(clause, Const)) { Const *constant = (Const *) clause; if (constant->consttype == BOOLOID && !DatumGetBool(constant->constvalue)) { containsFalseClause = true; break; } } } return containsFalseClause; } /* * PrunableExpressions builds a list of all prunable expressions in node, * storing them in context->pruningInstances. */ static void PrunableExpressions(Node *node, ClauseWalkerContext *context) { /* * Build initial list of prunable expressions. As long as only, * implicitly or explicitly, ANDed expressions are found, this perform a * depth-first search. When an ORed expression is found, the current * PruningInstance is added to context->pruningInstances (once for each * ORed expression), then the tree-traversal is continued without * recursing. Once at the top-level again, we'll process all pending * expressions - that allows us to find all ANDed expressions, before * recursing into an ORed expression. */ PrunableExpressionsWalker(node, context); /* * Process all pending instances. While processing, new ones might be * added to the list, so don't use foreach(). * * Check the places in PruningInstanceWalker that push onto * context->pendingInstances why construction of the PruningInstance might * be pending. * * We copy the partial PruningInstance, and continue adding information by * calling PrunableExpressionsWalker() on the copy, continuing at the the * node stored in PendingPruningInstance->continueAt. */ while (context->pendingInstances != NIL) { PendingPruningInstance *instance = (PendingPruningInstance *) linitial(context->pendingInstances); PruningInstance *newPrune = CopyPartialPruningInstance(instance->instance); context->pendingInstances = list_delete_first(context->pendingInstances); context->currentPruningInstance = newPrune; PrunableExpressionsWalker(instance->continueAt, context); context->currentPruningInstance = NULL; } } /* * PrunableExpressionsWalker() is the main work horse for * PrunableExpressions(). */ static bool PrunableExpressionsWalker(Node *node, ClauseWalkerContext *context) { if (node == NULL) { return false; } /* * Check for expressions understood by this routine. */ if (IsA(node, List)) { /* at the top of quals we'll frequently see lists, those are to be treated as ANDs */ } else if (IsA(node, BoolExpr)) { BoolExpr *boolExpr = (BoolExpr *) node; if (boolExpr->boolop == NOT_EXPR) { return false; } else if (boolExpr->boolop == AND_EXPR) { return expression_tree_walker((Node *) boolExpr->args, PrunableExpressionsWalker, context); } else if (boolExpr->boolop == OR_EXPR) { ListCell *opCell = NULL; /* * "Queue" partial pruning instances. This is used to convert * expressions like (A AND (B OR C) AND D) into (A AND B AND D), * (A AND C AND D), with A, B, C, D being restrictions. When the * OR is encountered, a reference to the partially built * PruningInstance (containing A at this point), is added to * context->pendingInstances once for B and once for C. Once a * full tree-walk completed, PrunableExpressions() will complete * the pending instances, which'll now also know about restriction * D, by calling PrunableExpressionsWalker() once for B and once * for C. 
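 *
 * As a concrete (hypothetical) illustration on a partition column "a": the
 * qualification "a > 0 AND (a = 5 OR a = 15)" yields two completed pruning
 * instances, {a > 0, a = 5} and {a > 0, a = 15}, while the partially built
 * instance {a > 0} is marked isPartial and is therefore ignored when the
 * actual pruning happens.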
*/ foreach(opCell, boolExpr->args) { AddNewConjuction(context, lfirst(opCell)); } return false; } } else if (IsA(node, OpExpr)) { OpExpr *opClause = (OpExpr *) node; PruningInstance *prune = context->currentPruningInstance; Node *leftOperand = NULL; Node *rightOperand = NULL; Const *constantClause = NULL; Var *varClause = NULL; if (!prune->addedToPruningInstances) { context->pruningInstances = lappend(context->pruningInstances, prune); prune->addedToPruningInstances = true; } if (list_length(opClause->args) == 2) { leftOperand = get_leftop((Expr *) opClause); rightOperand = get_rightop((Expr *) opClause); leftOperand = strip_implicit_coercions(leftOperand); rightOperand = strip_implicit_coercions(rightOperand); if (IsA(rightOperand, Const) && IsA(leftOperand, Var)) { constantClause = (Const *) rightOperand; varClause = (Var *) leftOperand; } else if (IsA(leftOperand, Const) && IsA(rightOperand, Var)) { constantClause = (Const *) leftOperand; varClause = (Var *) rightOperand; } } if (constantClause && varClause && equal(varClause, context->partitionColumn)) { /* * Found a restriction on the partition column itself. Update the * current constraint with the new information. */ AddPartitionKeyRestrictionToInstance(context, opClause, varClause, constantClause); } else if (constantClause && varClause && varClause->varattno == RESERVED_HASHED_COLUMN_ID) { /* * Found restriction that directly specifies the boundaries of a * hashed column. */ AddHashRestrictionToInstance(context, opClause, varClause, constantClause); } return false; } else if (IsA(node, ScalarArrayOpExpr)) { ScalarArrayOpExpr *arrayOperatorExpression = (ScalarArrayOpExpr *) node; AddSAOPartitionKeyRestrictionToInstance(context, arrayOperatorExpression); return false; } else { PruningInstance *prune = context->currentPruningInstance; /* * Mark expression as added, so we'll fail pruning if there's no ANDed * restrictions that we know how to deal with. */ if (!prune->addedToPruningInstances) { context->pruningInstances = lappend(context->pruningInstances, prune); prune->addedToPruningInstances = true; } return false; } return expression_tree_walker(node, PrunableExpressionsWalker, context); } /* * AddSAOPartitionKeyRestrictionToInstance adds partcol = arrayelem operator * restriction to the current pruning instance for each element of the array. These * restrictions are added to pruning instance to prune shards based on IN/=ANY * constraints. 
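 *
 * For instance (hypothetical values), "partcol IN (1, 2, 3)" arrives here as
 * "partcol = ANY('{1,2,3}')"; each array element is turned into a separate
 * "partcol = const" OpExpr and queued via AddNewConjuction(), so pruning
 * effectively becomes the union of three single-value equality lookups.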
*/ static void AddSAOPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, ScalarArrayOpExpr *arrayOperatorExpression) { PruningInstance *prune = context->currentPruningInstance; Node *leftOpExpression = linitial(arrayOperatorExpression->args); Node *strippedLeftOpExpression = strip_implicit_coercions(leftOpExpression); bool usingEqualityOperator = OperatorImplementsEquality( arrayOperatorExpression->opno); Expr *arrayArgument = (Expr *) lsecond(arrayOperatorExpression->args); /* checking for partcol = ANY(const, value, s); or partcol IN (const,b,c); */ if (usingEqualityOperator && strippedLeftOpExpression != NULL && equal(strippedLeftOpExpression, context->partitionColumn) && IsA(arrayArgument, Const)) { ArrayType *array = NULL; int16 typlen = 0; bool typbyval = false; char typalign = '\0'; Oid elementType = 0; ArrayIterator arrayIterator = NULL; Datum arrayElement = 0; Datum inArray = ((Const *) arrayArgument)->constvalue; bool isNull = false; /* check for the NULL right-hand expression*/ if (inArray == 0) { return; } array = DatumGetArrayTypeP(((Const *) arrayArgument)->constvalue); /* get the necessary information from array type to iterate over it */ elementType = ARR_ELEMTYPE(array); get_typlenbyvalalign(elementType, &typlen, &typbyval, &typalign); /* Iterate over the righthand array of expression */ arrayIterator = array_create_iterator(array, 0, NULL); while (array_iterate(arrayIterator, &arrayElement, &isNull)) { OpExpr *arrayEqualityOp = NULL; Const *constElement = makeConst(elementType, -1, DEFAULT_COLLATION_OID, typlen, arrayElement, isNull, typbyval); /* build partcol = arrayelem operator */ arrayEqualityOp = makeNode(OpExpr); arrayEqualityOp->opno = arrayOperatorExpression->opno; arrayEqualityOp->opfuncid = arrayOperatorExpression->opfuncid; arrayEqualityOp->inputcollid = arrayOperatorExpression->inputcollid; arrayEqualityOp->opresulttype = get_func_rettype( arrayOperatorExpression->opfuncid); arrayEqualityOp->opcollid = DEFAULT_COLLATION_OID; arrayEqualityOp->location = -1; arrayEqualityOp->args = list_make2(strippedLeftOpExpression, constElement); AddNewConjuction(context, arrayEqualityOp); } } /* Since we could not deal with the constraint, add the pruning instance to * pruning instance list and labeled it as added. */ else if (!prune->addedToPruningInstances) { context->pruningInstances = lappend(context->pruningInstances, prune); prune->addedToPruningInstances = true; } } /* * AddNewConjuction adds the OpExpr to pending instance list of context * as conjunction as partial instance. */ static void AddNewConjuction(ClauseWalkerContext *context, OpExpr *op) { PendingPruningInstance *instance = palloc0(sizeof(PendingPruningInstance)); instance->instance = context->currentPruningInstance; instance->continueAt = (Node *) op; /* * Signal that this instance is not to be used for pruning on * its own. Once the pending instance is processed, it'll be * used. */ instance->instance->isPartial = true; context->pendingInstances = lappend(context->pendingInstances, instance); } /* * AddPartitionKeyRestrictionToInstance adds information about a PartitionKey * $op Const restriction to the current pruning instance. 
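 *
 * Two hypothetical examples of how constraints are merged: for
 * "a >= 5 AND a >= 10" only the more restrictive constant (10) is kept in
 * greaterEqualConsts, and for "a = 5 AND a = 6" the instance is marked
 * evaluatesToFalse because the partition key cannot equal two different
 * values at once.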
*/ static void AddPartitionKeyRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opClause, Var *varClause, Const *constantClause) { PruningInstance *prune = context->currentPruningInstance; List *btreeInterpretationList = NULL; ListCell *btreeInterpretationCell = NULL; bool matchedOp = false; btreeInterpretationList = get_op_btree_interpretation(opClause->opno); foreach(btreeInterpretationCell, btreeInterpretationList) { OpBtreeInterpretation *btreeInterpretation = (OpBtreeInterpretation *) lfirst(btreeInterpretationCell); switch (btreeInterpretation->strategy) { case BTLessStrategyNumber: { if (!prune->lessConsts || PerformValueCompare(&context->compareValueFunctionCall, constantClause->constvalue, prune->lessConsts->constvalue) < 0) { prune->lessConsts = constantClause; } matchedOp = true; break; } case BTLessEqualStrategyNumber: { if (!prune->lessEqualConsts || PerformValueCompare(&context->compareValueFunctionCall, constantClause->constvalue, prune->lessEqualConsts->constvalue) < 0) { prune->lessEqualConsts = constantClause; } matchedOp = true; break; } case BTEqualStrategyNumber: { if (!prune->equalConsts) { prune->equalConsts = constantClause; } else if (PerformValueCompare(&context->compareValueFunctionCall, constantClause->constvalue, prune->equalConsts->constvalue) != 0) { /* key can't be equal to two values */ prune->evaluatesToFalse = true; } matchedOp = true; break; } case BTGreaterEqualStrategyNumber: { if (!prune->greaterEqualConsts || PerformValueCompare(&context->compareValueFunctionCall, constantClause->constvalue, prune->greaterEqualConsts->constvalue) > 0 ) { prune->greaterEqualConsts = constantClause; } matchedOp = true; break; } case BTGreaterStrategyNumber: { if (!prune->greaterConsts || PerformValueCompare(&context->compareValueFunctionCall, constantClause->constvalue, prune->greaterConsts->constvalue) > 0) { prune->greaterConsts = constantClause; } matchedOp = true; break; } case ROWCOMPARE_NE: { /* TODO: could add support for this, if we feel like it */ matchedOp = false; break; } default: Assert(false); } } if (!matchedOp) { prune->otherRestrictions = lappend(prune->otherRestrictions, opClause); } else { prune->hasValidConstraint = true; } } /* * AddHashRestrictionToInstance adds information about a * RESERVED_HASHED_COLUMN_ID = Const restriction to the current pruning * instance. */ static void AddHashRestrictionToInstance(ClauseWalkerContext *context, OpExpr *opClause, Var *varClause, Const *constantClause) { PruningInstance *prune = context->currentPruningInstance; List *btreeInterpretationList = NULL; ListCell *btreeInterpretationCell = NULL; btreeInterpretationList = get_op_btree_interpretation(opClause->opno); foreach(btreeInterpretationCell, btreeInterpretationList) { OpBtreeInterpretation *btreeInterpretation = (OpBtreeInterpretation *) lfirst(btreeInterpretationCell); /* * Ladidadida, dirty hackety hack. We only add such * constraints (in ShardIntervalOpExpressions()) to select a * shard based on its exact boundaries. For efficient binary * search it's better to simply use one representative value * to look up the shard. In practice, this is sufficient for * now. */ if (btreeInterpretation->strategy == BTGreaterEqualStrategyNumber) { Assert(!prune->hashedEqualConsts); prune->hashedEqualConsts = constantClause; prune->hasValidConstraint = true; } } } /* * CopyPartialPruningInstance copies a partial PruningInstance, so it can be * completed. 
*/ static PruningInstance * CopyPartialPruningInstance(PruningInstance *sourceInstance) { PruningInstance *newInstance = palloc(sizeof(PruningInstance)); Assert(sourceInstance->isPartial); /* * To make the new PruningInstance useful for pruning, we have to reset it * being partial - if necessary it'll be marked so again by * PrunableExpressionsWalker(). */ memcpy(newInstance, sourceInstance, sizeof(PruningInstance)); newInstance->addedToPruningInstances = false; newInstance->isPartial = false; return newInstance; } /* * ShardArrayToList builds a list of out the array of ShardInterval*. */ static List * ShardArrayToList(ShardInterval **shardArray, int length) { List *shardIntervalList = NIL; int shardIndex; for (shardIndex = 0; shardIndex < length; shardIndex++) { ShardInterval *shardInterval = shardArray[shardIndex]; shardIntervalList = lappend(shardIntervalList, shardInterval); } return shardIntervalList; } /* * DeepCopyShardIntervalList copies originalShardIntervalList and the * contained ShardIntervals, into a new list. */ static List * DeepCopyShardIntervalList(List *originalShardIntervalList) { List *copiedShardIntervalList = NIL; ListCell *shardIntervalCell = NULL; foreach(shardIntervalCell, originalShardIntervalList) { ShardInterval *originalShardInterval = (ShardInterval *) lfirst(shardIntervalCell); ShardInterval *copiedShardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); CopyShardInterval(originalShardInterval, copiedShardInterval); copiedShardIntervalList = lappend(copiedShardIntervalList, copiedShardInterval); } return copiedShardIntervalList; } /* * PruneOne returns all shards in the table that match a single * PruningInstance. */ static List * PruneOne(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, PruningInstance *prune) { ShardInterval *shardInterval = NULL; /* Well, if life always were this easy... */ if (prune->evaluatesToFalse) { return NIL; } /* * For an equal constraints, if there's no overlapping shards (always the * case for hash and range partitioning, sometimes for append), can * perform binary search for the right interval. That's usually the * fastest, so try that first. */ if (prune->equalConsts && !cacheEntry->hasOverlappingShardInterval) { shardInterval = FindShardInterval(prune->equalConsts->constvalue, cacheEntry); /* * If pruned down to nothing, we're done. Otherwise see if other * methods prune down further / to nothing. */ if (!shardInterval) { return NIL; } } /* * If the hash value we're looking for is known, we can search for the * interval directly. That's fast and should only ever be the case for a * hash-partitioned table. */ if (prune->hashedEqualConsts) { int shardIndex = INVALID_SHARD_INDEX; ShardInterval **sortedShardIntervalArray = cacheEntry->sortedShardIntervalArray; Assert(context->partitionMethod == DISTRIBUTE_BY_HASH); shardIndex = FindShardIntervalIndex(prune->hashedEqualConsts->constvalue, cacheEntry); if (shardIndex == INVALID_SHARD_INDEX) { return NIL; } else if (shardInterval && sortedShardIntervalArray[shardIndex]->shardId != shardInterval->shardId) { /* * equalConst based pruning above yielded a different shard than * pruning based on pre-hashed equality. This is useful in case * of INSERT ... SELECT, where both can occur together (one via * join/colocation, the other via a plain equality restriction). 
*/ return NIL; } else { return list_make1(sortedShardIntervalArray[shardIndex]); } } /* * If previous pruning method yielded a single shard, and the table is not * hash partitioned, attempt range based pruning to exclude it further. * * That's particularly important in particular for subquery pushdown, * where it's very common to have a user specified equality restriction, * and a range based restriction for shard boundaries, added by the * subquery machinery. */ if (shardInterval) { if (context->partitionMethod != DISTRIBUTE_BY_HASH && ExhaustivePruneOne(shardInterval, context, prune)) { return NIL; } else { /* no chance to prune further, return */ return list_make1(shardInterval); } } /* * Should never get here for hashing, we've filtered down to either zero * or one shard, and returned. */ Assert(context->partitionMethod != DISTRIBUTE_BY_HASH); /* * Next method: binary search with fuzzy boundaries. Can't trivially do so * if shards have overlapping boundaries. * * TODO: If we kept shard intervals separately sorted by both upper and * lower boundaries, this should be possible? */ if (!cacheEntry->hasOverlappingShardInterval && ( prune->greaterConsts || prune->greaterEqualConsts || prune->lessConsts || prune->lessEqualConsts)) { return PruneWithBoundaries(cacheEntry, context, prune); } /* * Brute force: Check each shard. */ return ExhaustivePrune(cacheEntry, context, prune); } /* * PerformCompare invokes comparator with prepared values, check for * unexpected NULL returns. */ static int PerformCompare(FunctionCallInfoData *compareFunctionCall) { Datum result = FunctionCallInvoke(compareFunctionCall); if (compareFunctionCall->isnull) { elog(ERROR, "function %u returned NULL", compareFunctionCall->flinfo->fn_oid); } return DatumGetInt32(result); } /* * PerformValueCompare invokes comparator with a/b, and checks for unexpected * NULL returns. */ static int PerformValueCompare(FunctionCallInfoData *compareFunctionCall, Datum a, Datum b) { compareFunctionCall->arg[0] = a; compareFunctionCall->argnull[0] = false; compareFunctionCall->arg[1] = b; compareFunctionCall->argnull[1] = false; return PerformCompare(compareFunctionCall); } /* * LowerShardBoundary returns the index of the first ShardInterval that's >= * (if includeMax) or > partitionColumnValue. 
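 *
 * A hypothetical example: with sorted shard intervals [0, 9], [10, 19] and
 * [20, 29], a lower bound of 12 (inclusive) makes the binary search land in
 * the interval [10, 19] and return index 1, i.e. the first shard that may
 * still contain matching rows.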
*/ static int LowerShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCache, int shardCount, FunctionCallInfoData *compareFunction, bool includeMax) { int lowerBoundIndex = 0; int upperBoundIndex = shardCount; Assert(shardCount != 0); /* setup partitionColumnValue argument once */ compareFunction->arg[0] = partitionColumnValue; compareFunction->argnull[0] = false; while (lowerBoundIndex < upperBoundIndex) { int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2); int maxValueComparison = 0; int minValueComparison = 0; /* setup minValue as argument */ compareFunction->arg[1] = shardIntervalCache[middleIndex]->minValue; compareFunction->argnull[1] = false; /* execute cmp(partitionValue, lowerBound) */ minValueComparison = PerformCompare(compareFunction); /* and evaluate results */ if (minValueComparison < 0) { /* value smaller than entire range */ upperBoundIndex = middleIndex; continue; } /* setup maxValue as argument */ compareFunction->arg[1] = shardIntervalCache[middleIndex]->maxValue; compareFunction->argnull[1] = false; /* execute cmp(partitionValue, upperBound) */ maxValueComparison = PerformCompare(compareFunction); if ((maxValueComparison == 0 && !includeMax) || maxValueComparison > 0) { /* value bigger than entire range */ lowerBoundIndex = middleIndex + 1; continue; } /* found interval containing partitionValue */ return middleIndex; } Assert(lowerBoundIndex == upperBoundIndex); /* * If we get here, none of the ShardIntervals exactly contain the value * (we'd have hit the return middleIndex; case otherwise). Figure out * whether there's possibly any interval containing a value that's bigger * than the partition key one. */ if (lowerBoundIndex == 0) { /* all intervals are bigger, thus return 0 */ return 0; } else if (lowerBoundIndex == shardCount) { /* partition value is bigger than all partition values */ return INVALID_SHARD_INDEX; } /* value falls inbetween intervals */ return lowerBoundIndex + 1; } /* * UpperShardBoundary returns the index of the last ShardInterval that's <= * (if includeMin) or < partitionColumnValue. 
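 *
 * A hypothetical example: with sorted shard intervals [0, 9], [10, 19] and
 * [20, 29], an upper bound of 12 (inclusive) returns index 1, i.e. the last
 * shard whose range can still contain matching rows.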
*/ static int UpperShardBoundary(Datum partitionColumnValue, ShardInterval **shardIntervalCache, int shardCount, FunctionCallInfoData *compareFunction, bool includeMin) { int lowerBoundIndex = 0; int upperBoundIndex = shardCount; Assert(shardCount != 0); /* setup partitionColumnValue argument once */ compareFunction->arg[0] = partitionColumnValue; compareFunction->argnull[0] = false; while (lowerBoundIndex < upperBoundIndex) { int middleIndex = lowerBoundIndex + ((upperBoundIndex - lowerBoundIndex) / 2); int maxValueComparison = 0; int minValueComparison = 0; /* setup minValue as argument */ compareFunction->arg[1] = shardIntervalCache[middleIndex]->minValue; compareFunction->argnull[1] = false; /* execute cmp(partitionValue, lowerBound) */ minValueComparison = PerformCompare(compareFunction); /* and evaluate results */ if ((minValueComparison == 0 && !includeMin) || minValueComparison < 0) { /* value smaller than entire range */ upperBoundIndex = middleIndex; continue; } /* setup maxValue as argument */ compareFunction->arg[1] = shardIntervalCache[middleIndex]->maxValue; compareFunction->argnull[1] = false; /* execute cmp(partitionValue, upperBound) */ maxValueComparison = PerformCompare(compareFunction); if (maxValueComparison > 0) { /* value bigger than entire range */ lowerBoundIndex = middleIndex + 1; continue; } /* found interval containing partitionValue */ return middleIndex; } Assert(lowerBoundIndex == upperBoundIndex); /* * If we get here, none of the ShardIntervals exactly contain the value * (we'd have hit the return middleIndex; case otherwise). Figure out * whether there's possibly any interval containing a value that's smaller * than the partition key one. */ if (upperBoundIndex == shardCount) { /* all intervals are smaller, thus return 0 */ return shardCount - 1; } else if (upperBoundIndex == 0) { /* partition value is smaller than all partition values */ return INVALID_SHARD_INDEX; } /* value falls inbetween intervals, return the inverval one smaller as bound */ return upperBoundIndex - 1; } /* * PruneWithBoundaries searches for shards that match inequality constraints, * using binary search on both the upper and lower boundary, and returns a * list of surviving shards. */ static List * PruneWithBoundaries(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, PruningInstance *prune) { List *remainingShardList = NIL; int shardCount = cacheEntry->shardIntervalArrayLength; ShardInterval **sortedShardIntervalArray = cacheEntry->sortedShardIntervalArray; bool hasLowerBound = false; bool hasUpperBound = false; Datum lowerBound = 0; Datum upperBound = 0; bool lowerBoundInclusive = false; bool upperBoundInclusive = false; int lowerBoundIdx = -1; int upperBoundIdx = -1; int curIdx = 0; FunctionCallInfo compareFunctionCall = &context->compareIntervalFunctionCall; if (prune->greaterEqualConsts) { lowerBound = prune->greaterEqualConsts->constvalue; lowerBoundInclusive = true; hasLowerBound = true; } if (prune->greaterConsts) { /* * Use the more restrictive one, if both greater and greaterEqual * constraints are specified. 
*/ if (!hasLowerBound || PerformValueCompare(compareFunctionCall, prune->greaterConsts->constvalue, lowerBound) >= 0) { lowerBound = prune->greaterConsts->constvalue; lowerBoundInclusive = false; hasLowerBound = true; } } if (prune->lessEqualConsts) { upperBound = prune->lessEqualConsts->constvalue; upperBoundInclusive = true; hasUpperBound = true; } if (prune->lessConsts) { /* * Use the more restrictive one, if both less and lessEqual * constraints are specified. */ if (!hasUpperBound || PerformValueCompare(compareFunctionCall, prune->lessConsts->constvalue, upperBound) <= 0) { upperBound = prune->lessConsts->constvalue; upperBoundInclusive = false; hasUpperBound = true; } } Assert(hasLowerBound || hasUpperBound); /* find lower bound */ if (hasLowerBound) { lowerBoundIdx = LowerShardBoundary(lowerBound, sortedShardIntervalArray, shardCount, compareFunctionCall, lowerBoundInclusive); } else { lowerBoundIdx = 0; } /* find upper bound */ if (hasUpperBound) { upperBoundIdx = UpperShardBoundary(upperBound, sortedShardIntervalArray, shardCount, compareFunctionCall, upperBoundInclusive); } else { upperBoundIdx = shardCount - 1; } if (lowerBoundIdx == INVALID_SHARD_INDEX) { return NIL; } else if (upperBoundIdx == INVALID_SHARD_INDEX) { return NIL; } /* * Build list of all shards that are in the range of shards (possibly 0). */ for (curIdx = lowerBoundIdx; curIdx <= upperBoundIdx; curIdx++) { remainingShardList = lappend(remainingShardList, sortedShardIntervalArray[curIdx]); } return remainingShardList; } /* * ExhaustivePrune returns a list of shards matching PruningInstances * constraints, by simply checking them for each individual shard. */ static List * ExhaustivePrune(DistTableCacheEntry *cacheEntry, ClauseWalkerContext *context, PruningInstance *prune) { List *remainingShardList = NIL; int shardCount = cacheEntry->shardIntervalArrayLength; ShardInterval **sortedShardIntervalArray = cacheEntry->sortedShardIntervalArray; int curIdx = 0; for (curIdx = 0; curIdx < shardCount; curIdx++) { ShardInterval *curInterval = sortedShardIntervalArray[curIdx]; if (!ExhaustivePruneOne(curInterval, context, prune)) { remainingShardList = lappend(remainingShardList, curInterval); } } return remainingShardList; } /* * ExhaustivePruneOne returns true if curInterval is pruned away, false * otherwise. 
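 *
 * For example (hypothetical boundaries), a shard covering [10, 19] is pruned
 * away by an instance carrying "a = 25" or "a < 10", while an instance with
 * "a >= 15" keeps it, because values in [15, 19] could still match.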
*/ static bool ExhaustivePruneOne(ShardInterval *curInterval, ClauseWalkerContext *context, PruningInstance *prune) { FunctionCallInfo compareFunctionCall = &context->compareIntervalFunctionCall; Datum compareWith = 0; /* NULL boundaries can't be compared to */ if (!curInterval->minValueExists || !curInterval->maxValueExists) { return false; } if (prune->equalConsts) { compareWith = prune->equalConsts->constvalue; if (PerformValueCompare(compareFunctionCall, compareWith, curInterval->minValue) < 0) { return true; } if (PerformValueCompare(compareFunctionCall, compareWith, curInterval->maxValue) > 0) { return true; } } if (prune->greaterEqualConsts) { compareWith = prune->greaterEqualConsts->constvalue; if (PerformValueCompare(compareFunctionCall, curInterval->maxValue, compareWith) < 0) { return true; } } if (prune->greaterConsts) { compareWith = prune->greaterConsts->constvalue; if (PerformValueCompare(compareFunctionCall, curInterval->maxValue, compareWith) <= 0) { return true; } } if (prune->lessEqualConsts) { compareWith = prune->lessEqualConsts->constvalue; if (PerformValueCompare(compareFunctionCall, curInterval->minValue, compareWith) > 0) { return true; } } if (prune->lessConsts) { compareWith = prune->lessConsts->constvalue; if (PerformValueCompare(compareFunctionCall, curInterval->minValue, compareWith) >= 0) { return true; } } return false; } citus-7.0.3/src/backend/distributed/progress/000077500000000000000000000000001317107136600212215ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/progress/multi_progress.c000066400000000000000000000200271317107136600244440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_progress.c * Routines for tracking long-running jobs and seeing their progress. * * Copyright (c) 2017, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "pgstat.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_progress.h" #include "nodes/execnodes.h" #include "storage/dsm.h" #include "utils/builtins.h" /* dynamic shared memory handle of the current progress */ static uint64 currentProgressDSMHandle = DSM_HANDLE_INVALID; static ProgressMonitorData * MonitorDataFromDSMHandle(dsm_handle dsmHandle, dsm_segment **attachedSegment); static ReturnSetInfo * FunctionCallGetTupleStore1(PGFunction function, Oid functionId, Datum argument); /* * CreateProgressMonitor is used to create a place to store progress information related * to long running processes. The function creates a dynamic shared memory segment * consisting of a header regarding to the process and an array of "steps" that the long * running "operations" consists of. The handle of the dynamic shared memory is stored in * pg_stat_get_progress_info output, to be parsed by a progress retrieval command * later on. This behavior may cause unrelated (but hopefully harmless) rows in * pg_stat_progress_vacuum output. The caller of this function should provide a magic * number, a unique 64 bit unsigned integer, to distinguish different types of commands. 
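 *
 * A minimal usage sketch; the magic number, step struct and counts below are
 * hypothetical and only illustrate the intended call sequence:
 *
 *   ProgressMonitorData *monitor =
 *       CreateProgressMonitor(MY_COMMAND_MAGIC_NUMBER, stepCount,
 *                             sizeof(MyCommandStep), relationId);
 *   if (monitor != NULL)
 *   {
 *       MyCommandStep *steps = (MyCommandStep *) monitor->steps;
 *       ... update steps[i] as the command makes progress ...
 *   }
 *   FinalizeCurrentProgressMonitor();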
*/ ProgressMonitorData * CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSize, Oid relationId) { dsm_segment *dsmSegment = NULL; dsm_handle dsmHandle = 0; ProgressMonitorData *monitor = NULL; Size monitorSize = 0; if (stepSize <= 0 || stepCount <= 0) { ereport(ERROR, (errmsg("number of steps and size of each step should be " "positive values"))); } monitorSize = sizeof(ProgressMonitorData) + stepSize * stepCount; dsmSegment = dsm_create(monitorSize, DSM_CREATE_NULL_IF_MAXSEGMENTS); if (dsmSegment == NULL) { ereport(WARNING, (errmsg("could not create a dynamic shared memory segment to " "keep track of progress of the current command"))); return NULL; } dsmHandle = dsm_segment_handle(dsmSegment); monitor = MonitorDataFromDSMHandle(dsmHandle, &dsmSegment); monitor->stepCount = stepCount; monitor->processId = MyProcPid; pgstat_progress_start_command(PROGRESS_COMMAND_VACUUM, relationId); pgstat_progress_update_param(1, dsmHandle); pgstat_progress_update_param(0, progressTypeMagicNumber); currentProgressDSMHandle = dsmHandle; return monitor; } /* * GetCurrentProgressMonitor function returns the header and steps array related to the * current progress. A progress monitor should be created by calling * CreateProgressMonitor, before calling this function. */ ProgressMonitorData * GetCurrentProgressMonitor(void) { dsm_segment *dsmSegment = NULL; ProgressMonitorData *monitor = MonitorDataFromDSMHandle(currentProgressDSMHandle, &dsmSegment); return monitor; } /* * FinalizeCurrentProgressMonitor releases the dynamic memory segment of the current * progress monitoring data structure and removes the process from * pg_stat_get_progress_info() output. */ void FinalizeCurrentProgressMonitor(void) { dsm_segment *dsmSegment = dsm_find_mapping(currentProgressDSMHandle); if (dsmSegment != NULL) { dsm_detach(dsmSegment); } pgstat_progress_end_command(); currentProgressDSMHandle = DSM_HANDLE_INVALID; } /* * ProgressMonitorList returns the addresses of monitors of ongoing commands, associated * with the given identifier magic number. The function takes a pass in * pg_stat_get_progress_info output, filters the rows according to the given magic number, * and returns the list of addresses of dynamic shared memory segments. Notice that the * caller detach from the attached segments with a call to DetachFromDSMSegments function. */ List * ProgressMonitorList(uint64 commandTypeMagicNumber, List **attachedDSMSegments) { /* * The expected magic number should reside in the first progress field and the * actual segment handle in the second but the slot ordering is 1-indexed in the * tuple table slot and there are 3 other fields before the progress fields in the * pg_stat_get_progress_info output. */ const int magicNumberIndex = 0 + 1 + 3; const int dsmHandleIndex = 1 + 1 + 3; /* * Currently, Postgres' progress logging mechanism supports only the VACUUM, * operations. Therefore, we identify ourselves as a VACUUM command but only fill * a couple of the available fields. Therefore the commands that use Citus' progress * monitoring API will appear in pg_stat_progress_vacuum output. 
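 *
 * Callers of this function (for example a progress reporting UDF) are
 * expected to detach from the returned segments when done; a typical call
 * sequence looks roughly like this (the magic number is hypothetical):
 *
 *   List *attachedSegments = NIL;
 *   List *monitorList = ProgressMonitorList(MY_COMMAND_MAGIC_NUMBER,
 *                                           &attachedSegments);
 *   ... read each ProgressMonitorData and its steps array ...
 *   DetachFromDSMSegments(attachedSegments);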
*/ text *commandTypeText = cstring_to_text("VACUUM"); Datum commandTypeDatum = PointerGetDatum(commandTypeText); Oid getProgressInfoFunctionOid = InvalidOid; TupleTableSlot *tupleTableSlot = NULL; ReturnSetInfo *progressResultSet = NULL; List *monitorList = NIL; getProgressInfoFunctionOid = FunctionOid("pg_catalog", "pg_stat_get_progress_info", 1); progressResultSet = FunctionCallGetTupleStore1(pg_stat_get_progress_info, getProgressInfoFunctionOid, commandTypeDatum); tupleTableSlot = MakeSingleTupleTableSlot(progressResultSet->setDesc); /* iterate over tuples in tuple store, and send them to destination */ for (;;) { bool nextTuple = false; bool isNull = false; Datum magicNumberDatum = 0; uint64 magicNumber = 0; nextTuple = tuplestore_gettupleslot(progressResultSet->setResult, true, false, tupleTableSlot); if (!nextTuple) { break; } magicNumberDatum = slot_getattr(tupleTableSlot, magicNumberIndex, &isNull); magicNumber = DatumGetUInt64(magicNumberDatum); if (!isNull && magicNumber == commandTypeMagicNumber) { Datum dsmHandleDatum = slot_getattr(tupleTableSlot, dsmHandleIndex, &isNull); dsm_handle dsmHandle = DatumGetUInt64(dsmHandleDatum); dsm_segment *attachedSegment = NULL; ProgressMonitorData *monitor = MonitorDataFromDSMHandle(dsmHandle, &attachedSegment); if (monitor != NULL) { *attachedDSMSegments = lappend(*attachedDSMSegments, attachedSegment); monitorList = lappend(monitorList, monitor); } } ExecClearTuple(tupleTableSlot); } ExecDropSingleTupleTableSlot(tupleTableSlot); return monitorList; } /* * MonitorDataFromDSMHandle returns the progress monitoring data structure at the * given segment */ ProgressMonitorData * MonitorDataFromDSMHandle(dsm_handle dsmHandle, dsm_segment **attachedSegment) { dsm_segment *dsmSegment = dsm_find_mapping(dsmHandle); ProgressMonitorData *monitor = NULL; if (dsmSegment == NULL) { dsmSegment = dsm_attach(dsmHandle); } if (dsmSegment != NULL) { monitor = (ProgressMonitorData *) dsm_segment_address(dsmSegment); monitor->steps = (void *) (monitor + 1); *attachedSegment = dsmSegment; } return monitor; } /* * DetachFromDSMSegments ensures that the process is detached from all of the segments in * the given list. */ void DetachFromDSMSegments(List *dsmSegmentList) { ListCell *dsmSegmentCell = NULL; foreach(dsmSegmentCell, dsmSegmentList) { dsm_segment *dsmSegment = (dsm_segment *) lfirst(dsmSegmentCell); dsm_detach(dsmSegment); } } /* * FunctionCallGetTupleStore1 calls the given set-returning PGFunction with the given * argument and returns the ResultSetInfo filled by the called function. */ static ReturnSetInfo * FunctionCallGetTupleStore1(PGFunction function, Oid functionId, Datum argument) { FunctionCallInfoData fcinfo; FmgrInfo flinfo; ReturnSetInfo *rsinfo = makeNode(ReturnSetInfo); EState *estate = CreateExecutorState(); rsinfo->econtext = GetPerTupleExprContext(estate); rsinfo->allowedModes = SFRM_Materialize; fmgr_info(functionId, &flinfo); InitFunctionCallInfoData(fcinfo, &flinfo, 1, InvalidOid, NULL, (Node *) rsinfo); fcinfo.arg[0] = argument; fcinfo.argnull[0] = false; (*function)(&fcinfo); return rsinfo; } citus-7.0.3/src/backend/distributed/relay/000077500000000000000000000000001317107136600204715ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/relay/relay_event_utility.c000066400000000000000000000513011317107136600247350ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * relay_event_utility.c * * Routines for handling DDL statements that relate to relay files. 
These * routines extend relation, index and constraint names in utility commands. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include #include #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/hash.h" #include "access/htup.h" #include "access/skey.h" #include "access/stratnum.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_constraint.h" #include "distributed/metadata_cache.h" #include "distributed/relay_utility.h" #include "lib/stringinfo.h" #include "mb/pg_wchar.h" #include "nodes/nodes.h" #include "nodes/nodeFuncs.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "nodes/value.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/palloc.h" #include "utils/relcache.h" /* Local functions forward declarations */ static void AppendShardIdToConstraintName(AlterTableCmd *command, uint64 shardId); static void SetSchemaNameIfNotExist(char **schemaName, char *newSchemaName); static bool UpdateWholeRowColumnReferencesWalker(Node *node, uint64 *shardId); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(shard_name); /* * RelayEventExtendNames extends relation names in the given parse tree for * certain utility commands. The function more specifically extends table and * index names in the parse tree by appending the given shardId; thereby * avoiding name collisions in the database among sharded tables. This function * has the side effect of extending relation names in the parse tree. */ void RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId) { /* we don't extend names in extension or schema commands */ NodeTag nodeType = nodeTag(parseTree); if (nodeType == T_CreateExtensionStmt || nodeType == T_CreateSchemaStmt || nodeType == T_CreateSeqStmt || nodeType == T_AlterSeqStmt) { return; } switch (nodeType) { case T_AlterTableStmt: { /* * We append shardId to the very end of table and index names to * avoid name collisions. We also append shardId to constraint names. 
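 *
 * For a hypothetical shard id 102008, a command such as
 * "ALTER TABLE events ADD CONSTRAINT events_pkey PRIMARY KEY (id)" is thus
 * rewritten to operate on "events_102008" with constraint name
 * "events_pkey_102008", and the shard's schema name is prefixed when the
 * original command did not specify one.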
*/ AlterTableStmt *alterTableStmt = (AlterTableStmt *) parseTree; char **relationName = &(alterTableStmt->relation->relname); char **relationSchemaName = &(alterTableStmt->relation->schemaname); List *commandList = alterTableStmt->cmds; ListCell *commandCell = NULL; /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(relationSchemaName, schemaName); /* append shardId to base relation name */ AppendShardIdToName(relationName, shardId); foreach(commandCell, commandList) { AlterTableCmd *command = (AlterTableCmd *) lfirst(commandCell); if (command->subtype == AT_AddConstraint || command->subtype == AT_DropConstraint) { AppendShardIdToConstraintName(command, shardId); } else if (command->subtype == AT_ClusterOn) { char **indexName = &(command->name); AppendShardIdToName(indexName, shardId); } } break; } case T_ClusterStmt: { ClusterStmt *clusterStmt = (ClusterStmt *) parseTree; char **relationName = NULL; char **relationSchemaName = NULL; /* we do not support clustering the entire database */ if (clusterStmt->relation == NULL) { ereport(ERROR, (errmsg("cannot extend name for multi-relation cluster"))); } relationName = &(clusterStmt->relation->relname); relationSchemaName = &(clusterStmt->relation->schemaname); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(relationSchemaName, schemaName); AppendShardIdToName(relationName, shardId); if (clusterStmt->indexname != NULL) { char **indexName = &(clusterStmt->indexname); AppendShardIdToName(indexName, shardId); } break; } case T_CreateForeignServerStmt: { CreateForeignServerStmt *serverStmt = (CreateForeignServerStmt *) parseTree; char **serverName = &(serverStmt->servername); AppendShardIdToName(serverName, shardId); break; } case T_CreateForeignTableStmt: { CreateForeignTableStmt *createStmt = (CreateForeignTableStmt *) parseTree; char **serverName = &(createStmt->servername); AppendShardIdToName(serverName, shardId); /* * Since CreateForeignTableStmt inherits from CreateStmt and any change * performed on CreateStmt should be done here too, we simply *fall * through* to avoid code repetition. */ } /* fallthrough */ case T_CreateStmt: { CreateStmt *createStmt = (CreateStmt *) parseTree; char **relationName = &(createStmt->relation->relname); char **relationSchemaName = &(createStmt->relation->schemaname); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(relationSchemaName, schemaName); AppendShardIdToName(relationName, shardId); break; } case T_DropStmt: { DropStmt *dropStmt = (DropStmt *) parseTree; ObjectType objectType = dropStmt->removeType; if (objectType == OBJECT_TABLE || objectType == OBJECT_INDEX || objectType == OBJECT_FOREIGN_TABLE || objectType == OBJECT_FOREIGN_SERVER) { List *relationNameList = NULL; int relationNameListLength = 0; Value *relationSchemaNameValue = NULL; Value *relationNameValue = NULL; char **relationName = NULL; uint32 dropCount = list_length(dropStmt->objects); if (dropCount > 1) { ereport(ERROR, (errmsg("cannot extend name for multiple drop objects"))); } /* * We now need to extend a single relation or index name. To be * able to do this extension, we need to extract the names' * addresses from the value objects they are stored in. Other- * wise, the repalloc called in AppendShardIdToName() will not * have the correct memory address for the name. 
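 *
 * The extracted name list has one, two or three elements: a bare relation
 * name, a (schema, relation) pair, or a (database, schema, relation)
 * triple, which is exactly what the switch statement below distinguishes.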
*/ relationNameList = (List *) linitial(dropStmt->objects); relationNameListLength = list_length(relationNameList); switch (relationNameListLength) { case 1: { relationNameValue = linitial(relationNameList); break; } case 2: { relationSchemaNameValue = linitial(relationNameList); relationNameValue = lsecond(relationNameList); break; } case 3: { relationSchemaNameValue = lsecond(relationNameList); relationNameValue = lthird(relationNameList); break; } default: { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("improper relation name: \"%s\"", NameListToString(relationNameList)))); break; } } /* prefix with schema name if it is not added already */ if (relationSchemaNameValue == NULL) { Value *schemaNameValue = makeString(pstrdup(schemaName)); relationNameList = lcons(schemaNameValue, relationNameList); } relationName = &(relationNameValue->val.str); AppendShardIdToName(relationName, shardId); } else { ereport(WARNING, (errmsg("unsafe object type in drop statement"), errdetail("Object type: %u", (uint32) objectType))); } break; } case T_GrantStmt: { GrantStmt *grantStmt = (GrantStmt *) parseTree; if (grantStmt->targtype == ACL_TARGET_OBJECT && grantStmt->objtype == ACL_OBJECT_RELATION) { ListCell *lc; foreach(lc, grantStmt->objects) { RangeVar *relation = (RangeVar *) lfirst(lc); char **relationName = &(relation->relname); char **relationSchemaName = &(relation->schemaname); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(relationSchemaName, schemaName); AppendShardIdToName(relationName, shardId); } } break; } case T_IndexStmt: { IndexStmt *indexStmt = (IndexStmt *) parseTree; char **relationName = &(indexStmt->relation->relname); char **indexName = &(indexStmt->idxname); char **relationSchemaName = &(indexStmt->relation->schemaname); /* * Concurrent index statements cannot run within a transaction block. * Therefore, we do not support them. */ if (indexStmt->concurrent) { ereport(ERROR, (errmsg("cannot extend name for concurrent index"))); } /* * In the regular DDL execution code path (for non-sharded tables), * if the index statement results from a table creation command, the * indexName may be null. For sharded tables however, we intercept * that code path and explicitly set the index name. Therefore, the * index name in here cannot be null. 
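 *
 * For illustration only (hypothetical shard ID and schema name): a command
 * such as
 *   CREATE INDEX events_idx ON events (event_id)
 * would end up as
 *   CREATE INDEX events_idx_102008 ON public.events_102008 (event_id)
 * once the relation and index names are extended below.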
*/ if ((*indexName) == NULL) { ereport(ERROR, (errmsg("cannot extend name for null index name"))); } /* extend ColumnRef nodes in the IndexStmt with the shardId */ UpdateWholeRowColumnReferencesWalker((Node *) indexStmt->indexParams, &shardId); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(relationSchemaName, schemaName); AppendShardIdToName(relationName, shardId); AppendShardIdToName(indexName, shardId); break; } case T_ReindexStmt: { ReindexStmt *reindexStmt = (ReindexStmt *) parseTree; ReindexObjectType objectType = reindexStmt->kind; if (objectType == REINDEX_OBJECT_TABLE || objectType == REINDEX_OBJECT_INDEX) { char **objectName = &(reindexStmt->relation->relname); char **objectSchemaName = &(reindexStmt->relation->schemaname); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(objectSchemaName, schemaName); AppendShardIdToName(objectName, shardId); } else if (objectType == REINDEX_OBJECT_DATABASE) { ereport(ERROR, (errmsg("cannot extend name for multi-relation reindex"))); } else { ereport(ERROR, (errmsg("invalid object type in reindex statement"), errdetail("Object type: %u", (uint32) objectType))); } break; } case T_RenameStmt: { RenameStmt *renameStmt = (RenameStmt *) parseTree; ObjectType objectType = renameStmt->renameType; if (objectType == OBJECT_TABLE || objectType == OBJECT_INDEX) { char **oldRelationName = &(renameStmt->relation->relname); char **newRelationName = &(renameStmt->newname); char **objectSchemaName = &(renameStmt->relation->schemaname); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(objectSchemaName, schemaName); AppendShardIdToName(oldRelationName, shardId); AppendShardIdToName(newRelationName, shardId); } else if (objectType == OBJECT_COLUMN || objectType == OBJECT_TRIGGER) { char **relationName = &(renameStmt->relation->relname); char **objectSchemaName = &(renameStmt->relation->schemaname); /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(objectSchemaName, schemaName); AppendShardIdToName(relationName, shardId); } else { ereport(WARNING, (errmsg("unsafe object type in rename statement"), errdetail("Object type: %u", (uint32) objectType))); } break; } case T_TruncateStmt: { /* * We currently do not support truncate statements. This is * primarily because truncates allow implicit modifications to * sequences through table column dependencies. As we have not * determined our dependency model for sequences, we error here. */ ereport(ERROR, (errmsg("cannot extend name for truncate statement"))); break; } default: { ereport(WARNING, (errmsg("unsafe statement type in name extension"), errdetail("Statement type: %u", (uint32) nodeType))); break; } } } /* * RelayEventExtendNamesForInterShardCommands extends relation names in the given parse * tree for certain utility commands. The function more specifically extends table and * constraint names in the parse tree by appending the given shardId; thereby * avoiding name collisions in the database among sharded tables. This function * has the side effect of extending relation names in the parse tree. 
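 *
 * For illustration only (hypothetical shard IDs and schema names): with a
 * left shard ID of 102008 and a right shard ID of 102009, a command such as
 *   ALTER TABLE events ADD CONSTRAINT events_fkey
 *       FOREIGN KEY (user_id) REFERENCES users (user_id)
 * would come to reference public.events_102008 on the referencing side and
 * public.users_102009 on the referenced side, with the constraint name
 * extended to events_fkey_102008 via the drop-through into
 * RelayEventExtendNames.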
*/ void RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId, char *leftShardSchemaName, uint64 rightShardId, char *rightShardSchemaName) { NodeTag nodeType = nodeTag(parseTree); switch (nodeType) { case T_AlterTableStmt: { AlterTableStmt *alterTableStmt = (AlterTableStmt *) parseTree; List *commandList = alterTableStmt->cmds; ListCell *commandCell = NULL; foreach(commandCell, commandList) { AlterTableCmd *command = (AlterTableCmd *) lfirst(commandCell); char **referencedTableName = NULL; char **relationSchemaName = NULL; if (command->subtype == AT_AddConstraint) { Constraint *constraint = (Constraint *) command->def; if (constraint->contype == CONSTR_FOREIGN) { referencedTableName = &(constraint->pktable->relname); relationSchemaName = &(constraint->pktable->schemaname); } } #if (PG_VERSION_NUM >= 100000) else if (command->subtype == AT_AttachPartition || command->subtype == AT_DetachPartition) { PartitionCmd *partitionCommand = (PartitionCmd *) command->def; referencedTableName = &(partitionCommand->name->relname); relationSchemaName = &(partitionCommand->name->schemaname); } #endif else { continue; } /* prefix with schema name if it is not added already */ SetSchemaNameIfNotExist(relationSchemaName, rightShardSchemaName); /* * We will not append shard id to left shard name. This will be * handled when we drop into RelayEventExtendNames. */ AppendShardIdToName(referencedTableName, rightShardId); } /* drop into RelayEventExtendNames for non-inter table commands */ RelayEventExtendNames(parseTree, leftShardSchemaName, leftShardId); break; } default: { ereport(WARNING, (errmsg("unsafe statement type in name extension"), errdetail("Statement type: %u", (uint32) nodeType))); break; } } } /* * AppendShardIdToConstraintName extends given constraint name with given * shardId. Note that we only extend constraint names if they correspond to * indexes, and the caller should verify that index correspondence before * calling this function. */ static void AppendShardIdToConstraintName(AlterTableCmd *command, uint64 shardId) { if (command->subtype == AT_AddConstraint) { Constraint *constraint = (Constraint *) command->def; char **constraintName = &(constraint->conname); AppendShardIdToName(constraintName, shardId); } else if (command->subtype == AT_DropConstraint) { char **constraintName = &(command->name); AppendShardIdToName(constraintName, shardId); } } /* * UpdateWholeRowColumnReferencesWalker extends ColumnRef nodes that end with A_Star * with the given shardId. * * ColumnRefs that don't reference A_Star are not extended as catalog access isn't * allowed here and we don't otherwise have enough context to disambiguate a * field name that is identical to the table name. */ static bool UpdateWholeRowColumnReferencesWalker(Node *node, uint64 *shardId) { bool walkIsComplete = false; if (node == NULL) { return false; } if (IsA(node, IndexElem)) { IndexElem *indexElem = (IndexElem *) node; walkIsComplete = raw_expression_tree_walker(indexElem->expr, UpdateWholeRowColumnReferencesWalker, shardId); } else if (IsA(node, ColumnRef)) { ColumnRef *columnRef = (ColumnRef *) node; Node *lastField = llast(columnRef->fields); if (IsA(lastField, A_Star)) { /* * ColumnRef fields list ends with an A_Star, so we can blindly * extend the penultimate element with the shardId. 
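 *
 * For example, a whole-row reference written as (events.*) in an index
 * expression would become (events_102008.*), assuming a hypothetical shard
 * ID of 102008.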
*/ int colrefFieldCount = list_length(columnRef->fields); Value *relnameValue = list_nth(columnRef->fields, colrefFieldCount - 2); Assert(IsA(relnameValue, String)); AppendShardIdToName(&relnameValue->val.str, *shardId); } /* might be more than one ColumnRef to visit */ walkIsComplete = false; } else { walkIsComplete = raw_expression_tree_walker(node, UpdateWholeRowColumnReferencesWalker, shardId); } return walkIsComplete; } /* * SetSchemaNameIfNotExist function checks whether schemaName is set and if it is not set * it sets its value to given newSchemaName. */ static void SetSchemaNameIfNotExist(char **schemaName, char *newSchemaName) { if ((*schemaName) == NULL) { *schemaName = pstrdup(newSchemaName); } } /* * AppendShardIdToName appends shardId to the given name. The function takes in * the name's address in order to reallocate memory for the name in the same * memory context the name was originally created in. */ void AppendShardIdToName(char **name, uint64 shardId) { char extendedName[NAMEDATALEN]; uint32 extendedNameLength = 0; int nameLength = strlen(*name); char shardIdAndSeparator[NAMEDATALEN]; int shardIdAndSeparatorLength; uint32 longNameHash = 0; int multiByteClipLength = 0; if (nameLength >= NAMEDATALEN) { ereport(ERROR, (errcode(ERRCODE_NAME_TOO_LONG), errmsg("identifier must be less than %d characters", NAMEDATALEN))); } snprintf(shardIdAndSeparator, NAMEDATALEN, "%c" UINT64_FORMAT, SHARD_NAME_SEPARATOR, shardId); shardIdAndSeparatorLength = strlen(shardIdAndSeparator); /* * If *name strlen is < (NAMEDATALEN - shardIdAndSeparatorLength), * it is safe merely to append the separator and shardId. */ if (nameLength < (NAMEDATALEN - shardIdAndSeparatorLength)) { snprintf(extendedName, NAMEDATALEN, "%s%s", (*name), shardIdAndSeparator); } /* * Otherwise, we need to truncate the name further to accommodate * a sufficient hash value. The resulting name will avoid collision * with other hashed names such that for any given schema with * 90 distinct object names that are long enough to require hashing * (typically 57-63 characters), the chance of a collision existing is: * * If randomly generated UTF8 names: * (1e-6) * (9.39323783788e-114) ~= (9.39e-120) * If random case-insensitive ASCII names (letter first, 37 useful characters): * (1e-6) * (2.80380202421e-74) ~= (2.8e-80) * If names sharing only N distinct 45- to 47-character prefixes: * (1e-6) * (1/N) = (1e-6/N) * 1e-7 for 10 distinct prefixes * 5e-8 for 20 distinct prefixes * * In practice, since shard IDs are globally unique, the risk of name collision * exists only amongst objects that pertain to a single distributed table * and are created for each shard: the table name and the names of any indexes * or index-backed constraints. Since there are typically less than five such * names, and almost never more than ten, the expected collision rate even in * the worst case (ten names share same 45- to 47-character prefix) is roughly * 1e-8: one in 100 million schemas will experience a name collision only if ALL * 100 million schemas present the worst-case scenario. 
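 *
 * As a rough sketch of the resulting format (assuming SHARD_NAME_SEPARATOR
 * is the underscore used for ordinary shard names), an over-long identifier
 * is clipped and becomes
 *   <45- to 47-character prefix>_<8 hex digits of name hash>_<shardId>
 * which is what the snprintf() below produces.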
*/ else { longNameHash = hash_any((unsigned char *) (*name), nameLength); multiByteClipLength = pg_mbcliplen(*name, nameLength, (NAMEDATALEN - shardIdAndSeparatorLength - 10)); snprintf(extendedName, NAMEDATALEN, "%.*s%c%.8x%s", multiByteClipLength, (*name), SHARD_NAME_SEPARATOR, longNameHash, shardIdAndSeparator); } extendedNameLength = strlen(extendedName) + 1; Assert(extendedNameLength <= NAMEDATALEN); (*name) = (char *) repalloc((*name), extendedNameLength); snprintf((*name), extendedNameLength, "%s", extendedName); } /* * shard_name() provides a PG function interface to AppendShardNameToId above. * Returns the name of a shard as a quoted schema-qualified identifier. */ Datum shard_name(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); int64 shardId = PG_GETARG_INT64(1); char *relationName = NULL; Oid schemaId = InvalidOid; char *schemaName = NULL; char *qualifiedName = NULL; CheckCitusVersion(ERROR); if (shardId <= 0) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("shard_id cannot be zero or negative value"))); } if (!OidIsValid(relationId)) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("object_name does not reference a valid relation"))); } relationName = get_rel_name(relationId); if (relationName == NULL) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("object_name does not reference a valid relation"))); } AppendShardIdToName(&relationName, shardId); schemaId = get_rel_namespace(relationId); schemaName = get_namespace_name(schemaId); qualifiedName = quote_qualified_identifier(schemaName, relationName); PG_RETURN_TEXT_P(cstring_to_text(qualifiedName)); } citus-7.0.3/src/backend/distributed/shared_library_init.c000066400000000000000000000666331317107136600235540ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * shared_library_init.c * Functionality related to the initialization of the Citus extension. * * Copyright (c) 2012-2016, Citus Data, Inc. 
*------------------------------------------------------------------------- */ #include "postgres.h" #include #include #include #include "fmgr.h" #include "miscadmin.h" #include "citus_version.h" #include "commands/explain.h" #include "executor/executor.h" #include "distributed/backend_data.h" #include "distributed/citus_nodefuncs.h" #include "distributed/connection_management.h" #include "distributed/connection_management.h" #include "distributed/distributed_deadlock_detection.h" #include "distributed/maintenanced.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_copy.h" #include "distributed/multi_explain.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_planner.h" #include "distributed/multi_router_executor.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/multi_utility.h" #include "distributed/pg_dist_partition.h" #include "distributed/placement_connection.h" #include "distributed/remote_commands.h" #include "distributed/shared_library_init.h" #include "distributed/task_tracker.h" #include "distributed/transaction_management.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "postmaster/postmaster.h" #include "optimizer/planner.h" #include "optimizer/paths.h" #include "utils/guc.h" #include "utils/guc_tables.h" /* marks shared object as one loadable by the postgres version compiled against */ PG_MODULE_MAGIC; static char *CitusVersion = CITUS_VERSION; void _PG_init(void); static void multi_log_hook(ErrorData *edata); static void CreateRequiredDirectories(void); static void RegisterCitusConfigVariables(void); static void WarningForEnableDeadlockPrevention(bool newval, void *extra); static bool ErrorIfNotASuitableDeadlockFactor(double *newval, void **extra, GucSource source); static void NormalizeWorkerListPath(void); /* *INDENT-OFF* */ /* GUC enum definitions */ static const struct config_enum_entry task_assignment_policy_options[] = { { "greedy", TASK_ASSIGNMENT_GREEDY, false }, { "first-replica", TASK_ASSIGNMENT_FIRST_REPLICA, false }, { "round-robin", TASK_ASSIGNMENT_ROUND_ROBIN, false }, { NULL, 0, false } }; static const struct config_enum_entry replication_model_options[] = { { "statement", REPLICATION_MODEL_COORDINATOR, false }, { "streaming", REPLICATION_MODEL_STREAMING, false }, { NULL, 0, false } }; static const struct config_enum_entry task_executor_type_options[] = { { "real-time", MULTI_EXECUTOR_REAL_TIME, false }, { "task-tracker", MULTI_EXECUTOR_TASK_TRACKER, false }, { NULL, 0, false } }; static const struct config_enum_entry shard_placement_policy_options[] = { { "local-node-first", SHARD_PLACEMENT_LOCAL_NODE_FIRST, false }, { "round-robin", SHARD_PLACEMENT_ROUND_ROBIN, false }, { "random", SHARD_PLACEMENT_RANDOM, false }, { NULL, 0, false } }; static const struct config_enum_entry use_secondary_nodes_options[] = { { "never", USE_SECONDARY_NODES_NEVER, false }, { "always", USE_SECONDARY_NODES_ALWAYS, false }, { NULL, 0, false } }; static const struct config_enum_entry multi_shard_commit_protocol_options[] = { { "1pc", COMMIT_PROTOCOL_1PC, false }, { "2pc", COMMIT_PROTOCOL_2PC, false }, { NULL, 0, false } }; static const struct config_enum_entry multi_task_query_log_level_options[] = { { "off", MULTI_TASK_QUERY_INFO_OFF, false }, { "debug", DEBUG2, false }, { "log", LOG, false }, { 
"notice", NOTICE, false }, { "warning", WARNING, false }, { "error", ERROR, false }, { NULL, 0, false } }; /* *INDENT-ON* */ /* shared library initialization function */ void _PG_init(void) { if (!process_shared_preload_libraries_in_progress) { ereport(ERROR, (errmsg("Citus can only be loaded via shared_preload_libraries"), errhint("Add citus to shared_preload_libraries configuration " "variable in postgresql.conf in master and workers. Note " "that citus should be at the beginning of " "shared_preload_libraries."))); } /* * Perform checks before registering any hooks, to avoid erroring out in a * partial state. * * In many cases (e.g. planner and utility hook, to run inside * pg_stat_statements et. al.) we have to be loaded before other hooks * (thus as the innermost/last running hook) to be able to do our * duties. For simplicity insist that all hooks are previously unused. */ if (planner_hook != NULL || ProcessUtility_hook != NULL) { ereport(ERROR, (errmsg("Citus has to be loaded first"), errhint("Place citus at the beginning of " "shared_preload_libraries."))); } /* * Extend the database directory structure before continuing with * initialization - one of the later steps might require them to exist. */ CreateRequiredDirectories(); /* * Register Citus configuration variables. Do so before intercepting * hooks or calling initialization functions, in case we want to do the * latter in a configuration dependent manner. */ RegisterCitusConfigVariables(); /* make our additional node types known */ RegisterNodes(); /* intercept planner */ planner_hook = multi_planner; /* register utility hook */ #if (PG_VERSION_NUM >= 100000) ProcessUtility_hook = multi_ProcessUtility; #else ProcessUtility_hook = multi_ProcessUtility9x; #endif /* register for planner hook */ set_rel_pathlist_hook = multi_relation_restriction_hook; set_join_pathlist_hook = multi_join_restriction_hook; /* register hook for error messages */ emit_log_hook = multi_log_hook; InitializeMaintenanceDaemon(); /* organize that task tracker is started once server is up */ TaskTrackerRegister(); /* initialize coordinated transaction management */ InitializeTransactionManagement(); InitializeBackendManagement(); InitializeConnectionManagement(); InitPlacementConnectionManagement(); /* enable modification of pg_catalog tables during pg_upgrade */ if (IsBinaryUpgrade) { SetConfigOption("allow_system_table_mods", "true", PGC_POSTMASTER, PGC_S_OVERRIDE); } } /* * multi_log_hook intercepts postgres log commands. We use this to override * postgres error messages when they're not specific enough for the users. */ static void multi_log_hook(ErrorData *edata) { /* * Show the user a meaningful error message when a backend is cancelled * by the distributed deadlock detection. */ if (edata->elevel == ERROR && edata->sqlerrcode == ERRCODE_QUERY_CANCELED && MyBackendGotCancelledDueToDeadlock()) { edata->sqlerrcode = ERRCODE_T_R_DEADLOCK_DETECTED; edata->message = "canceling the transaction since it has " "involved in a distributed deadlock"; } } /* * StartupCitusBackend initializes per-backend infrastructure, and is called * the first time citus is used in a database. * * NB: All code here has to be able to cope with this routine being called * multiple times in the same backend. This will e.g. happen when the * extension is created or upgraded. */ void StartupCitusBackend(void) { InitializeMaintenanceDaemonBackend(); InitializeBackendData(); } /* * CreateRequiredDirectories - Create directories required for Citus to * function. 
* * These used to be created by initdb, but that's not possible anymore. */ static void CreateRequiredDirectories(void) { int dirNo = 0; const char *subdirs[] = { "pg_foreign_file", "pg_foreign_file/cached", "base/pgsql_job_cache" }; for (dirNo = 0; dirNo < lengthof(subdirs); dirNo++) { int ret = mkdir(subdirs[dirNo], S_IRWXU); if (ret != 0 && errno != EEXIST) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", subdirs[dirNo]))); } } } /* Register Citus configuration variables. */ static void RegisterCitusConfigVariables(void) { DefineCustomIntVariable( "citus.node_connection_timeout", gettext_noop("Sets the maximum duration to connect to worker nodes."), NULL, &NodeConnectionTimeout, 5000, 10, 60 * 60 * 1000, PGC_USERSET, GUC_UNIT_MS, NULL, NULL, NULL); /* keeping temporarily for updates from pre-6.0 versions */ DefineCustomStringVariable( "citus.worker_list_file", gettext_noop("Sets the server's \"worker_list\" configuration file."), NULL, &WorkerListFileName, NULL, PGC_POSTMASTER, GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL, NULL, NULL, NULL); NormalizeWorkerListPath(); DefineCustomBoolVariable( "citus.binary_master_copy_format", gettext_noop("Use the binary master copy format."), gettext_noop("When enabled, data is copied from workers to the master " "in PostgreSQL's binary serialization format."), &BinaryMasterCopyFormat, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.binary_worker_copy_format", gettext_noop("Use the binary worker copy format."), gettext_noop("When enabled, data is copied from workers to workers " "in PostgreSQL's binary serialization format when " "joining large tables."), &BinaryWorkerCopyFormat, false, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.expire_cached_shards", gettext_noop("Enables shard cache expiration if a shard's size on disk has " "changed."), gettext_noop("When appending to an existing shard, old data may still be cached " "on other workers. This configuration entry activates automatic " "expiration, but should not be used with manual updates to shards."), &ExpireCachedShards, false, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.subquery_pushdown", gettext_noop("Enables supported subquery pushdown to workers."), NULL, &SubqueryPushdown, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.log_multi_join_order", gettext_noop("Logs the distributed join order to the server log."), gettext_noop("We use this private configuration entry as a debugging aid. " "If enabled, we print the distributed join order."), &LogMultiJoinOrder, false, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.log_remote_commands", gettext_noop("Log queries sent to other nodes in the server log"), NULL, &LogRemoteCommands, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.log_distributed_deadlock_detection", gettext_noop("Log distributed deadlock detection related processing in " "the server log"), NULL, &LogDistributedDeadlockDetection, false, PGC_SIGHUP, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.explain_distributed_queries", gettext_noop("Enables Explain for distributed queries."), gettext_noop("When enabled, the Explain command shows remote and local " "plans when used with a distributed query. 
It is enabled " "by default, but can be disabled for regression tests."), &ExplainDistributedQueries, true, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.explain_all_tasks", gettext_noop("Enables showing output for all tasks in Explain."), gettext_noop("The Explain command for distributed queries shows " "the remote plan for a single task by default. When " "this configuration entry is enabled, the plan for " "all tasks is shown, but the Explain takes longer."), &ExplainAllTasks, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.all_modifications_commutative", gettext_noop("Bypasses commutativity checks when enabled"), NULL, &AllModificationsCommutative, false, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomRealVariable( "citus.distributed_deadlock_detection_factor", gettext_noop("Sets the time to wait before checking for distributed " "deadlocks. Postgres' deadlock_timeout setting is " "multiplied with the value. If the value is set to" "1000, distributed deadlock detection is disabled."), NULL, &DistributedDeadlockDetectionTimeoutFactor, 2.0, -1.0, 1000.0, PGC_SIGHUP, 0, ErrorIfNotASuitableDeadlockFactor, NULL, NULL); DefineCustomBoolVariable( "citus.enable_deadlock_prevention", gettext_noop("Prevents transactions from expanding to multiple nodes"), gettext_noop("When enabled, consecutive DML statements that write to " "shards on different nodes are prevented to avoid creating " "undetectable distributed deadlocks when performed " "concurrently."), &EnableDeadlockPrevention, true, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, WarningForEnableDeadlockPrevention, NULL); DefineCustomBoolVariable( "citus.enable_ddl_propagation", gettext_noop("Enables propagating DDL statements to worker shards"), NULL, &EnableDDLPropagation, true, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.enable_router_execution", gettext_noop("Enables router execution"), NULL, &EnableRouterExecution, true, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomIntVariable( "citus.shard_count", gettext_noop("Sets the number of shards for a new hash-partitioned table" "created with create_distributed_table()."), NULL, &ShardCount, 32, 1, 64000, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.shard_replication_factor", gettext_noop("Sets the replication factor for shards."), gettext_noop("Shards are replicated across nodes according to this " "replication factor. Note that shards read this " "configuration value at sharded table creation time, " "and later reuse the initially read value."), &ShardReplicationFactor, 1, 1, 100, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.shard_max_size", gettext_noop("Sets the maximum size a shard will grow before it gets split."), gettext_noop("Shards store table and file data. When the source " "file's size for one shard exceeds this configuration " "value, the database ensures that either a new shard " "gets created, or the current one gets split. 
Note that " "shards read this configuration value at sharded table " "creation time, and later reuse the initially read value."), &ShardMaxSize, 1048576, 256, INT_MAX, /* max allowed size not set to MAX_KILOBYTES on purpose */ PGC_USERSET, GUC_UNIT_KB, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_worker_nodes_tracked", gettext_noop("Sets the maximum number of worker nodes that are tracked."), gettext_noop("Worker nodes' network locations, their membership and " "health status are tracked in a shared hash table on " "the master node. This configuration value limits the " "size of the hash table, and consequently the maximum " "number of worker nodes that can be tracked."), &MaxWorkerNodesTracked, 2048, 8, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.remote_task_check_interval", gettext_noop("Sets the frequency at which we check job statuses."), gettext_noop("The master node assigns tasks to workers nodes, and " "then regularly checks with them about each task's " "progress. This configuration value sets the time " "interval between two consequent checks."), &RemoteTaskCheckInterval, 10, 1, INT_MAX, PGC_USERSET, GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomIntVariable( "citus.task_tracker_delay", gettext_noop("Task tracker sleep time between task management rounds."), gettext_noop("The task tracker process wakes up regularly, walks over " "all tasks assigned to it, and schedules and executes these " "tasks. Then, the task tracker sleeps for a time period " "before walking over these tasks again. This configuration " "value determines the length of that sleeping period."), &TaskTrackerDelay, 200, 1, 100000, PGC_SIGHUP, GUC_UNIT_MS, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_assign_task_batch_size", gettext_noop("Sets the maximum number of tasks to assign per round."), gettext_noop("The master node synchronously assigns tasks to workers in " "batches. Bigger batches allow for faster task assignment, " "but it may take longer for all workers to get tasks " "if the number of workers is large. This configuration " "value controls the maximum batch size."), &MaxAssignTaskBatchSize, 64, 1, INT_MAX, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_tracked_tasks_per_node", gettext_noop("Sets the maximum number of tracked tasks per node."), gettext_noop("The task tracker processes keeps all assigned tasks in " "a shared hash table, and schedules and executes these " "tasks as appropriate. This configuration value limits " "the size of the hash table, and therefore the maximum " "number of tasks that can be tracked at any given time."), &MaxTrackedTasksPerNode, 1024, 8, INT_MAX, PGC_POSTMASTER, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_running_tasks_per_node", gettext_noop("Sets the maximum number of tasks to run concurrently per node."), gettext_noop("The task tracker process schedules and executes the tasks " "assigned to it as appropriate. This configuration value " "sets the maximum number of tasks to execute concurrently " "on one node at any given time."), &MaxRunningTasksPerNode, 8, 1, INT_MAX, PGC_SIGHUP, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.partition_buffer_size", gettext_noop("Sets the buffer size to use for partition operations."), gettext_noop("Worker nodes allow for table data to be repartitioned " "into multiple text files, much like Hadoop's Map " "command. This configuration value sets the buffer size " "to use per partition operation. 
After the buffer fills " "up, we flush the repartitioned data into text files."), &PartitionBufferSize, 8192, 0, (INT_MAX / 1024), /* result stored in int variable */ PGC_USERSET, GUC_UNIT_KB, NULL, NULL, NULL); DefineCustomIntVariable( "citus.large_table_shard_count", gettext_noop("The shard count threshold over which a table is considered large."), gettext_noop("A distributed table is considered to be large if it has " "more shards than the value specified here. This largeness " "criteria is then used in picking a table join order during " "distributed query planning."), &LargeTableShardCount, 4, 1, 10000, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomIntVariable( "citus.limit_clause_row_fetch_count", gettext_noop("Number of rows to fetch per task for limit clause optimization."), gettext_noop("Select queries get partitioned and executed as smaller " "tasks. In some cases, select queries with limit clauses " "may need to fetch all rows from each task to generate " "results. In those cases, and where an approximation would " "produce meaningful results, this configuration value sets " "the number of rows to fetch from each task."), &LimitClauseRowFetchCount, -1, -1, INT_MAX, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomRealVariable( "citus.count_distinct_error_rate", gettext_noop("Desired error rate when calculating count(distinct) " "approximates using the postgresql-hll extension. " "0.0 disables approximations for count(distinct); 1.0 " "provides no guarantees about the accuracy of results."), NULL, &CountDistinctErrorRate, 0.0, 0.0, 1.0, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.multi_shard_commit_protocol", gettext_noop("Sets the commit protocol for commands modifying multiple shards."), gettext_noop("When a failure occurs during commands that modify multiple " "shards (currently, only COPY on distributed tables modifies more " "than one shard), two-phase commit is required to ensure data is " "never lost. Change this setting to '2pc' from its default '1pc' to " "enable 2 PC. You must also set max_prepared_transactions on the " "worker nodes. Recovery from failed 2PCs is currently manual."), &MultiShardCommitProtocol, COMMIT_PROTOCOL_1PC, multi_shard_commit_protocol_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.task_assignment_policy", gettext_noop("Sets the policy to use when assigning tasks to worker nodes."), gettext_noop("The master node assigns tasks to worker nodes based on shard " "locations. This configuration value specifies the policy to " "use when making these assignments. The greedy policy aims to " "evenly distribute tasks across worker nodes, first-replica just " "assigns tasks in the order shard placements were created, " "and the round-robin policy assigns tasks to worker nodes in " "a round-robin fashion."), &TaskAssignmentPolicy, TASK_ASSIGNMENT_GREEDY, task_assignment_policy_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.replication_model", gettext_noop("Sets the replication model to be used for distributed tables."), gettext_noop("Depending upon the execution environment, statement- or streaming-" "based replication modes may be employed. 
Though most Citus deploy-" "ments will simply use statement replication, hosted and MX-style" "deployments should set this parameter to 'streaming'."), &ReplicationModel, REPLICATION_MODEL_COORDINATOR, replication_model_options, PGC_SUSET, GUC_SUPERUSER_ONLY, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.task_executor_type", gettext_noop("Sets the executor type to be used for distributed queries."), gettext_noop("The master node chooses between two different executor types " "when executing a distributed query.The real-time executor is " "optimal for simple key-value lookup queries and queries that " "involve aggregations and/or co-located joins on multiple shards. " "The task-tracker executor is optimal for long-running, complex " "queries that touch thousands of shards and/or that involve table " "repartitioning."), &TaskExecutorType, MULTI_EXECUTOR_REAL_TIME, task_executor_type_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.shard_placement_policy", gettext_noop("Sets the policy to use when choosing nodes for shard placement."), gettext_noop("The master node chooses which worker nodes to place new shards " "on. This configuration value specifies the policy to use when " "selecting these nodes. The local-node-first policy places the " "first replica on the client node and chooses others randomly. " "The round-robin policy aims to distribute shards evenly across " "the cluster by selecting nodes in a round-robin fashion." "The random policy picks all workers randomly."), &ShardPlacementPolicy, SHARD_PLACEMENT_ROUND_ROBIN, shard_placement_policy_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.use_secondary_nodes", gettext_noop("Sets the policy to use when choosing nodes for SELECT queries."), NULL, &ReadFromSecondaries, USE_SECONDARY_NODES_NEVER, use_secondary_nodes_options, PGC_SU_BACKEND, 0, NULL, NULL, NULL); DefineCustomEnumVariable( "citus.multi_task_query_log_level", gettext_noop("Sets the level of multi task query execution log messages"), NULL, &MultiTaskQueryLogLevel, MULTI_TASK_QUERY_INFO_OFF, multi_task_query_log_level_options, PGC_USERSET, 0, NULL, NULL, NULL); DefineCustomStringVariable( "citus.version", gettext_noop("Shows the Citus library version"), NULL, &CitusVersion, CITUS_VERSION, PGC_INTERNAL, 0, NULL, NULL, NULL); DefineCustomStringVariable( "citus.cluster_name", gettext_noop("Which cluster this node is a part of"), NULL, &CurrentCluster, "default", PGC_SU_BACKEND, 0, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.enable_version_checks", gettext_noop("Enables version checks during CREATE/ALTER EXTENSION commands"), NULL, &EnableVersionChecks, true, PGC_USERSET, GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomBoolVariable( "citus.enable_unique_job_ids", gettext_noop("Enables unique job IDs by prepending the local process ID and " "group ID. This should usually be enabled, but can be disabled " "for repeatable output in regression tests."), NULL, &EnableUniqueJobIds, true, PGC_USERSET, GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL, NULL, NULL, NULL); DefineCustomIntVariable( "citus.max_task_string_size", gettext_noop("Sets the maximum size (in bytes) of a worker task call string."), gettext_noop("Active worker tasks' are tracked in a shared hash table " "on the master node. 
This configuration value limits the " "maximum size of an individual worker task, and " "affects the size of pre-allocated shared memory."), &MaxTaskStringSize, 12288, 8192, 65536, PGC_POSTMASTER, 0, NULL, NULL, NULL); /* warn about config items in the citus namespace that are not registered above */ EmitWarningsOnPlaceholders("citus"); } /* * Inform the users about the deprecated flag. */ static void WarningForEnableDeadlockPrevention(bool newval, void *extra) { ereport(WARNING, (errcode(ERRCODE_WARNING_DEPRECATED_FEATURE), errmsg("citus.enable_deadlock_prevention is deprecated and it has " "no effect. The flag will be removed in the next release."))); } /* * We don't want to allow values less than 1.0. However, we define -1 as the value to disable * distributed deadlock checking. Here we enforce our special constraint. */ static bool ErrorIfNotASuitableDeadlockFactor(double *newval, void **extra, GucSource source) { if (*newval <= 1.0 && *newval != -1.0) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg( "citus.distributed_deadlock_detection_factor cannot be less than 1. " "To disable distributed deadlock detection set the value to -1."))); return false; } return true; } /* * NormalizeWorkerListPath converts the path configured via * citus.worker_list_file into an absolute path, falling back to the default * value if necessary. The previous value of the config variable is * overwritten with the normalized value. * * NB: This has to be called before ChangeToDataDir() is called as otherwise * the relative paths won't make much sense to the user anymore. */ static void NormalizeWorkerListPath(void) { char *absoluteFileName = NULL; if (WorkerListFileName != NULL) { absoluteFileName = make_absolute_path(WorkerListFileName); } else if (DataDir != NULL) { absoluteFileName = malloc(strlen(DataDir) + strlen(WORKER_LIST_FILENAME) + 2); if (absoluteFileName == NULL) { ereport(FATAL, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } sprintf(absoluteFileName, "%s/%s", DataDir, WORKER_LIST_FILENAME); } else { ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("%s does not know where to find the \"worker_list_file\" " "configuration file.\n" "This can be specified as \"citus.worker_list_file\" in " "\"%s\", or by the -D invocation option, or by the PGDATA " "environment variable.\n", progname, ConfigFileName))); } SetConfigOption("citus.worker_list_file", absoluteFileName, PGC_POSTMASTER, PGC_S_OVERRIDE); free(absoluteFileName); } citus-7.0.3/src/backend/distributed/test/000077500000000000000000000000001317107136600203345ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/test/colocation_utils.c000066400000000000000000000063551317107136600240630ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/colocations_utils.c * * This file contains functions to test co-location functionality * within Citus. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "fmgr.h" #include "catalog/pg_type.h" #include "distributed/colocation_utils.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(get_table_colocation_id); PG_FUNCTION_INFO_V1(tables_colocated); PG_FUNCTION_INFO_V1(shards_colocated); PG_FUNCTION_INFO_V1(get_colocated_table_array); PG_FUNCTION_INFO_V1(find_shard_interval_index); /* * get_table_colocation_id returns colocation id of given distributed table. */ Datum get_table_colocation_id(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); uint32 colocationId = TableColocationId(distributedTableId); PG_RETURN_INT32(colocationId); } /* * tables_colocated checks if given two tables are co-located or not. If they are * co-located, this function returns true. */ Datum tables_colocated(PG_FUNCTION_ARGS) { Oid leftDistributedTableId = PG_GETARG_OID(0); Oid rightDistributedTableId = PG_GETARG_OID(1); bool tablesColocated = TablesColocated(leftDistributedTableId, rightDistributedTableId); PG_RETURN_BOOL(tablesColocated); } /* * shards_colocated checks if given two shards are co-located or not. If they are * co-located, this function returns true. */ Datum shards_colocated(PG_FUNCTION_ARGS) { uint32 leftShardId = PG_GETARG_UINT32(0); uint32 rightShardId = PG_GETARG_UINT32(1); ShardInterval *leftShard = LoadShardInterval(leftShardId); ShardInterval *rightShard = LoadShardInterval(rightShardId); bool shardsColocated = ShardsColocated(leftShard, rightShard); PG_RETURN_BOOL(shardsColocated); } /* * get_colocated_tables_array returns array of table oids which are co-located with given * distributed table. */ Datum get_colocated_table_array(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); ArrayType *colocatedTablesArrayType = NULL; List *colocatedTableList = ColocatedTableList(distributedTableId); ListCell *colocatedTableCell = NULL; int colocatedTableCount = list_length(colocatedTableList); Datum *colocatedTablesDatumArray = palloc0(colocatedTableCount * sizeof(Datum)); Oid arrayTypeId = OIDOID; int colocatedTableIndex = 0; foreach(colocatedTableCell, colocatedTableList) { Oid colocatedTableId = lfirst_oid(colocatedTableCell); Datum colocatedTableDatum = ObjectIdGetDatum(colocatedTableId); colocatedTablesDatumArray[colocatedTableIndex] = colocatedTableDatum; colocatedTableIndex++; } colocatedTablesArrayType = DatumArrayToArrayType(colocatedTablesDatumArray, colocatedTableCount, arrayTypeId); PG_RETURN_ARRAYTYPE_P(colocatedTablesArrayType); } /* * find_shard_interval_index finds index of given shard in sorted shard interval list. */ Datum find_shard_interval_index(PG_FUNCTION_ARGS) { uint32 shardId = PG_GETARG_UINT32(0); ShardInterval *shardInterval = LoadShardInterval(shardId); uint32 shardIndex = ShardIndex(shardInterval); PG_RETURN_INT32(shardIndex); } citus-7.0.3/src/backend/distributed/test/create_shards.c000066400000000000000000000032431317107136600233110ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/create_shards.c * * This file contains functions to exercise shard creation functionality * within Citus. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include #include "distributed/listutils.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" /* local function forward declarations */ static int CompareStrings(const void *leftElement, const void *rightElement); /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(sort_names); /* * sort_names accepts three strings, places them in a list, then calls PGSSortList * to test its sort functionality. Returns a string containing sorted lines. */ Datum sort_names(PG_FUNCTION_ARGS) { char *first = PG_GETARG_CSTRING(0); char *second = PG_GETARG_CSTRING(1); char *third = PG_GETARG_CSTRING(2); List *nameList = SortList(list_make3(first, second, third), (int (*)(const void *, const void *))(&CompareStrings)); StringInfo sortedNames = makeStringInfo(); ListCell *nameCell = NULL; foreach(nameCell, nameList) { char *name = lfirst(nameCell); appendStringInfo(sortedNames, "%s\n", name); } PG_RETURN_CSTRING(sortedNames->data); } /* * A simple wrapper around strcmp suitable for use with PGSSortList or qsort. */ static int CompareStrings(const void *leftElement, const void *rightElement) { const char *leftString = *((const char **) leftElement); const char *rightString = *((const char **) rightElement); return strcmp(leftString, rightString); } citus-7.0.3/src/backend/distributed/test/deparse_shard_query.c000066400000000000000000000042171317107136600245350ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/depase_shard_query.c * * This file contains functions to exercise deparsing of INSERT .. SELECT queries * for distributed tables. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include #include "catalog/pg_type.h" #include "distributed/master_protocol.h" #include "distributed/citus_ruleutils.h" #include "distributed/insert_select_planner.h" #include "distributed/multi_router_planner.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/value.h" #include "tcop/tcopprot.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/palloc.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(deparse_shard_query_test); Datum deparse_shard_query_test(PG_FUNCTION_ARGS) { text *queryString = PG_GETARG_TEXT_P(0); char *queryStringChar = text_to_cstring(queryString); List *parseTreeList = pg_parse_query(queryStringChar); ListCell *parseTreeCell = NULL; foreach(parseTreeCell, parseTreeList) { Node *parsetree = (Node *) lfirst(parseTreeCell); ListCell *queryTreeCell = NULL; List *queryTreeList = NIL; #if (PG_VERSION_NUM >= 100000) queryTreeList = pg_analyze_and_rewrite((RawStmt *) parsetree, queryStringChar, NULL, 0, NULL); #else queryTreeList = pg_analyze_and_rewrite(parsetree, queryStringChar, NULL, 0); #endif foreach(queryTreeCell, queryTreeList) { Query *query = lfirst(queryTreeCell); StringInfo buffer = makeStringInfo(); /* reoreder the target list only for INSERT .. 
SELECT queries */ if (InsertSelectIntoDistributedTable(query)) { RangeTblEntry *insertRte = linitial(query->rtable); RangeTblEntry *subqueryRte = lsecond(query->rtable); ReorderInsertSelectTargetLists(query, insertRte, subqueryRte); } deparse_shard_query(query, InvalidOid, 0, buffer); elog(INFO, "query: %s", buffer->data); } } PG_RETURN_VOID(); } citus-7.0.3/src/backend/distributed/test/distributed_deadlock_detection.c000066400000000000000000000064171317107136600267160ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/distributed_deadlock_detection.c * * This file contains functions to exercise distributed deadlock detection * related lower level functionality. * * Copyright (c) 20167, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "miscadmin.h" #include "access/hash.h" #include "distributed/backend_data.h" #include "distributed/distributed_deadlock_detection.h" #include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" #include "distributed/transaction_identifier.h" #include "nodes/pg_list.h" #include "utils/hsearch.h" #include "utils/timestamp.h" PG_FUNCTION_INFO_V1(get_adjacency_list_wait_graph); /* * get_adjacency_list_wait_graph returns the wait graph in adjacency list format. For the * details see BuildAdjacencyListForWaitGraph(). * * This function is mostly useful for testing and debugging purposes. */ Datum get_adjacency_list_wait_graph(PG_FUNCTION_ARGS) { ReturnSetInfo *returnSetInfo = (ReturnSetInfo *) fcinfo->resultinfo; TupleDesc tupleDescriptor = NULL; Tuplestorestate *tupleStore = NULL; MemoryContext perQueryContext = NULL; MemoryContext oldContext = NULL; WaitGraph *waitGraph = NULL; HTAB *adjacencyList = NULL; HASH_SEQ_STATUS status; TransactionNode *transactionNode = NULL; const int attributeCount = 2; Datum values[attributeCount]; bool isNulls[attributeCount]; CheckCitusVersion(ERROR); /* check to see if caller supports us returning a tuplestore */ if (returnSetInfo == NULL || !IsA(returnSetInfo, ReturnSetInfo)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context " \ "that cannot accept a set"))); } if (!(returnSetInfo->allowedModes & SFRM_Materialize)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " \ "allowed in this context"))); } /* build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupleDescriptor) != TYPEFUNC_COMPOSITE) { elog(ERROR, "return type must be a row type"); } perQueryContext = returnSetInfo->econtext->ecxt_per_query_memory; oldContext = MemoryContextSwitchTo(perQueryContext); tupleStore = tuplestore_begin_heap(true, false, work_mem); returnSetInfo->returnMode = SFRM_Materialize; returnSetInfo->setResult = tupleStore; returnSetInfo->setDesc = tupleDescriptor; MemoryContextSwitchTo(oldContext); waitGraph = BuildGlobalWaitGraph(); adjacencyList = BuildAdjacencyListsForWaitGraph(waitGraph); /* iterate on all nodes */ hash_seq_init(&status, adjacencyList); while ((transactionNode = (TransactionNode *) hash_seq_search(&status)) != 0) { memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); values[0] = UInt64GetDatum(transactionNode->transactionId.transactionNumber); values[1] = CStringGetDatum(WaitsForToString(transactionNode->waitsFor)); 
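/* emit one result row per transaction: (transaction number, textual waits-for list) */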
tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls); } /* clean up and return the tuplestore */ tuplestore_donestoring(tupleStore); PG_RETURN_VOID(); } citus-7.0.3/src/backend/distributed/test/distribution_metadata.c000066400000000000000000000160751317107136600250700ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/distribution_metadata.c * * This file contains functions to exercise distributed table metadata * functionality within Citus. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include #include #include "access/heapam.h" #include "catalog/pg_type.h" #include "distributed/distribution_column.h" #include "distributed/listutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/multi_physical_planner.h" #include "distributed/pg_dist_shard.h" #include "distributed/resource_lock.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "storage/lock.h" #include "utils/array.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/builtins.h" #include "utils/palloc.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(load_shard_id_array); PG_FUNCTION_INFO_V1(load_shard_interval_array); PG_FUNCTION_INFO_V1(load_shard_placement_array); PG_FUNCTION_INFO_V1(partition_column_id); PG_FUNCTION_INFO_V1(partition_type); PG_FUNCTION_INFO_V1(is_distributed_table); PG_FUNCTION_INFO_V1(create_monolithic_shard_row); PG_FUNCTION_INFO_V1(acquire_shared_shard_lock); /* * load_shard_id_array returns the shard identifiers for a particular * distributed table as a bigint array. If the table is not distributed * yet, the function errors-out. */ Datum load_shard_id_array(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); ArrayType *shardIdArrayType = NULL; ListCell *shardCell = NULL; int shardIdIndex = 0; Oid shardIdTypeId = INT8OID; int shardIdCount = -1; Datum *shardIdDatumArray = NULL; List *shardList = LoadShardIntervalList(distributedTableId); shardIdCount = list_length(shardList); shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); foreach(shardCell, shardList) { ShardInterval *shardId = (ShardInterval *) lfirst(shardCell); Datum shardIdDatum = Int64GetDatum(shardId->shardId); shardIdDatumArray[shardIdIndex] = shardIdDatum; shardIdIndex++; } shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, shardIdTypeId); PG_RETURN_ARRAYTYPE_P(shardIdArrayType); } /* * load_shard_interval_array loads a shard interval using a provided identifier * and returns a two-element array consisting of min/max values contained in * that shard interval. If no such interval can be found, this function raises * an error instead. 
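 *
 * The element type of the returned array is the shard interval's own value
 * type (shardInterval->valueTypeId); assert-enabled builds check that it
 * matches the type of the otherwise unused second argument.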
*/ Datum load_shard_interval_array(PG_FUNCTION_ARGS) { int64 shardId = PG_GETARG_INT64(0); Oid expectedType PG_USED_FOR_ASSERTS_ONLY = get_fn_expr_argtype(fcinfo->flinfo, 1); ShardInterval *shardInterval = LoadShardInterval(shardId); Datum shardIntervalArray[] = { shardInterval->minValue, shardInterval->maxValue }; ArrayType *shardIntervalArrayType = NULL; Assert(expectedType == shardInterval->valueTypeId); shardIntervalArrayType = DatumArrayToArrayType(shardIntervalArray, 2, shardInterval->valueTypeId); PG_RETURN_ARRAYTYPE_P(shardIntervalArrayType); } /* * load_shard_placement_array loads a shard interval using the provided ID * and returns an array of strings containing the node name and port for each * placement of the specified shard interval. If the second argument is true, * only finalized placements are returned; otherwise, all are. If no such shard * interval can be found, this function raises an error instead. */ Datum load_shard_placement_array(PG_FUNCTION_ARGS) { int64 shardId = PG_GETARG_INT64(0); bool onlyFinalized = PG_GETARG_BOOL(1); ArrayType *placementArrayType = NULL; List *placementList = NIL; ListCell *placementCell = NULL; int placementCount = -1; int placementIndex = 0; Datum *placementDatumArray = NULL; Oid placementTypeId = TEXTOID; StringInfo placementInfo = makeStringInfo(); if (onlyFinalized) { placementList = FinalizedShardPlacementList(shardId); } else { placementList = ShardPlacementList(shardId); } placementList = SortList(placementList, CompareShardPlacements); placementCount = list_length(placementList); placementDatumArray = palloc0(placementCount * sizeof(Datum)); foreach(placementCell, placementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(placementCell); appendStringInfo(placementInfo, "%s:%d", placement->nodeName, placement->nodePort); placementDatumArray[placementIndex] = CStringGetTextDatum(placementInfo->data); placementIndex++; resetStringInfo(placementInfo); } placementArrayType = DatumArrayToArrayType(placementDatumArray, placementCount, placementTypeId); PG_RETURN_ARRAYTYPE_P(placementArrayType); } /* * partition_column_id simply finds a distributed table using the provided Oid * and returns the column_id of its partition column. If the specified table is * not distributed, this function raises an error instead. */ Datum partition_column_id(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); uint32 rangeTableId = 1; Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); PG_RETURN_INT16((int16) partitionColumn->varattno); } /* * partition_type simply finds a distributed table using the provided Oid and * returns the type of partitioning in use by that table. If the specified * table is not distributed, this function raises an error instead. */ Datum partition_type(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); char partitionType = PartitionMethod(distributedTableId); PG_RETURN_CHAR(partitionType); } /* * is_distributed_table simply returns whether a given table is distributed. No * errors, just a boolean. */ Datum is_distributed_table(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); bool isDistributedTable = IsDistributedTable(distributedTableId); PG_RETURN_BOOL(isDistributedTable); } /* * create_monolithic_shard_row creates a single shard covering all possible * hash values for a given table and inserts a row representing that shard * into the backing store. It returns the primary key of the new row. 
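 *
 * As implemented below, the shard's min/max values are the textual forms of
 * INT32_MIN and INT32_MAX, so the single row covers the entire hash range.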
*/ Datum create_monolithic_shard_row(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); StringInfo minInfo = makeStringInfo(); StringInfo maxInfo = makeStringInfo(); uint64 newShardId = GetNextShardId(); text *maxInfoText = NULL; text *minInfoText = NULL; appendStringInfo(minInfo, "%d", INT32_MIN); appendStringInfo(maxInfo, "%d", INT32_MAX); minInfoText = cstring_to_text(minInfo->data); maxInfoText = cstring_to_text(maxInfo->data); InsertShardRow(distributedTableId, newShardId, SHARD_STORAGE_TABLE, minInfoText, maxInfoText); PG_RETURN_INT64(newShardId); } /* * acquire_shared_shard_lock grabs a shared lock for the specified shard. */ Datum acquire_shared_shard_lock(PG_FUNCTION_ARGS) { int64 shardId = PG_GETARG_INT64(0); LockShardResource(shardId, ShareLock); PG_RETURN_VOID(); } citus-7.0.3/src/backend/distributed/test/fake_fdw.c000066400000000000000000000075551317107136600222620ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/fake_fdw.c * * This file contains a barebones FDW implementation, suitable for use in * test code. Inspired by Andrew Dunstan's blackhole_fdw. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include #include "executor/tuptable.h" #include "foreign/fdwapi.h" #include "nodes/execnodes.h" #include "nodes/nodes.h" #include "nodes/pg_list.h" #include "nodes/plannodes.h" #include "nodes/relation.h" #include "optimizer/pathnode.h" #include "optimizer/planmain.h" #include "optimizer/restrictinfo.h" #include "utils/palloc.h" /* local function forward declarations */ static void FakeGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid); static void FakeGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid); static ForeignScan * FakeGetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid, ForeignPath *best_path, List *tlist, List *scan_clauses, Plan *outer_plan); static void FakeBeginForeignScan(ForeignScanState *node, int eflags); static TupleTableSlot * FakeIterateForeignScan(ForeignScanState *node); static void FakeReScanForeignScan(ForeignScanState *node); static void FakeEndForeignScan(ForeignScanState *node); /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(fake_fdw_handler); /* * fake_fdw_handler populates an FdwRoutine with pointers to the functions * implemented within this file. */ Datum fake_fdw_handler(PG_FUNCTION_ARGS) { FdwRoutine *fdwroutine = makeNode(FdwRoutine); fdwroutine->GetForeignRelSize = FakeGetForeignRelSize; fdwroutine->GetForeignPaths = FakeGetForeignPaths; fdwroutine->GetForeignPlan = FakeGetForeignPlan; fdwroutine->BeginForeignScan = FakeBeginForeignScan; fdwroutine->IterateForeignScan = FakeIterateForeignScan; fdwroutine->ReScanForeignScan = FakeReScanForeignScan; fdwroutine->EndForeignScan = FakeEndForeignScan; PG_RETURN_POINTER(fdwroutine); } /* * FakeGetForeignRelSize populates baserel with a fake relation size. */ static void FakeGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid) { baserel->rows = 0; baserel->fdw_private = (void *) palloc0(1); } /* * FakeGetForeignPaths adds a single fake foreign path to baserel. 
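 *
 * The path uses a zero startup cost and a total cost equal to the fake row
 * estimate, which is enough for test purposes. A minimal sketch of wiring the
 * wrapper up from SQL (object names here are illustrative only) would be:
 *   CREATE FOREIGN DATA WRAPPER fake_fdw HANDLER fake_fdw_handler;
 *   CREATE SERVER fake_server FOREIGN DATA WRAPPER fake_fdw;
 *   CREATE FOREIGN TABLE fake_table (id int) SERVER fake_server;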
*/ static void FakeGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid) { Cost startup_cost = 0; Cost total_cost = startup_cost + baserel->rows; add_path(baserel, (Path *) create_foreignscan_path(root, baserel, NULL, baserel->rows, startup_cost, total_cost, NIL, NULL, NULL, NIL)); } /* * FakeGetForeignPlan builds a fake foreign plan. */ static ForeignScan * FakeGetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid, ForeignPath *best_path, List *tlist, List *scan_clauses, Plan *outer_plan) { Index scan_relid = baserel->relid; scan_clauses = extract_actual_clauses(scan_clauses, false); return make_foreignscan(tlist, scan_clauses, scan_relid, NIL, NIL, NIL, NIL, outer_plan); } /* * FakeBeginForeignScan begins the fake plan (i.e. does nothing). */ static void FakeBeginForeignScan(ForeignScanState *node, int eflags) { } /* * FakeIterateForeignScan continues the fake plan (i.e. does nothing). */ static TupleTableSlot * FakeIterateForeignScan(ForeignScanState *node) { TupleTableSlot *slot = node->ss.ss_ScanTupleSlot; ExecClearTuple(slot); return slot; } /* * FakeReScanForeignScan restarts the fake plan (i.e. does nothing). */ static void FakeReScanForeignScan(ForeignScanState *node) { } /* * FakeEndForeignScan ends the fake plan (i.e. does nothing). */ static void FakeEndForeignScan(ForeignScanState *node) { } citus-7.0.3/src/backend/distributed/test/generate_ddl_commands.c000066400000000000000000000035351317107136600250040ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/generate_ddl_commands.c * * This file contains functions to exercise DDL generation functionality * within Citus. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include #include "catalog/pg_type.h" #include "distributed/listutils.h" #include "distributed/master_protocol.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/value.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/palloc.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(table_ddl_command_array); /* * table_ddl_command_array returns an array of strings, each of which is a DDL * command required to recreate a table (specified by OID). 
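 *
 * Sequence defaults are included in the generated commands (see
 * includeSequenceDefaults below). For illustration, assuming the test schema
 * exposes this function with a regclass argument and a table named 'events'
 * exists, a call such as
 *   SELECT table_ddl_command_array('events'::regclass);
 * returns the DDL statements needed to recreate that table.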
*/ Datum table_ddl_command_array(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); ArrayType *ddlCommandArrayType = NULL; bool includeSequenceDefaults = true; List *ddlCommandList = GetTableDDLEvents(distributedTableId, includeSequenceDefaults); int ddlCommandCount = list_length(ddlCommandList); Datum *ddlCommandDatumArray = palloc0(ddlCommandCount * sizeof(Datum)); ListCell *ddlCommandCell = NULL; int ddlCommandIndex = 0; Oid ddlCommandTypeId = TEXTOID; foreach(ddlCommandCell, ddlCommandList) { char *ddlCommand = (char *) lfirst(ddlCommandCell); Datum ddlCommandDatum = CStringGetTextDatum(ddlCommand); ddlCommandDatumArray[ddlCommandIndex] = ddlCommandDatum; ddlCommandIndex++; } ddlCommandArrayType = DatumArrayToArrayType(ddlCommandDatumArray, ddlCommandCount, ddlCommandTypeId); PG_RETURN_ARRAYTYPE_P(ddlCommandArrayType); } citus-7.0.3/src/backend/distributed/test/metadata_sync.c000066400000000000000000000037211317107136600233170ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/metadata_sync.c * * This file contains functions to exercise the metadata snapshoy * generation functionality within Citus. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include "catalog/pg_type.h" #include "distributed/listutils.h" #include "distributed/metadata_sync.h" #include "utils/array.h" #include "utils/builtins.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(master_metadata_snapshot); /* * master_metadata_snapshot prints all the queries that are required * to generate a metadata snapshot. */ Datum master_metadata_snapshot(PG_FUNCTION_ARGS) { List *dropSnapshotCommands = MetadataDropCommands(); List *createSnapshotCommands = MetadataCreateCommands(); List *snapshotCommandList = NIL; ListCell *snapshotCommandCell = NULL; int snapshotCommandCount = 0; Datum *snapshotCommandDatumArray = NULL; ArrayType *snapshotCommandArrayType = NULL; int snapshotCommandIndex = 0; Oid ddlCommandTypeId = TEXTOID; snapshotCommandList = list_concat(snapshotCommandList, dropSnapshotCommands); snapshotCommandList = list_concat(snapshotCommandList, createSnapshotCommands); snapshotCommandCount = list_length(snapshotCommandList); snapshotCommandDatumArray = palloc0(snapshotCommandCount * sizeof(Datum)); foreach(snapshotCommandCell, snapshotCommandList) { char *metadataSnapshotCommand = (char *) lfirst(snapshotCommandCell); Datum metadataSnapshotCommandDatum = CStringGetTextDatum(metadataSnapshotCommand); snapshotCommandDatumArray[snapshotCommandIndex] = metadataSnapshotCommandDatum; snapshotCommandIndex++; } snapshotCommandArrayType = DatumArrayToArrayType(snapshotCommandDatumArray, snapshotCommandCount, ddlCommandTypeId); PG_RETURN_ARRAYTYPE_P(snapshotCommandArrayType); } citus-7.0.3/src/backend/distributed/test/partitioning_utils.c000066400000000000000000000055221317107136600244330ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/partitioning_utils.c * * This file contains functions to test partitioning utility functions * implemented in Citus. * * Copyright (c) 2017, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "fmgr.h" #include "catalog/pg_type.h" #include "distributed/listutils.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/reference_table_utils.h" #include "lib/stringinfo.h" #include "utils/builtins.h" #include "utils/lsyscache.h" PG_FUNCTION_INFO_V1(generate_alter_table_detach_partition_command); PG_FUNCTION_INFO_V1(generate_alter_table_attach_partition_command); PG_FUNCTION_INFO_V1(generate_partition_information); PG_FUNCTION_INFO_V1(print_partitions); PG_FUNCTION_INFO_V1(table_inherits); PG_FUNCTION_INFO_V1(table_inherited); /* * Just a wrapper around GenereateDetachPartitionCommand(). */ Datum generate_alter_table_detach_partition_command(PG_FUNCTION_ARGS) { char *command = ""; #if (PG_VERSION_NUM >= 100000) command = GenerateDetachPartitionCommand(PG_GETARG_OID(0)); #endif PG_RETURN_TEXT_P(cstring_to_text(command)); } /* * Just a wrapper around GenerateAlterTableAttachPartitionCommand(). */ Datum generate_alter_table_attach_partition_command(PG_FUNCTION_ARGS) { char *command = ""; #if (PG_VERSION_NUM >= 100000) command = GenerateAlterTableAttachPartitionCommand(PG_GETARG_OID(0)); #endif PG_RETURN_TEXT_P(cstring_to_text(command)); } /* * Just a wrapper around GenereatePartitioningInformation(). */ Datum generate_partition_information(PG_FUNCTION_ARGS) { char *command = ""; #if (PG_VERSION_NUM >= 100000) command = GeneratePartitioningInformation(PG_GETARG_OID(0)); #endif PG_RETURN_TEXT_P(cstring_to_text(command)); } /* * Just a wrapper around PartitionList() with human readable table name outpus. */ Datum print_partitions(PG_FUNCTION_ARGS) { StringInfo resultRelationNames = makeStringInfo(); #if (PG_VERSION_NUM >= 100000) List *partitionList = PartitionList(PG_GETARG_OID(0)); ListCell *partitionOidCell = NULL; partitionList = SortList(partitionList, CompareOids); foreach(partitionOidCell, partitionList) { Oid partitionOid = lfirst_oid(partitionOidCell); /* at least one table is already added, add comma */ if (resultRelationNames->len > 0) { appendStringInfoString(resultRelationNames, ","); } appendStringInfoString(resultRelationNames, get_rel_name(partitionOid)); } #endif PG_RETURN_TEXT_P(cstring_to_text(resultRelationNames->data)); } /* * Just a wrapper around IsChildTable() */ Datum table_inherits(PG_FUNCTION_ARGS) { return IsChildTable(PG_GETARG_OID(0)); } /* * Just a wrapper around IsParentTable() */ Datum table_inherited(PG_FUNCTION_ARGS) { return IsParentTable(PG_GETARG_OID(0)); } citus-7.0.3/src/backend/distributed/test/progress_utils.c000066400000000000000000000066511317107136600235740ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * progress_utils.c * * This file contains functions to exercise progress monitoring functionality * within Citus. * * Copyright (c) 2017, Citus Data, Inc. 
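 *
 * The UDFs below are meant to be used together from the regression tests:
 * create_progress() registers a monitor, update_progress() sets the value of
 * a single step, show_progress() lists (step, value) pairs for all monitors
 * registered under a given magic number, and finish_progress() tears the
 * current monitor down.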
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "fmgr.h" #include "funcapi.h" #include #include "distributed/multi_progress.h" #include "nodes/execnodes.h" #include "utils/tuplestore.h" PG_FUNCTION_INFO_V1(create_progress); PG_FUNCTION_INFO_V1(update_progress); PG_FUNCTION_INFO_V1(finish_progress); PG_FUNCTION_INFO_V1(show_progress); Datum create_progress(PG_FUNCTION_ARGS) { uint64 magicNumber = PG_GETARG_INT64(0); int stepCount = PG_GETARG_INT32(1); ProgressMonitorData *monitor = CreateProgressMonitor(magicNumber, stepCount, sizeof(uint64), 0); if (monitor != NULL) { uint64 *steps = (uint64 *) monitor->steps; int i = 0; for (; i < stepCount; i++) { steps[i] = 0; } } PG_RETURN_VOID(); } Datum update_progress(PG_FUNCTION_ARGS) { uint64 step = PG_GETARG_INT64(0); uint64 newValue = PG_GETARG_INT64(1); ProgressMonitorData *monitor = GetCurrentProgressMonitor(); if (monitor != NULL && step < monitor->stepCount) { uint64 *steps = (uint64 *) monitor->steps; steps[step] = newValue; } PG_RETURN_VOID(); } Datum finish_progress(PG_FUNCTION_ARGS) { FinalizeCurrentProgressMonitor(); PG_RETURN_VOID(); } Datum show_progress(PG_FUNCTION_ARGS) { uint64 magicNumber = PG_GETARG_INT64(0); List *attachedDSMSegments = NIL; List *monitorList = ProgressMonitorList(magicNumber, &attachedDSMSegments); Tuplestorestate *tupstore = NULL; TupleDesc tupdesc; MemoryContext perQueryContext; MemoryContext currentContext; ReturnSetInfo *resultSet = (ReturnSetInfo *) fcinfo->resultinfo; ListCell *monitorCell = NULL; /* check to see if caller supports us returning a tuplestore */ if (resultSet == NULL || !IsA(resultSet, ReturnSetInfo)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context that cannot " \ "accept a set"))); } if (!(resultSet->allowedModes & SFRM_Materialize)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " \ "allowed in this context"))); } if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) { elog(ERROR, "return type must be a row type"); } perQueryContext = resultSet->econtext->ecxt_per_query_memory; currentContext = MemoryContextSwitchTo(perQueryContext); tupstore = tuplestore_begin_heap(true, false, work_mem); resultSet->returnMode = SFRM_Materialize; resultSet->setResult = tupstore; resultSet->setDesc = tupdesc; MemoryContextSwitchTo(currentContext); foreach(monitorCell, monitorList) { ProgressMonitorData *monitor = lfirst(monitorCell); uint64 *steps = monitor->steps; int stepIndex = 0; for (stepIndex = 0; stepIndex < monitor->stepCount; stepIndex++) { uint64 step = steps[stepIndex]; Datum values[2]; bool nulls[2]; memset(values, 0, sizeof(values)); memset(nulls, 0, sizeof(nulls)); values[0] = Int32GetDatum(stepIndex); values[1] = UInt64GetDatum(step); tuplestore_putvalues(tupstore, tupdesc, values, nulls); } } tuplestore_donestoring(tupstore); DetachFromDSMSegments(attachedDSMSegments); return (Datum) 0; } citus-7.0.3/src/backend/distributed/test/prune_shard_list.c000066400000000000000000000172231317107136600240520ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * test/src/create_shards.c * * This file contains functions to exercise shard creation functionality * within Citus. * * Copyright (c) 2014-2016, Citus Data, Inc. 
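 *
 * The functions below (prune_using_*, debug_equality_expression,
 * print_sorted_shard_intervals) primarily exercise shard pruning via
 * PruneShards() and the sorted shard interval cache.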
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "fmgr.h" #include #include "access/stratnum.h" #include "catalog/pg_type.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/master_metadata_utility.h" #include "distributed/multi_join_order.h" #include "distributed/multi_physical_planner.h" #include "distributed/resource_lock.h" #include "distributed/shard_pruning.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "nodes/nodes.h" #include "optimizer/clauses.h" #include "utils/array.h" #include "utils/palloc.h" /* local function forward declarations */ static Expr * MakeTextPartitionExpression(Oid distributedTableId, text *value); static ArrayType * PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList); static ArrayType * SortedShardIntervalArray(Oid distributedTableId); /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(prune_using_no_values); PG_FUNCTION_INFO_V1(prune_using_single_value); PG_FUNCTION_INFO_V1(prune_using_either_value); PG_FUNCTION_INFO_V1(prune_using_both_values); PG_FUNCTION_INFO_V1(debug_equality_expression); PG_FUNCTION_INFO_V1(print_sorted_shard_intervals); /* * prune_using_no_values returns the shards for the specified distributed table * after pruning using an empty clause list. */ Datum prune_using_no_values(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); List *whereClauseList = NIL; ArrayType *shardIdArrayType = PrunedShardIdsForTable(distributedTableId, whereClauseList); PG_RETURN_ARRAYTYPE_P(shardIdArrayType); } /* * prune_using_single_value returns the shards for the specified distributed * table after pruning using a single value provided by the caller. */ Datum prune_using_single_value(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); text *value = (PG_ARGISNULL(1)) ? NULL : PG_GETARG_TEXT_P(1); Expr *equalityExpr = MakeTextPartitionExpression(distributedTableId, value); List *whereClauseList = list_make1(equalityExpr); ArrayType *shardIdArrayType = PrunedShardIdsForTable(distributedTableId, whereClauseList); PG_RETURN_ARRAYTYPE_P(shardIdArrayType); } /* * prune_using_either_value returns the shards for the specified distributed * table after pruning using either of two values provided by the caller (OR). */ Datum prune_using_either_value(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); text *firstValue = PG_GETARG_TEXT_P(1); text *secondValue = PG_GETARG_TEXT_P(2); Expr *firstQual = MakeTextPartitionExpression(distributedTableId, firstValue); Expr *secondQual = MakeTextPartitionExpression(distributedTableId, secondValue); Expr *orClause = make_orclause(list_make2(firstQual, secondQual)); List *whereClauseList = list_make1(orClause); ArrayType *shardIdArrayType = PrunedShardIdsForTable(distributedTableId, whereClauseList); PG_RETURN_ARRAYTYPE_P(shardIdArrayType); } /* * prune_using_both_values returns the shards for the specified distributed * table after pruning using both of the values provided by the caller (AND). 
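 *
 * Since the two equality clauses are ANDed, pruning with two distinct values
 * on the same partition column is expected to leave no shards, whereas
 * passing the same value twice behaves like prune_using_single_value().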
*/ Datum prune_using_both_values(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); text *firstValue = PG_GETARG_TEXT_P(1); text *secondValue = PG_GETARG_TEXT_P(2); Expr *firstQual = MakeTextPartitionExpression(distributedTableId, firstValue); Expr *secondQual = MakeTextPartitionExpression(distributedTableId, secondValue); List *whereClauseList = list_make2(firstQual, secondQual); ArrayType *shardIdArrayType = PrunedShardIdsForTable(distributedTableId, whereClauseList); PG_RETURN_ARRAYTYPE_P(shardIdArrayType); } /* * debug_equality_expression returns the textual representation of an equality * expression generated by a call to MakeOpExpression. */ Datum debug_equality_expression(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); uint32 rangeTableId = 1; Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); OpExpr *equalityExpression = MakeOpExpression(partitionColumn, BTEqualStrategyNumber); PG_RETURN_CSTRING(nodeToString(equalityExpression)); } /* * print_sorted_shard_intervals prints the sorted shard interval array that is in the * metadata cache. This function aims to test sorting functionality. */ Datum print_sorted_shard_intervals(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); ArrayType *shardIdArrayType = SortedShardIntervalArray(distributedTableId); PG_RETURN_ARRAYTYPE_P(shardIdArrayType); } /* * MakeTextPartitionExpression returns an equality expression between the * specified table's partition column and the provided values. */ static Expr * MakeTextPartitionExpression(Oid distributedTableId, text *value) { uint32 rangeTableId = 1; Var *partitionColumn = PartitionColumn(distributedTableId, rangeTableId); Expr *partitionExpression = NULL; if (value != NULL) { OpExpr *equalityExpr = MakeOpExpression(partitionColumn, BTEqualStrategyNumber); Const *rightConst = (Const *) get_rightop((Expr *) equalityExpr); rightConst->constvalue = (Datum) value; rightConst->constisnull = false; rightConst->constbyval = false; partitionExpression = (Expr *) equalityExpr; } else { NullTest *nullTest = makeNode(NullTest); nullTest->arg = (Expr *) partitionColumn; nullTest->nulltesttype = IS_NULL; partitionExpression = (Expr *) nullTest; } return partitionExpression; } /* * PrunedShardIdsForTable loads the shard intervals for the specified table, * prunes them using the provided clauses. It returns an ArrayType containing * the shard identifiers, suitable for return from an SQL-facing function. */ static ArrayType * PrunedShardIdsForTable(Oid distributedTableId, List *whereClauseList) { ArrayType *shardIdArrayType = NULL; ListCell *shardCell = NULL; int shardIdIndex = 0; Oid shardIdTypeId = INT8OID; Index tableId = 1; List *shardList = NIL; int shardIdCount = -1; Datum *shardIdDatumArray = NULL; shardList = PruneShards(distributedTableId, tableId, whereClauseList); shardIdCount = list_length(shardList); shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); foreach(shardCell, shardList) { ShardInterval *shardId = (ShardInterval *) lfirst(shardCell); Datum shardIdDatum = Int64GetDatum(shardId->shardId); shardIdDatumArray[shardIdIndex] = shardIdDatum; shardIdIndex++; } shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, shardIdTypeId); return shardIdArrayType; } /* * SortedShardIntervalArray simply returns the shard interval ids in the sorted shard * interval cache as a datum array. 
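 *
 * Note that the intervals are read straight from the DistTableCacheEntry's
 * sortedShardIntervalArray rather than from the catalog, so the output
 * reflects whatever ordering the metadata cache currently holds.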
*/ static ArrayType * SortedShardIntervalArray(Oid distributedTableId) { ArrayType *shardIdArrayType = NULL; int shardIndex = 0; Oid shardIdTypeId = INT8OID; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); ShardInterval **shardIntervalArray = cacheEntry->sortedShardIntervalArray; int shardIdCount = cacheEntry->shardIntervalArrayLength; Datum *shardIdDatumArray = palloc0(shardIdCount * sizeof(Datum)); for (shardIndex = 0; shardIndex < shardIdCount; ++shardIndex) { ShardInterval *shardId = shardIntervalArray[shardIndex]; Datum shardIdDatum = Int64GetDatum(shardId->shardId); shardIdDatumArray[shardIndex] = shardIdDatum; } shardIdArrayType = DatumArrayToArrayType(shardIdDatumArray, shardIdCount, shardIdTypeId); return shardIdArrayType; } citus-7.0.3/src/backend/distributed/transaction/000077500000000000000000000000001317107136600217025ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/transaction/backend_data.c000066400000000000000000000431031317107136600244270ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * backend_data.c * * Infrastructure for managing per backend data that can efficiently * accessed by all sessions. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "funcapi.h" #include "access/htup_details.h" #include "catalog/pg_type.h" #include "datatype/timestamp.h" #include "distributed/backend_data.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" #include "distributed/transaction_identifier.h" #include "nodes/execnodes.h" #include "storage/ipc.h" #include "storage/lwlock.h" #include "storage/proc.h" #include "storage/spin.h" #include "storage/s_lock.h" #include "utils/timestamp.h" /* * Each backend's data reside in the shared memory * on the BackendManagementShmemData. */ typedef struct BackendManagementShmemData { int trancheId; #if (PG_VERSION_NUM >= 100000) NamedLWLockTranche namedLockTranche; #else LWLockTranche lockTranche; #endif LWLock lock; /* * We prefer to use an atomic integer over sequences for two * reasons (i) orders of magnitude performance difference * (ii) allowing read-only replicas to be able to generate ids */ pg_atomic_uint64 nextTransactionNumber; BackendData backends[FLEXIBLE_ARRAY_MEMBER]; } BackendManagementShmemData; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static BackendManagementShmemData *backendManagementShmemData = NULL; static BackendData *MyBackendData = NULL; static void BackendManagementShmemInit(void); static size_t BackendManagementShmemSize(void); PG_FUNCTION_INFO_V1(assign_distributed_transaction_id); PG_FUNCTION_INFO_V1(get_current_transaction_id); PG_FUNCTION_INFO_V1(get_all_active_transactions); /* * assign_distributed_transaction_id updates the shared memory allocated for this backend * and sets initiatorNodeIdentifier, transactionNumber, timestamp fields with the given * inputs. Also, the function sets the database id and process id via the information that * Postgres provides. * * This function is only intended for internal use for managing distributed transactions. * Users should not use this function for any purpose. 
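 *
 * The arguments are the initiator node identifier (int4), the distributed
 * transaction number (int8) and the originating timestamp (timestamptz). For
 * illustration only (the values are made up), the coordinator effectively runs
 *   SELECT assign_distributed_transaction_id(0, 42, '2017-10-16 10:00:00+00');
 * on each worker connection participating in the distributed transaction.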
*/ Datum assign_distributed_transaction_id(PG_FUNCTION_ARGS) { CheckCitusVersion(ERROR); /* MyBackendData should always be avaliable, just out of paranoia */ if (!MyBackendData) { ereport(ERROR, (errmsg("backend is not ready for distributed transactions"))); } /* * Note that we don't need to lock shared memory (i.e., LockBackendSharedMemory()) here * since this function is executed after AssignDistributedTransactionId() issued on the * initiator node, which already takes the required lock to enforce the consistency. */ SpinLockAcquire(&MyBackendData->mutex); /* if an id is already assigned, release the lock and error */ if (MyBackendData->transactionId.transactionNumber != 0) { SpinLockRelease(&MyBackendData->mutex); ereport(ERROR, (errmsg("the backend has already been assigned a " "transaction id"))); } MyBackendData->databaseId = MyDatabaseId; MyBackendData->transactionId.initiatorNodeIdentifier = PG_GETARG_INT32(0); MyBackendData->transactionId.transactionNumber = PG_GETARG_INT64(1); MyBackendData->transactionId.timestamp = PG_GETARG_TIMESTAMPTZ(2); MyBackendData->transactionId.transactionOriginator = false; SpinLockRelease(&MyBackendData->mutex); PG_RETURN_VOID(); } /* * get_current_transaction_id returns a tuple with (databaseId, processId, * initiatorNodeIdentifier, transactionNumber, timestamp) that exists in the * shared memory associated with this backend. Note that if the backend * is not in a transaction, the function returns uninitialized data where * transactionNumber equals to 0. */ Datum get_current_transaction_id(PG_FUNCTION_ARGS) { TupleDesc tupleDescriptor = NULL; HeapTuple heapTuple = NULL; const int attributeCount = 5; Datum values[attributeCount]; bool isNulls[attributeCount]; DistributedTransactionId *distributedTransctionId = NULL; CheckCitusVersion(ERROR); /* build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupleDescriptor) != TYPEFUNC_COMPOSITE) { elog(ERROR, "return type must be a row type"); } /* MyBackendData should always be avaliable, just out of paranoia */ if (!MyBackendData) { ereport(ERROR, (errmsg("backend is not ready for distributed transactions"))); } distributedTransctionId = GetCurrentDistributedTransactionId(); memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); /* first two fields do not change for this backend, so get directly */ values[0] = ObjectIdGetDatum(MyDatabaseId); values[1] = Int32GetDatum(MyProcPid); values[2] = Int32GetDatum(distributedTransctionId->initiatorNodeIdentifier); values[3] = UInt64GetDatum(distributedTransctionId->transactionNumber); /* provide a better output */ if (distributedTransctionId->initiatorNodeIdentifier != 0) { values[4] = TimestampTzGetDatum(distributedTransctionId->timestamp); } else { isNulls[4] = true; } heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); PG_RETURN_DATUM(HeapTupleGetDatum(heapTuple)); } /* * get_all_active_transactions returns all the avaliable information about all * the active backends. 
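 *
 * Each returned row contains (databaseId, processId, initiatorNodeIdentifier,
 * transactionNumber, timestamp), matching the values filled in below.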
*/ Datum get_all_active_transactions(PG_FUNCTION_ARGS) { ReturnSetInfo *returnSetInfo = (ReturnSetInfo *) fcinfo->resultinfo; TupleDesc tupleDescriptor = NULL; Tuplestorestate *tupleStore = NULL; MemoryContext perQueryContext = NULL; MemoryContext oldContext = NULL; int backendIndex = 0; const int attributeCount = 5; Datum values[attributeCount]; bool isNulls[attributeCount]; CheckCitusVersion(ERROR); /* check to see if caller supports us returning a tuplestore */ if (returnSetInfo == NULL || !IsA(returnSetInfo, ReturnSetInfo)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("set-valued function called in context " \ "that cannot accept a set"))); } if (!(returnSetInfo->allowedModes & SFRM_Materialize)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " \ "allowed in this context"))); } /* build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupleDescriptor) != TYPEFUNC_COMPOSITE) { elog(ERROR, "return type must be a row type"); } perQueryContext = returnSetInfo->econtext->ecxt_per_query_memory; oldContext = MemoryContextSwitchTo(perQueryContext); tupleStore = tuplestore_begin_heap(true, false, work_mem); returnSetInfo->returnMode = SFRM_Materialize; returnSetInfo->setResult = tupleStore; returnSetInfo->setDesc = tupleDescriptor; MemoryContextSwitchTo(oldContext); /* * We don't want to initialize memory while spinlock is held so we * prefer to do it here. This initialization is done only for the first * row. */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); /* we're reading all distributed transactions, prevent new backends */ LockBackendSharedMemory(LW_SHARED); for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) { BackendData *currentBackend = &backendManagementShmemData->backends[backendIndex]; SpinLockAcquire(¤tBackend->mutex); /* we're only interested in active backends */ if (currentBackend->transactionId.transactionNumber == 0) { SpinLockRelease(¤tBackend->mutex); continue; } values[0] = ObjectIdGetDatum(currentBackend->databaseId); values[1] = Int32GetDatum(ProcGlobal->allProcs[backendIndex].pid); values[2] = Int32GetDatum(currentBackend->transactionId.initiatorNodeIdentifier); values[3] = UInt64GetDatum(currentBackend->transactionId.transactionNumber); values[4] = TimestampTzGetDatum(currentBackend->transactionId.timestamp); SpinLockRelease(¤tBackend->mutex); tuplestore_putvalues(tupleStore, tupleDescriptor, values, isNulls); /* * We don't want to initialize memory while spinlock is held so we * prefer to do it here. This initialization is done for the rows * starting from the second one. */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); } UnlockBackendSharedMemory(); /* clean up and return the tuplestore */ tuplestore_donestoring(tupleStore); PG_RETURN_VOID(); } /* * InitializeBackendManagement requests the necessary shared memory * from Postgres and sets up the shared memory startup hook. */ void InitializeBackendManagement(void) { /* allocate shared memory */ RequestAddinShmemSpace(BackendManagementShmemSize()); prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = BackendManagementShmemInit; } /* * BackendManagementShmemInit is the callback that is to be called on shared * memory startup hook. The function sets up the necessary shared memory * segment for the backend manager. 
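 *
 * The callback runs under AddinShmemInitLock and initializes the lock
 * tranche, the per-backend spinlocks and the atomic transaction number
 * counter exactly once; later backends simply attach to the existing segment
 * before chaining to any previously installed startup hook.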
*/ static void BackendManagementShmemInit(void) { bool alreadyInitialized = false; /* we may update the shmem, acquire lock exclusively */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); backendManagementShmemData = (BackendManagementShmemData *) ShmemInitStruct( "Backend Management Shmem", BackendManagementShmemSize(), &alreadyInitialized); if (!alreadyInitialized) { int backendIndex = 0; char *trancheName = "Backend Management Tranche"; #if (PG_VERSION_NUM >= 100000) NamedLWLockTranche *namedLockTranche = &backendManagementShmemData->namedLockTranche; #else LWLockTranche *lockTranche = &backendManagementShmemData->lockTranche; #endif /* start by zeroing out all the memory */ memset(backendManagementShmemData, 0, BackendManagementShmemSize()); #if (PG_VERSION_NUM >= 100000) namedLockTranche->trancheId = LWLockNewTrancheId(); LWLockRegisterTranche(namedLockTranche->trancheId, trancheName); LWLockInitialize(&backendManagementShmemData->lock, namedLockTranche->trancheId); #else backendManagementShmemData->trancheId = LWLockNewTrancheId(); /* we only need a single lock */ lockTranche->array_base = &backendManagementShmemData->lock; lockTranche->array_stride = sizeof(LWLock); lockTranche->name = trancheName; LWLockRegisterTranche(backendManagementShmemData->trancheId, lockTranche); LWLockInitialize(&backendManagementShmemData->lock, backendManagementShmemData->trancheId); #endif /* start the distributed transaction ids from 1 */ pg_atomic_init_u64(&backendManagementShmemData->nextTransactionNumber, 1); /* * We need to init per backend's spinlock before any backend * starts its execution. */ for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) { SpinLockInit(&backendManagementShmemData->backends[backendIndex].mutex); } } LWLockRelease(AddinShmemInitLock); if (prev_shmem_startup_hook != NULL) { prev_shmem_startup_hook(); } } /* * BackendManagementShmemSize returns the size that should be allocated * on the shared memory for backend management. */ static size_t BackendManagementShmemSize(void) { Size size = 0; size = add_size(size, sizeof(BackendManagementShmemData)); size = add_size(size, mul_size(sizeof(BackendData), MaxBackends)); return size; } /* * InitializeBackendData is called per backend and does the * required initialization. */ void InitializeBackendData(void) { MyBackendData = &backendManagementShmemData->backends[MyProc->pgprocno]; Assert(MyBackendData); LockBackendSharedMemory(LW_EXCLUSIVE); SpinLockAcquire(&MyBackendData->mutex); MyBackendData->databaseId = MyDatabaseId; MyBackendData->transactionId.initiatorNodeIdentifier = 0; MyBackendData->transactionId.transactionOriginator = false; MyBackendData->transactionId.transactionNumber = 0; MyBackendData->transactionId.timestamp = 0; SpinLockRelease(&MyBackendData->mutex); UnlockBackendSharedMemory(); } /* * UnSetDistributedTransactionId simply acquires the mutex and resets the backend's * distributed transaction data in shared memory to the initial values. */ void UnSetDistributedTransactionId(void) { /* backend does not exist if the extension is not created */ if (MyBackendData) { SpinLockAcquire(&MyBackendData->mutex); MyBackendData->databaseId = 0; MyBackendData->transactionId.initiatorNodeIdentifier = 0; MyBackendData->transactionId.transactionOriginator = false; MyBackendData->transactionId.transactionNumber = 0; MyBackendData->transactionId.timestamp = 0; SpinLockRelease(&MyBackendData->mutex); } } /* * LockBackendSharedMemory is a simple wrapper around LWLockAcquire on the * shared memory lock. 
* * We use the backend shared memory lock for preventing new backends to be part * of a new distributed transaction or an existing backend to leave a distributed * transaction while we're reading the all backends' data. * * The primary goal is to provide consistent view of the current distributed * transactions while doing the deadlock detection. */ void LockBackendSharedMemory(LWLockMode lockMode) { LWLockAcquire(&backendManagementShmemData->lock, lockMode); } /* * UnlockBackendSharedMemory is a simple wrapper around LWLockRelease on the * shared memory lock. */ void UnlockBackendSharedMemory(void) { LWLockRelease(&backendManagementShmemData->lock); } /* * GetCurrentDistributedTransactionId reads the backend's distributed transaction id and * returns a copy of it. */ DistributedTransactionId * GetCurrentDistributedTransactionId(void) { DistributedTransactionId *currentDistributedTransactionId = (DistributedTransactionId *) palloc(sizeof(DistributedTransactionId)); SpinLockAcquire(&MyBackendData->mutex); currentDistributedTransactionId->initiatorNodeIdentifier = MyBackendData->transactionId.initiatorNodeIdentifier; currentDistributedTransactionId->transactionOriginator = MyBackendData->transactionId.transactionOriginator; currentDistributedTransactionId->transactionNumber = MyBackendData->transactionId.transactionNumber; currentDistributedTransactionId->timestamp = MyBackendData->transactionId.timestamp; SpinLockRelease(&MyBackendData->mutex); return currentDistributedTransactionId; } /* * AssignDistributedTransactionId generates a new distributed transaction id and * sets it for the current backend. It also sets the databaseId and * processId fields. * * This function should only be called on BeginCoordinatedTransaction(). Any other * callers is very likely to break the distributed transction management. */ void AssignDistributedTransactionId(void) { pg_atomic_uint64 *transactionNumberSequence = &backendManagementShmemData->nextTransactionNumber; uint64 nextTransactionNumber = pg_atomic_fetch_add_u64(transactionNumberSequence, 1); int localGroupId = GetLocalGroupId(); TimestampTz currentTimestamp = GetCurrentTimestamp(); SpinLockAcquire(&MyBackendData->mutex); MyBackendData->databaseId = MyDatabaseId; MyBackendData->transactionId.initiatorNodeIdentifier = localGroupId; MyBackendData->transactionId.transactionOriginator = true; MyBackendData->transactionId.transactionNumber = nextTransactionNumber; MyBackendData->transactionId.timestamp = currentTimestamp; SpinLockRelease(&MyBackendData->mutex); } /* * CurrentDistributedTransactionNumber returns the transaction number of the * current distributed transaction. The caller must make sure a distributed * transaction is in progress. */ uint64 CurrentDistributedTransactionNumber(void) { Assert(MyBackendData != NULL); return MyBackendData->transactionId.transactionNumber; } /* * GetBackendDataForProc writes the backend data for the given process to * result. If the process is part of a lock group (parallel query) it * returns the leader data instead. 
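 *
 * The copy is taken while holding the backend's spinlock, so the caller gets
 * a consistent snapshot without needing to hold any lock afterwards.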
*/ void GetBackendDataForProc(PGPROC *proc, BackendData *result) { BackendData *backendData = NULL; int pgprocno = proc->pgprocno; if (proc->lockGroupLeader != NULL) { pgprocno = proc->lockGroupLeader->pgprocno; } backendData = &backendManagementShmemData->backends[pgprocno]; SpinLockAcquire(&backendData->mutex); memcpy(result, backendData, sizeof(BackendData)); SpinLockRelease(&backendData->mutex); } /* * CancelTransactionDueToDeadlock cancels the input proc and also marks the backend * data with this information. */ void CancelTransactionDueToDeadlock(PGPROC *proc) { BackendData *backendData = &backendManagementShmemData->backends[proc->pgprocno]; /* backend might not have used citus yet and thus not initialized backend data */ if (!backendData) { return; } SpinLockAcquire(&backendData->mutex); /* send a SIGINT only if the process is still in a distributed transaction */ if (backendData->transactionId.transactionNumber != 0) { backendData->cancelledDueToDeadlock = true; SpinLockRelease(&backendData->mutex); if (kill(proc->pid, SIGINT) != 0) { ereport(WARNING, (errmsg("attempted to cancel this backend (pid: %d) to resolve a " "distributed deadlock but the backend could not " "be cancelled", proc->pid))); } } else { SpinLockRelease(&backendData->mutex); } } /* * MyBackendGotCancelledDueToDeadlock returns whether the current distributed * transaction was cancelled due to a deadlock. If the backend is not in a * distributed transaction, the function returns false. */ bool MyBackendGotCancelledDueToDeadlock(void) { bool cancelledDueToDeadlock = false; /* backend might not have used citus yet and thus not initialized backend data */ if (!MyBackendData) { return false; } SpinLockAcquire(&MyBackendData->mutex); if (IsInDistributedTransaction(MyBackendData)) { cancelledDueToDeadlock = MyBackendData->cancelledDueToDeadlock; } SpinLockRelease(&MyBackendData->mutex); return cancelledDueToDeadlock; } citus-7.0.3/src/backend/distributed/transaction/distributed_deadlock_detection.c000066400000000000000000000476121317107136600302660ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * distributed_deadlock_detection.c * * Functions for performing distributed deadlock detection. * * Copyright (c) 2017, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "pgstat.h" #include "access/hash.h" #include "distributed/backend_data.h" #include "distributed/distributed_deadlock_detection.h" #include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" #include "distributed/transaction_identifier.h" #include "nodes/pg_list.h" #include "utils/hsearch.h" #include "utils/timestamp.h" /* used only for finding the deadlock cycle path */ typedef struct QueuedTransactionNode { TransactionNode *transactionNode; int currentStackDepth; } QueuedTransactionNode; /* GUC, determining whether debug messages for deadlock detection sent to LOG */ bool LogDistributedDeadlockDetection = false; static bool CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode, TransactionNode **transactionNodeStack, List **deadlockPath); static void PrependOutgoingNodesToQueue(TransactionNode *queuedTransactionNode, int currentStackDepth, List **toBeVisitedNodes); static void BuildDeadlockPathList(QueuedTransactionNode *cycledTransactionNode, TransactionNode **transactionNodeStack, List **deadlockPath); static void ResetVisitedFields(HTAB *adjacencyList); static bool AssociateDistributedTransactionWithBackendProc(TransactionNode * transactionNode); static TransactionNode * GetOrCreateTransactionNode(HTAB *adjacencyList, DistributedTransactionId * transactionId); static uint32 DistributedTransactionIdHash(const void *key, Size keysize); static int DistributedTransactionIdCompare(const void *a, const void *b, Size keysize); static void LogCancellingBackend(TransactionNode *transactionNode); static void LogTransactionNode(TransactionNode *transactionNode); static void LogDistributedDeadlockDebugMessage(const char *errorMessage); PG_FUNCTION_INFO_V1(check_distributed_deadlocks); /* * check_distributed_deadlocks is the external API for manually * checking for distributed deadlocks. For the details, see * CheckForDistributedDeadlocks(). */ Datum check_distributed_deadlocks(PG_FUNCTION_ARGS) { bool deadlockFound = CheckForDistributedDeadlocks(); return BoolGetDatum(deadlockFound); } /* * CheckForDistributedDeadlocks is the entry point for detecing * distributed deadlocks. * * In plain words, the function first builds a wait graph by * adding the wait edges from the local node and then adding the * remote wait edges to form a global wait graph. Later, the wait * graph is converted into another graph representation (adjacency * lists) for more efficient searches. Finally, a DFS is done on * the adjacency lists. Finding a cycle in the graph unveils a * distributed deadlock. Upon finding a deadlock, the youngest * participant backend is cancelled. * * The complexity of the algorithm is O(N) for each distributed * transaction that's checked for deadlocks. Note that there exists * 0 to MaxBackends number of transactions. * * The function returns true if a deadlock is found. Otherwise, returns * false. */ bool CheckForDistributedDeadlocks(void) { WaitGraph *waitGraph = NULL; HTAB *adjacencyLists = NULL; HASH_SEQ_STATUS status; TransactionNode *transactionNode = NULL; int edgeCount = 0; int localGroupId = GetLocalGroupId(); List *workerNodeList = ActiveReadableNodeList(); /* * We don't need to do any distributed deadlock checking if there * are no worker nodes. 
This might even be problematic for a non-mx * worker node which has the same group id with its master (i.e., 0), * which may erroneously decide to kill the deadlocks happening on it. */ if (list_length(workerNodeList) == 0) { return false; } waitGraph = BuildGlobalWaitGraph(); adjacencyLists = BuildAdjacencyListsForWaitGraph(waitGraph); edgeCount = waitGraph->edgeCount; /* * We iterate on transaction nodes and search for deadlocks where the * starting node is the given transaction node. */ hash_seq_init(&status, adjacencyLists); while ((transactionNode = (TransactionNode *) hash_seq_search(&status)) != 0) { bool deadlockFound = false; List *deadlockPath = NIL; TransactionNode *transactionNodeStack[edgeCount]; /* we're only interested in finding deadlocks originating from this node */ if (transactionNode->transactionId.initiatorNodeIdentifier != localGroupId) { continue; } ResetVisitedFields(adjacencyLists); deadlockFound = CheckDeadlockForTransactionNode(transactionNode, transactionNodeStack, &deadlockPath); if (deadlockFound) { TransactionNode *youngestAliveTransaction = NULL; ListCell *participantTransactionCell = NULL; /* * There should generally be at least two transactions to get into a * deadlock. However, in case Citus gets into a self-deadlock, we may * find a deadlock with a single transaction. */ Assert(list_length(deadlockPath) >= 1); LogDistributedDeadlockDebugMessage("Distributed deadlock found among the " "following distributed transactions:"); /* * We search for the youngest participant for two reasons * (i) predictable results (ii) cancel the youngest transaction * (i.e., if a DDL continues for 1 hour and deadlocks with a * SELECT continues for 10 msec, we prefer to cancel the SELECT). * * We're also searching for the youngest transactions initiated by * this node. */ foreach(participantTransactionCell, deadlockPath) { TransactionNode *currentNode = (TransactionNode *) lfirst(participantTransactionCell); bool transactionAssociatedWithProc = AssociateDistributedTransactionWithBackendProc(currentNode); TimestampTz youngestTimestamp = 0; TimestampTz currentTimestamp = 0; LogTransactionNode(currentNode); /* we couldn't find the backend process originated the transaction */ if (!transactionAssociatedWithProc) { continue; } if (youngestAliveTransaction == NULL) { youngestAliveTransaction = currentNode; continue; } youngestTimestamp = youngestAliveTransaction->transactionId.timestamp; currentTimestamp = currentNode->transactionId.timestamp; if (timestamptz_cmp_internal(currentTimestamp, youngestTimestamp) == 1) { youngestAliveTransaction = currentNode; } } /* we found the deadlock and its associated proc exists */ if (youngestAliveTransaction) { CancelTransactionDueToDeadlock(youngestAliveTransaction->initiatorProc); LogCancellingBackend(youngestAliveTransaction); hash_seq_term(&status); return true; } } } return false; } /* * CheckDeadlockForDistributedTransaction does a DFS starting with the given * transaction node and checks for a cycle (i.e., the node can be reached again * while traversing the graph). * * Finding a cycle indicates a distributed deadlock and the function returns * true on that case. Also, the deadlockPath is filled with the transaction * nodes that form the cycle. */ static bool CheckDeadlockForTransactionNode(TransactionNode *startingTransactionNode, TransactionNode **transactionNodeStack, List **deadlockPath) { List *toBeVisitedNodes = NIL; int currentStackDepth = 0; /* * We keep transactionNodeStack to keep track of the deadlock paths. 
At this point, * adjust the depth of the starting node and set the stack's first element with * the starting node. */ transactionNodeStack[currentStackDepth] = startingTransactionNode; PrependOutgoingNodesToQueue(startingTransactionNode, currentStackDepth, &toBeVisitedNodes); /* traverse the graph and search for the deadlocks */ while (toBeVisitedNodes != NIL) { QueuedTransactionNode *queuedTransactionNode = (QueuedTransactionNode *) linitial(toBeVisitedNodes); TransactionNode *currentTransactionNode = queuedTransactionNode->transactionNode; toBeVisitedNodes = list_delete_first(toBeVisitedNodes); /* cycle found, let the caller know about the cycle */ if (currentTransactionNode == startingTransactionNode) { BuildDeadlockPathList(queuedTransactionNode, transactionNodeStack, deadlockPath); return true; } /* don't need to revisit the node again */ if (currentTransactionNode->transactionVisited) { continue; } currentTransactionNode->transactionVisited = true; /* set the stack's corresponding element with the current node */ currentStackDepth = queuedTransactionNode->currentStackDepth; transactionNodeStack[currentStackDepth] = currentTransactionNode; PrependOutgoingNodesToQueue(currentTransactionNode, currentStackDepth, &toBeVisitedNodes); } return false; } /* * PrependOutgoingNodesToQueue prepends the waiters of the input transaction nodes to the * toBeVisitedNodes. */ static void PrependOutgoingNodesToQueue(TransactionNode *transactionNode, int currentStackDepth, List **toBeVisitedNodes) { ListCell *currentWaitForCell = NULL; /* as we traverse outgoing edges, increment the depth */ currentStackDepth++; /* prepend to the list to continue depth-first search */ foreach(currentWaitForCell, transactionNode->waitsFor) { TransactionNode *waitForTransaction = (TransactionNode *) lfirst(currentWaitForCell); QueuedTransactionNode *queuedNode = palloc0(sizeof(QueuedTransactionNode)); queuedNode->transactionNode = waitForTransaction; queuedNode->currentStackDepth = currentStackDepth; *toBeVisitedNodes = lappend(*toBeVisitedNodes, queuedNode); } } /* * BuildDeadlockPathList fills deadlockPath with a list of transactions involved * in a distributed deadlock (i.e. a cycle in the graph). */ static void BuildDeadlockPathList(QueuedTransactionNode *cycledTransactionNode, TransactionNode **transactionNodeStack, List **deadlockPath) { int deadlockStackDepth = cycledTransactionNode->currentStackDepth; int stackIndex = 0; *deadlockPath = NIL; for (stackIndex = 0; stackIndex < deadlockStackDepth; stackIndex++) { *deadlockPath = lappend(*deadlockPath, transactionNodeStack[stackIndex]); } } /* * ResetVisitedFields goes over all the elements of the input adjacency list * and sets transactionVisited to false. */ static void ResetVisitedFields(HTAB *adjacencyList) { HASH_SEQ_STATUS status; TransactionNode *resetNode = NULL; /* reset all visited fields */ hash_seq_init(&status, adjacencyList); while ((resetNode = (TransactionNode *) hash_seq_search(&status)) != 0) { resetNode->transactionVisited = false; } } /* * AssociateDistributedTransactionWithBackendProc gets a transaction node * and searches the corresponding backend. Once found, transactionNodes' * initiatorProc is set to it. * * The function goes over all the backends, checks for the backend with * the same transaction number as the given transaction node. * * If the transaction cannot be associated with a backend process, the function * returns false. Otherwise, the function returns true. 
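 *
 * Note that this is a linear scan over all MaxBackends proc slots; it is only
 * called for the participants of an already-detected deadlock cycle, so the
 * cost is acceptable.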
*/ static bool AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode) { int backendIndex = 0; for (backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) { PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex]; BackendData currentBackendData; DistributedTransactionId *currentTransactionId = NULL; /* we're not interested in processes that are not active or waiting on a lock */ if (currentProc->pid <= 0) { continue; } GetBackendDataForProc(currentProc, ¤tBackendData); /* we're only interested in distribtued transactions */ if (!IsInDistributedTransaction(¤tBackendData)) { continue; } currentTransactionId = ¤tBackendData.transactionId; if (currentTransactionId->transactionNumber != transactionNode->transactionId.transactionNumber) { continue; } /* we're only interested in transactions started on this node */ if (!currentTransactionId->transactionOriginator) { continue; } /* at the point we should only have transactions initiated by this node */ Assert(currentTransactionId->initiatorNodeIdentifier == GetLocalGroupId()); transactionNode->initiatorProc = currentProc; return true; } return false; } /* * BuildAdjacencyListsForWaitGraph converts the input wait graph to * an adjacency list for further processing. * * The input wait graph consists of set of wait edges between all * backends in the Citus cluster. * * We represent the adjacency list with an HTAB structure. Each node is * represented with a DistributedTransactionId and each edge is represented with * a TransactionNode structure. * * While iterating over the input wait edges, we follow the algorithm * below: * for each edge in waitGraph: * - find the corresponding nodes for waiting and * blocking transactions in the adjacency list * - if not found, add new node(s) to the list * - Add blocking transaction to the waiting transaction's waitFor * list * * The format of the adjacency list becomes the following: * [transactionId] = [transactionNode->waitsFor {list of waiting transaction nodes}] */ extern HTAB * BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph) { HASHCTL info; uint32 hashFlags = 0; HTAB *adjacencyList = NULL; int edgeIndex = 0; int edgeCount = waitGraph->edgeCount; memset(&info, 0, sizeof(info)); info.keysize = sizeof(DistributedTransactionId); info.entrysize = sizeof(TransactionNode); info.hash = DistributedTransactionIdHash; info.match = DistributedTransactionIdCompare; info.hcxt = CurrentMemoryContext; hashFlags = (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE); adjacencyList = hash_create("distributed deadlock detection", 64, &info, hashFlags); for (edgeIndex = 0; edgeIndex < edgeCount; edgeIndex++) { WaitEdge *edge = &waitGraph->edges[edgeIndex]; TransactionNode *waitingTransaction = NULL; TransactionNode *blockingTransaction = NULL; bool transactionOriginator = false; DistributedTransactionId waitingId = { edge->waitingNodeId, transactionOriginator, edge->waitingTransactionNum, edge->waitingTransactionStamp }; DistributedTransactionId blockingId = { edge->blockingNodeId, transactionOriginator, edge->blockingTransactionNum, edge->blockingTransactionStamp }; waitingTransaction = GetOrCreateTransactionNode(adjacencyList, &waitingId); blockingTransaction = GetOrCreateTransactionNode(adjacencyList, &blockingId); waitingTransaction->waitsFor = lappend(waitingTransaction->waitsFor, blockingTransaction); } return adjacencyList; } /* * GetOrCreateTransactionNode searches distributedTransactionHash for the given * given transactionId. 
If the transaction is not found, a new transaction node * with the given transaction identifier is added. */ static TransactionNode * GetOrCreateTransactionNode(HTAB *adjacencyList, DistributedTransactionId *transactionId) { TransactionNode *transactionNode = NULL; bool found = false; transactionNode = (TransactionNode *) hash_search(adjacencyList, transactionId, HASH_ENTER, &found); if (!found) { transactionNode->waitsFor = NIL; transactionNode->initiatorProc = NULL; } return transactionNode; } /* * DistributedTransactionIdHash returns hashed value for a given distributed * transaction id. */ static uint32 DistributedTransactionIdHash(const void *key, Size keysize) { DistributedTransactionId *entry = (DistributedTransactionId *) key; uint32 hash = 0; hash = hash_uint32(entry->initiatorNodeIdentifier); hash = hash_combine(hash, hash_any((unsigned char *) &entry->transactionNumber, sizeof(int64))); hash = hash_combine(hash, hash_any((unsigned char *) &entry->timestamp, sizeof(TimestampTz))); return hash; } /* * DistributedTransactionIdCompare compares DistributedTransactionId's a and b * and returns -1 if a < b, 1 if a > b, 0 if they are equal. * * DistributedTransactionId are first compared by their timestamp, then transaction * number, then node identifier. */ static int DistributedTransactionIdCompare(const void *a, const void *b, Size keysize) { DistributedTransactionId *xactIdA = (DistributedTransactionId *) a; DistributedTransactionId *xactIdB = (DistributedTransactionId *) b; if (!TimestampDifferenceExceeds(xactIdB->timestamp, xactIdA->timestamp, 0)) { /* ! (B <= A) = A < B */ return -1; } else if (!TimestampDifferenceExceeds(xactIdA->timestamp, xactIdB->timestamp, 0)) { /* ! (A <= B) = A > B */ return 1; } else if (xactIdA->transactionNumber < xactIdB->transactionNumber) { return -1; } else if (xactIdA->transactionNumber > xactIdB->transactionNumber) { return 1; } else if (xactIdA->initiatorNodeIdentifier < xactIdB->initiatorNodeIdentifier) { return -1; } else if (xactIdA->initiatorNodeIdentifier > xactIdB->initiatorNodeIdentifier) { return 1; } else { return 0; } } /* * LogCancellingBackend should only be called when a distributed transaction's * backend is cancelled due to distributed deadlocks. It sends which transaction * is cancelled and its corresponding pid to the log. */ static void LogCancellingBackend(TransactionNode *transactionNode) { StringInfo logMessage = NULL; if (!LogDistributedDeadlockDetection) { return; } logMessage = makeStringInfo(); appendStringInfo(logMessage, "Cancelling the following backend " "to resolve distributed deadlock " "(transaction numner = %ld, pid = %d)", transactionNode->transactionId.transactionNumber, transactionNode->initiatorProc->pid); LogDistributedDeadlockDebugMessage(logMessage->data); } /* * LogTransactionNode converts the transaction node to a human readable form * and sends to the logs via LogDistributedDeadlockDebugMessage(). 
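 *
 * For illustration only (not part of the original source): DistributedTransactionIdHash
 * and DistributedTransactionIdCompare above follow the usual pattern for using a
 * composite struct as a hash key, namely hash every field and mix the results, and
 * compare fields in a fixed order to obtain a total ordering. A minimal sketch with
 * ordinary integer types; the mixing constant is an illustrative choice, not the one
 * PostgreSQL's hash_combine() uses:
 *
 *   #include <stdint.h>
 *   #include <stdio.h>
 *
 *   typedef struct Key
 *   {
 *       int32_t nodeId;
 *       uint64_t transactionNumber;
 *       int64_t timestamp;
 *   } Key;
 *
 *   static uint32_t MixHash(uint32_t a, uint32_t b)
 *   {
 *       return a ^ (b + 0x9e3779b9 + (a << 6) + (a >> 2));  // example mixer
 *   }
 *
 *   static uint32_t KeyHash(const Key *key)
 *   {
 *       uint32_t hash = (uint32_t) key->nodeId;
 *       hash = MixHash(hash, (uint32_t) key->transactionNumber);
 *       hash = MixHash(hash, (uint32_t) key->timestamp);
 *       return hash;
 *   }
 *
 *   static int KeyCompare(const Key *a, const Key *b)
 *   {
 *       if (a->timestamp != b->timestamp)
 *           return a->timestamp < b->timestamp ? -1 : 1;
 *       if (a->transactionNumber != b->transactionNumber)
 *           return a->transactionNumber < b->transactionNumber ? -1 : 1;
 *       if (a->nodeId != b->nodeId)
 *           return a->nodeId < b->nodeId ? -1 : 1;
 *       return 0;
 *   }
 *
 *   int main(void)
 *   {
 *       Key key = { 1, 42, 1000 };
 *       printf("%u %d\n", KeyHash(&key), KeyCompare(&key, &key));
 *       return 0;
 *   }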
*/ static void LogTransactionNode(TransactionNode *transactionNode) { StringInfo logMessage = NULL; DistributedTransactionId *transactionId = NULL; if (!LogDistributedDeadlockDetection) { return; } logMessage = makeStringInfo(); transactionId = &(transactionNode->transactionId); appendStringInfo(logMessage, "[DistributedTransactionId: (%d, %ld, %s)] = ", transactionId->initiatorNodeIdentifier, transactionId->transactionNumber, timestamptz_to_str(transactionId->timestamp)); appendStringInfo(logMessage, "[WaitsFor transaction numbers: %s]", WaitsForToString(transactionNode->waitsFor)); /* log the backend query if the proc is associated with the transaction */ if (transactionNode->initiatorProc != NULL) { const char *backendQuery = pgstat_get_backend_current_activity(transactionNode->initiatorProc->pid, false); appendStringInfo(logMessage, "[Backend Query: %s]", backendQuery); } LogDistributedDeadlockDebugMessage(logMessage->data); } /* * LogDistributedDeadlockDebugMessage checks EnableDistributedDeadlockDebugging flag. If * it is true, the input message is sent to the logs with LOG level. Also, current timestamp * is prepanded to the message. */ static void LogDistributedDeadlockDebugMessage(const char *errorMessage) { if (!LogDistributedDeadlockDetection) { return; } ereport(LOG, (errmsg("[%s] %s", timestamptz_to_str(GetCurrentTimestamp()), errorMessage))); } /* * WaitsForToString is only intended for testing and debugging. It gets a * waitsForList and returns the list of transaction nodes' transactionNumber * in a string. */ char * WaitsForToString(List *waitsFor) { StringInfo transactionIdStr = makeStringInfo(); ListCell *waitsForCell = NULL; foreach(waitsForCell, waitsFor) { TransactionNode *waitingNode = (TransactionNode *) lfirst(waitsForCell); if (transactionIdStr->len != 0) { appendStringInfoString(transactionIdStr, ","); } appendStringInfo(transactionIdStr, "%ld", waitingNode->transactionId.transactionNumber); } return transactionIdStr->data; } citus-7.0.3/src/backend/distributed/transaction/lock_graph.c000066400000000000000000000540151317107136600241640ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * lock_graph.c * * Functions for obtaining local and global lock graphs in which each * node is a distributed transaction, and an edge represent a waiting-for * relationship. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/hash.h" #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/hash_helpers.h" #include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "storage/proc.h" #include "utils/builtins.h" #include "utils/hsearch.h" #include "utils/timestamp.h" /* * PROCStack is a stack of PGPROC pointers used to perform a depth-first search * through the lock graph. It also keeps track of which processes have been * added to the stack to avoid visiting the same process multiple times. 
*/ typedef struct PROCStack { int procCount; PGPROC **procs; bool *procAdded; } PROCStack; static void AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex); static int64 ParseIntField(PGresult *result, int rowIndex, int colIndex); static bool ParseBoolField(PGresult *result, int rowIndex, int colIndex); static TimestampTz ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex); static void ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo); static WaitGraph * BuildLocalWaitGraph(void); static bool IsProcessWaitingForRelationExtension(PGPROC *proc); static void LockLockData(void); static void UnlockLockData(void); static void AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining); static void AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining); static void AddWaitEdge(WaitGraph *waitGraph, PGPROC *waitingProc, PGPROC *blockingProc, PROCStack *remaining); static WaitEdge * AllocWaitEdge(WaitGraph *waitGraph); static void AddProcToVisit(PROCStack *remaining, PGPROC *proc); static bool IsSameLockGroup(PGPROC *leftProc, PGPROC *rightProc); static bool IsConflictingLockMask(int holdMask, int conflictMask); PG_FUNCTION_INFO_V1(dump_local_wait_edges); PG_FUNCTION_INFO_V1(dump_global_wait_edges); /* * dump_global_wait_edges returns global wait edges for distributed transactions * originating from the node on which it is started. */ Datum dump_global_wait_edges(PG_FUNCTION_ARGS) { WaitGraph *waitGraph = NULL; waitGraph = BuildGlobalWaitGraph(); ReturnWaitGraph(waitGraph, fcinfo); return (Datum) 0; } /* * BuildGlobalWaitGraph builds a wait graph for distributed transactions * that originate from this node, including edges from all (other) worker * nodes. 
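 *
 * As an illustrative aside (not part of the original source), BuildGlobalWaitGraph
 * below hides network latency by first dispatching the query to every worker and
 * only then collecting the results. A rough sketch of that start-everything-then-wait
 * pattern using plain libpq; the connection strings are made-up examples and error
 * handling is trimmed:
 *
 *   #include <stdio.h>
 *   #include <libpq-fe.h>
 *
 *   #define WORKER_COUNT 2
 *
 *   int main(void)
 *   {
 *       const char *conninfo[WORKER_COUNT] = {
 *           "host=worker1 dbname=postgres", "host=worker2 dbname=postgres"
 *       };
 *       PGconn *conns[WORKER_COUNT];
 *
 *       for (int i = 0; i < WORKER_COUNT; i++)
 *           conns[i] = PQconnectdb(conninfo[i]);
 *
 *       // first fire off the query everywhere without waiting ...
 *       for (int i = 0; i < WORKER_COUNT; i++)
 *           PQsendQuery(conns[i], "SELECT * FROM dump_local_wait_edges()");
 *
 *       // ... then collect the results, so the round trips overlap
 *       for (int i = 0; i < WORKER_COUNT; i++)
 *       {
 *           PGresult *result = PQgetResult(conns[i]);
 *           printf("worker %d returned %d row(s)\n", i, PQntuples(result));
 *           PQclear(result);
 *           while ((result = PQgetResult(conns[i])) != NULL)
 *               PQclear(result);       // drain until NULL, as libpq requires
 *           PQfinish(conns[i]);
 *       }
 *       return 0;
 *   }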
*/ WaitGraph * BuildGlobalWaitGraph(void) { List *workerNodeList = ActiveReadableNodeList(); ListCell *workerNodeCell = NULL; char *nodeUser = CitusExtensionOwnerName(); List *connectionList = NIL; ListCell *connectionCell = NULL; int localNodeId = GetLocalGroupId(); WaitGraph *waitGraph = BuildLocalWaitGraph(); /* open connections in parallel */ foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; MultiConnection *connection = NULL; int connectionFlags = 0; if (workerNode->groupId == localNodeId) { /* we already have local wait edges */ continue; } connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, nodeUser, NULL); connectionList = lappend(connectionList, connection); } FinishConnectionListEstablishment(connectionList); /* send commands in parallel */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); int querySent = false; const char *command = "SELECT * FROM dump_local_wait_edges()"; querySent = SendRemoteCommand(connection, command); if (querySent == 0) { ReportConnectionError(connection, WARNING); } } /* receive dump_local_wait_edges results */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); PGresult *result = NULL; bool raiseInterrupts = true; int64 rowIndex = 0; int64 rowCount = 0; int64 colCount = 0; result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); continue; } rowCount = PQntuples(result); colCount = PQnfields(result); if (colCount != 9) { ereport(WARNING, (errmsg("unexpected number of columns from " "dump_local_wait_edges"))); continue; } for (rowIndex = 0; rowIndex < rowCount; rowIndex++) { AddWaitEdgeFromResult(waitGraph, result, rowIndex); } PQclear(result); ForgetResults(connection); } return waitGraph; } /* * AddWaitEdgeFromResult adds an edge to the wait graph that is read from * a PGresult. */ static void AddWaitEdgeFromResult(WaitGraph *waitGraph, PGresult *result, int rowIndex) { WaitEdge *waitEdge = AllocWaitEdge(waitGraph); waitEdge->waitingPid = ParseIntField(result, rowIndex, 0); waitEdge->waitingNodeId = ParseIntField(result, rowIndex, 1); waitEdge->waitingTransactionNum = ParseIntField(result, rowIndex, 2); waitEdge->waitingTransactionStamp = ParseTimestampTzField(result, rowIndex, 3); waitEdge->blockingPid = ParseIntField(result, rowIndex, 4); waitEdge->blockingNodeId = ParseIntField(result, rowIndex, 5); waitEdge->blockingTransactionNum = ParseIntField(result, rowIndex, 6); waitEdge->blockingTransactionStamp = ParseTimestampTzField(result, rowIndex, 7); waitEdge->isBlockingXactWaiting = ParseBoolField(result, rowIndex, 8); } /* * ParseIntField parses a int64 from a remote result or returns 0 if the * result is NULL. */ static int64 ParseIntField(PGresult *result, int rowIndex, int colIndex) { char *resultString = NULL; if (PQgetisnull(result, rowIndex, colIndex)) { return 0; } resultString = PQgetvalue(result, rowIndex, colIndex); return pg_strtouint64(resultString, NULL, 10); } /* * ParseBoolField parses a bool from a remote result or returns false if the * result is NULL. 
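 *
 * For illustration only (not part of the original source): ParseIntField,
 * ParseBoolField and ParseTimestampTzField here all follow the same defensive
 * pattern of treating a NULL column as a zero/false default rather than an error,
 * since a backend that is not in a distributed transaction legitimately reports
 * NULL transaction fields. A tiny standalone sketch of that idea for integers,
 * using strtoull from the C library rather than PostgreSQL's pg_strtouint64:
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *
 *   // value may be NULL when the remote column was NULL
 *   static unsigned long long ParseUint64OrZero(const char *value)
 *   {
 *       if (value == NULL)
 *           return 0;
 *       return strtoull(value, NULL, 10);
 *   }
 *
 *   int main(void)
 *   {
 *       printf("%llu %llu\n", ParseUint64OrZero("42"), ParseUint64OrZero(NULL));
 *       return 0;
 *   }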
*/ static bool ParseBoolField(PGresult *result, int rowIndex, int colIndex) { char *resultString = NULL; if (PQgetisnull(result, rowIndex, colIndex)) { return false; } resultString = PQgetvalue(result, rowIndex, colIndex); if (strlen(resultString) != 1) { return false; } return resultString[0] == 't'; } /* * ParseTimestampTzField parses a timestamptz from a remote result or returns * 0 if the result is NULL. */ static TimestampTz ParseTimestampTzField(PGresult *result, int rowIndex, int colIndex) { char *resultString = NULL; Datum resultStringDatum = 0; Datum timestampDatum = 0; if (PQgetisnull(result, rowIndex, colIndex)) { return 0; } resultString = PQgetvalue(result, rowIndex, colIndex); resultStringDatum = CStringGetDatum(resultString); timestampDatum = DirectFunctionCall3(timestamptz_in, resultStringDatum, 0, -1); return DatumGetTimestampTz(timestampDatum); } /* * dump_local_wait_edges returns wait edges for distributed transactions * running on the node on which it is called, which originate from the source node. */ Datum dump_local_wait_edges(PG_FUNCTION_ARGS) { WaitGraph *waitGraph = BuildLocalWaitGraph(); ReturnWaitGraph(waitGraph, fcinfo); return (Datum) 0; } /* * ReturnWaitGraph returns a wait graph for a set returning function. */ static void ReturnWaitGraph(WaitGraph *waitGraph, FunctionCallInfo fcinfo) { ReturnSetInfo *resultInfo = (ReturnSetInfo *) fcinfo->resultinfo; TupleDesc tupleDesc = NULL; Tuplestorestate *tupleStore = NULL; MemoryContext per_query_ctx = NULL; MemoryContext oldContext = NULL; size_t curEdgeNum = 0; /* check to see if caller supports us returning a tuplestore */ if (resultInfo == NULL || !IsA(resultInfo, ReturnSetInfo)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg( "set-valued function called in context that cannot accept a set"))); } if (!(resultInfo->allowedModes & SFRM_Materialize)) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("materialize mode required, but it is not " \ "allowed in this context"))); } /* Build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) { elog(ERROR, "return type must be a row type"); } per_query_ctx = resultInfo->econtext->ecxt_per_query_memory; oldContext = MemoryContextSwitchTo(per_query_ctx); tupleStore = tuplestore_begin_heap(true, false, work_mem); resultInfo->returnMode = SFRM_Materialize; resultInfo->setResult = tupleStore; resultInfo->setDesc = tupleDesc; MemoryContextSwitchTo(oldContext); /* * Columns: * 00: waiting_pid * 01: waiting_node_id * 02: waiting_transaction_num * 03: waiting_transaction_stamp * 04: blocking_pid * 05: blocking__node_id * 06: blocking_transaction_num * 07: blocking_transaction_stamp * 08: blocking_transaction_waiting */ for (curEdgeNum = 0; curEdgeNum < waitGraph->edgeCount; curEdgeNum++) { Datum values[9]; bool nulls[9]; WaitEdge *curEdge = &waitGraph->edges[curEdgeNum]; memset(values, 0, sizeof(values)); memset(nulls, 0, sizeof(nulls)); values[0] = Int32GetDatum(curEdge->waitingPid); values[1] = Int32GetDatum(curEdge->waitingNodeId); if (curEdge->waitingTransactionNum != 0) { values[2] = Int64GetDatum(curEdge->waitingTransactionNum); values[3] = TimestampTzGetDatum(curEdge->waitingTransactionStamp); } else { nulls[2] = true; nulls[3] = true; } values[4] = Int32GetDatum(curEdge->blockingPid); values[5] = Int32GetDatum(curEdge->blockingNodeId); if (curEdge->blockingTransactionNum != 0) { values[6] = Int64GetDatum(curEdge->blockingTransactionNum); values[7] = 
TimestampTzGetDatum(curEdge->blockingTransactionStamp); } else { nulls[6] = true; nulls[7] = true; } values[8] = BoolGetDatum(curEdge->isBlockingXactWaiting); tuplestore_putvalues(tupleStore, tupleDesc, values, nulls); } /* clean up and return the tuplestore */ tuplestore_donestoring(tupleStore); } /* * BuildLocalWaitGraph builds a wait graph for distributed transactions * that originate from the local node. */ static WaitGraph * BuildLocalWaitGraph(void) { WaitGraph *waitGraph = NULL; int curBackend = 0; PROCStack remaining; /* * Try hard to avoid allocations while holding lock. Thus we pre-allocate * space for locks in large batches - for common scenarios this should be * more than enough space to build the list of wait edges without a single * allocation. */ waitGraph = (WaitGraph *) palloc0(sizeof(WaitGraph)); waitGraph->localNodeId = GetLocalGroupId(); waitGraph->allocatedSize = MaxBackends * 3; waitGraph->edgeCount = 0; waitGraph->edges = (WaitEdge *) palloc(waitGraph->allocatedSize * sizeof(WaitEdge)); remaining.procs = (PGPROC **) palloc(sizeof(PGPROC *) * MaxBackends); remaining.procAdded = (bool *) palloc0(sizeof(bool) * MaxBackends); remaining.procCount = 0; LockLockData(); /* * Build lock-graph. We do so by first finding all procs which we are * interested in (in a distributed transaction, and blocked). Once * those are collected, do depth first search over all procs blocking * those. */ /* build list of starting procs */ for (curBackend = 0; curBackend < MaxBackends; curBackend++) { PGPROC *currentProc = &ProcGlobal->allProcs[curBackend]; BackendData currentBackendData; /* skip if the PGPROC slot is unused */ if (currentProc->pid == 0) { continue; } GetBackendDataForProc(currentProc, &currentBackendData); /* * Only start searching from distributed transactions, since we only * care about distributed transactions for the purpose of distributed * deadlock detection. */ if (!IsInDistributedTransaction(&currentBackendData)) { continue; } /* skip if the process is not blocked */ if (!IsProcessWaitingForLock(currentProc)) { continue; } /* skip if the process is blocked for relation extension */ if (IsProcessWaitingForRelationExtension(currentProc)) { continue; } AddProcToVisit(&remaining, currentProc); } while (remaining.procCount > 0) { PGPROC *waitingProc = remaining.procs[--remaining.procCount]; /* only blocked processes result in wait edges */ if (!IsProcessWaitingForLock(waitingProc)) { continue; } /* skip if the process is blocked for relation extension */ if (IsProcessWaitingForRelationExtension(waitingProc)) { continue; } /* * Record an edge for everyone already holding the lock in a * conflicting manner ("hard edges" in postgres parlance). */ AddEdgesForLockWaits(waitGraph, waitingProc, &remaining); /* * Record an edge for everyone in front of us in the wait-queue * for the lock ("soft edges" in postgres parlance). */ AddEdgesForWaitQueue(waitGraph, waitingProc, &remaining); } UnlockLockData(); return waitGraph; } /* * IsProcessWaitingForRelationExtension returns true if the given PROC is * waiting on a relation extension lock. * * In general for the purpose of distributed deadlock detection, we should * skip if the process is blocked on relation extension. Those locks are * held for a short duration while the relation is actually extended on * the disk and released as soon as the extension is done, even before the * execution of the command that triggered the extension finishes. Thus, * recording such waits on our lock graphs could lead to detecting wrong * distributed deadlocks.
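 *
 * As an illustrative aside (not part of the original source), BuildLocalWaitGraph
 * above works in two phases: it first seeds a work list with only the backends that
 * are interesting (in a distributed transaction, blocked, and not merely waiting on
 * relation extension), and then drains that list, adding edges and newly discovered
 * blockers as it goes. A compact sketch of that filter-then-drain shape, with a
 * made-up predicate standing in for the real checks:
 *
 *   #include <stdbool.h>
 *   #include <stdio.h>
 *
 *   #define BACKEND_COUNT 8
 *
 *   // stand-in for IsInDistributedTransaction / IsProcessWaitingForLock etc.
 *   static bool IsInteresting(int backend) { return backend % 2 == 0; }
 *
 *   int main(void)
 *   {
 *       int worklist[BACKEND_COUNT];
 *       int worklistSize = 0;
 *
 *       // phase 1: seed the work list with backends that pass every filter
 *       for (int backend = 0; backend < BACKEND_COUNT; backend++)
 *           if (IsInteresting(backend))
 *               worklist[worklistSize++] = backend;
 *
 *       // phase 2: drain the list; the real code adds wait edges and pushes
 *       // the processes blocking each entry
 *       while (worklistSize > 0)
 *           printf("visit backend %d\n", worklist[--worklistSize]);
 *       return 0;
 *   }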
*/ static bool IsProcessWaitingForRelationExtension(PGPROC *proc) { PROCLOCK *waitProcLock = NULL; LOCK *waitLock = NULL; if (proc->waitStatus != STATUS_WAITING) { return false; } waitProcLock = proc->waitProcLock; waitLock = waitProcLock->tag.myLock; return waitLock->tag.locktag_type == LOCKTAG_RELATION_EXTEND; } /* * LockLockData locks the shared lock data structure, which prevents * concurrent lock acquisitions/releases. * * The function also acquires a lock on the backend shared memory to prevent * new backends from starting. */ static void LockLockData(void) { int partitionNum = 0; LockBackendSharedMemory(LW_SHARED); for (partitionNum = 0; partitionNum < NUM_LOCK_PARTITIONS; partitionNum++) { LWLockAcquire(LockHashPartitionLockByIndex(partitionNum), LW_SHARED); } } /* * UnlockLockData unlocks the locks on the shared lock data structure in reverse * order since LWLockRelease searches the given lock from the end of the * held_lwlocks array. * * The function also releases the shared memory lock to allow new backends to * start. */ static void UnlockLockData(void) { int partitionNum = 0; for (partitionNum = NUM_LOCK_PARTITIONS - 1; partitionNum >= 0; partitionNum--) { LWLockRelease(LockHashPartitionLockByIndex(partitionNum)); } UnlockBackendSharedMemory(); } /* * AddEdgesForLockWaits adds an edge to the wait graph for every process that * already holds, in a conflicting mode, the lock that waitingProc is waiting for. * * This function iterates over the procLocks data structure in shared memory, * which also contains entries for locks which have not been granted yet, but * it does not reflect the order of the wait queue. We therefore handle the * wait queue separately. */ static void AddEdgesForLockWaits(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining) { /* the lock for which this process is waiting */ LOCK *waitLock = waitingProc->waitLock; /* determine the conflict mask for the lock level used by the process */ LockMethod lockMethodTable = GetLocksMethodTable(waitLock); int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode]; /* iterate through the queue of processes holding the lock */ SHM_QUEUE *procLocks = &waitLock->procLocks; PROCLOCK *procLock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink)); while (procLock != NULL) { PGPROC *currentProc = procLock->tag.myProc; /* * Skip processes from the same lock group, processes that don't conflict, * and processes that are waiting on a relation extension lock, which * will be released shortly. */ if (!IsSameLockGroup(waitingProc, currentProc) && IsConflictingLockMask(procLock->holdMask, conflictMask) && !IsProcessWaitingForRelationExtension(currentProc)) { AddWaitEdge(waitGraph, waitingProc, currentProc, remaining); } procLock = (PROCLOCK *) SHMQueueNext(procLocks, &procLock->lockLink, offsetof(PROCLOCK, lockLink)); } } /* * AddEdgesForWaitQueue adds an edge to the wait graph for processes in front of * waitingProc in the wait queue that are trying to acquire a conflicting lock.
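 *
 * For illustration only (not part of the original source): both AddEdgesForLockWaits
 * above and AddEdgesForWaitQueue below decide whether two processes conflict by
 * intersecting bitmasks, one bit per lock mode held (or awaited), tested against the
 * conflict-table entry for the waiter's mode. A self-contained sketch of that bitmask
 * test; the two lock modes and their conflict table are simplified stand-ins, not
 * PostgreSQL's real tables:
 *
 *   #include <stdbool.h>
 *   #include <stdio.h>
 *
 *   #define LOCKBIT(mode) (1 << (mode))
 *
 *   enum { SHARE_MODE = 1, EXCLUSIVE_MODE = 2 };
 *
 *   // conflictTable[mode] lists every mode that conflicts with it
 *   static const int conflictTable[] = {
 *       [SHARE_MODE] = LOCKBIT(EXCLUSIVE_MODE),
 *       [EXCLUSIVE_MODE] = LOCKBIT(SHARE_MODE) | LOCKBIT(EXCLUSIVE_MODE),
 *   };
 *
 *   static bool IsConflicting(int holdMask, int waitedMode)
 *   {
 *       return (holdMask & conflictTable[waitedMode]) != 0;
 *   }
 *
 *   int main(void)
 *   {
 *       int holdMask = LOCKBIT(SHARE_MODE);          // someone holds SHARE
 *       printf("%d\n", IsConflicting(holdMask, EXCLUSIVE_MODE));  // prints 1
 *       printf("%d\n", IsConflicting(holdMask, SHARE_MODE));      // prints 0
 *       return 0;
 *   }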
*/ static void AddEdgesForWaitQueue(WaitGraph *waitGraph, PGPROC *waitingProc, PROCStack *remaining) { /* the lock for which this process is waiting */ LOCK *waitLock = waitingProc->waitLock; /* determine the conflict mask for the lock level used by the process */ LockMethod lockMethodTable = GetLocksMethodTable(waitLock); int conflictMask = lockMethodTable->conflictTab[waitingProc->waitLockMode]; /* iterate through the wait queue */ PROC_QUEUE *waitQueue = &(waitLock->waitProcs); int queueSize = waitQueue->size; PGPROC *currentProc = (PGPROC *) waitQueue->links.next; /* * Iterate through the queue from the start until we encounter waitingProc, * since we only care about processes in front of waitingProc in the queue. */ while (queueSize-- > 0 && currentProc != waitingProc) { int awaitMask = LOCKBIT_ON(currentProc->waitLockMode); /* * Skip processes from the same lock group, processes that don't conflict, * and processes that are waiting on a relation extension lock, which * will be released shortly. */ if (!IsSameLockGroup(waitingProc, currentProc) && IsConflictingLockMask(awaitMask, conflictMask) && !IsProcessWaitingForRelationExtension(currentProc)) { AddWaitEdge(waitGraph, waitingProc, currentProc, remaining); } currentProc = (PGPROC *) currentProc->links.next; } } /* * AddWaitEdge adds a new wait edge to a wait graph. The nodes in the graph are * transactions and an edge indicates the "waiting" process is blocked on a lock * held by the "blocking" process. * * If the blocking process is itself waiting then it is added to the remaining * stack. */ static void AddWaitEdge(WaitGraph *waitGraph, PGPROC *waitingProc, PGPROC *blockingProc, PROCStack *remaining) { WaitEdge *curEdge = AllocWaitEdge(waitGraph); BackendData waitingBackendData; BackendData blockingBackendData; GetBackendDataForProc(waitingProc, &waitingBackendData); GetBackendDataForProc(blockingProc, &blockingBackendData); curEdge->isBlockingXactWaiting = IsProcessWaitingForLock(blockingProc) && !IsProcessWaitingForRelationExtension(blockingProc); if (curEdge->isBlockingXactWaiting) { AddProcToVisit(remaining, blockingProc); } curEdge->waitingPid = waitingProc->pid; if (IsInDistributedTransaction(&waitingBackendData)) { DistributedTransactionId *waitingTransactionId = &waitingBackendData.transactionId; curEdge->waitingNodeId = waitingTransactionId->initiatorNodeIdentifier; curEdge->waitingTransactionNum = waitingTransactionId->transactionNumber; curEdge->waitingTransactionStamp = waitingTransactionId->timestamp; } else { curEdge->waitingNodeId = waitGraph->localNodeId; curEdge->waitingTransactionNum = 0; curEdge->waitingTransactionStamp = 0; } curEdge->blockingPid = blockingProc->pid; if (IsInDistributedTransaction(&blockingBackendData)) { DistributedTransactionId *blockingTransactionId = &blockingBackendData.transactionId; curEdge->blockingNodeId = blockingTransactionId->initiatorNodeIdentifier; curEdge->blockingTransactionNum = blockingTransactionId->transactionNumber; curEdge->blockingTransactionStamp = blockingTransactionId->timestamp; } else { curEdge->blockingNodeId = waitGraph->localNodeId; curEdge->blockingTransactionNum = 0; curEdge->blockingTransactionStamp = 0; } } /* * AllocWaitEdge allocates a wait edge as part of the given wait graph. * If the wait graph has insufficient space its size is doubled using * repalloc. 
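 *
 * As an illustrative aside (not part of the original source), AllocWaitEdge below
 * grows the edge array geometrically: whenever the pre-sized buffer is full its size
 * is doubled, so a long traversal needs only a logarithmic number of reallocations.
 * The same pattern with the standard C allocator, error handling omitted for brevity:
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *
 *   typedef struct Edge { int waiting; int blocking; } Edge;
 *
 *   typedef struct EdgeArray
 *   {
 *       size_t used;
 *       size_t allocated;
 *       Edge *edges;
 *   } EdgeArray;
 *
 *   static Edge * AllocEdge(EdgeArray *array)
 *   {
 *       if (array->used == array->allocated)
 *       {
 *           array->allocated *= 2;     // double instead of growing by one
 *           array->edges = realloc(array->edges,
 *                                  array->allocated * sizeof(Edge));
 *       }
 *       return &array->edges[array->used++];
 *   }
 *
 *   int main(void)
 *   {
 *       EdgeArray array = { 0, 4, malloc(4 * sizeof(Edge)) };
 *       for (int i = 0; i < 100; i++)
 *           AllocEdge(&array)->waiting = i;
 *       printf("%zu edges in a buffer of %zu\n", array.used, array.allocated);
 *       free(array.edges);
 *       return 0;
 *   }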
*/ static WaitEdge * AllocWaitEdge(WaitGraph *waitGraph) { /* ensure space for new edge */ if (waitGraph->allocatedSize == waitGraph->edgeCount) { waitGraph->allocatedSize *= 2; waitGraph->edges = (WaitEdge *) repalloc(waitGraph->edges, sizeof(WaitEdge) * waitGraph->allocatedSize); } return &waitGraph->edges[waitGraph->edgeCount++]; } /* * AddProcToVisit adds a process to the stack of processes to visit * in the depth-first search, unless it was already added. */ static void AddProcToVisit(PROCStack *remaining, PGPROC *proc) { if (remaining->procAdded[proc->pgprocno]) { return; } Assert(remaining->procCount < MaxBackends); remaining->procs[remaining->procCount++] = proc; remaining->procAdded[proc->pgprocno] = true; } /* * IsProcessWaitingForLock returns whether a given process is waiting for a lock. */ bool IsProcessWaitingForLock(PGPROC *proc) { return proc->waitStatus == STATUS_WAITING; } /* * IsSameLockGroup returns whether two processes are part of the same lock group, * meaning they are either the same process, or have the same lock group leader. */ static bool IsSameLockGroup(PGPROC *leftProc, PGPROC *rightProc) { return leftProc == rightProc || (leftProc->lockGroupLeader != NULL && leftProc->lockGroupLeader == rightProc->lockGroupLeader); } /* * IsConflictingLockMask returns whether the given conflict mask conflicts with the * holdMask. * * holdMask is a bitmask with the i-th bit turned on if a lock mode i is held. * * conflictMask is a bitmask with the j-th bit turned on if it conflicts with * lock mode i. */ static bool IsConflictingLockMask(int holdMask, int conflictMask) { return (holdMask & conflictMask) != 0; } /* * IsInDistributedTransaction returns whether the given backend is in a * distributed transaction. */ bool IsInDistributedTransaction(BackendData *backendData) { return backendData->transactionId.transactionNumber != 0; } citus-7.0.3/src/backend/distributed/transaction/multi_shard_transaction.c000066400000000000000000000166731317107136600270030ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_shard_transaction.c * This file contains functions for managing 1PC or 2PC transactions * across many shard placements. * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "libpq-fe.h" #include "postgres.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/multi_router_executor.h" #include "distributed/multi_shard_transaction.h" #include "distributed/placement_connection.h" #include "distributed/shardinterval_utils.h" #include "distributed/worker_manager.h" #include "nodes/pg_list.h" #include "storage/ipc.h" #include "utils/memutils.h" #define INITIAL_SHARD_CONNECTION_HASH_SIZE 128 /* * OpenTransactionsForAllTasks opens a connection for each task, * taking into account which shards are read and modified by the task * to select the appopriate connection, or error out if no appropriate * connection can be found. The set of connections is returned as an * anchor shard ID -> ShardConnections hash. 
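 *
 * For illustration only (not part of the original source): the function below
 * records, for every shard placement, how the placement is about to be accessed
 * (DML for modifications, DDL otherwise, plus SELECT accesses for subqueries) so
 * that a suitable connection can be chosen. A simplified sketch of assembling such
 * an access list; the enum and struct names are illustrative stand-ins, not the
 * exact Citus types:
 *
 *   #include <stdio.h>
 *
 *   typedef enum { ACCESS_SELECT, ACCESS_DML, ACCESS_DDL } AccessType;
 *
 *   typedef struct PlacementAccess
 *   {
 *       int placementId;
 *       AccessType accessType;
 *   } PlacementAccess;
 *
 *   int main(void)
 *   {
 *       int isModification = 1;
 *       PlacementAccess accessList[2];
 *       int accessCount = 0;
 *
 *       // the anchor placement is accessed as DML or DDL ...
 *       accessList[accessCount].placementId = 100;
 *       accessList[accessCount++].accessType =
 *           isModification ? ACCESS_DML : ACCESS_DDL;
 *
 *       // ... plus a SELECT access for each placement read by subqueries
 *       accessList[accessCount].placementId = 200;
 *       accessList[accessCount++].accessType = ACCESS_SELECT;
 *
 *       printf("%d access(es) recorded\n", accessCount);
 *       return 0;
 *   }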
*/ HTAB * OpenTransactionsForAllTasks(List *taskList, int connectionFlags) { HTAB *shardConnectionHash = NULL; ListCell *taskCell = NULL; List *newConnectionList = NIL; shardConnectionHash = CreateShardConnectionHash(CurrentMemoryContext); connectionFlags |= CONNECTION_PER_PLACEMENT; /* open connections to shards which don't have connections yet */ foreach(taskCell, taskList) { Task *task = (Task *) lfirst(taskCell); ShardPlacementAccessType accessType = PLACEMENT_ACCESS_SELECT; uint64 shardId = task->anchorShardId; ShardConnections *shardConnections = NULL; bool shardConnectionsFound = false; List *shardPlacementList = NIL; ListCell *placementCell = NULL; shardConnections = GetShardHashConnections(shardConnectionHash, shardId, &shardConnectionsFound); if (shardConnectionsFound) { continue; } shardPlacementList = FinalizedShardPlacementList(shardId); if (shardPlacementList == NIL) { /* going to have to have some placements to do any work */ ereport(ERROR, (errmsg("could not find any shard placements for the shard " UINT64_FORMAT, shardId))); } if (task->taskType == MODIFY_TASK) { accessType = PLACEMENT_ACCESS_DML; } else { /* can only open connections for DDL and DML commands */ Assert(task->taskType == DDL_TASK); accessType = PLACEMENT_ACCESS_DDL; } foreach(placementCell, shardPlacementList) { ShardPlacement *shardPlacement = (ShardPlacement *) lfirst(placementCell); ShardPlacementAccess placementModification; List *placementAccessList = NIL; List *placementSelectList = NIL; MultiConnection *connection = NULL; WorkerNode *workerNode = FindWorkerNode(shardPlacement->nodeName, shardPlacement->nodePort); if (workerNode == NULL) { ereport(ERROR, (errmsg("could not find worker node %s:%d", shardPlacement->nodeName, shardPlacement->nodePort))); } /* add placement access for modification */ placementModification.placement = shardPlacement; placementModification.accessType = accessType; placementAccessList = lappend(placementAccessList, &placementModification); /* add additional placement accesses for subselects (e.g. INSERT .. SELECT) */ placementSelectList = BuildPlacementSelectList(shardPlacement->groupId, task->relationShardList); placementAccessList = list_concat(placementAccessList, placementSelectList); /* * Find a connection that sees preceding writes and cannot self-deadlock, * or error out if no such connection exists. */ connection = StartPlacementListConnection(connectionFlags, placementAccessList, NULL); ClaimConnectionExclusively(connection); shardConnections->connectionList = lappend(shardConnections->connectionList, connection); newConnectionList = lappend(newConnectionList, connection); /* * Every individual failure should cause entire distributed * transaction to fail. */ MarkRemoteTransactionCritical(connection); } } /* finish connection establishment newly opened connections */ FinishConnectionListEstablishment(newConnectionList); /* the special BARE mode (for e.g. VACUUM/ANALYZE) skips BEGIN */ if (MultiShardCommitProtocol > COMMIT_PROTOCOL_BARE) { RemoteTransactionsBeginIfNecessary(newConnectionList); } return shardConnectionHash; } /* * CreateShardConnectionHash constructs a hash table which maps from shard * identifier to connection lists, passing the provided MemoryContext to * hash_create for hash allocations. 
*/ HTAB * CreateShardConnectionHash(MemoryContext memoryContext) { HTAB *shardConnectionsHash = NULL; int hashFlags = 0; HASHCTL info; memset(&info, 0, sizeof(info)); info.keysize = sizeof(int64); info.entrysize = sizeof(ShardConnections); info.hcxt = memoryContext; hashFlags = (HASH_ELEM | HASH_CONTEXT | HASH_BLOBS); shardConnectionsHash = hash_create("Shard Connections Hash", INITIAL_SHARD_CONNECTION_HASH_SIZE, &info, hashFlags); return shardConnectionsHash; } /* * GetShardHashConnections finds existing connections for a shard in the * provided hash. If not found, then a ShardConnections structure with empty * connectionList is returned. */ ShardConnections * GetShardHashConnections(HTAB *connectionHash, int64 shardId, bool *connectionsFound) { ShardConnections *shardConnections = NULL; shardConnections = (ShardConnections *) hash_search(connectionHash, &shardId, HASH_ENTER, connectionsFound); if (!*connectionsFound) { shardConnections->shardId = shardId; shardConnections->connectionList = NIL; } return shardConnections; } /* * ShardConnectionList returns the list of ShardConnections in connectionHash. */ List * ShardConnectionList(HTAB *connectionHash) { List *shardConnectionsList = NIL; HASH_SEQ_STATUS status; ShardConnections *shardConnections = NULL; if (connectionHash == NULL) { return NIL; } hash_seq_init(&status, connectionHash); shardConnections = (ShardConnections *) hash_seq_search(&status); while (shardConnections != NULL) { shardConnectionsList = lappend(shardConnectionsList, shardConnections); shardConnections = (ShardConnections *) hash_seq_search(&status); } return shardConnectionsList; } /* * ResetShardPlacementTransactionState performs cleanup after the end of a * transaction. */ void ResetShardPlacementTransactionState(void) { if (MultiShardCommitProtocol == COMMIT_PROTOCOL_BARE) { MultiShardCommitProtocol = SavedMultiShardCommitProtocol; SavedMultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; } } /* * UnclaimAllShardConnections unclaims all connections in the given * shard connections hash after previously claiming them exclusively * in OpenTransactionsToAllShardPlacements. */ void UnclaimAllShardConnections(HTAB *shardConnectionHash) { HASH_SEQ_STATUS status; ShardConnections *shardConnections = NULL; hash_seq_init(&status, shardConnectionHash); while ((shardConnections = hash_seq_search(&status)) != 0) { List *connectionList = shardConnections->connectionList; ListCell *connectionCell = NULL; foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); UnclaimConnection(connection); } } } citus-7.0.3/src/backend/distributed/transaction/remote_transaction.c000066400000000000000000001115171317107136600257540ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * remote_transaction.c * Management of transaction spanning more than one node. * * Copyright (c) 2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/xact.h" #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "distributed/remote_transaction.h" #include "distributed/transaction_identifier.h" #include "distributed/transaction_management.h" #include "distributed/transaction_recovery.h" #include "distributed/worker_manager.h" #include "utils/hsearch.h" static void StartRemoteTransactionSavepointBegin(MultiConnection *connection, SubTransactionId subId); static void FinishRemoteTransactionSavepointBegin(MultiConnection *connection, SubTransactionId subId); static void StartRemoteTransactionSavepointRelease(MultiConnection *connection, SubTransactionId subId); static void FinishRemoteTransactionSavepointRelease(MultiConnection *connection, SubTransactionId subId); static void StartRemoteTransactionSavepointRollback(MultiConnection *connection, SubTransactionId subId); static void FinishRemoteTransactionSavepointRollback(MultiConnection *connection, SubTransactionId subId); static void CheckTransactionHealth(void); static void Assign2PCIdentifier(MultiConnection *connection); static void WarnAboutLeakedPreparedTransaction(MultiConnection *connection, bool commit); /* * StartRemoteTransactionBegin initiates beginning the remote transaction in * a non-blocking manner. The function sends "BEGIN" followed by * assign_distributed_transaction_id() to assign the distributed transaction * id on the remote node. */ void StartRemoteTransactionBegin(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; StringInfo beginAndSetDistributedTransactionId = makeStringInfo(); DistributedTransactionId *distributedTransactionId = NULL; ListCell *subIdCell = NULL; List *activeSubXacts = NIL; Assert(transaction->transactionState == REMOTE_TRANS_INVALID); /* remember transaction as being in-progress */ dlist_push_tail(&InProgressTransactions, &connection->transactionNode); transaction->transactionState = REMOTE_TRANS_STARTING; /* * Explicitly specify READ COMMITTED; the default on the remote * side might have been changed, and that would cause problematic * behaviour. */ appendStringInfoString(beginAndSetDistributedTransactionId, "BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;"); /* * Append BEGIN and assign_distributed_transaction_id() statements into a single command * and send both in one step. The reason is purely performance: we don't want * separate roundtrips for these two statements.
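 *
 * As an illustrative aside (not part of the original source), several statements
 * can be packed into one command string and shipped in a single round trip. A
 * minimal sketch of building such a batched command with snprintf instead of
 * PostgreSQL's StringInfo; the node id, transaction number and timestamp are
 * made-up placeholder values:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       char command[256];
 *       int nodeId = 1;
 *       long transactionNumber = 42;
 *
 *       // one buffer, two statements, one network round trip
 *       snprintf(command, sizeof(command),
 *                "BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;"
 *                "SELECT assign_distributed_transaction_id("
 *                "%d, %ld, '2017-10-16 00:00:00+00');",
 *                nodeId, transactionNumber);
 *       printf("%s\n", command);
 *       return 0;
 *   }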
*/ distributedTransactionId = GetCurrentDistributedTransactionId(); appendStringInfo(beginAndSetDistributedTransactionId, "SELECT assign_distributed_transaction_id(%d, %ld, '%s');", distributedTransactionId->initiatorNodeIdentifier, distributedTransactionId->transactionNumber, timestamptz_to_str(distributedTransactionId->timestamp)); /* append in-progress savepoints for this transaction */ activeSubXacts = ActiveSubXacts(); transaction->lastSuccessfulSubXact = TopSubTransactionId; transaction->lastQueuedSubXact = TopSubTransactionId; foreach(subIdCell, activeSubXacts) { SubTransactionId subId = lfirst_int(subIdCell); appendStringInfo(beginAndSetDistributedTransactionId, "SAVEPOINT savepoint_%u;", subId); transaction->lastQueuedSubXact = subId; } if (!SendRemoteCommand(connection, beginAndSetDistributedTransactionId->data)) { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, true); } } /* * FinishRemoteTransactionBegin finishes the work StartRemoteTransactionBegin * initiated. It blocks if necessary (i.e. if PQisBusy() would return true). */ void FinishRemoteTransactionBegin(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; bool clearSuccessful = true; bool raiseErrors = true; Assert(transaction->transactionState == REMOTE_TRANS_STARTING); clearSuccessful = ClearResults(connection, raiseErrors); if (clearSuccessful) { transaction->transactionState = REMOTE_TRANS_STARTED; transaction->lastSuccessfulSubXact = transaction->lastQueuedSubXact; } if (!transaction->transactionFailed) { Assert(PQtransactionStatus(connection->pgConn) == PQTRANS_INTRANS); } } /* * RemoteTransactionBegin begins a remote transaction in a blocking manner. */ void RemoteTransactionBegin(struct MultiConnection *connection) { StartRemoteTransactionBegin(connection); FinishRemoteTransactionBegin(connection); } /* * RemoteTransactionListBegin sends BEGIN over all connections in the * given connection list and waits for all of them to finish. */ void RemoteTransactionListBegin(List *connectionList) { ListCell *connectionCell = NULL; /* send BEGIN to all nodes */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); StartRemoteTransactionBegin(connection); } /* wait for BEGIN to finish on all nodes */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); FinishRemoteTransactionBegin(connection); } } /* * StartRemoteTransactionCommit initiates transaction commit in a non-blocking * manner. If the transaction is in a failed state, it'll instead get rolled * back. */ void StartRemoteTransactionCommit(MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; const bool dontRaiseError = false; const bool isCommit = true; /* can only commit if transaction is in progress */ Assert(transaction->transactionState != REMOTE_TRANS_INVALID); /* can't commit if we already started to commit or abort */ Assert(transaction->transactionState < REMOTE_TRANS_1PC_ABORTING); if (transaction->transactionFailed) { /* abort the transaction if it failed */ transaction->transactionState = REMOTE_TRANS_1PC_ABORTING; /* * Try sending an ROLLBACK; Depending on the state that won't * succeed, but let's try. Have to clear previous results * first. 
*/ ForgetResults(connection); /* try to clear pending stuff */ if (!SendRemoteCommand(connection, "ROLLBACK")) { /* no point in reporting a likely redundant message */ } } else if (transaction->transactionState == REMOTE_TRANS_PREPARED) { /* commit the prepared transaction */ StringInfoData command; initStringInfo(&command); appendStringInfo(&command, "COMMIT PREPARED '%s'", transaction->preparedName); transaction->transactionState = REMOTE_TRANS_2PC_COMMITTING; if (!SendRemoteCommand(connection, command.data)) { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, dontRaiseError); WarnAboutLeakedPreparedTransaction(connection, isCommit); } } else { /* initiate remote transaction commit */ transaction->transactionState = REMOTE_TRANS_1PC_COMMITTING; if (!SendRemoteCommand(connection, "COMMIT")) { /* * Failing in this state means that we don't know whether the * commit has succeeded. */ ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, dontRaiseError); } } } /* * FinishRemoteTransactionCommit finishes the work * StartRemoteTransactionCommit initiated. It blocks if necessary (i.e. if * PQisBusy() would return true). */ void FinishRemoteTransactionCommit(MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; PGresult *result = NULL; const bool dontRaiseErrors = false; const bool isCommit = true; Assert(transaction->transactionState == REMOTE_TRANS_1PC_ABORTING || transaction->transactionState == REMOTE_TRANS_1PC_COMMITTING || transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING); result = GetRemoteCommandResult(connection, dontRaiseErrors); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); MarkRemoteTransactionFailed(connection, dontRaiseErrors); /* * Failing in this state means that we will often not know whether * the commit has succeeded (particularly in case of network * troubles). * * XXX: It might be worthwhile to discern cases where we got a * proper error back from postgres (i.e. COMMIT was received but * produced an error) from cases where the connection failed * before getting a reply. */ if (transaction->transactionState == REMOTE_TRANS_1PC_COMMITTING) { if (transaction->transactionCritical) { ereport(WARNING, (errmsg("failed to commit critical transaction " "on %s:%d, metadata is likely out of sync", connection->hostname, connection->port))); } else { ereport(WARNING, (errmsg("failed to commit transaction on %s:%d", connection->hostname, connection->port))); } } else if (transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING) { ereport(WARNING, (errmsg("failed to commit transaction on %s:%d", connection->hostname, connection->port))); WarnAboutLeakedPreparedTransaction(connection, isCommit); } } else if (transaction->transactionState == REMOTE_TRANS_1PC_ABORTING || transaction->transactionState == REMOTE_TRANS_2PC_ABORTING) { transaction->transactionState = REMOTE_TRANS_ABORTED; } else { transaction->transactionState = REMOTE_TRANS_COMMITTED; } PQclear(result); ForgetResults(connection); } /* * RemoteTransactionCommit commits (or aborts, if the transaction failed) a * remote transaction in a blocking manner. */ void RemoteTransactionCommit(MultiConnection *connection) { StartRemoteTransactionCommit(connection); FinishRemoteTransactionCommit(connection); } /* * StartRemoteTransactionAbort initiates aborting the transaction in a * non-blocking manner.
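 *
 * For illustration only (not part of the original source): like its Begin, Commit
 * and Prepare siblings above, the function below splits the operation into a
 * non-blocking start that merely queues the command and a finish that waits for
 * the reply, so a caller can start the command on many connections before waiting
 * on any of them. The underlying libpq calls look roughly like this, shown for a
 * single connection with a placeholder conninfo and error handling trimmed:
 *
 *   #include <stdio.h>
 *   #include <libpq-fe.h>
 *
 *   int main(void)
 *   {
 *       PGconn *conn = PQconnectdb("dbname=postgres");  // placeholder conninfo
 *       PGresult *result;
 *
 *       PQsendQuery(conn, "ROLLBACK");   // start: just queue the command
 *
 *       // ... commands for other connections could be started here ...
 *
 *       while (PQisBusy(conn))           // finish: wait until a reply arrived
 *           PQconsumeInput(conn);
 *       while ((result = PQgetResult(conn)) != NULL)
 *           PQclear(result);             // drain every result
 *
 *       PQfinish(conn);
 *       return 0;
 *   }
 *
 * A production version would wait on the socket instead of spinning on
 * PQconsumeInput(); the sketch only shows the start/finish split.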
*/ void StartRemoteTransactionAbort(MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; const bool dontRaiseErrors = false; const bool isNotCommit = false; Assert(transaction->transactionState != REMOTE_TRANS_INVALID); /* * Clear previous results, so we have a better chance to send ROLLBACK * [PREPARED]. If we've previously sent a PREPARE TRANSACTION, we always * want to wait for that result, as that shouldn't take long and will * reserve resources. But if there's another query running, we don't want * to wait, because a longrunning statement may be running, force it to be * killed in that case. */ if (transaction->transactionState == REMOTE_TRANS_PREPARING || transaction->transactionState == REMOTE_TRANS_PREPARED) { StringInfoData command; /* await PREPARE TRANSACTION results, closing the connection would leave it dangling */ ForgetResults(connection); initStringInfo(&command); appendStringInfo(&command, "ROLLBACK PREPARED '%s'", transaction->preparedName); if (!SendRemoteCommand(connection, command.data)) { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, dontRaiseErrors); WarnAboutLeakedPreparedTransaction(connection, isNotCommit); } else { transaction->transactionState = REMOTE_TRANS_2PC_ABORTING; } } else { if (!NonblockingForgetResults(connection)) { ShutdownConnection(connection); /* FinishRemoteTransactionAbort will emit warning */ return; } if (!SendRemoteCommand(connection, "ROLLBACK")) { /* no point in reporting a likely redundant message */ MarkRemoteTransactionFailed(connection, dontRaiseErrors); } else { transaction->transactionState = REMOTE_TRANS_1PC_ABORTING; } } } /* * FinishRemoteTransactionAbort finishes the work StartRemoteTransactionAbort * initiated. It blocks if necessary (i.e. if PQisBusy() would return true). */ void FinishRemoteTransactionAbort(MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; PGresult *result = NULL; const bool dontRaiseErrors = false; const bool isNotCommit = false; result = GetRemoteCommandResult(connection, dontRaiseErrors); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); MarkRemoteTransactionFailed(connection, dontRaiseErrors); if (transaction->transactionState == REMOTE_TRANS_2PC_ABORTING) { WarnAboutLeakedPreparedTransaction(connection, isNotCommit); } else { ereport(WARNING, (errmsg("failed to abort 1PC transaction \"%s\" on %s:%d", transaction->preparedName, connection->hostname, connection->port))); } } PQclear(result); result = GetRemoteCommandResult(connection, dontRaiseErrors); Assert(!result); transaction->transactionState = REMOTE_TRANS_ABORTED; } /* * RemoteTransactionAbort aborts a remote transaction in a blocking manner. */ void RemoteTransactionAbort(MultiConnection *connection) { StartRemoteTransactionAbort(connection); FinishRemoteTransactionAbort(connection); } /* * StartRemoteTransactionPrepare initiates preparing the transaction in a * non-blocking manner. 
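 *
 * As an illustrative aside (not part of the original source), the prepare path
 * below, together with the commit and abort paths above, uses the standard
 * two-phase commit vocabulary: PREPARE TRANSACTION 'name' on every worker first,
 * then COMMIT PREPARED 'name' (or ROLLBACK PREPARED 'name' on failure), with the
 * name logged to pg_dist_transaction before preparing so that recovery can later
 * finish the work. The real identifier is produced by Assign2PCIdentifier, which
 * is not shown in this excerpt; the sketch simply composes the three commands
 * around a hypothetical name:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       char prepare[128];
 *       char commit[128];
 *       char rollback[128];
 *       const char *name = "citus_42_7";   // hypothetical 2PC identifier
 *
 *       snprintf(prepare, sizeof(prepare), "PREPARE TRANSACTION '%s'", name);
 *       snprintf(commit, sizeof(commit), "COMMIT PREPARED '%s'", name);
 *       snprintf(rollback, sizeof(rollback), "ROLLBACK PREPARED '%s'", name);
 *
 *       // phase 1 sends prepare to every worker; if all succeed, phase 2
 *       // sends commit, otherwise rollback
 *       printf("%s\n%s\n%s\n", prepare, commit, rollback);
 *       return 0;
 *   }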
*/ void StartRemoteTransactionPrepare(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; StringInfoData command; const bool raiseErrors = true; WorkerNode *workerNode = NULL; /* can't prepare a nonexistant transaction */ Assert(transaction->transactionState != REMOTE_TRANS_INVALID); /* can't prepare in a failed transaction */ Assert(!transaction->transactionFailed); /* can't prepare if already started to prepare/abort/commit */ Assert(transaction->transactionState < REMOTE_TRANS_PREPARING); Assign2PCIdentifier(connection); /* log transactions to workers in pg_dist_transaction */ workerNode = FindWorkerNode(connection->hostname, connection->port); if (workerNode != NULL) { LogTransactionRecord(workerNode->groupId, transaction->preparedName); } initStringInfo(&command); appendStringInfo(&command, "PREPARE TRANSACTION '%s'", transaction->preparedName); if (!SendRemoteCommand(connection, command.data)) { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, raiseErrors); } else { transaction->transactionState = REMOTE_TRANS_PREPARING; } } /* * FinishRemoteTransactionPrepare finishes the work * StartRemoteTransactionPrepare initiated. It blocks if necessary (i.e. if * PQisBusy() would return true). */ void FinishRemoteTransactionPrepare(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; PGresult *result = NULL; const bool raiseErrors = true; Assert(transaction->transactionState == REMOTE_TRANS_PREPARING); result = GetRemoteCommandResult(connection, raiseErrors); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); transaction->transactionState = REMOTE_TRANS_ABORTED; MarkRemoteTransactionFailed(connection, raiseErrors); } else { transaction->transactionState = REMOTE_TRANS_PREPARED; } result = GetRemoteCommandResult(connection, raiseErrors); Assert(!result); } /* * RemoteTransactionPrepare prepares a remote transaction in a blocking * manner. */ void RemoteTransactionPrepare(struct MultiConnection *connection) { StartRemoteTransactionPrepare(connection); FinishRemoteTransactionPrepare(connection); } /* * RemoteTransactionBeginIfNecessary is a convenience wrapper around * RemoteTransactionsBeginIfNecessary(), for a single connection. */ void RemoteTransactionBeginIfNecessary(MultiConnection *connection) { /* just delegate */ if (InCoordinatedTransaction()) { List *connectionList = list_make1(connection); RemoteTransactionsBeginIfNecessary(connectionList); list_free(connectionList); } } /* * RemoteTransactionsBeginIfNecessary begins, if necessary according to this * session's coordinated transaction state, and the remote transaction's * state, an explicit transaction on all the connections. This is done in * parallel, to lessen latency penalties. */ void RemoteTransactionsBeginIfNecessary(List *connectionList) { ListCell *connectionCell = NULL; bool raiseInterrupts = true; /* * Don't do anything if not in a coordinated transaction. That allows the * same code to work both in situations that uses transactions, and when * not. 
*/ if (!InCoordinatedTransaction()) { return; } /* issue BEGIN to all connections needing it */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); RemoteTransaction *transaction = &connection->remoteTransaction; /* can't send BEGIN if a command already is in progress */ Assert(PQtransactionStatus(connection->pgConn) != PQTRANS_ACTIVE); /* * If a transaction already is in progress (including having failed), * don't start it again. Thats quite normal if a piece of code allows * cached connections. */ if (transaction->transactionState != REMOTE_TRANS_INVALID) { continue; } StartRemoteTransactionBegin(connection); } raiseInterrupts = true; WaitForAllConnections(connectionList, raiseInterrupts); /* get result of all the BEGINs */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); RemoteTransaction *transaction = &connection->remoteTransaction; /* * Only handle BEGIN results on connections that are in process of * starting a transaction, and haven't already failed (e.g. by not * being able to send BEGIN due to a network failure). */ if (transaction->transactionFailed || transaction->transactionState != REMOTE_TRANS_STARTING) { continue; } FinishRemoteTransactionBegin(connection); } } /* * MarkRemoteTransactionFailed records a transaction as having failed. * * If the connection is marked as critical, and allowErrorPromotion is true, * this routine will ERROR out. The allowErrorPromotion case is primarily * required for the transaction management code itself. Usually it is helpful * to fail as soon as possible. If !allowErrorPromotion transaction commit * will instead issue an error before committing on any node. */ void MarkRemoteTransactionFailed(MultiConnection *connection, bool allowErrorPromotion) { RemoteTransaction *transaction = &connection->remoteTransaction; transaction->transactionFailed = true; /* * If the connection is marked as critical, fail the entire coordinated * transaction. If allowed. */ if (transaction->transactionCritical && allowErrorPromotion) { ereport(ERROR, (errmsg("failure on connection marked as essential: %s:%d", connection->hostname, connection->port))); } } /* * MarkRemoteTransactionCritical signals that failures on this remote * transaction should fail the entire coordinated transaction. */ void MarkRemoteTransactionCritical(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; transaction->transactionCritical = true; } /* * CloseRemoteTransaction handles closing a connection that, potentially, is * part of a coordinated transaction. This should only ever be called from * connection_management.c, while closing a connection during a transaction. */ void CloseRemoteTransaction(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; /* unlink from list of open transactions, if necessary */ if (transaction->transactionState != REMOTE_TRANS_INVALID) { /* XXX: Should we error out for a critical transaction? */ dlist_delete(&connection->transactionNode); } } /* * ResetRemoteTransaction resets the state of the transaction after the end of * the main transaction, if the connection is being reused. 
*/ void ResetRemoteTransaction(struct MultiConnection *connection) { RemoteTransaction *transaction = &connection->remoteTransaction; /* just reset the entire state, relying on 0 being invalid/false */ memset(transaction, 0, sizeof(*transaction)); } /* * CoordinatedRemoteTransactionsPrepare PREPAREs a 2PC transaction on all * non-failed transactions participating in the coordinated transaction. */ void CoordinatedRemoteTransactionsPrepare(void) { dlist_iter iter; bool raiseInterrupts = false; List *connectionList = NIL; /* issue PREPARE TRANSACTION; to all relevant remote nodes */ /* asynchronously send PREPARE */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; Assert(transaction->transactionState != REMOTE_TRANS_INVALID); /* can't PREPARE a transaction that failed */ if (transaction->transactionFailed) { continue; } StartRemoteTransactionPrepare(connection); connectionList = lappend(connectionList, connection); } raiseInterrupts = true; WaitForAllConnections(connectionList, raiseInterrupts); /* Wait for result */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionState != REMOTE_TRANS_PREPARING) { continue; } FinishRemoteTransactionPrepare(connection); } CurrentCoordinatedTransactionState = COORD_TRANS_PREPARED; } /* * CoordinatedRemoteTransactionsCommit performs distributed transactions * handling at commit time. This will be called at XACT_EVENT_PRE_COMMIT if * 1PC commits are used - so shards can still be invalidated - and at * XACT_EVENT_COMMIT if 2PC is being used. * * Note that this routine has to issue rollbacks for failed transactions. */ void CoordinatedRemoteTransactionsCommit(void) { dlist_iter iter; List *connectionList = NIL; bool raiseInterrupts = false; /* * Before starting to commit on any of the nodes - after which we can't * completely roll-back anymore - check that things are in a good state. */ CheckTransactionHealth(); /* * Issue appropriate transaction commands to remote nodes. If everything * went well that's going to be COMMIT or COMMIT PREPARED, if individual * connections had errors, some or all of them might require a ROLLBACK. * * First send the command asynchronously over all connections. 
*/ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionState == REMOTE_TRANS_INVALID || transaction->transactionState == REMOTE_TRANS_1PC_COMMITTING || transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING || transaction->transactionState == REMOTE_TRANS_COMMITTED || transaction->transactionState == REMOTE_TRANS_ABORTED) { continue; } StartRemoteTransactionCommit(connection); connectionList = lappend(connectionList, connection); } raiseInterrupts = false; WaitForAllConnections(connectionList, raiseInterrupts); /* wait for the replies to the commands to come in */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; /* nothing to do if not committing / aborting */ if (transaction->transactionState != REMOTE_TRANS_1PC_COMMITTING && transaction->transactionState != REMOTE_TRANS_2PC_COMMITTING && transaction->transactionState != REMOTE_TRANS_1PC_ABORTING && transaction->transactionState != REMOTE_TRANS_2PC_ABORTING) { continue; } FinishRemoteTransactionCommit(connection); } } /* * CoordinatedRemoteTransactionsAbort performs distributed transactions * handling at abort time. * * This issues ROLLBACKS and ROLLBACK PREPARED depending on whether the remote * transaction has been prepared or not. */ void CoordinatedRemoteTransactionsAbort(void) { dlist_iter iter; List *connectionList = NIL; bool raiseInterrupts = false; /* asynchronously send ROLLBACK [PREPARED] */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionState == REMOTE_TRANS_INVALID || transaction->transactionState == REMOTE_TRANS_1PC_ABORTING || transaction->transactionState == REMOTE_TRANS_2PC_ABORTING || transaction->transactionState == REMOTE_TRANS_ABORTED) { continue; } StartRemoteTransactionAbort(connection); connectionList = lappend(connectionList, connection); } raiseInterrupts = false; WaitForAllConnections(connectionList, raiseInterrupts); /* and wait for the results */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionState != REMOTE_TRANS_1PC_ABORTING && transaction->transactionState != REMOTE_TRANS_2PC_ABORTING) { continue; } FinishRemoteTransactionAbort(connection); } } /* * CoordinatedRemoteTransactionsSavepointBegin sends the SAVEPOINT command for * the given sub-transaction id to all connections participating in the current * transaction. 
*/ void CoordinatedRemoteTransactionsSavepointBegin(SubTransactionId subId) { dlist_iter iter; const bool raiseInterrupts = true; List *connectionList = NIL; /* asynchronously send SAVEPOINT */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed) { continue; } StartRemoteTransactionSavepointBegin(connection, subId); connectionList = lappend(connectionList, connection); } WaitForAllConnections(connectionList, raiseInterrupts); /* and wait for the results */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed) { continue; } FinishRemoteTransactionSavepointBegin(connection, subId); if (!transaction->transactionFailed) { transaction->lastSuccessfulSubXact = subId; } } } /* * CoordinatedRemoteTransactionsSavepointRelease sends the RELEASE SAVEPOINT * command for the given sub-transaction id to all connections participating in * the current transaction. */ void CoordinatedRemoteTransactionsSavepointRelease(SubTransactionId subId) { dlist_iter iter; const bool raiseInterrupts = true; List *connectionList = NIL; /* asynchronously send RELEASE SAVEPOINT */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed) { continue; } StartRemoteTransactionSavepointRelease(connection, subId); connectionList = lappend(connectionList, connection); } WaitForAllConnections(connectionList, raiseInterrupts); /* and wait for the results */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed) { continue; } FinishRemoteTransactionSavepointRelease(connection, subId); } } /* * CoordinatedRemoteTransactionsSavepointRollback sends the ROLLBACK TO SAVEPOINT * command for the given sub-transaction id to all connections participating in * the current transaction. */ void CoordinatedRemoteTransactionsSavepointRollback(SubTransactionId subId) { dlist_iter iter; const bool dontRaiseInterrupts = false; List *connectionList = NIL; /* asynchronously send ROLLBACK TO SAVEPOINT */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed) { if (transaction->lastSuccessfulSubXact <= subId) { transaction->transactionRecovering = true; /* * Clear the results of the failed query so we can send the ROLLBACK * TO SAVEPOINT command for a savepoint that can recover the transaction * from failure. 
*/ ForgetResults(connection); } else { continue; } } StartRemoteTransactionSavepointRollback(connection, subId); connectionList = lappend(connectionList, connection); } WaitForAllConnections(connectionList, dontRaiseInterrupts); /* and wait for the results */ dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; if (transaction->transactionFailed && !transaction->transactionRecovering) { continue; } FinishRemoteTransactionSavepointRollback(connection, subId); } } /* * StartRemoteTransactionSavepointBegin initiates SAVEPOINT command for the given * subtransaction id in a non-blocking manner. */ static void StartRemoteTransactionSavepointBegin(MultiConnection *connection, SubTransactionId subId) { const bool raiseErrors = true; StringInfo savepointCommand = makeStringInfo(); appendStringInfo(savepointCommand, "SAVEPOINT savepoint_%u", subId); if (!SendRemoteCommand(connection, savepointCommand->data)) { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, raiseErrors); } } /* * FinishRemoteTransactionSavepointBegin finishes the work * StartRemoteTransactionSavepointBegin initiated. It blocks if necessary (i.e. * if PQisBusy() would return true). */ static void FinishRemoteTransactionSavepointBegin(MultiConnection *connection, SubTransactionId subId) { const bool raiseErrors = true; PGresult *result = GetRemoteCommandResult(connection, raiseErrors); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); MarkRemoteTransactionFailed(connection, raiseErrors); } PQclear(result); ForgetResults(connection); } /* * StartRemoteTransactionSavepointRelease initiates RELEASE SAVEPOINT command for * the given subtransaction id in a non-blocking manner. */ static void StartRemoteTransactionSavepointRelease(MultiConnection *connection, SubTransactionId subId) { const bool raiseErrors = true; StringInfo savepointCommand = makeStringInfo(); appendStringInfo(savepointCommand, "RELEASE SAVEPOINT savepoint_%u", subId); if (!SendRemoteCommand(connection, savepointCommand->data)) { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, raiseErrors); } } /* * FinishRemoteTransactionSavepointRelease finishes the work * StartRemoteTransactionSavepointRelease initiated. It blocks if necessary (i.e. * if PQisBusy() would return true). */ static void FinishRemoteTransactionSavepointRelease(MultiConnection *connection, SubTransactionId subId) { const bool raiseErrors = true; PGresult *result = GetRemoteCommandResult(connection, raiseErrors); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); MarkRemoteTransactionFailed(connection, raiseErrors); } PQclear(result); ForgetResults(connection); } /* * StartRemoteTransactionSavepointRollback initiates ROLLBACK TO SAVEPOINT command * for the given subtransaction id in a non-blocking manner. 
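 *
 * The command sent has the form ROLLBACK TO SAVEPOINT savepoint_<subId>,
 * matching the name chosen by StartRemoteTransactionSavepointBegin.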
*/ static void StartRemoteTransactionSavepointRollback(MultiConnection *connection, SubTransactionId subId) { const bool dontRaiseErrors = false; StringInfo savepointCommand = makeStringInfo(); appendStringInfo(savepointCommand, "ROLLBACK TO SAVEPOINT savepoint_%u", subId); if (!SendRemoteCommand(connection, savepointCommand->data)) { ReportConnectionError(connection, WARNING); MarkRemoteTransactionFailed(connection, dontRaiseErrors); } } /* * FinishRemoteTransactionSavepointRollback finishes the work * StartRemoteTransactionSavepointRollback initiated. It blocks if necessary (i.e. * if PQisBusy() would return true). It also recovers the transaction from failure * if transaction is recovering and the rollback command succeeds. */ static void FinishRemoteTransactionSavepointRollback(MultiConnection *connection, SubTransactionId subId) { const bool dontRaiseErrors = false; RemoteTransaction *transaction = &connection->remoteTransaction; PGresult *result = GetRemoteCommandResult(connection, dontRaiseErrors); if (!IsResponseOK(result)) { ReportResultError(connection, result, WARNING); MarkRemoteTransactionFailed(connection, dontRaiseErrors); } /* ROLLBACK TO SAVEPOINT succeeded, check if it recovers the transaction */ else if (transaction->transactionRecovering) { transaction->transactionFailed = false; transaction->transactionRecovering = false; } PQclear(result); ForgetResults(connection); } /* * CheckTransactionHealth checks if any of the participating transactions in a * coordinated transaction failed, and what consequence that should have. * This needs to be called before the coordinated transaction commits (but * after they've been PREPAREd if 2PC is in use). */ static void CheckTransactionHealth(void) { dlist_iter iter; dlist_foreach(iter, &InProgressTransactions) { MultiConnection *connection = dlist_container(MultiConnection, transactionNode, iter.cur); RemoteTransaction *transaction = &connection->remoteTransaction; PGTransactionStatusType status = PQtransactionStatus(connection->pgConn); /* if the connection is in a bad state, so is the transaction's state */ if (status == PQTRANS_INERROR || status == PQTRANS_UNKNOWN) { transaction->transactionFailed = true; } /* * If a critical connection is marked as failed (and no error has been * raised yet) do so now. */ if (transaction->transactionFailed && transaction->transactionCritical) { ereport(ERROR, (errmsg("failure on connection marked as essential: %s:%d", connection->hostname, connection->port))); } } } /* * Assign2PCIdentifier computes the 2PC transaction name to use for a * transaction. Every prepared transaction should get a new name, i.e. this * function will need to be called again. * * The format of the name is: * * citus____ * * (at most 5+1+10+1+10+20+1+10 = 58 characters, while limit is 64) * * The source group is used to distinguish 2PCs started by different * coordinators. A coordinator will only attempt to recover its own 2PCs. * * The pid is used to distinguish different processes on the coordinator, mainly * to provide some entropy across restarts. * * The distributed transaction number is used to distinguish different * transactions originating from the same node (since restart). * * The connection number is used to distinguish connections made to a node * within the same transaction. * * NB: we rely on the fact that we don't need to do full escaping on the names * generated here. 
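 *
 * As a purely illustrative example, a coordinator in group 2 whose backend
 * has pid 9001, running distributed transaction number 42 and opening its
 * first connection to a node, would generate the name:
 *
 *     citus_2_9001_42_0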
*/ static void Assign2PCIdentifier(MultiConnection *connection) { /* local sequence number used to distinguish different connections */ static uint32 connectionNumber = 0; /* transaction identifier that is unique across processes */ uint64 transactionNumber = CurrentDistributedTransactionNumber(); /* print all numbers as unsigned to guarantee no minus symbols appear in the name */ snprintf(connection->remoteTransaction.preparedName, NAMEDATALEN, "citus_%u_%u_"UINT64_FORMAT "_%u", GetLocalGroupId(), MyProcPid, transactionNumber, connectionNumber++); } /* * WarnAboutLeakedPreparedTransaction issues a WARNING explaining that a * prepared transaction could not be committed or rolled back, and explains * how to perform cleanup. */ static void WarnAboutLeakedPreparedTransaction(MultiConnection *connection, bool commit) { StringInfoData command; RemoteTransaction *transaction = &connection->remoteTransaction; initStringInfo(&command); if (commit) { appendStringInfo(&command, "COMMIT PREPARED '%s'", transaction->preparedName); } else { appendStringInfo(&command, "ROLLBACK PREPARED '%s'", transaction->preparedName); } /* log a warning so the user may abort the transaction later */ ereport(WARNING, (errmsg("failed to roll back prepared transaction '%s'", transaction->preparedName), errhint("Run \"%s\" on %s:%u", command.data, connection->hostname, connection->port))); } citus-7.0.3/src/backend/distributed/transaction/transaction_management.c000066400000000000000000000253531317107136600265770ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * transaction_management.c * * Transaction management for Citus. Most of the work is delegated to other * subsystems, this files, and especially CoordinatedTransactionCallback, * coordinates the work between them. * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "libpq-fe.h" #include "miscadmin.h" #include "access/twophase.h" #include "access/xact.h" #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/hash_helpers.h" #include "distributed/multi_shard_transaction.h" #include "distributed/transaction_management.h" #include "distributed/placement_connection.h" #include "utils/hsearch.h" #include "utils/guc.h" #include "utils/memutils.h" CoordinatedTransactionState CurrentCoordinatedTransactionState = COORD_TRANS_NONE; /* GUC, the commit protocol to use for commands affecting more than one connection */ int MultiShardCommitProtocol = COMMIT_PROTOCOL_1PC; int SavedMultiShardCommitProtocol = COMMIT_PROTOCOL_BARE; /* state needed to keep track of operations used during a transaction */ XactModificationType XactModificationLevel = XACT_MODIFICATION_NONE; /* list of connections that are part of the current coordinated transaction */ dlist_head InProgressTransactions = DLIST_STATIC_INIT(InProgressTransactions); /* stack of active sub-transactions */ static List *activeSubXacts = NIL; /* * Should this coordinated transaction use 2PC? Set by * CoordinatedTransactionUse2PC(), e.g. if DDL was issued and * MultiShardCommitProtocol was set to 2PC. 
*/ bool CoordinatedTransactionUses2PC = false; /* transaction management functions */ static void CoordinatedTransactionCallback(XactEvent event, void *arg); static void CoordinatedSubTransactionCallback(SubXactEvent event, SubTransactionId subId, SubTransactionId parentSubid, void *arg); /* remaining functions */ static void AdjustMaxPreparedTransactions(void); static void PushSubXact(SubTransactionId subId); static void PopSubXact(SubTransactionId subId); /* * BeginCoordinatedTransaction begins a coordinated transaction. No * pre-existing coordinated transaction may be in progress. */ void BeginCoordinatedTransaction(void) { if (CurrentCoordinatedTransactionState != COORD_TRANS_NONE && CurrentCoordinatedTransactionState != COORD_TRANS_IDLE) { ereport(ERROR, (errmsg("starting transaction in wrong state"))); } CurrentCoordinatedTransactionState = COORD_TRANS_STARTED; AssignDistributedTransactionId(); } /* * BeginOrContinueCoordinatedTransaction starts a coordinated transaction, * unless one already is in progress. */ void BeginOrContinueCoordinatedTransaction(void) { if (CurrentCoordinatedTransactionState == COORD_TRANS_STARTED) { return; } BeginCoordinatedTransaction(); } /* * InCoordinatedTransaction returns whether a coordinated transaction has been * started. */ bool InCoordinatedTransaction(void) { return CurrentCoordinatedTransactionState != COORD_TRANS_NONE && CurrentCoordinatedTransactionState != COORD_TRANS_IDLE; } /* * CoordinatedTransactionUse2PC() signals that the current coordinated * transaction should use 2PC to commit. */ void CoordinatedTransactionUse2PC(void) { Assert(InCoordinatedTransaction()); CoordinatedTransactionUses2PC = true; } void InitializeTransactionManagement(void) { /* hook into transaction machinery */ RegisterXactCallback(CoordinatedTransactionCallback, NULL); RegisterSubXactCallback(CoordinatedSubTransactionCallback, NULL); AdjustMaxPreparedTransactions(); } /* * Transaction management callback, handling coordinated transaction, and * transaction independent connection management. * * NB: There should only ever be a single transaction callback in citus, the * ordering between the callbacks and thee actions within those callbacks * otherwise becomes too undeterministic / hard to reason about. */ static void CoordinatedTransactionCallback(XactEvent event, void *arg) { switch (event) { case XACT_EVENT_COMMIT: { /* * Call other parts of citus that need to integrate into * transaction management. Do so before doing other work, so the * callbacks still can perform work if needed. */ ResetShardPlacementTransactionState(); if (CurrentCoordinatedTransactionState == COORD_TRANS_PREPARED) { /* handles both already prepared and open transactions */ CoordinatedRemoteTransactionsCommit(); } /* close connections etc. */ if (CurrentCoordinatedTransactionState != COORD_TRANS_NONE) { ResetPlacementConnectionManagement(); AfterXactConnectionHandling(true); } CurrentCoordinatedTransactionState = COORD_TRANS_NONE; XactModificationLevel = XACT_MODIFICATION_NONE; dlist_init(&InProgressTransactions); CoordinatedTransactionUses2PC = false; UnSetDistributedTransactionId(); break; } case XACT_EVENT_ABORT: { /* * FIXME: Add warning for the COORD_TRANS_COMMITTED case. That * can be reached if this backend fails after the * XACT_EVENT_PRE_COMMIT state. */ /* * Call other parts of citus that need to integrate into * transaction management. Do so before doing other work, so the * callbacks still can perform work if needed. 
*/ ResetShardPlacementTransactionState(); /* handles both already prepared and open transactions */ if (CurrentCoordinatedTransactionState > COORD_TRANS_IDLE) { CoordinatedRemoteTransactionsAbort(); } /* close connections etc. */ if (CurrentCoordinatedTransactionState != COORD_TRANS_NONE) { ResetPlacementConnectionManagement(); AfterXactConnectionHandling(false); } CurrentCoordinatedTransactionState = COORD_TRANS_NONE; XactModificationLevel = XACT_MODIFICATION_NONE; dlist_init(&InProgressTransactions); CoordinatedTransactionUses2PC = false; UnSetDistributedTransactionId(); break; } case XACT_EVENT_PARALLEL_COMMIT: case XACT_EVENT_PARALLEL_ABORT: { break; } case XACT_EVENT_PREPARE: { UnSetDistributedTransactionId(); break; } case XACT_EVENT_PRE_COMMIT: { /* nothing further to do if there's no managed remote xacts */ if (CurrentCoordinatedTransactionState == COORD_TRANS_NONE) { break; } /* * TODO: It'd probably be a good idea to force constraints and * such to 'immediate' here. Deferred triggers might try to send * stuff to the remote side, which'd not be good. Doing so * remotely would also catch a class of errors where committing * fails, which can lead to divergence when not using 2PC. */ /* * Check whether the coordinated transaction is in a state we want * to persist, or whether we want to error out. This handles the * case where iteratively executed commands marked all placements * as invalid. */ MarkFailedShardPlacements(); if (CoordinatedTransactionUses2PC) { CoordinatedRemoteTransactionsPrepare(); CurrentCoordinatedTransactionState = COORD_TRANS_PREPARED; } else { /* * Have to commit remote transactions in PRE_COMMIT, to allow * us to mark failed placements as invalid. Better don't use * this for anything important (i.e. DDL/metadata). */ CoordinatedRemoteTransactionsCommit(); CurrentCoordinatedTransactionState = COORD_TRANS_COMMITTED; } /* * Check again whether shards/placement successfully * committed. This handles failure at COMMIT/PREPARE time. */ PostCommitMarkFailedShardPlacements(CoordinatedTransactionUses2PC); break; } case XACT_EVENT_PARALLEL_PRE_COMMIT: case XACT_EVENT_PRE_PREPARE: { if (CurrentCoordinatedTransactionState > COORD_TRANS_NONE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot use 2PC in transactions involving " "multiple servers"))); } break; } } } /* * Subtransaction callback - currently only used to remember whether a * savepoint has been rolled back, as we don't support that. */ static void CoordinatedSubTransactionCallback(SubXactEvent event, SubTransactionId subId, SubTransactionId parentSubid, void *arg) { switch (event) { case SUBXACT_EVENT_START_SUB: { PushSubXact(subId); if (InCoordinatedTransaction()) { CoordinatedRemoteTransactionsSavepointBegin(subId); } break; } case SUBXACT_EVENT_COMMIT_SUB: { PopSubXact(subId); if (InCoordinatedTransaction()) { CoordinatedRemoteTransactionsSavepointRelease(subId); } break; } case SUBXACT_EVENT_ABORT_SUB: { PopSubXact(subId); if (InCoordinatedTransaction()) { CoordinatedRemoteTransactionsSavepointRollback(subId); } break; } case SUBXACT_EVENT_PRE_COMMIT_SUB: { /* nothing to do */ break; } } } /* * AdjustMaxPreparedTransactions configures the number of available prepared * transaction slots at startup. */ static void AdjustMaxPreparedTransactions(void) { /* * As Citus uses 2PC internally, there always should be some available. As * the default is 0, we increase it to something appropriate * (connections * 2 currently). 
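For example, with PostgreSQL's stock default of max_connections = 100 this results in max_prepared_transactions being set to 200.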
If the user explicitly configured 2PC, we * leave the configuration alone - there might have been intent behind the * decision. */ if (max_prepared_xacts == 0) { char newvalue[12]; snprintf(newvalue, sizeof(newvalue), "%d", MaxConnections * 2); SetConfigOption("max_prepared_transactions", newvalue, PGC_POSTMASTER, PGC_S_OVERRIDE); ereport(LOG, (errmsg("number of prepared transactions has not been " "configured, overriding"), errdetail("max_prepared_transactions is now set to %s", newvalue))); } } /* PushSubXact pushes subId to the stack of active sub-transactions. */ static void PushSubXact(SubTransactionId subId) { MemoryContext old_context = MemoryContextSwitchTo(CurTransactionContext); activeSubXacts = lcons_int(subId, activeSubXacts); MemoryContextSwitchTo(old_context); } /* PopSubXact pops subId from the stack of active sub-transactions. */ static void PopSubXact(SubTransactionId subId) { MemoryContext old_context = MemoryContextSwitchTo(CurTransactionContext); Assert(linitial_int(activeSubXacts) == subId); activeSubXacts = list_delete_first(activeSubXacts); MemoryContextSwitchTo(old_context); } /* ActiveSubXacts returns list of active sub-transactions in temporal order. */ List * ActiveSubXacts(void) { ListCell *subIdCell = NULL; List *activeSubXactsReversed = NIL; /* * activeSubXacts is in reversed temporal order, so we reverse it to get it * in temporal order. */ foreach(subIdCell, activeSubXacts) { SubTransactionId subId = lfirst_int(subIdCell); activeSubXactsReversed = lcons_int(subId, activeSubXactsReversed); } return activeSubXactsReversed; } citus-7.0.3/src/backend/distributed/transaction/transaction_recovery.c000066400000000000000000000354211317107136600263160ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * transaction_recovery.c * * Routines for recovering two-phase commits started by this node if a * failure occurs between prepare and commit/abort. * * Copyright (c) 2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "libpq-fe.h" #include #include #include "access/heapam.h" #include "access/htup_details.h" #include "access/relscan.h" #include "access/xact.h" #include "catalog/indexing.h" #include "distributed/connection_management.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/pg_dist_transaction.h" #include "distributed/remote_commands.h" #include "distributed/transaction_recovery.h" #include "distributed/worker_manager.h" #include "lib/stringinfo.h" #include "storage/lmgr.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/memutils.h" #include "utils/rel.h" /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(recover_prepared_transactions); /* Local functions forward declarations */ static int RecoverPreparedTransactions(void); static int RecoverWorkerTransactions(WorkerNode *workerNode); static List * NameListDifference(List *nameList, List *subtractList); static int CompareNames(const void *leftPointer, const void *rightPointer); static bool FindMatchingName(char **nameArray, int nameCount, char *needle, int *matchIndex); static List * PendingWorkerTransactionList(MultiConnection *connection); static List * UnconfirmedWorkerTransactionsList(int groupId); static void DeleteTransactionRecord(int32 groupId, char *transactionName); /* * recover_prepared_transactions recovers any pending prepared * transactions started by this node on other nodes. */ Datum recover_prepared_transactions(PG_FUNCTION_ARGS) { int recoveredTransactionCount = 0; CheckCitusVersion(ERROR); recoveredTransactionCount = RecoverPreparedTransactions(); PG_RETURN_INT32(recoveredTransactionCount); } /* * LogTransactionRecord registers the fact that a transaction has been * prepared on a worker. The presence of this record indicates that the * prepared transaction should be committed. */ void LogTransactionRecord(int groupId, char *transactionName) { Relation pgDistTransaction = NULL; TupleDesc tupleDescriptor = NULL; HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_transaction]; bool isNulls[Natts_pg_dist_transaction]; /* form new transaction tuple */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); values[Anum_pg_dist_transaction_groupid - 1] = Int32GetDatum(groupId); values[Anum_pg_dist_transaction_gid - 1] = CStringGetTextDatum(transactionName); /* open transaction relation and insert new tuple */ pgDistTransaction = heap_open(DistTransactionRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistTransaction); heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistTransaction, heapTuple); CommandCounterIncrement(); /* close relation and invalidate previous cache entry */ heap_close(pgDistTransaction, NoLock); } /* * RecoverPreparedTransactions recovers any pending prepared * transactions started by this node on other nodes. */ static int RecoverPreparedTransactions(void) { List *workerList = NIL; ListCell *workerNodeCell = NULL; int recoveredTransactionCount = 0; /* * We block here if metadata transactions are ongoing, since we * mustn't commit/abort their prepared transactions under their * feet. We also prevent concurrent recovery. 
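 *
 * This is the workhorse behind the SQL-callable function
 * recover_prepared_transactions(), so running
 * SELECT recover_prepared_transactions(); on the coordinator ends up here.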
*/ LockRelationOid(DistTransactionRelationId(), ExclusiveLock); workerList = ActivePrimaryNodeList(); foreach(workerNodeCell, workerList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); recoveredTransactionCount += RecoverWorkerTransactions(workerNode); } return recoveredTransactionCount; } /* * RecoverWorkerTransactions recovers any pending prepared transactions * started by this node on the specified worker. */ static int RecoverWorkerTransactions(WorkerNode *workerNode) { int recoveredTransactionCount = 0; int groupId = workerNode->groupId; char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; List *pendingTransactionList = NIL; ListCell *pendingTransactionCell = NULL; List *unconfirmedTransactionList = NIL; char **unconfirmedTransactionArray = NULL; int unconfirmedTransactionCount = 0; int unconfirmedTransactionIndex = 0; List *committedTransactionList = NIL; ListCell *committedTransactionCell = NULL; MemoryContext localContext = NULL; MemoryContext oldContext = NULL; int connectionFlags = SESSION_LIFESPAN; MultiConnection *connection = GetNodeConnection(connectionFlags, nodeName, nodePort); if (connection->pgConn == NULL) { /* cannot recover transactions on this worker right now */ return 0; } localContext = AllocSetContextCreate(CurrentMemoryContext, "RecoverWorkerTransactions", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); oldContext = MemoryContextSwitchTo(localContext); /* find transactions that were committed, but not yet confirmed */ unconfirmedTransactionList = UnconfirmedWorkerTransactionsList(groupId); unconfirmedTransactionList = SortList(unconfirmedTransactionList, CompareNames); /* convert list to an array to use with FindMatchingNames */ unconfirmedTransactionCount = list_length(unconfirmedTransactionList); unconfirmedTransactionArray = (char **) PointerArrayFromList(unconfirmedTransactionList); /* find stale prepared transactions on the remote node */ pendingTransactionList = PendingWorkerTransactionList(connection); pendingTransactionList = SortList(pendingTransactionList, CompareNames); /* * Transactions that have no pending prepared transaction are assumed to * have been committed. Any records in unconfirmedTransactionList that * don't have a transaction in pendingTransactionList can be removed. */ committedTransactionList = NameListDifference(unconfirmedTransactionList, pendingTransactionList); /* * For each pending prepared transaction, check whether there is a transaction * record. If so, commit. If not, the transaction that started the transaction * must have rolled back and thus the prepared transaction should be aborted. 
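 *
 * As a purely illustrative example: if the worker still holds a prepared
 * transaction named citus_0_9001_7_0 and pg_dist_transaction still has a
 * matching record for this worker's group, the loop issues
 * COMMIT PREPARED 'citus_0_9001_7_0'; if no such record exists it issues
 * ROLLBACK PREPARED 'citus_0_9001_7_0' instead.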
*/ foreach(pendingTransactionCell, pendingTransactionList) { char *transactionName = (char *) lfirst(pendingTransactionCell); StringInfo command = makeStringInfo(); int executeCommand = 0; PGresult *result = NULL; bool shouldCommit = FindMatchingName(unconfirmedTransactionArray, unconfirmedTransactionCount, transactionName, &unconfirmedTransactionIndex); if (shouldCommit) { /* should have committed this prepared transaction */ appendStringInfo(command, "COMMIT PREPARED '%s'", transactionName); } else { /* no record of this prepared transaction, abort */ appendStringInfo(command, "ROLLBACK PREPARED '%s'", transactionName); } executeCommand = ExecuteOptionalRemoteCommand(connection, command->data, &result); if (executeCommand == QUERY_SEND_FAILED) { break; } if (executeCommand == RESPONSE_NOT_OKAY) { /* cannot recover this transaction right now */ continue; } PQclear(result); ForgetResults(connection); ereport(NOTICE, (errmsg("recovered a prepared transaction on %s:%d", nodeName, nodePort), errcontext("%s", command->data))); if (shouldCommit) { committedTransactionList = lappend(committedTransactionList, transactionName); } recoveredTransactionCount += 1; } /* we can remove the transaction records of confirmed transactions */ foreach(committedTransactionCell, committedTransactionList) { char *transactionName = (char *) lfirst(committedTransactionCell); DeleteTransactionRecord(groupId, transactionName); } MemoryContextReset(localContext); MemoryContextSwitchTo(oldContext); return recoveredTransactionCount; } /* * NameListDifference returns the difference between the bag of * names in nameList and subtractList. Both are assumed to be * sorted. We cannot use list_difference_ptr here since we need * to compare the actual strings. */ static List * NameListDifference(List *nameList, List *subtractList) { List *differenceList = NIL; ListCell *nameCell = NULL; int subtractIndex = 0; int subtractCount = list_length(subtractList); char **subtractArray = (char **) PointerArrayFromList(subtractList); foreach(nameCell, nameList) { char *baseName = (char *) lfirst(nameCell); bool nameFound = FindMatchingName(subtractArray, subtractCount, baseName, &subtractIndex); if (!nameFound) { /* * baseName is not in subtractArray and thus included * in the difference. */ differenceList = lappend(differenceList, baseName); } } pfree(subtractArray); return differenceList; } /* * CompareNames compares names using strncmp. Its signature allows it to * be used in qsort. */ static int CompareNames(const void *leftPointer, const void *rightPointer) { const char *leftString = *((char **) leftPointer); const char *rightString = *((char **) rightPointer); int nameCompare = strncmp(leftString, rightString, NAMEDATALEN); return nameCompare; } /* * FindMatchingName searches for name in nameArray, starting at the * value pointed to by matchIndex and stopping at the first index of * name which is greater or equal to needle. nameArray is assumed * to be sorted. * * The function sets matchIndex to the index of the name and returns * true if the name is equal to needle. If matchIndex >= nameCount, * then the function always returns false. 
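 *
 * As an illustration with hypothetical names: searching the sorted array
 * { "citus_0_10_1_0", "citus_0_10_2_0", "citus_0_10_4_0" } for
 * "citus_0_10_2_0" with *matchIndex starting at 0 stops at index 1 and
 * returns true; a follow-up search for "citus_0_10_3_0" resumes from
 * index 1, stops at index 2 and returns false. This is what lets callers
 * walk two sorted name lists in a single merge-style pass.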
*/ static bool FindMatchingName(char **nameArray, int nameCount, char *needle, int *matchIndex) { bool foundMatchingName = false; int searchIndex = *matchIndex; int compareResult = -1; while (searchIndex < nameCount) { char *testName = nameArray[searchIndex]; compareResult = strncmp(needle, testName, NAMEDATALEN); if (compareResult <= 0) { break; } searchIndex++; } *matchIndex = searchIndex; if (compareResult == 0) { foundMatchingName = true; } return foundMatchingName; } /* * PendingWorkerTransactionList returns a list of pending prepared * transactions on a remote node that were started by this node. */ static List * PendingWorkerTransactionList(MultiConnection *connection) { StringInfo command = makeStringInfo(); bool raiseInterrupts = true; int querySent = 0; PGresult *result = NULL; int rowCount = 0; int rowIndex = 0; List *transactionNames = NIL; int coordinatorId = GetLocalGroupId(); appendStringInfo(command, "SELECT gid FROM pg_prepared_xacts " "WHERE gid LIKE 'citus_%d_%%'", coordinatorId); querySent = SendRemoteCommand(connection, command->data); if (querySent == 0) { ReportConnectionError(connection, ERROR); } result = GetRemoteCommandResult(connection, raiseInterrupts); if (!IsResponseOK(result)) { ReportResultError(connection, result, ERROR); } rowCount = PQntuples(result); for (rowIndex = 0; rowIndex < rowCount; rowIndex++) { const int columnIndex = 0; char *transactionName = PQgetvalue(result, rowIndex, columnIndex); transactionNames = lappend(transactionNames, pstrdup(transactionName)); } PQclear(result); ForgetResults(connection); return transactionNames; } /* * UnconfirmedWorkerTransactionList returns a list of unconfirmed transactions * for a group of workers from pg_dist_transaction. A transaction is confirmed * once we have verified that it does not exist in pg_prepared_xacts on the * remote node and the entry in pg_dist_transaction is removed. */ static List * UnconfirmedWorkerTransactionsList(int groupId) { List *transactionNameList = NIL; Relation pgDistTransaction = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; bool indexOK = true; HeapTuple heapTuple = NULL; pgDistTransaction = heap_open(DistTransactionRelationId(), AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_transaction_groupid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId)); scanDescriptor = systable_beginscan(pgDistTransaction, DistTransactionGroupIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction); bool isNull = false; Datum transactionNameDatum = heap_getattr(heapTuple, Anum_pg_dist_transaction_gid, tupleDescriptor, &isNull); char *transactionName = TextDatumGetCString(transactionNameDatum); transactionNameList = lappend(transactionNameList, transactionName); heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); heap_close(pgDistTransaction, AccessShareLock); return transactionNameList; } /* * DeleteTransactionRecord opens the pg_dist_transaction system catalog, finds the * first (unique) row that corresponds to the given transactionName and worker node, * and deletes this row. 
*/ static void DeleteTransactionRecord(int32 groupId, char *transactionName) { Relation pgDistTransaction = NULL; SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[2]; int scanKeyCount = 2; bool indexOK = true; HeapTuple heapTuple = NULL; bool heapTupleFound = false; pgDistTransaction = heap_open(DistTransactionRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_transaction_groupid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(groupId)); ScanKeyInit(&scanKey[1], Anum_pg_dist_transaction_gid, BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(transactionName)); scanDescriptor = systable_beginscan(pgDistTransaction, DistTransactionRecordIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { TupleDesc tupleDescriptor = RelationGetDescr(pgDistTransaction); bool isNull = false; Datum gidDatum = heap_getattr(heapTuple, Anum_pg_dist_transaction_gid, tupleDescriptor, &isNull); char *gid = TextDatumGetCString(gidDatum); if (strncmp(transactionName, gid, NAMEDATALEN) == 0) { heapTupleFound = true; break; } heapTuple = systable_getnext(scanDescriptor); } /* if we couldn't find the transaction record to delete, error out */ if (!heapTupleFound) { ereport(ERROR, (errmsg("could not find valid entry for transaction record " "'%s' in group %d", transactionName, groupId))); } simple_heap_delete(pgDistTransaction, &heapTuple->t_self); CommandCounterIncrement(); systable_endscan(scanDescriptor); heap_close(pgDistTransaction, RowExclusiveLock); } citus-7.0.3/src/backend/distributed/transaction/worker_transaction.c000066400000000000000000000160041317107136600257650ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_transaction.c * * Routines for performing transactions across all workers. * * Copyright (c) 2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "libpq-fe.h" #include #include #include "access/xact.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" #include "distributed/multi_shard_transaction.h" #include "distributed/resource_lock.h" #include "distributed/remote_commands.h" #include "distributed/pg_dist_node.h" #include "distributed/pg_dist_transaction.h" #include "distributed/transaction_recovery.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" #include "utils/memutils.h" /* * SendCommandToWorker sends a command to a particular worker as part of the * 2PC. */ void SendCommandToWorker(char *nodeName, int32 nodePort, char *command) { MultiConnection *transactionConnection = NULL; char *nodeUser = CitusExtensionOwnerName(); int connectionFlags = 0; BeginOrContinueCoordinatedTransaction(); CoordinatedTransactionUse2PC(); transactionConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, nodeUser, NULL); MarkRemoteTransactionCritical(transactionConnection); RemoteTransactionBeginIfNecessary(transactionConnection); ExecuteCriticalRemoteCommand(transactionConnection, command); } /* * SendCommandToWorkers sends a command to all workers in * parallel. Commands are committed on the workers when the local * transaction commits. The connection are made as the extension * owner to ensure write access to the Citus metadata tables. 
*/ void SendCommandToWorkers(TargetWorkerSet targetWorkerSet, char *command) { SendCommandToWorkersParams(targetWorkerSet, command, 0, NULL, NULL); } /* * SendBareCommandListToWorkers sends a list of commands to a set of target * workers in serial. Commands are committed immediately: new connections are * always used and no transaction block is used (hence "bare"). The connections * are made as the extension owner to ensure write access to the Citus metadata * tables. Primarly useful for INDEX commands using CONCURRENTLY. */ void SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList) { List *workerNodeList = ActivePrimaryNodeList(); ListCell *workerNodeCell = NULL; char *nodeUser = CitusExtensionOwnerName(); ListCell *commandCell = NULL; /* run commands serially */ foreach(workerNodeCell, workerNodeList) { MultiConnection *workerConnection = NULL; WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; int connectionFlags = FORCE_NEW_CONNECTION; if (targetWorkerSet == WORKERS_WITH_METADATA && !workerNode->hasMetadata) { continue; } workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, nodeUser, NULL); /* iterate over the commands and execute them in the same connection */ foreach(commandCell, commandList) { char *commandString = lfirst(commandCell); ExecuteCriticalRemoteCommand(workerConnection, commandString); } CloseConnection(workerConnection); } } /* * SendCommandToWorkersParams sends a command to all workers in parallel. * Commands are committed on the workers when the local transaction commits. The * connection are made as the extension owner to ensure write access to the Citus * metadata tables. Parameters can be specified as for PQexecParams, except that * paramLengths, paramFormats and resultFormat are hard-coded to NULL, NULL and 0 * respectively. 
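 *
 * A hypothetical call site (the query and variable names below are
 * illustrative only, not taken from actual callers) could look like:
 *
 *   Oid paramTypes[1] = { TEXTOID };
 *   const char *paramValues[1] = { nodeNameString };
 *
 *   SendCommandToWorkersParams(WORKERS_WITH_METADATA,
 *                              "DELETE FROM pg_dist_node WHERE nodename = $1",
 *                              1, paramTypes, paramValues);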
*/ void SendCommandToWorkersParams(TargetWorkerSet targetWorkerSet, char *command, int parameterCount, const Oid *parameterTypes, const char *const *parameterValues) { List *connectionList = NIL; ListCell *connectionCell = NULL; List *workerNodeList = ActivePrimaryNodeList(); ListCell *workerNodeCell = NULL; char *nodeUser = CitusExtensionOwnerName(); BeginOrContinueCoordinatedTransaction(); CoordinatedTransactionUse2PC(); /* open connections in parallel */ foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; int nodePort = workerNode->workerPort; MultiConnection *connection = NULL; int connectionFlags = 0; if (targetWorkerSet == WORKERS_WITH_METADATA && !workerNode->hasMetadata) { continue; } connection = StartNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, nodeUser, NULL); MarkRemoteTransactionCritical(connection); connectionList = lappend(connectionList, connection); } /* finish opening connections */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); FinishConnectionEstablishment(connection); } RemoteTransactionsBeginIfNecessary(connectionList); /* send commands in parallel */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); int querySent = SendRemoteCommandParams(connection, command, parameterCount, parameterTypes, parameterValues); if (querySent == 0) { ReportConnectionError(connection, ERROR); } } /* get results */ foreach(connectionCell, connectionList) { MultiConnection *connection = (MultiConnection *) lfirst(connectionCell); PGresult *result = GetRemoteCommandResult(connection, true); if (!IsResponseOK(result)) { ReportResultError(connection, result, ERROR); } PQclear(result); ForgetResults(connection); } } /* * SendCommandListToWorkerInSingleTransaction opens connection to the node with the given * nodeName and nodePort. Then, the connection starts a transaction on the remote * node and executes the commands in the transaction. The function raises error if * any of the queries fails. */ void SendCommandListToWorkerInSingleTransaction(char *nodeName, int32 nodePort, char *nodeUser, List *commandList) { MultiConnection *workerConnection = NULL; ListCell *commandCell = NULL; int connectionFlags = FORCE_NEW_CONNECTION; if (XactModificationLevel > XACT_MODIFICATION_NONE) { ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), errmsg("cannot open new connections after the first modification " "command within a transaction"))); } workerConnection = GetNodeUserDatabaseConnection(connectionFlags, nodeName, nodePort, nodeUser, NULL); MarkRemoteTransactionCritical(workerConnection); RemoteTransactionBegin(workerConnection); /* iterate over the commands and execute them in the same connection */ foreach(commandCell, commandList) { char *commandString = lfirst(commandCell); ExecuteCriticalRemoteCommand(workerConnection, commandString); } RemoteTransactionCommit(workerConnection); CloseConnection(workerConnection); } citus-7.0.3/src/backend/distributed/utils/000077500000000000000000000000001317107136600205155ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/utils/citus_clauses.c000066400000000000000000000232631317107136600235350ustar00rootroot00000000000000/* * citus_clauses.c * * Routines roughly equivalent to postgres' util/clauses. * * Copyright (c) 2016-2016, Citus Data, Inc. 
*/ #include "postgres.h" #include "distributed/citus_clauses.h" #include "distributed/insert_select_planner.h" #include "distributed/multi_router_planner.h" #include "catalog/pg_type.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/nodes.h" #include "nodes/primnodes.h" #include "optimizer/clauses.h" #include "optimizer/planmain.h" #include "utils/datum.h" #include "utils/lsyscache.h" typedef struct FunctionEvaluationContext { PlanState *planState; bool containsVar; } FunctionEvaluationContext; /* private function declarations */ static void EvaluateValuesListsItems(List *valuesLists, PlanState *planState); static Node * EvaluateNodeIfReferencesFunction(Node *expression, PlanState *planState); static Node * PartiallyEvaluateExpressionMutator(Node *expression, FunctionEvaluationContext *context); static Expr * citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod, Oid result_collation, PlanState *planState); /* * Whether the executor needs to reparse and try to execute this query. */ bool RequiresMasterEvaluation(Query *query) { ListCell *targetEntryCell = NULL; ListCell *rteCell = NULL; ListCell *cteCell = NULL; foreach(targetEntryCell, query->targetList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); if (contain_mutable_functions((Node *) targetEntry->expr)) { return true; } } foreach(rteCell, query->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(rteCell); if (rte->rtekind == RTE_SUBQUERY) { if (RequiresMasterEvaluation(rte->subquery)) { return true; } } else if (rte->rtekind == RTE_VALUES) { if (contain_mutable_functions((Node *) rte->values_lists)) { return true; } } } foreach(cteCell, query->cteList) { CommonTableExpr *expr = (CommonTableExpr *) lfirst(cteCell); if (RequiresMasterEvaluation((Query *) expr->ctequery)) { return true; } } if (query->jointree && query->jointree->quals) { return contain_mutable_functions((Node *) query->jointree->quals); } return false; } /* * Looks at each TargetEntry of the query and the jointree quals, evaluating * any sub-expressions which don't include Vars. */ void ExecuteMasterEvaluableFunctions(Query *query, PlanState *planState) { ListCell *targetEntryCell = NULL; ListCell *rteCell = NULL; ListCell *cteCell = NULL; Node *modifiedNode = NULL; if (query->jointree && query->jointree->quals) { query->jointree->quals = PartiallyEvaluateExpression(query->jointree->quals, planState); } foreach(targetEntryCell, query->targetList) { TargetEntry *targetEntry = (TargetEntry *) lfirst(targetEntryCell); /* performance optimization for the most common cases */ if (IsA(targetEntry->expr, Const) || IsA(targetEntry->expr, Var)) { continue; } modifiedNode = PartiallyEvaluateExpression((Node *) targetEntry->expr, planState); targetEntry->expr = (Expr *) modifiedNode; } foreach(rteCell, query->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(rteCell); if (rte->rtekind == RTE_SUBQUERY) { ExecuteMasterEvaluableFunctions(rte->subquery, planState); } else if (rte->rtekind == RTE_VALUES) { EvaluateValuesListsItems(rte->values_lists, planState); } } foreach(cteCell, query->cteList) { CommonTableExpr *expr = (CommonTableExpr *) lfirst(cteCell); ExecuteMasterEvaluableFunctions((Query *) expr->ctequery, planState); } } /* * EvaluateValuesListsItems siply does the work of walking over each expression * in each value list contained in a multi-row INSERT's VALUES RTE. 
Basically * a nested for loop to perform an in-place replacement of expressions with * their ultimate values, should evaluation be necessary. */ static void EvaluateValuesListsItems(List *valuesLists, PlanState *planState) { ListCell *exprListCell = NULL; foreach(exprListCell, valuesLists) { List *exprList = (List *) lfirst(exprListCell); ListCell *exprCell = NULL; foreach(exprCell, exprList) { Expr *expr = (Expr *) lfirst(exprCell); Node *modifiedNode = NULL; modifiedNode = PartiallyEvaluateExpression((Node *) expr, planState); exprCell->data.ptr_value = (void *) modifiedNode; } } } /* * Walks the expression evaluating any node which invokes a function as long as a Var * doesn't show up in the parameter list. */ Node * PartiallyEvaluateExpression(Node *expression, PlanState *planState) { FunctionEvaluationContext globalContext = { planState, false }; return PartiallyEvaluateExpressionMutator(expression, &globalContext); } /* * When you find a function call evaluate it, the planner made sure there were no Vars. * * Tell your parent if either you or one if your children is a Var. * * A little inefficient. It goes to the bottom of the tree then calls EvaluateExpression * on each function on the way back up. Say we had an expression with no Vars, we could * only call EvaluateExpression on the top-most level and get the same result. */ static Node * PartiallyEvaluateExpressionMutator(Node *expression, FunctionEvaluationContext *context) { Node *copy = NULL; FunctionEvaluationContext localContext = { context->planState, false }; if (expression == NULL) { return expression; } /* pass any argument lists back to the mutator to copy and recurse for us */ if (IsA(expression, List)) { return expression_tree_mutator(expression, PartiallyEvaluateExpressionMutator, context); } if (IsA(expression, Var)) { context->containsVar = true; /* makes a copy for us */ return expression_tree_mutator(expression, PartiallyEvaluateExpressionMutator, context); } copy = expression_tree_mutator(expression, PartiallyEvaluateExpressionMutator, &localContext); if (localContext.containsVar) { context->containsVar = true; } else { copy = EvaluateNodeIfReferencesFunction(copy, context->planState); } return copy; } /* * Used to evaluate functions during queries on the master before sending them to workers * * The idea isn't to evaluate every kind of expression, just the kinds whoes result might * change between invocations (the idea is to allow users to use functions but still have * consistent shard replicas, since we use statement replication). This means evaluating * all nodes which invoke functions which might not be IMMUTABLE. */ static Node * EvaluateNodeIfReferencesFunction(Node *expression, PlanState *planState) { if (expression == NULL || IsA(expression, Const)) { return expression; } switch (nodeTag(expression)) { case T_FuncExpr: case T_OpExpr: case T_DistinctExpr: case T_NullIfExpr: case T_CoerceViaIO: case T_ArrayCoerceExpr: case T_ScalarArrayOpExpr: case T_RowCompareExpr: case T_Param: case T_RelabelType: case T_CoerceToDomain: { return (Node *) citus_evaluate_expr((Expr *) expression, exprType(expression), exprTypmod(expression), exprCollation(expression), planState); } default: { break; } } return expression; } /* * a copy of pg's evaluate_expr, pre-evaluate a constant expression * * We use the executor's routine ExecEvalExpr() to avoid duplication of * code and ensure we get the same result as the executor would get. 
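 *
 * Unlike the stock evaluate_expr(), this variant also takes the executor's
 * PlanState; when one is provided its ExprContext is used, which is what
 * allows externally supplied parameters (e.g. from a prepared statement)
 * to be resolved while evaluating the expression.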
* * *INDENT-OFF* */ static Expr * citus_evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod, Oid result_collation, PlanState *planState) { EState *estate; ExprState *exprstate; ExprContext *econtext; MemoryContext oldcontext; Datum const_val; bool const_is_null; int16 resultTypLen; bool resultTypByVal; /* * To use the executor, we need an EState. */ estate = CreateExecutorState(); /* We can use the estate's working context to avoid memory leaks. */ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt); /* Make sure any opfuncids are filled in. */ fix_opfuncids((Node *) expr); /* * Prepare expr for execution. (Note: we can't use ExecPrepareExpr * because it'd result in recursively invoking eval_const_expressions.) */ exprstate = ExecInitExpr(expr, planState); if (planState != NULL) { /* use executor's context to pass down parameters */ econtext = planState->ps_ExprContext; } else { /* when called from a function, use a default context */ econtext = GetPerTupleExprContext(estate); } /* * And evaluate it. */ #if (PG_VERSION_NUM >= 100000) const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null); #else const_val = ExecEvalExprSwitchContext(exprstate, econtext, &const_is_null, NULL); #endif /* Get info needed about result datatype */ get_typlenbyval(result_type, &resultTypLen, &resultTypByVal); /* Get back to outer memory context */ MemoryContextSwitchTo(oldcontext); /* * Must copy result out of sub-context used by expression eval. * * Also, if it's varlena, forcibly detoast it. This protects us against * storing TOAST pointers into plans that might outlive the referenced * data. (makeConst would handle detoasting anyway, but it's worth a few * extra lines here so that we can do the copy and detoast in one step.) */ if (!const_is_null) { if (resultTypLen == -1) const_val = PointerGetDatum(PG_DETOAST_DATUM_COPY(const_val)); else const_val = datumCopy(const_val, resultTypByVal, resultTypLen); } /* Release all the junk we just created */ FreeExecutorState(estate); /* * Make the constant result node. */ return (Expr *) makeConst(result_type, result_typmod, result_collation, resultTypLen, const_val, const_is_null, resultTypByVal); } /* *INDENT-ON* */ citus-7.0.3/src/backend/distributed/utils/citus_copyfuncs.c000066400000000000000000000152301317107136600241020ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_copyfuncs.c * Citus specific node copy functions * * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2012-2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "distributed/citus_nodefuncs.h" #include "distributed/multi_server_executor.h" #include "utils/datum.h" /* * Macros to simplify copying of different kinds of fields. Use these * wherever possible to reduce the chance for silly typos. Note that these * hard-wire the convention that the local variables in a Copy routine are * named 'newnode' and 'from'. 
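 *
 * For instance, COPY_SCALAR_FIELD(shardId) simply expands to
 * (newnode->shardId = from->shardId), relying on 'newnode' and 'from'
 * having been declared by DECLARE_FROM_AND_NEW_NODE().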
*/ static inline Node * CitusSetTag(Node *node, int tag) { CitusNode *citus_node = (CitusNode *) node; citus_node->citus_tag = tag; return node; } #define DECLARE_FROM_AND_NEW_NODE(nodeTypeName) \ nodeTypeName * newnode = (nodeTypeName *) \ CitusSetTag((Node *) target_node, T_ ## nodeTypeName); \ nodeTypeName *from = (nodeTypeName *) source_node /* Copy a simple scalar field (int, float, bool, enum, etc) */ #define COPY_SCALAR_FIELD(fldname) \ (newnode->fldname = from->fldname) /* Copy a field that is a pointer to some kind of Node or Node tree */ #define COPY_NODE_FIELD(fldname) \ (newnode->fldname = copyObject(from->fldname)) /* Copy a field that is a pointer to a C string, or perhaps NULL */ #define COPY_STRING_FIELD(fldname) \ (newnode->fldname = from->fldname ? pstrdup(from->fldname) : (char *) NULL) /* Copy a node array. Target array is also allocated. */ #define COPY_NODE_ARRAY(fldname, type, count) \ do { \ int i = 0; \ newnode->fldname = (type **) palloc(count * sizeof(type *)); \ for (i = 0; i < count; ++i) \ { \ newnode->fldname[i] = copyObject(from->fldname[i]); \ } \ } \ while (0) /* Copy a scalar array. Target array is also allocated. */ #define COPY_SCALAR_ARRAY(fldname, type, count) \ do { \ int i = 0; \ newnode->fldname = (type *) palloc(count * sizeof(type)); \ for (i = 0; i < count; ++i) \ { \ newnode->fldname[i] = from->fldname[i]; \ } \ } \ while (0) static void copyJobInfo(Job *newnode, Job *from) { COPY_SCALAR_FIELD(jobId); COPY_NODE_FIELD(jobQuery); COPY_NODE_FIELD(taskList); COPY_NODE_FIELD(dependedJobList); COPY_SCALAR_FIELD(subqueryPushdown); COPY_SCALAR_FIELD(requiresMasterEvaluation); COPY_SCALAR_FIELD(deferredPruning); } void CopyNodeJob(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(Job); copyJobInfo(newnode, from); } void CopyNodeMultiPlan(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(MultiPlan); COPY_SCALAR_FIELD(operation); COPY_SCALAR_FIELD(hasReturning); COPY_NODE_FIELD(workerJob); COPY_NODE_FIELD(masterQuery); COPY_SCALAR_FIELD(routerExecutable); COPY_NODE_FIELD(relationIdList); COPY_NODE_FIELD(insertSelectSubquery); COPY_NODE_FIELD(insertTargetList); COPY_SCALAR_FIELD(targetRelationId); COPY_NODE_FIELD(planningError); } void CopyNodeShardInterval(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(ShardInterval); COPY_SCALAR_FIELD(relationId); COPY_SCALAR_FIELD(storageType); COPY_SCALAR_FIELD(valueTypeId); COPY_SCALAR_FIELD(valueTypeLen); COPY_SCALAR_FIELD(valueByVal); COPY_SCALAR_FIELD(minValueExists); COPY_SCALAR_FIELD(maxValueExists); if (from->minValueExists) { newnode->minValue = datumCopy(from->minValue, from->valueByVal, from->valueTypeLen); } if (from->maxValueExists) { newnode->maxValue = datumCopy(from->maxValue, from->valueByVal, from->valueTypeLen); } COPY_SCALAR_FIELD(shardId); } void CopyNodeMapMergeJob(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(MapMergeJob); int arrayLength = 0; copyJobInfo(&newnode->job, &from->job); COPY_NODE_FIELD(reduceQuery); COPY_SCALAR_FIELD(partitionType); COPY_NODE_FIELD(partitionColumn); COPY_SCALAR_FIELD(partitionCount); COPY_SCALAR_FIELD(sortedShardIntervalArrayLength); arrayLength = from->sortedShardIntervalArrayLength; /* now build & read sortedShardIntervalArray */ COPY_NODE_ARRAY(sortedShardIntervalArray, ShardInterval, arrayLength); COPY_NODE_FIELD(mapTaskList); COPY_NODE_FIELD(mergeTaskList); } void CopyNodeShardPlacement(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(ShardPlacement); COPY_SCALAR_FIELD(placementId); COPY_SCALAR_FIELD(shardId); COPY_SCALAR_FIELD(shardLength); COPY_SCALAR_FIELD(shardState); 
COPY_SCALAR_FIELD(groupId); COPY_STRING_FIELD(nodeName); COPY_SCALAR_FIELD(nodePort); COPY_SCALAR_FIELD(partitionMethod); COPY_SCALAR_FIELD(colocationGroupId); COPY_SCALAR_FIELD(representativeValue); } void CopyNodeGroupShardPlacement(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(GroupShardPlacement); COPY_SCALAR_FIELD(placementId); COPY_SCALAR_FIELD(shardId); COPY_SCALAR_FIELD(shardLength); COPY_SCALAR_FIELD(shardState); COPY_SCALAR_FIELD(groupId); } void CopyNodeRelationShard(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(RelationShard); COPY_SCALAR_FIELD(relationId); COPY_SCALAR_FIELD(shardId); } void CopyNodeTask(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(Task); COPY_SCALAR_FIELD(taskType); COPY_SCALAR_FIELD(jobId); COPY_SCALAR_FIELD(taskId); COPY_STRING_FIELD(queryString); COPY_SCALAR_FIELD(anchorShardId); COPY_NODE_FIELD(taskPlacementList); COPY_NODE_FIELD(dependedTaskList); COPY_SCALAR_FIELD(partitionId); COPY_SCALAR_FIELD(upstreamTaskId); COPY_NODE_FIELD(shardInterval); COPY_SCALAR_FIELD(assignmentConstrained); COPY_SCALAR_FIELD(shardId); COPY_NODE_FIELD(taskExecution); COPY_SCALAR_FIELD(upsertQuery); COPY_SCALAR_FIELD(replicationModel); COPY_SCALAR_FIELD(insertSelectQuery); COPY_NODE_FIELD(relationShardList); COPY_NODE_FIELD(rowValuesLists); } void CopyNodeTaskExecution(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(TaskExecution); COPY_SCALAR_FIELD(jobId); COPY_SCALAR_FIELD(taskId); COPY_SCALAR_FIELD(nodeCount); COPY_SCALAR_ARRAY(taskStatusArray, TaskExecStatus, from->nodeCount); COPY_SCALAR_ARRAY(transmitStatusArray, TransmitExecStatus, from->nodeCount); COPY_SCALAR_ARRAY(connectionIdArray, int32, from->nodeCount); COPY_SCALAR_ARRAY(fileDescriptorArray, int32, from->nodeCount); COPY_SCALAR_FIELD(connectStartTime); COPY_SCALAR_FIELD(currentNodeIndex); COPY_SCALAR_FIELD(querySourceNodeIndex); COPY_SCALAR_FIELD(dataFetchTaskIndex); COPY_SCALAR_FIELD(failureCount); } void CopyNodeDeferredErrorMessage(COPYFUNC_ARGS) { DECLARE_FROM_AND_NEW_NODE(DeferredErrorMessage); COPY_SCALAR_FIELD(code); COPY_STRING_FIELD(message); COPY_STRING_FIELD(detail); COPY_STRING_FIELD(hint); COPY_STRING_FIELD(filename); COPY_SCALAR_FIELD(linenumber); COPY_STRING_FIELD(functionname); } citus-7.0.3/src/backend/distributed/utils/citus_nodefuncs.c000066400000000000000000000265601317107136600240650ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_nodefuncs.c * Helper functions for dealing with nodes * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "catalog/pg_type.h" #include "distributed/citus_nodes.h" #include "distributed/citus_nodefuncs.h" #include "distributed/errormessage.h" #include "distributed/metadata_cache.h" #include "distributed/multi_planner.h" #include "distributed/multi_server_executor.h" static const char *CitusNodeTagNamesD[] = { "MultiNode", "MultiTreeRoot", "MultiProject", "MultiCollect", "MultiSelect", "MultiTable", "MultiJoin", "MultiPartition", "MultiCartesianProduct", "MultiExtendedOp", "Job", "MapMergeJob", "MultiPlan", "Task", "TaskExecution", "ShardInterval", "ShardPlacement", "RelationShard", "DeferredErrorMessage", "GroupShardPlacement" }; const char **CitusNodeTagNames = CitusNodeTagNamesD; /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(citus_extradata_container); /* * SetRangeTblExtraData adds additional data to a RTE, overwriting previous * values, if present. 
* * The data is stored as RTE_FUNCTION type RTE of a special * citus_extradata_container function, with the extra data serialized into the * function arguments. That works, because these RTEs aren't used by Postgres * to any significant degree, and Citus' variant of ruleutils.c knows how to * deal with these extended RTEs. Note that rte->eref needs to be set prior * to calling SetRangeTblExtraData to ensure the funccolcount can be set * correctly. * * NB: If used for postgres defined RTEKinds, fields specific to that RTEKind * will not be handled by out/readfuncs.c. For the current uses that's ok. */ void SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSchemaName, char *fragmentTableName, List *tableIdList) { RangeTblFunction *fauxFunction = NULL; FuncExpr *fauxFuncExpr = NULL; Const *rteKindData = NULL; Const *fragmentSchemaData = NULL; Const *fragmentTableData = NULL; Const *tableIdListData = NULL; Assert(rte->eref); /* store RTE kind as a plain int4 */ rteKindData = makeNode(Const); rteKindData->consttype = INT4OID; rteKindData->constlen = 4; rteKindData->constvalue = Int32GetDatum(rteKind); rteKindData->constbyval = true; rteKindData->constisnull = false; rteKindData->location = -1; /* store the fragment schema as a cstring */ fragmentSchemaData = makeNode(Const); fragmentSchemaData->consttype = CSTRINGOID; fragmentSchemaData->constlen = -2; fragmentSchemaData->constvalue = CStringGetDatum(fragmentSchemaName); fragmentSchemaData->constbyval = false; fragmentSchemaData->constisnull = fragmentSchemaName == NULL; fragmentSchemaData->location = -1; /* store the fragment name as a cstring */ fragmentTableData = makeNode(Const); fragmentTableData->consttype = CSTRINGOID; fragmentTableData->constlen = -2; fragmentTableData->constvalue = CStringGetDatum(fragmentTableName); fragmentTableData->constbyval = false; fragmentTableData->constisnull = fragmentTableName == NULL; fragmentTableData->location = -1; /* store the table id list as an array of integers: FIXME */ tableIdListData = makeNode(Const); tableIdListData->consttype = CSTRINGOID; tableIdListData->constbyval = false; tableIdListData->constlen = -2; tableIdListData->location = -1; /* serialize tableIdList to a string, seems simplest that way */ if (tableIdList != NIL) { char *serializedList = nodeToString(tableIdList); tableIdListData->constisnull = false; tableIdListData->constvalue = CStringGetDatum(serializedList); } else { tableIdListData->constisnull = true; } /* create function expression to store our faux arguments in */ fauxFuncExpr = makeNode(FuncExpr); fauxFuncExpr->funcid = CitusExtraDataContainerFuncId(); fauxFuncExpr->funcretset = true; fauxFuncExpr->location = -1; fauxFuncExpr->args = list_make4(rteKindData, fragmentSchemaData, fragmentTableData, tableIdListData); fauxFunction = makeNode(RangeTblFunction); fauxFunction->funcexpr = (Node *) fauxFuncExpr; /* set the column count to pass ruleutils checks, not used elsewhere */ fauxFunction->funccolcount = list_length(rte->eref->colnames); rte->rtekind = RTE_FUNCTION; rte->functions = list_make1(fauxFunction); } /* * ExtractRangeTblExtraData extracts extra data stored for a range table entry * that previously has been stored with * Set/ModifyRangeTblExtraData. Parameters can be NULL if unintersting. It is * valid to use the function on a RTE without extra data. 
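 *
 * A minimal usage sketch (the local variable names below are illustrative only):
 *
 *   CitusRTEKind rteKind = CITUS_RTE_RELATION;
 *   char *fragmentSchemaName = NULL;
 *   char *fragmentTableName = NULL;
 *
 *   ExtractRangeTblExtraData(rte, &rteKind, &fragmentSchemaName,
 *                            &fragmentTableName, NULL);
 *
 * Here tableIdList is passed as NULL because the caller does not need it.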
*/ void ExtractRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind *rteKind, char **fragmentSchemaName, char **fragmentTableName, List **tableIdList) { RangeTblFunction *fauxFunction = NULL; FuncExpr *fauxFuncExpr = NULL; Const *tmpConst = NULL; /* set base rte kind first, so this can be used for 'non-extended' RTEs as well */ if (rteKind != NULL) { *rteKind = (CitusRTEKind) rte->rtekind; } /* reset values of optionally-present fields, will later be overwritten, if present */ if (fragmentSchemaName != NULL) { *fragmentSchemaName = NULL; } if (fragmentTableName != NULL) { *fragmentTableName = NULL; } if (tableIdList != NULL) { *tableIdList = NIL; } /* only function RTEs have our special extra data */ if (rte->rtekind != RTE_FUNCTION) { return; } /* we only ever generate one argument */ if (list_length(rte->functions) != 1) { return; } /* should pretty much always be a FuncExpr, but be liberal in what we expect... */ fauxFunction = linitial(rte->functions); if (!IsA(fauxFunction->funcexpr, FuncExpr)) { return; } fauxFuncExpr = (FuncExpr *) fauxFunction->funcexpr; /* * There will never be a range table entry with this function id, but for * the purpose of this file. */ if (fauxFuncExpr->funcid != CitusExtraDataContainerFuncId()) { return; } /* * Extra data for rtes is stored in the function arguments. The first * argument stores the rtekind, second fragmentSchemaName, third * fragmentTableName, fourth tableIdList. */ if (list_length(fauxFuncExpr->args) != 4) { ereport(ERROR, (errmsg("unexpected number of function arguments to " "citus_extradata_container"))); return; } /* extract rteKind */ tmpConst = (Const *) linitial(fauxFuncExpr->args); Assert(IsA(tmpConst, Const)); Assert(tmpConst->consttype == INT4OID); if (rteKind != NULL) { *rteKind = DatumGetInt32(tmpConst->constvalue); } /* extract fragmentSchemaName */ tmpConst = (Const *) lsecond(fauxFuncExpr->args); Assert(IsA(tmpConst, Const)); Assert(tmpConst->consttype == CSTRINGOID); if (fragmentSchemaName != NULL && !tmpConst->constisnull) { *fragmentSchemaName = DatumGetCString(tmpConst->constvalue); } /* extract fragmentTableName */ tmpConst = (Const *) lthird(fauxFuncExpr->args); Assert(IsA(tmpConst, Const)); Assert(tmpConst->consttype == CSTRINGOID); if (fragmentTableName != NULL && !tmpConst->constisnull) { *fragmentTableName = DatumGetCString(tmpConst->constvalue); } /* extract tableIdList, stored as a serialized integer list */ tmpConst = (Const *) lfourth(fauxFuncExpr->args); Assert(IsA(tmpConst, Const)); Assert(tmpConst->consttype == CSTRINGOID); if (tableIdList != NULL && !tmpConst->constisnull) { Node *deserializedList = stringToNode(DatumGetCString(tmpConst->constvalue)); Assert(IsA(deserializedList, IntList)); *tableIdList = (List *) deserializedList; } } /* * ModifyRangeTblExtraData sets the RTE extra data fields for the passed * fields, leaving the current values in place for the ones not specified. * * rteKind has to be specified, fragmentSchemaName, fragmentTableName, * tableIdList can be set to NULL/NIL respectively to leave the current values * in-place. */ void ModifyRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSchemaName, char *fragmentTableName, List *tableIdList) { /* load existing values for the arguments not specifying a new value */ ExtractRangeTblExtraData(rte, NULL, fragmentSchemaName == NULL ? &fragmentSchemaName : NULL, fragmentTableName == NULL ? &fragmentTableName : NULL, tableIdList == NIL ? 
&tableIdList : NULL); SetRangeTblExtraData(rte, rteKind, fragmentSchemaName, fragmentTableName, tableIdList); } /* GetRangeTblKind returns rtekind of a RTE, be it an extended one or not. */ CitusRTEKind GetRangeTblKind(RangeTblEntry *rte) { CitusRTEKind rteKind = CITUS_RTE_RELATION /* invalid */; switch (rte->rtekind) { /* directly rtekind if it's not possibly an extended RTE */ #if (PG_VERSION_NUM >= 100000) case RTE_TABLEFUNC: case RTE_NAMEDTUPLESTORE: #endif case RTE_RELATION: case RTE_SUBQUERY: case RTE_JOIN: case RTE_VALUES: case RTE_CTE: { rteKind = (CitusRTEKind) rte->rtekind; break; } case RTE_FUNCTION: { /* * Extract extra data - correct even if a plain RTE_FUNCTION, not * an extended one, ExtractRangeTblExtraData handles that case * transparently. */ ExtractRangeTblExtraData(rte, &rteKind, NULL, NULL, NULL); break; } } return rteKind; } /* * citus_extradata_container is a placeholder function to store information * needed by Citus in plain postgres node trees. Executor and other hooks * should always intercept statements containing calls to this function. It's * not actually SQL callable by the user because of an INTERNAL argument. */ Datum citus_extradata_container(PG_FUNCTION_ARGS) { ereport(ERROR, (errmsg("not supposed to get here, did you cheat?"))); PG_RETURN_NULL(); } static void CopyUnsupportedCitusNode(struct ExtensibleNode *newnode, const struct ExtensibleNode *oldnode) { ereport(ERROR, (errmsg("not implemented"))); } static bool EqualUnsupportedCitusNode(const struct ExtensibleNode *a, const struct ExtensibleNode *b) { ereport(ERROR, (errmsg("not implemented"))); } /* *INDENT-OFF* */ #define DEFINE_NODE_METHODS(type) \ { \ #type, \ sizeof(type), \ CopyNode##type, \ EqualUnsupportedCitusNode, \ Out##type, \ Read##type \ } #define DEFINE_NODE_METHODS_NO_READ(type) \ { \ #type, \ sizeof(type), \ CopyUnsupportedCitusNode, \ EqualUnsupportedCitusNode, \ Out##type, \ ReadUnsupportedCitusNode \ } /* *INDENT-ON* */ const ExtensibleNodeMethods nodeMethods[] = { DEFINE_NODE_METHODS(MultiPlan), DEFINE_NODE_METHODS(Job), DEFINE_NODE_METHODS(ShardInterval), DEFINE_NODE_METHODS(MapMergeJob), DEFINE_NODE_METHODS(ShardPlacement), DEFINE_NODE_METHODS(RelationShard), DEFINE_NODE_METHODS(Task), DEFINE_NODE_METHODS(TaskExecution), DEFINE_NODE_METHODS(DeferredErrorMessage), DEFINE_NODE_METHODS(GroupShardPlacement), /* nodes with only output support */ DEFINE_NODE_METHODS_NO_READ(MultiNode), DEFINE_NODE_METHODS_NO_READ(MultiTreeRoot), DEFINE_NODE_METHODS_NO_READ(MultiProject), DEFINE_NODE_METHODS_NO_READ(MultiCollect), DEFINE_NODE_METHODS_NO_READ(MultiSelect), DEFINE_NODE_METHODS_NO_READ(MultiTable), DEFINE_NODE_METHODS_NO_READ(MultiJoin), DEFINE_NODE_METHODS_NO_READ(MultiPartition), DEFINE_NODE_METHODS_NO_READ(MultiCartesianProduct), DEFINE_NODE_METHODS_NO_READ(MultiExtendedOp) }; void RegisterNodes(void) { int off; StaticAssertExpr(lengthof(nodeMethods) == lengthof(CitusNodeTagNamesD), "number of node methods and names do not match"); for (off = 0; off < lengthof(nodeMethods); off++) { RegisterExtensibleNodeMethods(&nodeMethods[off]); } } citus-7.0.3/src/backend/distributed/utils/citus_outfuncs.c000066400000000000000000000271731317107136600237500ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_outfuncs.c * Output functions for Citus tree nodes. 
* * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2012-2016, Citus Data, Inc. * * NOTES * This is a wrapper around postgres' nodeToString() that additionally * supports Citus node types. * * Keep as closely aligned with the upstream version as possible. * *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "distributed/citus_nodefuncs.h" #include "distributed/citus_nodes.h" #include "distributed/errormessage.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/master_metadata_utility.h" #include "lib/stringinfo.h" #include "nodes/plannodes.h" #include "nodes/relation.h" #include "utils/datum.h" /* * Macros to simplify output of different kinds of fields. Use these * wherever possible to reduce the chance for silly typos. Note that these * hard-wire conventions about the names of the local variables in an Out * routine. */ /* Store const reference to raw input node in local named 'node' */ #define WRITE_LOCALS(nodeTypeName) \ const nodeTypeName *node = (const nodeTypeName *) raw_node /* Write the label for the node type */ #define WRITE_NODE_TYPE(nodelabel) \ (void) 0 /* Write an integer field (anything written as ":fldname %d") */ #define WRITE_INT_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %d", node->fldname) /* Write an 64-bit integer field (anything written as ":fldname %d") */ #define WRITE_INT64_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " " INT64_FORMAT, node->fldname) /* Write an unsigned integer field (anything written as ":fldname %u") */ #define WRITE_UINT_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %u", node->fldname) /* XXX: Citus: Write an unsigned 64-bit integer field */ #define WRITE_UINT64_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " " UINT64_FORMAT, node->fldname) /* Write an OID field (don't hard-wire assumption that OID is same as uint) */ #define WRITE_OID_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %u", node->fldname) /* Write a long-integer field */ #define WRITE_LONG_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %ld", node->fldname) /* Write a char field (ie, one ascii character) */ #define WRITE_CHAR_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %c", node->fldname) /* Write an enumerated-type field as an integer code */ #define WRITE_ENUM_FIELD(fldname, enumtype) \ appendStringInfo(str, " :" CppAsString(fldname) " %d", \ (int) node->fldname) /* Write a float field --- caller must give format to define precision */ #define WRITE_FLOAT_FIELD(fldname,format) \ appendStringInfo(str, " :" CppAsString(fldname) " " format, node->fldname) /* Write a boolean field */ #define WRITE_BOOL_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %s", \ booltostr(node->fldname)) /* Write a character-string (possibly NULL) field */ #define WRITE_STRING_FIELD(fldname) \ (appendStringInfo(str, " :" CppAsString(fldname) " "), \ outToken(str, node->fldname)) /* Write a parse location field (actually same as INT case) */ #define WRITE_LOCATION_FIELD(fldname) \ appendStringInfo(str, " :" CppAsString(fldname) " %d", node->fldname) /* Write a Node field */ #define WRITE_NODE_FIELD(fldname) \ 
(appendStringInfo(str, " :" CppAsString(fldname) " "), \ outNode(str, node->fldname)) /* Write a bitmapset field */ #define WRITE_BITMAPSET_FIELD(fldname) \ (appendStringInfo(str, " :" CppAsString(fldname) " "), \ _outBitmapset(str, node->fldname)) /* Write an integer array (anything written as ":fldname (%d, %d") */ #define WRITE_INT_ARRAY(fldname, count) \ appendStringInfo(str, " :" CppAsString(fldname) " ("); \ { \ int i;\ for (i = 0; i < count; i++) \ { \ if (i > 0) \ { \ appendStringInfo(str, ", "); \ } \ appendStringInfo(str, "%d", node->fldname[i]); \ }\ }\ appendStringInfo(str, ")") /* Write an enum array (anything written as ":fldname (%d, %d") */ #define WRITE_ENUM_ARRAY(fldname, count) WRITE_INT_ARRAY(fldname, count) #define booltostr(x) ((x) ? "true" : "false") /***************************************************************************** * Output routines for Citus node types *****************************************************************************/ static void OutMultiUnaryNodeFields(StringInfo str, const MultiUnaryNode *node) { WRITE_NODE_FIELD(childNode); } static void OutMultiBinaryNodeFields(StringInfo str, const MultiBinaryNode *node) { WRITE_NODE_FIELD(leftChildNode); WRITE_NODE_FIELD(rightChildNode); } void OutMultiNode(OUTFUNC_ARGS) { WRITE_NODE_TYPE("MULTINODE"); } void OutMultiTreeRoot(OUTFUNC_ARGS) { WRITE_LOCALS(MultiTreeRoot); WRITE_NODE_TYPE("MULTITREEROOT"); OutMultiUnaryNodeFields(str, (const MultiUnaryNode *) node); } void OutMultiPlan(OUTFUNC_ARGS) { WRITE_LOCALS(MultiPlan); WRITE_NODE_TYPE("MULTIPLAN"); WRITE_INT_FIELD(operation); WRITE_BOOL_FIELD(hasReturning); WRITE_NODE_FIELD(workerJob); WRITE_NODE_FIELD(masterQuery); WRITE_BOOL_FIELD(routerExecutable); WRITE_NODE_FIELD(relationIdList); WRITE_NODE_FIELD(insertSelectSubquery); WRITE_NODE_FIELD(insertTargetList); WRITE_OID_FIELD(targetRelationId); WRITE_NODE_FIELD(planningError); } void OutMultiProject(OUTFUNC_ARGS) { WRITE_LOCALS(MultiProject); WRITE_NODE_TYPE("MULTIPROJECT"); WRITE_NODE_FIELD(columnList); OutMultiUnaryNodeFields(str, (const MultiUnaryNode *) node); } void OutMultiCollect(OUTFUNC_ARGS) { WRITE_LOCALS(MultiCollect); WRITE_NODE_TYPE("MULTICOLLECT"); OutMultiUnaryNodeFields(str, (const MultiUnaryNode *) node); } void OutMultiSelect(OUTFUNC_ARGS) { WRITE_LOCALS(MultiSelect); WRITE_NODE_TYPE("MULTISELECT"); WRITE_NODE_FIELD(selectClauseList); OutMultiUnaryNodeFields(str, (const MultiUnaryNode *) node); } void OutMultiTable(OUTFUNC_ARGS) { WRITE_LOCALS(MultiTable); WRITE_NODE_TYPE("MULTITABLE"); WRITE_OID_FIELD(relationId); WRITE_INT_FIELD(rangeTableId); OutMultiUnaryNodeFields(str, (const MultiUnaryNode *) node); } void OutMultiJoin(OUTFUNC_ARGS) { WRITE_LOCALS(MultiJoin); WRITE_NODE_TYPE("MULTIJOIN"); WRITE_NODE_FIELD(joinClauseList); WRITE_ENUM_FIELD(joinRuleType, JoinRuleType); WRITE_ENUM_FIELD(joinType, JoinType); OutMultiBinaryNodeFields(str, (const MultiBinaryNode *) node); } void OutMultiPartition(OUTFUNC_ARGS) { WRITE_LOCALS(MultiPartition); WRITE_NODE_TYPE("MULTIPARTITION"); WRITE_NODE_FIELD(partitionColumn); OutMultiUnaryNodeFields(str, (const MultiUnaryNode *) node); } void OutMultiCartesianProduct(OUTFUNC_ARGS) { WRITE_LOCALS(MultiCartesianProduct); WRITE_NODE_TYPE("MULTICARTESIANPRODUCT"); OutMultiBinaryNodeFields(str, (const MultiBinaryNode *) node); } void OutMultiExtendedOp(OUTFUNC_ARGS) { WRITE_LOCALS(MultiExtendedOp); WRITE_NODE_TYPE("MULTIEXTENDEDOP"); WRITE_NODE_FIELD(targetList); WRITE_NODE_FIELD(groupClauseList); WRITE_NODE_FIELD(sortClauseList); 
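/* limit, offset and the having qualifier are serialized as nested nodes */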
WRITE_NODE_FIELD(limitCount); WRITE_NODE_FIELD(limitOffset); WRITE_NODE_FIELD(havingQual); OutMultiUnaryNodeFields(str, (const MultiUnaryNode *) node); } static void OutJobFields(StringInfo str, const Job *node) { WRITE_UINT64_FIELD(jobId); WRITE_NODE_FIELD(jobQuery); WRITE_NODE_FIELD(taskList); WRITE_NODE_FIELD(dependedJobList); WRITE_BOOL_FIELD(subqueryPushdown); WRITE_BOOL_FIELD(requiresMasterEvaluation); WRITE_BOOL_FIELD(deferredPruning); } void OutJob(OUTFUNC_ARGS) { WRITE_LOCALS(Job); WRITE_NODE_TYPE("JOB"); OutJobFields(str, node); } void OutShardInterval(OUTFUNC_ARGS) { WRITE_LOCALS(ShardInterval); WRITE_NODE_TYPE("SHARDINTERVAL"); WRITE_OID_FIELD(relationId); WRITE_CHAR_FIELD(storageType); WRITE_OID_FIELD(valueTypeId); WRITE_INT_FIELD(valueTypeLen); WRITE_BOOL_FIELD(valueByVal); WRITE_BOOL_FIELD(minValueExists); WRITE_BOOL_FIELD(maxValueExists); appendStringInfoString(str, " :minValue "); if (!node->minValueExists) appendStringInfoString(str, "<>"); else outDatum(str, node->minValue, node->valueTypeLen, node->valueByVal); appendStringInfoString(str, " :maxValue "); if (!node->maxValueExists) appendStringInfoString(str, "<>"); else outDatum(str, node->maxValue, node->valueTypeLen, node->valueByVal); WRITE_UINT64_FIELD(shardId); } void OutMapMergeJob(OUTFUNC_ARGS) { WRITE_LOCALS(MapMergeJob); int arrayLength = node->sortedShardIntervalArrayLength; int i; WRITE_NODE_TYPE("MAPMERGEJOB"); OutJobFields(str, (Job *) node); WRITE_NODE_FIELD(reduceQuery); WRITE_ENUM_FIELD(partitionType, PartitionType); WRITE_NODE_FIELD(partitionColumn); WRITE_UINT_FIELD(partitionCount); WRITE_INT_FIELD(sortedShardIntervalArrayLength); for (i = 0; i < arrayLength; ++i) { outNode(str, node->sortedShardIntervalArray[i]); } WRITE_NODE_FIELD(mapTaskList); WRITE_NODE_FIELD(mergeTaskList); } void OutShardPlacement(OUTFUNC_ARGS) { WRITE_LOCALS(ShardPlacement); WRITE_NODE_TYPE("SHARDPLACEMENT"); WRITE_UINT64_FIELD(placementId); WRITE_UINT64_FIELD(shardId); WRITE_UINT64_FIELD(shardLength); WRITE_ENUM_FIELD(shardState, RelayFileState); WRITE_UINT_FIELD(groupId); WRITE_STRING_FIELD(nodeName); WRITE_UINT_FIELD(nodePort); /* so we can deal with 0 */ WRITE_INT_FIELD(partitionMethod); WRITE_UINT_FIELD(colocationGroupId); WRITE_UINT_FIELD(representativeValue); } void OutGroupShardPlacement(OUTFUNC_ARGS) { WRITE_LOCALS(GroupShardPlacement); WRITE_NODE_TYPE("GROUPSHARDPLACEMENT"); WRITE_UINT64_FIELD(placementId); WRITE_UINT64_FIELD(shardId); WRITE_UINT64_FIELD(shardLength); WRITE_ENUM_FIELD(shardState, RelayFileState); WRITE_UINT_FIELD(groupId); } void OutRelationShard(OUTFUNC_ARGS) { WRITE_LOCALS(RelationShard); WRITE_NODE_TYPE("RELATIONSHARD"); WRITE_OID_FIELD(relationId); WRITE_UINT64_FIELD(shardId); } void OutTask(OUTFUNC_ARGS) { WRITE_LOCALS(Task); WRITE_NODE_TYPE("TASK"); WRITE_ENUM_FIELD(taskType, TaskType); WRITE_UINT64_FIELD(jobId); WRITE_UINT_FIELD(taskId); WRITE_STRING_FIELD(queryString); WRITE_UINT64_FIELD(anchorShardId); WRITE_NODE_FIELD(taskPlacementList); WRITE_NODE_FIELD(dependedTaskList); WRITE_UINT_FIELD(partitionId); WRITE_UINT_FIELD(upstreamTaskId); WRITE_NODE_FIELD(shardInterval); WRITE_BOOL_FIELD(assignmentConstrained); WRITE_NODE_FIELD(taskExecution); WRITE_BOOL_FIELD(upsertQuery); WRITE_CHAR_FIELD(replicationModel); WRITE_BOOL_FIELD(insertSelectQuery); WRITE_NODE_FIELD(relationShardList); WRITE_NODE_FIELD(rowValuesLists); } void OutTaskExecution(OUTFUNC_ARGS) { WRITE_LOCALS(TaskExecution); WRITE_NODE_TYPE("TASKEXECUTION"); WRITE_UINT64_FIELD(jobId); WRITE_UINT_FIELD(taskId); 
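/*
 * nodeCount is written before the per-node arrays that follow, since each
 * of those arrays is emitted with exactly nodeCount entries.
 */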
WRITE_UINT_FIELD(nodeCount); WRITE_ENUM_ARRAY(taskStatusArray, node->nodeCount); WRITE_ENUM_ARRAY(transmitStatusArray, node->nodeCount); WRITE_INT_ARRAY(connectionIdArray, node->nodeCount); WRITE_INT_ARRAY(fileDescriptorArray, node->nodeCount); WRITE_INT64_FIELD(connectStartTime); WRITE_UINT_FIELD(currentNodeIndex); WRITE_UINT_FIELD(querySourceNodeIndex); WRITE_INT_FIELD(dataFetchTaskIndex); WRITE_UINT_FIELD(failureCount); } void OutDeferredErrorMessage(OUTFUNC_ARGS) { WRITE_LOCALS(DeferredErrorMessage); WRITE_NODE_TYPE("DEFERREDERRORMESSAGE"); WRITE_INT_FIELD(code); WRITE_STRING_FIELD(message); WRITE_STRING_FIELD(detail); WRITE_STRING_FIELD(hint); WRITE_STRING_FIELD(filename); WRITE_INT_FIELD(linenumber); WRITE_STRING_FIELD(functionname); } citus-7.0.3/src/backend/distributed/utils/citus_readfuncs.c000066400000000000000000000240331317107136600240440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_readfuncs.c * Citus specific node functions * * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 2012-2015, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include #include "distributed/citus_nodefuncs.h" #include "distributed/errormessage.h" #include "distributed/multi_planner.h" #include "distributed/multi_server_executor.h" #include "nodes/parsenodes.h" #include "nodes/readfuncs.h" /* * Macros to simplify reading of different kinds of fields. Use these * wherever possible to reduce the chance for silly typos. Note that these * hard-wire conventions about the names of the local variables in a Read * routine. */ /* Macros for declaring appropriate local variables */ /* A few guys need only local_node */ static inline Node * CitusSetTag(Node *node, int tag) { CitusNode *citus_node = (CitusNode *) node; citus_node->citus_tag = tag; return node; } /* *INDENT-OFF* */ #define READ_LOCALS_NO_FIELDS(nodeTypeName) \ nodeTypeName *local_node = (nodeTypeName *) CitusSetTag((Node *) node, T_##nodeTypeName) /* And a few guys need only the pg_strtok support fields */ #define READ_TEMP_LOCALS() \ char *token; \ int length /* ... 
but most need both */ #define READ_LOCALS(nodeTypeName) \ READ_LOCALS_NO_FIELDS(nodeTypeName); \ READ_TEMP_LOCALS() /* Read an integer field (anything written as ":fldname %d") */ #define READ_INT_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = atoi(token) /* Read an 64-bit integer field (anything written as ":fldname %d") */ #define READ_INT64_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = (int64) strtoll(token, NULL, 10) /* Read an unsigned integer field (anything written as ":fldname %u") */ #define READ_UINT_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = atoui(token) /* XXX: CITUS Read an uint64 field (anything written as ":fldname %u") */ #define READ_UINT64_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = atoull(token) /* Read an OID field (don't hard-wire assumption that OID is same as uint) */ #define READ_OID_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = atooid(token) /* Read a char field (ie, one ascii character) */ #define READ_CHAR_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = token[0] /* Read an enumerated-type field that was written as an integer code */ #define READ_ENUM_FIELD(fldname, enumtype) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = (enumtype) atoi(token) /* Read a float field */ #define READ_FLOAT_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = atof(token) /* Read a boolean field */ #define READ_BOOL_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = strtobool(token) /* Read a character-string field */ #define READ_STRING_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname = nullable_string(token, length) /* Read a parse location field (and throw away the value, per notes above) */ #define READ_LOCATION_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* get field value */ \ (void) token; /* in case not used elsewhere */ \ local_node->fldname = -1 /* set field to "unknown" */ /* Read a Node field */ #define READ_NODE_FIELD(fldname) \ token = pg_strtok(&length); /* skip :fldname */ \ (void) token; /* in case not used elsewhere */ \ local_node->fldname = nodeRead(NULL, 0) /* Read an integer field (anything written as ":fldname %d") */ #define READ_ENUM_ARRAY(fldname, count, enumtype) \ token = pg_strtok(&length); /* skip :fldname */ \ token = pg_strtok(&length); /* skip ( */ \ { \ int i = 0; \ for (i = 0; i < count; i++ ) \ { \ token = pg_strtok(&length); /* get field value */ \ local_node->fldname[i] = (enumtype) atoi(token); \ } \ } \ token = pg_strtok(&length); /* skip ) */ \ (void) token #define READ_INT_ARRAY(fldname, count) READ_ENUM_ARRAY(fldname, count, int32) /* Routine exit */ #define READ_DONE() \ return; /* * NOTE: 
use atoi() to read values written with %d, or atoui() to read * values written with %u in outfuncs.c. An exception is OID values, * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u, * but this will probably change in the future.) */ #define atoui(x) ((unsigned int) strtoul((x), NULL, 10)) #define atooid(x) ((Oid) strtoul((x), NULL, 10)) /* XXX: Citus */ #define atoull(x) ((uint64) strtoull((x), NULL, 10)) #define strtobool(x) ((*(x) == 't') ? true : false) #define nullable_string(token,length) \ ((length) == 0 ? NULL : debackslash(token, length)) static void readJobInfo(Job *local_node) { READ_TEMP_LOCALS(); CitusSetTag((Node *) local_node, T_Job); READ_UINT64_FIELD(jobId); READ_NODE_FIELD(jobQuery); READ_NODE_FIELD(taskList); READ_NODE_FIELD(dependedJobList); READ_BOOL_FIELD(subqueryPushdown); READ_BOOL_FIELD(requiresMasterEvaluation); READ_BOOL_FIELD(deferredPruning); } READFUNC_RET ReadJob(READFUNC_ARGS) { READ_LOCALS_NO_FIELDS(Job); readJobInfo(local_node); READ_DONE(); } READFUNC_RET ReadMultiPlan(READFUNC_ARGS) { READ_LOCALS(MultiPlan); READ_INT_FIELD(operation); READ_BOOL_FIELD(hasReturning); READ_NODE_FIELD(workerJob); READ_NODE_FIELD(masterQuery); READ_BOOL_FIELD(routerExecutable); READ_NODE_FIELD(relationIdList); READ_NODE_FIELD(insertSelectSubquery); READ_NODE_FIELD(insertTargetList); READ_OID_FIELD(targetRelationId); READ_NODE_FIELD(planningError); READ_DONE(); } READFUNC_RET ReadShardInterval(READFUNC_ARGS) { READ_LOCALS(ShardInterval); READ_OID_FIELD(relationId); READ_CHAR_FIELD(storageType); READ_OID_FIELD(valueTypeId); READ_INT_FIELD(valueTypeLen); READ_BOOL_FIELD(valueByVal); READ_BOOL_FIELD(minValueExists); READ_BOOL_FIELD(maxValueExists); token = pg_strtok(&length); /* skip :minValue */ if (!local_node->minValueExists) token = pg_strtok(&length); /* skip "<>" */ else local_node->minValue = readDatum(local_node->valueByVal); token = pg_strtok(&length); /* skip :maxValue */ if (!local_node->maxValueExists) token = pg_strtok(&length); /* skip "<>" */ else local_node->maxValue = readDatum(local_node->valueByVal); READ_UINT64_FIELD(shardId); READ_DONE(); } READFUNC_RET ReadMapMergeJob(READFUNC_ARGS) { int arrayLength; int i; READ_LOCALS(MapMergeJob); readJobInfo(&local_node->job); READ_NODE_FIELD(reduceQuery); READ_ENUM_FIELD(partitionType, PartitionType); READ_NODE_FIELD(partitionColumn); READ_UINT_FIELD(partitionCount); READ_INT_FIELD(sortedShardIntervalArrayLength); arrayLength = local_node->sortedShardIntervalArrayLength; /* now build & read sortedShardIntervalArray */ local_node->sortedShardIntervalArray = (ShardInterval**) palloc(arrayLength * sizeof(ShardInterval *)); for (i = 0; i < arrayLength; ++i) { /* can't use READ_NODE_FIELD, no field names */ local_node->sortedShardIntervalArray[i] = nodeRead(NULL, 0); } READ_NODE_FIELD(mapTaskList); READ_NODE_FIELD(mergeTaskList); READ_DONE(); } READFUNC_RET ReadShardPlacement(READFUNC_ARGS) { READ_LOCALS(ShardPlacement); READ_UINT64_FIELD(placementId); READ_UINT64_FIELD(shardId); READ_UINT64_FIELD(shardLength); READ_ENUM_FIELD(shardState, RelayFileState); READ_UINT_FIELD(groupId); READ_STRING_FIELD(nodeName); READ_UINT_FIELD(nodePort); /* so we can deal with 0 */ READ_INT_FIELD(partitionMethod); READ_UINT_FIELD(colocationGroupId); READ_UINT_FIELD(representativeValue); READ_DONE(); } READFUNC_RET ReadGroupShardPlacement(READFUNC_ARGS) { READ_LOCALS(GroupShardPlacement); READ_UINT64_FIELD(placementId); READ_UINT64_FIELD(shardId); READ_UINT64_FIELD(shardLength); READ_ENUM_FIELD(shardState, RelayFileState);
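/*
 * Field order here must match OutGroupShardPlacement exactly, as pg_strtok
 * consumes the serialized fields sequentially.
 */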
READ_UINT_FIELD(groupId); READ_DONE(); } READFUNC_RET ReadRelationShard(READFUNC_ARGS) { READ_LOCALS(RelationShard); READ_OID_FIELD(relationId); READ_UINT64_FIELD(shardId); READ_DONE(); } READFUNC_RET ReadTask(READFUNC_ARGS) { READ_LOCALS(Task); READ_ENUM_FIELD(taskType, TaskType); READ_UINT64_FIELD(jobId); READ_UINT_FIELD(taskId); READ_STRING_FIELD(queryString); READ_UINT64_FIELD(anchorShardId); READ_NODE_FIELD(taskPlacementList); READ_NODE_FIELD(dependedTaskList); READ_UINT_FIELD(partitionId); READ_UINT_FIELD(upstreamTaskId); READ_NODE_FIELD(shardInterval); READ_BOOL_FIELD(assignmentConstrained); READ_NODE_FIELD(taskExecution); READ_BOOL_FIELD(upsertQuery); READ_CHAR_FIELD(replicationModel); READ_BOOL_FIELD(insertSelectQuery); READ_NODE_FIELD(relationShardList); READ_NODE_FIELD(rowValuesLists); READ_DONE(); } READFUNC_RET ReadTaskExecution(READFUNC_ARGS) { ereport(ERROR, (errmsg("unexpected read request for TaskExecution node"))); } READFUNC_RET ReadDeferredErrorMessage(READFUNC_ARGS) { READ_LOCALS(DeferredErrorMessage); READ_INT_FIELD(code); READ_STRING_FIELD(message); READ_STRING_FIELD(detail); READ_STRING_FIELD(hint); READ_STRING_FIELD(filename); READ_INT_FIELD(linenumber); READ_STRING_FIELD(functionname); READ_DONE(); } READFUNC_RET ReadUnsupportedCitusNode(READFUNC_ARGS) { ereport(ERROR, (errmsg("not implemented"))); } citus-7.0.3/src/backend/distributed/utils/citus_ruleutils.c000066400000000000000000000714371317107136600241340ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_ruleutils.c * Version independent ruleutils wrapper * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "miscadmin.h" #include #include "access/attnum.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup.h" #include "access/htup_details.h" #include "access/skey.h" #include "access/stratnum.h" #include "access/sysattr.h" #include "access/tupdesc.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_attribute.h" #include "catalog/pg_authid.h" #include "catalog/pg_class.h" #include "catalog/pg_extension.h" #include "catalog/pg_foreign_data_wrapper.h" #include "catalog/pg_index.h" #include "commands/defrem.h" #include "commands/extension.h" #include "distributed/citus_ruleutils.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/relay_utility.h" #include "distributed/master_metadata_utility.h" #include "foreign/foreign.h" #include "lib/stringinfo.h" #include "nodes/nodes.h" #include "nodes/nodeFuncs.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "parser/parse_utilcmd.h" #include "storage/lock.h" #include "utils/acl.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/palloc.h" #include "utils/rel.h" #include "utils/relcache.h" #include "utils/ruleutils.h" #include "utils/syscache.h" static void AppendOptionListToString(StringInfo stringData, List *options); static const char * convert_aclright_to_string(int aclright); /* * pg_get_extensiondef_string finds the foreign data wrapper that corresponds to * the given foreign tableId, and checks if an extension owns this foreign data * wrapper. If it does, the function returns the extension's definition. 
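 * (In that case the returned command has the form "CREATE EXTENSION IF NOT
 * EXISTS extension_name WITH SCHEMA schema_name", where the identifiers shown
 * here are placeholders.)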
If not, * the function returns null. */ char * pg_get_extensiondef_string(Oid tableRelationId) { ForeignTable *foreignTable = GetForeignTable(tableRelationId); ForeignServer *server = GetForeignServer(foreignTable->serverid); ForeignDataWrapper *foreignDataWrapper = GetForeignDataWrapper(server->fdwid); StringInfoData buffer = { NULL, 0, 0, 0 }; Oid classId = ForeignDataWrapperRelationId; Oid objectId = server->fdwid; Oid extensionId = getExtensionOfObject(classId, objectId); if (OidIsValid(extensionId)) { char *extensionName = get_extension_name(extensionId); Oid extensionSchemaId = get_extension_schema(extensionId); char *extensionSchema = get_namespace_name(extensionSchemaId); initStringInfo(&buffer); appendStringInfo(&buffer, "CREATE EXTENSION IF NOT EXISTS %s WITH SCHEMA %s", quote_identifier(extensionName), quote_identifier(extensionSchema)); } else { ereport(NOTICE, (errmsg("foreign-data wrapper \"%s\" does not have an " "extension defined", foreignDataWrapper->fdwname))); } return (buffer.data); } /* * get_extension_schema - given an extension OID, fetch its extnamespace * * Returns InvalidOid if no such extension. */ Oid get_extension_schema(Oid ext_oid) { /* *INDENT-OFF* */ Oid result; Relation rel; SysScanDesc scandesc; HeapTuple tuple; ScanKeyData entry[1]; rel = heap_open(ExtensionRelationId, AccessShareLock); ScanKeyInit(&entry[0], ObjectIdAttributeNumber, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ext_oid)); scandesc = systable_beginscan(rel, ExtensionOidIndexId, true, NULL, 1, entry); tuple = systable_getnext(scandesc); /* We assume that there can be at most one matching tuple */ if (HeapTupleIsValid(tuple)) result = ((Form_pg_extension) GETSTRUCT(tuple))->extnamespace; else result = InvalidOid; systable_endscan(scandesc); heap_close(rel, AccessShareLock); return result; /* *INDENT-ON* */ } /* * pg_get_serverdef_string finds the foreign server that corresponds to the * given foreign tableId, and returns this server's definition. */ char * pg_get_serverdef_string(Oid tableRelationId) { ForeignTable *foreignTable = GetForeignTable(tableRelationId); ForeignServer *server = GetForeignServer(foreignTable->serverid); ForeignDataWrapper *foreignDataWrapper = GetForeignDataWrapper(server->fdwid); StringInfoData buffer = { NULL, 0, 0, 0 }; initStringInfo(&buffer); appendStringInfo(&buffer, "CREATE SERVER %s", quote_identifier(server->servername)); if (server->servertype != NULL) { appendStringInfo(&buffer, " TYPE %s", quote_literal_cstr(server->servertype)); } if (server->serverversion != NULL) { appendStringInfo(&buffer, " VERSION %s", quote_literal_cstr(server->serverversion)); } appendStringInfo(&buffer, " FOREIGN DATA WRAPPER %s", quote_identifier(foreignDataWrapper->fdwname)); /* append server options, if any */ AppendOptionListToString(&buffer, server->options); return (buffer.data); } /* * pg_get_sequencedef_string returns the definition of a given sequence. This * definition includes explicit values for all CREATE SEQUENCE options. 
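 *
 * The returned command is built from CREATE_SEQUENCE_COMMAND and spells out
 * the sequence's increment, minimum and maximum values, start value and
 * cycle option, roughly "CREATE SEQUENCE my_schema.my_seq INCREMENT BY 1
 * MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE" (the
 * sequence name and exact wording here are illustrative).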
*/ char * pg_get_sequencedef_string(Oid sequenceRelationId) { char *qualifiedSequenceName = NULL; char *sequenceDef = NULL; Form_pg_sequence pgSequenceForm = NULL; pgSequenceForm = pg_get_sequencedef(sequenceRelationId); /* build our DDL command */ qualifiedSequenceName = generate_relation_name(sequenceRelationId, NIL); #if (PG_VERSION_NUM >= 100000) sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName, pgSequenceForm->seqincrement, pgSequenceForm->seqmin, pgSequenceForm->seqmax, pgSequenceForm->seqstart, pgSequenceForm->seqcycle ? "" : "NO "); #else sequenceDef = psprintf(CREATE_SEQUENCE_COMMAND, qualifiedSequenceName, pgSequenceForm->increment_by, pgSequenceForm->min_value, pgSequenceForm->max_value, pgSequenceForm->start_value, pgSequenceForm->is_cycled ? "" : "NO "); #endif return sequenceDef; } /* * pg_get_sequencedef returns the Form_pg_sequence data about the sequence with the given * object id. */ Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId) { Form_pg_sequence pgSequenceForm = NULL; HeapTuple heapTuple = NULL; #if (PG_VERSION_NUM >= 100000) heapTuple = SearchSysCache1(SEQRELID, sequenceRelationId); if (!HeapTupleIsValid(heapTuple)) { elog(ERROR, "cache lookup failed for sequence %u", sequenceRelationId); } pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple); ReleaseSysCache(heapTuple); #else SysScanDesc scanDescriptor = NULL; Relation sequenceRel = NULL; AclResult permissionCheck = ACLCHECK_NO_PRIV; /* open and lock sequence */ sequenceRel = heap_open(sequenceRelationId, AccessShareLock); /* check permissions to read sequence attributes */ permissionCheck = pg_class_aclcheck(sequenceRelationId, GetUserId(), ACL_SELECT | ACL_USAGE); if (permissionCheck != ACLCHECK_OK) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied for sequence %s", RelationGetRelationName(sequenceRel)))); } /* retrieve attributes from first tuple */ scanDescriptor = systable_beginscan(sequenceRel, InvalidOid, false, NULL, 0, NULL); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find specified sequence"))); } pgSequenceForm = (Form_pg_sequence) GETSTRUCT(heapTuple); systable_endscan(scanDescriptor); heap_close(sequenceRel, AccessShareLock); #endif return pgSequenceForm; } /* * pg_get_tableschemadef_string returns the definition of a given table. This * definition includes table's schema, default column values, not null and check * constraints. The definition does not include constraints that trigger index * creations; specifically, unique and primary key constraints are excluded. * When the flag includeSequenceDefaults is set, the function also creates * DEFAULT clauses for columns getting their default values from a sequence. */ char * pg_get_tableschemadef_string(Oid tableRelationId, bool includeSequenceDefaults) { Relation relation = NULL; char *relationName = NULL; char relationKind = 0; TupleDesc tupleDescriptor = NULL; TupleConstr *tupleConstraints = NULL; int attributeIndex = 0; bool firstAttributePrinted = false; AttrNumber defaultValueIndex = 0; AttrNumber constraintIndex = 0; AttrNumber constraintCount = 0; StringInfoData buffer = { NULL, 0, 0, 0 }; /* * Instead of retrieving values from system catalogs as other functions in * ruleutils.c do, we follow an unusual approach here: we open the relation, * and fetch the relation's tuple descriptor. 
We do this because the tuple * descriptor already contains information harnessed from pg_attrdef, * pg_attribute, pg_constraint, and pg_class; and therefore using the * descriptor saves us from a lot of additional work. */ relation = relation_open(tableRelationId, AccessShareLock); relationName = generate_relation_name(tableRelationId, NIL); EnsureRelationKindSupported(tableRelationId); initStringInfo(&buffer); if (RegularTable(tableRelationId)) { appendStringInfoString(&buffer, "CREATE "); if (relation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED) { appendStringInfoString(&buffer, "UNLOGGED "); } appendStringInfo(&buffer, "TABLE %s (", relationName); } else { appendStringInfo(&buffer, "CREATE FOREIGN TABLE %s (", relationName); } /* * Iterate over the table's columns. If a particular column is not dropped * and is not inherited from another table, print the column's name and its * formatted type. */ tupleDescriptor = RelationGetDescr(relation); tupleConstraints = tupleDescriptor->constr; for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++) { Form_pg_attribute attributeForm = tupleDescriptor->attrs[attributeIndex]; /* * We disregard the inherited attributes (i.e., attinhcount > 0) here. The * reasoning behind this is that Citus implements declarative partitioning * by creating the partitions first and then sending * "ALTER TABLE parent_table ATTACH PARTITION .." command. This may not play * well with regular inhereted tables, which isn't a big concern from Citus' * perspective. */ if (!attributeForm->attisdropped) { const char *attributeName = NULL; const char *attributeTypeName = NULL; if (firstAttributePrinted) { appendStringInfoString(&buffer, ", "); } firstAttributePrinted = true; attributeName = NameStr(attributeForm->attname); appendStringInfo(&buffer, "%s ", quote_identifier(attributeName)); attributeTypeName = format_type_with_typemod(attributeForm->atttypid, attributeForm->atttypmod); appendStringInfoString(&buffer, attributeTypeName); /* if this column has a default value, append the default value */ if (attributeForm->atthasdef) { AttrDefault *defaultValueList = NULL; AttrDefault *defaultValue = NULL; Node *defaultNode = NULL; List *defaultContext = NULL; char *defaultString = NULL; Assert(tupleConstraints != NULL); defaultValueList = tupleConstraints->defval; Assert(defaultValueList != NULL); defaultValue = &(defaultValueList[defaultValueIndex]); defaultValueIndex++; Assert(defaultValue->adnum == (attributeIndex + 1)); Assert(defaultValueIndex <= tupleConstraints->num_defval); /* convert expression to node tree, and prepare deparse context */ defaultNode = (Node *) stringToNode(defaultValue->adbin); /* * if column default value is explicitly requested, or it is * not set from a sequence then we include DEFAULT clause for * this column. */ if (includeSequenceDefaults || !contain_nextval_expression_walker(defaultNode, NULL)) { defaultContext = deparse_context_for(relationName, tableRelationId); /* deparse default value string */ defaultString = deparse_expression(defaultNode, defaultContext, false, false); appendStringInfo(&buffer, " DEFAULT %s", defaultString); } } /* if this column has a not null constraint, append the constraint */ if (attributeForm->attnotnull) { appendStringInfoString(&buffer, " NOT NULL"); } } } /* * Now check if the table has any constraints. If it does, set the number of * check constraints here. Then iterate over all check constraints and print * them. 
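 * For a hypothetical check constraint this appends text along the lines of
 * ", CONSTRAINT price_positive CHECK (price > 0)".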
*/ if (tupleConstraints != NULL) { constraintCount = tupleConstraints->num_check; } for (constraintIndex = 0; constraintIndex < constraintCount; constraintIndex++) { ConstrCheck *checkConstraintList = tupleConstraints->check; ConstrCheck *checkConstraint = &(checkConstraintList[constraintIndex]); Node *checkNode = NULL; List *checkContext = NULL; char *checkString = NULL; /* if an attribute or constraint has been printed, format properly */ if (firstAttributePrinted || constraintIndex > 0) { appendStringInfoString(&buffer, ", "); } appendStringInfo(&buffer, "CONSTRAINT %s CHECK ", quote_identifier(checkConstraint->ccname)); /* convert expression to node tree, and prepare deparse context */ checkNode = (Node *) stringToNode(checkConstraint->ccbin); checkContext = deparse_context_for(relationName, tableRelationId); /* deparse check constraint string */ checkString = deparse_expression(checkNode, checkContext, false, false); appendStringInfoString(&buffer, checkString); } /* close create table's outer parentheses */ appendStringInfoString(&buffer, ")"); /* * If the relation is a foreign table, append the server name and options to * the create table statement. */ relationKind = relation->rd_rel->relkind; if (relationKind == RELKIND_FOREIGN_TABLE) { ForeignTable *foreignTable = GetForeignTable(tableRelationId); ForeignServer *foreignServer = GetForeignServer(foreignTable->serverid); char *serverName = foreignServer->servername; appendStringInfo(&buffer, " SERVER %s", quote_identifier(serverName)); AppendOptionListToString(&buffer, foreignTable->options); } #if (PG_VERSION_NUM >= 100000) else if (relationKind == RELKIND_PARTITIONED_TABLE) { char *partitioningInformation = GeneratePartitioningInformation(tableRelationId); appendStringInfo(&buffer, " PARTITION BY %s ", partitioningInformation); } #endif relation_close(relation, AccessShareLock); return (buffer.data); } /* * EnsureRelationKindSupported errors out if the given relation is not supported * as a distributed relation. */ void EnsureRelationKindSupported(Oid relationId) { char relationKind = get_rel_relkind(relationId); bool supportedRelationKind = false; supportedRelationKind = RegularTable(relationId) || relationKind == RELKIND_FOREIGN_TABLE; /* * Citus doesn't support bare inherited tables (i.e., not a partition or * partitioned table) */ supportedRelationKind = supportedRelationKind && !(IsChildTable(relationId) || IsParentTable(relationId)); if (!supportedRelationKind) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("%s is not a regular, foreign or partitioned table", relationName))); } } /* * pg_get_tablecolumnoptionsdef_string returns column storage type and column * statistics definitions for given table, _if_ these definitions differ from * their default values. The function returns null if all columns use default * values for their storage types and statistics. */ char * pg_get_tablecolumnoptionsdef_string(Oid tableRelationId) { Relation relation = NULL; TupleDesc tupleDescriptor = NULL; AttrNumber attributeIndex = 0; List *columnOptionList = NIL; ListCell *columnOptionCell = NULL; bool firstOptionPrinted = false; StringInfoData buffer = { NULL, 0, 0, 0 }; /* * Instead of retrieving values from system catalogs, we open the relation, * and use the relation's tuple descriptor to access attribute information. * This is primarily to maintain symmetry with pg_get_tableschemadef. 
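 *
 * A typical non-null result looks like "ALTER TABLE ONLY my_schema.my_table
 * ALTER COLUMN payload SET STORAGE EXTERNAL, ALTER COLUMN payload SET
 * STATISTICS 1000", where the relation and column names are illustrative.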
*/ relation = relation_open(tableRelationId, AccessShareLock); EnsureRelationKindSupported(tableRelationId); /* * Iterate over the table's columns. If a particular column is not dropped * and is not inherited from another table, check if column storage or * statistics statements need to be printed. */ tupleDescriptor = RelationGetDescr(relation); for (attributeIndex = 0; attributeIndex < tupleDescriptor->natts; attributeIndex++) { Form_pg_attribute attributeForm = tupleDescriptor->attrs[attributeIndex]; char *attributeName = NameStr(attributeForm->attname); char defaultStorageType = get_typstorage(attributeForm->atttypid); if (!attributeForm->attisdropped && attributeForm->attinhcount == 0) { /* * If the user changed the column's default storage type, create * alter statement and add statement to a list for later processing. */ if (attributeForm->attstorage != defaultStorageType) { char *storageName = 0; StringInfoData statement = { NULL, 0, 0, 0 }; initStringInfo(&statement); switch (attributeForm->attstorage) { case 'p': { storageName = "PLAIN"; break; } case 'e': { storageName = "EXTERNAL"; break; } case 'm': { storageName = "MAIN"; break; } case 'x': { storageName = "EXTENDED"; break; } default: { ereport(ERROR, (errmsg("unrecognized storage type: %c", attributeForm->attstorage))); break; } } appendStringInfo(&statement, "ALTER COLUMN %s ", quote_identifier(attributeName)); appendStringInfo(&statement, "SET STORAGE %s", storageName); columnOptionList = lappend(columnOptionList, statement.data); } /* * If the user changed the column's statistics target, create * alter statement and add statement to a list for later processing. */ if (attributeForm->attstattarget >= 0) { StringInfoData statement = { NULL, 0, 0, 0 }; initStringInfo(&statement); appendStringInfo(&statement, "ALTER COLUMN %s ", quote_identifier(attributeName)); appendStringInfo(&statement, "SET STATISTICS %d", attributeForm->attstattarget); columnOptionList = lappend(columnOptionList, statement.data); } } } /* * Iterate over column storage and statistics statements that we created, * and append them to a single alter table statement. */ foreach(columnOptionCell, columnOptionList) { char *columnOptionStatement = NULL; if (!firstOptionPrinted) { initStringInfo(&buffer); appendStringInfo(&buffer, "ALTER TABLE ONLY %s ", generate_relation_name(tableRelationId, NIL)); } else { appendStringInfoString(&buffer, ", "); } firstOptionPrinted = true; columnOptionStatement = (char *) lfirst(columnOptionCell); appendStringInfoString(&buffer, columnOptionStatement); pfree(columnOptionStatement); } list_free(columnOptionList); relation_close(relation, AccessShareLock); return (buffer.data); } /* * deparse_shard_index_statement uses the provided CREATE INDEX node, dist. * relation, and shard identifier to populate a provided buffer with a string * representation of a shard-extended version of that command. 
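 *
 * For example, given "CREATE INDEX my_idx ON my_table (a)" and shard
 * identifier 102008 (both hypothetical), the buffer receives a command that
 * targets my_idx_102008 and my_table_102008, because both the index and the
 * relation name are extended with the shard identifier.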
*/ void deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid, StringInfo buffer) { IndexStmt *indexStmt = copyObject(origStmt); /* copy to avoid modifications */ char *relationName = indexStmt->relation->relname; char *indexName = indexStmt->idxname; ListCell *indexParameterCell = NULL; List *deparseContext = NULL; /* extend relation and index name using shard identifier */ AppendShardIdToName(&relationName, shardid); AppendShardIdToName(&indexName, shardid); /* use extended shard name and transformed stmt for deparsing */ deparseContext = deparse_context_for(relationName, distrelid); indexStmt = transformIndexStmt(distrelid, indexStmt, NULL); appendStringInfo(buffer, "CREATE %s INDEX %s %s %s ON %s USING %s ", (indexStmt->unique ? "UNIQUE" : ""), (indexStmt->concurrent ? "CONCURRENTLY" : ""), (indexStmt->if_not_exists ? "IF NOT EXISTS" : ""), quote_identifier(indexName), quote_qualified_identifier(indexStmt->relation->schemaname, relationName), indexStmt->accessMethod); /* index column or expression list begins here */ appendStringInfoChar(buffer, '('); foreach(indexParameterCell, indexStmt->indexParams) { IndexElem *indexElement = (IndexElem *) lfirst(indexParameterCell); /* use commas to separate subsequent elements */ if (indexParameterCell != list_head(indexStmt->indexParams)) { appendStringInfoChar(buffer, ','); } if (indexElement->name) { appendStringInfo(buffer, "%s ", quote_identifier(indexElement->name)); } else if (indexElement->expr) { appendStringInfo(buffer, "(%s)", deparse_expression(indexElement->expr, deparseContext, false, false)); } if (indexElement->collation != NIL) { appendStringInfo(buffer, "COLLATE %s ", NameListToQuotedString(indexElement->collation)); } if (indexElement->opclass != NIL) { appendStringInfo(buffer, "%s ", NameListToQuotedString(indexElement->opclass)); } if (indexElement->ordering != SORTBY_DEFAULT) { bool sortAsc = (indexElement->ordering == SORTBY_ASC); appendStringInfo(buffer, "%s ", (sortAsc ? "ASC" : "DESC")); } if (indexElement->nulls_ordering != SORTBY_NULLS_DEFAULT) { bool nullsFirst = (indexElement->nulls_ordering == SORTBY_NULLS_FIRST); appendStringInfo(buffer, "NULLS %s ", (nullsFirst ? "FIRST" : "LAST")); } } appendStringInfoString(buffer, ") "); if (indexStmt->options != NIL) { appendStringInfoString(buffer, "WITH "); AppendOptionListToString(buffer, indexStmt->options); } if (indexStmt->whereClause != NULL) { appendStringInfo(buffer, "WHERE %s", deparse_expression(indexStmt->whereClause, deparseContext, false, false)); } } /* * pg_get_indexclusterdef_string returns the definition of a cluster statement * for given index. The function returns null if the table is not clustered on * given index. 
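 * When the table is clustered on the index, the result has the form
 * "ALTER TABLE my_schema.my_table CLUSTER ON my_index" (names shown here are
 * placeholders).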
*/ char * pg_get_indexclusterdef_string(Oid indexRelationId) { HeapTuple indexTuple = NULL; Form_pg_index indexForm = NULL; Oid tableRelationId = InvalidOid; StringInfoData buffer = { NULL, 0, 0, 0 }; indexTuple = SearchSysCache(INDEXRELID, ObjectIdGetDatum(indexRelationId), 0, 0, 0); if (!HeapTupleIsValid(indexTuple)) { ereport(ERROR, (errmsg("cache lookup failed for index %u", indexRelationId))); } indexForm = (Form_pg_index) GETSTRUCT(indexTuple); tableRelationId = indexForm->indrelid; /* check if the table is clustered on this index */ if (indexForm->indisclustered) { char *tableName = generate_relation_name(tableRelationId, NIL); char *indexName = get_rel_name(indexRelationId); /* needs to be quoted */ initStringInfo(&buffer); appendStringInfo(&buffer, "ALTER TABLE %s CLUSTER ON %s", tableName, quote_identifier(indexName)); } ReleaseSysCache(indexTuple); return (buffer.data); } /* * pg_get_table_grants returns a list of sql statements which recreate the * permissions for a specific table. * * This function is modeled after aclexplode(), don't change too heavily. */ List * pg_get_table_grants(Oid relationId) { /* *INDENT-OFF* */ StringInfoData buffer; Relation relation = NULL; char *relationName = NULL; List *defs = NIL; HeapTuple classTuple = NULL; Datum aclDatum = 0; bool isNull = false; relation = relation_open(relationId, AccessShareLock); relationName = generate_relation_name(relationId, NIL); initStringInfo(&buffer); /* lookup all table level grants */ classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); if (!HeapTupleIsValid(classTuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_TABLE), errmsg("relation with OID %u does not exist", relationId))); } aclDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_relacl, &isNull); ReleaseSysCache(classTuple); if (!isNull) { int i = 0; AclItem *aidat = NULL; Acl *acl = NULL; int offtype = 0; /* * First revoke all default permissions, so we can start adding the * exact permissions from the master. Note that we only do so if there * are any actual grants; an empty grant set signals default * permissions. * * Note: This doesn't work correctly if default permissions have been * changed with ALTER DEFAULT PRIVILEGES - but that's hard to fix * properly currently. 
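 *
 * The statements produced below therefore follow the pattern
 * "REVOKE ALL ON my_table FROM PUBLIC", followed by one
 * "GRANT <privilege> ON my_table TO <role> [WITH GRANT OPTION]" statement per
 * granted privilege; the table and role names here stand in for the real
 * identifiers.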
*/ appendStringInfo(&buffer, "REVOKE ALL ON %s FROM PUBLIC", relationName); defs = lappend(defs, pstrdup(buffer.data)); resetStringInfo(&buffer); /* iterate through the acl datastructure, emit GRANTs */ acl = DatumGetAclP(aclDatum); aidat = ACL_DAT(acl); offtype = -1; i = 0; while (i < ACL_NUM(acl)) { AclItem *aidata = NULL; AclMode priv_bit = 0; offtype++; if (offtype == N_ACL_RIGHTS) { offtype = 0; i++; if (i >= ACL_NUM(acl)) /* done */ { break; } } aidata = &aidat[i]; priv_bit = 1 << offtype; if (ACLITEM_GET_PRIVS(*aidata) & priv_bit) { const char *roleName = NULL; const char *withGrant = ""; if (aidata->ai_grantee != 0) { HeapTuple htup; htup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(aidata->ai_grantee)); if (HeapTupleIsValid(htup)) { Form_pg_authid authForm = ((Form_pg_authid) GETSTRUCT(htup)); roleName = quote_identifier(NameStr(authForm->rolname)); ReleaseSysCache(htup); } else { elog(ERROR, "cache lookup failed for role %u", aidata->ai_grantee); } } else { roleName = "PUBLIC"; } if ((ACLITEM_GET_GOPTIONS(*aidata) & priv_bit) != 0) { withGrant = " WITH GRANT OPTION"; } appendStringInfo(&buffer, "GRANT %s ON %s TO %s%s", convert_aclright_to_string(priv_bit), relationName, roleName, withGrant); defs = lappend(defs, pstrdup(buffer.data)); resetStringInfo(&buffer); } } } resetStringInfo(&buffer); relation_close(relation, NoLock); return defs; /* *INDENT-ON* */ } /* * generate_qualified_relation_name computes the schema-qualified name to display for a * relation specified by OID. */ char * generate_qualified_relation_name(Oid relid) { HeapTuple tp; Form_pg_class reltup; char *relname; char *nspname; char *result; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tp)) { elog(ERROR, "cache lookup failed for relation %u", relid); } reltup = (Form_pg_class) GETSTRUCT(tp); relname = NameStr(reltup->relname); nspname = get_namespace_name(reltup->relnamespace); if (!nspname) { elog(ERROR, "cache lookup failed for namespace %u", reltup->relnamespace); } result = quote_qualified_identifier(nspname, relname); ReleaseSysCache(tp); return result; } /* * AppendOptionListToString converts the option list to its textual format, and * appends this text to the given string buffer. 
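 * The appended text has the shape " OPTIONS (delimiter ',', header 'true')":
 * each option name is quoted as an identifier and each value as a string
 * literal (the option names in this example are hypothetical).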
*/ static void AppendOptionListToString(StringInfo stringBuffer, List *optionList) { if (optionList != NIL) { ListCell *optionCell = NULL; bool firstOptionPrinted = false; appendStringInfo(stringBuffer, " OPTIONS ("); foreach(optionCell, optionList) { DefElem *option = (DefElem *) lfirst(optionCell); char *optionName = option->defname; char *optionValue = defGetString(option); if (firstOptionPrinted) { appendStringInfo(stringBuffer, ", "); } firstOptionPrinted = true; appendStringInfo(stringBuffer, "%s ", quote_identifier(optionName)); appendStringInfo(stringBuffer, "%s", quote_literal_cstr(optionValue)); } appendStringInfo(stringBuffer, ")"); } } /* copy of postgresql's function, which is static as well */ static const char * convert_aclright_to_string(int aclright) { /* *INDENT-OFF* */ switch (aclright) { case ACL_INSERT: return "INSERT"; case ACL_SELECT: return "SELECT"; case ACL_UPDATE: return "UPDATE"; case ACL_DELETE: return "DELETE"; case ACL_TRUNCATE: return "TRUNCATE"; case ACL_REFERENCES: return "REFERENCES"; case ACL_TRIGGER: return "TRIGGER"; case ACL_EXECUTE: return "EXECUTE"; case ACL_USAGE: return "USAGE"; case ACL_CREATE: return "CREATE"; case ACL_CREATE_TEMP: return "TEMPORARY"; case ACL_CONNECT: return "CONNECT"; default: elog(ERROR, "unrecognized aclright: %d", aclright); return NULL; } /* *INDENT-ON* */ } /* * contain_nextval_expression_walker walks over expression tree and returns * true if it contains call to 'nextval' function. */ bool contain_nextval_expression_walker(Node *node, void *context) { if (node == NULL) { return false; } if (IsA(node, FuncExpr)) { FuncExpr *funcExpr = (FuncExpr *) node; if (funcExpr->funcid == F_NEXTVAL_OID) { return true; } } return expression_tree_walker(node, contain_nextval_expression_walker, context); } citus-7.0.3/src/backend/distributed/utils/colocation_utils.c000066400000000000000000001032271317107136600242400ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * colocation_utils.c * * This file contains functions to perform useful operations on co-located tables. * * Copyright (c) 2014-2016, Citus Data, Inc. 
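 *
 * Two distributed tables are co-located when they have the same shard count,
 * matching shard min/max intervals, and shard placements on the same nodes,
 * so that shard i of one table can always be processed together with shard i
 * of the other. As a rough sketch of how the helpers in this file fit
 * together (the function names are real, the flow is only illustrative):
 *
 *     if (TablesColocated(leftRelationId, rightRelationId))
 *     {
 *         List *colocatedShardList = ColocatedShardIntervalList(shardInterval);
 *
 *         // shard intervals line up one-to-one across the co-located tables
 *     }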
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/pg_type.h" #include "commands/sequence.h" #include "distributed/colocation_utils.h" #include "distributed/listutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_logical_planner.h" #include "distributed/pg_dist_colocation.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/rel.h" /* local function forward declarations */ static void MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId); static void ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId); static bool ShardsIntervalsEqual(ShardInterval *leftShardInterval, ShardInterval *rightShardInterval); static bool HashPartitionedShardIntervalsEqual(ShardInterval *leftShardInterval, ShardInterval *rightShardInterval); static int CompareShardPlacementsByNode(const void *leftElement, const void *rightElement); static void UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId); static List * ColocationGroupTableList(Oid colocationId); static void DeleteColocationGroup(uint32 colocationId); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(mark_tables_colocated); PG_FUNCTION_INFO_V1(get_colocated_shard_array); /* * mark_tables_colocated puts target tables to same colocation group with the * source table. If the source table is in INVALID_COLOCATION_ID group, then it * creates a new colocation group and assigns all tables to this new colocation * group. */ Datum mark_tables_colocated(PG_FUNCTION_ARGS) { Oid sourceRelationId = PG_GETARG_OID(0); ArrayType *relationIdArrayObject = PG_GETARG_ARRAYTYPE_P(1); Datum *relationIdDatumArray = NULL; int relationIndex = 0; int relationCount = ArrayObjectCount(relationIdArrayObject); if (relationCount < 1) { ereport(ERROR, (errmsg("at least one target table is required for this " "operation"))); } EnsureCoordinator(); CheckCitusVersion(ERROR); relationIdDatumArray = DeconstructArrayObject(relationIdArrayObject); for (relationIndex = 0; relationIndex < relationCount; relationIndex++) { Oid nextRelationOid = DatumGetObjectId(relationIdDatumArray[relationIndex]); MarkTablesColocated(sourceRelationId, nextRelationOid); } PG_RETURN_VOID(); } /* * get_colocated_shards_array returns array of shards ids which are co-located with given * shard. 
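 *
 * The function is exposed as a SQL-callable UDF (exercised, for instance, from
 * the regression tests). A hypothetical invocation from C (a sketch only)
 * would go through the fmgr interface:
 *
 *     Datum shardArrayDatum =
 *         DirectFunctionCall1(get_colocated_shard_array, UInt32GetDatum(shardId));
 *     ArrayType *shardArray = DatumGetArrayTypeP(shardArrayDatum);
 *
 * The array elements are the co-located shard ids, sorted by shard id to keep
 * the output stable.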
*/ Datum get_colocated_shard_array(PG_FUNCTION_ARGS) { uint32 shardId = PG_GETARG_UINT32(0); ShardInterval *shardInterval = LoadShardInterval(shardId); ArrayType *colocatedShardsArrayType = NULL; List *colocatedShardList = ColocatedShardIntervalList(shardInterval); ListCell *colocatedShardCell = NULL; int colocatedShardCount = list_length(colocatedShardList); Datum *colocatedShardsDatumArray = palloc0(colocatedShardCount * sizeof(Datum)); Oid arrayTypeId = OIDOID; int colocatedShardIndex = 0; /* sort to get consistent output */ colocatedShardList = SortList(colocatedShardList, CompareShardIntervalsById); foreach(colocatedShardCell, colocatedShardList) { ShardInterval *colocatedShardInterval = (ShardInterval *) lfirst( colocatedShardCell); uint64 colocatedShardId = colocatedShardInterval->shardId; Datum colocatedShardDatum = Int64GetDatum(colocatedShardId); colocatedShardsDatumArray[colocatedShardIndex] = colocatedShardDatum; colocatedShardIndex++; } colocatedShardsArrayType = DatumArrayToArrayType(colocatedShardsDatumArray, colocatedShardCount, arrayTypeId); PG_RETURN_ARRAYTYPE_P(colocatedShardsArrayType); } /* * MarkTablesColocated puts both tables to same colocation group. If the * source table is in INVALID_COLOCATION_ID group, then it creates a new * colocation group and assigns both tables to same colocation group. Otherwise, * it adds the target table to colocation group of the source table. */ static void MarkTablesColocated(Oid sourceRelationId, Oid targetRelationId) { uint32 sourceColocationId = INVALID_COLOCATION_ID; uint32 targetColocationId = INVALID_COLOCATION_ID; Relation pgDistColocation = NULL; CheckReplicationModel(sourceRelationId, targetRelationId); CheckDistributionColumnType(sourceRelationId, targetRelationId); /* * Get an exclusive lock on the colocation system catalog. Therefore, we * can be sure that there will no modifications on the colocation table * until this transaction is committed. */ pgDistColocation = heap_open(DistColocationRelationId(), ExclusiveLock); /* check if shard placements are colocated */ ErrorIfShardPlacementsNotColocated(sourceRelationId, targetRelationId); /* * Get colocation group of the source table, if the source table does not * have a colocation group, create a new one, and set it for the source table. */ sourceColocationId = TableColocationId(sourceRelationId); if (sourceColocationId == INVALID_COLOCATION_ID) { uint32 shardCount = ShardIntervalCount(sourceRelationId); uint32 shardReplicationFactor = TableShardReplicationFactor(sourceRelationId); Var *sourceDistributionColumn = DistPartitionKey(sourceRelationId); Oid sourceDistributionColumnType = InvalidOid; /* reference tables has NULL distribution column */ if (sourceDistributionColumn != NULL) { sourceDistributionColumnType = sourceDistributionColumn->vartype; } sourceColocationId = CreateColocationGroup(shardCount, shardReplicationFactor, sourceDistributionColumnType); UpdateRelationColocationGroup(sourceRelationId, sourceColocationId); } targetColocationId = TableColocationId(targetRelationId); /* finally set colocation group for the target relation */ UpdateRelationColocationGroup(targetRelationId, sourceColocationId); /* if there is not any remaining table in the colocation group, delete it */ DeleteColocationGroupIfNoTablesBelong(targetColocationId); heap_close(pgDistColocation, NoLock); } /* * ErrorIfShardPlacementsNotColocated checks if the shard placements of the * given two relations are physically colocated. 
It errors out in any of * following cases: * 1.Shard counts are different, * 2.Shard intervals don't match * 3.Matching shard intervals have different number of shard placements * 4.Shard placements are not colocated (not on the same node) * 5.Shard placements have different health states * * Note that, this functions assumes that both tables are hash distributed. */ static void ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) { List *leftShardIntervalList = NIL; List *rightShardIntervalList = NIL; ListCell *leftShardIntervalCell = NULL; ListCell *rightShardIntervalCell = NULL; char *leftRelationName = NULL; char *rightRelationName = NULL; uint32 leftShardCount = 0; uint32 rightShardCount = 0; /* get sorted shard interval lists for both tables */ leftShardIntervalList = LoadShardIntervalList(leftRelationId); rightShardIntervalList = LoadShardIntervalList(rightRelationId); /* prevent concurrent placement changes */ LockShardListMetadata(leftShardIntervalList, ShareLock); LockShardListMetadata(rightShardIntervalList, ShareLock); leftRelationName = get_rel_name(leftRelationId); rightRelationName = get_rel_name(rightRelationId); leftShardCount = list_length(leftShardIntervalList); rightShardCount = list_length(rightShardIntervalList); if (leftShardCount != rightShardCount) { ereport(ERROR, (errmsg("cannot colocate tables %s and %s", leftRelationName, rightRelationName), errdetail("Shard counts don't match for %s and %s.", leftRelationName, rightRelationName))); } /* compare shard intervals one by one */ forboth(leftShardIntervalCell, leftShardIntervalList, rightShardIntervalCell, rightShardIntervalList) { ShardInterval *leftInterval = (ShardInterval *) lfirst(leftShardIntervalCell); ShardInterval *rightInterval = (ShardInterval *) lfirst(rightShardIntervalCell); List *leftPlacementList = NIL; List *rightPlacementList = NIL; List *sortedLeftPlacementList = NIL; List *sortedRightPlacementList = NIL; ListCell *leftPlacementCell = NULL; ListCell *rightPlacementCell = NULL; uint64 leftShardId = leftInterval->shardId; uint64 rightShardId = rightInterval->shardId; bool shardsIntervalsEqual = ShardsIntervalsEqual(leftInterval, rightInterval); if (!shardsIntervalsEqual) { ereport(ERROR, (errmsg("cannot colocate tables %s and %s", leftRelationName, rightRelationName), errdetail("Shard intervals don't match for %s and %s.", leftRelationName, rightRelationName))); } leftPlacementList = ShardPlacementList(leftShardId); rightPlacementList = ShardPlacementList(rightShardId); if (list_length(leftPlacementList) != list_length(rightPlacementList)) { ereport(ERROR, (errmsg("cannot colocate tables %s and %s", leftRelationName, rightRelationName), errdetail("Shard %ld of %s and shard %ld of %s " "have different number of shard placements.", leftShardId, leftRelationName, rightShardId, rightRelationName))); } /* sort shard placements according to the node */ sortedLeftPlacementList = SortList(leftPlacementList, CompareShardPlacementsByNode); sortedRightPlacementList = SortList(rightPlacementList, CompareShardPlacementsByNode); /* compare shard placements one by one */ forboth(leftPlacementCell, sortedLeftPlacementList, rightPlacementCell, sortedRightPlacementList) { ShardPlacement *leftPlacement = (ShardPlacement *) lfirst(leftPlacementCell); ShardPlacement *rightPlacement = (ShardPlacement *) lfirst(rightPlacementCell); int nodeCompare = 0; /* * If shard placements are on different nodes, these shard * placements are not colocated. 
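 *
 * Note that CompareShardPlacementsByNode is written as a qsort-style
 * comparator and therefore expects pointers to ShardPlacement pointers,
 * which is why the addresses of the local placement pointers are passed
 * below.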
*/ nodeCompare = CompareShardPlacementsByNode((void *) &leftPlacement, (void *) &rightPlacement); if (nodeCompare != 0) { ereport(ERROR, (errmsg("cannot colocate tables %s and %s", leftRelationName, rightRelationName), errdetail("Shard %ld of %s and shard %ld of %s " "are not colocated.", leftShardId, leftRelationName, rightShardId, rightRelationName))); } /* we also don't allow colocated shards to be in different shard states */ if (leftPlacement->shardState != rightPlacement->shardState) { ereport(ERROR, (errmsg("cannot colocate tables %s and %s", leftRelationName, rightRelationName), errdetail("%s and %s have shard placements in " "different shard states.", leftRelationName, rightRelationName))); } } } } /* * ShardsIntervalsEqual checks if two shard intervals of distributed * tables are equal. * * Notes on the function: * (i) The function returns true if both shard intervals are the same. * (ii) The function returns false if the shard intervals are equal but * their distribution methods are different. * (iii) The function returns false for append and range partitioned tables, * except for case (i). * (iv) For reference tables, all shards are equal (i.e., same replication factor * and shard min/max values). Thus, always return true for shards of reference * tables. */ static bool ShardsIntervalsEqual(ShardInterval *leftShardInterval, ShardInterval *rightShardInterval) { char leftIntervalPartitionMethod = PartitionMethod(leftShardInterval->relationId); char rightIntervalPartitionMethod = PartitionMethod(rightShardInterval->relationId); /* if both shards are the same, return true */ if (leftShardInterval->shardId == rightShardInterval->shardId) { return true; } /* if partition methods are not the same, shards cannot be considered as co-located */ leftIntervalPartitionMethod = PartitionMethod(leftShardInterval->relationId); rightIntervalPartitionMethod = PartitionMethod(rightShardInterval->relationId); if (leftIntervalPartitionMethod != rightIntervalPartitionMethod) { return false; } if (leftIntervalPartitionMethod == DISTRIBUTE_BY_HASH) { return HashPartitionedShardIntervalsEqual(leftShardInterval, rightShardInterval); } else if (leftIntervalPartitionMethod == DISTRIBUTE_BY_NONE) { /* * Reference tables have only a single shard and all reference tables * are always co-located with each other. */ return true; } /* shards of append and range partitioned tables are never co-located */ return false; } /* * HashPartitionedShardIntervalsEqual checks if two shard intervals of hash distributed * tables are equal. Note that this function doesn't work with shards of non-hash * partitioned tables. * * We compare the shards' min/max values here to decide whether they are colocated. * Alternatively, we could call ShardIndex on both shards and compare the resulting * indexes, but we avoid that because comparing the min/max values directly is cheaper. */ static bool HashPartitionedShardIntervalsEqual(ShardInterval *leftShardInterval, ShardInterval *rightShardInterval) { int32 leftShardMinValue = DatumGetInt32(leftShardInterval->minValue); int32 leftShardMaxValue = DatumGetInt32(leftShardInterval->maxValue); int32 rightShardMinValue = DatumGetInt32(rightShardInterval->minValue); int32 rightShardMaxValue = DatumGetInt32(rightShardInterval->maxValue); bool minValuesEqual = leftShardMinValue == rightShardMinValue; bool maxValuesEqual = leftShardMaxValue == rightShardMaxValue; return minValuesEqual && maxValuesEqual; } /* * CompareShardPlacementsByNode compares two shard placements by their nodename * and nodeport.
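 *
 * The comparator follows the qsort convention, so both arguments are pointers
 * to ShardPlacement pointers. A typical use (a sketch mirroring how this file
 * sorts placement lists) is:
 *
 *     List *sortedPlacementList =
 *         SortList(placementList, CompareShardPlacementsByNode);
 *
 * where SortList builds an array of the list's pointers and hands pairs of
 * those array slots to the comparator.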
*/ static int CompareShardPlacementsByNode(const void *leftElement, const void *rightElement) { const ShardPlacement *leftPlacement = *((const ShardPlacement **) leftElement); const ShardPlacement *rightPlacement = *((const ShardPlacement **) rightElement); char *leftNodeName = leftPlacement->nodeName; char *rightNodeName = rightPlacement->nodeName; uint32 leftNodePort = leftPlacement->nodePort; uint32 rightNodePort = rightPlacement->nodePort; /* first compare node names */ int nodeNameCompare = strncmp(leftNodeName, rightNodeName, WORKER_LENGTH); if (nodeNameCompare != 0) { return nodeNameCompare; } /* if node names are same, check node ports */ if (leftNodePort < rightNodePort) { return -1; } else if (leftNodePort > rightNodePort) { return 1; } else { return 0; } } /* * ColocationId searches pg_dist_colocation for shard count, replication factor * and distribution column type. If a matching entry is found, it returns the * colocation id, otherwise it returns INVALID_COLOCATION_ID. */ uint32 ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType) { uint32 colocationId = INVALID_COLOCATION_ID; HeapTuple colocationTuple = NULL; SysScanDesc scanDescriptor; const int scanKeyCount = 3; ScanKeyData scanKey[scanKeyCount]; bool indexOK = true; Relation pgDistColocation = heap_open(DistColocationRelationId(), AccessShareLock); /* set scan arguments */ ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_shardcount, BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(shardCount)); ScanKeyInit(&scanKey[1], Anum_pg_dist_colocation_replicationfactor, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(replicationFactor)); ScanKeyInit(&scanKey[2], Anum_pg_dist_colocation_distributioncolumntype, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributionColumnType)); scanDescriptor = systable_beginscan(pgDistColocation, DistColocationConfigurationIndexId(), indexOK, NULL, scanKeyCount, scanKey); colocationTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(colocationTuple)) { Form_pg_dist_colocation colocationForm = (Form_pg_dist_colocation) GETSTRUCT(colocationTuple); colocationId = colocationForm->colocationid; } systable_endscan(scanDescriptor); heap_close(pgDistColocation, AccessShareLock); return colocationId; } /* * CreateColocationGroup creates a new colocation id and writes it into * pg_dist_colocation with the given configuration. It also returns the created * colocation id. 
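 *
 * Callers typically pair this with ColocationId above: first look for an
 * existing group with the same shard count, replication factor and
 * distribution column type, and only create a new group when the lookup comes
 * back empty. A rough sketch of that pattern (illustrative only):
 *
 *     uint32 colocationId = ColocationId(shardCount, replicationFactor,
 *                                        distributionColumnType);
 *     if (colocationId == INVALID_COLOCATION_ID)
 *     {
 *         colocationId = CreateColocationGroup(shardCount, replicationFactor,
 *                                              distributionColumnType);
 *     }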
*/ uint32 CreateColocationGroup(int shardCount, int replicationFactor, Oid distributionColumnType) { uint32 colocationId = GetNextColocationId(); Relation pgDistColocation = NULL; TupleDesc tupleDescriptor = NULL; HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_colocation]; bool isNulls[Natts_pg_dist_colocation]; /* form new colocation tuple */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); values[Anum_pg_dist_colocation_colocationid - 1] = UInt32GetDatum(colocationId); values[Anum_pg_dist_colocation_shardcount - 1] = UInt32GetDatum(shardCount); values[Anum_pg_dist_colocation_replicationfactor - 1] = UInt32GetDatum(replicationFactor); values[Anum_pg_dist_colocation_distributioncolumntype - 1] = ObjectIdGetDatum(distributionColumnType); /* open colocation relation and insert the new tuple */ pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistColocation); heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistColocation, heapTuple); /* increment the counter so that next command can see the row */ CommandCounterIncrement(); heap_close(pgDistColocation, RowExclusiveLock); return colocationId; } /* * GetNextColocationId allocates and returns a unique colocationId for the * colocation group to be created. This allocation occurs both in shared memory * and in write ahead logs; writing to logs avoids the risk of having * colocationId collisions. * * Please note that the caller is still responsible for finalizing colocationId * with the master node. Further note that this function relies on an internal * sequence created in initdb to generate unique identifiers. */ uint32 GetNextColocationId() { text *sequenceName = cstring_to_text(COLOCATIONID_SEQUENCE_NAME); Oid sequenceId = ResolveRelationId(sequenceName); Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; Datum colocationIdDatum = 0; uint32 colocationId = INVALID_COLOCATION_ID; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate new and unique colocation id from sequence */ colocationIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); colocationId = DatumGetUInt32(colocationIdDatum); return colocationId; } /* * CheckReplicationModel checks if given relations are from the same * replication model. Otherwise, it errors out. */ void CheckReplicationModel(Oid sourceRelationId, Oid targetRelationId) { DistTableCacheEntry *sourceTableEntry = NULL; DistTableCacheEntry *targetTableEntry = NULL; char sourceReplicationModel = 0; char targetReplicationModel = 0; sourceTableEntry = DistributedTableCacheEntry(sourceRelationId); sourceReplicationModel = sourceTableEntry->replicationModel; targetTableEntry = DistributedTableCacheEntry(targetRelationId); targetReplicationModel = targetTableEntry->replicationModel; if (sourceReplicationModel != targetReplicationModel) { char *sourceRelationName = get_rel_name(sourceRelationId); char *targetRelationName = get_rel_name(targetRelationId); ereport(ERROR, (errmsg("cannot colocate tables %s and %s", sourceRelationName, targetRelationName), errdetail("Replication models don't match for %s and %s.", sourceRelationName, targetRelationName))); } } /* * CheckDistributionColumnType checks if distribution column types of relations * are same. 
Otherwise, it errors out. */ void CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId) { Var *sourceDistributionColumn = NULL; Var *targetDistributionColumn = NULL; Oid sourceDistributionColumnType = InvalidOid; Oid targetDistributionColumnType = InvalidOid; /* reference tables have NULL distribution column */ sourceDistributionColumn = DistPartitionKey(sourceRelationId); if (sourceDistributionColumn == NULL) { sourceDistributionColumnType = InvalidOid; } else { sourceDistributionColumnType = sourceDistributionColumn->vartype; } /* reference tables have NULL distribution column */ targetDistributionColumn = DistPartitionKey(targetRelationId); if (targetDistributionColumn == NULL) { targetDistributionColumnType = InvalidOid; } else { targetDistributionColumnType = targetDistributionColumn->vartype; } if (sourceDistributionColumnType != targetDistributionColumnType) { char *sourceRelationName = get_rel_name(sourceRelationId); char *targetRelationName = get_rel_name(targetRelationId); ereport(ERROR, (errmsg("cannot colocate tables %s and %s", sourceRelationName, targetRelationName), errdetail("Distribution column types don't match for " "%s and %s.", sourceRelationName, targetRelationName))); } } /* * UpdateRelationColocationGroup updates colocation group in pg_dist_partition * for the given relation. */ static void UpdateRelationColocationGroup(Oid distributedRelationId, uint32 colocationId) { Relation pgDistPartition = NULL; HeapTuple heapTuple = NULL; TupleDesc tupleDescriptor = NULL; SysScanDesc scanDescriptor = NULL; bool shouldSyncMetadata = false; bool indexOK = true; int scanKeyCount = 1; ScanKeyData scanKey[scanKeyCount]; Datum values[Natts_pg_dist_partition]; bool isNull[Natts_pg_dist_partition]; bool replace[Natts_pg_dist_partition]; pgDistPartition = heap_open(DistPartitionRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistPartition); ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(distributedRelationId)); scanDescriptor = systable_beginscan(pgDistPartition, DistPartitionLogicalRelidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { char *distributedRelationName = get_rel_name(distributedRelationId); ereport(ERROR, (errmsg("could not find valid entry for relation %s", distributedRelationName))); } memset(values, 0, sizeof(values)); memset(isNull, false, sizeof(isNull)); memset(replace, false, sizeof(replace)); values[Anum_pg_dist_partition_colocationid - 1] = UInt32GetDatum(colocationId); isNull[Anum_pg_dist_partition_colocationid - 1] = false; replace[Anum_pg_dist_partition_colocationid - 1] = true; heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isNull, replace); CatalogTupleUpdate(pgDistPartition, &heapTuple->t_self, heapTuple); CitusInvalidateRelcacheByRelid(distributedRelationId); CommandCounterIncrement(); systable_endscan(scanDescriptor); heap_close(pgDistPartition, NoLock); shouldSyncMetadata = ShouldSyncTableMetadata(distributedRelationId); if (shouldSyncMetadata) { char *updateColocationIdCommand = ColocationIdUpdateCommand(distributedRelationId, colocationId); SendCommandToWorkers(WORKERS_WITH_METADATA, updateColocationIdCommand); } } /* * TableColocationId function returns co-location id of given table. This function * errors out if given table is not distributed. 
*/ uint32 TableColocationId(Oid distributedTableId) { DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); return cacheEntry->colocationId; } /* * TablesColocated function checks whether given two tables are co-located and * returns true if they are co-located. A table is always co-located with itself. * If given two tables are different and they are not distributed, this function * errors out. */ bool TablesColocated(Oid leftDistributedTableId, Oid rightDistributedTableId) { uint32 leftColocationId = INVALID_COLOCATION_ID; uint32 rightColocationId = INVALID_COLOCATION_ID; if (leftDistributedTableId == rightDistributedTableId) { return true; } leftColocationId = TableColocationId(leftDistributedTableId); rightColocationId = TableColocationId(rightDistributedTableId); if (leftColocationId == INVALID_COLOCATION_ID || rightColocationId == INVALID_COLOCATION_ID) { return false; } return leftColocationId == rightColocationId; } /* * ShardsColocated function checks whether given two shards are co-located and * returns true if they are co-located. Two shards are co-located either; * - They are same (A shard is always co-located with itself). * OR * - Tables are hash partitioned. * - Tables containing the shards are co-located. * - Min/Max values of the shards are same. */ bool ShardsColocated(ShardInterval *leftShardInterval, ShardInterval *rightShardInterval) { bool tablesColocated = TablesColocated(leftShardInterval->relationId, rightShardInterval->relationId); if (tablesColocated) { bool shardIntervalEqual = ShardsIntervalsEqual(leftShardInterval, rightShardInterval); return shardIntervalEqual; } return false; } /* * ColocatedTableList function returns list of relation ids which are co-located * with given table. If given table is not hash distributed, co-location is not * valid for that table and it is only co-located with itself. */ List * ColocatedTableList(Oid distributedTableId) { uint32 tableColocationId = TableColocationId(distributedTableId); List *colocatedTableList = NIL; /* * If distribution type of the table is not hash, the table is only co-located * with itself. */ if (tableColocationId == INVALID_COLOCATION_ID) { colocatedTableList = lappend_oid(colocatedTableList, distributedTableId); return colocatedTableList; } colocatedTableList = ColocationGroupTableList(tableColocationId); return colocatedTableList; } /* * ColocationGroupTableList returns the list of tables in the given colocation * group. If the colocation group is INVALID_COLOCATION_ID, it returns NIL. */ static List * ColocationGroupTableList(Oid colocationId) { List *colocatedTableList = NIL; Relation pgDistPartition = NULL; TupleDesc tupleDescriptor = NULL; SysScanDesc scanDescriptor = NULL; HeapTuple heapTuple = NULL; bool indexOK = true; int scanKeyCount = 1; ScanKeyData scanKey[1]; /* * If distribution type of the table is not hash, the table is only co-located * with itself. 
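 * Such tables have INVALID_COLOCATION_ID in pg_dist_partition, which is why
 * an invalid colocation id simply yields an empty table list here.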
*/ if (colocationId == INVALID_COLOCATION_ID) { return NIL; } ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid, BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(colocationId)); pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); tupleDescriptor = RelationGetDescr(pgDistPartition); scanDescriptor = systable_beginscan(pgDistPartition, DistPartitionColocationidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { bool isNull = false; Oid colocatedTableId = heap_getattr(heapTuple, Anum_pg_dist_partition_logicalrelid, tupleDescriptor, &isNull); colocatedTableList = lappend_oid(colocatedTableList, colocatedTableId); heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); heap_close(pgDistPartition, AccessShareLock); return colocatedTableList; } /* * ColocatedShardIntervalList function returns the list of shard intervals which are * co-located with the given shard. If the given shard belongs to an append or range * distributed table, co-location is not valid for that shard. Therefore such a shard * is only co-located with itself. */ List * ColocatedShardIntervalList(ShardInterval *shardInterval) { Oid distributedTableId = shardInterval->relationId; List *colocatedShardList = NIL; int shardIntervalIndex = -1; List *colocatedTableList = NIL; ListCell *colocatedTableCell = NULL; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); char partitionMethod = cacheEntry->partitionMethod; /* * If the distribution type of the table is not hash or reference, each shard of * the table is only co-located with itself. */ if ((partitionMethod == DISTRIBUTE_BY_APPEND) || (partitionMethod == DISTRIBUTE_BY_RANGE)) { ShardInterval *copyShardInterval = CitusMakeNode(ShardInterval); CopyShardInterval(shardInterval, copyShardInterval); colocatedShardList = lappend(colocatedShardList, copyShardInterval); return colocatedShardList; } shardIntervalIndex = ShardIndex(shardInterval); colocatedTableList = ColocatedTableList(distributedTableId); /* ShardIndex has to find the index of the given shard */ Assert(shardIntervalIndex >= 0); foreach(colocatedTableCell, colocatedTableList) { Oid colocatedTableId = lfirst_oid(colocatedTableCell); DistTableCacheEntry *colocatedTableCacheEntry = DistributedTableCacheEntry(colocatedTableId); ShardInterval *colocatedShardInterval = NULL; ShardInterval *copyShardInterval = NULL; /* * Since we iterate over co-located tables, the shard count of each table should * be the same and greater than shardIntervalIndex. */ Assert(cacheEntry->shardIntervalArrayLength == colocatedTableCacheEntry->shardIntervalArrayLength); colocatedShardInterval = colocatedTableCacheEntry->sortedShardIntervalArray[shardIntervalIndex]; copyShardInterval = CitusMakeNode(ShardInterval); CopyShardInterval(colocatedShardInterval, copyShardInterval); colocatedShardList = lappend(colocatedShardList, copyShardInterval); } Assert(list_length(colocatedTableList) == list_length(colocatedShardList)); return colocatedShardList; } /* * ColocatedTableId returns an arbitrary table which belongs to the given colocation * group. If there is no such colocation group, it returns InvalidOid. * * This function also takes an AccessShareLock on the colocated table to * guarantee that the table isn't dropped for the remainder of the transaction.
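 *
 * Callers are expected to handle the case where the group no longer contains
 * any table, for instance (a sketch only):
 *
 *     Oid colocatedTableId = ColocatedTableId(colocationId);
 *     if (!OidIsValid(colocatedTableId))
 *     {
 *         // the group is empty or the colocation id was invalid
 *         return;
 *     }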
*/ Oid ColocatedTableId(Oid colocationId) { Oid colocatedTableId = InvalidOid; Relation pgDistPartition = NULL; TupleDesc tupleDescriptor = NULL; SysScanDesc scanDescriptor = NULL; HeapTuple heapTuple = NULL; bool indexOK = true; bool isNull = false; ScanKeyData scanKey[1]; int scanKeyCount = 1; /* * We may have a distributed table whose colocation id is INVALID_COLOCATION_ID. * In this case, we do not want to send that table's id as colocated table id. */ if (colocationId == INVALID_COLOCATION_ID) { return colocatedTableId; } ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_colocationid, BTEqualStrategyNumber, F_INT4EQ, ObjectIdGetDatum(colocationId)); /* do not allow any tables to be dropped while we read from pg_dist_partition */ pgDistPartition = heap_open(DistPartitionRelationId(), ShareLock); tupleDescriptor = RelationGetDescr(pgDistPartition); scanDescriptor = systable_beginscan(pgDistPartition, DistPartitionColocationidIndexId(), indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { colocatedTableId = heap_getattr(heapTuple, Anum_pg_dist_partition_logicalrelid, tupleDescriptor, &isNull); /* make sure the table isn't dropped for the remainder of the transaction */ LockRelationOid(colocatedTableId, AccessShareLock); } systable_endscan(scanDescriptor); heap_close(pgDistPartition, ShareLock); return colocatedTableId; } /* * ColocatedShardIdInRelation returns shardId of the shard from given relation, so that * returned shard is co-located with given shard. */ uint64 ColocatedShardIdInRelation(Oid relationId, int shardIndex) { DistTableCacheEntry *tableCacheEntry = DistributedTableCacheEntry(relationId); return tableCacheEntry->sortedShardIntervalArray[shardIndex]->shardId; } /* * DeleteColocationGroupIfNoTablesBelong function deletes given co-location group if there * is no relation in that co-location group. A co-location group may become empty after * mark_tables_colocated or upgrade_reference_table UDF calls. In that case we need to * remove empty co-location group to prevent orphaned co-location groups. */ void DeleteColocationGroupIfNoTablesBelong(uint32 colocationId) { if (colocationId != INVALID_COLOCATION_ID) { List *colocatedTableList = ColocationGroupTableList(colocationId); int colocatedTableCount = list_length(colocatedTableList); if (colocatedTableCount == 0) { DeleteColocationGroup(colocationId); } } } /* * DeleteColocationGroup deletes the colocation group from pg_dist_colocation. 
*/ static void DeleteColocationGroup(uint32 colocationId) { Relation pgDistColocation = NULL; SysScanDesc scanDescriptor = NULL; int scanKeyCount = 1; ScanKeyData scanKey[scanKeyCount]; bool indexOK = false; HeapTuple heapTuple = NULL; pgDistColocation = heap_open(DistColocationRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_colocation_colocationid, BTEqualStrategyNumber, F_INT4EQ, UInt32GetDatum(colocationId)); scanDescriptor = systable_beginscan(pgDistColocation, InvalidOid, indexOK, NULL, scanKeyCount, scanKey); /* if a record is found, delete it */ heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { simple_heap_delete(pgDistColocation, &(heapTuple->t_self)); CitusInvalidateRelcacheByRelid(DistColocationRelationId()); CommandCounterIncrement(); } systable_endscan(scanDescriptor); heap_close(pgDistColocation, RowExclusiveLock); } citus-7.0.3/src/backend/distributed/utils/distribution_column.c000066400000000000000000000145011317107136600247560ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * distribution_column.c * * This file contains functions for translating distribution columns in * metadata tables. * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/attnum.h" #include "access/heapam.h" #include "access/htup_details.h" #include "distributed/distribution_column.h" #include "distributed/metadata_cache.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" #include "nodes/primnodes.h" #include "parser/scansup.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" #include "utils/syscache.h" /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(column_name_to_column); PG_FUNCTION_INFO_V1(column_name_to_column_id); PG_FUNCTION_INFO_V1(column_to_column_name); /* * column_name_to_column is an internal UDF to obtain a textual representation * of a particular column node (Var), given a relation identifier and column * name. There is no requirement that the table be distributed; this function * simply returns the textual representation of a Var representing a column. * This function will raise an ERROR if no such column can be found or if the * provided name refers to a system column. */ Datum column_name_to_column(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); text *columnText = PG_GETARG_TEXT_P(1); Relation relation = NULL; char *columnName = text_to_cstring(columnText); Var *column = NULL; char *columnNodeString = NULL; text *columnNodeText = NULL; CheckCitusVersion(ERROR); relation = relation_open(relationId, AccessShareLock); column = BuildDistributionKeyFromColumnName(relation, columnName); columnNodeString = nodeToString(column); columnNodeText = cstring_to_text(columnNodeString); relation_close(relation, AccessShareLock); PG_RETURN_TEXT_P(columnNodeText); } /* * column_name_to_column_id takes a relation identifier and a name of a column * in that relation and returns the index of that column in the relation. If * the provided name is a system column or no column at all, this function will * throw an error instead. 
*/ Datum column_name_to_column_id(PG_FUNCTION_ARGS) { Oid distributedTableId = PG_GETARG_OID(0); char *columnName = PG_GETARG_CSTRING(1); Relation relation = NULL; Var *column = NULL; relation = relation_open(distributedTableId, AccessExclusiveLock); column = BuildDistributionKeyFromColumnName(relation, columnName); relation_close(relation, NoLock); PG_RETURN_INT16((int16) column->varattno); } /* * column_to_column_name is an internal UDF to obtain the human-readable name * of a column given a relation identifier and the column's internal textual * (Var) representation. This function will raise an ERROR if no such column * can be found or if the provided Var refers to a system column. */ Datum column_to_column_name(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); text *columnNodeText = PG_GETARG_TEXT_P(1); char *columnNodeString = text_to_cstring(columnNodeText); char *columnName = NULL; text *columnText = NULL; CheckCitusVersion(ERROR); columnName = ColumnNameToColumn(relationId, columnNodeString); columnText = cstring_to_text(columnName); PG_RETURN_TEXT_P(columnText); } /* * BuildDistributionKeyFromColumnName builds a simple distribution key consisting * only out of a reference to the column of name columnName. Errors out if the * specified column does not exist or is not suitable to be used as a * distribution column. * * The function returns NULL if the passed column name is NULL. That case only * corresponds to reference tables. */ Var * BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnName) { HeapTuple columnTuple = NULL; Form_pg_attribute columnForm = NULL; Var *distributionColumn = NULL; char *tableName = RelationGetRelationName(distributedRelation); /* short circuit for reference tables */ if (columnName == NULL) { return NULL; } /* it'd probably better to downcase identifiers consistent with SQL case folding */ truncate_identifier(columnName, strlen(columnName), true); /* lookup column definition */ columnTuple = SearchSysCacheAttName(RelationGetRelid(distributedRelation), columnName); if (!HeapTupleIsValid(columnTuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("column \"%s\" of relation \"%s\" does not exist", columnName, tableName))); } columnForm = (Form_pg_attribute) GETSTRUCT(columnTuple); /* check if the column may be referenced in the distribution key */ if (columnForm->attnum <= 0) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot reference system column \"%s\" in relation \"%s\"", columnName, tableName))); } /* build Var referencing only the chosen distribution column */ distributionColumn = makeVar(1, columnForm->attnum, columnForm->atttypid, columnForm->atttypmod, columnForm->attcollation, 0); ReleaseSysCache(columnTuple); return distributionColumn; } /* * ColumnNameToColumn returns the human-readable name of a column given a * relation identifier and the column's internal textual (Var) representation. * This function will raise an ERROR if no such column can be found or if the * provided Var refers to a system column. 
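 *
 * Together with BuildDistributionKeyFromColumnName above, this forms a round
 * trip between the user-facing column name and the stored node string;
 * roughly (a sketch, assuming an open relation and a valid column name):
 *
 *     Var *column = BuildDistributionKeyFromColumnName(relation, columnName);
 *     char *columnNodeString = nodeToString(column);
 *     ...
 *     char *recoveredName = ColumnNameToColumn(relationId, columnNodeString);
 *
 * which is how the partkey value in pg_dist_partition is written and later
 * read back.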
*/ char * ColumnNameToColumn(Oid relationId, char *columnNodeString) { Node *columnNode = NULL; Var *column = NULL; AttrNumber columnNumber = InvalidAttrNumber; char *columnName = NULL; columnNode = stringToNode(columnNodeString); Assert(IsA(columnNode, Var)); column = (Var *) columnNode; columnNumber = column->varattno; if (!AttrNumberIsForUserDefinedAttr(columnNumber)) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("attribute %d of relation \"%s\" is a system column", columnNumber, relationName))); } columnName = get_attname(relationId, column->varattno); if (columnName == NULL) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("attribute %d of relation \"%s\" does not exist", columnNumber, relationName))); } return columnName; } citus-7.0.3/src/backend/distributed/utils/errormessage.c000066400000000000000000000026651317107136600233700ustar00rootroot00000000000000/* * errormessage.c * Error handling related support functionality. * * Copyright (c) 2017, Citus Data, Inc. */ #include "postgres.h" #include "utils/memutils.h" #include "distributed/citus_nodes.h" #include "distributed/errormessage.h" /* * DeferredErrorInternal is a helper function for DeferredError(). */ DeferredErrorMessage * DeferredErrorInternal(int code, const char *message, const char *detail, const char *hint, const char *filename, int linenumber, const char *functionname) { DeferredErrorMessage *error = CitusMakeNode(DeferredErrorMessage); error->code = code; error->message = message; error->detail = detail; error->hint = hint; error->filename = filename; error->linenumber = linenumber; error->functionname = functionname; return error; } /* * RaiseDeferredErrorInternal is a helper function for RaiseDeferredError(). */ void RaiseDeferredErrorInternal(DeferredErrorMessage *error, int elevel) { ErrorData *errorData = palloc0(sizeof(ErrorData)); errorData->sqlerrcode = error->code; errorData->elevel = elevel; errorData->message = pstrdup(error->message); if (error->detail) { errorData->detail = pstrdup(error->detail); } if (error->hint) { errorData->hint = pstrdup(error->hint); } errorData->filename = pstrdup(error->filename); errorData->lineno = error->linenumber; errorData->funcname = error->functionname; errorData->assoc_context = ErrorContext; ThrowErrorData(errorData); } citus-7.0.3/src/backend/distributed/utils/hash_helpers.c000066400000000000000000000012561317107136600233320ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * hash_helpers.c * Helpers for dynahash.c style hash tables. * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "distributed/hash_helpers.h" #include "utils/hsearch.h" /* * Empty a hash, without destroying the hash table itself. */ void hash_delete_all(HTAB *htab) { HASH_SEQ_STATUS status; void *entry = NULL; hash_seq_init(&status, htab); while ((entry = hash_seq_search(&status)) != 0) { bool found = false; hash_search(htab, entry, HASH_REMOVE, &found); Assert(found); } } citus-7.0.3/src/backend/distributed/utils/listutils.c000066400000000000000000000055161317107136600227240ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * listutils.c * * This file contains functions to perform useful operations on lists. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "port.h" #include "utils/lsyscache.h" #include "distributed/listutils.h" #include "nodes/pg_list.h" #include "utils/memutils.h" /* * SortList takes in a list of void pointers, and sorts these pointers (and the * values they point to) by applying the given comparison function. The function * then returns the sorted list of pointers. * * Because the input list is a list of pointers, and because qsort expects to * compare pointers to the list elements, the provided comparison function must * compare pointers to pointers to elements. In addition, this sort function * naturally exhibits the same lack of stability exhibited by qsort. See that * function's man page for more details. */ List * SortList(List *pointerList, int (*comparisonFunction)(const void *, const void *)) { List *sortedList = NIL; uint32 arrayIndex = 0; uint32 arraySize = (uint32) list_length(pointerList); void **array = (void **) palloc0(arraySize * sizeof(void *)); ListCell *pointerCell = NULL; foreach(pointerCell, pointerList) { void *pointer = lfirst(pointerCell); array[arrayIndex] = pointer; arrayIndex++; } /* sort the array of pointers using the comparison function */ qsort(array, arraySize, sizeof(void *), comparisonFunction); /* convert the sorted array of pointers back to a sorted list */ for (arrayIndex = 0; arrayIndex < arraySize; arrayIndex++) { void *sortedPointer = array[arrayIndex]; sortedList = lappend(sortedList, sortedPointer); } pfree(array); return sortedList; } /* * PointerArrayFromList converts a list of pointers to an array of pointers. */ void ** PointerArrayFromList(List *pointerList) { int pointerCount = list_length(pointerList); void **pointerArray = (void **) palloc0(pointerCount * sizeof(void *)); ListCell *pointerCell = NULL; int pointerIndex = 0; foreach(pointerCell, pointerList) { pointerArray[pointerIndex] = (void *) lfirst(pointerCell); pointerIndex += 1; } return pointerArray; } /* * DatumArrayToArrayType converts the provided Datum array (of the specified * length and type) into an ArrayType suitable for returning from a UDF. */ ArrayType * DatumArrayToArrayType(Datum *datumArray, int datumCount, Oid datumTypeId) { ArrayType *arrayObject = NULL; int16 typeLength = 0; bool typeByValue = false; char typeAlignment = 0; get_typlenbyvalalign(datumTypeId, &typeLength, &typeByValue, &typeAlignment); arrayObject = construct_array(datumArray, datumCount, datumTypeId, typeLength, typeByValue, typeAlignment); return arrayObject; } citus-7.0.3/src/backend/distributed/utils/maintenanced.c000066400000000000000000000361061317107136600233150ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * maintenanced.c * Background worker run for each citus using database in a postgres * cluster. * * This file provides infrastructure for launching exactly one a background * worker for every database in which citus is used. That background worker * can then perform work like deadlock detection, prepared transaction * recovery, and cleanup. * * Copyright (c) 2017, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "pgstat.h" #include "access/xact.h" #include "catalog/pg_extension.h" #include "commands/extension.h" #include "libpq/pqsignal.h" #include "catalog/namespace.h" #include "distributed/distributed_deadlock_detection.h" #include "distributed/maintenanced.h" #include "distributed/metadata_cache.h" #include "nodes/makefuncs.h" #include "postmaster/bgworker.h" #include "storage/ipc.h" #include "storage/proc.h" #include "storage/latch.h" #include "storage/lmgr.h" #include "storage/lwlock.h" #include "tcop/tcopprot.h" /* * Shared memory data for all maintenance workers. */ typedef struct MaintenanceDaemonControlData { /* * Lock protecting the shared memory state. This is to be taken when * looking up (shared mode) or inserting (exclusive mode) per-database * data in dbHash. */ int trancheId; #if (PG_VERSION_NUM >= 100000) char *lockTrancheName; #else LWLockTranche lockTranche; #endif LWLock lock; /* * Hash-table of workers, one entry for each database with citus * activated. */ HTAB *dbHash; } MaintenanceDaemonControlData; /* * Per database worker state. */ typedef struct MaintenanceDaemonDBData { /* hash key: database to run on */ Oid databaseOid; /* information: which user to use */ Oid userOid; bool daemonStarted; pid_t workerPid; Latch *latch; /* pointer to the background worker's latch */ } MaintenanceDaemonDBData; /* config variable for distributed deadlock detection timeout */ double DistributedDeadlockDetectionTimeoutFactor = 2.0; static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static MaintenanceDaemonControlData *MaintenanceDaemonControl = NULL; static volatile sig_atomic_t got_SIGHUP = false; static void MaintenanceDaemonSigHupHandler(SIGNAL_ARGS); static size_t MaintenanceDaemonShmemSize(void); static void MaintenanceDaemonShmemInit(void); static void MaintenanceDaemonErrorContext(void *arg); static bool LockCitusExtension(void); /* * InitializeMaintenanceDaemon, called at server start, is responsible for * requesting shared memory and related infrastructure required by maintenance * daemons. */ void InitializeMaintenanceDaemon(void) { RequestAddinShmemSpace(MaintenanceDaemonShmemSize()); prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = MaintenanceDaemonShmemInit; } /* * InitializeMaintenanceDaemonBackend, called at backend start and * configuration changes, is responsible for starting a per-database * maintenance worker if necessary. 
*/ void InitializeMaintenanceDaemonBackend(void) { MaintenanceDaemonDBData *dbData = NULL; Oid extensionOwner = CitusExtensionOwner(); bool found; LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); dbData = (MaintenanceDaemonDBData *) hash_search(MaintenanceDaemonControl->dbHash, &MyDatabaseId, HASH_ENTER_NULL, &found); if (dbData == NULL) { /* FIXME: better message, reference relevant guc in hint */ ereport(ERROR, (errmsg("ran out of database slots"))); } if (!found || !dbData->daemonStarted) { BackgroundWorker worker; BackgroundWorkerHandle *handle = NULL; int pid = 0; dbData->userOid = extensionOwner; memset(&worker, 0, sizeof(worker)); snprintf(worker.bgw_name, BGW_MAXLEN, "Citus Maintenance Daemon: %u/%u", MyDatabaseId, extensionOwner); /* request ability to connect to target database */ worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; /* * No point in getting started before able to run query, but we do * want to get started on Hot-Stanby standbys. */ worker.bgw_start_time = BgWorkerStart_ConsistentState; /* * Restart after a bit after errors, but don't bog the system. */ worker.bgw_restart_time = 5; sprintf(worker.bgw_library_name, "citus"); sprintf(worker.bgw_function_name, "CitusMaintenanceDaemonMain"); worker.bgw_main_arg = ObjectIdGetDatum(MyDatabaseId); memcpy(worker.bgw_extra, &extensionOwner, sizeof(Oid)); worker.bgw_notify_pid = MyProcPid; if (!RegisterDynamicBackgroundWorker(&worker, &handle)) { ereport(ERROR, (errmsg("could not start maintenance background worker"), errhint("Increasing max_worker_processes might help."))); } dbData->daemonStarted = true; dbData->workerPid = 0; LWLockRelease(&MaintenanceDaemonControl->lock); WaitForBackgroundWorkerStartup(handle, &pid); } else { Assert(dbData->daemonStarted); /* * If owner of extension changed, wake up daemon. It'll notice and * restart. */ if (dbData->userOid != extensionOwner) { dbData->userOid = extensionOwner; if (dbData->latch) { SetLatch(dbData->latch); } } LWLockRelease(&MaintenanceDaemonControl->lock); } } /* * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll * be started by the background worker infrastructure. If it errors out, * it'll be restarted after a few seconds. */ void CitusMaintenanceDaemonMain(Datum main_arg) { Oid databaseOid = DatumGetObjectId(main_arg); MaintenanceDaemonDBData *myDbData = NULL; ErrorContextCallback errorCallback; /* * Look up this worker's configuration. */ LWLockAcquire(&MaintenanceDaemonControl->lock, LW_SHARED); myDbData = (MaintenanceDaemonDBData *) hash_search(MaintenanceDaemonControl->dbHash, &databaseOid, HASH_FIND, NULL); if (!myDbData) { /* * When the database crashes, background workers are restarted, but * the state in shared memory is lost. In that case, we exit and * wait for a session to call InitializeMaintenanceDaemonBackend * to properly add it to the hash. */ proc_exit(0); } /* from this point, DROP DATABASE will attempt to kill the worker */ myDbData->workerPid = MyProcPid; /* wire up signals */ pqsignal(SIGTERM, die); pqsignal(SIGHUP, MaintenanceDaemonSigHupHandler); BackgroundWorkerUnblockSignals(); myDbData->latch = MyLatch; LWLockRelease(&MaintenanceDaemonControl->lock); /* * Setup error context so log messages can be properly attributed. Some of * them otherwise sound like they might be from a normal user connection. * Do so before setting up signals etc, so we never exit without the * context setup. 
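 *
 * With the callback installed, messages emitted by the daemon carry a context
 * line of roughly the form
 *
 *     CONTEXT:  Citus maintenance daemon for database 12345 user 10
 *
 * (the database and user OIDs are illustrative), matching the errcontext call
 * in MaintenanceDaemonErrorContext below.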
*/ memset(&errorCallback, 0, sizeof(errorCallback)); errorCallback.callback = MaintenanceDaemonErrorContext; errorCallback.arg = (void *) myDbData; errorCallback.previous = error_context_stack; error_context_stack = &errorCallback; elog(LOG, "starting maintenance daemon on database %u user %u", databaseOid, myDbData->userOid); /* connect to database, after that we can actually access catalogs */ BackgroundWorkerInitializeConnectionByOid(databaseOid, myDbData->userOid); /* make worker recognizable in pg_stat_activity */ pgstat_report_appname("Citus Maintenance Daemon"); /* enter main loop */ for (;;) { int rc; int latchFlags = WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH; double timeout = 10000.0; /* use this if the deadlock detection is disabled */ bool foundDeadlock = false; CHECK_FOR_INTERRUPTS(); /* * XXX: We clear the metadata cache before every iteration because otherwise * it might contain stale OIDs. It appears that in some cases invalidation * messages for a DROP EXTENSION may arrive during deadlock detection and * this causes us to cache a stale pg_dist_node OID. We'd actually expect * all invalidations to arrive after obtaining a lock in LockCitusExtension. */ InvalidateMetadataSystemCache(); /* * Perform Work. If a specific task needs to be called sooner than * timeout indicates, it's ok to lower it to that value. Expensive * tasks should do their own time math about whether to re-run checks. */ /* the config value -1 disables the distributed deadlock detection */ if (DistributedDeadlockDetectionTimeoutFactor != -1.0) { StartTransactionCommand(); /* * We skip the deadlock detection if citus extension * is not accessible. * * Similarly, we skip to run the deadlock checks if * there exists any version mismatch or the extension * is not fully created yet. */ if (!LockCitusExtension()) { ereport(DEBUG1, (errmsg("could not lock the citus extension, " "skipping deadlock detection"))); } else if (CheckCitusVersion(DEBUG1) && CitusHasBeenLoaded()) { foundDeadlock = CheckForDistributedDeadlocks(); } CommitTransactionCommand(); /* * If we find any deadlocks, run the distributed deadlock detection * more often since it is quite possible that there are other * deadlocks need to be resolved. * * Thus, we use 1/20 of the calculated value. With the default * values (i.e., deadlock_timeout 1 seconds, * citus.distributed_deadlock_detection_factor 2), we'd be able to cancel * ~10 distributed deadlocks per second. */ timeout = DistributedDeadlockDetectionTimeoutFactor * (double) DeadlockTimeout; if (foundDeadlock) { timeout = timeout / 20.0; } } /* * Wait until timeout, or until somebody wakes us up. Also cast the timeout to * integer where we've calculated it using double for not losing the precision. */ #if (PG_VERSION_NUM >= 100000) rc = WaitLatch(MyLatch, latchFlags, (long) timeout, PG_WAIT_EXTENSION); #else rc = WaitLatch(MyLatch, latchFlags, (long) timeout); #endif /* emergency bailout if postmaster has died */ if (rc & WL_POSTMASTER_DEATH) { proc_exit(1); } if (rc & WL_LATCH_SET) { ResetLatch(MyLatch); CHECK_FOR_INTERRUPTS(); /* check for changed configuration */ if (myDbData->userOid != GetSessionUserId()) { /* return code of 1 requests worker restart */ proc_exit(1); } /* * Could also add code checking whether extension still exists, * but that'd complicate things a bit, because we'd have to delete * the shared memory entry. There'd potentially be a race * condition where the extension gets re-created, checking that * this entry still exists, and it getting deleted just after. 
* Doesn't seem worth catering for that. */ } if (got_SIGHUP) { got_SIGHUP = false; ProcessConfigFile(PGC_SIGHUP); } } } /* * MaintenanceDaemonShmemSize computes how much shared memory is required. */ static size_t MaintenanceDaemonShmemSize(void) { Size size = 0; Size hashSize = 0; size = add_size(size, sizeof(MaintenanceDaemonControlData)); /* * We request enough shared memory to have one hash-table entry for each * worker process. We couldn't start more anyway, so there's little point * in allocating more. */ hashSize = hash_estimate_size(max_worker_processes, sizeof(MaintenanceDaemonDBData)); size = add_size(size, hashSize); return size; } /* * MaintenanceDaemonShmemInit initializes the requested shared memory for the * maintenance daemon. */ static void MaintenanceDaemonShmemInit(void) { bool alreadyInitialized = false; HASHCTL hashInfo; int hashFlags = 0; LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); MaintenanceDaemonControl = (MaintenanceDaemonControlData *) ShmemInitStruct("Citus Maintenance Daemon", MaintenanceDaemonShmemSize(), &alreadyInitialized); /* * Might already be initialized on EXEC_BACKEND type platforms that call * shared library initialization functions in every backend. */ if (!alreadyInitialized) { #if (PG_VERSION_NUM >= 100000) MaintenanceDaemonControl->trancheId = LWLockNewTrancheId(); MaintenanceDaemonControl->lockTrancheName = "Citus Maintenance Daemon"; LWLockRegisterTranche(MaintenanceDaemonControl->trancheId, MaintenanceDaemonControl->lockTrancheName); #else /* initialize lwlock */ LWLockTranche *tranche = &MaintenanceDaemonControl->lockTranche; /* start by zeroing out all the memory */ memset(MaintenanceDaemonControl, 0, MaintenanceDaemonShmemSize()); /* initialize lock */ MaintenanceDaemonControl->trancheId = LWLockNewTrancheId(); tranche->array_base = &MaintenanceDaemonControl->lock; tranche->array_stride = sizeof(LWLock); tranche->name = "Citus Maintenance Daemon"; LWLockRegisterTranche(MaintenanceDaemonControl->trancheId, tranche); #endif LWLockInitialize(&MaintenanceDaemonControl->lock, MaintenanceDaemonControl->trancheId); } memset(&hashInfo, 0, sizeof(hashInfo)); hashInfo.keysize = sizeof(Oid); hashInfo.entrysize = sizeof(MaintenanceDaemonDBData); hashInfo.hash = tag_hash; hashFlags = (HASH_ELEM | HASH_FUNCTION); MaintenanceDaemonControl->dbHash = ShmemInitHash("Maintenance Database Hash", max_worker_processes, max_worker_processes, &hashInfo, hashFlags); LWLockRelease(AddinShmemInitLock); if (prev_shmem_startup_hook != NULL) { prev_shmem_startup_hook(); } } /* * MaintenanceDaemonSigHupHandler set a flag to re-read config file at next * convenient time. */ static void MaintenanceDaemonSigHupHandler(SIGNAL_ARGS) { int save_errno = errno; got_SIGHUP = true; if (MyProc != NULL) { SetLatch(&MyProc->procLatch); } errno = save_errno; } /* * MaintenanceDaemonErrorContext adds some context to log messages to make it * easier to associate them with the maintenance daemon. */ static void MaintenanceDaemonErrorContext(void *arg) { MaintenanceDaemonDBData *myDbData = (MaintenanceDaemonDBData *) arg; errcontext("Citus maintenance daemon for database %u user %u", myDbData->databaseOid, myDbData->userOid); } /* * LockCitusExtension acquires a lock on the Citus extension or returns * false if the extension does not exist or is being dropped. 
*/ static bool LockCitusExtension(void) { Oid recheckExtensionOid = InvalidOid; Oid extensionOid = get_extension_oid("citus", true); if (extensionOid == InvalidOid) { /* citus extension does not exist */ return false; } LockDatabaseObject(ExtensionRelationId, extensionOid, 0, AccessShareLock); /* * The extension may have been dropped and possibly recreated prior to * obtaining a lock. Check whether we still get the expected OID. */ recheckExtensionOid = get_extension_oid("citus", true); if (recheckExtensionOid != extensionOid) { return false; } return true; } /* * StopMaintenanceDaemon stops the maintenance daemon for the * given database and removes it from the maintenance daemon * control hash. */ void StopMaintenanceDaemon(Oid databaseId) { bool found = false; MaintenanceDaemonDBData *dbData = NULL; pid_t workerPid = 0; LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); dbData = (MaintenanceDaemonDBData *) hash_search(MaintenanceDaemonControl->dbHash, &databaseId, HASH_REMOVE, &found); if (found) { workerPid = dbData->workerPid; } LWLockRelease(&MaintenanceDaemonControl->lock); if (workerPid > 0) { kill(workerPid, SIGTERM); } } citus-7.0.3/src/backend/distributed/utils/metadata_cache.c000066400000000000000000002545171317107136600236020ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * metadata_cache.c * Distributed table metadata cache * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/nbtree.h" #include "access/xact.h" #include "access/sysattr.h" #include "catalog/indexing.h" #include "catalog/pg_am.h" #include "catalog/pg_extension.h" #include "catalog/pg_namespace.h" #include "catalog/pg_type.h" #include "citus_version.h" #include "commands/extension.h" #include "commands/trigger.h" #include "distributed/colocation_utils.h" #include "distributed/citus_ruleutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/pg_dist_local_group.h" #include "distributed/pg_dist_node.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/pg_dist_placement.h" #include "distributed/shared_library_init.h" #include "distributed/shardinterval_utils.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "parser/parse_func.h" #include "parser/parse_type.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/catcache.h" #include "utils/datum.h" #include "utils/hsearch.h" #include "utils/inval.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relfilenodemap.h" #include "utils/relmapper.h" #include "utils/syscache.h" #include "utils/typcache.h" /* user configuration */ int ReadFromSecondaries = USE_SECONDARY_NODES_NEVER; /* * ShardCacheEntry represents an entry in the shardId -> ShardInterval cache. * To avoid duplicating data and invalidation logic between this cache and the * DistTableCache, this only points into the DistTableCacheEntry of the * shard's distributed table. 
*/ typedef struct ShardCacheEntry { /* hash key, needs to be first */ int64 shardId; /* * Cache entry for the distributed table a shard belongs to, possibly not * valid. */ DistTableCacheEntry *tableEntry; /* * Offset in tableEntry->sortedShardIntervalArray, only valid if * tableEntry->isValid. We don't store pointers to the individual shard * placements because that'd make invalidation a bit more complicated, and * because there's simply no need. */ int shardIndex; } ShardCacheEntry; /* * State which should be cleared upon DROP EXTENSION. When the configuration * changes, e.g. because the extension is dropped, these summarily get set to * 0. */ typedef struct MetadataCacheData { bool extensionLoaded; Oid distShardRelationId; Oid distPlacementRelationId; Oid distNodeRelationId; Oid distLocalGroupRelationId; Oid distColocationRelationId; Oid distColocationConfigurationIndexId; Oid distColocationColocationidIndexId; Oid distPartitionRelationId; Oid distPartitionLogicalRelidIndexId; Oid distPartitionColocationidIndexId; Oid distShardLogicalRelidIndexId; Oid distShardShardidIndexId; Oid distPlacementShardidIndexId; Oid distPlacementPlacementidIndexId; Oid distPlacementGroupidIndexId; Oid distTransactionRelationId; Oid distTransactionGroupIndexId; Oid distTransactionRecordIndexId; Oid extraDataContainerFuncId; Oid workerHashFunctionId; Oid extensionOwner; Oid primaryNodeRoleId; Oid secondaryNodeRoleId; Oid unavailableNodeRoleId; } MetadataCacheData; static MetadataCacheData MetadataCache; /* Citus extension version variables */ bool EnableVersionChecks = true; /* version checks are enabled */ static bool citusVersionKnownCompatible = false; /* Hash table for informations about each partition */ static HTAB *DistTableCacheHash = NULL; /* Hash table for informations about each shard */ static HTAB *DistShardCacheHash = NULL; /* Hash table for informations about worker nodes */ static HTAB *WorkerNodeHash = NULL; static bool workerNodeHashValid = false; /* default value is -1, for coordinator it's 0 and for worker nodes > 0 */ static int LocalGroupId = -1; /* built first time through in InitializePartitionCache */ static ScanKeyData DistPartitionScanKey[1]; static ScanKeyData DistShardScanKey[1]; /* local function forward declarations */ static bool IsDistributedTableViaCatalog(Oid relationId); static ShardCacheEntry * LookupShardCacheEntry(int64 shardId); static DistTableCacheEntry * LookupDistTableCacheEntry(Oid relationId); static void BuildDistTableCacheEntry(DistTableCacheEntry *cacheEntry); static void BuildCachedShardList(DistTableCacheEntry *cacheEntry); static ShardInterval ** SortShardIntervalArray(ShardInterval **shardIntervalArray, int shardCount, FmgrInfo * shardIntervalSortCompareFunction); static bool HasUniformHashDistribution(ShardInterval **shardIntervalArray, int shardIntervalArrayLength); static bool HasUninitializedShardInterval(ShardInterval **sortedShardIntervalArray, int shardCount); static bool CheckInstalledVersion(int elevel); static char * AvailableExtensionVersion(void); static char * InstalledExtensionVersion(void); static bool HasOverlappingShardInterval(ShardInterval **shardIntervalArray, int shardIntervalArrayLength, FmgrInfo *shardIntervalSortCompareFunction); static void InitializeCaches(void); static void InitializeDistTableCache(void); static void InitializeWorkerNodeCache(void); static void RegisterWorkerNodeCacheCallbacks(void); static void RegisterLocalGroupIdCacheCallbacks(void); static uint32 WorkerNodeHashCode(const void *key, Size keySize); static void 
ResetDistTableCacheEntry(DistTableCacheEntry *cacheEntry); static void InvalidateDistRelationCacheCallback(Datum argument, Oid relationId); static void InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId); static void InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId); static HeapTuple LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId); static List * LookupDistShardTuples(Oid relationId); static Oid LookupShardRelation(int64 shardId); static void GetPartitionTypeInputInfo(char *partitionKeyString, char partitionMethod, Oid *columnTypeId, int32 *columnTypeMod, Oid *intervalTypeId, int32 *intervalTypeMod); static ShardInterval * TupleToShardInterval(HeapTuple heapTuple, TupleDesc tupleDescriptor, Oid intervalTypeId, int32 intervalTypeMod); static void CachedRelationLookup(const char *relationName, Oid *cachedOid); static ShardPlacement * ResolveGroupShardPlacement( GroupShardPlacement *groupShardPlacement, ShardCacheEntry *shardEntry); static WorkerNode * LookupNodeForGroup(uint32 groupid); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(master_dist_partition_cache_invalidate); PG_FUNCTION_INFO_V1(master_dist_shard_cache_invalidate); PG_FUNCTION_INFO_V1(master_dist_placement_cache_invalidate); PG_FUNCTION_INFO_V1(master_dist_node_cache_invalidate); PG_FUNCTION_INFO_V1(master_dist_local_group_cache_invalidate); /* * EnsureModificationsCanRun checks if the current node is in recovery mode or * citus.use_secondary_nodes is 'alwaus'. If either is true the function errors out. */ void EnsureModificationsCanRun(void) { if (RecoveryInProgress()) { ereport(ERROR, (errmsg("writing to worker nodes is not currently allowed"), errdetail("the database is in recovery mode"))); } if (ReadFromSecondaries == USE_SECONDARY_NODES_ALWAYS) { ereport(ERROR, (errmsg("writing to worker nodes is not currently allowed"), errdetail("citus.use_secondary_nodes is set to 'always'"))); } } /* * IsDistributedTable returns whether relationId is a distributed relation or * not. */ bool IsDistributedTable(Oid relationId) { DistTableCacheEntry *cacheEntry = NULL; cacheEntry = LookupDistTableCacheEntry(relationId); /* * If extension hasn't been created, or has the wrong version and the * table isn't a distributed one, LookupDistTableCacheEntry() will return NULL. */ if (!cacheEntry) { return false; } return cacheEntry->isDistributedTable; } /* * IsDistributedTableViaCatalog returns whether the given relation is a * distributed table or not. * * It does so by searching pg_dist_partition, explicitly bypassing caches, * because this function is designed to be used in cases where accessing * metadata tables is not safe. * * NB: Currently this still hardcodes pg_dist_partition logicalrelid column * offset and the corresponding index. If we ever come close to changing * that, we'll have to work a bit harder. 
*/ static bool IsDistributedTableViaCatalog(Oid relationId) { HeapTuple partitionTuple = NULL; SysScanDesc scanDescriptor = NULL; const int scanKeyCount = 1; ScanKeyData scanKey[scanKeyCount]; bool indexOK = true; Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_partition_logicalrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId)); scanDescriptor = systable_beginscan(pgDistPartition, DistPartitionLogicalRelidIndexId(), indexOK, NULL, scanKeyCount, scanKey); partitionTuple = systable_getnext(scanDescriptor); systable_endscan(scanDescriptor); heap_close(pgDistPartition, AccessShareLock); return HeapTupleIsValid(partitionTuple); } /* * DistributedTableList returns a list that includes all the valid distributed table * cache entries. */ List * DistributedTableList(void) { List *distTableOidList = NIL; List *distributedTableList = NIL; ListCell *distTableOidCell = NULL; Assert(CitusHasBeenLoaded() && CheckCitusVersion(WARNING)); /* first, we need to iterate over pg_dist_partition */ distTableOidList = DistTableOidList(); foreach(distTableOidCell, distTableOidList) { DistTableCacheEntry *cacheEntry = NULL; Oid relationId = lfirst_oid(distTableOidCell); cacheEntry = DistributedTableCacheEntry(relationId); distributedTableList = lappend(distributedTableList, cacheEntry); } return distributedTableList; } /* * LoadShardInterval returns the, cached, metadata about a shard. * * The return value is a copy of the cached ShardInterval struct and may * therefore be modified and/or freed. */ ShardInterval * LoadShardInterval(uint64 shardId) { ShardInterval *shardInterval = NULL; ShardInterval *sourceShardInterval = NULL; ShardCacheEntry *shardEntry = NULL; DistTableCacheEntry *tableEntry = NULL; shardEntry = LookupShardCacheEntry(shardId); tableEntry = shardEntry->tableEntry; Assert(tableEntry->isDistributedTable); /* the offset better be in a valid range */ Assert(shardEntry->shardIndex < tableEntry->shardIntervalArrayLength); sourceShardInterval = tableEntry->sortedShardIntervalArray[shardEntry->shardIndex]; /* copy value to return */ shardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); CopyShardInterval(sourceShardInterval, shardInterval); return shardInterval; } /* * LoadGroupShardPlacement returns the cached shard placement metadata * * The return value is a copy of the cached GroupShardPlacement struct and may * therefore be modified and/or freed. */ GroupShardPlacement * LoadGroupShardPlacement(uint64 shardId, uint64 placementId) { ShardCacheEntry *shardEntry = NULL; DistTableCacheEntry *tableEntry = NULL; GroupShardPlacement *placementArray = NULL; int numberOfPlacements = 0; int i = 0; shardEntry = LookupShardCacheEntry(shardId); tableEntry = shardEntry->tableEntry; /* the offset better be in a valid range */ Assert(shardEntry->shardIndex < tableEntry->shardIntervalArrayLength); placementArray = tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; numberOfPlacements = tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; for (i = 0; i < numberOfPlacements; i++) { if (placementArray[i].placementId == placementId) { GroupShardPlacement *shardPlacement = CitusMakeNode(GroupShardPlacement); memcpy(shardPlacement, &placementArray[i], sizeof(GroupShardPlacement)); return shardPlacement; } } ereport(ERROR, (errmsg("could not find valid entry for shard placement " UINT64_FORMAT, placementId))); } /* * LoadShardPlacement returns a shard placement for the primary node. 
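 *
 * It is a thin wrapper around LoadGroupShardPlacement() that resolves the
 * cached GroupShardPlacement to a concrete node via
 * ResolveGroupShardPlacement().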
*/ ShardPlacement * LoadShardPlacement(uint64 shardId, uint64 placementId) { ShardCacheEntry *shardEntry = NULL; GroupShardPlacement *groupPlacement = NULL; ShardPlacement *nodePlacement = NULL; shardEntry = LookupShardCacheEntry(shardId); groupPlacement = LoadGroupShardPlacement(shardId, placementId); nodePlacement = ResolveGroupShardPlacement(groupPlacement, shardEntry); return nodePlacement; } /* * FindShardPlacementOnGroup returns the shard placement for the given shard * on the given group, or returns NULL of no placement for the shard exists * on the group. */ ShardPlacement * FindShardPlacementOnGroup(uint32 groupId, uint64 shardId) { ShardCacheEntry *shardEntry = NULL; DistTableCacheEntry *tableEntry = NULL; GroupShardPlacement *placementArray = NULL; int numberOfPlacements = 0; ShardPlacement *placementOnNode = NULL; int placementIndex = 0; shardEntry = LookupShardCacheEntry(shardId); tableEntry = shardEntry->tableEntry; placementArray = tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; numberOfPlacements = tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; for (placementIndex = 0; placementIndex < numberOfPlacements; placementIndex++) { GroupShardPlacement *placement = &placementArray[placementIndex]; if (placement->groupId == groupId) { placementOnNode = ResolveGroupShardPlacement(placement, shardEntry); break; } } return placementOnNode; } /* * ResolveGroupShardPlacement takes a GroupShardPlacement and adds additional data to it, * such as the node we should consider it to be on. */ static ShardPlacement * ResolveGroupShardPlacement(GroupShardPlacement *groupShardPlacement, ShardCacheEntry *shardEntry) { DistTableCacheEntry *tableEntry = shardEntry->tableEntry; int shardIndex = shardEntry->shardIndex; ShardInterval *shardInterval = tableEntry->sortedShardIntervalArray[shardIndex]; ShardPlacement *shardPlacement = CitusMakeNode(ShardPlacement); uint32 groupId = groupShardPlacement->groupId; WorkerNode *workerNode = LookupNodeForGroup(groupId); /* copy everything into shardPlacement but preserve the header */ memcpy((((CitusNode *) shardPlacement) + 1), (((CitusNode *) groupShardPlacement) + 1), sizeof(GroupShardPlacement) - sizeof(CitusNode)); shardPlacement->nodeName = pstrdup(workerNode->workerName); shardPlacement->nodePort = workerNode->workerPort; /* fill in remaining fields */ Assert(tableEntry->partitionMethod != 0); shardPlacement->partitionMethod = tableEntry->partitionMethod; shardPlacement->colocationGroupId = tableEntry->colocationId; if (tableEntry->partitionMethod == DISTRIBUTE_BY_HASH) { Assert(shardInterval->minValueExists); Assert(shardInterval->valueTypeId == INT4OID); /* * Use the lower boundary of the interval's range to identify * it for colocation purposes. That remains meaningful even if * a concurrent session splits a shard. */ shardPlacement->representativeValue = DatumGetInt32(shardInterval->minValue); } else { shardPlacement->representativeValue = 0; } return shardPlacement; } /* * LookupNodeForGroup searches the WorkerNodeHash for a worker which is a member of the * given group and also readable (a primary if we're reading from primaries, a secondary * if we're reading from secondaries). If such a node does not exist it emits an * appropriate error message. 
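 *
 * When no readable node exists in the group, the error message below is
 * chosen based on the ReadFromSecondaries setting
 * (citus.use_secondary_nodes).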
*/ static WorkerNode * LookupNodeForGroup(uint32 groupId) { WorkerNode *workerNode = NULL; HASH_SEQ_STATUS status; HTAB *workerNodeHash = GetWorkerNodeHash(); bool foundAnyNodes = false; hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { uint32 workerNodeGroupId = workerNode->groupId; if (workerNodeGroupId != groupId) { continue; } foundAnyNodes = true; if (WorkerNodeIsReadable(workerNode)) { hash_seq_term(&status); return workerNode; } } if (!foundAnyNodes) { ereport(ERROR, (errmsg("there is a shard placement in node group %u but " "there are no nodes in that group", groupId))); } switch (ReadFromSecondaries) { case USE_SECONDARY_NODES_NEVER: { ereport(ERROR, (errmsg("node group %u does not have a primary node", groupId))); } case USE_SECONDARY_NODES_ALWAYS: { ereport(ERROR, (errmsg("node group %u does not have a secondary node", groupId))); } default: { ereport(FATAL, (errmsg("unrecognized value for use_secondary_nodes"))); } } } /* * ShardPlacementList returns the list of placements for the given shard from * the cache. * * The returned list is deep copied from the cache and thus can be modified * and pfree()d freely. */ List * ShardPlacementList(uint64 shardId) { ShardCacheEntry *shardEntry = NULL; DistTableCacheEntry *tableEntry = NULL; GroupShardPlacement *placementArray = NULL; int numberOfPlacements = 0; List *placementList = NIL; int i = 0; shardEntry = LookupShardCacheEntry(shardId); tableEntry = shardEntry->tableEntry; /* the offset better be in a valid range */ Assert(shardEntry->shardIndex < tableEntry->shardIntervalArrayLength); placementArray = tableEntry->arrayOfPlacementArrays[shardEntry->shardIndex]; numberOfPlacements = tableEntry->arrayOfPlacementArrayLengths[shardEntry->shardIndex]; for (i = 0; i < numberOfPlacements; i++) { GroupShardPlacement *groupShardPlacement = &placementArray[i]; ShardPlacement *shardPlacement = ResolveGroupShardPlacement(groupShardPlacement, shardEntry); placementList = lappend(placementList, shardPlacement); } /* if no shard placements are found, warn the user */ if (numberOfPlacements == 0) { ereport(WARNING, (errmsg("could not find any shard placements for shardId " UINT64_FORMAT, shardId))); } return placementList; } /* * LookupShardCacheEntry returns the cache entry belonging to a shard, or * errors out if that shard is unknown. */ static ShardCacheEntry * LookupShardCacheEntry(int64 shardId) { ShardCacheEntry *shardEntry = NULL; bool foundInCache = false; bool recheck = false; Assert(CitusHasBeenLoaded() && CheckCitusVersion(WARNING)); InitializeCaches(); /* lookup cache entry */ shardEntry = hash_search(DistShardCacheHash, &shardId, HASH_FIND, &foundInCache); if (!foundInCache) { /* * A possible reason for not finding an entry in the cache is that the * distributed table's cache entry hasn't been accessed. Thus look up * the distributed table, and build the cache entry. Afterwards we * know that the shard has to be in the cache if it exists. If the * shard does *not* exist LookupShardRelation() will error out. */ Oid relationId = LookupShardRelation(shardId); /* trigger building the cache for the shard id */ LookupDistTableCacheEntry(relationId); recheck = true; } else { /* * We might have some concurrent metadata changes. In order to get the changes, * we first need to accept the cache invalidation messages. 
*/ AcceptInvalidationMessages(); if (!shardEntry->tableEntry->isValid) { Oid oldRelationId = shardEntry->tableEntry->relationId; Oid currentRelationId = LookupShardRelation(shardId); /* * The relation OID to which the shard belongs could have changed, * most notably when the extension is dropped and a shard ID is * reused. Reload the cache entries for both old and new relation * ID and then look up the shard entry again. */ LookupDistTableCacheEntry(oldRelationId); LookupDistTableCacheEntry(currentRelationId); recheck = true; } } /* * If we (re-)loaded the table cache, re-search the shard cache - the * shard index might have changed. If we still can't find the entry, it * can't exist. */ if (recheck) { shardEntry = hash_search(DistShardCacheHash, &shardId, HASH_FIND, &foundInCache); if (!foundInCache) { ereport(ERROR, (errmsg("could not find valid entry for shard " UINT64_FORMAT, shardId))); } } return shardEntry; } /* * DistributedTableCacheEntry looks up a pg_dist_partition entry for a * relation. * * Errors out if no relation matching the criteria could be found. */ DistTableCacheEntry * DistributedTableCacheEntry(Oid distributedRelationId) { DistTableCacheEntry *cacheEntry = NULL; cacheEntry = LookupDistTableCacheEntry(distributedRelationId); if (cacheEntry && cacheEntry->isDistributedTable) { return cacheEntry; } else { char *relationName = get_rel_name(distributedRelationId); ereport(ERROR, (errmsg("relation %s is not distributed", relationName))); } } /* * LookupDistTableCacheEntry returns the distributed table metadata for the * passed relationId. For efficiency it caches lookups. */ static DistTableCacheEntry * LookupDistTableCacheEntry(Oid relationId) { DistTableCacheEntry *cacheEntry = NULL; bool foundInCache = false; void *hashKey = (void *) &relationId; /* * Can't be a distributed relation if the extension hasn't been loaded * yet. As we can't do lookups in nonexistent tables, directly return NULL * here. */ if (!CitusHasBeenLoaded()) { return NULL; } InitializeCaches(); /* * If the version is not known to be compatible, perform thorough check, * unless such checks are disabled. */ if (!citusVersionKnownCompatible && EnableVersionChecks) { bool isDistributed = IsDistributedTableViaCatalog(relationId); int reportLevel = DEBUG1; /* * If there's a version-mismatch, and we're dealing with a distributed * table, we have to error out as we can't return a valid entry. We * want to check compatibility in the non-distributed case as well, so * future lookups can use the cache if compatible. */ if (isDistributed) { reportLevel = ERROR; } if (!CheckCitusVersion(reportLevel)) { /* incompatible, can't access cache, so return before doing so */ return NULL; } } cacheEntry = hash_search(DistTableCacheHash, hashKey, HASH_ENTER, &foundInCache); /* return valid matches */ if (foundInCache) { /* * We might have some concurrent metadata changes. In order to get the changes, * we first need to accept the cache invalidation messages. */ AcceptInvalidationMessages(); if (cacheEntry->isValid) { return cacheEntry; } /* free the content of old, invalid, entries */ ResetDistTableCacheEntry(cacheEntry); } /* zero out entry, but not the key part */ memset(((char *) cacheEntry) + sizeof(Oid), 0, sizeof(DistTableCacheEntry) - sizeof(Oid)); /* actually fill out entry */ BuildDistTableCacheEntry(cacheEntry); /* and finally mark as valid */ cacheEntry->isValid = true; return cacheEntry; } /* * BuildDistTableCacheEntry is a helper routine for * LookupDistTableCacheEntry() for building the cache contents. 
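 *
 * It reads the relation's pg_dist_partition tuple and fills in the partition
 * method, partition key, colocation id and replication model, then delegates
 * building the shard interval and placement arrays to BuildCachedShardList().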
*/ static void BuildDistTableCacheEntry(DistTableCacheEntry *cacheEntry) { HeapTuple distPartitionTuple = NULL; Relation pgDistPartition = NULL; Form_pg_dist_partition partitionForm = NULL; Datum partitionKeyDatum = 0; Datum replicationModelDatum = 0; MemoryContext oldContext = NULL; TupleDesc tupleDescriptor = NULL; bool isNull = false; bool partitionKeyIsNull = false; pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); distPartitionTuple = LookupDistPartitionTuple(pgDistPartition, cacheEntry->relationId); /* not a distributed table, done */ if (distPartitionTuple == NULL) { cacheEntry->isDistributedTable = false; heap_close(pgDistPartition, NoLock); return; } cacheEntry->isDistributedTable = true; tupleDescriptor = RelationGetDescr(pgDistPartition); partitionForm = (Form_pg_dist_partition) GETSTRUCT(distPartitionTuple); cacheEntry->partitionMethod = partitionForm->partmethod; partitionKeyDatum = heap_getattr(distPartitionTuple, Anum_pg_dist_partition_partkey, tupleDescriptor, &partitionKeyIsNull); /* note that for reference tables partitionKeyisNull is true */ if (!partitionKeyIsNull) { oldContext = MemoryContextSwitchTo(CacheMemoryContext); cacheEntry->partitionKeyString = TextDatumGetCString(partitionKeyDatum); MemoryContextSwitchTo(oldContext); } else { cacheEntry->partitionKeyString = NULL; } cacheEntry->colocationId = heap_getattr(distPartitionTuple, Anum_pg_dist_partition_colocationid, tupleDescriptor, &isNull); if (isNull) { cacheEntry->colocationId = INVALID_COLOCATION_ID; } replicationModelDatum = heap_getattr(distPartitionTuple, Anum_pg_dist_partition_repmodel, tupleDescriptor, &isNull); if (isNull) { /* * repmodel is NOT NULL but before ALTER EXTENSION citus UPGRADE the column * doesn't exist */ cacheEntry->replicationModel = 'c'; } else { cacheEntry->replicationModel = DatumGetChar(replicationModelDatum); } heap_freetuple(distPartitionTuple); BuildCachedShardList(cacheEntry); /* we only need hash functions for hash distributed tables */ if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH) { TypeCacheEntry *typeEntry = NULL; Node *partitionNode = stringToNode(cacheEntry->partitionKeyString); Var *partitionColumn = (Var *) partitionNode; FmgrInfo *hashFunction = NULL; Assert(IsA(partitionNode, Var)); typeEntry = lookup_type_cache(partitionColumn->vartype, TYPECACHE_HASH_PROC_FINFO); hashFunction = MemoryContextAllocZero(CacheMemoryContext, sizeof(FmgrInfo)); fmgr_info_copy(hashFunction, &(typeEntry->hash_proc_finfo), CacheMemoryContext); cacheEntry->hashFunction = hashFunction; /* check the shard distribution for hash partitioned tables */ cacheEntry->hasUniformHashDistribution = HasUniformHashDistribution(cacheEntry->sortedShardIntervalArray, cacheEntry->shardIntervalArrayLength); } else { cacheEntry->hashFunction = NULL; } heap_close(pgDistPartition, NoLock); } /* * BuildCachedShardList() is a helper routine for BuildDistTableCacheEntry() * building up the list of shards in a distributed relation. 
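 *
 * Roughly: it loads the pg_dist_shard tuples for the relation, copies the
 * shard intervals into CacheMemoryContext, sorts them, performs sanity
 * checks (uninitialized or overlapping intervals), and finally fills the
 * shardId -> ShardCacheEntry hash together with the per-shard placement
 * arrays.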
*/ static void BuildCachedShardList(DistTableCacheEntry *cacheEntry) { ShardInterval **shardIntervalArray = NULL; ShardInterval **sortedShardIntervalArray = NULL; FmgrInfo *shardIntervalCompareFunction = NULL; FmgrInfo *shardColumnCompareFunction = NULL; List *distShardTupleList = NIL; int shardIntervalArrayLength = 0; int shardIndex = 0; Oid columnTypeId = InvalidOid; int32 columnTypeMod = -1; Oid intervalTypeId = InvalidOid; int32 intervalTypeMod = -1; GetPartitionTypeInputInfo(cacheEntry->partitionKeyString, cacheEntry->partitionMethod, &columnTypeId, &columnTypeMod, &intervalTypeId, &intervalTypeMod); distShardTupleList = LookupDistShardTuples(cacheEntry->relationId); shardIntervalArrayLength = list_length(distShardTupleList); if (shardIntervalArrayLength > 0) { Relation distShardRelation = heap_open(DistShardRelationId(), AccessShareLock); TupleDesc distShardTupleDesc = RelationGetDescr(distShardRelation); ListCell *distShardTupleCell = NULL; int arrayIndex = 0; shardIntervalArray = MemoryContextAllocZero(CacheMemoryContext, shardIntervalArrayLength * sizeof(ShardInterval *)); cacheEntry->arrayOfPlacementArrays = MemoryContextAllocZero(CacheMemoryContext, shardIntervalArrayLength * sizeof(GroupShardPlacement *)); cacheEntry->arrayOfPlacementArrayLengths = MemoryContextAllocZero(CacheMemoryContext, shardIntervalArrayLength * sizeof(int)); foreach(distShardTupleCell, distShardTupleList) { HeapTuple shardTuple = lfirst(distShardTupleCell); ShardInterval *shardInterval = TupleToShardInterval(shardTuple, distShardTupleDesc, intervalTypeId, intervalTypeMod); ShardInterval *newShardInterval = NULL; MemoryContext oldContext = MemoryContextSwitchTo(CacheMemoryContext); newShardInterval = (ShardInterval *) palloc0(sizeof(ShardInterval)); CopyShardInterval(shardInterval, newShardInterval); shardIntervalArray[arrayIndex] = newShardInterval; MemoryContextSwitchTo(oldContext); heap_freetuple(shardTuple); arrayIndex++; } heap_close(distShardRelation, AccessShareLock); } /* look up value comparison function */ if (columnTypeId != InvalidOid) { /* allocate the comparison function in the cache context */ MemoryContext oldContext = MemoryContextSwitchTo(CacheMemoryContext); shardColumnCompareFunction = GetFunctionInfo(columnTypeId, BTREE_AM_OID, BTORDER_PROC); MemoryContextSwitchTo(oldContext); } else { shardColumnCompareFunction = NULL; } /* look up interval comparison function */ if (intervalTypeId != InvalidOid) { /* allocate the comparison function in the cache context */ MemoryContext oldContext = MemoryContextSwitchTo(CacheMemoryContext); shardIntervalCompareFunction = GetFunctionInfo(intervalTypeId, BTREE_AM_OID, BTORDER_PROC); MemoryContextSwitchTo(oldContext); } else { shardIntervalCompareFunction = NULL; } /* reference tables has a single shard which is not initialized */ if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE) { cacheEntry->hasUninitializedShardInterval = true; cacheEntry->hasOverlappingShardInterval = true; /* * Note that during create_reference_table() call, * the reference table do not have any shards. 
*/ if (shardIntervalArrayLength > 1) { char *relationName = get_rel_name(cacheEntry->relationId); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("reference table \"%s\" has more than 1 shard", relationName))); } /* since there is a zero or one shard, it is already sorted */ sortedShardIntervalArray = shardIntervalArray; } else { /* sort the interval array */ sortedShardIntervalArray = SortShardIntervalArray(shardIntervalArray, shardIntervalArrayLength, shardIntervalCompareFunction); /* check if there exists any shard intervals with no min/max values */ cacheEntry->hasUninitializedShardInterval = HasUninitializedShardInterval(sortedShardIntervalArray, shardIntervalArrayLength); if (!cacheEntry->hasUninitializedShardInterval) { cacheEntry->hasOverlappingShardInterval = HasOverlappingShardInterval(sortedShardIntervalArray, shardIntervalArrayLength, shardIntervalCompareFunction); } else { cacheEntry->hasOverlappingShardInterval = true; } /* * If table is hash-partitioned and has shards, there never should be * any uninitalized shards. Historically we've not prevented that for * range partitioned tables, but it might be a good idea to start * doing so. */ if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH && cacheEntry->hasUninitializedShardInterval) { ereport(ERROR, (errmsg("hash partitioned table has uninitialized shards"))); } if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH && cacheEntry->hasOverlappingShardInterval) { ereport(ERROR, (errmsg("hash partitioned table has overlapping shards"))); } } /* maintain shardId->(table,ShardInterval) cache */ for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) { ShardCacheEntry *shardEntry = NULL; ShardInterval *shardInterval = sortedShardIntervalArray[shardIndex]; bool foundInCache = false; List *placementList = NIL; MemoryContext oldContext = NULL; ListCell *placementCell = NULL; GroupShardPlacement *placementArray = NULL; int placementOffset = 0; int numberOfPlacements = 0; shardEntry = hash_search(DistShardCacheHash, &shardInterval->shardId, HASH_ENTER, &foundInCache); if (foundInCache) { ereport(ERROR, (errmsg("cached metadata for shard " UINT64_FORMAT " is inconsistent", shardInterval->shardId), errhint("Reconnect and try again."))); } shardEntry->shardIndex = shardIndex; shardEntry->tableEntry = cacheEntry; /* build list of shard placements */ placementList = BuildShardPlacementList(shardInterval); numberOfPlacements = list_length(placementList); /* and copy that list into the cache entry */ oldContext = MemoryContextSwitchTo(CacheMemoryContext); placementArray = palloc0(numberOfPlacements * sizeof(GroupShardPlacement)); foreach(placementCell, placementList) { GroupShardPlacement *srcPlacement = (GroupShardPlacement *) lfirst(placementCell); GroupShardPlacement *dstPlacement = &placementArray[placementOffset]; memcpy(dstPlacement, srcPlacement, sizeof(GroupShardPlacement)); placementOffset++; } MemoryContextSwitchTo(oldContext); cacheEntry->arrayOfPlacementArrays[shardIndex] = placementArray; cacheEntry->arrayOfPlacementArrayLengths[shardIndex] = numberOfPlacements; } cacheEntry->shardIntervalArrayLength = shardIntervalArrayLength; cacheEntry->sortedShardIntervalArray = sortedShardIntervalArray; cacheEntry->shardColumnCompareFunction = shardColumnCompareFunction; cacheEntry->shardIntervalCompareFunction = shardIntervalCompareFunction; } /* * SortedShardIntervalArray sorts the input shardIntervalArray. Shard intervals with * no min/max values are placed at the end of the array. 
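 *
 * The sort happens in place via qsort_arg() using CompareShardIntervals and
 * the interval type's btree comparison function, so the returned pointer is
 * the same array that was passed in.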
*/ static ShardInterval ** SortShardIntervalArray(ShardInterval **shardIntervalArray, int shardCount, FmgrInfo *shardIntervalSortCompareFunction) { ShardInterval **sortedShardIntervalArray = NULL; /* short cut if there are no shard intervals in the array */ if (shardCount == 0) { return shardIntervalArray; } /* if a shard doesn't have min/max values, it's placed in the end of the array */ qsort_arg(shardIntervalArray, shardCount, sizeof(ShardInterval *), (qsort_arg_comparator) CompareShardIntervals, (void *) shardIntervalSortCompareFunction); sortedShardIntervalArray = shardIntervalArray; return sortedShardIntervalArray; } /* * HasUniformHashDistribution determines whether the given list of sorted shards * has a uniform hash distribution, as produced by master_create_worker_shards for * hash partitioned tables. */ static bool HasUniformHashDistribution(ShardInterval **shardIntervalArray, int shardIntervalArrayLength) { uint64 hashTokenIncrement = 0; int shardIndex = 0; /* if there are no shards, there is no uniform distribution */ if (shardIntervalArrayLength == 0) { return false; } /* calculate the hash token increment */ hashTokenIncrement = HASH_TOKEN_COUNT / shardIntervalArrayLength; for (shardIndex = 0; shardIndex < shardIntervalArrayLength; shardIndex++) { ShardInterval *shardInterval = shardIntervalArray[shardIndex]; int32 shardMinHashToken = INT32_MIN + (shardIndex * hashTokenIncrement); int32 shardMaxHashToken = shardMinHashToken + (hashTokenIncrement - 1); if (shardIndex == (shardIntervalArrayLength - 1)) { shardMaxHashToken = INT32_MAX; } if (DatumGetInt32(shardInterval->minValue) != shardMinHashToken || DatumGetInt32(shardInterval->maxValue) != shardMaxHashToken) { return false; } } return true; } /* * HasUninitializedShardInterval returns true if all the elements of the * sortedShardIntervalArray has min/max values. Callers of the function must * ensure that input shard interval array is sorted on shardminvalue and uninitialized * shard intervals are at the end of the array. */ static bool HasUninitializedShardInterval(ShardInterval **sortedShardIntervalArray, int shardCount) { bool hasUninitializedShardInterval = false; ShardInterval *lastShardInterval = NULL; if (shardCount == 0) { return hasUninitializedShardInterval; } Assert(sortedShardIntervalArray != NULL); /* * Since the shard interval array is sorted, and uninitialized ones stored * in the end of the array, checking the last element is enough. */ lastShardInterval = sortedShardIntervalArray[shardCount - 1]; if (!lastShardInterval->minValueExists || !lastShardInterval->maxValueExists) { hasUninitializedShardInterval = true; } return hasUninitializedShardInterval; } /* * HasOverlappingShardInterval determines whether the given list of sorted * shards has overlapping ranges. 
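 *
 * Note that a shared boundary already counts as an overlap: if one shard's
 * max value compares equal to the next shard's min value, the comparison
 * below yields 0 and the intervals are reported as overlapping.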
*/ static bool HasOverlappingShardInterval(ShardInterval **shardIntervalArray, int shardIntervalArrayLength, FmgrInfo *shardIntervalSortCompareFunction) { int shardIndex = 0; ShardInterval *lastShardInterval = NULL; Datum comparisonDatum = 0; int comparisonResult = 0; /* zero/a single shard can't overlap */ if (shardIntervalArrayLength < 2) { return false; } lastShardInterval = shardIntervalArray[0]; for (shardIndex = 1; shardIndex < shardIntervalArrayLength; shardIndex++) { ShardInterval *curShardInterval = shardIntervalArray[shardIndex]; /* only called if !hasUninitializedShardInterval */ Assert(lastShardInterval->minValueExists && lastShardInterval->maxValueExists); Assert(curShardInterval->minValueExists && curShardInterval->maxValueExists); comparisonDatum = CompareCall2(shardIntervalSortCompareFunction, lastShardInterval->maxValue, curShardInterval->minValue); comparisonResult = DatumGetInt32(comparisonDatum); if (comparisonResult >= 0) { return true; } lastShardInterval = curShardInterval; } return false; } /* * CitusHasBeenLoaded returns true if the citus extension has been created * in the current database and the extension script has been executed. Otherwise, * it returns false. The result is cached as this is called very frequently. */ bool CitusHasBeenLoaded(void) { /* recheck presence until citus has been loaded */ if (!MetadataCache.extensionLoaded || creating_extension) { bool extensionPresent = false; bool extensionScriptExecuted = true; Oid extensionOid = get_extension_oid("citus", true); if (extensionOid != InvalidOid) { extensionPresent = true; } if (extensionPresent) { /* check if Citus extension objects are still being created */ if (creating_extension && CurrentExtensionObject == extensionOid) { extensionScriptExecuted = false; } /* * Whenever the extension exists, even when currently creating it, * we need the infrastructure to run citus in this database to be * ready. */ StartupCitusBackend(); } /* we disable extension features during pg_upgrade */ MetadataCache.extensionLoaded = extensionPresent && extensionScriptExecuted && !IsBinaryUpgrade; if (MetadataCache.extensionLoaded) { /* * InvalidateDistRelationCacheCallback resets state such as extensionLoaded * when it notices changes to pg_dist_partition (which usually indicate * `DROP EXTENSION citus;` has been run) * * Ensure InvalidateDistRelationCacheCallback will notice those changes * by caching pg_dist_partition's oid. * * We skip these checks during upgrade since pg_dist_partition is not * present during early stages of upgrade operation. */ DistPartitionRelationId(); /* * We also reset citusVersionKnownCompatible, so it will be re-read in * case of extension update. */ citusVersionKnownCompatible = false; } } return MetadataCache.extensionLoaded; } /* * CheckCitusVersion checks whether there is a version mismatch between the * available version and the loaded version or between the installed version * and the loaded version. Returns true if compatible, false otherwise. * * As a side effect, this function also sets citusVersionKnownCompatible global * variable to true which reduces version check cost of next calls. 
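 *
 * The elevel argument determines how a mismatch is reported: for instance,
 * the maintenance daemon calls CheckCitusVersion(DEBUG1) and simply skips
 * its work, whereas the cache invalidation triggers call
 * CheckCitusVersion(ERROR) and abort.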
*/ bool CheckCitusVersion(int elevel) { if (citusVersionKnownCompatible || !CitusHasBeenLoaded() || !EnableVersionChecks) { return true; } if (CheckAvailableVersion(elevel) && CheckInstalledVersion(elevel)) { citusVersionKnownCompatible = true; return true; } else { return false; } } /* * CheckAvailableVersion compares CITUS_EXTENSIONVERSION and the currently * available version from the citus.control file. If they are not compatible, * this function logs an error with the specified elevel and returns false, * otherwise it returns true. */ bool CheckAvailableVersion(int elevel) { char *availableVersion = NULL; if (!EnableVersionChecks) { return true; } availableVersion = AvailableExtensionVersion(); if (!MajorVersionsCompatible(availableVersion, CITUS_EXTENSIONVERSION)) { ereport(elevel, (errmsg("loaded Citus library version differs from latest " "available extension version"), errdetail("Loaded library requires %s, but the latest control " "file specifies %s.", CITUS_MAJORVERSION, availableVersion), errhint("Restart the database to load the latest Citus " "library."))); return false; } return true; } /* * CheckInstalledVersion compares CITUS_EXTENSIONVERSION and the the * extension's current version from the pg_extemsion catalog table. If they * are not compatible, this function logs an error with the specified elevel, * otherwise it returns true. */ static bool CheckInstalledVersion(int elevel) { char *installedVersion = NULL; Assert(CitusHasBeenLoaded()); Assert(EnableVersionChecks); installedVersion = InstalledExtensionVersion(); if (!MajorVersionsCompatible(installedVersion, CITUS_EXTENSIONVERSION)) { ereport(elevel, (errmsg("loaded Citus library version differs from installed " "extension version"), errdetail("Loaded library requires %s, but the installed " "extension version is %s.", CITUS_MAJORVERSION, installedVersion), errhint("Run ALTER EXTENSION citus UPDATE and try again."))); return false; } return true; } /* * MajorVersionsCompatible checks whether both versions are compatible. They * are if major and minor version numbers match, the schema version is * ignored. Returns true if compatible, false otherwise. */ bool MajorVersionsCompatible(char *leftVersion, char *rightVersion) { const char schemaVersionSeparator = '-'; char *leftSeperatorPosition = strchr(leftVersion, schemaVersionSeparator); char *rightSeperatorPosition = strchr(rightVersion, schemaVersionSeparator); int leftComparisionLimit = 0; int rightComparisionLimit = 0; if (leftSeperatorPosition != NULL) { leftComparisionLimit = leftSeperatorPosition - leftVersion; } else { leftComparisionLimit = strlen(leftVersion); } if (rightSeperatorPosition != NULL) { rightComparisionLimit = rightSeperatorPosition - rightVersion; } else { rightComparisionLimit = strlen(leftVersion); } /* we can error out early if hypens are not in the same position */ if (leftComparisionLimit != rightComparisionLimit) { return false; } return strncmp(leftVersion, rightVersion, leftComparisionLimit) == 0; } /* * AvailableExtensionVersion returns the Citus version from citus.control file. It also * saves the result, thus consecutive calls to CitusExtensionAvailableVersion will * not read the citus.control file again. 
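 *
 * The lookup materializes the result set of pg_available_extensions() and
 * scans it for the "citus" row; the version string is copied into
 * CacheMemoryContext before being returned.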
*/ static char * AvailableExtensionVersion(void) { ReturnSetInfo *extensionsResultSet = NULL; TupleTableSlot *tupleTableSlot = NULL; FunctionCallInfoData *fcinfo = NULL; FmgrInfo *flinfo = NULL; int argumentCount = 0; EState *estate = NULL; bool hasTuple = false; bool goForward = true; bool doCopy = false; char *availableExtensionVersion; InitializeCaches(); estate = CreateExecutorState(); extensionsResultSet = makeNode(ReturnSetInfo); extensionsResultSet->econtext = GetPerTupleExprContext(estate); extensionsResultSet->allowedModes = SFRM_Materialize; fcinfo = palloc0(sizeof(FunctionCallInfoData)); flinfo = palloc0(sizeof(FmgrInfo)); fmgr_info(F_PG_AVAILABLE_EXTENSIONS, flinfo); InitFunctionCallInfoData(*fcinfo, flinfo, argumentCount, InvalidOid, NULL, (Node *) extensionsResultSet); /* pg_available_extensions returns result set containing all available extensions */ (*pg_available_extensions)(fcinfo); tupleTableSlot = MakeSingleTupleTableSlot(extensionsResultSet->setDesc); hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, doCopy, tupleTableSlot); while (hasTuple) { Datum extensionNameDatum = 0; char *extensionName = NULL; bool isNull = false; extensionNameDatum = slot_getattr(tupleTableSlot, 1, &isNull); extensionName = NameStr(*DatumGetName(extensionNameDatum)); if (strcmp(extensionName, "citus") == 0) { MemoryContext oldMemoryContext = NULL; Datum availableVersion = slot_getattr(tupleTableSlot, 2, &isNull); /* we will cache the result of citus version to prevent catalog access */ oldMemoryContext = MemoryContextSwitchTo(CacheMemoryContext); availableExtensionVersion = text_to_cstring(DatumGetTextPP(availableVersion)); MemoryContextSwitchTo(oldMemoryContext); ExecClearTuple(tupleTableSlot); ExecDropSingleTupleTableSlot(tupleTableSlot); return availableExtensionVersion; } ExecClearTuple(tupleTableSlot); hasTuple = tuplestore_gettupleslot(extensionsResultSet->setResult, goForward, doCopy, tupleTableSlot); } ExecDropSingleTupleTableSlot(tupleTableSlot); ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension is not found"))); return NULL; } /* * InstalledExtensionVersion returns the Citus version in PostgreSQL pg_extension table. 
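 *
 * The version is read from the extension's pg_extension row via the name
 * index and copied into CacheMemoryContext; a missing row or a NULL
 * extversion column is reported as an error.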
*/ static char * InstalledExtensionVersion(void) { Relation relation = NULL; SysScanDesc scandesc; ScanKeyData entry[1]; HeapTuple extensionTuple = NULL; char *installedExtensionVersion = NULL; relation = heap_open(ExtensionRelationId, AccessShareLock); ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum("citus")); scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, NULL, 1, entry); extensionTuple = systable_getnext(scandesc); /* We assume that there can be at most one matching tuple */ if (HeapTupleIsValid(extensionTuple)) { MemoryContext oldMemoryContext = NULL; int extensionIndex = Anum_pg_extension_extversion; TupleDesc tupleDescriptor = RelationGetDescr(relation); bool isNull = false; Datum installedVersion = heap_getattr(extensionTuple, extensionIndex, tupleDescriptor, &isNull); if (isNull) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension version is null"))); } /* we will cache the result of citus version to prevent catalog access */ oldMemoryContext = MemoryContextSwitchTo(CacheMemoryContext); installedExtensionVersion = text_to_cstring(DatumGetTextPP(installedVersion)); MemoryContextSwitchTo(oldMemoryContext); } else { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension is not loaded"))); } systable_endscan(scandesc); heap_close(relation, AccessShareLock); return installedExtensionVersion; } /* return oid of pg_dist_shard relation */ Oid DistShardRelationId(void) { CachedRelationLookup("pg_dist_shard", &MetadataCache.distShardRelationId); return MetadataCache.distShardRelationId; } /* return oid of pg_dist_placement relation */ Oid DistPlacementRelationId(void) { CachedRelationLookup("pg_dist_placement", &MetadataCache.distPlacementRelationId); return MetadataCache.distPlacementRelationId; } /* return oid of pg_dist_node relation */ Oid DistNodeRelationId(void) { CachedRelationLookup("pg_dist_node", &MetadataCache.distNodeRelationId); return MetadataCache.distNodeRelationId; } /* return oid of pg_dist_local_group relation */ Oid DistLocalGroupIdRelationId(void) { CachedRelationLookup("pg_dist_local_group", &MetadataCache.distLocalGroupRelationId); return MetadataCache.distLocalGroupRelationId; } /* return oid of pg_dist_colocation relation */ Oid DistColocationRelationId(void) { CachedRelationLookup("pg_dist_colocation", &MetadataCache.distColocationRelationId); return MetadataCache.distColocationRelationId; } /* return oid of pg_dist_colocation_configuration_index index */ Oid DistColocationConfigurationIndexId(void) { CachedRelationLookup("pg_dist_colocation_configuration_index", &MetadataCache.distColocationConfigurationIndexId); return MetadataCache.distColocationConfigurationIndexId; } /* return oid of pg_dist_colocation_pkey index */ Oid DistColocationColocationidIndexId(void) { CachedRelationLookup("pg_dist_colocation_pkey", &MetadataCache.distColocationColocationidIndexId); return MetadataCache.distColocationColocationidIndexId; } /* return oid of pg_dist_partition relation */ Oid DistPartitionRelationId(void) { CachedRelationLookup("pg_dist_partition", &MetadataCache.distPartitionRelationId); return MetadataCache.distPartitionRelationId; } /* return oid of pg_dist_partition_logical_relid_index index */ Oid DistPartitionLogicalRelidIndexId(void) { CachedRelationLookup("pg_dist_partition_logical_relid_index", &MetadataCache.distPartitionLogicalRelidIndexId); return MetadataCache.distPartitionLogicalRelidIndexId; } /* return oid of 
pg_dist_partition_colocationid_index index */ Oid DistPartitionColocationidIndexId(void) { CachedRelationLookup("pg_dist_partition_colocationid_index", &MetadataCache.distPartitionColocationidIndexId); return MetadataCache.distPartitionColocationidIndexId; } /* return oid of pg_dist_shard_logical_relid_index index */ Oid DistShardLogicalRelidIndexId(void) { CachedRelationLookup("pg_dist_shard_logical_relid_index", &MetadataCache.distShardLogicalRelidIndexId); return MetadataCache.distShardLogicalRelidIndexId; } /* return oid of pg_dist_shard_shardid_index index */ Oid DistShardShardidIndexId(void) { CachedRelationLookup("pg_dist_shard_shardid_index", &MetadataCache.distShardShardidIndexId); return MetadataCache.distShardShardidIndexId; } /* return oid of pg_dist_placement_shardid_index */ Oid DistPlacementShardidIndexId(void) { CachedRelationLookup("pg_dist_placement_shardid_index", &MetadataCache.distPlacementShardidIndexId); return MetadataCache.distPlacementShardidIndexId; } /* return oid of pg_dist_placement_placementid_index */ Oid DistPlacementPlacementidIndexId(void) { CachedRelationLookup("pg_dist_placement_placementid_index", &MetadataCache.distPlacementPlacementidIndexId); return MetadataCache.distPlacementPlacementidIndexId; } /* return oid of pg_dist_transaction relation */ Oid DistTransactionRelationId(void) { CachedRelationLookup("pg_dist_transaction", &MetadataCache.distTransactionRelationId); return MetadataCache.distTransactionRelationId; } /* return oid of pg_dist_transaction_group_index */ Oid DistTransactionGroupIndexId(void) { CachedRelationLookup("pg_dist_transaction_group_index", &MetadataCache.distTransactionGroupIndexId); return MetadataCache.distTransactionGroupIndexId; } /* return oid of pg_dist_transaction_unique_constraint */ Oid DistTransactionRecordIndexId(void) { CachedRelationLookup("pg_dist_transaction_unique_constraint", &MetadataCache.distTransactionRecordIndexId); return MetadataCache.distTransactionRecordIndexId; } /* return oid of pg_dist_placement_groupid_index */ Oid DistPlacementGroupidIndexId(void) { CachedRelationLookup("pg_dist_placement_groupid_index", &MetadataCache.distPlacementGroupidIndexId); return MetadataCache.distPlacementGroupidIndexId; } /* return oid of the citus_extradata_container(internal) function */ Oid CitusExtraDataContainerFuncId(void) { List *nameList = NIL; Oid paramOids[1] = { INTERNALOID }; if (MetadataCache.extraDataContainerFuncId == InvalidOid) { nameList = list_make2(makeString("pg_catalog"), makeString("citus_extradata_container")); MetadataCache.extraDataContainerFuncId = LookupFuncName(nameList, 1, paramOids, false); } return MetadataCache.extraDataContainerFuncId; } /* return oid of the worker_hash function */ Oid CitusWorkerHashFunctionId(void) { if (MetadataCache.workerHashFunctionId == InvalidOid) { Oid citusExtensionOid = get_extension_oid("citus", false); Oid citusSchemaOid = get_extension_schema(citusExtensionOid); char *citusSchemaName = get_namespace_name(citusSchemaOid); const int argCount = 1; MetadataCache.workerHashFunctionId = FunctionOid(citusSchemaName, "worker_hash", argCount); } return MetadataCache.workerHashFunctionId; } /* * CitusExtensionOwner() returns the owner of the 'citus' extension. That user * is, amongst others, used to perform actions a normal user might not be * allowed to perform. 
*/ extern Oid CitusExtensionOwner(void) { Relation relation = NULL; SysScanDesc scandesc; ScanKeyData entry[1]; HeapTuple extensionTuple = NULL; Form_pg_extension extensionForm = NULL; if (MetadataCache.extensionOwner != InvalidOid) { return MetadataCache.extensionOwner; } relation = heap_open(ExtensionRelationId, AccessShareLock); ScanKeyInit(&entry[0], Anum_pg_extension_extname, BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum("citus")); scandesc = systable_beginscan(relation, ExtensionNameIndexId, true, NULL, 1, entry); extensionTuple = systable_getnext(scandesc); /* We assume that there can be at most one matching tuple */ if (HeapTupleIsValid(extensionTuple)) { extensionForm = (Form_pg_extension) GETSTRUCT(extensionTuple); /* * For some operations Citus requires superuser permissions; we use * the extension owner for that. The extension owner is guaranteed to * be a superuser (otherwise C functions can't be created), but it'd * be possible to change the owner. So check that this still a * superuser. */ if (!superuser_arg(extensionForm->extowner)) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension needs to be owned by superuser"))); } MetadataCache.extensionOwner = extensionForm->extowner; Assert(OidIsValid(MetadataCache.extensionOwner)); } else { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension not loaded"))); } systable_endscan(scandesc); heap_close(relation, AccessShareLock); return MetadataCache.extensionOwner; } /* * CitusExtensionOwnerName returns the name of the owner of the extension. */ char * CitusExtensionOwnerName(void) { Oid superUserId = CitusExtensionOwner(); return GetUserNameFromId(superUserId, false); } /* return the username of the currently active role */ char * CurrentUserName(void) { Oid userId = GetUserId(); return GetUserNameFromId(userId, false); } /* * LookupNodeRoleValueId returns the Oid of the "pg_catalog.noderole" type, or InvalidOid * if it does not exist. */ static Oid LookupNodeRoleTypeOid() { Value *schemaName = makeString("pg_catalog"); Value *typeName = makeString("noderole"); List *qualifiedName = list_make2(schemaName, typeName); TypeName *enumTypeName = makeTypeNameFromNameList(qualifiedName); Oid nodeRoleTypId; /* typenameTypeId but instead of raising an error return InvalidOid */ Type tup = LookupTypeName(NULL, enumTypeName, NULL, false); if (tup == NULL) { return InvalidOid; } nodeRoleTypId = HeapTupleGetOid(tup); ReleaseSysCache(tup); return nodeRoleTypId; } /* * LookupNodeRoleValueId returns the Oid of the value in "pg_catalog.noderole" which * matches the provided name, or InvalidOid if the noderole enum doesn't exist yet. 
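 *
 * The value OID is resolved by calling enum_in() with the noderole type's
 * OID, so the lookup presumes that the extension script has already created
 * the enum and its labels.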
*/ static Oid LookupNodeRoleValueId(char *valueName) { Oid nodeRoleTypId = LookupNodeRoleTypeOid(); if (nodeRoleTypId == InvalidOid) { return InvalidOid; } else { Datum nodeRoleIdDatum = ObjectIdGetDatum(nodeRoleTypId); Datum valueDatum = CStringGetDatum(valueName); Datum valueIdDatum = DirectFunctionCall2(enum_in, valueDatum, nodeRoleIdDatum); Oid valueId = DatumGetObjectId(valueIdDatum); return valueId; } } /* return the Oid of the 'primary' nodeRole enum value */ Oid PrimaryNodeRoleId(void) { if (!MetadataCache.primaryNodeRoleId) { MetadataCache.primaryNodeRoleId = LookupNodeRoleValueId("primary"); } return MetadataCache.primaryNodeRoleId; } /* return the Oid of the 'secodary' nodeRole enum value */ Oid SecondaryNodeRoleId(void) { if (!MetadataCache.secondaryNodeRoleId) { MetadataCache.secondaryNodeRoleId = LookupNodeRoleValueId("secondary"); } return MetadataCache.secondaryNodeRoleId; } /* return the Oid of the 'unavailable' nodeRole enum value */ Oid UnavailableNodeRoleId(void) { if (!MetadataCache.unavailableNodeRoleId) { MetadataCache.unavailableNodeRoleId = LookupNodeRoleValueId("unavailable"); } return MetadataCache.unavailableNodeRoleId; } /* * master_dist_partition_cache_invalidate is a trigger function that performs * relcache invalidations when the contents of pg_dist_partition are changed * on the SQL level. * * NB: We decided there is little point in checking permissions here, there * are much easier ways to waste CPU than causing cache invalidations. */ Datum master_dist_partition_cache_invalidate(PG_FUNCTION_ARGS) { TriggerData *triggerData = (TriggerData *) fcinfo->context; HeapTuple newTuple = NULL; HeapTuple oldTuple = NULL; Oid oldLogicalRelationId = InvalidOid; Oid newLogicalRelationId = InvalidOid; if (!CALLED_AS_TRIGGER(fcinfo)) { ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("must be called as trigger"))); } CheckCitusVersion(ERROR); newTuple = triggerData->tg_newtuple; oldTuple = triggerData->tg_trigtuple; /* collect logicalrelid for OLD and NEW tuple */ if (oldTuple != NULL) { Form_pg_dist_partition distPart = (Form_pg_dist_partition) GETSTRUCT(oldTuple); oldLogicalRelationId = distPart->logicalrelid; } if (newTuple != NULL) { Form_pg_dist_partition distPart = (Form_pg_dist_partition) GETSTRUCT(newTuple); newLogicalRelationId = distPart->logicalrelid; } /* * Invalidate relcache for the relevant relation(s). In theory * logicalrelid should never change, but it doesn't hurt to be * paranoid. */ if (oldLogicalRelationId != InvalidOid && oldLogicalRelationId != newLogicalRelationId) { CitusInvalidateRelcacheByRelid(oldLogicalRelationId); } if (newLogicalRelationId != InvalidOid) { CitusInvalidateRelcacheByRelid(newLogicalRelationId); } PG_RETURN_DATUM(PointerGetDatum(NULL)); } /* * master_dist_shard_cache_invalidate is a trigger function that performs * relcache invalidations when the contents of pg_dist_shard are changed * on the SQL level. * * NB: We decided there is little point in checking permissions here, there * are much easier ways to waste CPU than causing cache invalidations. 
*/ Datum master_dist_shard_cache_invalidate(PG_FUNCTION_ARGS) { TriggerData *triggerData = (TriggerData *) fcinfo->context; HeapTuple newTuple = NULL; HeapTuple oldTuple = NULL; Oid oldLogicalRelationId = InvalidOid; Oid newLogicalRelationId = InvalidOid; if (!CALLED_AS_TRIGGER(fcinfo)) { ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("must be called as trigger"))); } CheckCitusVersion(ERROR); newTuple = triggerData->tg_newtuple; oldTuple = triggerData->tg_trigtuple; /* collect logicalrelid for OLD and NEW tuple */ if (oldTuple != NULL) { Form_pg_dist_shard distShard = (Form_pg_dist_shard) GETSTRUCT(oldTuple); oldLogicalRelationId = distShard->logicalrelid; } if (newTuple != NULL) { Form_pg_dist_shard distShard = (Form_pg_dist_shard) GETSTRUCT(newTuple); newLogicalRelationId = distShard->logicalrelid; } /* * Invalidate relcache for the relevant relation(s). In theory * logicalrelid should never change, but it doesn't hurt to be * paranoid. */ if (oldLogicalRelationId != InvalidOid && oldLogicalRelationId != newLogicalRelationId) { CitusInvalidateRelcacheByRelid(oldLogicalRelationId); } if (newLogicalRelationId != InvalidOid) { CitusInvalidateRelcacheByRelid(newLogicalRelationId); } PG_RETURN_DATUM(PointerGetDatum(NULL)); } /* * master_dist_placement_cache_invalidate is a trigger function that performs * relcache invalidations when the contents of pg_dist_placement are * changed on the SQL level. * * NB: We decided there is little point in checking permissions here, there * are much easier ways to waste CPU than causing cache invalidations. */ Datum master_dist_placement_cache_invalidate(PG_FUNCTION_ARGS) { TriggerData *triggerData = (TriggerData *) fcinfo->context; HeapTuple newTuple = NULL; HeapTuple oldTuple = NULL; Oid oldShardId = InvalidOid; Oid newShardId = InvalidOid; if (!CALLED_AS_TRIGGER(fcinfo)) { ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("must be called as trigger"))); } CheckCitusVersion(ERROR); newTuple = triggerData->tg_newtuple; oldTuple = triggerData->tg_trigtuple; /* collect shardid for OLD and NEW tuple */ if (oldTuple != NULL) { Form_pg_dist_placement distPlacement = (Form_pg_dist_placement) GETSTRUCT(oldTuple); oldShardId = distPlacement->shardid; } if (newTuple != NULL) { Form_pg_dist_placement distPlacement = (Form_pg_dist_placement) GETSTRUCT(newTuple); newShardId = distPlacement->shardid; } /* * Invalidate relcache for the relevant relation(s). In theory shardId * should never change, but it doesn't hurt to be paranoid. */ if (oldShardId != InvalidOid && oldShardId != newShardId) { CitusInvalidateRelcacheByShardId(oldShardId); } if (newShardId != InvalidOid) { CitusInvalidateRelcacheByShardId(newShardId); } PG_RETURN_DATUM(PointerGetDatum(NULL)); } /* * master_dist_node_cache_invalidate is a trigger function that performs * relcache invalidations when the contents of pg_dist_node are changed * on the SQL level. * * NB: We decided there is little point in checking permissions here, there * are much easier ways to waste CPU than causing cache invalidations. 
*/ Datum master_dist_node_cache_invalidate(PG_FUNCTION_ARGS) { if (!CALLED_AS_TRIGGER(fcinfo)) { ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("must be called as trigger"))); } CheckCitusVersion(ERROR); CitusInvalidateRelcacheByRelid(DistNodeRelationId()); PG_RETURN_DATUM(PointerGetDatum(NULL)); } /* * master_dist_local_group_cache_invalidate is a trigger function that performs * relcache invalidations when the contents of pg_dist_local_group are changed * on the SQL level. * * NB: We decided there is little point in checking permissions here, there * are much easier ways to waste CPU than causing cache invalidations. */ Datum master_dist_local_group_cache_invalidate(PG_FUNCTION_ARGS) { if (!CALLED_AS_TRIGGER(fcinfo)) { ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), errmsg("must be called as trigger"))); } CheckCitusVersion(ERROR); CitusInvalidateRelcacheByRelid(DistLocalGroupIdRelationId()); PG_RETURN_DATUM(PointerGetDatum(NULL)); } /* * InitializeCaches() registers invalidation handlers for metadata_cache.c's * caches. */ static void InitializeCaches(void) { static bool performedInitialization = false; if (!performedInitialization) { /* set first, to avoid recursion dangers */ performedInitialization = true; /* make sure we've initialized CacheMemoryContext */ if (CacheMemoryContext == NULL) { CreateCacheMemoryContext(); } InitializeDistTableCache(); RegisterWorkerNodeCacheCallbacks(); RegisterLocalGroupIdCacheCallbacks(); } } /* initialize the infrastructure for the metadata cache */ static void InitializeDistTableCache(void) { HASHCTL info; /* build initial scan keys, copied for every relation scan */ memset(&DistPartitionScanKey, 0, sizeof(DistPartitionScanKey)); fmgr_info_cxt(F_OIDEQ, &DistPartitionScanKey[0].sk_func, CacheMemoryContext); DistPartitionScanKey[0].sk_strategy = BTEqualStrategyNumber; DistPartitionScanKey[0].sk_subtype = InvalidOid; DistPartitionScanKey[0].sk_collation = InvalidOid; DistPartitionScanKey[0].sk_attno = Anum_pg_dist_partition_logicalrelid; memset(&DistShardScanKey, 0, sizeof(DistShardScanKey)); fmgr_info_cxt(F_OIDEQ, &DistShardScanKey[0].sk_func, CacheMemoryContext); DistShardScanKey[0].sk_strategy = BTEqualStrategyNumber; DistShardScanKey[0].sk_subtype = InvalidOid; DistShardScanKey[0].sk_collation = InvalidOid; DistShardScanKey[0].sk_attno = Anum_pg_dist_shard_logicalrelid; /* initialize the per-table hash table */ MemSet(&info, 0, sizeof(info)); info.keysize = sizeof(Oid); info.entrysize = sizeof(DistTableCacheEntry); info.hash = tag_hash; DistTableCacheHash = hash_create("Distributed Relation Cache", 32, &info, HASH_ELEM | HASH_FUNCTION); /* initialize the per-shard hash table */ MemSet(&info, 0, sizeof(info)); info.keysize = sizeof(int64); info.entrysize = sizeof(ShardCacheEntry); info.hash = tag_hash; DistShardCacheHash = hash_create("Shard Cache", 32 * 64, &info, HASH_ELEM | HASH_FUNCTION); /* Watch for invalidation events. */ CacheRegisterRelcacheCallback(InvalidateDistRelationCacheCallback, (Datum) 0); } /* * GetWorkerNodeHash is a wrapper around InitializeWorkerNodeCache(). It * triggers InitializeWorkerNodeCache when the workerHash is invalid. Otherwise, * it returns the hash. */ HTAB * GetWorkerNodeHash(void) { InitializeCaches(); /* ensure relevant callbacks are registered */ /* * Simulate a SELECT from pg_dist_node, ensure pg_dist_node doesn't change while our * caller is using WorkerNodeHash. 
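 *
 * For reference, callers typically consume the returned hash with the dynahash
 * sequence scan API (illustrative sketch only, mirroring how PrimaryNodeForGroup
 * and CountPrimariesWithMetadata in node_metadata.c use it):
 *
 *     HASH_SEQ_STATUS status;
 *     WorkerNode *workerNode = NULL;
 *
 *     hash_seq_init(&status, GetWorkerNodeHash());
 *     while ((workerNode = hash_seq_search(&status)) != NULL)
 *     {
 *         ... use workerNode->workerName and workerNode->workerPort ...
 *     }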
*/ LockRelationOid(DistNodeRelationId(), AccessShareLock); /* * We might have some concurrent metadata changes. In order to get the changes, * we first need to accept the cache invalidation messages. */ AcceptInvalidationMessages(); if (!workerNodeHashValid) { InitializeWorkerNodeCache(); workerNodeHashValid = true; } return WorkerNodeHash; } /* * InitializeWorkerNodeCache initialize the infrastructure for the worker node cache. * The function reads the worker nodes from the metadata table, adds them to the hash and * finally registers an invalidation callback. */ static void InitializeWorkerNodeCache(void) { HTAB *oldWorkerNodeHash = NULL; List *workerNodeList = NIL; ListCell *workerNodeCell = NULL; HASHCTL info; int hashFlags = 0; long maxTableSize = (long) MaxWorkerNodesTracked; bool includeNodesFromOtherClusters = false; InitializeCaches(); /* * Create the hash that holds the worker nodes. The key is the combination of * nodename and nodeport, instead of the unique nodeid because worker nodes are * searched by the nodename and nodeport in every physical plan creation. */ memset(&info, 0, sizeof(info)); info.keysize = +sizeof(uint32) + WORKER_LENGTH + sizeof(uint32); info.entrysize = sizeof(WorkerNode); info.hcxt = CacheMemoryContext; info.hash = WorkerNodeHashCode; info.match = WorkerNodeCompare; hashFlags = HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT | HASH_COMPARE; oldWorkerNodeHash = WorkerNodeHash; WorkerNodeHash = hash_create("Worker Node Hash", maxTableSize, &info, hashFlags); /* read the list from pg_dist_node */ workerNodeList = ReadWorkerNodes(includeNodesFromOtherClusters); /* iterate over the worker node list */ foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = NULL; WorkerNode *currentNode = lfirst(workerNodeCell); void *hashKey = NULL; bool handleFound = false; /* search for the worker node in the hash, and then insert the values */ hashKey = (void *) currentNode; workerNode = (WorkerNode *) hash_search(WorkerNodeHash, hashKey, HASH_ENTER, &handleFound); /* fill the newly allocated workerNode in the cache */ strlcpy(workerNode->workerName, currentNode->workerName, WORKER_LENGTH); workerNode->workerPort = currentNode->workerPort; workerNode->groupId = currentNode->groupId; workerNode->nodeId = currentNode->nodeId; strlcpy(workerNode->workerRack, currentNode->workerRack, WORKER_LENGTH); workerNode->hasMetadata = currentNode->hasMetadata; workerNode->isActive = currentNode->isActive; workerNode->nodeRole = currentNode->nodeRole; strlcpy(workerNode->nodeCluster, currentNode->nodeCluster, NAMEDATALEN); if (handleFound) { ereport(WARNING, (errmsg("multiple lines for worker node: \"%s:%u\"", workerNode->workerName, workerNode->workerPort))); } /* we do not need the currentNode anymore */ pfree(currentNode); } /* now, safe to destroy the old hash */ hash_destroy(oldWorkerNodeHash); } /* * RegisterWorkerNodeCacheCallbacks registers the callbacks required for the * worker node cache. It's separate from InitializeWorkerNodeCache so the * callback can be registered early, before the metadata tables exist. */ static void RegisterWorkerNodeCacheCallbacks(void) { /* Watch for invalidation events. */ CacheRegisterRelcacheCallback(InvalidateNodeRelationCacheCallback, (Datum) 0); } /* * GetLocalGroupId returns the group identifier of the local node. The function assumes * that pg_dist_local_node_group has exactly one row and has at least one column. * Otherwise, the function errors out. 
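 *
 * Illustrative usage (hypothetical caller): group id 0 conventionally refers to
 * the coordinator's group, and it is also what this function returns when the
 * pg_dist_local_group catalog does not exist yet, e.g.:
 *
 *     int localGroupId = GetLocalGroupId();
 *     bool probablyCoordinator = (localGroupId == 0);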
*/ int GetLocalGroupId(void) { SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 0; HeapTuple heapTuple = NULL; TupleDesc tupleDescriptor = NULL; Oid groupId = InvalidOid; Relation pgDistLocalGroupId = NULL; Oid localGroupTableOid = InvalidOid; InitializeCaches(); /* * Already set the group id, no need to read the heap again. */ if (LocalGroupId != -1) { return LocalGroupId; } localGroupTableOid = get_relname_relid("pg_dist_local_group", PG_CATALOG_NAMESPACE); if (localGroupTableOid == InvalidOid) { return 0; } pgDistLocalGroupId = heap_open(localGroupTableOid, AccessShareLock); scanDescriptor = systable_beginscan(pgDistLocalGroupId, InvalidOid, false, NULL, scanKeyCount, scanKey); tupleDescriptor = RelationGetDescr(pgDistLocalGroupId); heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { bool isNull = false; Datum groupIdDatum = heap_getattr(heapTuple, Anum_pg_dist_local_groupid, tupleDescriptor, &isNull); groupId = DatumGetUInt32(groupIdDatum); } else { elog(ERROR, "could not find any entries in pg_dist_local_group"); } systable_endscan(scanDescriptor); heap_close(pgDistLocalGroupId, AccessShareLock); /* set the local cache variable */ LocalGroupId = groupId; return groupId; } /* * RegisterLocalGroupIdCacheCallbacks registers the callbacks required to * maintain LocalGroupId at a consistent value. It's separate from * GetLocalGroupId so the callback can be registered early, before metadata * tables exist. */ static void RegisterLocalGroupIdCacheCallbacks(void) { /* Watch for invalidation events. */ CacheRegisterRelcacheCallback(InvalidateLocalGroupIdRelationCacheCallback, (Datum) 0); } /* * WorkerNodeHashCode computes the hash code for a worker node from the node's * host name and port number. Nodes that only differ by their rack locations * hash to the same value. */ static uint32 WorkerNodeHashCode(const void *key, Size keySize) { const WorkerNode *worker = (const WorkerNode *) key; const char *workerName = worker->workerName; const uint32 *workerPort = &(worker->workerPort); /* standard hash function outlined in Effective Java, Item 8 */ uint32 result = 17; result = 37 * result + string_hash(workerName, WORKER_LENGTH); result = 37 * result + tag_hash(workerPort, sizeof(uint32)); return result; } /* * ResetDistTableCacheEntry frees any out-of-band memory used by a cache entry, * but does not free the entry itself. 
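 *
 * For context, an informal sketch of the lifecycle as pieced together from the
 * callbacks in this file (the rebuild path itself lives elsewhere): a relcache
 * invalidation only marks the entry stale, and the out-of-band memory is
 * reclaimed here the next time the entry is looked up and refilled, roughly:
 *
 *     cacheEntry->isValid = false;             (set by the invalidation callback)
 *     ResetDistTableCacheEntry(cacheEntry);    (later, before the entry is rebuilt)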
*/ static void ResetDistTableCacheEntry(DistTableCacheEntry *cacheEntry) { int shardIndex = 0; if (cacheEntry->partitionKeyString != NULL) { pfree(cacheEntry->partitionKeyString); cacheEntry->partitionKeyString = NULL; } if (cacheEntry->shardIntervalCompareFunction != NULL) { pfree(cacheEntry->shardIntervalCompareFunction); cacheEntry->shardIntervalCompareFunction = NULL; } if (cacheEntry->hashFunction) { pfree(cacheEntry->hashFunction); cacheEntry->hashFunction = NULL; } if (cacheEntry->shardIntervalArrayLength == 0) { return; } for (shardIndex = 0; shardIndex < cacheEntry->shardIntervalArrayLength; shardIndex++) { ShardInterval *shardInterval = cacheEntry->sortedShardIntervalArray[shardIndex]; GroupShardPlacement *placementArray = cacheEntry->arrayOfPlacementArrays[shardIndex]; bool valueByVal = shardInterval->valueByVal; bool foundInCache = false; /* delete the shard's placements */ pfree(placementArray); /* delete per-shard cache-entry */ hash_search(DistShardCacheHash, &shardInterval->shardId, HASH_REMOVE, &foundInCache); Assert(foundInCache); /* delete data pointed to by ShardInterval */ if (!valueByVal) { if (shardInterval->minValueExists) { pfree(DatumGetPointer(shardInterval->minValue)); } if (shardInterval->maxValueExists) { pfree(DatumGetPointer(shardInterval->maxValue)); } } /* and finally the ShardInterval itself */ pfree(shardInterval); } if (cacheEntry->sortedShardIntervalArray) { pfree(cacheEntry->sortedShardIntervalArray); cacheEntry->sortedShardIntervalArray = NULL; } if (cacheEntry->arrayOfPlacementArrayLengths) { pfree(cacheEntry->arrayOfPlacementArrayLengths); cacheEntry->arrayOfPlacementArrayLengths = NULL; } if (cacheEntry->arrayOfPlacementArrays) { pfree(cacheEntry->arrayOfPlacementArrays); cacheEntry->arrayOfPlacementArrays = NULL; } cacheEntry->shardIntervalArrayLength = 0; cacheEntry->hasUninitializedShardInterval = false; cacheEntry->hasUniformHashDistribution = false; cacheEntry->hasOverlappingShardInterval = false; } /* * InvalidateDistRelationCacheCallback flushes cache entries when a relation * is updated (or flushes the entire cache). */ static void InvalidateDistRelationCacheCallback(Datum argument, Oid relationId) { /* invalidate either entire cache or a specific entry */ if (relationId == InvalidOid) { DistTableCacheEntry *cacheEntry = NULL; HASH_SEQ_STATUS status; hash_seq_init(&status, DistTableCacheHash); while ((cacheEntry = (DistTableCacheEntry *) hash_seq_search(&status)) != NULL) { cacheEntry->isValid = false; } } else { void *hashKey = (void *) &relationId; bool foundInCache = false; DistTableCacheEntry *cacheEntry = hash_search(DistTableCacheHash, hashKey, HASH_FIND, &foundInCache); if (foundInCache) { cacheEntry->isValid = false; } } /* * If pg_dist_partition is being invalidated drop all state * This happens pretty rarely, but most importantly happens during * DROP EXTENSION citus; */ if (relationId != InvalidOid && relationId == MetadataCache.distPartitionRelationId) { InvalidateMetadataSystemCache(); } } /* * InvalidateMetadataSystemCache resets all the cached OIDs and the extensionLoaded flag, * and invalidates the worker node and local group ID caches. */ void InvalidateMetadataSystemCache(void) { memset(&MetadataCache, 0, sizeof(MetadataCache)); workerNodeHashValid = false; LocalGroupId = -1; } /* * DistTableOidList iterates over the pg_dist_partition table and returns * a list that consists of the logicalrelids. 
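 *
 * A minimal usage sketch (hypothetical caller):
 *
 *     List *distTableOidList = DistTableOidList();
 *     ListCell *distTableOidCell = NULL;
 *
 *     foreach(distTableOidCell, distTableOidList)
 *     {
 *         Oid relationId = lfirst_oid(distTableOidCell);
 *         ... per-table work ...
 *     }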
*/ List * DistTableOidList(void) { SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 0; HeapTuple heapTuple = NULL; List *distTableOidList = NIL; TupleDesc tupleDescriptor = NULL; Relation pgDistPartition = heap_open(DistPartitionRelationId(), AccessShareLock); scanDescriptor = systable_beginscan(pgDistPartition, InvalidOid, false, NULL, scanKeyCount, scanKey); tupleDescriptor = RelationGetDescr(pgDistPartition); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { bool isNull = false; Oid relationId = InvalidOid; Datum relationIdDatum = heap_getattr(heapTuple, Anum_pg_dist_partition_logicalrelid, tupleDescriptor, &isNull); relationId = DatumGetObjectId(relationIdDatum); distTableOidList = lappend_oid(distTableOidList, relationId); heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); heap_close(pgDistPartition, AccessShareLock); return distTableOidList; } /* * InvalidateNodeRelationCacheCallback destroys the WorkerNodeHash when * any change happens on pg_dist_node table. It also set WorkerNodeHash to * NULL, which allows consequent accesses to the hash read from the * pg_dist_node from scratch. */ static void InvalidateNodeRelationCacheCallback(Datum argument, Oid relationId) { if (relationId == InvalidOid || relationId == MetadataCache.distNodeRelationId) { workerNodeHashValid = false; } } /* * InvalidateLocalGroupIdRelationCacheCallback sets the LocalGroupId to * the default value. */ static void InvalidateLocalGroupIdRelationCacheCallback(Datum argument, Oid relationId) { /* when invalidation happens simply set the LocalGroupId to the default value */ if (relationId == InvalidOid || relationId == MetadataCache.distLocalGroupRelationId) { LocalGroupId = -1; } } /* * LookupDistPartitionTuple searches pg_dist_partition for relationId's entry * and returns that or, if no matching entry was found, NULL. */ static HeapTuple LookupDistPartitionTuple(Relation pgDistPartition, Oid relationId) { HeapTuple distPartitionTuple = NULL; HeapTuple currentPartitionTuple = NULL; SysScanDesc scanDescriptor; ScanKeyData scanKey[1]; /* copy scankey to local copy, it will be modified during the scan */ memcpy(scanKey, DistPartitionScanKey, sizeof(DistPartitionScanKey)); /* set scan arguments */ scanKey[0].sk_argument = ObjectIdGetDatum(relationId); scanDescriptor = systable_beginscan(pgDistPartition, DistPartitionLogicalRelidIndexId(), true, NULL, 1, scanKey); currentPartitionTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(currentPartitionTuple)) { distPartitionTuple = heap_copytuple(currentPartitionTuple); } systable_endscan(scanDescriptor); return distPartitionTuple; } /* * LookupDistShardTuples returns a list of all dist_shard tuples for the * specified relation. 
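 *
 * The returned tuples are copies made with heap_copytuple in the caller's
 * memory context, so a caller may free them once done (illustrative sketch,
 * hypothetical caller):
 *
 *     List *shardTupleList = LookupDistShardTuples(relationId);
 *     ListCell *shardTupleCell = NULL;
 *
 *     foreach(shardTupleCell, shardTupleList)
 *     {
 *         HeapTuple shardTuple = (HeapTuple) lfirst(shardTupleCell);
 *         ... read shard attributes ...
 *         heap_freetuple(shardTuple);
 *     }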
*/ static List * LookupDistShardTuples(Oid relationId) { Relation pgDistShard = NULL; List *distShardTupleList = NIL; HeapTuple currentShardTuple = NULL; SysScanDesc scanDescriptor; ScanKeyData scanKey[1]; pgDistShard = heap_open(DistShardRelationId(), AccessShareLock); /* copy scankey to local copy, it will be modified during the scan */ memcpy(scanKey, DistShardScanKey, sizeof(DistShardScanKey)); /* set scan arguments */ scanKey[0].sk_argument = ObjectIdGetDatum(relationId); scanDescriptor = systable_beginscan(pgDistShard, DistShardLogicalRelidIndexId(), true, NULL, 1, scanKey); currentShardTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(currentShardTuple)) { HeapTuple shardTupleCopy = heap_copytuple(currentShardTuple); distShardTupleList = lappend(distShardTupleList, shardTupleCopy); currentShardTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); heap_close(pgDistShard, AccessShareLock); return distShardTupleList; } /* * LookupShardRelation returns the logical relation oid a shard belongs to. * * Errors out if the shardId does not exist. */ static Oid LookupShardRelation(int64 shardId) { SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; HeapTuple heapTuple = NULL; Form_pg_dist_shard shardForm = NULL; Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock); Oid relationId = InvalidOid; ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); scanDescriptor = systable_beginscan(pgDistShard, DistShardShardidIndexId(), true, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for shard " UINT64_FORMAT, shardId))); } shardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple); relationId = shardForm->logicalrelid; systable_endscan(scanDescriptor); heap_close(pgDistShard, NoLock); return relationId; } /* * GetPartitionTypeInputInfo populates output parameters with the interval type * identifier and modifier for the specified partition key/method combination. */ static void GetPartitionTypeInputInfo(char *partitionKeyString, char partitionMethod, Oid *columnTypeId, int32 *columnTypeMod, Oid *intervalTypeId, int32 *intervalTypeMod) { *columnTypeId = InvalidOid; *columnTypeMod = -1; *intervalTypeId = InvalidOid; *intervalTypeMod = -1; switch (partitionMethod) { case DISTRIBUTE_BY_APPEND: case DISTRIBUTE_BY_RANGE: { Node *partitionNode = stringToNode(partitionKeyString); Var *partitionColumn = (Var *) partitionNode; Assert(IsA(partitionNode, Var)); *intervalTypeId = partitionColumn->vartype; *intervalTypeMod = partitionColumn->vartypmod; *columnTypeId = partitionColumn->vartype; *columnTypeMod = partitionColumn->vartypmod; break; } case DISTRIBUTE_BY_HASH: { Node *partitionNode = stringToNode(partitionKeyString); Var *partitionColumn = (Var *) partitionNode; Assert(IsA(partitionNode, Var)); *intervalTypeId = INT4OID; *columnTypeId = partitionColumn->vartype; *columnTypeMod = partitionColumn->vartypmod; break; } case DISTRIBUTE_BY_NONE: { break; } default: { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("unsupported table partition type: %c", partitionMethod))); } } } /* * TupleToShardInterval transforms the specified dist_shard tuple into a new * ShardInterval using the provided descriptor and partition type information. 
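 *
 * Rough sketch of how the helpers in this file fit together when deserializing
 * a shard row (illustrative only, with hypothetical local variables; the actual
 * call sites live in the cache-building code elsewhere):
 *
 *     GetPartitionTypeInputInfo(partitionKeyString, partitionMethod,
 *                               &columnTypeId, &columnTypeMod,
 *                               &intervalTypeId, &intervalTypeMod);
 *     shardInterval = TupleToShardInterval(shardTuple, tupleDescriptor,
 *                                          intervalTypeId, intervalTypeMod);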
*/ static ShardInterval * TupleToShardInterval(HeapTuple heapTuple, TupleDesc tupleDescriptor, Oid intervalTypeId, int32 intervalTypeMod) { ShardInterval *shardInterval = NULL; bool isNull = false; bool minValueNull = false; bool maxValueNull = false; Oid inputFunctionId = InvalidOid; Oid typeIoParam = InvalidOid; Datum relationIdDatum = heap_getattr(heapTuple, Anum_pg_dist_shard_logicalrelid, tupleDescriptor, &isNull); Datum shardIdDatum = heap_getattr(heapTuple, Anum_pg_dist_shard_shardid, tupleDescriptor, &isNull); Datum storageTypeDatum = heap_getattr(heapTuple, Anum_pg_dist_shard_shardstorage, tupleDescriptor, &isNull); Datum minValueTextDatum = heap_getattr(heapTuple, Anum_pg_dist_shard_shardminvalue, tupleDescriptor, &minValueNull); Datum maxValueTextDatum = heap_getattr(heapTuple, Anum_pg_dist_shard_shardmaxvalue, tupleDescriptor, &maxValueNull); Oid relationId = DatumGetObjectId(relationIdDatum); int64 shardId = DatumGetInt64(shardIdDatum); char storageType = DatumGetChar(storageTypeDatum); Datum minValue = 0; Datum maxValue = 0; bool minValueExists = false; bool maxValueExists = false; int16 intervalTypeLen = 0; bool intervalByVal = false; char intervalAlign = '0'; char intervalDelim = '0'; if (!minValueNull && !maxValueNull) { char *minValueString = TextDatumGetCString(minValueTextDatum); char *maxValueString = TextDatumGetCString(maxValueTextDatum); /* TODO: move this up the call stack to avoid per-tuple invocation? */ get_type_io_data(intervalTypeId, IOFunc_input, &intervalTypeLen, &intervalByVal, &intervalAlign, &intervalDelim, &typeIoParam, &inputFunctionId); /* finally convert min/max values to their actual types */ minValue = OidInputFunctionCall(inputFunctionId, minValueString, typeIoParam, intervalTypeMod); maxValue = OidInputFunctionCall(inputFunctionId, maxValueString, typeIoParam, intervalTypeMod); minValueExists = true; maxValueExists = true; } shardInterval = CitusMakeNode(ShardInterval); shardInterval->relationId = relationId; shardInterval->storageType = storageType; shardInterval->valueTypeId = intervalTypeId; shardInterval->valueTypeLen = intervalTypeLen; shardInterval->valueByVal = intervalByVal; shardInterval->minValueExists = minValueExists; shardInterval->maxValueExists = maxValueExists; shardInterval->minValue = minValue; shardInterval->maxValue = maxValue; shardInterval->shardId = shardId; return shardInterval; } /* * CachedRelationLookup performs a cached lookup for the relation * relationName, with the result cached in *cachedOid. */ static void CachedRelationLookup(const char *relationName, Oid *cachedOid) { /* force callbacks to be registered, so we always get notified upon changes */ InitializeCaches(); if (*cachedOid == InvalidOid) { *cachedOid = get_relname_relid(relationName, PG_CATALOG_NAMESPACE); if (*cachedOid == InvalidOid) { ereport(ERROR, (errmsg("cache lookup failed for %s, called too early?", relationName))); } } } /* * Register a relcache invalidation for a non-shared relation. * * We ignore the case that there's no corresponding pg_class entry - that * happens if we register a relcache invalidation (e.g. for a * pg_dist_partition deletion) after the relation has been dropped. That's ok, * because in those cases we're guaranteed to already have registered an * invalidation for the target relation. 
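 *
 * Hedged usage sketch: metadata-changing code paths typically pair the
 * invalidation with a command counter bump so the invalidation takes effect
 * within the current transaction, as CitusInvalidateRelcacheByShardId below
 * does:
 *
 *     CitusInvalidateRelcacheByRelid(distributedRelationId);
 *     CommandCounterIncrement();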
*/ void CitusInvalidateRelcacheByRelid(Oid relationId) { HeapTuple classTuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relationId)); if (HeapTupleIsValid(classTuple)) { CacheInvalidateRelcacheByTuple(classTuple); ReleaseSysCache(classTuple); } } /* * Register a relcache invalidation for the distributed relation associated * with the shard. */ void CitusInvalidateRelcacheByShardId(int64 shardId) { SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 1; HeapTuple heapTuple = NULL; Form_pg_dist_shard shardForm = NULL; Relation pgDistShard = heap_open(DistShardRelationId(), AccessShareLock); /* * Load shard, to find the associated relation id. Can't use * LoadShardInterval directly because that'd fail if the shard doesn't * exist anymore, which we can't have. Also lower overhead is desirable * here. */ ScanKeyInit(&scanKey[0], Anum_pg_dist_shard_shardid, BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(shardId)); scanDescriptor = systable_beginscan(pgDistShard, DistShardShardidIndexId(), true, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { shardForm = (Form_pg_dist_shard) GETSTRUCT(heapTuple); CitusInvalidateRelcacheByRelid(shardForm->logicalrelid); } else { /* * Couldn't find associated relation. That can primarily happen in two cases: * * 1) A placement row is inserted before the shard row. That's fine, * since we don't need invalidations via placements in that case. * * 2) The shard has been deleted, but some placements were * unreachable, and the user is manually deleting the rows. Not * much point in WARNING or ERRORing in that case either, there's * nothing to invalidate. * * Hence we just emit a DEBUG5 message. */ ereport(DEBUG5, (errmsg("could not find distributed relation to invalidate for " "shard "INT64_FORMAT, shardId))); } systable_endscan(scanDescriptor); heap_close(pgDistShard, NoLock); /* bump command counter, to force invalidation to take effect */ CommandCounterIncrement(); } citus-7.0.3/src/backend/distributed/utils/multi_partitioning_utils.c000066400000000000000000000214131317107136600260230ustar00rootroot00000000000000/* * multi_partitioning_utils.c * Utility functions for declarative partitioning * * Copyright (c) 2017, Citus Data, Inc. */ #include "postgres.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "catalog/indexing.h" #if (PG_VERSION_NUM >= 100000) #include "catalog/partition.h" #endif #include "catalog/pg_class.h" #include "catalog/pg_inherits.h" #include "catalog/pg_inherits_fn.h" #include "distributed/citus_ruleutils.h" #include "distributed/multi_partitioning_utils.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/syscache.h" #if (PG_VERSION_NUM >= 100000) static char * PartitionBound(Oid partitionId); #endif /* * Returns true if the given relation is a partitioned table. */ bool PartitionedTable(Oid relationId) { Relation rel = heap_open(relationId, AccessShareLock); bool partitionedTable = false; #if (PG_VERSION_NUM >= 100000) if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { partitionedTable = true; } #endif /* keep the lock */ heap_close(rel, NoLock); return partitionedTable; } /* * Returns true if the given relation is a partition. 
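 *
 * Illustrative guard (hypothetical caller): on servers older than PostgreSQL 10
 * this always returns false, so partition-specific work is simply skipped:
 *
 *     if (PartitionTable(relationId))
 *     {
 *         char *attachCommand = GenerateAlterTableAttachPartitionCommand(relationId);
 *         ... ship attachCommand to the relevant nodes ...
 *     }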
 */
bool
PartitionTable(Oid relationId)
{
	Relation rel = heap_open(relationId, AccessShareLock);
	bool partitionTable = false;

#if (PG_VERSION_NUM >= 100000)
	partitionTable = rel->rd_rel->relispartition;
#endif

	/* keep the lock */
	heap_close(rel, NoLock);

	return partitionTable;
}


/*
 * IsChildTable returns true if the given table inherits from another table.
 * Note that partition tables inherit from their parent by default. However,
 * this function returns false if the given table is a partition.
 */
bool
IsChildTable(Oid relationId)
{
	Relation pgInherits = NULL;
	SysScanDesc scan = NULL;
	ScanKeyData key[1];
	HeapTuple inheritsTuple = NULL;
	bool tableInherits = false;

	pgInherits = heap_open(InheritsRelationId, AccessShareLock);

	ScanKeyInit(&key[0], Anum_pg_inherits_inhrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relationId));

	scan = systable_beginscan(pgInherits, InvalidOid, false, NULL, 1, key);

	while ((inheritsTuple = systable_getnext(scan)) != NULL)
	{
		Oid inheritedRelationId =
			((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhrelid;

		if (relationId == inheritedRelationId)
		{
			tableInherits = true;
			break;
		}
	}

	systable_endscan(scan);
	heap_close(pgInherits, AccessShareLock);

	if (tableInherits && PartitionTable(relationId))
	{
		tableInherits = false;
	}

	return tableInherits;
}


/*
 * IsParentTable returns true if other tables inherit from the given table.
 * Note that partitioned tables are inherited from by default. However, this
 * function returns false if the given table is a partitioned table.
 */
bool
IsParentTable(Oid relationId)
{
	Relation pgInherits = NULL;
	SysScanDesc scan = NULL;
	ScanKeyData key[1];
	bool tableInherited = false;

	pgInherits = heap_open(InheritsRelationId, AccessShareLock);

	ScanKeyInit(&key[0], Anum_pg_inherits_inhparent,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relationId));

	scan = systable_beginscan(pgInherits, InheritsParentIndexId, true,
							  NULL, 1, key);

	if (systable_getnext(scan) != NULL)
	{
		tableInherited = true;
	}

	systable_endscan(scan);
	heap_close(pgInherits, AccessShareLock);

	if (tableInherited && PartitionedTable(relationId))
	{
		tableInherited = false;
	}

	return tableInherited;
}


/*
 * Wrapper around get_partition_parent
 *
 * Note: Because this function assumes that the relation whose OID is passed
 * as an argument will have precisely one parent, it should only be called
 * when it is known that the relation is a partition.
 */
Oid
PartitionParentOid(Oid partitionOid)
{
	Oid partitionParentOid = InvalidOid;

#if (PG_VERSION_NUM >= 100000)
	partitionParentOid = get_partition_parent(partitionOid);
#endif

	return partitionParentOid;
}


/*
 * Takes a parent relation and returns Oid list of its partitions. The
 * function errors out if the given relation is not a parent.
 */
List *
PartitionList(Oid parentRelationId)
{
	Relation rel = heap_open(parentRelationId, AccessShareLock);
	List *partitionList = NIL;

#if (PG_VERSION_NUM >= 100000)
	int partitionIndex = 0;
	int partitionCount = 0;

	if (!PartitionedTable(parentRelationId))
	{
		char *relationName = get_rel_name(parentRelationId);

		ereport(ERROR, (errmsg("\"%s\" is not a parent table", relationName)));
	}

	Assert(rel->rd_partdesc != NULL);

	partitionCount = rel->rd_partdesc->nparts;
	for (partitionIndex = 0; partitionIndex < partitionCount; ++partitionIndex)
	{
		partitionList = lappend_oid(partitionList,
									rel->rd_partdesc->oids[partitionIndex]);
	}
#endif

	/* keep the lock */
	heap_close(rel, NoLock);

	return partitionList;
}


/*
 * GenerateDetachPartitionCommand gets a partition table and returns
 * "ALTER TABLE parent_table DETACH PARTITION partitionName" command.
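 *
 * Usage sketch (hypothetical caller): the detach command is typically generated
 * together with its attach counterpart, so the partition can be re-attached
 * once the surrounding DDL is done:
 *
 *     char *detachCommand = GenerateDetachPartitionCommand(partitionRelationId);
 *     char *attachCommand = GenerateAlterTableAttachPartitionCommand(partitionRelationId);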
 */
char *
GenerateDetachPartitionCommand(Oid partitionTableId)
{
	StringInfo detachPartitionCommand = makeStringInfo();

#if (PG_VERSION_NUM >= 100000)
	Oid parentId = InvalidOid;
	char *tableQualifiedName = NULL;
	char *parentTableQualifiedName = NULL;

	if (!PartitionTable(partitionTableId))
	{
		char *relationName = get_rel_name(partitionTableId);

		ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
	}

	parentId = get_partition_parent(partitionTableId);
	tableQualifiedName = generate_qualified_relation_name(partitionTableId);
	parentTableQualifiedName = generate_qualified_relation_name(parentId);

	appendStringInfo(detachPartitionCommand,
					 "ALTER TABLE %s DETACH PARTITION %s;",
					 parentTableQualifiedName, tableQualifiedName);
#endif

	return detachPartitionCommand->data;
}


/*
 * GeneratePartitioningInformation returns the partitioning type and partition
 * column for the given parent table in the form of
 * "PARTITION TYPE (partitioning column(s)/expression(s))".
 */
char *
GeneratePartitioningInformation(Oid parentTableId)
{
	char *partitionBoundCString = "";

#if (PG_VERSION_NUM >= 100000)
	Datum partitionBoundDatum = 0;

	if (!PartitionedTable(parentTableId))
	{
		char *relationName = get_rel_name(parentTableId);

		ereport(ERROR, (errmsg("\"%s\" is not a parent table", relationName)));
	}

	partitionBoundDatum = DirectFunctionCall1(pg_get_partkeydef,
											  ObjectIdGetDatum(parentTableId));

	partitionBoundCString = TextDatumGetCString(partitionBoundDatum);
#endif

	return partitionBoundCString;
}


/*
 * GenerateAlterTableAttachPartitionCommand returns the necessary command to
 * attach the given partition to its parent.
 */
char *
GenerateAlterTableAttachPartitionCommand(Oid partitionTableId)
{
	StringInfo createPartitionCommand = makeStringInfo();

#if (PG_VERSION_NUM >= 100000)
	char *partitionBoundCString = NULL;

	Oid parentId = InvalidOid;
	char *tableQualifiedName = NULL;
	char *parentTableQualifiedName = NULL;

	if (!PartitionTable(partitionTableId))
	{
		char *relationName = get_rel_name(partitionTableId);

		ereport(ERROR, (errmsg("\"%s\" is not a partition", relationName)));
	}

	parentId = get_partition_parent(partitionTableId);
	tableQualifiedName = generate_qualified_relation_name(partitionTableId);
	parentTableQualifiedName = generate_qualified_relation_name(parentId);

	partitionBoundCString = PartitionBound(partitionTableId);

	appendStringInfo(createPartitionCommand,
					 "ALTER TABLE %s ATTACH PARTITION %s %s;",
					 parentTableQualifiedName, tableQualifiedName,
					 partitionBoundCString);
#endif

	return createPartitionCommand->data;
}


#if (PG_VERSION_NUM >= 100000)

/*
 * This function is heavily inspired by RelationBuildPartitionDesc(), which is
 * available in src/backend/catalog/partition.c.
 *
 * The function simply reads pg_class to get the partition bound. Later, it
 * converts the bound to text format and returns it.
 */
static char *
PartitionBound(Oid partitionId)
{
	char *partitionBoundString = NULL;
	HeapTuple tuple = NULL;
	Datum datum = 0;
	bool isnull = false;
	Datum partitionBoundDatum = 0;

	tuple = SearchSysCache1(RELOID, partitionId);
	if (!HeapTupleIsValid(tuple))
	{
		elog(ERROR, "cache lookup failed for relation %u", partitionId);
	}

	/*
	 * It is possible that the pg_class tuple of a partition has not been
	 * updated yet to set its relpartbound field. The only case where
	 * this happens is when we open the parent relation to check using its
	 * partition descriptor that a new partition's bound does not overlap
	 * some existing partition.
*/ if (!((Form_pg_class) GETSTRUCT(tuple))->relispartition) { ReleaseSysCache(tuple); return ""; } datum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relpartbound, &isnull); Assert(!isnull); partitionBoundDatum = DirectFunctionCall2(pg_get_expr, datum, ObjectIdGetDatum(partitionId)); partitionBoundString = TextDatumGetCString(partitionBoundDatum); ReleaseSysCache(tuple); return partitionBoundString; } #endif citus-7.0.3/src/backend/distributed/utils/multi_resowner.c000066400000000000000000000101301317107136600237320ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_resowner.c * Citus resource owner integration * * An extension can't directly add members to ResourceOwnerData. Instead we * have to use the resource owner callback mechanism. Right now it's * sufficient to have an array of referenced resources - there bascially are * never more than a handful of entries, if that. If that changes we should * probably rather use a hash table using the pointer value of the resource * owner as key. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "distributed/multi_server_executor.h" #include "utils/memutils.h" #include "utils/resowner_private.h" #include "distributed/multi_resowner.h" typedef struct JobDirectoryEntry { ResourceOwner owner; uint64 jobId; } JobDirectoryEntry; static bool RegisteredResownerCallback = false; JobDirectoryEntry *RegisteredJobDirectories = NULL; size_t NumRegisteredJobDirectories = 0; size_t NumAllocatedJobDirectories = 0; /* * Resource owner callback - release resources still held by the resource * owner. */ static void MultiResourceOwnerReleaseCallback(ResourceReleasePhase phase, bool isCommit, bool isTopLevel, void *arg) { int lastJobIndex = NumRegisteredJobDirectories - 1; int jobIndex = 0; if (phase == RESOURCE_RELEASE_AFTER_LOCKS) { /* * Remove all remaining job directories, after locks have been * released. */ for (jobIndex = lastJobIndex; jobIndex >= 0; jobIndex--) { JobDirectoryEntry *entry = &RegisteredJobDirectories[jobIndex]; if (entry->owner == CurrentResourceOwner) { RemoveJobDirectory(entry->jobId); } } } } /* * ResourceOwnerEnlargeJobDirectories makes sure that there is space to * reference at least one more job directory for the resource owner. Note that * we only expect one job directory per portal, but we still use an array * here. * * This function is separate from the one actually inserting an entry because * if we run out of memory, it's critical to do so *before* acquiring the * resource. */ void ResourceOwnerEnlargeJobDirectories(ResourceOwner owner) { int newMax = 0; /* ensure callback is registered */ if (!RegisteredResownerCallback) { RegisterResourceReleaseCallback(MultiResourceOwnerReleaseCallback, NULL); RegisteredResownerCallback = true; } if (RegisteredJobDirectories == NULL) { newMax = 16; RegisteredJobDirectories = (JobDirectoryEntry *) MemoryContextAlloc(TopMemoryContext, newMax * sizeof(JobDirectoryEntry)); NumAllocatedJobDirectories = newMax; } else if (NumRegisteredJobDirectories + 1 > NumAllocatedJobDirectories) { newMax = NumAllocatedJobDirectories * 2; RegisteredJobDirectories = (JobDirectoryEntry *) repalloc(RegisteredJobDirectories, newMax * sizeof(JobDirectoryEntry)); NumAllocatedJobDirectories = newMax; } } /* Remembers that a temporary job directory is owned by a resource owner. 
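 *
 * Expected call pattern (sketch; it mirrors the standard PostgreSQL resource
 * owner idiom described above ResourceOwnerEnlargeJobDirectories): reserve the
 * slot first, then create the resource, then record it, so that running out of
 * memory cannot leak the directory:
 *
 *     ResourceOwnerEnlargeJobDirectories(CurrentResourceOwner);
 *     ... create the job directory on disk ...
 *     ResourceOwnerRememberJobDirectory(CurrentResourceOwner, jobId);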
*/ void ResourceOwnerRememberJobDirectory(ResourceOwner owner, uint64 jobId) { JobDirectoryEntry *entry = NULL; Assert(NumRegisteredJobDirectories + 1 <= NumAllocatedJobDirectories); entry = &RegisteredJobDirectories[NumRegisteredJobDirectories]; entry->owner = owner; entry->jobId = jobId; NumRegisteredJobDirectories++; } /* Forgets that a temporary job directory is owned by a resource owner. */ void ResourceOwnerForgetJobDirectory(ResourceOwner owner, uint64 jobId) { int lastJobIndex = NumRegisteredJobDirectories - 1; int jobIndex = 0; for (jobIndex = lastJobIndex; jobIndex >= 0; jobIndex--) { JobDirectoryEntry *entry = &RegisteredJobDirectories[jobIndex]; if (entry->owner == owner && entry->jobId == jobId) { /* move all later entries one up */ while (jobIndex < lastJobIndex) { RegisteredJobDirectories[jobIndex] = RegisteredJobDirectories[jobIndex + 1]; jobIndex++; } NumRegisteredJobDirectories = lastJobIndex; return; } } elog(ERROR, "jobId " UINT64_FORMAT " is not owned by resource owner %p", jobId, owner); } citus-7.0.3/src/backend/distributed/utils/node_metadata.c000066400000000000000000001255551317107136600234630ustar00rootroot00000000000000/* * node_metadata.c * Functions that operate on pg_dist_node * * Copyright (c) 2012-2016, Citus Data, Inc. */ #include "postgres.h" #include "miscadmin.h" #include "funcapi.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup.h" #include "access/htup_details.h" #include "access/skey.h" #include "access/skey.h" #include "access/tupmacs.h" #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "commands/sequence.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" #include "distributed/master_protocol.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_node.h" #include "distributed/reference_table_utils.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" #include "lib/stringinfo.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" #include "storage/lock.h" #include "storage/fd.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" /* default group size */ int GroupSize = 1; /* config variable managed via guc.c */ char *CurrentCluster = "default"; /* local function forward declarations */ static Datum ActivateNode(char *nodeName, int nodePort); static void RemoveNodeFromCluster(char *nodeName, int32 nodePort); static Datum AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack, bool hasMetadata, bool isActive, Oid nodeRole, char *nodeCluster, bool *nodeAlreadyExists); static uint32 CountPrimariesWithMetadata(); static void SetNodeState(char *nodeName, int32 nodePort, bool isActive); static HeapTuple GetNodeTuple(char *nodeName, int32 nodePort); static Datum GenerateNodeTuple(WorkerNode *workerNode); static int32 GetNextGroupId(void); static uint32 GetMaxGroupId(void); static int GetNextNodeId(void); static void InsertNodeRow(int nodeid, char *nodename, int32 nodeport, uint32 groupId, char *nodeRack, bool hasMetadata, bool isActive, Oid nodeRole, char *nodeCluster); static void DeleteNodeRow(char *nodename, int32 nodeport); static 
List * ParseWorkerNodeFileAndRename(void); static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple); /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(master_add_node); PG_FUNCTION_INFO_V1(master_add_inactive_node); PG_FUNCTION_INFO_V1(master_add_secondary_node); PG_FUNCTION_INFO_V1(master_remove_node); PG_FUNCTION_INFO_V1(master_disable_node); PG_FUNCTION_INFO_V1(master_activate_node); PG_FUNCTION_INFO_V1(master_initialize_node_metadata); PG_FUNCTION_INFO_V1(get_shard_id_for_distribution_column); /* * master_add_node function adds a new node to the cluster and returns its data. It also * replicates all reference tables to the new node. */ Datum master_add_node(PG_FUNCTION_ARGS) { text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); int32 groupId = PG_GETARG_INT32(2); Oid nodeRole = InvalidOid; char *nodeClusterString = NULL; char *nodeRack = WORKER_DEFAULT_RACK; bool hasMetadata = false; bool isActive = false; bool nodeAlreadyExists = false; Datum nodeRecord; CheckCitusVersion(ERROR); /* * During tests this function is called before nodeRole and nodeCluster have been * created. */ if (PG_NARGS() == 3) { nodeRole = InvalidOid; nodeClusterString = "default"; } else { Name nodeClusterName = PG_GETARG_NAME(4); nodeClusterString = NameStr(*nodeClusterName); nodeRole = PG_GETARG_OID(3); } nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack, hasMetadata, isActive, nodeRole, nodeClusterString, &nodeAlreadyExists); /* * After adding new node, if the node did not already exist, we will activate * the node. This means we will replicate all reference tables to the new * node. */ if (!nodeAlreadyExists) { nodeRecord = ActivateNode(nodeNameString, nodePort); } PG_RETURN_DATUM(nodeRecord); } /* * master_add_inactive_node function adds a new node to the cluster as inactive node * and returns information about newly added node. It does not replicate reference * tables to the new node, it only adds new node to the pg_dist_node table. */ Datum master_add_inactive_node(PG_FUNCTION_ARGS) { text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); int32 groupId = PG_GETARG_INT32(2); Oid nodeRole = PG_GETARG_OID(3); Name nodeClusterName = PG_GETARG_NAME(4); char *nodeClusterString = NameStr(*nodeClusterName); char *nodeRack = WORKER_DEFAULT_RACK; bool hasMetadata = false; bool isActive = false; bool nodeAlreadyExists = false; Datum nodeRecord; CheckCitusVersion(ERROR); nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack, hasMetadata, isActive, nodeRole, nodeClusterString, &nodeAlreadyExists); PG_RETURN_DATUM(nodeRecord); } /* * master_add_secondary_node adds a new secondary node to the cluster. It accepts as * arguments the primary node it should share a group with. 
*/ Datum master_add_secondary_node(PG_FUNCTION_ARGS) { text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); text *primaryName = PG_GETARG_TEXT_P(2); int32 primaryPort = PG_GETARG_INT32(3); char *primaryNameString = text_to_cstring(primaryName); int32 groupId = GroupForNode(primaryNameString, primaryPort); Oid nodeRole = SecondaryNodeRoleId(); Name nodeClusterName = PG_GETARG_NAME(4); char *nodeClusterString = NameStr(*nodeClusterName); char *nodeRack = WORKER_DEFAULT_RACK; bool hasMetadata = false; bool isActive = true; bool nodeAlreadyExists = false; Datum nodeRecord; CheckCitusVersion(ERROR); nodeRecord = AddNodeMetadata(nodeNameString, nodePort, groupId, nodeRack, hasMetadata, isActive, nodeRole, nodeClusterString, &nodeAlreadyExists); PG_RETURN_DATUM(nodeRecord); } /* * master_remove_node function removes the provided node from the pg_dist_node table of * the master node and all nodes with metadata. * The call to the master_remove_node should be done by the super user and the specified * node should not have any active placements. * This function also deletes all reference table placements belong to the given node from * pg_dist_placement, but it does not drop actual placement at the node. In the case of * re-adding the node, master_add_node first drops and re-creates the reference tables. */ Datum master_remove_node(PG_FUNCTION_ARGS) { text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); CheckCitusVersion(ERROR); RemoveNodeFromCluster(nodeNameString, nodePort); PG_RETURN_VOID(); } /* * master_disable_node function sets isactive value of the provided node as inactive at * master node and all nodes with metadata regardless of the node having an active shard * placement. * * The call to the master_disable_node must be done by the super user. * * This function also deletes all reference table placements belong to the given node * from pg_dist_placement, but it does not drop actual placement at the node. In the case * of re-activating the node, master_add_node first drops and re-creates the reference * tables. */ Datum master_disable_node(PG_FUNCTION_ARGS) { const bool onlyConsiderActivePlacements = true; text *nodeNameText = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeName = text_to_cstring(nodeNameText); bool isActive = false; WorkerNode *workerNode = NULL; CheckCitusVersion(ERROR); /* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); if (workerNode == NULL) { ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort))); } if (WorkerNodeIsPrimary(workerNode)) { DeleteAllReferenceTablePlacementsFromNodeGroup(workerNode->groupId); } if (WorkerNodeIsPrimary(workerNode) && NodeGroupHasShardPlacements(workerNode->groupId, onlyConsiderActivePlacements)) { ereport(NOTICE, (errmsg("Node %s:%d has active shard placements. Some queries " "may fail after this operation. Use " "SELECT master_activate_node('%s', %d) to activate this " "node back.", nodeName, nodePort, nodeName, nodePort))); } SetNodeState(nodeName, nodePort, isActive); PG_RETURN_VOID(); } /* * master_activate_node UDF activates the given node. It sets the node's isactive * value to active and replicates all reference tables to that node. 
*/ Datum master_activate_node(PG_FUNCTION_ARGS) { text *nodeName = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); char *nodeNameString = text_to_cstring(nodeName); Datum nodeRecord = 0; CheckCitusVersion(ERROR); nodeRecord = ActivateNode(nodeNameString, nodePort); PG_RETURN_DATUM(nodeRecord); } /* * GroupForNode returns the group which a given node belongs to. * * It only works if the requested node is a part of CurrentCluster. */ uint32 GroupForNode(char *nodeName, int nodePort) { WorkerNode *workerNode = FindWorkerNode(nodeName, nodePort); if (workerNode == NULL) { ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort))); } return workerNode->groupId; } /* * WorkerNodeIsPrimary returns whether the argument represents a primary node. */ bool WorkerNodeIsPrimary(WorkerNode *worker) { Oid primaryRole = PrimaryNodeRoleId(); /* if nodeRole does not yet exist, all nodes are primary nodes */ if (primaryRole == InvalidOid) { return true; } return worker->nodeRole == primaryRole; } /* * WorkerNodeIsSecondary returns whether the argument represents a secondary node. */ bool WorkerNodeIsSecondary(WorkerNode *worker) { Oid secondaryRole = SecondaryNodeRoleId(); /* if nodeRole does not yet exist, all nodes are primary nodes */ if (secondaryRole == InvalidOid) { return false; } return worker->nodeRole == secondaryRole; } /* * WorkerNodeIsReadable returns whether we're allowed to send SELECT queries to this * node. */ bool WorkerNodeIsReadable(WorkerNode *workerNode) { if (ReadFromSecondaries == USE_SECONDARY_NODES_NEVER && WorkerNodeIsPrimary(workerNode)) { return true; } if (ReadFromSecondaries == USE_SECONDARY_NODES_ALWAYS && WorkerNodeIsSecondary(workerNode)) { return true; } return false; } /* * PrimaryNodeForGroup returns the (unique) primary in the specified group. * * If there are any nodes in the requested group and groupContainsNodes is not NULL * it will set the bool groupContainsNodes references to true. */ WorkerNode * PrimaryNodeForGroup(uint32 groupId, bool *groupContainsNodes) { WorkerNode *workerNode = NULL; HASH_SEQ_STATUS status; HTAB *workerNodeHash = GetWorkerNodeHash(); hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { uint32 workerNodeGroupId = workerNode->groupId; if (workerNodeGroupId != groupId) { continue; } if (groupContainsNodes != NULL) { *groupContainsNodes = true; } if (WorkerNodeIsPrimary(workerNode)) { hash_seq_term(&status); return workerNode; } } return NULL; } /* * ActivateNode activates the node with nodeName and nodePort. Currently, activation * includes only replicating the reference tables and setting isactive column of the * given node. */ static Datum ActivateNode(char *nodeName, int nodePort) { WorkerNode *workerNode = NULL; bool isActive = true; Datum nodeRecord = 0; /* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); SetNodeState(nodeName, nodePort, isActive); workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); if (WorkerNodeIsPrimary(workerNode)) { ReplicateAllReferenceTablesToNode(nodeName, nodePort); } nodeRecord = GenerateNodeTuple(workerNode); return nodeRecord; } /* * master_initialize_node_metadata is run once, when upgrading citus. It ingests the * existing pg_worker_list.conf into pg_dist_node, then adds a header to the file stating * that it's no longer used. 
*/ Datum master_initialize_node_metadata(PG_FUNCTION_ARGS) { ListCell *workerNodeCell = NULL; List *workerNodes = NIL; bool nodeAlreadyExists = false; /* nodeRole and nodeCluster don't exist when this function is caled */ Oid nodeRole = InvalidOid; char *nodeCluster = WORKER_DEFAULT_CLUSTER; CheckCitusVersion(ERROR); /* * This function should only ever be called from the create extension * script, but just to be sure, take an exclusive lock on pg_dist_node * to prevent concurrent calls. */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); workerNodes = ParseWorkerNodeFileAndRename(); foreach(workerNodeCell, workerNodes) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); AddNodeMetadata(workerNode->workerName, workerNode->workerPort, 0, workerNode->workerRack, false, workerNode->isActive, nodeRole, nodeCluster, &nodeAlreadyExists); } PG_RETURN_BOOL(true); } /* * get_shard_id_for_distribution_column function takes a distributed table name and a * distribution value then returns shard id of the shard which belongs to given table and * contains given value. This function only works for hash distributed tables. */ Datum get_shard_id_for_distribution_column(PG_FUNCTION_ARGS) { ShardInterval *shardInterval = NULL; char distributionMethod = 0; Oid relationId = InvalidOid; CheckCitusVersion(ERROR); /* * To have optional parameter as NULL, we defined this UDF as not strict, therefore * we need to check all parameters for NULL values. */ if (PG_ARGISNULL(0)) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("relation cannot be NULL"))); } relationId = PG_GETARG_OID(0); EnsureTablePermissions(relationId, ACL_SELECT); if (!IsDistributedTable(relationId)) { ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), errmsg("relation is not distributed"))); } distributionMethod = PartitionMethod(relationId); if (distributionMethod == DISTRIBUTE_BY_NONE) { List *shardIntervalList = LoadShardIntervalList(relationId); if (shardIntervalList == NIL) { PG_RETURN_INT64(NULL); } shardInterval = (ShardInterval *) linitial(shardIntervalList); } else if (distributionMethod == DISTRIBUTE_BY_HASH || distributionMethod == DISTRIBUTE_BY_RANGE) { Var *distributionColumn = NULL; Oid distributionDataType = InvalidOid; Oid inputDataType = InvalidOid; char *distributionValueString = NULL; Datum inputDatum = 0; Datum distributionValueDatum = 0; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(relationId); /* if given table is not reference table, distributionValue cannot be NULL */ if (PG_ARGISNULL(1)) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("distribution value cannot be NULL for tables other " "than reference tables."))); } inputDatum = PG_GETARG_DATUM(1); inputDataType = get_fn_expr_argtype(fcinfo->flinfo, 1); distributionValueString = DatumToString(inputDatum, inputDataType); distributionColumn = DistPartitionKey(relationId); distributionDataType = distributionColumn->vartype; distributionValueDatum = StringToDatum(distributionValueString, distributionDataType); shardInterval = FindShardInterval(distributionValueDatum, cacheEntry); } else { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("finding shard id of given distribution value is only " "supported for hash partitioned tables, range partitioned " "tables and reference tables."))); } if (shardInterval != NULL) { PG_RETURN_INT64(shardInterval->shardId); } PG_RETURN_INT64(NULL); } /* * FindWorkerNode searches over the worker nodes and returns the workerNode * if it already exists. 
Else, the function returns NULL. */ WorkerNode * FindWorkerNode(char *nodeName, int32 nodePort) { WorkerNode *workerNode = NULL; HTAB *workerNodeHash = GetWorkerNodeHash(); bool handleFound = false; void *hashKey = NULL; WorkerNode *searchedNode = (WorkerNode *) palloc0(sizeof(WorkerNode)); strlcpy(searchedNode->workerName, nodeName, WORKER_LENGTH); searchedNode->workerPort = nodePort; hashKey = (void *) searchedNode; workerNode = (WorkerNode *) hash_search(workerNodeHash, hashKey, HASH_FIND, &handleFound); return workerNode; } /* * FindWorkerNodeAnyCluster returns the workerNode no matter which cluster it is a part * of. FindWorkerNodes, like almost every other function, acts as if nodes in other * clusters do not exist. */ WorkerNode * FindWorkerNodeAnyCluster(char *nodeName, int32 nodePort) { WorkerNode *workerNode = NULL; Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock); TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode); HeapTuple heapTuple = GetNodeTuple(nodeName, nodePort); if (heapTuple != NULL) { workerNode = TupleToWorkerNode(tupleDescriptor, heapTuple); } heap_close(pgDistNode, NoLock); return workerNode; } /* * ReadWorkerNodes iterates over pg_dist_node table, converts each row * into it's memory representation (i.e., WorkerNode) and adds them into * a list. Lastly, the list is returned to the caller. * * It skips nodes which are not in the current clusters unless requested to do otherwise * by includeNodesFromOtherClusters. */ List * ReadWorkerNodes(bool includeNodesFromOtherClusters) { SysScanDesc scanDescriptor = NULL; ScanKeyData scanKey[1]; int scanKeyCount = 0; HeapTuple heapTuple = NULL; List *workerNodeList = NIL; TupleDesc tupleDescriptor = NULL; Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock); scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, false, NULL, scanKeyCount, scanKey); tupleDescriptor = RelationGetDescr(pgDistNode); heapTuple = systable_getnext(scanDescriptor); while (HeapTupleIsValid(heapTuple)) { WorkerNode *workerNode = TupleToWorkerNode(tupleDescriptor, heapTuple); if (includeNodesFromOtherClusters || strncmp(workerNode->nodeCluster, CurrentCluster, WORKER_LENGTH) == 0) { /* the coordinator acts as if it never sees nodes not in it's cluster */ workerNodeList = lappend(workerNodeList, workerNode); } heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); heap_close(pgDistNode, NoLock); return workerNodeList; } /* * RemoveNodeFromCluster removes the provided node from the pg_dist_node table of * the master node and all nodes with metadata. * The call to the master_remove_node should be done by the super user. If there are * active shard placements on the node; the function errors out. * This function also deletes all reference table placements belong to the given node from * pg_dist_placement, but it does not drop actual placement at the node. It also * modifies replication factor of the colocation group of reference tables, so that * replication factor will be equal to worker count. 
*/ static void RemoveNodeFromCluster(char *nodeName, int32 nodePort) { const bool onlyConsiderActivePlacements = false; char *nodeDeleteCommand = NULL; WorkerNode *workerNode = NULL; List *referenceTableList = NIL; uint32 deletedNodeId = INVALID_PLACEMENT_ID; EnsureCoordinator(); EnsureSuperUser(); /* take an exclusive lock on pg_dist_node to serialize pg_dist_node changes */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); if (workerNode == NULL) { ereport(ERROR, (errmsg("node at \"%s:%u\" does not exist", nodeName, nodePort))); } if (workerNode != NULL) { deletedNodeId = workerNode->nodeId; } if (WorkerNodeIsPrimary(workerNode)) { DeleteAllReferenceTablePlacementsFromNodeGroup(workerNode->groupId); } if (WorkerNodeIsPrimary(workerNode) && NodeGroupHasShardPlacements(workerNode->groupId, onlyConsiderActivePlacements)) { ereport(ERROR, (errmsg("you cannot remove the primary node of a node group " "which has shard placements"))); } DeleteNodeRow(nodeName, nodePort); /* * After deleting reference table placements, we update the replication factor * column of the reference table colocation group so that the replication factor * again equals the worker count. */ if (WorkerNodeIsPrimary(workerNode)) { referenceTableList = ReferenceTableOidList(); if (list_length(referenceTableList) != 0) { Oid firstReferenceTableId = linitial_oid(referenceTableList); uint32 referenceTableColocationId = TableColocationId(firstReferenceTableId); List *workerNodeList = ActivePrimaryNodeList(); int workerCount = list_length(workerNodeList); UpdateColocationGroupReplicationFactor(referenceTableColocationId, workerCount); } } nodeDeleteCommand = NodeDeleteCommand(deletedNodeId); /* make sure we don't have any lingering session lifespan connections */ CloseNodeConnectionsAfterTransaction(nodeName, nodePort); SendCommandToWorkers(WORKERS_WITH_METADATA, nodeDeleteCommand); }
/* CountPrimariesWithMetadata returns the number of primary nodes which have metadata. */ static uint32 CountPrimariesWithMetadata() { uint32 primariesWithMetadata = 0; WorkerNode *workerNode = NULL; HASH_SEQ_STATUS status; HTAB *workerNodeHash = GetWorkerNodeHash(); hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { if (workerNode->hasMetadata && WorkerNodeIsPrimary(workerNode)) { primariesWithMetadata++; } } return primariesWithMetadata; }
/* * AddNodeMetadata checks the given node information and adds the specified node to the * pg_dist_node table of the master and workers with metadata. * If the node already exists, the function returns the information about the node. * If not, the following procedure is followed while adding a node: If the groupId is not * explicitly given by the user, the function picks the group that the new node should * be in with respect to GroupSize. Then, the new node is inserted into the local * pg_dist_node as well as the nodes with hasmetadata=true. */ static Datum AddNodeMetadata(char *nodeName, int32 nodePort, int32 groupId, char *nodeRack, bool hasMetadata, bool isActive, Oid nodeRole, char *nodeCluster, bool *nodeAlreadyExists) { int nextNodeIdInt = 0; Datum returnData = 0; WorkerNode *workerNode = NULL; char *nodeDeleteCommand = NULL; uint32 primariesWithMetadata = 0; EnsureCoordinator(); EnsureSuperUser(); *nodeAlreadyExists = false; /* * Take an exclusive lock on pg_dist_node to serialize node changes.
* We may want to relax or have more fine-grained locking in the future * to allow users to add multiple nodes concurrently. */ LockRelationOid(DistNodeRelationId(), ExclusiveLock); workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); if (workerNode != NULL) { /* fill return data and return */ returnData = GenerateNodeTuple(workerNode); *nodeAlreadyExists = true; return returnData; } /* user lets Citus to decide on the group that the newly added node should be in */ if (groupId == 0) { groupId = GetNextGroupId(); } else { uint maxGroupId = GetMaxGroupId(); if (groupId > maxGroupId) { ereport(ERROR, (errmsg("you cannot add a node to a non-existing group"))); } } /* if nodeRole hasn't been added yet there's a constraint for one-node-per-group */ if (nodeRole != InvalidOid && nodeRole == PrimaryNodeRoleId()) { WorkerNode *existingPrimaryNode = PrimaryNodeForGroup(groupId, NULL); if (existingPrimaryNode != NULL) { ereport(ERROR, (errmsg("group %d already has a primary node", groupId))); } } if (nodeRole == PrimaryNodeRoleId()) { if (strncmp(nodeCluster, WORKER_DEFAULT_CLUSTER, WORKER_LENGTH) != 0) { ereport(ERROR, (errmsg("primaries must be added to the default cluster"))); } } /* generate the new node id from the sequence */ nextNodeIdInt = GetNextNodeId(); InsertNodeRow(nextNodeIdInt, nodeName, nodePort, groupId, nodeRack, hasMetadata, isActive, nodeRole, nodeCluster); workerNode = FindWorkerNodeAnyCluster(nodeName, nodePort); /* send the delete command to all primary nodes with metadata */ nodeDeleteCommand = NodeDeleteCommand(workerNode->nodeId); SendCommandToWorkers(WORKERS_WITH_METADATA, nodeDeleteCommand); /* finally prepare the insert command and send it to all primary nodes */ primariesWithMetadata = CountPrimariesWithMetadata(); if (primariesWithMetadata != 0) { List *workerNodeList = list_make1(workerNode); char *nodeInsertCommand = NodeListInsertCommand(workerNodeList); SendCommandToWorkers(WORKERS_WITH_METADATA, nodeInsertCommand); } returnData = GenerateNodeTuple(workerNode); return returnData; } /* * SetNodeState function sets the isactive column of the specified worker in * pg_dist_node to isActive. */ static void SetNodeState(char *nodeName, int32 nodePort, bool isActive) { Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); TupleDesc tupleDescriptor = RelationGetDescr(pgDistNode); HeapTuple heapTuple = GetNodeTuple(nodeName, nodePort); Datum values[Natts_pg_dist_node]; bool isnull[Natts_pg_dist_node]; bool replace[Natts_pg_dist_node]; char *nodeStateUpdateCommand = NULL; WorkerNode *workerNode = NULL; if (heapTuple == NULL) { ereport(ERROR, (errmsg("could not find valid entry for node \"%s:%d\"", nodeName, nodePort))); } memset(replace, 0, sizeof(replace)); values[Anum_pg_dist_node_isactive - 1] = BoolGetDatum(isActive); isnull[Anum_pg_dist_node_isactive - 1] = false; replace[Anum_pg_dist_node_isactive - 1] = true; heapTuple = heap_modify_tuple(heapTuple, tupleDescriptor, values, isnull, replace); CatalogTupleUpdate(pgDistNode, &heapTuple->t_self, heapTuple); CitusInvalidateRelcacheByRelid(DistNodeRelationId()); CommandCounterIncrement(); workerNode = TupleToWorkerNode(tupleDescriptor, heapTuple); heap_close(pgDistNode, NoLock); /* we also update isactive column at worker nodes */ nodeStateUpdateCommand = NodeStateUpdateCommand(workerNode->nodeId, isActive); SendCommandToWorkers(WORKERS_WITH_METADATA, nodeStateUpdateCommand); } /* * GetNodeTuple function returns the heap tuple of given nodeName and nodePort. 
If the * node is not found this function returns NULL. * * This function may return worker nodes from other clusters. */ static HeapTuple GetNodeTuple(char *nodeName, int32 nodePort) { Relation pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock); const int scanKeyCount = 2; const bool indexOK = false; ScanKeyData scanKey[scanKeyCount]; SysScanDesc scanDescriptor = NULL; HeapTuple heapTuple = NULL; HeapTuple nodeTuple = NULL; ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename, BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName)); ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport, BTEqualStrategyNumber, F_INT8EQ, Int32GetDatum(nodePort)); scanDescriptor = systable_beginscan(pgDistNode, InvalidOid, indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(scanDescriptor); if (HeapTupleIsValid(heapTuple)) { nodeTuple = heap_copytuple(heapTuple); } systable_endscan(scanDescriptor); heap_close(pgDistNode, NoLock); return nodeTuple; } /* * GenerateNodeTuple gets a worker node and return a heap tuple of * given worker node. */ static Datum GenerateNodeTuple(WorkerNode *workerNode) { Relation pgDistNode = NULL; TupleDesc tupleDescriptor = NULL; HeapTuple heapTuple = NULL; Datum nodeDatum = 0; Datum values[Natts_pg_dist_node]; bool isNulls[Natts_pg_dist_node]; Datum nodeClusterStringDatum = CStringGetDatum(workerNode->nodeCluster); Datum nodeClusterNameDatum = DirectFunctionCall1(namein, nodeClusterStringDatum); /* form new shard tuple */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); values[Anum_pg_dist_node_nodeid - 1] = UInt32GetDatum(workerNode->nodeId); values[Anum_pg_dist_node_groupid - 1] = UInt32GetDatum(workerNode->groupId); values[Anum_pg_dist_node_nodename - 1] = CStringGetTextDatum(workerNode->workerName); values[Anum_pg_dist_node_nodeport - 1] = UInt32GetDatum(workerNode->workerPort); values[Anum_pg_dist_node_noderack - 1] = CStringGetTextDatum(workerNode->workerRack); values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(workerNode->hasMetadata); values[Anum_pg_dist_node_isactive - 1] = BoolGetDatum(workerNode->isActive); values[Anum_pg_dist_node_noderole - 1] = ObjectIdGetDatum(workerNode->nodeRole); values[Anum_pg_dist_node_nodecluster - 1] = nodeClusterNameDatum; pgDistNode = heap_open(DistNodeRelationId(), AccessShareLock); /* generate the tuple */ tupleDescriptor = RelationGetDescr(pgDistNode); heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); nodeDatum = HeapTupleGetDatum(heapTuple); heap_close(pgDistNode, NoLock); return nodeDatum; } /* * GetNextGroupId allocates and returns a unique groupId for the group * to be created. This allocation occurs both in shared memory and in write * ahead logs; writing to logs avoids the risk of having groupId collisions. * * Please note that the caller is still responsible for finalizing node data * and the groupId with the master node. Further note that this function relies * on an internal sequence created in initdb to generate unique identifiers. 
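 *
 * In effect the allocation is a nextval() call on the sequence named by
 * GROUPID_SEQUENCE_NAME, executed as the Citus extension owner (via
 * SetUserIdAndSecContext below), presumably so that callers of the wrapping
 * UDFs do not need privileges on the sequence itself.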
*/ int32 GetNextGroupId() { text *sequenceName = cstring_to_text(GROUPID_SEQUENCE_NAME); Oid sequenceId = ResolveRelationId(sequenceName); Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; Datum groupIdDatum = 0; int32 groupId = 0; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate a new and unique groupId from the sequence */ groupIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); groupId = DatumGetUInt32(groupIdDatum); return groupId; }
/* * GetMaxGroupId iterates over the worker node hash, and returns the maximum * group id from the table. */ static uint32 GetMaxGroupId() { uint32 maxGroupId = 0; WorkerNode *workerNode = NULL; HTAB *workerNodeHash = GetWorkerNodeHash(); HASH_SEQ_STATUS status; hash_seq_init(&status, workerNodeHash); while ((workerNode = hash_seq_search(&status)) != NULL) { uint32 workerNodeGroupId = workerNode->groupId; if (workerNodeGroupId > maxGroupId) { maxGroupId = workerNodeGroupId; } } return maxGroupId; }
/* * GetNextNodeId allocates and returns a unique nodeId for the node * to be added. This allocation occurs both in shared memory and in write * ahead logs; writing to logs avoids the risk of having nodeId collisions. * * Please note that the caller is still responsible for finalizing node data * and the nodeId with the master node. Further note that this function relies * on an internal sequence created in initdb to generate unique identifiers. */ int GetNextNodeId() { text *sequenceName = cstring_to_text(NODEID_SEQUENCE_NAME); Oid sequenceId = ResolveRelationId(sequenceName); Datum sequenceIdDatum = ObjectIdGetDatum(sequenceId); Oid savedUserId = InvalidOid; int savedSecurityContext = 0; Datum nextNodeIdDatum = 0; int nextNodeId = 0; GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* generate a new and unique nodeId from the sequence */ nextNodeIdDatum = DirectFunctionCall1(nextval_oid, sequenceIdDatum); SetUserIdAndSecContext(savedUserId, savedSecurityContext); nextNodeId = DatumGetUInt32(nextNodeIdDatum); return nextNodeId; }
/* * EnsureCoordinator checks if the current node is the coordinator. If it is not, * the function errors out. */ void EnsureCoordinator(void) { int localGroupId = GetLocalGroupId(); if (localGroupId != 0) { ereport(ERROR, (errmsg("operation is not allowed on this node"), errhint("Connect to the coordinator and run it again."))); } }
/* * InsertNodeRow opens the node system catalog, and inserts a new row with the * given values into that system catalog. * * NOTE: If you call this function you probably need to have taken a * ShareRowExclusiveLock then checked that you're not adding a second primary to * an existing group. If you don't, it's possible for the metadata to become inconsistent.
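 *
 * A rough sketch of the expected calling pattern (mirroring AddNodeMetadata
 * above; error handling elided):
 *
 *     LockRelationOid(DistNodeRelationId(), ShareRowExclusiveLock);
 *     if (nodeRole == PrimaryNodeRoleId() &&
 *         PrimaryNodeForGroup(groupId, NULL) != NULL)
 *     {
 *         ereport(ERROR, ...);
 *     }
 *     InsertNodeRow(GetNextNodeId(), nodeName, nodePort, groupId, ...);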
*/ static void InsertNodeRow(int nodeid, char *nodeName, int32 nodePort, uint32 groupId, char *nodeRack, bool hasMetadata, bool isActive, Oid nodeRole, char *nodeCluster) { Relation pgDistNode = NULL; TupleDesc tupleDescriptor = NULL; HeapTuple heapTuple = NULL; Datum values[Natts_pg_dist_node]; bool isNulls[Natts_pg_dist_node]; Datum nodeClusterStringDatum = CStringGetDatum(nodeCluster); Datum nodeClusterNameDatum = DirectFunctionCall1(namein, nodeClusterStringDatum); /* form new shard tuple */ memset(values, 0, sizeof(values)); memset(isNulls, false, sizeof(isNulls)); values[Anum_pg_dist_node_nodeid - 1] = UInt32GetDatum(nodeid); values[Anum_pg_dist_node_groupid - 1] = UInt32GetDatum(groupId); values[Anum_pg_dist_node_nodename - 1] = CStringGetTextDatum(nodeName); values[Anum_pg_dist_node_nodeport - 1] = UInt32GetDatum(nodePort); values[Anum_pg_dist_node_noderack - 1] = CStringGetTextDatum(nodeRack); values[Anum_pg_dist_node_hasmetadata - 1] = BoolGetDatum(hasMetadata); values[Anum_pg_dist_node_isactive - 1] = BoolGetDatum(isActive); values[Anum_pg_dist_node_noderole - 1] = ObjectIdGetDatum(nodeRole); values[Anum_pg_dist_node_nodecluster - 1] = nodeClusterNameDatum; pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); tupleDescriptor = RelationGetDescr(pgDistNode); heapTuple = heap_form_tuple(tupleDescriptor, values, isNulls); CatalogTupleInsert(pgDistNode, heapTuple); CitusInvalidateRelcacheByRelid(DistNodeRelationId()); /* increment the counter so that next command can see the row */ CommandCounterIncrement(); /* close relation */ heap_close(pgDistNode, NoLock); } /* * DeleteNodeRow removes the requested row from pg_dist_node table if it exists. */ static void DeleteNodeRow(char *nodeName, int32 nodePort) { const int scanKeyCount = 2; bool indexOK = false; HeapTuple heapTuple = NULL; SysScanDesc heapScan = NULL; ScanKeyData scanKey[scanKeyCount]; Relation pgDistNode = heap_open(DistNodeRelationId(), RowExclusiveLock); ScanKeyInit(&scanKey[0], Anum_pg_dist_node_nodename, BTEqualStrategyNumber, F_TEXTEQ, CStringGetTextDatum(nodeName)); ScanKeyInit(&scanKey[1], Anum_pg_dist_node_nodeport, BTEqualStrategyNumber, F_INT8EQ, Int32GetDatum(nodePort)); heapScan = systable_beginscan(pgDistNode, InvalidOid, indexOK, NULL, scanKeyCount, scanKey); heapTuple = systable_getnext(heapScan); if (!HeapTupleIsValid(heapTuple)) { ereport(ERROR, (errmsg("could not find valid entry for node \"%s:%d\"", nodeName, nodePort))); } simple_heap_delete(pgDistNode, &(heapTuple->t_self)); systable_endscan(heapScan); /* ensure future commands don't use the node we just removed */ CitusInvalidateRelcacheByRelid(DistNodeRelationId()); /* increment the counter so that next command won't see the row */ CommandCounterIncrement(); heap_close(pgDistNode, NoLock); } /* * ParseWorkerNodeFileAndRename opens and parses the node name and node port from the * specified configuration file and after that, renames it marking it is not used anymore. * Note that this function is deprecated. Do not use this function for any new * features. 
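 *
 * For reference, each non-comment line of the file carries a node name, an
 * optional port (defaulting to 5432) and an optional rack, separated by
 * whitespace; '#' starts a comment. A hypothetical file could look like:
 *
 *     # hostname      port    rack
 *     worker-101      5432    rack-a
 *     worker-102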
*/ static List * ParseWorkerNodeFileAndRename() { FILE *workerFileStream = NULL; List *workerNodeList = NIL; char workerNodeLine[MAXPGPATH]; char *workerFilePath = make_absolute_path(WorkerListFileName); StringInfo renamedWorkerFilePath = makeStringInfo(); char *workerPatternTemplate = "%%%u[^# \t]%%*[ \t]%%%u[^# \t]%%*[ \t]%%%u[^# \t]"; char workerLinePattern[1024]; const int workerNameIndex = 0; const int workerPortIndex = 1; memset(workerLinePattern, '\0', sizeof(workerLinePattern)); workerFileStream = AllocateFile(workerFilePath, PG_BINARY_R); if (workerFileStream == NULL) { if (errno == ENOENT) { ereport(DEBUG1, (errmsg("worker list file located at \"%s\" is not present", workerFilePath))); } else { ereport(ERROR, (errcode_for_file_access(), errmsg("could not open worker list file \"%s\": %m", workerFilePath))); } return NIL; } /* build pattern to contain node name length limit */ snprintf(workerLinePattern, sizeof(workerLinePattern), workerPatternTemplate, WORKER_LENGTH, MAX_PORT_LENGTH, WORKER_LENGTH); while (fgets(workerNodeLine, sizeof(workerNodeLine), workerFileStream) != NULL) { const int workerLineLength = strnlen(workerNodeLine, MAXPGPATH); WorkerNode *workerNode = NULL; char *linePointer = NULL; int32 nodePort = 5432; /* default port number */ int fieldCount = 0; bool lineIsInvalid = false; char nodeName[WORKER_LENGTH + 1]; char nodeRack[WORKER_LENGTH + 1]; char nodePortString[MAX_PORT_LENGTH + 1]; memset(nodeName, '\0', sizeof(nodeName)); strlcpy(nodeRack, WORKER_DEFAULT_RACK, sizeof(nodeRack)); memset(nodePortString, '\0', sizeof(nodePortString)); if (workerLineLength == MAXPGPATH - 1) { ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("worker node list file line exceeds the maximum " "length of %d", MAXPGPATH))); } /* trim trailing newlines preserved by fgets, if any */ linePointer = workerNodeLine + workerLineLength - 1; while (linePointer >= workerNodeLine && (*linePointer == '\n' || *linePointer == '\r')) { *linePointer-- = '\0'; } /* skip leading whitespace */ for (linePointer = workerNodeLine; *linePointer; linePointer++) { if (!isspace((unsigned char) *linePointer)) { break; } } /* if the entire line is whitespace or a comment, skip it */ if (*linePointer == '\0' || *linePointer == '#') { continue; } /* parse line; node name is required, but port and rack are optional */ fieldCount = sscanf(linePointer, workerLinePattern, nodeName, nodePortString, nodeRack); /* adjust field count for zero based indexes */ fieldCount--; /* raise error if no fields were assigned */ if (fieldCount < workerNameIndex) { lineIsInvalid = true; } /* no special treatment for nodeName: already parsed by sscanf */ /* if a second token was specified, convert to integer port */ if (fieldCount >= workerPortIndex) { char *nodePortEnd = NULL; errno = 0; nodePort = strtol(nodePortString, &nodePortEnd, 10); if (errno != 0 || (*nodePortEnd) != '\0' || nodePort <= 0) { lineIsInvalid = true; } } if (lineIsInvalid) { ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not parse worker node line: %s", workerNodeLine), errhint("Lines in the worker node file must contain a valid " "node name and, optionally, a positive port number. 
" "Comments begin with a '#' character and extend to " "the end of their line."))); } /* allocate worker node structure and set fields */ workerNode = (WorkerNode *) palloc0(sizeof(WorkerNode)); strlcpy(workerNode->workerName, nodeName, WORKER_LENGTH); strlcpy(workerNode->workerRack, nodeRack, WORKER_LENGTH); workerNode->workerPort = nodePort; workerNode->hasMetadata = false; workerNode->isActive = true; workerNodeList = lappend(workerNodeList, workerNode); } /* rename the file, marking that it is not used anymore */ appendStringInfo(renamedWorkerFilePath, "%s", workerFilePath); appendStringInfo(renamedWorkerFilePath, ".obsolete"); rename(workerFilePath, renamedWorkerFilePath->data); FreeFile(workerFileStream); free(workerFilePath); return workerNodeList; } /* * TupleToWorkerNode takes in a heap tuple from pg_dist_node, and * converts this tuple to an equivalent struct in memory. The function assumes * the caller already has locks on the tuple, and doesn't perform any locking. */ static WorkerNode * TupleToWorkerNode(TupleDesc tupleDescriptor, HeapTuple heapTuple) { WorkerNode *workerNode = NULL; bool isNull = false; Datum nodeId = heap_getattr(heapTuple, Anum_pg_dist_node_nodeid, tupleDescriptor, &isNull); Datum groupId = heap_getattr(heapTuple, Anum_pg_dist_node_groupid, tupleDescriptor, &isNull); Datum nodeName = heap_getattr(heapTuple, Anum_pg_dist_node_nodename, tupleDescriptor, &isNull); Datum nodePort = heap_getattr(heapTuple, Anum_pg_dist_node_nodeport, tupleDescriptor, &isNull); Datum nodeRack = heap_getattr(heapTuple, Anum_pg_dist_node_noderack, tupleDescriptor, &isNull); Datum hasMetadata = heap_getattr(heapTuple, Anum_pg_dist_node_hasmetadata, tupleDescriptor, &isNull); Datum isActive = heap_getattr(heapTuple, Anum_pg_dist_node_isactive, tupleDescriptor, &isNull); Datum nodeRole = heap_getattr(heapTuple, Anum_pg_dist_node_noderole, tupleDescriptor, &isNull); Datum nodeCluster = heap_getattr(heapTuple, Anum_pg_dist_node_nodecluster, tupleDescriptor, &isNull); Assert(!HeapTupleHasNulls(heapTuple)); workerNode = (WorkerNode *) palloc0(sizeof(WorkerNode)); workerNode->nodeId = DatumGetUInt32(nodeId); workerNode->workerPort = DatumGetUInt32(nodePort); workerNode->groupId = DatumGetUInt32(groupId); strlcpy(workerNode->workerName, TextDatumGetCString(nodeName), WORKER_LENGTH); strlcpy(workerNode->workerRack, TextDatumGetCString(nodeRack), WORKER_LENGTH); workerNode->hasMetadata = DatumGetBool(hasMetadata); workerNode->isActive = DatumGetBool(isActive); workerNode->nodeRole = DatumGetObjectId(nodeRole); { Name nodeClusterName = DatumGetName(nodeCluster); char *nodeClusterString = NameStr(*nodeClusterName); /* * nodeClusterString can be null if nodecluster column is not present. * In the case of extension creation/upgrade, master_initialize_node_metadata * function is called before the nodecluster column is added to pg_dist_node * table. */ if (nodeClusterString != NULL) { strlcpy(workerNode->nodeCluster, nodeClusterString, NAMEDATALEN); } } return workerNode; } /* * StringToDatum transforms a string representation into a Datum. */ Datum StringToDatum(char *inputString, Oid dataType) { Oid typIoFunc = InvalidOid; Oid typIoParam = InvalidOid; int32 typeModifier = -1; Datum datum = 0; getTypeInputInfo(dataType, &typIoFunc, &typIoParam); getBaseTypeAndTypmod(dataType, &typeModifier); datum = OidInputFunctionCall(typIoFunc, inputString, typIoParam, typeModifier); return datum; } /* * DatumToString returns the string representation of the given datum. 
*/ char * DatumToString(Datum datum, Oid dataType) { char *outputString = NULL; Oid typIoFunc = InvalidOid; bool typIsVarlena = false; getTypeOutputInfo(dataType, &typIoFunc, &typIsVarlena); outputString = OidOutputFunctionCall(typIoFunc, datum); return outputString; } citus-7.0.3/src/backend/distributed/utils/reference_table_utils.c000066400000000000000000000403651317107136600252160ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * reference_table_utils.c * * Declarations for public utility functions related to reference tables. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/genam.h" #include "distributed/colocation_utils.h" #include "distributed/listutils.h" #include "distributed/master_protocol.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_logical_planner.h" #include "distributed/reference_table_utils.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/transaction_management.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" #include "storage/lmgr.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/rel.h" /* local function forward declarations */ static void ReplicateSingleShardTableToAllWorkers(Oid relationId); static void ReplicateShardToAllWorkers(ShardInterval *shardInterval); static void ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName, int nodePort); static void ConvertToReferenceTableMetadata(Oid relationId, uint64 shardId); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(upgrade_to_reference_table); /* * upgrade_to_reference_table accepts a broadcast table which has only one shard and * replicates it across all nodes to create a reference table. It also modifies related * metadata to mark the table as reference. 
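 *
 * The function is exposed as a SQL-callable UDF; a hypothetical invocation
 * (the table name is a placeholder) would be:
 *
 *     SELECT upgrade_to_reference_table('single_shard_table');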
*/ Datum upgrade_to_reference_table(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); List *shardIntervalList = NIL; ShardInterval *shardInterval = NULL; uint64 shardId = INVALID_SHARD_ID; DistTableCacheEntry *tableEntry = NULL; EnsureCoordinator(); CheckCitusVersion(ERROR); if (!IsDistributedTable(relationId)) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot upgrade to reference table"), errdetail("Relation \"%s\" is not distributed.", relationName), errhint("Instead, you can use; " "create_reference_table('%s');", relationName))); } tableEntry = DistributedTableCacheEntry(relationId); if (tableEntry->partitionMethod == DISTRIBUTE_BY_NONE) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot upgrade to reference table"), errdetail("Relation \"%s\" is already a reference table", relationName))); } if (tableEntry->replicationModel == REPLICATION_MODEL_STREAMING) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("cannot upgrade to reference table"), errdetail("Upgrade is only supported for statement-based " "replicated tables but \"%s\" is streaming replicated", relationName))); } shardIntervalList = LoadShardIntervalList(relationId); if (list_length(shardIntervalList) != 1) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot upgrade to reference table"), errdetail("Relation \"%s\" shard count is not one. Only " "relations with one shard can be upgraded to " "reference tables.", relationName))); } shardInterval = (ShardInterval *) linitial(shardIntervalList); shardId = shardInterval->shardId; LockShardDistributionMetadata(shardId, ExclusiveLock); LockShardResource(shardId, ExclusiveLock); ReplicateSingleShardTableToAllWorkers(relationId); PG_RETURN_VOID(); } /* * ReplicateAllReferenceTablesToNode function finds all reference tables and * replicates them to the given worker node. It also modifies pg_dist_colocation * table to update the replication factor column when necessary. This function * skips reference tables if that node already has healthy placement of that * reference table to prevent unnecessary data transfer. */ void ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort) { List *referenceTableList = ReferenceTableOidList(); ListCell *referenceTableCell = NULL; List *workerNodeList = ActivePrimaryNodeList(); uint32 workerCount = 0; Oid firstReferenceTableId = InvalidOid; uint32 referenceTableColocationId = INVALID_COLOCATION_ID; /* if there is no reference table, we do not need to do anything */ if (list_length(referenceTableList) == 0) { return; } /* * We sort the reference table list to prevent deadlocks in concurrent * ReplicateAllReferenceTablesToAllNodes calls. 
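 * Sorting by oid means every backend acquires the per-shard metadata locks
 * below in the same order, so two concurrent calls cannot each end up
 * waiting for a lock the other one already holds.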
*/ referenceTableList = SortList(referenceTableList, CompareOids); foreach(referenceTableCell, referenceTableList) { Oid referenceTableId = lfirst_oid(referenceTableCell); List *shardIntervalList = LoadShardIntervalList(referenceTableId); ShardInterval *shardInterval = (ShardInterval *) linitial(shardIntervalList); uint64 shardId = shardInterval->shardId; LockShardDistributionMetadata(shardId, ExclusiveLock); ReplicateShardToNode(shardInterval, nodeName, nodePort); } /* * After replicating reference tables, we will update replication factor column for * colocation group of reference tables so that worker count will be equal to * replication factor again. */ workerCount = list_length(workerNodeList); firstReferenceTableId = linitial_oid(referenceTableList); referenceTableColocationId = TableColocationId(firstReferenceTableId); UpdateColocationGroupReplicationFactor(referenceTableColocationId, workerCount); } /* * ReplicateSingleShardTableToAllWorkers accepts a broadcast table and replicates it to * all worker nodes. It assumes that caller of this function ensures that given broadcast * table has only one shard. */ static void ReplicateSingleShardTableToAllWorkers(Oid relationId) { List *shardIntervalList = LoadShardIntervalList(relationId); ShardInterval *shardInterval = (ShardInterval *) linitial(shardIntervalList); uint64 shardId = shardInterval->shardId; List *foreignConstraintCommandList = CopyShardForeignConstraintCommandList( shardInterval); if (foreignConstraintCommandList != NIL || TableReferenced(relationId)) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot upgrade to reference table"), errdetail("Relation \"%s\" is part of a foreign constraint. " "Foreign key constraints are not allowed " "from or to reference tables.", relationName))); } /* * ReplicateShardToAllWorkers function opens separate transactions (i.e., not part * of any coordinated transactions) to each worker and replicates given shard to all * workers. If a worker already has a healthy replica of given shard, it skips that * worker to prevent copying unnecessary data. */ ReplicateShardToAllWorkers(shardInterval); /* * We need to update metadata tables to mark this table as reference table. We modify * pg_dist_partition, pg_dist_colocation and pg_dist_shard tables in * ConvertToReferenceTableMetadata function. */ ConvertToReferenceTableMetadata(relationId, shardId); /* * After the table has been officially marked as a reference table, we need to create * the reference table itself and insert its pg_dist_partition, pg_dist_shard and * existing pg_dist_placement rows. */ CreateTableMetadataOnWorkers(relationId); } /* * ReplicateShardToAllWorkers function replicates given shard to the all worker nodes * in separate transactions. While replicating, it only replicates the shard to the * workers which does not have a healthy replica of the shard. However, this function * does not obtain any lock on shard resource and shard metadata. It is caller's * responsibility to take those locks. */ static void ReplicateShardToAllWorkers(ShardInterval *shardInterval) { List *workerNodeList = NULL; ListCell *workerNodeCell = NULL; /* prevent concurrent pg_dist_node changes */ LockRelationOid(DistNodeRelationId(), RowShareLock); workerNodeList = ActivePrimaryNodeList(); /* * We will iterate over all worker nodes and if healthy placement is not exist at * given node we will copy the shard to that node. 
Then we will also modify * the metadata to reflect the newly copied shard. */ workerNodeList = SortList(workerNodeList, CompareWorkerNodes); foreach(workerNodeCell, workerNodeList) { WorkerNode *workerNode = (WorkerNode *) lfirst(workerNodeCell); char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; ReplicateShardToNode(shardInterval, nodeName, nodePort); } }
/* * ReplicateShardToNode function replicates the given shard to the given worker node * in a separate transaction. While replicating, it only copies the shard if the target * worker does not already have a healthy replica of it. This function also modifies * metadata by inserting/updating related rows in pg_dist_placement. */ static void ReplicateShardToNode(ShardInterval *shardInterval, char *nodeName, int nodePort) { uint64 shardId = shardInterval->shardId; bool missingOk = false; ShardPlacement *sourceShardPlacement = FinalizedShardPlacement(shardId, missingOk); char *srcNodeName = sourceShardPlacement->nodeName; uint32 srcNodePort = sourceShardPlacement->nodePort; List *ddlCommandList = CopyShardCommandList(shardInterval, srcNodeName, srcNodePort); List *shardPlacementList = ShardPlacementList(shardId); bool missingWorkerOk = true; ShardPlacement *targetPlacement = SearchShardPlacementInList(shardPlacementList, nodeName, nodePort, missingWorkerOk); char *tableOwner = TableOwner(shardInterval->relationId); /* * Although this function is used for reference tables and reference table shard * placements always have shardState = FILE_FINALIZED, in case of an upgrade of * a non-reference table to a reference table, unhealthy placements may exist. In * this case, we repair the shard placement and update its state in the * pg_dist_placement table. */ if (targetPlacement == NULL || targetPlacement->shardState != FILE_FINALIZED) { uint64 placementId = 0; uint32 groupId = 0; ereport(NOTICE, (errmsg("Replicating reference table \"%s\" to the node %s:%d", get_rel_name(shardInterval->relationId), nodeName, nodePort))); SendCommandListToWorkerInSingleTransaction(nodeName, nodePort, tableOwner, ddlCommandList); if (targetPlacement == NULL) { groupId = GroupForNode(nodeName, nodePort); placementId = GetNextPlacementId(); InsertShardPlacementRow(shardId, placementId, FILE_FINALIZED, 0, groupId); } else { groupId = targetPlacement->groupId; placementId = targetPlacement->placementId; UpdateShardPlacementState(placementId, FILE_FINALIZED); } /* * Although ReplicateShardToAllWorkers is used only for reference tables, * during the upgrade phase, the placements are created before the table is * marked as a reference table. All metadata (including the placement * metadata) will be copied to workers after all reference table changes * are finished. */ if (ShouldSyncTableMetadata(shardInterval->relationId)) { char *placementCommand = PlacementUpsertCommand(shardId, placementId, FILE_FINALIZED, 0, groupId); SendCommandToWorkers(WORKERS_WITH_METADATA, placementCommand); } } }
/* * ConvertToReferenceTableMetadata accepts a broadcast table and modifies its metadata to * reference table metadata. To do this, this function updates pg_dist_partition, * pg_dist_colocation and pg_dist_shard. This function assumes that the caller ensures that * the given broadcast table has only one shard.
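 *
 * As implemented below, the old pg_dist_partition and pg_dist_shard rows (and
 * the old colocation group, if it becomes empty) are removed, and new rows are
 * inserted that describe a DISTRIBUTE_BY_NONE table in the reference table
 * colocation group, replicated with REPLICATION_MODEL_2PC, whose single shard
 * has no shard min/max values.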
*/ static void ConvertToReferenceTableMetadata(Oid relationId, uint64 shardId) { uint32 currentColocationId = TableColocationId(relationId); uint32 newColocationId = CreateReferenceTableColocationId(); Var *distributionColumn = NULL; char shardStorageType = ShardStorageType(relationId); text *shardMinValue = NULL; text *shardMaxValue = NULL; /* delete old metadata rows */ DeletePartitionRow(relationId); DeleteColocationGroupIfNoTablesBelong(currentColocationId); DeleteShardRow(shardId); /* insert new metadata rows */ InsertIntoPgDistPartition(relationId, DISTRIBUTE_BY_NONE, distributionColumn, newColocationId, REPLICATION_MODEL_2PC); InsertShardRow(relationId, shardId, shardStorageType, shardMinValue, shardMaxValue); } /* * CreateReferenceTableColocationId creates a new co-location id for reference tables and * writes it into pg_dist_colocation, then returns the created co-location id. Since there * can be only one colocation group for all kinds of reference tables, if a co-location id * is already created for reference tables, this function returns it without creating * anything. */ uint32 CreateReferenceTableColocationId() { uint32 colocationId = INVALID_COLOCATION_ID; List *workerNodeList = ActivePrimaryNodeList(); int shardCount = 1; int replicationFactor = list_length(workerNodeList); Oid distributionColumnType = InvalidOid; /* check for existing colocations */ colocationId = ColocationId(shardCount, replicationFactor, distributionColumnType); if (colocationId == INVALID_COLOCATION_ID) { colocationId = CreateColocationGroup(shardCount, replicationFactor, distributionColumnType); } return colocationId; } /* * DeleteAllReferenceTablePlacementsFromNodeGroup function iterates over list of reference * tables and deletes all reference table placements from pg_dist_placement table * for given group. However, it does not modify replication factor of the colocation * group of reference tables. It is caller's responsibility to do that if it is necessary. */ void DeleteAllReferenceTablePlacementsFromNodeGroup(uint32 groupId) { List *referenceTableList = ReferenceTableOidList(); ListCell *referenceTableCell = NULL; /* if there are no reference tables, we do not need to do anything */ if (list_length(referenceTableList) == 0) { return; } /* * We sort the reference table list to prevent deadlocks in concurrent * DeleteAllReferenceTablePlacementsFromNodeGroup calls. */ referenceTableList = SortList(referenceTableList, CompareOids); foreach(referenceTableCell, referenceTableList) { GroupShardPlacement *placement = NULL; StringInfo deletePlacementCommand = makeStringInfo(); Oid referenceTableId = lfirst_oid(referenceTableCell); List *placements = GroupShardPlacementsForTableOnGroup(referenceTableId, groupId); if (list_length(placements) == 0) { /* this happens if the node was previously disabled */ continue; } placement = (GroupShardPlacement *) linitial(placements); LockShardDistributionMetadata(placement->shardId, ExclusiveLock); DeleteShardPlacementRow(placement->placementId); appendStringInfo(deletePlacementCommand, "DELETE FROM pg_dist_placement WHERE placementid=%lu", placement->placementId); SendCommandToWorkers(WORKERS_WITH_METADATA, deletePlacementCommand->data); } } /* * ReferenceTableOidList function scans pg_dist_partition to create a list of all * reference tables. To create the list, it performs sequential scan. Since it is not * expected that this function will be called frequently, it is OK not to use index scan. 
* If this function becomes performance bottleneck, it is possible to modify this function * to perform index scan. */ List * ReferenceTableOidList() { List *distTableOidList = DistTableOidList(); ListCell *distTableOidCell = NULL; List *referenceTableList = NIL; foreach(distTableOidCell, distTableOidList) { DistTableCacheEntry *cacheEntry = NULL; Oid relationId = lfirst_oid(distTableOidCell); cacheEntry = DistributedTableCacheEntry(relationId); if (cacheEntry->partitionMethod == DISTRIBUTE_BY_NONE) { referenceTableList = lappend_oid(referenceTableList, relationId); } } return referenceTableList; } /* CompareOids is a comparison function for sort shard oids */ int CompareOids(const void *leftElement, const void *rightElement) { Oid *leftId = (Oid *) leftElement; Oid *rightId = (Oid *) rightElement; if (*leftId > *rightId) { return 1; } else if (*leftId < *rightId) { return -1; } else { return 0; } } citus-7.0.3/src/backend/distributed/utils/resource_lock.c000066400000000000000000000236771317107136600235370ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * resource_lock.c * Locking Infrastructure for Citus. * * To avoid introducing a new type of locktag - that then could not be * displayed by core functionality - we reuse advisory locks. If we'd just * reused them directly we'd run into danger conflicting with user-defined * advisory locks, but luckily advisory locks only two values for 'field4' in * the locktag. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #include "postgres.h" #include "c.h" #include "miscadmin.h" #include "distributed/colocation_utils.h" #include "distributed/listutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_planner.h" #include "distributed/multi_router_executor.h" #include "distributed/relay_utility.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/worker_protocol.h" #include "storage/lmgr.h" /* local function forward declarations */ static LOCKMODE IntToLockMode(int mode); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(lock_shard_metadata); PG_FUNCTION_INFO_V1(lock_shard_resources); /* * lock_shard_metadata allows the shard distribution metadata to be locked * remotely to block concurrent writes from workers in MX tables. * * This function does not sort the array to avoid deadlock, callers * must ensure a consistent order. */ Datum lock_shard_metadata(PG_FUNCTION_ARGS) { LOCKMODE lockMode = IntToLockMode(PG_GETARG_INT32(0)); ArrayType *shardIdArrayObject = PG_GETARG_ARRAYTYPE_P(1); Datum *shardIdArrayDatum = NULL; int shardIdCount = 0; int shardIdIndex = 0; CheckCitusVersion(ERROR); if (ARR_NDIM(shardIdArrayObject) == 0) { ereport(ERROR, (errmsg("no locks specified"))); } /* we don't want random users to block writes */ EnsureSuperUser(); shardIdCount = ArrayObjectCount(shardIdArrayObject); shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject); for (shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++) { int64 shardId = DatumGetInt64(shardIdArrayDatum[shardIdIndex]); LockShardDistributionMetadata(shardId, lockMode); } PG_RETURN_VOID(); } /* * lock_shard_resources allows shard resources to be locked * remotely to serialise non-commutative writes on shards. 
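 * Citus therefore claims additional, distinct 'field4' values for its own
 * lock kinds (shard metadata, shard resources and job resources; see the
 * SET_LOCKTAG_* macros used below), so its locks cannot collide with
 * user-level advisory locks.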
* * This function does not sort the array to avoid deadlock, callers * must ensure a consistent order. */ Datum lock_shard_resources(PG_FUNCTION_ARGS) { LOCKMODE lockMode = IntToLockMode(PG_GETARG_INT32(0)); ArrayType *shardIdArrayObject = PG_GETARG_ARRAYTYPE_P(1); Datum *shardIdArrayDatum = NULL; int shardIdCount = 0; int shardIdIndex = 0; CheckCitusVersion(ERROR); if (ARR_NDIM(shardIdArrayObject) == 0) { ereport(ERROR, (errmsg("no locks specified"))); } /* we don't want random users to block writes */ EnsureSuperUser(); shardIdCount = ArrayObjectCount(shardIdArrayObject); shardIdArrayDatum = DeconstructArrayObject(shardIdArrayObject); for (shardIdIndex = 0; shardIdIndex < shardIdCount; shardIdIndex++) { int64 shardId = DatumGetInt64(shardIdArrayDatum[shardIdIndex]); LockShardResource(shardId, lockMode); } PG_RETURN_VOID(); } /* * IntToLockMode verifies whether the specified integer is an accepted lock mode * and returns it as a LOCKMODE enum. */ static LOCKMODE IntToLockMode(int mode) { if (mode == ExclusiveLock) { return ExclusiveLock; } else if (mode == ShareLock) { return ShareLock; } else if (mode == AccessShareLock) { return AccessShareLock; } else { elog(ERROR, "unsupported lockmode %d", mode); } } /* * LockShardDistributionMetadata returns after grabbing a lock for distribution * metadata related to the specified shard, blocking if required. Any locks * acquired using this method are released at transaction end. */ void LockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode) { LOCKTAG tag; const bool sessionLock = false; const bool dontWait = false; SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, MyDatabaseId, shardId); (void) LockAcquire(&tag, lockMode, sessionLock, dontWait); } /* * TryLockShardDistributionMetadata tries to grab a lock for distribution * metadata related to the specified shard, returning false if the lock * is currently taken. Any locks acquired using this method are released * at transaction end. */ bool TryLockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode) { LOCKTAG tag; const bool sessionLock = false; const bool dontWait = true; bool lockAcquired = false; SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, MyDatabaseId, shardId); lockAcquired = LockAcquire(&tag, lockMode, sessionLock, dontWait); return lockAcquired; } /* * LockShardResource acquires a lock needed to modify data on a remote shard. * This task may be assigned to multiple backends at the same time, so the lock * manages any concurrency issues associated with shard file fetching and DML * command execution. */ void LockShardResource(uint64 shardId, LOCKMODE lockmode) { LOCKTAG tag; const bool sessionLock = false; const bool dontWait = false; AssertArg(shardId != INVALID_SHARD_ID); SET_LOCKTAG_SHARD_RESOURCE(tag, MyDatabaseId, shardId); (void) LockAcquire(&tag, lockmode, sessionLock, dontWait); } /* Releases the lock associated with the relay file fetching/DML task. */ void UnlockShardResource(uint64 shardId, LOCKMODE lockmode) { LOCKTAG tag; const bool sessionLock = false; SET_LOCKTAG_SHARD_RESOURCE(tag, MyDatabaseId, shardId); LockRelease(&tag, lockmode, sessionLock); } /* * LockJobResource acquires a lock for creating resources associated with the * given jobId. This resource is typically a job schema (namespace), and less * commonly a partition task directory. 
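 *
 * A minimal usage sketch (the lock mode and jobId below are placeholders
 * chosen for illustration, not prescribed values):
 *
 *     LockJobResource(jobId, AccessExclusiveLock);
 *     ... create the job schema or task directory ...
 *     UnlockJobResource(jobId, AccessExclusiveLock);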
*/ void LockJobResource(uint64 jobId, LOCKMODE lockmode) { LOCKTAG tag; const bool sessionLock = false; const bool dontWait = false; SET_LOCKTAG_JOB_RESOURCE(tag, MyDatabaseId, jobId); (void) LockAcquire(&tag, lockmode, sessionLock, dontWait); }
/* Releases the lock for resources associated with the given job id. */ void UnlockJobResource(uint64 jobId, LOCKMODE lockmode) { LOCKTAG tag; const bool sessionLock = false; SET_LOCKTAG_JOB_RESOURCE(tag, MyDatabaseId, jobId); LockRelease(&tag, lockmode, sessionLock); }
/* * LockShardListMetadata takes shared locks on the metadata of all shards in * shardIntervalList to prevent concurrent placement changes. */ void LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode) { ListCell *shardIntervalCell = NULL; /* lock shards in order of shard id to prevent deadlock */ shardIntervalList = SortList(shardIntervalList, CompareShardIntervalsById); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); int64 shardId = shardInterval->shardId; LockShardDistributionMetadata(shardId, lockMode); } }
/* * LockShardListResources takes locks on all shards in shardIntervalList to * prevent concurrent DML statements on those shards. */ void LockShardListResources(List *shardIntervalList, LOCKMODE lockMode) { ListCell *shardIntervalCell = NULL; /* lock shards in order of shard id to prevent deadlock */ shardIntervalList = SortList(shardIntervalList, CompareShardIntervalsById); foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); int64 shardId = shardInterval->shardId; LockShardResource(shardId, lockMode); } }
/* * LockRelationShardResources takes locks on all shards in a list of RelationShards * to prevent concurrent DML statements on those shards. */ void LockRelationShardResources(List *relationShardList, LOCKMODE lockMode) { ListCell *relationShardCell = NULL; /* lock shards in a consistent order to prevent deadlock */ relationShardList = SortList(relationShardList, CompareRelationShards); foreach(relationShardCell, relationShardList) { RelationShard *relationShard = (RelationShard *) lfirst(relationShardCell); uint64 shardId = relationShard->shardId; if (shardId != INVALID_SHARD_ID) { LockShardResource(shardId, lockMode); } } }
/* * LockParentShardResourceIfPartition checks whether the given shard belongs * to a partition. If it does, LockParentShardResourceIfPartition acquires a * shard resource lock on the colocated shard of the parent table. */ void LockParentShardResourceIfPartition(uint64 shardId, LOCKMODE lockMode) { ShardInterval *shardInterval = LoadShardInterval(shardId); Oid relationId = shardInterval->relationId; if (PartitionTable(relationId)) { int shardIndex = ShardIndex(shardInterval); Oid parentRelationId = PartitionParentOid(relationId); uint64 parentShardId = ColocatedShardIdInRelation(parentRelationId, shardIndex); LockShardResource(parentShardId, lockMode); } }
/* * LockPartitionsInRelationList iterates over the given list and acquires locks on * partitions of each partitioned table. It does nothing for non-partitioned tables.
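 *
 * A brief usage sketch (the relation id and lock mode are illustrative
 * placeholders):
 *
 *     List *relationIdList = list_make1_oid(distributedRelationId);
 *     LockPartitionsInRelationList(relationIdList, AccessShareLock);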
*/ void LockPartitionsInRelationList(List *relationIdList, LOCKMODE lockmode) { ListCell *relationIdCell = NULL; foreach(relationIdCell, relationIdList) { Oid relationId = lfirst_oid(relationIdCell); if (PartitionedTable(relationId)) { LockPartitionRelations(relationId, lockmode); } } } /* * LockPartitionRelations acquires relation lock on all partitions of given * partitioned relation. This function expects that given relation is a * partitioned relation. */ void LockPartitionRelations(Oid relationId, LOCKMODE lockMode) { /* * PartitionList function generates partition list in the same order * as PostgreSQL. Therefore we do not need to sort it before acquiring * locks. */ List *partitionList = PartitionList(relationId); ListCell *partitionCell = NULL; foreach(partitionCell, partitionList) { Oid partitionRelationId = lfirst_oid(partitionCell); LockRelationOid(partitionRelationId, lockMode); } } citus-7.0.3/src/backend/distributed/utils/ruleutils_10.c000066400000000000000000007020001317107136600232100ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * ruleutils_10.c * Functions to convert stored expressions/querytrees back to * source text * * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/distributed/utils/ruleutils_10.c * * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ #include "postgres.h" #if (PG_VERSION_NUM >= 100000) #include #include #include #include "access/amapi.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/pg_aggregate.h" #include "catalog/pg_am.h" #include "catalog/pg_authid.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" #include "catalog/pg_depend.h" #include "catalog/pg_extension.h" #include "catalog/pg_foreign_data_wrapper.h" #include "catalog/pg_language.h" #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" #include "catalog/pg_partitioned_table.h" #include "catalog/pg_proc.h" #include "catalog/pg_statistic_ext.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "commands/defrem.h" #include "commands/extension.h" #include "commands/tablespace.h" #include "common/keywords.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_ruleutils.h" #include "executor/spi.h" #include "foreign/foreign.h" #include "funcapi.h" #include "mb/pg_wchar.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/tlist.h" #include "parser/parse_node.h" #include "parser/parse_agg.h" #include "parser/parse_func.h" #include "parser/parse_node.h" #include "parser/parse_oper.h" #include "parser/parser.h" #include "parser/parsetree.h" #include "rewrite/rewriteHandler.h" #include "rewrite/rewriteManip.h" #include "rewrite/rewriteSupport.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/ruleutils.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/tqual.h" #include "utils/typcache.h" #include "utils/varlena.h" #include "utils/xml.h" /* ---------- * Pretty formatting constants * ---------- */ /* Indent counts */ #define PRETTYINDENT_STD 8 #define PRETTYINDENT_JOIN 4 #define 
PRETTYINDENT_VAR 4 #define PRETTYINDENT_LIMIT 40 /* wrap limit */ /* Pretty flags */ #define PRETTYFLAG_PAREN 1 #define PRETTYFLAG_INDENT 2 /* Default line length for pretty-print wrapping: 0 means wrap always */ #define WRAP_COLUMN_DEFAULT 0 /* macro to test if pretty action needed */ #define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN) #define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT) /* ---------- * Local data types * ---------- */ /* Context info needed for invoking a recursive querytree display routine */ typedef struct { StringInfo buf; /* output buffer to append to */ List *namespaces; /* List of deparse_namespace nodes */ List *windowClause; /* Current query level's WINDOW clause */ List *windowTList; /* targetlist for resolving WINDOW clause */ int prettyFlags; /* enabling of pretty-print functions */ int wrapColumn; /* max line length, or -1 for no limit */ int indentLevel; /* current indent level for prettyprint */ bool varprefix; /* TRUE to print prefixes on Vars */ Oid distrelid; /* the distributed table being modified, if valid */ int64 shardid; /* a distributed table's shardid, if positive */ ParseExprKind special_exprkind; /* set only for exprkinds needing special * handling */ } deparse_context; /* * Each level of query context around a subtree needs a level of Var namespace. * A Var having varlevelsup=N refers to the N'th item (counting from 0) in * the current context's namespaces list. * * The rangetable is the list of actual RTEs from the query tree, and the * cte list is the list of actual CTEs. * * rtable_names holds the alias name to be used for each RTE (either a C * string, or NULL for nameless RTEs such as unnamed joins). * rtable_columns holds the column alias names to be used for each RTE. * * In some cases we need to make names of merged JOIN USING columns unique * across the whole query, not only per-RTE. If so, unique_using is TRUE * and using_names is a list of C strings representing names already assigned * to USING columns. * * When deparsing plan trees, there is always just a single item in the * deparse_namespace list (since a plan tree never contains Vars with * varlevelsup > 0). We store the PlanState node that is the immediate * parent of the expression to be deparsed, as well as a list of that * PlanState's ancestors. In addition, we store its outer and inner subplan * state nodes, as well as their plan nodes' targetlists, and the index tlist * if the current plan node might contain INDEX_VAR Vars. (These fields could * be derived on-the-fly from the current PlanState, but it seems notationally * clearer to set them up as separate fields.) 
*/ typedef struct { List *rtable; /* List of RangeTblEntry nodes */ List *rtable_names; /* Parallel list of names for RTEs */ List *rtable_columns; /* Parallel list of deparse_columns structs */ List *ctes; /* List of CommonTableExpr nodes */ /* Workspace for column alias assignment: */ bool unique_using; /* Are we making USING names globally unique */ List *using_names; /* List of assigned names for USING columns */ /* Remaining fields are used only when deparsing a Plan tree: */ PlanState *planstate; /* immediate parent of current expression */ List *ancestors; /* ancestors of planstate */ PlanState *outer_planstate; /* outer subplan state, or NULL if none */ PlanState *inner_planstate; /* inner subplan state, or NULL if none */ List *outer_tlist; /* referent for OUTER_VAR Vars */ List *inner_tlist; /* referent for INNER_VAR Vars */ List *index_tlist; /* referent for INDEX_VAR Vars */ } deparse_namespace; /* * Per-relation data about column alias names. * * Selecting aliases is unreasonably complicated because of the need to dump * rules/views whose underlying tables may have had columns added, deleted, or * renamed since the query was parsed. We must nonetheless print the rule/view * in a form that can be reloaded and will produce the same results as before. * * For each RTE used in the query, we must assign column aliases that are * unique within that RTE. SQL does not require this of the original query, * but due to factors such as *-expansion we need to be able to uniquely * reference every column in a decompiled query. As long as we qualify all * column references, per-RTE uniqueness is sufficient for that. * * However, we can't ensure per-column name uniqueness for unnamed join RTEs, * since they just inherit column names from their input RTEs, and we can't * rename the columns at the join level. Most of the time this isn't an issue * because we don't need to reference the join's output columns as such; we * can reference the input columns instead. That approach can fail for merged * JOIN USING columns, however, so when we have one of those in an unnamed * join, we have to make that column's alias globally unique across the whole * query to ensure it can be referenced unambiguously. * * Another problem is that a JOIN USING clause requires the columns to be * merged to have the same aliases in both input RTEs, and that no other * columns in those RTEs or their children conflict with the USING names. * To handle that, we do USING-column alias assignment in a recursive * traversal of the query's jointree. When descending through a JOIN with * USING, we preassign the USING column names to the child columns, overriding * other rules for column alias assignment. We also mark each RTE with a list * of all USING column names selected for joins containing that RTE, so that * when we assign other columns' aliases later, we can avoid conflicts. * * Another problem is that if a JOIN's input tables have had columns added or * deleted since the query was parsed, we must generate a column alias list * for the join that matches the current set of input columns --- otherwise, a * change in the number of columns in the left input would throw off matching * of aliases to columns of the right input. Thus, positions in the printable * column alias list are not necessarily one-for-one with varattnos of the * JOIN, so we need a separate new_colnames[] array for printing purposes. 
*/ typedef struct { /* * colnames is an array containing column aliases to use for columns that * existed when the query was parsed. Dropped columns have NULL entries. * This array can be directly indexed by varattno to get a Var's name. * * Non-NULL entries are guaranteed unique within the RTE, *except* when * this is for an unnamed JOIN RTE. In that case we merely copy up names * from the two input RTEs. * * During the recursive descent in set_using_names(), forcible assignment * of a child RTE's column name is represented by pre-setting that element * of the child's colnames array. So at that stage, NULL entries in this * array just mean that no name has been preassigned, not necessarily that * the column is dropped. */ int num_cols; /* length of colnames[] array */ char **colnames; /* array of C strings and NULLs */ /* * new_colnames is an array containing column aliases to use for columns * that would exist if the query was re-parsed against the current * definitions of its base tables. This is what to print as the column * alias list for the RTE. This array does not include dropped columns, * but it will include columns added since original parsing. Indexes in * it therefore have little to do with current varattno values. As above, * entries are unique unless this is for an unnamed JOIN RTE. (In such an * RTE, we never actually print this array, but we must compute it anyway * for possible use in computing column names of upper joins.) The * parallel array is_new_col marks which of these columns are new since * original parsing. Entries with is_new_col false must match the * non-NULL colnames entries one-for-one. */ int num_new_cols; /* length of new_colnames[] array */ char **new_colnames; /* array of C strings */ bool *is_new_col; /* array of bool flags */ /* This flag tells whether we should actually print a column alias list */ bool printaliases; /* This list has all names used as USING names in joins above this RTE */ List *parentUsing; /* names assigned to parent merged columns */ /* * If this struct is for a JOIN RTE, we fill these fields during the * set_using_names() pass to describe its relationship to its child RTEs. * * leftattnos and rightattnos are arrays with one entry per existing * output column of the join (hence, indexable by join varattno). For a * simple reference to a column of the left child, leftattnos[i] is the * child RTE's attno and rightattnos[i] is zero; and conversely for a * column of the right child. But for merged columns produced by JOIN * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero. * Also, if the column has been dropped, both are zero. * * If it's a JOIN USING, usingNames holds the alias names selected for the * merged columns (these might be different from the original USING list, * if we had to modify names to achieve uniqueness). 
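 *
 * A hedged illustration (assumed table history, not drawn from this file):
 * if a relation was parsed with columns (a, b, c) and column b was dropped
 * afterwards, colnames would be {"a", NULL, "c"} (still indexable by the
 * original varattnos) while new_colnames would be {"a", "c"} with
 * is_new_col = {false, false}; a column added after parsing would appear in
 * new_colnames with is_new_col = true.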
*/ int leftrti; /* rangetable index of left child */ int rightrti; /* rangetable index of right child */ int *leftattnos; /* left-child varattnos of join cols, or 0 */ int *rightattnos; /* right-child varattnos of join cols, or 0 */ List *usingNames; /* names assigned to merged columns */ } deparse_columns; /* This macro is analogous to rt_fetch(), but for deparse_columns structs */ #define deparse_columns_fetch(rangetable_index, dpns) \ ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1)) /* * Entry in set_rtable_names' hash table */ typedef struct { char name[NAMEDATALEN]; /* Hash key --- must be first */ int counter; /* Largest addition used so far for name */ } NameHashEntry; /* ---------- * Local functions * * Most of these functions used to use fixed-size buffers to build their * results. Now, they take an (already initialized) StringInfo object * as a parameter, and append their text output to its contents. * ---------- */ static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, Bitmapset *rels_used); static void set_deparse_for_query(deparse_namespace *dpns, Query *query, List *parent_namespaces); static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode); static void set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing); static void set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo); static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo); static bool colname_is_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo); static char *make_colname_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo); static void expand_colnames_array_to(deparse_columns *colinfo, int n); static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, deparse_columns *colinfo); static void flatten_join_using_qual(Node *qual, List **leftvars, List **rightvars); static char *get_rtable_name(int rtindex, deparse_context *context); static void set_deparse_planstate(deparse_namespace *dpns, PlanState *ps); static void push_child_plan(deparse_namespace *dpns, PlanState *ps, deparse_namespace *save_dpns); static void pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns); static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, deparse_namespace *save_dpns); static void pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns); static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent); static void get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, Oid distrelid, int64 shardid, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent); static void get_values_def(List *values_lists, deparse_context *context); static void get_with_clause(Query *query, deparse_context *context); static void get_select_query_def(Query *query, deparse_context *context, TupleDesc resultDesc); static void get_insert_query_def(Query *query, deparse_context *context); static void get_update_query_def(Query *query, deparse_context *context); static void get_update_query_targetlist_def(Query *query, List *targetList, deparse_context *context, RangeTblEntry *rte); static void get_delete_query_def(Query *query, deparse_context *context); static void get_utility_query_def(Query *query, deparse_context *context); static void 
get_basic_select_query(Query *query, deparse_context *context, TupleDesc resultDesc); static void get_target_list(List *targetList, deparse_context *context, TupleDesc resultDesc); static void get_setop_query(Node *setOp, Query *query, deparse_context *context, TupleDesc resultDesc); static Node *get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, deparse_context *context); static void get_rule_groupingset(GroupingSet *gset, List *targetlist, bool omit_parens, deparse_context *context); static void get_rule_orderby(List *orderList, List *targetList, bool force_colno, deparse_context *context); static void get_rule_windowclause(Query *query, deparse_context *context); static void get_rule_windowspec(WindowClause *wc, List *targetList, deparse_context *context); static char *get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context); static void get_special_variable(Node *node, deparse_context *context, void *private); static void resolve_special_varno(Node *node, deparse_context *context, void *private, void (*callback) (Node *, deparse_context *, void *)); static Node *find_param_referent(Param *param, deparse_context *context, deparse_namespace **dpns_p, ListCell **ancestor_cell_p); static void get_parameter(Param *param, deparse_context *context); static const char *get_simple_binary_op_name(OpExpr *expr); static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags); static void appendContextKeyword(deparse_context *context, const char *str, int indentBefore, int indentAfter, int indentPlus); static void removeStringInfoSpaces(StringInfo str); static void get_rule_expr(Node *node, deparse_context *context, bool showimplicit); static void get_rule_expr_toplevel(Node *node, deparse_context *context, bool showimplicit); static void get_rule_expr_funccall(Node *node, deparse_context *context, bool showimplicit); static bool looks_like_function(Node *node); static void get_oper_expr(OpExpr *expr, deparse_context *context); static void get_func_expr(FuncExpr *expr, deparse_context *context, bool showimplicit); static void get_agg_expr(Aggref *aggref, deparse_context *context, Aggref *original_aggref); static void get_agg_combine_expr(Node *node, deparse_context *context, void *private); static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context); static void get_coercion_expr(Node *arg, deparse_context *context, Oid resulttype, int32 resulttypmod, Node *parentNode); static void get_const_expr(Const *constval, deparse_context *context, int showtype); static void get_const_collation(Const *constval, deparse_context *context); static void simple_quote_literal(StringInfo buf, const char *val); static void get_sublink_expr(SubLink *sublink, deparse_context *context); static void get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit); static void get_from_clause(Query *query, const char *prefix, deparse_context *context); static void get_from_clause_item(Node *jtnode, Query *query, deparse_context *context); static void get_column_alias_list(deparse_columns *colinfo, deparse_context *context); static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, deparse_columns *colinfo, deparse_context *context); static void get_tablesample_def(TableSampleClause *tablesample, deparse_context *context); static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); static Node *processIndirection(Node *node, deparse_context *context); static void printSubscripts(ArrayRef *aref, deparse_context *context); 
static char *get_relation_name(Oid relid); static char *generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, List *namespaces); static char *generate_fragment_name(char *schemaName, char *tableName); static char *generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, bool has_variadic, bool *use_variadic_p, ParseExprKind special_exprkind); static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2); #define only_marker(rte) ((rte)->inh ? "" : "ONLY ") /* * pg_get_query_def parses back one query tree, and outputs the resulting query * string into given buffer. */ void pg_get_query_def(Query *query, StringInfo buffer) { get_query_def(query, buffer, NIL, NULL, 0, WRAP_COLUMN_DEFAULT, 0); } /* * set_rtable_names: select RTE aliases to be used in printing a query * * We fill in dpns->rtable_names with a list of names that is one-for-one with * the already-filled dpns->rtable list. Each RTE name is unique among those * in the new namespace plus any ancestor namespaces listed in * parent_namespaces. * * If rels_used isn't NULL, only RTE indexes listed in it are given aliases. * * Note that this function is only concerned with relation names, not column * names. */ static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, Bitmapset *rels_used) { HASHCTL hash_ctl; HTAB *names_hash; NameHashEntry *hentry; bool found; int rtindex; ListCell *lc; dpns->rtable_names = NIL; /* nothing more to do if empty rtable */ if (dpns->rtable == NIL) return; /* * We use a hash table to hold known names, so that this process is O(N) * not O(N^2) for N names. */ MemSet(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = NAMEDATALEN; hash_ctl.entrysize = sizeof(NameHashEntry); hash_ctl.hcxt = CurrentMemoryContext; names_hash = hash_create("set_rtable_names names", list_length(dpns->rtable), &hash_ctl, HASH_ELEM | HASH_CONTEXT); /* Preload the hash table with names appearing in parent_namespaces */ foreach(lc, parent_namespaces) { deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc); ListCell *lc2; foreach(lc2, olddpns->rtable_names) { char *oldname = (char *) lfirst(lc2); if (oldname == NULL) continue; hentry = (NameHashEntry *) hash_search(names_hash, oldname, HASH_ENTER, &found); /* we do not complain about duplicate names in parent namespaces */ hentry->counter = 0; } } /* Now we can scan the rtable */ rtindex = 1; foreach(lc, dpns->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); char *refname; /* Just in case this takes an unreasonable amount of time ... */ CHECK_FOR_INTERRUPTS(); if (rels_used && !bms_is_member(rtindex, rels_used)) { /* Ignore unreferenced RTE */ refname = NULL; } else if (rte->alias) { /* If RTE has a user-defined alias, prefer that */ refname = rte->alias->aliasname; } else if (rte->rtekind == RTE_RELATION) { /* Use the current actual name of the relation */ refname = get_rel_name(rte->relid); } else if (rte->rtekind == RTE_JOIN) { /* Unnamed join has no refname */ refname = NULL; } else { /* Otherwise use whatever the parser assigned */ refname = rte->eref->aliasname; } /* * If the selected name isn't unique, append digits to make it so, and * make a new hash entry for it once we've got a unique name. For a * very long input name, we might have to truncate to stay within * NAMEDATALEN. 
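	 *
	 * As an illustrative expectation (not a special case handled below): if
	 * three RTEs all resolve to the name "users", the second and third are
	 * expected to come out as "users_1" and "users_2".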
*/ if (refname) { hentry = (NameHashEntry *) hash_search(names_hash, refname, HASH_ENTER, &found); if (found) { /* Name already in use, must choose a new one */ int refnamelen = strlen(refname); char *modname = (char *) palloc(refnamelen + 16); NameHashEntry *hentry2; do { hentry->counter++; for (;;) { /* * We avoid using %.*s here because it can misbehave * if the data is not valid in what libc thinks is the * prevailing encoding. */ memcpy(modname, refname, refnamelen); sprintf(modname + refnamelen, "_%d", hentry->counter); if (strlen(modname) < NAMEDATALEN) break; /* drop chars from refname to keep all the digits */ refnamelen = pg_mbcliplen(refname, refnamelen, refnamelen - 1); } hentry2 = (NameHashEntry *) hash_search(names_hash, modname, HASH_ENTER, &found); } while (found); hentry2->counter = 0; /* init new hash entry */ refname = modname; } else { /* Name not previously used, need only initialize hentry */ hentry->counter = 0; } } dpns->rtable_names = lappend(dpns->rtable_names, refname); rtindex++; } hash_destroy(names_hash); } /* * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree * * For convenience, this is defined to initialize the deparse_namespace struct * from scratch. */ static void set_deparse_for_query(deparse_namespace *dpns, Query *query, List *parent_namespaces) { ListCell *lc; ListCell *lc2; /* Initialize *dpns and fill rtable/ctes links */ memset(dpns, 0, sizeof(deparse_namespace)); dpns->rtable = query->rtable; dpns->ctes = query->cteList; /* Assign a unique relation alias to each RTE */ set_rtable_names(dpns, parent_namespaces, NULL); /* Initialize dpns->rtable_columns to contain zeroed structs */ dpns->rtable_columns = NIL; while (list_length(dpns->rtable_columns) < list_length(dpns->rtable)) dpns->rtable_columns = lappend(dpns->rtable_columns, palloc0(sizeof(deparse_columns))); /* If it's a utility query, it won't have a jointree */ if (query->jointree) { /* Detect whether global uniqueness of USING names is needed */ dpns->unique_using = has_dangerous_join_using(dpns, (Node *) query->jointree); /* * Select names for columns merged by USING, via a recursive pass over * the query jointree. */ set_using_names(dpns, (Node *) query->jointree, NIL); } /* * Now assign remaining column aliases for each RTE. We do this in a * linear scan of the rtable, so as to process RTEs whether or not they * are in the jointree (we mustn't miss NEW.*, INSERT target relations, * etc). JOIN RTEs must be processed after their children, but this is * okay because they appear later in the rtable list than their children * (cf Asserts in identify_join_columns()). */ forboth(lc, dpns->rtable, lc2, dpns->rtable_columns) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); deparse_columns *colinfo = (deparse_columns *) lfirst(lc2); if (rte->rtekind == RTE_JOIN) set_join_column_names(dpns, rte, colinfo); else set_relation_column_names(dpns, rte, colinfo); } } /* * has_dangerous_join_using: search jointree for unnamed JOIN USING * * Merged columns of a JOIN USING may act differently from either of the input * columns, either because they are merged with COALESCE (in a FULL JOIN) or * because an implicit coercion of the underlying input column is required. * In such a case the column must be referenced as a column of the JOIN not as * a column of either input. And this is problematic if the join is unnamed * (alias-less): we cannot qualify the column's name with an RTE name, since * there is none. 
(Forcibly assigning an alias to the join is not a solution, * since that will prevent legal references to tables below the join.) * To ensure that every column in the query is unambiguously referenceable, * we must assign such merged columns names that are globally unique across * the whole query, aliasing other columns out of the way as necessary. * * Because the ensuing re-aliasing is fairly damaging to the readability of * the query, we don't do this unless we have to. So, we must pre-scan * the join tree to see if we have to, before starting set_using_names(). */ static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode) { if (IsA(jtnode, RangeTblRef)) { /* nothing to do here */ } else if (IsA(jtnode, FromExpr)) { FromExpr *f = (FromExpr *) jtnode; ListCell *lc; foreach(lc, f->fromlist) { if (has_dangerous_join_using(dpns, (Node *) lfirst(lc))) return true; } } else if (IsA(jtnode, JoinExpr)) { JoinExpr *j = (JoinExpr *) jtnode; /* Is it an unnamed JOIN with USING? */ if (j->alias == NULL && j->usingClause) { /* * Yes, so check each join alias var to see if any of them are not * simple references to underlying columns. If so, we have a * dangerous situation and must pick unique aliases. */ RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable); ListCell *lc; foreach(lc, jrte->joinaliasvars) { Var *aliasvar = (Var *) lfirst(lc); if (aliasvar != NULL && !IsA(aliasvar, Var)) return true; } } /* Nope, but inspect children */ if (has_dangerous_join_using(dpns, j->larg)) return true; if (has_dangerous_join_using(dpns, j->rarg)) return true; } else elog(ERROR, "unrecognized node type: %d", (int) nodeTag(jtnode)); return false; } /* * set_using_names: select column aliases to be used for merged USING columns * * We do this during a recursive descent of the query jointree. * dpns->unique_using must already be set to determine the global strategy. * * Column alias info is saved in the dpns->rtable_columns list, which is * assumed to be filled with pre-zeroed deparse_columns structs. * * parentUsing is a list of all USING aliases assigned in parent joins of * the current jointree node. (The passed-in list must not be modified.) */ static void set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) { if (IsA(jtnode, RangeTblRef)) { /* nothing to do now */ } else if (IsA(jtnode, FromExpr)) { FromExpr *f = (FromExpr *) jtnode; ListCell *lc; foreach(lc, f->fromlist) set_using_names(dpns, (Node *) lfirst(lc), parentUsing); } else if (IsA(jtnode, JoinExpr)) { JoinExpr *j = (JoinExpr *) jtnode; RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable); deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); int *leftattnos; int *rightattnos; deparse_columns *leftcolinfo; deparse_columns *rightcolinfo; int i; ListCell *lc; /* Get info about the shape of the join */ identify_join_columns(j, rte, colinfo); leftattnos = colinfo->leftattnos; rightattnos = colinfo->rightattnos; /* Look up the not-yet-filled-in child deparse_columns structs */ leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); /* * If this join is unnamed, then we cannot substitute new aliases at * this level, so any name requirements pushed down to here must be * pushed down again to the children. 
*/ if (rte->alias == NULL) { for (i = 0; i < colinfo->num_cols; i++) { char *colname = colinfo->colnames[i]; if (colname == NULL) continue; /* Push down to left column, unless it's a system column */ if (leftattnos[i] > 0) { expand_colnames_array_to(leftcolinfo, leftattnos[i]); leftcolinfo->colnames[leftattnos[i] - 1] = colname; } /* Same on the righthand side */ if (rightattnos[i] > 0) { expand_colnames_array_to(rightcolinfo, rightattnos[i]); rightcolinfo->colnames[rightattnos[i] - 1] = colname; } } } /* * If there's a USING clause, select the USING column names and push * those names down to the children. We have two strategies: * * If dpns->unique_using is TRUE, we force all USING names to be * unique across the whole query level. In principle we'd only need * the names of dangerous USING columns to be globally unique, but to * safely assign all USING names in a single pass, we have to enforce * the same uniqueness rule for all of them. However, if a USING * column's name has been pushed down from the parent, we should use * it as-is rather than making a uniqueness adjustment. This is * necessary when we're at an unnamed join, and it creates no risk of * ambiguity. Also, if there's a user-written output alias for a * merged column, we prefer to use that rather than the input name; * this simplifies the logic and seems likely to lead to less aliasing * overall. * * If dpns->unique_using is FALSE, we only need USING names to be * unique within their own join RTE. We still need to honor * pushed-down names, though. * * Though significantly different in results, these two strategies are * implemented by the same code, with only the difference of whether * to put assigned names into dpns->using_names. */ if (j->usingClause) { /* Copy the input parentUsing list so we don't modify it */ parentUsing = list_copy(parentUsing); /* USING names must correspond to the first join output columns */ expand_colnames_array_to(colinfo, list_length(j->usingClause)); i = 0; foreach(lc, j->usingClause) { char *colname = strVal(lfirst(lc)); /* Assert it's a merged column */ Assert(leftattnos[i] != 0 && rightattnos[i] != 0); /* Adopt passed-down name if any, else select unique name */ if (colinfo->colnames[i] != NULL) colname = colinfo->colnames[i]; else { /* Prefer user-written output alias if any */ if (rte->alias && i < list_length(rte->alias->colnames)) colname = strVal(list_nth(rte->alias->colnames, i)); /* Make it appropriately unique */ colname = make_colname_unique(colname, dpns, colinfo); if (dpns->unique_using) dpns->using_names = lappend(dpns->using_names, colname); /* Save it as output column name, too */ colinfo->colnames[i] = colname; } /* Remember selected names for use later */ colinfo->usingNames = lappend(colinfo->usingNames, colname); parentUsing = lappend(parentUsing, colname); /* Push down to left column, unless it's a system column */ if (leftattnos[i] > 0) { expand_colnames_array_to(leftcolinfo, leftattnos[i]); leftcolinfo->colnames[leftattnos[i] - 1] = colname; } /* Same on the righthand side */ if (rightattnos[i] > 0) { expand_colnames_array_to(rightcolinfo, rightattnos[i]); rightcolinfo->colnames[rightattnos[i] - 1] = colname; } i++; } } /* Mark child deparse_columns structs with correct parentUsing info */ leftcolinfo->parentUsing = parentUsing; rightcolinfo->parentUsing = parentUsing; /* Now recursively assign USING column names in children */ set_using_names(dpns, j->larg, parentUsing); set_using_names(dpns, j->rarg, parentUsing); } else elog(ERROR, "unrecognized node type: %d", 
(int) nodeTag(jtnode)); } /* * set_relation_column_names: select column aliases for a non-join RTE * * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. * If any colnames entries are already filled in, those override local * choices. */ static void set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo) { int ncolumns; char **real_colnames; bool changed_any; int noldcolumns; int i; int j; /* * Extract the RTE's "real" column names. This is comparable to * get_rte_attribute_name, except that it's important to disregard dropped * columns. We put NULL into the array for a dropped column. */ if (rte->rtekind == RTE_RELATION) { /* Relation --- look to the system catalogs for up-to-date info */ Relation rel; TupleDesc tupdesc; rel = relation_open(rte->relid, AccessShareLock); tupdesc = RelationGetDescr(rel); ncolumns = tupdesc->natts; real_colnames = (char **) palloc(ncolumns * sizeof(char *)); for (i = 0; i < ncolumns; i++) { if (tupdesc->attrs[i]->attisdropped) real_colnames[i] = NULL; else real_colnames[i] = pstrdup(NameStr(tupdesc->attrs[i]->attname)); } relation_close(rel, AccessShareLock); } else { /* Otherwise use the column names from eref */ ListCell *lc; ncolumns = list_length(rte->eref->colnames); real_colnames = (char **) palloc(ncolumns * sizeof(char *)); i = 0; foreach(lc, rte->eref->colnames) { /* * If the column name shown in eref is an empty string, then it's * a column that was dropped at the time of parsing the query, so * treat it as dropped. */ char *cname = strVal(lfirst(lc)); if (cname[0] == '\0') cname = NULL; real_colnames[i] = cname; i++; } } /* * Ensure colinfo->colnames has a slot for each column. (It could be long * enough already, if we pushed down a name for the last column.) Note: * it's possible that there are now more columns than there were when the * query was parsed, ie colnames could be longer than rte->eref->colnames. * We must assign unique aliases to the new columns too, else there could * be unresolved conflicts when the view/rule is reloaded. */ expand_colnames_array_to(colinfo, ncolumns); Assert(colinfo->num_cols == ncolumns); /* * Make sufficiently large new_colnames and is_new_col arrays, too. * * Note: because we leave colinfo->num_new_cols zero until after the loop, * colname_is_unique will not consult that array, which is fine because it * would only be duplicate effort. */ colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *)); colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool)); /* * Scan the columns, select a unique alias for each one, and store it in * colinfo->colnames and colinfo->new_colnames. The former array has NULL * entries for dropped columns, the latter omits them. Also mark * new_colnames entries as to whether they are new since parse time; this * is the case for entries beyond the length of rte->eref->colnames. 
*/ noldcolumns = list_length(rte->eref->colnames); changed_any = false; j = 0; for (i = 0; i < ncolumns; i++) { char *real_colname = real_colnames[i]; char *colname = colinfo->colnames[i]; /* Skip dropped columns */ if (real_colname == NULL) { Assert(colname == NULL); /* colnames[i] is already NULL */ continue; } /* If alias already assigned, that's what to use */ if (colname == NULL) { /* If user wrote an alias, prefer that over real column name */ if (rte->alias && i < list_length(rte->alias->colnames)) colname = strVal(list_nth(rte->alias->colnames, i)); else colname = real_colname; /* Unique-ify and insert into colinfo */ colname = make_colname_unique(colname, dpns, colinfo); colinfo->colnames[i] = colname; } /* Put names of non-dropped columns in new_colnames[] too */ colinfo->new_colnames[j] = colname; /* And mark them as new or not */ colinfo->is_new_col[j] = (i >= noldcolumns); j++; /* Remember if any assigned aliases differ from "real" name */ if (!changed_any && strcmp(colname, real_colname) != 0) changed_any = true; } /* * Set correct length for new_colnames[] array. (Note: if columns have * been added, colinfo->num_cols includes them, which is not really quite * right but is harmless, since any new columns must be at the end where * they won't affect varattnos of pre-existing columns.) */ colinfo->num_new_cols = j; /* * For a relation RTE, we need only print the alias column names if any * are different from the underlying "real" names. For a function RTE, * always emit a complete column alias list; this is to protect against * possible instability of the default column names (eg, from altering * parameter names). For tablefunc RTEs, we never print aliases, because * the column names are part of the clause itself. For other RTE types, * print if we changed anything OR if there were user-written column * aliases (since the latter would be part of the underlying "reality"). */ if (rte->rtekind == RTE_RELATION) colinfo->printaliases = changed_any; else if (rte->rtekind == RTE_FUNCTION) colinfo->printaliases = true; else if (rte->rtekind == RTE_TABLEFUNC) colinfo->printaliases = false; else if (rte->alias && rte->alias->colnames != NIL) colinfo->printaliases = true; else colinfo->printaliases = changed_any; } /* * set_join_column_names: select column aliases for a join RTE * * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. * If any colnames entries are already filled in, those override local * choices. Also, names for USING columns were already chosen by * set_using_names(). We further expect that column alias selection has been * completed for both input RTEs. */ static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo) { deparse_columns *leftcolinfo; deparse_columns *rightcolinfo; bool changed_any; int noldcolumns; int nnewcolumns; Bitmapset *leftmerged = NULL; Bitmapset *rightmerged = NULL; int i; int j; int ic; int jc; /* Look up the previously-filled-in child deparse_columns structs */ leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); /* * Ensure colinfo->colnames has a slot for each column. (It could be long * enough already, if we pushed down a name for the last column.) Note: * it's possible that one or both inputs now have more columns than there * were when the query was parsed, but we'll deal with that below. We * only need entries in colnames for pre-existing columns. 
*/ noldcolumns = list_length(rte->eref->colnames); expand_colnames_array_to(colinfo, noldcolumns); Assert(colinfo->num_cols == noldcolumns); /* * Scan the join output columns, select an alias for each one, and store * it in colinfo->colnames. If there are USING columns, set_using_names() * already selected their names, so we can start the loop at the first * non-merged column. */ changed_any = false; for (i = list_length(colinfo->usingNames); i < noldcolumns; i++) { char *colname = colinfo->colnames[i]; char *real_colname; /* Ignore dropped column (only possible for non-merged column) */ if (colinfo->leftattnos[i] == 0 && colinfo->rightattnos[i] == 0) { Assert(colname == NULL); continue; } /* Get the child column name */ if (colinfo->leftattnos[i] > 0) real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1]; else if (colinfo->rightattnos[i] > 0) real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1]; else { /* We're joining system columns --- use eref name */ real_colname = strVal(list_nth(rte->eref->colnames, i)); } Assert(real_colname != NULL); /* In an unnamed join, just report child column names as-is */ if (rte->alias == NULL) { colinfo->colnames[i] = real_colname; continue; } /* If alias already assigned, that's what to use */ if (colname == NULL) { /* If user wrote an alias, prefer that over real column name */ if (rte->alias && i < list_length(rte->alias->colnames)) colname = strVal(list_nth(rte->alias->colnames, i)); else colname = real_colname; /* Unique-ify and insert into colinfo */ colname = make_colname_unique(colname, dpns, colinfo); colinfo->colnames[i] = colname; } /* Remember if any assigned aliases differ from "real" name */ if (!changed_any && strcmp(colname, real_colname) != 0) changed_any = true; } /* * Calculate number of columns the join would have if it were re-parsed * now, and create storage for the new_colnames and is_new_col arrays. * * Note: colname_is_unique will be consulting new_colnames[] during the * loops below, so its not-yet-filled entries must be zeroes. */ nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols - list_length(colinfo->usingNames); colinfo->num_new_cols = nnewcolumns; colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *)); colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool)); /* * Generating the new_colnames array is a bit tricky since any new columns * added since parse time must be inserted in the right places. This code * must match the parser, which will order a join's columns as merged * columns first (in USING-clause order), then non-merged columns from the * left input (in attnum order), then non-merged columns from the right * input (ditto). If one of the inputs is itself a join, its columns will * be ordered according to the same rule, which means newly-added columns * might not be at the end. We can figure out what's what by consulting * the leftattnos and rightattnos arrays plus the input is_new_col arrays. * * In these loops, i indexes leftattnos/rightattnos (so it's join varattno * less one), j indexes new_colnames/is_new_col, and ic/jc have similar * meanings for the current child RTE. 
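	 *
	 * A hedged illustration (assumed tables, not drawn from this file): for
	 * t1(a, b) JOIN t2(b, c) USING (b) the join's output columns are ordered
	 * (b, a, c) -- the merged column first, then the remaining left columns,
	 * then the remaining right columns -- so a column newly added to t1 would
	 * land before t2's columns rather than at the end of the list.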
*/ /* Handle merged columns; they are first and can't be new */ i = j = 0; while (i < noldcolumns && colinfo->leftattnos[i] != 0 && colinfo->rightattnos[i] != 0) { /* column name is already determined and known unique */ colinfo->new_colnames[j] = colinfo->colnames[i]; colinfo->is_new_col[j] = false; /* build bitmapsets of child attnums of merged columns */ if (colinfo->leftattnos[i] > 0) leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]); if (colinfo->rightattnos[i] > 0) rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]); i++, j++; } /* Handle non-merged left-child columns */ ic = 0; for (jc = 0; jc < leftcolinfo->num_new_cols; jc++) { char *child_colname = leftcolinfo->new_colnames[jc]; if (!leftcolinfo->is_new_col[jc]) { /* Advance ic to next non-dropped old column of left child */ while (ic < leftcolinfo->num_cols && leftcolinfo->colnames[ic] == NULL) ic++; Assert(ic < leftcolinfo->num_cols); ic++; /* If it is a merged column, we already processed it */ if (bms_is_member(ic, leftmerged)) continue; /* Else, advance i to the corresponding existing join column */ while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) i++; Assert(i < colinfo->num_cols); Assert(ic == colinfo->leftattnos[i]); /* Use the already-assigned name of this column */ colinfo->new_colnames[j] = colinfo->colnames[i]; i++; } else { /* * Unique-ify the new child column name and assign, unless we're * in an unnamed join, in which case just copy */ if (rte->alias != NULL) { colinfo->new_colnames[j] = make_colname_unique(child_colname, dpns, colinfo); if (!changed_any && strcmp(colinfo->new_colnames[j], child_colname) != 0) changed_any = true; } else colinfo->new_colnames[j] = child_colname; } colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc]; j++; } /* Handle non-merged right-child columns in exactly the same way */ ic = 0; for (jc = 0; jc < rightcolinfo->num_new_cols; jc++) { char *child_colname = rightcolinfo->new_colnames[jc]; if (!rightcolinfo->is_new_col[jc]) { /* Advance ic to next non-dropped old column of right child */ while (ic < rightcolinfo->num_cols && rightcolinfo->colnames[ic] == NULL) ic++; Assert(ic < rightcolinfo->num_cols); ic++; /* If it is a merged column, we already processed it */ if (bms_is_member(ic, rightmerged)) continue; /* Else, advance i to the corresponding existing join column */ while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) i++; Assert(i < colinfo->num_cols); Assert(ic == colinfo->rightattnos[i]); /* Use the already-assigned name of this column */ colinfo->new_colnames[j] = colinfo->colnames[i]; i++; } else { /* * Unique-ify the new child column name and assign, unless we're * in an unnamed join, in which case just copy */ if (rte->alias != NULL) { colinfo->new_colnames[j] = make_colname_unique(child_colname, dpns, colinfo); if (!changed_any && strcmp(colinfo->new_colnames[j], child_colname) != 0) changed_any = true; } else colinfo->new_colnames[j] = child_colname; } colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc]; j++; } /* Assert we processed the right number of columns */ #ifdef USE_ASSERT_CHECKING while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) i++; Assert(i == colinfo->num_cols); Assert(j == nnewcolumns); #endif /* * For a named join, print column aliases if we changed any from the child * names. Unnamed joins cannot print aliases. */ if (rte->alias != NULL) colinfo->printaliases = changed_any; else colinfo->printaliases = false; } /* * colname_is_unique: is colname distinct from already-chosen column names? 
* * dpns is query-wide info, colinfo is for the column's RTE */ static bool colname_is_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo) { int i; ListCell *lc; /* Check against already-assigned column aliases within RTE */ for (i = 0; i < colinfo->num_cols; i++) { char *oldname = colinfo->colnames[i]; if (oldname && strcmp(oldname, colname) == 0) return false; } /* * If we're building a new_colnames array, check that too (this will be * partially but not completely redundant with the previous checks) */ for (i = 0; i < colinfo->num_new_cols; i++) { char *oldname = colinfo->new_colnames[i]; if (oldname && strcmp(oldname, colname) == 0) return false; } /* Also check against USING-column names that must be globally unique */ foreach(lc, dpns->using_names) { char *oldname = (char *) lfirst(lc); if (strcmp(oldname, colname) == 0) return false; } /* Also check against names already assigned for parent-join USING cols */ foreach(lc, colinfo->parentUsing) { char *oldname = (char *) lfirst(lc); if (strcmp(oldname, colname) == 0) return false; } return true; } /* * make_colname_unique: modify colname if necessary to make it unique * * dpns is query-wide info, colinfo is for the column's RTE */ static char * make_colname_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo) { /* * If the selected name isn't unique, append digits to make it so. For a * very long input name, we might have to truncate to stay within * NAMEDATALEN. */ if (!colname_is_unique(colname, dpns, colinfo)) { int colnamelen = strlen(colname); char *modname = (char *) palloc(colnamelen + 16); int i = 0; do { i++; for (;;) { /* * We avoid using %.*s here because it can misbehave if the * data is not valid in what libc thinks is the prevailing * encoding. */ memcpy(modname, colname, colnamelen); sprintf(modname + colnamelen, "_%d", i); if (strlen(modname) < NAMEDATALEN) break; /* drop chars from colname to keep all the digits */ colnamelen = pg_mbcliplen(colname, colnamelen, colnamelen - 1); } } while (!colname_is_unique(modname, dpns, colinfo)); colname = modname; } return colname; } /* * expand_colnames_array_to: make colinfo->colnames at least n items long * * Any added array entries are initialized to zero. */ static void expand_colnames_array_to(deparse_columns *colinfo, int n) { if (n > colinfo->num_cols) { if (colinfo->colnames == NULL) colinfo->colnames = (char **) palloc0(n * sizeof(char *)); else { colinfo->colnames = (char **) repalloc(colinfo->colnames, n * sizeof(char *)); memset(colinfo->colnames + colinfo->num_cols, 0, (n - colinfo->num_cols) * sizeof(char *)); } colinfo->num_cols = n; } } /* * identify_join_columns: figure out where columns of a join come from * * Fills the join-specific fields of the colinfo struct, except for * usingNames which is filled later. 
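 *
 * A hedged illustration (assumed tables, not drawn from this file): for
 * t1(a, b) JOIN t2(b, c) USING (b), whose output columns are (b, a, c),
 * leftattnos would be expected to end up as {2, 1, 0} and rightattnos as
 * {1, 0, 2}: the merged column b is nonzero on both sides, while a and c
 * each reference only one child.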
*/ static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, deparse_columns *colinfo) { int numjoincols; int i; ListCell *lc; /* Extract left/right child RT indexes */ if (IsA(j->larg, RangeTblRef)) colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex; else if (IsA(j->larg, JoinExpr)) colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex; else elog(ERROR, "unrecognized node type in jointree: %d", (int) nodeTag(j->larg)); if (IsA(j->rarg, RangeTblRef)) colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex; else if (IsA(j->rarg, JoinExpr)) colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex; else elog(ERROR, "unrecognized node type in jointree: %d", (int) nodeTag(j->rarg)); /* Assert children will be processed earlier than join in second pass */ Assert(colinfo->leftrti < j->rtindex); Assert(colinfo->rightrti < j->rtindex); /* Initialize result arrays with zeroes */ numjoincols = list_length(jrte->joinaliasvars); Assert(numjoincols == list_length(jrte->eref->colnames)); colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int)); colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int)); /* Scan the joinaliasvars list to identify simple column references */ i = 0; foreach(lc, jrte->joinaliasvars) { Var *aliasvar = (Var *) lfirst(lc); /* get rid of any implicit coercion above the Var */ aliasvar = (Var *) strip_implicit_coercions((Node *) aliasvar); if (aliasvar == NULL) { /* It's a dropped column; nothing to do here */ } else if (IsA(aliasvar, Var)) { Assert(aliasvar->varlevelsup == 0); Assert(aliasvar->varattno != 0); if (aliasvar->varno == colinfo->leftrti) colinfo->leftattnos[i] = aliasvar->varattno; else if (aliasvar->varno == colinfo->rightrti) colinfo->rightattnos[i] = aliasvar->varattno; else elog(ERROR, "unexpected varno %d in JOIN RTE", aliasvar->varno); } else if (IsA(aliasvar, CoalesceExpr)) { /* * It's a merged column in FULL JOIN USING. Ignore it for now and * let the code below identify the merged columns. */ } else elog(ERROR, "unrecognized node type in join alias vars: %d", (int) nodeTag(aliasvar)); i++; } /* * If there's a USING clause, deconstruct the join quals to identify the * merged columns. This is a tad painful but if we cannot rely on the * column names, there is no other representation of which columns were * joined by USING. (Unless the join type is FULL, we can't tell from the * joinaliasvars list which columns are merged.) Note: we assume that the * merged columns are the first output column(s) of the join. 
*/ if (j->usingClause) { List *leftvars = NIL; List *rightvars = NIL; ListCell *lc2; /* Extract left- and right-side Vars from the qual expression */ flatten_join_using_qual(j->quals, &leftvars, &rightvars); Assert(list_length(leftvars) == list_length(j->usingClause)); Assert(list_length(rightvars) == list_length(j->usingClause)); /* Mark the output columns accordingly */ i = 0; forboth(lc, leftvars, lc2, rightvars) { Var *leftvar = (Var *) lfirst(lc); Var *rightvar = (Var *) lfirst(lc2); Assert(leftvar->varlevelsup == 0); Assert(leftvar->varattno != 0); if (leftvar->varno != colinfo->leftrti) elog(ERROR, "unexpected varno %d in JOIN USING qual", leftvar->varno); colinfo->leftattnos[i] = leftvar->varattno; Assert(rightvar->varlevelsup == 0); Assert(rightvar->varattno != 0); if (rightvar->varno != colinfo->rightrti) elog(ERROR, "unexpected varno %d in JOIN USING qual", rightvar->varno); colinfo->rightattnos[i] = rightvar->varattno; i++; } } } /* * flatten_join_using_qual: extract Vars being joined from a JOIN/USING qual * * We assume that transformJoinUsingClause won't have produced anything except * AND nodes, equality operator nodes, and possibly implicit coercions, and * that the AND node inputs match left-to-right with the original USING list. * * Caller must initialize the result lists to NIL. */ static void flatten_join_using_qual(Node *qual, List **leftvars, List **rightvars) { if (IsA(qual, BoolExpr)) { /* Handle AND nodes by recursion */ BoolExpr *b = (BoolExpr *) qual; ListCell *lc; Assert(b->boolop == AND_EXPR); foreach(lc, b->args) { flatten_join_using_qual((Node *) lfirst(lc), leftvars, rightvars); } } else if (IsA(qual, OpExpr)) { /* Otherwise we should have an equality operator */ OpExpr *op = (OpExpr *) qual; Var *var; if (list_length(op->args) != 2) elog(ERROR, "unexpected unary operator in JOIN/USING qual"); /* Arguments should be Vars with perhaps implicit coercions */ var = (Var *) strip_implicit_coercions((Node *) linitial(op->args)); if (!IsA(var, Var)) elog(ERROR, "unexpected node type in JOIN/USING qual: %d", (int) nodeTag(var)); *leftvars = lappend(*leftvars, var); var = (Var *) strip_implicit_coercions((Node *) lsecond(op->args)); if (!IsA(var, Var)) elog(ERROR, "unexpected node type in JOIN/USING qual: %d", (int) nodeTag(var)); *rightvars = lappend(*rightvars, var); } else { /* Perhaps we have an implicit coercion to boolean? */ Node *q = strip_implicit_coercions(qual); if (q != qual) flatten_join_using_qual(q, leftvars, rightvars); else elog(ERROR, "unexpected node type in JOIN/USING qual: %d", (int) nodeTag(qual)); } } /* * get_rtable_name: convenience function to get a previously assigned RTE alias * * The RTE must belong to the topmost namespace level in "context". */ static char * get_rtable_name(int rtindex, deparse_context *context) { deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names)); return (char *) list_nth(dpns->rtable_names, rtindex - 1); } /* * set_deparse_planstate: set up deparse_namespace to parse subexpressions * of a given PlanState node * * This sets the planstate, outer_planstate, inner_planstate, outer_tlist, * inner_tlist, and index_tlist fields. Caller is responsible for adjusting * the ancestors list if necessary. Note that the rtable and ctes fields do * not need to change when shifting attention to different plan nodes in a * single plan tree. 
*/ static void set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) { dpns->planstate = ps; /* * We special-case Append and MergeAppend to pretend that the first child * plan is the OUTER referent; we have to interpret OUTER Vars in their * tlists according to one of the children, and the first one is the most * natural choice. Likewise special-case ModifyTable to pretend that the * first child plan is the OUTER referent; this is to support RETURNING * lists containing references to non-target relations. */ if (IsA(ps, AppendState)) dpns->outer_planstate = ((AppendState *) ps)->appendplans[0]; else if (IsA(ps, MergeAppendState)) dpns->outer_planstate = ((MergeAppendState *) ps)->mergeplans[0]; else if (IsA(ps, ModifyTableState)) dpns->outer_planstate = ((ModifyTableState *) ps)->mt_plans[0]; else dpns->outer_planstate = outerPlanState(ps); if (dpns->outer_planstate) dpns->outer_tlist = dpns->outer_planstate->plan->targetlist; else dpns->outer_tlist = NIL; /* * For a SubqueryScan, pretend the subplan is INNER referent. (We don't * use OUTER because that could someday conflict with the normal meaning.) * Likewise, for a CteScan, pretend the subquery's plan is INNER referent. * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the * excluded expression's tlist. (Similar to the SubqueryScan we don't want * to reuse OUTER, it's used for RETURNING in some modify table cases, * although not INSERT .. CONFLICT). */ if (IsA(ps, SubqueryScanState)) dpns->inner_planstate = ((SubqueryScanState *) ps)->subplan; else if (IsA(ps, CteScanState)) dpns->inner_planstate = ((CteScanState *) ps)->cteplanstate; else if (IsA(ps, ModifyTableState)) dpns->inner_planstate = ps; else dpns->inner_planstate = innerPlanState(ps); if (IsA(ps, ModifyTableState)) dpns->inner_tlist = ((ModifyTableState *) ps)->mt_excludedtlist; else if (dpns->inner_planstate) dpns->inner_tlist = dpns->inner_planstate->plan->targetlist; else dpns->inner_tlist = NIL; /* Set up referent for INDEX_VAR Vars, if needed */ if (IsA(ps->plan, IndexOnlyScan)) dpns->index_tlist = ((IndexOnlyScan *) ps->plan)->indextlist; else if (IsA(ps->plan, ForeignScan)) dpns->index_tlist = ((ForeignScan *) ps->plan)->fdw_scan_tlist; else if (IsA(ps->plan, CustomScan)) dpns->index_tlist = ((CustomScan *) ps->plan)->custom_scan_tlist; else dpns->index_tlist = NIL; } /* * push_child_plan: temporarily transfer deparsing attention to a child plan * * When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the * deparse context in case the referenced expression itself uses * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid * affecting levelsup issues (although in a Plan tree there really shouldn't * be any). * * Caller must provide a local deparse_namespace variable to save the * previous state for pop_child_plan. 
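 *
 * A minimal usage sketch (identifiers are assumed, not taken from a caller
 * in this file):
 *
 *		deparse_namespace save_dpns;
 *
 *		push_child_plan(dpns, dpns->outer_planstate, &save_dpns);
 *		... deparse the OUTER_VAR expression here ...
 *		pop_child_plan(dpns, &save_dpns);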
*/ static void push_child_plan(deparse_namespace *dpns, PlanState *ps, deparse_namespace *save_dpns) { /* Save state for restoration later */ *save_dpns = *dpns; /* Link current plan node into ancestors list */ dpns->ancestors = lcons(dpns->planstate, dpns->ancestors); /* Set attention on selected child */ set_deparse_planstate(dpns, ps); } /* * pop_child_plan: undo the effects of push_child_plan */ static void pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) { List *ancestors; /* Get rid of ancestors list cell added by push_child_plan */ ancestors = list_delete_first(dpns->ancestors); /* Restore fields changed by push_child_plan */ *dpns = *save_dpns; /* Make sure dpns->ancestors is right (may be unnecessary) */ dpns->ancestors = ancestors; } /* * push_ancestor_plan: temporarily transfer deparsing attention to an * ancestor plan * * When expanding a Param reference, we must adjust the deparse context * to match the plan node that contains the expression being printed; * otherwise we'd fail if that expression itself contains a Param or * OUTER_VAR/INNER_VAR/INDEX_VAR variable. * * The target ancestor is conveniently identified by the ListCell holding it * in dpns->ancestors. * * Caller must provide a local deparse_namespace variable to save the * previous state for pop_ancestor_plan. */ static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, deparse_namespace *save_dpns) { PlanState *ps = (PlanState *) lfirst(ancestor_cell); List *ancestors; /* Save state for restoration later */ *save_dpns = *dpns; /* Build a new ancestor list with just this node's ancestors */ ancestors = NIL; while ((ancestor_cell = lnext(ancestor_cell)) != NULL) ancestors = lappend(ancestors, lfirst(ancestor_cell)); dpns->ancestors = ancestors; /* Set attention on selected ancestor */ set_deparse_planstate(dpns, ps); } /* * pop_ancestor_plan: undo the effects of push_ancestor_plan */ static void pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) { /* Free the ancestor list made in push_ancestor_plan */ list_free(dpns->ancestors); /* Restore fields changed by push_ancestor_plan */ *dpns = *save_dpns; } /* ---------- * deparse_shard_query - Parse back a query for execution on a shard * * Builds an SQL string to perform the provided query on a specific shard and * places this string into the provided buffer. * ---------- */ void deparse_shard_query(Query *query, Oid distrelid, int64 shardid, StringInfo buffer) { get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL, 0, WRAP_COLUMN_DEFAULT, 0); } /* ---------- * get_query_def - Parse back one query parsetree * * If resultDesc is not NULL, then it is the output tuple descriptor for * the view represented by a SELECT query. * ---------- */ static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent) { get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc, prettyFlags, wrapColumn, startIndent); } /* ---------- * get_query_def_extended - Parse back one query parsetree, optionally * with extension using a shard identifier. * * If distrelid is valid and shardid is positive, the provided shardid is added * any time the provided relid is deparsed, so that the query may be executed * on a placement for the given shard. 
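 *
 * As a hedged illustration (table name and shard id assumed): deparsing
 * SELECT * FROM users with distrelid = 'users'::regclass and shardid =
 * 102008 would be expected to reference the shard relation (for instance
 * public.users_102008) wherever the distributed table would otherwise be
 * printed.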
* ---------- */ static void get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, Oid distrelid, int64 shardid, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent) { deparse_context context; deparse_namespace dpns; OverrideSearchPath *overridePath = NULL; /* Guard against excessively long or deeply-nested queries */ CHECK_FOR_INTERRUPTS(); check_stack_depth(); /* * Before we begin to examine the query, acquire locks on referenced * relations, and fix up deleted columns in JOIN RTEs. This ensures * consistent results. Note we assume it's OK to scribble on the passed * querytree! * * We are only deparsing the query (we are not about to execute it), so we * only need AccessShareLock on the relations it mentions. */ AcquireRewriteLocks(query, false, false); /* * Set search_path to NIL so that all objects outside of pg_catalog will be * schema-prefixed. pg_catalog will be added automatically when we call * PushOverrideSearchPath(), since we set addCatalog to true; */ overridePath = GetOverrideSearchPath(CurrentMemoryContext); overridePath->schemas = NIL; overridePath->addCatalog = true; PushOverrideSearchPath(overridePath); context.buf = buf; context.namespaces = lcons(&dpns, list_copy(parentnamespace)); context.windowClause = NIL; context.windowTList = NIL; context.varprefix = (parentnamespace != NIL || list_length(query->rtable) != 1); context.prettyFlags = prettyFlags; context.wrapColumn = wrapColumn; context.indentLevel = startIndent; context.special_exprkind = EXPR_KIND_NONE; context.distrelid = distrelid; context.shardid = shardid; set_deparse_for_query(&dpns, query, parentnamespace); switch (query->commandType) { case CMD_SELECT: get_select_query_def(query, &context, resultDesc); break; case CMD_UPDATE: get_update_query_def(query, &context); break; case CMD_INSERT: get_insert_query_def(query, &context); break; case CMD_DELETE: get_delete_query_def(query, &context); break; case CMD_NOTHING: appendStringInfoString(buf, "NOTHING"); break; case CMD_UTILITY: get_utility_query_def(query, &context); break; default: elog(ERROR, "unrecognized query command type: %d", query->commandType); break; } /* revert back to original search_path */ PopOverrideSearchPath(); } /* ---------- * get_values_def - Parse back a VALUES list * ---------- */ static void get_values_def(List *values_lists, deparse_context *context) { StringInfo buf = context->buf; bool first_list = true; ListCell *vtl; appendStringInfoString(buf, "VALUES "); foreach(vtl, values_lists) { List *sublist = (List *) lfirst(vtl); bool first_col = true; ListCell *lc; if (first_list) first_list = false; else appendStringInfoString(buf, ", "); appendStringInfoChar(buf, '('); foreach(lc, sublist) { Node *col = (Node *) lfirst(lc); if (first_col) first_col = false; else appendStringInfoChar(buf, ','); /* * Print the value. Whole-row Vars need special treatment. 
*/ get_rule_expr_toplevel(col, context, false); } appendStringInfoChar(buf, ')'); } } /* ---------- * get_with_clause - Parse back a WITH clause * ---------- */ static void get_with_clause(Query *query, deparse_context *context) { StringInfo buf = context->buf; const char *sep; ListCell *l; if (query->cteList == NIL) return; if (PRETTY_INDENT(context)) { context->indentLevel += PRETTYINDENT_STD; appendStringInfoChar(buf, ' '); } if (query->hasRecursive) sep = "WITH RECURSIVE "; else sep = "WITH "; foreach(l, query->cteList) { CommonTableExpr *cte = (CommonTableExpr *) lfirst(l); appendStringInfoString(buf, sep); appendStringInfoString(buf, quote_identifier(cte->ctename)); if (cte->aliascolnames) { bool first = true; ListCell *col; appendStringInfoChar(buf, '('); foreach(col, cte->aliascolnames) { if (first) first = false; else appendStringInfoString(buf, ", "); appendStringInfoString(buf, quote_identifier(strVal(lfirst(col)))); } appendStringInfoChar(buf, ')'); } appendStringInfoString(buf, " AS ("); if (PRETTY_INDENT(context)) appendContextKeyword(context, "", 0, 0, 0); get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); if (PRETTY_INDENT(context)) appendContextKeyword(context, "", 0, 0, 0); appendStringInfoChar(buf, ')'); sep = ", "; } if (PRETTY_INDENT(context)) { context->indentLevel -= PRETTYINDENT_STD; appendContextKeyword(context, "", 0, 0, 0); } else appendStringInfoChar(buf, ' '); } /* ---------- * get_select_query_def - Parse back a SELECT parsetree * ---------- */ static void get_select_query_def(Query *query, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; List *save_windowclause; List *save_windowtlist; bool force_colno; ListCell *l; /* Insert the WITH clause if given */ get_with_clause(query, context); /* Set up context for possible window functions */ save_windowclause = context->windowClause; context->windowClause = query->windowClause; save_windowtlist = context->windowTList; context->windowTList = query->targetList; /* * If the Query node has a setOperations tree, then it's the top level of * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT * fields are interesting in the top query itself. 
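	 *
	 * (Illustrative expectation: for a query such as
	 * SELECT a FROM t1 UNION SELECT a FROM t2 ORDER BY a, the ORDER BY is
	 * deparsed against the set operation's output by position, so it would
	 * come out as ORDER BY 1.)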
*/ if (query->setOperations) { get_setop_query(query->setOperations, query, context, resultDesc); /* ORDER BY clauses must be simple in this case */ force_colno = true; } else { get_basic_select_query(query, context, resultDesc); force_colno = false; } /* Add the ORDER BY clause if given */ if (query->sortClause != NIL) { appendContextKeyword(context, " ORDER BY ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_orderby(query->sortClause, query->targetList, force_colno, context); } /* Add the LIMIT clause if given */ if (query->limitOffset != NULL) { appendContextKeyword(context, " OFFSET ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); get_rule_expr(query->limitOffset, context, false); } if (query->limitCount != NULL) { appendContextKeyword(context, " LIMIT ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); if (IsA(query->limitCount, Const) && ((Const *) query->limitCount)->constisnull) appendStringInfoString(buf, "ALL"); else get_rule_expr(query->limitCount, context, false); } /* Add FOR [KEY] UPDATE/SHARE clauses if present */ if (query->hasForUpdate) { foreach(l, query->rowMarks) { RowMarkClause *rc = (RowMarkClause *) lfirst(l); /* don't print implicit clauses */ if (rc->pushedDown) continue; switch (rc->strength) { case LCS_NONE: /* we intentionally throw an error for LCS_NONE */ elog(ERROR, "unrecognized LockClauseStrength %d", (int) rc->strength); break; case LCS_FORKEYSHARE: appendContextKeyword(context, " FOR KEY SHARE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORSHARE: appendContextKeyword(context, " FOR SHARE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORNOKEYUPDATE: appendContextKeyword(context, " FOR NO KEY UPDATE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORUPDATE: appendContextKeyword(context, " FOR UPDATE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; } appendStringInfo(buf, " OF %s", quote_identifier(get_rtable_name(rc->rti, context))); if (rc->waitPolicy == LockWaitError) appendStringInfoString(buf, " NOWAIT"); else if (rc->waitPolicy == LockWaitSkip) appendStringInfoString(buf, " SKIP LOCKED"); } } context->windowClause = save_windowclause; context->windowTList = save_windowtlist; } /* * Detect whether query looks like SELECT ... FROM VALUES(); * if so, return the VALUES RTE. Otherwise return NULL. */ static RangeTblEntry * get_simple_values_rte(Query *query) { RangeTblEntry *result = NULL; ListCell *lc; /* * We want to return TRUE even if the Query also contains OLD or NEW rule * RTEs. So the idea is to scan the rtable and see if there is only one * inFromCl RTE that is a VALUES RTE. */ foreach(lc, query->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); if (rte->rtekind == RTE_VALUES && rte->inFromCl) { if (result) return NULL; /* multiple VALUES (probably not possible) */ result = rte; } else if (rte->rtekind == RTE_RELATION && !rte->inFromCl) continue; /* ignore rule entries */ else return NULL; /* something else -> not simple VALUES */ } /* * We don't need to check the targetlist in any great detail, because * parser/analyze.c will never generate a "bare" VALUES RTE --- they only * appear inside auto-generated sub-queries with very restricted * structure. However, DefineView might have modified the tlist by * injecting new column aliases; so compare tlist resnames against the * RTE's names to detect that. 
*/ if (result) { ListCell *lcn; if (list_length(query->targetList) != list_length(result->eref->colnames)) return NULL; /* this probably cannot happen */ forboth(lc, query->targetList, lcn, result->eref->colnames) { TargetEntry *tle = (TargetEntry *) lfirst(lc); char *cname = strVal(lfirst(lcn)); if (tle->resjunk) return NULL; /* this probably cannot happen */ if (tle->resname == NULL || strcmp(tle->resname, cname) != 0) return NULL; /* column name has been changed */ } } return result; } static void get_basic_select_query(Query *query, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; RangeTblEntry *values_rte; char *sep; ListCell *l; if (PRETTY_INDENT(context)) { context->indentLevel += PRETTYINDENT_STD; appendStringInfoChar(buf, ' '); } /* * If the query looks like SELECT * FROM (VALUES ...), then print just the * VALUES part. This reverses what transformValuesClause() did at parse * time. */ values_rte = get_simple_values_rte(query); if (values_rte) { get_values_def(values_rte->values_lists, context); return; } /* * Build up the query string - first we say SELECT */ appendStringInfoString(buf, "SELECT"); /* Add the DISTINCT clause if given */ if (query->distinctClause != NIL) { if (query->hasDistinctOn) { appendStringInfoString(buf, " DISTINCT ON ("); sep = ""; foreach(l, query->distinctClause) { SortGroupClause *srt = (SortGroupClause *) lfirst(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList, false, context); sep = ", "; } appendStringInfoChar(buf, ')'); } else appendStringInfoString(buf, " DISTINCT"); } /* Then we tell what to select (the targetlist) */ get_target_list(query->targetList, context, resultDesc); /* Add the FROM clause if needed */ get_from_clause(query, " FROM ", context); /* Add the WHERE clause if given */ if (query->jointree->quals != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(query->jointree->quals, context, false); } /* Add the GROUP BY clause if given */ if (query->groupClause != NULL || query->groupingSets != NULL) { ParseExprKind save_exprkind; appendContextKeyword(context, " GROUP BY ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); save_exprkind = context->special_exprkind; context->special_exprkind = EXPR_KIND_GROUP_BY; if (query->groupingSets == NIL) { sep = ""; foreach(l, query->groupClause) { SortGroupClause *grp = (SortGroupClause *) lfirst(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList, false, context); sep = ", "; } } else { sep = ""; foreach(l, query->groupingSets) { GroupingSet *grp = lfirst(l); appendStringInfoString(buf, sep); get_rule_groupingset(grp, query->targetList, true, context); sep = ", "; } } context->special_exprkind = save_exprkind; } /* Add the HAVING clause if given */ if (query->havingQual != NULL) { appendContextKeyword(context, " HAVING ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); get_rule_expr(query->havingQual, context, false); } /* Add the WINDOW clause if needed */ if (query->windowClause != NIL) get_rule_windowclause(query, context); } /* ---------- * get_target_list - Parse back a SELECT target list * * This is also used for RETURNING lists in INSERT/UPDATE/DELETE. 
* ---------- */ static void get_target_list(List *targetList, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; StringInfoData targetbuf; bool last_was_multiline = false; char *sep; int colno; ListCell *l; /* we use targetbuf to hold each TLE's text temporarily */ initStringInfo(&targetbuf); sep = " "; colno = 0; foreach(l, targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); char *colname; char *attname; if (tle->resjunk) continue; /* ignore junk entries */ appendStringInfoString(buf, sep); sep = ", "; colno++; /* * Put the new field text into targetbuf so we can decide after we've * got it whether or not it needs to go on a new line. */ resetStringInfo(&targetbuf); context->buf = &targetbuf; /* * We special-case Var nodes rather than using get_rule_expr. This is * needed because get_rule_expr will display a whole-row Var as * "foo.*", which is the preferred notation in most contexts, but at * the top level of a SELECT list it's not right (the parser will * expand that notation into multiple columns, yielding behavior * different from a whole-row Var). We need to call get_variable * directly so that we can tell it to do the right thing, and so that * we can get the attribute name which is the default AS label. */ if (tle->expr && (IsA(tle->expr, Var))) { attname = get_variable((Var *) tle->expr, 0, true, context); } else { get_rule_expr((Node *) tle->expr, context, true); /* We'll show the AS name unless it's this: */ attname = "?column?"; } /* * Figure out what the result column should be called. In the context * of a view, use the view's tuple descriptor (so as to pick up the * effects of any column RENAME that's been done on the view). * Otherwise, just use what we can find in the TLE. */ if (resultDesc && colno <= resultDesc->natts) colname = NameStr(resultDesc->attrs[colno - 1]->attname); else colname = tle->resname; /* Show AS unless the column's name is correct as-is */ if (colname) /* resname could be NULL */ { if (attname == NULL || strcmp(attname, colname) != 0) appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname)); } /* Restore context's output buffer */ context->buf = buf; /* Consider line-wrapping if enabled */ if (PRETTY_INDENT(context) && context->wrapColumn >= 0) { int leading_nl_pos; /* Does the new field start with a new line? */ if (targetbuf.len > 0 && targetbuf.data[0] == '\n') leading_nl_pos = 0; else leading_nl_pos = -1; /* If so, we shouldn't add anything */ if (leading_nl_pos >= 0) { /* instead, remove any trailing spaces currently in buf */ removeStringInfoSpaces(buf); } else { char *trailing_nl; /* Locate the start of the current line in the output buffer */ trailing_nl = strrchr(buf->data, '\n'); if (trailing_nl == NULL) trailing_nl = buf->data; else trailing_nl++; /* * Add a newline, plus some indentation, if the new field is * not the first and either the new field would cause an * overflow or the last field used more than one line. 
*/ if (colno > 1 && ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) || last_was_multiline)) appendContextKeyword(context, "", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_VAR); } /* Remember this field's multiline status for next iteration */ last_was_multiline = (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL); } /* Add the new field */ appendStringInfoString(buf, targetbuf.data); } /* clean up */ pfree(targetbuf.data); } static void get_setop_query(Node *setOp, Query *query, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; bool need_paren; /* Guard against excessively long or deeply-nested queries */ CHECK_FOR_INTERRUPTS(); check_stack_depth(); if (IsA(setOp, RangeTblRef)) { RangeTblRef *rtr = (RangeTblRef *) setOp; RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable); Query *subquery = rte->subquery; Assert(subquery != NULL); Assert(subquery->setOperations == NULL); /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */ need_paren = (subquery->cteList || subquery->sortClause || subquery->rowMarks || subquery->limitOffset || subquery->limitCount); if (need_paren) appendStringInfoChar(buf, '('); get_query_def(subquery, buf, context->namespaces, resultDesc, context->prettyFlags, context->wrapColumn, context->indentLevel); if (need_paren) appendStringInfoChar(buf, ')'); } else if (IsA(setOp, SetOperationStmt)) { SetOperationStmt *op = (SetOperationStmt *) setOp; int subindent; /* * We force parens when nesting two SetOperationStmts, except when the * lefthand input is another setop of the same kind. Syntactically, * we could omit parens in rather more cases, but it seems best to use * parens to flag cases where the setop operator changes. If we use * parens, we also increase the indentation level for the child query. * * There are some cases in which parens are needed around a leaf query * too, but those are more easily handled at the next level down (see * code above). */ if (IsA(op->larg, SetOperationStmt)) { SetOperationStmt *lop = (SetOperationStmt *) op->larg; if (op->op == lop->op && op->all == lop->all) need_paren = false; else need_paren = true; } else need_paren = false; if (need_paren) { appendStringInfoChar(buf, '('); subindent = PRETTYINDENT_STD; appendContextKeyword(context, "", subindent, 0, 0); } else subindent = 0; get_setop_query(op->larg, query, context, resultDesc); if (need_paren) appendContextKeyword(context, ") ", -subindent, 0, 0); else if (PRETTY_INDENT(context)) appendContextKeyword(context, "", -subindent, 0, 0); else appendStringInfoChar(buf, ' '); switch (op->op) { case SETOP_UNION: appendStringInfoString(buf, "UNION "); break; case SETOP_INTERSECT: appendStringInfoString(buf, "INTERSECT "); break; case SETOP_EXCEPT: appendStringInfoString(buf, "EXCEPT "); break; default: elog(ERROR, "unrecognized set op: %d", (int) op->op); } if (op->all) appendStringInfoString(buf, "ALL "); /* Always parenthesize if RHS is another setop */ need_paren = IsA(op->rarg, SetOperationStmt); /* * The indentation code here is deliberately a bit different from that * for the lefthand input, because we want the line breaks in * different places. 
*/ if (need_paren) { appendStringInfoChar(buf, '('); subindent = PRETTYINDENT_STD; } else subindent = 0; appendContextKeyword(context, "", subindent, 0, 0); get_setop_query(op->rarg, query, context, resultDesc); if (PRETTY_INDENT(context)) context->indentLevel -= subindent; if (need_paren) appendContextKeyword(context, ")", 0, 0, 0); } else { elog(ERROR, "unrecognized node type: %d", (int) nodeTag(setOp)); } } /* * Display a sort/group clause. * * Also returns the expression tree, so caller need not find it again. */ static Node * get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, deparse_context *context) { StringInfo buf = context->buf; TargetEntry *tle; Node *expr; tle = get_sortgroupref_tle(ref, tlist); expr = (Node *) tle->expr; /* * Use column-number form if requested by caller. Otherwise, if * expression is a constant, force it to be dumped with an explicit cast * as decoration --- this is because a simple integer constant is * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we * dump it without any decoration. If it's anything more complex than a * simple Var, then force extra parens around it, to ensure it can't be * misinterpreted as a cube() or rollup() construct. */ if (force_colno) { Assert(!tle->resjunk); appendStringInfo(buf, "%d", tle->resno); } else if (expr && IsA(expr, Const)) get_const_expr((Const *) expr, context, 1); else if (!expr || IsA(expr, Var)) get_rule_expr(expr, context, true); else { /* * We must force parens for function-like expressions even if * PRETTY_PAREN is off, since those are the ones in danger of * misparsing. For other expressions we need to force them only if * PRETTY_PAREN is on, since otherwise the expression will output them * itself. (We can't skip the parens.) */ bool need_paren = (PRETTY_PAREN(context) || IsA(expr, FuncExpr) ||IsA(expr, Aggref) ||IsA(expr, WindowFunc)); if (need_paren) appendStringInfoString(context->buf, "("); get_rule_expr(expr, context, true); if (need_paren) appendStringInfoString(context->buf, ")"); } return expr; } /* * Display a GroupingSet */ static void get_rule_groupingset(GroupingSet *gset, List *targetlist, bool omit_parens, deparse_context *context) { ListCell *l; StringInfo buf = context->buf; bool omit_child_parens = true; char *sep = ""; switch (gset->kind) { case GROUPING_SET_EMPTY: appendStringInfoString(buf, "()"); return; case GROUPING_SET_SIMPLE: { if (!omit_parens || list_length(gset->content) != 1) appendStringInfoString(buf, "("); foreach(l, gset->content) { Index ref = lfirst_int(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(ref, targetlist, false, context); sep = ", "; } if (!omit_parens || list_length(gset->content) != 1) appendStringInfoString(buf, ")"); } return; case GROUPING_SET_ROLLUP: appendStringInfoString(buf, "ROLLUP("); break; case GROUPING_SET_CUBE: appendStringInfoString(buf, "CUBE("); break; case GROUPING_SET_SETS: appendStringInfoString(buf, "GROUPING SETS ("); omit_child_parens = false; break; } foreach(l, gset->content) { appendStringInfoString(buf, sep); get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context); sep = ", "; } appendStringInfoString(buf, ")"); } /* * Display an ORDER BY list. 
*/ static void get_rule_orderby(List *orderList, List *targetList, bool force_colno, deparse_context *context) { StringInfo buf = context->buf; const char *sep; ListCell *l; sep = ""; foreach(l, orderList) { SortGroupClause *srt = (SortGroupClause *) lfirst(l); Node *sortexpr; Oid sortcoltype; TypeCacheEntry *typentry; appendStringInfoString(buf, sep); sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, force_colno, context); sortcoltype = exprType(sortexpr); /* See whether operator is default < or > for datatype */ typentry = lookup_type_cache(sortcoltype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); if (srt->sortop == typentry->lt_opr) { /* ASC is default, so emit nothing for it */ if (srt->nulls_first) appendStringInfoString(buf, " NULLS FIRST"); } else if (srt->sortop == typentry->gt_opr) { appendStringInfoString(buf, " DESC"); /* DESC defaults to NULLS FIRST */ if (!srt->nulls_first) appendStringInfoString(buf, " NULLS LAST"); } else { appendStringInfo(buf, " USING %s", generate_operator_name(srt->sortop, sortcoltype, sortcoltype)); /* be specific to eliminate ambiguity */ if (srt->nulls_first) appendStringInfoString(buf, " NULLS FIRST"); else appendStringInfoString(buf, " NULLS LAST"); } sep = ", "; } } /* * Display a WINDOW clause. * * Note that the windowClause list might contain only anonymous window * specifications, in which case we should print nothing here. */ static void get_rule_windowclause(Query *query, deparse_context *context) { StringInfo buf = context->buf; const char *sep; ListCell *l; sep = NULL; foreach(l, query->windowClause) { WindowClause *wc = (WindowClause *) lfirst(l); if (wc->name == NULL) continue; /* ignore anonymous windows */ if (sep == NULL) appendContextKeyword(context, " WINDOW ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); else appendStringInfoString(buf, sep); appendStringInfo(buf, "%s AS ", quote_identifier(wc->name)); get_rule_windowspec(wc, query->targetList, context); sep = ", "; } } /* * Display a window definition */ static void get_rule_windowspec(WindowClause *wc, List *targetList, deparse_context *context) { StringInfo buf = context->buf; bool needspace = false; const char *sep; ListCell *l; appendStringInfoChar(buf, '('); if (wc->refname) { appendStringInfoString(buf, quote_identifier(wc->refname)); needspace = true; } /* partition clauses are always inherited, so only print if no refname */ if (wc->partitionClause && !wc->refname) { if (needspace) appendStringInfoChar(buf, ' '); appendStringInfoString(buf, "PARTITION BY "); sep = ""; foreach(l, wc->partitionClause) { SortGroupClause *grp = (SortGroupClause *) lfirst(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); sep = ", "; } needspace = true; } /* print ordering clause only if not inherited */ if (wc->orderClause && !wc->copiedOrder) { if (needspace) appendStringInfoChar(buf, ' '); appendStringInfoString(buf, "ORDER BY "); get_rule_orderby(wc->orderClause, targetList, false, context); needspace = true; } /* framing clause is never inherited, so print unless it's default */ if (wc->frameOptions & FRAMEOPTION_NONDEFAULT) { if (needspace) appendStringInfoChar(buf, ' '); if (wc->frameOptions & FRAMEOPTION_RANGE) appendStringInfoString(buf, "RANGE "); else if (wc->frameOptions & FRAMEOPTION_ROWS) appendStringInfoString(buf, "ROWS "); else Assert(false); if (wc->frameOptions & FRAMEOPTION_BETWEEN) appendStringInfoString(buf, "BETWEEN "); if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) 
appendStringInfoString(buf, "UNBOUNDED PRECEDING "); else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW) appendStringInfoString(buf, "CURRENT ROW "); else if (wc->frameOptions & FRAMEOPTION_START_VALUE) { get_rule_expr(wc->startOffset, context, false); if (wc->frameOptions & FRAMEOPTION_START_VALUE_PRECEDING) appendStringInfoString(buf, " PRECEDING "); else if (wc->frameOptions & FRAMEOPTION_START_VALUE_FOLLOWING) appendStringInfoString(buf, " FOLLOWING "); else Assert(false); } else Assert(false); if (wc->frameOptions & FRAMEOPTION_BETWEEN) { appendStringInfoString(buf, "AND "); if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) appendStringInfoString(buf, "UNBOUNDED FOLLOWING "); else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW) appendStringInfoString(buf, "CURRENT ROW "); else if (wc->frameOptions & FRAMEOPTION_END_VALUE) { get_rule_expr(wc->endOffset, context, false); if (wc->frameOptions & FRAMEOPTION_END_VALUE_PRECEDING) appendStringInfoString(buf, " PRECEDING "); else if (wc->frameOptions & FRAMEOPTION_END_VALUE_FOLLOWING) appendStringInfoString(buf, " FOLLOWING "); else Assert(false); } else Assert(false); } /* we will now have a trailing space; remove it */ buf->len--; } appendStringInfoChar(buf, ')'); } /* ---------- * get_insert_query_def - Parse back an INSERT parsetree * ---------- */ static void get_insert_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *select_rte = NULL; RangeTblEntry *values_rte = NULL; RangeTblEntry *rte; char *sep; ListCell *l; List *strippedexprs; /* Insert the WITH clause if given */ get_with_clause(query, context); /* * If it's an INSERT ... SELECT or multi-row VALUES, there will be a * single RTE for the SELECT or VALUES. Plain VALUES has neither. */ foreach(l, query->rtable) { rte = (RangeTblEntry *) lfirst(l); if (rte->rtekind == RTE_SUBQUERY) { if (select_rte) elog(ERROR, "too many subquery RTEs in INSERT"); select_rte = rte; } if (rte->rtekind == RTE_VALUES) { if (values_rte) elog(ERROR, "too many values RTEs in INSERT"); values_rte = rte; } } if (select_rte && values_rte) elog(ERROR, "both subquery and values RTEs in INSERT"); /* * Start the query with INSERT INTO relname */ rte = rt_fetch(query->resultRelation, query->rtable); Assert(rte->rtekind == RTE_RELATION); if (PRETTY_INDENT(context)) { context->indentLevel += PRETTYINDENT_STD; appendStringInfoChar(buf, ' '); } appendStringInfo(buf, "INSERT INTO %s ", generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, NIL)); /* INSERT requires AS keyword for target alias */ if (rte->alias != NULL) appendStringInfo(buf, "AS %s ", quote_identifier(rte->alias->aliasname)); /* * Add the insert-column-names list. Any indirection decoration needed on * the column names can be inferred from the top targetlist. */ strippedexprs = NIL; sep = ""; if (query->targetList) appendStringInfoChar(buf, '('); foreach(l, query->targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); if (tle->resjunk) continue; /* ignore junk entries */ appendStringInfoString(buf, sep); sep = ", "; /* * Put out name of target column; look in the catalogs, not at * tle->resname, since resname will fail to track RENAME. */ appendStringInfoString(buf, quote_identifier(get_relid_attribute_name(rte->relid, tle->resno))); /* * Print any indirection needed (subfields or subscripts), and strip * off the top-level nodes representing the indirection assignments. * Add the stripped expressions to strippedexprs. 
(If it's a * single-VALUES statement, the stripped expressions are the VALUES to * print below. Otherwise they're just Vars and not really * interesting.) */ strippedexprs = lappend(strippedexprs, processIndirection((Node *) tle->expr, context)); } if (query->targetList) appendStringInfoString(buf, ") "); if (query->override) { if (query->override == OVERRIDING_SYSTEM_VALUE) appendStringInfoString(buf, "OVERRIDING SYSTEM VALUE "); else if (query->override == OVERRIDING_USER_VALUE) appendStringInfoString(buf, "OVERRIDING USER VALUE "); } if (select_rte) { /* Add the SELECT */ get_query_def(select_rte->subquery, buf, NIL, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); } else if (values_rte) { /* Add the multi-VALUES expression lists */ get_values_def(values_rte->values_lists, context); } else if (strippedexprs) { /* Add the single-VALUES expression list */ appendContextKeyword(context, "VALUES (", -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); get_rule_expr((Node *) strippedexprs, context, false); appendStringInfoChar(buf, ')'); } else { /* No expressions, so it must be DEFAULT VALUES */ appendStringInfoString(buf, "DEFAULT VALUES"); } /* Add ON CONFLICT if present */ if (query->onConflict) { OnConflictExpr *confl = query->onConflict; appendStringInfoString(buf, " ON CONFLICT"); if (confl->arbiterElems) { /* Add the single-VALUES expression list */ appendStringInfoChar(buf, '('); get_rule_expr((Node *) confl->arbiterElems, context, false); appendStringInfoChar(buf, ')'); /* Add a WHERE clause (for partial indexes) if given */ if (confl->arbiterWhere != NULL) { bool save_varprefix; /* * Force non-prefixing of Vars, since parser assumes that they * belong to target relation. WHERE clause does not use * InferenceElem, so this is separately required. 
*/ save_varprefix = context->varprefix; context->varprefix = false; appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(confl->arbiterWhere, context, false); context->varprefix = save_varprefix; } } else if (OidIsValid(confl->constraint)) { char *constraint = get_constraint_name(confl->constraint); int64 shardId = context->shardid; /* report a failed lookup before appending the shard ID to the name */ if (!constraint) elog(ERROR, "cache lookup failed for constraint %u", confl->constraint); if (shardId > 0) { AppendShardIdToName(&constraint, shardId); } appendStringInfo(buf, " ON CONSTRAINT %s", quote_identifier(constraint)); } if (confl->action == ONCONFLICT_NOTHING) { appendStringInfoString(buf, " DO NOTHING"); } else { appendStringInfoString(buf, " DO UPDATE SET "); /* Deparse targetlist */ get_update_query_targetlist_def(query, confl->onConflictSet, context, rte); /* Add a WHERE clause if given */ if (confl->onConflictWhere != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(confl->onConflictWhere, context, false); } } } /* Add RETURNING if present */ if (query->returningList) { appendContextKeyword(context, " RETURNING", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_target_list(query->returningList, context, NULL); } } /* ---------- * get_update_query_def - Parse back an UPDATE parsetree * ---------- */ static void get_update_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *rte; /* Insert the WITH clause if given */ get_with_clause(query, context); /* * Start the query with UPDATE relname SET */ rte = rt_fetch(query->resultRelation, query->rtable); if (PRETTY_INDENT(context)) { appendStringInfoChar(buf, ' '); context->indentLevel += PRETTYINDENT_STD; } /* if it's a shard, do differently */ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { char *fragmentSchemaName = NULL; char *fragmentTableName = NULL; ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); /* use schema and table name from the remote alias */ appendStringInfo(buf, "UPDATE %s%s", only_marker(rte), generate_fragment_name(fragmentSchemaName, fragmentTableName)); if (rte->eref != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->eref->aliasname)); } else { appendStringInfo(buf, "UPDATE %s%s", only_marker(rte), generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, NIL)); if (rte->alias != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->alias->aliasname)); } appendStringInfoString(buf, " SET "); /* Deparse targetlist */ get_update_query_targetlist_def(query, query->targetList, context, rte); /* Add the FROM clause if needed */ get_from_clause(query, " FROM ", context); /* Add a WHERE clause if given */ if (query->jointree->quals != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(query->jointree->quals, context, false); } /* Add RETURNING if present */ if (query->returningList) { appendContextKeyword(context, " RETURNING", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_target_list(query->returningList, context, NULL); } } /* ---------- * get_update_query_targetlist_def - Parse back an UPDATE targetlist * ---------- */ static void get_update_query_targetlist_def(Query *query, List *targetList, deparse_context *context, RangeTblEntry *rte) { StringInfo buf = context->buf; ListCell *l; ListCell *next_ma_cell; int remaining_ma_columns; const char *sep; SubLink *cur_ma_sublink; List *ma_sublinks; /* * Prepare to deal with MULTIEXPR
assignments: collect the source SubLinks * into a list. We expect them to appear, in ID order, in resjunk tlist * entries. */ ma_sublinks = NIL; if (query->hasSubLinks) /* else there can't be any */ { foreach(l, targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); if (tle->resjunk && IsA(tle->expr, SubLink)) { SubLink *sl = (SubLink *) tle->expr; if (sl->subLinkType == MULTIEXPR_SUBLINK) { ma_sublinks = lappend(ma_sublinks, sl); Assert(sl->subLinkId == list_length(ma_sublinks)); } } } } next_ma_cell = list_head(ma_sublinks); cur_ma_sublink = NULL; remaining_ma_columns = 0; /* Add the comma separated list of 'attname = value' */ sep = ""; foreach(l, targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); Node *expr; if (tle->resjunk) continue; /* ignore junk entries */ /* Emit separator (OK whether we're in multiassignment or not) */ appendStringInfoString(buf, sep); sep = ", "; /* * Check to see if we're starting a multiassignment group: if so, * output a left paren. */ if (next_ma_cell != NULL && cur_ma_sublink == NULL) { /* * We must dig down into the expr to see if it's a PARAM_MULTIEXPR * Param. That could be buried under FieldStores and ArrayRefs * and CoerceToDomains (cf processIndirection()), and underneath * those there could be an implicit type coercion. Because we * would ignore implicit type coercions anyway, we don't need to * be as careful as processIndirection() is about descending past * implicit CoerceToDomains. */ expr = (Node *) tle->expr; while (expr) { if (IsA(expr, FieldStore)) { FieldStore *fstore = (FieldStore *) expr; expr = (Node *) linitial(fstore->newvals); } else if (IsA(expr, ArrayRef)) { ArrayRef *aref = (ArrayRef *) expr; if (aref->refassgnexpr == NULL) break; expr = (Node *) aref->refassgnexpr; } else if (IsA(expr, CoerceToDomain)) { CoerceToDomain *cdomain = (CoerceToDomain *) expr; if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) break; expr = (Node *) cdomain->arg; } else break; } expr = strip_implicit_coercions(expr); if (expr && IsA(expr, Param) && ((Param *) expr)->paramkind == PARAM_MULTIEXPR) { cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); next_ma_cell = lnext(next_ma_cell); remaining_ma_columns = count_nonjunk_tlist_entries( ((Query *) cur_ma_sublink->subselect)->targetList); Assert(((Param *) expr)->paramid == ((cur_ma_sublink->subLinkId << 16) | 1)); appendStringInfoChar(buf, '('); } } /* * Put out name of target column; look in the catalogs, not at * tle->resname, since resname will fail to track RENAME. */ appendStringInfoString(buf, quote_identifier(get_relid_attribute_name(rte->relid, tle->resno))); /* * Print any indirection needed (subfields or subscripts), and strip * off the top-level nodes representing the indirection assignments. */ expr = processIndirection((Node *) tle->expr, context); /* * If we're in a multiassignment, skip printing anything more, unless * this is the last column; in which case, what we print should be the * sublink, not the Param. 
*/ if (cur_ma_sublink != NULL) { if (--remaining_ma_columns > 0) continue; /* not the last column of multiassignment */ appendStringInfoChar(buf, ')'); expr = (Node *) cur_ma_sublink; cur_ma_sublink = NULL; } appendStringInfoString(buf, " = "); get_rule_expr(expr, context, false); } } /* ---------- * get_delete_query_def - Parse back a DELETE parsetree * ---------- */ static void get_delete_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *rte; /* Insert the WITH clause if given */ get_with_clause(query, context); /* * Start the query with DELETE FROM relname */ rte = rt_fetch(query->resultRelation, query->rtable); if (PRETTY_INDENT(context)) { appendStringInfoChar(buf, ' '); context->indentLevel += PRETTYINDENT_STD; } /* if it's a shard, do differently */ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { char *fragmentSchemaName = NULL; char *fragmentTableName = NULL; ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); /* use schema and table name from the remote alias */ appendStringInfo(buf, "DELETE FROM %s%s", only_marker(rte), generate_fragment_name(fragmentSchemaName, fragmentTableName)); if(rte->eref != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->eref->aliasname)); } else { appendStringInfo(buf, "DELETE FROM %s%s", only_marker(rte), generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, NIL)); if (rte->alias != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->alias->aliasname)); } /* Add the USING clause if given */ get_from_clause(query, " USING ", context); /* Add a WHERE clause if given */ if (query->jointree->quals != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(query->jointree->quals, context, false); } /* Add RETURNING if present */ if (query->returningList) { appendContextKeyword(context, " RETURNING", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_target_list(query->returningList, context, NULL); } } /* ---------- * get_utility_query_def - Parse back a UTILITY parsetree * ---------- */ static void get_utility_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt)) { NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt; appendContextKeyword(context, "", 0, PRETTYINDENT_STD, 1); appendStringInfo(buf, "NOTIFY %s", quote_identifier(stmt->conditionname)); if (stmt->payload) { appendStringInfoString(buf, ", "); simple_quote_literal(buf, stmt->payload); } } else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt)) { TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt; List *relationList = stmt->relations; ListCell *relationCell = NULL; appendContextKeyword(context, "", 0, PRETTYINDENT_STD, 1); appendStringInfo(buf, "TRUNCATE TABLE"); foreach(relationCell, relationList) { RangeVar *relationVar = (RangeVar *) lfirst(relationCell); Oid relationId = RangeVarGetRelid(relationVar, NoLock, false); char *relationName = generate_relation_or_shard_name(relationId, context->distrelid, context->shardid, NIL); appendStringInfo(buf, " %s", relationName); if (lnext(relationCell) != NULL) { appendStringInfo(buf, ","); } } if (stmt->restart_seqs) { appendStringInfo(buf, " RESTART IDENTITY"); } if (stmt->behavior == DROP_CASCADE) { appendStringInfo(buf, " CASCADE"); } } else { /* Currently only NOTIFY utility commands can appear in rules */ elog(ERROR, "unexpected utility statement type"); } } /* * Display a Var 
appropriately. * * In some cases (currently only when recursing into an unnamed join) * the Var's varlevelsup has to be interpreted with respect to a context * above the current one; levelsup indicates the offset. * * If istoplevel is TRUE, the Var is at the top level of a SELECT's * targetlist, which means we need special treatment of whole-row Vars. * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a * dirty hack to prevent "tab.*" from being expanded into multiple columns. * (The parser will strip the useless coercion, so no inefficiency is added in * dump and reload.) We used to print just "tab" in such cases, but that is * ambiguous and will yield the wrong result if "tab" is also a plain column * name in the query. * * Returns the attname of the Var, or NULL if the Var has no attname (because * it is a whole-row Var or a subplan output reference). */ static char * get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *rte; AttrNumber attnum; int netlevelsup; deparse_namespace *dpns; deparse_columns *colinfo; char *refname; char *attname; /* Find appropriate nesting depth */ netlevelsup = var->varlevelsup + levelsup; if (netlevelsup >= list_length(context->namespaces)) elog(ERROR, "bogus varlevelsup: %d offset %d", var->varlevelsup, levelsup); dpns = (deparse_namespace *) list_nth(context->namespaces, netlevelsup); /* * Try to find the relevant RTE in this rtable. In a plan tree, it's * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig * down into the subplans, or INDEX_VAR, which is resolved similarly. Also * find the aliases previously assigned for this RTE. */ if (var->varno >= 1 && var->varno <= list_length(dpns->rtable)) { rte = rt_fetch(var->varno, dpns->rtable); refname = (char *) list_nth(dpns->rtable_names, var->varno - 1); colinfo = deparse_columns_fetch(var->varno, dpns); attnum = var->varattno; } else { resolve_special_varno((Node *) var, context, NULL, get_special_variable); return NULL; } /* * The planner will sometimes emit Vars referencing resjunk elements of a * subquery's target list (this is currently only possible if it chooses * to generate a "physical tlist" for a SubqueryScan or CteScan node). * Although we prefer to print subquery-referencing Vars using the * subquery's alias, that's not possible for resjunk items since they have * no alias. So in that case, drill down to the subplan and print the * contents of the referenced tlist item. This works because in a plan * tree, such Vars can only occur in a SubqueryScan or CteScan node, and * we'll have set dpns->inner_planstate to reference the child plan node. */ if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) && attnum > list_length(rte->eref->colnames) && dpns->inner_planstate) { TargetEntry *tle; deparse_namespace save_dpns; tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); if (!tle) elog(ERROR, "invalid attnum %d for relation \"%s\"", var->varattno, rte->eref->aliasname); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); /* * Force parentheses because our caller probably assumed a Var is a * simple expression. */ if (!IsA(tle->expr, Var)) appendStringInfoChar(buf, '('); get_rule_expr((Node *) tle->expr, context, true); if (!IsA(tle->expr, Var)) appendStringInfoChar(buf, ')'); pop_child_plan(dpns, &save_dpns); return NULL; } /* * If it's an unnamed join, look at the expansion of the alias variable. 
* If it's a simple reference to one of the input vars, then recursively * print the name of that var instead. When it's not a simple reference, * we have to just print the unqualified join column name. (This can only * happen with "dangerous" merged columns in a JOIN USING; we took pains * previously to make the unqualified column name unique in such cases.) * * This wouldn't work in decompiling plan trees, because we don't store * joinaliasvars lists after planning; but a plan tree should never * contain a join alias variable. */ if (rte->rtekind == RTE_JOIN && rte->alias == NULL) { if (rte->joinaliasvars == NIL) elog(ERROR, "cannot decompile join alias var in plan tree"); if (attnum > 0) { Var *aliasvar; aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1); /* we intentionally don't strip implicit coercions here */ if (aliasvar && IsA(aliasvar, Var)) { return get_variable(aliasvar, var->varlevelsup + levelsup, istoplevel, context); } } /* * Unnamed join has no refname. (Note: since it's unnamed, there is * no way the user could have referenced it to create a whole-row Var * for it. So we don't have to cover that case below.) */ Assert(refname == NULL); } if (attnum == InvalidAttrNumber) attname = NULL; else if (attnum > 0) { /* Get column name to use from the colinfo struct */ if (attnum > colinfo->num_cols) elog(ERROR, "invalid attnum %d for relation \"%s\"", attnum, rte->eref->aliasname); attname = colinfo->colnames[attnum - 1]; if (attname == NULL) /* dropped column? */ elog(ERROR, "invalid attnum %d for relation \"%s\"", attnum, rte->eref->aliasname); } else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { /* System column on a Citus shard */ attname = get_relid_attribute_name(rte->relid, attnum); } else { /* System column - name is fixed, get it from the catalog */ attname = get_rte_attribute_name(rte, attnum); } if (refname && (context->varprefix || attname == NULL)) { appendStringInfoString(buf, quote_identifier(refname)); appendStringInfoChar(buf, '.'); } if (attname) appendStringInfoString(buf, quote_identifier(attname)); else { appendStringInfoChar(buf, '*'); if (istoplevel) appendStringInfo(buf, "::%s", format_type_with_typemod(var->vartype, var->vartypmod)); } return attname; } /* * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This * routine is actually a callback for get_special_varno, which handles finding * the correct TargetEntry. We get the expression contained in that * TargetEntry and just need to deparse it, a job we can throw back on * get_rule_expr. */ static void get_special_variable(Node *node, deparse_context *context, void *private) { StringInfo buf = context->buf; /* * Force parentheses because our caller probably assumed a Var is a simple * expression. */ if (!IsA(node, Var)) appendStringInfoChar(buf, '('); get_rule_expr(node, context, true); if (!IsA(node, Var)) appendStringInfoChar(buf, ')'); } /* * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR, * INDEX_VAR) until we find a real Var or some kind of non-Var node; then, * invoke the callback provided. */ static void resolve_special_varno(Node *node, deparse_context *context, void *private, void (*callback) (Node *, deparse_context *, void *)) { Var *var; deparse_namespace *dpns; /* If it's not a Var, invoke the callback. */ if (!IsA(node, Var)) { callback(node, context, private); return; } /* Find appropriate nesting depth */ var = (Var *) node; dpns = (deparse_namespace *) list_nth(context->namespaces, var->varlevelsup); /* * It's a special RTE, so recurse. 
*/ if (var->varno == OUTER_VAR && dpns->outer_tlist) { TargetEntry *tle; deparse_namespace save_dpns; tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); push_child_plan(dpns, dpns->outer_planstate, &save_dpns); resolve_special_varno((Node *) tle->expr, context, private, callback); pop_child_plan(dpns, &save_dpns); return; } else if (var->varno == INNER_VAR && dpns->inner_tlist) { TargetEntry *tle; deparse_namespace save_dpns; tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); resolve_special_varno((Node *) tle->expr, context, private, callback); pop_child_plan(dpns, &save_dpns); return; } else if (var->varno == INDEX_VAR && dpns->index_tlist) { TargetEntry *tle; tle = get_tle_by_resno(dpns->index_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); resolve_special_varno((Node *) tle->expr, context, private, callback); return; } else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) elog(ERROR, "bogus varno: %d", var->varno); /* Not special. Just invoke the callback. */ callback(node, context, private); } /* * Get the name of a field of an expression of composite type. The * expression is usually a Var, but we handle other cases too. * * levelsup is an extra offset to interpret the Var's varlevelsup correctly. * * This is fairly straightforward when the expression has a named composite * type; we need only look up the type in the catalogs. However, the type * could also be RECORD. Since no actual table or view column is allowed to * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE * or to a subquery output. We drill down to find the ultimate defining * expression and attempt to infer the field name from it. We ereport if we * can't determine the name. * * Similarly, a PARAM of type RECORD has to refer to some expression of * a determinable composite type. */ static const char * get_name_for_var_field(Var *var, int fieldno, int levelsup, deparse_context *context) { RangeTblEntry *rte; AttrNumber attnum; int netlevelsup; deparse_namespace *dpns; TupleDesc tupleDesc; Node *expr; /* * If it's a RowExpr that was expanded from a whole-row Var, use the * column names attached to it. */ if (IsA(var, RowExpr)) { RowExpr *r = (RowExpr *) var; if (fieldno > 0 && fieldno <= list_length(r->colnames)) return strVal(list_nth(r->colnames, fieldno - 1)); } /* * If it's a Param of type RECORD, try to find what the Param refers to. */ if (IsA(var, Param)) { Param *param = (Param *) var; ListCell *ancestor_cell; expr = find_param_referent(param, context, &dpns, &ancestor_cell); if (expr) { /* Found a match, so recurse to decipher the field name */ deparse_namespace save_dpns; const char *result; push_ancestor_plan(dpns, ancestor_cell, &save_dpns); result = get_name_for_var_field((Var *) expr, fieldno, 0, context); pop_ancestor_plan(dpns, &save_dpns); return result; } } /* * If it's a Var of type RECORD, we have to find what the Var refers to; * if not, we can use get_expr_result_type. If that fails, we try * lookup_rowtype_tupdesc, which will probably fail too, but will ereport * an acceptable message. 
*/ if (!IsA(var, Var) || var->vartype != RECORDOID) { if (get_expr_result_type((Node *) var, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) tupleDesc = lookup_rowtype_tupdesc_copy(exprType((Node *) var), exprTypmod((Node *) var)); Assert(tupleDesc); /* Got the tupdesc, so we can extract the field name */ Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); return NameStr(tupleDesc->attrs[fieldno - 1]->attname); } /* Find appropriate nesting depth */ netlevelsup = var->varlevelsup + levelsup; if (netlevelsup >= list_length(context->namespaces)) elog(ERROR, "bogus varlevelsup: %d offset %d", var->varlevelsup, levelsup); dpns = (deparse_namespace *) list_nth(context->namespaces, netlevelsup); /* * Try to find the relevant RTE in this rtable. In a plan tree, it's * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig * down into the subplans, or INDEX_VAR, which is resolved similarly. */ if (var->varno >= 1 && var->varno <= list_length(dpns->rtable)) { rte = rt_fetch(var->varno, dpns->rtable); attnum = var->varattno; } else if (var->varno == OUTER_VAR && dpns->outer_tlist) { TargetEntry *tle; deparse_namespace save_dpns; const char *result; tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->outer_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } else if (var->varno == INNER_VAR && dpns->inner_tlist) { TargetEntry *tle; deparse_namespace save_dpns; const char *result; tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } else if (var->varno == INDEX_VAR && dpns->index_tlist) { TargetEntry *tle; const char *result; tle = get_tle_by_resno(dpns->index_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); Assert(netlevelsup == 0); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); return result; } else { elog(ERROR, "bogus varno: %d", var->varno); return NULL; /* keep compiler quiet */ } if (attnum == InvalidAttrNumber) { /* Var is whole-row reference to RTE, so select the right field */ return get_rte_attribute_name(rte, fieldno); } /* * This part has essentially the same logic as the parser's * expandRecordVariable() function, but we are dealing with a different * representation of the input context, and we only need one field name * not a TupleDesc. Also, we need special cases for finding subquery and * CTE subplans when deparsing Plan trees. */ expr = (Node *) var; /* default if we can't drill down */ switch (rte->rtekind) { case RTE_RELATION: case RTE_VALUES: case RTE_NAMEDTUPLESTORE: /* * This case should not occur: a column of a table or values list * shouldn't have type RECORD. Fall through and fail (most * likely) at the bottom. 
*/ break; case RTE_SUBQUERY: /* Subselect-in-FROM: examine sub-select's output expr */ { if (rte->subquery) { TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, attnum); if (ste == NULL || ste->resjunk) elog(ERROR, "subquery %s does not have attribute %d", rte->eref->aliasname, attnum); expr = (Node *) ste->expr; if (IsA(expr, Var)) { /* * Recurse into the sub-select to see what its Var * refers to. We have to build an additional level of * namespace to keep in step with varlevelsup in the * subselect. */ deparse_namespace mydpns; const char *result; set_deparse_for_query(&mydpns, rte->subquery, context->namespaces); context->namespaces = lcons(&mydpns, context->namespaces); result = get_name_for_var_field((Var *) expr, fieldno, 0, context); context->namespaces = list_delete_first(context->namespaces); return result; } /* else fall through to inspect the expression */ } else { /* * We're deparsing a Plan tree so we don't have complete * RTE entries (in particular, rte->subquery is NULL). But * the only place we'd see a Var directly referencing a * SUBQUERY RTE is in a SubqueryScan plan node, and we can * look into the child plan's tlist instead. */ TargetEntry *tle; deparse_namespace save_dpns; const char *result; if (!dpns->inner_planstate) elog(ERROR, "failed to find plan for subquery %s", rte->eref->aliasname); tle = get_tle_by_resno(dpns->inner_tlist, attnum); if (!tle) elog(ERROR, "bogus varattno for subquery var: %d", attnum); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } } break; case RTE_JOIN: /* Join RTE --- recursively inspect the alias variable */ if (rte->joinaliasvars == NIL) elog(ERROR, "cannot decompile join alias var in plan tree"); Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars)); expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1); Assert(expr != NULL); /* we intentionally don't strip implicit coercions here */ if (IsA(expr, Var)) return get_name_for_var_field((Var *) expr, fieldno, var->varlevelsup + levelsup, context); /* else fall through to inspect the expression */ break; case RTE_FUNCTION: case RTE_TABLEFUNC: /* * We couldn't get here unless a function is declared with one of * its result columns as RECORD, which is not allowed. */ break; case RTE_CTE: /* CTE reference: examine subquery's output expr */ { CommonTableExpr *cte = NULL; Index ctelevelsup; ListCell *lc; /* * Try to find the referenced CTE using the namespace stack. */ ctelevelsup = rte->ctelevelsup + netlevelsup; if (ctelevelsup >= list_length(context->namespaces)) lc = NULL; else { deparse_namespace *ctedpns; ctedpns = (deparse_namespace *) list_nth(context->namespaces, ctelevelsup); foreach(lc, ctedpns->ctes) { cte = (CommonTableExpr *) lfirst(lc); if (strcmp(cte->ctename, rte->ctename) == 0) break; } } if (lc != NULL) { Query *ctequery = (Query *) cte->ctequery; TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte), attnum); if (ste == NULL || ste->resjunk) elog(ERROR, "subquery %s does not have attribute %d", rte->eref->aliasname, attnum); expr = (Node *) ste->expr; if (IsA(expr, Var)) { /* * Recurse into the CTE to see what its Var refers to. * We have to build an additional level of namespace * to keep in step with varlevelsup in the CTE. * Furthermore it could be an outer CTE, so we may * have to delete some levels of namespace. 
*/ List *save_nslist = context->namespaces; List *new_nslist; deparse_namespace mydpns; const char *result; set_deparse_for_query(&mydpns, ctequery, context->namespaces); new_nslist = list_copy_tail(context->namespaces, ctelevelsup); context->namespaces = lcons(&mydpns, new_nslist); result = get_name_for_var_field((Var *) expr, fieldno, 0, context); context->namespaces = save_nslist; return result; } /* else fall through to inspect the expression */ } else { /* * We're deparsing a Plan tree so we don't have a CTE * list. But the only place we'd see a Var directly * referencing a CTE RTE is in a CteScan plan node, and we * can look into the subplan's tlist instead. */ TargetEntry *tle; deparse_namespace save_dpns; const char *result; if (!dpns->inner_planstate) elog(ERROR, "failed to find plan for CTE %s", rte->eref->aliasname); tle = get_tle_by_resno(dpns->inner_tlist, attnum); if (!tle) elog(ERROR, "bogus varattno for subquery var: %d", attnum); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } } break; } /* * We now have an expression we can't expand any more, so see if * get_expr_result_type() can do anything with it. If not, pass to * lookup_rowtype_tupdesc() which will probably fail, but will give an * appropriate error message while failing. */ if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) tupleDesc = lookup_rowtype_tupdesc_copy(exprType(expr), exprTypmod(expr)); Assert(tupleDesc); /* Got the tupdesc, so we can extract the field name */ Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); return NameStr(tupleDesc->attrs[fieldno - 1]->attname); } /* * Try to find the referenced expression for a PARAM_EXEC Param that might * reference a parameter supplied by an upper NestLoop or SubPlan plan node. * * If successful, return the expression and set *dpns_p and *ancestor_cell_p * appropriately for calling push_ancestor_plan(). If no referent can be * found, return NULL. */ static Node * find_param_referent(Param *param, deparse_context *context, deparse_namespace **dpns_p, ListCell **ancestor_cell_p) { /* Initialize output parameters to prevent compiler warnings */ *dpns_p = NULL; *ancestor_cell_p = NULL; /* * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or * SubPlan argument. This will necessarily be in some ancestor of the * current expression's PlanState. */ if (param->paramkind == PARAM_EXEC) { deparse_namespace *dpns; PlanState *child_ps; bool in_same_plan_level; ListCell *lc; dpns = (deparse_namespace *) linitial(context->namespaces); child_ps = dpns->planstate; in_same_plan_level = true; foreach(lc, dpns->ancestors) { PlanState *ps = (PlanState *) lfirst(lc); ListCell *lc2; /* * NestLoops transmit params to their inner child only; also, once * we've crawled up out of a subplan, this couldn't possibly be * the right match. */ if (IsA(ps, NestLoopState) && child_ps == innerPlanState(ps) && in_same_plan_level) { NestLoop *nl = (NestLoop *) ps->plan; foreach(lc2, nl->nestParams) { NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2); if (nlp->paramno == param->paramid) { /* Found a match, so return it */ *dpns_p = dpns; *ancestor_cell_p = lc; return (Node *) nlp->paramval; } } } /* * Check to see if we're crawling up from a subplan. 
*/ foreach(lc2, ps->subPlan) { SubPlanState *sstate = (SubPlanState *) lfirst(lc2); SubPlan *subplan = sstate->subplan; ListCell *lc3; ListCell *lc4; if (child_ps != sstate->planstate) continue; /* Matched subplan, so check its arguments */ forboth(lc3, subplan->parParam, lc4, subplan->args) { int paramid = lfirst_int(lc3); Node *arg = (Node *) lfirst(lc4); if (paramid == param->paramid) { /* Found a match, so return it */ *dpns_p = dpns; *ancestor_cell_p = lc; return arg; } } /* Keep looking, but we are emerging from a subplan. */ in_same_plan_level = false; break; } /* * Likewise check to see if we're emerging from an initplan. * Initplans never have any parParams, so no need to search that * list, but we need to know if we should reset * in_same_plan_level. */ foreach(lc2, ps->initPlan) { SubPlanState *sstate = (SubPlanState *) lfirst(lc2); if (child_ps != sstate->planstate) continue; /* No parameters to be had here. */ Assert(sstate->subplan->parParam == NIL); /* Keep looking, but we are emerging from an initplan. */ in_same_plan_level = false; break; } /* No luck, crawl up to next ancestor */ child_ps = ps; } } /* No referent found */ return NULL; } /* * Display a Param appropriately. */ static void get_parameter(Param *param, deparse_context *context) { Node *expr; deparse_namespace *dpns; ListCell *ancestor_cell; /* * If it's a PARAM_EXEC parameter, try to locate the expression from which * the parameter was computed. Note that failing to find a referent isn't * an error, since the Param might well be a subplan output rather than an * input. */ expr = find_param_referent(param, context, &dpns, &ancestor_cell); if (expr) { /* Found a match, so print it */ deparse_namespace save_dpns; bool save_varprefix; bool need_paren; /* Switch attention to the ancestor plan node */ push_ancestor_plan(dpns, ancestor_cell, &save_dpns); /* * Force prefixing of Vars, since they won't belong to the relation * being scanned in the original plan node. */ save_varprefix = context->varprefix; context->varprefix = true; /* * A Param's expansion is typically a Var, Aggref, or upper-level * Param, which wouldn't need extra parentheses. Otherwise, insert * parens to ensure the expression looks atomic. */ need_paren = !(IsA(expr, Var) || IsA(expr, Aggref) || IsA(expr, Param)); if (need_paren) appendStringInfoChar(context->buf, '('); get_rule_expr(expr, context, false); if (need_paren) appendStringInfoChar(context->buf, ')'); context->varprefix = save_varprefix; pop_ancestor_plan(dpns, &save_dpns); return; } /* * Not PARAM_EXEC, or couldn't find referent: just print $N. 
*/ appendStringInfo(context->buf, "$%d", param->paramid); } /* * get_simple_binary_op_name * * helper function for isSimpleNode * will return single char binary operator name, or NULL if it's not */ static const char * get_simple_binary_op_name(OpExpr *expr) { List *args = expr->args; if (list_length(args) == 2) { /* binary operator */ Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); const char *op; op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2)); if (strlen(op) == 1) return op; } return NULL; } /* * isSimpleNode - check if given node is simple (doesn't need parenthesizing) * * true : simple in the context of parent node's type * false : not simple */ static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags) { if (!node) return false; switch (nodeTag(node)) { case T_Var: case T_Const: case T_Param: case T_CoerceToDomainValue: case T_SetToDefault: case T_CurrentOfExpr: /* single words: always simple */ return true; case T_ArrayRef: case T_ArrayExpr: case T_RowExpr: case T_CoalesceExpr: case T_MinMaxExpr: case T_SQLValueFunction: case T_XmlExpr: case T_NextValueExpr: case T_NullIfExpr: case T_Aggref: case T_WindowFunc: case T_FuncExpr: /* function-like: name(..) or name[..] */ return true; /* CASE keywords act as parentheses */ case T_CaseExpr: return true; case T_FieldSelect: /* * appears simple since . has top precedence, unless parent is * T_FieldSelect itself! */ return (IsA(parentNode, FieldSelect) ? false : true); case T_FieldStore: /* * treat like FieldSelect (probably doesn't matter) */ return (IsA(parentNode, FieldStore) ? false : true); case T_CoerceToDomain: /* maybe simple, check args */ return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg, node, prettyFlags); case T_RelabelType: return isSimpleNode((Node *) ((RelabelType *) node)->arg, node, prettyFlags); case T_CoerceViaIO: return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg, node, prettyFlags); case T_ArrayCoerceExpr: return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg, node, prettyFlags); case T_ConvertRowtypeExpr: return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg, node, prettyFlags); case T_OpExpr: { /* depends on parent node type; needs further checking */ if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr)) { const char *op; const char *parentOp; bool is_lopriop; bool is_hipriop; bool is_lopriparent; bool is_hipriparent; op = get_simple_binary_op_name((OpExpr *) node); if (!op) return false; /* We know only the basic operators + - and * / % */ is_lopriop = (strchr("+-", *op) != NULL); is_hipriop = (strchr("*/%", *op) != NULL); if (!(is_lopriop || is_hipriop)) return false; parentOp = get_simple_binary_op_name((OpExpr *) parentNode); if (!parentOp) return false; is_lopriparent = (strchr("+-", *parentOp) != NULL); is_hipriparent = (strchr("*/%", *parentOp) != NULL); if (!(is_lopriparent || is_hipriparent)) return false; if (is_hipriop && is_lopriparent) return true; /* op binds tighter than parent */ if (is_lopriop && is_hipriparent) return false; /* * Operators are same priority --- can skip parens only if * we have (a - b) - c, not a - (b - c). */ if (node == (Node *) linitial(((OpExpr *) parentNode)->args)) return true; return false; } /* else do the same stuff as for T_SubLink et al. 
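 * (Illustrative: when the parent is an explicit cast, we return false
 * below, so an OpExpr argument keeps its own parentheses and deparses as,
 * e.g., "(a + b)::text".)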
*/ /* FALL THROUGH */ } case T_SubLink: case T_NullTest: case T_BooleanTest: case T_DistinctExpr: switch (nodeTag(parentNode)) { case T_FuncExpr: { /* special handling for casts */ CoercionForm type = ((FuncExpr *) parentNode)->funcformat; if (type == COERCE_EXPLICIT_CAST || type == COERCE_IMPLICIT_CAST) return false; return true; /* own parentheses */ } case T_BoolExpr: /* lower precedence */ case T_ArrayRef: /* other separators */ case T_ArrayExpr: /* other separators */ case T_RowExpr: /* other separators */ case T_CoalesceExpr: /* own parentheses */ case T_MinMaxExpr: /* own parentheses */ case T_XmlExpr: /* own parentheses */ case T_NullIfExpr: /* other separators */ case T_Aggref: /* own parentheses */ case T_WindowFunc: /* own parentheses */ case T_CaseExpr: /* other separators */ return true; default: return false; } case T_BoolExpr: switch (nodeTag(parentNode)) { case T_BoolExpr: if (prettyFlags & PRETTYFLAG_PAREN) { BoolExprType type; BoolExprType parentType; type = ((BoolExpr *) node)->boolop; parentType = ((BoolExpr *) parentNode)->boolop; switch (type) { case NOT_EXPR: case AND_EXPR: if (parentType == AND_EXPR || parentType == OR_EXPR) return true; break; case OR_EXPR: if (parentType == OR_EXPR) return true; break; } } return false; case T_FuncExpr: { /* special handling for casts */ CoercionForm type = ((FuncExpr *) parentNode)->funcformat; if (type == COERCE_EXPLICIT_CAST || type == COERCE_IMPLICIT_CAST) return false; return true; /* own parentheses */ } case T_ArrayRef: /* other separators */ case T_ArrayExpr: /* other separators */ case T_RowExpr: /* other separators */ case T_CoalesceExpr: /* own parentheses */ case T_MinMaxExpr: /* own parentheses */ case T_XmlExpr: /* own parentheses */ case T_NullIfExpr: /* other separators */ case T_Aggref: /* own parentheses */ case T_WindowFunc: /* own parentheses */ case T_CaseExpr: /* other separators */ return true; default: return false; } default: break; } /* those we don't know: in dubio complexo */ return false; } /* * appendContextKeyword - append a keyword to buffer * * If prettyPrint is enabled, perform a line break, and adjust indentation. * Otherwise, just append the keyword. */ static void appendContextKeyword(deparse_context *context, const char *str, int indentBefore, int indentAfter, int indentPlus) { StringInfo buf = context->buf; if (PRETTY_INDENT(context)) { int indentAmount; context->indentLevel += indentBefore; /* remove any trailing spaces currently in the buffer ... */ removeStringInfoSpaces(buf); /* ... then add a newline and some spaces */ appendStringInfoChar(buf, '\n'); if (context->indentLevel < PRETTYINDENT_LIMIT) indentAmount = Max(context->indentLevel, 0) + indentPlus; else { /* * If we're indented more than PRETTYINDENT_LIMIT characters, try * to conserve horizontal space by reducing the per-level * indentation. For best results the scale factor here should * divide all the indent amounts that get added to indentLevel * (PRETTYINDENT_STD, etc). It's important that the indentation * not grow unboundedly, else deeply-nested trees use O(N^2) * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT. 
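 * Worked example (assuming the usual PRETTYINDENT_STD = 8 and
 * PRETTYINDENT_LIMIT = 40): an indentLevel of 72 becomes
 * 40 + (72 - 40) / 4 = 48, which wraps modulo 40 down to 8 columns before
 * indentPlus is added back in.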
*/ indentAmount = PRETTYINDENT_LIMIT + (context->indentLevel - PRETTYINDENT_LIMIT) / (PRETTYINDENT_STD / 2); indentAmount %= PRETTYINDENT_LIMIT; /* scale/wrap logic affects indentLevel, but not indentPlus */ indentAmount += indentPlus; } appendStringInfoSpaces(buf, indentAmount); appendStringInfoString(buf, str); context->indentLevel += indentAfter; if (context->indentLevel < 0) context->indentLevel = 0; } else appendStringInfoString(buf, str); } /* * removeStringInfoSpaces - delete trailing spaces from a buffer. * * Possibly this should move to stringinfo.c at some point. */ static void removeStringInfoSpaces(StringInfo str) { while (str->len > 0 && str->data[str->len - 1] == ' ') str->data[--(str->len)] = '\0'; } /* * get_rule_expr_paren - deparse expr using get_rule_expr, * embracing the string with parentheses if necessary for prettyPrint. * * Never embrace if prettyFlags=0, because it's done in the calling node. * * Any node that does *not* embrace its argument node by sql syntax (with * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should * use get_rule_expr_paren instead of get_rule_expr so parentheses can be * added. */ static void get_rule_expr_paren(Node *node, deparse_context *context, bool showimplicit, Node *parentNode) { bool need_paren; need_paren = PRETTY_PAREN(context) && !isSimpleNode(node, parentNode, context->prettyFlags); if (need_paren) appendStringInfoChar(context->buf, '('); get_rule_expr(node, context, showimplicit); if (need_paren) appendStringInfoChar(context->buf, ')'); } /* ---------- * get_rule_expr - Parse back an expression * * Note: showimplicit determines whether we display any implicit cast that * is present at the top of the expression tree. It is a passed argument, * not a field of the context struct, because we change the value as we * recurse down into the expression. In general we suppress implicit casts * when the result type is known with certainty (eg, the arguments of an * OR must be boolean). We display implicit casts for arguments of functions * and operators, since this is needed to be certain that the same function * or operator will be chosen when the expression is re-parsed. * ---------- */ static void get_rule_expr(Node *node, deparse_context *context, bool showimplicit) { StringInfo buf = context->buf; if (node == NULL) return; /* Guard against excessively long or deeply-nested queries */ CHECK_FOR_INTERRUPTS(); check_stack_depth(); /* * Each level of get_rule_expr must emit an indivisible term * (parenthesized if necessary) to ensure result is reparsed into the same * expression tree. The only exception is that when the input is a List, * we emit the component items comma-separated with no surrounding * decoration; this is convenient for most callers. 
*/ switch (nodeTag(node)) { case T_Var: (void) get_variable((Var *) node, 0, false, context); break; case T_Const: get_const_expr((Const *) node, context, 0); break; case T_Param: get_parameter((Param *) node, context); break; case T_Aggref: get_agg_expr((Aggref *) node, context, (Aggref *) node); break; case T_GroupingFunc: { GroupingFunc *gexpr = (GroupingFunc *) node; appendStringInfoString(buf, "GROUPING("); get_rule_expr((Node *) gexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_WindowFunc: get_windowfunc_expr((WindowFunc *) node, context); break; case T_ArrayRef: { ArrayRef *aref = (ArrayRef *) node; bool need_parens; /* * If the argument is a CaseTestExpr, we must be inside a * FieldStore, ie, we are assigning to an element of an array * within a composite column. Since we already punted on * displaying the FieldStore's target information, just punt * here too, and display only the assignment source * expression. */ if (IsA(aref->refexpr, CaseTestExpr)) { Assert(aref->refassgnexpr); get_rule_expr((Node *) aref->refassgnexpr, context, showimplicit); break; } /* * Parenthesize the argument unless it's a simple Var or a * FieldSelect. (In particular, if it's another ArrayRef, we * *must* parenthesize to avoid confusion.) */ need_parens = !IsA(aref->refexpr, Var) && !IsA(aref->refexpr, FieldSelect); if (need_parens) appendStringInfoChar(buf, '('); get_rule_expr((Node *) aref->refexpr, context, showimplicit); if (need_parens) appendStringInfoChar(buf, ')'); /* * If there's a refassgnexpr, we want to print the node in the * format "array[subscripts] := refassgnexpr". This is not * legal SQL, so decompilation of INSERT or UPDATE statements * should always use processIndirection as part of the * statement-level syntax. We should only see this when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. */ if (aref->refassgnexpr) { Node *refassgnexpr; /* * Use processIndirection to print this node's subscripts * as well as any additional field selections or * subscripting in immediate descendants. It returns the * RHS expr that is actually being "assigned". 
*/ refassgnexpr = processIndirection(node, context); appendStringInfoString(buf, " := "); get_rule_expr(refassgnexpr, context, showimplicit); } else { /* Just an ordinary array fetch, so print subscripts */ printSubscripts(aref, context); } } break; case T_FuncExpr: get_func_expr((FuncExpr *) node, context, showimplicit); break; case T_NamedArgExpr: { NamedArgExpr *na = (NamedArgExpr *) node; appendStringInfo(buf, "%s => ", quote_identifier(na->name)); get_rule_expr((Node *) na->arg, context, showimplicit); } break; case T_OpExpr: get_oper_expr((OpExpr *) node, context); break; case T_DistinctExpr: { DistinctExpr *expr = (DistinctExpr *) node; List *args = expr->args; Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg1, context, true, node); appendStringInfoString(buf, " IS DISTINCT FROM "); get_rule_expr_paren(arg2, context, true, node); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_NullIfExpr: { NullIfExpr *nullifexpr = (NullIfExpr *) node; appendStringInfoString(buf, "NULLIF("); get_rule_expr((Node *) nullifexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_ScalarArrayOpExpr: { ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; List *args = expr->args; Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg1, context, true, node); appendStringInfo(buf, " %s %s (", generate_operator_name(expr->opno, exprType(arg1), get_base_element_type(exprType(arg2))), expr->useOr ? "ANY" : "ALL"); get_rule_expr_paren(arg2, context, true, node); /* * There's inherent ambiguity in "x op ANY/ALL (y)" when y is * a bare sub-SELECT. Since we're here, the sub-SELECT must * be meant as a scalar sub-SELECT yielding an array value to * be used in ScalarArrayOpExpr; but the grammar will * preferentially interpret such a construct as an ANY/ALL * SubLink. To prevent misparsing the output that way, insert * a dummy coercion (which will be stripped by parse analysis, * so no inefficiency is added in dump and reload). This is * indeed most likely what the user wrote to get the construct * accepted in the first place. 
*/ if (IsA(arg2, SubLink) && ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) appendStringInfo(buf, "::%s", format_type_with_typemod(exprType(arg2), exprTypmod(arg2))); appendStringInfoChar(buf, ')'); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_BoolExpr: { BoolExpr *expr = (BoolExpr *) node; Node *first_arg = linitial(expr->args); ListCell *arg = lnext(list_head(expr->args)); switch (expr->boolop) { case AND_EXPR: if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(first_arg, context, false, node); while (arg) { appendStringInfoString(buf, " AND "); get_rule_expr_paren((Node *) lfirst(arg), context, false, node); arg = lnext(arg); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); break; case OR_EXPR: if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(first_arg, context, false, node); while (arg) { appendStringInfoString(buf, " OR "); get_rule_expr_paren((Node *) lfirst(arg), context, false, node); arg = lnext(arg); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); break; case NOT_EXPR: if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); appendStringInfoString(buf, "NOT "); get_rule_expr_paren(first_arg, context, false, node); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); break; default: elog(ERROR, "unrecognized boolop: %d", (int) expr->boolop); } } break; case T_SubLink: get_sublink_expr((SubLink *) node, context); break; case T_SubPlan: { SubPlan *subplan = (SubPlan *) node; /* * We cannot see an already-planned subplan in rule deparsing, * only while EXPLAINing a query plan. We don't try to * reconstruct the original SQL, just reference the subplan * that appears elsewhere in EXPLAIN's result. */ if (subplan->useHashTable) appendStringInfo(buf, "(hashed %s)", subplan->plan_name); else appendStringInfo(buf, "(%s)", subplan->plan_name); } break; case T_AlternativeSubPlan: { AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; ListCell *lc; /* As above, this can only happen during EXPLAIN */ appendStringInfoString(buf, "(alternatives: "); foreach(lc, asplan->subplans) { SubPlan *splan = lfirst_node(SubPlan, lc); if (splan->useHashTable) appendStringInfo(buf, "hashed %s", splan->plan_name); else appendStringInfoString(buf, splan->plan_name); if (lnext(lc)) appendStringInfoString(buf, " or "); } appendStringInfoChar(buf, ')'); } break; case T_FieldSelect: { FieldSelect *fselect = (FieldSelect *) node; Node *arg = (Node *) fselect->arg; int fno = fselect->fieldnum; const char *fieldname; bool need_parens; /* * Parenthesize the argument unless it's an ArrayRef or * another FieldSelect. Note in particular that it would be * WRONG to not parenthesize a Var argument; simplicity is not * the issue here, having the right number of names is. */ need_parens = !IsA(arg, ArrayRef) &&!IsA(arg, FieldSelect); if (need_parens) appendStringInfoChar(buf, '('); get_rule_expr(arg, context, true); if (need_parens) appendStringInfoChar(buf, ')'); /* * Get and print the field name. */ fieldname = get_name_for_var_field((Var *) arg, fno, 0, context); appendStringInfo(buf, ".%s", quote_identifier(fieldname)); } break; case T_FieldStore: { FieldStore *fstore = (FieldStore *) node; bool need_parens; /* * There is no good way to represent a FieldStore as real SQL, * so decompilation of INSERT or UPDATE statements should * always use processIndirection as part of the * statement-level syntax. 
We should only get here when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. The plan case is even harder than * ordinary rules would be, because the planner tries to * collapse multiple assignments to the same field or subfield * into one FieldStore; so we can see a list of target fields * not just one, and the arguments could be FieldStores * themselves. We don't bother to try to print the target * field names; we just print the source arguments, with a * ROW() around them if there's more than one. This isn't * terribly complete, but it's probably good enough for * EXPLAIN's purposes; especially since anything more would be * either hopelessly confusing or an even poorer * representation of what the plan is actually doing. */ need_parens = (list_length(fstore->newvals) != 1); if (need_parens) appendStringInfoString(buf, "ROW("); get_rule_expr((Node *) fstore->newvals, context, showimplicit); if (need_parens) appendStringInfoChar(buf, ')'); } break; case T_RelabelType: { RelabelType *relabel = (RelabelType *) node; Node *arg = (Node *) relabel->arg; if (relabel->relabelformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, relabel->resulttype, relabel->resulttypmod, node); } } break; case T_CoerceViaIO: { CoerceViaIO *iocoerce = (CoerceViaIO *) node; Node *arg = (Node *) iocoerce->arg; if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, iocoerce->resulttype, -1, node); } } break; case T_ArrayCoerceExpr: { ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; Node *arg = (Node *) acoerce->arg; if (acoerce->coerceformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, acoerce->resulttype, acoerce->resulttypmod, node); } } break; case T_ConvertRowtypeExpr: { ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; Node *arg = (Node *) convert->arg; if (convert->convertformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, convert->resulttype, -1, node); } } break; case T_CollateExpr: { CollateExpr *collate = (CollateExpr *) node; Node *arg = (Node *) collate->arg; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg, context, showimplicit, node); appendStringInfo(buf, " COLLATE %s", generate_collation_name(collate->collOid)); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_CaseExpr: { CaseExpr *caseexpr = (CaseExpr *) node; ListCell *temp; appendContextKeyword(context, "CASE", 0, PRETTYINDENT_VAR, 0); if (caseexpr->arg) { appendStringInfoChar(buf, ' '); get_rule_expr((Node *) caseexpr->arg, context, true); } foreach(temp, caseexpr->args) { CaseWhen *when = (CaseWhen *) lfirst(temp); Node *w = (Node *) when->expr; if (caseexpr->arg) { /* * The parser should have produced WHEN clauses of the * form "CaseTestExpr = RHS", possibly with an * implicit coercion inserted above the CaseTestExpr. * For accurate decompilation of rules it's essential * that we show just the RHS. 
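 * (For example, the parser stores "WHEN 1" as "CaseTestExpr = 1", and we
 * print back just the "1".)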
However in an * expression that's been through the optimizer, the * WHEN clause could be almost anything (since the * equality operator could have been expanded into an * inline function). If we don't recognize the form * of the WHEN clause, just punt and display it as-is. */ if (IsA(w, OpExpr)) { List *args = ((OpExpr *) w)->args; if (list_length(args) == 2 && IsA(strip_implicit_coercions(linitial(args)), CaseTestExpr)) w = (Node *) lsecond(args); } } if (!PRETTY_INDENT(context)) appendStringInfoChar(buf, ' '); appendContextKeyword(context, "WHEN ", 0, 0, 0); get_rule_expr(w, context, false); appendStringInfoString(buf, " THEN "); get_rule_expr((Node *) when->result, context, true); } if (!PRETTY_INDENT(context)) appendStringInfoChar(buf, ' '); appendContextKeyword(context, "ELSE ", 0, 0, 0); get_rule_expr((Node *) caseexpr->defresult, context, true); if (!PRETTY_INDENT(context)) appendStringInfoChar(buf, ' '); appendContextKeyword(context, "END", -PRETTYINDENT_VAR, 0, 0); } break; case T_CaseTestExpr: { /* * Normally we should never get here, since for expressions * that can contain this node type we attempt to avoid * recursing to it. But in an optimized expression we might * be unable to avoid that (see comments for CaseExpr). If we * do see one, print it as CASE_TEST_EXPR. */ appendStringInfoString(buf, "CASE_TEST_EXPR"); } break; case T_ArrayExpr: { ArrayExpr *arrayexpr = (ArrayExpr *) node; appendStringInfoString(buf, "ARRAY["); get_rule_expr((Node *) arrayexpr->elements, context, true); appendStringInfoChar(buf, ']'); /* * If the array isn't empty, we assume its elements are * coerced to the desired type. If it's empty, though, we * need an explicit coercion to the array type. */ if (arrayexpr->elements == NIL) appendStringInfo(buf, "::%s", format_type_with_typemod(arrayexpr->array_typeid, -1)); } break; case T_RowExpr: { RowExpr *rowexpr = (RowExpr *) node; TupleDesc tupdesc = NULL; ListCell *arg; int i; char *sep; /* * If it's a named type and not RECORD, we may have to skip * dropped columns and/or claim there are NULLs for added * columns. */ if (rowexpr->row_typeid != RECORDOID) { tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1); Assert(list_length(rowexpr->args) <= tupdesc->natts); } /* * SQL99 allows "ROW" to be omitted when there is more than * one column, but for simplicity we always print it. */ appendStringInfoString(buf, "ROW("); sep = ""; i = 0; foreach(arg, rowexpr->args) { Node *e = (Node *) lfirst(arg); if (tupdesc == NULL || !tupdesc->attrs[i]->attisdropped) { appendStringInfoString(buf, sep); /* Whole-row Vars need special treatment here */ get_rule_expr_toplevel(e, context, true); sep = ", "; } i++; } if (tupdesc != NULL) { while (i < tupdesc->natts) { if (!tupdesc->attrs[i]->attisdropped) { appendStringInfoString(buf, sep); appendStringInfoString(buf, "NULL"); sep = ", "; } i++; } ReleaseTupleDesc(tupdesc); } appendStringInfoChar(buf, ')'); if (rowexpr->row_format == COERCE_EXPLICIT_CAST) appendStringInfo(buf, "::%s", format_type_with_typemod(rowexpr->row_typeid, -1)); } break; case T_RowCompareExpr: { RowCompareExpr *rcexpr = (RowCompareExpr *) node; ListCell *arg; char *sep; /* * SQL99 allows "ROW" to be omitted when there is more than * one column, but for simplicity we always print it. 
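 * (Illustrative: a row comparison therefore deparses as
 * "(ROW(a, b) < ROW(c, d))", even though "(a, b) < (c, d)" would also be
 * accepted on re-parse.)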
*/ appendStringInfoString(buf, "(ROW("); sep = ""; foreach(arg, rcexpr->largs) { Node *e = (Node *) lfirst(arg); appendStringInfoString(buf, sep); get_rule_expr(e, context, true); sep = ", "; } /* * We assume that the name of the first-column operator will * do for all the rest too. This is definitely open to * failure, eg if some but not all operators were renamed * since the construct was parsed, but there seems no way to * be perfect. */ appendStringInfo(buf, ") %s ROW(", generate_operator_name(linitial_oid(rcexpr->opnos), exprType(linitial(rcexpr->largs)), exprType(linitial(rcexpr->rargs)))); sep = ""; foreach(arg, rcexpr->rargs) { Node *e = (Node *) lfirst(arg); appendStringInfoString(buf, sep); get_rule_expr(e, context, true); sep = ", "; } appendStringInfoString(buf, "))"); } break; case T_CoalesceExpr: { CoalesceExpr *coalesceexpr = (CoalesceExpr *) node; appendStringInfoString(buf, "COALESCE("); get_rule_expr((Node *) coalesceexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_MinMaxExpr: { MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; switch (minmaxexpr->op) { case IS_GREATEST: appendStringInfoString(buf, "GREATEST("); break; case IS_LEAST: appendStringInfoString(buf, "LEAST("); break; } get_rule_expr((Node *) minmaxexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_SQLValueFunction: { SQLValueFunction *svf = (SQLValueFunction *) node; /* * Note: this code knows that typmod for time, timestamp, and * timestamptz just prints as integer. */ switch (svf->op) { case SVFOP_CURRENT_DATE: appendStringInfoString(buf, "CURRENT_DATE"); break; case SVFOP_CURRENT_TIME: appendStringInfoString(buf, "CURRENT_TIME"); break; case SVFOP_CURRENT_TIME_N: appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod); break; case SVFOP_CURRENT_TIMESTAMP: appendStringInfoString(buf, "CURRENT_TIMESTAMP"); break; case SVFOP_CURRENT_TIMESTAMP_N: appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)", svf->typmod); break; case SVFOP_LOCALTIME: appendStringInfoString(buf, "LOCALTIME"); break; case SVFOP_LOCALTIME_N: appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod); break; case SVFOP_LOCALTIMESTAMP: appendStringInfoString(buf, "LOCALTIMESTAMP"); break; case SVFOP_LOCALTIMESTAMP_N: appendStringInfo(buf, "LOCALTIMESTAMP(%d)", svf->typmod); break; case SVFOP_CURRENT_ROLE: appendStringInfoString(buf, "CURRENT_ROLE"); break; case SVFOP_CURRENT_USER: appendStringInfoString(buf, "CURRENT_USER"); break; case SVFOP_USER: appendStringInfoString(buf, "USER"); break; case SVFOP_SESSION_USER: appendStringInfoString(buf, "SESSION_USER"); break; case SVFOP_CURRENT_CATALOG: appendStringInfoString(buf, "CURRENT_CATALOG"); break; case SVFOP_CURRENT_SCHEMA: appendStringInfoString(buf, "CURRENT_SCHEMA"); break; } } break; case T_XmlExpr: { XmlExpr *xexpr = (XmlExpr *) node; bool needcomma = false; ListCell *arg; ListCell *narg; Const *con; switch (xexpr->op) { case IS_XMLCONCAT: appendStringInfoString(buf, "XMLCONCAT("); break; case IS_XMLELEMENT: appendStringInfoString(buf, "XMLELEMENT("); break; case IS_XMLFOREST: appendStringInfoString(buf, "XMLFOREST("); break; case IS_XMLPARSE: appendStringInfoString(buf, "XMLPARSE("); break; case IS_XMLPI: appendStringInfoString(buf, "XMLPI("); break; case IS_XMLROOT: appendStringInfoString(buf, "XMLROOT("); break; case IS_XMLSERIALIZE: appendStringInfoString(buf, "XMLSERIALIZE("); break; case IS_DOCUMENT: break; } if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE) { if (xexpr->xmloption == XMLOPTION_DOCUMENT) appendStringInfoString(buf, 
"DOCUMENT "); else appendStringInfoString(buf, "CONTENT "); } if (xexpr->name) { appendStringInfo(buf, "NAME %s", quote_identifier(map_xml_name_to_sql_identifier(xexpr->name))); needcomma = true; } if (xexpr->named_args) { if (xexpr->op != IS_XMLFOREST) { if (needcomma) appendStringInfoString(buf, ", "); appendStringInfoString(buf, "XMLATTRIBUTES("); needcomma = false; } forboth(arg, xexpr->named_args, narg, xexpr->arg_names) { Node *e = (Node *) lfirst(arg); char *argname = strVal(lfirst(narg)); if (needcomma) appendStringInfoString(buf, ", "); get_rule_expr((Node *) e, context, true); appendStringInfo(buf, " AS %s", quote_identifier(map_xml_name_to_sql_identifier(argname))); needcomma = true; } if (xexpr->op != IS_XMLFOREST) appendStringInfoChar(buf, ')'); } if (xexpr->args) { if (needcomma) appendStringInfoString(buf, ", "); switch (xexpr->op) { case IS_XMLCONCAT: case IS_XMLELEMENT: case IS_XMLFOREST: case IS_XMLPI: case IS_XMLSERIALIZE: /* no extra decoration needed */ get_rule_expr((Node *) xexpr->args, context, true); break; case IS_XMLPARSE: Assert(list_length(xexpr->args) == 2); get_rule_expr((Node *) linitial(xexpr->args), context, true); con = lsecond_node(Const, xexpr->args); Assert(!con->constisnull); if (DatumGetBool(con->constvalue)) appendStringInfoString(buf, " PRESERVE WHITESPACE"); else appendStringInfoString(buf, " STRIP WHITESPACE"); break; case IS_XMLROOT: Assert(list_length(xexpr->args) == 3); get_rule_expr((Node *) linitial(xexpr->args), context, true); appendStringInfoString(buf, ", VERSION "); con = (Const *) lsecond(xexpr->args); if (IsA(con, Const) && con->constisnull) appendStringInfoString(buf, "NO VALUE"); else get_rule_expr((Node *) con, context, false); con = lthird_node(Const, xexpr->args); if (con->constisnull) /* suppress STANDALONE NO VALUE */ ; else { switch (DatumGetInt32(con->constvalue)) { case XML_STANDALONE_YES: appendStringInfoString(buf, ", STANDALONE YES"); break; case XML_STANDALONE_NO: appendStringInfoString(buf, ", STANDALONE NO"); break; case XML_STANDALONE_NO_VALUE: appendStringInfoString(buf, ", STANDALONE NO VALUE"); break; default: break; } } break; case IS_DOCUMENT: get_rule_expr_paren((Node *) xexpr->args, context, false, node); break; } } if (xexpr->op == IS_XMLSERIALIZE) appendStringInfo(buf, " AS %s", format_type_with_typemod(xexpr->type, xexpr->typmod)); if (xexpr->op == IS_DOCUMENT) appendStringInfoString(buf, " IS DOCUMENT"); else appendStringInfoChar(buf, ')'); } break; case T_NullTest: { NullTest *ntest = (NullTest *) node; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren((Node *) ntest->arg, context, true, node); /* * For scalar inputs, we prefer to print as IS [NOT] NULL, * which is shorter and traditional. If it's a rowtype input * but we're applying a scalar test, must print IS [NOT] * DISTINCT FROM NULL to be semantically correct. 
*/ if (ntest->argisrow || !type_is_rowtype(exprType((Node *) ntest->arg))) { switch (ntest->nulltesttype) { case IS_NULL: appendStringInfoString(buf, " IS NULL"); break; case IS_NOT_NULL: appendStringInfoString(buf, " IS NOT NULL"); break; default: elog(ERROR, "unrecognized nulltesttype: %d", (int) ntest->nulltesttype); } } else { switch (ntest->nulltesttype) { case IS_NULL: appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL"); break; case IS_NOT_NULL: appendStringInfoString(buf, " IS DISTINCT FROM NULL"); break; default: elog(ERROR, "unrecognized nulltesttype: %d", (int) ntest->nulltesttype); } } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_BooleanTest: { BooleanTest *btest = (BooleanTest *) node; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren((Node *) btest->arg, context, false, node); switch (btest->booltesttype) { case IS_TRUE: appendStringInfoString(buf, " IS TRUE"); break; case IS_NOT_TRUE: appendStringInfoString(buf, " IS NOT TRUE"); break; case IS_FALSE: appendStringInfoString(buf, " IS FALSE"); break; case IS_NOT_FALSE: appendStringInfoString(buf, " IS NOT FALSE"); break; case IS_UNKNOWN: appendStringInfoString(buf, " IS UNKNOWN"); break; case IS_NOT_UNKNOWN: appendStringInfoString(buf, " IS NOT UNKNOWN"); break; default: elog(ERROR, "unrecognized booltesttype: %d", (int) btest->booltesttype); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_CoerceToDomain: { CoerceToDomain *ctest = (CoerceToDomain *) node; Node *arg = (Node *) ctest->arg; if (ctest->coercionformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr(arg, context, false); } else { get_coercion_expr(arg, context, ctest->resulttype, ctest->resulttypmod, node); } } break; case T_CoerceToDomainValue: appendStringInfoString(buf, "VALUE"); break; case T_SetToDefault: appendStringInfoString(buf, "DEFAULT"); break; case T_CurrentOfExpr: { CurrentOfExpr *cexpr = (CurrentOfExpr *) node; if (cexpr->cursor_name) appendStringInfo(buf, "CURRENT OF %s", quote_identifier(cexpr->cursor_name)); else appendStringInfo(buf, "CURRENT OF $%d", cexpr->cursor_param); } break; case T_NextValueExpr: { NextValueExpr *nvexpr = (NextValueExpr *) node; /* * This isn't exactly nextval(), but that seems close enough * for EXPLAIN's purposes. */ appendStringInfoString(buf, "nextval("); simple_quote_literal(buf, generate_relation_name(nvexpr->seqid, NIL)); appendStringInfoChar(buf, ')'); } break; case T_InferenceElem: { InferenceElem *iexpr = (InferenceElem *) node; bool save_varprefix; bool need_parens; /* * InferenceElem can only refer to target relation, so a * prefix is not useful, and indeed would cause parse errors. */ save_varprefix = context->varprefix; context->varprefix = false; /* * Parenthesize the element unless it's a simple Var or a bare * function call. Follows pg_get_indexdef_worker(). 
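 * (Illustrative: an arbiter column such as "email" or a bare call such as
 * "lower(email)" is printed as-is, while any other expression, say
 * "email || domain", gets wrapped in parentheses.)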
*/ need_parens = !IsA(iexpr->expr, Var); if (IsA(iexpr->expr, FuncExpr) && ((FuncExpr *) iexpr->expr)->funcformat == COERCE_EXPLICIT_CALL) need_parens = false; if (need_parens) appendStringInfoChar(buf, '('); get_rule_expr((Node *) iexpr->expr, context, false); if (need_parens) appendStringInfoChar(buf, ')'); context->varprefix = save_varprefix; if (iexpr->infercollid) appendStringInfo(buf, " COLLATE %s", generate_collation_name(iexpr->infercollid)); /* Add the operator class name, if not default */ if (iexpr->inferopclass) { Oid inferopclass = iexpr->inferopclass; Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass); get_opclass_name(inferopclass, inferopcinputtype, buf); } } break; case T_PartitionBoundSpec: { PartitionBoundSpec *spec = (PartitionBoundSpec *) node; ListCell *cell; char *sep; switch (spec->strategy) { case PARTITION_STRATEGY_LIST: Assert(spec->listdatums != NIL); appendStringInfoString(buf, "FOR VALUES IN ("); sep = ""; foreach(cell, spec->listdatums) { Const *val = castNode(Const, lfirst(cell)); appendStringInfoString(buf, sep); get_const_expr(val, context, -1); sep = ", "; } appendStringInfoString(buf, ")"); break; case PARTITION_STRATEGY_RANGE: Assert(spec->lowerdatums != NIL && spec->upperdatums != NIL && list_length(spec->lowerdatums) == list_length(spec->upperdatums)); appendStringInfo(buf, "FOR VALUES FROM %s TO %s", get_range_partbound_string(spec->lowerdatums), get_range_partbound_string(spec->upperdatums)); break; default: elog(ERROR, "unrecognized partition strategy: %d", (int) spec->strategy); break; } } break; case T_List: { char *sep; ListCell *l; sep = ""; foreach(l, (List *) node) { appendStringInfoString(buf, sep); get_rule_expr((Node *) lfirst(l), context, showimplicit); sep = ", "; } } break; case T_TableFunc: get_tablefunc((TableFunc *) node, context, showimplicit); break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); break; } } /* * get_rule_expr_toplevel - Parse back a toplevel expression * * Same as get_rule_expr(), except that if the expr is just a Var, we pass * istoplevel = true not false to get_variable(). This causes whole-row Vars * to get printed with decoration that will prevent expansion of "*". * We need to use this in contexts such as ROW() and VALUES(), where the * parser would expand "foo.*" appearing at top level. (In principle we'd * use this in get_target_list() too, but that has additional worries about * whether to print AS, so it needs to invoke get_variable() directly anyway.) */ static void get_rule_expr_toplevel(Node *node, deparse_context *context, bool showimplicit) { if (node && IsA(node, Var)) (void) get_variable((Var *) node, 0, true, context); else get_rule_expr(node, context, showimplicit); } /* * get_rule_expr_funccall - Parse back a function-call expression * * Same as get_rule_expr(), except that we guarantee that the output will * look like a function call, or like one of the things the grammar treats as * equivalent to a function call (see the func_expr_windowless production). * This is needed in places where the grammar uses func_expr_windowless and * you can't substitute a parenthesized a_expr. If what we have isn't going * to look like a function call, wrap it in a dummy CAST() expression, which * will satisfy the grammar --- and, indeed, is likely what the user wrote to * produce such a thing. 
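 * (Illustrative: a function-RTE expression that has been simplified to
 * something that no longer looks like a call --- a Const or a plain cast,
 * for instance --- is emitted as "CAST(expr AS type)" rather than bare.)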
*/ static void get_rule_expr_funccall(Node *node, deparse_context *context, bool showimplicit) { if (looks_like_function(node)) get_rule_expr(node, context, showimplicit); else { StringInfo buf = context->buf; appendStringInfoString(buf, "CAST("); /* no point in showing any top-level implicit cast */ get_rule_expr(node, context, false); appendStringInfo(buf, " AS %s)", format_type_with_typemod(exprType(node), exprTypmod(node))); } } /* * Helper function to identify node types that satisfy func_expr_windowless. * If in doubt, "false" is always a safe answer. */ static bool looks_like_function(Node *node) { if (node == NULL) return false; /* probably shouldn't happen */ switch (nodeTag(node)) { case T_FuncExpr: /* OK, unless it's going to deparse as a cast */ return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL); case T_NullIfExpr: case T_CoalesceExpr: case T_MinMaxExpr: case T_SQLValueFunction: case T_XmlExpr: /* these are all accepted by func_expr_common_subexpr */ return true; default: break; } return false; } /* * get_oper_expr - Parse back an OpExpr node */ static void get_oper_expr(OpExpr *expr, deparse_context *context) { StringInfo buf = context->buf; Oid opno = expr->opno; List *args = expr->args; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); if (list_length(args) == 2) { /* binary operator */ Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); get_rule_expr_paren(arg1, context, true, (Node *) expr); appendStringInfo(buf, " %s ", generate_operator_name(opno, exprType(arg1), exprType(arg2))); get_rule_expr_paren(arg2, context, true, (Node *) expr); } else { /* unary operator --- but which side? */ Node *arg = (Node *) linitial(args); HeapTuple tp; Form_pg_operator optup; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for operator %u", opno); optup = (Form_pg_operator) GETSTRUCT(tp); switch (optup->oprkind) { case 'l': appendStringInfo(buf, "%s ", generate_operator_name(opno, InvalidOid, exprType(arg))); get_rule_expr_paren(arg, context, true, (Node *) expr); break; case 'r': get_rule_expr_paren(arg, context, true, (Node *) expr); appendStringInfo(buf, " %s", generate_operator_name(opno, exprType(arg), InvalidOid)); break; default: elog(ERROR, "bogus oprkind: %d", optup->oprkind); } ReleaseSysCache(tp); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } /* * get_func_expr - Parse back a FuncExpr node */ static void get_func_expr(FuncExpr *expr, deparse_context *context, bool showimplicit) { StringInfo buf = context->buf; Oid funcoid = expr->funcid; Oid argtypes[FUNC_MAX_ARGS]; int nargs; List *argnames; bool use_variadic; ListCell *l; /* * If the function call came from an implicit coercion, then just show the * first argument --- unless caller wants to see implicit coercions. */ if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit) { get_rule_expr_paren((Node *) linitial(expr->args), context, false, (Node *) expr); return; } /* * If the function call came from a cast, then show the first argument * plus an explicit cast operation. 
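 * (Illustrative: a cast recorded as, say, the call int4(n) with
 * COERCE_EXPLICIT_CAST is printed back via get_coercion_expr below as
 * "n::integer" rather than as a function call.)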
*/ if (expr->funcformat == COERCE_EXPLICIT_CAST || expr->funcformat == COERCE_IMPLICIT_CAST) { Node *arg = linitial(expr->args); Oid rettype = expr->funcresulttype; int32 coercedTypmod; /* Get the typmod if this is a length-coercion function */ (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod); get_coercion_expr(arg, context, rettype, coercedTypmod, (Node *) expr); return; } /* * Normal function: display as proname(args). First we need to extract * the argument datatypes. */ if (list_length(expr->args) > FUNC_MAX_ARGS) ereport(ERROR, (errcode(ERRCODE_TOO_MANY_ARGUMENTS), errmsg("too many arguments"))); nargs = 0; argnames = NIL; foreach(l, expr->args) { Node *arg = (Node *) lfirst(l); if (IsA(arg, NamedArgExpr)) argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); argtypes[nargs] = exprType(arg); nargs++; } appendStringInfo(buf, "%s(", generate_function_name(funcoid, nargs, argnames, argtypes, expr->funcvariadic, &use_variadic, context->special_exprkind)); nargs = 0; foreach(l, expr->args) { if (nargs++ > 0) appendStringInfoString(buf, ", "); if (use_variadic && lnext(l) == NULL) appendStringInfoString(buf, "VARIADIC "); get_rule_expr((Node *) lfirst(l), context, true); } appendStringInfoChar(buf, ')'); } /* * get_agg_expr - Parse back an Aggref node */ static void get_agg_expr(Aggref *aggref, deparse_context *context, Aggref *original_aggref) { StringInfo buf = context->buf; Oid argtypes[FUNC_MAX_ARGS]; int nargs; bool use_variadic; /* * For a combining aggregate, we look up and deparse the corresponding * partial aggregate instead. This is necessary because our input * argument list has been replaced; the new argument list always has just * one element, which will point to a partial Aggref that supplies us with * transition states to combine. */ if (DO_AGGSPLIT_COMBINE(aggref->aggsplit)) { TargetEntry *tle = linitial_node(TargetEntry, aggref->args); Assert(list_length(aggref->args) == 1); resolve_special_varno((Node *) tle->expr, context, original_aggref, get_agg_combine_expr); return; } /* * Mark as PARTIAL, if appropriate. We look to the original aggref so as * to avoid printing this when recursing from the code just above. */ if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit)) appendStringInfoString(buf, "PARTIAL "); /* Extract the argument types as seen by the parser */ nargs = get_aggregate_argtypes(aggref, argtypes); /* Print the aggregate name, schema-qualified if needed */ appendStringInfo(buf, "%s(%s", generate_function_name(aggref->aggfnoid, nargs, NIL, argtypes, aggref->aggvariadic, &use_variadic, context->special_exprkind), (aggref->aggdistinct != NIL) ? "DISTINCT " : ""); if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) { /* * Ordered-set aggregates do not use "*" syntax. Also, we needn't * worry about inserting VARIADIC. So we can just dump the direct * args as-is. 
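 * (Illustrative: percentile_cont(0.5) WITHIN GROUP (ORDER BY x) round-trips
 * with 0.5 as the direct argument list and x as the aggregated ORDER BY
 * expression.)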
*/ Assert(!aggref->aggvariadic); get_rule_expr((Node *) aggref->aggdirectargs, context, true); Assert(aggref->aggorder != NIL); appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY "); get_rule_orderby(aggref->aggorder, aggref->args, false, context); } else { /* aggstar can be set only in zero-argument aggregates */ if (aggref->aggstar) appendStringInfoChar(buf, '*'); else { ListCell *l; int i; i = 0; foreach(l, aggref->args) { TargetEntry *tle = (TargetEntry *) lfirst(l); Node *arg = (Node *) tle->expr; Assert(!IsA(arg, NamedArgExpr)); if (tle->resjunk) continue; if (i++ > 0) appendStringInfoString(buf, ", "); if (use_variadic && i == nargs) appendStringInfoString(buf, "VARIADIC "); get_rule_expr(arg, context, true); } } if (aggref->aggorder != NIL) { appendStringInfoString(buf, " ORDER BY "); get_rule_orderby(aggref->aggorder, aggref->args, false, context); } } if (aggref->aggfilter != NULL) { appendStringInfoString(buf, ") FILTER (WHERE "); get_rule_expr((Node *) aggref->aggfilter, context, false); } appendStringInfoChar(buf, ')'); } /* * This is a helper function for get_agg_expr(). It's used when we deparse * a combining Aggref; resolve_special_varno locates the corresponding partial * Aggref and then calls this. */ static void get_agg_combine_expr(Node *node, deparse_context *context, void *private) { Aggref *aggref; Aggref *original_aggref = private; if (!IsA(node, Aggref)) elog(ERROR, "combining Aggref does not point to an Aggref"); aggref = (Aggref *) node; get_agg_expr(aggref, context, original_aggref); } /* * get_windowfunc_expr - Parse back a WindowFunc node */ static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context) { StringInfo buf = context->buf; Oid argtypes[FUNC_MAX_ARGS]; int nargs; List *argnames; ListCell *l; if (list_length(wfunc->args) > FUNC_MAX_ARGS) ereport(ERROR, (errcode(ERRCODE_TOO_MANY_ARGUMENTS), errmsg("too many arguments"))); nargs = 0; argnames = NIL; foreach(l, wfunc->args) { Node *arg = (Node *) lfirst(l); if (IsA(arg, NamedArgExpr)) argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); argtypes[nargs] = exprType(arg); nargs++; } appendStringInfo(buf, "%s(", generate_function_name(wfunc->winfnoid, nargs, argnames, argtypes, false, NULL, context->special_exprkind)); /* winstar can be set only in zero-argument aggregates */ if (wfunc->winstar) appendStringInfoChar(buf, '*'); else get_rule_expr((Node *) wfunc->args, context, true); if (wfunc->aggfilter != NULL) { appendStringInfoString(buf, ") FILTER (WHERE "); get_rule_expr((Node *) wfunc->aggfilter, context, false); } appendStringInfoString(buf, ") OVER "); foreach(l, context->windowClause) { WindowClause *wc = (WindowClause *) lfirst(l); if (wc->winref == wfunc->winref) { if (wc->name) appendStringInfoString(buf, quote_identifier(wc->name)); else get_rule_windowspec(wc, context->windowTList, context); break; } } if (l == NULL) { if (context->windowClause) elog(ERROR, "could not find window clause for winref %u", wfunc->winref); /* * In EXPLAIN, we don't have window context information available, so * we have to settle for this: */ appendStringInfoString(buf, "(?)"); } } /* ---------- * get_coercion_expr * * Make a string representation of a value coerced to a specific type * ---------- */ static void get_coercion_expr(Node *arg, deparse_context *context, Oid resulttype, int32 resulttypmod, Node *parentNode) { StringInfo buf = context->buf; /* * Since parse_coerce.c doesn't immediately collapse application of * length-coercion functions to constants, what we'll typically see in * 
such cases is a Const with typmod -1 and a length-coercion function * right above it. Avoid generating redundant output. However, beware of * suppressing casts when the user actually wrote something like * 'foo'::text::char(3). * * Note: it might seem that we are missing the possibility of needing to * print a COLLATE clause for such a Const. However, a Const could only * have nondefault collation in a post-constant-folding tree, in which the * length coercion would have been folded too. See also the special * handling of CollateExpr in coerce_to_target_type(): any collation * marking will be above the coercion node, not below it. */ if (arg && IsA(arg, Const) && ((Const *) arg)->consttype == resulttype && ((Const *) arg)->consttypmod == -1) { /* Show the constant without normal ::typename decoration */ get_const_expr((Const *) arg, context, -1); } else { if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg, context, false, parentNode); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } appendStringInfo(buf, "::%s", format_type_with_typemod(resulttype, resulttypmod)); } /* ---------- * get_const_expr * * Make a string representation of a Const * * showtype can be -1 to never show "::typename" decoration, or +1 to always * show it, or 0 to show it only if the constant wouldn't be assumed to be * the right type by default. * * If the Const's collation isn't default for its type, show that too. * We mustn't do this when showtype is -1 (since that means the caller will * print "::typename", and we can't put a COLLATE clause in between). It's * caller's responsibility that collation isn't missed in such cases. * ---------- */ static void get_const_expr(Const *constval, deparse_context *context, int showtype) { StringInfo buf = context->buf; Oid typoutput; bool typIsVarlena; char *extval; bool needlabel = false; if (constval->constisnull) { /* * Always label the type of a NULL constant to prevent misdecisions * about type when reparsing. */ appendStringInfoString(buf, "NULL"); if (showtype >= 0) { appendStringInfo(buf, "::%s", format_type_with_typemod(constval->consttype, constval->consttypmod)); get_const_collation(constval, context); } return; } getTypeOutputInfo(constval->consttype, &typoutput, &typIsVarlena); extval = OidOutputFunctionCall(typoutput, constval->constvalue); switch (constval->consttype) { case INT4OID: /* * INT4 can be printed without any decoration, unless it is * negative; in that case print it as '-nnn'::integer to ensure * that the output will re-parse as a constant, not as a constant * plus operator. In most cases we could get away with printing * (-nnn) instead, because of the way that gram.y handles negative * literals; but that doesn't work for INT_MIN, and it doesn't * seem that much prettier anyway. */ if (extval[0] != '-') appendStringInfoString(buf, extval); else { appendStringInfo(buf, "'%s'", extval); needlabel = true; /* we must attach a cast */ } break; case NUMERICOID: /* * NUMERIC can be printed without quotes if it looks like a float * constant (not an integer, and not Infinity or NaN) and doesn't * have a leading sign (for the same reason as for INT4). 
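 * (Illustrative: 3.14 is printed bare, while -1.5 and NaN come out as
 * '-1.5'::numeric and 'NaN'::numeric, with the cast label attached by the
 * code further below.)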
*/ if (isdigit((unsigned char) extval[0]) && strcspn(extval, "eE.") != strlen(extval)) { appendStringInfoString(buf, extval); } else { appendStringInfo(buf, "'%s'", extval); needlabel = true; /* we must attach a cast */ } break; case BITOID: case VARBITOID: appendStringInfo(buf, "B'%s'", extval); break; case BOOLOID: if (strcmp(extval, "t") == 0) appendStringInfoString(buf, "true"); else appendStringInfoString(buf, "false"); break; default: simple_quote_literal(buf, extval); break; } pfree(extval); if (showtype < 0) return; /* * For showtype == 0, append ::typename unless the constant will be * implicitly typed as the right type when it is read in. * * XXX this code has to be kept in sync with the behavior of the parser, * especially make_const. */ switch (constval->consttype) { case BOOLOID: case UNKNOWNOID: /* These types can be left unlabeled */ needlabel = false; break; case INT4OID: /* We determined above whether a label is needed */ break; case NUMERICOID: /* * Float-looking constants will be typed as numeric, which we * checked above; but if there's a nondefault typmod we need to * show it. */ needlabel |= (constval->consttypmod >= 0); break; default: needlabel = true; break; } if (needlabel || showtype > 0) appendStringInfo(buf, "::%s", format_type_with_typemod(constval->consttype, constval->consttypmod)); get_const_collation(constval, context); } /* * helper for get_const_expr: append COLLATE if needed */ static void get_const_collation(Const *constval, deparse_context *context) { StringInfo buf = context->buf; if (OidIsValid(constval->constcollid)) { Oid typcollation = get_typcollation(constval->consttype); if (constval->constcollid != typcollation) { appendStringInfo(buf, " COLLATE %s", generate_collation_name(constval->constcollid)); } } } /* * simple_quote_literal - Format a string as a SQL literal, append to buf */ static void simple_quote_literal(StringInfo buf, const char *val) { const char *valptr; /* * We form the string literal according to the prevailing setting of * standard_conforming_strings; we never use E''. User is responsible for * making sure result is used correctly. */ appendStringInfoChar(buf, '\''); for (valptr = val; *valptr; valptr++) { char ch = *valptr; if (SQL_STR_DOUBLE(ch, !standard_conforming_strings)) appendStringInfoChar(buf, ch); appendStringInfoChar(buf, ch); } appendStringInfoChar(buf, '\''); } /* ---------- * get_sublink_expr - Parse back a sublink * ---------- */ static void get_sublink_expr(SubLink *sublink, deparse_context *context) { StringInfo buf = context->buf; Query *query = (Query *) (sublink->subselect); char *opname = NULL; bool need_paren; if (sublink->subLinkType == ARRAY_SUBLINK) appendStringInfoString(buf, "ARRAY("); else appendStringInfoChar(buf, '('); /* * Note that we print the name of only the first operator, when there are * multiple combining operators. This is an approximation that could go * wrong in various scenarios (operators in different schemas, renamed * operators, etc) but there is not a whole lot we can do about it, since * the syntax allows only one operator to be shown. 
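 * (Illustrative: a multi-column test such as "(a, b) = ANY (SELECT ...)" is
 * printed using only the first column pair's "=", and since "= ANY" is
 * rendered as IN below, it comes back as "(a, b) IN (SELECT ...)".)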
*/ if (sublink->testexpr) { if (IsA(sublink->testexpr, OpExpr)) { /* single combining operator */ OpExpr *opexpr = (OpExpr *) sublink->testexpr; get_rule_expr(linitial(opexpr->args), context, true); opname = generate_operator_name(opexpr->opno, exprType(linitial(opexpr->args)), exprType(lsecond(opexpr->args))); } else if (IsA(sublink->testexpr, BoolExpr)) { /* multiple combining operators, = or <> cases */ char *sep; ListCell *l; appendStringInfoChar(buf, '('); sep = ""; foreach(l, ((BoolExpr *) sublink->testexpr)->args) { OpExpr *opexpr = lfirst_node(OpExpr, l); appendStringInfoString(buf, sep); get_rule_expr(linitial(opexpr->args), context, true); if (!opname) opname = generate_operator_name(opexpr->opno, exprType(linitial(opexpr->args)), exprType(lsecond(opexpr->args))); sep = ", "; } appendStringInfoChar(buf, ')'); } else if (IsA(sublink->testexpr, RowCompareExpr)) { /* multiple combining operators, < <= > >= cases */ RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr; appendStringInfoChar(buf, '('); get_rule_expr((Node *) rcexpr->largs, context, true); opname = generate_operator_name(linitial_oid(rcexpr->opnos), exprType(linitial(rcexpr->largs)), exprType(linitial(rcexpr->rargs))); appendStringInfoChar(buf, ')'); } else elog(ERROR, "unrecognized testexpr type: %d", (int) nodeTag(sublink->testexpr)); } need_paren = true; switch (sublink->subLinkType) { case EXISTS_SUBLINK: appendStringInfoString(buf, "EXISTS "); break; case ANY_SUBLINK: if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */ appendStringInfoString(buf, " IN "); else appendStringInfo(buf, " %s ANY ", opname); break; case ALL_SUBLINK: appendStringInfo(buf, " %s ALL ", opname); break; case ROWCOMPARE_SUBLINK: appendStringInfo(buf, " %s ", opname); break; case EXPR_SUBLINK: case MULTIEXPR_SUBLINK: case ARRAY_SUBLINK: need_paren = false; break; case CTE_SUBLINK: /* shouldn't occur in a SubLink */ default: elog(ERROR, "unrecognized sublink type: %d", (int) sublink->subLinkType); break; } if (need_paren) appendStringInfoChar(buf, '('); get_query_def(query, buf, context->namespaces, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); if (need_paren) appendStringInfoString(buf, "))"); else appendStringInfoChar(buf, ')'); } /* ---------- * get_tablefunc - Parse back a table function * ---------- */ static void get_tablefunc(TableFunc *tf, deparse_context *context, bool showimplicit) { StringInfo buf = context->buf; /* XMLTABLE is the only existing implementation. 
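 * A typical deparsed form, as an illustrative sketch, looks roughly like
 * "XMLTABLE((row_expr) PASSING (doc_expr) COLUMNS id integer PATH (p))",
 * with an XMLNAMESPACES(...) list prepended when ns_uris is set.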
*/ appendStringInfoString(buf, "XMLTABLE("); if (tf->ns_uris != NIL) { ListCell *lc1, *lc2; bool first = true; appendStringInfoString(buf, "XMLNAMESPACES ("); forboth(lc1, tf->ns_uris, lc2, tf->ns_names) { Node *expr = (Node *) lfirst(lc1); char *name = strVal(lfirst(lc2)); if (!first) appendStringInfoString(buf, ", "); else first = false; if (name != NULL) { get_rule_expr(expr, context, showimplicit); appendStringInfo(buf, " AS %s", name); } else { appendStringInfoString(buf, "DEFAULT "); get_rule_expr(expr, context, showimplicit); } } appendStringInfoString(buf, "), "); } appendStringInfoChar(buf, '('); get_rule_expr((Node *) tf->rowexpr, context, showimplicit); appendStringInfoString(buf, ") PASSING ("); get_rule_expr((Node *) tf->docexpr, context, showimplicit); appendStringInfoChar(buf, ')'); if (tf->colexprs != NIL) { ListCell *l1; ListCell *l2; ListCell *l3; ListCell *l4; ListCell *l5; int colnum = 0; l2 = list_head(tf->coltypes); l3 = list_head(tf->coltypmods); l4 = list_head(tf->colexprs); l5 = list_head(tf->coldefexprs); appendStringInfoString(buf, " COLUMNS "); foreach(l1, tf->colnames) { char *colname = strVal(lfirst(l1)); Oid typid; int32 typmod; Node *colexpr; Node *coldefexpr; bool ordinality = tf->ordinalitycol == colnum; bool notnull = bms_is_member(colnum, tf->notnulls); typid = lfirst_oid(l2); l2 = lnext(l2); typmod = lfirst_int(l3); l3 = lnext(l3); colexpr = (Node *) lfirst(l4); l4 = lnext(l4); coldefexpr = (Node *) lfirst(l5); l5 = lnext(l5); if (colnum > 0) appendStringInfoString(buf, ", "); colnum++; appendStringInfo(buf, "%s %s", quote_identifier(colname), ordinality ? "FOR ORDINALITY" : format_type_with_typemod(typid, typmod)); if (ordinality) continue; if (coldefexpr != NULL) { appendStringInfoString(buf, " DEFAULT ("); get_rule_expr((Node *) coldefexpr, context, showimplicit); appendStringInfoChar(buf, ')'); } if (colexpr != NULL) { appendStringInfoString(buf, " PATH ("); get_rule_expr((Node *) colexpr, context, showimplicit); appendStringInfoChar(buf, ')'); } if (notnull) appendStringInfoString(buf, " NOT NULL"); } } appendStringInfoChar(buf, ')'); } /* ---------- * get_from_clause - Parse back a FROM clause * * "prefix" is the keyword that denotes the start of the list of FROM * elements. It is FROM when used to parse back SELECT and UPDATE, but * is USING when parsing back DELETE. * ---------- */ static void get_from_clause(Query *query, const char *prefix, deparse_context *context) { StringInfo buf = context->buf; bool first = true; ListCell *l; /* * We use the query's jointree as a guide to what to print. However, we * must ignore auto-added RTEs that are marked not inFromCl. (These can * only appear at the top level of the jointree, so it's sufficient to * check here.) This check also ensures we ignore the rule pseudo-RTEs * for NEW and OLD. */ foreach(l, query->jointree->fromlist) { Node *jtnode = (Node *) lfirst(l); if (IsA(jtnode, RangeTblRef)) { int varno = ((RangeTblRef *) jtnode)->rtindex; RangeTblEntry *rte = rt_fetch(varno, query->rtable); if (!rte->inFromCl) continue; } if (first) { appendContextKeyword(context, prefix, -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); first = false; get_from_clause_item(jtnode, query, context); } else { StringInfoData itembuf; appendStringInfoString(buf, ", "); /* * Put the new FROM item's text into itembuf so we can decide * after we've got it whether or not it needs to go on a new line. 
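 * (Illustrative: with a wrap column of 79, a second FROM item whose text
 * would push the current output line past column 79 is preceded by a
 * newline plus standard indentation instead of being appended in place.)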
*/ initStringInfo(&itembuf); context->buf = &itembuf; get_from_clause_item(jtnode, query, context); /* Restore context's output buffer */ context->buf = buf; /* Consider line-wrapping if enabled */ if (PRETTY_INDENT(context) && context->wrapColumn >= 0) { /* Does the new item start with a new line? */ if (itembuf.len > 0 && itembuf.data[0] == '\n') { /* If so, we shouldn't add anything */ /* instead, remove any trailing spaces currently in buf */ removeStringInfoSpaces(buf); } else { char *trailing_nl; /* Locate the start of the current line in the buffer */ trailing_nl = strrchr(buf->data, '\n'); if (trailing_nl == NULL) trailing_nl = buf->data; else trailing_nl++; /* * Add a newline, plus some indentation, if the new item * would cause an overflow. */ if (strlen(trailing_nl) + itembuf.len > context->wrapColumn) appendContextKeyword(context, "", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_VAR); } } /* Add the new item */ appendStringInfoString(buf, itembuf.data); /* clean up */ pfree(itembuf.data); } } } static void get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) { StringInfo buf = context->buf; deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); if (IsA(jtnode, RangeTblRef)) { int varno = ((RangeTblRef *) jtnode)->rtindex; RangeTblEntry *rte = rt_fetch(varno, query->rtable); char *refname = get_rtable_name(varno, context); deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); RangeTblFunction *rtfunc1 = NULL; bool printalias; if (rte->lateral) appendStringInfoString(buf, "LATERAL "); /* Print the FROM item proper */ switch (rte->rtekind) { case RTE_RELATION: /* Normal relation RTE */ appendStringInfo(buf, "%s%s", only_marker(rte), generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, context->namespaces)); break; case RTE_SUBQUERY: /* Subquery RTE */ appendStringInfoChar(buf, '('); get_query_def(rte->subquery, buf, context->namespaces, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); appendStringInfoChar(buf, ')'); break; case RTE_FUNCTION: /* if it's a shard, do differently */ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { char *fragmentSchemaName = NULL; char *fragmentTableName = NULL; ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); /* use schema and table name from the remote alias */ appendStringInfoString(buf, generate_fragment_name(fragmentSchemaName, fragmentTableName)); break; } /* Function RTE */ rtfunc1 = (RangeTblFunction *) linitial(rte->functions); /* * Omit ROWS FROM() syntax for just one function, unless it * has both a coldeflist and WITH ORDINALITY. If it has both, * we must use ROWS FROM() syntax to avoid ambiguity about * whether the coldeflist includes the ordinality column. */ if (list_length(rte->functions) == 1 && (rtfunc1->funccolnames == NIL || !rte->funcordinality)) { get_rule_expr_funccall(rtfunc1->funcexpr, context, true); /* we'll print the coldeflist below, if it has one */ } else { bool all_unnest; ListCell *lc; /* * If all the function calls in the list are to unnest, * and none need a coldeflist, then collapse the list back * down to UNNEST(args). (If we had more than one * built-in unnest function, this would get more * difficult.) 
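 * For example, a multi-argument UNNEST(arr1, arr2), which the parser
 * expands into two unnest() calls attached to a single RTE, is printed
 * back as UNNEST(arr1, arr2) rather than as
 * ROWS FROM(unnest(arr1), unnest(arr2)).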
* * XXX This is pretty ugly, since it makes not-terribly- * future-proof assumptions about what the parser would do * with the output; but the alternative is to emit our * nonstandard ROWS FROM() notation for what might have * been a perfectly spec-compliant multi-argument * UNNEST(). */ all_unnest = true; foreach(lc, rte->functions) { RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); if (!IsA(rtfunc->funcexpr, FuncExpr) || ((FuncExpr *) rtfunc->funcexpr)->funcid != F_ARRAY_UNNEST || rtfunc->funccolnames != NIL) { all_unnest = false; break; } } if (all_unnest) { List *allargs = NIL; foreach(lc, rte->functions) { RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); List *args = ((FuncExpr *) rtfunc->funcexpr)->args; allargs = list_concat(allargs, list_copy(args)); } appendStringInfoString(buf, "UNNEST("); get_rule_expr((Node *) allargs, context, true); appendStringInfoChar(buf, ')'); } else { int funcno = 0; appendStringInfoString(buf, "ROWS FROM("); foreach(lc, rte->functions) { RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); if (funcno > 0) appendStringInfoString(buf, ", "); get_rule_expr_funccall(rtfunc->funcexpr, context, true); if (rtfunc->funccolnames != NIL) { /* Reconstruct the column definition list */ appendStringInfoString(buf, " AS "); get_from_clause_coldeflist(rtfunc, NULL, context); } funcno++; } appendStringInfoChar(buf, ')'); } /* prevent printing duplicate coldeflist below */ rtfunc1 = NULL; } if (rte->funcordinality) appendStringInfoString(buf, " WITH ORDINALITY"); break; case RTE_TABLEFUNC: get_tablefunc(rte->tablefunc, context, true); break; case RTE_VALUES: /* Values list RTE */ appendStringInfoChar(buf, '('); get_values_def(rte->values_lists, context); appendStringInfoChar(buf, ')'); break; case RTE_CTE: appendStringInfoString(buf, quote_identifier(rte->ctename)); break; default: elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind); break; } /* Print the relation alias, if needed */ printalias = false; if (rte->alias != NULL) { /* Always print alias if user provided one */ printalias = true; } else if (colinfo->printaliases) { /* Always print alias if we need to print column aliases */ printalias = true; } else if (rte->rtekind == RTE_RELATION) { /* * No need to print alias if it's same as relation name (this * would normally be the case, but not if set_rtable_names had to * resolve a conflict). */ if (strcmp(refname, get_relation_name(rte->relid)) != 0) printalias = true; } else if (rte->rtekind == RTE_FUNCTION) { /* * For a function RTE, always print alias. This covers possible * renaming of the function and/or instability of the * FigureColname rules for things that aren't simple functions. * Note we'd need to force it anyway for the columndef list case. */ printalias = true; } else if (rte->rtekind == RTE_VALUES) { /* Alias is syntactically required for VALUES */ printalias = true; } else if (rte->rtekind == RTE_CTE) { /* * No need to print alias if it's same as CTE name (this would * normally be the case, but not if set_rtable_names had to * resolve a conflict). 
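 * For example, if the alias assigned by set_rtable_names ended up as
 * some_cte_1 because of a naming conflict, we print that alias after
 * the CTE name so that column references elsewhere in the deparsed
 * query still resolve to the right range table entry.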
*/ if (strcmp(refname, rte->ctename) != 0) printalias = true; } else if (rte->rtekind == RTE_SUBQUERY) { /* subquery requires alias too */ printalias = true; } if (printalias) appendStringInfo(buf, " %s", quote_identifier(refname)); /* Print the column definitions or aliases, if needed */ if (rtfunc1 && rtfunc1->funccolnames != NIL) { /* Reconstruct the columndef list, which is also the aliases */ get_from_clause_coldeflist(rtfunc1, colinfo, context); } else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD) { /* Else print column aliases as needed */ get_column_alias_list(colinfo, context); } /* Tablesample clause must go after any alias */ if (rte->rtekind == RTE_RELATION && rte->tablesample) get_tablesample_def(rte->tablesample, context); } else if (IsA(jtnode, JoinExpr)) { JoinExpr *j = (JoinExpr *) jtnode; deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); bool need_paren_on_right; need_paren_on_right = PRETTY_PAREN(context) && !IsA(j->rarg, RangeTblRef) && !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL); if (!PRETTY_PAREN(context) || j->alias != NULL) appendStringInfoChar(buf, '('); get_from_clause_item(j->larg, query, context); switch (j->jointype) { case JOIN_INNER: if (j->quals) appendContextKeyword(context, " JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); else appendContextKeyword(context, " CROSS JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; case JOIN_LEFT: appendContextKeyword(context, " LEFT JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; case JOIN_FULL: appendContextKeyword(context, " FULL JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; case JOIN_RIGHT: appendContextKeyword(context, " RIGHT JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; default: elog(ERROR, "unrecognized join type: %d", (int) j->jointype); } if (need_paren_on_right) appendStringInfoChar(buf, '('); get_from_clause_item(j->rarg, query, context); if (need_paren_on_right) appendStringInfoChar(buf, ')'); if (j->usingClause) { ListCell *lc; bool first = true; appendStringInfoString(buf, " USING ("); /* Use the assigned names, not what's in usingClause */ foreach(lc, colinfo->usingNames) { char *colname = (char *) lfirst(lc); if (first) first = false; else appendStringInfoString(buf, ", "); appendStringInfoString(buf, quote_identifier(colname)); } appendStringInfoChar(buf, ')'); } else if (j->quals) { appendStringInfoString(buf, " ON "); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr(j->quals, context, false); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } else if (j->jointype != JOIN_INNER) { /* If we didn't say CROSS JOIN above, we must provide an ON */ appendStringInfoString(buf, " ON TRUE"); } if (!PRETTY_PAREN(context) || j->alias != NULL) appendStringInfoChar(buf, ')'); /* Yes, it's correct to put alias after the right paren ... */ if (j->alias != NULL) { appendStringInfo(buf, " %s", quote_identifier(j->alias->aliasname)); get_column_alias_list(colinfo, context); } } else elog(ERROR, "unrecognized node type: %d", (int) nodeTag(jtnode)); } /* * get_column_alias_list - print column alias list for an RTE * * Caller must already have printed the relation's alias name. 
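 * The aliases are emitted as a parenthesized, comma-separated list, so
 * a three-column RTE whose aliases had to be renamed comes out as
 * something like (a, b_1, c).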
*/ static void get_column_alias_list(deparse_columns *colinfo, deparse_context *context) { StringInfo buf = context->buf; int i; bool first = true; /* Don't print aliases if not needed */ if (!colinfo->printaliases) return; for (i = 0; i < colinfo->num_new_cols; i++) { char *colname = colinfo->new_colnames[i]; if (first) { appendStringInfoChar(buf, '('); first = false; } else appendStringInfoString(buf, ", "); appendStringInfoString(buf, quote_identifier(colname)); } if (!first) appendStringInfoChar(buf, ')'); } /* * get_from_clause_coldeflist - reproduce FROM clause coldeflist * * When printing a top-level coldeflist (which is syntactically also the * relation's column alias list), use column names from colinfo. But when * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the * original coldeflist's names, which are available in rtfunc->funccolnames. * Pass NULL for colinfo to select the latter behavior. * * The coldeflist is appended immediately (no space) to buf. Caller is * responsible for ensuring that an alias or AS is present before it. */ static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, deparse_columns *colinfo, deparse_context *context) { StringInfo buf = context->buf; ListCell *l1; ListCell *l2; ListCell *l3; ListCell *l4; int i; appendStringInfoChar(buf, '('); /* there's no forfour(), so must chase one list the hard way */ i = 0; l4 = list_head(rtfunc->funccolnames); forthree(l1, rtfunc->funccoltypes, l2, rtfunc->funccoltypmods, l3, rtfunc->funccolcollations) { Oid atttypid = lfirst_oid(l1); int32 atttypmod = lfirst_int(l2); Oid attcollation = lfirst_oid(l3); char *attname; if (colinfo) attname = colinfo->colnames[i]; else attname = strVal(lfirst(l4)); Assert(attname); /* shouldn't be any dropped columns here */ if (i > 0) appendStringInfoString(buf, ", "); appendStringInfo(buf, "%s %s", quote_identifier(attname), format_type_with_typemod(atttypid, atttypmod)); if (OidIsValid(attcollation) && attcollation != get_typcollation(atttypid)) appendStringInfo(buf, " COLLATE %s", generate_collation_name(attcollation)); l4 = lnext(l4); i++; } appendStringInfoChar(buf, ')'); } /* * get_tablesample_def - print a TableSampleClause */ static void get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) { StringInfo buf = context->buf; Oid argtypes[1]; int nargs; ListCell *l; /* * We should qualify the handler's function name if it wouldn't be * resolved by lookup in the current search path. */ argtypes[0] = INTERNALOID; appendStringInfo(buf, " TABLESAMPLE %s (", generate_function_name(tablesample->tsmhandler, 1, NIL, argtypes, false, NULL, EXPR_KIND_NONE)); nargs = 0; foreach(l, tablesample->args) { if (nargs++ > 0) appendStringInfoString(buf, ", "); get_rule_expr((Node *) lfirst(l), context, false); } appendStringInfoChar(buf, ')'); if (tablesample->repeatable != NULL) { appendStringInfoString(buf, " REPEATABLE ("); get_rule_expr((Node *) tablesample->repeatable, context, false); appendStringInfoChar(buf, ')'); } } /* * get_opclass_name - fetch name of an index operator class * * The opclass name is appended (after a space) to buf. * * Output is suppressed if the opclass is the default for the given * actual_datatype. (If you don't want this behavior, just pass * InvalidOid for actual_datatype.) 
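 * For example, an index column of type text that uses text_pattern_ops
 * has " text_pattern_ops" appended (schema-qualified if the opclass is
 * not visible), while a column using the default btree opclass for text
 * produces no output at all.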
*/ static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) { HeapTuple ht_opc; Form_pg_opclass opcrec; char *opcname; char *nspname; ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); if (!HeapTupleIsValid(ht_opc)) elog(ERROR, "cache lookup failed for opclass %u", opclass); opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc); if (!OidIsValid(actual_datatype) || GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) { /* Okay, we need the opclass name. Do we need to qualify it? */ opcname = NameStr(opcrec->opcname); if (OpclassIsVisible(opclass)) appendStringInfo(buf, " %s", quote_identifier(opcname)); else { nspname = get_namespace_name(opcrec->opcnamespace); appendStringInfo(buf, " %s.%s", quote_identifier(nspname), quote_identifier(opcname)); } } ReleaseSysCache(ht_opc); } /* * processIndirection - take care of array and subfield assignment * * We strip any top-level FieldStore or assignment ArrayRef nodes that * appear in the input, printing them as decoration for the base column * name (which we assume the caller just printed). We might also need to * strip CoerceToDomain nodes, but only ones that appear above assignment * nodes. * * Returns the subexpression that's to be assigned. */ static Node * processIndirection(Node *node, deparse_context *context) { StringInfo buf = context->buf; CoerceToDomain *cdomain = NULL; for (;;) { if (node == NULL) break; if (IsA(node, FieldStore)) { FieldStore *fstore = (FieldStore *) node; Oid typrelid; char *fieldname; /* lookup tuple type */ typrelid = get_typ_typrelid(fstore->resulttype); if (!OidIsValid(typrelid)) elog(ERROR, "argument type %s of FieldStore is not a tuple type", format_type_be(fstore->resulttype)); /* * Print the field name. There should only be one target field in * stored rules. There could be more than that in executable * target lists, but this function cannot be used for that case. */ Assert(list_length(fstore->fieldnums) == 1); fieldname = get_relid_attribute_name(typrelid, linitial_int(fstore->fieldnums)); appendStringInfo(buf, ".%s", quote_identifier(fieldname)); /* * We ignore arg since it should be an uninteresting reference to * the target column or subcolumn. */ node = (Node *) linitial(fstore->newvals); } else if (IsA(node, ArrayRef)) { ArrayRef *aref = (ArrayRef *) node; if (aref->refassgnexpr == NULL) break; printSubscripts(aref, context); /* * We ignore refexpr since it should be an uninteresting reference * to the target column or subcolumn. */ node = (Node *) aref->refassgnexpr; } else if (IsA(node, CoerceToDomain)) { cdomain = (CoerceToDomain *) node; /* If it's an explicit domain coercion, we're done */ if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) break; /* Tentatively descend past the CoerceToDomain */ node = (Node *) cdomain->arg; } else break; } /* * If we descended past a CoerceToDomain whose argument turned out not to * be a FieldStore or array assignment, back up to the CoerceToDomain. * (This is not enough to be fully correct if there are nested implicit * CoerceToDomains, but such cases shouldn't ever occur.) 
*/ if (cdomain && node == (Node *) cdomain->arg) node = (Node *) cdomain; return node; } static void printSubscripts(ArrayRef *aref, deparse_context *context) { StringInfo buf = context->buf; ListCell *lowlist_item; ListCell *uplist_item; lowlist_item = list_head(aref->reflowerindexpr); /* could be NULL */ foreach(uplist_item, aref->refupperindexpr) { appendStringInfoChar(buf, '['); if (lowlist_item) { /* If subexpression is NULL, get_rule_expr prints nothing */ get_rule_expr((Node *) lfirst(lowlist_item), context, false); appendStringInfoChar(buf, ':'); lowlist_item = lnext(lowlist_item); } /* If subexpression is NULL, get_rule_expr prints nothing */ get_rule_expr((Node *) lfirst(uplist_item), context, false); appendStringInfoChar(buf, ']'); } } /* * get_relation_name * Get the unqualified name of a relation specified by OID * * This differs from the underlying get_rel_name() function in that it will * throw error instead of silently returning NULL if the OID is bad. */ static char * get_relation_name(Oid relid) { char *relname = get_rel_name(relid); if (!relname) elog(ERROR, "cache lookup failed for relation %u", relid); return relname; } /* * generate_relation_or_shard_name * Compute the name to display for a relation or shard * * If the provided relid is equal to the provided distrelid, this function * returns a shard-extended relation name; otherwise, it falls through to a * simple generate_relation_name call. */ static char * generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, List *namespaces) { char *relname = NULL; if (relid == distrelid) { relname = get_relation_name(relid); if (shardid > 0) { Oid schemaOid = get_rel_namespace(relid); char *schemaName = get_namespace_name(schemaOid); AppendShardIdToName(&relname, shardid); relname = quote_qualified_identifier(schemaName, relname); } } else { relname = generate_relation_name(relid, namespaces); } return relname; } /* * generate_relation_name * Compute the name to display for a relation specified by OID * * The result includes all necessary quoting and schema-prefixing. * * If namespaces isn't NIL, it must be a list of deparse_namespace nodes. * We will forcibly qualify the relation name if it equals any CTE name * visible in the namespace list. */ char * generate_relation_name(Oid relid, List *namespaces) { HeapTuple tp; Form_pg_class reltup; bool need_qual; ListCell *nslist; char *relname; char *nspname; char *result; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for relation %u", relid); reltup = (Form_pg_class) GETSTRUCT(tp); relname = NameStr(reltup->relname); /* Check for conflicting CTE name */ need_qual = false; foreach(nslist, namespaces) { deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist); ListCell *ctlist; foreach(ctlist, dpns->ctes) { CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist); if (strcmp(cte->ctename, relname) == 0) { need_qual = true; break; } } if (need_qual) break; } /* Otherwise, qualify the name if not visible in search path */ if (!need_qual) need_qual = !RelationIsVisible(relid); if (need_qual) nspname = get_namespace_name(reltup->relnamespace); else nspname = NULL; result = quote_qualified_identifier(nspname, relname); ReleaseSysCache(tp); return result; } /* * generate_fragment_name * Compute the name to display for a shard or merged table * * The result includes all necessary quoting and schema-prefixing. The schema * name can be NULL for regular shards. 
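 * (A regular shard is referred to by its shard relation name alone; for
 * instance, a shard of lineitem with shard ID 102009 would appear
 * simply as lineitem_102009.)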
For merged tables, they are always * declared within a job-specific schema, and therefore can't have null schema * names. */ static char * generate_fragment_name(char *schemaName, char *tableName) { StringInfo fragmentNameString = makeStringInfo(); if (schemaName != NULL) { appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName), quote_identifier(tableName)); } else { appendStringInfoString(fragmentNameString, quote_identifier(tableName)); } return fragmentNameString->data; } /* * generate_function_name * Compute the name to display for a function specified by OID, * given that it is being called with the specified actual arg names and * types. (Those matter because of ambiguous-function resolution rules.) * * If we're dealing with a potentially variadic function (in practice, this * means a FuncExpr or Aggref, not some other way of calling a function), then * has_variadic must specify whether variadic arguments have been merged, * and *use_variadic_p will be set to indicate whether to print VARIADIC in * the output. For non-FuncExpr cases, has_variadic should be FALSE and * use_variadic_p can be NULL. * * The result includes all necessary quoting and schema-prefixing. */ static char * generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, bool has_variadic, bool *use_variadic_p, ParseExprKind special_exprkind) { char *result; HeapTuple proctup; Form_pg_proc procform; char *proname; bool use_variadic; char *nspname; FuncDetailCode p_result; Oid p_funcid; Oid p_rettype; bool p_retset; int p_nvargs; Oid p_vatype; Oid *p_true_typeids; bool force_qualify = false; proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(proctup)) elog(ERROR, "cache lookup failed for function %u", funcid); procform = (Form_pg_proc) GETSTRUCT(proctup); proname = NameStr(procform->proname); /* * Due to parser hacks to avoid needing to reserve CUBE, we need to force * qualification in some special cases. */ if (special_exprkind == EXPR_KIND_GROUP_BY) { if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) force_qualify = true; } /* * Determine whether VARIADIC should be printed. We must do this first * since it affects the lookup rules in func_get_detail(). * * Currently, we always print VARIADIC if the function has a merged * variadic-array argument. Note that this is always the case for * functions taking a VARIADIC argument type other than VARIADIC ANY. * * In principle, if VARIADIC wasn't originally specified and the array * actual argument is deconstructable, we could print the array elements * separately and not print VARIADIC, thus more nearly reproducing the * original input. For the moment that seems like too much complication * for the benefit, and anyway we do not know whether VARIADIC was * originally specified if it's a non-ANY type. */ if (use_variadic_p) { /* Parser should not have set funcvariadic unless fn is variadic */ Assert(!has_variadic || OidIsValid(procform->provariadic)); use_variadic = has_variadic; *use_variadic_p = use_variadic; } else { Assert(!has_variadic); use_variadic = false; } /* * The idea here is to schema-qualify only if the parser would fail to * resolve the correct function given the unqualified func name with the * specified argtypes and VARIADIC flag. But if we already decided to * force qualification, then we can skip the lookup and pretend we didn't * find it. 
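 * For example, a call to a function in a schema that is not on the
 * search_path, say reporting.rollup_totals(int), must be printed as
 * reporting.rollup_totals(...); a call that the unqualified lookup
 * resolves to the very same function OID is printed without a schema
 * prefix.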
*/ if (!force_qualify) p_result = func_get_detail(list_make1(makeString(proname)), NIL, argnames, nargs, argtypes, !use_variadic, true, &p_funcid, &p_rettype, &p_retset, &p_nvargs, &p_vatype, &p_true_typeids, NULL); else { p_result = FUNCDETAIL_NOTFOUND; p_funcid = InvalidOid; } if ((p_result == FUNCDETAIL_NORMAL || p_result == FUNCDETAIL_AGGREGATE || p_result == FUNCDETAIL_WINDOWFUNC) && p_funcid == funcid) nspname = NULL; else nspname = get_namespace_name(procform->pronamespace); result = quote_qualified_identifier(nspname, proname); ReleaseSysCache(proctup); return result; } /* * generate_operator_name * Compute the name to display for an operator specified by OID, * given that it is being called with the specified actual arg types. * (Arg types matter because of ambiguous-operator resolution rules. * Pass InvalidOid for unused arg of a unary operator.) * * The result includes all necessary quoting and schema-prefixing, * plus the OPERATOR() decoration needed to use a qualified operator name * in an expression. */ static char * generate_operator_name(Oid operid, Oid arg1, Oid arg2) { StringInfoData buf; HeapTuple opertup; Form_pg_operator operform; char *oprname; char *nspname; Operator p_result; initStringInfo(&buf); opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid)); if (!HeapTupleIsValid(opertup)) elog(ERROR, "cache lookup failed for operator %u", operid); operform = (Form_pg_operator) GETSTRUCT(opertup); oprname = NameStr(operform->oprname); /* * The idea here is to schema-qualify only if the parser would fail to * resolve the correct operator given the unqualified op name with the * specified argtypes. */ switch (operform->oprkind) { case 'b': p_result = oper(NULL, list_make1(makeString(oprname)), arg1, arg2, true, -1); break; case 'l': p_result = left_oper(NULL, list_make1(makeString(oprname)), arg2, true, -1); break; case 'r': p_result = right_oper(NULL, list_make1(makeString(oprname)), arg1, true, -1); break; default: elog(ERROR, "unrecognized oprkind: %d", operform->oprkind); p_result = NULL; /* keep compiler quiet */ break; } if (p_result != NULL && oprid(p_result) == operid) nspname = NULL; else { nspname = get_namespace_name(operform->oprnamespace); appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname)); } appendStringInfoString(&buf, oprname); if (nspname) appendStringInfoChar(&buf, ')'); if (p_result != NULL) ReleaseSysCache(p_result); ReleaseSysCache(opertup); return buf.data; } /* * get_one_range_partition_bound_string * A C string representation of one range partition bound */ char * get_range_partbound_string(List *bound_datums) { deparse_context context; StringInfo buf = makeStringInfo(); ListCell *cell; char *sep; memset(&context, 0, sizeof(deparse_context)); context.buf = buf; appendStringInfoString(buf, "("); sep = ""; foreach(cell, bound_datums) { PartitionRangeDatum *datum = castNode(PartitionRangeDatum, lfirst(cell)); appendStringInfoString(buf, sep); if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE) appendStringInfoString(buf, "MINVALUE"); else if (datum->kind == PARTITION_RANGE_DATUM_MAXVALUE) appendStringInfoString(buf, "MAXVALUE"); else { Const *val = castNode(Const, datum->value); get_const_expr(val, &context, -1); } sep = ", "; } appendStringInfoString(buf, ")"); return buf->data; } #endif /* (PG_VERSION_NUM >= 100000) */ citus-7.0.3/src/backend/distributed/utils/ruleutils_96.c000066400000000000000000006631701317107136600232440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * 
ruleutils_96.c * Additional, non core exposed, functions to convert stored * expressions/querytrees back to source text * * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/distributed/utils/ruleutils_96.c * * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ #include "postgres.h" #if (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 90700) #include #include #include #include "access/amapi.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "catalog/dependency.h" #include "catalog/indexing.h" #include "catalog/pg_aggregate.h" #include "catalog/pg_am.h" #include "catalog/pg_authid.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" #include "catalog/pg_depend.h" #include "catalog/pg_extension.h" #include "catalog/pg_foreign_data_wrapper.h" #include "catalog/pg_language.h" #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" #include "catalog/pg_proc.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "commands/defrem.h" #include "commands/extension.h" #include "commands/tablespace.h" #include "common/keywords.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_ruleutils.h" #include "executor/spi.h" #include "foreign/foreign.h" #include "funcapi.h" #include "mb/pg_wchar.h" #include "miscadmin.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/tlist.h" #include "parser/parse_agg.h" #include "parser/parse_func.h" #include "parser/parse_node.h" #include "parser/parse_oper.h" #include "parser/parser.h" #include "parser/parsetree.h" #include "rewrite/rewriteHandler.h" #include "rewrite/rewriteManip.h" #include "rewrite/rewriteSupport.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/ruleutils.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/tqual.h" #include "utils/typcache.h" #include "utils/xml.h" /* ---------- * Pretty formatting constants * ---------- */ /* Indent counts */ #define PRETTYINDENT_STD 8 #define PRETTYINDENT_JOIN 4 #define PRETTYINDENT_VAR 4 #define PRETTYINDENT_LIMIT 40 /* wrap limit */ /* Pretty flags */ #define PRETTYFLAG_PAREN 1 #define PRETTYFLAG_INDENT 2 /* Default line length for pretty-print wrapping: 0 means wrap always */ #define WRAP_COLUMN_DEFAULT 0 /* macro to test if pretty action needed */ #define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN) #define PRETTY_INDENT(context) ((context)->prettyFlags & PRETTYFLAG_INDENT) /* ---------- * Local data types * ---------- */ /* Context info needed for invoking a recursive querytree display routine */ typedef struct { StringInfo buf; /* output buffer to append to */ List *namespaces; /* List of deparse_namespace nodes */ List *windowClause; /* Current query level's WINDOW clause */ List *windowTList; /* targetlist for resolving WINDOW clause */ int prettyFlags; /* enabling of pretty-print functions */ int wrapColumn; /* max line length, or -1 for no limit */ int indentLevel; /* current indent level for prettyprint */ bool varprefix; /* TRUE to print prefixes on Vars */ Oid distrelid; /* the distributed table being modified, if valid */ int64 shardid; /* a distributed table's shardid, if positive */ ParseExprKind special_exprkind; /* 
set only for exprkinds needing * special handling */ } deparse_context; /* * Each level of query context around a subtree needs a level of Var namespace. * A Var having varlevelsup=N refers to the N'th item (counting from 0) in * the current context's namespaces list. * * The rangetable is the list of actual RTEs from the query tree, and the * cte list is the list of actual CTEs. * * rtable_names holds the alias name to be used for each RTE (either a C * string, or NULL for nameless RTEs such as unnamed joins). * rtable_columns holds the column alias names to be used for each RTE. * * In some cases we need to make names of merged JOIN USING columns unique * across the whole query, not only per-RTE. If so, unique_using is TRUE * and using_names is a list of C strings representing names already assigned * to USING columns. * * When deparsing plan trees, there is always just a single item in the * deparse_namespace list (since a plan tree never contains Vars with * varlevelsup > 0). We store the PlanState node that is the immediate * parent of the expression to be deparsed, as well as a list of that * PlanState's ancestors. In addition, we store its outer and inner subplan * state nodes, as well as their plan nodes' targetlists, and the index tlist * if the current plan node might contain INDEX_VAR Vars. (These fields could * be derived on-the-fly from the current PlanState, but it seems notationally * clearer to set them up as separate fields.) */ typedef struct { List *rtable; /* List of RangeTblEntry nodes */ List *rtable_names; /* Parallel list of names for RTEs */ List *rtable_columns; /* Parallel list of deparse_columns structs */ List *ctes; /* List of CommonTableExpr nodes */ /* Workspace for column alias assignment: */ bool unique_using; /* Are we making USING names globally unique */ List *using_names; /* List of assigned names for USING columns */ /* Remaining fields are used only when deparsing a Plan tree: */ PlanState *planstate; /* immediate parent of current expression */ List *ancestors; /* ancestors of planstate */ PlanState *outer_planstate; /* outer subplan state, or NULL if none */ PlanState *inner_planstate; /* inner subplan state, or NULL if none */ List *outer_tlist; /* referent for OUTER_VAR Vars */ List *inner_tlist; /* referent for INNER_VAR Vars */ List *index_tlist; /* referent for INDEX_VAR Vars */ } deparse_namespace; /* * Per-relation data about column alias names. * * Selecting aliases is unreasonably complicated because of the need to dump * rules/views whose underlying tables may have had columns added, deleted, or * renamed since the query was parsed. We must nonetheless print the rule/view * in a form that can be reloaded and will produce the same results as before. * * For each RTE used in the query, we must assign column aliases that are * unique within that RTE. SQL does not require this of the original query, * but due to factors such as *-expansion we need to be able to uniquely * reference every column in a decompiled query. As long as we qualify all * column references, per-RTE uniqueness is sufficient for that. * * However, we can't ensure per-column name uniqueness for unnamed join RTEs, * since they just inherit column names from their input RTEs, and we can't * rename the columns at the join level. Most of the time this isn't an issue * because we don't need to reference the join's output columns as such; we * can reference the input columns instead. 
That approach can fail for merged * JOIN USING columns, however, so when we have one of those in an unnamed * join, we have to make that column's alias globally unique across the whole * query to ensure it can be referenced unambiguously. * * Another problem is that a JOIN USING clause requires the columns to be * merged to have the same aliases in both input RTEs, and that no other * columns in those RTEs or their children conflict with the USING names. * To handle that, we do USING-column alias assignment in a recursive * traversal of the query's jointree. When descending through a JOIN with * USING, we preassign the USING column names to the child columns, overriding * other rules for column alias assignment. We also mark each RTE with a list * of all USING column names selected for joins containing that RTE, so that * when we assign other columns' aliases later, we can avoid conflicts. * * Another problem is that if a JOIN's input tables have had columns added or * deleted since the query was parsed, we must generate a column alias list * for the join that matches the current set of input columns --- otherwise, a * change in the number of columns in the left input would throw off matching * of aliases to columns of the right input. Thus, positions in the printable * column alias list are not necessarily one-for-one with varattnos of the * JOIN, so we need a separate new_colnames[] array for printing purposes. */ typedef struct { /* * colnames is an array containing column aliases to use for columns that * existed when the query was parsed. Dropped columns have NULL entries. * This array can be directly indexed by varattno to get a Var's name. * * Non-NULL entries are guaranteed unique within the RTE, *except* when * this is for an unnamed JOIN RTE. In that case we merely copy up names * from the two input RTEs. * * During the recursive descent in set_using_names(), forcible assignment * of a child RTE's column name is represented by pre-setting that element * of the child's colnames array. So at that stage, NULL entries in this * array just mean that no name has been preassigned, not necessarily that * the column is dropped. */ int num_cols; /* length of colnames[] array */ char **colnames; /* array of C strings and NULLs */ /* * new_colnames is an array containing column aliases to use for columns * that would exist if the query was re-parsed against the current * definitions of its base tables. This is what to print as the column * alias list for the RTE. This array does not include dropped columns, * but it will include columns added since original parsing. Indexes in * it therefore have little to do with current varattno values. As above, * entries are unique unless this is for an unnamed JOIN RTE. (In such an * RTE, we never actually print this array, but we must compute it anyway * for possible use in computing column names of upper joins.) The * parallel array is_new_col marks which of these columns are new since * original parsing. Entries with is_new_col false must match the * non-NULL colnames entries one-for-one. 
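 * For example, if a view's base table had columns (a, b, c) when the
 * view was parsed, column b was dropped and column d added since then,
 * colnames holds {"a", NULL, "c"} while new_colnames holds
 * {"a", "c", "d"} with is_new_col = {false, false, true}.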
*/ int num_new_cols; /* length of new_colnames[] array */ char **new_colnames; /* array of C strings */ bool *is_new_col; /* array of bool flags */ /* This flag tells whether we should actually print a column alias list */ bool printaliases; /* This list has all names used as USING names in joins above this RTE */ List *parentUsing; /* names assigned to parent merged columns */ /* * If this struct is for a JOIN RTE, we fill these fields during the * set_using_names() pass to describe its relationship to its child RTEs. * * leftattnos and rightattnos are arrays with one entry per existing * output column of the join (hence, indexable by join varattno). For a * simple reference to a column of the left child, leftattnos[i] is the * child RTE's attno and rightattnos[i] is zero; and conversely for a * column of the right child. But for merged columns produced by JOIN * USING/NATURAL JOIN, both leftattnos[i] and rightattnos[i] are nonzero. * Also, if the column has been dropped, both are zero. * * If it's a JOIN USING, usingNames holds the alias names selected for the * merged columns (these might be different from the original USING list, * if we had to modify names to achieve uniqueness). */ int leftrti; /* rangetable index of left child */ int rightrti; /* rangetable index of right child */ int *leftattnos; /* left-child varattnos of join cols, or 0 */ int *rightattnos; /* right-child varattnos of join cols, or 0 */ List *usingNames; /* names assigned to merged columns */ } deparse_columns; /* This macro is analogous to rt_fetch(), but for deparse_columns structs */ #define deparse_columns_fetch(rangetable_index, dpns) \ ((deparse_columns *) list_nth((dpns)->rtable_columns, (rangetable_index)-1)) /* * Entry in set_rtable_names' hash table */ typedef struct { char name[NAMEDATALEN]; /* Hash key --- must be first */ int counter; /* Largest addition used so far for name */ } NameHashEntry; /* ---------- * Local functions * * Most of these functions used to use fixed-size buffers to build their * results. Now, they take an (already initialized) StringInfo object * as a parameter, and append their text output to its contents. 
* ---------- */ static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, Bitmapset *rels_used); static void set_deparse_for_query(deparse_namespace *dpns, Query *query, List *parent_namespaces); static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode); static void set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing); static void set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo); static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo); static bool colname_is_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo); static char *make_colname_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo); static void expand_colnames_array_to(deparse_columns *colinfo, int n); static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, deparse_columns *colinfo); static void flatten_join_using_qual(Node *qual, List **leftvars, List **rightvars); static char *get_rtable_name(int rtindex, deparse_context *context); static void set_deparse_planstate(deparse_namespace *dpns, PlanState *ps); static void push_child_plan(deparse_namespace *dpns, PlanState *ps, deparse_namespace *save_dpns); static void pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns); static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, deparse_namespace *save_dpns); static void pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns); static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent); static void get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, Oid distrelid, int64 shardid, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent); static void get_values_def(List *values_lists, deparse_context *context); static void get_with_clause(Query *query, deparse_context *context); static void get_select_query_def(Query *query, deparse_context *context, TupleDesc resultDesc); static void get_insert_query_def(Query *query, deparse_context *context); static void get_update_query_def(Query *query, deparse_context *context); static void get_update_query_targetlist_def(Query *query, List *targetList, deparse_context *context, RangeTblEntry *rte); static void get_delete_query_def(Query *query, deparse_context *context); static void get_utility_query_def(Query *query, deparse_context *context); static void get_basic_select_query(Query *query, deparse_context *context, TupleDesc resultDesc); static void get_target_list(List *targetList, deparse_context *context, TupleDesc resultDesc); static void get_setop_query(Node *setOp, Query *query, deparse_context *context, TupleDesc resultDesc); static Node *get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, deparse_context *context); static void get_rule_groupingset(GroupingSet *gset, List *targetlist, bool omit_parens, deparse_context *context); static void get_rule_orderby(List *orderList, List *targetList, bool force_colno, deparse_context *context); static void get_rule_windowclause(Query *query, deparse_context *context); static void get_rule_windowspec(WindowClause *wc, List *targetList, deparse_context *context); static char *get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context); static void get_special_variable(Node *node, deparse_context *context, void *private); 
static void resolve_special_varno(Node *node, deparse_context *context, void *private, void (*callback) (Node *, deparse_context *, void *)); static Node *find_param_referent(Param *param, deparse_context *context, deparse_namespace **dpns_p, ListCell **ancestor_cell_p); static void get_parameter(Param *param, deparse_context *context); static const char *get_simple_binary_op_name(OpExpr *expr); static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags); static void appendContextKeyword(deparse_context *context, const char *str, int indentBefore, int indentAfter, int indentPlus); static void removeStringInfoSpaces(StringInfo str); static void get_rule_expr(Node *node, deparse_context *context, bool showimplicit); static void get_rule_expr_toplevel(Node *node, deparse_context *context, bool showimplicit); static void get_rule_expr_funccall(Node *node, deparse_context *context, bool showimplicit); static bool looks_like_function(Node *node); static void get_oper_expr(OpExpr *expr, deparse_context *context); static void get_func_expr(FuncExpr *expr, deparse_context *context, bool showimplicit); static void get_agg_expr(Aggref *aggref, deparse_context *context, Aggref *original_aggref); static void get_agg_combine_expr(Node *node, deparse_context *context, void *private); static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context); static void get_coercion_expr(Node *arg, deparse_context *context, Oid resulttype, int32 resulttypmod, Node *parentNode); static void get_const_expr(Const *constval, deparse_context *context, int showtype); static void get_const_collation(Const *constval, deparse_context *context); static void simple_quote_literal(StringInfo buf, const char *val); static void get_sublink_expr(SubLink *sublink, deparse_context *context); static void get_from_clause(Query *query, const char *prefix, deparse_context *context); static void get_from_clause_item(Node *jtnode, Query *query, deparse_context *context); static void get_column_alias_list(deparse_columns *colinfo, deparse_context *context); static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, deparse_columns *colinfo, deparse_context *context); static void get_tablesample_def(TableSampleClause *tablesample, deparse_context *context); static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); static Node *processIndirection(Node *node, deparse_context *context); static void printSubscripts(ArrayRef *aref, deparse_context *context); static char *get_relation_name(Oid relid); static char *generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, List *namespaces); static char *generate_fragment_name(char *schemaName, char *tableName); static char *generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, bool has_variadic, bool *use_variadic_p, ParseExprKind special_exprkind); static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2); #define only_marker(rte) ((rte)->inh ? "" : "ONLY ") /* * pg_get_query_def parses back one query tree, and outputs the resulting query * string into given buffer. */ void pg_get_query_def(Query *query, StringInfo buffer) { get_query_def(query, buffer, NIL, NULL, 0, WRAP_COLUMN_DEFAULT, 0); } /* * set_rtable_names: select RTE aliases to be used in printing a query * * We fill in dpns->rtable_names with a list of names that is one-for-one with * the already-filled dpns->rtable list. 
Each RTE name is unique among those * in the new namespace plus any ancestor namespaces listed in * parent_namespaces. * * If rels_used isn't NULL, only RTE indexes listed in it are given aliases. * * Note that this function is only concerned with relation names, not column * names. */ static void set_rtable_names(deparse_namespace *dpns, List *parent_namespaces, Bitmapset *rels_used) { HASHCTL hash_ctl; HTAB *names_hash; NameHashEntry *hentry; bool found; int rtindex; ListCell *lc; dpns->rtable_names = NIL; /* nothing more to do if empty rtable */ if (dpns->rtable == NIL) return; /* * We use a hash table to hold known names, so that this process is O(N) * not O(N^2) for N names. */ MemSet(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = NAMEDATALEN; hash_ctl.entrysize = sizeof(NameHashEntry); hash_ctl.hcxt = CurrentMemoryContext; names_hash = hash_create("set_rtable_names names", list_length(dpns->rtable), &hash_ctl, HASH_ELEM | HASH_CONTEXT); /* Preload the hash table with names appearing in parent_namespaces */ foreach(lc, parent_namespaces) { deparse_namespace *olddpns = (deparse_namespace *) lfirst(lc); ListCell *lc2; foreach(lc2, olddpns->rtable_names) { char *oldname = (char *) lfirst(lc2); if (oldname == NULL) continue; hentry = (NameHashEntry *) hash_search(names_hash, oldname, HASH_ENTER, &found); /* we do not complain about duplicate names in parent namespaces */ hentry->counter = 0; } } /* Now we can scan the rtable */ rtindex = 1; foreach(lc, dpns->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); char *refname; /* Just in case this takes an unreasonable amount of time ... */ CHECK_FOR_INTERRUPTS(); if (rels_used && !bms_is_member(rtindex, rels_used)) { /* Ignore unreferenced RTE */ refname = NULL; } else if (rte->alias) { /* If RTE has a user-defined alias, prefer that */ refname = rte->alias->aliasname; } else if (rte->rtekind == RTE_RELATION) { /* Use the current actual name of the relation */ refname = get_rel_name(rte->relid); } else if (rte->rtekind == RTE_JOIN) { /* Unnamed join has no refname */ refname = NULL; } else { /* Otherwise use whatever the parser assigned */ refname = rte->eref->aliasname; } /* * If the selected name isn't unique, append digits to make it so, and * make a new hash entry for it once we've got a unique name. For a * very long input name, we might have to truncate to stay within * NAMEDATALEN. */ if (refname) { hentry = (NameHashEntry *) hash_search(names_hash, refname, HASH_ENTER, &found); if (found) { /* Name already in use, must choose a new one */ int refnamelen = strlen(refname); char *modname = (char *) palloc(refnamelen + 16); NameHashEntry *hentry2; do { hentry->counter++; for (;;) { /* * We avoid using %.*s here because it can misbehave * if the data is not valid in what libc thinks is the * prevailing encoding. 
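 * For example, a second RTE that would also be called "users" is
 * renamed to "users_1" here (a third becomes "users_2", and so on),
 * clipping characters off the base name if necessary so that the
 * appended digits still fit within NAMEDATALEN.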
*/ memcpy(modname, refname, refnamelen); sprintf(modname + refnamelen, "_%d", hentry->counter); if (strlen(modname) < NAMEDATALEN) break; /* drop chars from refname to keep all the digits */ refnamelen = pg_mbcliplen(refname, refnamelen, refnamelen - 1); } hentry2 = (NameHashEntry *) hash_search(names_hash, modname, HASH_ENTER, &found); } while (found); hentry2->counter = 0; /* init new hash entry */ refname = modname; } else { /* Name not previously used, need only initialize hentry */ hentry->counter = 0; } } dpns->rtable_names = lappend(dpns->rtable_names, refname); rtindex++; } hash_destroy(names_hash); } /* * set_deparse_for_query: set up deparse_namespace for deparsing a Query tree * * For convenience, this is defined to initialize the deparse_namespace struct * from scratch. */ static void set_deparse_for_query(deparse_namespace *dpns, Query *query, List *parent_namespaces) { ListCell *lc; ListCell *lc2; /* Initialize *dpns and fill rtable/ctes links */ memset(dpns, 0, sizeof(deparse_namespace)); dpns->rtable = query->rtable; dpns->ctes = query->cteList; /* Assign a unique relation alias to each RTE */ set_rtable_names(dpns, parent_namespaces, NULL); /* Initialize dpns->rtable_columns to contain zeroed structs */ dpns->rtable_columns = NIL; while (list_length(dpns->rtable_columns) < list_length(dpns->rtable)) dpns->rtable_columns = lappend(dpns->rtable_columns, palloc0(sizeof(deparse_columns))); /* If it's a utility query, it won't have a jointree */ if (query->jointree) { /* Detect whether global uniqueness of USING names is needed */ dpns->unique_using = has_dangerous_join_using(dpns, (Node *) query->jointree); /* * Select names for columns merged by USING, via a recursive pass over * the query jointree. */ set_using_names(dpns, (Node *) query->jointree, NIL); } /* * Now assign remaining column aliases for each RTE. We do this in a * linear scan of the rtable, so as to process RTEs whether or not they * are in the jointree (we mustn't miss NEW.*, INSERT target relations, * etc). JOIN RTEs must be processed after their children, but this is * okay because they appear later in the rtable list than their children * (cf Asserts in identify_join_columns()). */ forboth(lc, dpns->rtable, lc2, dpns->rtable_columns) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); deparse_columns *colinfo = (deparse_columns *) lfirst(lc2); if (rte->rtekind == RTE_JOIN) set_join_column_names(dpns, rte, colinfo); else set_relation_column_names(dpns, rte, colinfo); } } /* * has_dangerous_join_using: search jointree for unnamed JOIN USING * * Merged columns of a JOIN USING may act differently from either of the input * columns, either because they are merged with COALESCE (in a FULL JOIN) or * because an implicit coercion of the underlying input column is required. * In such a case the column must be referenced as a column of the JOIN not as * a column of either input. And this is problematic if the join is unnamed * (alias-less): we cannot qualify the column's name with an RTE name, since * there is none. (Forcibly assigning an alias to the join is not a solution, * since that will prevent legal references to tables below the join.) * To ensure that every column in the query is unambiguously referenceable, * we must assign such merged columns names that are globally unique across * the whole query, aliasing other columns out of the way as necessary. * * Because the ensuing re-aliasing is fairly damaging to the readability of * the query, we don't do this unless we have to. 
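 * A typical dangerous case is an alias-less FULL JOIN ... USING (id):
 * the merged id column is effectively COALESCE(l.id, r.id), so it can
 * only be referenced as a column of the join itself, and the join has
 * no name we could qualify it with.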
So, we must pre-scan * the join tree to see if we have to, before starting set_using_names(). */ static bool has_dangerous_join_using(deparse_namespace *dpns, Node *jtnode) { if (IsA(jtnode, RangeTblRef)) { /* nothing to do here */ } else if (IsA(jtnode, FromExpr)) { FromExpr *f = (FromExpr *) jtnode; ListCell *lc; foreach(lc, f->fromlist) { if (has_dangerous_join_using(dpns, (Node *) lfirst(lc))) return true; } } else if (IsA(jtnode, JoinExpr)) { JoinExpr *j = (JoinExpr *) jtnode; /* Is it an unnamed JOIN with USING? */ if (j->alias == NULL && j->usingClause) { /* * Yes, so check each join alias var to see if any of them are not * simple references to underlying columns. If so, we have a * dangerous situation and must pick unique aliases. */ RangeTblEntry *jrte = rt_fetch(j->rtindex, dpns->rtable); ListCell *lc; foreach(lc, jrte->joinaliasvars) { Var *aliasvar = (Var *) lfirst(lc); if (aliasvar != NULL && !IsA(aliasvar, Var)) return true; } } /* Nope, but inspect children */ if (has_dangerous_join_using(dpns, j->larg)) return true; if (has_dangerous_join_using(dpns, j->rarg)) return true; } else elog(ERROR, "unrecognized node type: %d", (int) nodeTag(jtnode)); return false; } /* * set_using_names: select column aliases to be used for merged USING columns * * We do this during a recursive descent of the query jointree. * dpns->unique_using must already be set to determine the global strategy. * * Column alias info is saved in the dpns->rtable_columns list, which is * assumed to be filled with pre-zeroed deparse_columns structs. * * parentUsing is a list of all USING aliases assigned in parent joins of * the current jointree node. (The passed-in list must not be modified.) */ static void set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) { if (IsA(jtnode, RangeTblRef)) { /* nothing to do now */ } else if (IsA(jtnode, FromExpr)) { FromExpr *f = (FromExpr *) jtnode; ListCell *lc; foreach(lc, f->fromlist) set_using_names(dpns, (Node *) lfirst(lc), parentUsing); } else if (IsA(jtnode, JoinExpr)) { JoinExpr *j = (JoinExpr *) jtnode; RangeTblEntry *rte = rt_fetch(j->rtindex, dpns->rtable); deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); int *leftattnos; int *rightattnos; deparse_columns *leftcolinfo; deparse_columns *rightcolinfo; int i; ListCell *lc; /* Get info about the shape of the join */ identify_join_columns(j, rte, colinfo); leftattnos = colinfo->leftattnos; rightattnos = colinfo->rightattnos; /* Look up the not-yet-filled-in child deparse_columns structs */ leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); /* * If this join is unnamed, then we cannot substitute new aliases at * this level, so any name requirements pushed down to here must be * pushed down again to the children. */ if (rte->alias == NULL) { for (i = 0; i < colinfo->num_cols; i++) { char *colname = colinfo->colnames[i]; if (colname == NULL) continue; /* Push down to left column, unless it's a system column */ if (leftattnos[i] > 0) { expand_colnames_array_to(leftcolinfo, leftattnos[i]); leftcolinfo->colnames[leftattnos[i] - 1] = colname; } /* Same on the righthand side */ if (rightattnos[i] > 0) { expand_colnames_array_to(rightcolinfo, rightattnos[i]); rightcolinfo->colnames[rightattnos[i] - 1] = colname; } } } /* * If there's a USING clause, select the USING column names and push * those names down to the children. 
We have two strategies: * * If dpns->unique_using is TRUE, we force all USING names to be * unique across the whole query level. In principle we'd only need * the names of dangerous USING columns to be globally unique, but to * safely assign all USING names in a single pass, we have to enforce * the same uniqueness rule for all of them. However, if a USING * column's name has been pushed down from the parent, we should use * it as-is rather than making a uniqueness adjustment. This is * necessary when we're at an unnamed join, and it creates no risk of * ambiguity. Also, if there's a user-written output alias for a * merged column, we prefer to use that rather than the input name; * this simplifies the logic and seems likely to lead to less aliasing * overall. * * If dpns->unique_using is FALSE, we only need USING names to be * unique within their own join RTE. We still need to honor * pushed-down names, though. * * Though significantly different in results, these two strategies are * implemented by the same code, with only the difference of whether * to put assigned names into dpns->using_names. */ if (j->usingClause) { /* Copy the input parentUsing list so we don't modify it */ parentUsing = list_copy(parentUsing); /* USING names must correspond to the first join output columns */ expand_colnames_array_to(colinfo, list_length(j->usingClause)); i = 0; foreach(lc, j->usingClause) { char *colname = strVal(lfirst(lc)); /* Assert it's a merged column */ Assert(leftattnos[i] != 0 && rightattnos[i] != 0); /* Adopt passed-down name if any, else select unique name */ if (colinfo->colnames[i] != NULL) colname = colinfo->colnames[i]; else { /* Prefer user-written output alias if any */ if (rte->alias && i < list_length(rte->alias->colnames)) colname = strVal(list_nth(rte->alias->colnames, i)); /* Make it appropriately unique */ colname = make_colname_unique(colname, dpns, colinfo); if (dpns->unique_using) dpns->using_names = lappend(dpns->using_names, colname); /* Save it as output column name, too */ colinfo->colnames[i] = colname; } /* Remember selected names for use later */ colinfo->usingNames = lappend(colinfo->usingNames, colname); parentUsing = lappend(parentUsing, colname); /* Push down to left column, unless it's a system column */ if (leftattnos[i] > 0) { expand_colnames_array_to(leftcolinfo, leftattnos[i]); leftcolinfo->colnames[leftattnos[i] - 1] = colname; } /* Same on the righthand side */ if (rightattnos[i] > 0) { expand_colnames_array_to(rightcolinfo, rightattnos[i]); rightcolinfo->colnames[rightattnos[i] - 1] = colname; } i++; } } /* Mark child deparse_columns structs with correct parentUsing info */ leftcolinfo->parentUsing = parentUsing; rightcolinfo->parentUsing = parentUsing; /* Now recursively assign USING column names in children */ set_using_names(dpns, j->larg, parentUsing); set_using_names(dpns, j->rarg, parentUsing); } else elog(ERROR, "unrecognized node type: %d", (int) nodeTag(jtnode)); } /* * set_relation_column_names: select column aliases for a non-join RTE * * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. * If any colnames entries are already filled in, those override local * choices. */ static void set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo) { int ncolumns; char **real_colnames; bool changed_any; int noldcolumns; int i; int j; /* * Extract the RTE's "real" column names. This is comparable to * get_rte_attribute_name, except that it's important to disregard dropped * columns. 
We put NULL into the array for a dropped column. */ if (rte->rtekind == RTE_RELATION) { /* Relation --- look to the system catalogs for up-to-date info */ Relation rel; TupleDesc tupdesc; rel = relation_open(rte->relid, AccessShareLock); tupdesc = RelationGetDescr(rel); ncolumns = tupdesc->natts; real_colnames = (char **) palloc(ncolumns * sizeof(char *)); for (i = 0; i < ncolumns; i++) { if (tupdesc->attrs[i]->attisdropped) real_colnames[i] = NULL; else real_colnames[i] = pstrdup(NameStr(tupdesc->attrs[i]->attname)); } relation_close(rel, AccessShareLock); } else { /* Otherwise use the column names from eref */ ListCell *lc; ncolumns = list_length(rte->eref->colnames); real_colnames = (char **) palloc(ncolumns * sizeof(char *)); i = 0; foreach(lc, rte->eref->colnames) { /* * If the column name shown in eref is an empty string, then it's * a column that was dropped at the time of parsing the query, so * treat it as dropped. */ char *cname = strVal(lfirst(lc)); if (cname[0] == '\0') cname = NULL; real_colnames[i] = cname; i++; } } /* * Ensure colinfo->colnames has a slot for each column. (It could be long * enough already, if we pushed down a name for the last column.) Note: * it's possible that there are now more columns than there were when the * query was parsed, ie colnames could be longer than rte->eref->colnames. * We must assign unique aliases to the new columns too, else there could * be unresolved conflicts when the view/rule is reloaded. */ expand_colnames_array_to(colinfo, ncolumns); Assert(colinfo->num_cols == ncolumns); /* * Make sufficiently large new_colnames and is_new_col arrays, too. * * Note: because we leave colinfo->num_new_cols zero until after the loop, * colname_is_unique will not consult that array, which is fine because it * would only be duplicate effort. */ colinfo->new_colnames = (char **) palloc(ncolumns * sizeof(char *)); colinfo->is_new_col = (bool *) palloc(ncolumns * sizeof(bool)); /* * Scan the columns, select a unique alias for each one, and store it in * colinfo->colnames and colinfo->new_colnames. The former array has NULL * entries for dropped columns, the latter omits them. Also mark * new_colnames entries as to whether they are new since parse time; this * is the case for entries beyond the length of rte->eref->colnames. */ noldcolumns = list_length(rte->eref->colnames); changed_any = false; j = 0; for (i = 0; i < ncolumns; i++) { char *real_colname = real_colnames[i]; char *colname = colinfo->colnames[i]; /* Skip dropped columns */ if (real_colname == NULL) { Assert(colname == NULL); /* colnames[i] is already NULL */ continue; } /* If alias already assigned, that's what to use */ if (colname == NULL) { /* If user wrote an alias, prefer that over real column name */ if (rte->alias && i < list_length(rte->alias->colnames)) colname = strVal(list_nth(rte->alias->colnames, i)); else colname = real_colname; /* Unique-ify and insert into colinfo */ colname = make_colname_unique(colname, dpns, colinfo); colinfo->colnames[i] = colname; } /* Put names of non-dropped columns in new_colnames[] too */ colinfo->new_colnames[j] = colname; /* And mark them as new or not */ colinfo->is_new_col[j] = (i >= noldcolumns); j++; /* Remember if any assigned aliases differ from "real" name */ if (!changed_any && strcmp(colname, real_colname) != 0) changed_any = true; } /* * Set correct length for new_colnames[] array. 
(Note: if columns have * been added, colinfo->num_cols includes them, which is not really quite * right but is harmless, since any new columns must be at the end where * they won't affect varattnos of pre-existing columns.) */ colinfo->num_new_cols = j; /* * For a relation RTE, we need only print the alias column names if any * are different from the underlying "real" names. For a function RTE, * always emit a complete column alias list; this is to protect against * possible instability of the default column names (eg, from altering * parameter names). For other RTE types, print if we changed anything OR * if there were user-written column aliases (since the latter would be * part of the underlying "reality"). */ if (rte->rtekind == RTE_RELATION) colinfo->printaliases = changed_any; else if (rte->rtekind == RTE_FUNCTION) colinfo->printaliases = true; else if (rte->alias && rte->alias->colnames != NIL) colinfo->printaliases = true; else colinfo->printaliases = changed_any; } /* * set_join_column_names: select column aliases for a join RTE * * Column alias info is saved in *colinfo, which is assumed to be pre-zeroed. * If any colnames entries are already filled in, those override local * choices. Also, names for USING columns were already chosen by * set_using_names(). We further expect that column alias selection has been * completed for both input RTEs. */ static void set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, deparse_columns *colinfo) { deparse_columns *leftcolinfo; deparse_columns *rightcolinfo; bool changed_any; int noldcolumns; int nnewcolumns; Bitmapset *leftmerged = NULL; Bitmapset *rightmerged = NULL; int i; int j; int ic; int jc; /* Look up the previously-filled-in child deparse_columns structs */ leftcolinfo = deparse_columns_fetch(colinfo->leftrti, dpns); rightcolinfo = deparse_columns_fetch(colinfo->rightrti, dpns); /* * Ensure colinfo->colnames has a slot for each column. (It could be long * enough already, if we pushed down a name for the last column.) Note: * it's possible that one or both inputs now have more columns than there * were when the query was parsed, but we'll deal with that below. We * only need entries in colnames for pre-existing columns. */ noldcolumns = list_length(rte->eref->colnames); expand_colnames_array_to(colinfo, noldcolumns); Assert(colinfo->num_cols == noldcolumns); /* * Scan the join output columns, select an alias for each one, and store * it in colinfo->colnames. If there are USING columns, set_using_names() * already selected their names, so we can start the loop at the first * non-merged column. 
*/ changed_any = false; for (i = list_length(colinfo->usingNames); i < noldcolumns; i++) { char *colname = colinfo->colnames[i]; char *real_colname; /* Ignore dropped column (only possible for non-merged column) */ if (colinfo->leftattnos[i] == 0 && colinfo->rightattnos[i] == 0) { Assert(colname == NULL); continue; } /* Get the child column name */ if (colinfo->leftattnos[i] > 0) real_colname = leftcolinfo->colnames[colinfo->leftattnos[i] - 1]; else if (colinfo->rightattnos[i] > 0) real_colname = rightcolinfo->colnames[colinfo->rightattnos[i] - 1]; else { /* We're joining system columns --- use eref name */ real_colname = strVal(list_nth(rte->eref->colnames, i)); } Assert(real_colname != NULL); /* In an unnamed join, just report child column names as-is */ if (rte->alias == NULL) { colinfo->colnames[i] = real_colname; continue; } /* If alias already assigned, that's what to use */ if (colname == NULL) { /* If user wrote an alias, prefer that over real column name */ if (rte->alias && i < list_length(rte->alias->colnames)) colname = strVal(list_nth(rte->alias->colnames, i)); else colname = real_colname; /* Unique-ify and insert into colinfo */ colname = make_colname_unique(colname, dpns, colinfo); colinfo->colnames[i] = colname; } /* Remember if any assigned aliases differ from "real" name */ if (!changed_any && strcmp(colname, real_colname) != 0) changed_any = true; } /* * Calculate number of columns the join would have if it were re-parsed * now, and create storage for the new_colnames and is_new_col arrays. * * Note: colname_is_unique will be consulting new_colnames[] during the * loops below, so its not-yet-filled entries must be zeroes. */ nnewcolumns = leftcolinfo->num_new_cols + rightcolinfo->num_new_cols - list_length(colinfo->usingNames); colinfo->num_new_cols = nnewcolumns; colinfo->new_colnames = (char **) palloc0(nnewcolumns * sizeof(char *)); colinfo->is_new_col = (bool *) palloc0(nnewcolumns * sizeof(bool)); /* * Generating the new_colnames array is a bit tricky since any new columns * added since parse time must be inserted in the right places. This code * must match the parser, which will order a join's columns as merged * columns first (in USING-clause order), then non-merged columns from the * left input (in attnum order), then non-merged columns from the right * input (ditto). If one of the inputs is itself a join, its columns will * be ordered according to the same rule, which means newly-added columns * might not be at the end. We can figure out what's what by consulting * the leftattnos and rightattnos arrays plus the input is_new_col arrays. * * In these loops, i indexes leftattnos/rightattnos (so it's join varattno * less one), j indexes new_colnames/is_new_col, and ic/jc have similar * meanings for the current child RTE. 
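 *
 * An illustrative example (not from the original comment): for
 * "a JOIN b USING (id)" where relation "a" has since gained a column,
 * the output order is the merged "id" first, then a's non-merged
 * columns with the new column at a's attnum position, then b's
 * non-merged columns; so the newly added column lands in the middle of
 * the join's column list rather than at its end.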
*/ /* Handle merged columns; they are first and can't be new */ i = j = 0; while (i < noldcolumns && colinfo->leftattnos[i] != 0 && colinfo->rightattnos[i] != 0) { /* column name is already determined and known unique */ colinfo->new_colnames[j] = colinfo->colnames[i]; colinfo->is_new_col[j] = false; /* build bitmapsets of child attnums of merged columns */ if (colinfo->leftattnos[i] > 0) leftmerged = bms_add_member(leftmerged, colinfo->leftattnos[i]); if (colinfo->rightattnos[i] > 0) rightmerged = bms_add_member(rightmerged, colinfo->rightattnos[i]); i++, j++; } /* Handle non-merged left-child columns */ ic = 0; for (jc = 0; jc < leftcolinfo->num_new_cols; jc++) { char *child_colname = leftcolinfo->new_colnames[jc]; if (!leftcolinfo->is_new_col[jc]) { /* Advance ic to next non-dropped old column of left child */ while (ic < leftcolinfo->num_cols && leftcolinfo->colnames[ic] == NULL) ic++; Assert(ic < leftcolinfo->num_cols); ic++; /* If it is a merged column, we already processed it */ if (bms_is_member(ic, leftmerged)) continue; /* Else, advance i to the corresponding existing join column */ while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) i++; Assert(i < colinfo->num_cols); Assert(ic == colinfo->leftattnos[i]); /* Use the already-assigned name of this column */ colinfo->new_colnames[j] = colinfo->colnames[i]; i++; } else { /* * Unique-ify the new child column name and assign, unless we're * in an unnamed join, in which case just copy */ if (rte->alias != NULL) { colinfo->new_colnames[j] = make_colname_unique(child_colname, dpns, colinfo); if (!changed_any && strcmp(colinfo->new_colnames[j], child_colname) != 0) changed_any = true; } else colinfo->new_colnames[j] = child_colname; } colinfo->is_new_col[j] = leftcolinfo->is_new_col[jc]; j++; } /* Handle non-merged right-child columns in exactly the same way */ ic = 0; for (jc = 0; jc < rightcolinfo->num_new_cols; jc++) { char *child_colname = rightcolinfo->new_colnames[jc]; if (!rightcolinfo->is_new_col[jc]) { /* Advance ic to next non-dropped old column of right child */ while (ic < rightcolinfo->num_cols && rightcolinfo->colnames[ic] == NULL) ic++; Assert(ic < rightcolinfo->num_cols); ic++; /* If it is a merged column, we already processed it */ if (bms_is_member(ic, rightmerged)) continue; /* Else, advance i to the corresponding existing join column */ while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) i++; Assert(i < colinfo->num_cols); Assert(ic == colinfo->rightattnos[i]); /* Use the already-assigned name of this column */ colinfo->new_colnames[j] = colinfo->colnames[i]; i++; } else { /* * Unique-ify the new child column name and assign, unless we're * in an unnamed join, in which case just copy */ if (rte->alias != NULL) { colinfo->new_colnames[j] = make_colname_unique(child_colname, dpns, colinfo); if (!changed_any && strcmp(colinfo->new_colnames[j], child_colname) != 0) changed_any = true; } else colinfo->new_colnames[j] = child_colname; } colinfo->is_new_col[j] = rightcolinfo->is_new_col[jc]; j++; } /* Assert we processed the right number of columns */ #ifdef USE_ASSERT_CHECKING while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) i++; Assert(i == colinfo->num_cols); Assert(j == nnewcolumns); #endif /* * For a named join, print column aliases if we changed any from the child * names. Unnamed joins cannot print aliases. */ if (rte->alias != NULL) colinfo->printaliases = changed_any; else colinfo->printaliases = false; } /* * colname_is_unique: is colname distinct from already-chosen column names? 
* * dpns is query-wide info, colinfo is for the column's RTE */ static bool colname_is_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo) { int i; ListCell *lc; /* Check against already-assigned column aliases within RTE */ for (i = 0; i < colinfo->num_cols; i++) { char *oldname = colinfo->colnames[i]; if (oldname && strcmp(oldname, colname) == 0) return false; } /* * If we're building a new_colnames array, check that too (this will be * partially but not completely redundant with the previous checks) */ for (i = 0; i < colinfo->num_new_cols; i++) { char *oldname = colinfo->new_colnames[i]; if (oldname && strcmp(oldname, colname) == 0) return false; } /* Also check against USING-column names that must be globally unique */ foreach(lc, dpns->using_names) { char *oldname = (char *) lfirst(lc); if (strcmp(oldname, colname) == 0) return false; } /* Also check against names already assigned for parent-join USING cols */ foreach(lc, colinfo->parentUsing) { char *oldname = (char *) lfirst(lc); if (strcmp(oldname, colname) == 0) return false; } return true; } /* * make_colname_unique: modify colname if necessary to make it unique * * dpns is query-wide info, colinfo is for the column's RTE */ static char * make_colname_unique(char *colname, deparse_namespace *dpns, deparse_columns *colinfo) { /* * If the selected name isn't unique, append digits to make it so. For a * very long input name, we might have to truncate to stay within * NAMEDATALEN. */ if (!colname_is_unique(colname, dpns, colinfo)) { int colnamelen = strlen(colname); char *modname = (char *) palloc(colnamelen + 16); int i = 0; do { i++; for (;;) { /* * We avoid using %.*s here because it can misbehave if the * data is not valid in what libc thinks is the prevailing * encoding. */ memcpy(modname, colname, colnamelen); sprintf(modname + colnamelen, "_%d", i); if (strlen(modname) < NAMEDATALEN) break; /* drop chars from colname to keep all the digits */ colnamelen = pg_mbcliplen(colname, colnamelen, colnamelen - 1); } } while (!colname_is_unique(modname, dpns, colinfo)); colname = modname; } return colname; } /* * expand_colnames_array_to: make colinfo->colnames at least n items long * * Any added array entries are initialized to zero. */ static void expand_colnames_array_to(deparse_columns *colinfo, int n) { if (n > colinfo->num_cols) { if (colinfo->colnames == NULL) colinfo->colnames = (char **) palloc0(n * sizeof(char *)); else { colinfo->colnames = (char **) repalloc(colinfo->colnames, n * sizeof(char *)); memset(colinfo->colnames + colinfo->num_cols, 0, (n - colinfo->num_cols) * sizeof(char *)); } colinfo->num_cols = n; } } /* * identify_join_columns: figure out where columns of a join come from * * Fills the join-specific fields of the colinfo struct, except for * usingNames which is filled later. 
*/ static void identify_join_columns(JoinExpr *j, RangeTblEntry *jrte, deparse_columns *colinfo) { int numjoincols; int i; ListCell *lc; /* Extract left/right child RT indexes */ if (IsA(j->larg, RangeTblRef)) colinfo->leftrti = ((RangeTblRef *) j->larg)->rtindex; else if (IsA(j->larg, JoinExpr)) colinfo->leftrti = ((JoinExpr *) j->larg)->rtindex; else elog(ERROR, "unrecognized node type in jointree: %d", (int) nodeTag(j->larg)); if (IsA(j->rarg, RangeTblRef)) colinfo->rightrti = ((RangeTblRef *) j->rarg)->rtindex; else if (IsA(j->rarg, JoinExpr)) colinfo->rightrti = ((JoinExpr *) j->rarg)->rtindex; else elog(ERROR, "unrecognized node type in jointree: %d", (int) nodeTag(j->rarg)); /* Assert children will be processed earlier than join in second pass */ Assert(colinfo->leftrti < j->rtindex); Assert(colinfo->rightrti < j->rtindex); /* Initialize result arrays with zeroes */ numjoincols = list_length(jrte->joinaliasvars); Assert(numjoincols == list_length(jrte->eref->colnames)); colinfo->leftattnos = (int *) palloc0(numjoincols * sizeof(int)); colinfo->rightattnos = (int *) palloc0(numjoincols * sizeof(int)); /* Scan the joinaliasvars list to identify simple column references */ i = 0; foreach(lc, jrte->joinaliasvars) { Var *aliasvar = (Var *) lfirst(lc); /* get rid of any implicit coercion above the Var */ aliasvar = (Var *) strip_implicit_coercions((Node *) aliasvar); if (aliasvar == NULL) { /* It's a dropped column; nothing to do here */ } else if (IsA(aliasvar, Var)) { Assert(aliasvar->varlevelsup == 0); Assert(aliasvar->varattno != 0); if (aliasvar->varno == colinfo->leftrti) colinfo->leftattnos[i] = aliasvar->varattno; else if (aliasvar->varno == colinfo->rightrti) colinfo->rightattnos[i] = aliasvar->varattno; else elog(ERROR, "unexpected varno %d in JOIN RTE", aliasvar->varno); } else if (IsA(aliasvar, CoalesceExpr)) { /* * It's a merged column in FULL JOIN USING. Ignore it for now and * let the code below identify the merged columns. */ } else elog(ERROR, "unrecognized node type in join alias vars: %d", (int) nodeTag(aliasvar)); i++; } /* * If there's a USING clause, deconstruct the join quals to identify the * merged columns. This is a tad painful but if we cannot rely on the * column names, there is no other representation of which columns were * joined by USING. (Unless the join type is FULL, we can't tell from the * joinaliasvars list which columns are merged.) Note: we assume that the * merged columns are the first output column(s) of the join. 
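 *
 * As an illustrative example (consistent with flatten_join_using_qual
 * below): for "t1 JOIN t2 USING (a, b)" the qual is
 * "t1.a = t2.a AND t1.b = t2.b", so flattening it yields the t1 Vars and
 * t2 Vars in USING-list order, which fills leftattnos[0..1] and
 * rightattnos[0..1] for the two merged output columns.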
*/ if (j->usingClause) { List *leftvars = NIL; List *rightvars = NIL; ListCell *lc2; /* Extract left- and right-side Vars from the qual expression */ flatten_join_using_qual(j->quals, &leftvars, &rightvars); Assert(list_length(leftvars) == list_length(j->usingClause)); Assert(list_length(rightvars) == list_length(j->usingClause)); /* Mark the output columns accordingly */ i = 0; forboth(lc, leftvars, lc2, rightvars) { Var *leftvar = (Var *) lfirst(lc); Var *rightvar = (Var *) lfirst(lc2); Assert(leftvar->varlevelsup == 0); Assert(leftvar->varattno != 0); if (leftvar->varno != colinfo->leftrti) elog(ERROR, "unexpected varno %d in JOIN USING qual", leftvar->varno); colinfo->leftattnos[i] = leftvar->varattno; Assert(rightvar->varlevelsup == 0); Assert(rightvar->varattno != 0); if (rightvar->varno != colinfo->rightrti) elog(ERROR, "unexpected varno %d in JOIN USING qual", rightvar->varno); colinfo->rightattnos[i] = rightvar->varattno; i++; } } } /* * flatten_join_using_qual: extract Vars being joined from a JOIN/USING qual * * We assume that transformJoinUsingClause won't have produced anything except * AND nodes, equality operator nodes, and possibly implicit coercions, and * that the AND node inputs match left-to-right with the original USING list. * * Caller must initialize the result lists to NIL. */ static void flatten_join_using_qual(Node *qual, List **leftvars, List **rightvars) { if (IsA(qual, BoolExpr)) { /* Handle AND nodes by recursion */ BoolExpr *b = (BoolExpr *) qual; ListCell *lc; Assert(b->boolop == AND_EXPR); foreach(lc, b->args) { flatten_join_using_qual((Node *) lfirst(lc), leftvars, rightvars); } } else if (IsA(qual, OpExpr)) { /* Otherwise we should have an equality operator */ OpExpr *op = (OpExpr *) qual; Var *var; if (list_length(op->args) != 2) elog(ERROR, "unexpected unary operator in JOIN/USING qual"); /* Arguments should be Vars with perhaps implicit coercions */ var = (Var *) strip_implicit_coercions((Node *) linitial(op->args)); if (!IsA(var, Var)) elog(ERROR, "unexpected node type in JOIN/USING qual: %d", (int) nodeTag(var)); *leftvars = lappend(*leftvars, var); var = (Var *) strip_implicit_coercions((Node *) lsecond(op->args)); if (!IsA(var, Var)) elog(ERROR, "unexpected node type in JOIN/USING qual: %d", (int) nodeTag(var)); *rightvars = lappend(*rightvars, var); } else { /* Perhaps we have an implicit coercion to boolean? */ Node *q = strip_implicit_coercions(qual); if (q != qual) flatten_join_using_qual(q, leftvars, rightvars); else elog(ERROR, "unexpected node type in JOIN/USING qual: %d", (int) nodeTag(qual)); } } /* * get_rtable_name: convenience function to get a previously assigned RTE alias * * The RTE must belong to the topmost namespace level in "context". */ static char * get_rtable_name(int rtindex, deparse_context *context) { deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); Assert(rtindex > 0 && rtindex <= list_length(dpns->rtable_names)); return (char *) list_nth(dpns->rtable_names, rtindex - 1); } /* * set_deparse_planstate: set up deparse_namespace to parse subexpressions * of a given PlanState node * * This sets the planstate, outer_planstate, inner_planstate, outer_tlist, * inner_tlist, and index_tlist fields. Caller is responsible for adjusting * the ancestors list if necessary. Note that the rtable and ctes fields do * not need to change when shifting attention to different plan nodes in a * single plan tree. 
*/ static void set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) { dpns->planstate = ps; /* * We special-case Append and MergeAppend to pretend that the first child * plan is the OUTER referent; we have to interpret OUTER Vars in their * tlists according to one of the children, and the first one is the most * natural choice. Likewise special-case ModifyTable to pretend that the * first child plan is the OUTER referent; this is to support RETURNING * lists containing references to non-target relations. */ if (IsA(ps, AppendState)) dpns->outer_planstate = ((AppendState *) ps)->appendplans[0]; else if (IsA(ps, MergeAppendState)) dpns->outer_planstate = ((MergeAppendState *) ps)->mergeplans[0]; else if (IsA(ps, ModifyTableState)) dpns->outer_planstate = ((ModifyTableState *) ps)->mt_plans[0]; else dpns->outer_planstate = outerPlanState(ps); if (dpns->outer_planstate) dpns->outer_tlist = dpns->outer_planstate->plan->targetlist; else dpns->outer_tlist = NIL; /* * For a SubqueryScan, pretend the subplan is INNER referent. (We don't * use OUTER because that could someday conflict with the normal meaning.) * Likewise, for a CteScan, pretend the subquery's plan is INNER referent. * For ON CONFLICT .. UPDATE we just need the inner tlist to point to the * excluded expression's tlist. (Similar to the SubqueryScan we don't want * to reuse OUTER, it's used for RETURNING in some modify table cases, * although not INSERT .. CONFLICT). */ if (IsA(ps, SubqueryScanState)) dpns->inner_planstate = ((SubqueryScanState *) ps)->subplan; else if (IsA(ps, CteScanState)) dpns->inner_planstate = ((CteScanState *) ps)->cteplanstate; else if (IsA(ps, ModifyTableState)) dpns->inner_planstate = ps; else dpns->inner_planstate = innerPlanState(ps); if (IsA(ps, ModifyTableState)) dpns->inner_tlist = ((ModifyTableState *) ps)->mt_excludedtlist; else if (dpns->inner_planstate) dpns->inner_tlist = dpns->inner_planstate->plan->targetlist; else dpns->inner_tlist = NIL; /* Set up referent for INDEX_VAR Vars, if needed */ if (IsA(ps->plan, IndexOnlyScan)) dpns->index_tlist = ((IndexOnlyScan *) ps->plan)->indextlist; else if (IsA(ps->plan, ForeignScan)) dpns->index_tlist = ((ForeignScan *) ps->plan)->fdw_scan_tlist; else if (IsA(ps->plan, CustomScan)) dpns->index_tlist = ((CustomScan *) ps->plan)->custom_scan_tlist; else dpns->index_tlist = NIL; } /* * push_child_plan: temporarily transfer deparsing attention to a child plan * * When expanding an OUTER_VAR or INNER_VAR reference, we must adjust the * deparse context in case the referenced expression itself uses * OUTER_VAR/INNER_VAR. We modify the top stack entry in-place to avoid * affecting levelsup issues (although in a Plan tree there really shouldn't * be any). * * Caller must provide a local deparse_namespace variable to save the * previous state for pop_child_plan. 
*/ static void push_child_plan(deparse_namespace *dpns, PlanState *ps, deparse_namespace *save_dpns) { /* Save state for restoration later */ *save_dpns = *dpns; /* Link current plan node into ancestors list */ dpns->ancestors = lcons(dpns->planstate, dpns->ancestors); /* Set attention on selected child */ set_deparse_planstate(dpns, ps); } /* * pop_child_plan: undo the effects of push_child_plan */ static void pop_child_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) { List *ancestors; /* Get rid of ancestors list cell added by push_child_plan */ ancestors = list_delete_first(dpns->ancestors); /* Restore fields changed by push_child_plan */ *dpns = *save_dpns; /* Make sure dpns->ancestors is right (may be unnecessary) */ dpns->ancestors = ancestors; } /* * push_ancestor_plan: temporarily transfer deparsing attention to an * ancestor plan * * When expanding a Param reference, we must adjust the deparse context * to match the plan node that contains the expression being printed; * otherwise we'd fail if that expression itself contains a Param or * OUTER_VAR/INNER_VAR/INDEX_VAR variable. * * The target ancestor is conveniently identified by the ListCell holding it * in dpns->ancestors. * * Caller must provide a local deparse_namespace variable to save the * previous state for pop_ancestor_plan. */ static void push_ancestor_plan(deparse_namespace *dpns, ListCell *ancestor_cell, deparse_namespace *save_dpns) { PlanState *ps = (PlanState *) lfirst(ancestor_cell); List *ancestors; /* Save state for restoration later */ *save_dpns = *dpns; /* Build a new ancestor list with just this node's ancestors */ ancestors = NIL; while ((ancestor_cell = lnext(ancestor_cell)) != NULL) ancestors = lappend(ancestors, lfirst(ancestor_cell)); dpns->ancestors = ancestors; /* Set attention on selected ancestor */ set_deparse_planstate(dpns, ps); } /* * pop_ancestor_plan: undo the effects of push_ancestor_plan */ static void pop_ancestor_plan(deparse_namespace *dpns, deparse_namespace *save_dpns) { /* Free the ancestor list made in push_ancestor_plan */ list_free(dpns->ancestors); /* Restore fields changed by push_ancestor_plan */ *dpns = *save_dpns; } /* ---------- * deparse_shard_query - Parse back a query for execution on a shard * * Builds an SQL string to perform the provided query on a specific shard and * places this string into the provided buffer. * ---------- */ void deparse_shard_query(Query *query, Oid distrelid, int64 shardid, StringInfo buffer) { get_query_def_extended(query, buffer, NIL, distrelid, shardid, NULL, 0, WRAP_COLUMN_DEFAULT, 0); } /* ---------- * get_query_def - Parse back one query parsetree * * If resultDesc is not NULL, then it is the output tuple descriptor for * the view represented by a SELECT query. * ---------- */ static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent) { get_query_def_extended(query, buf, parentnamespace, InvalidOid, 0, resultDesc, prettyFlags, wrapColumn, startIndent); } /* ---------- * get_query_def_extended - Parse back one query parsetree, optionally * with extension using a shard identifier. * * If distrelid is valid and shardid is positive, the provided shardid is added * any time the provided relid is deparsed, so that the query may be executed * on a placement for the given shard. 
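 *
 * Illustrative example (the relation name and shard id are hypothetical):
 * with distrelid pointing at a distributed table "users" and shardid
 * 102008, references to that relation are deparsed with the shard id
 * appended to the name (via generate_relation_or_shard_name), e.g.
 * "public.users_102008", so the generated SQL can run directly against
 * that shard placement on a worker.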
* ---------- */ static void get_query_def_extended(Query *query, StringInfo buf, List *parentnamespace, Oid distrelid, int64 shardid, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent) { deparse_context context; deparse_namespace dpns; OverrideSearchPath *overridePath = NULL; /* Guard against excessively long or deeply-nested queries */ CHECK_FOR_INTERRUPTS(); check_stack_depth(); /* * Before we begin to examine the query, acquire locks on referenced * relations, and fix up deleted columns in JOIN RTEs. This ensures * consistent results. Note we assume it's OK to scribble on the passed * querytree! * * We are only deparsing the query (we are not about to execute it), so we * only need AccessShareLock on the relations it mentions. */ AcquireRewriteLocks(query, false, false); /* * Set search_path to NIL so that all objects outside of pg_catalog will be * schema-prefixed. pg_catalog will be added automatically when we call * PushOverrideSearchPath(), since we set addCatalog to true; */ overridePath = GetOverrideSearchPath(CurrentMemoryContext); overridePath->schemas = NIL; overridePath->addCatalog = true; PushOverrideSearchPath(overridePath); context.buf = buf; context.namespaces = lcons(&dpns, list_copy(parentnamespace)); context.windowClause = NIL; context.windowTList = NIL; context.varprefix = (parentnamespace != NIL || list_length(query->rtable) != 1); context.prettyFlags = prettyFlags; context.wrapColumn = wrapColumn; context.indentLevel = startIndent; context.special_exprkind = EXPR_KIND_NONE; context.distrelid = distrelid; context.shardid = shardid; set_deparse_for_query(&dpns, query, parentnamespace); switch (query->commandType) { case CMD_SELECT: get_select_query_def(query, &context, resultDesc); break; case CMD_UPDATE: get_update_query_def(query, &context); break; case CMD_INSERT: get_insert_query_def(query, &context); break; case CMD_DELETE: get_delete_query_def(query, &context); break; case CMD_NOTHING: appendStringInfoString(buf, "NOTHING"); break; case CMD_UTILITY: get_utility_query_def(query, &context); break; default: elog(ERROR, "unrecognized query command type: %d", query->commandType); break; } /* revert back to original search_path */ PopOverrideSearchPath(); } /* ---------- * get_values_def - Parse back a VALUES list * ---------- */ static void get_values_def(List *values_lists, deparse_context *context) { StringInfo buf = context->buf; bool first_list = true; ListCell *vtl; appendStringInfoString(buf, "VALUES "); foreach(vtl, values_lists) { List *sublist = (List *) lfirst(vtl); bool first_col = true; ListCell *lc; if (first_list) first_list = false; else appendStringInfoString(buf, ", "); appendStringInfoChar(buf, '('); foreach(lc, sublist) { Node *col = (Node *) lfirst(lc); if (first_col) first_col = false; else appendStringInfoChar(buf, ','); /* * Print the value. Whole-row Vars need special treatment. 
*/ get_rule_expr_toplevel(col, context, false); } appendStringInfoChar(buf, ')'); } } /* ---------- * get_with_clause - Parse back a WITH clause * ---------- */ static void get_with_clause(Query *query, deparse_context *context) { StringInfo buf = context->buf; const char *sep; ListCell *l; if (query->cteList == NIL) return; if (PRETTY_INDENT(context)) { context->indentLevel += PRETTYINDENT_STD; appendStringInfoChar(buf, ' '); } if (query->hasRecursive) sep = "WITH RECURSIVE "; else sep = "WITH "; foreach(l, query->cteList) { CommonTableExpr *cte = (CommonTableExpr *) lfirst(l); appendStringInfoString(buf, sep); appendStringInfoString(buf, quote_identifier(cte->ctename)); if (cte->aliascolnames) { bool first = true; ListCell *col; appendStringInfoChar(buf, '('); foreach(col, cte->aliascolnames) { if (first) first = false; else appendStringInfoString(buf, ", "); appendStringInfoString(buf, quote_identifier(strVal(lfirst(col)))); } appendStringInfoChar(buf, ')'); } appendStringInfoString(buf, " AS ("); if (PRETTY_INDENT(context)) appendContextKeyword(context, "", 0, 0, 0); get_query_def((Query *) cte->ctequery, buf, context->namespaces, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); if (PRETTY_INDENT(context)) appendContextKeyword(context, "", 0, 0, 0); appendStringInfoChar(buf, ')'); sep = ", "; } if (PRETTY_INDENT(context)) { context->indentLevel -= PRETTYINDENT_STD; appendContextKeyword(context, "", 0, 0, 0); } else appendStringInfoChar(buf, ' '); } /* ---------- * get_select_query_def - Parse back a SELECT parsetree * ---------- */ static void get_select_query_def(Query *query, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; List *save_windowclause; List *save_windowtlist; bool force_colno; ListCell *l; /* Insert the WITH clause if given */ get_with_clause(query, context); /* Set up context for possible window functions */ save_windowclause = context->windowClause; context->windowClause = query->windowClause; save_windowtlist = context->windowTList; context->windowTList = query->targetList; /* * If the Query node has a setOperations tree, then it's the top level of * a UNION/INTERSECT/EXCEPT query; only the WITH, ORDER BY and LIMIT * fields are interesting in the top query itself. 
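 *
 * For example (illustrative): for "(SELECT ...) UNION (SELECT ...)
 * ORDER BY 1 LIMIT 10", get_setop_query() prints the UNION tree and this
 * function only appends the ORDER BY and LIMIT; in that case the ORDER BY
 * entries are forced to plain output-column numbers (force_colno), as
 * required below.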
*/ if (query->setOperations) { get_setop_query(query->setOperations, query, context, resultDesc); /* ORDER BY clauses must be simple in this case */ force_colno = true; } else { get_basic_select_query(query, context, resultDesc); force_colno = false; } /* Add the ORDER BY clause if given */ if (query->sortClause != NIL) { appendContextKeyword(context, " ORDER BY ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_orderby(query->sortClause, query->targetList, force_colno, context); } /* Add the LIMIT clause if given */ if (query->limitOffset != NULL) { appendContextKeyword(context, " OFFSET ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); get_rule_expr(query->limitOffset, context, false); } if (query->limitCount != NULL) { appendContextKeyword(context, " LIMIT ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); if (IsA(query->limitCount, Const) && ((Const *) query->limitCount)->constisnull) appendStringInfoString(buf, "ALL"); else get_rule_expr(query->limitCount, context, false); } /* Add FOR [KEY] UPDATE/SHARE clauses if present */ if (query->hasForUpdate) { foreach(l, query->rowMarks) { RowMarkClause *rc = (RowMarkClause *) lfirst(l); /* don't print implicit clauses */ if (rc->pushedDown) continue; switch (rc->strength) { case LCS_NONE: /* we intentionally throw an error for LCS_NONE */ elog(ERROR, "unrecognized LockClauseStrength %d", (int) rc->strength); break; case LCS_FORKEYSHARE: appendContextKeyword(context, " FOR KEY SHARE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORSHARE: appendContextKeyword(context, " FOR SHARE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORNOKEYUPDATE: appendContextKeyword(context, " FOR NO KEY UPDATE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORUPDATE: appendContextKeyword(context, " FOR UPDATE", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; } appendStringInfo(buf, " OF %s", quote_identifier(get_rtable_name(rc->rti, context))); if (rc->waitPolicy == LockWaitError) appendStringInfoString(buf, " NOWAIT"); else if (rc->waitPolicy == LockWaitSkip) appendStringInfoString(buf, " SKIP LOCKED"); } } context->windowClause = save_windowclause; context->windowTList = save_windowtlist; } /* * Detect whether query looks like SELECT ... FROM VALUES(); * if so, return the VALUES RTE. Otherwise return NULL. */ static RangeTblEntry * get_simple_values_rte(Query *query) { RangeTblEntry *result = NULL; ListCell *lc; /* * We want to return TRUE even if the Query also contains OLD or NEW rule * RTEs. So the idea is to scan the rtable and see if there is only one * inFromCl RTE that is a VALUES RTE. */ foreach(lc, query->rtable) { RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc); if (rte->rtekind == RTE_VALUES && rte->inFromCl) { if (result) return NULL; /* multiple VALUES (probably not possible) */ result = rte; } else if (rte->rtekind == RTE_RELATION && !rte->inFromCl) continue; /* ignore rule entries */ else return NULL; /* something else -> not simple VALUES */ } /* * We don't need to check the targetlist in any great detail, because * parser/analyze.c will never generate a "bare" VALUES RTE --- they only * appear inside auto-generated sub-queries with very restricted * structure. However, DefineView might have modified the tlist by * injecting new column aliases; so compare tlist resnames against the * RTE's names to detect that. 
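 *
 * For instance (illustrative): "CREATE VIEW v (a, b) AS VALUES (1, 2)"
 * renames the tlist entries away from the VALUES RTE's column names, so
 * we return NULL and the caller prints the full SELECT ... FROM (VALUES
 * ...) form instead of a bare VALUES list.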
*/ if (result) { ListCell *lcn; if (list_length(query->targetList) != list_length(result->eref->colnames)) return NULL; /* this probably cannot happen */ forboth(lc, query->targetList, lcn, result->eref->colnames) { TargetEntry *tle = (TargetEntry *) lfirst(lc); char *cname = strVal(lfirst(lcn)); if (tle->resjunk) return NULL; /* this probably cannot happen */ if (tle->resname == NULL || strcmp(tle->resname, cname) != 0) return NULL; /* column name has been changed */ } } return result; } static void get_basic_select_query(Query *query, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; RangeTblEntry *values_rte; char *sep; ListCell *l; if (PRETTY_INDENT(context)) { context->indentLevel += PRETTYINDENT_STD; appendStringInfoChar(buf, ' '); } /* * If the query looks like SELECT * FROM (VALUES ...), then print just the * VALUES part. This reverses what transformValuesClause() did at parse * time. */ values_rte = get_simple_values_rte(query); if (values_rte) { get_values_def(values_rte->values_lists, context); return; } /* * Build up the query string - first we say SELECT */ appendStringInfoString(buf, "SELECT"); /* Add the DISTINCT clause if given */ if (query->distinctClause != NIL) { if (query->hasDistinctOn) { appendStringInfoString(buf, " DISTINCT ON ("); sep = ""; foreach(l, query->distinctClause) { SortGroupClause *srt = (SortGroupClause *) lfirst(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(srt->tleSortGroupRef, query->targetList, false, context); sep = ", "; } appendStringInfoChar(buf, ')'); } else appendStringInfoString(buf, " DISTINCT"); } /* Then we tell what to select (the targetlist) */ get_target_list(query->targetList, context, resultDesc); /* Add the FROM clause if needed */ get_from_clause(query, " FROM ", context); /* Add the WHERE clause if given */ if (query->jointree->quals != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(query->jointree->quals, context, false); } /* Add the GROUP BY clause if given */ if (query->groupClause != NULL || query->groupingSets != NULL) { ParseExprKind save_exprkind; appendContextKeyword(context, " GROUP BY ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); save_exprkind = context->special_exprkind; context->special_exprkind = EXPR_KIND_GROUP_BY; if (query->groupingSets == NIL) { sep = ""; foreach(l, query->groupClause) { SortGroupClause *grp = (SortGroupClause *) lfirst(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(grp->tleSortGroupRef, query->targetList, false, context); sep = ", "; } } else { sep = ""; foreach(l, query->groupingSets) { GroupingSet *grp = lfirst(l); appendStringInfoString(buf, sep); get_rule_groupingset(grp, query->targetList, true, context); sep = ", "; } } context->special_exprkind = save_exprkind; } /* Add the HAVING clause if given */ if (query->havingQual != NULL) { appendContextKeyword(context, " HAVING ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); get_rule_expr(query->havingQual, context, false); } /* Add the WINDOW clause if needed */ if (query->windowClause != NIL) get_rule_windowclause(query, context); } /* ---------- * get_target_list - Parse back a SELECT target list * * This is also used for RETURNING lists in INSERT/UPDATE/DELETE. 
* ---------- */ static void get_target_list(List *targetList, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; StringInfoData targetbuf; bool last_was_multiline = false; char *sep; int colno; ListCell *l; /* we use targetbuf to hold each TLE's text temporarily */ initStringInfo(&targetbuf); sep = " "; colno = 0; foreach(l, targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); char *colname; char *attname; if (tle->resjunk) continue; /* ignore junk entries */ appendStringInfoString(buf, sep); sep = ", "; colno++; /* * Put the new field text into targetbuf so we can decide after we've * got it whether or not it needs to go on a new line. */ resetStringInfo(&targetbuf); context->buf = &targetbuf; /* * We special-case Var nodes rather than using get_rule_expr. This is * needed because get_rule_expr will display a whole-row Var as * "foo.*", which is the preferred notation in most contexts, but at * the top level of a SELECT list it's not right (the parser will * expand that notation into multiple columns, yielding behavior * different from a whole-row Var). We need to call get_variable * directly so that we can tell it to do the right thing, and so that * we can get the attribute name which is the default AS label. */ if (tle->expr && (IsA(tle->expr, Var))) { attname = get_variable((Var *) tle->expr, 0, true, context); } else { get_rule_expr((Node *) tle->expr, context, true); /* We'll show the AS name unless it's this: */ attname = "?column?"; } /* * Figure out what the result column should be called. In the context * of a view, use the view's tuple descriptor (so as to pick up the * effects of any column RENAME that's been done on the view). * Otherwise, just use what we can find in the TLE. */ if (resultDesc && colno <= resultDesc->natts) colname = NameStr(resultDesc->attrs[colno - 1]->attname); else colname = tle->resname; /* Show AS unless the column's name is correct as-is */ if (colname) /* resname could be NULL */ { if (attname == NULL || strcmp(attname, colname) != 0) appendStringInfo(&targetbuf, " AS %s", quote_identifier(colname)); } /* Restore context's output buffer */ context->buf = buf; /* Consider line-wrapping if enabled */ if (PRETTY_INDENT(context) && context->wrapColumn >= 0) { int leading_nl_pos; /* Does the new field start with a new line? */ if (targetbuf.len > 0 && targetbuf.data[0] == '\n') leading_nl_pos = 0; else leading_nl_pos = -1; /* If so, we shouldn't add anything */ if (leading_nl_pos >= 0) { /* instead, remove any trailing spaces currently in buf */ removeStringInfoSpaces(buf); } else { char *trailing_nl; /* Locate the start of the current line in the output buffer */ trailing_nl = strrchr(buf->data, '\n'); if (trailing_nl == NULL) trailing_nl = buf->data; else trailing_nl++; /* * Add a newline, plus some indentation, if the new field is * not the first and either the new field would cause an * overflow or the last field used more than one line. 
*/ if (colno > 1 && ((strlen(trailing_nl) + targetbuf.len > context->wrapColumn) || last_was_multiline)) appendContextKeyword(context, "", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_VAR); } /* Remember this field's multiline status for next iteration */ last_was_multiline = (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL); } /* Add the new field */ appendStringInfoString(buf, targetbuf.data); } /* clean up */ pfree(targetbuf.data); } static void get_setop_query(Node *setOp, Query *query, deparse_context *context, TupleDesc resultDesc) { StringInfo buf = context->buf; bool need_paren; /* Guard against excessively long or deeply-nested queries */ CHECK_FOR_INTERRUPTS(); check_stack_depth(); if (IsA(setOp, RangeTblRef)) { RangeTblRef *rtr = (RangeTblRef *) setOp; RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable); Query *subquery = rte->subquery; Assert(subquery != NULL); Assert(subquery->setOperations == NULL); /* Need parens if WITH, ORDER BY, FOR UPDATE, or LIMIT; see gram.y */ need_paren = (subquery->cteList || subquery->sortClause || subquery->rowMarks || subquery->limitOffset || subquery->limitCount); if (need_paren) appendStringInfoChar(buf, '('); get_query_def(subquery, buf, context->namespaces, resultDesc, context->prettyFlags, context->wrapColumn, context->indentLevel); if (need_paren) appendStringInfoChar(buf, ')'); } else if (IsA(setOp, SetOperationStmt)) { SetOperationStmt *op = (SetOperationStmt *) setOp; int subindent; /* * We force parens when nesting two SetOperationStmts, except when the * lefthand input is another setop of the same kind. Syntactically, * we could omit parens in rather more cases, but it seems best to use * parens to flag cases where the setop operator changes. If we use * parens, we also increase the indentation level for the child query. * * There are some cases in which parens are needed around a leaf query * too, but those are more easily handled at the next level down (see * code above). */ if (IsA(op->larg, SetOperationStmt)) { SetOperationStmt *lop = (SetOperationStmt *) op->larg; if (op->op == lop->op && op->all == lop->all) need_paren = false; else need_paren = true; } else need_paren = false; if (need_paren) { appendStringInfoChar(buf, '('); subindent = PRETTYINDENT_STD; appendContextKeyword(context, "", subindent, 0, 0); } else subindent = 0; get_setop_query(op->larg, query, context, resultDesc); if (need_paren) appendContextKeyword(context, ") ", -subindent, 0, 0); else if (PRETTY_INDENT(context)) appendContextKeyword(context, "", -subindent, 0, 0); else appendStringInfoChar(buf, ' '); switch (op->op) { case SETOP_UNION: appendStringInfoString(buf, "UNION "); break; case SETOP_INTERSECT: appendStringInfoString(buf, "INTERSECT "); break; case SETOP_EXCEPT: appendStringInfoString(buf, "EXCEPT "); break; default: elog(ERROR, "unrecognized set op: %d", (int) op->op); } if (op->all) appendStringInfoString(buf, "ALL "); /* Always parenthesize if RHS is another setop */ need_paren = IsA(op->rarg, SetOperationStmt); /* * The indentation code here is deliberately a bit different from that * for the lefthand input, because we want the line breaks in * different places. 
*/ if (need_paren) { appendStringInfoChar(buf, '('); subindent = PRETTYINDENT_STD; } else subindent = 0; appendContextKeyword(context, "", subindent, 0, 0); get_setop_query(op->rarg, query, context, resultDesc); if (PRETTY_INDENT(context)) context->indentLevel -= subindent; if (need_paren) appendContextKeyword(context, ")", 0, 0, 0); } else { elog(ERROR, "unrecognized node type: %d", (int) nodeTag(setOp)); } } /* * Display a sort/group clause. * * Also returns the expression tree, so caller need not find it again. */ static Node * get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno, deparse_context *context) { StringInfo buf = context->buf; TargetEntry *tle; Node *expr; tle = get_sortgroupref_tle(ref, tlist); expr = (Node *) tle->expr; /* * Use column-number form if requested by caller. Otherwise, if * expression is a constant, force it to be dumped with an explicit cast * as decoration --- this is because a simple integer constant is * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we * dump it without any decoration. If it's anything more complex than a * simple Var, then force extra parens around it, to ensure it can't be * misinterpreted as a cube() or rollup() construct. */ if (force_colno) { Assert(!tle->resjunk); appendStringInfo(buf, "%d", tle->resno); } else if (expr && IsA(expr, Const)) get_const_expr((Const *) expr, context, 1); else if (!expr || IsA(expr, Var)) get_rule_expr(expr, context, true); else { /* * We must force parens for function-like expressions even if * PRETTY_PAREN is off, since those are the ones in danger of * misparsing. For other expressions we need to force them only if * PRETTY_PAREN is on, since otherwise the expression will output them * itself. (We can't skip the parens.) */ bool need_paren = (PRETTY_PAREN(context) || IsA(expr, FuncExpr) ||IsA(expr, Aggref) ||IsA(expr, WindowFunc)); if (need_paren) appendStringInfoString(context->buf, "("); get_rule_expr(expr, context, true); if (need_paren) appendStringInfoString(context->buf, ")"); } return expr; } /* * Display a GroupingSet */ static void get_rule_groupingset(GroupingSet *gset, List *targetlist, bool omit_parens, deparse_context *context) { ListCell *l; StringInfo buf = context->buf; bool omit_child_parens = true; char *sep = ""; switch (gset->kind) { case GROUPING_SET_EMPTY: appendStringInfoString(buf, "()"); return; case GROUPING_SET_SIMPLE: { if (!omit_parens || list_length(gset->content) != 1) appendStringInfoString(buf, "("); foreach(l, gset->content) { Index ref = lfirst_int(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(ref, targetlist, false, context); sep = ", "; } if (!omit_parens || list_length(gset->content) != 1) appendStringInfoString(buf, ")"); } return; case GROUPING_SET_ROLLUP: appendStringInfoString(buf, "ROLLUP("); break; case GROUPING_SET_CUBE: appendStringInfoString(buf, "CUBE("); break; case GROUPING_SET_SETS: appendStringInfoString(buf, "GROUPING SETS ("); omit_child_parens = false; break; } foreach(l, gset->content) { appendStringInfoString(buf, sep); get_rule_groupingset(lfirst(l), targetlist, omit_child_parens, context); sep = ", "; } appendStringInfoString(buf, ")"); } /* * Display an ORDER BY list. 
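 *
 * For example (illustrative): a sort on the type's default "<" operator
 * prints just the expression, ASC being the default (adding NULLS FIRST
 * only if requested); the default ">" operator prints " DESC" plus
 * " NULLS LAST" when nulls_first is not set, since DESC implies NULLS
 * FIRST; any other operator is printed as " USING <operator>" with an
 * explicit NULLS FIRST/LAST.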
*/ static void get_rule_orderby(List *orderList, List *targetList, bool force_colno, deparse_context *context) { StringInfo buf = context->buf; const char *sep; ListCell *l; sep = ""; foreach(l, orderList) { SortGroupClause *srt = (SortGroupClause *) lfirst(l); Node *sortexpr; Oid sortcoltype; TypeCacheEntry *typentry; appendStringInfoString(buf, sep); sortexpr = get_rule_sortgroupclause(srt->tleSortGroupRef, targetList, force_colno, context); sortcoltype = exprType(sortexpr); /* See whether operator is default < or > for datatype */ typentry = lookup_type_cache(sortcoltype, TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); if (srt->sortop == typentry->lt_opr) { /* ASC is default, so emit nothing for it */ if (srt->nulls_first) appendStringInfoString(buf, " NULLS FIRST"); } else if (srt->sortop == typentry->gt_opr) { appendStringInfoString(buf, " DESC"); /* DESC defaults to NULLS FIRST */ if (!srt->nulls_first) appendStringInfoString(buf, " NULLS LAST"); } else { appendStringInfo(buf, " USING %s", generate_operator_name(srt->sortop, sortcoltype, sortcoltype)); /* be specific to eliminate ambiguity */ if (srt->nulls_first) appendStringInfoString(buf, " NULLS FIRST"); else appendStringInfoString(buf, " NULLS LAST"); } sep = ", "; } } /* * Display a WINDOW clause. * * Note that the windowClause list might contain only anonymous window * specifications, in which case we should print nothing here. */ static void get_rule_windowclause(Query *query, deparse_context *context) { StringInfo buf = context->buf; const char *sep; ListCell *l; sep = NULL; foreach(l, query->windowClause) { WindowClause *wc = (WindowClause *) lfirst(l); if (wc->name == NULL) continue; /* ignore anonymous windows */ if (sep == NULL) appendContextKeyword(context, " WINDOW ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); else appendStringInfoString(buf, sep); appendStringInfo(buf, "%s AS ", quote_identifier(wc->name)); get_rule_windowspec(wc, query->targetList, context); sep = ", "; } } /* * Display a window definition */ static void get_rule_windowspec(WindowClause *wc, List *targetList, deparse_context *context) { StringInfo buf = context->buf; bool needspace = false; const char *sep; ListCell *l; appendStringInfoChar(buf, '('); if (wc->refname) { appendStringInfoString(buf, quote_identifier(wc->refname)); needspace = true; } /* partition clauses are always inherited, so only print if no refname */ if (wc->partitionClause && !wc->refname) { if (needspace) appendStringInfoChar(buf, ' '); appendStringInfoString(buf, "PARTITION BY "); sep = ""; foreach(l, wc->partitionClause) { SortGroupClause *grp = (SortGroupClause *) lfirst(l); appendStringInfoString(buf, sep); get_rule_sortgroupclause(grp->tleSortGroupRef, targetList, false, context); sep = ", "; } needspace = true; } /* print ordering clause only if not inherited */ if (wc->orderClause && !wc->copiedOrder) { if (needspace) appendStringInfoChar(buf, ' '); appendStringInfoString(buf, "ORDER BY "); get_rule_orderby(wc->orderClause, targetList, false, context); needspace = true; } /* framing clause is never inherited, so print unless it's default */ if (wc->frameOptions & FRAMEOPTION_NONDEFAULT) { if (needspace) appendStringInfoChar(buf, ' '); if (wc->frameOptions & FRAMEOPTION_RANGE) appendStringInfoString(buf, "RANGE "); else if (wc->frameOptions & FRAMEOPTION_ROWS) appendStringInfoString(buf, "ROWS "); else Assert(false); if (wc->frameOptions & FRAMEOPTION_BETWEEN) appendStringInfoString(buf, "BETWEEN "); if (wc->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING) 
appendStringInfoString(buf, "UNBOUNDED PRECEDING "); else if (wc->frameOptions & FRAMEOPTION_START_CURRENT_ROW) appendStringInfoString(buf, "CURRENT ROW "); else if (wc->frameOptions & FRAMEOPTION_START_VALUE) { get_rule_expr(wc->startOffset, context, false); if (wc->frameOptions & FRAMEOPTION_START_VALUE_PRECEDING) appendStringInfoString(buf, " PRECEDING "); else if (wc->frameOptions & FRAMEOPTION_START_VALUE_FOLLOWING) appendStringInfoString(buf, " FOLLOWING "); else Assert(false); } else Assert(false); if (wc->frameOptions & FRAMEOPTION_BETWEEN) { appendStringInfoString(buf, "AND "); if (wc->frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING) appendStringInfoString(buf, "UNBOUNDED FOLLOWING "); else if (wc->frameOptions & FRAMEOPTION_END_CURRENT_ROW) appendStringInfoString(buf, "CURRENT ROW "); else if (wc->frameOptions & FRAMEOPTION_END_VALUE) { get_rule_expr(wc->endOffset, context, false); if (wc->frameOptions & FRAMEOPTION_END_VALUE_PRECEDING) appendStringInfoString(buf, " PRECEDING "); else if (wc->frameOptions & FRAMEOPTION_END_VALUE_FOLLOWING) appendStringInfoString(buf, " FOLLOWING "); else Assert(false); } else Assert(false); } /* we will now have a trailing space; remove it */ buf->len--; } appendStringInfoChar(buf, ')'); } /* ---------- * get_insert_query_def - Parse back an INSERT parsetree * ---------- */ static void get_insert_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *select_rte = NULL; RangeTblEntry *values_rte = NULL; RangeTblEntry *rte; char *sep; ListCell *l; List *strippedexprs; /* Insert the WITH clause if given */ get_with_clause(query, context); /* * If it's an INSERT ... SELECT or multi-row VALUES, there will be a * single RTE for the SELECT or VALUES. Plain VALUES has neither. */ foreach(l, query->rtable) { rte = (RangeTblEntry *) lfirst(l); if (rte->rtekind == RTE_SUBQUERY) { if (select_rte) elog(ERROR, "too many subquery RTEs in INSERT"); select_rte = rte; } if (rte->rtekind == RTE_VALUES) { if (values_rte) elog(ERROR, "too many values RTEs in INSERT"); values_rte = rte; } } if (select_rte && values_rte) elog(ERROR, "both subquery and values RTEs in INSERT"); /* * Start the query with INSERT INTO relname */ rte = rt_fetch(query->resultRelation, query->rtable); Assert(rte->rtekind == RTE_RELATION); if (PRETTY_INDENT(context)) { context->indentLevel += PRETTYINDENT_STD; appendStringInfoChar(buf, ' '); } appendStringInfo(buf, "INSERT INTO %s ", generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, NIL)); /* INSERT requires AS keyword for target alias */ if (rte->alias != NULL) appendStringInfo(buf, "AS %s ", quote_identifier(rte->alias->aliasname)); /* * Add the insert-column-names list. Any indirection decoration needed on * the column names can be inferred from the top targetlist. */ strippedexprs = NIL; sep = ""; if (query->targetList) appendStringInfoChar(buf, '('); foreach(l, query->targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); if (tle->resjunk) continue; /* ignore junk entries */ appendStringInfoString(buf, sep); sep = ", "; /* * Put out name of target column; look in the catalogs, not at * tle->resname, since resname will fail to track RENAME. */ appendStringInfoString(buf, quote_identifier(get_relid_attribute_name(rte->relid, tle->resno))); /* * Print any indirection needed (subfields or subscripts), and strip * off the top-level nodes representing the indirection assignments. * Add the stripped expressions to strippedexprs. 
(If it's a * single-VALUES statement, the stripped expressions are the VALUES to * print below. Otherwise they're just Vars and not really * interesting.) */ strippedexprs = lappend(strippedexprs, processIndirection((Node *) tle->expr, context)); } if (query->targetList) appendStringInfoString(buf, ") "); if (select_rte) { /* Add the SELECT */ get_query_def(select_rte->subquery, buf, NIL, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); } else if (values_rte) { /* Add the multi-VALUES expression lists */ get_values_def(values_rte->values_lists, context); } else if (strippedexprs) { /* Add the single-VALUES expression list */ appendContextKeyword(context, "VALUES (", -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); get_rule_expr((Node *) strippedexprs, context, false); appendStringInfoChar(buf, ')'); } else { /* No expressions, so it must be DEFAULT VALUES */ appendStringInfoString(buf, "DEFAULT VALUES"); } /* Add ON CONFLICT if present */ if (query->onConflict) { OnConflictExpr *confl = query->onConflict; appendStringInfoString(buf, " ON CONFLICT"); if (confl->arbiterElems) { /* Add the single-VALUES expression list */ appendStringInfoChar(buf, '('); get_rule_expr((Node *) confl->arbiterElems, context, false); appendStringInfoChar(buf, ')'); /* Add a WHERE clause (for partial indexes) if given */ if (confl->arbiterWhere != NULL) { bool save_varprefix; /* * Force non-prefixing of Vars, since parser assumes that they * belong to target relation. WHERE clause does not use * InferenceElem, so this is separately required. */ save_varprefix = context->varprefix; context->varprefix = false; appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(confl->arbiterWhere, context, false); context->varprefix = save_varprefix; } } else if (OidIsValid(confl->constraint)) { char *constraint = get_constraint_name(confl->constraint); int64 shardId = context->shardid; if (shardId > 0) { AppendShardIdToName(&constraint, shardId); } if (!constraint) elog(ERROR, "cache lookup failed for constraint %u", confl->constraint); appendStringInfo(buf, " ON CONSTRAINT %s", quote_identifier(constraint)); } if (confl->action == ONCONFLICT_NOTHING) { appendStringInfoString(buf, " DO NOTHING"); } else { appendStringInfoString(buf, " DO UPDATE SET "); /* Deparse targetlist */ get_update_query_targetlist_def(query, confl->onConflictSet, context, rte); /* Add a WHERE clause if given */ if (confl->onConflictWhere != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(confl->onConflictWhere, context, false); } } } /* Add RETURNING if present */ if (query->returningList) { appendContextKeyword(context, " RETURNING", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_target_list(query->returningList, context, NULL); } } /* ---------- * get_update_query_def - Parse back an UPDATE parsetree * ---------- */ static void get_update_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *rte; /* Insert the WITH clause if given */ get_with_clause(query, context); /* * Start the query with UPDATE relname SET */ rte = rt_fetch(query->resultRelation, query->rtable); if (PRETTY_INDENT(context)) { appendStringInfoChar(buf, ' '); context->indentLevel += PRETTYINDENT_STD; } /* if it's a shard, do differently */ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { char *fragmentSchemaName = NULL; char *fragmentTableName = NULL; ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); 
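/*
 * Note (added for clarity): the schema/table pair extracted above names
 * the shard fragment itself, so below we print it directly instead of
 * resolving rte->relid, and we label it with rte->eref's alias.
 */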
/* use schema and table name from the remote alias */ appendStringInfo(buf, "UPDATE %s%s", only_marker(rte), generate_fragment_name(fragmentSchemaName, fragmentTableName)); if (rte->eref != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->eref->aliasname)); } else { appendStringInfo(buf, "UPDATE %s%s", only_marker(rte), generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, NIL)); if (rte->alias != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->alias->aliasname)); } appendStringInfoString(buf, " SET "); /* Deparse targetlist */ get_update_query_targetlist_def(query, query->targetList, context, rte); /* Add the FROM clause if needed */ get_from_clause(query, " FROM ", context); /* Add a WHERE clause if given */ if (query->jointree->quals != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(query->jointree->quals, context, false); } /* Add RETURNING if present */ if (query->returningList) { appendContextKeyword(context, " RETURNING", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_target_list(query->returningList, context, NULL); } } /* ---------- * get_update_query_targetlist_def - Parse back an UPDATE targetlist * ---------- */ static void get_update_query_targetlist_def(Query *query, List *targetList, deparse_context *context, RangeTblEntry *rte) { StringInfo buf = context->buf; ListCell *l; ListCell *next_ma_cell; int remaining_ma_columns; const char *sep; SubLink *cur_ma_sublink; List *ma_sublinks; /* * Prepare to deal with MULTIEXPR assignments: collect the source SubLinks * into a list. We expect them to appear, in ID order, in resjunk tlist * entries. */ ma_sublinks = NIL; if (query->hasSubLinks) /* else there can't be any */ { foreach(l, targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); if (tle->resjunk && IsA(tle->expr, SubLink)) { SubLink *sl = (SubLink *) tle->expr; if (sl->subLinkType == MULTIEXPR_SUBLINK) { ma_sublinks = lappend(ma_sublinks, sl); Assert(sl->subLinkId == list_length(ma_sublinks)); } } } } next_ma_cell = list_head(ma_sublinks); cur_ma_sublink = NULL; remaining_ma_columns = 0; /* Add the comma separated list of 'attname = value' */ sep = ""; foreach(l, targetList) { TargetEntry *tle = (TargetEntry *) lfirst(l); Node *expr; if (tle->resjunk) continue; /* ignore junk entries */ /* Emit separator (OK whether we're in multiassignment or not) */ appendStringInfoString(buf, sep); sep = ", "; /* * Check to see if we're starting a multiassignment group: if so, * output a left paren. */ if (next_ma_cell != NULL && cur_ma_sublink == NULL) { /* * We must dig down into the expr to see if it's a PARAM_MULTIEXPR * Param. That could be buried under FieldStores and ArrayRefs * and CoerceToDomains (cf processIndirection()), and underneath * those there could be an implicit type coercion. Because we * would ignore implicit type coercions anyway, we don't need to * be as careful as processIndirection() is about descending past * implicit CoerceToDomains.
*/ expr = (Node *) tle->expr; while (expr) { if (IsA(expr, FieldStore)) { FieldStore *fstore = (FieldStore *) expr; expr = (Node *) linitial(fstore->newvals); } else if (IsA(expr, ArrayRef)) { ArrayRef *aref = (ArrayRef *) expr; if (aref->refassgnexpr == NULL) break; expr = (Node *) aref->refassgnexpr; } else if (IsA(expr, CoerceToDomain)) { CoerceToDomain *cdomain = (CoerceToDomain *) expr; if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) break; expr = (Node *) cdomain->arg; } else break; } expr = strip_implicit_coercions(expr); if (expr && IsA(expr, Param) && ((Param *) expr)->paramkind == PARAM_MULTIEXPR) { cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); next_ma_cell = lnext(next_ma_cell); remaining_ma_columns = count_nonjunk_tlist_entries( ((Query *) cur_ma_sublink->subselect)->targetList); Assert(((Param *) expr)->paramid == ((cur_ma_sublink->subLinkId << 16) | 1)); appendStringInfoChar(buf, '('); } } /* * Put out name of target column; look in the catalogs, not at * tle->resname, since resname will fail to track RENAME. */ appendStringInfoString(buf, quote_identifier(get_relid_attribute_name(rte->relid, tle->resno))); /* * Print any indirection needed (subfields or subscripts), and strip * off the top-level nodes representing the indirection assignments. */ expr = processIndirection((Node *) tle->expr, context); /* * If we're in a multiassignment, skip printing anything more, unless * this is the last column; in which case, what we print should be the * sublink, not the Param. */ if (cur_ma_sublink != NULL) { if (--remaining_ma_columns > 0) continue; /* not the last column of multiassignment */ appendStringInfoChar(buf, ')'); expr = (Node *) cur_ma_sublink; cur_ma_sublink = NULL; } appendStringInfoString(buf, " = "); get_rule_expr(expr, context, false); } } /* ---------- * get_delete_query_def - Parse back a DELETE parsetree * ---------- */ static void get_delete_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *rte; /* Insert the WITH clause if given */ get_with_clause(query, context); /* * Start the query with DELETE FROM relname */ rte = rt_fetch(query->resultRelation, query->rtable); if (PRETTY_INDENT(context)) { appendStringInfoChar(buf, ' '); context->indentLevel += PRETTYINDENT_STD; } /* if it's a shard, do differently */ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { char *fragmentSchemaName = NULL; char *fragmentTableName = NULL; ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); /* use schema and table name from the remote alias */ appendStringInfo(buf, "DELETE FROM %s%s", only_marker(rte), generate_fragment_name(fragmentSchemaName, fragmentTableName)); if (rte->eref != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->eref->aliasname)); } else { appendStringInfo(buf, "DELETE FROM %s%s", only_marker(rte), generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, NIL)); if (rte->alias != NULL) appendStringInfo(buf, " %s", quote_identifier(rte->alias->aliasname)); } /* Add the USING clause if given */ get_from_clause(query, " USING ", context); /* Add a WHERE clause if given */ if (query->jointree->quals != NULL) { appendContextKeyword(context, " WHERE ", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_rule_expr(query->jointree->quals, context, false); } /* Add RETURNING if present */ if (query->returningList) { appendContextKeyword(context, " RETURNING", -PRETTYINDENT_STD, PRETTYINDENT_STD, 1); get_target_list(query->returningList, context, NULL); } } /* 
---------- * get_utility_query_def - Parse back a UTILITY parsetree * ---------- */ static void get_utility_query_def(Query *query, deparse_context *context) { StringInfo buf = context->buf; if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt)) { NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt; appendContextKeyword(context, "", 0, PRETTYINDENT_STD, 1); appendStringInfo(buf, "NOTIFY %s", quote_identifier(stmt->conditionname)); if (stmt->payload) { appendStringInfoString(buf, ", "); simple_quote_literal(buf, stmt->payload); } } else if (query->utilityStmt && IsA(query->utilityStmt, TruncateStmt)) { TruncateStmt *stmt = (TruncateStmt *) query->utilityStmt; List *relationList = stmt->relations; ListCell *relationCell = NULL; appendContextKeyword(context, "", 0, PRETTYINDENT_STD, 1); appendStringInfo(buf, "TRUNCATE TABLE"); foreach(relationCell, relationList) { RangeVar *relationVar = (RangeVar *) lfirst(relationCell); Oid relationId = RangeVarGetRelid(relationVar, NoLock, false); char *relationName = generate_relation_or_shard_name(relationId, context->distrelid, context->shardid, NIL); appendStringInfo(buf, " %s", relationName); if (lnext(relationCell) != NULL) { appendStringInfo(buf, ","); } } if (stmt->restart_seqs) { appendStringInfo(buf, " RESTART IDENTITY"); } if (stmt->behavior == DROP_CASCADE) { appendStringInfo(buf, " CASCADE"); } } else { /* Currently only NOTIFY utility commands can appear in rules */ elog(ERROR, "unexpected utility statement type"); } } /* * Display a Var appropriately. * * In some cases (currently only when recursing into an unnamed join) * the Var's varlevelsup has to be interpreted with respect to a context * above the current one; levelsup indicates the offset. * * If istoplevel is TRUE, the Var is at the top level of a SELECT's * targetlist, which means we need special treatment of whole-row Vars. * Instead of the normal "tab.*", we'll print "tab.*::typename", which is a * dirty hack to prevent "tab.*" from being expanded into multiple columns. * (The parser will strip the useless coercion, so no inefficiency is added in * dump and reload.) We used to print just "tab" in such cases, but that is * ambiguous and will yield the wrong result if "tab" is also a plain column * name in the query. * * Returns the attname of the Var, or NULL if the Var has no attname (because * it is a whole-row Var or a subplan output reference). */ static char * get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) { StringInfo buf = context->buf; RangeTblEntry *rte; AttrNumber attnum; int netlevelsup; deparse_namespace *dpns; deparse_columns *colinfo; char *refname; char *attname; /* Find appropriate nesting depth */ netlevelsup = var->varlevelsup + levelsup; if (netlevelsup >= list_length(context->namespaces)) elog(ERROR, "bogus varlevelsup: %d offset %d", var->varlevelsup, levelsup); dpns = (deparse_namespace *) list_nth(context->namespaces, netlevelsup); /* * Try to find the relevant RTE in this rtable. In a plan tree, it's * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig * down into the subplans, or INDEX_VAR, which is resolved similarly. Also * find the aliases previously assigned for this RTE. 
*/ if (var->varno >= 1 && var->varno <= list_length(dpns->rtable)) { rte = rt_fetch(var->varno, dpns->rtable); refname = (char *) list_nth(dpns->rtable_names, var->varno - 1); colinfo = deparse_columns_fetch(var->varno, dpns); attnum = var->varattno; } else { resolve_special_varno((Node *) var, context, NULL, get_special_variable); return NULL; } /* * The planner will sometimes emit Vars referencing resjunk elements of a * subquery's target list (this is currently only possible if it chooses * to generate a "physical tlist" for a SubqueryScan or CteScan node). * Although we prefer to print subquery-referencing Vars using the * subquery's alias, that's not possible for resjunk items since they have * no alias. So in that case, drill down to the subplan and print the * contents of the referenced tlist item. This works because in a plan * tree, such Vars can only occur in a SubqueryScan or CteScan node, and * we'll have set dpns->inner_planstate to reference the child plan node. */ if ((rte->rtekind == RTE_SUBQUERY || rte->rtekind == RTE_CTE) && attnum > list_length(rte->eref->colnames) && dpns->inner_planstate) { TargetEntry *tle; deparse_namespace save_dpns; tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); if (!tle) elog(ERROR, "invalid attnum %d for relation \"%s\"", var->varattno, rte->eref->aliasname); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); /* * Force parentheses because our caller probably assumed a Var is a * simple expression. */ if (!IsA(tle->expr, Var)) appendStringInfoChar(buf, '('); get_rule_expr((Node *) tle->expr, context, true); if (!IsA(tle->expr, Var)) appendStringInfoChar(buf, ')'); pop_child_plan(dpns, &save_dpns); return NULL; } /* * If it's an unnamed join, look at the expansion of the alias variable. * If it's a simple reference to one of the input vars, then recursively * print the name of that var instead. When it's not a simple reference, * we have to just print the unqualified join column name. (This can only * happen with "dangerous" merged columns in a JOIN USING; we took pains * previously to make the unqualified column name unique in such cases.) * * This wouldn't work in decompiling plan trees, because we don't store * joinaliasvars lists after planning; but a plan tree should never * contain a join alias variable. */ if (rte->rtekind == RTE_JOIN && rte->alias == NULL) { if (rte->joinaliasvars == NIL) elog(ERROR, "cannot decompile join alias var in plan tree"); if (attnum > 0) { Var *aliasvar; aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1); /* we intentionally don't strip implicit coercions here */ if (aliasvar && IsA(aliasvar, Var)) { return get_variable(aliasvar, var->varlevelsup + levelsup, istoplevel, context); } } /* * Unnamed join has no refname. (Note: since it's unnamed, there is * no way the user could have referenced it to create a whole-row Var * for it. So we don't have to cover that case below.) */ Assert(refname == NULL); } if (attnum == InvalidAttrNumber) attname = NULL; else if (attnum > 0) { /* Get column name to use from the colinfo struct */ if (attnum > colinfo->num_cols) elog(ERROR, "invalid attnum %d for relation \"%s\"", attnum, rte->eref->aliasname); attname = colinfo->colnames[attnum - 1]; if (attname == NULL) /* dropped column? 
*/ elog(ERROR, "invalid attnum %d for relation \"%s\"", attnum, rte->eref->aliasname); } else if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { /* System column on a Citus shard */ attname = get_relid_attribute_name(rte->relid, attnum); } else { /* System column - name is fixed, get it from the catalog */ attname = get_rte_attribute_name(rte, attnum); } if (refname && (context->varprefix || attname == NULL)) { appendStringInfoString(buf, quote_identifier(refname)); appendStringInfoChar(buf, '.'); } if (attname) appendStringInfoString(buf, quote_identifier(attname)); else { appendStringInfoChar(buf, '*'); if (istoplevel) appendStringInfo(buf, "::%s", format_type_with_typemod(var->vartype, var->vartypmod)); } return attname; } /* * Deparse a Var which references OUTER_VAR, INNER_VAR, or INDEX_VAR. This * routine is actually a callback for get_special_varno, which handles finding * the correct TargetEntry. We get the expression contained in that * TargetEntry and just need to deparse it, a job we can throw back on * get_rule_expr. */ static void get_special_variable(Node *node, deparse_context *context, void *private) { StringInfo buf = context->buf; /* * Force parentheses because our caller probably assumed a Var is a simple * expression. */ if (!IsA(node, Var)) appendStringInfoChar(buf, '('); get_rule_expr(node, context, true); if (!IsA(node, Var)) appendStringInfoChar(buf, ')'); } /* * Chase through plan references to special varnos (OUTER_VAR, INNER_VAR, * INDEX_VAR) until we find a real Var or some kind of non-Var node; then, * invoke the callback provided. */ static void resolve_special_varno(Node *node, deparse_context *context, void *private, void (*callback) (Node *, deparse_context *, void *)) { Var *var; deparse_namespace *dpns; /* If it's not a Var, invoke the callback. */ if (!IsA(node, Var)) { callback(node, context, private); return; } /* Find appropriate nesting depth */ var = (Var *) node; dpns = (deparse_namespace *) list_nth(context->namespaces, var->varlevelsup); /* * It's a special RTE, so recurse. */ if (var->varno == OUTER_VAR && dpns->outer_tlist) { TargetEntry *tle; deparse_namespace save_dpns; tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); push_child_plan(dpns, dpns->outer_planstate, &save_dpns); resolve_special_varno((Node *) tle->expr, context, private, callback); pop_child_plan(dpns, &save_dpns); return; } else if (var->varno == INNER_VAR && dpns->inner_tlist) { TargetEntry *tle; deparse_namespace save_dpns; tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); resolve_special_varno((Node *) tle->expr, context, private, callback); pop_child_plan(dpns, &save_dpns); return; } else if (var->varno == INDEX_VAR && dpns->index_tlist) { TargetEntry *tle; tle = get_tle_by_resno(dpns->index_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); resolve_special_varno((Node *) tle->expr, context, private, callback); return; } else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) elog(ERROR, "bogus varno: %d", var->varno); /* Not special. Just invoke the callback. */ callback(node, context, private); } /* * Get the name of a field of an expression of composite type. The * expression is usually a Var, but we handle other cases too. 
* * levelsup is an extra offset to interpret the Var's varlevelsup correctly. * * This is fairly straightforward when the expression has a named composite * type; we need only look up the type in the catalogs. However, the type * could also be RECORD. Since no actual table or view column is allowed to * have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE * or to a subquery output. We drill down to find the ultimate defining * expression and attempt to infer the field name from it. We ereport if we * can't determine the name. * * Similarly, a PARAM of type RECORD has to refer to some expression of * a determinable composite type. */ static const char * get_name_for_var_field(Var *var, int fieldno, int levelsup, deparse_context *context) { RangeTblEntry *rte; AttrNumber attnum; int netlevelsup; deparse_namespace *dpns; TupleDesc tupleDesc; Node *expr; /* * If it's a RowExpr that was expanded from a whole-row Var, use the * column names attached to it. */ if (IsA(var, RowExpr)) { RowExpr *r = (RowExpr *) var; if (fieldno > 0 && fieldno <= list_length(r->colnames)) return strVal(list_nth(r->colnames, fieldno - 1)); } /* * If it's a Param of type RECORD, try to find what the Param refers to. */ if (IsA(var, Param)) { Param *param = (Param *) var; ListCell *ancestor_cell; expr = find_param_referent(param, context, &dpns, &ancestor_cell); if (expr) { /* Found a match, so recurse to decipher the field name */ deparse_namespace save_dpns; const char *result; push_ancestor_plan(dpns, ancestor_cell, &save_dpns); result = get_name_for_var_field((Var *) expr, fieldno, 0, context); pop_ancestor_plan(dpns, &save_dpns); return result; } } /* * If it's a Var of type RECORD, we have to find what the Var refers to; * if not, we can use get_expr_result_type. If that fails, we try * lookup_rowtype_tupdesc, which will probably fail too, but will ereport * an acceptable message. */ if (!IsA(var, Var) || var->vartype != RECORDOID) { if (get_expr_result_type((Node *) var, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) tupleDesc = lookup_rowtype_tupdesc_copy(exprType((Node *) var), exprTypmod((Node *) var)); Assert(tupleDesc); /* Got the tupdesc, so we can extract the field name */ Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); return NameStr(tupleDesc->attrs[fieldno - 1]->attname); } /* Find appropriate nesting depth */ netlevelsup = var->varlevelsup + levelsup; if (netlevelsup >= list_length(context->namespaces)) elog(ERROR, "bogus varlevelsup: %d offset %d", var->varlevelsup, levelsup); dpns = (deparse_namespace *) list_nth(context->namespaces, netlevelsup); /* * Try to find the relevant RTE in this rtable. In a plan tree, it's * likely that varno is OUTER_VAR or INNER_VAR, in which case we must dig * down into the subplans, or INDEX_VAR, which is resolved similarly. 
*/ if (var->varno >= 1 && var->varno <= list_length(dpns->rtable)) { rte = rt_fetch(var->varno, dpns->rtable); attnum = var->varattno; } else if (var->varno == OUTER_VAR && dpns->outer_tlist) { TargetEntry *tle; deparse_namespace save_dpns; const char *result; tle = get_tle_by_resno(dpns->outer_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for OUTER_VAR var: %d", var->varattno); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->outer_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } else if (var->varno == INNER_VAR && dpns->inner_tlist) { TargetEntry *tle; deparse_namespace save_dpns; const char *result; tle = get_tle_by_resno(dpns->inner_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INNER_VAR var: %d", var->varattno); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } else if (var->varno == INDEX_VAR && dpns->index_tlist) { TargetEntry *tle; const char *result; tle = get_tle_by_resno(dpns->index_tlist, var->varattno); if (!tle) elog(ERROR, "bogus varattno for INDEX_VAR var: %d", var->varattno); Assert(netlevelsup == 0); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); return result; } else { elog(ERROR, "bogus varno: %d", var->varno); return NULL; /* keep compiler quiet */ } if (attnum == InvalidAttrNumber) { /* Var is whole-row reference to RTE, so select the right field */ return get_rte_attribute_name(rte, fieldno); } /* * This part has essentially the same logic as the parser's * expandRecordVariable() function, but we are dealing with a different * representation of the input context, and we only need one field name * not a TupleDesc. Also, we need special cases for finding subquery and * CTE subplans when deparsing Plan trees. */ expr = (Node *) var; /* default if we can't drill down */ switch (rte->rtekind) { case RTE_RELATION: case RTE_VALUES: /* * This case should not occur: a column of a table or values list * shouldn't have type RECORD. Fall through and fail (most * likely) at the bottom. */ break; case RTE_SUBQUERY: /* Subselect-in-FROM: examine sub-select's output expr */ { if (rte->subquery) { TargetEntry *ste = get_tle_by_resno(rte->subquery->targetList, attnum); if (ste == NULL || ste->resjunk) elog(ERROR, "subquery %s does not have attribute %d", rte->eref->aliasname, attnum); expr = (Node *) ste->expr; if (IsA(expr, Var)) { /* * Recurse into the sub-select to see what its Var * refers to. We have to build an additional level of * namespace to keep in step with varlevelsup in the * subselect. */ deparse_namespace mydpns; const char *result; set_deparse_for_query(&mydpns, rte->subquery, context->namespaces); context->namespaces = lcons(&mydpns, context->namespaces); result = get_name_for_var_field((Var *) expr, fieldno, 0, context); context->namespaces = list_delete_first(context->namespaces); return result; } /* else fall through to inspect the expression */ } else { /* * We're deparsing a Plan tree so we don't have complete * RTE entries (in particular, rte->subquery is NULL). But * the only place we'd see a Var directly referencing a * SUBQUERY RTE is in a SubqueryScan plan node, and we can * look into the child plan's tlist instead. 
*/ TargetEntry *tle; deparse_namespace save_dpns; const char *result; if (!dpns->inner_planstate) elog(ERROR, "failed to find plan for subquery %s", rte->eref->aliasname); tle = get_tle_by_resno(dpns->inner_tlist, attnum); if (!tle) elog(ERROR, "bogus varattno for subquery var: %d", attnum); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } } break; case RTE_JOIN: /* Join RTE --- recursively inspect the alias variable */ if (rte->joinaliasvars == NIL) elog(ERROR, "cannot decompile join alias var in plan tree"); Assert(attnum > 0 && attnum <= list_length(rte->joinaliasvars)); expr = (Node *) list_nth(rte->joinaliasvars, attnum - 1); Assert(expr != NULL); /* we intentionally don't strip implicit coercions here */ if (IsA(expr, Var)) return get_name_for_var_field((Var *) expr, fieldno, var->varlevelsup + levelsup, context); /* else fall through to inspect the expression */ break; case RTE_FUNCTION: /* * We couldn't get here unless a function is declared with one of * its result columns as RECORD, which is not allowed. */ break; case RTE_CTE: /* CTE reference: examine subquery's output expr */ { CommonTableExpr *cte = NULL; Index ctelevelsup; ListCell *lc; /* * Try to find the referenced CTE using the namespace stack. */ ctelevelsup = rte->ctelevelsup + netlevelsup; if (ctelevelsup >= list_length(context->namespaces)) lc = NULL; else { deparse_namespace *ctedpns; ctedpns = (deparse_namespace *) list_nth(context->namespaces, ctelevelsup); foreach(lc, ctedpns->ctes) { cte = (CommonTableExpr *) lfirst(lc); if (strcmp(cte->ctename, rte->ctename) == 0) break; } } if (lc != NULL) { Query *ctequery = (Query *) cte->ctequery; TargetEntry *ste = get_tle_by_resno(GetCTETargetList(cte), attnum); if (ste == NULL || ste->resjunk) elog(ERROR, "subquery %s does not have attribute %d", rte->eref->aliasname, attnum); expr = (Node *) ste->expr; if (IsA(expr, Var)) { /* * Recurse into the CTE to see what its Var refers to. * We have to build an additional level of namespace * to keep in step with varlevelsup in the CTE. * Furthermore it could be an outer CTE, so we may * have to delete some levels of namespace. */ List *save_nslist = context->namespaces; List *new_nslist; deparse_namespace mydpns; const char *result; set_deparse_for_query(&mydpns, ctequery, context->namespaces); new_nslist = list_copy_tail(context->namespaces, ctelevelsup); context->namespaces = lcons(&mydpns, new_nslist); result = get_name_for_var_field((Var *) expr, fieldno, 0, context); context->namespaces = save_nslist; return result; } /* else fall through to inspect the expression */ } else { /* * We're deparsing a Plan tree so we don't have a CTE * list. But the only place we'd see a Var directly * referencing a CTE RTE is in a CteScan plan node, and we * can look into the subplan's tlist instead. 
*/ TargetEntry *tle; deparse_namespace save_dpns; const char *result; if (!dpns->inner_planstate) elog(ERROR, "failed to find plan for CTE %s", rte->eref->aliasname); tle = get_tle_by_resno(dpns->inner_tlist, attnum); if (!tle) elog(ERROR, "bogus varattno for subquery var: %d", attnum); Assert(netlevelsup == 0); push_child_plan(dpns, dpns->inner_planstate, &save_dpns); result = get_name_for_var_field((Var *) tle->expr, fieldno, levelsup, context); pop_child_plan(dpns, &save_dpns); return result; } } break; } /* * We now have an expression we can't expand any more, so see if * get_expr_result_type() can do anything with it. If not, pass to * lookup_rowtype_tupdesc() which will probably fail, but will give an * appropriate error message while failing. */ if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE) tupleDesc = lookup_rowtype_tupdesc_copy(exprType(expr), exprTypmod(expr)); Assert(tupleDesc); /* Got the tupdesc, so we can extract the field name */ Assert(fieldno >= 1 && fieldno <= tupleDesc->natts); return NameStr(tupleDesc->attrs[fieldno - 1]->attname); } /* * Try to find the referenced expression for a PARAM_EXEC Param that might * reference a parameter supplied by an upper NestLoop or SubPlan plan node. * * If successful, return the expression and set *dpns_p and *ancestor_cell_p * appropriately for calling push_ancestor_plan(). If no referent can be * found, return NULL. */ static Node * find_param_referent(Param *param, deparse_context *context, deparse_namespace **dpns_p, ListCell **ancestor_cell_p) { /* Initialize output parameters to prevent compiler warnings */ *dpns_p = NULL; *ancestor_cell_p = NULL; /* * If it's a PARAM_EXEC parameter, look for a matching NestLoopParam or * SubPlan argument. This will necessarily be in some ancestor of the * current expression's PlanState. */ if (param->paramkind == PARAM_EXEC) { deparse_namespace *dpns; PlanState *child_ps; bool in_same_plan_level; ListCell *lc; dpns = (deparse_namespace *) linitial(context->namespaces); child_ps = dpns->planstate; in_same_plan_level = true; foreach(lc, dpns->ancestors) { PlanState *ps = (PlanState *) lfirst(lc); ListCell *lc2; /* * NestLoops transmit params to their inner child only; also, once * we've crawled up out of a subplan, this couldn't possibly be * the right match. */ if (IsA(ps, NestLoopState) && child_ps == innerPlanState(ps) && in_same_plan_level) { NestLoop *nl = (NestLoop *) ps->plan; foreach(lc2, nl->nestParams) { NestLoopParam *nlp = (NestLoopParam *) lfirst(lc2); if (nlp->paramno == param->paramid) { /* Found a match, so return it */ *dpns_p = dpns; *ancestor_cell_p = lc; return (Node *) nlp->paramval; } } } /* * Check to see if we're crawling up from a subplan. */ foreach(lc2, ps->subPlan) { SubPlanState *sstate = (SubPlanState *) lfirst(lc2); SubPlan *subplan = (SubPlan *) sstate->xprstate.expr; ListCell *lc3; ListCell *lc4; if (child_ps != sstate->planstate) continue; /* Matched subplan, so check its arguments */ forboth(lc3, subplan->parParam, lc4, subplan->args) { int paramid = lfirst_int(lc3); Node *arg = (Node *) lfirst(lc4); if (paramid == param->paramid) { /* Found a match, so return it */ *dpns_p = dpns; *ancestor_cell_p = lc; return arg; } } /* Keep looking, but we are emerging from a subplan. */ in_same_plan_level = false; break; } /* * Likewise check to see if we're emerging from an initplan. * Initplans never have any parParams, so no need to search that * list, but we need to know if we should reset * in_same_plan_level. 
*/ foreach(lc2, ps->initPlan) { SubPlanState *sstate = (SubPlanState *) lfirst(lc2); if (child_ps != sstate->planstate) continue; /* No parameters to be had here. */ Assert(((SubPlan *) sstate->xprstate.expr)->parParam == NIL); /* Keep looking, but we are emerging from an initplan. */ in_same_plan_level = false; break; } /* No luck, crawl up to next ancestor */ child_ps = ps; } } /* No referent found */ return NULL; } /* * Display a Param appropriately. */ static void get_parameter(Param *param, deparse_context *context) { Node *expr; deparse_namespace *dpns; ListCell *ancestor_cell; /* * If it's a PARAM_EXEC parameter, try to locate the expression from which * the parameter was computed. Note that failing to find a referent isn't * an error, since the Param might well be a subplan output rather than an * input. */ expr = find_param_referent(param, context, &dpns, &ancestor_cell); if (expr) { /* Found a match, so print it */ deparse_namespace save_dpns; bool save_varprefix; bool need_paren; /* Switch attention to the ancestor plan node */ push_ancestor_plan(dpns, ancestor_cell, &save_dpns); /* * Force prefixing of Vars, since they won't belong to the relation * being scanned in the original plan node. */ save_varprefix = context->varprefix; context->varprefix = true; /* * A Param's expansion is typically a Var, Aggref, or upper-level * Param, which wouldn't need extra parentheses. Otherwise, insert * parens to ensure the expression looks atomic. */ need_paren = !(IsA(expr, Var) || IsA(expr, Aggref) || IsA(expr, Param)); if (need_paren) appendStringInfoChar(context->buf, '('); get_rule_expr(expr, context, false); if (need_paren) appendStringInfoChar(context->buf, ')'); context->varprefix = save_varprefix; pop_ancestor_plan(dpns, &save_dpns); return; } /* * Not PARAM_EXEC, or couldn't find referent: just print $N. */ appendStringInfo(context->buf, "$%d", param->paramid); } /* * get_simple_binary_op_name * * helper function for isSimpleNode * will return single char binary operator name, or NULL if it's not */ static const char * get_simple_binary_op_name(OpExpr *expr) { List *args = expr->args; if (list_length(args) == 2) { /* binary operator */ Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); const char *op; op = generate_operator_name(expr->opno, exprType(arg1), exprType(arg2)); if (strlen(op) == 1) return op; } return NULL; } /* * isSimpleNode - check if given node is simple (doesn't need parenthesizing) * * true : simple in the context of parent node's type * false : not simple */ static bool isSimpleNode(Node *node, Node *parentNode, int prettyFlags) { if (!node) return false; switch (nodeTag(node)) { case T_Var: case T_Const: case T_Param: case T_CoerceToDomainValue: case T_SetToDefault: case T_CurrentOfExpr: /* single words: always simple */ return true; case T_ArrayRef: case T_ArrayExpr: case T_RowExpr: case T_CoalesceExpr: case T_MinMaxExpr: case T_XmlExpr: case T_NullIfExpr: case T_Aggref: case T_WindowFunc: case T_FuncExpr: /* function-like: name(..) or name[..] */ return true; /* CASE keywords act as parentheses */ case T_CaseExpr: return true; case T_FieldSelect: /* * appears simple since . has top precedence, unless parent is * T_FieldSelect itself! */ return (IsA(parentNode, FieldSelect) ? false : true); case T_FieldStore: /* * treat like FieldSelect (probably doesn't matter) */ return (IsA(parentNode, FieldStore) ? 
false : true); case T_CoerceToDomain: /* maybe simple, check args */ return isSimpleNode((Node *) ((CoerceToDomain *) node)->arg, node, prettyFlags); case T_RelabelType: return isSimpleNode((Node *) ((RelabelType *) node)->arg, node, prettyFlags); case T_CoerceViaIO: return isSimpleNode((Node *) ((CoerceViaIO *) node)->arg, node, prettyFlags); case T_ArrayCoerceExpr: return isSimpleNode((Node *) ((ArrayCoerceExpr *) node)->arg, node, prettyFlags); case T_ConvertRowtypeExpr: return isSimpleNode((Node *) ((ConvertRowtypeExpr *) node)->arg, node, prettyFlags); case T_OpExpr: { /* depends on parent node type; needs further checking */ if (prettyFlags & PRETTYFLAG_PAREN && IsA(parentNode, OpExpr)) { const char *op; const char *parentOp; bool is_lopriop; bool is_hipriop; bool is_lopriparent; bool is_hipriparent; op = get_simple_binary_op_name((OpExpr *) node); if (!op) return false; /* We know only the basic operators + - and * / % */ is_lopriop = (strchr("+-", *op) != NULL); is_hipriop = (strchr("*/%", *op) != NULL); if (!(is_lopriop || is_hipriop)) return false; parentOp = get_simple_binary_op_name((OpExpr *) parentNode); if (!parentOp) return false; is_lopriparent = (strchr("+-", *parentOp) != NULL); is_hipriparent = (strchr("*/%", *parentOp) != NULL); if (!(is_lopriparent || is_hipriparent)) return false; if (is_hipriop && is_lopriparent) return true; /* op binds tighter than parent */ if (is_lopriop && is_hipriparent) return false; /* * Operators are same priority --- can skip parens only if * we have (a - b) - c, not a - (b - c). */ if (node == (Node *) linitial(((OpExpr *) parentNode)->args)) return true; return false; } /* else do the same stuff as for T_SubLink et al. */ } /* fallthrough */ case T_SubLink: case T_NullTest: case T_BooleanTest: case T_DistinctExpr: switch (nodeTag(parentNode)) { case T_FuncExpr: { /* special handling for casts */ CoercionForm type = ((FuncExpr *) parentNode)->funcformat; if (type == COERCE_EXPLICIT_CAST || type == COERCE_IMPLICIT_CAST) return false; return true; /* own parentheses */ } case T_BoolExpr: /* lower precedence */ case T_ArrayRef: /* other separators */ case T_ArrayExpr: /* other separators */ case T_RowExpr: /* other separators */ case T_CoalesceExpr: /* own parentheses */ case T_MinMaxExpr: /* own parentheses */ case T_XmlExpr: /* own parentheses */ case T_NullIfExpr: /* other separators */ case T_Aggref: /* own parentheses */ case T_WindowFunc: /* own parentheses */ case T_CaseExpr: /* other separators */ return true; default: return false; } case T_BoolExpr: switch (nodeTag(parentNode)) { case T_BoolExpr: if (prettyFlags & PRETTYFLAG_PAREN) { BoolExprType type; BoolExprType parentType; type = ((BoolExpr *) node)->boolop; parentType = ((BoolExpr *) parentNode)->boolop; switch (type) { case NOT_EXPR: case AND_EXPR: if (parentType == AND_EXPR || parentType == OR_EXPR) return true; break; case OR_EXPR: if (parentType == OR_EXPR) return true; break; } } return false; case T_FuncExpr: { /* special handling for casts */ CoercionForm type = ((FuncExpr *) parentNode)->funcformat; if (type == COERCE_EXPLICIT_CAST || type == COERCE_IMPLICIT_CAST) return false; return true; /* own parentheses */ } case T_ArrayRef: /* other separators */ case T_ArrayExpr: /* other separators */ case T_RowExpr: /* other separators */ case T_CoalesceExpr: /* own parentheses */ case T_MinMaxExpr: /* own parentheses */ case T_XmlExpr: /* own parentheses */ case T_NullIfExpr: /* other separators */ case T_Aggref: /* own parentheses */ case T_WindowFunc: /* own parentheses */ 
case T_CaseExpr: /* other separators */ return true; default: return false; } default: break; } /* those we don't know: in dubio complexo */ return false; } /* * appendContextKeyword - append a keyword to buffer * * If prettyPrint is enabled, perform a line break, and adjust indentation. * Otherwise, just append the keyword. */ static void appendContextKeyword(deparse_context *context, const char *str, int indentBefore, int indentAfter, int indentPlus) { StringInfo buf = context->buf; if (PRETTY_INDENT(context)) { int indentAmount; context->indentLevel += indentBefore; /* remove any trailing spaces currently in the buffer ... */ removeStringInfoSpaces(buf); /* ... then add a newline and some spaces */ appendStringInfoChar(buf, '\n'); if (context->indentLevel < PRETTYINDENT_LIMIT) indentAmount = Max(context->indentLevel, 0) + indentPlus; else { /* * If we're indented more than PRETTYINDENT_LIMIT characters, try * to conserve horizontal space by reducing the per-level * indentation. For best results the scale factor here should * divide all the indent amounts that get added to indentLevel * (PRETTYINDENT_STD, etc). It's important that the indentation * not grow unboundedly, else deeply-nested trees use O(N^2) * whitespace; so we also wrap modulo PRETTYINDENT_LIMIT. */ indentAmount = PRETTYINDENT_LIMIT + (context->indentLevel - PRETTYINDENT_LIMIT) / (PRETTYINDENT_STD / 2); indentAmount %= PRETTYINDENT_LIMIT; /* scale/wrap logic affects indentLevel, but not indentPlus */ indentAmount += indentPlus; } appendStringInfoSpaces(buf, indentAmount); appendStringInfoString(buf, str); context->indentLevel += indentAfter; if (context->indentLevel < 0) context->indentLevel = 0; } else appendStringInfoString(buf, str); } /* * removeStringInfoSpaces - delete trailing spaces from a buffer. * * Possibly this should move to stringinfo.c at some point. */ static void removeStringInfoSpaces(StringInfo str) { while (str->len > 0 && str->data[str->len - 1] == ' ') str->data[--(str->len)] = '\0'; } /* * get_rule_expr_paren - deparse expr using get_rule_expr, * embracing the string with parentheses if necessary for prettyPrint. * * Never embrace if prettyFlags=0, because it's done in the calling node. * * Any node that does *not* embrace its argument node by sql syntax (with * parentheses, non-operator keywords like CASE/WHEN/ON, or comma etc) should * use get_rule_expr_paren instead of get_rule_expr so parentheses can be * added. */ static void get_rule_expr_paren(Node *node, deparse_context *context, bool showimplicit, Node *parentNode) { bool need_paren; need_paren = PRETTY_PAREN(context) && !isSimpleNode(node, parentNode, context->prettyFlags); if (need_paren) appendStringInfoChar(context->buf, '('); get_rule_expr(node, context, showimplicit); if (need_paren) appendStringInfoChar(context->buf, ')'); } /* ---------- * get_rule_expr - Parse back an expression * * Note: showimplicit determines whether we display any implicit cast that * is present at the top of the expression tree. It is a passed argument, * not a field of the context struct, because we change the value as we * recurse down into the expression. In general we suppress implicit casts * when the result type is known with certainty (eg, the arguments of an * OR must be boolean). We display implicit casts for arguments of functions * and operators, since this is needed to be certain that the same function * or operator will be chosen when the expression is re-parsed. 
* ---------- */ static void get_rule_expr(Node *node, deparse_context *context, bool showimplicit) { StringInfo buf = context->buf; if (node == NULL) return; /* Guard against excessively long or deeply-nested queries */ CHECK_FOR_INTERRUPTS(); check_stack_depth(); /* * Each level of get_rule_expr must emit an indivisible term * (parenthesized if necessary) to ensure result is reparsed into the same * expression tree. The only exception is that when the input is a List, * we emit the component items comma-separated with no surrounding * decoration; this is convenient for most callers. */ switch (nodeTag(node)) { case T_Var: (void) get_variable((Var *) node, 0, false, context); break; case T_Const: get_const_expr((Const *) node, context, 0); break; case T_Param: get_parameter((Param *) node, context); break; case T_Aggref: get_agg_expr((Aggref *) node, context, (Aggref *) node); break; case T_GroupingFunc: { GroupingFunc *gexpr = (GroupingFunc *) node; appendStringInfoString(buf, "GROUPING("); get_rule_expr((Node *) gexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_WindowFunc: get_windowfunc_expr((WindowFunc *) node, context); break; case T_ArrayRef: { ArrayRef *aref = (ArrayRef *) node; bool need_parens; /* * If the argument is a CaseTestExpr, we must be inside a * FieldStore, ie, we are assigning to an element of an array * within a composite column. Since we already punted on * displaying the FieldStore's target information, just punt * here too, and display only the assignment source * expression. */ if (IsA(aref->refexpr, CaseTestExpr)) { Assert(aref->refassgnexpr); get_rule_expr((Node *) aref->refassgnexpr, context, showimplicit); break; } /* * Parenthesize the argument unless it's a simple Var or a * FieldSelect. (In particular, if it's another ArrayRef, we * *must* parenthesize to avoid confusion.) */ need_parens = !IsA(aref->refexpr, Var) && !IsA(aref->refexpr, FieldSelect); if (need_parens) appendStringInfoChar(buf, '('); get_rule_expr((Node *) aref->refexpr, context, showimplicit); if (need_parens) appendStringInfoChar(buf, ')'); /* * If there's a refassgnexpr, we want to print the node in the * format "array[subscripts] := refassgnexpr". This is not * legal SQL, so decompilation of INSERT or UPDATE statements * should always use processIndirection as part of the * statement-level syntax. We should only see this when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. */ if (aref->refassgnexpr) { Node *refassgnexpr; /* * Use processIndirection to print this node's subscripts * as well as any additional field selections or * subscripting in immediate descendants. It returns the * RHS expr that is actually being "assigned". 
*/ refassgnexpr = processIndirection(node, context); appendStringInfoString(buf, " := "); get_rule_expr(refassgnexpr, context, showimplicit); } else { /* Just an ordinary array fetch, so print subscripts */ printSubscripts(aref, context); } } break; case T_FuncExpr: get_func_expr((FuncExpr *) node, context, showimplicit); break; case T_NamedArgExpr: { NamedArgExpr *na = (NamedArgExpr *) node; appendStringInfo(buf, "%s => ", quote_identifier(na->name)); get_rule_expr((Node *) na->arg, context, showimplicit); } break; case T_OpExpr: get_oper_expr((OpExpr *) node, context); break; case T_DistinctExpr: { DistinctExpr *expr = (DistinctExpr *) node; List *args = expr->args; Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg1, context, true, node); appendStringInfoString(buf, " IS DISTINCT FROM "); get_rule_expr_paren(arg2, context, true, node); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_NullIfExpr: { NullIfExpr *nullifexpr = (NullIfExpr *) node; appendStringInfoString(buf, "NULLIF("); get_rule_expr((Node *) nullifexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_ScalarArrayOpExpr: { ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; List *args = expr->args; Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg1, context, true, node); appendStringInfo(buf, " %s %s (", generate_operator_name(expr->opno, exprType(arg1), get_base_element_type(exprType(arg2))), expr->useOr ? "ANY" : "ALL"); get_rule_expr_paren(arg2, context, true, node); /* * There's inherent ambiguity in "x op ANY/ALL (y)" when y is * a bare sub-SELECT. Since we're here, the sub-SELECT must * be meant as a scalar sub-SELECT yielding an array value to * be used in ScalarArrayOpExpr; but the grammar will * preferentially interpret such a construct as an ANY/ALL * SubLink. To prevent misparsing the output that way, insert * a dummy coercion (which will be stripped by parse analysis, * so no inefficiency is added in dump and reload). This is * indeed most likely what the user wrote to get the construct * accepted in the first place. 
*/ if (IsA(arg2, SubLink) && ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) appendStringInfo(buf, "::%s", format_type_with_typemod(exprType(arg2), exprTypmod(arg2))); appendStringInfoChar(buf, ')'); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_BoolExpr: { BoolExpr *expr = (BoolExpr *) node; Node *first_arg = linitial(expr->args); ListCell *arg = lnext(list_head(expr->args)); switch (expr->boolop) { case AND_EXPR: if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(first_arg, context, false, node); while (arg) { appendStringInfoString(buf, " AND "); get_rule_expr_paren((Node *) lfirst(arg), context, false, node); arg = lnext(arg); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); break; case OR_EXPR: if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(first_arg, context, false, node); while (arg) { appendStringInfoString(buf, " OR "); get_rule_expr_paren((Node *) lfirst(arg), context, false, node); arg = lnext(arg); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); break; case NOT_EXPR: if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); appendStringInfoString(buf, "NOT "); get_rule_expr_paren(first_arg, context, false, node); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); break; default: elog(ERROR, "unrecognized boolop: %d", (int) expr->boolop); } } break; case T_SubLink: get_sublink_expr((SubLink *) node, context); break; case T_SubPlan: { SubPlan *subplan = (SubPlan *) node; /* * We cannot see an already-planned subplan in rule deparsing, * only while EXPLAINing a query plan. We don't try to * reconstruct the original SQL, just reference the subplan * that appears elsewhere in EXPLAIN's result. */ if (subplan->useHashTable) appendStringInfo(buf, "(hashed %s)", subplan->plan_name); else appendStringInfo(buf, "(%s)", subplan->plan_name); } break; case T_AlternativeSubPlan: { AlternativeSubPlan *asplan = (AlternativeSubPlan *) node; ListCell *lc; /* As above, this can only happen during EXPLAIN */ appendStringInfoString(buf, "(alternatives: "); foreach(lc, asplan->subplans) { SubPlan *splan = (SubPlan *) lfirst(lc); Assert(IsA(splan, SubPlan)); if (splan->useHashTable) appendStringInfo(buf, "hashed %s", splan->plan_name); else appendStringInfoString(buf, splan->plan_name); if (lnext(lc)) appendStringInfoString(buf, " or "); } appendStringInfoChar(buf, ')'); } break; case T_FieldSelect: { FieldSelect *fselect = (FieldSelect *) node; Node *arg = (Node *) fselect->arg; int fno = fselect->fieldnum; const char *fieldname; bool need_parens; /* * Parenthesize the argument unless it's an ArrayRef or * another FieldSelect. Note in particular that it would be * WRONG to not parenthesize a Var argument; simplicity is not * the issue here, having the right number of names is. */ need_parens = !IsA(arg, ArrayRef) &&!IsA(arg, FieldSelect); if (need_parens) appendStringInfoChar(buf, '('); get_rule_expr(arg, context, true); if (need_parens) appendStringInfoChar(buf, ')'); /* * Get and print the field name. */ fieldname = get_name_for_var_field((Var *) arg, fno, 0, context); appendStringInfo(buf, ".%s", quote_identifier(fieldname)); } break; case T_FieldStore: { FieldStore *fstore = (FieldStore *) node; bool need_parens; /* * There is no good way to represent a FieldStore as real SQL, * so decompilation of INSERT or UPDATE statements should * always use processIndirection as part of the * statement-level syntax. 
We should only get here when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. The plan case is even harder than * ordinary rules would be, because the planner tries to * collapse multiple assignments to the same field or subfield * into one FieldStore; so we can see a list of target fields * not just one, and the arguments could be FieldStores * themselves. We don't bother to try to print the target * field names; we just print the source arguments, with a * ROW() around them if there's more than one. This isn't * terribly complete, but it's probably good enough for * EXPLAIN's purposes; especially since anything more would be * either hopelessly confusing or an even poorer * representation of what the plan is actually doing. */ need_parens = (list_length(fstore->newvals) != 1); if (need_parens) appendStringInfoString(buf, "ROW("); get_rule_expr((Node *) fstore->newvals, context, showimplicit); if (need_parens) appendStringInfoChar(buf, ')'); } break; case T_RelabelType: { RelabelType *relabel = (RelabelType *) node; Node *arg = (Node *) relabel->arg; if (relabel->relabelformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, relabel->resulttype, relabel->resulttypmod, node); } } break; case T_CoerceViaIO: { CoerceViaIO *iocoerce = (CoerceViaIO *) node; Node *arg = (Node *) iocoerce->arg; if (iocoerce->coerceformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, iocoerce->resulttype, -1, node); } } break; case T_ArrayCoerceExpr: { ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node; Node *arg = (Node *) acoerce->arg; if (acoerce->coerceformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, acoerce->resulttype, acoerce->resulttypmod, node); } } break; case T_ConvertRowtypeExpr: { ConvertRowtypeExpr *convert = (ConvertRowtypeExpr *) node; Node *arg = (Node *) convert->arg; if (convert->convertformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr_paren(arg, context, false, node); } else { get_coercion_expr(arg, context, convert->resulttype, -1, node); } } break; case T_CollateExpr: { CollateExpr *collate = (CollateExpr *) node; Node *arg = (Node *) collate->arg; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg, context, showimplicit, node); appendStringInfo(buf, " COLLATE %s", generate_collation_name(collate->collOid)); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_CaseExpr: { CaseExpr *caseexpr = (CaseExpr *) node; ListCell *temp; appendContextKeyword(context, "CASE", 0, PRETTYINDENT_VAR, 0); if (caseexpr->arg) { appendStringInfoChar(buf, ' '); get_rule_expr((Node *) caseexpr->arg, context, true); } foreach(temp, caseexpr->args) { CaseWhen *when = (CaseWhen *) lfirst(temp); Node *w = (Node *) when->expr; if (caseexpr->arg) { /* * The parser should have produced WHEN clauses of the * form "CaseTestExpr = RHS", possibly with an * implicit coercion inserted above the CaseTestExpr. * For accurate decompilation of rules it's essential * that we show just the RHS. 
However in an * expression that's been through the optimizer, the * WHEN clause could be almost anything (since the * equality operator could have been expanded into an * inline function). If we don't recognize the form * of the WHEN clause, just punt and display it as-is. */ if (IsA(w, OpExpr)) { List *args = ((OpExpr *) w)->args; if (list_length(args) == 2 && IsA(strip_implicit_coercions(linitial(args)), CaseTestExpr)) w = (Node *) lsecond(args); } } if (!PRETTY_INDENT(context)) appendStringInfoChar(buf, ' '); appendContextKeyword(context, "WHEN ", 0, 0, 0); get_rule_expr(w, context, false); appendStringInfoString(buf, " THEN "); get_rule_expr((Node *) when->result, context, true); } if (!PRETTY_INDENT(context)) appendStringInfoChar(buf, ' '); appendContextKeyword(context, "ELSE ", 0, 0, 0); get_rule_expr((Node *) caseexpr->defresult, context, true); if (!PRETTY_INDENT(context)) appendStringInfoChar(buf, ' '); appendContextKeyword(context, "END", -PRETTYINDENT_VAR, 0, 0); } break; case T_CaseTestExpr: { /* * Normally we should never get here, since for expressions * that can contain this node type we attempt to avoid * recursing to it. But in an optimized expression we might * be unable to avoid that (see comments for CaseExpr). If we * do see one, print it as CASE_TEST_EXPR. */ appendStringInfoString(buf, "CASE_TEST_EXPR"); } break; case T_ArrayExpr: { ArrayExpr *arrayexpr = (ArrayExpr *) node; appendStringInfoString(buf, "ARRAY["); get_rule_expr((Node *) arrayexpr->elements, context, true); appendStringInfoChar(buf, ']'); /* * If the array isn't empty, we assume its elements are * coerced to the desired type. If it's empty, though, we * need an explicit coercion to the array type. */ if (arrayexpr->elements == NIL) appendStringInfo(buf, "::%s", format_type_with_typemod(arrayexpr->array_typeid, -1)); } break; case T_RowExpr: { RowExpr *rowexpr = (RowExpr *) node; TupleDesc tupdesc = NULL; ListCell *arg; int i; char *sep; /* * If it's a named type and not RECORD, we may have to skip * dropped columns and/or claim there are NULLs for added * columns. */ if (rowexpr->row_typeid != RECORDOID) { tupdesc = lookup_rowtype_tupdesc(rowexpr->row_typeid, -1); Assert(list_length(rowexpr->args) <= tupdesc->natts); } /* * SQL99 allows "ROW" to be omitted when there is more than * one column, but for simplicity we always print it. */ appendStringInfoString(buf, "ROW("); sep = ""; i = 0; foreach(arg, rowexpr->args) { Node *e = (Node *) lfirst(arg); if (tupdesc == NULL || !tupdesc->attrs[i]->attisdropped) { appendStringInfoString(buf, sep); /* Whole-row Vars need special treatment here */ get_rule_expr_toplevel(e, context, true); sep = ", "; } i++; } if (tupdesc != NULL) { while (i < tupdesc->natts) { if (!tupdesc->attrs[i]->attisdropped) { appendStringInfoString(buf, sep); appendStringInfoString(buf, "NULL"); sep = ", "; } i++; } ReleaseTupleDesc(tupdesc); } appendStringInfoChar(buf, ')'); if (rowexpr->row_format == COERCE_EXPLICIT_CAST) appendStringInfo(buf, "::%s", format_type_with_typemod(rowexpr->row_typeid, -1)); } break; case T_RowCompareExpr: { RowCompareExpr *rcexpr = (RowCompareExpr *) node; ListCell *arg; char *sep; /* * SQL99 allows "ROW" to be omitted when there is more than * one column, but for simplicity we always print it. 
*/ appendStringInfoString(buf, "(ROW("); sep = ""; foreach(arg, rcexpr->largs) { Node *e = (Node *) lfirst(arg); appendStringInfoString(buf, sep); get_rule_expr(e, context, true); sep = ", "; } /* * We assume that the name of the first-column operator will * do for all the rest too. This is definitely open to * failure, eg if some but not all operators were renamed * since the construct was parsed, but there seems no way to * be perfect. */ appendStringInfo(buf, ") %s ROW(", generate_operator_name(linitial_oid(rcexpr->opnos), exprType(linitial(rcexpr->largs)), exprType(linitial(rcexpr->rargs)))); sep = ""; foreach(arg, rcexpr->rargs) { Node *e = (Node *) lfirst(arg); appendStringInfoString(buf, sep); get_rule_expr(e, context, true); sep = ", "; } appendStringInfoString(buf, "))"); } break; case T_CoalesceExpr: { CoalesceExpr *coalesceexpr = (CoalesceExpr *) node; appendStringInfoString(buf, "COALESCE("); get_rule_expr((Node *) coalesceexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_MinMaxExpr: { MinMaxExpr *minmaxexpr = (MinMaxExpr *) node; switch (minmaxexpr->op) { case IS_GREATEST: appendStringInfoString(buf, "GREATEST("); break; case IS_LEAST: appendStringInfoString(buf, "LEAST("); break; } get_rule_expr((Node *) minmaxexpr->args, context, true); appendStringInfoChar(buf, ')'); } break; case T_XmlExpr: { XmlExpr *xexpr = (XmlExpr *) node; bool needcomma = false; ListCell *arg; ListCell *narg; Const *con; switch (xexpr->op) { case IS_XMLCONCAT: appendStringInfoString(buf, "XMLCONCAT("); break; case IS_XMLELEMENT: appendStringInfoString(buf, "XMLELEMENT("); break; case IS_XMLFOREST: appendStringInfoString(buf, "XMLFOREST("); break; case IS_XMLPARSE: appendStringInfoString(buf, "XMLPARSE("); break; case IS_XMLPI: appendStringInfoString(buf, "XMLPI("); break; case IS_XMLROOT: appendStringInfoString(buf, "XMLROOT("); break; case IS_XMLSERIALIZE: appendStringInfoString(buf, "XMLSERIALIZE("); break; case IS_DOCUMENT: break; } if (xexpr->op == IS_XMLPARSE || xexpr->op == IS_XMLSERIALIZE) { if (xexpr->xmloption == XMLOPTION_DOCUMENT) appendStringInfoString(buf, "DOCUMENT "); else appendStringInfoString(buf, "CONTENT "); } if (xexpr->name) { appendStringInfo(buf, "NAME %s", quote_identifier(map_xml_name_to_sql_identifier(xexpr->name))); needcomma = true; } if (xexpr->named_args) { if (xexpr->op != IS_XMLFOREST) { if (needcomma) appendStringInfoString(buf, ", "); appendStringInfoString(buf, "XMLATTRIBUTES("); needcomma = false; } forboth(arg, xexpr->named_args, narg, xexpr->arg_names) { Node *e = (Node *) lfirst(arg); char *argname = strVal(lfirst(narg)); if (needcomma) appendStringInfoString(buf, ", "); get_rule_expr((Node *) e, context, true); appendStringInfo(buf, " AS %s", quote_identifier(map_xml_name_to_sql_identifier(argname))); needcomma = true; } if (xexpr->op != IS_XMLFOREST) appendStringInfoChar(buf, ')'); } if (xexpr->args) { if (needcomma) appendStringInfoString(buf, ", "); switch (xexpr->op) { case IS_XMLCONCAT: case IS_XMLELEMENT: case IS_XMLFOREST: case IS_XMLPI: case IS_XMLSERIALIZE: /* no extra decoration needed */ get_rule_expr((Node *) xexpr->args, context, true); break; case IS_XMLPARSE: Assert(list_length(xexpr->args) == 2); get_rule_expr((Node *) linitial(xexpr->args), context, true); con = (Const *) lsecond(xexpr->args); Assert(IsA(con, Const)); Assert(!con->constisnull); if (DatumGetBool(con->constvalue)) appendStringInfoString(buf, " PRESERVE WHITESPACE"); else appendStringInfoString(buf, " STRIP WHITESPACE"); break; case IS_XMLROOT: 
Assert(list_length(xexpr->args) == 3); get_rule_expr((Node *) linitial(xexpr->args), context, true); appendStringInfoString(buf, ", VERSION "); con = (Const *) lsecond(xexpr->args); if (IsA(con, Const) && con->constisnull) appendStringInfoString(buf, "NO VALUE"); else get_rule_expr((Node *) con, context, false); con = (Const *) lthird(xexpr->args); Assert(IsA(con, Const)); if (con->constisnull) /* suppress STANDALONE NO VALUE */ ; else { switch (DatumGetInt32(con->constvalue)) { case XML_STANDALONE_YES: appendStringInfoString(buf, ", STANDALONE YES"); break; case XML_STANDALONE_NO: appendStringInfoString(buf, ", STANDALONE NO"); break; case XML_STANDALONE_NO_VALUE: appendStringInfoString(buf, ", STANDALONE NO VALUE"); break; default: break; } } break; case IS_DOCUMENT: get_rule_expr_paren((Node *) xexpr->args, context, false, node); break; } } if (xexpr->op == IS_XMLSERIALIZE) appendStringInfo(buf, " AS %s", format_type_with_typemod(xexpr->type, xexpr->typmod)); if (xexpr->op == IS_DOCUMENT) appendStringInfoString(buf, " IS DOCUMENT"); else appendStringInfoChar(buf, ')'); } break; case T_NullTest: { NullTest *ntest = (NullTest *) node; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren((Node *) ntest->arg, context, true, node); /* * For scalar inputs, we prefer to print as IS [NOT] NULL, * which is shorter and traditional. If it's a rowtype input * but we're applying a scalar test, must print IS [NOT] * DISTINCT FROM NULL to be semantically correct. */ if (ntest->argisrow || !type_is_rowtype(exprType((Node *) ntest->arg))) { switch (ntest->nulltesttype) { case IS_NULL: appendStringInfoString(buf, " IS NULL"); break; case IS_NOT_NULL: appendStringInfoString(buf, " IS NOT NULL"); break; default: elog(ERROR, "unrecognized nulltesttype: %d", (int) ntest->nulltesttype); } } else { switch (ntest->nulltesttype) { case IS_NULL: appendStringInfoString(buf, " IS NOT DISTINCT FROM NULL"); break; case IS_NOT_NULL: appendStringInfoString(buf, " IS DISTINCT FROM NULL"); break; default: elog(ERROR, "unrecognized nulltesttype: %d", (int) ntest->nulltesttype); } } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_BooleanTest: { BooleanTest *btest = (BooleanTest *) node; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren((Node *) btest->arg, context, false, node); switch (btest->booltesttype) { case IS_TRUE: appendStringInfoString(buf, " IS TRUE"); break; case IS_NOT_TRUE: appendStringInfoString(buf, " IS NOT TRUE"); break; case IS_FALSE: appendStringInfoString(buf, " IS FALSE"); break; case IS_NOT_FALSE: appendStringInfoString(buf, " IS NOT FALSE"); break; case IS_UNKNOWN: appendStringInfoString(buf, " IS UNKNOWN"); break; case IS_NOT_UNKNOWN: appendStringInfoString(buf, " IS NOT UNKNOWN"); break; default: elog(ERROR, "unrecognized booltesttype: %d", (int) btest->booltesttype); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } break; case T_CoerceToDomain: { CoerceToDomain *ctest = (CoerceToDomain *) node; Node *arg = (Node *) ctest->arg; if (ctest->coercionformat == COERCE_IMPLICIT_CAST && !showimplicit) { /* don't show the implicit cast */ get_rule_expr(arg, context, false); } else { get_coercion_expr(arg, context, ctest->resulttype, ctest->resulttypmod, node); } } break; case T_CoerceToDomainValue: appendStringInfoString(buf, "VALUE"); break; case T_SetToDefault: appendStringInfoString(buf, "DEFAULT"); break; case T_CurrentOfExpr: { CurrentOfExpr *cexpr = (CurrentOfExpr *) node; if (cexpr->cursor_name) 
appendStringInfo(buf, "CURRENT OF %s", quote_identifier(cexpr->cursor_name)); else appendStringInfo(buf, "CURRENT OF $%d", cexpr->cursor_param); } break; case T_InferenceElem: { InferenceElem *iexpr = (InferenceElem *) node; bool save_varprefix; bool need_parens; /* * InferenceElem can only refer to target relation, so a * prefix is not useful, and indeed would cause parse errors. */ save_varprefix = context->varprefix; context->varprefix = false; /* * Parenthesize the element unless it's a simple Var or a bare * function call. Follows pg_get_indexdef_worker(). */ need_parens = !IsA(iexpr->expr, Var); if (IsA(iexpr->expr, FuncExpr) && ((FuncExpr *) iexpr->expr)->funcformat == COERCE_EXPLICIT_CALL) need_parens = false; if (need_parens) appendStringInfoChar(buf, '('); get_rule_expr((Node *) iexpr->expr, context, false); if (need_parens) appendStringInfoChar(buf, ')'); context->varprefix = save_varprefix; if (iexpr->infercollid) appendStringInfo(buf, " COLLATE %s", generate_collation_name(iexpr->infercollid)); /* Add the operator class name, if not default */ if (iexpr->inferopclass) { Oid inferopclass = iexpr->inferopclass; Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass); get_opclass_name(inferopclass, inferopcinputtype, buf); } } break; case T_List: { char *sep; ListCell *l; sep = ""; foreach(l, (List *) node) { appendStringInfoString(buf, sep); get_rule_expr((Node *) lfirst(l), context, showimplicit); sep = ", "; } } break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); break; } } /* * get_rule_expr_toplevel - Parse back a toplevel expression * * Same as get_rule_expr(), except that if the expr is just a Var, we pass * istoplevel = true not false to get_variable(). This causes whole-row Vars * to get printed with decoration that will prevent expansion of "*". * We need to use this in contexts such as ROW() and VALUES(), where the * parser would expand "foo.*" appearing at top level. (In principle we'd * use this in get_target_list() too, but that has additional worries about * whether to print AS, so it needs to invoke get_variable() directly anyway.) */ static void get_rule_expr_toplevel(Node *node, deparse_context *context, bool showimplicit) { if (node && IsA(node, Var)) (void) get_variable((Var *) node, 0, true, context); else get_rule_expr(node, context, showimplicit); } /* * get_rule_expr_funccall - Parse back a function-call expression * * Same as get_rule_expr(), except that we guarantee that the output will * look like a function call, or like one of the things the grammar treats as * equivalent to a function call (see the func_expr_windowless production). * This is needed in places where the grammar uses func_expr_windowless and * you can't substitute a parenthesized a_expr. If what we have isn't going * to look like a function call, wrap it in a dummy CAST() expression, which * will satisfy the grammar --- and, indeed, is likely what the user wrote to * produce such a thing. */ static void get_rule_expr_funccall(Node *node, deparse_context *context, bool showimplicit) { if (looks_like_function(node)) get_rule_expr(node, context, showimplicit); else { StringInfo buf = context->buf; appendStringInfoString(buf, "CAST("); /* no point in showing any top-level implicit cast */ get_rule_expr(node, context, false); appendStringInfo(buf, " AS %s)", format_type_with_typemod(exprType(node), exprTypmod(node))); } } /* * Helper function to identify node types that satisfy func_expr_windowless. * If in doubt, "false" is always a safe answer. 
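 * (Illustrative example, added and not from the original source: a FuncExpr
 * whose funcformat is COERCE_EXPLICIT_CALL, e.g. generate_series(1, 10), is
 * reported as function-like and printed directly by get_rule_expr_funccall();
 * anything else, such as a FuncExpr that will deparse as a cast, makes that
 * caller fall back to the "CAST(expr AS type)" wrapping described above.)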
*/ static bool looks_like_function(Node *node) { if (node == NULL) return false; /* probably shouldn't happen */ switch (nodeTag(node)) { case T_FuncExpr: /* OK, unless it's going to deparse as a cast */ return (((FuncExpr *) node)->funcformat == COERCE_EXPLICIT_CALL); case T_NullIfExpr: case T_CoalesceExpr: case T_MinMaxExpr: case T_XmlExpr: /* these are all accepted by func_expr_common_subexpr */ return true; default: break; } return false; } /* * get_oper_expr - Parse back an OpExpr node */ static void get_oper_expr(OpExpr *expr, deparse_context *context) { StringInfo buf = context->buf; Oid opno = expr->opno; List *args = expr->args; if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); if (list_length(args) == 2) { /* binary operator */ Node *arg1 = (Node *) linitial(args); Node *arg2 = (Node *) lsecond(args); get_rule_expr_paren(arg1, context, true, (Node *) expr); appendStringInfo(buf, " %s ", generate_operator_name(opno, exprType(arg1), exprType(arg2))); get_rule_expr_paren(arg2, context, true, (Node *) expr); } else { /* unary operator --- but which side? */ Node *arg = (Node *) linitial(args); HeapTuple tp; Form_pg_operator optup; tp = SearchSysCache1(OPEROID, ObjectIdGetDatum(opno)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for operator %u", opno); optup = (Form_pg_operator) GETSTRUCT(tp); switch (optup->oprkind) { case 'l': appendStringInfo(buf, "%s ", generate_operator_name(opno, InvalidOid, exprType(arg))); get_rule_expr_paren(arg, context, true, (Node *) expr); break; case 'r': get_rule_expr_paren(arg, context, true, (Node *) expr); appendStringInfo(buf, " %s", generate_operator_name(opno, exprType(arg), InvalidOid)); break; default: elog(ERROR, "bogus oprkind: %d", optup->oprkind); } ReleaseSysCache(tp); } if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } /* * get_func_expr - Parse back a FuncExpr node */ static void get_func_expr(FuncExpr *expr, deparse_context *context, bool showimplicit) { StringInfo buf = context->buf; Oid funcoid = expr->funcid; Oid argtypes[FUNC_MAX_ARGS]; int nargs; List *argnames; bool use_variadic; ListCell *l; /* * If the function call came from an implicit coercion, then just show the * first argument --- unless caller wants to see implicit coercions. */ if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit) { get_rule_expr_paren((Node *) linitial(expr->args), context, false, (Node *) expr); return; } /* * If the function call came from a cast, then show the first argument * plus an explicit cast operation. */ if (expr->funcformat == COERCE_EXPLICIT_CAST || expr->funcformat == COERCE_IMPLICIT_CAST) { Node *arg = linitial(expr->args); Oid rettype = expr->funcresulttype; int32 coercedTypmod; /* Get the typmod if this is a length-coercion function */ (void) exprIsLengthCoercion((Node *) expr, &coercedTypmod); get_coercion_expr(arg, context, rettype, coercedTypmod, (Node *) expr); return; } /* * Normal function: display as proname(args). First we need to extract * the argument datatypes. 
*/ if (list_length(expr->args) > FUNC_MAX_ARGS) ereport(ERROR, (errcode(ERRCODE_TOO_MANY_ARGUMENTS), errmsg("too many arguments"))); nargs = 0; argnames = NIL; foreach(l, expr->args) { Node *arg = (Node *) lfirst(l); if (IsA(arg, NamedArgExpr)) argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); argtypes[nargs] = exprType(arg); nargs++; } appendStringInfo(buf, "%s(", generate_function_name(funcoid, nargs, argnames, argtypes, expr->funcvariadic, &use_variadic, context->special_exprkind)); nargs = 0; foreach(l, expr->args) { if (nargs++ > 0) appendStringInfoString(buf, ", "); if (use_variadic && lnext(l) == NULL) appendStringInfoString(buf, "VARIADIC "); get_rule_expr((Node *) lfirst(l), context, true); } appendStringInfoChar(buf, ')'); } /* * get_agg_expr - Parse back an Aggref node */ static void get_agg_expr(Aggref *aggref, deparse_context *context, Aggref *original_aggref) { StringInfo buf = context->buf; Oid argtypes[FUNC_MAX_ARGS]; int nargs; bool use_variadic; /* * For a combining aggregate, we look up and deparse the corresponding * partial aggregate instead. This is necessary because our input * argument list has been replaced; the new argument list always has just * one element, which will point to a partial Aggref that supplies us with * transition states to combine. */ if (DO_AGGSPLIT_COMBINE(aggref->aggsplit)) { TargetEntry *tle = linitial(aggref->args); Assert(list_length(aggref->args) == 1); Assert(IsA(tle, TargetEntry)); resolve_special_varno((Node *) tle->expr, context, original_aggref, get_agg_combine_expr); return; } /* * Mark as PARTIAL, if appropriate. We look to the original aggref so as * to avoid printing this when recursing from the code just above. */ if (DO_AGGSPLIT_SKIPFINAL(original_aggref->aggsplit)) appendStringInfoString(buf, "PARTIAL "); /* Extract the argument types as seen by the parser */ nargs = get_aggregate_argtypes(aggref, argtypes); /* Print the aggregate name, schema-qualified if needed */ appendStringInfo(buf, "%s(%s", generate_function_name(aggref->aggfnoid, nargs, NIL, argtypes, aggref->aggvariadic, &use_variadic, context->special_exprkind), (aggref->aggdistinct != NIL) ? "DISTINCT " : ""); if (AGGKIND_IS_ORDERED_SET(aggref->aggkind)) { /* * Ordered-set aggregates do not use "*" syntax. Also, we needn't * worry about inserting VARIADIC. So we can just dump the direct * args as-is. */ Assert(!aggref->aggvariadic); get_rule_expr((Node *) aggref->aggdirectargs, context, true); Assert(aggref->aggorder != NIL); appendStringInfoString(buf, ") WITHIN GROUP (ORDER BY "); get_rule_orderby(aggref->aggorder, aggref->args, false, context); } else { /* aggstar can be set only in zero-argument aggregates */ if (aggref->aggstar) appendStringInfoChar(buf, '*'); else { ListCell *l; int i; i = 0; foreach(l, aggref->args) { TargetEntry *tle = (TargetEntry *) lfirst(l); Node *arg = (Node *) tle->expr; Assert(!IsA(arg, NamedArgExpr)); if (tle->resjunk) continue; if (i++ > 0) appendStringInfoString(buf, ", "); if (use_variadic && i == nargs) appendStringInfoString(buf, "VARIADIC "); get_rule_expr(arg, context, true); } } if (aggref->aggorder != NIL) { appendStringInfoString(buf, " ORDER BY "); get_rule_orderby(aggref->aggorder, aggref->args, false, context); } } if (aggref->aggfilter != NULL) { appendStringInfoString(buf, ") FILTER (WHERE "); get_rule_expr((Node *) aggref->aggfilter, context, false); } appendStringInfoChar(buf, ')'); } /* * This is a helper function for get_agg_expr(). 
It's used when we deparse * a combining Aggref; resolve_special_varno locates the corresponding partial * Aggref and then calls this. */ static void get_agg_combine_expr(Node *node, deparse_context *context, void *private) { Aggref *aggref; Aggref *original_aggref = private; if (!IsA(node, Aggref)) elog(ERROR, "combining Aggref does not point to an Aggref"); aggref = (Aggref *) node; get_agg_expr(aggref, context, original_aggref); } /* * get_windowfunc_expr - Parse back a WindowFunc node */ static void get_windowfunc_expr(WindowFunc *wfunc, deparse_context *context) { StringInfo buf = context->buf; Oid argtypes[FUNC_MAX_ARGS]; int nargs; List *argnames; ListCell *l; if (list_length(wfunc->args) > FUNC_MAX_ARGS) ereport(ERROR, (errcode(ERRCODE_TOO_MANY_ARGUMENTS), errmsg("too many arguments"))); nargs = 0; argnames = NIL; foreach(l, wfunc->args) { Node *arg = (Node *) lfirst(l); if (IsA(arg, NamedArgExpr)) argnames = lappend(argnames, ((NamedArgExpr *) arg)->name); argtypes[nargs] = exprType(arg); nargs++; } appendStringInfo(buf, "%s(", generate_function_name(wfunc->winfnoid, nargs, argnames, argtypes, false, NULL, context->special_exprkind)); /* winstar can be set only in zero-argument aggregates */ if (wfunc->winstar) appendStringInfoChar(buf, '*'); else get_rule_expr((Node *) wfunc->args, context, true); if (wfunc->aggfilter != NULL) { appendStringInfoString(buf, ") FILTER (WHERE "); get_rule_expr((Node *) wfunc->aggfilter, context, false); } appendStringInfoString(buf, ") OVER "); foreach(l, context->windowClause) { WindowClause *wc = (WindowClause *) lfirst(l); if (wc->winref == wfunc->winref) { if (wc->name) appendStringInfoString(buf, quote_identifier(wc->name)); else get_rule_windowspec(wc, context->windowTList, context); break; } } if (l == NULL) { if (context->windowClause) elog(ERROR, "could not find window clause for winref %u", wfunc->winref); /* * In EXPLAIN, we don't have window context information available, so * we have to settle for this: */ appendStringInfoString(buf, "(?)"); } } /* ---------- * get_coercion_expr * * Make a string representation of a value coerced to a specific type * ---------- */ static void get_coercion_expr(Node *arg, deparse_context *context, Oid resulttype, int32 resulttypmod, Node *parentNode) { StringInfo buf = context->buf; /* * Since parse_coerce.c doesn't immediately collapse application of * length-coercion functions to constants, what we'll typically see in * such cases is a Const with typmod -1 and a length-coercion function * right above it. Avoid generating redundant output. However, beware of * suppressing casts when the user actually wrote something like * 'foo'::text::char(3). * * Note: it might seem that we are missing the possibility of needing to * print a COLLATE clause for such a Const. However, a Const could only * have nondefault collation in a post-constant-folding tree, in which the * length coercion would have been folded too. See also the special * handling of CollateExpr in coerce_to_target_type(): any collation * marking will be above the coercion node, not below it. 
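 * (Added illustration, not in the original source: the literal
 * 'abc'::character(3) typically arrives here as a typmod -1 Const with a
 * length-coercion function above it; the check below prints the Const bare
 * and lets the outer coercion supply the single "::character(3)" decoration,
 * rather than emitting a redundant 'abc'::bpchar::character(3).)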
*/ if (arg && IsA(arg, Const) && ((Const *) arg)->consttype == resulttype && ((Const *) arg)->consttypmod == -1) { /* Show the constant without normal ::typename decoration */ get_const_expr((Const *) arg, context, -1); } else { if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr_paren(arg, context, false, parentNode); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } appendStringInfo(buf, "::%s", format_type_with_typemod(resulttype, resulttypmod)); } /* ---------- * get_const_expr * * Make a string representation of a Const * * showtype can be -1 to never show "::typename" decoration, or +1 to always * show it, or 0 to show it only if the constant wouldn't be assumed to be * the right type by default. * * If the Const's collation isn't default for its type, show that too. * We mustn't do this when showtype is -1 (since that means the caller will * print "::typename", and we can't put a COLLATE clause in between). It's * caller's responsibility that collation isn't missed in such cases. * ---------- */ static void get_const_expr(Const *constval, deparse_context *context, int showtype) { StringInfo buf = context->buf; Oid typoutput; bool typIsVarlena; char *extval; bool needlabel = false; if (constval->constisnull) { /* * Always label the type of a NULL constant to prevent misdecisions * about type when reparsing. */ appendStringInfoString(buf, "NULL"); if (showtype >= 0) { appendStringInfo(buf, "::%s", format_type_with_typemod(constval->consttype, constval->consttypmod)); get_const_collation(constval, context); } return; } getTypeOutputInfo(constval->consttype, &typoutput, &typIsVarlena); extval = OidOutputFunctionCall(typoutput, constval->constvalue); switch (constval->consttype) { case INT4OID: /* * INT4 can be printed without any decoration, unless it is * negative; in that case print it as '-nnn'::integer to ensure * that the output will re-parse as a constant, not as a constant * plus operator. In most cases we could get away with printing * (-nnn) instead, because of the way that gram.y handles negative * literals; but that doesn't work for INT_MIN, and it doesn't * seem that much prettier anyway. */ if (extval[0] != '-') appendStringInfoString(buf, extval); else { appendStringInfo(buf, "'%s'", extval); needlabel = true; /* we must attach a cast */ } break; case NUMERICOID: /* * NUMERIC can be printed without quotes if it looks like a float * constant (not an integer, and not Infinity or NaN) and doesn't * have a leading sign (for the same reason as for INT4). */ if (isdigit((unsigned char) extval[0]) && strcspn(extval, "eE.") != strlen(extval)) { appendStringInfoString(buf, extval); } else { appendStringInfo(buf, "'%s'", extval); needlabel = true; /* we must attach a cast */ } break; case BITOID: case VARBITOID: appendStringInfo(buf, "B'%s'", extval); break; case BOOLOID: if (strcmp(extval, "t") == 0) appendStringInfoString(buf, "true"); else appendStringInfoString(buf, "false"); break; default: simple_quote_literal(buf, extval); break; } pfree(extval); if (showtype < 0) return; /* * For showtype == 0, append ::typename unless the constant will be * implicitly typed as the right type when it is read in. * * XXX this code has to be kept in sync with the behavior of the parser, * especially make_const. 
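 * (Illustrative examples, added for clarity: with showtype == 0 the integer
 * literal 123 is printed bare, -123 becomes '-123'::integer because of the
 * needlabel logic above, 3.5 is printed bare since it reparses as numeric,
 * and a text constant such as 'abc' picks up '::text' from the default
 * branch below.)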
*/ switch (constval->consttype) { case BOOLOID: case UNKNOWNOID: /* These types can be left unlabeled */ needlabel = false; break; case INT4OID: /* We determined above whether a label is needed */ break; case NUMERICOID: /* * Float-looking constants will be typed as numeric, which we * checked above; but if there's a nondefault typmod we need to * show it. */ needlabel |= (constval->consttypmod >= 0); break; default: needlabel = true; break; } if (needlabel || showtype > 0) appendStringInfo(buf, "::%s", format_type_with_typemod(constval->consttype, constval->consttypmod)); get_const_collation(constval, context); } /* * helper for get_const_expr: append COLLATE if needed */ static void get_const_collation(Const *constval, deparse_context *context) { StringInfo buf = context->buf; if (OidIsValid(constval->constcollid)) { Oid typcollation = get_typcollation(constval->consttype); if (constval->constcollid != typcollation) { appendStringInfo(buf, " COLLATE %s", generate_collation_name(constval->constcollid)); } } } /* * simple_quote_literal - Format a string as a SQL literal, append to buf */ static void simple_quote_literal(StringInfo buf, const char *val) { const char *valptr; /* * We form the string literal according to the prevailing setting of * standard_conforming_strings; we never use E''. User is responsible for * making sure result is used correctly. */ appendStringInfoChar(buf, '\''); for (valptr = val; *valptr; valptr++) { char ch = *valptr; if (SQL_STR_DOUBLE(ch, !standard_conforming_strings)) appendStringInfoChar(buf, ch); appendStringInfoChar(buf, ch); } appendStringInfoChar(buf, '\''); } /* ---------- * get_sublink_expr - Parse back a sublink * ---------- */ static void get_sublink_expr(SubLink *sublink, deparse_context *context) { StringInfo buf = context->buf; Query *query = (Query *) (sublink->subselect); char *opname = NULL; bool need_paren; if (sublink->subLinkType == ARRAY_SUBLINK) appendStringInfoString(buf, "ARRAY("); else appendStringInfoChar(buf, '('); /* * Note that we print the name of only the first operator, when there are * multiple combining operators. This is an approximation that could go * wrong in various scenarios (operators in different schemas, renamed * operators, etc) but there is not a whole lot we can do about it, since * the syntax allows only one operator to be shown. 
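 * (Added example, not in the original source: the parser expands a construct
 * like "(a, b) = ANY (SELECT x, y FROM t)" into several combining OpExprs,
 * one per column; only the "=" of the first pair is looked up below, and
 * because that operator is "=", the ANY sublink is printed back using IN.)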
*/ if (sublink->testexpr) { if (IsA(sublink->testexpr, OpExpr)) { /* single combining operator */ OpExpr *opexpr = (OpExpr *) sublink->testexpr; get_rule_expr(linitial(opexpr->args), context, true); opname = generate_operator_name(opexpr->opno, exprType(linitial(opexpr->args)), exprType(lsecond(opexpr->args))); } else if (IsA(sublink->testexpr, BoolExpr)) { /* multiple combining operators, = or <> cases */ char *sep; ListCell *l; appendStringInfoChar(buf, '('); sep = ""; foreach(l, ((BoolExpr *) sublink->testexpr)->args) { OpExpr *opexpr = (OpExpr *) lfirst(l); Assert(IsA(opexpr, OpExpr)); appendStringInfoString(buf, sep); get_rule_expr(linitial(opexpr->args), context, true); if (!opname) opname = generate_operator_name(opexpr->opno, exprType(linitial(opexpr->args)), exprType(lsecond(opexpr->args))); sep = ", "; } appendStringInfoChar(buf, ')'); } else if (IsA(sublink->testexpr, RowCompareExpr)) { /* multiple combining operators, < <= > >= cases */ RowCompareExpr *rcexpr = (RowCompareExpr *) sublink->testexpr; appendStringInfoChar(buf, '('); get_rule_expr((Node *) rcexpr->largs, context, true); opname = generate_operator_name(linitial_oid(rcexpr->opnos), exprType(linitial(rcexpr->largs)), exprType(linitial(rcexpr->rargs))); appendStringInfoChar(buf, ')'); } else elog(ERROR, "unrecognized testexpr type: %d", (int) nodeTag(sublink->testexpr)); } need_paren = true; switch (sublink->subLinkType) { case EXISTS_SUBLINK: appendStringInfoString(buf, "EXISTS "); break; case ANY_SUBLINK: if (strcmp(opname, "=") == 0) /* Represent = ANY as IN */ appendStringInfoString(buf, " IN "); else appendStringInfo(buf, " %s ANY ", opname); break; case ALL_SUBLINK: appendStringInfo(buf, " %s ALL ", opname); break; case ROWCOMPARE_SUBLINK: appendStringInfo(buf, " %s ", opname); break; case EXPR_SUBLINK: case MULTIEXPR_SUBLINK: case ARRAY_SUBLINK: need_paren = false; break; case CTE_SUBLINK: /* shouldn't occur in a SubLink */ default: elog(ERROR, "unrecognized sublink type: %d", (int) sublink->subLinkType); break; } if (need_paren) appendStringInfoChar(buf, '('); get_query_def(query, buf, context->namespaces, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); if (need_paren) appendStringInfoString(buf, "))"); else appendStringInfoChar(buf, ')'); } /* ---------- * get_from_clause - Parse back a FROM clause * * "prefix" is the keyword that denotes the start of the list of FROM * elements. It is FROM when used to parse back SELECT and UPDATE, but * is USING when parsing back DELETE. * ---------- */ static void get_from_clause(Query *query, const char *prefix, deparse_context *context) { StringInfo buf = context->buf; bool first = true; ListCell *l; /* * We use the query's jointree as a guide to what to print. However, we * must ignore auto-added RTEs that are marked not inFromCl. (These can * only appear at the top level of the jointree, so it's sufficient to * check here.) This check also ensures we ignore the rule pseudo-RTEs * for NEW and OLD. 
*/ foreach(l, query->jointree->fromlist) { Node *jtnode = (Node *) lfirst(l); if (IsA(jtnode, RangeTblRef)) { int varno = ((RangeTblRef *) jtnode)->rtindex; RangeTblEntry *rte = rt_fetch(varno, query->rtable); if (!rte->inFromCl) continue; } if (first) { appendContextKeyword(context, prefix, -PRETTYINDENT_STD, PRETTYINDENT_STD, 2); first = false; get_from_clause_item(jtnode, query, context); } else { StringInfoData itembuf; appendStringInfoString(buf, ", "); /* * Put the new FROM item's text into itembuf so we can decide * after we've got it whether or not it needs to go on a new line. */ initStringInfo(&itembuf); context->buf = &itembuf; get_from_clause_item(jtnode, query, context); /* Restore context's output buffer */ context->buf = buf; /* Consider line-wrapping if enabled */ if (PRETTY_INDENT(context) && context->wrapColumn >= 0) { /* Does the new item start with a new line? */ if (itembuf.len > 0 && itembuf.data[0] == '\n') { /* If so, we shouldn't add anything */ /* instead, remove any trailing spaces currently in buf */ removeStringInfoSpaces(buf); } else { char *trailing_nl; /* Locate the start of the current line in the buffer */ trailing_nl = strrchr(buf->data, '\n'); if (trailing_nl == NULL) trailing_nl = buf->data; else trailing_nl++; /* * Add a newline, plus some indentation, if the new item * would cause an overflow. */ if (strlen(trailing_nl) + itembuf.len > context->wrapColumn) appendContextKeyword(context, "", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_VAR); } } /* Add the new item */ appendStringInfoString(buf, itembuf.data); /* clean up */ pfree(itembuf.data); } } } static void get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) { StringInfo buf = context->buf; deparse_namespace *dpns = (deparse_namespace *) linitial(context->namespaces); if (IsA(jtnode, RangeTblRef)) { int varno = ((RangeTblRef *) jtnode)->rtindex; RangeTblEntry *rte = rt_fetch(varno, query->rtable); char *refname = get_rtable_name(varno, context); deparse_columns *colinfo = deparse_columns_fetch(varno, dpns); RangeTblFunction *rtfunc1 = NULL; bool printalias; if (rte->lateral) appendStringInfoString(buf, "LATERAL "); /* Print the FROM item proper */ switch (rte->rtekind) { case RTE_RELATION: /* Normal relation RTE */ appendStringInfo(buf, "%s%s", only_marker(rte), generate_relation_or_shard_name(rte->relid, context->distrelid, context->shardid, context->namespaces)); break; case RTE_SUBQUERY: /* Subquery RTE */ appendStringInfoChar(buf, '('); get_query_def(rte->subquery, buf, context->namespaces, NULL, context->prettyFlags, context->wrapColumn, context->indentLevel); appendStringInfoChar(buf, ')'); break; case RTE_FUNCTION: /* if it's a shard, do differently */ if (GetRangeTblKind(rte) == CITUS_RTE_SHARD) { char *fragmentSchemaName = NULL; char *fragmentTableName = NULL; ExtractRangeTblExtraData(rte, NULL, &fragmentSchemaName, &fragmentTableName, NULL); /* use schema and table name from the remote alias */ appendStringInfoString(buf, generate_fragment_name(fragmentSchemaName, fragmentTableName)); break; } /* Function RTE */ rtfunc1 = (RangeTblFunction *) linitial(rte->functions); /* * Omit ROWS FROM() syntax for just one function, unless it * has both a coldeflist and WITH ORDINALITY. If it has both, * we must use ROWS FROM() syntax to avoid ambiguity about * whether the coldeflist includes the ordinality column. 
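 * (Illustrative note, added: "FROM generate_series(1, 3)" has a single
 * function and no column definition list, so it is printed without ROWS
 * FROM(); multi-function items, or a single function combining a coldeflist
 * with WITH ORDINALITY, keep the explicit ROWS FROM(...) form handled
 * further below.)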
*/ if (list_length(rte->functions) == 1 && (rtfunc1->funccolnames == NIL || !rte->funcordinality)) { get_rule_expr_funccall(rtfunc1->funcexpr, context, true); /* we'll print the coldeflist below, if it has one */ } else { bool all_unnest; ListCell *lc; /* * If all the function calls in the list are to unnest, * and none need a coldeflist, then collapse the list back * down to UNNEST(args). (If we had more than one * built-in unnest function, this would get more * difficult.) * * XXX This is pretty ugly, since it makes not-terribly- * future-proof assumptions about what the parser would do * with the output; but the alternative is to emit our * nonstandard ROWS FROM() notation for what might have * been a perfectly spec-compliant multi-argument * UNNEST(). */ all_unnest = true; foreach(lc, rte->functions) { RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); if (!IsA(rtfunc->funcexpr, FuncExpr) || ((FuncExpr *) rtfunc->funcexpr)->funcid != F_ARRAY_UNNEST || rtfunc->funccolnames != NIL) { all_unnest = false; break; } } if (all_unnest) { List *allargs = NIL; foreach(lc, rte->functions) { RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); List *args = ((FuncExpr *) rtfunc->funcexpr)->args; allargs = list_concat(allargs, list_copy(args)); } appendStringInfoString(buf, "UNNEST("); get_rule_expr((Node *) allargs, context, true); appendStringInfoChar(buf, ')'); } else { int funcno = 0; appendStringInfoString(buf, "ROWS FROM("); foreach(lc, rte->functions) { RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); if (funcno > 0) appendStringInfoString(buf, ", "); get_rule_expr_funccall(rtfunc->funcexpr, context, true); if (rtfunc->funccolnames != NIL) { /* Reconstruct the column definition list */ appendStringInfoString(buf, " AS "); get_from_clause_coldeflist(rtfunc, NULL, context); } funcno++; } appendStringInfoChar(buf, ')'); } /* prevent printing duplicate coldeflist below */ rtfunc1 = NULL; } if (rte->funcordinality) appendStringInfoString(buf, " WITH ORDINALITY"); break; case RTE_VALUES: /* Values list RTE */ appendStringInfoChar(buf, '('); get_values_def(rte->values_lists, context); appendStringInfoChar(buf, ')'); break; case RTE_CTE: appendStringInfoString(buf, quote_identifier(rte->ctename)); break; default: elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind); break; } /* Print the relation alias, if needed */ printalias = false; if (rte->alias != NULL) { /* Always print alias if user provided one */ printalias = true; } else if (colinfo->printaliases) { /* Always print alias if we need to print column aliases */ printalias = true; } else if (rte->rtekind == RTE_RELATION) { /* * No need to print alias if it's same as relation name (this * would normally be the case, but not if set_rtable_names had to * resolve a conflict). */ if (strcmp(refname, get_relation_name(rte->relid)) != 0) printalias = true; } else if (rte->rtekind == RTE_FUNCTION) { /* * For a function RTE, always print alias. This covers possible * renaming of the function and/or instability of the * FigureColname rules for things that aren't simple functions. * Note we'd need to force it anyway for the columndef list case. */ printalias = true; } else if (rte->rtekind == RTE_VALUES) { /* Alias is syntactically required for VALUES */ printalias = true; } else if (rte->rtekind == RTE_CTE) { /* * No need to print alias if it's same as CTE name (this would * normally be the case, but not if set_rtable_names had to * resolve a conflict). 
*/ if (strcmp(refname, rte->ctename) != 0) printalias = true; } else if (rte->rtekind == RTE_SUBQUERY) { /* subquery requires alias too */ printalias = true; } if (printalias) appendStringInfo(buf, " %s", quote_identifier(refname)); /* Print the column definitions or aliases, if needed */ if (rtfunc1 && rtfunc1->funccolnames != NIL) { /* Reconstruct the columndef list, which is also the aliases */ get_from_clause_coldeflist(rtfunc1, colinfo, context); } else if (GetRangeTblKind(rte) != CITUS_RTE_SHARD) { /* Else print column aliases as needed */ get_column_alias_list(colinfo, context); } /* Tablesample clause must go after any alias */ if (rte->rtekind == RTE_RELATION && rte->tablesample) get_tablesample_def(rte->tablesample, context); } else if (IsA(jtnode, JoinExpr)) { JoinExpr *j = (JoinExpr *) jtnode; deparse_columns *colinfo = deparse_columns_fetch(j->rtindex, dpns); bool need_paren_on_right; need_paren_on_right = PRETTY_PAREN(context) && !IsA(j->rarg, RangeTblRef) && !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL); if (!PRETTY_PAREN(context) || j->alias != NULL) appendStringInfoChar(buf, '('); get_from_clause_item(j->larg, query, context); switch (j->jointype) { case JOIN_INNER: if (j->quals) appendContextKeyword(context, " JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); else appendContextKeyword(context, " CROSS JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; case JOIN_LEFT: appendContextKeyword(context, " LEFT JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; case JOIN_FULL: appendContextKeyword(context, " FULL JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; case JOIN_RIGHT: appendContextKeyword(context, " RIGHT JOIN ", -PRETTYINDENT_STD, PRETTYINDENT_STD, PRETTYINDENT_JOIN); break; default: elog(ERROR, "unrecognized join type: %d", (int) j->jointype); } if (need_paren_on_right) appendStringInfoChar(buf, '('); get_from_clause_item(j->rarg, query, context); if (need_paren_on_right) appendStringInfoChar(buf, ')'); if (j->usingClause) { ListCell *lc; bool first = true; appendStringInfoString(buf, " USING ("); /* Use the assigned names, not what's in usingClause */ foreach(lc, colinfo->usingNames) { char *colname = (char *) lfirst(lc); if (first) first = false; else appendStringInfoString(buf, ", "); appendStringInfoString(buf, quote_identifier(colname)); } appendStringInfoChar(buf, ')'); } else if (j->quals) { appendStringInfoString(buf, " ON "); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, '('); get_rule_expr(j->quals, context, false); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); } else if (j->jointype != JOIN_INNER) { /* If we didn't say CROSS JOIN above, we must provide an ON */ appendStringInfoString(buf, " ON TRUE"); } if (!PRETTY_PAREN(context) || j->alias != NULL) appendStringInfoChar(buf, ')'); /* Yes, it's correct to put alias after the right paren ... */ if (j->alias != NULL) { appendStringInfo(buf, " %s", quote_identifier(j->alias->aliasname)); get_column_alias_list(colinfo, context); } } else elog(ERROR, "unrecognized node type: %d", (int) nodeTag(jtnode)); } /* * get_column_alias_list - print column alias list for an RTE * * Caller must already have printed the relation's alias name. 
*/ static void get_column_alias_list(deparse_columns *colinfo, deparse_context *context) { StringInfo buf = context->buf; int i; bool first = true; /* Don't print aliases if not needed */ if (!colinfo->printaliases) return; for (i = 0; i < colinfo->num_new_cols; i++) { char *colname = colinfo->new_colnames[i]; if (first) { appendStringInfoChar(buf, '('); first = false; } else appendStringInfoString(buf, ", "); appendStringInfoString(buf, quote_identifier(colname)); } if (!first) appendStringInfoChar(buf, ')'); } /* * get_from_clause_coldeflist - reproduce FROM clause coldeflist * * When printing a top-level coldeflist (which is syntactically also the * relation's column alias list), use column names from colinfo. But when * printing a coldeflist embedded inside ROWS FROM(), we prefer to use the * original coldeflist's names, which are available in rtfunc->funccolnames. * Pass NULL for colinfo to select the latter behavior. * * The coldeflist is appended immediately (no space) to buf. Caller is * responsible for ensuring that an alias or AS is present before it. */ static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, deparse_columns *colinfo, deparse_context *context) { StringInfo buf = context->buf; ListCell *l1; ListCell *l2; ListCell *l3; ListCell *l4; int i; appendStringInfoChar(buf, '('); /* there's no forfour(), so must chase one list the hard way */ i = 0; l4 = list_head(rtfunc->funccolnames); forthree(l1, rtfunc->funccoltypes, l2, rtfunc->funccoltypmods, l3, rtfunc->funccolcollations) { Oid atttypid = lfirst_oid(l1); int32 atttypmod = lfirst_int(l2); Oid attcollation = lfirst_oid(l3); char *attname; if (colinfo) attname = colinfo->colnames[i]; else attname = strVal(lfirst(l4)); Assert(attname); /* shouldn't be any dropped columns here */ if (i > 0) appendStringInfoString(buf, ", "); appendStringInfo(buf, "%s %s", quote_identifier(attname), format_type_with_typemod(atttypid, atttypmod)); if (OidIsValid(attcollation) && attcollation != get_typcollation(atttypid)) appendStringInfo(buf, " COLLATE %s", generate_collation_name(attcollation)); l4 = lnext(l4); i++; } appendStringInfoChar(buf, ')'); } /* * get_tablesample_def - print a TableSampleClause */ static void get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) { StringInfo buf = context->buf; Oid argtypes[1]; int nargs; ListCell *l; /* * We should qualify the handler's function name if it wouldn't be * resolved by lookup in the current search path. */ argtypes[0] = INTERNALOID; appendStringInfo(buf, " TABLESAMPLE %s (", generate_function_name(tablesample->tsmhandler, 1, NIL, argtypes, false, NULL, EXPR_KIND_NONE)); nargs = 0; foreach(l, tablesample->args) { if (nargs++ > 0) appendStringInfoString(buf, ", "); get_rule_expr((Node *) lfirst(l), context, false); } appendStringInfoChar(buf, ')'); if (tablesample->repeatable != NULL) { appendStringInfoString(buf, " REPEATABLE ("); get_rule_expr((Node *) tablesample->repeatable, context, false); appendStringInfoChar(buf, ')'); } } /* * get_opclass_name - fetch name of an index operator class * * The opclass name is appended (after a space) to buf. * * Output is suppressed if the opclass is the default for the given * actual_datatype. (If you don't want this behavior, just pass * InvalidOid for actual_datatype.) 
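 * (Added example, not from the original source: for an index column declared
 * with the non-default text_pattern_ops operator class, this appends
 * " text_pattern_ops" after the column, whereas the default operator class
 * for the column's type is simply omitted.)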
*/ static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf) { HeapTuple ht_opc; Form_pg_opclass opcrec; char *opcname; char *nspname; ht_opc = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass)); if (!HeapTupleIsValid(ht_opc)) elog(ERROR, "cache lookup failed for opclass %u", opclass); opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc); if (!OidIsValid(actual_datatype) || GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) { /* Okay, we need the opclass name. Do we need to qualify it? */ opcname = NameStr(opcrec->opcname); if (OpclassIsVisible(opclass)) appendStringInfo(buf, " %s", quote_identifier(opcname)); else { nspname = get_namespace_name(opcrec->opcnamespace); appendStringInfo(buf, " %s.%s", quote_identifier(nspname), quote_identifier(opcname)); } } ReleaseSysCache(ht_opc); } /* * processIndirection - take care of array and subfield assignment * * We strip any top-level FieldStore or assignment ArrayRef nodes that * appear in the input, printing them as decoration for the base column * name (which we assume the caller just printed). We might also need to * strip CoerceToDomain nodes, but only ones that appear above assignment * nodes. * * Returns the subexpression that's to be assigned. */ static Node * processIndirection(Node *node, deparse_context *context) { StringInfo buf = context->buf; CoerceToDomain *cdomain = NULL; for (;;) { if (node == NULL) break; if (IsA(node, FieldStore)) { FieldStore *fstore = (FieldStore *) node; Oid typrelid; char *fieldname; /* lookup tuple type */ typrelid = get_typ_typrelid(fstore->resulttype); if (!OidIsValid(typrelid)) elog(ERROR, "argument type %s of FieldStore is not a tuple type", format_type_be(fstore->resulttype)); /* * Print the field name. There should only be one target field in * stored rules. There could be more than that in executable * target lists, but this function cannot be used for that case. */ Assert(list_length(fstore->fieldnums) == 1); fieldname = get_relid_attribute_name(typrelid, linitial_int(fstore->fieldnums)); appendStringInfo(buf, ".%s", quote_identifier(fieldname)); /* * We ignore arg since it should be an uninteresting reference to * the target column or subcolumn. */ node = (Node *) linitial(fstore->newvals); } else if (IsA(node, ArrayRef)) { ArrayRef *aref = (ArrayRef *) node; if (aref->refassgnexpr == NULL) break; printSubscripts(aref, context); /* * We ignore refexpr since it should be an uninteresting reference * to the target column or subcolumn. */ node = (Node *) aref->refassgnexpr; } else if (IsA(node, CoerceToDomain)) { cdomain = (CoerceToDomain *) node; /* If it's an explicit domain coercion, we're done */ if (cdomain->coercionformat != COERCE_IMPLICIT_CAST) break; /* Tentatively descend past the CoerceToDomain */ node = (Node *) cdomain->arg; } else break; } /* * If we descended past a CoerceToDomain whose argument turned out not to * be a FieldStore or array assignment, back up to the CoerceToDomain. * (This is not enough to be fully correct if there are nested implicit * CoerceToDomains, but such cases shouldn't ever occur.) 
*/ if (cdomain && node == (Node *) cdomain->arg) node = (Node *) cdomain; return node; } static void printSubscripts(ArrayRef *aref, deparse_context *context) { StringInfo buf = context->buf; ListCell *lowlist_item; ListCell *uplist_item; lowlist_item = list_head(aref->reflowerindexpr); /* could be NULL */ foreach(uplist_item, aref->refupperindexpr) { appendStringInfoChar(buf, '['); if (lowlist_item) { /* If subexpression is NULL, get_rule_expr prints nothing */ get_rule_expr((Node *) lfirst(lowlist_item), context, false); appendStringInfoChar(buf, ':'); lowlist_item = lnext(lowlist_item); } /* If subexpression is NULL, get_rule_expr prints nothing */ get_rule_expr((Node *) lfirst(uplist_item), context, false); appendStringInfoChar(buf, ']'); } } /* * get_relation_name * Get the unqualified name of a relation specified by OID * * This differs from the underlying get_rel_name() function in that it will * throw error instead of silently returning NULL if the OID is bad. */ static char * get_relation_name(Oid relid) { char *relname = get_rel_name(relid); if (!relname) elog(ERROR, "cache lookup failed for relation %u", relid); return relname; } /* * generate_relation_or_shard_name * Compute the name to display for a relation or shard * * If the provided relid is equal to the provided distrelid, this function * returns a shard-extended relation name; otherwise, it falls through to a * simple generate_relation_name call. */ static char * generate_relation_or_shard_name(Oid relid, Oid distrelid, int64 shardid, List *namespaces) { char *relname = NULL; if (relid == distrelid) { relname = get_relation_name(relid); if (shardid > 0) { Oid schemaOid = get_rel_namespace(relid); char *schemaName = get_namespace_name(schemaOid); AppendShardIdToName(&relname, shardid); relname = quote_qualified_identifier(schemaName, relname); } } else { relname = generate_relation_name(relid, namespaces); } return relname; } /* * generate_relation_name * Compute the name to display for a relation specified by OID * * The result includes all necessary quoting and schema-prefixing. * * If namespaces isn't NIL, it must be a list of deparse_namespace nodes. * We will forcibly qualify the relation name if it equals any CTE name * visible in the namespace list. */ char * generate_relation_name(Oid relid, List *namespaces) { HeapTuple tp; Form_pg_class reltup; bool need_qual; ListCell *nslist; char *relname; char *nspname; char *result; tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tp)) elog(ERROR, "cache lookup failed for relation %u", relid); reltup = (Form_pg_class) GETSTRUCT(tp); relname = NameStr(reltup->relname); /* Check for conflicting CTE name */ need_qual = false; foreach(nslist, namespaces) { deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist); ListCell *ctlist; foreach(ctlist, dpns->ctes) { CommonTableExpr *cte = (CommonTableExpr *) lfirst(ctlist); if (strcmp(cte->ctename, relname) == 0) { need_qual = true; break; } } if (need_qual) break; } /* Otherwise, qualify the name if not visible in search path */ if (!need_qual) need_qual = !RelationIsVisible(relid); if (need_qual) nspname = get_namespace_name(reltup->relnamespace); else nspname = NULL; result = quote_qualified_identifier(nspname, relname); ReleaseSysCache(tp); return result; } /* * generate_fragment_name * Compute the name to display for a shard or merged table * * The result includes all necessary quoting and schema-prefixing. The schema * name can be NULL for regular shards. 
For merged tables, they are always * declared within a job-specific schema, and therefore can't have null schema * names. */ static char * generate_fragment_name(char *schemaName, char *tableName) { StringInfo fragmentNameString = makeStringInfo(); if (schemaName != NULL) { appendStringInfo(fragmentNameString, "%s.%s", quote_identifier(schemaName), quote_identifier(tableName)); } else { appendStringInfoString(fragmentNameString, quote_identifier(tableName)); } return fragmentNameString->data; } /* * generate_function_name * Compute the name to display for a function specified by OID, * given that it is being called with the specified actual arg names and * types. (Those matter because of ambiguous-function resolution rules.) * * If we're dealing with a potentially variadic function (in practice, this * means a FuncExpr or Aggref, not some other way of calling a function), then * has_variadic must specify whether variadic arguments have been merged, * and *use_variadic_p will be set to indicate whether to print VARIADIC in * the output. For non-FuncExpr cases, has_variadic should be FALSE and * use_variadic_p can be NULL. * * The result includes all necessary quoting and schema-prefixing. */ static char * generate_function_name(Oid funcid, int nargs, List *argnames, Oid *argtypes, bool has_variadic, bool *use_variadic_p, ParseExprKind special_exprkind) { char *result; HeapTuple proctup; Form_pg_proc procform; char *proname; bool use_variadic; char *nspname; FuncDetailCode p_result; Oid p_funcid; Oid p_rettype; bool p_retset; int p_nvargs; Oid p_vatype; Oid *p_true_typeids; bool force_qualify = false; proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid)); if (!HeapTupleIsValid(proctup)) elog(ERROR, "cache lookup failed for function %u", funcid); procform = (Form_pg_proc) GETSTRUCT(proctup); proname = NameStr(procform->proname); /* * Due to parser hacks to avoid needing to reserve CUBE, we need to force * qualification in some special cases. */ if (special_exprkind == EXPR_KIND_GROUP_BY) { if (strcmp(proname, "cube") == 0 || strcmp(proname, "rollup") == 0) force_qualify = true; } /* * Determine whether VARIADIC should be printed. We must do this first * since it affects the lookup rules in func_get_detail(). * * Currently, we always print VARIADIC if the function has a merged * variadic-array argument. Note that this is always the case for * functions taking a VARIADIC argument type other than VARIADIC ANY. * * In principle, if VARIADIC wasn't originally specified and the array * actual argument is deconstructable, we could print the array elements * separately and not print VARIADIC, thus more nearly reproducing the * original input. For the moment that seems like too much complication * for the benefit, and anyway we do not know whether VARIADIC was * originally specified if it's a non-ANY type. */ if (use_variadic_p) { /* Parser should not have set funcvariadic unless fn is variadic */ Assert(!has_variadic || OidIsValid(procform->provariadic)); use_variadic = has_variadic; *use_variadic_p = use_variadic; } else { Assert(!has_variadic); use_variadic = false; } /* * The idea here is to schema-qualify only if the parser would fail to * resolve the correct function given the unqualified func name with the * specified argtypes and VARIADIC flag. But if we already decided to * force qualification, then we can skip the lookup and pretend we didn't * find it. 
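 * (Illustrative example, added: inside GROUP BY, a call to a user-defined
 * function that happens to be named cube is emitted schema-qualified, e.g.
 * public.cube(...), so it cannot be mistaken for the CUBE grouping
 * construct; the func_get_detail() lookup is skipped entirely in that case.)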
*/ if (!force_qualify) p_result = func_get_detail(list_make1(makeString(proname)), NIL, argnames, nargs, argtypes, !use_variadic, true, &p_funcid, &p_rettype, &p_retset, &p_nvargs, &p_vatype, &p_true_typeids, NULL); else { p_result = FUNCDETAIL_NOTFOUND; p_funcid = InvalidOid; } if ((p_result == FUNCDETAIL_NORMAL || p_result == FUNCDETAIL_AGGREGATE || p_result == FUNCDETAIL_WINDOWFUNC) && p_funcid == funcid) nspname = NULL; else nspname = get_namespace_name(procform->pronamespace); result = quote_qualified_identifier(nspname, proname); ReleaseSysCache(proctup); return result; } /* * generate_operator_name * Compute the name to display for an operator specified by OID, * given that it is being called with the specified actual arg types. * (Arg types matter because of ambiguous-operator resolution rules. * Pass InvalidOid for unused arg of a unary operator.) * * The result includes all necessary quoting and schema-prefixing, * plus the OPERATOR() decoration needed to use a qualified operator name * in an expression. */ static char * generate_operator_name(Oid operid, Oid arg1, Oid arg2) { StringInfoData buf; HeapTuple opertup; Form_pg_operator operform; char *oprname; char *nspname; Operator p_result; initStringInfo(&buf); opertup = SearchSysCache1(OPEROID, ObjectIdGetDatum(operid)); if (!HeapTupleIsValid(opertup)) elog(ERROR, "cache lookup failed for operator %u", operid); operform = (Form_pg_operator) GETSTRUCT(opertup); oprname = NameStr(operform->oprname); /* * The idea here is to schema-qualify only if the parser would fail to * resolve the correct operator given the unqualified op name with the * specified argtypes. */ switch (operform->oprkind) { case 'b': p_result = oper(NULL, list_make1(makeString(oprname)), arg1, arg2, true, -1); break; case 'l': p_result = left_oper(NULL, list_make1(makeString(oprname)), arg2, true, -1); break; case 'r': p_result = right_oper(NULL, list_make1(makeString(oprname)), arg1, true, -1); break; default: elog(ERROR, "unrecognized oprkind: %d", operform->oprkind); p_result = NULL; /* keep compiler quiet */ break; } if (p_result != NULL && oprid(p_result) == operid) nspname = NULL; else { nspname = get_namespace_name(operform->oprnamespace); appendStringInfo(&buf, "OPERATOR(%s.", quote_identifier(nspname)); } appendStringInfoString(&buf, oprname); if (nspname) appendStringInfoChar(&buf, ')'); if (p_result != NULL) ReleaseSysCache(p_result); ReleaseSysCache(opertup); return buf.data; } #endif /* (PG_VERSION_NUM >= 90600 && PG_VERSION_NUM < 90700) */ citus-7.0.3/src/backend/distributed/utils/shardinterval_utils.c000066400000000000000000000257171317107136600247630ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * shardinterval_utils.c * * This file contains functions to perform useful operations on shard intervals. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" #include "distributed/metadata_cache.h" #include "distributed/multi_planner.h" #include "distributed/shard_pruning.h" #include "distributed/shardinterval_utils.h" #include "distributed/pg_dist_partition.h" #include "distributed/worker_protocol.h" #include "utils/catcache.h" #include "utils/memutils.h" static int SearchCachedShardInterval(Datum partitionColumnValue, ShardInterval **shardIntervalCache, int shardCount, FmgrInfo *compareFunction); /* * LowestShardIntervalById returns the shard interval with the lowest shard * ID from a list of shard intervals. */ ShardInterval * LowestShardIntervalById(List *shardIntervalList) { ShardInterval *lowestShardInterval = NULL; ListCell *shardIntervalCell = NULL; foreach(shardIntervalCell, shardIntervalList) { ShardInterval *shardInterval = (ShardInterval *) lfirst(shardIntervalCell); if (lowestShardInterval == NULL || lowestShardInterval->shardId > shardInterval->shardId) { lowestShardInterval = shardInterval; } } return lowestShardInterval; } /* * CompareShardIntervals acts as a helper function to compare two shard intervals * by their minimum values, using the value's type comparison function. * * If a shard interval does not have min/max value, it's treated as being greater * than the other. */ int CompareShardIntervals(const void *leftElement, const void *rightElement, FmgrInfo *typeCompareFunction) { ShardInterval *leftShardInterval = *((ShardInterval **) leftElement); ShardInterval *rightShardInterval = *((ShardInterval **) rightElement); Datum leftDatum = 0; Datum rightDatum = 0; Datum comparisonDatum = 0; int comparisonResult = 0; Assert(typeCompareFunction != NULL); /* * Left element should be treated as the greater element in case it doesn't * have min or max values. */ if (!leftShardInterval->minValueExists || !leftShardInterval->maxValueExists) { comparisonResult = 1; return comparisonResult; } /* * Right element should be treated as the greater element in case it doesn't * have min or max values. */ if (!rightShardInterval->minValueExists || !rightShardInterval->maxValueExists) { comparisonResult = -1; return comparisonResult; } /* if both shard interval have min/max values, calculate the comparison result */ leftDatum = leftShardInterval->minValue; rightDatum = rightShardInterval->minValue; comparisonDatum = CompareCall2(typeCompareFunction, leftDatum, rightDatum); comparisonResult = DatumGetInt32(comparisonDatum); return comparisonResult; } /* * CompareShardIntervalsById is a comparison function for sort shard * intervals by their shard ID. */ int CompareShardIntervalsById(const void *leftElement, const void *rightElement) { ShardInterval *leftInterval = *((ShardInterval **) leftElement); ShardInterval *rightInterval = *((ShardInterval **) rightElement); int64 leftShardId = leftInterval->shardId; int64 rightShardId = rightInterval->shardId; /* we compare 64-bit integers, instead of casting their difference to int */ if (leftShardId > rightShardId) { return 1; } else if (leftShardId < rightShardId) { return -1; } else { return 0; } } /* * CompareRelationShards is a comparison function for sorting relation * to shard mappings by their relation ID and then shard ID. 
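 * (Added example with hypothetical OIDs and shard IDs, not in the original:
 * (relationId 16384, shardId 102009) sorts before (16384, 102011), which in
 * turn sorts before (16390, 102008), since relationId is compared first and
 * shardId only breaks ties.)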
*/ int CompareRelationShards(const void *leftElement, const void *rightElement) { RelationShard *leftRelationShard = *((RelationShard **) leftElement); RelationShard *rightRelationShard = *((RelationShard **) rightElement); Oid leftRelationId = leftRelationShard->relationId; Oid rightRelationId = rightRelationShard->relationId; int64 leftShardId = leftRelationShard->shardId; int64 rightShardId = rightRelationShard->shardId; if (leftRelationId > rightRelationId) { return 1; } else if (leftRelationId < rightRelationId) { return -1; } else if (leftShardId > rightShardId) { return 1; } else if (leftShardId < rightShardId) { return -1; } else { return 0; } } /* * ShardIndex finds the index of given shard in sorted shard interval array. * * For hash partitioned tables, it calculates hash value of a number in its * range (e.g. min value) and finds which shard should contain the hashed * value. For reference tables, it simply returns 0. For distribution methods * other than hash and reference, the function errors out. */ int ShardIndex(ShardInterval *shardInterval) { int shardIndex = INVALID_SHARD_INDEX; Oid distributedTableId = shardInterval->relationId; Datum shardMinValue = shardInterval->minValue; DistTableCacheEntry *cacheEntry = DistributedTableCacheEntry(distributedTableId); char partitionMethod = cacheEntry->partitionMethod; /* * Note that, we can also support append and range distributed tables, but * currently it is not required. */ if (partitionMethod != DISTRIBUTE_BY_HASH && partitionMethod != DISTRIBUTE_BY_NONE) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("finding index of a given shard is only supported for " "hash distributed and reference tables"))); } /* short-circuit for reference tables */ if (partitionMethod == DISTRIBUTE_BY_NONE) { /* reference tables has only a single shard, so the index is fixed to 0 */ shardIndex = 0; return shardIndex; } shardIndex = FindShardIntervalIndex(shardMinValue, cacheEntry); return shardIndex; } /* * FindShardInterval finds a single shard interval in the cache for the * given partition column value. Note that reference tables do not have * partition columns, thus, pass partitionColumnValue and compareFunction * as NULL for them. */ ShardInterval * FindShardInterval(Datum partitionColumnValue, DistTableCacheEntry *cacheEntry) { Datum searchedValue = partitionColumnValue; int shardIndex = INVALID_SHARD_INDEX; if (cacheEntry->partitionMethod == DISTRIBUTE_BY_HASH) { searchedValue = FunctionCall1(cacheEntry->hashFunction, partitionColumnValue); } shardIndex = FindShardIntervalIndex(searchedValue, cacheEntry); if (shardIndex == INVALID_SHARD_INDEX) { return NULL; } return cacheEntry->sortedShardIntervalArray[shardIndex]; } /* * FindShardIntervalIndex finds the index of the shard interval which covers * the searched value. Note that the searched value must be the hashed value * of the original value if the distribution method is hash. * * Note that, if the searched value can not be found for hash partitioned * tables, we error out (unless there are no shards, in which case * INVALID_SHARD_INDEX is returned). This should only happen if something is * terribly wrong, either metadata tables are corrupted or we have a bug * somewhere. Such as a hash function which returns a value not in the range * of [INT32_MIN, INT32_MAX] can fire this. 
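 * A worked example (added for clarity; it assumes HASH_TOKEN_COUNT covers the
 * full 2^32 hash space, as used by the uniform-distribution fast path below):
 * with shardCount = 4, hashTokenIncrement is 2^32 / 4 = 1073741824, and a
 * hashed value of 0 gives shardIndex = (uint32) (0 - INT32_MIN) / 1073741824
 * = 2, i.e. the third of the four equally sized hash ranges.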
*/ int FindShardIntervalIndex(Datum searchedValue, DistTableCacheEntry *cacheEntry) { ShardInterval **shardIntervalCache = cacheEntry->sortedShardIntervalArray; int shardCount = cacheEntry->shardIntervalArrayLength; char partitionMethod = cacheEntry->partitionMethod; FmgrInfo *compareFunction = cacheEntry->shardIntervalCompareFunction; bool useBinarySearch = (partitionMethod != DISTRIBUTE_BY_HASH || !cacheEntry->hasUniformHashDistribution); int shardIndex = INVALID_SHARD_INDEX; if (shardCount == 0) { return INVALID_SHARD_INDEX; } if (partitionMethod == DISTRIBUTE_BY_HASH) { if (useBinarySearch) { Assert(compareFunction != NULL); shardIndex = SearchCachedShardInterval(searchedValue, shardIntervalCache, shardCount, compareFunction); /* we should always return a valid shard index for hash partitioned tables */ if (shardIndex == INVALID_SHARD_INDEX) { ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), errmsg("cannot find shard interval"), errdetail("Hash of the partition column value " "does not fall into any shards."))); } } else { int hashedValue = DatumGetInt32(searchedValue); uint64 hashTokenIncrement = HASH_TOKEN_COUNT / shardCount; shardIndex = (uint32) (hashedValue - INT32_MIN) / hashTokenIncrement; Assert(shardIndex <= shardCount); /* * If the shard count is not power of 2, the range of the last * shard becomes larger than others. For that extra piece of range, * we still need to use the last shard. */ if (shardIndex == shardCount) { shardIndex = shardCount - 1; } } } else if (partitionMethod == DISTRIBUTE_BY_NONE) { /* reference tables has a single shard, all values mapped to that shard */ Assert(shardCount == 1); shardIndex = 0; } else { Assert(compareFunction != NULL); shardIndex = SearchCachedShardInterval(searchedValue, shardIntervalCache, shardCount, compareFunction); } return shardIndex; } /* * SearchCachedShardInterval performs a binary search for a shard interval * matching a given partition column value and returns it's index in the cached * array. If it can not find any shard interval with the given value, it returns * INVALID_SHARD_INDEX. */ static int SearchCachedShardInterval(Datum partitionColumnValue, ShardInterval **shardIntervalCache, int shardCount, FmgrInfo *compareFunction) { int lowerBoundIndex = 0; int upperBoundIndex = shardCount; while (lowerBoundIndex < upperBoundIndex) { int middleIndex = (lowerBoundIndex + upperBoundIndex) / 2; int maxValueComparison = 0; int minValueComparison = 0; minValueComparison = FunctionCall2Coll(compareFunction, DEFAULT_COLLATION_OID, partitionColumnValue, shardIntervalCache[middleIndex]->minValue); if (DatumGetInt32(minValueComparison) < 0) { upperBoundIndex = middleIndex; continue; } maxValueComparison = FunctionCall2Coll(compareFunction, DEFAULT_COLLATION_OID, partitionColumnValue, shardIntervalCache[middleIndex]->maxValue); if (DatumGetInt32(maxValueComparison) <= 0) { return middleIndex; } lowerBoundIndex = middleIndex + 1; } return INVALID_SHARD_INDEX; } /* * SingleReplicatedTable checks whether all shards of a distributed table, do not have * more than one replica. If even one shard has more than one replica, this function * returns false, otherwise it returns true. 
*/ bool SingleReplicatedTable(Oid relationId) { List *shardIntervalList = LoadShardList(relationId); ListCell *shardIntervalCell = NULL; foreach(shardIntervalCell, shardIntervalList) { uint64 *shardIdPointer = (uint64 *) lfirst(shardIntervalCell); uint64 shardId = (*shardIdPointer); List *shardPlacementList = ShardPlacementList(shardId); if (shardPlacementList->length > 1) { return false; } } return true; } citus-7.0.3/src/backend/distributed/worker/000077500000000000000000000000001317107136600206665ustar00rootroot00000000000000citus-7.0.3/src/backend/distributed/worker/task_tracker.c000066400000000000000000000774001317107136600235170ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * task_tracker.c * * The task tracker background process runs on every worker node. The process * wakes up at regular intervals, reads information from a shared hash, and * checks if any new tasks are assigned to this node. If they are, the process * runs task-specific logic, and sends queries to the postmaster for execution. * The task tracker then tracks the execution of these queries, and updates the * shared hash with task progress information. * * The task tracker is started by the postmaster when the startup process * finishes. The process remains alive until the postmaster commands it to * terminate. Normal termination is by SIGTERM, which instructs the task tracker * to exit(0). Emergency termination is by SIGQUIT; like any backend, the task * tracker will simply abort and exit on SIGQUIT. * * For details on how the task tracker manages resources during process start-up * and shutdown, please see the writeboard on our Basecamp project website. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "miscadmin.h" #include #include "commands/dbcommands.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_server_executor.h" #include "distributed/task_tracker.h" #include "distributed/transmit.h" #include "distributed/worker_protocol.h" #include "libpq/hba.h" #include "libpq/pqsignal.h" #include "lib/stringinfo.h" #include "postmaster/bgworker.h" #include "postmaster/postmaster.h" #include "storage/fd.h" #include "storage/ipc.h" #include "storage/lwlock.h" #include "storage/pmsignal.h" #include "storage/proc.h" #include "storage/shmem.h" #include "utils/guc.h" #include "utils/memutils.h" int TaskTrackerDelay = 200; /* process sleep interval in millisecs */ int MaxRunningTasksPerNode = 16; /* max number of running tasks */ int MaxTrackedTasksPerNode = 1024; /* max number of tracked tasks */ int MaxTaskStringSize = 12288; /* max size of a worker task call string in bytes */ WorkerTasksSharedStateData *WorkerTasksSharedState; /* shared memory state */ static shmem_startup_hook_type prev_shmem_startup_hook = NULL; /* Flags set by interrupt handlers for later service in the main loop */ static volatile sig_atomic_t got_SIGHUP = false; static volatile sig_atomic_t got_SIGTERM = false; /* initialization forward declarations */ static Size TaskTrackerShmemSize(void); static void TaskTrackerShmemInit(void); /* Signal handler forward declarations */ static void TrackerSigHupHandler(SIGNAL_ARGS); static void TrackerShutdownHandler(SIGNAL_ARGS); /* Local functions forward declarations */ static void TrackerCleanupJobDirectories(void); static void TrackerCleanupJobSchemas(void); static void TrackerCleanupConnections(HTAB 
*WorkerTasksHash); static void TrackerRegisterShutDown(HTAB *WorkerTasksHash); static void TrackerDelayLoop(void); static List * SchedulableTaskList(HTAB *WorkerTasksHash); static WorkerTask * SchedulableTaskPriorityQueue(HTAB *WorkerTasksHash); static uint32 CountTasksMatchingCriteria(HTAB *WorkerTasksHash, bool (*CriteriaFunction)(WorkerTask *)); static bool RunningTask(WorkerTask *workerTask); static bool SchedulableTask(WorkerTask *workerTask); static int CompareTasksByTime(const void *first, const void *second); static void ScheduleWorkerTasks(HTAB *WorkerTasksHash, List *schedulableTaskList); static void ManageWorkerTasksHash(HTAB *WorkerTasksHash); static void ManageWorkerTask(WorkerTask *workerTask, HTAB *WorkerTasksHash); static void RemoveWorkerTask(WorkerTask *workerTask, HTAB *WorkerTasksHash); static void CreateJobDirectoryIfNotExists(uint64 jobId); static int32 ConnectToLocalBackend(const char *databaseName, const char *userName); /* Organize, at startup, that the task tracker is started */ void TaskTrackerRegister(void) { BackgroundWorker worker; /* organize and register initialization of required shared memory */ RequestAddinShmemSpace(TaskTrackerShmemSize()); prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = TaskTrackerShmemInit; /* and that the task tracker is started as background worker */ memset(&worker, 0, sizeof(worker)); worker.bgw_flags = BGWORKER_SHMEM_ACCESS; worker.bgw_start_time = BgWorkerStart_ConsistentState; worker.bgw_restart_time = 1; snprintf(worker.bgw_library_name, BGW_MAXLEN, "citus"); snprintf(worker.bgw_function_name, BGW_MAXLEN, "TaskTrackerMain"); worker.bgw_notify_pid = 0; snprintf(worker.bgw_name, BGW_MAXLEN, "task tracker"); RegisterBackgroundWorker(&worker); } /* Main entry point for task tracker process. */ void TaskTrackerMain(Datum main_arg) { MemoryContext TaskTrackerContext = NULL; sigjmp_buf local_sigjmp_buf; static bool processStartUp = true; /* Properly accept or ignore signals the postmaster might send us */ pqsignal(SIGHUP, TrackerSigHupHandler); /* set flag to read config file */ pqsignal(SIGTERM, TrackerShutdownHandler); /* request shutdown */ /* We're now ready to receive signals */ BackgroundWorkerUnblockSignals(); /* * Create a memory context that we will do all our work in. We do this so * that we can reset the context during error recovery and thereby avoid * possible memory leaks. */ TaskTrackerContext = AllocSetContextCreate(TopMemoryContext, "Task Tracker", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); MemoryContextSwitchTo(TaskTrackerContext); /* * If an exception is encountered, processing resumes here. The motivation * for this code block is outlined in postgres.c, and the code itself is * heavily based on bgwriter.c. * * In most error scenarios, we will not drop here: the task tracker process * offloads all work to backend processes, and checks the completion of work * through the client executor library. We will therefore only come here if * we have inconsistencies in the shared hash and need to signal an error. */ if (sigsetjmp(local_sigjmp_buf, 1) != 0) { /* Since we are not using PG_TRY, we must reset error stack by hand */ error_context_stack = NULL; /* Prevents interrupts while cleaning up */ HOLD_INTERRUPTS(); /* Report the error to the server log */ EmitErrorReport(); /* * These operations are just a minimal subset of AbortTransaction(). * We do not have many resources to worry about; we only have a shared * hash and an LWLock guarding that hash. 
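		 *
		 * Concretely, the calls below release every lightweight lock this
		 * process may still hold, close files that were opened through fd.c
		 * during the failed iteration, and drop any open dynahash sequence
		 * scans (passing false because this is failure cleanup rather than a
		 * commit).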
*/ LWLockReleaseAll(); AtEOXact_Files(); AtEOXact_HashTables(false); /* * Now return to normal top-level context, and clear ErrorContext for * next time. */ MemoryContextSwitchTo(TaskTrackerContext); FlushErrorState(); /* Flush any leaked data in the top-level context */ MemoryContextResetAndDeleteChildren(TaskTrackerContext); /* Now we can allow interrupts again */ RESUME_INTERRUPTS(); /* * Sleep at least 1 second after any error. A write error is likely to * be repeated, and we don't want to be filling the error logs as fast * as we can. */ pg_usleep(1000000L); } /* We can now handle ereport(ERROR) */ PG_exception_stack = &local_sigjmp_buf; /* * We run validation and cache cleanup functions as this process is starting * up. If these functions throw an error, we won't try running them again. */ if (processStartUp) { processStartUp = false; /* clean up old files in the job cache */ TrackerCleanupJobDirectories(); /* clean up schemas in the job cache */ TrackerCleanupJobSchemas(); } /* Loop forever */ for (;;) { /* * Emergency bailout if postmaster has died. This is to avoid the * necessity for manual cleanup of all postmaster children. * * XXX: Note that PostgreSQL background processes no longer nap between * their loops, but instead uses latches to wake up when necessary. We * should switch to using latches in here too, and have the task tracker * assign function notify us when there is a new task. */ if (!PostmasterIsAlive()) { exit(1); } /* Process any requests or signals received recently */ if (got_SIGHUP) { got_SIGHUP = false; /* reload postgres configuration files */ ProcessConfigFile(PGC_SIGHUP); } if (got_SIGTERM) { /* * From here on, reporting errors should end with exit(1), and not * send control back to the sigsetjmp block above. */ ExitOnAnyError = true; /* Close open connections to local backends */ TrackerCleanupConnections(WorkerTasksSharedState->taskHash); /* Add a sentinel task to the shared hash to mark shutdown */ TrackerRegisterShutDown(WorkerTasksSharedState->taskHash); /* Normal exit from the task tracker is here */ proc_exit(0); } /* Call the function that does the actual work */ ManageWorkerTasksHash(WorkerTasksSharedState->taskHash); /* Sleep for the configured time */ TrackerDelayLoop(); } } /* * WorkerTasksHashEnter creates a new worker task in the shared hash, and * performs checks for this task. Note that the caller still needs to initialize * the worker task's fields, and hold the appopriate locks for the shared hash. */ WorkerTask * WorkerTasksHashEnter(uint64 jobId, uint32 taskId) { WorkerTask *workerTask = NULL; void *hashKey = NULL; bool handleFound = false; WorkerTask searchTask; searchTask.jobId = jobId; searchTask.taskId = taskId; hashKey = (void *) &searchTask; workerTask = (WorkerTask *) hash_search(WorkerTasksSharedState->taskHash, hashKey, HASH_ENTER_NULL, &handleFound); if (workerTask == NULL) { ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), errhint("Try increasing citus.max_tracked_tasks_per_node."))); } /* check that we do not have the same task assigned twice to this node */ if (handleFound) { ereport(ERROR, (errmsg("cannot assign an already assigned task"), errdetail("Task jobId: " UINT64_FORMAT " and taskId: %u", jobId, taskId))); } return workerTask; } /* * WorkerTasksHashFind looks up the worker task with the given identifiers in * the shared hash. Note that the caller still needs to hold the appropriate * locks for the shared hash. 
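 *
 * A minimal lookup sketch, mirroring how the protocol functions in
 * task_tracker_protocol.c use it (the jobId and taskId values are whatever
 * the caller was assigned; they are not special):
 *
 *   LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_SHARED);
 *   workerTask = WorkerTasksHashFind(jobId, taskId);
 *   ... workerTask may be NULL if the task was never assigned on this node ...
 *   LWLockRelease(&WorkerTasksSharedState->taskHashLock);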
*/ WorkerTask * WorkerTasksHashFind(uint64 jobId, uint32 taskId) { WorkerTask *workerTask = NULL; void *hashKey = NULL; WorkerTask searchTask; searchTask.jobId = jobId; searchTask.taskId = taskId; hashKey = (void *) &searchTask; workerTask = (WorkerTask *) hash_search(WorkerTasksSharedState->taskHash, hashKey, HASH_FIND, NULL); return workerTask; } /* * TrackerCleanupJobDirectories cleans up all files in the job cache directory * as part of this process's start-up logic. The task tracker process manages * both tasks in the shared hash and these tasks' output files. When the task * tracker needs to shutdown, all shared hash entries are deleted, but the * associated files cannot be cleaned up safely. We therefore perform this * cleanup when the process restarts. */ static void TrackerCleanupJobDirectories(void) { /* use the default tablespace in {datadir}/base */ StringInfo jobCacheDirectory = makeStringInfo(); appendStringInfo(jobCacheDirectory, "base/%s", PG_JOB_CACHE_DIR); RemoveDirectory(jobCacheDirectory); CreateDirectory(jobCacheDirectory); FreeStringInfo(jobCacheDirectory); } /* * TrackerCleanupJobSchemas creates and assigns tasks to remove job schemas and * all tables within these schemas. These job schemas are currently created by * merge tasks, and may linger if the database shuts down before the jobs get * cleaned up. This function then runs during process start-up, and creates one * task per database to remove lingering job schemas, if any. */ static void TrackerCleanupJobSchemas(void) { /* * XXX: We previously called DatabaseNameList() to read the list of database * names here. This function read the database names from the flat database * file; this file was deprecated on Aug 31, 2009. We hence need to rewrite * this function to read from pg_database directly. */ List *databaseNameList = NIL; ListCell *databaseNameCell = NULL; const uint64 jobId = RESERVED_JOB_ID; uint32 taskIndex = 1; LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); foreach(databaseNameCell, databaseNameList) { char *databaseName = (char *) lfirst(databaseNameCell); WorkerTask *cleanupTask = NULL; /* template0 database does not accept connections */ int skipDatabaseName = strncmp(databaseName, TEMPLATE0_NAME, NAMEDATALEN); if (skipDatabaseName == 0) { continue; } /* * We create cleanup tasks since we can't remove schemas within the task * tracker process. We also assign high priorities to these tasks so * that they get scheduled before everyone else. */ cleanupTask = WorkerTasksHashEnter(jobId, taskIndex); cleanupTask->assignedAt = HIGH_PRIORITY_TASK_TIME; cleanupTask->taskStatus = TASK_ASSIGNED; strlcpy(cleanupTask->taskCallString, JOB_SCHEMA_CLEANUP, MaxTaskStringSize); strlcpy(cleanupTask->databaseName, databaseName, NAMEDATALEN); /* zero out all other fields */ cleanupTask->connectionId = INVALID_CONNECTION_ID; cleanupTask->failureCount = 0; taskIndex++; } LWLockRelease(&WorkerTasksSharedState->taskHashLock); if (databaseNameList != NIL) { list_free_deep(databaseNameList); } } /* * TrackerCleanupConnections closes all open connections to backends during * process shutdown. This signals to the backends that their connections are * gone and stops them from logging pipe-related warning messages. 
*/ static void TrackerCleanupConnections(HTAB *WorkerTasksHash) { HASH_SEQ_STATUS status; WorkerTask *currentTask = NULL; hash_seq_init(&status, WorkerTasksHash); currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { if (currentTask->connectionId != INVALID_CONNECTION_ID) { MultiClientDisconnect(currentTask->connectionId); currentTask->connectionId = INVALID_CONNECTION_ID; } currentTask = (WorkerTask *) hash_seq_search(&status); } } /* * TrackerRegisterShutDown enters a special marker task to the shared hash. This * marker task indicates to "task protocol processes" that we are shutting down * and that they shouldn't accept new task assignments. */ static void TrackerRegisterShutDown(HTAB *WorkerTasksHash) { uint64 jobId = RESERVED_JOB_ID; uint32 taskId = SHUTDOWN_MARKER_TASK_ID; WorkerTask *shutdownMarkerTask = NULL; LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); shutdownMarkerTask = WorkerTasksHashEnter(jobId, taskId); shutdownMarkerTask->taskStatus = TASK_SUCCEEDED; shutdownMarkerTask->connectionId = INVALID_CONNECTION_ID; LWLockRelease(&WorkerTasksSharedState->taskHashLock); } /* Sleeps either for the configured time or until a signal is received. */ static void TrackerDelayLoop(void) { const long SignalCheckInterval = 1000000L; /* check signal every second */ /* * On some platforms, signals do not interrupt the sleep. To ensure we * respond promptly when someone signals us, we break down the sleep into * 1-second increments, and check for interrupts after each nap. */ long trackerDelay = TaskTrackerDelay * 1000L; while (trackerDelay > (SignalCheckInterval - 1)) { if (got_SIGHUP || got_SIGTERM) { break; } pg_usleep(SignalCheckInterval); trackerDelay -= SignalCheckInterval; } if (!(got_SIGHUP || got_SIGTERM)) { pg_usleep(trackerDelay); } } /* ------------------------------------------------------------ * Signal handling and shared hash initialization functions follow * ------------------------------------------------------------ */ /* SIGHUP: set flag to re-read config file at next convenient time */ static void TrackerSigHupHandler(SIGNAL_ARGS) { int save_errno = errno; got_SIGHUP = true; if (MyProc != NULL) { SetLatch(&MyProc->procLatch); } errno = save_errno; } /* SIGTERM: set flag for main loop to exit normally */ static void TrackerShutdownHandler(SIGNAL_ARGS) { int save_errno = errno; got_SIGTERM = true; if (MyProc != NULL) { SetLatch(&MyProc->procLatch); } errno = save_errno; } /* Estimates the shared memory size used for keeping track of tasks. */ static Size TaskTrackerShmemSize(void) { Size size = 0; Size hashSize = 0; size = add_size(size, sizeof(WorkerTasksSharedStateData)); hashSize = hash_estimate_size(MaxTrackedTasksPerNode, WORKER_TASK_SIZE); size = add_size(size, hashSize); return size; } /* Initializes the shared memory used for keeping track of tasks. */ static void TaskTrackerShmemInit(void) { bool alreadyInitialized = false; HASHCTL info; int hashFlags = 0; long maxTableSize = 0; long initTableSize = 0; maxTableSize = (long) MaxTrackedTasksPerNode; initTableSize = maxTableSize / 8; /* * Allocate the control structure for the hash table that maps unique task * identifiers (uint64:uint32) to general task information, as well as the * parameters needed to run the task. 
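	 *
	 * The key layout (a uint64 jobId immediately followed by a uint32 taskId)
	 * matches the leading fields of WorkerTask, which is what allows
	 * WorkerTasksHashEnter and WorkerTasksHashFind to pass a stack-allocated
	 * WorkerTask directly as the hash key.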
*/ memset(&info, 0, sizeof(info)); info.keysize = sizeof(uint64) + sizeof(uint32); info.entrysize = WORKER_TASK_SIZE; info.hash = tag_hash; hashFlags = (HASH_ELEM | HASH_FUNCTION); /* * Currently the lock isn't required because allocation only happens at * startup in postmaster, but it doesn't hurt, and makes things more * consistent with other extensions. */ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); /* allocate struct containing task tracker related shared state */ WorkerTasksSharedState = (WorkerTasksSharedStateData *) ShmemInitStruct("Worker Task Control", sizeof(WorkerTasksSharedStateData), &alreadyInitialized); if (!alreadyInitialized) { #if (PG_VERSION_NUM >= 100000) WorkerTasksSharedState->taskHashTrancheId = LWLockNewTrancheId(); WorkerTasksSharedState->taskHashTrancheName = "Worker Task Hash Tranche"; LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId, WorkerTasksSharedState->taskHashTrancheName); #else /* initialize lwlock protecting the task tracker hash table */ LWLockTranche *tranche = &WorkerTasksSharedState->taskHashLockTranche; WorkerTasksSharedState->taskHashTrancheId = LWLockNewTrancheId(); tranche->array_base = &WorkerTasksSharedState->taskHashLock; tranche->array_stride = sizeof(LWLock); tranche->name = "Worker Task Hash Tranche"; LWLockRegisterTranche(WorkerTasksSharedState->taskHashTrancheId, tranche); #endif LWLockInitialize(&WorkerTasksSharedState->taskHashLock, WorkerTasksSharedState->taskHashTrancheId); } /* allocate hash table */ WorkerTasksSharedState->taskHash = ShmemInitHash("Worker Task Hash", initTableSize, maxTableSize, &info, hashFlags); LWLockRelease(AddinShmemInitLock); Assert(WorkerTasksSharedState->taskHash != NULL); Assert(WorkerTasksSharedState->taskHashTrancheId != 0); if (prev_shmem_startup_hook != NULL) { prev_shmem_startup_hook(); } } /* ------------------------------------------------------------ * Task scheduling and management functions follow * ------------------------------------------------------------ */ /* * SchedulableTaskList calculates the number of tasks to schedule at this given * moment, and creates a deep-copied list containing that many tasks. The tasks * in the list are sorted according to a priority criteria, currently the task's * assignment time. Note that this function expects the caller to hold a read * lock over the shared hash. 
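 *
 * As a worked example (all counts below are illustrative): with
 * MaxRunningTasksPerNode = 16, ten tasks currently running and twenty tasks
 * in the assigned state, the function schedules 16 - 10 = 6 tasks, picking
 * the six earliest-assigned entries from the priority queue.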
*/ static List * SchedulableTaskList(HTAB *WorkerTasksHash) { List *schedulableTaskList = NIL; WorkerTask *schedulableTaskQueue = NULL; uint32 runningTaskCount = 0; uint32 schedulableTaskCount = 0; uint32 tasksToScheduleCount = 0; uint32 queueIndex = 0; runningTaskCount = CountTasksMatchingCriteria(WorkerTasksHash, &RunningTask); if (runningTaskCount >= MaxRunningTasksPerNode) { return NIL; /* we already have enough tasks running */ } schedulableTaskCount = CountTasksMatchingCriteria(WorkerTasksHash, &SchedulableTask); if (schedulableTaskCount == 0) { return NIL; /* we do not have any new tasks to schedule */ } tasksToScheduleCount = MaxRunningTasksPerNode - runningTaskCount; if (tasksToScheduleCount > schedulableTaskCount) { tasksToScheduleCount = schedulableTaskCount; } /* get all schedulable tasks ordered according to a priority criteria */ schedulableTaskQueue = SchedulableTaskPriorityQueue(WorkerTasksHash); for (queueIndex = 0; queueIndex < tasksToScheduleCount; queueIndex++) { WorkerTask *schedulableTask = (WorkerTask *) palloc0(WORKER_TASK_SIZE); WorkerTask *queuedTask = WORKER_TASK_AT(schedulableTaskQueue, queueIndex); schedulableTask->jobId = queuedTask->jobId; schedulableTask->taskId = queuedTask->taskId; schedulableTaskList = lappend(schedulableTaskList, schedulableTask); } /* free priority queue */ pfree(schedulableTaskQueue); return schedulableTaskList; } /* * SchedulableTaskPriorityQueue allocates an array containing all schedulable * tasks in the shared hash, orders these tasks according to a sorting criteria, * and returns the sorted array. */ static WorkerTask * SchedulableTaskPriorityQueue(HTAB *WorkerTasksHash) { HASH_SEQ_STATUS status; WorkerTask *currentTask = NULL; WorkerTask *priorityQueue = NULL; uint32 queueSize = 0; uint32 queueIndex = 0; /* our priority queue size equals to the number of schedulable tasks */ queueSize = CountTasksMatchingCriteria(WorkerTasksHash, &SchedulableTask); if (queueSize == 0) { return NULL; } /* allocate an array of tasks for our priority queue */ priorityQueue = (WorkerTask *) palloc0(WORKER_TASK_SIZE * queueSize); /* copy tasks in the shared hash to the priority queue */ hash_seq_init(&status, WorkerTasksHash); currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { if (SchedulableTask(currentTask)) { /* tasks in the priority queue only need the first three fields */ WorkerTask *queueTask = WORKER_TASK_AT(priorityQueue, queueIndex); queueTask->jobId = currentTask->jobId; queueTask->taskId = currentTask->taskId; queueTask->assignedAt = currentTask->assignedAt; queueIndex++; } currentTask = (WorkerTask *) hash_seq_search(&status); } /* now order elements in the queue according to our sorting criterion */ qsort(priorityQueue, queueSize, WORKER_TASK_SIZE, CompareTasksByTime); return priorityQueue; } /* Counts the number of tasks that match the given criteria function. */ static uint32 CountTasksMatchingCriteria(HTAB *WorkerTasksHash, bool (*CriteriaFunction)(WorkerTask *)) { HASH_SEQ_STATUS status; WorkerTask *currentTask = NULL; uint32 taskCount = 0; hash_seq_init(&status, WorkerTasksHash); currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { bool matchesCriteria = (*CriteriaFunction)(currentTask); if (matchesCriteria) { taskCount++; } currentTask = (WorkerTask *) hash_seq_search(&status); } return taskCount; } /* Checks if the worker task is running. 
*/ static bool RunningTask(WorkerTask *workerTask) { TaskStatus currentStatus = workerTask->taskStatus; if (currentStatus == TASK_RUNNING) { return true; } return false; } /* Checks if the worker task can be scheduled to run. */ static bool SchedulableTask(WorkerTask *workerTask) { TaskStatus currentStatus = workerTask->taskStatus; if (currentStatus == TASK_ASSIGNED) { return true; } return false; } /* Comparison function to compare two worker tasks by their assignment times. */ static int CompareTasksByTime(const void *first, const void *second) { WorkerTask *firstTask = (WorkerTask *) first; WorkerTask *secondTask = (WorkerTask *) second; /* tasks that are assigned earlier have higher priority */ int timeDiff = firstTask->assignedAt - secondTask->assignedAt; return timeDiff; } /* * ScheduleWorkerTasks takes a list of tasks to schedule, and for each task in * the list, finds and schedules the corresponding task from the shared hash. * Note that this function expects the caller to hold an exclusive lock over the * shared hash. */ static void ScheduleWorkerTasks(HTAB *WorkerTasksHash, List *schedulableTaskList) { ListCell *schedulableTaskCell = NULL; foreach(schedulableTaskCell, schedulableTaskList) { WorkerTask *schedulableTask = (WorkerTask *) lfirst(schedulableTaskCell); WorkerTask *taskToSchedule = NULL; void *hashKey = (void *) schedulableTask; taskToSchedule = (WorkerTask *) hash_search(WorkerTasksHash, hashKey, HASH_FIND, NULL); /* if task is null, the shared hash is in an incosistent state */ if (taskToSchedule == NULL) { ereport(ERROR, (errmsg("could not find the worker task to schedule"), errdetail("Task jobId: " UINT64_FORMAT " and taskId: %u", schedulableTask->jobId, schedulableTask->taskId))); } /* * After determining the set of tasks to schedule, we release the hash's * shared lock for a short time period. We then re-acquire the lock in * exclusive mode. We therefore need to check if this task has been * canceled in the meantime. */ if (taskToSchedule->taskStatus != TASK_CANCEL_REQUESTED) { Assert(SchedulableTask(taskToSchedule)); taskToSchedule->taskStatus = TASK_SCHEDULED; } else { ereport(INFO, (errmsg("the worker task to schedule has been canceled"), errdetail("Task jobId: " UINT64_FORMAT " and taskId: %u", schedulableTask->jobId, schedulableTask->taskId))); } } } /* Manages the scheduling and execution of all tasks in the shared hash. */ static void ManageWorkerTasksHash(HTAB *WorkerTasksHash) { HASH_SEQ_STATUS status; List *schedulableTaskList = NIL; WorkerTask *currentTask = NULL; /* ask the scheduler if we have new tasks to schedule */ LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_SHARED); schedulableTaskList = SchedulableTaskList(WorkerTasksHash); LWLockRelease(&WorkerTasksSharedState->taskHashLock); LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); /* schedule new tasks if we have any */ if (schedulableTaskList != NIL) { ScheduleWorkerTasks(WorkerTasksHash, schedulableTaskList); list_free_deep(schedulableTaskList); } /* now iterate over all tasks, and manage them */ hash_seq_init(&status, WorkerTasksHash); currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { ManageWorkerTask(currentTask, WorkerTasksHash); /* * Typically, we delete worker tasks in the task tracker protocol * process. This task however was canceled mid-query, and the protocol * process asked us to remove it from the shared hash. 
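		 *
		 * (A task only reaches TASK_TO_REMOVE through ManageWorkerTask
		 * itself: a cancel request becomes TASK_CANCELED on one pass and
		 * TASK_TO_REMOVE on the next, at which point this branch deletes
		 * the entry from the shared hash.)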
*/ if (currentTask->taskStatus == TASK_TO_REMOVE) { RemoveWorkerTask(currentTask, WorkerTasksHash); } currentTask = (WorkerTask *) hash_seq_search(&status); } LWLockRelease(&WorkerTasksSharedState->taskHashLock); } /* * ManageWorkerTask manages the execution of the worker task. More specifically, * the function connects to a local backend, sends the query associated with the * task, and oversees the query's execution. Note that this function expects the * caller to hold an exclusive lock over the shared hash. */ static void ManageWorkerTask(WorkerTask *workerTask, HTAB *WorkerTasksHash) { switch (workerTask->taskStatus) { case TASK_ASSIGNED: { break; /* nothing to do until the task gets scheduled */ } case TASK_SCHEDULED: { /* create the job output directory if it does not exist */ CreateJobDirectoryIfNotExists(workerTask->jobId); /* the task is ready to run; connect to local backend */ workerTask->connectionId = ConnectToLocalBackend(workerTask->databaseName, workerTask->userName); if (workerTask->connectionId != INVALID_CONNECTION_ID) { bool taskSent = MultiClientSendQuery(workerTask->connectionId, workerTask->taskCallString); if (taskSent) { workerTask->taskStatus = TASK_RUNNING; } else { workerTask->taskStatus = TASK_FAILED; workerTask->failureCount++; MultiClientDisconnect(workerTask->connectionId); workerTask->connectionId = INVALID_CONNECTION_ID; } } else { workerTask->taskStatus = TASK_FAILED; workerTask->failureCount++; } break; } case TASK_RUNNING: { int32 connectionId = workerTask->connectionId; ResultStatus resultStatus = MultiClientResultStatus(connectionId); /* check if query results are ready, in progress, or unavailable */ if (resultStatus == CLIENT_RESULT_READY) { QueryStatus queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus == CLIENT_QUERY_DONE) { workerTask->taskStatus = TASK_SUCCEEDED; } else if (queryStatus == CLIENT_QUERY_FAILED) { workerTask->taskStatus = TASK_FAILED; workerTask->failureCount++; } else { ereport(FATAL, (errmsg("invalid query status: %d", queryStatus))); } } else if (resultStatus == CLIENT_RESULT_BUSY) { workerTask->taskStatus = TASK_RUNNING; } else if (resultStatus == CLIENT_RESULT_UNAVAILABLE) { workerTask->taskStatus = TASK_FAILED; workerTask->failureCount++; } /* clean up the connection if we are done with the task */ if (resultStatus != CLIENT_RESULT_BUSY) { MultiClientDisconnect(workerTask->connectionId); workerTask->connectionId = INVALID_CONNECTION_ID; } break; } case TASK_FAILED: { if (workerTask->failureCount < MAX_TASK_FAILURE_COUNT) { workerTask->taskStatus = TASK_ASSIGNED; } else { workerTask->taskStatus = TASK_PERMANENTLY_FAILED; } break; } case TASK_PERMANENTLY_FAILED: case TASK_SUCCEEDED: { break; } case TASK_CANCEL_REQUESTED: { /* * If this task is responsible for cleaning up the shared hash, we * give the task more time instead of canceling it. The reason this * task is marked for cancellation is that its file descriptor needs * to be reclaimed after the clean up completes. 
*/ if (workerTask->taskId == JOB_CLEANUP_TASK_ID) { workerTask->taskStatus = TASK_CANCELED; break; } if (workerTask->connectionId != INVALID_CONNECTION_ID) { int32 connectionId = workerTask->connectionId; ResultStatus status = MultiClientResultStatus(connectionId); if (status == CLIENT_RESULT_BUSY) { MultiClientCancel(connectionId); } } /* give the backend some time to flush its response */ workerTask->taskStatus = TASK_CANCELED; break; } case TASK_CANCELED: { if (workerTask->connectionId != INVALID_CONNECTION_ID) { MultiClientDisconnect(workerTask->connectionId); workerTask->connectionId = INVALID_CONNECTION_ID; } workerTask->taskStatus = TASK_TO_REMOVE; break; } case TASK_TO_REMOVE: default: { /* we fatal here to avoid leaking client-side resources */ ereport(FATAL, (errmsg("invalid task status: %d", workerTask->taskStatus))); break; } } Assert(workerTask->failureCount <= MAX_TASK_FAILURE_COUNT); } /* Wrapper function to remove the worker task from the shared hash. */ static void RemoveWorkerTask(WorkerTask *workerTask, HTAB *WorkerTasksHash) { void *hashKey = (void *) workerTask; WorkerTask *taskRemoved = hash_search(WorkerTasksHash, hashKey, HASH_REMOVE, NULL); if (taskRemoved == NULL) { ereport(FATAL, (errmsg("worker task hash corrupted"))); } } /* Wrapper function to create the job directory if it does not already exist. */ static void CreateJobDirectoryIfNotExists(uint64 jobId) { StringInfo jobDirectoryName = JobDirectoryName(jobId); bool jobDirectoryExists = DirectoryExists(jobDirectoryName); if (!jobDirectoryExists) { CreateDirectory(jobDirectoryName); } FreeStringInfo(jobDirectoryName); } /* Wrapper function to inititate connection to local backend. */ static int32 ConnectToLocalBackend(const char *databaseName, const char *userName) { const char *nodeName = LOCAL_HOST_NAME; const uint32 nodePort = PostPortNumber; /* * Our client library currently only handles TCP sockets. We therefore do * not use Unix domain sockets here. */ int32 connectionId = MultiClientConnect(nodeName, nodePort, databaseName, userName); return connectionId; } citus-7.0.3/src/backend/distributed/worker/task_tracker_protocol.c000066400000000000000000000307051317107136600254350ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * task_tracker_protocol.c * * The task tracker background process runs on every worker node. The following * routines allow for the master node to assign tasks to the task tracker, check * these tasks' statuses, and remove these tasks when they are no longer needed. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "miscadmin.h" #include #include "access/xact.h" #include "commands/dbcommands.h" #include "commands/schemacmds.h" #include "distributed/metadata_cache.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_server_executor.h" #include "distributed/resource_lock.h" #include "distributed/task_tracker.h" #include "distributed/task_tracker_protocol.h" #include "distributed/worker_protocol.h" #include "storage/lwlock.h" #include "storage/pmsignal.h" #include "utils/builtins.h" /* Local functions forward declarations */ static bool TaskTrackerRunning(void); static void CreateJobSchema(StringInfo schemaName); static void CreateTask(uint64 jobId, uint32 taskId, char *taskCallString); static void UpdateTask(WorkerTask *workerTask, char *taskCallString); static void CleanupTask(WorkerTask *workerTask); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(task_tracker_assign_task); PG_FUNCTION_INFO_V1(task_tracker_task_status); PG_FUNCTION_INFO_V1(task_tracker_cleanup_job); /* * task_tracker_assign_task creates a new task in the shared hash or updates an * already existing task. The function also creates a schema for the job if it * doesn't already exist. */ Datum task_tracker_assign_task(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 taskId = PG_GETARG_UINT32(1); text *taskCallStringText = PG_GETARG_TEXT_P(2); StringInfo jobSchemaName = JobSchemaName(jobId); bool schemaExists = false; WorkerTask *workerTask = NULL; char *taskCallString = text_to_cstring(taskCallStringText); uint32 taskCallStringLength = strlen(taskCallString); bool taskTrackerRunning = false; CheckCitusVersion(ERROR); /* check that we have a running task tracker on this host */ taskTrackerRunning = TaskTrackerRunning(); if (!taskTrackerRunning) { ereport(ERROR, (errcode(ERRCODE_CANNOT_CONNECT_NOW), errmsg("the task tracker has been disabled or shut down"))); } /* check that we have enough space in our shared hash for this string */ if (taskCallStringLength >= MaxTaskStringSize) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("task string length (%d) exceeds maximum assignable " "size (%d)", taskCallStringLength, MaxTaskStringSize), errhint("Consider increasing citus.max_task_string_size."))); } /* * If the schema does not exist, we create it. However, the schema does not * become visible to other processes until the transaction commits, and we * therefore do not release the resource lock in this case. Otherwise, the * schema is already visible, and we immediately release the resource lock. */ LockJobResource(jobId, AccessExclusiveLock); schemaExists = JobSchemaExists(jobSchemaName); if (!schemaExists) { /* lock gets automatically released upon return from this function */ CreateJobSchema(jobSchemaName); } else { UnlockJobResource(jobId, AccessExclusiveLock); } LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); /* check if we already have the task in our shared hash */ workerTask = WorkerTasksHashFind(jobId, taskId); if (workerTask == NULL) { CreateTask(jobId, taskId, taskCallString); } else { UpdateTask(workerTask, taskCallString); } LWLockRelease(&WorkerTasksSharedState->taskHashLock); PG_RETURN_VOID(); } /* Returns the task status of an already existing task. 
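 * The value handed back is the task's TaskStatus enum cast to uint32, so
 * callers are expected to interpret it against that enum.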
*/ Datum task_tracker_task_status(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 taskId = PG_GETARG_UINT32(1); WorkerTask *workerTask = NULL; uint32 taskStatus = 0; bool taskTrackerRunning = false; CheckCitusVersion(ERROR); taskTrackerRunning = TaskTrackerRunning(); if (taskTrackerRunning) { LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_SHARED); workerTask = WorkerTasksHashFind(jobId, taskId); if (workerTask == NULL) { ereport(ERROR, (errmsg("could not find the worker task"), errdetail("Task jobId: " UINT64_FORMAT " and taskId: %u", jobId, taskId))); } taskStatus = (uint32) workerTask->taskStatus; LWLockRelease(&WorkerTasksSharedState->taskHashLock); } else { ereport(ERROR, (errcode(ERRCODE_CANNOT_CONNECT_NOW), errmsg("the task tracker has been disabled or shut down"))); } PG_RETURN_UINT32(taskStatus); } /* * task_tracker_cleanup_job finds all tasks for the given job, and cleans up * files, connections, and shared hash enties associated with these tasks. */ Datum task_tracker_cleanup_job(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); HASH_SEQ_STATUS status; WorkerTask *currentTask = NULL; StringInfo jobDirectoryName = NULL; StringInfo jobSchemaName = NULL; CheckCitusVersion(ERROR); /* * We first clean up any open connections, and remove tasks belonging to * this job from the shared hash. */ LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_EXCLUSIVE); hash_seq_init(&status, WorkerTasksSharedState->taskHash); currentTask = (WorkerTask *) hash_seq_search(&status); while (currentTask != NULL) { if (currentTask->jobId == jobId) { CleanupTask(currentTask); } currentTask = (WorkerTask *) hash_seq_search(&status); } LWLockRelease(&WorkerTasksSharedState->taskHashLock); /* * We then delete the job directory and schema, if they exist. This cleans * up all intermediate files and tables allocated for the job. Note that the * schema drop call can block if another process is creating the schema or * writing to a table within the schema. */ jobDirectoryName = JobDirectoryName(jobId); RemoveDirectory(jobDirectoryName); LockJobResource(jobId, AccessExclusiveLock); jobSchemaName = JobSchemaName(jobId); RemoveJobSchema(jobSchemaName); UnlockJobResource(jobId, AccessExclusiveLock); PG_RETURN_VOID(); } /* * TaskTrackerRunning checks if the task tracker process is running. To do this, * the function checks if the task tracker is configured to start up, and infers * from shared memory that the tracker hasn't received a shut down request. */ static bool TaskTrackerRunning(void) { WorkerTask *workerTask = NULL; bool postmasterAlive = true; bool taskTrackerRunning = true; /* if postmaster shut down, infer task tracker shut down from it */ postmasterAlive = PostmasterIsAlive(); if (!postmasterAlive) { return false; } /* * When the task tracker receives a termination signal, it inserts a special * marker task to the shared hash. We need to look up this marker task since * the postmaster doesn't send a terminate signal to running backends. */ LWLockAcquire(&WorkerTasksSharedState->taskHashLock, LW_SHARED); workerTask = WorkerTasksHashFind(RESERVED_JOB_ID, SHUTDOWN_MARKER_TASK_ID); if (workerTask != NULL) { taskTrackerRunning = false; } LWLockRelease(&WorkerTasksSharedState->taskHashLock); return taskTrackerRunning; } /* * CreateJobSchema creates a job schema with the given schema name. Note that * this function ensures that our pg_ prefixed schema names can be created. 
* Further note that the created schema does not become visible to other * processes until the transaction commits. */ static void CreateJobSchema(StringInfo schemaName) { const char *queryString = NULL; bool oldAllowSystemTableMods = false; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; CreateSchemaStmt *createSchemaStmt = NULL; RoleSpec currentUserRole = { 0 }; /* allow schema names that start with pg_ */ oldAllowSystemTableMods = allowSystemTableMods; allowSystemTableMods = true; /* ensure we're allowed to create this schema */ GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); /* build a CREATE SCHEMA statement */ currentUserRole.type = T_RoleSpec; currentUserRole.roletype = ROLESPEC_CSTRING; currentUserRole.rolename = GetUserNameFromId(savedUserId, false); currentUserRole.location = -1; createSchemaStmt = makeNode(CreateSchemaStmt); createSchemaStmt->schemaname = schemaName->data; createSchemaStmt->schemaElts = NIL; /* actually create schema with the current user as owner */ #if (PG_VERSION_NUM >= 100000) createSchemaStmt->authrole = ¤tUserRole; CreateSchemaCommand(createSchemaStmt, queryString, -1, -1); #else createSchemaStmt->authrole = (Node *) ¤tUserRole; CreateSchemaCommand(createSchemaStmt, queryString); #endif CommandCounterIncrement(); /* and reset environment */ SetUserIdAndSecContext(savedUserId, savedSecurityContext); allowSystemTableMods = oldAllowSystemTableMods; } /* * CreateTask creates a new task in shared hash, initializes the task, and sets * the task to assigned state. Note that this function expects the caller to * hold an exclusive lock over the shared hash. */ static void CreateTask(uint64 jobId, uint32 taskId, char *taskCallString) { WorkerTask *workerTask = NULL; uint32 assignmentTime = 0; char *databaseName = get_database_name(MyDatabaseId); char *userName = CurrentUserName(); /* increase task priority for cleanup tasks */ assignmentTime = (uint32) time(NULL); if (taskId == JOB_CLEANUP_TASK_ID) { assignmentTime = HIGH_PRIORITY_TASK_TIME; } /* enter the worker task into shared hash and initialize the task */ workerTask = WorkerTasksHashEnter(jobId, taskId); workerTask->assignedAt = assignmentTime; strlcpy(workerTask->taskCallString, taskCallString, MaxTaskStringSize); workerTask->taskStatus = TASK_ASSIGNED; workerTask->connectionId = INVALID_CONNECTION_ID; workerTask->failureCount = 0; strlcpy(workerTask->databaseName, databaseName, NAMEDATALEN); strlcpy(workerTask->userName, userName, NAMEDATALEN); } /* * UpdateTask updates the call string text for an already existing task. Note * that this function expects the caller to hold an exclusive lock over the * shared hash. */ static void UpdateTask(WorkerTask *workerTask, char *taskCallString) { TaskStatus taskStatus = TASK_STATUS_INVALID_FIRST; taskStatus = workerTask->taskStatus; Assert(taskStatus != TASK_STATUS_INVALID_FIRST); /* * 1. If the task has succeeded or has been canceled, we don't do anything. * 2. If the task has permanently failed, we update the task call string, * reset the failure count, and change the task's status to schedulable. * 3. If the task is in conduit, we update the task call string, and reset * the failure count. 
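	 * ("In conduit" covers the remaining states, i.e. tasks that are still
	 * assigned, scheduled, running, or waiting to be retried after a failure.)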
*/ if (taskStatus == TASK_SUCCEEDED || taskStatus == TASK_CANCEL_REQUESTED || taskStatus == TASK_CANCELED) { /* nothing to do */ } else if (taskStatus == TASK_PERMANENTLY_FAILED) { strlcpy(workerTask->taskCallString, taskCallString, MaxTaskStringSize); workerTask->failureCount = 0; workerTask->taskStatus = TASK_ASSIGNED; } else { strlcpy(workerTask->taskCallString, taskCallString, MaxTaskStringSize); workerTask->failureCount = 0; } } /* Cleans up connection and shared hash entry associated with the given task. */ static void CleanupTask(WorkerTask *workerTask) { WorkerTask *taskRemoved = NULL; void *hashKey = (void *) workerTask; /* * If the connection is still valid, the master node decided to terminate * the task prematurely. This can happen when the user wants to cancel the * query, or when a speculatively executed task finishes elsewhere and the * query completes. */ if (workerTask->connectionId != INVALID_CONNECTION_ID) { /* * The task tracker process owns the connections to local backends, and * we cannot interefere with those connections from another process. We * therefore ask the task tracker to clean up the connection and to * remove the task from the shared hash. Note that one of the cleaned up * tasks will always be the clean-up task itself. */ ereport(DEBUG3, (errmsg("requesting cancel for worker task"), errdetail("Task jobId: " UINT64_FORMAT " and taskId: %u", workerTask->jobId, workerTask->taskId))); workerTask->taskStatus = TASK_CANCEL_REQUESTED; return; } /* remove the task from the shared hash */ taskRemoved = hash_search(WorkerTasksSharedState->taskHash, hashKey, HASH_REMOVE, NULL); if (taskRemoved == NULL) { ereport(FATAL, (errmsg("worker task hash corrupted"))); } } citus-7.0.3/src/backend/distributed/worker/worker_data_fetch_protocol.c000066400000000000000000001261601317107136600264340ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_data_fetch_protocol.c * * Routines for fetching remote resources from other nodes to this worker node, * and materializing these resources on this node if necessary. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include #include #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/namespace.h" #include "commands/copy.h" #include "commands/dbcommands.h" #include "commands/extension.h" #include "commands/sequence.h" #include "distributed/citus_ruleutils.h" #include "distributed/connection_management.h" #include "distributed/master_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_client_executor.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_server_executor.h" #include "distributed/multi_utility.h" #include "distributed/relay_utility.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/task_tracker.h" #include "distributed/worker_protocol.h" #include "nodes/makefuncs.h" #include "storage/lmgr.h" #include "tcop/tcopprot.h" #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #if (PG_VERSION_NUM >= 100000) #include "utils/regproc.h" #include "utils/varlena.h" #endif /* Config variable managed via guc.c */ bool ExpireCachedShards = false; /* Local functions forward declarations */ static void FetchRegularFileAsSuperUser(const char *nodeName, uint32 nodePort, StringInfo remoteFilename, StringInfo localFilename); static bool ReceiveRegularFile(const char *nodeName, uint32 nodePort, const char *nodeUser, StringInfo transmitCommand, StringInfo filePath); static void ReceiveResourceCleanup(int32 connectionId, const char *filename, int32 fileDescriptor); static void DeleteFile(const char *filename); static void FetchTableCommon(text *tableName, uint64 remoteTableSize, ArrayType *nodeNameObject, ArrayType *nodePortObject, bool (*FetchTableFunction)(const char *, uint32, const char *)); static uint64 LocalTableSize(Oid relationId); static uint64 ExtractShardId(const char *tableName); static bool FetchRegularTable(const char *nodeName, uint32 nodePort, const char *tableName); static bool FetchForeignTable(const char *nodeName, uint32 nodePort, const char *tableName); static const char * RemoteTableOwner(const char *nodeName, uint32 nodePort, const char *tableName); static StringInfo ForeignFilePath(const char *nodeName, uint32 nodePort, const char *tableName); static bool check_log_statement(List *stmt_list); static void AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName); static void SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(worker_fetch_partition_file); PG_FUNCTION_INFO_V1(worker_fetch_query_results_file); PG_FUNCTION_INFO_V1(worker_apply_shard_ddl_command); PG_FUNCTION_INFO_V1(worker_apply_inter_shard_ddl_command); PG_FUNCTION_INFO_V1(worker_apply_sequence_command); PG_FUNCTION_INFO_V1(worker_fetch_regular_table); PG_FUNCTION_INFO_V1(worker_fetch_foreign_file); PG_FUNCTION_INFO_V1(worker_append_table_to_shard); /* * worker_fetch_partition_file fetches a partition file from the remote node. * The function assumes an upstream compute task depends on this partition file, * and therefore directly fetches the file into the upstream task's directory. 
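 *
 * As a sketch of the resulting transfer (every identifier below is
 * illustrative): a call with jobId 10001, partitionTaskId 2, partitionFileId
 * 5, upstreamTaskId 8 and a worker's host and port copies task 2's partition
 * file number 5 for job 10001 from that worker into the local directory of
 * upstream task 8, creating the task directory first if it does not exist.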
*/ Datum worker_fetch_partition_file(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 partitionTaskId = PG_GETARG_UINT32(1); uint32 partitionFileId = PG_GETARG_UINT32(2); uint32 upstreamTaskId = PG_GETARG_UINT32(3); text *nodeNameText = PG_GETARG_TEXT_P(4); uint32 nodePort = PG_GETARG_UINT32(5); char *nodeName = NULL; /* remote filename is // */ StringInfo remoteDirectoryName = TaskDirectoryName(jobId, partitionTaskId); StringInfo remoteFilename = PartitionFilename(remoteDirectoryName, partitionFileId); /* local filename is // */ StringInfo taskDirectoryName = TaskDirectoryName(jobId, upstreamTaskId); StringInfo taskFilename = TaskFilename(taskDirectoryName, partitionTaskId); /* * If we are the first function to fetch a file for the upstream task, the * task directory does not exist. We then lock and create the directory. */ bool taskDirectoryExists = DirectoryExists(taskDirectoryName); CheckCitusVersion(ERROR); if (!taskDirectoryExists) { InitTaskDirectory(jobId, upstreamTaskId); } nodeName = text_to_cstring(nodeNameText); /* we've made sure the file names are sanitized, safe to fetch as superuser */ FetchRegularFileAsSuperUser(nodeName, nodePort, remoteFilename, taskFilename); PG_RETURN_VOID(); } /* * worker_fetch_query_results_file fetches a query results file from the remote * node. The function assumes an upstream compute task depends on this query * results file, and therefore directly fetches the file into the upstream * task's directory. */ Datum worker_fetch_query_results_file(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 queryTaskId = PG_GETARG_UINT32(1); uint32 upstreamTaskId = PG_GETARG_UINT32(2); text *nodeNameText = PG_GETARG_TEXT_P(3); uint32 nodePort = PG_GETARG_UINT32(4); char *nodeName = NULL; /* remote filename is / */ StringInfo remoteDirectoryName = JobDirectoryName(jobId); StringInfo remoteFilename = TaskFilename(remoteDirectoryName, queryTaskId); /* local filename is // */ StringInfo taskDirectoryName = TaskDirectoryName(jobId, upstreamTaskId); StringInfo taskFilename = TaskFilename(taskDirectoryName, queryTaskId); /* * If we are the first function to fetch a file for the upstream task, the * task directory does not exist. We then lock and create the directory. */ bool taskDirectoryExists = DirectoryExists(taskDirectoryName); CheckCitusVersion(ERROR); if (!taskDirectoryExists) { InitTaskDirectory(jobId, upstreamTaskId); } nodeName = text_to_cstring(nodeNameText); /* we've made sure the file names are sanitized, safe to fetch as superuser */ FetchRegularFileAsSuperUser(nodeName, nodePort, remoteFilename, taskFilename); PG_RETURN_VOID(); } /* Constructs a standardized task file path for given directory and task id. */ StringInfo TaskFilename(StringInfo directoryName, uint32 taskId) { StringInfo taskFilename = makeStringInfo(); appendStringInfo(taskFilename, "%s/%s%0*u", directoryName->data, TASK_FILE_PREFIX, MIN_TASK_FILENAME_WIDTH, taskId); return taskFilename; } /* * FetchRegularFileAsSuperUser copies a file from a remote node in an idempotent * manner. It connects to the remote node as superuser to give file access. * Callers must make sure that the file names are sanitized. 
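 *
 * The copy is made idempotent by first streaming into a uniquely named
 * attempt file (the local name plus a random id and the attempt suffix) and
 * only renaming it over the target path once the transfer has finished, so a
 * crashed or concurrent fetch never leaves a partial file under the final
 * name.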
*/ static void FetchRegularFileAsSuperUser(const char *nodeName, uint32 nodePort, StringInfo remoteFilename, StringInfo localFilename) { char *nodeUser = NULL; StringInfo attemptFilename = NULL; StringInfo transmitCommand = NULL; uint32 randomId = (uint32) random(); bool received = false; int renamed = 0; /* * We create an attempt file to signal that the file is still in transit. We * further append a random id to the filename to handle the unexpected case * of another process concurrently fetching the same file. */ attemptFilename = makeStringInfo(); appendStringInfo(attemptFilename, "%s_%0*u%s", localFilename->data, MIN_TASK_FILENAME_WIDTH, randomId, ATTEMPT_FILE_SUFFIX); transmitCommand = makeStringInfo(); appendStringInfo(transmitCommand, TRANSMIT_REGULAR_COMMAND, remoteFilename->data); /* connect as superuser to give file access */ nodeUser = CitusExtensionOwnerName(); received = ReceiveRegularFile(nodeName, nodePort, nodeUser, transmitCommand, attemptFilename); if (!received) { ereport(ERROR, (errmsg("could not receive file \"%s\" from %s:%u", remoteFilename->data, nodeName, nodePort))); } /* atomically rename the attempt file */ renamed = rename(attemptFilename->data, localFilename->data); if (renamed != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not rename file \"%s\" to \"%s\": %m", attemptFilename->data, localFilename->data))); } } /* * ReceiveRegularFile creates a local file at the given file path, and connects * to remote database that has the given node name and port number. The function * then issues the given transmit command using client-side logic (libpq), reads * the remote file's contents, and appends these contents to the local file. On * success, the function returns success; on failure, it cleans up all resources * and returns false. 
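 *
 * The exchange proceeds in three steps: open (and truncate) the local file,
 * send the transmit command over a fresh libpq connection, then stream the
 * resulting COPY data into the file until the remote side reports that the
 * copy is done.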
*/ static bool ReceiveRegularFile(const char *nodeName, uint32 nodePort, const char *nodeUser, StringInfo transmitCommand, StringInfo filePath) { int32 fileDescriptor = -1; char filename[MAXPGPATH]; int closed = -1; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); const int fileMode = (S_IRUSR | S_IWUSR); QueryStatus queryStatus = CLIENT_INVALID_QUERY; int32 connectionId = INVALID_CONNECTION_ID; char *nodeDatabase = NULL; bool querySent = false; bool queryReady = false; bool copyDone = false; /* create local file to append remote data to */ snprintf(filename, MAXPGPATH, "%s", filePath->data); fileDescriptor = BasicOpenFile(filename, fileFlags, fileMode); if (fileDescriptor < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", filePath->data))); return false; } /* we use the same database name on the master and worker nodes */ nodeDatabase = get_database_name(MyDatabaseId); /* connect to remote node */ connectionId = MultiClientConnect(nodeName, nodePort, nodeDatabase, nodeUser); if (connectionId == INVALID_CONNECTION_ID) { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); return false; } /* send request to remote node to start transmitting data */ querySent = MultiClientSendQuery(connectionId, transmitCommand->data); if (!querySent) { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); return false; } /* loop until the remote node acknowledges our transmit request */ while (!queryReady) { ResultStatus resultStatus = MultiClientResultStatus(connectionId); if (resultStatus == CLIENT_RESULT_READY) { queryReady = true; } else if (resultStatus == CLIENT_RESULT_BUSY) { /* remote node did not respond; wait for longer */ long sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepIntervalPerCycle); } else { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); return false; } } /* check query response is as expected */ queryStatus = MultiClientQueryStatus(connectionId); if (queryStatus != CLIENT_QUERY_COPY) { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); return false; } /* loop until we receive and append all the data from remote node */ while (!copyDone) { CopyStatus copyStatus = MultiClientCopyData(connectionId, fileDescriptor); if (copyStatus == CLIENT_COPY_DONE) { copyDone = true; } else if (copyStatus == CLIENT_COPY_MORE) { /* remote node will continue to send more data */ } else { ReceiveResourceCleanup(connectionId, filename, fileDescriptor); return false; } } /* we are done executing; release the connection and the file handle */ MultiClientDisconnect(connectionId); closed = close(fileDescriptor); if (closed < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not close file \"%s\": %m", filename))); /* if we failed to close file, try to delete it before erroring out */ DeleteFile(filename); return false; } /* we successfully received the remote file */ ereport(DEBUG2, (errmsg("received remote file \"%s\"", filename))); return true; } /* * ReceiveResourceCleanup gets called if an error occurs during file receiving. * The function closes the connection, and closes and deletes the local file. 
*/ static void ReceiveResourceCleanup(int32 connectionId, const char *filename, int32 fileDescriptor) { if (connectionId != INVALID_CONNECTION_ID) { MultiClientDisconnect(connectionId); } if (fileDescriptor != -1) { int closed = -1; int deleted = -1; closed = close(fileDescriptor); if (closed < 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not close file \"%s\": %m", filename))); } deleted = unlink(filename); if (deleted != 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not delete file \"%s\": %m", filename))); } } } /* Deletes file with the given filename. */ static void DeleteFile(const char *filename) { int deleted = unlink(filename); if (deleted != 0) { ereport(WARNING, (errcode_for_file_access(), errmsg("could not delete file \"%s\": %m", filename))); } } /* * worker_apply_shard_ddl_command extends table, index, or constraint names in * the given DDL command. The function then applies this extended DDL command * against the database. */ Datum worker_apply_shard_ddl_command(PG_FUNCTION_ARGS) { uint64 shardId = PG_GETARG_INT64(0); text *schemaNameText = PG_GETARG_TEXT_P(1); text *ddlCommandText = PG_GETARG_TEXT_P(2); char *schemaName = text_to_cstring(schemaNameText); const char *ddlCommand = text_to_cstring(ddlCommandText); Node *ddlCommandNode = ParseTreeNode(ddlCommand); CheckCitusVersion(ERROR); /* extend names in ddl command and apply extended command */ RelayEventExtendNames(ddlCommandNode, schemaName, shardId); CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); PG_RETURN_VOID(); } /* * worker_apply_inter_shard_ddl_command extends table, index, or constraint names in * the given DDL command. The function then applies this extended DDL command * against the database. */ Datum worker_apply_inter_shard_ddl_command(PG_FUNCTION_ARGS) { uint64 leftShardId = PG_GETARG_INT64(0); text *leftShardSchemaNameText = PG_GETARG_TEXT_P(1); uint64 rightShardId = PG_GETARG_INT64(2); text *rightShardSchemaNameText = PG_GETARG_TEXT_P(3); text *ddlCommandText = PG_GETARG_TEXT_P(4); char *leftShardSchemaName = text_to_cstring(leftShardSchemaNameText); char *rightShardSchemaName = text_to_cstring(rightShardSchemaNameText); const char *ddlCommand = text_to_cstring(ddlCommandText); Node *ddlCommandNode = ParseTreeNode(ddlCommand); CheckCitusVersion(ERROR); /* extend names in ddl command and apply extended command */ RelayEventExtendNamesForInterShardCommands(ddlCommandNode, leftShardId, leftShardSchemaName, rightShardId, rightShardSchemaName); CitusProcessUtility(ddlCommandNode, ddlCommand, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); PG_RETURN_VOID(); } /* * worker_apply_sequence_command takes a CREATE SEQUENCE command string, runs the * CREATE SEQUENCE command then creates and runs an ALTER SEQUENCE statement * which adjusts the minvalue and maxvalue of the sequence such that the sequence * creates globally unique values. 
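 *
 * An invocation sketch (the sequence name is illustrative):
 *
 *   SELECT worker_apply_sequence_command('CREATE SEQUENCE public.user_id_seq');
 *
 * After the CREATE SEQUENCE runs, AlterSequenceMinMax() below shifts the
 * sequence into the value range reserved for this worker's group.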
*/ Datum worker_apply_sequence_command(PG_FUNCTION_ARGS) { text *commandText = PG_GETARG_TEXT_P(0); const char *commandString = text_to_cstring(commandText); Node *commandNode = ParseTreeNode(commandString); CreateSeqStmt *createSequenceStatement = NULL; char *sequenceName = NULL; char *sequenceSchema = NULL; Oid sequenceRelationId = InvalidOid; NodeTag nodeType = nodeTag(commandNode); CheckCitusVersion(ERROR); if (nodeType != T_CreateSeqStmt) { ereport(ERROR, (errmsg("must call worker_apply_sequence_command with a CREATE" " SEQUENCE command string"))); } /* run the CREATE SEQUENCE command */ CitusProcessUtility(commandNode, commandString, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); CommandCounterIncrement(); createSequenceStatement = (CreateSeqStmt *) commandNode; sequenceName = createSequenceStatement->sequence->relname; sequenceSchema = createSequenceStatement->sequence->schemaname; createSequenceStatement = (CreateSeqStmt *) commandNode; sequenceRelationId = RangeVarGetRelid(createSequenceStatement->sequence, AccessShareLock, false); Assert(sequenceRelationId != InvalidOid); AlterSequenceMinMax(sequenceRelationId, sequenceSchema, sequenceName); PG_RETURN_VOID(); } /* * worker_fetch_regular_table caches the given PostgreSQL table on the local * node. The function caches this table by trying the given list of node names * and node ports in sequential order. On success, the function simply returns. */ Datum worker_fetch_regular_table(PG_FUNCTION_ARGS) { text *regularTableName = PG_GETARG_TEXT_P(0); uint64 generationStamp = PG_GETARG_INT64(1); ArrayType *nodeNameObject = PG_GETARG_ARRAYTYPE_P(2); ArrayType *nodePortObject = PG_GETARG_ARRAYTYPE_P(3); CheckCitusVersion(ERROR); /* * Run common logic to fetch the remote table, and use the provided function * pointer to perform the actual table fetching. */ FetchTableCommon(regularTableName, generationStamp, nodeNameObject, nodePortObject, &FetchRegularTable); PG_RETURN_VOID(); } /* * worker_fetch_foreign_file caches the given file-backed foreign table on the * local node. The function caches this table by trying the given list of node * names and node ports in sequential order. On success, the function returns. */ Datum worker_fetch_foreign_file(PG_FUNCTION_ARGS) { text *foreignTableName = PG_GETARG_TEXT_P(0); uint64 foreignFileSize = PG_GETARG_INT64(1); ArrayType *nodeNameObject = PG_GETARG_ARRAYTYPE_P(2); ArrayType *nodePortObject = PG_GETARG_ARRAYTYPE_P(3); CheckCitusVersion(ERROR); /* * Run common logic to fetch the remote table, and use the provided function * pointer to perform the actual table fetching. */ FetchTableCommon(foreignTableName, foreignFileSize, nodeNameObject, nodePortObject, &FetchForeignTable); PG_RETURN_VOID(); } /* * FetchTableCommon executes common logic that wraps around the actual data * fetching function. This common logic includes ensuring that only one process * tries to fetch this table at any given time, and that data fetch operations * are retried in case of node failures. 
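 *
 * For example (node names and ports are illustrative), given the node name
 * array {'worker-1', 'worker-2'} and the port array {5432, 5432}, the fetch
 * is attempted against worker-1 first, and worker-2 is only tried if that
 * first attempt fails.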
*/ static void FetchTableCommon(text *tableNameText, uint64 remoteTableSize, ArrayType *nodeNameObject, ArrayType *nodePortObject, bool (*FetchTableFunction)(const char *, uint32, const char *)) { uint64 shardId = INVALID_SHARD_ID; Oid relationId = InvalidOid; List *relationNameList = NIL; RangeVar *relation = NULL; uint32 nodeIndex = 0; bool tableFetched = false; char *tableName = text_to_cstring(tableNameText); Datum *nodeNameArray = DeconstructArrayObject(nodeNameObject); Datum *nodePortArray = DeconstructArrayObject(nodePortObject); int32 nodeNameCount = ArrayObjectCount(nodeNameObject); int32 nodePortCount = ArrayObjectCount(nodePortObject); /* we should have the same number of node names and port numbers */ if (nodeNameCount != nodePortCount) { ereport(ERROR, (errmsg("node name array size: %d and node port array size: %d" " do not match", nodeNameCount, nodePortCount))); } /* * We lock on the shardId, but do not unlock. When the function returns, and * the transaction for this function commits, this lock will automatically * be released. This ensures that concurrent caching commands will see the * newly created table when they acquire the lock (in read committed mode). */ shardId = ExtractShardId(tableName); LockShardResource(shardId, AccessExclusiveLock); relationNameList = textToQualifiedNameList(tableNameText); relation = makeRangeVarFromNameList(relationNameList); relationId = RangeVarGetRelid(relation, NoLock, true); /* check if we already fetched the table */ if (relationId != InvalidOid) { uint64 localTableSize = 0; if (!ExpireCachedShards) { return; } /* * Check if the cached shard has the same size on disk as it has as on * the placement (is up to date). * * Note 1: performing updates or deletes on the original shard leads to * inconsistent sizes between different databases in which case the data * would be fetched every time, or worse, the placement would get into * a deadlock when it tries to fetch from itself while holding the lock. * Therefore, this option is disabled by default. * * Note 2: when appending data to a shard, the size on disk only * increases when a new page is added (the next 8kB block). */ localTableSize = LocalTableSize(relationId); if (remoteTableSize > localTableSize) { /* table is not up to date, drop the table */ ObjectAddress tableObject = { InvalidOid, InvalidOid, 0 }; tableObject.classId = RelationRelationId; tableObject.objectId = relationId; tableObject.objectSubId = 0; performDeletion(&tableObject, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); } else { /* table is up to date */ return; } } /* loop until we fetch the table or try all nodes */ while (!tableFetched && (nodeIndex < nodeNameCount)) { Datum nodeNameDatum = nodeNameArray[nodeIndex]; Datum nodePortDatum = nodePortArray[nodeIndex]; char *nodeName = TextDatumGetCString(nodeNameDatum); uint32 nodePort = DatumGetUInt32(nodePortDatum); tableFetched = (*FetchTableFunction)(nodeName, nodePort, tableName); nodeIndex++; } /* error out if we tried all nodes and could not fetch the table */ if (!tableFetched) { ereport(ERROR, (errmsg("could not fetch relation: \"%s\"", tableName))); } } /* LocalTableSize returns the size on disk of the given table. 
*/ static uint64 LocalTableSize(Oid relationId) { uint64 tableSize = 0; char relationType = 0; Datum relationIdDatum = ObjectIdGetDatum(relationId); relationType = get_rel_relkind(relationId); if (RegularTable(relationId)) { Datum tableSizeDatum = DirectFunctionCall1(pg_table_size, relationIdDatum); tableSize = DatumGetInt64(tableSizeDatum); } else if (relationType == RELKIND_FOREIGN_TABLE) { bool cstoreTable = CStoreTable(relationId); if (cstoreTable) { /* extract schema name of cstore */ Oid cstoreId = get_extension_oid(CSTORE_FDW_NAME, false); Oid cstoreSchemaOid = get_extension_schema(cstoreId); const char *cstoreSchemaName = get_namespace_name(cstoreSchemaOid); const int tableSizeArgumentCount = 1; Oid tableSizeFunctionOid = FunctionOid(cstoreSchemaName, CSTORE_TABLE_SIZE_FUNCTION_NAME, tableSizeArgumentCount); Datum tableSizeDatum = OidFunctionCall1(tableSizeFunctionOid, relationIdDatum); tableSize = DatumGetInt64(tableSizeDatum); } else { char *relationName = get_rel_name(relationId); struct stat fileStat; int statOK = 0; StringInfo localFilePath = makeStringInfo(); appendStringInfo(localFilePath, FOREIGN_CACHED_FILE_PATH, relationName); /* extract the file size using stat, analogous to pg_stat_file */ statOK = stat(localFilePath->data, &fileStat); if (statOK < 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", localFilePath->data))); } tableSize = (uint64) fileStat.st_size; } } else { char *relationName = get_rel_name(relationId); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot get size for table \"%s\"", relationName), errdetail("Only regular and foreign tables are supported."))); } return tableSize; } /* Extracts shard id from the given table name, and returns it. */ static uint64 ExtractShardId(const char *tableName) { uint64 shardId = 0; char *shardIdString = NULL; char *shardIdStringEnd = NULL; /* find the last underscore and increment for shardId string */ shardIdString = strrchr(tableName, SHARD_NAME_SEPARATOR); if (shardIdString == NULL) { ereport(ERROR, (errmsg("could not extract shardId from table name \"%s\"", tableName))); } shardIdString++; #ifdef HAVE_STRTOULL errno = 0; shardId = strtoull(shardIdString, &shardIdStringEnd, 0); if (errno != 0 || (*shardIdStringEnd != '\0')) { ereport(ERROR, (errmsg("could not extract shardId from table name \"%s\"", tableName))); } #else ereport(ERROR, (errmsg("could not extract shardId from table name"), errhint("Your platform does not support strtoull()"))); #endif return shardId; } /* * FetchRegularTable fetches the given table's data using the copy out command. * The function then fetches the DDL commands necessary to create this table's * replica, and locally applies these DDL commands. Last, the function copies * the fetched table data into the created table; and on success, returns true. * On failure due to connectivity issues with remote node, the function returns * false. On other types of failures, the function errors out. 
*/ static bool FetchRegularTable(const char *nodeName, uint32 nodePort, const char *tableName) { StringInfo localFilePath = NULL; StringInfo remoteCopyCommand = NULL; List *ddlCommandList = NIL; ListCell *ddlCommandCell = NULL; CopyStmt *localCopyCommand = NULL; RangeVar *localTable = NULL; uint64 shardId = 0; bool received = false; StringInfo queryString = NULL; const char *tableOwner = NULL; Oid tableOwnerId = InvalidOid; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; List *tableNameList = NIL; /* copy remote table's data to this node in an idempotent manner */ shardId = ExtractShardId(tableName); localFilePath = makeStringInfo(); appendStringInfo(localFilePath, "base/%s/%s" UINT64_FORMAT, PG_JOB_CACHE_DIR, TABLE_FILE_PREFIX, shardId); remoteCopyCommand = makeStringInfo(); appendStringInfo(remoteCopyCommand, COPY_OUT_COMMAND, tableName); received = ReceiveRegularFile(nodeName, nodePort, NULL, remoteCopyCommand, localFilePath); if (!received) { return false; } /* fetch the ddl commands needed to create the table */ tableOwner = RemoteTableOwner(nodeName, nodePort, tableName); if (tableOwner == NULL) { return false; } tableOwnerId = get_role_oid(tableOwner, false); /* fetch the ddl commands needed to create the table */ ddlCommandList = TableDDLCommandList(nodeName, nodePort, tableName); if (ddlCommandList == NIL) { return false; } /* * Apply DDL commands against the database. Note that on failure from here * on, we immediately error out instead of returning false. Have to do * this as the table's owner to ensure the local table is created with * compatible permissions. */ GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(tableOwnerId, SECURITY_LOCAL_USERID_CHANGE); foreach(ddlCommandCell, ddlCommandList) { StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell); Node *ddlCommandNode = ParseTreeNode(ddlCommand->data); CitusProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); CommandCounterIncrement(); } /* * Copy local file into the relation. We call ProcessUtility() instead of * directly calling DoCopy() because some extensions (e.g. cstore_fdw) hook * into process utility to provide their custom COPY behavior. */ tableNameList = stringToQualifiedNameList(tableName); localTable = makeRangeVarFromNameList(tableNameList); localCopyCommand = CopyStatement(localTable, localFilePath->data); queryString = makeStringInfo(); appendStringInfo(queryString, COPY_IN_COMMAND, tableName, localFilePath->data); CitusProcessUtility((Node *) localCopyCommand, queryString->data, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); /* finally delete the temporary file we created */ DeleteFile(localFilePath->data); SetUserIdAndSecContext(savedUserId, savedSecurityContext); return true; } /* * FetchForeignTable fetches the foreign file for the given table name from the * remote node. The function then fetches the DDL commands needed to create the * table, and applies these DDL commands locally to create the foreign table. * On success, the function returns true. On failure due to connectivity issues * with remote node, the function returns false. On failure due to applying DDL * commands against the local database, the function errors out. 
*/ static bool FetchForeignTable(const char *nodeName, uint32 nodePort, const char *tableName) { const char *nodeUser = NULL; StringInfo localFilePath = NULL; StringInfo remoteFilePath = NULL; StringInfo transmitCommand = NULL; StringInfo alterTableCommand = NULL; bool received = false; List *ddlCommandList = NIL; ListCell *ddlCommandCell = NULL; /* * Fetch a foreign file to this node in an idempotent manner. It's OK that * this file name lacks the schema, as the table name will have a shard id * attached to it, which is unique (so conflicts are avoided even if two * tables in different schemas have the same name). */ localFilePath = makeStringInfo(); appendStringInfo(localFilePath, FOREIGN_CACHED_FILE_PATH, tableName); remoteFilePath = ForeignFilePath(nodeName, nodePort, tableName); if (remoteFilePath == NULL) { return false; } transmitCommand = makeStringInfo(); appendStringInfo(transmitCommand, TRANSMIT_REGULAR_COMMAND, remoteFilePath->data); /* * We allow some arbitrary input in the file name and connect to the remote * node as superuser to transmit. Therefore, we only allow calling this * function when already running as superuser. */ EnsureSuperUser(); nodeUser = CitusExtensionOwnerName(); received = ReceiveRegularFile(nodeName, nodePort, nodeUser, transmitCommand, localFilePath); if (!received) { return false; } /* fetch the ddl commands needed to create the table */ ddlCommandList = TableDDLCommandList(nodeName, nodePort, tableName); if (ddlCommandList == NIL) { return false; } alterTableCommand = makeStringInfo(); appendStringInfo(alterTableCommand, SET_FOREIGN_TABLE_FILENAME, tableName, localFilePath->data); ddlCommandList = lappend(ddlCommandList, alterTableCommand); /* * Apply DDL commands against the database. Note that on failure here, we * immediately error out instead of returning false. */ foreach(ddlCommandCell, ddlCommandList) { StringInfo ddlCommand = (StringInfo) lfirst(ddlCommandCell); Node *ddlCommandNode = ParseTreeNode(ddlCommand->data); CitusProcessUtility(ddlCommandNode, ddlCommand->data, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); CommandCounterIncrement(); } return true; } /* * RemoteTableOwner takes in the given table name, and fetches the owner of * the table. If an error occurs during fetching, return NULL. */ static const char * RemoteTableOwner(const char *nodeName, uint32 nodePort, const char *tableName) { List *ownerList = NIL; StringInfo queryString = NULL; StringInfo relationOwner; MultiConnection *connection = NULL; uint32 connectionFlag = FORCE_NEW_CONNECTION; PGresult *result = NULL; queryString = makeStringInfo(); appendStringInfo(queryString, GET_TABLE_OWNER, tableName); connection = GetNodeConnection(connectionFlag, nodeName, nodePort); ExecuteOptionalRemoteCommand(connection, queryString->data, &result); ownerList = ReadFirstColumnAsText(result); if (list_length(ownerList) != 1) { return NULL; } relationOwner = (StringInfo) linitial(ownerList); return relationOwner->data; } /* * TableDDLCommandList takes in the given table name, and fetches the list of * DDL commands used in creating the table. If an error occurs during fetching, * the function returns an empty list. 
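 *
 * The returned list typically holds one statement per DDL event, for example
 * (illustrative only) the CREATE TABLE statement for the relation followed by
 * any CREATE INDEX statements defined on it.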
*/ List * TableDDLCommandList(const char *nodeName, uint32 nodePort, const char *tableName) { List *ddlCommandList = NIL; StringInfo queryString = NULL; MultiConnection *connection = NULL; PGresult *result = NULL; uint32 connectionFlag = FORCE_NEW_CONNECTION; queryString = makeStringInfo(); appendStringInfo(queryString, GET_TABLE_DDL_EVENTS, tableName); connection = GetNodeConnection(connectionFlag, nodeName, nodePort); ExecuteOptionalRemoteCommand(connection, queryString->data, &result); ddlCommandList = ReadFirstColumnAsText(result); ForgetResults(connection); CloseConnection(connection); return ddlCommandList; } /* * ForeignFilePath takes in the foreign table name, and fetches this table's * remote file path. If an error occurs during fetching, the function returns * null. */ static StringInfo ForeignFilePath(const char *nodeName, uint32 nodePort, const char *tableName) { List *foreignPathList = NIL; StringInfo foreignPathCommand = NULL; StringInfo foreignPath = NULL; MultiConnection *connection = NULL; PGresult *result = NULL; int connectionFlag = FORCE_NEW_CONNECTION; foreignPathCommand = makeStringInfo(); appendStringInfo(foreignPathCommand, FOREIGN_FILE_PATH_COMMAND, tableName); connection = GetNodeConnection(connectionFlag, nodeName, nodePort); ExecuteOptionalRemoteCommand(connection, foreignPathCommand->data, &result); foreignPathList = ReadFirstColumnAsText(result); if (foreignPathList != NIL) { foreignPath = (StringInfo) linitial(foreignPathList); } return foreignPath; } /* * ExecuteRemoteQuery executes the given query, copies the query's results to a * sorted list, and returns this list. The function assumes that query results * have a single column, and asserts on that assumption. If results are empty, * or an error occurs during query runtime, the function returns an empty list. * If asUser is NULL the connection is established as the current user, * otherwise as the specified user. */ List * ExecuteRemoteQuery(const char *nodeName, uint32 nodePort, char *runAsUser, StringInfo queryString) { int32 connectionId = -1; bool querySent = false; bool queryReady = false; bool queryOK = false; void *queryResult = NULL; int rowCount = 0; int rowIndex = 0; int columnCount = 0; List *resultList = NIL; connectionId = MultiClientConnect(nodeName, nodePort, NULL, runAsUser); if (connectionId == INVALID_CONNECTION_ID) { return NIL; } querySent = MultiClientSendQuery(connectionId, queryString->data); if (!querySent) { MultiClientDisconnect(connectionId); return NIL; } while (!queryReady) { ResultStatus resultStatus = MultiClientResultStatus(connectionId); if (resultStatus == CLIENT_RESULT_READY) { queryReady = true; } else if (resultStatus == CLIENT_RESULT_BUSY) { long sleepIntervalPerCycle = RemoteTaskCheckInterval * 1000L; pg_usleep(sleepIntervalPerCycle); } else { MultiClientDisconnect(connectionId); return NIL; } } queryOK = MultiClientQueryResult(connectionId, &queryResult, &rowCount, &columnCount); if (!queryOK) { MultiClientDisconnect(connectionId); return NIL; } for (rowIndex = 0; rowIndex < rowCount; rowIndex++) { const int columnIndex = 0; char *rowValue = MultiClientGetValue(queryResult, rowIndex, columnIndex); StringInfo rowValueString = makeStringInfo(); appendStringInfoString(rowValueString, rowValue); Assert(columnCount == 1); resultList = lappend(resultList, rowValueString); } MultiClientClearResult(queryResult); MultiClientDisconnect(connectionId); return resultList; } /* * Parses the given DDL command, and returns the tree node for parsed command. 
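 *
 * A usage sketch (the DDL string is illustrative); this mirrors the pattern
 * used by worker_apply_shard_ddl_command() above:
 *
 *   const char *ddlCommand = "CREATE TABLE lineitem_102009 (l_orderkey bigint)";
 *   Node *parseTree = ParseTreeNode(ddlCommand);
 *   CitusProcessUtility(parseTree, ddlCommand, PROCESS_UTILITY_TOPLEVEL,
 *                       NULL, None_Receiver, NULL);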
*/ Node * ParseTreeNode(const char *ddlCommand) { Node *parseTreeNode = ParseTreeRawStmt(ddlCommand); #if (PG_VERSION_NUM >= 100000) parseTreeNode = ((RawStmt *) parseTreeNode)->stmt; #endif return parseTreeNode; } /* * Parses the given DDL command, and returns the tree node for parsed command. */ Node * ParseTreeRawStmt(const char *ddlCommand) { Node *parseTreeNode = NULL; List *parseTreeList = NULL; uint32 parseTreeCount = 0; parseTreeList = pg_parse_query(ddlCommand); /* log immediately if dictated by log statement */ if (check_log_statement(parseTreeList)) { ereport(LOG, (errmsg("statement: %s", ddlCommand), errhidestmt(true))); } parseTreeCount = list_length(parseTreeList); if (parseTreeCount != 1) { ereport(ERROR, (errmsg("cannot execute multiple utility events"))); } /* * xact.c rejects certain commands that are unsafe to run inside transaction * blocks. Since we only apply commands that relate to creating tables and * those commands are safe, we can safely set the ProcessUtilityContext to * PROCESS_UTILITY_TOPLEVEL. */ parseTreeNode = (Node *) linitial(parseTreeList); return parseTreeNode; } /* * worker_append_table_to_shard fetches the given remote table's data into the * local file system. The function then appends this file data into the given * shard. */ Datum worker_append_table_to_shard(PG_FUNCTION_ARGS) { text *shardQualifiedNameText = PG_GETARG_TEXT_P(0); text *sourceQualifiedNameText = PG_GETARG_TEXT_P(1); text *sourceNodeNameText = PG_GETARG_TEXT_P(2); uint32 sourceNodePort = PG_GETARG_UINT32(3); List *shardQualifiedNameList = textToQualifiedNameList(shardQualifiedNameText); List *sourceQualifiedNameList = textToQualifiedNameList(sourceQualifiedNameText); char *sourceNodeName = text_to_cstring(sourceNodeNameText); char *shardTableName = NULL; char *shardSchemaName = NULL; char *shardQualifiedName = NULL; char *sourceSchemaName = NULL; char *sourceTableName = NULL; char *sourceQualifiedName = NULL; StringInfo localFilePath = NULL; StringInfo sourceCopyCommand = NULL; CopyStmt *localCopyCommand = NULL; RangeVar *localTable = NULL; uint64 shardId = INVALID_SHARD_ID; bool received = false; StringInfo queryString = NULL; CheckCitusVersion(ERROR); /* We extract schema names and table names from qualified names */ DeconstructQualifiedName(shardQualifiedNameList, &shardSchemaName, &shardTableName); DeconstructQualifiedName(sourceQualifiedNameList, &sourceSchemaName, &sourceTableName); /* * We lock on the shardId, but do not unlock. When the function returns, and * the transaction for this function commits, this lock will automatically * be released. This ensures appends to a shard happen in a serial manner. 
*/ shardId = ExtractShardId(shardTableName); LockShardResource(shardId, AccessExclusiveLock); /* copy remote table's data to this node */ localFilePath = makeStringInfo(); appendStringInfo(localFilePath, "base/%s/%s" UINT64_FORMAT, PG_JOB_CACHE_DIR, TABLE_FILE_PREFIX, shardId); sourceQualifiedName = quote_qualified_identifier(sourceSchemaName, sourceTableName); sourceCopyCommand = makeStringInfo(); appendStringInfo(sourceCopyCommand, COPY_OUT_COMMAND, sourceQualifiedName); received = ReceiveRegularFile(sourceNodeName, sourceNodePort, NULL, sourceCopyCommand, localFilePath); if (!received) { ereport(ERROR, (errmsg("could not copy table \"%s\" from \"%s:%u\"", sourceTableName, sourceNodeName, sourceNodePort))); } /* copy local file into the given shard */ localTable = makeRangeVar(shardSchemaName, shardTableName, -1); localCopyCommand = CopyStatement(localTable, localFilePath->data); shardQualifiedName = quote_qualified_identifier(shardSchemaName, shardTableName); queryString = makeStringInfo(); appendStringInfo(queryString, COPY_IN_COMMAND, shardQualifiedName, localFilePath->data); CitusProcessUtility((Node *) localCopyCommand, queryString->data, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); /* finally delete the temporary file we created */ DeleteFile(localFilePath->data); PG_RETURN_VOID(); } /* * check_log_statement is a copy of postgres' check_log_statement function and * returns whether a statement ought to be logged or not. */ static bool check_log_statement(List *statementList) { ListCell *statementCell; if (log_statement == LOGSTMT_NONE) { return false; } if (log_statement == LOGSTMT_ALL) { return true; } /* else we have to inspect the statement(s) to see whether to log */ foreach(statementCell, statementList) { Node *statement = (Node *) lfirst(statementCell); if (GetCommandLogLevel(statement) <= log_statement) { return true; } } return false; } /* * AlterSequenceMinMax arranges the min and max value of the given sequence. The function * creates an ALTER SEQUENCE statement which sets the start, minvalue and maxvalue of * the given sequence. * * The function provides uniqueness by shifting the start of the sequence by * GetLocalGroupId() << 48 + 1 and sets a maxvalue which stops it from passing out any * values greater than: (GetLocalGroupId() + 1) << 48. * * This is to ensure every group of workers passes out values from a unique range, * and therefore that all values generated for the sequence are globally unique. */ static void AlterSequenceMinMax(Oid sequenceId, char *schemaName, char *sequenceName) { Form_pg_sequence sequenceData = pg_get_sequencedef(sequenceId); int64 startValue = 0; int64 maxValue = 0; #if (PG_VERSION_NUM >= 100000) int64 sequenceMaxValue = sequenceData->seqmax; int64 sequenceMinValue = sequenceData->seqmin; #else int64 sequenceMaxValue = sequenceData->max_value; int64 sequenceMinValue = sequenceData->min_value; #endif /* calculate min/max values that the sequence can generate in this worker */ startValue = (((int64) GetLocalGroupId()) << 48) + 1; maxValue = startValue + ((int64) 1 << 48); /* * We alter the sequence if the previously set min and max values are not equal to * their correct values. This happens when the sequence has been created * during shard creation, before the current worker had the metadata.
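 *
 * As a worked example (the group id is illustrative), a worker with group
 * id 1 starts its sequence at (1 << 48) + 1 = 281474976710657, while group 0
 * starts at 1; each group therefore hands out values from its own slice of
 * the bigint space.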
*/ if (sequenceMinValue != startValue || sequenceMaxValue != maxValue) { StringInfo startNumericString = makeStringInfo(); StringInfo maxNumericString = makeStringInfo(); Node *startFloatArg = NULL; Node *maxFloatArg = NULL; AlterSeqStmt *alterSequenceStatement = makeNode(AlterSeqStmt); const char *dummyString = "-"; alterSequenceStatement->sequence = makeRangeVar(schemaName, sequenceName, -1); /* * DefElem->arg can only hold literal ints up to int4, in order to represent * larger numbers we need to construct a float represented as a string. */ appendStringInfo(startNumericString, "%lu", startValue); startFloatArg = (Node *) makeFloat(startNumericString->data); appendStringInfo(maxNumericString, "%lu", maxValue); maxFloatArg = (Node *) makeFloat(maxNumericString->data); SetDefElemArg(alterSequenceStatement, "start", startFloatArg); SetDefElemArg(alterSequenceStatement, "minvalue", startFloatArg); SetDefElemArg(alterSequenceStatement, "maxvalue", maxFloatArg); SetDefElemArg(alterSequenceStatement, "restart", startFloatArg); /* since the command is an AlterSeqStmt, a dummy command string works fine */ CitusProcessUtility((Node *) alterSequenceStatement, dummyString, PROCESS_UTILITY_TOPLEVEL, NULL, None_Receiver, NULL); } } /* * SetDefElemArg scans through all the DefElem's of an AlterSeqStmt and * and sets the arg of the one with a defname of name to arg. * * If a DefElem with the given defname does not exist it is created and * added to the AlterSeqStmt. */ static void SetDefElemArg(AlterSeqStmt *statement, const char *name, Node *arg) { DefElem *defElem = NULL; ListCell *optionCell = NULL; foreach(optionCell, statement->options) { defElem = (DefElem *) lfirst(optionCell); if (strcmp(defElem->defname, name) == 0) { pfree(defElem->arg); defElem->arg = arg; return; } } #if (PG_VERSION_NUM >= 100000) defElem = makeDefElem((char *) name, arg, -1); #else defElem = makeDefElem((char *) name, arg); #endif statement->options = lappend(statement->options, defElem); } citus-7.0.3/src/backend/distributed/worker/worker_drop_protocol.c000066400000000000000000000100631317107136600253100ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_drop_protocol.c * * Routines for dropping distributed tables and their metadata on worker nodes. * * Copyright (c) 2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/genam.h" #include "access/heapam.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/pg_foreign_server.h" #include "distributed/citus_ruleutils.h" #include "distributed/distribution_column.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "foreign/foreign.h" #include "utils/fmgroids.h" PG_FUNCTION_INFO_V1(worker_drop_distributed_table); /* * worker_drop_distributed_table drops the distributed table with the given oid, * then, removes the associated rows from pg_dist_partition, pg_dist_shard and * pg_dist_placement. The function also drops the server for foreign tables. * * Note that drop fails if any dependent objects are present for any of the * distributed tables. Also, shard placements of the distributed tables are * not dropped as in the case of "DROP TABLE distributed_table;" command. * * The function errors out if the input relation Oid is not a regular or foreign table. 
* The function is meant to be called only by the coordinator, therefore requires * superuser privileges. */ Datum worker_drop_distributed_table(PG_FUNCTION_ARGS) { Datum relationIdDatum = PG_GETARG_OID(0); Oid relationId = DatumGetObjectId(relationIdDatum); ObjectAddress distributedTableObject = { InvalidOid, InvalidOid, 0 }; Relation distributedRelation = NULL; List *shardList = NULL; ListCell *shardCell = NULL; char relationKind = '\0'; CheckCitusVersion(ERROR); EnsureSuperUser(); shardList = LoadShardList(relationId); /* first check the relation type */ distributedRelation = relation_open(relationId, AccessShareLock); relationKind = distributedRelation->rd_rel->relkind; EnsureRelationKindSupported(relationId); /* close the relation since we do not need anymore */ relation_close(distributedRelation, AccessShareLock); /* prepare distributedTableObject for dropping the table */ distributedTableObject.classId = RelationRelationId; distributedTableObject.objectId = relationId; distributedTableObject.objectSubId = 0; /* drop the server for the foreign relations */ if (relationKind == RELKIND_FOREIGN_TABLE) { ObjectAddresses *objects = new_object_addresses(); ObjectAddress foreignServerObject = { InvalidOid, InvalidOid, 0 }; ForeignTable *foreignTable = GetForeignTable(relationId); Oid serverId = foreignTable->serverid; /* prepare foreignServerObject for dropping the server */ foreignServerObject.classId = ForeignServerRelationId; foreignServerObject.objectId = serverId; foreignServerObject.objectSubId = 0; /* add the addresses that are going to be dropped */ add_exact_object_address(&distributedTableObject, objects); add_exact_object_address(&foreignServerObject, objects); /* drop both the table and the server */ performMultipleDeletions(objects, DROP_RESTRICT, PERFORM_DELETION_INTERNAL); } else { /* drop the table with cascade since other tables may be referring to it */ performDeletion(&distributedTableObject, DROP_CASCADE, PERFORM_DELETION_INTERNAL); } /* iterate over shardList to delete the corresponding rows */ foreach(shardCell, shardList) { List *shardPlacementList = NIL; ListCell *shardPlacementCell = NULL; uint64 *shardIdPointer = (uint64 *) lfirst(shardCell); uint64 shardId = (*shardIdPointer); shardPlacementList = ShardPlacementList(shardId); foreach(shardPlacementCell, shardPlacementList) { ShardPlacement *placement = (ShardPlacement *) lfirst(shardPlacementCell); /* delete the row from pg_dist_placement */ DeleteShardPlacementRow(placement->placementId); } /* delete the row from pg_dist_shard */ DeleteShardRow(shardId); } /* delete the row from pg_dist_partition */ DeletePartitionRow(relationId); PG_RETURN_VOID(); } citus-7.0.3/src/backend/distributed/worker/worker_file_access_protocol.c000066400000000000000000000044611317107136600266110ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_file_access_protocol.c * * Routines for accessing file related information on this worker node. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "commands/defrem.h" #include "distributed/master_protocol.h" #include "distributed/worker_protocol.h" #include "foreign/foreign.h" #include "utils/builtins.h" #include "utils/lsyscache.h" /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(worker_foreign_file_path); PG_FUNCTION_INFO_V1(worker_find_block_local_path); /* * worker_foreign_file_path resolves the foreign table for the given table name, * and extracts and returns the file path associated with that foreign table. */ Datum worker_foreign_file_path(PG_FUNCTION_ARGS) { text *foreignTableName = PG_GETARG_TEXT_P(0); text *foreignFilePath = NULL; Oid relationId = ResolveRelationId(foreignTableName); ForeignTable *foreignTable = GetForeignTable(relationId); ListCell *optionCell = NULL; CheckCitusVersion(ERROR); foreach(optionCell, foreignTable->options) { DefElem *option = (DefElem *) lfirst(optionCell); char *optionName = option->defname; int compareResult = strncmp(optionName, FOREIGN_FILENAME_OPTION, MAXPGPATH); if (compareResult == 0) { char *optionValue = defGetString(option); foreignFilePath = cstring_to_text(optionValue); break; } } /* check that we found the filename option */ if (foreignFilePath == NULL) { char *relationName = get_rel_name(relationId); ereport(ERROR, (errmsg("could not find filename for foreign table: \"%s\"", relationName))); } PG_RETURN_TEXT_P(foreignFilePath); } /* * Protocol declaration for a function whose future implementation will find the * given HDFS block's local file path. */ Datum worker_find_block_local_path(PG_FUNCTION_ARGS) { int64 blockId = PG_GETARG_INT64(0); ArrayType *dataDirectoryObject = PG_GETARG_ARRAYTYPE_P(1); /* keep the compiler silent */ (void) blockId; (void) dataDirectoryObject; CheckCitusVersion(ERROR); ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("called function is currently unsupported"))); PG_RETURN_TEXT_P(NULL); } citus-7.0.3/src/backend/distributed/worker/worker_merge_protocol.c000066400000000000000000000432241317107136600254500ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_merge_protocol.c * * Routines for merging partitioned files into a single file or table. Merging * files is one of the three distributed execution primitives that we apply on * worker nodes. * * Copyright (c) 2012-2016, Citus Data, Inc.
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "miscadmin.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/pg_namespace.h" #include "commands/copy.h" #include "commands/tablecmds.h" #include "distributed/metadata_cache.h" #include "distributed/worker_protocol.h" #include "executor/spi.h" #include "nodes/makefuncs.h" #include "parser/parse_type.h" #include "storage/lmgr.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/snapmgr.h" #include "utils/syscache.h" #include "utils/tqual.h" /* Local functions forward declarations */ static List * ArrayObjectToCStringList(ArrayType *arrayObject); static void CreateTaskTable(StringInfo schemaName, StringInfo relationName, List *columnNameList, List *columnTypeList); static void CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName, StringInfo sourceDirectoryName); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(worker_merge_files_into_table); PG_FUNCTION_INFO_V1(worker_merge_files_and_run_query); PG_FUNCTION_INFO_V1(worker_cleanup_job_schema_cache); /* * worker_merge_files_into_table creates a task table within the job's schema, * which should have already been created by the task tracker protocol, and * copies files in its task directory into this table. If the schema doesn't * exist, the function defaults to the 'public' schema. Note that, unlike * partitioning functions, this function is not always idempotent. On success, * the function creates the table and loads data, and subsequent calls to the * function error out because the table already exists. On failure, the task * table creation commands are rolled back, and the function can be called * again. */ Datum worker_merge_files_into_table(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 taskId = PG_GETARG_UINT32(1); ArrayType *columnNameObject = PG_GETARG_ARRAYTYPE_P(2); ArrayType *columnTypeObject = PG_GETARG_ARRAYTYPE_P(3); StringInfo jobSchemaName = JobSchemaName(jobId); StringInfo taskTableName = TaskTableName(taskId); StringInfo taskDirectoryName = TaskDirectoryName(jobId, taskId); bool schemaExists = false; List *columnNameList = NIL; List *columnTypeList = NIL; Oid savedUserId = InvalidOid; int savedSecurityContext = 0; /* we should have the same number of column names and types */ int32 columnNameCount = ArrayObjectCount(columnNameObject); int32 columnTypeCount = ArrayObjectCount(columnTypeObject); CheckCitusVersion(ERROR); if (columnNameCount != columnTypeCount) { ereport(ERROR, (errmsg("column name array size: %d and type array size: %d" " do not match", columnNameCount, columnTypeCount))); } /* * If the schema for the job isn't already created by the task tracker * protocol, we fall to using the default 'public' schema.
*/ schemaExists = JobSchemaExists(jobSchemaName); if (!schemaExists) { resetStringInfo(jobSchemaName); appendStringInfoString(jobSchemaName, "public"); } /* create the task table and copy files into the table */ columnNameList = ArrayObjectToCStringList(columnNameObject); columnTypeList = ArrayObjectToCStringList(columnTypeObject); CreateTaskTable(jobSchemaName, taskTableName, columnNameList, columnTypeList); /* need superuser to copy from files */ GetUserIdAndSecContext(&savedUserId, &savedSecurityContext); SetUserIdAndSecContext(CitusExtensionOwner(), SECURITY_LOCAL_USERID_CHANGE); CopyTaskFilesFromDirectory(jobSchemaName, taskTableName, taskDirectoryName); SetUserIdAndSecContext(savedUserId, savedSecurityContext); PG_RETURN_VOID(); } /* * worker_merge_files_and_run_query creates a merge task table within the job's * schema, which should have already been created by the task tracker protocol. * It copies files in its task directory into this table. Then it runs final * query to create result table of the job. * * Note that here we followed a different approach to create a task table for merge * files than worker_merge_files_into_table(). In future we should unify these * two approaches. For this purpose creating a directory_fdw extension and using * it would make sense. Then we can merge files with a query or without query * through directory_fdw. */ Datum worker_merge_files_and_run_query(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 taskId = PG_GETARG_UINT32(1); text *createMergeTableQueryText = PG_GETARG_TEXT_P(2); text *createIntermediateTableQueryText = PG_GETARG_TEXT_P(3); const char *createMergeTableQuery = text_to_cstring(createMergeTableQueryText); const char *createIntermediateTableQuery = text_to_cstring(createIntermediateTableQueryText); StringInfo taskDirectoryName = TaskDirectoryName(jobId, taskId); StringInfo jobSchemaName = JobSchemaName(jobId); StringInfo intermediateTableName = TaskTableName(taskId); StringInfo mergeTableName = makeStringInfo(); StringInfo setSearchPathString = makeStringInfo(); bool schemaExists = false; int connected = 0; int setSearchPathResult = 0; int createMergeTableResult = 0; int createIntermediateTableResult = 0; int finished = 0; CheckCitusVersion(ERROR); /* * If the schema for the job isn't already created by the task tracker * protocol, we fall to using the default 'public' schema. 
*/ schemaExists = JobSchemaExists(jobSchemaName); if (!schemaExists) { resetStringInfo(jobSchemaName); appendStringInfoString(jobSchemaName, "public"); } appendStringInfo(setSearchPathString, SET_SEARCH_PATH_COMMAND, jobSchemaName->data); /* Add "public" to search path to access UDFs in public schema */ appendStringInfo(setSearchPathString, ",public"); connected = SPI_connect(); if (connected != SPI_OK_CONNECT) { ereport(ERROR, (errmsg("could not connect to SPI manager"))); } setSearchPathResult = SPI_exec(setSearchPathString->data, 0); if (setSearchPathResult < 0) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", setSearchPathString->data))); } createMergeTableResult = SPI_exec(createMergeTableQuery, 0); if (createMergeTableResult < 0) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", createMergeTableQuery))); } appendStringInfo(mergeTableName, "%s%s", intermediateTableName->data, MERGE_TABLE_SUFFIX); CopyTaskFilesFromDirectory(jobSchemaName, mergeTableName, taskDirectoryName); createIntermediateTableResult = SPI_exec(createIntermediateTableQuery, 0); if (createIntermediateTableResult < 0) { ereport(ERROR, (errmsg("execution was not successful \"%s\"", createIntermediateTableQuery))); } finished = SPI_finish(); if (finished != SPI_OK_FINISH) { ereport(ERROR, (errmsg("could not disconnect from SPI manager"))); } PG_RETURN_VOID(); } /* * worker_cleanup_job_schema_cache walks over all schemas in the database, and * removes schemas whose names start with the job schema prefix. Note that this * function does not perform any locking; we expect it to be called at process * start-up time before any merge tasks are run. Further note that this function * runs within the scope of a particular database (template1, postgres) and can * only delete schemas within that database. */ Datum worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS) { Relation pgNamespace = NULL; HeapScanDesc scanDescriptor = NULL; ScanKey scanKey = NULL; int scanKeyCount = 0; HeapTuple heapTuple = NULL; CheckCitusVersion(ERROR); pgNamespace = heap_open(NamespaceRelationId, AccessExclusiveLock); scanDescriptor = heap_beginscan_catalog(pgNamespace, scanKeyCount, scanKey); heapTuple = heap_getnext(scanDescriptor, ForwardScanDirection); while (HeapTupleIsValid(heapTuple)) { Form_pg_namespace schemaForm = (Form_pg_namespace) GETSTRUCT(heapTuple); char *schemaName = NameStr(schemaForm->nspname); char *jobSchemaFound = strstr(schemaName, JOB_SCHEMA_PREFIX); if (jobSchemaFound != NULL) { StringInfo jobSchemaName = makeStringInfo(); appendStringInfoString(jobSchemaName, schemaName); RemoveJobSchema(jobSchemaName); } heapTuple = heap_getnext(scanDescriptor, ForwardScanDirection); } heap_endscan(scanDescriptor); heap_close(pgNamespace, AccessExclusiveLock); PG_RETURN_VOID(); } /* Constructs a standardized job schema name for the given job id. */ StringInfo JobSchemaName(uint64 jobId) { StringInfo jobSchemaName = makeStringInfo(); appendStringInfo(jobSchemaName, "%s%0*" INT64_MODIFIER "u", JOB_SCHEMA_PREFIX, MIN_JOB_DIRNAME_WIDTH, jobId); return jobSchemaName; } /* Constructs a standardized task table name for the given task id. */ StringInfo TaskTableName(uint32 taskId) { StringInfo taskTableName = makeStringInfo(); appendStringInfo(taskTableName, "%s%0*u", TASK_TABLE_PREFIX, MIN_TASK_FILENAME_WIDTH, taskId); return taskTableName; } /* Creates a list of cstrings from a single dimensional array object. 
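 * For example, a text[] value such as {'l_orderkey', 'l_quantity'}
 * (illustrative column names) becomes a two-element list holding the
 * C strings "l_orderkey" and "l_quantity".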
*/ static List * ArrayObjectToCStringList(ArrayType *arrayObject) { List *cstringList = NIL; Datum *datumArray = DeconstructArrayObject(arrayObject); int32 arraySize = ArrayObjectCount(arrayObject); int32 arrayIndex = 0; for (arrayIndex = 0; arrayIndex < arraySize; arrayIndex++) { Datum datum = datumArray[arrayIndex]; char *cstring = TextDatumGetCString(datum); cstringList = lappend(cstringList, cstring); } Assert(cstringList != NIL); return cstringList; } /* Checks if a schema with the given schema name exists. */ bool JobSchemaExists(StringInfo schemaName) { Datum schemaNameDatum = CStringGetDatum(schemaName->data); bool schemaExists = SearchSysCacheExists(NAMESPACENAME, schemaNameDatum, 0, 0, 0); return schemaExists; } /* Removes the schema and all tables within the schema, if the schema exists. */ void RemoveJobSchema(StringInfo schemaName) { Datum schemaNameDatum = CStringGetDatum(schemaName->data); Oid schemaId = InvalidOid; schemaId = GetSysCacheOid(NAMESPACENAME, schemaNameDatum, 0, 0, 0); if (OidIsValid(schemaId)) { ObjectAddress schemaObject = { 0, 0, 0 }; bool permissionsOK = pg_namespace_ownercheck(schemaId, GetUserId()); if (!permissionsOK) { aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_NAMESPACE, schemaName->data); } schemaObject.classId = NamespaceRelationId; schemaObject.objectId = schemaId; schemaObject.objectSubId = 0; /* * We first delete all tables in this schema. Rather than relying on the * schema command, we call the dependency mechanism directly so that we * can suppress notice messages that are typically displayed during * cascading deletes. */ #if (PG_VERSION_NUM >= 100000) performDeletion(&schemaObject, DROP_CASCADE, PERFORM_DELETION_INTERNAL | PERFORM_DELETION_QUIETLY | PERFORM_DELETION_SKIP_ORIGINAL | PERFORM_DELETION_SKIP_EXTENSIONS); #else deleteWhatDependsOn(&schemaObject, false); #endif CommandCounterIncrement(); /* drop the empty schema */ performDeletion(&schemaObject, DROP_RESTRICT, 0); CommandCounterIncrement(); } else { ereport(DEBUG2, (errmsg("schema \"%s\" does not exist, skipping", schemaName->data))); } } /* Creates a simple table that only defines columns, in the given schema. */ static void CreateTaskTable(StringInfo schemaName, StringInfo relationName, List *columnNameList, List *columnTypeList) { CreateStmt *createStatement = NULL; RangeVar *relation = NULL; List *columnDefinitionList = NIL; Oid relationId PG_USED_FOR_ASSERTS_ONLY = InvalidOid; ObjectAddress relationObject; Assert(schemaName != NULL); Assert(relationName != NULL); /* * This new relation doesn't log to WAL, as the table creation and data copy * statements occur in the same transaction. Still, we want to make the * relation unlogged once we upgrade to PostgreSQL 9.1. */ relation = makeRangeVar(schemaName->data, relationName->data, -1); columnDefinitionList = ColumnDefinitionList(columnNameList, columnTypeList); createStatement = CreateStatement(relation, columnDefinitionList); #if (PG_VERSION_NUM >= 100000) relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL, NULL); #else relationObject = DefineRelation(createStatement, RELKIND_RELATION, InvalidOid, NULL); #endif relationId = relationObject.objectId; Assert(relationId != InvalidOid); CommandCounterIncrement(); } /* * ColumnDefinitionList creates and returns a list of column definition objects * from two lists of column names and types. As an example, this function takes * in two single elements lists: "l_quantity" and "decimal(15, 2)". 
The function * then returns a list with one column definition, where the column's name is * l_quantity, its type is numeric, and the type modifier represents (15, 2). */ List * ColumnDefinitionList(List *columnNameList, List *columnTypeList) { List *columnDefinitionList = NIL; ListCell *columnNameCell = NULL; ListCell *columnTypeCell = NULL; forboth(columnNameCell, columnNameList, columnTypeCell, columnTypeList) { const char *columnName = (const char *) lfirst(columnNameCell); const char *columnType = (const char *) lfirst(columnTypeCell); /* * We should have a SQL compatible column type declaration; we first * convert this type to PostgreSQL's type identifiers and modifiers. */ Oid columnTypeId = InvalidOid; int32 columnTypeMod = -1; bool missingOK = false; TypeName *typeName = NULL; ColumnDef *columnDefinition = NULL; parseTypeString(columnType, &columnTypeId, &columnTypeMod, missingOK); typeName = makeTypeNameFromOid(columnTypeId, columnTypeMod); /* we then create the column definition */ columnDefinition = makeNode(ColumnDef); columnDefinition->colname = (char *) columnName; columnDefinition->typeName = typeName; columnDefinition->is_local = true; columnDefinition->is_not_null = false; columnDefinition->raw_default = NULL; columnDefinition->cooked_default = NULL; columnDefinition->constraints = NIL; columnDefinitionList = lappend(columnDefinitionList, columnDefinition); } return columnDefinitionList; } /* * CreateStatement creates and initializes a simple table create statement that * only has column definitions. */ CreateStmt * CreateStatement(RangeVar *relation, List *columnDefinitionList) { CreateStmt *createStatement = makeNode(CreateStmt); createStatement->relation = relation; createStatement->tableElts = columnDefinitionList; createStatement->inhRelations = NIL; createStatement->constraints = NIL; createStatement->options = NIL; createStatement->oncommit = ONCOMMIT_NOOP; createStatement->tablespacename = NULL; createStatement->if_not_exists = false; return createStatement; } /* * CopyTaskFilesFromDirectory finds all files in the given directory, except for * those having an attempt suffix. The function then copies these files into the * database table identified by the given schema and table name. 
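 *
 * For instance (file names are illustrative), if the task directory holds two
 * completed partition files and a third file that still carries the attempt
 * suffix, only the two completed files are copied; the in-flight attempt file
 * is skipped.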
*/ static void CopyTaskFilesFromDirectory(StringInfo schemaName, StringInfo relationName, StringInfo sourceDirectoryName) { const char *directoryName = sourceDirectoryName->data; struct dirent *directoryEntry = NULL; uint64 copiedRowTotal = 0; DIR *directory = AllocateDir(directoryName); if (directory == NULL) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not open directory \"%s\": %m", directoryName))); } directoryEntry = ReadDir(directory, directoryName); for (; directoryEntry != NULL; directoryEntry = ReadDir(directory, directoryName)) { const char *baseFilename = directoryEntry->d_name; const char *queryString = NULL; StringInfo fullFilename = NULL; RangeVar *relation = NULL; CopyStmt *copyStatement = NULL; uint64 copiedRowCount = 0; /* if system file or lingering task file, skip it */ if (strncmp(baseFilename, ".", MAXPGPATH) == 0 || strncmp(baseFilename, "..", MAXPGPATH) == 0 || strstr(baseFilename, ATTEMPT_FILE_SUFFIX) != NULL) { continue; } fullFilename = makeStringInfo(); appendStringInfo(fullFilename, "%s/%s", directoryName, baseFilename); /* build relation object and copy statement */ relation = makeRangeVar(schemaName->data, relationName->data, -1); copyStatement = CopyStatement(relation, fullFilename->data); if (BinaryWorkerCopyFormat) { #if (PG_VERSION_NUM >= 100000) DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary"), -1); #else DefElem *copyOption = makeDefElem("format", (Node *) makeString("binary")); #endif copyStatement->options = list_make1(copyOption); } #if (PG_VERSION_NUM >= 100000) { ParseState *pstate = make_parsestate(NULL); pstate->p_sourcetext = queryString; DoCopy(pstate, copyStatement, -1, -1, &copiedRowCount); free_parsestate(pstate); } #else DoCopy(copyStatement, queryString, &copiedRowCount); #endif copiedRowTotal += copiedRowCount; CommandCounterIncrement(); } ereport(DEBUG2, (errmsg("copied " UINT64_FORMAT " rows into table: \"%s.%s\"", copiedRowTotal, schemaName->data, relationName->data))); FreeDir(directory); } /* * CopyStatement creates and initializes a copy statement to read the given * file's contents into the given table, using copy's standard text format. */ CopyStmt * CopyStatement(RangeVar *relation, char *sourceFilename) { CopyStmt *copyStatement = makeNode(CopyStmt); copyStatement->relation = relation; copyStatement->query = NULL; copyStatement->attlist = NIL; copyStatement->options = NIL; copyStatement->is_from = true; copyStatement->is_program = false; copyStatement->filename = sourceFilename; return copyStatement; } citus-7.0.3/src/backend/distributed/worker/worker_partition_protocol.c000066400000000000000000001056521317107136600263660ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_partition_protocol.c * * Routines for partitioning table data into multiple files. Table partitioning * is one of the three distributed execution primitives that we apply on worker * nodes; and when partitioning data, we follow Hadoop's naming conventions as * much as possible. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "funcapi.h" #include "pgstat.h" #include #include #include #include #include #include "access/hash.h" #include "access/htup_details.h" #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "commands/copy.h" #include "commands/defrem.h" #include "distributed/multi_copy.h" #include "distributed/resource_lock.h" #include "distributed/transmit.h" #include "distributed/worker_protocol.h" #include "executor/spi.h" #include "mb/pg_wchar.h" #include "storage/lmgr.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/memutils.h" /* Config variables managed via guc.c */ bool BinaryWorkerCopyFormat = false; /* binary format for copying between workers */ int PartitionBufferSize = 16384; /* total partitioning buffer size in KB */ /* Local variables */ static uint32 FileBufferSizeInBytes = 0; /* file buffer size to init later */ /* Local functions forward declarations */ static StringInfo InitTaskAttemptDirectory(uint64 jobId, uint32 taskId); static uint32 FileBufferSize(int partitionBufferSizeInKB, uint32 fileCount); static FileOutputStream * OpenPartitionFiles(StringInfo directoryName, uint32 fileCount); static void ClosePartitionFiles(FileOutputStream *partitionFileArray, uint32 fileCount); static void RenameDirectory(StringInfo oldDirectoryName, StringInfo newDirectoryName); static void FileOutputStreamWrite(FileOutputStream file, StringInfo dataToWrite); static void FileOutputStreamFlush(FileOutputStream file); static void FilterAndPartitionTable(const char *filterQuery, const char *columnName, Oid columnType, uint32 (*PartitionIdFunction)(Datum, const void *), const void *partitionIdContext, FileOutputStream *partitionFileArray, uint32 fileCount); static int ColumnIndex(TupleDesc rowDescriptor, const char *columnName); static CopyOutState InitRowOutputState(void); static void ClearRowOutputState(CopyOutState copyState); static void OutputBinaryHeaders(FileOutputStream *partitionFileArray, uint32 fileCount); static void OutputBinaryFooters(FileOutputStream *partitionFileArray, uint32 fileCount); static uint32 RangePartitionId(Datum partitionValue, const void *context); static uint32 HashPartitionId(Datum partitionValue, const void *context); /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(worker_range_partition_table); PG_FUNCTION_INFO_V1(worker_hash_partition_table); /* * worker_range_partition_table executes the given filter query, repartitions * the filter query's results on a partitioning column, and writes the resulting * rows to a set of text files on local disk. The function then atomically * renames the directory in which the text files live to ensure deterministic * behavior. * * This function applies range partitioning through the use of a function * pointer and a range context object; for details, see RangePartitionId(). 
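 *
 * For example (split points are illustrative), passing the split points
 * {1000, 2000} produces three partition files: roughly, values below the
 * first split point, values between the two split points, and values above
 * the second one; see RangePartitionId() for the exact boundary handling.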
*/ Datum worker_range_partition_table(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 taskId = PG_GETARG_UINT32(1); text *filterQueryText = PG_GETARG_TEXT_P(2); text *partitionColumnText = PG_GETARG_TEXT_P(3); Oid partitionColumnType = PG_GETARG_OID(4); ArrayType *splitPointObject = PG_GETARG_ARRAYTYPE_P(5); const char *filterQuery = text_to_cstring(filterQueryText); const char *partitionColumn = text_to_cstring(partitionColumnText); RangePartitionContext *partitionContext = NULL; FmgrInfo *comparisonFunction = NULL; Datum *splitPointArray = NULL; int32 splitPointCount = 0; uint32 fileCount = 0; StringInfo taskDirectory = NULL; StringInfo taskAttemptDirectory = NULL; FileOutputStream *partitionFileArray = NULL; /* first check that array element's and partition column's types match */ Oid splitPointType = ARR_ELEMTYPE(splitPointObject); CheckCitusVersion(ERROR); if (splitPointType != partitionColumnType) { ereport(ERROR, (errmsg("partition column type %u and split point type %u " "do not match", partitionColumnType, splitPointType))); } /* use column's type information to get the comparison function */ comparisonFunction = GetFunctionInfo(partitionColumnType, BTREE_AM_OID, BTORDER_PROC); /* deserialize split points into their array representation */ splitPointArray = DeconstructArrayObject(splitPointObject); splitPointCount = ArrayObjectCount(splitPointObject); fileCount = splitPointCount + 1; /* range partitioning needs an extra bucket */ /* create range partition context object */ partitionContext = palloc0(sizeof(RangePartitionContext)); partitionContext->comparisonFunction = comparisonFunction; partitionContext->splitPointArray = splitPointArray; partitionContext->splitPointCount = splitPointCount; /* init directories and files to write the partitioned data to */ taskDirectory = InitTaskDirectory(jobId, taskId); taskAttemptDirectory = InitTaskAttemptDirectory(jobId, taskId); partitionFileArray = OpenPartitionFiles(taskAttemptDirectory, fileCount); FileBufferSizeInBytes = FileBufferSize(PartitionBufferSize, fileCount); /* call the partitioning function that does the actual work */ FilterAndPartitionTable(filterQuery, partitionColumn, partitionColumnType, &RangePartitionId, (const void *) partitionContext, partitionFileArray, fileCount); /* close partition files and atomically rename (commit) them */ ClosePartitionFiles(partitionFileArray, fileCount); RemoveDirectory(taskDirectory); RenameDirectory(taskAttemptDirectory, taskDirectory); PG_RETURN_VOID(); } /* * worker_hash_partition_table executes the given filter query, repartitions the * filter query's results on a partitioning column, and writes the resulting * rows to a set of text files on local disk. The function then atomically * renames the directory in which the text files live to ensure deterministic * behavior. * * This function applies hash partitioning through the use of a function pointer * and a hash context object; for details, see HashPartitionId(). 
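 *
 * Unlike its range counterpart above, this function receives the desired
 * partition count directly and opens exactly that many output files; rows
 * whose partition key is null always end up in the 0th partition file (see
 * FilterAndPartitionTable() and HashPartitionId() for details).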
*/ Datum worker_hash_partition_table(PG_FUNCTION_ARGS) { uint64 jobId = PG_GETARG_INT64(0); uint32 taskId = PG_GETARG_UINT32(1); text *filterQueryText = PG_GETARG_TEXT_P(2); text *partitionColumnText = PG_GETARG_TEXT_P(3); Oid partitionColumnType = PG_GETARG_OID(4); uint32 partitionCount = PG_GETARG_UINT32(5); const char *filterQuery = text_to_cstring(filterQueryText); const char *partitionColumn = text_to_cstring(partitionColumnText); HashPartitionContext *partitionContext = NULL; FmgrInfo *hashFunction = NULL; StringInfo taskDirectory = NULL; StringInfo taskAttemptDirectory = NULL; FileOutputStream *partitionFileArray = NULL; uint32 fileCount = partitionCount; CheckCitusVersion(ERROR); /* use column's type information to get the hashing function */ hashFunction = GetFunctionInfo(partitionColumnType, HASH_AM_OID, HASHPROC); /* create hash partition context object */ partitionContext = palloc0(sizeof(HashPartitionContext)); partitionContext->hashFunction = hashFunction; partitionContext->partitionCount = partitionCount; /* init directories and files to write the partitioned data to */ taskDirectory = InitTaskDirectory(jobId, taskId); taskAttemptDirectory = InitTaskAttemptDirectory(jobId, taskId); partitionFileArray = OpenPartitionFiles(taskAttemptDirectory, fileCount); FileBufferSizeInBytes = FileBufferSize(PartitionBufferSize, fileCount); /* call the partitioning function that does the actual work */ FilterAndPartitionTable(filterQuery, partitionColumn, partitionColumnType, &HashPartitionId, (const void *) partitionContext, partitionFileArray, fileCount); /* close partition files and atomically rename (commit) them */ ClosePartitionFiles(partitionFileArray, fileCount); RemoveDirectory(taskDirectory); RenameDirectory(taskAttemptDirectory, taskDirectory); PG_RETURN_VOID(); } /* * GetFunctionInfo first resolves the operator for the given data type, access * method, and support procedure. The function then uses the resolved operator's * identifier to fill in a function manager object, and returns this object. */ FmgrInfo * GetFunctionInfo(Oid typeId, Oid accessMethodId, int16 procedureId) { FmgrInfo *functionInfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo)); /* get default operator class from pg_opclass for datum type */ Oid operatorClassId = GetDefaultOpClass(typeId, accessMethodId); Oid operatorFamilyId = get_opclass_family(operatorClassId); Oid operatorClassInputType = get_opclass_input_type(operatorClassId); Oid operatorId = get_opfamily_proc(operatorFamilyId, operatorClassInputType, operatorClassInputType, procedureId); if (operatorId == InvalidOid) { ereport(ERROR, (errmsg("could not find function for data typeId %u", typeId))); } /* fill in the FmgrInfo struct using the operatorId */ fmgr_info(operatorId, functionInfo); return functionInfo; } /* * DeconstructArrayObject takes in a single dimensional array, and deserializes * this array's members into an array of datum objects. The function then * returns this datum array. 
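 *
 * A typical caller pairs this function with ArrayObjectCount(), roughly as
 * in the sketch below (mirroring worker_range_partition_table() above):
 *
 *     Datum *splitPointArray = DeconstructArrayObject(splitPointObject);
 *     int32 splitPointCount = ArrayObjectCount(splitPointObject);
 *
 * Arrays containing null elements are rejected with an error up front, so
 * callers may use the returned datum array without a separate null check.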
*/ Datum * DeconstructArrayObject(ArrayType *arrayObject) { Datum *datumArray = NULL; bool *datumArrayNulls = NULL; int datumArrayLength = 0; Oid typeId = InvalidOid; bool typeByVal = false; char typeAlign = 0; int16 typeLength = 0; bool arrayHasNull = ARR_HASNULL(arrayObject); if (arrayHasNull) { ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("worker array object cannot contain null values"))); } typeId = ARR_ELEMTYPE(arrayObject); get_typlenbyvalalign(typeId, &typeLength, &typeByVal, &typeAlign); deconstruct_array(arrayObject, typeId, typeLength, typeByVal, typeAlign, &datumArray, &datumArrayNulls, &datumArrayLength); return datumArray; } /* * ArrayObjectCount takes in a single dimensional array, and returns the number * of elements in this array. */ int32 ArrayObjectCount(ArrayType *arrayObject) { int32 dimensionCount = ARR_NDIM(arrayObject); int32 *dimensionLengthArray = ARR_DIMS(arrayObject); int32 arrayLength = 0; /* we currently allow split point arrays to have only one subarray */ Assert(dimensionCount == 1); arrayLength = ArrayGetNItems(dimensionCount, dimensionLengthArray); if (arrayLength <= 0) { ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("worker array object cannot be empty"))); } return arrayLength; } /* * InitTaskDirectory creates a job and task directory using given identifiers, * if these directories do not already exist. The function then returns the task * directory's name. */ StringInfo InitTaskDirectory(uint64 jobId, uint32 taskId) { bool jobDirectoryExists = false; bool taskDirectoryExists = false; /* * If the task tracker assigned this task (regular case), the tracker should * have already created the job directory. */ StringInfo jobDirectoryName = JobDirectoryName(jobId); StringInfo taskDirectoryName = TaskDirectoryName(jobId, taskId); LockJobResource(jobId, AccessExclusiveLock); jobDirectoryExists = DirectoryExists(jobDirectoryName); if (!jobDirectoryExists) { CreateDirectory(jobDirectoryName); } taskDirectoryExists = DirectoryExists(taskDirectoryName); if (!taskDirectoryExists) { CreateDirectory(taskDirectoryName); } UnlockJobResource(jobId, AccessExclusiveLock); return taskDirectoryName; } /* * InitTaskAttemptDirectory finds a task attempt directory that is not taken, * and creates that directory. The function then returns the task attempt * directory's name. */ static StringInfo InitTaskAttemptDirectory(uint64 jobId, uint32 taskId) { StringInfo taskDirectoryName = TaskDirectoryName(jobId, taskId); uint32 randomId = (uint32) random(); /* * We should have only one process executing this task. Still, we append a * random id just in case. */ StringInfo taskAttemptDirectoryName = makeStringInfo(); appendStringInfo(taskAttemptDirectoryName, "%s_%0*u", taskDirectoryName->data, MIN_TASK_FILENAME_WIDTH, randomId); /* * If this task previously failed, and gets re-executed and improbably draws * the same randomId, the task will fail to create the directory. */ CreateDirectory(taskAttemptDirectoryName); return taskAttemptDirectoryName; } /* Calculates and returns the buffer size to use for each file. */ static uint32 FileBufferSize(int partitionBufferSizeInKB, uint32 fileCount) { double partitionBufferSize = (double) partitionBufferSizeInKB * 1024.0; uint32 fileBufferSize = (uint32) rint(partitionBufferSize / fileCount); return fileBufferSize; } /* * OpenPartitionFiles takes in a directory name and file count, and opens new * partition files in this directory. 
The names for these new files are modeled * after Hadoop's naming conventions for map files. These file names, virtual * file descriptors, and file buffers are stored together in file output stream * objects. These objects are then returned in an array from this function. */ static FileOutputStream * OpenPartitionFiles(StringInfo directoryName, uint32 fileCount) { FileOutputStream *partitionFileArray = NULL; File fileDescriptor = 0; uint32 fileIndex = 0; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | PG_BINARY); const int fileMode = (S_IRUSR | S_IWUSR); partitionFileArray = palloc0(fileCount * sizeof(FileOutputStream)); for (fileIndex = 0; fileIndex < fileCount; fileIndex++) { StringInfo filePath = PartitionFilename(directoryName, fileIndex); fileDescriptor = PathNameOpenFile(filePath->data, fileFlags, fileMode); if (fileDescriptor < 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", filePath->data))); } partitionFileArray[fileIndex].fileDescriptor = fileDescriptor; partitionFileArray[fileIndex].fileBuffer = makeStringInfo(); partitionFileArray[fileIndex].filePath = filePath; } return partitionFileArray; } /* * ClosePartitionFiles walks over each file output stream object, and flushes * any remaining data in the file's buffer. The function then closes the file, * and deletes any allocated memory for the file stream object. */ static void ClosePartitionFiles(FileOutputStream *partitionFileArray, uint32 fileCount) { uint32 fileIndex = 0; for (fileIndex = 0; fileIndex < fileCount; fileIndex++) { FileOutputStream partitionFile = partitionFileArray[fileIndex]; FileOutputStreamFlush(partitionFile); FileClose(partitionFile.fileDescriptor); FreeStringInfo(partitionFile.fileBuffer); FreeStringInfo(partitionFile.filePath); } pfree(partitionFileArray); } /* * MasterJobDirectoryName constructs a standardized job * directory path for the given job id on the master node. */ StringInfo MasterJobDirectoryName(uint64 jobId) { StringInfo jobDirectoryName = makeStringInfo(); /* * We use the default tablespace in {datadir}/base. Further, we need to * apply padding on our 64-bit job id, and hence can't use UINT64_FORMAT. */ appendStringInfo(jobDirectoryName, "base/%s/%s%0*" INT64_MODIFIER "u", PG_JOB_CACHE_DIR, MASTER_JOB_DIRECTORY_PREFIX, MIN_JOB_DIRNAME_WIDTH, jobId); return jobDirectoryName; } /* * JobDirectoryName Constructs a standardized job * directory path for the given job id on the worker nodes. */ StringInfo JobDirectoryName(uint64 jobId) { /* * We use the default tablespace in {datadir}/base. */ StringInfo jobDirectoryName = makeStringInfo(); appendStringInfo(jobDirectoryName, "base/%s/%s%0*" INT64_MODIFIER "u", PG_JOB_CACHE_DIR, JOB_DIRECTORY_PREFIX, MIN_JOB_DIRNAME_WIDTH, jobId); return jobDirectoryName; } /* Constructs a standardized task directory path for given job and task ids. */ StringInfo TaskDirectoryName(uint64 jobId, uint32 taskId) { StringInfo jobDirectoryName = JobDirectoryName(jobId); StringInfo taskDirectoryName = makeStringInfo(); appendStringInfo(taskDirectoryName, "%s/%s%0*u", jobDirectoryName->data, TASK_FILE_PREFIX, MIN_TASK_FILENAME_WIDTH, taskId); return taskDirectoryName; } /* Constructs a standardized partition file path for given directory and id. 
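 * The result is the directory name, a separator, the partition file prefix,
 * and a zero-padded partition id; for example, partition id 3 with a
 * hypothetical padding width of five would end in "00003" (the actual prefix
 * and width come from PARTITION_FILE_PREFIX and MIN_PARTITION_FILENAME_WIDTH).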
*/ StringInfo PartitionFilename(StringInfo directoryName, uint32 partitionId) { StringInfo partitionFilename = makeStringInfo(); appendStringInfo(partitionFilename, "%s/%s%0*u", directoryName->data, PARTITION_FILE_PREFIX, MIN_PARTITION_FILENAME_WIDTH, partitionId); return partitionFilename; } /* * JobDirectoryElement takes in a filename, and checks if this name lives in the * directory path that is used for task output files. Note that this function's * implementation is coupled with JobDirectoryName(). */ bool JobDirectoryElement(const char *filename) { bool directoryElement = false; char *directoryPathFound = NULL; StringInfo directoryPath = makeStringInfo(); appendStringInfo(directoryPath, "base/%s/%s", PG_JOB_CACHE_DIR, JOB_DIRECTORY_PREFIX); directoryPathFound = strstr(filename, directoryPath->data); if (directoryPathFound != NULL) { directoryElement = true; } pfree(directoryPath); return directoryElement; } /* * CacheDirectoryElement takes in a filename, and checks if this name lives in * the directory path that is used for job, task, table etc. files. */ bool CacheDirectoryElement(const char *filename) { bool directoryElement = false; char *directoryPathFound = NULL; StringInfo directoryPath = makeStringInfo(); appendStringInfo(directoryPath, "base/%s/", PG_JOB_CACHE_DIR); directoryPathFound = strstr(filename, directoryPath->data); if (directoryPathFound != NULL) { directoryElement = true; } pfree(directoryPath); return directoryElement; } /* Checks if a directory exists for the given directory name. */ bool DirectoryExists(StringInfo directoryName) { bool directoryExists = true; struct stat directoryStat; int statOK = stat(directoryName->data, &directoryStat); if (statOK == 0) { /* file already exists; just assert that it is a directory */ Assert(S_ISDIR(directoryStat.st_mode)); } else { if (errno == ENOENT) { directoryExists = false; } else { ereport(ERROR, (errcode_for_file_access(), errmsg("could not stat directory \"%s\": %m", directoryName->data))); } } return directoryExists; } /* Creates a new directory with the given directory name. */ void CreateDirectory(StringInfo directoryName) { int makeOK = mkdir(directoryName->data, S_IRWXU); if (makeOK != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not create directory \"%s\": %m", directoryName->data))); } } /* * RemoveDirectory first checks if the given directory exists. If it does, the * function recursively deletes the contents of the given directory, and then * deletes the directory itself. This function is modeled on the Boost file * system library's remove_all() method. */ void RemoveDirectory(StringInfo filename) { struct stat fileStat; int removed = 0; int fileStated = stat(filename->data, &fileStat); if (fileStated < 0) { if (errno == ENOENT) { return; /* if file does not exist, return */ } else { ereport(ERROR, (errcode_for_file_access(), errmsg("could not stat file \"%s\": %m", filename->data))); } } /* * If this is a directory, iterate over all its contents and for each * content, recurse into this function. Also, make sure that we do not * recurse into symbolic links. 
*/ if (S_ISDIR(fileStat.st_mode) && !S_ISLNK(fileStat.st_mode)) { const char *directoryName = filename->data; struct dirent *directoryEntry = NULL; DIR *directory = AllocateDir(directoryName); if (directory == NULL) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not open directory \"%s\": %m", directoryName))); } directoryEntry = ReadDir(directory, directoryName); for (; directoryEntry != NULL; directoryEntry = ReadDir(directory, directoryName)) { const char *baseFilename = directoryEntry->d_name; StringInfo fullFilename = NULL; /* if system file, skip it */ if (strncmp(baseFilename, ".", MAXPGPATH) == 0 || strncmp(baseFilename, "..", MAXPGPATH) == 0) { continue; } fullFilename = makeStringInfo(); appendStringInfo(fullFilename, "%s/%s", directoryName, baseFilename); RemoveDirectory(fullFilename); FreeStringInfo(fullFilename); } FreeDir(directory); } /* we now have an empty directory or a regular file, remove it */ if (S_ISDIR(fileStat.st_mode)) { removed = rmdir(filename->data); } else { removed = unlink(filename->data); } if (removed != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", filename->data))); } } /* Moves directory from old path to the new one. */ static void RenameDirectory(StringInfo oldDirectoryName, StringInfo newDirectoryName) { int renamed = rename(oldDirectoryName->data, newDirectoryName->data); if (renamed != 0) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not rename directory \"%s\" to \"%s\": %m", oldDirectoryName->data, newDirectoryName->data))); } } /* * FileOutputStreamWrite appends given data to file stream's internal buffers. * The function then checks if buffered data exceeds preconfigured buffer size; * if so, the function flushes the buffer to the underlying file. */ static void FileOutputStreamWrite(FileOutputStream file, StringInfo dataToWrite) { StringInfo fileBuffer = file.fileBuffer; uint32 newBufferSize = fileBuffer->len + dataToWrite->len; appendBinaryStringInfo(fileBuffer, dataToWrite->data, dataToWrite->len); if (newBufferSize > FileBufferSizeInBytes) { FileOutputStreamFlush(file); resetStringInfo(fileBuffer); } } /* Flushes data buffered in the file stream object to the underlying file. */ static void FileOutputStreamFlush(FileOutputStream file) { StringInfo fileBuffer = file.fileBuffer; int written = 0; errno = 0; #if (PG_VERSION_NUM >= 100000) written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len, PG_WAIT_IO); #else written = FileWrite(file.fileDescriptor, fileBuffer->data, fileBuffer->len); #endif if (written != fileBuffer->len) { ereport(ERROR, (errcode_for_file_access(), errmsg("could not write %d bytes to partition file \"%s\"", fileBuffer->len, file.filePath->data))); } } /* * FilterAndPartitionTable executes a given SQL query, and iterates over query * results in a read-only fashion. For each resulting row, the function applies * the partitioning function and determines the partition identifier. Then, the * function chooses the partition file corresponding to this identifier, and * serializes the row into this file using the copy command's text format. 
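 *
 * Rows are not written to disk one at a time; each partition file keeps its
 * own in-memory buffer that FileOutputStreamWrite() flushes only after it
 * grows past the per-file threshold that FileBufferSize() derives from the
 * total partition buffer size and the number of open files.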
*/ static void FilterAndPartitionTable(const char *filterQuery, const char *partitionColumnName, Oid partitionColumnType, uint32 (*PartitionIdFunction)(Datum, const void *), const void *partitionIdContext, FileOutputStream *partitionFileArray, uint32 fileCount) { CopyOutState rowOutputState = NULL; FmgrInfo *columnOutputFunctions = NULL; int partitionColumnIndex = 0; Oid partitionColumnTypeId = InvalidOid; Portal queryPortal = NULL; int connected = 0; int finished = 0; uint32 columnCount = 0; Datum *valueArray = NULL; bool *isNullArray = NULL; const char *noPortalName = NULL; const bool readOnly = true; const bool fetchForward = true; const int noCursorOptions = 0; const int prefetchCount = ROW_PREFETCH_COUNT; connected = SPI_connect(); if (connected != SPI_OK_CONNECT) { ereport(ERROR, (errmsg("could not connect to SPI manager"))); } queryPortal = SPI_cursor_open_with_args(noPortalName, filterQuery, 0, NULL, NULL, NULL, /* no arguments */ readOnly, noCursorOptions); if (queryPortal == NULL) { ereport(ERROR, (errmsg("could not open implicit cursor for query \"%s\"", filterQuery))); } rowOutputState = InitRowOutputState(); SPI_cursor_fetch(queryPortal, fetchForward, prefetchCount); if (SPI_processed > 0) { TupleDesc rowDescriptor = SPI_tuptable->tupdesc; partitionColumnIndex = ColumnIndex(rowDescriptor, partitionColumnName); partitionColumnTypeId = SPI_gettypeid(rowDescriptor, partitionColumnIndex); if (partitionColumnType != partitionColumnTypeId) { ereport(ERROR, (errmsg("partition column types %u and %u do not match", partitionColumnTypeId, partitionColumnType))); } columnOutputFunctions = ColumnOutputFunctions(rowDescriptor, rowOutputState->binary); } if (BinaryWorkerCopyFormat) { OutputBinaryHeaders(partitionFileArray, fileCount); } columnCount = (uint32) SPI_tuptable->tupdesc->natts; valueArray = (Datum *) palloc0(columnCount * sizeof(Datum)); isNullArray = (bool *) palloc0(columnCount * sizeof(bool)); while (SPI_processed > 0) { int rowIndex = 0; for (rowIndex = 0; rowIndex < SPI_processed; rowIndex++) { HeapTuple row = SPI_tuptable->vals[rowIndex]; TupleDesc rowDescriptor = SPI_tuptable->tupdesc; FileOutputStream partitionFile = { 0, 0, 0 }; StringInfo rowText = NULL; Datum partitionKey = 0; bool partitionKeyNull = false; uint32 partitionId = 0; partitionKey = SPI_getbinval(row, rowDescriptor, partitionColumnIndex, &partitionKeyNull); /* * If we have a partition key, we compute its bucket. Else if we have * a null key, we then put this tuple into the 0th bucket. Note that * the 0th bucket may hold other tuples as well, such as tuples whose * partition keys hash to the value 0. 
*/ if (!partitionKeyNull) { partitionId = (*PartitionIdFunction)(partitionKey, partitionIdContext); } else { partitionId = 0; } /* deconstruct the tuple; this is faster than repeated heap_getattr */ heap_deform_tuple(row, rowDescriptor, valueArray, isNullArray); AppendCopyRowData(valueArray, isNullArray, rowDescriptor, rowOutputState, columnOutputFunctions); rowText = rowOutputState->fe_msgbuf; partitionFile = partitionFileArray[partitionId]; FileOutputStreamWrite(partitionFile, rowText); resetStringInfo(rowText); MemoryContextReset(rowOutputState->rowcontext); } SPI_freetuptable(SPI_tuptable); SPI_cursor_fetch(queryPortal, fetchForward, prefetchCount); } pfree(valueArray); pfree(isNullArray); SPI_cursor_close(queryPortal); if (BinaryWorkerCopyFormat) { OutputBinaryFooters(partitionFileArray, fileCount); } /* delete row output memory context */ ClearRowOutputState(rowOutputState); finished = SPI_finish(); if (finished != SPI_OK_FINISH) { ereport(ERROR, (errmsg("could not disconnect from SPI manager"))); } } /* * Determines the column number for the given column name. The column number * count starts at 1. */ static int ColumnIndex(TupleDesc rowDescriptor, const char *columnName) { int columnIndex = SPI_fnumber(rowDescriptor, columnName); if (columnIndex == SPI_ERROR_NOATTRIBUTE) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), errmsg("could not find column name \"%s\"", columnName))); } Assert(columnIndex >= 1); return columnIndex; } /* * InitRowOutputState creates and initializes a copy state object. This object * is internal to the copy command's implementation in Postgres; and we refactor * and refer to it here to avoid code duplication. We also only initialize the * fields needed for writing row data to text files, and skip the other fields. * * Note that the default field values used in commands/copy.c and this function * must match one another. Therefore, any changes to the default values in the * copy command must be propagated to this function. */ static CopyOutState InitRowOutputState(void) { CopyOutState rowOutputState = (CopyOutState) palloc0(sizeof(CopyOutStateData)); int fileEncoding = pg_get_client_encoding(); int databaseEncoding = GetDatabaseEncoding(); int databaseEncodingMaxLength = pg_database_encoding_max_length(); /* initialize defaults for printing null values */ char *nullPrint = pstrdup("\\N"); int nullPrintLen = strlen(nullPrint); char *nullPrintClient = pg_server_to_any(nullPrint, nullPrintLen, fileEncoding); /* set default text output characters */ rowOutputState->null_print = nullPrint; rowOutputState->null_print_client = nullPrintClient; rowOutputState->delim = pstrdup("\t"); rowOutputState->binary = BinaryWorkerCopyFormat; /* set encoding conversion information */ rowOutputState->file_encoding = fileEncoding; if (PG_ENCODING_IS_CLIENT_ONLY(fileEncoding)) { ereport(ERROR, (errmsg("cannot repartition into encoding caller cannot " "receive"))); } /* set up transcoding information and default text output characters */ if ((fileEncoding != databaseEncoding) || (databaseEncodingMaxLength > 1)) { rowOutputState->need_transcoding = true; } else { rowOutputState->need_transcoding = false; } /* * Create a temporary memory context that we can reset once per row to * recover palloc'd memory. This avoids any problems with leaks inside data * type output routines, and should be faster than retail pfree's anyway. 
*/ rowOutputState->rowcontext = AllocSetContextCreate(CurrentMemoryContext, "WorkerRowOutputContext", ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); /* allocate the message buffer to use for serializing a row */ rowOutputState->fe_msgbuf = makeStringInfo(); return rowOutputState; } /* Clears copy state used for outputting row data. */ static void ClearRowOutputState(CopyOutState rowOutputState) { Assert(rowOutputState != NULL); MemoryContextDelete(rowOutputState->rowcontext); FreeStringInfo(rowOutputState->fe_msgbuf); pfree(rowOutputState->null_print_client); pfree(rowOutputState->delim); pfree(rowOutputState); } /* * Write the header of postgres' binary serialization format to each partition file. * This function is used when binary_worker_copy_format is enabled. */ static void OutputBinaryHeaders(FileOutputStream *partitionFileArray, uint32 fileCount) { uint32 fileIndex = 0; for (fileIndex = 0; fileIndex < fileCount; fileIndex++) { /* Generate header for a binary copy */ FileOutputStream partitionFile = { 0, 0, 0 }; CopyOutStateData headerOutputStateData; CopyOutState headerOutputState = (CopyOutState) & headerOutputStateData; memset(headerOutputState, 0, sizeof(CopyOutStateData)); headerOutputState->fe_msgbuf = makeStringInfo(); AppendCopyBinaryHeaders(headerOutputState); partitionFile = partitionFileArray[fileIndex]; FileOutputStreamWrite(partitionFile, headerOutputState->fe_msgbuf); } } /* * Write the footer of postgres' binary serialization format to each partition file. * This function is used when binary_worker_copy_format is enabled. */ static void OutputBinaryFooters(FileOutputStream *partitionFileArray, uint32 fileCount) { uint32 fileIndex = 0; for (fileIndex = 0; fileIndex < fileCount; fileIndex++) { /* Generate footer for a binary copy */ FileOutputStream partitionFile = { 0, 0, 0 }; CopyOutStateData footerOutputStateData; CopyOutState footerOutputState = (CopyOutState) & footerOutputStateData; memset(footerOutputState, 0, sizeof(CopyOutStateData)); footerOutputState->fe_msgbuf = makeStringInfo(); AppendCopyBinaryFooters(footerOutputState); partitionFile = partitionFileArray[fileIndex]; FileOutputStreamWrite(partitionFile, footerOutputState->fe_msgbuf); } } /* Helper function that invokes a function with the default collation oid. */ Datum CompareCall2(FmgrInfo *functionInfo, Datum leftArgument, Datum rightArgument) { Datum result = FunctionCall2Coll(functionInfo, DEFAULT_COLLATION_OID, leftArgument, rightArgument); return result; } /* * RangePartitionId determines the partition number for the given data value * by applying range partitioning. More specifically, the function takes in a * data value and an array of sorted split points, and performs a binary search * within that array to determine the bucket the data value falls into. The * function then returns that bucket number. * * Note that we employ a version of binary search known as upper_bound; this * ensures that all null values fall into the zeroth bucket and that we maintain * full compatibility with the semantics of Hadoop's TotalOrderPartitioner. 
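 *
 * As a worked example, with split points {10, 20, 30} this function returns
 * bucket 0 for the value 5, bucket 1 for the values 10 and 15, bucket 2 for
 * the value 25, and bucket 3 for the values 30 and 35. Null partition keys
 * never reach this function; the caller assigns them to bucket 0 directly.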
*/ static uint32 RangePartitionId(Datum partitionValue, const void *context) { RangePartitionContext *rangePartitionContext = (RangePartitionContext *) context; FmgrInfo *comparisonFunction = rangePartitionContext->comparisonFunction; Datum *pointArray = rangePartitionContext->splitPointArray; int32 currentLength = rangePartitionContext->splitPointCount; int32 halfLength = 0; uint32 firstIndex = 0; /* * We implement a binary search variant known as upper_bound. This variant * gives us the semantics we need for partitioned joins; and is also used by * Hadoop's TotalOrderPartitioner. To implement this variant, we rely on SGI * STL v3.3's source code for upper_bound(). Note that elements in the point * array cannot be null. */ while (currentLength > 0) { uint32 middleIndex = 0; Datum middlePoint = 0; Datum comparisonDatum = 0; int comparisonResult = 0; halfLength = currentLength >> 1; middleIndex = firstIndex; middleIndex += halfLength; middlePoint = pointArray[middleIndex]; comparisonDatum = CompareCall2(comparisonFunction, partitionValue, middlePoint); comparisonResult = DatumGetInt32(comparisonDatum); /* if partition value is less than middle point */ if (comparisonResult < 0) { currentLength = halfLength; } else { firstIndex = middleIndex; firstIndex++; currentLength = currentLength - halfLength - 1; } } return firstIndex; } /* * HashPartitionId determines the partition number for the given data value * using hash partitioning. More specifically, the function returns zero if the * given data value is null. If not, the function applies the standard Postgres * hashing function for the given data type, and mods the hashed result with the * number of partitions. The function then returns the modded number as the * partition number. * * Note that any changes to PostgreSQL's hashing functions will reshuffle the * entire distribution created by this function. For a discussion of this issue, * see Google "PL/Proxy Users: Hash Functions Have Changed in PostgreSQL 8.4." */ static uint32 HashPartitionId(Datum partitionValue, const void *context) { HashPartitionContext *hashPartitionContext = (HashPartitionContext *) context; FmgrInfo *hashFunction = hashPartitionContext->hashFunction; uint32 partitionCount = hashPartitionContext->partitionCount; Datum hashDatum = 0; uint32 hashResult = 0; uint32 hashPartitionId = 0; /* hash functions return unsigned 32-bit integers */ hashDatum = FunctionCall1(hashFunction, partitionValue); hashResult = DatumGetUInt32(hashDatum); hashPartitionId = (hashResult % partitionCount); return hashPartitionId; } citus-7.0.3/src/backend/distributed/worker/worker_truncate_trigger_protocol.c000066400000000000000000000023711317107136600277170ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_create_truncate_trigger_protocol.c * * Routines for creating truncate triggers on distributed tables on worker nodes. * * Copyright (c) 2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "fmgr.h" #include "distributed/citus_ruleutils.h" #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "utils/elog.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" PG_FUNCTION_INFO_V1(worker_create_truncate_trigger); /* * worker_create_truncate_trigger creates a truncate trigger for the given distributed * table on current metadata worker. 
The function is intented to be called by the * coordinator node during metadata propagation of mx tables or during the upgrades from * citus version <=5.2 to >=6.1. The function requires superuser permissions. */ Datum worker_create_truncate_trigger(PG_FUNCTION_ARGS) { Oid relationId = PG_GETARG_OID(0); EnsureSuperUser(); CheckCitusVersion(ERROR); /* Create the truncate trigger */ CreateTruncateTrigger(relationId); PG_RETURN_VOID(); } citus-7.0.3/src/include/000077500000000000000000000000001317107136600150675ustar00rootroot00000000000000citus-7.0.3/src/include/.gitignore000066400000000000000000000001401317107136600170520ustar00rootroot00000000000000/stamp-h /stamp-ext-h /citus_config.h /citus_config.h.in~ /citus_version.h /citus_version.h.in~ citus-7.0.3/src/include/citus_config.h.in000066400000000000000000000020171317107136600203210ustar00rootroot00000000000000/* src/include/citus_config.h.in. Generated from configure.in by autoheader. */ /* * citus_config.h.in is generated by autoconf/autoheader and * converted into citus_config.h by configure. Include when code needs to * depend on determinations made by configure. * * Do not manually edit! */ /* Extension version expected by this Citus build */ #undef CITUS_EXTENSIONVERSION /* Citus major version as a string */ #undef CITUS_MAJORVERSION /* Citus version as a string */ #undef CITUS_VERSION /* Citus version as a number */ #undef CITUS_VERSION_NUM /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION citus-7.0.3/src/include/citus_version.h.in000066400000000000000000000004511317107136600205410ustar00rootroot00000000000000/* This file is created manually */ /* Extension version expected by this Citus build */ #undef CITUS_EXTENSIONVERSION /* Citus major version as a string */ #undef CITUS_MAJORVERSION /* Citus version as a string */ #undef CITUS_VERSION /* Citus version as a number */ #undef CITUS_VERSION_NUM citus-7.0.3/src/include/distributed/000077500000000000000000000000001317107136600174115ustar00rootroot00000000000000citus-7.0.3/src/include/distributed/backend_data.h000066400000000000000000000023421317107136600221430ustar00rootroot00000000000000/* * backend_data.h * * Data structure definition for managing backend data and related function * declarations. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef BACKEND_DATA_H #define BACKEND_DATA_H #include "datatype/timestamp.h" #include "distributed/transaction_identifier.h" #include "nodes/pg_list.h" #include "storage/lwlock.h" #include "storage/proc.h" #include "storage/s_lock.h" /* * Each backend's active distributed transaction information is tracked via * BackendData in shared memory. 
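 *
 * Each entry carries its own spinlock; readers that copy another backend's
 * entry (for example via GetBackendDataForProc()) are expected to do so
 * under that lock rather than relying on the fields changing atomically.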
*/ typedef struct BackendData { Oid databaseId; slock_t mutex; bool cancelledDueToDeadlock; DistributedTransactionId transactionId; } BackendData; extern void InitializeBackendManagement(void); extern void InitializeBackendData(void); extern void LockBackendSharedMemory(LWLockMode lockMode); extern void UnlockBackendSharedMemory(void); extern void UnSetDistributedTransactionId(void); extern void AssignDistributedTransactionId(void); extern void GetBackendDataForProc(PGPROC *proc, BackendData *result); extern void CancelTransactionDueToDeadlock(PGPROC *proc); extern bool MyBackendGotCancelledDueToDeadlock(void); #endif /* BACKEND_DATA_H */ citus-7.0.3/src/include/distributed/citus_clauses.h000066400000000000000000000012351317107136600224310ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_clauses.h * Routines roughly equivalent to postgres' util/clauses. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef CITUS_CLAUSES_H #define CITUS_CLAUSES_H #include "nodes/execnodes.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" extern bool RequiresMasterEvaluation(Query *query); extern void ExecuteMasterEvaluableFunctions(Query *query, PlanState *planState); extern Node * PartiallyEvaluateExpression(Node *expression, PlanState *planState); #endif /* CITUS_CLAUSES_H */ citus-7.0.3/src/include/distributed/citus_nodefuncs.h000066400000000000000000000066251317107136600227660ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_nodefuncs.h * Node (de-)serialization support for Citus. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef CITUS_NODEFUNCS_H #define CITUS_NODEFUNCS_H #include "distributed/multi_physical_planner.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" /* citus_nodefuncs.c */ extern void SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSchemaName, char *fragmentTableName, List *tableIdList); extern void ModifyRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSchemaName, char *fragmentTableName, List *tableIdList); extern void ExtractRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind *rteKind, char **fragmentSchemaName, char **fragmentTableName, List **tableIdList); extern CitusRTEKind GetRangeTblKind(RangeTblEntry *rte); extern void RegisterNodes(void); /* * Define read functions for citus nodes in a way they're usable across * several major versions. That requires some macro-uglyness as 9.6+ is quite * different from before. 
*/ #define READFUNC_ARGS struct ExtensibleNode *node #define READFUNC_RET void #define OUTFUNC_ARGS StringInfo str, const struct ExtensibleNode *raw_node #define COPYFUNC_ARGS struct ExtensibleNode *target_node, const struct \ ExtensibleNode *source_node extern READFUNC_RET ReadJob(READFUNC_ARGS); extern READFUNC_RET ReadMultiPlan(READFUNC_ARGS); extern READFUNC_RET ReadShardInterval(READFUNC_ARGS); extern READFUNC_RET ReadMapMergeJob(READFUNC_ARGS); extern READFUNC_RET ReadShardPlacement(READFUNC_ARGS); extern READFUNC_RET ReadRelationShard(READFUNC_ARGS); extern READFUNC_RET ReadTask(READFUNC_ARGS); extern READFUNC_RET ReadTaskExecution(READFUNC_ARGS); extern READFUNC_RET ReadDeferredErrorMessage(READFUNC_ARGS); extern READFUNC_RET ReadGroupShardPlacement(READFUNC_ARGS); extern READFUNC_RET ReadUnsupportedCitusNode(READFUNC_ARGS); extern void OutJob(OUTFUNC_ARGS); extern void OutMultiPlan(OUTFUNC_ARGS); extern void OutShardInterval(OUTFUNC_ARGS); extern void OutMapMergeJob(OUTFUNC_ARGS); extern void OutShardPlacement(OUTFUNC_ARGS); extern void OutRelationShard(OUTFUNC_ARGS); extern void OutTask(OUTFUNC_ARGS); extern void OutTaskExecution(OUTFUNC_ARGS); extern void OutDeferredErrorMessage(OUTFUNC_ARGS); extern void OutGroupShardPlacement(OUTFUNC_ARGS); extern void OutMultiNode(OUTFUNC_ARGS); extern void OutMultiTreeRoot(OUTFUNC_ARGS); extern void OutMultiProject(OUTFUNC_ARGS); extern void OutMultiCollect(OUTFUNC_ARGS); extern void OutMultiSelect(OUTFUNC_ARGS); extern void OutMultiTable(OUTFUNC_ARGS); extern void OutMultiJoin(OUTFUNC_ARGS); extern void OutMultiPartition(OUTFUNC_ARGS); extern void OutMultiCartesianProduct(OUTFUNC_ARGS); extern void OutMultiExtendedOp(OUTFUNC_ARGS); extern void CopyNodeJob(COPYFUNC_ARGS); extern void CopyNodeMultiPlan(COPYFUNC_ARGS); extern void CopyNodeShardInterval(COPYFUNC_ARGS); extern void CopyNodeMapMergeJob(COPYFUNC_ARGS); extern void CopyNodeShardPlacement(COPYFUNC_ARGS); extern void CopyNodeGroupShardPlacement(COPYFUNC_ARGS); extern void CopyNodeRelationShard(COPYFUNC_ARGS); extern void CopyNodeTask(COPYFUNC_ARGS); extern void CopyNodeTaskExecution(COPYFUNC_ARGS); extern void CopyNodeDeferredErrorMessage(COPYFUNC_ARGS); #endif /* CITUS_NODEFUNCS_H */ citus-7.0.3/src/include/distributed/citus_nodes.h000066400000000000000000000061721317107136600221070ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_nodes.h * Additional node types, and related infrastructure, for Citus. * * To add a new node type to Citus, perform the following: * * * Add a new CitusNodeTag value to use as a tag for the node. Add * the node's name at a corresponding offset within the array named * CitusNodeTagNamesD at the top of citus_nodefuncs.c * * * Describe the node in a struct, which must have a CitusNode as * its first element * * * Implement an 'outfunc' for the node in citus_outfuncs.c, using * the macros defined within that file. This function will handle * converting the node to a string * * * Implement a 'readfunc' for the node in citus_readfuncs.c, using * the macros defined within that file. This function will handle * converting strings into instances of the node * * * Use DEFINE_NODE_METHODS within the nodeMethods array (near the * bottom of citus_nodefuncs.c) to register the node in PostgreSQL * * Copyright (c) 2012-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef CITUS_NODES_H #define CITUS_NODES_H #include "nodes/extensible.h" /* * Citus Node Tags * * These have to be distinct from the ideas used in postgres' nodes.h * * NOTE: This list must match CitusNodeTagNamesD from citus_nodefuncs.c */ #define CITUS_NODE_TAG_START 1200 typedef enum CitusNodeTag { T_MultiNode = CITUS_NODE_TAG_START, /* FIXME: perhaps use something less predicable? */ T_MultiTreeRoot, T_MultiProject, T_MultiCollect, T_MultiSelect, T_MultiTable, T_MultiJoin, T_MultiPartition, T_MultiCartesianProduct, T_MultiExtendedOp, T_Job, T_MapMergeJob, T_MultiPlan, T_Task, T_TaskExecution, T_ShardInterval, T_ShardPlacement, T_RelationShard, T_DeferredErrorMessage, T_GroupShardPlacement } CitusNodeTag; const char** CitusNodeTagNames; typedef struct CitusNode { ExtensibleNode extensible; CitusNodeTag citus_tag; /* for quick type determination */ } CitusNode; #define CitusNodeTag(nodeptr) CitusNodeTagI((Node*) nodeptr) static inline int CitusNodeTagI(Node *node) { if (!IsA(node, ExtensibleNode)) { return nodeTag(node); } return ((CitusNode*)(node))->citus_tag; } /* Citus variant of newNode(), don't use directly. */ #define CitusNewNode(size, tag) \ ({ CitusNode *_result; \ AssertMacro((size) >= sizeof(CitusNode)); /* need the tag, at least */ \ _result = (CitusNode *) palloc0fast(size); \ _result->extensible.type = T_ExtensibleNode; \ _result->extensible.extnodename = CitusNodeTagNames[tag - CITUS_NODE_TAG_START]; \ _result->citus_tag =(int) (tag); \ _result; \ }) /* * IsA equivalent that compares node tags, including Citus-specific nodes. */ #define CitusIsA(nodeptr,_type_) (CitusNodeTag(nodeptr) == T_##_type_) /* * CitusMakeNode is Citus variant of makeNode(). Use it to create nodes of * the types listed in the CitusNodeTag enum and plain NodeTag. Initializes * memory, besides the node tag, to 0. */ #define CitusMakeNode(_type_) ((_type_ *) CitusNewNode(sizeof(_type_),T_##_type_)) #endif /* CITUS_NODES_H */ citus-7.0.3/src/include/distributed/citus_ruleutils.h000066400000000000000000000041321317107136600230210ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * citus_ruleutils.h * Citus ruleutils wrapper functions and exported PostgreSQL ruleutils * functions. * * Copyright (c) 2012-2016, Citus Data, Inc. 
*------------------------------------------------------------------------- */ #ifndef CITUS_RULEUTILS_H #define CITUS_RULEUTILS_H #include "postgres.h" /* IWYU pragma: keep */ #include "c.h" #if (PG_VERSION_NUM >= 100000) #include "catalog/pg_sequence.h" #endif #include "commands/sequence.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #define CREATE_SEQUENCE_COMMAND \ "CREATE SEQUENCE IF NOT EXISTS %s INCREMENT BY " INT64_FORMAT " MINVALUE " \ INT64_FORMAT " MAXVALUE " INT64_FORMAT " START WITH " INT64_FORMAT " %sCYCLE" /* Function declarations for version independent Citus ruleutils wrapper functions */ extern char * pg_get_extensiondef_string(Oid tableRelationId); extern Oid get_extension_schema(Oid ext_oid); extern char * pg_get_serverdef_string(Oid tableRelationId); extern char * pg_get_sequencedef_string(Oid sequenceRelid); extern Form_pg_sequence pg_get_sequencedef(Oid sequenceRelationId); extern char * pg_get_tableschemadef_string(Oid tableRelationId, bool forShardCreation); extern void EnsureRelationKindSupported(Oid relationId); extern char * pg_get_tablecolumnoptionsdef_string(Oid tableRelationId); extern void deparse_shard_index_statement(IndexStmt *origStmt, Oid distrelid, int64 shardid, StringInfo buffer); extern char * pg_get_indexclusterdef_string(Oid indexRelationId); extern List * pg_get_table_grants(Oid relationId); extern bool contain_nextval_expression_walker(Node *node, void *context); /* Function declarations for version dependent PostgreSQL ruleutils functions */ extern void pg_get_query_def(Query *query, StringInfo buffer); extern void deparse_shard_query(Query *query, Oid distrelid, int64 shardid, StringInfo buffer); extern char * generate_relation_name(Oid relid, List *namespaces); extern char * generate_qualified_relation_name(Oid relid); #endif /* CITUS_RULEUTILS_H */ citus-7.0.3/src/include/distributed/colocation_utils.h000066400000000000000000000027161317107136600231420ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * colocation_utils.h * * Declarations for public utility functions related to co-located tables. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef COLOCATION_UTILS_H_ #define COLOCATION_UTILS_H_ #include "distributed/shardinterval_utils.h" #include "nodes/pg_list.h" #define INVALID_COLOCATION_ID 0 extern uint32 TableColocationId(Oid distributedTableId); extern bool TablesColocated(Oid leftDistributedTableId, Oid rightDistributedTableId); extern bool ShardsColocated(ShardInterval *leftShardInterval, ShardInterval *rightShardInterval); extern List * ColocatedTableList(Oid distributedTableId); extern List * ColocatedShardIntervalList(ShardInterval *shardInterval); extern Oid ColocatedTableId(Oid colocationId); extern uint64 ColocatedShardIdInRelation(Oid relationId, int shardIndex); uint32 ColocationId(int shardCount, int replicationFactor, Oid distributionColumnType); extern uint32 CreateColocationGroup(int shardCount, int replicationFactor, Oid distributionColumnType); extern uint32 GetNextColocationId(void); extern void CheckReplicationModel(Oid sourceRelationId, Oid targetRelationId); extern void CheckDistributionColumnType(Oid sourceRelationId, Oid targetRelationId); extern void DeleteColocationGroupIfNoTablesBelong(uint32 colocationId); #endif /* COLOCATION_UTILS_H_ */ citus-7.0.3/src/include/distributed/connection_management.h000066400000000000000000000107051317107136600241200ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * connection_management.h * Central management of connections and their life-cycle * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef CONNECTION_MANAGMENT_H #define CONNECTION_MANAGMENT_H #include "distributed/transaction_management.h" #include "distributed/remote_transaction.h" #include "lib/ilist.h" #include "utils/hsearch.h" #include "utils/timestamp.h" /* maximum (textual) lengths of hostname and port */ #define MAX_NODE_LENGTH 255 /* includes 0 byte */ /* forward declare, to avoid forcing large headers on everyone */ struct pg_conn; /* target of the PGconn typedef */ struct MemoryContextData; /* * Flags determining connection establishment behaviour. */ enum MultiConnectionMode { /* force establishment of a new connection */ FORCE_NEW_CONNECTION = 1 << 0, /* mark returned connection as having session lifespan */ SESSION_LIFESPAN = 1 << 1, FOR_DDL = 1 << 2, FOR_DML = 1 << 3, /* open a connection per (co-located set of) placement(s) */ CONNECTION_PER_PLACEMENT = 1 << 4 }; /* declaring this directly above makes uncrustify go crazy */ typedef enum MultiConnectionMode MultiConnectionMode; typedef struct MultiConnection { /* connection details, useful for error messages and such. 
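 * The same (hostname, port, user, database) quadruple also forms the
 * ConnectionHashKey below, so an established MultiConnection can always be
 * found again through ConnectionHash.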
*/ char hostname[MAX_NODE_LENGTH]; int32 port; char user[NAMEDATALEN]; char database[NAMEDATALEN]; /* underlying libpq connection */ struct pg_conn *pgConn; /* is the connection intended to be kept after transaction end */ bool sessionLifespan; /* is the connection currently in use, and shouldn't be used by anything else */ bool claimedExclusively; /* time connection establishment was started, for timeout */ TimestampTz connectionStart; /* membership in list of list of connections in ConnectionHashEntry */ dlist_node connectionNode; /* information about the associated remote transaction */ RemoteTransaction remoteTransaction; /* membership in list of in-progress transactions */ dlist_node transactionNode; /* list of all placements referenced by this connection */ dlist_head referencedPlacements; } MultiConnection; /* * Central connection management hash, mapping (host, port, user, database) to * a list of connections. * * This hash is used to keep track of which connections are open to which * node. Besides allowing connection reuse, that information is e.g. used to * handle closing connections after the end of a transaction. */ /* hash key */ typedef struct ConnectionHashKey { char hostname[MAX_NODE_LENGTH]; int32 port; char user[NAMEDATALEN]; char database[NAMEDATALEN]; } ConnectionHashKey; /* hash entry */ typedef struct ConnectionHashEntry { ConnectionHashKey key; dlist_head *connections; } ConnectionHashEntry; /* maximum duration to wait for connection */ extern int NodeConnectionTimeout; /* the hash table */ extern HTAB *ConnectionHash; /* context for all connection and transaction related memory */ extern struct MemoryContextData *ConnectionContext; extern void AfterXactConnectionHandling(bool isCommit); extern void InitializeConnectionManagement(void); /* Low-level connection establishment APIs */ extern MultiConnection * GetNodeConnection(uint32 flags, const char *hostname, int32 port); extern MultiConnection * StartNodeConnection(uint32 flags, const char *hostname, int32 port); extern MultiConnection * GetNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, const char *user, const char *database); extern MultiConnection * StartNodeUserDatabaseConnection(uint32 flags, const char *hostname, int32 port, const char *user, const char *database); extern MultiConnection * GetConnectionFromPGconn(struct pg_conn *pqConn); extern void CloseNodeConnectionsAfterTransaction(char *nodeName, int nodePort); extern void CloseConnection(MultiConnection *connection); extern void CloseConnectionByPGconn(struct pg_conn *pqConn); extern void ShutdownConnection(MultiConnection *connection); /* dealing with a connection */ extern void FinishConnectionListEstablishment(List *multiConnectionList); extern void FinishConnectionEstablishment(MultiConnection *connection); extern void ClaimConnectionExclusively(MultiConnection *connection); extern void UnclaimConnection(MultiConnection *connection); #endif /* CONNECTION_MANAGMENT_H */ citus-7.0.3/src/include/distributed/deparse_shard_query.h000066400000000000000000000012371317107136600236160ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * deparse_shard_query.h * * Declarations for public functions and types related to deparsing shard * queries. * * Copyright (c) 2014-2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef DEPARSE_SHARD_QUERY_H #define DEPARSE_SHARD_QUERY_H #include "c.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" extern void RebuildQueryStrings(Query *originalQuery, List *taskList); extern bool UpdateRelationToShardNames(Node *node, List *relationShardList); #endif /* DEPARSE_SHARD_QUERY_H */ citus-7.0.3/src/include/distributed/distributed_deadlock_detection.h000066400000000000000000000023771317107136600260010ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * distributed_deadlock_detection.h * Type and function declarations used for performing distributed deadlock * detection. * * Copyright (c) 2017, Citus Data, Inc. *------------------------------------------------------------------------- */ #ifndef DISTRIBUTED_DEADLOCK_DETECTION_H #define DISTRIBUTED_DEADLOCK_DETECTION_H #include "postgres.h" #include "access/hash.h" #include "distributed/backend_data.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" #include "distributed/transaction_identifier.h" #include "nodes/pg_list.h" typedef struct TransactionNode { DistributedTransactionId transactionId; /* list of TransactionNode that this distributed transaction is waiting for */ List *waitsFor; /* backend that is on the initiator node */ PGPROC *initiatorProc; bool transactionVisited; } TransactionNode; /* GUC, determining whether debug messages for deadlock detection sent to LOG */ extern bool LogDistributedDeadlockDetection; extern bool CheckForDistributedDeadlocks(void); extern HTAB * BuildAdjacencyListsForWaitGraph(WaitGraph *waitGraph); extern char * WaitsForToString(List *waitsFor); #endif /* DISTRIBUTED_DEADLOCK_DETECTION_H */ citus-7.0.3/src/include/distributed/distribution_column.h000066400000000000000000000012751317107136600236630ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * distribution_column.h * Type and function declarations used for handling the distribution * column of distributed tables. * * Copyright (c) 2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef DISTRIBUTION_COLUMN_H #define DISTRIBUTION_COLUMN_H #include "utils/rel.h" /* Remaining metadata utility functions */ extern Var * BuildDistributionKeyFromColumnName(Relation distributedRelation, char *columnName); extern char * ColumnNameToColumn(Oid relationId, char *columnNodeString); #endif /* DISTRIBUTION_COLUMN_H */ citus-7.0.3/src/include/distributed/errormessage.h000066400000000000000000000032251317107136600222620ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * errormessage.h * Error handling related support functionality. * * Copyright (c) 2017, Citus Data, Inc. *------------------------------------------------------------------------- */ #ifndef ERRORMESSAGE_H #define ERRORMESSAGE_H #include "distributed/citus_nodes.h" typedef struct DeferredErrorMessage { CitusNode tag; int code; const char *message; const char *detail; const char *hint; const char *filename; int linenumber; const char *functionname; } DeferredErrorMessage; /* * DeferredError allocates a deferred error message, that can later be emitted * using RaiseDeferredError(). These error messages can be * serialized/copied/deserialized, i.e. can be embedded in plans and such. 
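 *
 * A minimal usage sketch (the error code and texts are illustrative only):
 *
 *     DeferredErrorMessage *error =
 *         DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED,
 *                       "cannot plan this query", NULL, NULL);
 *     ...
 *     if (error != NULL)
 *     {
 *         RaiseDeferredError(error, ERROR);
 *     }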
*/ #define DeferredError(code, message, detail, hint) \ DeferredErrorInternal(code, message, detail, hint, __FILE__, __LINE__, __func__) DeferredErrorMessage * DeferredErrorInternal(int code, const char *message, const char *detail, const char *hint, const char *filename, int linenumber, const char *functionname); /* * RaiseDeferredError emits a previously allocated error using the specified * severity. * * The trickery with __builtin_constant_p/pg_unreachable aims to have the * compiler understand that the function will not return if elevel >= ERROR. */ #define RaiseDeferredError(error, elevel) \ do { \ RaiseDeferredErrorInternal(error, elevel); \ if (__builtin_constant_p(elevel) && (elevel) >= ERROR) { \ pg_unreachable(); } \ } while (0) void RaiseDeferredErrorInternal(DeferredErrorMessage *error, int elevel); #endif citus-7.0.3/src/include/distributed/hash_helpers.h000066400000000000000000000011241317107136600222250ustar00rootroot00000000000000/*------------------------------------------------------------------------- * hash_helpers.h * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef HASH_HELPERS_H #define HASH_HELPERS_H #include "utils/hsearch.h" /* * Combine two hash values, resulting in another hash value, with decent bit * mixing. * * Similar to boost's hash_combine(). */ static inline uint32 hash_combine(uint32 a, uint32 b) { a ^= b + 0x9e3779b9 + (a << 6) + (a >> 2); return a; } extern void hash_delete_all(HTAB *htab); #endif citus-7.0.3/src/include/distributed/insert_select_executor.h000066400000000000000000000010521317107136600243410ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * insert_select_executor.h * * Declarations for public functions and types related to executing * INSERT..SELECT commands. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef INSERT_SELECT_EXECUTOR_H #define INSERT_SELECT_EXECUTOR_H #include "executor/execdesc.h" extern TupleTableSlot * CoordinatorInsertSelectExecScan(CustomScanState *node); #endif /* INSERT_SELECT_EXECUTOR_H */ citus-7.0.3/src/include/distributed/insert_select_planner.h000066400000000000000000000021341317107136600241440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * insert_select_planner.h * * Declarations for public functions and types related to planning * INSERT..SELECT commands. * * Copyright (c) 2017, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef INSERT_SELECT_PLANNER_H #define INSERT_SELECT_PLANNER_H #include "postgres.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_planner.h" #include "nodes/execnodes.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" extern bool InsertSelectIntoDistributedTable(Query *query); extern Query * ReorderInsertSelectTargetLists(Query *originalQuery, RangeTblEntry *insertRte, RangeTblEntry *subqueryRte); extern void CoordinatorInsertSelectExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es); extern MultiPlan * CreateInsertSelectPlan(Query *originalQuery, PlannerRestrictionContext * plannerRestrictionContext); #endif /* INSERT_SELECT_PLANNER_H */ citus-7.0.3/src/include/distributed/listutils.h000066400000000000000000000014361317107136600216220ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * listutils.h * * Declarations for public utility functions related to lists. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef CITUS_LISTUTILS_H #define CITUS_LISTUTILS_H #include "postgres.h" #include "c.h" #include "nodes/pg_list.h" #include "utils/array.h" /* utility function declarations shared within this module */ extern List * SortList(List *pointerList, int (*ComparisonFunction)(const void *, const void *)); extern void ** PointerArrayFromList(List *pointerList); extern ArrayType * DatumArrayToArrayType(Datum *datumArray, int datumCount, Oid datumTypeId); #endif /* CITUS_LISTUTILS_H */ citus-7.0.3/src/include/distributed/lock_graph.h000066400000000000000000000025311317107136600216740ustar00rootroot00000000000000/* * lock_graph.h * * Data structures and functions for gathering lock graphs between * distributed transactions. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef LOCK_GRAPH_H #define LOCK_GRAPH_H #include "postgres.h" #include "datatype/timestamp.h" /* * Describes an edge in a waiting-for graph of locks. This isn't used for * deadlock-checking directly, but to gather the information necessary to * do so. * * The datatypes here are a bit looser than strictly necessary, because * they're transported as the return type from an SQL function. */ typedef struct WaitEdge { int waitingPid; int waitingNodeId; int64 waitingTransactionNum; TimestampTz waitingTransactionStamp; int blockingPid; int blockingNodeId; int64 blockingTransactionNum; TimestampTz blockingTransactionStamp; /* blocking transaction is also waiting on a lock */ bool isBlockingXactWaiting; } WaitEdge; /* * WaitGraph represents a graph of wait edges as an adjacency list. */ typedef struct WaitGraph { int localNodeId; int allocatedSize; int edgeCount; WaitEdge *edges; } WaitGraph; extern WaitGraph * BuildGlobalWaitGraph(void); extern bool IsProcessWaitingForLock(PGPROC *proc); extern bool IsInDistributedTransaction(BackendData *backendData); #endif /* LOCK_GRAPH_H */ citus-7.0.3/src/include/distributed/maintenanced.h000066400000000000000000000012471317107136600222140ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * maintenanced.h * Background worker run for each Citus-using database in a postgres * cluster. * * Copyright (c) 2017, Citus Data, Inc.
* *------------------------------------------------------------------------- */ #ifndef MAINTENANCED_H #define MAINTENANCED_H /* config variable for the distributed deadlock detection timeout factor */ extern double DistributedDeadlockDetectionTimeoutFactor; extern void StopMaintenanceDaemon(Oid databaseId); extern void InitializeMaintenanceDaemon(void); extern void InitializeMaintenanceDaemonBackend(void); extern void CitusMaintenanceDaemonMain(Datum main_arg); #endif /* MAINTENANCED_H */ citus-7.0.3/src/include/distributed/master_metadata_utility.h000066400000000000000000000125721317107136600245070ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * master_metadata_utility.h * Type and function declarations used for reading and modifying master * node's metadata. * * Copyright (c) 2014-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef MASTER_METADATA_UTILITY_H #define MASTER_METADATA_UTILITY_H #include "access/heapam.h" #include "access/htup.h" #include "access/tupdesc.h" #include "catalog/indexing.h" #include "distributed/citus_nodes.h" #include "distributed/relay_utility.h" #include "utils/acl.h" #include "utils/relcache.h" /* total number of hash tokens (2^32) */ #define HASH_TOKEN_COUNT INT64CONST(4294967296) #define SELECT_EXIST_QUERY "SELECT EXISTS (SELECT 1 FROM %s)" #define PG_TABLE_SIZE_FUNCTION "pg_table_size(%s)" #define PG_RELATION_SIZE_FUNCTION "pg_relation_size(%s)" #define PG_TOTAL_RELATION_SIZE_FUNCTION "pg_total_relation_size(%s)" #define CSTORE_TABLE_SIZE_FUNCTION "cstore_table_size(%s)" #if (PG_VERSION_NUM < 100000) static inline void CatalogTupleUpdate(Relation heapRel, ItemPointer otid, HeapTuple tup) { simple_heap_update(heapRel, otid, tup); CatalogUpdateIndexes(heapRel, tup); } static inline Oid CatalogTupleInsert(Relation heapRel, HeapTuple tup) { Oid oid = simple_heap_insert(heapRel, tup); CatalogUpdateIndexes(heapRel, tup); return oid; } #endif /* In-memory representation of a typed tuple in pg_dist_shard. */ typedef struct ShardInterval { CitusNode type; Oid relationId; char storageType; Oid valueTypeId; /* min/max value datum's typeId */ int valueTypeLen; /* min/max value datum's typelen */ bool valueByVal; /* min/max value datum's byval */ bool minValueExists; bool maxValueExists; Datum minValue; /* a shard's typed min value datum */ Datum maxValue; /* a shard's typed max value datum */ uint64 shardId; } ShardInterval; /* In-memory representation of a tuple in pg_dist_placement.
*/ typedef struct GroupShardPlacement { CitusNode type; uint64 placementId; /* sequence that implies this placement creation order */ uint64 shardId; uint64 shardLength; RelayFileState shardState; uint32 groupId; } GroupShardPlacement; /* A GroupShardPlacement which has had some extra data resolved */ typedef struct ShardPlacement { /* * careful, the rest of the code assumes this exactly matches GroupShardPlacement */ CitusNode type; uint64 placementId; uint64 shardId; uint64 shardLength; RelayFileState shardState; uint32 groupId; /* the rest of the fields aren't from pg_dist_placement */ char *nodeName; uint32 nodePort; char partitionMethod; uint32 colocationGroupId; uint32 representativeValue; } ShardPlacement; /* Config variable managed via guc.c */ extern int ReplicationModel; /* Function declarations to read shard and shard placement data */ extern uint32 TableShardReplicationFactor(Oid relationId); extern List * LoadShardIntervalList(Oid relationId); extern int ShardIntervalCount(Oid relationId); extern List * LoadShardList(Oid relationId); extern void CopyShardInterval(ShardInterval *srcInterval, ShardInterval *destInterval); extern void CopyShardPlacement(ShardPlacement *srcPlacement, ShardPlacement *destPlacement); extern uint64 ShardLength(uint64 shardId); extern bool NodeGroupHasShardPlacements(uint32 groupId, bool onlyConsiderActivePlacements); extern List * FinalizedShardPlacementList(uint64 shardId); extern ShardPlacement * FinalizedShardPlacement(uint64 shardId, bool missingOk); extern List * BuildShardPlacementList(ShardInterval *shardInterval); extern List * GroupShardPlacementsForTableOnGroup(Oid relationId, uint32 groupId); /* Function declarations to modify shard and shard placement data */ extern void InsertShardRow(Oid relationId, uint64 shardId, char storageType, text *shardMinValue, text *shardMaxValue); extern void DeleteShardRow(uint64 shardId); extern uint64 InsertShardPlacementRow(uint64 shardId, uint64 placementId, char shardState, uint64 shardLength, uint32 groupId); extern void InsertIntoPgDistPartition(Oid relationId, char distributionMethod, Var *distributionColumn, uint32 colocationId, char replicationModel); extern void DeletePartitionRow(Oid distributedRelationId); extern void DeleteShardRow(uint64 shardId); extern void UpdateShardPlacementState(uint64 placementId, char shardState); extern void DeleteShardPlacementRow(uint64 placementId); extern void UpdateColocationGroupReplicationFactor(uint32 colocationId, int replicationFactor); extern void CreateDistributedTable(Oid relationId, Var *distributionColumn, char distributionMethod, char *colocateWithTableName, bool viaDeprecatedAPI); extern void CreateTruncateTrigger(Oid relationId); /* Remaining metadata utility functions */ extern char * TableOwner(Oid relationId); extern void EnsureTablePermissions(Oid relationId, AclMode mode); extern void EnsureTableOwner(Oid relationId); extern void EnsureSuperUser(void); extern void EnsureReplicationSettings(Oid relationId, char replicationModel); extern bool RegularTable(Oid relationId); extern bool TableReferenced(Oid relationId); extern char * ConstructQualifiedShardName(ShardInterval *shardInterval); extern Datum StringToDatum(char *inputString, Oid dataType); extern char * DatumToString(Datum datum, Oid dataType); #endif /* MASTER_METADATA_UTILITY_H */ citus-7.0.3/src/include/distributed/master_protocol.h000066400000000000000000000164551317107136600230110ustar00rootroot00000000000000/*------------------------------------------------------------------------- * 
* master_protocol.h * Header for shared declarations for access to master node data. These data * are used to create new shards or update existing ones. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef MASTER_PROTOCOL_H #define MASTER_PROTOCOL_H #include "postgres.h" #include "c.h" #include "fmgr.h" #include "distributed/connection_management.h" #include "distributed/shardinterval_utils.h" #include "nodes/pg_list.h" #include "distributed/master_metadata_utility.h" /* * In our distributed database, we need a mechanism to make remote procedure * calls between clients, the master node, and worker nodes. These remote calls * require serializing and deserializing values and function signatures between * nodes; and for these, we currently use PostgreSQL's built-in type and * function definition system. This approach is by no means ideal however; and * our implementation: (i) cannot perform compile-time type checks, (ii) * requires additional effort when upgrading to new function signatures, and * (iii) hides argument and return value names and types behind complicated * pg_proc.h definitions. * * An ideal implementation should overcome these problems, and make it much * easier to pass values back and forth between nodes. One such implementation * that comes close to ideal is Google's Protocol Buffers. Nonetheless, we do * not use it in here as its inclusion requires changes to PostgreSQL's make * system, and a native C version is currently unavailable. */ /* Number of tuple fields that master node functions return */ #define TABLE_METADATA_FIELDS 7 #define CANDIDATE_NODE_FIELDS 2 #define WORKER_NODE_FIELDS 2 /* Name of columnar foreign data wrapper */ #define CSTORE_FDW_NAME "cstore_fdw" #define SHARDID_SEQUENCE_NAME "pg_dist_shardid_seq" #define PLACEMENTID_SEQUENCE_NAME "pg_dist_placement_placementid_seq" /* Remote call definitions to help with data staging and deletion */ #define WORKER_APPLY_SHARD_DDL_COMMAND \ "SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s, %s)" #define WORKER_APPLY_SHARD_DDL_COMMAND_WITHOUT_SCHEMA \ "SELECT worker_apply_shard_ddl_command (" UINT64_FORMAT ", %s)" #define WORKER_APPEND_TABLE_TO_SHARD \ "SELECT worker_append_table_to_shard (%s, %s, %s, %u)" #define WORKER_APPLY_INTER_SHARD_DDL_COMMAND \ "SELECT worker_apply_inter_shard_ddl_command (" UINT64_FORMAT ", %s, " UINT64_FORMAT \ ", %s, %s)" #define SHARD_RANGE_QUERY "SELECT min(%s), max(%s) FROM %s" #define SHARD_TABLE_SIZE_QUERY "SELECT pg_table_size(%s)" #define SHARD_CSTORE_TABLE_SIZE_QUERY "SELECT cstore_table_size(%s)" #define DROP_REGULAR_TABLE_COMMAND "DROP TABLE IF EXISTS %s CASCADE" #define DROP_FOREIGN_TABLE_COMMAND "DROP FOREIGN TABLE IF EXISTS %s CASCADE" #define CREATE_SCHEMA_COMMAND "CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s" #define CREATE_EMPTY_SHARD_QUERY "SELECT master_create_empty_shard('%s')" #define FINALIZED_SHARD_PLACEMENTS_QUERY \ "SELECT placementid, nodename, nodeport FROM pg_dist_shard_placement WHERE shardstate = 1 AND shardid = %ld" #define UPDATE_SHARD_STATISTICS_QUERY \ "SELECT master_update_shard_statistics(%ld)" #define PARTITION_METHOD_QUERY "SELECT part_method FROM master_get_table_metadata('%s');" /* Enumeration that defines the shard placement policy to use while staging */ typedef enum { SHARD_PLACEMENT_INVALID_FIRST = 0, SHARD_PLACEMENT_LOCAL_NODE_FIRST = 1, SHARD_PLACEMENT_ROUND_ROBIN = 2, SHARD_PLACEMENT_RANDOM = 3 } ShardPlacementPolicyType; /* Config variables 
managed via guc.c */ extern int ShardCount; extern int ShardReplicationFactor; extern int ShardMaxSize; extern int ShardPlacementPolicy; extern bool IsCoordinator(void); /* Function declarations local to the distributed module */ extern bool CStoreTable(Oid relationId); extern uint64 GetNextShardId(void); extern uint64 GetNextPlacementId(void); extern Oid ResolveRelationId(text *relationName); extern List * GetTableDDLEvents(Oid relationId, bool forShardCreation); extern List * GetTableCreationCommands(Oid relationId, bool forShardCreation); extern List * GetTableIndexAndConstraintCommands(Oid relationId); extern List * GetTableForeignConstraintCommands(Oid relationId); extern char ShardStorageType(Oid relationId); extern void CheckDistributedTable(Oid relationId); extern void CreateAppendDistributedShardPlacements(Oid relationId, int64 shardId, List *workerNodeList, int replicationFactor); extern void CreateShardsOnWorkers(Oid distributedRelationId, List *shardPlacements, bool useExclusiveConnection, bool colocatedShard); extern List * InsertShardPlacementRows(Oid relationId, int64 shardId, List *workerNodeList, int workerStartIndex, int replicationFactor); extern uint64 UpdateShardStatistics(int64 shardId); extern void CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, int32 replicationFactor, bool useExclusiveConnections); extern void CreateColocatedShards(Oid targetRelationId, Oid sourceRelationId, bool useExclusiveConnections); extern void CreateReferenceTableShard(Oid distributedTableId); extern void WorkerCreateShard(Oid relationId, int shardIndex, uint64 shardId, List *ddlCommandList, List *foreignConstraintCommandList, char *alterTableAttachPartitionCommand, MultiConnection *connection); extern Oid ForeignConstraintGetReferencedTableId(char *queryString); extern void CheckHashPartitionedTable(Oid distributedTableId); extern void CheckTableSchemaNameForDrop(Oid relationId, char **schemaName, char **tableName); extern text * IntegerToText(int32 value); /* Function declarations for generating metadata for shard and placement creation */ extern Datum master_get_table_metadata(PG_FUNCTION_ARGS); extern Datum master_get_table_ddl_events(PG_FUNCTION_ARGS); extern Datum master_get_new_shardid(PG_FUNCTION_ARGS); extern Datum master_get_new_placementid(PG_FUNCTION_ARGS); extern Datum master_get_active_worker_nodes(PG_FUNCTION_ARGS); /* Function declarations to help with data staging and deletion */ extern Datum master_create_empty_shard(PG_FUNCTION_ARGS); extern Datum master_append_table_to_shard(PG_FUNCTION_ARGS); extern Datum master_update_shard_statistics(PG_FUNCTION_ARGS); extern Datum master_apply_delete_command(PG_FUNCTION_ARGS); extern Datum master_drop_sequences(PG_FUNCTION_ARGS); extern Datum master_modify_multiple_shards(PG_FUNCTION_ARGS); extern Datum master_drop_all_shards(PG_FUNCTION_ARGS); /* function declarations for shard creation functionality */ extern Datum master_create_worker_shards(PG_FUNCTION_ARGS); extern Datum isolate_tenant_to_new_shard(PG_FUNCTION_ARGS); /* function declarations for shard repair functionality */ extern Datum master_copy_shard_placement(PG_FUNCTION_ARGS); /* function declarations for shard copy functionality */ extern List * CopyShardCommandList(ShardInterval *shardInterval, char *sourceNodeName, int32 sourceNodePort); extern List * CopyShardForeignConstraintCommandList(ShardInterval *shardInterval); extern ShardPlacement * SearchShardPlacementInList(List *shardPlacementList, char *nodeName, uint32 nodePort, bool missingOk);
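/*
 * Illustrative sketch (not part of the original header): the command
 * templates defined above are typically expanded with appendStringInfo()
 * before being sent to a worker node. The variable names below are
 * hypothetical.
 *
 *   StringInfo applyDDLCommand = makeStringInfo();
 *   appendStringInfo(applyDDLCommand, WORKER_APPLY_SHARD_DDL_COMMAND,
 *                    shardId, quote_literal_cstr(schemaName),
 *                    quote_literal_cstr(ddlCommand));
 */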
#endif /* MASTER_PROTOCOL_H */ citus-7.0.3/src/include/distributed/metadata_cache.h000066400000000000000000000104521317107136600224670ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * metadata_cache.h * Type and function declarations for caching distributed table metadata. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef METADATA_CACHE_H #define METADATA_CACHE_H #include "fmgr.h" #include "distributed/master_metadata_utility.h" #include "distributed/pg_dist_partition.h" #include "distributed/worker_manager.h" #include "utils/hsearch.h" extern bool EnableVersionChecks; /* managed via guc.c */ typedef enum { USE_SECONDARY_NODES_NEVER = 0, USE_SECONDARY_NODES_ALWAYS = 1 } ReadFromSecondariesType; extern int ReadFromSecondaries; /* * Representation of a table's metadata that is frequently used for * distributed execution. Cached. */ typedef struct { /* lookup key - must be first. A pg_class.oid oid. */ Oid relationId; /* * Has an invalidation been received for this entry, requiring a rebuild * of the cache entry? */ bool isValid; bool isDistributedTable; bool hasUninitializedShardInterval; bool hasUniformHashDistribution; /* valid for hash partitioned tables */ bool hasOverlappingShardInterval; /* pg_dist_partition metadata for this table */ char *partitionKeyString; char partitionMethod; uint32 colocationId; char replicationModel; /* pg_dist_shard metadata (variable-length ShardInterval array) for this table */ int shardIntervalArrayLength; ShardInterval **sortedShardIntervalArray; /* comparator for partition column's type, NULL if DISTRIBUTE_BY_NONE */ FmgrInfo *shardColumnCompareFunction; /* * Comparator for partition interval type (different from * shardValueCompareFunction if hash-partitioned), NULL if * DISTRIBUTE_BY_NONE.
*/ FmgrInfo *shardIntervalCompareFunction; FmgrInfo *hashFunction; /* NULL if table is not distributed by hash */ /* pg_dist_placement metadata */ GroupShardPlacement **arrayOfPlacementArrays; int *arrayOfPlacementArrayLengths; } DistTableCacheEntry; extern bool IsDistributedTable(Oid relationId); extern List * DistributedTableList(void); extern ShardInterval * LoadShardInterval(uint64 shardId); extern ShardPlacement * FindShardPlacementOnGroup(uint32 groupId, uint64 shardId); extern GroupShardPlacement * LoadGroupShardPlacement(uint64 shardId, uint64 placementId); extern ShardPlacement * LoadShardPlacement(uint64 shardId, uint64 placementId); extern DistTableCacheEntry * DistributedTableCacheEntry(Oid distributedRelationId); extern int GetLocalGroupId(void); extern List * DistTableOidList(void); extern List * ShardPlacementList(uint64 shardId); extern void CitusInvalidateRelcacheByRelid(Oid relationId); extern void CitusInvalidateRelcacheByShardId(int64 shardId); extern void InvalidateMetadataSystemCache(void); extern bool CitusHasBeenLoaded(void); extern bool CheckCitusVersion(int elevel); extern bool CheckAvailableVersion(int elevel); bool MajorVersionsCompatible(char *leftVersion, char *rightVersion); extern void EnsureModificationsCanRun(void); /* access WorkerNodeHash */ extern HTAB * GetWorkerNodeHash(void); /* relation oids */ extern Oid DistColocationRelationId(void); extern Oid DistColocationConfigurationIndexId(void); extern Oid DistColocationColocationidIndexId(void); extern Oid DistPartitionRelationId(void); extern Oid DistShardRelationId(void); extern Oid DistPlacementRelationId(void); extern Oid DistNodeRelationId(void); extern Oid DistLocalGroupIdRelationId(void); /* index oids */ extern Oid DistPartitionLogicalRelidIndexId(void); extern Oid DistPartitionColocationidIndexId(void); extern Oid DistShardLogicalRelidIndexId(void); extern Oid DistShardShardidIndexId(void); extern Oid DistPlacementShardidIndexId(void); extern Oid DistPlacementPlacementidIndexId(void); extern Oid DistTransactionRelationId(void); extern Oid DistTransactionGroupIndexId(void); extern Oid DistTransactionRecordIndexId(void); extern Oid DistPlacementGroupidIndexId(void); /* function oids */ extern Oid CitusExtraDataContainerFuncId(void); extern Oid CitusWorkerHashFunctionId(void); /* nodeRole enum oids */ extern Oid PrimaryNodeRoleId(void); extern Oid SecondaryNodeRoleId(void); extern Oid UnavailableNodeRoleId(void); /* user related functions */ extern Oid CitusExtensionOwner(void); extern char * CitusExtensionOwnerName(void); extern char * CurrentUserName(void); #endif /* METADATA_CACHE_H */ citus-7.0.3/src/include/distributed/metadata_sync.h000066400000000000000000000043741317107136600224060ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * metadata_sync.h * Type and function declarations used to sync metadata across all * workers. * * Copyright (c) 2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef METADATA_SYNC_H #define METADATA_SYNC_H #include "distributed/metadata_cache.h" #include "nodes/pg_list.h" /* Function declarations for metadata syncing */ extern bool ShouldSyncTableMetadata(Oid relationId); extern List * MetadataCreateCommands(void); extern List * GetDistributedTableDDLEvents(Oid relationId); extern List * MetadataDropCommands(void); extern char * DistributionCreateCommand(DistTableCacheEntry *cacheEntry); extern char * DistributionDeleteCommand(char *schemaName, char *tableName); extern char * TableOwnerResetCommand(Oid distributedRelationId); extern char * NodeListInsertCommand(List *workerNodeList); extern List * ShardListInsertCommand(List *shardIntervalList); extern List * ShardDeleteCommandList(ShardInterval *shardInterval); extern char * NodeDeleteCommand(uint32 nodeId); extern char * NodeStateUpdateCommand(uint32 nodeId, bool isActive); extern char * ColocationIdUpdateCommand(Oid relationId, uint32 colocationId); extern char * CreateSchemaDDLCommand(Oid schemaId); extern char * PlacementUpsertCommand(uint64 shardId, uint64 placementId, int shardState, uint64 shardLength, uint32 groupId); extern void CreateTableMetadataOnWorkers(Oid relationId); #define DELETE_ALL_NODES "TRUNCATE pg_dist_node" #define REMOVE_ALL_CLUSTERED_TABLES_COMMAND \ "SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition" #define DISABLE_DDL_PROPAGATION "SET citus.enable_ddl_propagation TO 'off'" #define WORKER_APPLY_SEQUENCE_COMMAND "SELECT worker_apply_sequence_command (%s)" #define UPSERT_PLACEMENT "INSERT INTO pg_dist_placement " \ "(shardid, shardstate, shardlength, " \ "groupid, placementid) " \ "VALUES (%lu, %d, %lu, %d, %lu) " \ "ON CONFLICT (placementid) DO UPDATE SET " \ "shardid = EXCLUDED.shardid, " \ "shardstate = EXCLUDED.shardstate, " \ "shardlength = EXCLUDED.shardlength, " \ "groupid = EXCLUDED.groupid" #endif /* METADATA_SYNC_H */ citus-7.0.3/src/include/distributed/multi_client_executor.h000066400000000000000000000102571317107136600241750ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_client_executor.h * Type and function pointer declarations for executing client-side (libpq) * logic. * * Copyright (c) 2012-2016, Citus Data, Inc.
* * $Id$ * *------------------------------------------------------------------------- */ #ifndef MULTI_CLIENT_EXECUTOR_H #define MULTI_CLIENT_EXECUTOR_H #define INVALID_CONNECTION_ID -1 /* identifies an invalid connection */ #define MAX_CONNECTION_COUNT 2048 /* simultaneous client connection count */ #define STRING_BUFFER_SIZE 1024 /* buffer size for character arrays */ /* Enumeration to track one client connection's status */ typedef enum { CLIENT_INVALID_CONNECT = 0, CLIENT_CONNECTION_BAD = 1, CLIENT_CONNECTION_BUSY = 2, CLIENT_CONNECTION_BUSY_READ = 3, CLIENT_CONNECTION_BUSY_WRITE = 4, CLIENT_CONNECTION_READY = 5 } ConnectStatus; /* Enumeration to see if we can read query results without blocking */ typedef enum { CLIENT_INVALID_RESULT_STATUS = 0, CLIENT_RESULT_UNAVAILABLE = 1, CLIENT_RESULT_BUSY = 2, CLIENT_RESULT_READY = 3 } ResultStatus; /* Enumeration to track one execution query's status on the client */ typedef enum { CLIENT_INVALID_QUERY = 0, CLIENT_QUERY_FAILED = 1, CLIENT_QUERY_DONE = 2, CLIENT_QUERY_COPY = 3 } QueryStatus; /* Enumeration to track one copy query's status on the client */ typedef enum { CLIENT_INVALID_COPY = 0, CLIENT_COPY_MORE = 1, CLIENT_COPY_FAILED = 2, CLIENT_COPY_DONE = 3 } CopyStatus; /* Enumeration to track the status of a query in a batch on the client */ typedef enum { CLIENT_INVALID_BATCH_QUERY = 0, CLIENT_BATCH_QUERY_FAILED = 1, CLIENT_BATCH_QUERY_CONTINUE = 2, CLIENT_BATCH_QUERY_DONE = 3 } BatchQueryStatus; /* Enumeration to track whether a task is ready to run and, if not, what it's blocked on */ typedef enum TaskExecutionStatus { TASK_STATUS_INVALID = 0, TASK_STATUS_ERROR, /* error occurred */ TASK_STATUS_READY, /* task ready to be processed further */ TASK_STATUS_SOCKET_READ, /* waiting for connection to become ready for reads */ TASK_STATUS_SOCKET_WRITE /* waiting for connection to become ready for writes */ } TaskExecutionStatus; struct pollfd; /* forward declared, to avoid having to include poll.h */ typedef struct WaitInfo { int maxWaiters; struct pollfd *pollfds; int registeredWaiters; bool haveReadyWaiter; bool haveFailedWaiter; } WaitInfo; /* Function declarations for executing client-side (libpq) logic.
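 *
 * A rough usage sketch of the non-blocking connect flow these declarations
 * support (variable names are hypothetical; error handling and waiting on
 * the underlying socket between polls are omitted):
 *
 *   int32 connectionId = MultiClientConnectStart(nodeName, nodePort,
 *                                                nodeDatabase, nodeUser);
 *   ConnectStatus pollStatus = MultiClientConnectPoll(connectionId);
 *   while (pollStatus == CLIENT_CONNECTION_BUSY ||
 *          pollStatus == CLIENT_CONNECTION_BUSY_READ ||
 *          pollStatus == CLIENT_CONNECTION_BUSY_WRITE)
 *   {
 *       pollStatus = MultiClientConnectPoll(connectionId);
 *   }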
*/ extern int32 MultiClientConnect(const char *nodeName, uint32 nodePort, const char *nodeDatabase, const char *nodeUser); extern int32 MultiClientConnectStart(const char *nodeName, uint32 nodePort, const char *nodeDatabase, const char *nodeUser); extern ConnectStatus MultiClientConnectPoll(int32 connectionId); extern void MultiClientDisconnect(int32 connectionId); extern bool MultiClientConnectionUp(int32 connectionId); extern bool MultiClientExecute(int32 connectionId, const char *query, void **queryResult, int *rowCount, int *columnCount); extern bool MultiClientSendQuery(int32 connectionId, const char *query); extern bool MultiClientCancel(int32 connectionId); extern ResultStatus MultiClientResultStatus(int32 connectionId); extern QueryStatus MultiClientQueryStatus(int32 connectionId); extern CopyStatus MultiClientCopyData(int32 connectionId, int32 fileDescriptor); extern bool MultiClientQueryResult(int32 connectionId, void **queryResult, int *rowCount, int *columnCount); extern BatchQueryStatus MultiClientBatchResult(int32 connectionId, void **queryResult, int *rowCount, int *columnCount); extern char * MultiClientGetValue(void *queryResult, int rowIndex, int columnIndex); extern bool MultiClientValueIsNull(void *queryResult, int rowIndex, int columnIndex); extern void MultiClientClearResult(void *queryResult); extern WaitInfo * MultiClientCreateWaitInfo(int maxConnections); extern void MultiClientResetWaitInfo(WaitInfo *waitInfo); extern void MultiClientFreeWaitInfo(WaitInfo *waitInfo); extern void MultiClientRegisterWait(WaitInfo *waitInfo, TaskExecutionStatus waitStatus, int32 connectionId); extern void MultiClientWait(WaitInfo *waitInfo); #endif /* MULTI_CLIENT_EXECUTOR_H */ citus-7.0.3/src/include/distributed/multi_copy.h000066400000000000000000000067761317107136600217660ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_copy.h * Declarations for public functions and variables used in COPY for * distributed tables. * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef MULTI_COPY_H #define MULTI_COPY_H #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "nodes/execnodes.h" #include "nodes/parsenodes.h" #include "tcop/dest.h" #define INVALID_PARTITION_COLUMN_INDEX -1 /* * A smaller version of copy.c's CopyStateData, trimmed to the elements * necessary to copy out results. While it'd be a bit nicer to share code, * it'd require changing core postgres code. */ typedef struct CopyOutStateData { StringInfo fe_msgbuf; /* used for all dests during COPY TO, only for * dest == COPY_NEW_FE in COPY FROM */ int file_encoding; /* file or remote side's character encoding */ bool need_transcoding; /* file encoding diff from server? */ bool binary; /* binary format? */ char *null_print; /* NULL marker string (server encoding!) 
*/ char *null_print_client; /* same converted to file encoding */ char *delim; /* column delimiter (must be 1 byte) */ MemoryContext rowcontext; /* per-row evaluation context */ } CopyOutStateData; typedef struct CopyOutStateData *CopyOutState; /* struct type to keep both hostname and port */ typedef struct NodeAddress { char *nodeName; int32 nodePort; } NodeAddress; /* CopyDestReceiver can be used to stream results into a distributed table */ typedef struct CitusCopyDestReceiver { /* public DestReceiver interface */ DestReceiver pub; /* relation and columns to which to copy */ Oid distributedRelationId; List *columnNameList; int partitionColumnIndex; /* distributed table metadata */ DistTableCacheEntry *tableMetadata; /* open relation handle */ Relation distributedRelation; /* descriptor of the tuples that are sent to the worker */ TupleDesc tupleDescriptor; /* EState for per-tuple memory allocation */ EState *executorState; /* MemoryContext for DestReceiver session */ MemoryContext memoryContext; /* template for COPY statement to send to workers */ CopyStmt *copyStatement; /* cached shard metadata for pruning */ HTAB *shardConnectionHash; bool stopOnFailure; /* state on how to copy out data types */ CopyOutState copyOutState; FmgrInfo *columnOutputFunctions; /* number of tuples sent */ int64 tuplesSent; } CitusCopyDestReceiver; /* function declarations for copying into a distributed table */ extern CitusCopyDestReceiver * CreateCitusCopyDestReceiver(Oid relationId, List *columnNameList, int partitionColumnIndex, EState *executorState, bool stopOnFailure); extern FmgrInfo * ColumnOutputFunctions(TupleDesc rowDescriptor, bool binaryFormat); extern void AppendCopyRowData(Datum *valueArray, bool *isNullArray, TupleDesc rowDescriptor, CopyOutState rowOutputState, FmgrInfo *columnOutputFunctions); extern void AppendCopyBinaryHeaders(CopyOutState headerOutputState); extern void AppendCopyBinaryFooters(CopyOutState footerOutputState); extern void CitusCopyFrom(CopyStmt *copyStatement, char *completionTag); extern bool IsCopyFromWorker(CopyStmt *copyStatement); extern NodeAddress * MasterNodeAddress(CopyStmt *copyStatement); #endif /* MULTI_COPY_H */ citus-7.0.3/src/include/distributed/multi_executor.h000066400000000000000000000033001317107136600226260ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_executor.h * Executor support for Citus. * * Copyright (c) 2012-2016, Citus Data, Inc. 
*------------------------------------------------------------------------- */ #ifndef MULTI_EXECUTOR_H #define MULTI_EXECUTOR_H #include "executor/execdesc.h" #include "nodes/parsenodes.h" #include "nodes/execnodes.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_server_executor.h" typedef struct CitusScanState { CustomScanState customScanState; /* underlying custom scan node */ MultiPlan *multiPlan; /* distributed execution plan */ MultiExecutorType executorType; /* distributed executor type */ bool finishedRemoteScan; /* flag to check if remote scan is finished */ Tuplestorestate *tuplestorestate; /* tuple store to store distributed results */ } CitusScanState; extern Node * RealTimeCreateScan(CustomScan *scan); extern Node * TaskTrackerCreateScan(CustomScan *scan); extern Node * RouterCreateScan(CustomScan *scan); extern Node * CoordinatorInsertSelectCreateScan(CustomScan *scan); extern Node * DelayedErrorCreateScan(CustomScan *scan); extern void CitusSelectBeginScan(CustomScanState *node, EState *estate, int eflags); extern TupleTableSlot * RealTimeExecScan(CustomScanState *node); extern TupleTableSlot * TaskTrackerExecScan(CustomScanState *node); extern void CitusEndScan(CustomScanState *node); extern void CitusReScan(CustomScanState *node); extern void CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es); extern TupleTableSlot * ReturnTupleFromTuplestore(CitusScanState *scanState); #endif /* MULTI_EXECUTOR_H */ citus-7.0.3/src/include/distributed/multi_explain.h000066400000000000000000000010021317107136600224300ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_explain.h * Explain support for Citus. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #ifndef MULTI_EXPLAIN_H #define MULTI_EXPLAIN_H #include "executor/executor.h" /* Config variables managed via guc.c to explain distributed query plans */ extern bool ExplainDistributedQueries; extern bool ExplainAllTasks; #endif /* MULTI_EXPLAIN_H */ citus-7.0.3/src/include/distributed/multi_join_order.h000066400000000000000000000062111317107136600231260ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_join_order.h * * Type and function declarations for determining a left-only join order for a * distributed query plan. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef MULTI_JOIN_ORDER_H #define MULTI_JOIN_ORDER_H #include "nodes/pg_list.h" #include "nodes/primnodes.h" /* * JoinRuleType determines the type of the join rule that applies between two * tables or row sources. The rule types are ordered below according to their * costs, with the cheapest rule appearing at the top. Note that changing the * order of these enums *will* change the order in which the rules are applied. */ typedef enum JoinRuleType { JOIN_RULE_INVALID_FIRST = 0, BROADCAST_JOIN = 1, LOCAL_PARTITION_JOIN = 2, SINGLE_PARTITION_JOIN = 3, DUAL_PARTITION_JOIN = 4, CARTESIAN_PRODUCT = 5, /* * Add new join rule types above this comment. After adding, you must also * update these arrays: RuleEvalFunctionArray, RuleApplyFunctionArray, and * RuleNameArray. */ JOIN_RULE_LAST } JoinRuleType; /* * TableEntry represents a table used when determining the join order.
A table * entry corresponds to an ordinary relation reference (RTE_RELATION) in the * query range table list. */ typedef struct TableEntry { Oid relationId; uint32 rangeTableId; } TableEntry; /* * JoinOrderNode represents an element in the join order list; and this list * keeps the total join order for a distributed query. The first node in this * list later becomes the leftmost table in the join tree, and the successive * elements in the list are the joining tables in the left-deep tree. */ typedef struct JoinOrderNode { TableEntry *tableEntry; /* this node's relation and range table id */ JoinRuleType joinRuleType; /* not relevant for the first table */ JoinType joinType; /* not relevant for the first table */ Var *partitionColumn; /* not relevant for the first table */ char partitionMethod; List *joinClauseList; /* not relevant for the first table */ List *shardIntervalList; } JoinOrderNode; /* Config variables managed via guc.c */ extern int LargeTableShardCount; extern bool LogMultiJoinOrder; /* Function declaration for determining table join orders */ extern List * FixedJoinOrderList(FromExpr *fromExpr, List *tableEntryList); extern List * JoinExprList(FromExpr *fromExpr); extern List * JoinOrderList(List *rangeTableEntryList, List *joinClauseList); extern List * ApplicableJoinClauses(List *leftTableIdList, uint32 rightTableId, List *joinClauseList); extern OpExpr * SinglePartitionJoinClause(Var *partitionColumn, List *applicableJoinClauses); extern OpExpr * DualPartitionJoinClause(List *applicableJoinClauses); extern Var * LeftColumn(OpExpr *joinClause); extern Var * RightColumn(OpExpr *joinClause); extern Var * PartitionColumn(Oid relationId, uint32 rangeTableId); extern Var * DistPartitionKey(Oid relationId); extern char PartitionMethod(Oid relationId); extern char TableReplicationModel(Oid relationId); #endif /* MULTI_JOIN_ORDER_H */ citus-7.0.3/src/include/distributed/multi_logical_optimizer.h000066400000000000000000000105141317107136600245110ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_logical_optimizer.h * Type and function declarations for optimizing multi-relation based logical * plans. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef MULTI_LOGICAL_OPTIMIZER_H #define MULTI_LOGICAL_OPTIMIZER_H #include "distributed/master_metadata_utility.h" #include "distributed/multi_logical_planner.h" #include "distributed/relation_restriction_equivalence.h" /* Definitions local to logical plan optimizer */ #define DIVISION_OPER_NAME "/" #define DISABLE_LIMIT_APPROXIMATION -1 #define DISABLE_DISTINCT_APPROXIMATION 0.0 #define ARRAY_CAT_AGGREGATE_NAME "array_cat_agg" #define WORKER_COLUMN_FORMAT "worker_column_%d" /* Definitions related to count(distinct) approximations */ #define HLL_EXTENSION_NAME "hll" #define HLL_TYPE_NAME "hll" #define HLL_HASH_INTEGER_FUNC_NAME "hll_hash_integer" #define HLL_HASH_BIGINT_FUNC_NAME "hll_hash_bigint" #define HLL_HASH_TEXT_FUNC_NAME "hll_hash_text" #define HLL_HASH_ANY_FUNC_NAME "hll_hash_any" #define HLL_ADD_AGGREGATE_NAME "hll_add_agg" #define HLL_UNION_AGGREGATE_NAME "hll_union_agg" #define HLL_CARDINALITY_FUNC_NAME "hll_cardinality" /* * AggregateType represents an aggregate function's type, where the function is * used in the context of a query. We use this function type to determine how to * modify the plan when creating the logical distributed plan. 
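 *
 * Worked example of why the plan must change (numbers are illustrative
 * only): avg(x) over shard values {1, 2} and {10, 20, 30} is 63 / 5 = 12.6,
 * whereas averaging the per-shard averages (1.5 and 20) would give 10.75.
 * Workers therefore need to ship partial state that the master can combine
 * correctly, rather than locally finished aggregate values.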
* * Please note that the order of values in this enumeration is tied to the order * of elements in the following AggregateNames array. This order needs to be * preserved. */ typedef enum { AGGREGATE_INVALID_FIRST = 0, AGGREGATE_AVERAGE = 1, AGGREGATE_MIN = 2, AGGREGATE_MAX = 3, AGGREGATE_SUM = 4, AGGREGATE_COUNT = 5, AGGREGATE_ARRAY_AGG = 6 } AggregateType; /* * PushDownStatus indicates whether a node can be pushed down below its child * using the commutative and distributive relational algebraic properties. */ typedef enum { PUSH_DOWN_INVALID_FIRST = 0, PUSH_DOWN_VALID = 1, PUSH_DOWN_NOT_VALID = 2, PUSH_DOWN_SPECIAL_CONDITIONS = 3 } PushDownStatus; /* * PullUpStatus indicates whether a node can be pulled up above its parent using * the commutative and factorizable relational algebraic properties. */ typedef enum { PULL_UP_INVALID_FIRST = 0, PULL_UP_VALID = 1, PULL_UP_NOT_VALID = 2 } PullUpStatus; /* * AggregateNames is an array that stores cstring names for aggregate functions; * these cstring names act as an intermediary when mapping aggregate function * oids to AggregateType enumerations. For this mapping to occur, we use the * aggregate function oid to find the corresponding cstring name in pg_proc. We * then compare that name against entries in this array, and return the * appropriate AggregateType value. * * Please note that the order of elements in this array is tied to the order of * values in the preceding AggregateType enum. This order needs to be preserved. */ static const char *const AggregateNames[] = { "invalid", "avg", "min", "max", "sum", "count", "array_agg" }; /* Config variable managed via guc.c */ extern int LimitClauseRowFetchCount; extern double CountDistinctErrorRate; /* Function declaration for optimizing logical plans */ extern void MultiLogicalPlanOptimize(MultiTreeRoot *multiTree); /* Function declaration for getting partition method for the given relation */ extern char PartitionMethod(Oid relationId); /* Function declaration for getting oid for the given function name */ extern Oid FunctionOid(const char *schemaName, const char *functionName, int argumentCount); /* Function declaration for helper functions in subquery pushdown */ extern List * SubqueryMultiTableList(MultiNode *multiNode); extern List * GroupTargetEntryList(List *groupClauseList, List *targetEntryList); extern bool ExtractQueryWalker(Node *node, List **queryList); extern bool LeafQuery(Query *queryTree); extern List * PartitionColumnOpExpressionList(Query *query); extern List * ReplaceColumnsInOpExpressionList(List *opExpressionList, Var *newColumn); extern bool IsPartitionColumn(Expr *columnExpression, Query *query); extern void FindReferencedTableColumn(Expr *columnExpression, List *parentQueryList, Query *query, Oid *relationId, Var **column); #endif /* MULTI_LOGICAL_OPTIMIZER_H */ citus-7.0.3/src/include/distributed/multi_logical_planner.h000066400000000000000000000144661317107136600241400ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_logical_planner.h * Type declarations for multi-relational algebra operators, and function * declarations for building logical plans over distributed relations. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #ifndef MULTI_LOGICAL_PLANNER_H #define MULTI_LOGICAL_PLANNER_H #include "distributed/citus_nodes.h" #include "distributed/multi_join_order.h" #include "distributed/relation_restriction_equivalence.h" #include "nodes/nodes.h" #include "nodes/primnodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #define SUBQUERY_RANGE_TABLE_ID -1 #define SUBQUERY_RELATION_ID 10000 #define SUBQUERY_PUSHDOWN_RELATION_ID 10001 /* * MultiNode represents the base node type for all multi-relational algebra * nodes. By creating this base node, we can simulate inheritance and represent * derived operator nodes as a MultiNode type. A similar structure to simulate * inheritance is also used in PostgreSQL's plan nodes. */ typedef struct MultiNode { CitusNode type; struct MultiNode *parentNode; /* child node(s) are defined in unary and binary nodes */ } MultiNode; /* Represents unary nodes that have only one child */ typedef struct MultiUnaryNode { MultiNode node; struct MultiNode *childNode; } MultiUnaryNode; /* Represents binary nodes that have two children */ typedef struct MultiBinaryNode { MultiNode node; struct MultiNode *leftChildNode; struct MultiNode *rightChildNode; } MultiBinaryNode; /* * MultiTreeRoot keeps a pointer to the root node in the multi-relational * operator tree. This node is always on the top of every logical plan. */ typedef struct MultiTreeRoot { MultiUnaryNode unaryNode; } MultiTreeRoot; /* * MultiTable represents a partitioned table in a logical query plan. Note that * this node does not represent a query operator, and differs from the nodes * that follow in that sense. */ typedef struct MultiTable { MultiUnaryNode unaryNode; Oid relationId; int rangeTableId; Var *partitionColumn; Alias *alias; Alias *referenceNames; Query *subquery; /* this field is only valid for non-relation subquery types */ } MultiTable; /* Defines the columns to project in multi-relational algebra */ typedef struct MultiProject { MultiUnaryNode unaryNode; List *columnList; } MultiProject; /* * MultiCollect defines the Collect operator in multi-relational algebra. This * operator collects data from remote nodes; the collected data are output from * the query operator that is beneath this Collect in the logical query tree. */ typedef struct MultiCollect { MultiUnaryNode unaryNode; } MultiCollect; /* * MultiSelect defines the MultiSelect operator in multi-relational algebra. * This operator contains select predicates which apply to a selected logical * relation. */ typedef struct MultiSelect { MultiUnaryNode unaryNode; List *selectClauseList; } MultiSelect; /* * MultiJoin joins the output of two query operators that are beneath it in the * query tree. The operator also keeps the join rule that applies between the * two operators, and the partition key to use if the join is distributed. */ typedef struct MultiJoin { MultiBinaryNode binaryNode; List *joinClauseList; JoinRuleType joinRuleType; JoinType joinType; } MultiJoin; /* Defines the (re-)Partition operator in multi-relational algebra */ typedef struct MultiPartition { MultiUnaryNode unaryNode; Var *partitionColumn; uint32 splitPointTableId; } MultiPartition; /* Defines the CartesianProduct operator in multi-relational algebra */ typedef struct MultiCartesianProduct { MultiBinaryNode binaryNode; } MultiCartesianProduct; /* * MultiExtendedOp defines a set of extended operators that operate on columns * in relational algebra. 
This node allows us to distinguish between operations * in the master and worker nodes, and also captures the following: * * (1) Aggregate functions such as sums or averages; * (2) Grouping of attributes; these groupings may also be tied to aggregates; * (3) Extended projection expressions including columns, arithmetic and string * functions; * (4) User's intended display information, such as column display order; * (5) Sort clauses on columns, expressions, or aggregates; and * (6) Limit count and offset clause. */ typedef struct MultiExtendedOp { MultiUnaryNode unaryNode; List *targetList; List *groupClauseList; List *sortClauseList; Node *limitCount; Node *limitOffset; Node *havingQual; } MultiExtendedOp; /* Config variables managed via guc.c */ extern bool SubqueryPushdown; /* Function declarations for building logical plans */ extern MultiTreeRoot * MultiLogicalPlanCreate(Query *originalQuery, Query *queryTree, PlannerRestrictionContext * plannerRestrictionContext, ParamListInfo boundParams); extern bool NeedsDistributedPlanning(Query *queryTree); extern MultiNode * ParentNode(MultiNode *multiNode); extern MultiNode * ChildNode(MultiUnaryNode *multiNode); extern MultiNode * GrandChildNode(MultiUnaryNode *multiNode); extern void SetChild(MultiUnaryNode *parent, MultiNode *child); extern void SetLeftChild(MultiBinaryNode *parent, MultiNode *leftChild); extern void SetRightChild(MultiBinaryNode *parent, MultiNode *rightChild); extern bool UnaryOperator(MultiNode *node); extern bool BinaryOperator(MultiNode *node); extern List * OutputTableIdList(MultiNode *multiNode); extern List * FindNodesOfType(MultiNode *node, int type); extern List * JoinClauseList(List *whereClauseList); extern bool IsJoinClause(Node *clause); extern List * SubqueryEntryList(Query *queryTree); extern bool ExtractRangeTableIndexWalker(Node *node, List **rangeTableIndexList); extern List * WhereClauseList(FromExpr *fromExpr); extern List * QualifierList(FromExpr *fromExpr); extern List * TableEntryList(List *rangeTableList); extern List * UsedTableEntryList(Query *query); extern bool ExtractRangeTableRelationWalker(Node *node, List **rangeTableList); extern bool ExtractRangeTableEntryWalker(Node *node, List **rangeTableList); extern bool ExtractRangeTableRelationWalkerWithRTEExpand(Node *node, List **rangeTableList); extern List * pull_var_clause_default(Node *node); extern bool OperatorImplementsEquality(Oid opno); #endif /* MULTI_LOGICAL_PLANNER_H */ citus-7.0.3/src/include/distributed/multi_master_planner.h000066400000000000000000000014161317107136600240100ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_master_planner.h * Function declarations for building planned statements; these statements * are then executed on the master node. * * Copyright (c) 2012-2016, Citus Data, Inc.
* * $Id$ * *------------------------------------------------------------------------- */ #ifndef MULTI_MASTER_PLANNER_H #define MULTI_MASTER_PLANNER_H #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" /* Function declarations for building local plans on the master node */ struct MultiPlan; struct CustomScan; extern PlannedStmt * MasterNodeSelectPlan(struct MultiPlan *multiPlan, struct CustomScan *dataScan); #endif /* MULTI_MASTER_PLANNER_H */ citus-7.0.3/src/include/distributed/multi_partitioning_utils.h000066400000000000000000000014041317107136600247220ustar00rootroot00000000000000/* * multi_partitioning_utils.h * Utility functions declarations for declarative partitioning * * Copyright (c) 2017, Citus Data, Inc. */ #ifndef MULTI_PARTITIONING_UTILS_H_ #define MULTI_PARTITIONING_UTILS_H_ #include "nodes/pg_list.h" extern bool PartitionedTable(Oid relationId); extern bool PartitionTable(Oid relationId); extern bool IsChildTable(Oid relationId); extern bool IsParentTable(Oid relationId); extern Oid PartitionParentOid(Oid partitionOid); extern List * PartitionList(Oid parentRelationId); extern char * GenerateDetachPartitionCommand(Oid partitionTableId); extern char * GenerateAlterTableAttachPartitionCommand(Oid partitionTableId); extern char * GeneratePartitioningInformation(Oid tableId); #endif /* MULTI_PARTITIONING_UTILS_H_ */ citus-7.0.3/src/include/distributed/multi_physical_planner.h000066400000000000000000000226601317107136600243350ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_physical_planner.h * Type and function declarations used in creating the distributed execution * plan. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef MULTI_PHYSICAL_PLANNER_H #define MULTI_PHYSICAL_PLANNER_H #include "postgres.h" #include "c.h" #include "datatype/timestamp.h" #include "distributed/citus_nodes.h" #include "distributed/errormessage.h" #include "distributed/master_metadata_utility.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_planner.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "utils/array.h" /* Definitions local to the physical planner */ #define ARRAY_OUT_FUNC_ID 751 #define NON_PRUNABLE_JOIN -1 #define RESERVED_HASHED_COLUMN_ID MaxAttrNumber #define MERGE_COLUMN_FORMAT "merge_column_%u" #define TABLE_FETCH_COMMAND "SELECT worker_fetch_regular_table \ ('%s', " UINT64_FORMAT ", '%s', '%s')" #define FOREIGN_FETCH_COMMAND "SELECT worker_fetch_foreign_file \ ('%s', " UINT64_FORMAT ", '%s', '%s')" #define MAP_OUTPUT_FETCH_COMMAND "SELECT worker_fetch_partition_file \ (" UINT64_FORMAT ", %u, %u, %u, '%s', %u)" #define RANGE_PARTITION_COMMAND "SELECT worker_range_partition_table \ (" UINT64_FORMAT ", %d, %s, '%s', '%s'::regtype, %s)" #define HASH_PARTITION_COMMAND "SELECT worker_hash_partition_table \ (" UINT64_FORMAT ", %d, %s, '%s', '%s'::regtype, %d)" #define MERGE_FILES_INTO_TABLE_COMMAND "SELECT worker_merge_files_into_table \ (" UINT64_FORMAT ", %d, '%s', '%s')" #define MERGE_FILES_AND_RUN_QUERY_COMMAND \ "SELECT worker_merge_files_and_run_query(" UINT64_FORMAT ", %d, %s, %s)" typedef enum CitusRTEKind { CITUS_RTE_RELATION = RTE_RELATION, /* ordinary relation reference */ CITUS_RTE_SUBQUERY = RTE_SUBQUERY, /* subquery in FROM */ CITUS_RTE_JOIN = RTE_JOIN, /* join */ CITUS_RTE_FUNCTION = RTE_FUNCTION, /* function in FROM */ #if (PG_VERSION_NUM 
>= 100000) CITUS_RTE_TABLEFUNC = RTE_TABLEFUNC, /* TableFunc(.., column list) */ #endif CITUS_RTE_VALUES = RTE_VALUES, /* VALUES (), (), ... */ CITUS_RTE_CTE = RTE_CTE, /* common table expr (WITH list element) */ #if (PG_VERSION_NUM >= 100000) CITUS_RTE_NAMEDTUPLESTORE = RTE_NAMEDTUPLESTORE, /* tuplestore, e.g. for triggers */ #endif CITUS_RTE_SHARD, CITUS_RTE_REMOTE_QUERY } CitusRTEKind; /* Enumeration that defines the partition type for a remote job */ typedef enum { PARTITION_INVALID_FIRST = 0, RANGE_PARTITION_TYPE = 1, HASH_PARTITION_TYPE = 2 } PartitionType; /* Enumeration that defines different task types */ typedef enum { TASK_TYPE_INVALID_FIRST = 0, SQL_TASK = 1, MAP_TASK = 2, MERGE_TASK = 3, SHARD_FETCH_TASK = 4, MAP_OUTPUT_FETCH_TASK = 5, MERGE_FETCH_TASK = 6, MODIFY_TASK = 7, ROUTER_TASK = 8, DDL_TASK = 9 } TaskType; /* Enumeration that defines the task assignment policy to use */ typedef enum { TASK_ASSIGNMENT_INVALID_FIRST = 0, TASK_ASSIGNMENT_GREEDY = 1, TASK_ASSIGNMENT_ROUND_ROBIN = 2, TASK_ASSIGNMENT_FIRST_REPLICA = 3 } TaskAssignmentPolicyType; /* Enumeration that defines different job types */ typedef enum { JOB_INVALID_FIRST = 0, JOIN_MAP_MERGE_JOB = 1, SUBQUERY_MAP_MERGE_JOB = 2, TOP_LEVEL_WORKER_JOB = 3 } BoundaryNodeJobType; /* * Job represents a logical unit of work that contains one set of data transfers * in our physical plan. The physical planner maps each SQL query into one or * more jobs depending on the query's complexity, and sets dependencies between * these jobs. Each job consists of multiple executable tasks; and these tasks * either operate on base shards, or repartitioned tables. */ typedef struct Job { CitusNode type; uint64 jobId; Query *jobQuery; List *taskList; List *dependedJobList; bool subqueryPushdown; bool requiresMasterEvaluation; /* only applies to modify jobs */ bool deferredPruning; } Job; /* Defines a repartitioning job and holds additional related data. */ typedef struct MapMergeJob { Job job; Query *reduceQuery; PartitionType partitionType; Var *partitionColumn; uint32 partitionCount; int sortedShardIntervalArrayLength; ShardInterval **sortedShardIntervalArray; /* only applies to range partitioning */ List *mapTaskList; List *mergeTaskList; } MapMergeJob; /* * Task represents an executable unit of work. We conceptualize our tasks into * compute and data fetch task types. SQL, map, and merge tasks are considered * as compute tasks; and shard fetch, map fetch, and merge fetch tasks are data * fetch tasks. We also forward declare the task execution struct here to avoid * including the executor header files. * * We currently do not take replication model into account for tasks other * than modifications. When it is set to REPLICATION_MODEL_2PC, the execution * of the modification task is done with two-phase commit. Set it to * REPLICATION_MODEL_INVALID if it is not relevant for the task. * * NB: Changing this requires also changing _outTask in citus_outfuncs and _readTask * in citus_readfuncs to correctly (de)serialize this struct. 
*/ typedef struct TaskExecution TaskExecution; typedef struct Task { CitusNode type; TaskType taskType; uint64 jobId; uint32 taskId; char *queryString; uint64 anchorShardId; /* only applies to compute tasks */ List *taskPlacementList; /* only applies to compute tasks */ List *dependedTaskList; /* only applies to compute tasks */ uint32 partitionId; uint32 upstreamTaskId; /* only applies to data fetch tasks */ ShardInterval *shardInterval; /* only applies to merge tasks */ bool assignmentConstrained; /* only applies to merge tasks */ uint64 shardId; /* only applies to shard fetch tasks */ TaskExecution *taskExecution; /* used by task tracker executor */ bool upsertQuery; /* only applies to modify tasks */ char replicationModel; /* only applies to modify tasks */ bool insertSelectQuery; List *relationShardList; List *rowValuesLists; /* rows to use when building multi-row INSERT */ } Task; /* * RangeTableFragment represents a fragment of a range table. This fragment * could be a regular shard or a merged table formed in a MapMerge job. */ typedef struct RangeTableFragment { CitusRTEKind fragmentType; void *fragmentReference; uint32 rangeTableId; } RangeTableFragment; /* * JoinSequenceNode represents a range table in an ordered sequence of tables * joined together. This representation helps build combinations of all range * table fragments during task generation. */ typedef struct JoinSequenceNode { uint32 rangeTableId; int32 joiningRangeTableId; } JoinSequenceNode; /* * MultiPlan */ typedef struct MultiPlan { CitusNode type; CmdType operation; bool hasReturning; Job *workerJob; Query *masterQuery; bool routerExecutable; List *relationIdList; /* INSERT ... SELECT via coordinator only */ Query *insertSelectSubquery; List *insertTargetList; Oid targetRelationId; /* * NULL if this is a valid plan, an error description otherwise. This will * e.g. be set if SQL features are present that a planner doesn't support, * or if prepared statement parameters prevented successful planning. */ DeferredErrorMessage *planningError; } MultiPlan; /* OperatorCacheEntry contains information for each element in OperatorCache */ typedef struct OperatorCacheEntry { /* cache key consists of typeId, accessMethodId and strategyNumber */ Oid typeId; Oid accessMethodId; int16 strategyNumber; Oid operatorId; Oid operatorClassInputType; char typeType; } OperatorCacheEntry; /* Config variable managed via guc.c */ extern int TaskAssignmentPolicy; extern bool EnableUniqueJobIds; /* Function declarations for building physical plans and constructing queries */ extern MultiPlan * MultiPhysicalPlanCreate(MultiTreeRoot *multiTree, PlannerRestrictionContext * plannerRestrictionContext); extern StringInfo ShardFetchQueryString(uint64 shardId); extern Task * CreateBasicTask(uint64 jobId, uint32 taskId, TaskType taskType, char *queryString); extern OpExpr * MakeOpExpression(Var *variable, int16 strategyNumber); /* * Function declarations for building, updating constraints and simple operator * expression check.
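 *
 * Informal sketch of how the first two are meant to be combined (variable
 * names are hypothetical): BuildBaseConstraint() produces a
 * "column >= min AND column <= max" clause with placeholder constants, and
 * UpdateConstraint() fills those constants in from one shard's interval:
 *
 *   Node *baseConstraint = BuildBaseConstraint(partitionColumn);
 *   UpdateConstraint(baseConstraint, shardInterval);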
*/ extern Node * BuildBaseConstraint(Var *column); extern void UpdateConstraint(Node *baseConstraint, ShardInterval *shardInterval); extern bool SimpleOpExpression(Expr *clause); extern bool OpExpressionContainsColumn(OpExpr *operatorExpression, Var *partitionColumn); /* helper functions */ extern Var * MakeInt4Column(void); extern Const * MakeInt4Constant(Datum constantValue); extern int CompareShardPlacements(const void *leftElement, const void *rightElement); extern bool ShardIntervalsOverlap(ShardInterval *firstInterval, ShardInterval *secondInterval); extern bool HasReferenceTable(Node *node); /* function declarations for Task and Task list operations */ extern bool TasksEqual(const Task *a, const Task *b); extern List * TaskListAppendUnique(List *list, Task *task); extern List * TaskListConcatUnique(List *list1, List *list2); extern bool TaskListMember(const List *taskList, const Task *task); extern List * TaskListDifference(const List *list1, const List *list2); extern List * TaskListUnion(const List *list1, const List *list2); extern List * AssignAnchorShardTaskList(List *taskList); extern List * FirstReplicaAssignTaskList(List *taskList); #endif /* MULTI_PHYSICAL_PLANNER_H */ citus-7.0.3/src/include/distributed/multi_planner.h000066400000000000000000000044621317107136600224410ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_planner.h * General Citus planner code. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #ifndef MULTI_PLANNER_H #define MULTI_PLANNER_H #include "nodes/plannodes.h" #include "nodes/relation.h" #include "distributed/citus_nodes.h" /* values used by jobs and tasks which do not require identifiers */ #define INVALID_JOB_ID 0 #define INVALID_TASK_ID 0 typedef struct RelationRestrictionContext { bool hasDistributedRelation; bool hasLocalRelation; bool allReferenceTables; List *relationRestrictionList; } RelationRestrictionContext; typedef struct RelationRestriction { Index index; Oid relationId; bool distributedRelation; RangeTblEntry *rte; RelOptInfo *relOptInfo; PlannerInfo *plannerInfo; PlannerInfo *parentPlannerInfo; List *parentPlannerParamList; List *prunedShardIntervalList; } RelationRestriction; typedef struct JoinRestrictionContext { List *joinRestrictionList; } JoinRestrictionContext; typedef struct JoinRestriction { JoinType joinType; List *joinRestrictInfoList; PlannerInfo *plannerInfo; RelOptInfo *innerrel; RelOptInfo *outerrel; } JoinRestriction; typedef struct PlannerRestrictionContext { RelationRestrictionContext *relationRestrictionContext; JoinRestrictionContext *joinRestrictionContext; MemoryContext memoryContext; } PlannerRestrictionContext; typedef struct RelationShard { CitusNode type; Oid relationId; uint64 shardId; } RelationShard; extern PlannedStmt * multi_planner(Query *parse, int cursorOptions, ParamListInfo boundParams); extern struct MultiPlan * GetMultiPlan(CustomScan *node); extern void multi_relation_restriction_hook(PlannerInfo *root, RelOptInfo *relOptInfo, Index index, RangeTblEntry *rte); extern void multi_join_restriction_hook(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *outerrel, RelOptInfo *innerrel, JoinType jointype, JoinPathExtraData *extra); extern bool IsModifyCommand(Query *query); extern bool IsModifyMultiPlan(struct MultiPlan *multiPlan); extern RangeTblEntry * RemoteScanRangeTableEntry(List *columnNameList); extern int GetRTEIdentity(RangeTblEntry *rte); #endif /* 
MULTI_PLANNER_H */ citus-7.0.3/src/include/distributed/multi_progress.h000066400000000000000000000022551317107136600226440ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_progress.h * Declarations for public functions and variables used in progress * tracking functions in Citus. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef MULTI_PROGRESS_H #define MULTI_PROGRESS_H #include "fmgr.h" #include "nodes/pg_list.h" #if (PG_VERSION_NUM < 100000) /* define symbols that are undefined in PostgreSQL <= 9.6 */ #define DSM_HANDLE_INVALID 0 extern Datum pg_stat_get_progress_info(PG_FUNCTION_ARGS); #endif typedef struct ProgressMonitorData { uint64 processId; int stepCount; void *steps; } ProgressMonitorData; extern ProgressMonitorData * CreateProgressMonitor(uint64 progressTypeMagicNumber, int stepCount, Size stepSize, Oid relationId); extern ProgressMonitorData * GetCurrentProgressMonitor(void); extern void FinalizeCurrentProgressMonitor(void); extern List * ProgressMonitorList(uint64 commandTypeMagicNumber, List **attachedDSMSegmentList); extern void DetachFromDSMSegments(List *dsmSegmentList); #endif /* MULTI_PROGRESS_H */ citus-7.0.3/src/include/distributed/multi_resowner.h000066400000000000000000000012701317107136600226400ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_resowner.h * Citus resource owner integration. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #ifndef MULTI_RESOWNER_H #define MULTI_RESOWNER_H #include "utils/resowner.h" /* resowner functions for temporary job directory management */ extern void ResourceOwnerEnlargeJobDirectories(ResourceOwner owner); extern void ResourceOwnerRememberJobDirectory(ResourceOwner owner, uint64 jobId); extern void ResourceOwnerForgetJobDirectory(ResourceOwner owner, uint64 jobId); #endif /* MULTI_RESOWNER_H */ citus-7.0.3/src/include/distributed/multi_router_executor.h000066400000000000000000000030101317107136600242240ustar00rootroot00000000000000/* * multi_router_executor.h * * Function declarations used in executing distributed execution * plan. * */ #ifndef MULTI_ROUTER_EXECUTOR_H_ #define MULTI_ROUTER_EXECUTOR_H_ #include "c.h" #include "access/sdir.h" #include "distributed/multi_executor.h" #include "distributed/multi_physical_planner.h" #include "executor/execdesc.h" #include "executor/tuptable.h" #include "nodes/pg_list.h" /* * XactShardConnSet keeps track of the mapping from shard to the set of nodes * involved in multi-statement transaction-wrapped modifications of that shard. * This information is used to mark placements inactive at transaction close. 
*/ typedef struct XactShardConnSet { uint64 shardId; /* identifier of the shard that was modified */ List *connectionEntryList; /* NodeConnectionEntry pointers to participating nodes */ } XactShardConnSet; /* Config variables managed via guc.c */ extern bool AllModificationsCommutative; extern bool EnableDeadlockPrevention; extern void CitusModifyBeginScan(CustomScanState *node, EState *estate, int eflags); extern TupleTableSlot * RouterSequentialModifyExecScan(CustomScanState *node); extern TupleTableSlot * RouterSelectExecScan(CustomScanState *node); extern TupleTableSlot * RouterMultiModifyExecScan(CustomScanState *node); extern int64 ExecuteModifyTasksWithoutResults(List *taskList); extern void ExecuteTasksSequentiallyWithoutResults(List *taskList); extern List * BuildPlacementSelectList(uint32 groupId, List *relationShardList); #endif /* MULTI_ROUTER_EXECUTOR_H_ */ citus-7.0.3/src/include/distributed/multi_router_planner.h000066400000000000000000000042471317107136600240420ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_router_planner.h * * Declarations for public functions and types related to router planning. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef MULTI_ROUTER_PLANNER_H #define MULTI_ROUTER_PLANNER_H #include "c.h" #include "distributed/errormessage.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_planner.h" #include "nodes/parsenodes.h" /* reserved alias name for UPSERTs */ #define CITUS_TABLE_ALIAS "citus_table_alias" extern bool EnableRouterExecution; extern MultiPlan * CreateRouterPlan(Query *originalQuery, Query *query, RelationRestrictionContext *restrictionContext); extern MultiPlan * CreateModifyPlan(Query *originalQuery, Query *query, PlannerRestrictionContext * plannerRestrictionContext); extern DeferredErrorMessage * PlanRouterQuery(Query *originalQuery, RelationRestrictionContext * restrictionContext, List **placementList, uint64 *anchorShardId, List **relationShardList, bool replacePrunedQueryWithDummy); extern List * RouterInsertTaskList(Query *query, DeferredErrorMessage **planningError); extern List * IntersectPlacementList(List *lhsPlacementList, List *rhsPlacementList); extern DeferredErrorMessage * ModifyQuerySupported(Query *queryTree, bool multiShardQuery); extern List * ShardIntervalOpExpressions(ShardInterval *shardInterval, Index rteIndex); extern RelationRestrictionContext * CopyRelationRestrictionContext( RelationRestrictionContext *oldContext); extern Oid ExtractFirstDistributedTableId(Query *query); extern RangeTblEntry * ExtractSelectRangeTableEntry(Query *query); extern RangeTblEntry * ExtractInsertRangeTableEntry(Query *query); extern RangeTblEntry * ExtractDistributedInsertValuesRTE(Query *query); extern bool IsMultiRowInsert(Query *query); extern void AddShardIntervalRestrictionToSelect(Query *subqery, ShardInterval *shardInterval); #endif /* MULTI_ROUTER_PLANNER_H */ citus-7.0.3/src/include/distributed/multi_server_executor.h000066400000000000000000000141511317107136600242220ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_server_executor.h * Type and function declarations for executing remote jobs from a backend; * the ensemble of these jobs form the distributed execution plan. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #ifndef MULTI_SERVER_EXECUTOR_H #define MULTI_SERVER_EXECUTOR_H #include "distributed/multi_physical_planner.h" #include "distributed/task_tracker.h" #include "distributed/worker_manager.h" #define MAX_TASK_EXECUTION_FAILURES 3 /* allowed failure count for one task */ #define MAX_TRACKER_FAILURE_COUNT 3 /* allowed failure count for one tracker */ #define RESERVED_FD_COUNT 64 /* file descriptors unavailable to executor */ /* copy out query results */ #define COPY_QUERY_TO_STDOUT_TEXT "COPY (%s) TO STDOUT" #define COPY_QUERY_TO_STDOUT_BINARY "COPY (%s) TO STDOUT WITH (FORMAT binary)" #define COPY_QUERY_TO_FILE_TEXT "COPY (%s) TO '%s'" #define COPY_QUERY_TO_FILE_BINARY "COPY (%s) TO '%s' WITH (FORMAT binary)" /* Task tracker executor related defines */ #define TASK_ASSIGNMENT_QUERY "SELECT task_tracker_assign_task \ ("UINT64_FORMAT ", %u, %s);" #define TASK_STATUS_QUERY "SELECT task_tracker_task_status("UINT64_FORMAT ", %u);" #define JOB_CLEANUP_QUERY "SELECT task_tracker_cleanup_job("UINT64_FORMAT ")" #define JOB_CLEANUP_TASK_ID INT_MAX #define MULTI_TASK_QUERY_INFO_OFF 0 /* do not log multi-task queries */ /* Enumeration to track one task's execution status */ typedef enum { EXEC_TASK_INVALID_FIRST = 0, EXEC_TASK_CONNECT_START = 1, EXEC_TASK_CONNECT_POLL = 2, EXEC_TASK_FAILED = 3, EXEC_FETCH_TASK_LOOP = 4, EXEC_FETCH_TASK_START = 5, EXEC_FETCH_TASK_RUNNING = 6, EXEC_COMPUTE_TASK_START = 7, EXEC_COMPUTE_TASK_RUNNING = 8, EXEC_COMPUTE_TASK_COPYING = 9, EXEC_TASK_DONE = 10, /* used for task tracker executor */ EXEC_TASK_UNASSIGNED = 11, EXEC_TASK_QUEUED = 12, EXEC_TASK_TRACKER_RETRY = 13, EXEC_TASK_TRACKER_FAILED = 14, EXEC_SOURCE_TASK_TRACKER_RETRY = 15, EXEC_SOURCE_TASK_TRACKER_FAILED = 16 } TaskExecStatus; /* Enumeration to track file transmits to the master node */ typedef enum { EXEC_TRANSMIT_INVALID_FIRST = 0, EXEC_TRANSMIT_UNASSIGNED = 1, EXEC_TRANSMIT_QUEUED = 2, EXEC_TRANSMIT_COPYING = 3, EXEC_TRANSMIT_TRACKER_RETRY = 4, EXEC_TRANSMIT_TRACKER_FAILED = 5, EXEC_TRANSMIT_DONE = 6 } TransmitExecStatus; /* Enumeration to track a task tracker's connection status */ typedef enum { TRACKER_STATUS_INVALID_FIRST = 0, TRACKER_CONNECT_START = 1, TRACKER_CONNECT_POLL = 2, TRACKER_CONNECTED = 3, TRACKER_CONNECTION_FAILED = 4 } TrackerStatus; /* Enumeration that represents distributed executor types */ typedef enum { MULTI_EXECUTOR_INVALID_FIRST = 0, MULTI_EXECUTOR_REAL_TIME = 1, MULTI_EXECUTOR_TASK_TRACKER = 2, MULTI_EXECUTOR_ROUTER = 3, MULTI_EXECUTOR_COORDINATOR_INSERT_SELECT = 4 } MultiExecutorType; /* Enumeration that represents a (dis)connect action taken */ typedef enum { CONNECT_ACTION_NONE = 0, CONNECT_ACTION_OPENED = 1, CONNECT_ACTION_CLOSED = 2 } ConnectAction; /* * TaskExecution holds state that relates to a task's execution. In the case of * the real-time executor, this struct encapsulates all information necessary to * run the task. The task tracker executor however manages its connection logic * elsewhere, and doesn't use connection related fields defined in here. 
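 *
 * As a loose usage sketch (task is assumed to be a Task produced by the
 * physical planner), the functions declared near the bottom of this file
 * drive that state:
 *
 *   TaskExecution *taskExecution = InitTaskExecution(task, EXEC_TASK_CONNECT_START);
 *   ... poll the state machine, calling AdjustStateForFailure() when a
 *       placement fails and giving up once TaskExecutionFailed() returns true ...
 *   CleanupTaskExecution(taskExecution);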
*/ struct TaskExecution { CitusNode type; uint64 jobId; uint32 taskId; TaskExecStatus *taskStatusArray; TransmitExecStatus *transmitStatusArray; int32 *connectionIdArray; int32 *fileDescriptorArray; TimestampTz connectStartTime; uint32 nodeCount; uint32 currentNodeIndex; uint32 querySourceNodeIndex; /* only applies to map fetch tasks */ int32 dataFetchTaskIndex; uint32 failureCount; }; /* * TrackerTaskState represents a task's execution status on a particular task * tracker. This state augments task execution state in that it is associated * with execution on a particular task tracker. */ typedef struct TrackerTaskState { uint64 jobId; uint32 taskId; TaskStatus status; StringInfo taskAssignmentQuery; } TrackerTaskState; /* * TaskTracker keeps connection and task related state for a task tracker. The * task tracker executor then uses this state to open and manage a connection to * the task tracker; and assign and check status of tasks over this connection. */ typedef struct TaskTracker { uint32 workerPort; /* node's port; part of hash table key */ char workerName[WORKER_LENGTH]; /* node's name; part of hash table key */ char *userName; /* which user to connect as */ TrackerStatus trackerStatus; int32 connectionId; uint32 connectPollCount; uint32 connectionFailureCount; uint32 trackerFailureCount; HTAB *taskStateHash; List *assignedTaskList; int32 currentTaskIndex; bool connectionBusy; TrackerTaskState *connectionBusyOnTask; List *connectionBusyOnTaskList; } TaskTracker; /* * WorkerNodeState keeps state for a worker node. The real-time executor uses this to * keep track of the number of open connections to a worker node. */ typedef struct WorkerNodeState { uint32 workerPort; char workerName[WORKER_LENGTH]; uint32 openConnectionCount; } WorkerNodeState; /* Config variable managed via guc.c */ extern int RemoteTaskCheckInterval; extern int MaxAssignTaskBatchSize; extern int TaskExecutorType; extern bool BinaryMasterCopyFormat; extern int MultiTaskQueryLogLevel; /* Function declarations for distributed execution */ extern void MultiRealTimeExecute(Job *job); extern void MultiTaskTrackerExecute(Job *job); /* Function declarations common to more than one executor */ extern MultiExecutorType JobExecutorType(MultiPlan *multiPlan); extern void RemoveJobDirectory(uint64 jobId); extern TaskExecution * InitTaskExecution(Task *task, TaskExecStatus initialStatus); extern void CleanupTaskExecution(TaskExecution *taskExecution); extern bool TaskExecutionFailed(TaskExecution *taskExecution); extern void AdjustStateForFailure(TaskExecution *taskExecution); extern int MaxMasterConnectionCount(void); #endif /* MULTI_SERVER_EXECUTOR_H */ citus-7.0.3/src/include/distributed/multi_shard_transaction.h000066400000000000000000000022271317107136600245050ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_shard_transaction.h * Type and function declarations used in performing transactions across * shard placements. * * Copyright (c) 2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef MULTI_SHARD_TRANSACTION_H #define MULTI_SHARD_TRANSACTION_H #include "utils/hsearch.h" #include "nodes/pg_list.h" /* ShardConnections represents a set of connections for each placement of a shard */ typedef struct ShardConnections { int64 shardId; /* list of MultiConnection structs */ List *connectionList; } ShardConnections; extern HTAB * OpenTransactionsForAllTasks(List *taskList, int connectionFlags); extern HTAB * CreateShardConnectionHash(MemoryContext memoryContext); extern ShardConnections * GetShardHashConnections(HTAB *connectionHash, int64 shardId, bool *connectionsFound); extern List * ShardConnectionList(HTAB *connectionHash); extern void ResetShardPlacementTransactionState(void); extern void UnclaimAllShardConnections(HTAB *shardConnectionHash); #endif /* MULTI_SHARD_TRANSACTION_H */ citus-7.0.3/src/include/distributed/multi_utility.h000066400000000000000000000040431317107136600225000ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * multi_utility.h * Citus utility hook and related functionality. * * Copyright (c) 2012-2016, Citus Data, Inc. *------------------------------------------------------------------------- */ #ifndef MULTI_UTILITY_H #define MULTI_UTILITY_H #include "tcop/utility.h" extern bool EnableDDLPropagation; extern bool EnableVersionChecks; /* * A DDLJob encapsulates the remote tasks and commands needed to process all or * part of a distributed DDL command. It hold the distributed relation's oid, * the original DDL command string (for MX DDL propagation), and a task list of * DDL_TASK-type Tasks to be executed. */ typedef struct DDLJob { Oid targetRelationId; /* oid of the target distributed relation */ bool concurrentIndexCmd; /* related to a CONCURRENTLY index command? */ const char *commandString; /* initial (coordinator) DDL command string */ List *taskList; /* worker DDL tasks to execute */ } DDLJob; #if (PG_VERSION_NUM < 100000) struct QueryEnvironment; /* forward-declare to appease compiler */ #endif extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, char *completionTag); extern void multi_ProcessUtility9x(Node *parsetree, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag); extern void CitusProcessUtility(Node *node, const char *queryString, ProcessUtilityContext context, ParamListInfo params, DestReceiver *dest, char *completionTag); extern List * PlanGrantStmt(GrantStmt *grantStmt); extern void ErrorIfUnsupportedConstraint(Relation relation, char distributionMethod, Var *distributionColumn, uint32 colocationId); extern Datum master_drop_all_shards(PG_FUNCTION_ARGS); extern Datum master_modify_multiple_shards(PG_FUNCTION_ARGS); #endif /* MULTI_UTILITY_H */ citus-7.0.3/src/include/distributed/pg_dist_colocation.h000066400000000000000000000024431317107136600234300ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pg_dist_colocation.h * definition of the relation that holds the colocation information on the * cluster (pg_dist_colocation). * * Copyright (c) 2016, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef PG_DIST_COLOCATION_H #define PG_DIST_COLOCATION_H /* ---------------- * pg_dist_colocation definition. * ---------------- */ typedef struct FormData_pg_dist_colocation { uint32 colocationid; uint32 shardcount; uint32 replicationfactor; Oid distributioncolumntype; } FormData_pg_dist_colocation; /* ---------------- * Form_pg_dist_colocation corresponds to a pointer to a tuple with * the format of pg_dist_colocation relation. * ---------------- */ typedef FormData_pg_dist_colocation *Form_pg_dist_colocation; /* ---------------- * compiler constants for pg_dist_colocation * ---------------- */ #define Natts_pg_dist_colocation 4 #define Anum_pg_dist_colocation_colocationid 1 #define Anum_pg_dist_colocation_shardcount 2 #define Anum_pg_dist_colocation_replicationfactor 3 #define Anum_pg_dist_colocation_distributioncolumntype 4 #define COLOCATIONID_SEQUENCE_NAME "pg_dist_colocationid_seq" #endif /* PG_DIST_COLOCATION_H */ citus-7.0.3/src/include/distributed/pg_dist_local_group.h000066400000000000000000000017411317107136600236040ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pg_dist_local_group.h * definition of the relation that holds the local group id (pg_dist_local_group). * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef PG_DIST_LOCAL_GROUP_H #define PG_DIST_LOCAL_GROUP_H /* ---------------- * pg_dist_local_group definition. * ---------------- */ typedef struct FormData_pg_dist_local_group { int groupid; } FormData_pg_dist_local_group; /* ---------------- * FormData_pg_dist_local_group corresponds to a pointer to a tuple with * the format of pg_dist_local_group relation. * ---------------- */ typedef FormData_pg_dist_local_group *Form_pg_dist_local_group; /* ---------------- * compiler constants for pg_dist_local_group * ---------------- */ #define Natts_pg_dist_local_group 1 #define Anum_pg_dist_local_groupid 1 #endif /* PG_DIST_LOCAL_GROUP_H */ citus-7.0.3/src/include/distributed/pg_dist_node.h000066400000000000000000000023021317107136600222150ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pg_dist_node.h * definition of the relation that holds the nodes on the cluster (pg_dist_node). * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef PG_DIST_NODE_H #define PG_DIST_NODE_H /* ---------------- * compiler constants for pg_dist_node * ---------------- * * n.b. master_add_node, master_add_inactive_node, and master_activate_node all * directly return pg_dist_node tuples. This means their definitions (and * in particular their OUT parameters) must be changed whenever the definition of * pg_dist_node changes. 
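 *
 * For illustration, these attribute numbers are normally consumed together
 * with the relation's tuple descriptor when decoding a catalog tuple
 * (heapTuple and tupleDescriptor are assumed to come from a scan of
 * pg_dist_node):
 *
 *   bool isNull = false;
 *   Datum nodeNameDatum = heap_getattr(heapTuple, Anum_pg_dist_node_nodename,
 *                                      tupleDescriptor, &isNull);
 *   char *nodeName = TextDatumGetCString(nodeNameDatum);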
*/ #define Natts_pg_dist_node 9 #define Anum_pg_dist_node_nodeid 1 #define Anum_pg_dist_node_groupid 2 #define Anum_pg_dist_node_nodename 3 #define Anum_pg_dist_node_nodeport 4 #define Anum_pg_dist_node_noderack 5 #define Anum_pg_dist_node_hasmetadata 6 #define Anum_pg_dist_node_isactive 7 #define Anum_pg_dist_node_noderole 8 #define Anum_pg_dist_node_nodecluster 9 #define GROUPID_SEQUENCE_NAME "pg_dist_groupid_seq" #define NODEID_SEQUENCE_NAME "pg_dist_node_nodeid_seq" #endif /* PG_DIST_NODE_H */ citus-7.0.3/src/include/distributed/pg_dist_partition.h000066400000000000000000000044511317107136600233100ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pg_dist_partition.h * definition of the system "remote partition" relation (pg_dist_partition). * * This table keeps metadata on logical tables that the user requested remote * partitioning for (smaller physical tables that we partition data to are * handled in another system catalog). * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef PG_DIST_PARTITION_H #define PG_DIST_PARTITION_H /* ---------------- * pg_dist_partition definition. * ---------------- */ typedef struct FormData_pg_dist_partition { Oid logicalrelid; /* logical relation id; references pg_class oid */ char partmethod; /* partition method; see codes below */ #ifdef CATALOG_VARLEN /* variable-length fields start here */ text partkey; /* partition key expression */ uint32 colocationid; /* id of the co-location group of particular table belongs to */ char repmodel; /* replication model; see codes below */ #endif } FormData_pg_dist_partition; /* ---------------- * Form_pg_dist_partitions corresponds to a pointer to a tuple with * the format of pg_dist_partitions relation. * ---------------- */ typedef FormData_pg_dist_partition *Form_pg_dist_partition; /* ---------------- * compiler constants for pg_dist_partitions * ---------------- */ #define Natts_pg_dist_partition 5 #define Anum_pg_dist_partition_logicalrelid 1 #define Anum_pg_dist_partition_partmethod 2 #define Anum_pg_dist_partition_partkey 3 #define Anum_pg_dist_partition_colocationid 4 #define Anum_pg_dist_partition_repmodel 5 /* valid values for partmethod include append, hash, and range */ #define DISTRIBUTE_BY_APPEND 'a' #define DISTRIBUTE_BY_HASH 'h' #define DISTRIBUTE_BY_RANGE 'r' #define DISTRIBUTE_BY_NONE 'n' #define REDISTRIBUTE_BY_HASH 'x' /* * Valid values for repmodel are 'c' for coordinator, 's' for streaming * and 't' for two-phase-commit. We also use an invalid replication model * ('i') for distinguishing uninitialized variables where necessary. */ #define REPLICATION_MODEL_COORDINATOR 'c' #define REPLICATION_MODEL_STREAMING 's' #define REPLICATION_MODEL_2PC 't' #define REPLICATION_MODEL_INVALID 'i' #endif /* PG_DIST_PARTITION_H */ citus-7.0.3/src/include/distributed/pg_dist_placement.h000066400000000000000000000034241317107136600232460ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pg_dist_placement.h * definition of the "server" relation (pg_dist_placement). * * This table keeps information on remote shards and their whereabouts on the * master node. The table's contents are updated and used as follows: (i) the * worker nodes send periodic reports about the shards they contain, and (ii) * the master reconciles these shard reports, and determines outdated, under- * and over-replicated shards. 
* * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef PG_DIST_PLACEMENT_H #define PG_DIST_PLACEMENT_H /* ---------------- * pg_dist_placement definition. * ---------------- */ typedef struct FormData_pg_dist_placement { int64 placementid; /* global placementId on remote node */ int64 shardid; /* global shardId on remote node */ int32 shardstate; /* shard state on remote node; see RelayFileState */ int64 shardlength; /* shard length on remote node; stored as bigint */ int32 groupid; /* the group the shard is placed on */ } FormData_pg_dist_placement; /* ---------------- * Form_pg_dist_placement corresponds to a pointer to a tuple with * the format of pg_dist_placement relation. * ---------------- */ typedef FormData_pg_dist_placement *Form_pg_dist_placement; /* ---------------- * compiler constants for pg_dist_placement * ---------------- */ #define Natts_pg_dist_placement 5 #define Anum_pg_dist_placement_placementid 1 #define Anum_pg_dist_placement_shardid 2 #define Anum_pg_dist_placement_shardstate 3 #define Anum_pg_dist_placement_shardlength 4 #define Anum_pg_dist_placement_groupid 5 #endif /* PG_DIST_PLACEMENT_H */ citus-7.0.3/src/include/distributed/pg_dist_shard.h000066400000000000000000000041741317107136600224020ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pg_dist_shard.h * definition of the "shard" relation (pg_dist_shard). * * This table maps logical tables to their remote partitions (from this point * on, we use the terms remote partition and shard interchangeably). All changes * concerning the creation, deletion, merging, and split of remote partitions * reference this table. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef PG_DIST_SHARD_H #define PG_DIST_SHARD_H /* ---------------- * pg_dist_shard definition. * ---------------- */ typedef struct FormData_pg_dist_shard { Oid logicalrelid; /* logical relation id; references pg_class oid */ int64 shardid; /* global shardId representing remote partition */ char shardstorage; /* shard storage type; see codes below */ #ifdef CATALOG_VARLEN /* variable-length fields start here */ text shardalias_DROPPED; /* dropped column, not in use */ text shardminvalue; /* partition key's minimum value in shard */ text shardmaxvalue; /* partition key's maximum value in shard */ #endif } FormData_pg_dist_shard; /* ---------------- * Form_pg_dist_shards corresponds to a pointer to a tuple with * the format of pg_dist_shards relation. * ---------------- */ typedef FormData_pg_dist_shard *Form_pg_dist_shard; /* ---------------- * compiler constants for pg_dist_shards * ---------------- */ #define Natts_pg_dist_shard 6 #define Anum_pg_dist_shard_logicalrelid 1 #define Anum_pg_dist_shard_shardid 2 #define Anum_pg_dist_shard_shardstorage 3 #define Anum_pg_dist_shard_shardalias_DROPPED 4 #define Anum_pg_dist_shard_shardminvalue 5 #define Anum_pg_dist_shard_shardmaxvalue 6 /* * Valid values for shard storage types include relay file, foreign table, * (standard) table and columnar table. Relay file types are currently unused. 
*/ #define SHARD_STORAGE_RELAY 'r' #define SHARD_STORAGE_FOREIGN 'f' #define SHARD_STORAGE_TABLE 't' #define SHARD_STORAGE_COLUMNAR 'c' #endif /* PG_DIST_SHARD_H */ citus-7.0.3/src/include/distributed/pg_dist_transaction.h000066400000000000000000000021621317107136600236210ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * pg_dist_transaction.h * definition of the "transaction" relation (pg_dist_transaction). * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef PG_DIST_TRANSACTION_H #define PG_DIST_TRANSACTION_H /* ---------------- * pg_dist_transaction definition. * ---------------- */ typedef struct FormData_pg_dist_transaction { int32 groupid; /* id of the replication group */ text gid; /* global transaction identifier */ } FormData_pg_dist_transaction; /* ---------------- * Form_pg_dist_transactions corresponds to a pointer to a tuple with * the format of pg_dist_transactions relation. * ---------------- */ typedef FormData_pg_dist_transaction *Form_pg_dist_transaction; /* ---------------- * compiler constants for pg_dist_transaction * ---------------- */ #define Natts_pg_dist_transaction 2 #define Anum_pg_dist_transaction_groupid 1 #define Anum_pg_dist_transaction_gid 2 #endif /* PG_DIST_TRANSACTION_H */ citus-7.0.3/src/include/distributed/placement_connection.h000066400000000000000000000037031317107136600237540ustar00rootroot00000000000000/*------------------------------------------------------------------------- * placement_connection.h * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef PLACEMENT_CONNECTION_H #define PLACEMENT_CONNECTION_H #include "distributed/connection_management.h" /* forward declare, to avoid dependency on ShardPlacement definition */ struct ShardPlacement; /* represents the way in which a placement is accessed */ typedef enum ShardPlacementAccessType { /* read from placement */ PLACEMENT_ACCESS_SELECT, /* modify rows in placement */ PLACEMENT_ACCESS_DML, /* modify placement schema */ PLACEMENT_ACCESS_DDL } ShardPlacementAccessType; /* represents access to a placement */ typedef struct ShardPlacementAccess { /* placement that is accessed */ struct ShardPlacement *placement; /* the way in which the placement is accessed */ ShardPlacementAccessType accessType; } ShardPlacementAccess; extern MultiConnection * GetPlacementConnection(uint32 flags, struct ShardPlacement *placement, const char *userName); extern MultiConnection * StartPlacementConnection(uint32 flags, struct ShardPlacement *placement, const char *userName); extern MultiConnection * GetPlacementListConnection(uint32 flags, List *placementAccessList, const char *userName); extern MultiConnection * StartPlacementListConnection(uint32 flags, List *placementAccessList, const char *userName); extern void ResetPlacementConnectionManagement(void); extern void MarkFailedShardPlacements(void); extern void PostCommitMarkFailedShardPlacements(bool using2PC); extern void CloseShardPlacementAssociation(struct MultiConnection *connection); extern void ResetShardPlacementAssociation(struct MultiConnection *connection); extern void InitPlacementConnectionManagement(void); #endif /* PLACEMENT_CONNECTION_H */ 
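/*
 * Usage sketch for the declarations above (placement is assumed to be a
 * ShardPlacement resolved by the caller, userName the connecting user, and
 * connectionFlags whatever flags the caller needs from
 * connection_management.h): a caller describes how each placement will be
 * accessed and then asks for a single connection consistent with all of
 * those accesses.
 *
 *   ShardPlacementAccess *access = palloc0(sizeof(ShardPlacementAccess));
 *   access->placement = placement;
 *   access->accessType = PLACEMENT_ACCESS_DML;
 *
 *   MultiConnection *connection =
 *       GetPlacementListConnection(connectionFlags, list_make1(access), userName);
 */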
citus-7.0.3/src/include/distributed/reference_table_utils.h000066400000000000000000000013531317107136600241110ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * reference_table_utils.h * * Declarations for public utility functions related to reference tables. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef REFERENCE_TABLE_UTILS_H_ #define REFERENCE_TABLE_UTILS_H_ extern uint32 CreateReferenceTableColocationId(void); extern void ReplicateAllReferenceTablesToNode(char *nodeName, int nodePort); extern void DeleteAllReferenceTablePlacementsFromNodeGroup(uint32 groupId); extern List * ReferenceTableOidList(void); extern int CompareOids(const void *leftElement, const void *rightElement); #endif /* REFERENCE_TABLE_UTILS_H_ */ citus-7.0.3/src/include/distributed/relation_restriction_equivalence.h000066400000000000000000000014051317107136600264050ustar00rootroot00000000000000/* * relation_restriction_equivalence.h * * This file contains functions helper functions for planning * queries with colocated tables and subqueries. * * Copyright (c) 2017-2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef RELATION_RESTRICTION_EQUIVALENCE_H #define RELATION_RESTRICTION_EQUIVALENCE_H #include "distributed/multi_planner.h" extern bool ContainsUnionSubquery(Query *queryTree); extern bool RestrictionEquivalenceForPartitionKeys(PlannerRestrictionContext * plannerRestrictionContext); extern bool SafeToPushdownUnionSubquery(RelationRestrictionContext *restrictionContext); extern List * RelationIdList(Query *query); #endif /* RELATION_RESTRICTION_EQUIVALENCE_H */ citus-7.0.3/src/include/distributed/relay_utility.h000066400000000000000000000026601317107136600224650ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * relay_utility.h * * Header and type declarations that extend relation, index and constraint names * with the appropriate shard identifiers. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id:$ * *------------------------------------------------------------------------- */ #ifndef RELAY_UTILITY_H #define RELAY_UTILITY_H #include "fmgr.h" #include "lib/stringinfo.h" #include "nodes/nodes.h" /* Shard name and identifier related defines */ #define SHARD_NAME_SEPARATOR '_' #define INVALID_SHARD_ID 0 #define INVALID_PLACEMENT_ID 0 /* * RelayFileState represents last known states of shards on a given node. We * currently only have shards in finalized or cached state; and set this state * after shards are sucessfully staged or cached. */ typedef enum { FILE_INVALID_FIRST = 0, FILE_FINALIZED = 1, FILE_CACHED = 2, FILE_INACTIVE = 3, FILE_TO_DELETE = 4 } RelayFileState; /* Function declarations to extend names in DDL commands */ extern void RelayEventExtendNames(Node *parseTree, char *schemaName, uint64 shardId); extern void RelayEventExtendNamesForInterShardCommands(Node *parseTree, uint64 leftShardId, char *leftShardSchemaName, uint64 rightShardId, char *rightShardSchemaName); extern void AppendShardIdToName(char **name, uint64 shardId); #endif /* RELAY_UTILITY_H */ citus-7.0.3/src/include/distributed/remote_commands.h000066400000000000000000000046041317107136600227420ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * remote_commands.h * Helpers to execute commands on remote nodes, over libpq. 
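 *
 * As an illustrative sketch (connection is assumed to be an established
 * MultiConnection and command a query string built by the caller), the
 * helpers declared below are typically combined as:
 *
 *   int querySent = SendRemoteCommand(connection, command);
 *   if (querySent == 0)
 *       ReportConnectionError(connection, ERROR);
 *
 *   PGresult *result = GetRemoteCommandResult(connection, true);
 *   if (!IsResponseOK(result))
 *       ReportResultError(connection, result, ERROR);
 *
 *   PQclear(result);
 *   ForgetResults(connection);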
* * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef REMOTE_COMMAND_H #define REMOTE_COMMAND_H #include "distributed/connection_management.h" /* errors which ExecuteRemoteCommand might return */ #define QUERY_SEND_FAILED 1 #define RESPONSE_NOT_OKAY 2 struct pg_result; /* target of the PGresult typedef */ /* GUC, determining whether statements sent to remote nodes are logged */ extern bool LogRemoteCommands; /* simple helpers */ extern bool IsResponseOK(struct pg_result *result); extern void ForgetResults(MultiConnection *connection); extern bool ClearResults(MultiConnection *connection, bool raiseErrors); extern bool NonblockingForgetResults(MultiConnection *connection); extern bool SqlStateMatchesCategory(char *sqlStateString, int category); /* report errors & warnings */ extern void ReportConnectionError(MultiConnection *connection, int elevel); extern void ReportResultError(MultiConnection *connection, struct pg_result *result, int elevel); extern char * pchomp(const char *in); extern void LogRemoteCommand(MultiConnection *connection, const char *command); /* wrappers around libpq functions, with command logging support */ extern void ExecuteCriticalRemoteCommand(MultiConnection *connection, const char *command); extern int ExecuteOptionalRemoteCommand(MultiConnection *connection, const char *command, struct pg_result **result); extern int SendRemoteCommand(MultiConnection *connection, const char *command); extern int SendRemoteCommandParams(MultiConnection *connection, const char *command, int parameterCount, const Oid *parameterTypes, const char *const *parameterValues); extern List * ReadFirstColumnAsText(struct pg_result *queryResult); extern struct pg_result * GetRemoteCommandResult(MultiConnection *connection, bool raiseInterrupts); extern bool PutRemoteCopyData(MultiConnection *connection, const char *buffer, int nbytes); extern bool PutRemoteCopyEnd(MultiConnection *connection, const char *errormsg); /* waiting for multiple command results */ extern void WaitForAllConnections(List *connectionList, bool raiseInterrupts); #endif /* REMOTE_COMMAND_H */ citus-7.0.3/src/include/distributed/remote_transaction.h000066400000000000000000000102011317107136600234540ustar00rootroot00000000000000/*------------------------------------------------------------------------- * remote_transaction.h * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef REMOTE_TRANSACTION_H #define REMOTE_TRANSACTION_H #include "nodes/pg_list.h" #include "lib/ilist.h" /* forward declare, to avoid recursive includes */ struct MultiConnection; /* * Enum that defines different remote transaction states, of a single remote * transaction. */ typedef enum { /* no transaction active */ REMOTE_TRANS_INVALID = 0, /* transaction start */ REMOTE_TRANS_STARTING, REMOTE_TRANS_STARTED, /* 2pc prepare */ REMOTE_TRANS_PREPARING, REMOTE_TRANS_PREPARED, /* transaction abort */ REMOTE_TRANS_1PC_ABORTING, REMOTE_TRANS_2PC_ABORTING, REMOTE_TRANS_ABORTED, /* transaction commit */ REMOTE_TRANS_1PC_COMMITTING, REMOTE_TRANS_2PC_COMMITTING, REMOTE_TRANS_COMMITTED } RemoteTransactionState; /* * Transaction state associated associated with a single MultiConnection. 
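 *
 * As a rough sketch only, a two-phase commit walks the states above via the
 * functions declared later in this file:
 *
 *   RemoteTransactionBeginIfNecessary(connection);
 *   ... run commands on the connection ...
 *   CoordinatedRemoteTransactionsPrepare();
 *   CoordinatedRemoteTransactionsCommit();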
*/ typedef struct RemoteTransaction { /* what state is the remote side transaction in */ RemoteTransactionState transactionState; /* failures on this connection should abort entire coordinated transaction */ bool transactionCritical; /* failed in current transaction */ bool transactionFailed; /* * Id of last savepoint that successfully began before transaction failure. * Since savepoint ids are assigned incrementally, rolling back to any savepoint * with id equal to or less than this id recovers the transaction from failures. */ SubTransactionId lastSuccessfulSubXact; /* Id of last savepoint queued before first query of transaction */ SubTransactionId lastQueuedSubXact; /* waiting for the result of a recovering ROLLBACK TO SAVEPOINT command */ bool transactionRecovering; /* 2PC transaction name currently associated with connection */ char preparedName[NAMEDATALEN]; } RemoteTransaction; /* change an individual remote transaction's state */ extern void StartRemoteTransactionBegin(struct MultiConnection *connection); extern void FinishRemoteTransactionBegin(struct MultiConnection *connection); extern void RemoteTransactionBegin(struct MultiConnection *connection); extern void RemoteTransactionListBegin(List *connectionList); extern void StartRemoteTransactionPrepare(struct MultiConnection *connection); extern void FinishRemoteTransactionPrepare(struct MultiConnection *connection); extern void RemoteTransactionPrepare(struct MultiConnection *connection); extern void StartRemoteTransactionCommit(struct MultiConnection *connection); extern void FinishRemoteTransactionCommit(struct MultiConnection *connection); extern void RemoteTransactionCommit(struct MultiConnection *connection); extern void StartRemoteTransactionAbort(struct MultiConnection *connection); extern void FinishRemoteTransactionAbort(struct MultiConnection *connection); extern void RemoteTransactionAbort(struct MultiConnection *connection); /* start transaction if necessary */ extern void RemoteTransactionBeginIfNecessary(struct MultiConnection *connection); extern void RemoteTransactionsBeginIfNecessary(List *connectionList); /* other public functionality */ extern void MarkRemoteTransactionFailed(struct MultiConnection *connection, bool allowErrorPromotion); extern void MarkRemoteTransactionCritical(struct MultiConnection *connection); /* * The following functions should all only be called by connection / * transaction managment code. */ extern void CloseRemoteTransaction(struct MultiConnection *connection); extern void ResetRemoteTransaction(struct MultiConnection *connection); /* perform handling for all in-progress transactions */ extern void CoordinatedRemoteTransactionsPrepare(void); extern void CoordinatedRemoteTransactionsCommit(void); extern void CoordinatedRemoteTransactionsAbort(void); /* remote savepoint commands */ extern void CoordinatedRemoteTransactionsSavepointBegin(SubTransactionId subId); extern void CoordinatedRemoteTransactionsSavepointRelease(SubTransactionId subId); extern void CoordinatedRemoteTransactionsSavepointRollback(SubTransactionId subId); #endif /* REMOTE_TRANSACTION_H */ citus-7.0.3/src/include/distributed/resource_lock.h000066400000000000000000000060611317107136600224240ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * resource_lock.h * Locking Infrastructure for Citus. * * Copyright (c) 2012-2016, Citus Data, Inc. 
*------------------------------------------------------------------------- */ #ifndef RESOURCE_LOCK_H #define RESOURCE_LOCK_H #include "postgres.h" /* IWYU pragma: keep */ #include "c.h" #include "nodes/pg_list.h" #include "storage/lock.h" /* * Postgres' advisory locks use 'field4' to discern between different kind of * advisory locks. Only 1 and 2 are used allowing us to define non-conflicting * lock methods. * * In case postgres starts to use additional values, Citus's values * will have to be changed. That just requires re-compiling and a restart. */ typedef enum AdvisoryLocktagClass { /* values defined in postgres' lockfuncs.c */ ADV_LOCKTAG_CLASS_INT64 = 1, ADV_LOCKTAG_CLASS_INT32 = 2, /* Citus lock types */ ADV_LOCKTAG_CLASS_CITUS_SHARD_METADATA = 4, ADV_LOCKTAG_CLASS_CITUS_SHARD = 5, ADV_LOCKTAG_CLASS_CITUS_JOB = 6 } AdvisoryLocktagClass; /* reuse advisory lock, but with different, unused field 4 (4)*/ #define SET_LOCKTAG_SHARD_METADATA_RESOURCE(tag, db, shardid) \ SET_LOCKTAG_ADVISORY(tag, \ db, \ (uint32) ((shardid) >> 32), \ (uint32) (shardid), \ ADV_LOCKTAG_CLASS_CITUS_SHARD_METADATA) /* reuse advisory lock, but with different, unused field 4 (5)*/ #define SET_LOCKTAG_SHARD_RESOURCE(tag, db, shardid) \ SET_LOCKTAG_ADVISORY(tag, \ db, \ (uint32) ((shardid) >> 32), \ (uint32) (shardid), \ ADV_LOCKTAG_CLASS_CITUS_SHARD) /* reuse advisory lock, but with different, unused field 4 (6) */ #define SET_LOCKTAG_JOB_RESOURCE(tag, db, jobid) \ SET_LOCKTAG_ADVISORY(tag, \ db, \ (uint32) ((jobid) >> 32), \ (uint32) (jobid), \ ADV_LOCKTAG_CLASS_CITUS_JOB) /* Lock shard/relation metadata for safe modifications */ extern void LockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode); extern bool TryLockShardDistributionMetadata(int64 shardId, LOCKMODE lockMode); /* Lock shard data, for DML commands or remote fetches */ extern void LockShardResource(uint64 shardId, LOCKMODE lockmode); extern void UnlockShardResource(uint64 shardId, LOCKMODE lockmode); /* Lock a job schema or partition task directory */ extern void LockJobResource(uint64 jobId, LOCKMODE lockmode); extern void UnlockJobResource(uint64 jobId, LOCKMODE lockmode); /* Lock multiple shards for safe modification */ extern void LockShardListMetadata(List *shardIntervalList, LOCKMODE lockMode); extern void LockShardListResources(List *shardIntervalList, LOCKMODE lockMode); extern void LockRelationShardResources(List *relationShardList, LOCKMODE lockMode); /* Lock partitions of partitioned table */ extern void LockPartitionsInRelationList(List *relationIdList, LOCKMODE lockmode); extern void LockPartitionRelations(Oid relationId, LOCKMODE lockMode); /* Lock parent table's colocated shard resource */ extern void LockParentShardResourceIfPartition(uint64 shardId, LOCKMODE lockMode); #endif /* RESOURCE_LOCK_H */ citus-7.0.3/src/include/distributed/shard_pruning.h000066400000000000000000000011751317107136600224310ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * shard_pruning.h * Shard pruning infrastructure. * * Copyright (c) 2014-2017, Citus Data, Inc. 
* *------------------------------------------------------------------------- */ #ifndef SHARD_PRUNING_H_ #define SHARD_PRUNING_H_ #include "distributed/metadata_cache.h" #include "nodes/primnodes.h" #define INVALID_SHARD_INDEX -1 /* Function declarations for shard pruning */ extern List * PruneShards(Oid relationId, Index rangeTableId, List *whereClauseList); extern bool ContainsFalseClause(List *whereClauseList); #endif /* SHARD_PRUNING_H_ */ citus-7.0.3/src/include/distributed/shardinterval_utils.h000066400000000000000000000027351317107136600236570ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * shardinterval_utils.h * * Declarations for public utility functions related to shard intervals. * * Copyright (c) 2014-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef SHARDINTERVAL_UTILS_H_ #define SHARDINTERVAL_UTILS_H_ #include "distributed/master_metadata_utility.h" #include "distributed/metadata_cache.h" #include "nodes/primnodes.h" #define INVALID_SHARD_INDEX -1 /* OperatorCacheEntry contains information for each element in OperatorCache */ typedef struct ShardIntervalCompareFunctionCacheEntry { Var *partitionColumn; char partitionMethod; FmgrInfo *functionInfo; } ShardIntervalCompareFunctionCacheEntry; extern ShardInterval * LowestShardIntervalById(List *shardIntervalList); extern int CompareShardIntervals(const void *leftElement, const void *rightElement, FmgrInfo *typeCompareFunction); extern int CompareShardIntervalsById(const void *leftElement, const void *rightElement); extern int CompareRelationShards(const void *leftElement, const void *rightElement); extern int ShardIndex(ShardInterval *shardInterval); extern ShardInterval * FindShardInterval(Datum partitionColumnValue, DistTableCacheEntry *cacheEntry); extern int FindShardIntervalIndex(Datum searchedValue, DistTableCacheEntry *cacheEntry); extern bool SingleReplicatedTable(Oid relationId); #endif /* SHARDINTERVAL_UTILS_H_ */ citus-7.0.3/src/include/distributed/shared_library_init.h000066400000000000000000000006701317107136600236020ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * shared_library_init.h * Functionality related to the initialization of the Citus extension. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef SHARED_LIBRARY_INIT_H #define SHARED_LIBRARY_INIT_H extern void StartupCitusBackend(void); #endif /* SHARED_LIBRARY_INIT_H */ citus-7.0.3/src/include/distributed/task_tracker.h000066400000000000000000000112701317107136600222400ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * task_tracker.h * * Header and type declarations for coordinating execution of tasks and data * source transfers on worker nodes. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #ifndef TASK_TRACKER_H #define TASK_TRACKER_H #include "storage/lwlock.h" #include "utils/hsearch.h" #define HIGH_PRIORITY_TASK_TIME 1 /* assignment time for high priority tasks */ #define RESERVED_JOB_ID 1 /* reserved for cleanup and shutdown tasks */ #define SHUTDOWN_MARKER_TASK_ID UINT_MAX /* used to identify task tracker shutdown */ #define MAX_TASK_FAILURE_COUNT 2 /* allowed failure count for one task */ #define LOCAL_HOST_NAME "localhost" /* connect to local backends using this name */ #define TASK_CALL_STRING_SIZE 12288 /* max length of task call string */ #define TEMPLATE0_NAME "template0" /* skip job schema cleanup for template0 */ #define JOB_SCHEMA_CLEANUP "SELECT worker_cleanup_job_schema_cache()" /* * TaskStatus represents execution status of worker tasks. The assigned and * cancel requested statuses are set by the master node; all other statuses are * assigned by the task tracker as the worker task makes progress. */ typedef enum { TASK_STATUS_INVALID_FIRST = 0, TASK_ASSIGNED = 1, /* master node and task tracker */ TASK_SCHEDULED = 2, TASK_RUNNING = 3, TASK_FAILED = 4, TASK_PERMANENTLY_FAILED = 5, TASK_SUCCEEDED = 6, TASK_CANCEL_REQUESTED = 7, /* master node only */ TASK_CANCELED = 8, TASK_TO_REMOVE = 9, /* * The master node's executor uses the following statuses to fully represent * the execution status of worker tasks, as they are perceived by the master * node. These statuses in fact don't belong with the task tracker. */ TASK_CLIENT_SIDE_QUEUED = 10, TASK_CLIENT_SIDE_ASSIGN_FAILED = 11, TASK_CLIENT_SIDE_STATUS_FAILED = 12, TASK_FILE_TRANSMIT_QUEUED = 13, TASK_CLIENT_SIDE_TRANSMIT_FAILED = 14, /* * Add new task status types above this comment. Existing types, except for * TASK_STATUS_LAST, should never have their numbers changed. */ TASK_STATUS_LAST } TaskStatus; /* * WorkerTask keeps shared memory state for tasks. At a high level, each worker * task holds onto three different types of state: (a) state assigned by the * master node, (b) state initialized by the protocol process at task assignment * time, and (c) state internal to the task tracker process that changes as the * task make progress. * * Since taskCallString is dynamically sized use WORKER_TASK_SIZE instead of * sizeof(WorkerTask). Use WORKER_TASK_AT to reference an item in WorkerTask array. */ typedef struct WorkerTask { uint64 jobId; /* job id (upper 32-bits reserved); part of hash table key */ uint32 taskId; /* task id; part of hash table key */ uint32 assignedAt; /* task assignment time in epoch seconds */ TaskStatus taskStatus; /* task's current execution status */ char databaseName[NAMEDATALEN]; /* name to use for local backend connection */ char userName[NAMEDATALEN]; /* user to use for local backend connection */ int32 connectionId; /* connection id to local backend */ uint32 failureCount; /* number of task failures */ char taskCallString[FLEXIBLE_ARRAY_MEMBER]; /* query or function call string */ } WorkerTask; #define WORKER_TASK_SIZE (offsetof(WorkerTask, taskCallString) + MaxTaskStringSize) #define WORKER_TASK_AT(workerTasks, index) \ ((WorkerTask *) (((char *) (workerTasks)) + (index) * WORKER_TASK_SIZE)) /* * WorkerTasksControlData contains task tracker state shared between * processes. 
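 *
 * Since WorkerTask ends with a flexible array member, code that walks a block
 * of tasks is expected to use the macros defined above rather than plain
 * pointer arithmetic on sizeof(WorkerTask); as a rough sketch (taskArray and
 * taskCount are hypothetical names):
 *
 *   uint32 taskIndex = 0;
 *
 *   for (taskIndex = 0; taskIndex < taskCount; taskIndex++)
 *   {
 *       WorkerTask *workerTask = WORKER_TASK_AT(taskArray, taskIndex);
 *       ...
 *   }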
*/ typedef struct WorkerTasksSharedStateData { /* Hash table shared by the task tracker and task tracker protocol functions */ HTAB *taskHash; /* Lock protecting workerNodesHash */ int taskHashTrancheId; #if (PG_VERSION_NUM >= 100000) char *taskHashTrancheName; #else LWLockTranche taskHashLockTranche; #endif LWLock taskHashLock; } WorkerTasksSharedStateData; /* Config variables managed via guc.c */ extern int TaskTrackerDelay; extern int MaxTrackedTasksPerNode; extern int MaxRunningTasksPerNode; extern int MaxTaskStringSize; /* State shared by the task tracker and task tracker protocol functions */ extern WorkerTasksSharedStateData *WorkerTasksSharedState; /* Entry point */ extern void TaskTrackerMain(Datum main_arg); /* Function declarations local to the worker module */ extern WorkerTask * WorkerTasksHashEnter(uint64 jobId, uint32 taskId); extern WorkerTask * WorkerTasksHashFind(uint64 jobId, uint32 taskId); /* Function declarations for starting up and running the task tracker */ extern void TaskTrackerRegister(void); #endif /* TASK_TRACKER_H */ citus-7.0.3/src/include/distributed/task_tracker_protocol.h000066400000000000000000000014361317107136600241640ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * task_tracker_protocol.h * * Header and type declarations for assigning tasks to and removing tasks from * the task tracker running on this node. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef TASK_TRACKER_PROTOCOL_H #define TASK_TRACKER_PROTOCOL_H #include "fmgr.h" /* Function declarations for distributed task management */ extern Datum task_tracker_assign_task(PG_FUNCTION_ARGS); extern Datum task_tracker_update_data_fetch_task(PG_FUNCTION_ARGS); extern Datum task_tracker_task_status(PG_FUNCTION_ARGS); extern Datum task_tracker_cleanup_job(PG_FUNCTION_ARGS); #endif /* TASK_TRACKER_PROTOCOL_H */ citus-7.0.3/src/include/distributed/transaction_identifier.h000066400000000000000000000026761317107136600243240ustar00rootroot00000000000000/* * transaction_identifier.h * * Data structure for distributed transaction id and related function * declarations. * * Copyright (c) 2017, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef TRANSACTION_IDENTIFIER_H #define TRANSACTION_IDENTIFIER_H #include "datatype/timestamp.h" /* * Citus identifies a distributed transaction with a triplet consisting of * * - initiatorNodeIdentifier: A unique identifier of the node that initiated * the distributed transaction * - transactionOriginator: Set to true only for the transactions initialized on * the coordinator. 
This is only useful for MX in order to distinguish the transaction * that started the distributed transaction on the coordinator where we could * have the same transactions' worker queries on the same node * - transactionNumber: A locally unique identifier assigned for the distributed * transaction on the node that initiated the distributed transaction * - timestamp: The current timestamp of distributed transaction initiation * */ typedef struct DistributedTransactionId { int initiatorNodeIdentifier; bool transactionOriginator; uint64 transactionNumber; TimestampTz timestamp; } DistributedTransactionId; extern DistributedTransactionId * GetCurrentDistributedTransactionId(void); extern uint64 CurrentDistributedTransactionNumber(void); #endif /* TRANSACTION_IDENTIFIER_H */ citus-7.0.3/src/include/distributed/transaction_management.h000066400000000000000000000046651317107136600243160ustar00rootroot00000000000000/*------------------------------------------------------------------------- * transaction_management.h * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef TRANSACTION_MANAGMENT_H #define TRANSACTION_MANAGMENT_H #include "lib/ilist.h" #include "nodes/pg_list.h" /* describes what kind of modifications have occurred in the current transaction */ typedef enum { XACT_MODIFICATION_INVALID = 0, /* placeholder initial value */ XACT_MODIFICATION_NONE, /* no modifications have taken place */ XACT_MODIFICATION_DATA, /* data modifications (DML) have occurred */ XACT_MODIFICATION_MULTI_SHARD /* multi-shard modifications have occurred */ } XactModificationType; /* * Enum defining the state of a coordinated (i.e. a transaction potentially * spanning several nodes). */ typedef enum CoordinatedTransactionState { /* no coordinated transaction in progress, no connections established */ COORD_TRANS_NONE, /* no coordinated transaction in progress, but connections established */ COORD_TRANS_IDLE, /* coordinated transaction in progress */ COORD_TRANS_STARTED, /* coordinated transaction prepared on all workers */ COORD_TRANS_PREPARED, /* coordinated transaction committed */ COORD_TRANS_COMMITTED } CoordinatedTransactionState; /* Enumeration that defines the different commit protocols available */ typedef enum { COMMIT_PROTOCOL_BARE = 0, COMMIT_PROTOCOL_1PC = 1, COMMIT_PROTOCOL_2PC = 2 } CommitProtocolType; /* config variable managed via guc.c */ extern int MultiShardCommitProtocol; /* state needed to restore multi-shard commit protocol during VACUUM/ANALYZE */ extern int SavedMultiShardCommitProtocol; /* state needed to prevent new connections during modifying transactions */ extern XactModificationType XactModificationLevel; extern CoordinatedTransactionState CurrentCoordinatedTransactionState; /* list of connections that are part of the current coordinated transaction */ extern dlist_head InProgressTransactions; /* * Coordinated transaction management. 
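 *
 * A brief, non-authoritative sketch of how these are typically combined by a
 * caller that is about to run a multi-shard modification:
 *
 *   BeginOrContinueCoordinatedTransaction();
 *   if (MultiShardCommitProtocol == COMMIT_PROTOCOL_2PC)
 *       CoordinatedTransactionUse2PC();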
*/ extern void BeginCoordinatedTransaction(void); extern void BeginOrContinueCoordinatedTransaction(void); extern bool InCoordinatedTransaction(void); extern void CoordinatedTransactionUse2PC(void); /* initialization function(s) */ extern void InitializeTransactionManagement(void); /* other functions */ extern List * ActiveSubXacts(void); #endif /* TRANSACTION_MANAGMENT_H */ citus-7.0.3/src/include/distributed/transaction_recovery.h000066400000000000000000000010221317107136600240200ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * transaction_recovery.h * Type and function declarations used in recovering 2PC transactions. * * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef TRANSACTION_RECOVERY_H #define TRANSACTION_RECOVERY_H /* Functions declarations for worker transactions */ extern void LogTransactionRecord(int groupId, char *transactionName); #endif /* TRANSACTION_RECOVERY_H */ citus-7.0.3/src/include/distributed/transmit.h000066400000000000000000000012651317107136600214270ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * transmit.h * Shared declarations for transmitting files between remote nodes. * * Copyright (c) 2012-2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef TRANSMIT_H #define TRANSMIT_H #include "lib/stringinfo.h" /* Function declarations for transmitting files between two nodes */ extern void RedirectCopyDataToRegularFile(const char *filename); extern void SendRegularFile(const char *filename); /* Function declaration local to commands and worker modules */ extern void FreeStringInfo(StringInfo stringInfo); #endif /* TRANSMIT_H */ citus-7.0.3/src/include/distributed/worker_manager.h000066400000000000000000000062201317107136600225650ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_manager.h * Header and type declarations for managing worker nodes and for placing * shards on worker nodes in an intelligent manner. * * Copyright (c) 2012-2016, Citus Data, Inc. * * $Id$ * *------------------------------------------------------------------------- */ #ifndef WORKER_MANAGER_H #define WORKER_MANAGER_H #include "nodes/pg_list.h" /* Worker nodeName's, nodePort's, and nodeCluster's maximum length */ #define WORKER_LENGTH 256 /* Maximum length of worker port number (represented as string) */ #define MAX_PORT_LENGTH 10 /* default filename for citus.worker_list_file */ #define WORKER_LIST_FILENAME "pg_worker_list.conf" /* Implementation specific definitions used in finding worker nodes */ #define WORKER_RACK_TRIES 5 #define WORKER_DEFAULT_RACK "default" #define WORKER_DEFAULT_CLUSTER "default" /* * In memory representation of pg_dist_node table elements. The elements are hold in * WorkerNodeHash table. 
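 *
 * A lookup sketch (an editorial illustration only; the node name and port are
 * placeholders, while FindWorkerNode(), the isActive flag, and
 * WorkerNodeIsPrimary() are all declared further down in this header):
 *
 *   WorkerNode *workerNode = FindWorkerNode("worker-1.example.com", 5432);
 *   if (workerNode != NULL && workerNode->isActive &&
 *       WorkerNodeIsPrimary(workerNode))
 *   {
 *       ... the node is a live primary and can receive placements ...
 *   }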
*/ typedef struct WorkerNode { uint32 nodeId; /* node's unique id, key of the hash table */ uint32 workerPort; /* node's port */ char workerName[WORKER_LENGTH]; /* node's name */ uint32 groupId; /* node's groupId; same for the nodes that are in the same group */ char workerRack[WORKER_LENGTH]; /* node's network location */ bool hasMetadata; /* node gets metadata changes */ bool isActive; /* node's state */ Oid nodeRole; /* the node's role in its group */ char nodeCluster[NAMEDATALEN]; /* the cluster the node is a part of */ } WorkerNode; /* Config variables managed via guc.c */ extern int MaxWorkerNodesTracked; extern char *WorkerListFileName; extern char *CurrentCluster; /* Function declarations for finding worker nodes to place shards on */ extern WorkerNode * WorkerGetRandomCandidateNode(List *currentNodeList); extern WorkerNode * WorkerGetRoundRobinCandidateNode(List *workerNodeList, uint64 shardId, uint32 placementIndex); extern WorkerNode * WorkerGetLocalFirstCandidateNode(List *currentNodeList); extern uint32 ActivePrimaryNodeCount(void); extern List * ActivePrimaryNodeList(void); extern uint32 ActiveReadableNodeCount(void); extern List * ActiveReadableNodeList(void); extern WorkerNode * FindWorkerNode(char *nodeName, int32 nodePort); extern WorkerNode * FindWorkerNodeAnyCluster(char *nodeName, int32 nodePort); extern List * ReadWorkerNodes(bool includeNodesFromOtherClusters); extern void EnsureCoordinator(void); extern uint32 GroupForNode(char *nodeName, int32 nodePorT); extern WorkerNode * PrimaryNodeForGroup(uint32 groupId, bool *groupContainsNodes); extern bool WorkerNodeIsPrimary(WorkerNode *worker); extern bool WorkerNodeIsSecondary(WorkerNode *worker); extern bool WorkerNodeIsReadable(WorkerNode *worker); /* Function declarations for worker node utilities */ extern int CompareWorkerNodes(const void *leftElement, const void *rightElement); extern int WorkerNodeCompare(const void *lhsKey, const void *rhsKey, Size keySize); #endif /* WORKER_MANAGER_H */ citus-7.0.3/src/include/distributed/worker_protocol.h000066400000000000000000000144451317107136600230240ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_protocol.h * Header for shared declarations that are used for caching remote resources * on worker nodes, and also for applying distributed execution primitives. * * Copyright (c) 2012-2016, Citus Data, Inc. 
* * $Id$ * *------------------------------------------------------------------------- */ #ifndef WORKER_PROTOCOL_H #define WORKER_PROTOCOL_H #include "fmgr.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "storage/fd.h" #include "utils/array.h" /* Number of rows to prefetch when reading data with a cursor */ #define ROW_PREFETCH_COUNT 50 /* Directory, file, table name, and UDF related defines for distributed tasks */ #define PG_JOB_CACHE_DIR "pgsql_job_cache" #define MASTER_JOB_DIRECTORY_PREFIX "master_job_" #define JOB_DIRECTORY_PREFIX "job_" #define JOB_SCHEMA_PREFIX "pg_merge_job_" #define TASK_FILE_PREFIX "task_" #define TASK_TABLE_PREFIX "task_" #define TABLE_FILE_PREFIX "table_" #define PARTITION_FILE_PREFIX "p_" #define ATTEMPT_FILE_SUFFIX ".attempt" #define MERGE_TABLE_SUFFIX "_merge" #define MIN_JOB_DIRNAME_WIDTH 4 #define MIN_TASK_FILENAME_WIDTH 6 #define MIN_PARTITION_FILENAME_WIDTH 5 #define FOREIGN_FILENAME_OPTION "filename" #define CSTORE_TABLE_SIZE_FUNCTION_NAME "cstore_table_size" /* Defines used for fetching files and tables */ /* the tablename in the overloaded COPY statement is the to-be-transferred file */ #define TRANSMIT_REGULAR_COMMAND "COPY \"%s\" TO STDOUT WITH (format 'transmit')" #define COPY_OUT_COMMAND "COPY %s TO STDOUT" #define COPY_IN_COMMAND "COPY %s FROM '%s'" /* Defines that relate to fetching foreign tables */ #define FOREIGN_CACHED_FILE_PATH "pg_foreign_file/cached/%s" #define GET_TABLE_OWNER \ "SELECT rolname FROM pg_class JOIN pg_roles ON (pg_roles.oid = pg_class.relowner) " \ "WHERE pg_class.oid = '%s'::regclass" #define GET_TABLE_DDL_EVENTS "SELECT master_get_table_ddl_events('%s')" #define SET_FOREIGN_TABLE_FILENAME "ALTER FOREIGN TABLE %s OPTIONS (SET filename '%s')" #define FOREIGN_FILE_PATH_COMMAND "SELECT worker_foreign_file_path('%s')" #define SET_SEARCH_PATH_COMMAND "SET search_path TO %s" #define CREATE_TABLE_COMMAND "CREATE TABLE %s (%s)" #define CREATE_TABLE_AS_COMMAND "CREATE TABLE %s (%s) AS (%s)" /* * RangePartitionContext keeps range re-partitioning related data. The Btree * comparison function is set according to the partitioned column's data type. */ typedef struct RangePartitionContext { FmgrInfo *comparisonFunction; Datum *splitPointArray; int32 splitPointCount; } RangePartitionContext; /* * HashPartitionContext keeps hash re-partitioning related data. The hashing * function is set according to the partitioned column's data type. */ typedef struct HashPartitionContext { FmgrInfo *hashFunction; uint32 partitionCount; } HashPartitionContext; /* * FileOutputStream helps buffer write operations to a file; these writes are * then regularly flushed to the underlying file. This structure differs from * standard file output streams in that it keeps a larger buffer, and only * supports appending data to virtual file descriptors. 
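 *
 * The intended append-then-flush pattern, shown as an editorial sketch (the
 * fileStream variable, the data/dataLength arguments, and the assumption that
 * PartitionBufferSize is expressed in kilobytes are illustrative, not taken
 * from this module):
 *
 *   appendBinaryStringInfo(fileStream->fileBuffer, data, dataLength);
 *   if (fileStream->fileBuffer->len > PartitionBufferSize * 1024)
 *   {
 *       ... write fileBuffer out to fileStream->fileDescriptor and reset
 *           the buffer ...
 *   }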
*/ typedef struct FileOutputStream { File fileDescriptor; StringInfo fileBuffer; StringInfo filePath; } FileOutputStream; /* Config variables managed via guc.c */ extern int PartitionBufferSize; extern bool ExpireCachedShards; extern bool BinaryWorkerCopyFormat; /* Function declarations local to the worker module */ extern StringInfo JobSchemaName(uint64 jobId); extern StringInfo TaskTableName(uint32 taskId); extern bool JobSchemaExists(StringInfo schemaName); extern StringInfo JobDirectoryName(uint64 jobId); extern StringInfo MasterJobDirectoryName(uint64 jobId); extern StringInfo TaskDirectoryName(uint64 jobId, uint32 taskId); extern StringInfo PartitionFilename(StringInfo directoryName, uint32 partitionId); extern bool CacheDirectoryElement(const char *filename); extern bool JobDirectoryElement(const char *filename); extern bool DirectoryExists(StringInfo directoryName); extern void CreateDirectory(StringInfo directoryName); extern void RemoveDirectory(StringInfo filename); extern StringInfo InitTaskDirectory(uint64 jobId, uint32 taskId); extern void RemoveJobSchema(StringInfo schemaName); extern Datum * DeconstructArrayObject(ArrayType *arrayObject); extern int32 ArrayObjectCount(ArrayType *arrayObject); extern FmgrInfo * GetFunctionInfo(Oid typeId, Oid accessMethodId, int16 procedureId); extern List * TableDDLCommandList(const char *nodeName, uint32 nodePort, const char *tableName); /* Function declarations shared with the master planner */ extern StringInfo TaskFilename(StringInfo directoryName, uint32 taskId); extern List * ExecuteRemoteQuery(const char *nodeName, uint32 nodePort, char *runAsUser, StringInfo queryString); extern List * ColumnDefinitionList(List *columnNameList, List *columnTypeList); extern CreateStmt * CreateStatement(RangeVar *relation, List *columnDefinitionList); extern CopyStmt * CopyStatement(RangeVar *relation, char *sourceFilename); extern Datum CompareCall2(FmgrInfo *funcInfo, Datum leftArgument, Datum rightArgument); /* Function declaration for parsing tree node */ extern Node * ParseTreeNode(const char *ddlCommand); extern Node * ParseTreeRawStmt(const char *ddlCommand); /* Function declarations for applying distributed execution primitives */ extern Datum worker_fetch_partition_file(PG_FUNCTION_ARGS); extern Datum worker_fetch_query_results_file(PG_FUNCTION_ARGS); extern Datum worker_apply_shard_ddl_command(PG_FUNCTION_ARGS); extern Datum worker_range_partition_table(PG_FUNCTION_ARGS); extern Datum worker_hash_partition_table(PG_FUNCTION_ARGS); extern Datum worker_merge_files_into_table(PG_FUNCTION_ARGS); extern Datum worker_merge_files_and_run_query(PG_FUNCTION_ARGS); extern Datum worker_cleanup_job_schema_cache(PG_FUNCTION_ARGS); /* Function declarations for fetching regular and foreign tables */ extern Datum worker_fetch_foreign_file(PG_FUNCTION_ARGS); extern Datum worker_fetch_regular_table(PG_FUNCTION_ARGS); extern Datum worker_append_table_to_shard(PG_FUNCTION_ARGS); extern Datum worker_foreign_file_path(PG_FUNCTION_ARGS); extern Datum worker_find_block_local_path(PG_FUNCTION_ARGS); /* Function declaration for calculating hashed value */ extern Datum worker_hash(PG_FUNCTION_ARGS); #endif /* WORKER_PROTOCOL_H */ citus-7.0.3/src/include/distributed/worker_transaction.h000066400000000000000000000027431317107136600235060ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * worker_transaction.h * Type and function declarations used in performing transactions across * workers. 
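 *
 * Rough usage sketch (editorial illustration; the SQL string is only a
 * placeholder, while SendCommandToWorkers() and WORKERS_WITH_METADATA are
 * declared below in this file):
 *
 *   SendCommandToWorkers(WORKERS_WITH_METADATA,
 *                        "UPDATE pg_dist_node SET isactive = true");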
* * Copyright (c) 2016, Citus Data, Inc. * *------------------------------------------------------------------------- */ #ifndef WORKER_TRANSACTION_H #define WORKER_TRANSACTION_H #include "distributed/worker_manager.h" /* * TargetWorkerSet is used for determining the type of workers that a command * is targeted to. */ typedef enum TargetWorkerSet { WORKERS_WITH_METADATA, ALL_WORKERS } TargetWorkerSet; /* Functions declarations for worker transactions */ extern List * GetWorkerTransactions(void); extern void SendCommandToWorker(char *nodeName, int32 nodePort, char *command); extern void SendCommandToWorkers(TargetWorkerSet targetWorkerSet, char *command); extern void SendBareCommandListToWorkers(TargetWorkerSet targetWorkerSet, List *commandList); extern void SendCommandToWorkersParams(TargetWorkerSet targetWorkerSet, char *command, int parameterCount, const Oid *parameterTypes, const char *const *parameterValues); extern void SendCommandListToWorkerInSingleTransaction(char *nodeName, int32 nodePort, char *nodeUser, List *commandList); extern void RemoveWorkerTransaction(char *nodeName, int32 nodePort); /* helper functions for worker transactions */ extern bool IsWorkerTransactionActive(void); #endif /* WORKER_TRANSACTION_H */ citus-7.0.3/src/test/000077500000000000000000000000001317107136600144235ustar00rootroot00000000000000citus-7.0.3/src/test/regress/000077500000000000000000000000001317107136600160755ustar00rootroot00000000000000citus-7.0.3/src/test/regress/.gitignore000066400000000000000000000002211317107136600200600ustar00rootroot00000000000000# Local binaries /pg_regress # Generated subdirectories /tmp_check/ /results/ /log/ # Regression test output /regression.diffs /regression.out citus-7.0.3/src/test/regress/Makefile000066400000000000000000000077161317107136600175500ustar00rootroot00000000000000# Makefile for tests of the Citus extension citus_subdir = src/test/regress citus_top_builddir = ../../.. include $(citus_top_builddir)/Makefile.global # ensure MAJORVERSION is defined (missing in older versions) ifndef MAJORVERSION MAJORVERSION := $(basename $(VERSION)) endif ## ## Citus regression support ## MULTI_INSTALLDIR=$(CURDIR)/tmp_check/install pg_regress_multi_check = $(PERL) $(citus_abs_srcdir)/pg_regress_multi.pl --pgxsdir="$(pgxsdir)" --bindir="$(bindir)" --libdir="$(libdir)" --majorversion="$(MAJORVERSION)" --postgres-builddir="$(postgres_abs_builddir)" --postgres-srcdir="$(postgres_abs_srcdir)" MULTI_REGRESS_OPTS = --inputdir=$(citus_abs_srcdir) $(pg_regress_locale_flags) # XXX: Can't actually do useful testruns against install - $libdir # etc will point to the directory configured during postgres' # build. We could copy the installed tree around, but that's quite # likely to be mixed with other binaries and such... cleandir-main: ### echo rm -rf '$(CURDIR)'/tmp_check/install ### tempinstall-main: cleandir-main #### mkdir -p $(MULTI_INSTALLDIR) ### $(MAKE) DESTDIR=$(MULTI_INSTALLDIR) -C $(citus_top_builddir) install > tmp_check/install.log 2>&1 # Test input and expected files. These are created by pg_regress itself, so we # don't have a rule to create them. We do need rules to clean them however. input_files := $(patsubst $(citus_abs_srcdir)/input/%.source,sql/%.sql, $(wildcard $(citus_abs_srcdir)/input/*.source)) output_files := $(patsubst $(citus_abs_srcdir)/output/%.source,expected/%.out, $(wildcard $(citus_abs_srcdir)/output/*.source)) # have make check actually run all tests, but keep check-full as an # intermediate, for muscle memory backward compatibility. 
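#
# Editorial usage note (not part of the original Makefile): the schedule-based
# targets below append $(EXTRA_TESTS) to the regression invocation, so an
# extra test can typically be tacked onto a run from the command line, e.g.
#
#   make check-multi EXTRA_TESTS='my_extra_test'
#
# where 'my_extra_test' is a placeholder for the name of a regression test
# under sql/.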
check: check-full # check-full triggers all tests that ought to be run routinely check-full: check-multi check-multi-mx check-multi-task-tracker-extra check-multi-binary check-worker check-follower-cluster # using pg_regress_multi_check unnecessarily starts up multiple nodes, which isn't needed # for check-worker. But that's harmless besides a few cycles. check-worker: all $(pg_regress_multi_check) --load-extension=citus \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/worker_schedule $(EXTRA_TESTS) check-multi: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS) check-multi-vg: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus --valgrind \ --pg_ctl-timeout=360 --connection-timeout=500000 --valgrind-path=valgrind --valgrind-log-file=$(VALGRIND_LOG_FILE) \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_schedule $(EXTRA_TESTS) check-isolation: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus --isolationtester \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/isolation_schedule $(EXTRA_TESTS) check-vanilla: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus --vanillatest check-multi-mx: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_mx_schedule $(EXTRA_TESTS) check-multi-task-tracker-extra: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus \ --server-option=citus.task_executor_type=task-tracker \ --server-option=citus.large_table_shard_count=1 \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_task_tracker_extra_schedule $(EXTRA_TESTS) check-multi-binary: all tempinstall-main $(pg_regress_multi_check) --load-extension=citus \ --server-option=citus.binary_worker_copy_format=on \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_binary_schedule $(EXTRA_TESTS) check-follower-cluster: all $(pg_regress_multi_check) --load-extension=citus --follower-cluster \ -- $(MULTI_REGRESS_OPTS) --schedule=$(citus_abs_srcdir)/multi_follower_schedule $(EXTRA_TESTS) clean distclean maintainer-clean: rm -f $(output_files) $(input_files) rm -rf tmp_check/ citus-7.0.3/src/test/regress/data/000077500000000000000000000000001317107136600170065ustar00rootroot00000000000000citus-7.0.3/src/test/regress/data/agg.data000066400000000000000000000000461317107136600203770ustar00rootroot0000000000000056 7.8 100 99.097 0 0.09561 42 324.78 citus-7.0.3/src/test/regress/data/agg_type.data000066400000000000000000000001051317107136600214340ustar00rootroot000000000000001.0 2.343 23.44 2.0 3.544 324.4 3.0 3.5666 2332.9 4.5 6.34343 2332.9 citus-7.0.3/src/test/regress/data/customer-1-10.data000066400000000000000000000031011317107136600220510ustar00rootroot000000000000001|Customer#000000001|IVhzIApeRb ot,c,E|15|25-989-741-2988|711.56|BUILDING|to the even, regular platelets. regular, ironic epitaphs nag e 2|Customer#000000002|XSTf4,NCwDVaWNe6tEgvwfmRchLXak|13|23-768-687-3665|121.65|AUTOMOBILE|l accounts. blithely ironic theodolites integrate boldly: caref 3|Customer#000000003|MG9kdTD2WBHm|1|11-719-748-3364|7498.12|AUTOMOBILE| deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov 4|Customer#000000004|XxVSJsLAGtn|4|14-128-190-5944|2866.83|MACHINERY| requests. 
final, regular ideas sleep final accou 5|Customer#000000005|KvpyuHCplrB84WgAiGV6sYpZq7Tj|3|13-750-942-6364|794.47|HOUSEHOLD|n accounts will have to unwind. foxes cajole accor 6|Customer#000000006|sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn|20|30-114-968-4951|7638.57|AUTOMOBILE|tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious 7|Customer#000000007|TcGe5gaZNgVePxU5kRrvXBfkasDTea|18|28-190-982-9759|9561.95|AUTOMOBILE|ainst the ironic, express theodolites. express, even pinto beans among the exp 8|Customer#000000008|I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5|17|27-147-574-9335|6819.74|BUILDING|among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide 9|Customer#000000009|xKiAFTjUsCuxfeleNqefumTrjS|8|18-338-906-3675|8324.07|FURNITURE|r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl 10|Customer#000000010|6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2|5|15-741-346-9870|2753.54|HOUSEHOLD|es regular deposits haggle. fur citus-7.0.3/src/test/regress/data/customer-1-15.data000066400000000000000000000046111317107136600220650ustar00rootroot000000000000001|Customer#000000001|IVhzIApeRb ot,c,E|15|25-989-741-2988|711.56|BUILDING|to the even, regular platelets. regular, ironic epitaphs nag e 2|Customer#000000002|XSTf4,NCwDVaWNe6tEgvwfmRchLXak|13|23-768-687-3665|121.65|AUTOMOBILE|l accounts. blithely ironic theodolites integrate boldly: caref 3|Customer#000000003|MG9kdTD2WBHm|1|11-719-748-3364|7498.12|AUTOMOBILE| deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov 4|Customer#000000004|XxVSJsLAGtn|4|14-128-190-5944|2866.83|MACHINERY| requests. final, regular ideas sleep final accou 5|Customer#000000005|KvpyuHCplrB84WgAiGV6sYpZq7Tj|3|13-750-942-6364|794.47|HOUSEHOLD|n accounts will have to unwind. foxes cajole accor 6|Customer#000000006|sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn|20|30-114-968-4951|7638.57|AUTOMOBILE|tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious 7|Customer#000000007|TcGe5gaZNgVePxU5kRrvXBfkasDTea|18|28-190-982-9759|9561.95|AUTOMOBILE|ainst the ironic, express theodolites. express, even pinto beans among the exp 8|Customer#000000008|I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5|17|27-147-574-9335|6819.74|BUILDING|among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide 9|Customer#000000009|xKiAFTjUsCuxfeleNqefumTrjS|8|18-338-906-3675|8324.07|FURNITURE|r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl 10|Customer#000000010|6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2|5|15-741-346-9870|2753.54|HOUSEHOLD|es regular deposits haggle. fur 11|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. 12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along 13|Customer#000000013|nsXQu0oVjD7PM659uC3SRSp|3|13-761-547-5974|3857.34|BUILDING|ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely 14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus 15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. 
regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf citus-7.0.3/src/test/regress/data/customer-1-20.data000066400000000000000000000064561317107136600220720ustar00rootroot000000000000001|Customer#000000001|IVhzIApeRb ot,c,E|15|25-989-741-2988|711.56|BUILDING|to the even, regular platelets. regular, ironic epitaphs nag e 2|Customer#000000002|XSTf4,NCwDVaWNe6tEgvwfmRchLXak|13|23-768-687-3665|121.65|AUTOMOBILE|l accounts. blithely ironic theodolites integrate boldly: caref 3|Customer#000000003|MG9kdTD2WBHm|1|11-719-748-3364|7498.12|AUTOMOBILE| deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov 4|Customer#000000004|XxVSJsLAGtn|4|14-128-190-5944|2866.83|MACHINERY| requests. final, regular ideas sleep final accou 5|Customer#000000005|KvpyuHCplrB84WgAiGV6sYpZq7Tj|3|13-750-942-6364|794.47|HOUSEHOLD|n accounts will have to unwind. foxes cajole accor 6|Customer#000000006|sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn|20|30-114-968-4951|7638.57|AUTOMOBILE|tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious 7|Customer#000000007|TcGe5gaZNgVePxU5kRrvXBfkasDTea|18|28-190-982-9759|9561.95|AUTOMOBILE|ainst the ironic, express theodolites. express, even pinto beans among the exp 8|Customer#000000008|I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5|17|27-147-574-9335|6819.74|BUILDING|among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide 9|Customer#000000009|xKiAFTjUsCuxfeleNqefumTrjS|8|18-338-906-3675|8324.07|FURNITURE|r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl 10|Customer#000000010|6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2|5|15-741-346-9870|2753.54|HOUSEHOLD|es regular deposits haggle. fur 11|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. 12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along 13|Customer#000000013|nsXQu0oVjD7PM659uC3SRSp|3|13-761-547-5974|3857.34|BUILDING|ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely 14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus 15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf 15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf 16|Customer#000000016|cYiaeMLZSMAOQ2 d0W,|10|20-781-609-3107|4681.03|FURNITURE|kly silent courts. thinly regular theodolites sleep fluffily after 17|Customer#000000017|izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7|2|12-970-682-3487|6.34|AUTOMOBILE|packages wake! blithely even pint 18|Customer#000000018|3txGO AiuFux3zT0Z9NYaFRnZt|6|16-155-215-1315|5494.43|BUILDING|s sleep. carefully even instructions nag furiously alongside of t 19|Customer#000000019|uc,3bHIx84H,wdrmLOjVsiqXCq2tr|18|28-396-526-5053|8914.71|HOUSEHOLD| nag. furiously careful packages are slyly at the accounts. 
furiously regular in 20|Customer#000000020|JrPk8Pqplj4Ne|22|32-957-234-8742|7603.40|FURNITURE|g alongside of the special excuses-- fluffily enticing packages wake citus-7.0.3/src/test/regress/data/customer-1-30.data000066400000000000000000000110571317107136600220640ustar00rootroot000000000000001|Customer#000000001|IVhzIApeRb ot,c,E|15|25-989-741-2988|711.56|BUILDING|to the even, regular platelets. regular, ironic epitaphs nag e 2|Customer#000000002|XSTf4,NCwDVaWNe6tEgvwfmRchLXak|13|23-768-687-3665|121.65|AUTOMOBILE|l accounts. blithely ironic theodolites integrate boldly: caref 3|Customer#000000003|MG9kdTD2WBHm|1|11-719-748-3364|7498.12|AUTOMOBILE| deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov 4|Customer#000000004|XxVSJsLAGtn|4|14-128-190-5944|2866.83|MACHINERY| requests. final, regular ideas sleep final accou 5|Customer#000000005|KvpyuHCplrB84WgAiGV6sYpZq7Tj|3|13-750-942-6364|794.47|HOUSEHOLD|n accounts will have to unwind. foxes cajole accor 6|Customer#000000006|sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn|20|30-114-968-4951|7638.57|AUTOMOBILE|tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious 7|Customer#000000007|TcGe5gaZNgVePxU5kRrvXBfkasDTea|18|28-190-982-9759|9561.95|AUTOMOBILE|ainst the ironic, express theodolites. express, even pinto beans among the exp 8|Customer#000000008|I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5|17|27-147-574-9335|6819.74|BUILDING|among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide 9|Customer#000000009|xKiAFTjUsCuxfeleNqefumTrjS|8|18-338-906-3675|8324.07|FURNITURE|r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl 10|Customer#000000010|6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2|5|15-741-346-9870|2753.54|HOUSEHOLD|es regular deposits haggle. fur 11|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. 12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along 13|Customer#000000013|nsXQu0oVjD7PM659uC3SRSp|3|13-761-547-5974|3857.34|BUILDING|ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely 14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus 15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf 16|Customer#000000016|cYiaeMLZSMAOQ2 d0W,|10|20-781-609-3107|4681.03|FURNITURE|kly silent courts. thinly regular theodolites sleep fluffily after 17|Customer#000000017|izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7|2|12-970-682-3487|6.34|AUTOMOBILE|packages wake! blithely even pint 18|Customer#000000018|3txGO AiuFux3zT0Z9NYaFRnZt|6|16-155-215-1315|5494.43|BUILDING|s sleep. carefully even instructions nag furiously alongside of t 19|Customer#000000019|uc,3bHIx84H,wdrmLOjVsiqXCq2tr|18|28-396-526-5053|8914.71|HOUSEHOLD| nag. furiously careful packages are slyly at the accounts. 
furiously regular in 20|Customer#000000020|JrPk8Pqplj4Ne|22|32-957-234-8742|7603.40|FURNITURE|g alongside of the special excuses-- fluffily enticing packages wake 21|Customer#000000021|XYmVpr9yAHDEn|8|18-902-614-8344|1428.25|MACHINERY| quickly final accounts integrate blithely furiously u 22|Customer#000000022|QI6p41,FNs5k7RZoCCVPUTkUdYpB|3|13-806-545-9701|591.98|MACHINERY|s nod furiously above the furiously ironic ideas. 23|Customer#000000023|OdY W13N7Be3OC5MpgfmcYss0Wn6TKT|3|13-312-472-8245|3332.02|HOUSEHOLD|deposits. special deposits cajole slyly. fluffily special deposits about the furiously 24|Customer#000000024|HXAFgIAyjxtdqwimt13Y3OZO 4xeLe7U8PqG|13|23-127-851-8031|9255.67|MACHINERY|into beans. fluffily final ideas haggle fluffily 25|Customer#000000025|Hp8GyFQgGHFYSilH5tBfe|12|22-603-468-3533|7133.70|FURNITURE|y. accounts sleep ruthlessly according to the regular theodolites. unusual instructions sleep. ironic, final 26|Customer#000000026|8ljrc5ZeMl7UciP|22|32-363-455-4837|5182.05|AUTOMOBILE|c requests use furiously ironic requests. slyly ironic dependencies us 27|Customer#000000027|IS8GIyxpBrLpMT0u7|3|13-137-193-2709|5679.84|BUILDING| about the carefully ironic pinto beans. accoun 28|Customer#000000028|iVyg0daQ,Tha8x2WPWA9m2529m|8|18-774-241-1462|1007.18|FURNITURE| along the regular deposits. furiously final pac 29|Customer#000000029|sJ5adtfyAkCK63df2,vF25zyQMVYE34uh|0|10-773-203-7342|7618.27|FURNITURE|its after the carefully final platelets x-ray against 30|Customer#000000030|nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY|1|11-764-165-5076|9321.01|BUILDING|lithely final requests. furiously unusual account citus-7.0.3/src/test/regress/data/customer-11-20.data000066400000000000000000000030531317107136600221410ustar00rootroot0000000000000011|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. 12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along 13|Customer#000000013|nsXQu0oVjD7PM659uC3SRSp|3|13-761-547-5974|3857.34|BUILDING|ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely 14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus 15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf 16|Customer#000000016|cYiaeMLZSMAOQ2 d0W,|10|20-781-609-3107|4681.03|FURNITURE|kly silent courts. thinly regular theodolites sleep fluffily after 17|Customer#000000017|izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7|2|12-970-682-3487|6.34|AUTOMOBILE|packages wake! blithely even pint 18|Customer#000000018|3txGO AiuFux3zT0Z9NYaFRnZt|6|16-155-215-1315|5494.43|BUILDING|s sleep. carefully even instructions nag furiously alongside of t 19|Customer#000000019|uc,3bHIx84H,wdrmLOjVsiqXCq2tr|18|28-396-526-5053|8914.71|HOUSEHOLD| nag. furiously careful packages are slyly at the accounts. 
furiously regular in 20|Customer#000000020|JrPk8Pqplj4Ne|22|32-957-234-8742|7603.40|FURNITURE|g alongside of the special excuses-- fluffily enticing packages wake citus-7.0.3/src/test/regress/data/customer-21-30.data000066400000000000000000000027031317107136600221440ustar00rootroot0000000000000021|Customer#000000021|XYmVpr9yAHDEn|8|18-902-614-8344|1428.25|MACHINERY| quickly final accounts integrate blithely furiously u 22|Customer#000000022|QI6p41,FNs5k7RZoCCVPUTkUdYpB|3|13-806-545-9701|591.98|MACHINERY|s nod furiously above the furiously ironic ideas. 23|Customer#000000023|OdY W13N7Be3OC5MpgfmcYss0Wn6TKT|3|13-312-472-8245|3332.02|HOUSEHOLD|deposits. special deposits cajole slyly. fluffily special deposits about the furiously 24|Customer#000000024|HXAFgIAyjxtdqwimt13Y3OZO 4xeLe7U8PqG|13|23-127-851-8031|9255.67|MACHINERY|into beans. fluffily final ideas haggle fluffily 25|Customer#000000025|Hp8GyFQgGHFYSilH5tBfe|12|22-603-468-3533|7133.70|FURNITURE|y. accounts sleep ruthlessly according to the regular theodolites. unusual instructions sleep. ironic, final 26|Customer#000000026|8ljrc5ZeMl7UciP|22|32-363-455-4837|5182.05|AUTOMOBILE|c requests use furiously ironic requests. slyly ironic dependencies us 27|Customer#000000027|IS8GIyxpBrLpMT0u7|3|13-137-193-2709|5679.84|BUILDING| about the carefully ironic pinto beans. accoun 28|Customer#000000028|iVyg0daQ,Tha8x2WPWA9m2529m|8|18-774-241-1462|1007.18|FURNITURE| along the regular deposits. furiously final pac 29|Customer#000000029|sJ5adtfyAkCK63df2,vF25zyQMVYE34uh|0|10-773-203-7342|7618.27|FURNITURE|its after the carefully final platelets x-ray against 30|Customer#000000030|nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY|1|11-764-165-5076|9321.01|BUILDING|lithely final requests. furiously unusual account citus-7.0.3/src/test/regress/data/customer.1.data000066400000000000000000004663041317107136600216560ustar00rootroot000000000000001|Customer#000000001|IVhzIApeRb ot,c,E|15|25-989-741-2988|711.56|BUILDING|to the even, regular platelets. regular, ironic epitaphs nag e 2|Customer#000000002|XSTf4,NCwDVaWNe6tEgvwfmRchLXak|13|23-768-687-3665|121.65|AUTOMOBILE|l accounts. blithely ironic theodolites integrate boldly: caref 3|Customer#000000003|MG9kdTD2WBHm|1|11-719-748-3364|7498.12|AUTOMOBILE| deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov 4|Customer#000000004|XxVSJsLAGtn|4|14-128-190-5944|2866.83|MACHINERY| requests. final, regular ideas sleep final accou 5|Customer#000000005|KvpyuHCplrB84WgAiGV6sYpZq7Tj|3|13-750-942-6364|794.47|HOUSEHOLD|n accounts will have to unwind. foxes cajole accor 6|Customer#000000006|sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn|20|30-114-968-4951|7638.57|AUTOMOBILE|tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious 7|Customer#000000007|TcGe5gaZNgVePxU5kRrvXBfkasDTea|18|28-190-982-9759|9561.95|AUTOMOBILE|ainst the ironic, express theodolites. express, even pinto beans among the exp 8|Customer#000000008|I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5|17|27-147-574-9335|6819.74|BUILDING|among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide 9|Customer#000000009|xKiAFTjUsCuxfeleNqefumTrjS|8|18-338-906-3675|8324.07|FURNITURE|r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl 10|Customer#000000010|6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2|5|15-741-346-9870|2753.54|HOUSEHOLD|es regular deposits haggle. 
fur 11|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. 12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along 13|Customer#000000013|nsXQu0oVjD7PM659uC3SRSp|3|13-761-547-5974|3857.34|BUILDING|ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely 14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus 15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf 16|Customer#000000016|cYiaeMLZSMAOQ2 d0W,|10|20-781-609-3107|4681.03|FURNITURE|kly silent courts. thinly regular theodolites sleep fluffily after 17|Customer#000000017|izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7|2|12-970-682-3487|6.34|AUTOMOBILE|packages wake! blithely even pint 18|Customer#000000018|3txGO AiuFux3zT0Z9NYaFRnZt|6|16-155-215-1315|5494.43|BUILDING|s sleep. carefully even instructions nag furiously alongside of t 19|Customer#000000019|uc,3bHIx84H,wdrmLOjVsiqXCq2tr|18|28-396-526-5053|8914.71|HOUSEHOLD| nag. furiously careful packages are slyly at the accounts. furiously regular in 20|Customer#000000020|JrPk8Pqplj4Ne|22|32-957-234-8742|7603.40|FURNITURE|g alongside of the special excuses-- fluffily enticing packages wake 21|Customer#000000021|XYmVpr9yAHDEn|8|18-902-614-8344|1428.25|MACHINERY| quickly final accounts integrate blithely furiously u 22|Customer#000000022|QI6p41,FNs5k7RZoCCVPUTkUdYpB|3|13-806-545-9701|591.98|MACHINERY|s nod furiously above the furiously ironic ideas. 23|Customer#000000023|OdY W13N7Be3OC5MpgfmcYss0Wn6TKT|3|13-312-472-8245|3332.02|HOUSEHOLD|deposits. special deposits cajole slyly. fluffily special deposits about the furiously 24|Customer#000000024|HXAFgIAyjxtdqwimt13Y3OZO 4xeLe7U8PqG|13|23-127-851-8031|9255.67|MACHINERY|into beans. fluffily final ideas haggle fluffily 25|Customer#000000025|Hp8GyFQgGHFYSilH5tBfe|12|22-603-468-3533|7133.70|FURNITURE|y. accounts sleep ruthlessly according to the regular theodolites. unusual instructions sleep. ironic, final 26|Customer#000000026|8ljrc5ZeMl7UciP|22|32-363-455-4837|5182.05|AUTOMOBILE|c requests use furiously ironic requests. slyly ironic dependencies us 27|Customer#000000027|IS8GIyxpBrLpMT0u7|3|13-137-193-2709|5679.84|BUILDING| about the carefully ironic pinto beans. accoun 28|Customer#000000028|iVyg0daQ,Tha8x2WPWA9m2529m|8|18-774-241-1462|1007.18|FURNITURE| along the regular deposits. furiously final pac 29|Customer#000000029|sJ5adtfyAkCK63df2,vF25zyQMVYE34uh|0|10-773-203-7342|7618.27|FURNITURE|its after the carefully final platelets x-ray against 30|Customer#000000030|nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY|1|11-764-165-5076|9321.01|BUILDING|lithely final requests. furiously unusual account 31|Customer#000000031|LUACbO0viaAv6eXOAebryDB xjVst|23|33-197-837-7094|5236.89|HOUSEHOLD|s use among the blithely pending depo 32|Customer#000000032|jD2xZzi UmId,DCtNBLXKj9q0Tlp2iQ6ZcO3J|15|25-430-914-2194|3471.53|BUILDING|cial ideas. final, furious requests across the e 33|Customer#000000033|qFSlMuLucBmx9xnn5ib2csWUweg D|17|27-375-391-1280|-78.56|AUTOMOBILE|s. slyly regular accounts are furiously. 
carefully pending requests 34|Customer#000000034|Q6G9wZ6dnczmtOx509xgE,M2KV|15|25-344-968-5422|8589.70|HOUSEHOLD|nder against the even, pending accounts. even 35|Customer#000000035|TEjWGE4nBzJL2|17|27-566-888-7431|1228.24|HOUSEHOLD|requests. special, express requests nag slyly furiousl 36|Customer#000000036|3TvCzjuPzpJ0,DdJ8kW5U|21|31-704-669-5769|4987.27|BUILDING|haggle. enticing, quiet platelets grow quickly bold sheaves. carefully regular acc 37|Customer#000000037|7EV4Pwh,3SboctTWt|8|18-385-235-7162|-917.75|FURNITURE|ilent packages are carefully among the deposits. furiousl 38|Customer#000000038|a5Ee5e9568R8RLP 2ap7|12|22-306-880-7212|6345.11|HOUSEHOLD|lar excuses. closely even asymptotes cajole blithely excuses. carefully silent pinto beans sleep carefully fin 39|Customer#000000039|nnbRg,Pvy33dfkorYE FdeZ60|2|12-387-467-6509|6264.31|AUTOMOBILE|tions. slyly silent excuses slee 40|Customer#000000040|gOnGWAyhSV1ofv|3|13-652-915-8939|1335.30|BUILDING|rges impress after the slyly ironic courts. foxes are. blithely 41|Customer#000000041|IM9mzmyoxeBmvNw8lA7G3Ydska2nkZF|10|20-917-711-4011|270.95|HOUSEHOLD|ly regular accounts hang bold, silent packages. unusual foxes haggle slyly above the special, final depo 42|Customer#000000042|ziSrvyyBke|5|15-416-330-4175|8727.01|BUILDING|ssly according to the pinto beans: carefully special requests across the even, pending accounts wake special 43|Customer#000000043|ouSbjHk8lh5fKX3zGso3ZSIj9Aa3PoaFd|19|29-316-665-2897|9904.28|MACHINERY|ial requests: carefully pending foxes detect quickly. carefully final courts cajole quickly. carefully 44|Customer#000000044|Oi,dOSPwDu4jo4x,,P85E0dmhZGvNtBwi|16|26-190-260-5375|7315.94|AUTOMOBILE|r requests around the unusual, bold a 45|Customer#000000045|4v3OcpFgoOmMG,CbnF,4mdC|9|19-715-298-9917|9983.38|AUTOMOBILE|nto beans haggle slyly alongside of t 46|Customer#000000046|eaTXWWm10L9|6|16-357-681-2007|5744.59|AUTOMOBILE|ctions. accounts sleep furiously even requests. regular, regular accounts cajole blithely around the final pa 47|Customer#000000047|b0UgocSqEW5 gdVbhNT|2|12-427-271-9466|274.58|BUILDING|ions. express, ironic instructions sleep furiously ironic ideas. furi 48|Customer#000000048|0UU iPhBupFvemNB|0|10-508-348-5882|3792.50|BUILDING|re fluffily pending foxes. pending, bold platelets sleep slyly. even platelets cajo 49|Customer#000000049|cNgAeX7Fqrdf7HQN9EwjUa4nxT,68L FKAxzl|10|20-908-631-4424|4573.94|FURNITURE|nusual foxes! fluffily pending packages maintain to the regular 50|Customer#000000050|9SzDYlkzxByyJ1QeTI o|6|16-658-112-3221|4266.13|MACHINERY|ts. furiously ironic accounts cajole furiously slyly ironic dinos. 51|Customer#000000051|uR,wEaiTvo4|12|22-344-885-4251|855.87|FURNITURE|eposits. furiously regular requests integrate carefully packages. furious 52|Customer#000000052|7 QOqGqqSy9jfV51BC71jcHJSD0|11|21-186-284-5998|5630.28|HOUSEHOLD|ic platelets use evenly even accounts. stealthy theodolites cajole furiou 53|Customer#000000053|HnaxHzTfFTZs8MuCpJyTbZ47Cm4wFOOgib|15|25-168-852-5363|4113.64|HOUSEHOLD|ar accounts are. even foxes are blithely. fluffily pending deposits boost 54|Customer#000000054|,k4vf 5vECGWFy,hosTE,|4|14-776-370-4745|868.90|AUTOMOBILE|sual, silent accounts. furiously express accounts cajole special deposits. final, final accounts use furi 55|Customer#000000055|zIRBR4KNEl HzaiV3a i9n6elrxzDEh8r8pDom|10|20-180-440-8525|4572.11|MACHINERY|ully unusual packages wake bravely bold packages. unusual requests boost deposits! 
blithely ironic packages ab 56|Customer#000000056|BJYZYJQk4yD5B|10|20-895-685-6920|6530.86|FURNITURE|. notornis wake carefully. carefully fluffy requests are furiously even accounts. slyly expre 57|Customer#000000057|97XYbsuOPRXPWU|21|31-835-306-1650|4151.93|AUTOMOBILE|ove the carefully special packages. even, unusual deposits sleep slyly pend 58|Customer#000000058|g9ap7Dk1Sv9fcXEWjpMYpBZIRUohi T|13|23-244-493-2508|6478.46|HOUSEHOLD|ideas. ironic ideas affix furiously express, final instructions. regular excuses use quickly e 59|Customer#000000059|zLOCP0wh92OtBihgspOGl4|1|11-355-584-3112|3458.60|MACHINERY|ously final packages haggle blithely after the express deposits. furiou 60|Customer#000000060|FyodhjwMChsZmUz7Jz0H|12|22-480-575-5866|2741.87|MACHINERY|latelets. blithely unusual courts boost furiously about the packages. blithely final instruct 61|Customer#000000061|9kndve4EAJxhg3veF BfXr7AqOsT39o gtqjaYE|17|27-626-559-8599|1536.24|FURNITURE|egular packages shall have to impress along the 62|Customer#000000062|upJK2Dnw13,|7|17-361-978-7059|595.61|MACHINERY|kly special dolphins. pinto beans are slyly. quickly regular accounts are furiously a 63|Customer#000000063|IXRSpVWWZraKII|21|31-952-552-9584|9331.13|AUTOMOBILE|ithely even accounts detect slyly above the fluffily ir 64|Customer#000000064|MbCeGY20kaKK3oalJD,OT|3|13-558-731-7204|-646.64|BUILDING|structions after the quietly ironic theodolites cajole be 65|Customer#000000065|RGT yzQ0y4l0H90P783LG4U95bXQFDRXbWa1sl,X|23|33-733-623-5267|8795.16|AUTOMOBILE|y final foxes serve carefully. theodolites are carefully. pending i 66|Customer#000000066|XbsEqXH1ETbJYYtA1A|22|32-213-373-5094|242.77|HOUSEHOLD|le slyly accounts. carefully silent packages benea 67|Customer#000000067|rfG0cOgtr5W8 xILkwp9fpCS8|9|19-403-114-4356|8166.59|MACHINERY|indle furiously final, even theodo 68|Customer#000000068|o8AibcCRkXvQFh8hF,7o|12|22-918-832-2411|6853.37|HOUSEHOLD| pending pinto beans impress realms. final dependencies 69|Customer#000000069|Ltx17nO9Wwhtdbe9QZVxNgP98V7xW97uvSH1prEw|9|19-225-978-5670|1709.28|HOUSEHOLD|thely final ideas around the quickly final dependencies affix carefully quickly final theodolites. final accounts c 70|Customer#000000070|mFowIuhnHjp2GjCiYYavkW kUwOjIaTCQ|22|32-828-107-2832|4867.52|FURNITURE|fter the special asymptotes. ideas after the unusual frets cajole quickly regular pinto be 71|Customer#000000071|TlGalgdXWBmMV,6agLyWYDyIz9MKzcY8gl,w6t1B|7|17-710-812-5403|-611.19|HOUSEHOLD|g courts across the regular, final pinto beans are blithely pending ac 72|Customer#000000072|putjlmskxE,zs,HqeIA9Wqu7dhgH5BVCwDwHHcf|2|12-759-144-9689|-362.86|FURNITURE|ithely final foxes sleep always quickly bold accounts. final wat 73|Customer#000000073|8IhIxreu4Ug6tt5mog4|0|10-473-439-3214|4288.50|BUILDING|usual, unusual packages sleep busily along the furiou 74|Customer#000000074|IkJHCA3ZThF7qL7VKcrU nRLl,kylf |4|14-199-862-7209|2764.43|MACHINERY|onic accounts. blithely slow packages would haggle carefully. qui 75|Customer#000000075|Dh 6jZ,cwxWLKQfRKkiGrzv6pm|18|28-247-803-9025|6684.10|AUTOMOBILE| instructions cajole even, even deposits. finally bold deposits use above the even pains. slyl 76|Customer#000000076|m3sbCvjMOHyaOofH,e UkGPtqc4|0|10-349-718-3044|5745.33|FURNITURE|pecial deposits. ironic ideas boost blithely according to the closely ironic theodolites! furiously final deposits n 77|Customer#000000077|4tAE5KdMFGD4byHtXF92vx|17|27-269-357-4674|1738.87|BUILDING|uffily silent requests. 
carefully ironic asymptotes among the ironic hockey players are carefully bli 78|Customer#000000078|HBOta,ZNqpg3U2cSL0kbrftkPwzX|9|19-960-700-9191|7136.97|FURNITURE|ests. blithely bold pinto beans h 79|Customer#000000079|n5hH2ftkVRwW8idtD,BmM2|15|25-147-850-4166|5121.28|MACHINERY|es. packages haggle furiously. regular, special requests poach after the quickly express ideas. blithely pending re 80|Customer#000000080|K,vtXp8qYB |0|10-267-172-7101|7383.53|FURNITURE|tect among the dependencies. bold accounts engage closely even pinto beans. ca 81|Customer#000000081|SH6lPA7JiiNC6dNTrR|20|30-165-277-3269|2023.71|BUILDING|r packages. fluffily ironic requests cajole fluffily. ironically regular theodolit 82|Customer#000000082|zhG3EZbap4c992Gj3bK,3Ne,Xn|18|28-159-442-5305|9468.34|AUTOMOBILE|s wake. bravely regular accounts are furiously. regula 83|Customer#000000083|HnhTNB5xpnSF20JBH4Ycs6psVnkC3RDf|22|32-817-154-4122|6463.51|BUILDING|ccording to the quickly bold warhorses. final, regular foxes integrate carefully. bold packages nag blithely ev 84|Customer#000000084|lpXz6Fwr9945rnbtMc8PlueilS1WmASr CB|11|21-546-818-3802|5174.71|FURNITURE|ly blithe foxes. special asymptotes haggle blithely against the furiously regular depo 85|Customer#000000085|siRerlDwiolhYR 8FgksoezycLj|5|15-745-585-8219|3386.64|FURNITURE|ronic ideas use above the slowly pendin 86|Customer#000000086|US6EGGHXbTTXPL9SBsxQJsuvy|0|10-677-951-2353|3306.32|HOUSEHOLD|quests. pending dugouts are carefully aroun 87|Customer#000000087|hgGhHVSWQl 6jZ6Ev|23|33-869-884-7053|6327.54|FURNITURE|hely ironic requests integrate according to the ironic accounts. slyly regular pla 88|Customer#000000088|wtkjBN9eyrFuENSMmMFlJ3e7jE5KXcg|16|26-516-273-2566|8031.44|AUTOMOBILE|s are quickly above the quickly ironic instructions; even requests about the carefully final deposi 89|Customer#000000089|dtR, y9JQWUO6FoJExyp8whOU|14|24-394-451-5404|1530.76|FURNITURE|counts are slyly beyond the slyly final accounts. quickly final ideas wake. r 90|Customer#000000090|QxCzH7VxxYUWwfL7|16|26-603-491-1238|7354.23|BUILDING|sly across the furiously even 91|Customer#000000091|S8OMYFrpHwoNHaGBeuS6E 6zhHGZiprw1b7 q|8|18-239-400-3677|4643.14|AUTOMOBILE|onic accounts. fluffily silent pinto beans boost blithely according to the fluffily exp 92|Customer#000000092|obP PULk2LH LqNF,K9hcbNqnLAkJVsl5xqSrY,|2|12-446-416-8471|1182.91|MACHINERY|. pinto beans hang slyly final deposits. ac 93|Customer#000000093|EHXBr2QGdh|7|17-359-388-5266|2182.52|MACHINERY|press deposits. carefully regular platelets r 94|Customer#000000094|IfVNIN9KtkScJ9dUjK3Pg5gY1aFeaXewwf|9|19-953-499-8833|5500.11|HOUSEHOLD|latelets across the bold, final requests sleep according to the fluffily bold accounts. unusual deposits amon 95|Customer#000000095|EU0xvmWvOmUUn5J,2z85DQyG7QCJ9Xq7|15|25-923-255-2929|5327.38|MACHINERY|ithely. ruthlessly final requests wake slyly alongside of the furiously silent pinto beans. even the 96|Customer#000000096|vWLOrmXhRR|8|18-422-845-1202|6323.92|AUTOMOBILE|press requests believe furiously. carefully final instructions snooze carefully. 97|Customer#000000097|OApyejbhJG,0Iw3j rd1M|17|27-588-919-5638|2164.48|AUTOMOBILE|haggle slyly. bold, special ideas are blithely above the thinly bold theo 98|Customer#000000098|7yiheXNSpuEAwbswDW|12|22-885-845-6889|-551.37|BUILDING|ages. furiously pending accounts are quickly carefully final foxes: busily pe 99|Customer#000000099|szsrOiPtCHVS97Lt|15|25-515-237-9232|4088.65|HOUSEHOLD|cajole slyly about the regular theodolites! 
furiously bold requests nag along the pending, regular packages. somas 100|Customer#000000100|fptUABXcmkC5Wx|20|30-749-445-4907|9889.89|FURNITURE|was furiously fluffily quiet deposits. silent, pending requests boost against 101|Customer#000000101|sMmL2rNeHDltovSm Y|2|12-514-298-3699|7470.96|MACHINERY| sleep. pending packages detect slyly ironic pack 102|Customer#000000102|UAtflJ06 fn9zBfKjInkQZlWtqaA|19|29-324-978-8538|8462.17|BUILDING|ously regular dependencies nag among the furiously express dinos. blithely final 103|Customer#000000103|8KIsQX4LJ7QMsj6DrtFtXu0nUEdV,8a|9|19-216-107-2107|2757.45|BUILDING|furiously pending notornis boost slyly around the blithely ironic ideas? final, even instructions cajole fl 104|Customer#000000104|9mcCK L7rt0SwiYtrbO88DiZS7U d7M|10|20-966-284-8065|-588.38|FURNITURE|rate carefully slyly special pla 105|Customer#000000105|4iSJe4L SPjg7kJj98Yz3z0B|10|20-793-553-6417|9091.82|MACHINERY|l pains cajole even accounts. quietly final instructi 106|Customer#000000106|xGCOEAUjUNG|1|11-751-989-4627|3288.42|MACHINERY|lose slyly. ironic accounts along the evenly regular theodolites wake about the special, final gifts. 107|Customer#000000107|Zwg64UZ,q7GRqo3zm7P1tZIRshBDz|15|25-336-529-9919|2514.15|AUTOMOBILE|counts cajole slyly. regular requests wake. furiously regular deposits about the blithely final fo 108|Customer#000000108|GPoeEvpKo1|5|15-908-619-7526|2259.38|BUILDING|refully ironic deposits sleep. regular, unusual requests wake slyly 109|Customer#000000109|OOOkYBgCMzgMQXUmkocoLb56rfrdWp2NE2c|16|26-992-422-8153|-716.10|BUILDING|es. fluffily final dependencies sleep along the blithely even pinto beans. final deposits haggle furiously furiou 110|Customer#000000110|mymPfgphaYXNYtk|10|20-893-536-2069|7462.99|AUTOMOBILE|nto beans cajole around the even, final deposits. quickly bold packages according to the furiously regular dept 111|Customer#000000111|CBSbPyOWRorloj2TBvrK9qp9tHBs|22|32-582-283-7528|6505.26|MACHINERY|ly unusual instructions detect fluffily special deposits-- theodolites nag carefully during the ironic dependencies 112|Customer#000000112|RcfgG3bO7QeCnfjqJT1|19|29-233-262-8382|2953.35|FURNITURE|rmanently unusual multipliers. blithely ruthless deposits are furiously along the 113|Customer#000000113|eaOl5UBXIvdY57rglaIzqvfPD,MYfK|12|22-302-930-4756|2912.00|BUILDING|usly regular theodolites boost furiously doggedly pending instructio 114|Customer#000000114|xAt 5f5AlFIU|14|24-805-212-7646|1027.46|FURNITURE|der the carefully express theodolites are after the packages. packages are. bli 115|Customer#000000115|0WFt1IXENmUT2BgbsB0ShVKJZt0HCBCbFl0aHc|8|18-971-699-1843|7508.92|HOUSEHOLD|sits haggle above the carefully ironic theodolite 116|Customer#000000116|yCuVxIgsZ3,qyK2rloThy3u|16|26-632-309-5792|8403.99|BUILDING|as. quickly final sauternes haggle slyly carefully even packages. brave, ironic pinto beans are above the furious 117|Customer#000000117|uNhM,PzsRA3S,5Y Ge5Npuhi|24|34-403-631-3505|3950.83|FURNITURE|affix. instructions are furiously sl 118|Customer#000000118|OVnFuHygK9wx3xpg8|18|28-639-943-7051|3582.37|AUTOMOBILE|uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep 119|Customer#000000119|M1ETOIecuvH8DtM0Y0nryXfW|7|17-697-919-8406|3930.35|FURNITURE|express ideas. blithely ironic foxes thrash. special acco 120|Customer#000000120|zBNna00AEInqyO1|12|22-291-534-1571|363.75|MACHINERY| quickly. 
slyly ironic requests cajole blithely furiously final dependen 121|Customer#000000121|tv nCR2YKupGN73mQudO|17|27-411-990-2959|6428.32|BUILDING|uriously stealthy ideas. carefully final courts use carefully 122|Customer#000000122|yp5slqoNd26lAENZW3a67wSfXA6hTF|3|13-702-694-4520|7865.46|HOUSEHOLD| the special packages hinder blithely around the permanent requests. bold depos 123|Customer#000000123|YsOnaaER8MkvK5cpf4VSlq|5|15-817-151-1168|5897.83|BUILDING|ependencies. regular, ironic requests are fluffily regu 124|Customer#000000124|aTbyVAW5tCd,v09O|18|28-183-750-7809|1842.49|AUTOMOBILE|le fluffily even dependencies. quietly s 125|Customer#000000125|,wSZXdVR xxIIfm9s8ITyLl3kgjT6UC07GY0Y|19|29-261-996-3120|-234.12|FURNITURE|x-ray finally after the packages? regular requests c 126|Customer#000000126|ha4EHmbx3kg DYCsP6DFeUOmavtQlHhcfaqr|22|32-755-914-7592|1001.39|HOUSEHOLD|s about the even instructions boost carefully furiously ironic pearls. ruthless, 127|Customer#000000127|Xyge4DX2rXKxXyye1Z47LeLVEYMLf4Bfcj|21|31-101-672-2951|9280.71|MACHINERY|ic, unusual theodolites nod silently after the final, ironic instructions: pending r 128|Customer#000000128|AmKUMlJf2NRHcKGmKjLS|4|14-280-874-8044|-986.96|HOUSEHOLD|ing packages integrate across the slyly unusual dugouts. blithely silent ideas sublate carefully. blithely expr 129|Customer#000000129|q7m7rbMM0BpaCdmxloCgBDRCleXsXkdD8kf|7|17-415-148-7416|9127.27|HOUSEHOLD| unusual deposits boost carefully furiously silent ideas. pending accounts cajole slyly across 130|Customer#000000130|RKPx2OfZy0Vn 8wGWZ7F2EAvmMORl1k8iH|9|19-190-993-9281|5073.58|HOUSEHOLD|ix slowly. express packages along the furiously ironic requests integrate daringly deposits. fur 131|Customer#000000131|jyN6lAjb1FtH10rMC,XzlWyCBrg75|11|21-840-210-3572|8595.53|HOUSEHOLD|jole special packages. furiously final dependencies about the furiously speci 132|Customer#000000132|QM5YabAsTLp9|4|14-692-150-9717|162.57|HOUSEHOLD|uickly carefully special theodolites. carefully regular requests against the blithely unusual instructions 133|Customer#000000133|IMCuXdpIvdkYO92kgDGuyHgojcUs88p|17|27-408-997-8430|2314.67|AUTOMOBILE|t packages. express pinto beans are blithely along the unusual, even theodolites. silent packages use fu 134|Customer#000000134|sUiZ78QCkTQPICKpA9OBzkUp2FM|11|21-200-159-5932|4608.90|BUILDING|yly fluffy foxes boost final ideas. b 135|Customer#000000135|oZK,oC0 fdEpqUML|19|29-399-293-6241|8732.91|FURNITURE| the slyly final accounts. deposits cajole carefully. carefully sly packag 136|Customer#000000136|QoLsJ0v5C1IQbh,DS1|7|17-501-210-4726|-842.39|FURNITURE|ackages sleep ironic, final courts. even requests above the blithely bold requests g 137|Customer#000000137|cdW91p92rlAEHgJafqYyxf1Q|16|26-777-409-5654|7838.30|HOUSEHOLD|carefully regular theodolites use. silent dolphins cajo 138|Customer#000000138|5uyLAeY7HIGZqtu66Yn08f|5|15-394-860-4589|430.59|MACHINERY|ts doze on the busy ideas. regular 139|Customer#000000139|3ElvBwudHKL02732YexGVFVt |9|19-140-352-1403|7897.78|MACHINERY|nstructions. quickly ironic ideas are carefully. bold, 140|Customer#000000140|XRqEPiKgcETII,iOLDZp5jA|4|14-273-885-6505|9963.15|MACHINERY|ies detect slyly ironic accounts. slyly ironic theodolites hag 141|Customer#000000141|5IW,WROVnikc3l7DwiUDGQNGsLBGOL6Dc0|1|11-936-295-6204|6706.14|FURNITURE|packages nag furiously. carefully unusual accounts snooze according to the fluffily regular pinto beans. slyly spec 142|Customer#000000142|AnJ5lxtLjioClr2khl9pb8NLxG2,|9|19-407-425-2584|2209.81|AUTOMOBILE|. 
even, express theodolites upo 143|Customer#000000143|681r22uL452zqk 8By7I9o9enQfx0|16|26-314-406-7725|2186.50|MACHINERY|across the blithely unusual requests haggle theodo 144|Customer#000000144|VxYZ3ebhgbltnetaGjNC8qCccjYU05 fePLOno8y|1|11-717-379-4478|6417.31|MACHINERY|ges. slyly regular accounts are slyly. bold, idle reque 145|Customer#000000145|kQjHmt2kcec cy3hfMh969u|13|23-562-444-8454|9748.93|HOUSEHOLD|ests? express, express instructions use. blithely fina 146|Customer#000000146|GdxkdXG9u7iyI1,,y5tq4ZyrcEy|3|13-835-723-3223|3328.68|FURNITURE|ffily regular dinos are slyly unusual requests. slyly specia 147|Customer#000000147|6VvIwbVdmcsMzuu,C84GtBWPaipGfi7DV|18|28-803-187-4335|8071.40|AUTOMOBILE|ress packages above the blithely regular packages sleep fluffily blithely ironic accounts. 148|Customer#000000148|BhSPlEWGvIJyT9swk vCWE|11|21-562-498-6636|2135.60|HOUSEHOLD|ing to the carefully ironic requests. carefully regular dependencies about the theodolites wake furious 149|Customer#000000149|3byTHCp2mNLPigUrrq|19|29-797-439-6760|8959.65|AUTOMOBILE|al instructions haggle against the slyly bold w 150|Customer#000000150|zeoGShTjCwGPplOWFkLURrh41O0AZ8dwNEEN4 |18|28-328-564-7630|3849.48|MACHINERY|ole blithely among the furiously pending packages. furiously bold ideas wake fluffily ironic idea 151|Customer#000000151|LlyEtNEXT6kkZ,kGP46H|19|29-433-197-6339|5187.02|HOUSEHOLD|regular dugouts: blithely even dolphins cajole furiously carefull 152|Customer#000000152|PDrllSkScKLh4lr19gmUZnK|8|18-585-850-3926|1215.18|BUILDING|ously ironic accounts. furiously even accounts accord 153|Customer#000000153|kDzx11sIjjWJm1|6|16-342-316-2815|5454.26|HOUSEHOLD|promise carefully. unusual deposits x-ray. carefully regular tithes u 154|Customer#000000154|2LAlU fDHkOqbXjHHDqw1mJQNC|19|29-522-835-6914|4695.12|FURNITURE|nic packages haggle blithely across the 155|Customer#000000155|l,sSphiStMgdrxpxi|0|10-566-282-8705|5902.85|AUTOMOBILE| sleep ironic, bold requests. regular packages on the quiet dependencies 156|Customer#000000156|5OS0edX2Y6B1cf9wJNuOQWgrrZccXk9|9|19-723-913-3943|9302.95|AUTOMOBILE| regular foxes above the theodolites haggle 157|Customer#000000157|HGEouzCcFrNd nBAdsCRjsMxKOvYZdbwA7he5w9v|15|25-207-442-1556|9768.73|BUILDING| pinto beans against the carefully bold requests wake quickly alongside of the final accounts. accounts 158|Customer#000000158|2HaYxi0J1620aoI1CdFyrW,rWOy|10|20-383-680-1329|6160.95|AUTOMOBILE|ecoys. fluffily quick requests use flu 159|Customer#000000159|KotsdDO6EHnysVu922s6pjZpG,vlT|10|20-888-668-2668|2060.06|HOUSEHOLD|cingly express somas haggle above the theodolites. pinto beans use special theodolites. theodolites sleep 160|Customer#000000160|5soVQ3dOCRBWBS|13|23-428-666-4806|4363.17|MACHINERY|olites. silently ironic accounts cajole furious 161|Customer#000000161|2oRkx,NtjFUh|7|17-805-718-2449|3714.06|MACHINERY|ptotes nag carefully instructions. silent accounts are. furiously even accounts alongside 162|Customer#000000162|JE398sXZt2QuKXfJd7poNpyQFLFtth|8|18-131-101-2267|6268.99|MACHINERY|accounts along the doggedly special asymptotes boost blithely during the quickly regular theodolites. slyly 163|Customer#000000163|OgrGcOnm4whd0f|21|31-863-349-4121|2948.61|FURNITURE| nag furiously furiously final requests. slyly s 164|Customer#000000164|YDW51PBWLXLnbQlKC|4|14-565-638-9768|208.45|HOUSEHOLD|ironic, special pinto beans. ironic 165|Customer#000000165|8pc6kwBmwBdEnfVP53aqL9DM4LymC4|0|10-927-209-5601|3349.92|HOUSEHOLD| requests. 
final ideas cajole quickly at the special, ironic acco 166|Customer#000000166|15HWGtwoP77EJfd95HxtMSTZUelV8NOKne2|10|20-320-530-5920|2042.21|FURNITURE|the packages. blithely final packages are furiously unusual asymptotes. regular frets promise carefully u 167|Customer#000000167|QNc2eOlRIzL6jpthwgDuB866uCIUPiOX|5|15-288-395-5501|1468.09|AUTOMOBILE|espite the ironic excuses. furiously final deposits wake slyly. slyly ex 168|Customer#000000168|GDcL5qU86P8,oaTwVBCLE6asM8rlxpE,211uziU|12|22-354-984-5361|-808.56|FURNITURE|blithely final accounts sleep quickly along the regular ideas. furiously sly foxes nag across the 169|Customer#000000169|NjhmHa7xrcjE|18|28-362-499-3728|4483.83|FURNITURE|fully unusual pinto beans. blithely express asymptotes lose carefully regular instructions? accounts b 170|Customer#000000170|5QmxmYubNhn6HAgLwTvphevM3OmpZTGsM|15|25-879-984-9818|7687.89|BUILDING| regular requests. carefully regu 171|Customer#000000171|RIhjJCrth89EU7xRSvN|7|17-513-603-7451|2379.91|MACHINERY|ly furiously final requests. slyly final requests wake silently pending, silent accounts. exp 172|Customer#000000172|KwgdKUL1G2WacsMNF50yX|22|32-178-964-1847|1134.40|MACHINERY|losely regular, unusual instructions. 173|Customer#000000173|Aue7KVz,FinSHpov Vk5ed,wSQ2BRSioJ0|9|19-443-196-8008|845.84|BUILDING|s pinto beans use thinly slyly regular packages. instructions print along the s 174|Customer#000000174|R5 fCPMSeDXtUpp5Ax|23|33-845-455-8799|1944.73|FURNITURE|oldly even requests haggle quickly blithely ironic accounts. idly final foxes doze slyly pending dep 175|Customer#000000175|8YK1ZyTqoY3wMWnExl4itPMLL793GpEZb6T|10|20-427-617-9922|1975.35|FURNITURE|ly final platelets are final pinto b 176|Customer#000000176|9hBepY2uz88HlCqToOLgeU770u81FeL|13|23-432-942-8830|-375.76|FURNITURE|uriously. final requests sleep ironic packages. quickly 177|Customer#000000177|6wzEKPyZE9dmBCJZ8e7x7fiiK,k|1|11-917-786-9955|7457.50|BUILDING|nal dolphins: blithely bold gifts wake slyly afte 178|Customer#000000178|p HUSDg8Cgan4Fj8Drvcdz4gi4dSqV0a7n 0ag|21|31-436-268-6327|2272.50|FURNITURE|unts. blithely regular dependencies kindle pending deposits. quietly express deposits wake above the Tiresias-- ex 179|Customer#000000179|djez3CWg0nnCiu60jsF|4|14-703-953-2987|-43.08|MACHINERY|st furiously. idly regular instructions wake fluffily slyl 180|Customer#000000180|DSGW3RFoYJE opVw,Y3wGCGcNULZi|13|23-678-802-2105|-92.58|FURNITURE|lar accounts sublate above the slyly final 181|Customer#000000181|YNviWd WrRkZvSw1OxIewBq|9|19-653-305-8440|3929.96|FURNITURE|final requests cajole furiously acro 182|Customer#000000182|tdwvgepG316CCTHtMaF8Q|3|13-199-211-9023|4810.22|AUTOMOBILE|quickly against the blithely even deposits; epitaphs unwind quickly along the carefully regular excuses. furio 183|Customer#000000183|aMAB2QSb8 86MAx|22|32-771-279-8154|4419.89|HOUSEHOLD|sual accounts across the slyl 184|Customer#000000184|uoOpBuRr42f1WIqnVYAhxbAA9bkK6HUGpOt|21|31-739-340-5476|170.46|AUTOMOBILE|hely according to the furiously unusual accounts. furiously bold platele 185|Customer#000000185|iHXzQgienOQ|5|15-760-572-8760|2788.76|BUILDING|t the ironic accounts. fluffily regular requests wake slyly ironic pinto beans. slyly unusu 186|Customer#000000186|BeVr6MzaobBENXRBC8pmOmkByMJI|3|13-518-743-2576|8737.50|HOUSEHOLD|e slyly final dependencies. unusual instructions against the carefully pending instructions boost quickly 187|Customer#000000187|OIlgR6oIRXV5g63q5YGudCjRD8kpod2p|4|14-716-294-6674|-774.22|FURNITURE|r deposits. 
carefully silent packages after the fluffily even instructio 188|Customer#000000188|58Srs6gEEoD3ZfwgXDM1OayRiaSY6K9YsveWwV|5|15-613-528-7811|9533.37|BUILDING|st slyly special platelets. bold, 189|Customer#000000189|r51HSq Rg8wQgF1CBfG1Vbye3GK|22|32-980-348-1114|-594.05|MACHINERY|sly express patterns. ideas on the regular d 190|Customer#000000190|F2X,GhSqLz8k u0gWsirsraFaEDEo6vIGtOTaO1T|11|21-730-373-8193|1657.46|AUTOMOBILE|uickly-- fluffily pending instructions boo 191|Customer#000000191|P1eCXsPWkv2y6ENQv|16|26-811-707-6869|2945.16|BUILDING|o beans hinder slyly bold accounts. 192|Customer#000000192|rDmB2c9d1BJQ y6R9jTx86YI77D|10|20-750-712-2481|8239.96|MACHINERY|ely unusual packages are fluffily 193|Customer#000000193|dUT4dtsPTZ6ZpkWLc,KGJCHY6JDJgPFH4|23|33-182-978-6287|8024.55|MACHINERY|y even theodolites. final foxes print along the final pinto beans. theodoli 194|Customer#000000194|mksKhdWuQ1pjbc4yffHp8rRmLOMcJ|16|26-597-636-3003|6696.49|HOUSEHOLD|quickly across the fluffily dogged requests. regular platelets around the ironic, even requests cajole quickl 195|Customer#000000195|WiqQD8hscyKekjMcSBA7AX 0AbxvBV|22|32-757-684-6845|4873.91|AUTOMOBILE| should detect blithely. quickly even packages above the deposits wak 196|Customer#000000196|68RstNo6a2B|18|28-135-177-2472|7760.33|FURNITURE|accounts wake. express instructions according to the s 197|Customer#000000197|UeVqssepNuXmtZ38D|1|11-107-312-6585|9860.22|AUTOMOBILE|ickly final accounts cajole. furiously re 198|Customer#000000198|,7fcZHIUn,fUaQtK8U,Q8|1|11-237-758-6141|3824.76|AUTOMOBILE|tions. slyly ironic waters wa 199|Customer#000000199|lBU3xll,a7e9TYm3 UyjDPCVMvnHKpq,9HW1X|4|14-136-924-5232|7654.31|FURNITURE|fully busy pinto beans. packages cajole around the express, bold packages! quickly ironic tithes 200|Customer#000000200|x1 H5c66DUgH2pgNTJhw6eZKgrAz|16|26-472-302-4189|9967.60|BUILDING|e after the ironic, even realms. fluffily regular packages doze-- courts haggle carefully! blithely 201|Customer#000000201|yWLtmd5usyjsCvyL1QJsBorC|2|12-759-183-9859|4614.40|MACHINERY| blithely even packages sleep carefully bold, unus 202|Customer#000000202|Q0uJ1frCbi9yvu|7|17-905-805-4635|2237.64|AUTOMOBILE|fully along the carefully pending Tiresias; special packages along the carefully special deposits try to 203|Customer#000000203|2fRlubh lWRinCs1nimADdn|1|11-886-563-6149|7960.63|MACHINERY| packages are. requests integrate regularly across th 204|Customer#000000204|7U7u2KryFP|6|16-761-837-4820|-627.76|BUILDING|ages. accounts wake slyly. dolphins nag blithely. final, regular requests haggle blithely furiously even 205|Customer#000000205|jOTQBGb nhfBMu3,LIN62WogLDBO0w|12|22-356-437-1311|7161.52|BUILDING| furiously pending accounts. ideas along the slyly final deposits cajole blithel 206|Customer#000000206|xsg,ehRHS5OKqyBR5YtoPm8myz|9|19-976-832-3312|-274.79|AUTOMOBILE| the carefully regular foxes. regular accounts wake furiously braids. bold ideas are carefu 207|Customer#000000207|ewz5JNnxJPmPGY|21|31-562-675-6475|-439.98|AUTOMOBILE|n theodolites against the evenly even requests boost carefully pinto beans! fi 208|Customer#000000208|Abye1MwcNfY0KO6yqv,Wwe|19|29-859-139-6234|6239.89|MACHINERY|le carefully according to the quickly silent packages. quickly ironic packages affix according to the ruthles 209|Customer#000000209|iBvmxOZV3qXMYQW3W4Oo7YFhdV|16|26-207-121-7721|8873.46|FURNITURE|deposits. furiously regular ideas across the quietly regular accounts cajole about the express packages. 
quickly reg 210|Customer#000000210|13cFL9sG1nrGERURN9WZI0|20|30-876-248-9750|7250.14|HOUSEHOLD|nusual instructions sleep regular acc 211|Customer#000000211|URhlVPzz4FqXem|13|23-965-335-9471|4198.72|BUILDING|furiously regular foxes boost fluffily special ideas. carefully regular dependencies are. slyly ironic 212|Customer#000000212|19U0iZ3GtDdrsn|7|17-382-405-4333|957.58|BUILDING|symptotes are blithely special pinto beans. blithely ironic 213|Customer#000000213|NpqMYBhBcWk8mnEta|24|34-768-700-9764|9987.71|HOUSEHOLD|al deposits. final instructions boost carefully. even deposits sleep quickly. furiously regul 214|Customer#000000214|MpCwhcLrbcIM7AeKS9tRM09by|8|18-180-678-6165|1526.59|MACHINERY|grow. fluffily regular pinto beans according to the regular accounts affix quickly pe 215|Customer#000000215|8H76xbBhde HY70BrYqGEFmVPXqlm8pgjjxh|9|19-564-446-4758|3379.20|FURNITURE|al pinto beans. ironic foxes serve. i 216|Customer#000000216|LXH7wSv4I6GG6TAkLOyLcMh559a8Y|21|31-296-111-5448|-776.08|FURNITURE|hely at the pending warhorses; blithe 217|Customer#000000217|YIy05RMdthrXqdfnNKud|23|33-159-298-3849|378.33|AUTOMOBILE|ven frays wake according to the carefully 218|Customer#000000218| V1FCIeSseuyNGYfHS Rx0,sc4IsBfReV|4|14-480-931-8567|9541.19|MACHINERY|lar courts. furiously pending dependencies cajole blithely? fluffily regular deposits cajol 219|Customer#000000219|eTjiL01eyoKiAe2WQoz3EpPg2lvSLeOu2X2wyxK|11|21-159-138-6090|9858.57|AUTOMOBILE|ckly multipliers. carefully eve 220|Customer#000000220|TbUHVhkttz|16|26-201-301-7371|9131.64|BUILDING| even, even accounts are. ironic 221|Customer#000000221|ripNyyPOewg8AahnZlsM|16|26-351-738-1001|1609.39|BUILDING| instructions above the regular requests cajole packages. pending, even 222|Customer#000000222|gAPkFjwxX1Zq 2Yq6 FIfLdJ4yUOt4Al7DL18Ou|1|11-722-672-5418|8893.76|BUILDING|regular accounts haggle furiously around the c 223|Customer#000000223|ftau6Pk,brboMyEl,,kFm|20|30-193-643-1517|7476.20|BUILDING|al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 224|Customer#000000224|4tCJvf30WagGfacqcAqmfCptu2cbMVcj2M7Y0W|15|25-224-867-3668|8465.15|BUILDING|counts. bold packages doubt according to the furiously pending packages. bold, regular pinto beans 225|Customer#000000225|2HFk1E0fmqs|13|23-979-183-7021|8893.20|AUTOMOBILE|ages boost among the special foxes. quiet, final foxes lose carefully about the furiously unusual th 226|Customer#000000226|ToEmqB90fM TkLqyEgX8MJ8T8NkK|3|13-452-318-7709|9008.61|AUTOMOBILE|ic packages. ideas cajole furiously slyly special theodolites: carefully express pinto beans acco 227|Customer#000000227|7wlpEBswtXBPNODASgCUt8OZQ|13|23-951-816-2439|1808.23|MACHINERY|lar, ironic pinto beans use! quickly regular theodolites maintain slyly pending pac 228|Customer#000000228|A1Zvuxjdpt8TZP6i41H3fn9csGqOJUm5x0NIS1LA|20|30-435-915-1603|6868.12|FURNITURE| blithely ironic theodolites 229|Customer#000000229|Sbvjxgmwy4u6Ks1FH7lxo7toMmeU5dG|1|11-243-298-4029|7568.07|BUILDING|bold accounts haggle furiously even deposits. regular instruct 230|Customer#000000230|CnR8xt3MYqID0tiHwYh|21|31-744-950-8047|1682.83|MACHINERY|c decoys impress even deposits. thinly final asymptotes 231|Customer#000000231|WFOhG9Z9ohRdsyuYnPvBSv|10|20-825-880-1065|283.55|BUILDING|ly final deposits. fluffily ironic requests wake carefully carefully regular accounts. quickly sp 232|Customer#000000232|oA9o,3YOXu2rzKONdd,cxpqCFXUv5kuxBYKp|22|32-283-563-2674|554.71|HOUSEHOLD|ges sleep. 
final, bold theodolites are quickly final packages. furiously ironic packages are slyly fi 233|Customer#000000233|mFm45wZ7rV4VIbEE1F4|3|13-574-104-3221|3998.24|FURNITURE|st the special instructions. theodolites detect blithely according 234|Customer#000000234|ILyuJbixVmrNEVxsfQOMFxByySs|18|28-243-424-1393|8383.51|AUTOMOBILE| fluffily regular ideas play s 235|Customer#000000235|bp0rIBMh4fMdQnHBmMnB|3|13-350-790-6416|754.41|AUTOMOBILE|hely ruthless instructions again 236|Customer#000000236|kcW,mM0rhIstAcVaol1,6DVkS FPKlhY|14|24-808-967-4503|5384.59|AUTOMOBILE|te slyly along the requests. carefully final requests sleep slyly blithe frets. furiously ruthless dep 237|Customer#000000237|R dtznB5ocPPo|19|29-414-970-5238|-160.02|HOUSEHOLD|regular pinto beans sleep furiously ironically silent theodolites. quickly ironic courts after the deposits sleep f 238|Customer#000000238|tE0lVKK3tz5AG2 Hal2XHwE485g5MX7|16|26-307-925-1236|3482.32|HOUSEHOLD|uffily ironic theodolites are. regular, regular ideas cajole according to the blithely pending epitaphs. slyly 239|Customer#000000239|w8eRmMOmUTjVOkucbfcGDh2AqCixTOC|9|19-699-117-6988|5398.77|FURNITURE|uctions. furiously even dolphins haggle fluffily according to the furiously regular dep 240|Customer#000000240|SXfeEOwRZsXArtY3C5UWqXgLcJBAMmaynaTJs8|9|19-756-548-7835|7139.68|MACHINERY|al accounts about the slyly pending p 241|Customer#000000241|FBuwHkPR450PvnZnAezcaeMaS,hX3Ifdk|9|19-344-614-2207|6569.34|AUTOMOBILE| across the enticingly even requests. blithely iro 242|Customer#000000242|apgzK3HWAjKHFteJ16Tg3OERViesqBbx|3|13-324-350-3564|1975.41|MACHINERY|riously ironic pinto beans cajole silently. regular foxes wake slyly. bravely 243|Customer#000000243|te2FOn8xJzJinZc|7|17-297-684-7972|620.73|AUTOMOBILE|nic deposits. evenly pending deposits boost fluffily careful 244|Customer#000000244|FBVbCpEVaFaP8KogqQO2VuXeVx|15|25-621-225-8173|2506.38|HOUSEHOLD|encies. requests nag carefully. regularly final accounts h 245|Customer#000000245|IseFIO7jTGPTzAdZPoO2X4VX48Hy|12|22-952-232-2729|3720.15|MACHINERY|s. regular foxes against the s 246|Customer#000000246|WrRUR0ds6iypmopww9y9t0NJBRbke78qJm|15|25-608-618-2590|9584.96|AUTOMOBILE| requests shall have to integrate furiously pending courts. sil 247|Customer#000000247|N8q4W4QQG2mHY47Dg6|20|30-151-905-3513|8495.92|HOUSEHOLD|es affix furiously regular deposits. blithely ironic asymptotes after the blithely e 248|Customer#000000248|mgT15r8asLyaED|10|20-447-727-8914|8908.35|FURNITURE|s detect blithely. blithely pending dolphins along the fluffily final accounts haggle fu 249|Customer#000000249|0XE2fhX7j2uivaHBrFuRl1NoJnaTSIQT|3|13-226-455-7727|-234.01|MACHINERY|its are after the special deposits. ironic, final deposits against the slyl 250|Customer#000000250|9hif3yif6z8w8pW88F755PU7uz|16|26-464-852-1461|2869.97|FURNITURE|s. slyly unusual instructions cajole quickly carefully bold dep 251|Customer#000000251|Z9fdQmv07C3k hxwt9nchhuQiqC4wox85se8EW7L|13|23-975-623-5949|9585.32|HOUSEHOLD|fully blithely regular requests. fluffily even dugouts detect furiously final ideas. sometimes ironic depos 252|Customer#000000252|db1bPFF xUkJYzvE3cBtqYeDn2 u|16|26-330-347-9201|3561.74|FURNITURE|ngside of the pending foxes. furiously ironic requests wake. blithely ironic acco 253|Customer#000000253|naGyIRPFPH E|15|25-461-140-9884|9139.52|AUTOMOBILE| regular deposits sleep against the accounts. foxes cajole carefully special 254|Customer#000000254|vQ,pEzMQaFgJzK4TJ2eA|1|11-451-622-6325|1915.35|MACHINERY|equests. 
carefully ironic deposits detect carefully abo 255|Customer#000000255|I8Wz9sJBZTnEFG08lhcbfTZq3S|3|13-924-679-8287|3196.07|BUILDING|ges mold. bold, regular courts boost furiously at the 256|Customer#000000256|eJ6AggYh80JMEzZNwYK4CIC2flT|10|20-229-271-4429|1299.92|HOUSEHOLD|ld boost about the carefully ironic foxes. slyly special packages cajole alongside of the slyly final accounts. q 257|Customer#000000257|LyIa26EXYaSU|7|17-816-687-2155|-339.85|AUTOMOBILE|s cajole quickly along the ironic pinto beans: even, regular foxes are 258|Customer#000000258|7VbADek8qYezQYotxNUmnNI|12|22-278-425-9944|6022.27|MACHINERY|about the regular, bold accounts; pending packages use furiously stealthy warhorses. bold accounts sleep fur 259|Customer#000000259|kwh9i86Wj1Zq14nwTdhxapIkLEI|5|15-907-674-2046|3335.29|HOUSEHOLD|furiously unusual instructions. s 260|Customer#000000260|CrHY2zx4vner4|1|11-708-529-9446|9196.11|MACHINERY|carefully. furiously bold accounts nag furiously carefully regular accounts-- final decoys prin 261|Customer#000000261|dXkVi8qahjP|12|22-494-898-7855|7094.22|AUTOMOBILE|he special instructions integrate carefully final request 262|Customer#000000262|DcUOAFBxMu8oGKvIqbDx7xgeZ|4|14-698-169-5201|1561.80|AUTOMOBILE|ress packages above the ironic accounts are against the ironic pinto beans. carefully final accoun 263|Customer#000000263|Y2pxeGWkTyaq,0RCzIbZ3|1|11-276-906-3193|1162.03|FURNITURE|usly ironic theodolites cajole furiously. final ep 264|Customer#000000264|24Akixb4hqpRD|11|21-881-683-3829|3195.83|MACHINERY|ular packages cajole blithely a 265|Customer#000000265|sthiqpj6CPAKbD7BBSz9ulRuF9d,ebfaiTc|17|27-716-734-2046|8275.80|MACHINERY|lar, ironic platelets. furiously unu 266|Customer#000000266|VSIEruiMdDvjDaTQxkuK60Yw3AGxO|0|10-474-243-3974|5481.00|HOUSEHOLD|ccounts. quickly ironic excuses after the regular foxes wake along the ironic, fina 267|Customer#000000267|el7 bYzj1USp6T5i3KpfZ43jKegbdO,Jd69|15|25-402-954-8909|3166.94|AUTOMOBILE| detect slyly alongside of the foxes. closely regular pinto beans nag quickly of the blithely bold r 268|Customer#000000268|tkSLQoOpfOa601itad05EcN0UmhjZXdyKRc0r|3|13-720-469-5207|6821.01|MACHINERY|press ideas print quickly. fluffily unusual deposits use blithely eve 269|Customer#000000269|J7kLF9iPOQA 7CVwAmQRpwfZPDJ2q5Seu2Vj1gh|14|24-570-874-6232|7667.35|MACHINERY| close packages-- quickly regular instructions sleep. carefully 270|Customer#000000270|,rdHVwNKXKAgREU|7|17-241-806-3530|9192.50|AUTOMOBILE|ldly final instructions mold carefully along the ironic accounts. 271|Customer#000000271|O,HceV3 XE77Yx|6|16-621-282-5689|1490.35|MACHINERY|ly pending deposits cajole slyly sl 272|Customer#000000272| YDjKpjXEe0A6rDE|2|12-324-877-9650|-746.03|MACHINERY|he regular requests. slyly special 273|Customer#000000273|sOA,alhAw1juArjRLOd|2|12-197-772-5736|-675.05|FURNITURE|ng frets sleep. slyly express dolphins doubt ironically ironic accounts. final de 274|Customer#000000274|adesXwNumnPqsKgsE1groEAwdKNgZ|19|29-330-389-1442|4425.42|FURNITURE|gular dependencies. ironic foxes haggle du 275|Customer#000000275|M1UCTKrZLOgSyr|22|32-194-864-6861|5067.31|AUTOMOBILE|y regular deposits. fluffily ironic packages cajole along the 276|Customer#000000276|iSWxETEMKe5cF|16|26-716-357-3851|2292.67|AUTOMOBILE|eans. even, ironic accounts affix sl 277|Customer#000000277|BWGsQevHk0BfRJV3RRB ElFc|23|33-696-831-5394|8876.10|BUILDING|phins; bold, final accounts print. 
carefully silent 278|Customer#000000278|4jqLjG 2aeYMFEJi|20|30-445-570-5841|7621.56|BUILDING| pending, express requests cajole carefully special packages. blithely pending accounts affix furiously. fluffily 279|Customer#000000279|9t2Wo1jK1TYnDGg6ODSMGf1W9hRT3F3VK5zxJOC|9|19-220-605-9025|9663.23|AUTOMOBILE|l platelets sleep fluffily against the fluffily enticing excuses. blithely special requests wake somet 280|Customer#000000280|3fDiGmN64En0ei|11|21-537-461-3965|3952.84|BUILDING|accounts. quiet deposits sleep. slyly even instructions detect about the blithely bold instru 281|Customer#000000281|x5gJ8IGm2Fo9Jigt|6|16-809-382-6446|4361.70|BUILDING|fully quiet ideas detect quickly even packages. regular instructions accor 282|Customer#000000282|wcCc, y1996DnOwnXu1i|18|28-251-599-2415|1125.45|HOUSEHOLD|ole daringly against the carefully ir 283|Customer#000000283|jysHDuTKUKYEjdBhtI LqTttTp 7i2kvx1 O3A|7|17-111-303-1282|4450.03|AUTOMOBILE|y alongside of the accounts. slyly express dependencies wake quickly above the carefully ironic package 284|Customer#000000284|2ZgAkaBgb6aigORfIfUd3kHbPi42|6|16-161-235-2690|593.52|AUTOMOBILE|lar gifts. carefully even deposits boost! furiously even braids use afte 285|Customer#000000285|ApUL7bgFMUXGXewpoQyQOSnLeL9Vc1rrkW |20|30-235-130-1313|7276.72|FURNITURE|dolphins after the slyly ironic packages boost furiously among the furiously pending theodolites. bl 286|Customer#000000286|7 7uVDrpkWuozyEd|22|32-274-308-4633|-109.73|HOUSEHOLD|ly special accounts haggle slyly slyly fluffy req 287|Customer#000000287|KTsaTAJRC0eMYkyFm7EK3eeamHs7s|4|14-330-840-6321|1734.18|MACHINERY|requests. bold, silent depths lose f 288|Customer#000000288|eEs5rwc9AOJaKhvV|2|12-674-136-5397|5339.43|HOUSEHOLD| furiously about the carefully ironic packages. express reques 289|Customer#000000289|NUilehg0nVOkK3K1SW0,BAHCeST2JqKzuTMoGS|10|20-456-773-7693|-215.75|AUTOMOBILE|ending foxes across the carefully 290|Customer#000000290|8OlPT9G 8UqVXmVZNbmxVTPO8|4|14-458-625-5633|1811.35|MACHINERY|sts. blithely pending requests sleep fluffily on the regular excuses. carefully expre 291|Customer#000000291|ZlLNbGxnQYMubQ9K|8|18-657-656-2318|4261.68|HOUSEHOLD|e slyly silent deposits. bold deposits haggle slyly special packages. furiously bold requests cajole carefully abo 292|Customer#000000292|hCXh3vxC4uje9|11|21-457-910-2923|2975.43|HOUSEHOLD|usly regular, ironic accounts. blithely regular platelets are carefully. blithely unusual ideas affi 293|Customer#000000293|7ynwX7lZ3o2cmAWSkKAc3edKa 8yT|2|12-887-984-5485|-43.79|MACHINERY|ironic foxes are final packages. requests about the furiousl 294|Customer#000000294|hSaNqI1P2IyEFHY0r0PsPkMqt|18|28-187-946-4260|-994.79|BUILDING|bold packages. regular, final asymptotes use quickly fluffily even waters. blithely express requests wake into th 295|Customer#000000295|mk649IH6njR14woTVZ1cxtlNs URxBHD5o5z2|0|10-340-773-4322|9497.89|HOUSEHOLD|play according to the quickly ironic instructions-- unusual, bol 296|Customer#000000296|4eyqk2zpg4m V JGEtgwNmCq3c|15|25-875-178-1959|8081.52|BUILDING|es need to affix furiously. ironic, final foxes are against the regular instructions: pinto beans haggle q 297|Customer#000000297|hzg,409pj0|15|25-915-518-8800|7096.32|HOUSEHOLD|de of the regular asymptotes detect slyly ironic theod 298|Customer#000000298|jFKF3w 8aegECg7mP,qtuR9IsTSYQlEXq|21|31-542-157-4074|3812.84|BUILDING|sleep slyly. stealthy, bold pinto beans sleep blit 299|Customer#000000299|3F3Q0fTkjIv1UfJbcN7|4|14-948-474-7353|5380.50|HOUSEHOLD|tes sleep fluffily. 
furiously regular requests boost fluffily evenly even asympt 300|Customer#000000300|I0fJfo60DRqQ|7|17-165-193-5964|8084.92|AUTOMOBILE|p fluffily among the slyly express grouches. furiously express instruct 301|Customer#000000301|FtFq9Cgg5UAzUL|7|17-265-345-9265|9305.05|HOUSEHOLD|ular, regular notornis sleep along the furiously pending foxes 302|Customer#000000302|cJ3cHoAjAiaxTU2t87EJM|4|14-152-594-2967|1107.42|MACHINERY|dolphins haggle fluffily across the final requests. regularly unusual sentiments detect fluffily requests. regular 303|Customer#000000303|5pSw0OIoNRcpyTEEI1gZ6zRMyJ0UGhJdD|3|13-184-254-6407|9339.57|AUTOMOBILE|mise ironically against the unusual foxes. deposits cajole asymptotes. ironic ideas shall have to sleep 304|Customer#000000304|Cilvb3k8ghDX4|0|10-321-698-7663|9217.55|MACHINERY|s integrate at the carefully ironic instructions. fin 305|Customer#000000305|x8kcl,R4Wk|11|21-250-654-3339|4356.59|FURNITURE|nts. even, regular courts nag. dugouts use blithely a 306|Customer#000000306|ADoOEIr5aQcLIoGJM6nCvPEP 91|10|20-109-305-9629|3268.01|AUTOMOBILE|ill have to are. final, express deposits hag 307|Customer#000000307|xvkJ13gs7GH|13|23-836-934-5394|346.59|FURNITURE| ironic platelets nag against the bold pinto 308|Customer#000000308|c9WuNBiEYmGxeBmZaELg WWb|9|19-992-128-2013|4150.76|HOUSEHOLD|ilent accounts haggle carefully unusual dolphins. carefully regular requests wake along the 309|Customer#000000309|6Jg4ECVS2u7i,E|21|31-231-377-9535|8824.78|FURNITURE|lyly. furiously enticing instructions haggle. carefull 310|Customer#000000310|QZnc5mkLIPh6JGrzcHmRzCiL0AmdE92vyM|1|11-838-647-9285|3186.57|FURNITURE|mise fluffily blithely ironic courts 311|Customer#000000311|dvpNARle3mR19GD4s2gpEbkL2mZV3uvV6P|23|33-919-292-8822|6589.50|AUTOMOBILE|essly even escapades. blithely regular Tiresias cajole blithely furiously close packages. furiously ironic pi 312|Customer#000000312|cH6XucXV0V|6|16-316-482-2555|-178.84|AUTOMOBILE|e slyly. furiously regular pinto beans wake slyly according to the fluffily even excuses. ca 313|Customer#000000313|Ay52vCrTXsSmp7TmQ1kujvuItfLGx|0|10-401-786-6040|6115.81|HOUSEHOLD|g to the even dependencies. accoun 314|Customer#000000314|8,tdTVYGYoYRaAKwG 6aDJna4Cfjt,F9DDCC2|13|23-366-243-4713|2394.92|MACHINERY|ets alongside of the slyly pending pinto bean 315|Customer#000000315|pXaKKTCTyc UI3tglBaWRimosymG6ZyOCyb6Vb3M|7|17-442-286-3594|348.58|FURNITURE|s. slyly regular sentiments are carefully. slyly ironic asymptot 316|Customer#000000316|zE dN3aqjaG|8|18-171-394-5011|4571.78|MACHINERY|egular ideas cajole around the ironic, pending deposits. furiously pending dolphins serve blithely regular 317|Customer#000000317|uOeuL8DG1j|19|29-615-537-8871|956.88|HOUSEHOLD|ages. hockey players are. dependencie 318|Customer#000000318|PtJQn0IjYtShb1f2uYTYBnnmUeGNiwcALU|0|10-229-548-7118|9149.98|HOUSEHOLD|nding requests. special, bold instruction 319|Customer#000000319| UQ5mF3sdoZT2|6|16-734-928-1642|1834.36|FURNITURE| packages use slyly always ironic deposits. unusual, even notornis above 320|Customer#000000320|pO8rWSwK j|12|22-358-857-3698|6082.74|MACHINERY|ing requests. furiously regular accounts hinder slyly. final, regular theodolites against the slyly quiet requests 321|Customer#000000321|g3,8g XHACSvjZtJuiNk5BYiyPFnIxg|20|30-114-675-9153|7718.77|FURNITURE|special requests! express dugouts can affix furiously blithely regular platelets. fu 322|Customer#000000322|bWRyCyjH5OfGX|20|30-660-202-7517|4489.98|HOUSEHOLD|usual sauternes are among the slyly even instructions! 
thinly regular 323|Customer#000000323|ZLnVZ CXRi2,QDrlo|18|28-347-223-6024|1137.67|AUTOMOBILE|ely special foxes. express, final excuses across the packages are quickly amon 324|Customer#000000324|fiW1n6dLSVRkXj7kU1768UI2w1vMxEde5a |2|12-722-560-7023|806.59|FURNITURE|, regular requests kindle slyly furio 325|Customer#000000325|Z I43vl3ta3iYmjXNaSM d6Pe24ibjhdvPSi|15|25-823-702-9630|2377.34|HOUSEHOLD|nal foxes alongside of the always bold 326|Customer#000000326|nWDOTh6X019pfBtV3ikACYZiMjGykLrFnuyAo2|2|12-447-614-7494|1906.52|HOUSEHOLD|ckey players. carefully ironic a 327|Customer#000000327|UyKulwfNnX4l4ba1vQtwCWw8WNP50U8DCU|8|18-606-718-3062|8762.16|MACHINERY| unusual braids. daringly final ideas are quickly c 328|Customer#000000328|9pu j2HoEf1uhiY3jxE9l9fCRfjoVU|5|15-817-180-1487|6709.90|BUILDING|y about the daring accounts. furiously thin escapades integrate furiously against the furiously ironi 329|Customer#000000329|67r6XnIxUVgAc3pRX8tmGOw|11|21-106-357-8302|-651.91|BUILDING|ans. fluffily unusual instructions haggle about the slyly ironic platelets. never regular pinto beans sleep fl 330|Customer#000000330|UfNb7T9CTCnsfN3b|20|30-476-852-2371|8244.73|MACHINERY|en pinto beans. quickly final excuses haggle furiously. slyly pendin 331|Customer#000000331|Ug e2IBbl,LJuqjNz5XeQV|5|15-411-430-7917|170.27|AUTOMOBILE|r the silent ideas. carefully ironic deposits was carefully above the furiously even excuses. evenly regu 332|Customer#000000332|Jfosq,G6ziag7M04IvCx7SMRafyYvSI,Do|22|32-767-972-2596|-267.09|HOUSEHOLD| around the pinto beans. final theodolites haggle 333|Customer#000000333|heiloGYs Yey7NKhEFoiNhUBb,QFbjtn5wt|11|21-908-534-7709|8018.89|AUTOMOBILE|uriously close theodolites! slyly express foxes cajole-- final pinto beans boost blithely along the ironic 334|Customer#000000334|OPN1N7t4aQ23TnCpc|4|14-947-291-5002|-405.91|BUILDING|fully busily special ideas. carefully final excuses lose slyly carefully express accounts. even, ironic platelets ar 335|Customer#000000335|d2JCYLr2F9tC1AZMIvbIYPDQA|21|31-772-165-3138|6837.46|HOUSEHOLD| requests haggle carefully about the quickly special escapades. regular a 336|Customer#000000336|yC zy1i6AGrnykrV McJyjg|2|12-345-190-9898|9241.49|AUTOMOBILE|es. dependencies lose carefully blithely regular deposits. t 337|Customer#000000337|EluRTlO4pE7u0XSKKyvKvVyt4sADWFRLZuiyn|0|10-337-165-1106|-270.59|MACHINERY|ld requests sleep quickly. carefully express tithes wake carefully ac 338|Customer#000000338| aiYAeWgI0okGSJv7OgvKqMvPLhxF3blT8josX|23|33-302-620-7535|4092.49|FURNITURE|ckages nag blithely regular requests: carefully final packages between the slyly regular instructions sleep 339|Customer#000000339|jUs1Im28boIduGhp5vbKK50gM5ov7xH9G|24|34-992-529-2023|8438.07|HOUSEHOLD|ix. ironic, special tithes detect dog 340|Customer#000000340|WRnPrKQmAmoMQgHQERoVOhyTklcHMajJlc|2|12-730-681-4571|4667.12|BUILDING|es sleep according to the even, unusual Tiresias. carefully bold packages haggle. furiously pending s 341|Customer#000000341|4,zQfld2YV9TSeNgCSOvqlxhJvVW8WD|9|19-870-813-8585|8247.11|FURNITURE|low, special platelets alongside of the even, bold theodolites are carefully 342|Customer#000000342|SpDDdUfraEAfCULAuGLE|18|28-690-119-9571|7186.74|AUTOMOBILE|luffily final ideas. finally unusual requests boost slyly above the furio 343|Customer#000000343|ejvvSNHIkJVm8I1zpQINNn5yyJbA|3|13-877-910-5134|5521.36|HOUSEHOLD| unusual requests cajole blithely about the carefully express ideas. 
blithely even excuses above the pint 344|Customer#000000344|Zasc8,E0VVY|2|12-810-788-6699|-544.95|FURNITURE|le according to the regular instruction 345|Customer#000000345|dGFK ICPKxnsAzlX4UYOUf,n200yyEWhIeG|9|19-209-576-4513|1936.77|AUTOMOBILE|en pinto beans nag along the slyly regular deposits. slyly ir 346|Customer#000000346|K61SvIue3Emcwfel,7f9tO5WyJ58MbT7k3iS|2|12-100-890-4659|238.14|FURNITURE|ickly even pinto beans affix across the bravel 347|Customer#000000347|qRT7WRrnykLDfTc5Ei|1|11-519-832-9913|7348.92|BUILDING|ts use blithely blithely regular theodolites. even requests after the 348|Customer#000000348|ciP7BWkhOe1IbbVGlqJePBI6ZwqENkS|13|23-986-141-5327|3310.49|HOUSEHOLD|al foxes are on the carefully final excuses. careful dependen 349|Customer#000000349|vjJBjxjW9uoRZP02nS p6XY5wU6Ic,6xHpxUKA|23|33-818-229-3473|-565.35|BUILDING|y. bold, ironic instructions after the theodolites sleep blithely ironic packages. ideas c 350|Customer#000000350|G vBMGVmIOHl7tc4HeNMiMkKY|15|25-960-809-3690|19.31|BUILDING|tions. quietly unusual accounts sleep blithely afte 351|Customer#000000351|De35Hx1QiyS0uy|7|17-873-420-4342|3419.54|AUTOMOBILE|telets haggle blithely against the ironic 352|Customer#000000352|HqhIE5GRTK0dFtWpJUQENU4aa1bwdsUBEWtzUw|9|19-906-158-8420|6257.88|HOUSEHOLD|ts are. blithely special requests wake. furiously bold packages among the blithely eve 353|Customer#000000353|eftGCmL4b5rAKdvUe9biJXzAH|10|20-733-644-2244|3199.03|BUILDING|nal theodolites nag carefully. requests wake. slyly ironic ideas according to the blithely pe 354|Customer#000000354|sV3WgvJA06WngO4|2|12-545-101-2447|7095.95|BUILDING|. regular, final requests cajole fluffily. express attainments wake slyly until the even acco 355|Customer#000000355|205r3Xg9ZWjPZNX1z|14|24-656-787-6091|8727.90|FURNITURE|ly bold requests detect furiously. unusual instructions sleep aft 356|Customer#000000356|9RfNXUJivKTonL2bp1eG5IT|10|20-415-457-4421|2934.06|FURNITURE|al packages haggle always. daringly bold inst 357|Customer#000000357|l2C0Xkdib4t4 qKFUcRDOhRQMK7U0|18|28-452-965-8560|8747.36|AUTOMOBILE|ress platelets cajole fluffily final accounts: slyly ironic foxes s 358|Customer#000000358|F z jplpUKWz1Hn7p3ez2qTsiIh|5|15-457-255-3822|-44.66|MACHINERY|e furiously pending requests. slyly bold requests wake deposits. furiously express 359|Customer#000000359|z4lUH9ssc3K2w0UjRIuNRrdqw|14|24-608-547-4751|6375.23|FURNITURE|ifts wake fluffily ironic ideas. slyly ironic deposits above the 360|Customer#000000360|S,6ajyDFO3WUQ0Qr|17|27-604-646-1645|6542.83|FURNITURE|engage. quickly final platelets about the fluffily unusual accounts wake 361|Customer#000000361|l0F8jMJVe63cb|20|30-164-267-4590|7451.84|BUILDING|fully busy ideas. regular foxes cajole 362|Customer#000000362|UscV00TNrNTDddxF7BTk|17|27-651-653-4122|6149.01|AUTOMOBILE|ut the fluffily ironic platelets. ironi 363|Customer#000000363|2Koh mYARhsVcFn0U2Abt35qIyedAr1TxP|17|27-460-529-3937|-573.86|HOUSEHOLD|s. carefully unusual deposits are foxes. furiously even foxes nag carefully according to the furiously express 364|Customer#000000364|SQ3b5Q5OtrmmZjJ87tq,o1TiXKVJQ0M7ZOuud|23|33-492-647-4972|32.24|HOUSEHOLD| dependencies? pending requests use carefull 365|Customer#000000365|QiZRz y1xU|24|34-708-696-5226|737.03|HOUSEHOLD|counts. unusual packages are blithely foxes. unusual dinos 366|Customer#000000366|pPQektSfn55AC7s9SRFkj07I2yXqakvCa|3|13-915-531-6826|-729.74|MACHINERY|nos wake quickly. 
regular, regula 367|Customer#000000367|yZaDoEZCqt2VMTVKoZUkf6gJ4yj|13|23-939-319-4691|9108.65|HOUSEHOLD|eodolites under the ironic, stealthy requests affix furiously among the unusual tit 368|Customer#000000368|9p ReFA4fseKWYUaUHi|22|32-552-596-4994|84.72|MACHINERY|ic asymptotes. quickly special packages along the bravely bold depos 369|Customer#000000369|ge1XhgI3ADIkvLr5GPMqpup,hzlTVv|8|18-333-644-9832|2881.06|FURNITURE| theodolites? quickly quick foxes are fluffily slyly regular instructions. fluffily 370|Customer#000000370|oyAPndV IN|12|22-524-280-8721|8982.79|FURNITURE|ges. final packages haggle quickly. slyly bold 371|Customer#000000371|dnxjCYwhuSHx 9KX38nV0R16fG|22|32-119-346-2028|7789.14|AUTOMOBILE|equests shall boost furiously special pinto beans. express, ironic ideas sleep across the ironi 372|Customer#000000372|aKPMNZfbgV0neVIBo|19|29-226-339-6392|-921.91|MACHINERY|. furiously even foxes sleep at the forges. bold accounts sleep after the ironic theodolites. ironi 373|Customer#000000373|2hrQ wHkbaNlJCzY,mVkugMIE 8ryNlaA3JHDTjJ|20|30-883-170-4010|2354.06|MACHINERY|requests wake blithely even packages. slyly ironic deposits haggle blithely 374|Customer#000000374|fg4eklU1,UaFOan|22|32-282-723-3627|6718.78|AUTOMOBILE|ges are carefully. slyly ironic deposits about the fin 375|Customer#000000375|e53JADEeGvM1ikhN7aa|15|25-575-273-9756|5562.22|HOUSEHOLD|st the pending accounts. final courts above the pending pinto beans use furiously ironic requests. dolphins 376|Customer#000000376|4NwsvFQU T4mSgzvU1Rx2ZtHOGyaNyhe|16|26-437-952-8986|4231.45|AUTOMOBILE|gs cajole quickly. bold asymptotes wake regularly along the quickly 377|Customer#000000377|PA4levhyD,Rvr0JHQ4QNOqJ9gW YXE|23|33-260-610-4079|1043.72|MACHINERY|. slyly regular ideas cajole blithely. slyly ironic foxes are carefully after the thinly special accou 378|Customer#000000378|133stqM9 LT,a2BSlbm49 nXreFggaZgW6P6J|22|32-147-793-4825|5718.05|BUILDING|ackages haggle fluffily ironic packages. 379|Customer#000000379|t3QzCf,q1NbshmjOIUY|7|17-228-550-9246|5348.11|AUTOMOBILE|l deposits cajole blithely blithely final deposits. express, even foxes grow carefully about the sile 380|Customer#000000380|n2w3Jd1bipwICbOVgrELzcNRexmWSklo|21|31-538-493-4229|2755.46|BUILDING|riously special accounts. slyly final accounts sleep; blithely special requests integrate carefully slyly en 381|Customer#000000381|w3zVseYDbjBbzLld|5|15-860-208-7093|9931.71|BUILDING|t regular, bold accounts. carefully quick packages haggle. care 382|Customer#000000382|UdgAMamK5JnSykA,ZPfR5W5zRFatDUye|8|18-942-650-6657|6269.42|AUTOMOBILE|. blithely express notornis according to the blithely even requests are never fina 383|Customer#000000383|iBIHYgXvVDpu6qq7FlqXVcAIDAzv4qs|2|12-868-920-9034|-849.44|MACHINERY|slyly express ideas haggle blithely unusual dugouts. ironic pinto beans are ironic ideas. 384|Customer#000000384|kDDMb3ML nUSu2Sn7CUHyZVedAFUx9|9|19-271-453-8361|-614.30|HOUSEHOLD|olites. express, unusual dolphins cajole carefully about the 385|Customer#000000385|zJvPI24TSPpiFzYfu3RvTKQ9|3|13-741-675-6890|2457.09|AUTOMOBILE|rs. blithely ironic deposits nag furiously across the furiously ironic accounts. bold deposits sleep express 386|Customer#000000386|DeQxsCxixT8RQ7JV6mddRYGDGQ2WM94|24|34-193-143-1425|232.01|BUILDING|counts. blithely permanent deposits wake slyly! unusual, even theodolites u 387|Customer#000000387|Yj 9g1mNu00rKRkc1ovOmptsPI|18|28-694-363-3673|3404.23|HOUSEHOLD|oach. blithely regular instructions sublate across the quickly regular ideas. 
qui 388|Customer#000000388|dV4lqEufXkF8R|7|17-856-814-6352|1938.05|HOUSEHOLD| carefully bold deposits: final pinto beans sleep slyly idl 389|Customer#000000389|ij8KNM0,HRvIvnvY w8jQK4zvr1EOO9YM|9|19-264-943-1253|-307.61|AUTOMOBILE|o beans affix fluffily. slyly ironic notornis wake 390|Customer#000000390|Nsc3VZZnVsw0mLAnqqzVz,|4|14-812-253-6693|8862.18|HOUSEHOLD| final packages promise quickly. pending theodolites haggle quickly above the doggedly ironic 391|Customer#000000391|q10SV05KB1038lzUR8P|11|21-604-451-4462|4801.30|HOUSEHOLD|le blithely final forges. furiously even deposits cajole fluffily even patterns. furious 392|Customer#000000392|H7M6JObndO|17|27-601-793-2507|8492.33|BUILDING|efully bold ideas. bold requests sleep carefully blithe instructions. carefully final accounts are blithely quickly 393|Customer#000000393|RSELskV44I3LFA9VLGY2Qe|20|30-749-949-5915|3593.57|FURNITURE|ake furiously express notornis. pending accounts hang slyly slyly blithe theod 394|Customer#000000394|nxW1jt,MQvImdr z72gAt1bslnfEipCh,bKZN|23|33-422-600-6936|5200.96|MACHINERY| instructions. carefully special ideas after the fluffily unusual r 395|Customer#000000395|b06rg6Cl5W|15|25-804-388-6600|4582.28|HOUSEHOLD|s mold blithely regular platelets. slyly silent instructions use slowly slyly specia 396|Customer#000000396|miE7JrCdGpQkF4zYJ27tBdSu IYhQ HXx0 |22|32-902-936-4845|1433.50|BUILDING|xcuses. regular pains wake slyly across the ruthlessly ironic dependencies. e 397|Customer#000000397|EzR2BKJ85SmBDS|7|17-103-357-8777|709.46|FURNITURE|al theodolites. regular accounts are regular, silent foxes. unusual asymptotes above t 398|Customer#000000398|cq9NmtIT4b6JB8L79iLzljlHs4 3|15|25-110-215-3747|8865.61|HOUSEHOLD|l deposits breach slyly ironic asymptotes. carefully pend 399|Customer#000000399|ZBvzMa6N1wdCGaPmG13xVusIxdjSiA94jTXN|8|18-882-664-5454|7358.53|BUILDING|yly even excuses. ironic theodolites wake furiously. blithely regular pinto beans cajole. fin 400|Customer#000000400|U23zy17EPxqmJn7neVc|14|24-522-746-1247|-98.46|BUILDING|fully bold accounts cajole bravel 401|Customer#000000401|aKALIG526OK4veQfUh2KmKcE,oRyg|19|29-667-766-5291|4146.43|BUILDING|l instructions wake. slyly express deposits us 402|Customer#000000402|8Cw4p1m1gKYVUgomkAq,es1ZtrnmHaO|6|16-950-729-1638|2106.67|AUTOMOBILE|dolites. furiously regular theodolites integrate furiously. bravely bold requests are. furiously 403|Customer#000000403|9,BVYegfkFLsEMDkeVW|14|24-753-433-1769|6693.36|HOUSEHOLD|al hockey players; ironic dependencies after t 404|Customer#000000404|2orgvLJ05jOvM292mhkS7iJmHG0jk|22|32-840-785-1776|7408.73|BUILDING|uickly brave requests haggle furiously carefully special idea 405|Customer#000000405|mCQNH1rJtqjjQ9Piauc2bZr4pRFydscZtbD9d|10|20-509-301-7901|7519.14|MACHINERY|nts. pending, express foxes sleep? ironic, pending instructions haggle. ironic, pending theodolites detect slyly. bl 406|Customer#000000406|j1fOG9WsIr2JI6Yi9jgJ M|9|19-426-693-4043|4286.94|FURNITURE|nal foxes. unusual pinto beans wake. special excuses cajole ironic 407|Customer#000000407|cfCP9bE3HnI|1|11-975-454-8499|9537.08|MACHINERY|ect among the carefully regular theodolites. regular dep 408|Customer#000000408|TBjb3m,3aea4JtP833HD4VDk7STz2Y9FB|10|20-177-807-5661|6825.37|BUILDING|unts. furiously ironic depths among the instructions wake carefully along the blithely ironi 409|Customer#000000409|mtrMiDvQxNsy1Cj0cU4ITEW5wGKLPQ2IPHNE9r4|11|21-466-412-4731|3969.86|FURNITURE|fily pending courts. 
express, regular packages are furiously along the quickly regular packages. 410|Customer#000000410|nYak2u Q9,gYUiLfh1N|7|17-576-345-5940|4349.27|BUILDING| sublate across the pending, express asymptotes. quickly 411|Customer#000000411|V3e,FX5x50scsQDzt5,ESxfOQBt4OzjHRoTZxF|18|28-483-924-1955|1209.32|HOUSEHOLD|refully. slyly even packages above the evenly regular asymptotes are blithely ironic dependencies. deposi 412|Customer#000000412|5IN2Y,QrhDJ2YBVGKiDbMpzi2hk1fmozIy2zQ|22|32-940-318-3191|6044.02|BUILDING|ithely silent notornis haggle. regular requests haggle according to the ironic deposits. blithely final dep 413|Customer#000000413|,4Jm5N0ruhJCB7cBR6Kw|6|16-158-285-7336|5817.90|FURNITURE|ular packages integrate furiously fluffily final accounts. carefully regular 414|Customer#000000414|i49DWI61AFb 45vb1RMH|19|29-552-380-2475|527.78|AUTOMOBILE|sily silent, even accounts. careful, final ideas boost fluffily. slyly final pinto be 415|Customer#000000415|334jCRiUb,gx3|23|33-346-876-2972|2317.93|FURNITURE|egular deposits. blithely ironic inst 416|Customer#000000416|fm7H7k6sYhKfXttOT|12|22-651-146-4780|4365.28|MACHINERY|p the pending pinto beans. furiously express reques 417|Customer#000000417|X3LMSpIn4FgjgJxldHVUlUvKzyX|11|21-794-364-5100|6187.73|BUILDING|lent multipliers. quickly express theodolites kindle blithely. ironic re 418|Customer#000000418|,e0q82drO rgVHXHrJRQ0GDrRoUOl|5|15-826-508-1218|1211.39|FURNITURE|d foxes against the furiously special packages snooze blithely quickly 419|Customer#000000419|gvbZNJ4UVBAo5yOZ2UOWcvV9TeTj|16|26-338-447-2399|7786.69|BUILDING|ideas affix alongside of the final accounts. quickly ironic deposits abo 420|Customer#000000420|HV0YB82MWw93 9K|20|30-776-366-5869|1999.35|BUILDING|ideas wake. fluffily ironic packages hang furiously above the regular, even platelets; packages haggle slyly 421|Customer#000000421|it3mUlkZAe9J8gmy|13|23-918-228-2560|7073.17|FURNITURE|lithely final deposits haggle furiously above the 422|Customer#000000422|AyNzZBvmIDo42JtjP9xzaK3pnvkh Qc0o08ssnvq|9|19-299-247-2444|-272.14|HOUSEHOLD|eposits; furiously ironic packages accordi 423|Customer#000000423|Y2B EbOg39GpFLS0n|13|23-201-501-7824|95.79|BUILDING|ts cajole after the silent, pending instructions. ironic, even asymptotes use carefully. furi 424|Customer#000000424|i4cf3kmRE9IJr,cu,1|19|29-891-311-6778|1866.42|HOUSEHOLD|bove the express, final deposits wake furiously furiou 425|Customer#000000425|lp3aCRBK11qFY|16|26-756-407-4828|5824.88|HOUSEHOLD|ajole even, pending accounts. carefully brave accounts 426|Customer#000000426|GjFjM4zjbyhNrV6XlE|19|29-768-330-6311|7818.25|HOUSEHOLD|ar instructions are against the ironic platelets. slyly final acc 427|Customer#000000427|LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY|2|12-124-309-3821|4376.80|MACHINERY|y even multipliers according to the regu 428|Customer#000000428|TCVjlzbX7x,kWcHN33LRdEjO38mAGmPR|21|31-587-557-8211|1952.36|BUILDING|furiously quick accounts. slyly bold dependencies cajole carefully. quickly even requests int 429|Customer#000000429|kZBtY,LQAFu4iaSagjfIk8Q8dzgmT|15|25-989-936-1954|9247.21|FURNITURE|ly regular requests haggle enticing excuses. carefully ironic requests on th 430|Customer#000000430|s2yfPEGGOqHfgkVSs5Rs6 qh,SuVmR|3|13-406-611-4228|7905.17|BUILDING|ly slyly ironic attainments. slyly special instructions until the deposits nag quickly whithout the bo 431|Customer#000000431|RNfSXbUJkgUlBBPn|6|16-326-904-6643|2273.50|HOUSEHOLD|e quickly. 
final, even excuses against the even accounts sleep agai 432|Customer#000000432|FDConiq g20GI9dH QTM ZNX4OB9KU|23|33-307-912-9016|5715.64|BUILDING|wake carefully close, special deposits. regu 433|Customer#000000433|7XFuE4 euQR0w|20|30-659-445-3595|8746.23|FURNITURE|sual ideas affix carefully always regular accou 434|Customer#000000434|6LGAf2hv4MB5MJhfvNsg|3|13-325-443-1474|2940.46|MACHINERY|lly final Tiresias. blithely regular ideas nag stealthily about the furiously 435|Customer#000000435|diwjNQSb3wLYLy WfCDATo5rc1I3 s|2|12-741-309-6377|6217.46|MACHINERY|quickly excuses. blithely express theodolites poach slyly along the theodolites. slyly reg 436|Customer#000000436|4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z|19|29-927-687-6390|5896.87|FURNITURE|olites engage carefully. slyly ironic asymptotes about the ironi 437|Customer#000000437|0PM1xuHd0q2ElcJp 77F2MykOVBSQnZR8u3jkn|4|14-364-492-8498|7760.52|AUTOMOBILE| foxes sleep across the slyly unusual pack 438|Customer#000000438|eqo9A9oaE2CA7 7,L|23|33-394-388-4375|2131.13|MACHINERY|al deposits mold alongside of the fluffily brave requests. 439|Customer#000000439|3deBblz2syRv8yMf0yAVKkE4mDH20uDRj4tJVHUm|14|24-873-368-6801|-61.29|BUILDING|ions may impress thinly for the deposits? even packages towa 440|Customer#000000440|w4fKMgiBuGmV,nLn7NgJl1DoUWwNQMV8z 5,R|3|13-244-480-5751|1809.04|MACHINERY| even theodolites: fluffily final requests cajole about the quickly regular 441|Customer#000000441|gjYpcBx6MP8GvDa6|23|33-438-355-3491|9451.84|HOUSEHOLD|r requests wake theodolites. quickly final ideas haggle fluffily. blithely f 442|Customer#000000442|rvgayfJFLO2cjzMA|1|11-240-523-8711|4157.00|FURNITURE|lets would affix fluffily. regular, regular ideas ought to haggle carefully blit 443|Customer#000000443|UdyNGZ6GSz5aNpMO5N2|3|13-241-131-1632|3726.22|FURNITURE|t the special, final platelets. bold req 444|Customer#000000444|D8l4G8i9aZ7KRbqp6ajvR8h1wjr|1|11-402-300-1949|1505.27|HOUSEHOLD| express accounts along the pending deposits lose carefully above the furiously regular requests. pen 445|Customer#000000445|MX1UA0KUJzIGyWM p2hbLg5dCpVLws8KNcwEsP|20|30-849-846-6070|8018.81|FURNITURE|e ironic, special accounts. quickly regular packages integrate fluffily slyly 446|Customer#000000446|mJOJwYfch izLCuw70,qhlJSmH|24|34-321-168-5681|9225.60|FURNITURE|ending instructions. boldly ironic foxes across the regularly ironic pains sleep along the carefully final deposits. 447|Customer#000000447|hVZBzP8Pii|3|13-438-344-7007|7665.98|HOUSEHOLD|telets around the furiously unusual foxes detect carefully against the 448|Customer#000000448|BH4vtnDpabk0NgoGNJWu4OUXnidfJ|24|34-985-422-6009|8117.27|BUILDING|unts. final pinto beans boost carefully. furiously even foxes according to the express, regular pa 449|Customer#000000449|DiUXazp8EYcJFsX2a7nciEpo9W5BRB4iqdb9HWL|4|14-893-381-6454|3001.94|MACHINERY|posits boost slyly carefully regular requests. final, bold fo 450|Customer#000000450|KVpuYa4dDW8lZZVBttyK614C2qdS|9|19-782-397-9006|5544.42|HOUSEHOLD|gular decoys nod slyly express requests. slyly bold theodolites are along the regular 451|Customer#000000451|ZJKTC1Ck,B01fYZ xdN2|20|30-939-275-3248|2110.59|HOUSEHOLD|quests grow furiously final deposits. ironic, even pi 452|Customer#000000452|,TI7FdTc gCXUMi09qD|6|16-335-974-9174|6633.70|BUILDING|aggle quickly. unusual instructions i 453|Customer#000000453|PZ4mmWL7R,El0MtLWMfLXp120lo0,itmO|8|18-209-381-8571|5678.18|HOUSEHOLD|sts. 
slyly even dolphins across the bold, regular foxes haggle blithely 454|Customer#000000454|d9oQCm3onNsFlIoteVjFcQDv|7|17-818-915-9400|6134.40|AUTOMOBILE|ions print slyly platelets. carefully regular packages according to the fluffy, even foxes wake carefu 455|Customer#000000455|sssuscPJ,ZYQ8viO|6|16-863-225-9454|6860.34|BUILDING|l wake. blithely final instructions integrate furiously above the final, regular req 456|Customer#000000456|IgUSuulguDJ5|0|10-784-971-7777|8815.78|FURNITURE|ly even warhorses. quickly even requests wake slyly. 457|Customer#000000457|eaAWe Vqr0x17Uwj1uzQRb wQpXxZVDWS3Wg|20|30-543-684-2857|5867.61|FURNITURE|the foxes. carefully pending instructions integrate fluffily blithely pending packages. careful 458|Customer#000000458|iIKwI3HrgNlD9|4|14-651-706-4016|-38.42|BUILDING|ng. final, express requests are furious 459|Customer#000000459|CkGH34iK 9vAHXeY7 wAQIzJa1cmA8DAEA7m|6|16-927-662-8584|1207.97|MACHINERY|ronic, regular dependencies use above the ironic deposits. carefully express packages use car 460|Customer#000000460|Gbx5Hnw,ctlI7|11|21-643-955-6555|5222.83|FURNITURE|old dependencies mold slyly above the foxes. dogged, express ins 461|Customer#000000461|5vxNLzSASzkbrUr8CRf5|21|31-533-226-4307|9177.63|AUTOMOBILE|sits breach blithely. slyly regular ideas haggle fluffily; special ideas cajole q 462|Customer#000000462|MSqsCvNEkowp7FnscRXP6OUWm|21|31-157-561-4106|4522.60|HOUSEHOLD|ly special accounts? ideas engage regular dependencies. fluffily even pinto beans x-ray blith 463|Customer#000000463|LV7MN7Tkm2NSo4Q3lwvjxGQyRJjRZRf,M|8|18-167-214-5805|-654.50|HOUSEHOLD| quickly along the final ideas. slyly regular accounts are iro 464|Customer#000000464|kAALP9gEt3,G9XtxCXjv38HjKBEP|9|19-269-971-9738|8730.85|AUTOMOBILE|efully express accounts play. special requests use carefully. regular courts sle 465|Customer#000000465|gngnTNn7azjgQlQJnakTZto|2|12-137-838-1346|8432.74|FURNITURE|es. quick asymptotes integrate carefully alongside of the ideas. even requests believe slyly even ac 466|Customer#000000466|ZI1c8,ZanegEu5CEQxNf5,bkuYPwn7H7JIK7|12|22-280-738-3240|3168.41|MACHINERY|foxes. express, ironic accounts boost? carefully silent deposits engage. accou 467|Customer#000000467|amwRkh0nDQ6r6MU|11|21-449-581-5158|9398.51|MACHINERY|manently special warthogs. final ideas a 468|Customer#000000468|IcbihAtOVWcnswfyE|10|20-489-960-5023|9834.19|FURNITURE| accounts cajole quickly above the blithely final packages. even, express package 469|Customer#000000469|JWOULMa5Qtt|12|22-406-988-6460|6343.64|BUILDING|cajole carefully slyly regular packages. 470|Customer#000000470|v9 gWSuP4WrOjNJRgyJtjbNCChQME|20|30-507-458-4433|3597.53|HOUSEHOLD|ilent excuses. never ironic requests sleep furiously. daringly f 471|Customer#000000471|tGr0DtrK 91IgzfeZrSPpPIia3|4|14-574-118-1005|5716.90|FURNITURE|es. unusual accounts try to solve ca 472|Customer#000000472|hWgfnsmTAEOx9Mqp87YwztGrgLLqNkjMPh4|12|22-940-478-1933|7929.90|MACHINERY|deas sleep slyly blithely final foxes. slyly final e 473|Customer#000000473|zO3W9pYj PvlsQGe|9|19-209-647-5704|-202.22|HOUSEHOLD|ter the quickly pending requests sleep above the carefully iron 474|Customer#000000474|mvEKw,6zT0V8Yb2yTG hu990UX|21|31-247-536-6143|9165.47|MACHINERY|ns integrate against the quickly special courts. slyly 475|Customer#000000475|JJMbj6myLUzMlbUmg63hNtFv4pWL8nq|14|24-485-422-9361|9043.55|BUILDING|egular requests. ironic requests detect furiously; deposits ha 476|Customer#000000476|68r87HCBbQkVYaVfes8mgKs|2|12-996-628-9902|5973.10|BUILDING|sly. 
carefully quick instructions sleep carefully deposits. final, pending pinto beans use closely fluffily final in 477|Customer#000000477|5aW5WHphNgFdIS1Qdp2cIJXG8ER8|23|33-845-877-6997|1836.61|AUTOMOBILE|totes are blithely among the furiously final foxes. slyly 478|Customer#000000478|clyq458DIkXXt4qLyHlbe,n JueoniF|1|11-655-291-2694|-210.40|BUILDING|o the foxes. ironic requests sleep. c 479|Customer#000000479|RdIiG8NbwYtamReRwhR|18|28-336-406-1631|3653.64|AUTOMOBILE|ages. bravely even foxes detect careful 480|Customer#000000480|XyQSPswCeO WPD37K3 mYZ4hnCMJO5p|7|17-231-147-5851|2750.71|FURNITURE|posits. slyly ironic theodolites nag carefully about the quickly final accounts. s 481|Customer#000000481|o4xa7J20NqHM8E0ykH,NKe1gPz04OqIn|21|31-363-392-6461|7157.21|FURNITURE|s can nag slyly instructions. regular, regular asymptotes haggle sly 482|Customer#000000482|389RgNCsmVUKiRskmrQQm90xx JiIxOM0|13|23-732-448-1610|4333.37|HOUSEHOLD|carefully bold instructions. carefully final instructions wake carefully accounts. accounts cajole slyly ironic acc 483|Customer#000000483|Yv1QV 1JsV 9sVbNufRvdnprt0grx52|11|21-799-189-1135|8877.20|MACHINERY|pecial ideas. furiously final i 484|Customer#000000484|ismzlUzrqRMRGWmCEUUjkBsi|20|30-777-953-8902|4245.00|BUILDING|y against the express, even packages. blithely pending pearls haggle furiously above the fur 485|Customer#000000485|XeFbvXCQ,J|19|29-434-961-1623|8695.45|MACHINERY|ecial pinto beans. instructions ought to cajole even 486|Customer#000000486|2cXXa6MSx9CGU|21|31-787-534-8723|7487.40|AUTOMOBILE|nstructions. unusual, special pinto beans sleep about the slyly pending requests. fu 487|Customer#000000487|oTc,l9dAf8O0qOOMP4P0WFTuGS|2|12-111-401-4259|9749.37|AUTOMOBILE|as. excuses use carefully carefully pending i 488|Customer#000000488|bBcMjFPTysSTaTdHcoO|3|13-513-778-1881|-275.58|AUTOMOBILE|thely above the carefully ironic accounts. excuse 489|Customer#000000489|GIdW4IVgeqWMBXnNFZGHS8kmhw|4|14-916-241-6195|8255.83|AUTOMOBILE|lar accounts. finally pending dependencies solve fluffily 490|Customer#000000490| 66fG3Fyb946cVQsH9Z3VMNzR,yfHMKIEB|22|32-268-147-7824|-213.85|FURNITURE|ash carefully never bold instructions. regular, bold asymptotes cajole regularly. quickly bold foxes wak 491|Customer#000000491|r3zPOuenxHl0oqInxWlEyLP1ZH|0|10-856-259-7548|785.37|AUTOMOBILE| slyly special requests hang dogged, express epitaphs. 492|Customer#000000492|JexAgMLuUHoElYFaKx,hJcAP1b1GknYoYHQLyx|8|18-686-244-1077|8635.18|AUTOMOBILE|gle furiously furiously final packages. carefully bold pinto beans promise quickly alongside of the close 493|Customer#000000493|G dRBjxmBBug1 xRSa6VwRchFDtU5b|16|26-514-558-7246|6582.04|MACHINERY|er the furiously express excuses use above the regular accounts. regular instructions after the 494|Customer#000000494|GKgTjHFlQrDZWcketSqhZCopBhmChknI|10|20-330-453-6579|6295.47|FURNITURE|al courts. regular, ironic requests serve furiously. pending 495|Customer#000000495|QhFbEv6KbQIwfZs 1krt1eACKI31v3iyM|7|17-400-405-6060|7997.81|BUILDING| dependencies. silent accounts cajole quickly furiously pendin 496|Customer#000000496|Y8oYLlHme6Z4fEzkTu|12|22-173-644-7922|8174.82|MACHINERY| quickly bold packages. decoys among the blithely pending accounts lose according to the deposits. 
497|Customer#000000497|0 qRRXAxUbo1J KDwDMjFde5fXDwn |23|33-937-724-3506|2191.59|BUILDING|fluffy ideas detect carefully 498|Customer#000000498|1Wnja9i7KAC3HxS5yATK,In8Q6AHcEUr0f5Tp|19|29-210-810-1479|3945.64|BUILDING|yly pending requests according to the slyly special asymptotes sleep carefully against the slyly even pack 499|Customer#000000499|m1hO3VXQVbwTbJ99Hw|14|24-387-817-9149|4293.76|HOUSEHOLD|old sentiments cajole carefully among the blithely unusual requests. final packages nag careful 500|Customer#000000500|fy7qx5fHLhcbFL93duj9|4|14-194-736-4233|3300.82|AUTOMOBILE|s boost furiously. slyly special deposits sleep quickly above the furiously i 501|Customer#000000501|lzkYA5C6wa,wX|13|23-867-672-1331|1909.35|FURNITURE|ual deposits wake. quickly ironic platelets along the careful deposits haggle 502|Customer#000000502|nouAF6kednGsWEhQYyVpSnnPt|11|21-405-590-9919|1378.67|HOUSEHOLD|even asymptotes haggle. final, unusual theodolites haggle. carefully bo 503|Customer#000000503|7xCLYGLCpFU,toJBIPIrJbLIuLok81h IxK ae5Z|20|30-441-755-3094|3213.66|MACHINERY| even deposits haggle. packages i 504|Customer#000000504|2GuRx4pOLEQWU7fJOa, DYiK8IuMsXRLO5D 0|10|20-916-264-7594|0.51|FURNITURE|slyly final theodolites are across the carefully 505|Customer#000000505|MAUkwAyEvg61RlCMomspMs0WzYa,Ns|2|12-530-647-8313|6557.51|HOUSEHOLD|mptotes haggle around the theodolites. furiously bold accounts detect quickly packages. special pinto beans 506|Customer#000000506|dT kFaJww1B|13|23-895-781-8227|1179.85|HOUSEHOLD| idle instructions impress blithely along the carefully unusual notornis. furiously even packages 507|Customer#000000507|QlA0Fc 6e,r67ugESzq|14|24-158-185-4455|5727.00|MACHINERY|nst the furiously even deposits cajole slyly among the furiously ironic requests. blithely unusual depo 508|Customer#000000508|q9Vq9 nTrUvx|18|28-344-250-3166|1685.90|BUILDING|uses dazzle since the carefully regular accounts. patterns around the furiously even accounts wake blithely abov 509|Customer#000000509|LHLR0IKQJHVF1 0UvBNPLq0|4|14-115-338-1002|7885.50|FURNITURE|ily! requests cajole fluffily. slyly regular waters na 510|Customer#000000510|r6f34uxtNID YBuAXpO94BKyqjkM0qmT5n0Rmd9L|5|15-846-260-5139|1572.48|HOUSEHOLD|symptotes. furiously careful re 511|Customer#000000511|lQC9KfW W77IYtJjAgSZguNzxjY rYk3t6lcxfSh|13|23-247-728-9743|4571.31|FURNITURE|he slyly close deposits. special, ironic ideas detect furiously carefull 512|Customer#000000512|e5 kymvjf6Vja7tNsL 3dfiK|2|12-144-416-6035|3937.58|BUILDING|packages are slyly after the slyly express packages. bold d 513|Customer#000000513|sbWV6FIPas6C0puqgnKUI|1|11-861-303-6887|955.37|HOUSEHOLD|press along the quickly regular instructions. regular requests against the carefully ironic s 514|Customer#000000514|0qD6Nwp3tG3QqCq9qvRAzT6N8L|23|33-194-775-6756|5840.97|BUILDING|carefully final ideas. quickly final packages are. requests haggle slyly. blithely pending sauternes lose bl 515|Customer#000000515|oXxHtgXP5pXYTh|15|25-204-592-4731|3225.07|BUILDING|ackages cajole furiously special, ironic deposits. carefully even Tiresias according to 516|Customer#000000516|EJwOQMTQnFwvd8r Y7f9i5POy6ZlNkIYxCL hg8t|6|16-947-309-2690|4768.96|MACHINERY|final requests after the furiously 517|Customer#000000517|mSo5eI8F4E6Kgl63nWtU84vfyQjOBg4y|10|20-475-741-4234|3959.71|FURNITURE|al, ironic foxes. packages wake according to the pending 518|Customer#000000518|EsCrt4chk,3IRIzwMHTu 6VQWrfh|17|27-651-256-7682|9871.66|BUILDING|as. 
quickly regular requests are carefully above th 519|Customer#000000519|Z6ke6Y9J2pYuPBp7jE|5|15-452-860-5592|9074.45|BUILDING|es. fluffily regular accounts should have to sleep quickly against the carefully ironic foxes. furiously daring 520|Customer#000000520|yaOGc9Ve92Bi4F6e0GcheU2MmEOXJE0zqyDT sEA|3|13-612-111-7765|8315.09|HOUSEHOLD| haggle across the even, bold instructions. final, even ideas might wake blithely against the 521|Customer#000000521|MUEAEA1ZuvRofNY453Ckr4Apqk1GlOe|2|12-539-480-8897|5830.69|MACHINERY|ackages. stealthily even attainments sleep carefull 522|Customer#000000522|gPz4FuAGpjvaU4YB9J,fGSnLBr9scEovGO1KkTx|12|22-771-454-9561|6358.46|BUILDING|instructions. doggedly express requests doze blithely. regular theodolites hagg 523|Customer#000000523|sHeOSgsSnJi6pwYSr0v5ugiGhgnx7ZB|10|20-638-320-5977|-275.73|BUILDING| fluffily deposits. slyly regular instructions sleep e 524|Customer#000000524|bpsO77xiAmjwOxqIgAszRu4Y|9|19-844-888-9800|5706.19|HOUSEHOLD|ending pinto beans unwind slyly. slyly final theodolites above the quickly ironic pinto beans haggle ev 525|Customer#000000525|w0pOG5FhH45aYg7mKtHQhAWQKe|19|29-365-641-8287|3931.68|AUTOMOBILE| blithely bold accounts about the quietl 526|Customer#000000526|0oAVPhh1I4JdrDafVG2Z8|1|11-170-679-3115|705.93|HOUSEHOLD|ctions cajole after the furiously unusual ideas. ironic packages among the instructions are carefully carefully iro 527|Customer#000000527|giJAUjnTtxX,HXIy0adwwvg,uu5Y3RVP|13|23-139-567-9286|4429.81|HOUSEHOLD|ending, ironic instructions. blithely regular deposits about the deposits wake pinto beans. closely silent 528|Customer#000000528|SRYjG5Wgp8ZG8GyDFhRIR5ep8yNs3nrCmYa|15|25-985-381-5453|1802.50|AUTOMOBILE| the slyly even instructions. carefully idle packages sleep about the platelets. bol 529|Customer#000000529|oGKgweC odpyORKPJ9oxTqzzdlYyFOwXm2F97C|15|25-383-240-7326|9647.58|FURNITURE| deposits after the fluffily special foxes integrate carefully blithely dogged dolphins. enticingly bold d 530|Customer#000000530|wG6AC7G6Y0DRuzJiroWCByzbrkqeySQDvRXzH|13|23-614-884-1055|4990.92|BUILDING|uctions cajole blithely across the ironic packages. slyly regular deposits wa 531|Customer#000000531|ceI1iHfAaZ4DVVcm6GU370dAuIEmUW1wxG|19|29-151-567-1296|5342.82|HOUSEHOLD|e the brave, pending accounts. pending pinto beans above the 532|Customer#000000532|xwWO3lWjgVJTZwhnltyH6zj5ddkzgH8RbF|15|25-875-978-2232|1725.68|MACHINERY|usly regular deposits kindle. quickly even depos 533|Customer#000000533|mSt8Gj4JqXXeDScn2CB PIrlnhvqxY,w6Ohku|15|25-525-957-4486|5432.77|HOUSEHOLD|even dolphins boost furiously among the theodo 534|Customer#000000534|3PI4ZATXq8yaHFt,sZOQccGl Fc1TA3Y 2|1|11-137-389-2888|6520.97|AUTOMOBILE|deas. blithely regular foxes use carefully bold accounts-- ruth 535|Customer#000000535|,2Y kklprPasEp6DcthUibs|2|12-787-866-1808|2912.80|BUILDING|even dinos breach. fluffily ironic 536|Customer#000000536|jf8PSOQDvqQj4uF8|12|22-521-348-9030|3342.75|MACHINERY|tes? blithely enticing theodolites wake. braids sleep. sly 537|Customer#000000537|wyXvxD,4jc|10|20-337-488-6765|2614.79|FURNITURE|e carefully blithely pending platelets. furiously final packages dazzle. ironic foxes wake f 538|Customer#000000538|u9jYEMPoKwrH5wXivkSebbxAx1PU|19|29-632-471-2852|-303.95|MACHINERY|uffily special requests nag around the quickly stealthy 539|Customer#000000539|FoGcDu9llpFiB LELF3rdjaiw RQe1S|6|16-166-785-8571|4390.33|HOUSEHOLD|ent instructions. 
pending patter 540|Customer#000000540|YkaXu3o1X8|16|26-933-117-7482|9195.77|MACHINERY| enticingly express excuses. quickly regular notornis cajole near th 541|Customer#000000541|,Cris88wkHw4Q0XlCLLYVOAJfkxw|0|10-362-308-9442|1295.54|FURNITURE|according to the final platelets. final, busy requests wake blithely across th 542|Customer#000000542|XU2ffxnW3TQasrfF0u2KwKWmMarPyY4q7Q|16|26-674-545-2517|3109.96|BUILDING|r forges! requests alongside of the bold, final deposits 543|Customer#000000543|JvbSKX7RG3xuqiKQ93C|17|27-972-408-3265|6089.13|AUTOMOBILE|l, even theodolites. carefully bold accounts sleep about the sly 544|Customer#000000544|Jv7vcm,oE,HEyxekXKia1V5H1up23|5|15-572-651-1323|4974.68|AUTOMOBILE|bout the packages integrate above the regular instructions. regular ideas hinder s 545|Customer#000000545|AsYw6k,nDUQcMOpEws|10|20-849-123-8918|7505.33|AUTOMOBILE| carefully final deposits. slyly unusual pinto beans may wake bold requests. unusual courts alongside 546|Customer#000000546|GZtBXX3OaqFLbC9JNi1hmF1JFLbmRs9|19|29-936-444-8248|3116.50|MACHINERY|ly fluffy braids. blithely special theodolites use express deposits-- slyly regular attainments 547|Customer#000000547|4h SK3dVkE1tQ0NCh|22|32-696-724-2981|6058.08|BUILDING|y express deposits. slyly ironic deposits nod slyly slyly ironic instructions. carefully quick idea 548|Customer#000000548|98nP31ToAGK tCCkYm7HqBZt0dLjy0JzlMMRCmkj|4|14-787-370-8722|90.45|BUILDING|c pinto beans. quickly even requests haggle against the blithel 549|Customer#000000549|v5uqfeHLiL1IELejUDnagWqP5pKWa9LtoemziGV|24|34-825-998-8579|91.53|BUILDING|n asymptotes grow blithely. blithely fluffy deposits boost furiously. busily fu 550|Customer#000000550|q5 gKwc7PBQOyd,H|17|27-938-997-6262|7270.82|FURNITURE|ully regular deposits. slyly ironic requests wake along the depos 551|Customer#000000551|holp1DkjYzznatSwjG|15|25-209-544-4006|-334.89|MACHINERY|y special ideas. slyly ironic foxes wake. regular packages alongside of the deposit 552|Customer#000000552|EbjtaRaiok7eqbQ5VJi7q|2|12-669-784-2191|1353.24|FURNITURE|ickly final accounts cajole fluffily according to the bold, regular accounts. 553|Customer#000000553|8tTlavJ sT|4|14-454-146-3094|4804.57|BUILDING|ully regular requests are blithely about the express, bold platelets. slyly permanent deposits across the 554|Customer#000000554|RluaguNRAJhYXmn,CWxcOC,Ly7|2|12-938-503-7317|8395.57|HOUSEHOLD|jole along the blithely bold 555|Customer#000000555|chm8jY6TfQ8CEnsvpuL6azNZzkqGcZcO8|15|25-548-367-9974|5486.52|BUILDING|lites are blithely ironic ideas. blithely special pinto beans dazzl 556|Customer#000000556|UMHllVkuyQUQ3aLXCVRxrXatsyd0AL6Xw|1|11-934-412-5846|7944.22|MACHINERY|nt excuses! carefully final requests solve quick 557|Customer#000000557|Nt6FUuDR7v|15|25-390-153-6699|9559.04|BUILDING|furiously pending dolphins use. carefully unusual ideas must have to are carefully. express instructions a 558|Customer#000000558|PB1ZV4kQnRHiC|17|27-866-273-7672|1912.23|HOUSEHOLD|ly final requests. regular requests hag 559|Customer#000000559|A3ACFoVbP,gPe xknVJMWC,wmRxb Nmg fWFS,UP|7|17-395-429-6655|5872.94|AUTOMOBILE|al accounts cajole carefully across the accounts. furiously pending pinto beans across the 560|Customer#000000560|gU5FQf0WM0sxTYQ|19|29-618-467-8489|1469.59|BUILDING|sly pending packages boost slyly-- fluffily ironic ideas bel 561|Customer#000000561|Z1kPCTbeTqGfdly2Ab9KEdE,jIKW|18|28-286-185-3047|2323.45|FURNITURE|across the furiously ironic theodolites. final requests cajole. 
slowly unusual foxes haggle carefully 562|Customer#000000562|04xjB,zuffnhVyEY0 PeiJPtdjh 0ji|15|25-271-465-6971|9234.50|BUILDING|accounts. ideas cajole. quick 563|Customer#000000563|2RSC1g7cVd,j23HusdkhdCGmiiE|12|22-544-152-1215|3231.71|FURNITURE| pinto beans believe fluffily. excuses wake blithely silent requests. b 564|Customer#000000564|qPQOo94iVl|4|14-865-332-8571|6307.59|MACHINERY|onic patterns about the furiously pending 565|Customer#000000565|HCBXAou,1eP6Z3IynHFI7XmEBgu27Sx|4|14-798-211-2891|2688.88|FURNITURE|e. carefully bold deposits sleep regu 566|Customer#000000566|5NmdMIwTpF8tj7O92363ycA6EL5Yh,vW|24|34-443-780-3708|1928.10|FURNITURE|ke express, ironic requests. regularly even sauternes detect de 567|Customer#000000567|KNE6mpW69IgTjVN|21|31-389-883-3371|8475.17|BUILDING|blithe, even ideas. fluffily special requests wake. c 568|Customer#000000568|ZddVCnzeABTTBgV3GvkvNtw9,KOHHpME2GELhz|13|23-603-795-8611|1317.56|BUILDING|gular decoys haggle slyly. blithely special packages slee 569|Customer#000000569|Kk20Q5HiysjcPpMlL6pNUZXXuE|2|12-648-567-6776|-795.23|MACHINERY|sh. blithely special excuses sleep. blithely ironic accounts slee 570|Customer#000000570|0Zo0P6m,sie 1,VXacPX2ccDIyWFolj6R|15|25-264-442-3057|8480.87|AUTOMOBILE|gular instructions unwind bold escapades. special asymptotes snooze according to the 571|Customer#000000571|hCrDDrMzGhsa6,5K4rGXQ|2|12-115-414-4819|8993.23|HOUSEHOLD|le fluffily. ironic, pending accounts poach quickly iron 572|Customer#000000572|Nf4Yqb49BqGkzmmtf6|11|21-425-209-5033|7252.65|AUTOMOBILE|leep. pending requests affix blithely. ironic theodol 573|Customer#000000573|BEluH7it7jUcWqb tNLbMIKjU9hrnL7K|4|14-354-826-9743|2333.96|HOUSEHOLD|as. furiously even packages sleep quickly final excu 574|Customer#000000574|ratVLdmp070|8|18-676-218-1058|9787.56|FURNITURE|al pinto beans. carefully ironic foxes cajole idly finally express theodolites. fluff 575|Customer#000000575|4K6h0pYH,bg2FS5cYL,qqejhvp7EfTlBjRjeVPkq|1|11-980-134-7627|3652.29|BUILDING| final requests cajole after the ironic, bold instructio 576|Customer#000000576|JI7ZI3BRrkt40uuUmg oyZC3pQ2lS65SnSGL|1|11-777-499-8213|2091.63|HOUSEHOLD|sual platelets. furiously final theodolit 577|Customer#000000577|a73SSq2cip7C8nSzdmmscpZyLCZ7KL|14|24-662-826-1317|7059.15|FURNITURE|int furiously. slyly express pin 578|Customer#000000578|nxUZ BCBO1 HAymUcopl2NtyWMuWVnE3bqPVDB|14|24-278-860-9263|6181.23|FURNITURE|ly. carefully pending packages cajole among the carefu 579|Customer#000000579|9ST2x,snyY3s|0|10-374-175-6181|1924.96|MACHINERY|ndencies detect slyly fluffil 580|Customer#000000580|wpvPbaPtx5QN|11|21-444-589-3830|-181.63|BUILDING|dependencies. final asymptotes haggle among the bold packages. slyly silent 581|Customer#000000581|s9SoN9XeVuCri|24|34-415-978-2518|3242.10|MACHINERY|ns. quickly regular pinto beans must sleep fluffily 582|Customer#000000582|KqH6uOztVK55zDxLA9kvdtny i5OYXt|3|13-484-591-9280|4879.55|MACHINERY|carefully against the quickly s 583|Customer#000000583|V3i6Gu9,LZtvdnNppXnI2eKQFx0b36WvL,F |13|23-234-625-4041|3686.07|HOUSEHOLD| haggle. regular, regular accounts hinder carefully i 584|Customer#000000584|jebKvptmHtS9,YE1qOjl2AOw38P,8skngJZh|15|25-352-778-1041|8825.71|MACHINERY|ages boost regular deposits. 
blithely stealthy depo 585|Customer#000000585|OAnZOqr6A,,77WC001ck8BAqvJTW6,dRGoRdX|16|26-397-693-4170|7820.26|MACHINERY|ickly ironic requests sleep regularly pending requ 586|Customer#000000586|vGaA9XBtn,hlswFhSjLIXGlLEDD2flE8UXwj|11|21-239-369-7791|5134.35|AUTOMOBILE|above the blithely express ideas. slyly r 587|Customer#000000587|J2UwoJEQzAOTtuBrxGVag9iWSUPTp|6|16-585-233-5906|7077.79|AUTOMOBILE|ve the final asymptotes. carefully final deposits wake fu 588|Customer#000000588|ex9SkK7K uM,ki1dsO7PgZLlIuQFKJUQZpD2oS|17|27-988-546-2598|483.89|FURNITURE|ic requests haggle quickly across the deposits. regular, express ideas along the 589|Customer#000000589|TvdYNogIzDfr 1UyJE4b9RTENPmffmIoH|19|29-479-316-3576|1647.05|FURNITURE|s; blithely ironic theodolites sleep-- accounts haggle around the furiously silent ideas. silent, final packages in 590|Customer#000000590|4sHhhAZWHYRxJVz0KRgjW9IlKu,55IuT|8|18-734-215-6394|3993.54|MACHINERY|es. regular dependencies cajole furiously blithely regular ideas. regular dependencies cajole carefully a 591|Customer#000000591|wGE7AnEtiX7cmCkYA|20|30-584-309-7885|6344.66|MACHINERY| regular requests after the deposits cajole blithely ironic pinto beans. platelets about the regular, sp 592|Customer#000000592|srNO5Hu10z1Ru4rRPU,QpXzFwY8759wqZ|24|34-832-574-7217|9712.75|BUILDING|lithely final requests use slyly. special theodolites nag carefully-- carefully pending deposits cajol 593|Customer#000000593|SYyEL2nytJXBbFemMseCiivA32USVEDbvGzZS|9|19-621-217-1535|233.51|AUTOMOBILE|ve the regular, ironic deposits. requests along the special, regular theodolites lose furi 594|Customer#000000594|sbcKWltfCAnXrc Z27ZYDzsH1ztd,ZhgaD9xIMsh|9|19-286-925-8440|6518.42|HOUSEHOLD|. fluffily final instructions are slyly toward the slyly 595|Customer#000000595|7Q17BacxM,liY2AwhnHGR0Pjf1180sMz1U|19|29-554-215-7805|4177.17|HOUSEHOLD|gular accounts x-ray carefully against the slyl 596|Customer#000000596|hoByQV2JchlIWfzPFW8I0nCI|5|15-484-811-5482|1722.88|MACHINERY|ecial deposits after the slyly regular packages dazzle furiously across the courts. accounts wake. reg 597|Customer#000000597|Dbv,XVGzl4X|15|25-687-952-9485|2443.52|AUTOMOBILE|es across the slyly brave packages maintain quickly quickly dogged excuses 598|Customer#000000598|9ICLFWFZa6|9|19-113-384-3847|3244.78|FURNITURE|es. furiously pending packages haggle fluffily carefully silent foxes. carefully unusual dependencies boost fu 599|Customer#000000599|fIvpza0tlXAVjOAPkWN5,DiU0DO4e5NkfgOlXpDI|4|14-916-825-6916|6004.52|HOUSEHOLD|thely even requests wake carefully regular theodolites. instructions haggle alongside of the f 600|Customer#000000600|LOtVjPC,Eu,0I2BRCqWf,K|12|22-675-907-7888|2003.44|HOUSEHOLD|nstructions sleep among the final, even pinto beans. fluffily pending theodolites according to the 601|Customer#000000601|P3 Dv,6yllTNmL9yt6NUZZPZjvM2coWJd|1|11-104-635-9839|9768.21|BUILDING|ly according to the unusual foxes. carefully ironic accounts haggle accounts-- regular dolphins will integ 602|Customer#000000602|NCryKIpG3W,FDV2|13|23-434-900-7213|8404.90|BUILDING|nstructions. asymptotes above the forges are against the carefully 603|Customer#000000603|DFAIWiyqZ0GzuF6AWCZX3DcDxyICb3EWxEw|19|29-629-573-6194|8161.13|AUTOMOBILE|n packages wake carefully. special requests haggle slyly carefully bold deposits. furiously regular pinto b 604|Customer#000000604|qCQsFELZ3W hlmi,zOHBcZGo0PZl3jbFu1jsijqE|21|31-757-951-9827|3195.96|HOUSEHOLD|le furiously express instructions. 
ideas hag 605|Customer#000000605|QAxZ0IXgCzUfNjseQCLfh95HEi|23|33-269-948-8039|-549.73|HOUSEHOLD|ly regular foxes are quickly 606|Customer#000000606|vBIUd7LjRJ5rZXSzITHIvpZwBCClyt4Hjr Tlnf,|1|11-284-540-8460|9676.98|AUTOMOBILE|, bold packages. regular, final theodolites haggle slyly carefully final accounts. silently specia 607|Customer#000000607|m61hvYPASIGmNJx7Tu|24|34-601-151-4029|4038.45|FURNITURE|ymptotes. blithely bold requests shall are about the furiously final platelets. 608|Customer#000000608|luMI1JpfrrILCEeTgz8k98z|16|26-767-193-8671|2256.36|BUILDING|ld packages. special requests along the accounts are after the carefully unusual deposits. blithely quiet excuses c 609|Customer#000000609|dSpUFl8IR8Gh|21|31-869-580-1707|3651.06|FURNITURE|pending, express platelets poach furiously after th 610|Customer#000000610|Fo8RfPq1kgzD 0|11|21-782-663-7023|3374.92|BUILDING|uriously final deposits against the deposits detect alongsi 611|Customer#000000611|E1dtWGHE7NrLfnSKLPFU|19|29-924-242-5243|4272.43|HOUSEHOLD|inst the slyly final accounts. final packages wake after the even pinto beans. carefully 612|Customer#000000612|oNFqorGhq3a3woEp5q8xVDX|14|24-818-339-9984|7669.16|HOUSEHOLD|ns wake quickly quickly ironic accounts. regular accounts toward the 613|Customer#000000613|AJT,26RbanTdEHOBgTWg|4|14-275-416-1669|6679.75|AUTOMOBILE|ironic, pending deposits: quickl 614|Customer#000000614|YKweqHJfVok|18|28-698-510-6194|9630.24|FURNITURE|y even multipliers. pinto beans nag busily after the busily ironic reques 615|Customer#000000615|6aITapYMqM1fJQKuJD05Yb,6FhjrW|2|12-639-391-3956|-482.48|BUILDING|yly. blithely even accounts sleep blithely unusual in 616|Customer#000000616|yvUE7Qy3Ub6uGhPkuEJeOI|1|11-275-121-4443|6898.65|FURNITURE|ts. blithely bold packages sleep 617|Customer#000000617|Ifjxbt3Y4mGu|14|24-527-532-7752|3625.93|HOUSEHOLD|deas sleep slyly? final, even gifts about the furiously regular 618|Customer#000000618|9O4fhgteQdyFvCkrFm|0|10-675-573-1877|-932.38|HOUSEHOLD|uickly even ideas sleep slyly pending foxes. final, pending foxes nag slyly. permanent instructio 619|Customer#000000619|6bxrNxQA oes7cMa23R 5lDmIOIRThvd|24|34-245-618-6317|2336.99|FURNITURE|ts breach slyly after the slyly regu 620|Customer#000000620|3ztw9KQqKGNsiMM,I1 6g2f,u2Pm5LhlSEe8ZK1k|8|18-466-916-8135|5795.15|AUTOMOBILE|ructions boost furiously among the slyly final dolphins. regular, regular ideas w 621|Customer#000000621|IpFo6e22CRink74PUEPthY9DJJnSeORmQJ4|17|27-667-987-3718|3164.28|HOUSEHOLD|nstructions! final dependencies s 622|Customer#000000622|qdRHTTnVf9O2iFMG1sDm2GSnlM24tKWK|13|23-925-151-9771|5974.11|FURNITURE| asymptotes. slyly ironic excus 623|Customer#000000623|HXiFb9oWlgqZXrJPUCEJ6zZIPxAM4m6|9|19-113-202-7085|7887.60|BUILDING| requests. dolphins above the busily regular dependencies cajole after 624|Customer#000000624|L1hGsNrx4BiN5DIZGk7WMsB90T4ag|4|14-558-935-8773|3907.11|MACHINERY|le carefully. sly asymptotes sleep fluffily unusual packages. final deposi 625|Customer#000000625|uvgDE6eQ2bJp4BkHyVdpYYC8|13|23-789-801-2873|5744.89|FURNITURE|, pending deposits. sly theodolites along the carefully unusual 626|Customer#000000626|PDeE61VY2Q96efuewIZ|5|15-540-121-5663|5447.12|FURNITURE|t brave foxes. slyly pending packages wake furiously along the deposits. carefully pendin 627|Customer#000000627|uOFz ,iMYi02Ksr13Q2nBCETCpSp|15|25-811-790-3533|5826.68|FURNITURE|ages. regular ideas sleep. bold foxes affix. regular instructions haggle. 
bravely unusual requests haggl 628|Customer#000000628|Vzraru5KbgcC3V|17|27-367-742-4090|4954.25|FURNITURE|l dependencies. pending warhorses haggle. fluffily final accounts slee 629|Customer#000000629|LeXGhXX1mFQ0Cq,7taW ruvRHTpG3q,KkW|12|22-260-205-9116|5100.77|AUTOMOBILE|ic attainments. stealthily pending pinto beans affix carefully ironic theodolites. fluffily final deposits sleep da 630|Customer#000000630|XAw3WrAa mt0DnOuycb16LG9zbUv04DXsS|24|34-396-743-8684|3649.05|HOUSEHOLD|ely across the blithely stealthy ac 631|Customer#000000631|By LIK3TbJ67sJLlpaoOCXZheuX|19|29-864-813-2575|2603.00|MACHINERY|bold platelets haggle. slyly even pearls can solve slyly among the final foxes. slyly unusual r 632|Customer#000000632|sUlni97rSK6DIL|3|13-310-645-6928|-487.92|MACHINERY|ld dependencies sleep slyly along the special requests. furiously regular asymptotes use furiously accounts. 633|Customer#000000633|0pgCxndi1coDLkAV,UJJDMV0wtVVahCQaQap 0M|2|12-450-116-1239|3385.52|HOUSEHOLD|ully. even, final requests use slyly. blithely special packages wake carefully furiously busy 634|Customer#000000634|O09TejHJ6UszNfmqTR cmal8zcs|20|30-997-704-1110|6397.58|FURNITURE|e above the regular deposits. slyly even requests integrate slyly blithely express forges. regular platele 635|Customer#000000635|Ftqi0UYvzz56Ov,J6,ySp5WE4vJ2rtY|2|12-399-186-7550|8216.79|AUTOMOBILE|efully bold deposits doubt above the clos 636|Customer#000000636|7urmO1zY77WBPOWn7pXA OqCm3upL9gOtL4V|7|17-420-712-5063|3660.47|FURNITURE|ions boost furiously at the final dolphins. blithe 637|Customer#000000637|Ey7g4q2oH Q1vs|7|17-983-923-8985|7511.17|HOUSEHOLD|ly furiously even accounts: final, unusual Tiresias do snooze theodolites. slyly regular dependenci 638|Customer#000000638|yyRRorZ HHzU1yJwNJwF72dvUVJ nMlzpKAXEb|24|34-102-347-8343|2258.40|HOUSEHOLD|nts x-ray fluffily across the theodolites. carefully final pearls cajole ruthl 639|Customer#000000639|8OiPHefIPoalRjUoCIwbXz|15|25-221-133-2233|4899.15|MACHINERY|ly bold asymptotes. ironic, even d 640|Customer#000000640|j3vjr0 n,pJFG4gIOtC|2|12-702-315-6637|3025.84|HOUSEHOLD|lly. furiously quick deposits haggle quickly regular packages. pinto 641|Customer#000000641|gbIvFlCygHjj5NG7U|24|34-761-987-7777|339.49|AUTOMOBILE|uriously final requests boost regular, final ideas. fluffily busy packages promise? requests along the bold, s 642|Customer#000000642|6Y1gEH0gMLh0yzlipNR|22|32-925-597-9911|5684.01|AUTOMOBILE| requests cajole blithely quick 643|Customer#000000643|9T 2avhfyF PQ|0|10-978-597-2747|5184.70|FURNITURE|fily along the quickly ironic ideas. final, final 644|Customer#000000644|bfZrdZE0QHtMc,ksudO|18|28-489-845-4801|6183.15|HOUSEHOLD|ke slyly alongside of the silent, even req 645|Customer#000000645|I,Kso,IZ,AL2rK4HhIB1wRWWrx3 yoaZlFs|11|21-242-974-5799|1146.49|BUILDING|. special packages haggle ruthles 646|Customer#000000646|gogI8kweD 2H6U,01lbIp0UWUwljSue,KRcC|2|12-177-329-4316|6074.42|HOUSEHOLD|ptotes. silent requests cajole fluffily along the slyly permanent ideas. pending instr 647|Customer#000000647|2Bx7,7i87h5cagC,ZBz49lyiziLqQoD|1|11-873-931-2886|-132.97|BUILDING|. excuses use alongside of the furiously final deposits. blithely express foxes wake furiously. blithely 648|Customer#000000648|pYCT1OLD5Y7rBjjAKnf0Lqd 41RC,0n,nT3oNWz9|7|17-473-731-8352|7029.24|FURNITURE| wake carefully outside the ironic, bold packages. blithely special dependencies boost slyly even ideas. 
careful 649|Customer#000000649|EntXL7MF4lU|11|21-656-678-1337|9442.55|BUILDING|unusual deposits engage along the carefully pen 650|Customer#000000650|1hqwYdlDhaqnkRrovbulo0rrDhJZgUz|5|15-842-586-1263|2086.91|AUTOMOBILE|g to the final deposits. quickly regular requests dazzle along the slyly express courts. 651|Customer#000000651|K7rCTwTb3UX9wAO6ihvYxwBOuJNx51|9|19-610-541-4787|4716.31|HOUSEHOLD| fluffily quick dependencies cajole slyly along the blithely final dependencies. carefully pen 652|Customer#000000652|0WlNKsoRqdjU9,5 Qz,bgm5swI,i0Kg|20|30-254-389-5987|-919.65|AUTOMOBILE|p carefully blithely bold platelets. furiously unusual deposits wake. sl 653|Customer#000000653|la,ROBPJ4I2YNzQw,RpbB0sTOjyv3F ZBeWRiQ|11|21-715-573-6928|7307.55|BUILDING|aggle bravely deposits. even du 654|Customer#000000654|Ip,OhnmOeHu9PezZBvw7AHJcBDOWVoVUJKsJyA|16|26-416-434-3449|1067.04|AUTOMOBILE| nag slyly. final deposits sleep along the ironic, bold i 655|Customer#000000655|SceikyyffYL5OUI8rFnAgrT5E|14|24-916-820-5158|1724.68|FURNITURE|e accounts. unusual, pending platelets are quickly. req 656|Customer#000000656|DeZPec2K1U1fCBR2Ul9mnBNjKuXPcRcm|5|15-379-993-4446|-390.09|AUTOMOBILE|uests cajole quickly. final, pending pinto beans wake furiously among the requests. ironic foxes with the forg 657|Customer#000000657|BpXQ3sbx8bCU0|19|29-952-164-8914|8729.97|FURNITURE|ies boost slyly bravely express instructions. fluffily even theodolites integrate s 658|Customer#000000658|9SboxGtXimmJg49IyT3Zt|1|11-675-750-9832|4716.68|HOUSEHOLD|r ideas about the slyly ironic theodolites integrate according to 659|Customer#000000659|ThR9miOedPuwVEZyz 3MMjHPwB|0|10-834-287-1466|5297.68|HOUSEHOLD|s cajole final, regular dependencies. final accounts sleep! furiously regular requests wake slyly silent 660|Customer#000000660|ZD4fphyxb5pyE|0|10-987-901-3986|2110.30|MACHINERY|ding excuses haggle furiously regular foxes. finally 661|Customer#000000661|1MqWuuRVM5y5NlT1kakwke|4|14-983-203-6472|1735.44|HOUSEHOLD| regular theodolites. brave platelets 662|Customer#000000662|ss AgOrB5VFubLk bsmTgbw2ddJD|0|10-728-355-4532|4517.33|BUILDING|ckages haggle: quickly regular ideas are after the furi 663|Customer#000000663| fqLQWxHWWC40GUOgVvb4idYDbE1Gxc6|9|19-613-882-5677|9698.21|AUTOMOBILE|r the slyly final packages. slyly bold attainments detect blithely acco 664|Customer#000000664|viRe1P6HiyL4LXpU7HPwu|15|25-329-655-5068|8878.22|BUILDING|yly ironic requests cajole pending pla 665|Customer#000000665|Fo9QgQsDOP28D3zR|22|32-759-858-2984|-616.79|BUILDING|y along the special requests. blithely regular waters thrash abou 666|Customer#000000666|dD32Q8kL6KW|2|12-594-508-9621|4538.54|AUTOMOBILE| special packages boost across the even accounts. ironic accounts serv 667|Customer#000000667|oQqeEC,OD9XC1JXyOsHqcpv0fPUdP9ek5KKb70tQ|6|16-917-453-2490|3288.76|AUTOMOBILE|lithely about the furiously sil 668|Customer#000000668|PCmw0r6KkLjXZsljablJ|15|25-582-501-2346|8184.21|FURNITURE|uffy excuses are carefully regular, ironic pinto 669|Customer#000000669|axdO3iaVyYXSxnqnwC0p2Dx6Mn3lDDMp|10|20-471-280-4789|6019.20|MACHINERY|ronic decoys. blithely ironic accoun 670|Customer#000000670|RJtcLv6Tjpx|2|12-839-426-4266|6738.93|BUILDING|y unusual deposits are carefully regular foxes. packages dazzle carefully. 
furiously unusual d 671|Customer#000000671|ic6qGrt0giB,HDEiBK,,FYGHXQpc|21|31-593-213-9388|3227.87|FURNITURE|bold ideas above the ironic packages affix blithely about the furiou 672|Customer#000000672|Rsq8zHIbqRRB8VlgldFlU56j,0SN |18|28-774-971-2700|7778.95|BUILDING|ake. regular requests about the idle asymptotes haggle slyly final, silent packages. carefully special 673|Customer#000000673|Q2S9DoW6mQN5iQ0A8DxD6UxNmPZky|4|14-769-230-8609|3942.58|MACHINERY|ons will cajole blithely. always even packages dazzle blithely across the regular, unusual 674|Customer#000000674|GLZCUQrtiNTrPKdK 0O86ZF|5|15-543-819-4391|7889.64|BUILDING|ites cajole ironic accounts. regular, ironic gifts nag furiously against the blithely express deposits; unusual, 675|Customer#000000675|canew8kOzr8RDTmenqJOn|20|30-299-640-9565|5295.26|HOUSEHOLD|riously even requests boost slowly. ex 676|Customer#000000676|JzJnD kA3KTjTYl|24|34-710-820-8362|5527.61|HOUSEHOLD|es. slyly even theodolites across the slyly ironic accounts nag from the slyly even accounts. accounts are. 677|Customer#000000677|,wTtWOOr wqX2sL,h79myT6nxG1EgY|3|13-398-309-9122|5582.63|AUTOMOBILE|ly brave ideas haggle slyly blithe acc 678|Customer#000000678|BiQyhSp oiDp,,MFVxyrOwMT810qiQlISEPye|9|19-508-735-4910|6302.93|AUTOMOBILE|gainst the carefully pending requests-- furiously special requests haggle? not 679|Customer#000000679|IJf1FlZL9I9m,rvofcoKy5pRUOjUQV|10|20-146-696-9508|1394.44|AUTOMOBILE|ely pending frays boost carefully 680|Customer#000000680|TuONtFVII8TR2QtJyl1lS5f0iagaWWsBm1IQo|12|22-593-663-2200|4366.23|AUTOMOBILE|kages: final deposits believe unusual deposits: b 681|Customer#000000681|x85ncT W41KEcUQCtxE,LD iMNO6j4Hh4x|20|30-809-878-5822|235.53|MACHINERY|ate carefully. accounts hang carefully along the blithely final theodolites. bravely sile 682|Customer#000000682|y5J0HJVI6Mb k4rXTAPbxE9aw|15|25-233-924-7389|7420.60|AUTOMOBILE|he bold, even accounts breach 683|Customer#000000683|G0, q8c6vBykpiLvcuSJLYvqE|6|16-566-251-5446|9120.93|MACHINERY| somas use-- slyly express foxes wake quickly blithely iro 684|Customer#000000684|5kSJ3qMS,7YKdfmwBFzQN4y8EgPiH,8ln19|15|25-456-286-6398|8545.90|HOUSEHOLD| of the bold requests. blithely regular accounts nag alongside of the carefully silent dolphins. carefully 685|Customer#000000685|eRvmYOdl5v|17|27-269-224-9902|2039.84|MACHINERY|nusual ideas. pending packages use carefully. slyly special sentiments cajole after the blithely even accounts. blit 686|Customer#000000686|1j C80VWHe ITCVCV|6|16-682-293-3599|5503.36|HOUSEHOLD| even deposits print quickly. foxes wake. furiously ironic asymptotes across the bold foxes 687|Customer#000000687|UJTC3 WtzvoD39r1GuoCP|21|31-149-119-1456|4999.31|FURNITURE|special packages haggle carefully slyly ironic pinto beans. sl 688|Customer#000000688|2QBxK8WIryWMZTsDM JS7GxWCB6Y71Swa,f1EUxk|8|18-210-546-2836|3354.47|HOUSEHOLD| use alongside of the ironic, pending foxes. final packages use. ironic, unusual dependencies haggle 689|Customer#000000689|Gcie9Q,Wc6J0QvKcefqflajjOtOVchnxqBn9|24|34-325-146-3591|1481.24|MACHINERY|ions. furiously unusual platelets cajole blithely. caref 690|Customer#000000690|xH61m,Si5X4REvi|3|13-489-760-5455|103.45|HOUSEHOLD|nt deposits. final, unusual requests use blithely. slyly unusual packages against the carefully bol 691|Customer#000000691|0RkDX6OLU1hISYCLmdLD C|16|26-741-688-4189|9566.15|MACHINERY|p. 
ironic, regular foxes against the ironic, special theodolites nod carefully quietly unusu 692|Customer#000000692|d6XE9sg, wINvIz8aZS b8n XuhAQU5|19|29-804-421-1703|276.71|AUTOMOBILE|cuses. slyly even asymptotes doubt quickly. fluffily thin theodolites boost. ca 693|Customer#000000693|r5gn5SUW0tsfkOw42x84|1|11-391-870-8153|6577.21|AUTOMOBILE|onic packages. carefully final sauternes across the even, express deposits h 694|Customer#000000694|3ToF2HqyF9mEcP1QgW NMN,6,KsFq6x4u14MS|10|20-646-375-1939|2222.65|BUILDING|efully final requests detect blithely. furiously ironic p 695|Customer#000000695|fbT9dQCc,su7JboB5FWI6|8|18-131-151-7466|9126.54|FURNITURE|ly express deposits according to the fina 696|Customer#000000696|dYyj4azN0EE GqeZxv|22|32-923-916-5643|3778.97|HOUSEHOLD| ironic ideas among the carefully bold foxes na 697|Customer#000000697|LFwdGCTUYDenZGoA9|4|14-613-651-2263|8121.84|AUTOMOBILE|he regular pinto beans. courts cajole carefully according to the blithely even theodolites. quickly 698|Customer#000000698|FLZ2NG5pKHpPtAh|23|33-282-178-6799|2894.22|BUILDING| sauternes. gifts cajole. slyly pending platelets cajole at the iron 699|Customer#000000699|4R6pspbuk5Sp,036VraUEKPBzs|4|14-865-140-8680|-809.22|HOUSEHOLD|. finally even ideas wake slyly pending dolphins. slyly even instructions wake whithout the p 700|Customer#000000700|zyWvi,SGc,tXTls|0|10-351-119-7514|4367.53|MACHINERY| bold excuses. furiously even asymptotes across the carefully regular dugouts cajole fur 701|Customer#000000701|yQU8LcjocNHkk1hse30M0U1f46MrU6dB|2|12-323-784-5793|148.98|FURNITURE|c packages. blithely final instructions according to the regular pinto beans 702|Customer#000000702|BDKtDAva8rBuCWXT6jXb2JJY7YoRS|3|13-549-296-5659|4782.05|FURNITURE| silent accounts. regular, regul 703|Customer#000000703|ge1GEYt4ewGUiSeqBA4rNB5JhyQ92uKF|6|16-741-513-6919|3998.42|AUTOMOBILE|arefully final accounts cajole quickly blith 704|Customer#000000704|41s9yU0ossOKgBvjPPSTZqhfciy|9|19-998-886-1551|441.44|AUTOMOBILE|ously ironic instructions believe about the pending, regular deposits; final accounts cajole alongside of t 705|Customer#000000705|YKdZRFEGcclF6rmdoiNCY8|18|28-991-405-7914|3199.00|BUILDING|s pinto beans sleep busily. ironic, bold theo 706|Customer#000000706|ycxysGDuWHN98FS4lZ2obT9ZPNRWjJTsHbQXvi|19|29-468-275-6045|6496.08|AUTOMOBILE|ses cajole furiously after the ironic requests. pinto beans sleep busi 707|Customer#000000707|DT0nzkijELHyI|21|31-796-903-4461|2619.52|FURNITURE| the deposits. fluffily ironic pinto beans wake quickly. slyly even foxes sleep slyly quickly final deposits. ev 708|Customer#000000708|9iJJ868sXAdFgZyo0V8cfPRTRaUc3d|17|27-796-490-6026|2297.33|BUILDING|p along the carefully ironic pinto beans. even pinto beans are fluffily against the furiously quick dolphins. s 709|Customer#000000709|drDnC6YXTJgcdfJkpbhrQ7z7XmCsoym2t22jwg|18|28-117-812-5493|-867.94|BUILDING|g the blithely ironic multipliers sleep a 710|Customer#000000710|OCLSZuXw1AEK NLvlofMkuK,YNe,bJD40a|22|32-459-427-9559|7412.12|FURNITURE|ges integrate express, even ideas 711|Customer#000000711|EmACeG3r2Y9bxf7KLLgX,ZdQlATjGaafINO|15|25-306-725-3622|9591.51|MACHINERY|ackages promise. theodolites haggle along the express dolphins. special, special deposits affix slyly final pac 712|Customer#000000712| 8w2pIiA4wWAhtjAdXR|6|16-843-486-5087|8667.09|BUILDING| express dependencies use fluffily final platelets. furiously regular accounts wake furiously. 
carefully careful cou 713|Customer#000000713|ov fZJgESFHP P|10|20-595-832-3185|3829.03|MACHINERY|lites sleep slyly furiously express requests. even, even notornis affix furious, ironic p 714|Customer#000000714|2,ARep1aMyhgNZqrkc,toQ3XY6FOiCZqNW|9|19-209-782-4244|2438.10|BUILDING|cross the bold pains. bold instructions haggle. even accounts wake slowly. carefully busy ideas need to x-ray sly 715|Customer#000000715|9qLvF42uxUarKl4I 2pEKOMNJmo8Ro5EK|19|29-500-408-6392|85.05|AUTOMOBILE|hins boost quickly. quickly regular epitaphs haggle fluffily quickly bold pinto beans. regular 716|Customer#000000716|jbXpMEcV9j,6ciftBXEMXDjE|12|22-501-849-6084|8364.99|AUTOMOBILE|as. even accounts about the slyly pending excuses lose bl 717|Customer#000000717|TqWi1c aNhdExPcDD7KQ18W|12|22-660-518-8009|8510.94|MACHINERY|ns poach slyly. carefully express pinto beans ab 718|Customer#000000718|w,GXCSSI4NEHAFPKG|20|30-605-635-8197|8438.40|HOUSEHOLD| regular packages-- pinto beans detect ironically. furiously even accounts detect unusual, pendin 719|Customer#000000719|wry Gj3xd8QX ylUCulG|15|25-498-699-7824|-774.47|HOUSEHOLD|gly express pinto beans sleep furiously around the quick foxes. 720|Customer#000000720|8shFEL7J9sq1NJXR8dixBqaTO,kbSx|22|32-575-838-4260|5357.32|HOUSEHOLD|furiously above the furiously ironic c 721|Customer#000000721|N6hr4gV9EkPBuE3Ayu |21|31-174-552-2949|3420.64|AUTOMOBILE|ar instructions. packages haggle stealthily ironic deposits. even platelets detect quickly. even sheaves along 722|Customer#000000722|aymiuXFyrALTRIzfbLDvtaj37ydq|20|30-633-109-9587|2724.80|BUILDING|leep sometimes express accounts. regular theodolites wake quickly ab 723|Customer#000000723|5tKvCvmVB0yxyAF|21|31-542-178-2520|738.57|BUILDING|e quickly against the blithely final requests. regular pinto beans nag fluffily fur 724|Customer#000000724|dF80enl1y5MfccEMqYz0JSY|5|15-316-638-4703|3035.29|AUTOMOBILE|s. bold accounts about the carefully final packages detect slyly pending platele 725|Customer#000000725|fm8t3X05Wm,PQxPSPHZy,rHI,wUBLTFO5S |16|26-923-317-3870|5030.24|FURNITURE|ts. accounts after the even pinto beans boost across the ironic, silent accounts. reques 726|Customer#000000726|4w7DOLtN9Hy,xzZMR|3|13-168-233-7513|6253.81|BUILDING|wake after the regular, final instructions. final epitaphs mold fluffily blithely ironic id 727|Customer#000000727|wLIX8hKnk0qjUAAGqm|18|28-815-316-3525|807.37|FURNITURE| requests-- quickly regular pinto beans need to detect 728|Customer#000000728|uACufK5vIMlsq,v9d4U5ZWi,|24|34-195-384-1105|4349.73|BUILDING|re stealthily furiously bold requests. carefully final pinto beans lose slyly 729|Customer#000000729|YZxBLBAqBmW53g2ia6s4u,MlJ4WhW|20|30-900-572-2883|8947.26|MACHINERY| packages about the furiously even foxes sleep furiously furiously pending ideas. slyly ironic epitaphs 730|Customer#000000730|ORP6ofUwyD1|2|12-513-973-2702|4718.23|AUTOMOBILE|foxes would cajole furiously slyly special forges; slyly regular pinto beans inste 731|Customer#000000731|D0ceqTVRO3EctrC|7|17-731-915-9753|9311.17|MACHINERY|lar dolphins are carefully slyly final theodolites. unusual accounts integrate across the regular, final deposit 732|Customer#000000732|tQ,r4voHok7oeKw9msDh4ORGje|16|26-154-285-2634|4830.51|BUILDING|structions are after the even deposits 733|Customer#000000733|Ks7Ed2g7zDP905tLGdGcFtomDNchNdaPK2cd|13|23-110-948-6754|4351.09|HOUSEHOLD| furiously. pending, quiet packages nag slyly. 
stealthy pinto beans haggle 734|Customer#000000734|xULRxhbfzPT5nBh|18|28-880-175-5173|9750.71|AUTOMOBILE|tions against the ideas haggle furiously quickly regular forges. carefully final requests solve even, 735|Customer#000000735|8Gn WOTY4cgGaoHwezrZ,JN,Px8e sr|5|15-959-576-4327|293.88|BUILDING|ites. furiously unusual frets boost among the slyly ironic theodolites. packages cajole 736|Customer#000000736|zQNs5e2aFcVY8MigFQEBtoCaQ9e|1|11-617-726-2039|9114.68|MACHINERY| the deposits. blithely regular asymptotes boost q 737|Customer#000000737|NdjG1k243iCLSoy1lYqMIrpvuH1Uf75|18|28-658-938-1102|2501.74|AUTOMOBILE|ding to the final platelets. regular packages against the carefully final ideas hag 738|Customer#000000738|iecb89zRRNeN3KywZSaPXEWMjz|10|20-722-370-5220|2734.95|MACHINERY| carefully slow instructions. furiously express dependencies alongside of the ca 739|Customer#000000739|pAROUfTi3wCEHi1PXu|14|24-309-302-6776|6344.18|HOUSEHOLD|ly regular accounts. blithely final theodolites sleep blithely. furiously regular decoys cajole blithely. bold de 740|Customer#000000740|FCerGpsfsWAsBrQTyqdzMxUQnbQembHKGg|10|20-215-156-3727|1733.76|BUILDING|ets. final, blithe theodolites abou 741|Customer#000000741|jiPAOQuJ5gIauMfvcbE8lKUVPMp|12|22-560-854-2068|9195.41|HOUSEHOLD|ve the fluffily regular accounts wake carefully ironic grouches. slyly bold theodo 742|Customer#000000742|2qRObRkFktME6SsNV0Pa3L8txbA0AFtXuWsKrkW|12|22-610-582-8610|6381.24|HOUSEHOLD|eodolites alongside of the daringly ironic deposi 743|Customer#000000743|WJ lVLsFSgZJCHHLqwRmvCjWvYlCs0c2TvO|20|30-743-559-7934|8450.37|HOUSEHOLD|ptotes. furiously even accounts haggle slyly. ironic foxes sl 744|Customer#000000744| nYSoGuQkf|7|17-121-555-4268|2458.81|HOUSEHOLD|n packages. furiously silent foxes sleep regular, regular accounts. slyly ironic patterns cajole fluffily 745|Customer#000000745|vjuHvDKdaomsivy l|18|28-913-438-9403|7115.14|FURNITURE|o beans. bold, regular theodolites haggle carefully about the quickl 746|Customer#000000746|JOKj8N2QKUm8Gi,F4qX0fLVy|20|30-154-354-9928|1164.46|MACHINERY| final theodolites. final pinto beans haggle-- furiously 747|Customer#000000747|uuichgTY7NjlZaaRZ6S7KzvapZWvAiCUWAQr|8|18-793-504-2931|67.95|FURNITURE|eodolites. blithely regular pinto bea 748|Customer#000000748|1 nkl3GMSnweulTNAMPeB8Sa5aSIk|23|33-403-226-2580|6959.09|AUTOMOBILE|nts. special excuses thrash blithe 749|Customer#000000749|U1Dvu0r793a|24|34-158-697-9591|7491.42|MACHINERY|accounts was. final, final requests wake. theodolites was slyly. blithely even foxes wake carefully ac 750|Customer#000000750|5OyNRajjgjjbaXtI rkxvB2lX4c6u|8|18-235-587-1274|269.90|BUILDING|s. regular, regular deposits sleep carefully blithely bol 751|Customer#000000751|e OSrreG6sx7l1t3wAg8u11DWk D 9|0|10-658-550-2257|2130.98|FURNITURE|ges sleep furiously bold deposits. furiously regular requests cajole slyly. unusual accounts nag unusual ide 752|Customer#000000752|KtdEacPUecPdPLt99kwZrnH9oIxUxpw|8|18-924-993-6038|8363.66|MACHINERY|mong the ironic, final waters. regular deposits above the fluffily ironic instructions 753|Customer#000000753|9k2PLlDRbMq4oSvW5Hh7Ak5iRDH|17|27-817-126-3646|8114.44|HOUSEHOLD|cies. deposits snooze. final, regular excuses wake furiously about the furiously final foxes. 
dependencies 754|Customer#000000754|8r5wwhhlL9MkAxOhRK|0|10-646-595-5871|-566.86|BUILDING|er regular accounts against the furiously unusual somas sleep carefull 755|Customer#000000755|F2YYbRT2EV|16|26-395-247-2207|7631.94|HOUSEHOLD|xpress instructions breach; pending request 756|Customer#000000756|Lv7cG by4Wyd8Hzmumwp8hSIZg9|14|24-267-298-7503|8116.99|AUTOMOBILE|ly unusual deposits. fluffily express deposits nag blithely above the silent, even instructions. expr 757|Customer#000000757|VFnouow3LhLvEDy|3|13-704-408-2991|9334.82|AUTOMOBILE|riously furiously unusual asymptotes. slyly 758|Customer#000000758|8fJLXfS5Zup0GQ3xBKL3eAC Q|17|27-175-799-9168|6352.14|HOUSEHOLD|eposits. blithely unusual deposits affix care 759|Customer#000000759|IX1uj4NFhOmu0V xDtiYzHVzWfi8bl,5EHtJ|1|11-731-806-1019|3477.59|FURNITURE|above the quickly pending requests nag final, ex 760|Customer#000000760|jp8DYJ7GPQSDQC|2|12-176-116-3113|2883.24|BUILDING|uriously alongside of the ironic deposits. slyly thin pinto beans a 761|Customer#000000761|oObRVLlulGS5xikRk8La|19|29-835-631-4258|1525.96|BUILDING|ress Tiresias haggle across the never ironic ideas. blithely pending theodolites cajole ironically 762|Customer#000000762|n5QsjD,gTSrdNRoRpvWqS|11|21-757-455-7898|3764.90|FURNITURE| sublate carefully carefully regular deposits. bold foxes along the gifts nag 763|Customer#000000763|ACMrkbcf3a2J3aobVEmU5hGnHuc|1|11-232-719-3610|2650.27|BUILDING| ironic asymptotes are. slyly bold realms alongside of the fluffily specia 764|Customer#000000764|F8WBznjtO2bX2knXl4ghnsp ixWylkf|16|26-714-322-4800|2061.45|MACHINERY|tructions. quick theodolites kindle 765|Customer#000000765|b7w2edOUZNe0QX 3Ab3y5RFlMZX|20|30-544-657-7473|5683.33|AUTOMOBILE|ter the blithely regular foxes. slyly bold packages use bl 766|Customer#000000766|zGTH6uWKoQxIE|16|26-283-847-8946|4677.55|BUILDING|ly special deposits boost blithely above the slyly ruthless ideas. carefully bold asymptotes unwind about the e 767|Customer#000000767|9f3 XsYXdvP0E39ZBi7r7oRCns3PTx,H|17|27-526-143-3959|7647.09|BUILDING|ngly express dolphins boost carefu 768|Customer#000000768| ,cIZ,06Kg|18|28-851-493-8588|9618.84|BUILDING|to cajole blithely express asymptotes. foxes use quickly. carefully special ideas could have to are bravely bl 769|Customer#000000769|0eGzsjpDFsN0|5|15-102-635-4835|5950.86|HOUSEHOLD|ggle slyly pinto beans. furiously even excuses above the 770|Customer#000000770|IjwJR6TjBJZbKIeouH2d|8|18-856-112-5677|2066.79|AUTOMOBILE|dolites haggle express, express requests. furiou 771|Customer#000000771|J9UMiYJznHVHZDuCG,ErV0iiMStETS|23|33-761-371-4753|7461.74|AUTOMOBILE|arefully unusual accounts shall boost unusual pinto beans. ironic, final ideas boost. slyly i 772|Customer#000000772|TBxlR0AAHeSYl0vyK 8joF|20|30-405-614-4887|7555.16|MACHINERY|ular requests. final ideas sleep. regular, even platelets could haggle blithely bol 773|Customer#000000773|NyRSeog kIkD7YOb0EuSfSGxfptN5nkX26Mk6|8|18-456-377-3723|4578.24|FURNITURE|hely silent ideas could doubt carefully al 774|Customer#000000774|95 O8gd08tdtmJwM0ebHUnDc|11|21-463-652-6686|-506.37|HOUSEHOLD|nd the bold platelets affix furiously doggedly express accounts. carefully unusual deposits serve furiousl 775|Customer#000000775|Cg3M4gTXeIY7llMN2puop7D2|17|27-903-936-7924|1376.67|BUILDING|ts. blithely regular requests upon the ironic, final courts haggle sly, regular deposits. final requests 776|Customer#000000776|rzhIStRHsiWoc6K,7yv3YMqVdrz|15|25-941-650-8313|2669.01|AUTOMOBILE| express accounts. 
furiously ironic theodolites serve blithely. blithely thin packages are among the eve 777|Customer#000000777|27adTXaVp7araW|20|30-765-163-9750|9097.52|AUTOMOBILE|pinto beans; furiously special platelets haggle quickly against the slyly unusual foxes. 778|Customer#000000778|tCuRA2W9y5iiGrcT7a4TzK|23|33-702-179-3134|52.43|MACHINERY|hely ironic instructions. regular, ironic requests affix along the carefully 779|Customer#000000779|2cTZiS4ulZ74edT,RmDnh4ZaCrphMMh Ff2|5|15-940-483-5702|-902.48|HOUSEHOLD|old dependencies. pains haggle fluffily carefull 780|Customer#000000780|CMxcdzgEUkCWP1|8|18-844-576-7345|9874.12|FURNITURE|, final packages use slyly regular deposits. slyly ironic instructions nag careful 781|Customer#000000781|FQCAkyfV0 kL3,FNA1OlBjABak|18|28-478-388-5881|6403.62|MACHINERY|ake blithely blithely final foxes. blithely silent pinto beans haggle furiously. fluffily bold acco 782|Customer#000000782|HFuyemzqz0g QhkL|19|29-850-576-7450|-326.32|AUTOMOBILE|usly bold deposits-- furiously ironic accoun 783|Customer#000000783|01bR7OOM6zPqo29DpAq|1|11-920-256-8525|2436.32|FURNITURE| slyly carefully pending packages; doggedly bold theodolites boost slyly slyly dogged excuses: slyly expr 784|Customer#000000784|evcGXqbosO6,qhx|14|24-975-574-2063|3170.47|BUILDING|cajole quickly. ironic dependencies wake quickly. silent ideas use furiously 785|Customer#000000785|gEkI8kSq8RYgO6tiTA0AB7urZX8s2w03JGwtMi9|18|28-808-670-2983|108.14|BUILDING|nal theodolites. dogged, ironic deposits wake carefully. slyly regular requests after the furiously ironic 786|Customer#000000786|viZtkiJ gbMcPrINM3Ez,33vOJW|13|23-413-365-2022|400.03|MACHINERY|ccording to the regular deposits. carefully final packages run. slyly final deposits h 787|Customer#000000787|wwPe2vMZZ1n1Mm2z0qzDfS43FPj2Ndn|10|20-210-617-3870|212.16|AUTOMOBILE|e. ironic theodolites serve regular foxes. pending requests haggle slyly busy instruct 788|Customer#000000788|LbFHoDpNUSu3AyDS7KLgjoQBJV|16|26-388-689-9272|-330.69|FURNITURE|. unusual packages against the 789|Customer#000000789|DW0NMV Ci5V2bnsX0Al98plG1J0QZqNwcjEVW|9|19-176-517-4263|6038.60|BUILDING|as. quickly special courts integrate slowly final accounts. ironic packages are slyly about th 790|Customer#000000790|CR bzmYYVP|0|10-368-832-9671|2724.98|BUILDING|riously final requests haggle to the blithely special requests. blithely pending gifts after the carefully silent id 791|Customer#000000791|3ZWQ5xexnnLDEmxpmbg|13|23-575-775-4059|3694.81|HOUSEHOLD|posits cajole carefully along the slyly final packages 792|Customer#000000792|icVt7HjGs,p3YL3nr1MHgaQIY5Gmzej57nB,b|7|17-392-500-4370|1672.46|AUTOMOBILE|latelets along the carefully even packages cajole blithely packages-- even pinto beans haggle 793|Customer#000000793| SltK1IMp2Xvwb,A0x3Co1uhcwr|0|10-404-953-9048|2072.99|BUILDING|uickly silent foxes use after the 794|Customer#000000794|RMY8 LyGnJ67NGc5cxPYiIDSF|23|33-633-470-5945|1709.50|FURNITURE|gular patterns cajole slyly blithely final ideas? furiously unusual courts wake among 795|Customer#000000795|droXvSIcNEElsEYS|14|24-973-990-6608|8443.18|HOUSEHOLD|packages cajole furiously since the slyly 796|Customer#000000796|79bj5Rk3jJj9ked7M|3|13-554-411-6773|2584.23|HOUSEHOLD|re slyly even deposits. pending pac 797|Customer#000000797|bdGkzA5duas6LZ1ywB96K6Av3x 99q95h|0|10-994-609-7082|3063.11|HOUSEHOLD|iously regular packages mold fluffily. 
express, idle accounts nag furiously across the carefully ironic req 798|Customer#000000798|wW2OgnHj6dBz tO9OXFqCLm|4|14-670-423-7529|-391.13|HOUSEHOLD|sleep slyly ironic, express ideas. slyly special packag 799|Customer#000000799|LVk8ljWeIYTQQFMKCmxEeRpWQT|12|22-909-693-7833|2263.25|MACHINERY|ets haggle. busily final packages nag carefully after the bo 800|Customer#000000800|mpI6pkdnWLZsBbQi4,uUC5Y3TcM9vmRIgZelrQ|14|24-555-630-2261|9443.39|AUTOMOBILE|ly alongside of the carefully ironic deposits. blithely ironic packages are blithely agains 801|Customer#000000801|UQ67hfDJlxgX68hiFPmDuHav12Vx|16|26-439-495-8236|5207.32|FURNITURE|sits wake blithely according to the slyly un 802|Customer#000000802|ZDk4Suvi8gMp2LLAOW6nFA 3u|0|10-606-236-5778|1377.52|BUILDING|the carefully even pinto beans. carefully unusual pinto beans against the asymptotes cajol 803|Customer#000000803|zm7Xs6RJJJfZ|18|28-855-429-9109|6003.09|BUILDING|. blithely special instructions 804|Customer#000000804|u6xYwCLD,Vd4ODt8|24|34-861-760-4796|3.43|AUTOMOBILE|usly. final accounts integrate slyly above the furiously sly pinto beans. furiously unusual ideas 805|Customer#000000805|wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG|10|20-732-989-5653|511.69|BUILDING|busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 806|Customer#000000806|FTM62Dujm2BoblKFY w|12|22-888-883-2475|6859.28|FURNITURE|the carefully regular accounts breach above 807|Customer#000000807|zKjtrn5FGIs|17|27-333-779-5333|1929.89|MACHINERY|y final patterns? carefully pendi 808|Customer#000000808|S2WkSKCGtnbhcFOp6MWcuB3rzFlFemVNrg |19|29-531-319-7726|5561.93|BUILDING| unusual deposits. furiously even packages against the furiously even ac 809|Customer#000000809|eApaa5hW3Mqp11ZuOP|16|26-776-223-5427|-594.23|HOUSEHOLD|ackages. quickly unusual dugouts c 810|Customer#000000810|3J1wwVhwzVT7XM1v3Rzx90a8c4|9|19-997-929-2765|9632.77|FURNITURE|ep pending deposits. quickly bold accounts are carefully instructions. ruth 811|Customer#000000811|Rau3ADOLCNXM42D6fjQTFuueQce2BHw9r|15|25-575-731-2159|9010.02|BUILDING|kly pending foxes dazzle fluffily after the furiously bold packages. fluffy 812|Customer#000000812|u9uI3BE68quHja6k0,UXRYFgvLHl3jKhn|17|27-714-618-2239|3428.67|BUILDING|ar packages. packages thrash amo 813|Customer#000000813|VBNrCYm67O|9|19-585-173-1514|5673.47|HOUSEHOLD|ts cajole silently bold pinto be 814|Customer#000000814|KmMC1nd5iOXTRlhzMu13Wtx|15|25-342-269-3824|4011.18|BUILDING|yly at the blithely regular excuses. carefully thin pinto beans boost slyly furiously regu 815|Customer#000000815|TmpTFwt3b0 WQUmljE1LKbg50dtQszn67qEog|12|22-941-606-7227|5841.33|HOUSEHOLD|unusual accounts. slyly ironic pinto beans nag against the regular, regular requests. slyly p 816|Customer#000000816|k2dnnQZowe|24|34-457-910-7430|9239.32|BUILDING|accounts cajole slyly quickly express platelets. carefully silent theodolites wake carefully. pe 817|Customer#000000817|jDJt0Wfk0,Wcx5HOI5itS XqUJmMpjzJ|0|10-566-341-4382|7297.64|MACHINERY|y against the carefully special patterns. unusual accounts sleep quickly final instructions. 818|Customer#000000818|CvQvKClYoh9lPjMngrjbCoxqqnp2QFiTe8eF|20|30-943-141-5174|-574.39|BUILDING|yly. blithely final platelets haggle regular, regular ideas: furiously r 819|Customer#000000819|cug3zDy qHUaZMQNEtYlWK3R,mGrid4Of|12|22-424-990-1743|2152.05|MACHINERY|y final accounts sleep slyly express ideas. carefully express foxes sleep. 
carefully stealthy pin 820|Customer#000000820|xsYy3Nu7RhNYyBqL8dD0fONh|4|14-993-368-5047|-614.28|AUTOMOBILE|g, final ideas. slyly ironic packages dazz 821|Customer#000000821|PVdTfF7cfPueJS0MBncE5v4bKfPV64Zg|5|15-901-460-2033|2691.42|FURNITURE|wake carefully. slyly express deposits wake furiously among the fluffily final deposits. pa 822|Customer#000000822|ZoRr18ZiDTtkRduo6PO60gNRg,b46zOe QdS8B|13|23-410-801-1644|736.47|BUILDING|ding dependencies hang alongs 823|Customer#000000823|HuoOuJLxx7S8YDQhexfmIX|0|10-109-430-5638|738.79|AUTOMOBILE|l requests are against the slyly ironic pinto beans. blithely unusual packages wake alongsid 824|Customer#000000824|qHxq5m g6Ug,SDq7R17|18|28-454-124-5859|6840.29|AUTOMOBILE|ffily unusual packages. furiously final ideas 825|Customer#000000825|L8P98o,xq9E78kmuadw,Z7Rwy|10|20-492-863-1129|1809.01|HOUSEHOLD| busy deposits. careful, bold foxes eat furiously according to the final as 826|Customer#000000826|f5Y UZoE nR|13|23-762-328-7631|8938.14|BUILDING|usly final dinos. final depths wake slyly across the final, bold requests. bl 827|Customer#000000827|fmmKD8aBlF9bFndkYf402cHVrUj8FhGZtHeL|14|24-196-343-6537|5411.20|HOUSEHOLD|om the carefully even requests are slowly along the regular 828|Customer#000000828|0PkG ELwBIgU4AsNcjDc5Q,9Gj|23|33-108-680-4317|7059.68|BUILDING|cial instructions. accounts nod special accounts. slowly even deposits belie 829|Customer#000000829|4oht3f64ZWA1,ACbBP9DNxjjG1CbIqOCK|8|18-404-707-6095|4086.56|MACHINERY|deposits cajole slyly ironic, express packages. asymptotes cajole ruthlessly 830|Customer#000000830|4fNmWCmfys1jUI|23|33-408-548-6806|7775.65|AUTOMOBILE|ironic packages integrate alo 831|Customer#000000831|1DyQwHuUncB7BSnQUiW8ksexb|20|30-495-591-5384|3401.49|AUTOMOBILE|c instructions. fluffily bold requests are furiously quickly ironic deposits. carefully 832|Customer#000000832|eNIy9PatAWlh0|19|29-264-864-8387|-201.49|HOUSEHOLD|the special ideas. finally ironic accounts doubt. slyly regular ideas 833|Customer#000000833|t3qDCo,Yh MZcJFV6PibeY,MUunz|6|16-624-307-4875|-526.14|FURNITURE|uickly final orbits across the blithely express accounts integrate furiously among the final sheaves. blithe 834|Customer#000000834|y4mA, TY6DW4gAY|14|24-637-803-7812|-976.25|MACHINERY|regular dolphins. furiously careful ideas eat beyond the bravely regular packages. slyly unusual deposi 835|Customer#000000835|Sjqs42jh111|21|31-700-242-6347|2106.81|BUILDING|gside of the furiously final packages wake quiet, permanent accounts. unusual packages into the unusual, ironic req 836|Customer#000000836|rayz3tXswDbL4hyIr5SMrEIOTqvmok48e|12|22-181-313-6281|9184.72|FURNITURE|press deposits-- fluffily express accounts cajole carefully according to the daringly even deposits-- furi 837|Customer#000000837|mMNC8wjT5aC655e3|9|19-572-730-8324|2701.29|BUILDING|he bold, pending theodolites. special, bold accounts haggle. blithely regular ideas nag fluffily regular packages 838|Customer#000000838|BxDlz44b56kXjsTpl|5|15-919-192-7197|1605.76|AUTOMOBILE|elets. never express accounts haggle slyly. depths along the requests haggle bl 839|Customer#000000839|Ch8 wZk5UKK45BSq1uZLJL0Z A8UUoms|22|32-435-825-7973|2924.78|HOUSEHOLD| close accounts. unusual pinto beans sleep quickly slyly bold instructions. blithely unusu 840|Customer#000000840|9wA4l70okCEW3GnYQvlIAXetDgD50l|22|32-802-156-1748|6799.00|AUTOMOBILE|usly according to the slyly regular pinto beans. blithely regula 841|Customer#000000841|pSISKRxFIgVasW71fLyaEuODHZQLBfd6E c7jo|9|19-556-695-9964|884.20|FURNITURE|ly. 
slyly special instructions wake. carefully even accounts sleep quickly slyly silent deposits. s 842|Customer#000000842|stRBRwdFCkdN|0|10-272-126-1413|8635.49|FURNITURE| silent pinto beans. bold, pending platelets sleep furiously express theodolites. fluffily express reque 843|Customer#000000843|OEJ7AElvHgEtDpjcrXgLK|14|24-979-584-7928|675.73|HOUSEHOLD|ronic somas. accounts was among the carefully unusual requests. unusual pinto beans are blithely regular depths. s 844|Customer#000000844|1nUzjsH9HS1sPAGLwDIom9IESivLeEh1BvyynjU|11|21-285-410-4046|2954.90|BUILDING|ymptotes. ironic, unusual notornis wake after the ironic, special deposits. blithely fina 845|Customer#000000845|fIq5p0GpDtw6FIsPMdbqNYgfSw3gL9ep|13|23-125-871-9246|6898.89|AUTOMOBILE|deposits above the deposits wake bold instructions. special accounts cajole. ideas along the regu 846|Customer#000000846|EWQcGkx0CU5ZGUNcTV9bBCQ4qnuKsHC|18|28-766-714-9136|7650.73|MACHINERY|s. quickly thin requests against the ironic, bold requests haggle above the final foxe 847|Customer#000000847|oR9VbMI LkR2GUv4MRmylhb|23|33-645-447-3944|4822.08|HOUSEHOLD|haggle carefully. pending, bold foxes play carefully ironic frets: slyly unusual ins 848|Customer#000000848|a nIm5Bk7 RMqMZ6|15|25-655-714-7125|5685.59|BUILDING|st furiously blithely pending packages. deposits across the 849|Customer#000000849|kqRGW2JQtTM,a6 DzJMgcU9U|24|34-718-798-7751|9670.64|MACHINERY|ly silent ideas. fluffily even packages boost carefully fur 850|Customer#000000850|GdhPVh9rkPqSt v17ZuxIlx8c1N8G|22|32-546-203-4000|7576.55|FURNITURE|ly ironic accounts whithout the regular waters are blithely abo 851|Customer#000000851|H9HRTaOz4yI9elrUUiS|9|19-678-843-9850|1144.23|FURNITURE|cross the final packages. blithely ironic excuses serve slyly about the final accounts. fluffily 852|Customer#000000852|fR1Cq8d6m,zJwS3FFrOBV7u|5|15-252-941-6247|1098.98|AUTOMOBILE|unts thrash quickly through the express, regular theodolites. 853|Customer#000000853|U0 9PrwAgWK8AE0GHmnCGtH9BTexWWv87k|2|12-869-161-3468|-444.73|MACHINERY|yly special deposits wake alongside of 854|Customer#000000854|flSR,SlEXwxrDcm3uedfK1Oiq,c9mZI|0|10-544-967-2382|3393.86|AUTOMOBILE|t slyly after the furiously even escapades. carefully regular pa 855|Customer#000000855|i8mS 0Plk2tI8HG1Mnzj8v5RIl5JqmwTSW2Wq|1|11-379-392-2701|3550.49|BUILDING|s. carefully final deposits detect furiously slyly even packages. final, special accounts eat along 856|Customer#000000856|X4U7LH4YtDzephie|15|25-336-316-9641|6988.55|FURNITURE|y bold pinto beans according to the pending foxes 857|Customer#000000857|TFCCMlSEyrItYvEZy068NhrUxJz|5|15-856-649-3113|7537.16|AUTOMOBILE|g to the deposits cajole furiously final deposits. furiously silent dependencies agains 858|Customer#000000858|8qSqHq,Fc378KIPPbTNmaL 0bpAwO|8|18-509-453-8977|-829.37|BUILDING|, special accounts. ironic, pending requests might cajole flu 859|Customer#000000859|yp6dlWnpbzQboP5Xj8W|17|27-379-135-4463|6737.18|BUILDING|uffy foxes boost about the eve 860|Customer#000000860|t0GtEsh39KvInVMH7CDn0xi|12|22-895-319-1388|6836.89|BUILDING| deposits above the blithe requests maintain furiously slyly even packages. slyly regular requests boost. blith 861|Customer#000000861|GvlCZ4fJYbHTOTYczE1iH2k6K9edUyNxWZsl6x|14|24-939-734-5650|-336.80|BUILDING|arefully regular requests unwind slyly 862|Customer#000000862|zkaZ,iOp8t9MBK9T,JIPGL5hmBmK,xjkjHEP|2|12-479-479-6941|2338.92|MACHINERY|yly regular foxes sleep. quickly ironic foxes use furiously express deposits! 
carefully regular 863|Customer#000000863| oaBBxuQLKPawG1yqOc7cyVhOezCy|3|13-801-392-5922|5274.52|AUTOMOBILE|d accounts detect. bold pinto beans against the slyly ironic 864|Customer#000000864|lKcUKtfu0myNF5msATCFVHHXfMg3,cdb|12|22-897-966-6672|8932.76|AUTOMOBILE| instructions detect slyly. never pending dependencies use slyly enticing accounts. slowl 865|Customer#000000865|UIv4t5cbA7j1ftOSatj2EKah3p|9|19-245-153-1471|3016.38|BUILDING|nic deposits print blithely slyly final accounts. special, express accounts are fluffily a 866|Customer#000000866|u,b0GdP7dZg|20|30-436-285-7224|6265.88|FURNITURE|ts. bravely express pinto beans after the blithely bold requests x-ray busy orbits. furiously pending 867|Customer#000000867|KmET9DxfPVs15pIUUWQ|14|24-522-194-4543|5680.23|FURNITURE|t according to the careful packages. regular 868|Customer#000000868|cFG8Fa5h1e uvHJ9pgZAO|22|32-850-421-3334|7616.48|FURNITURE|the requests. regular dinos at the even requests use furiously around the sheaves. slyly final theodolites bo 869|Customer#000000869|PDGU7BXDNXAo0vmo7QtDm,yCMVcD|2|12-379-344-7132|1228.06|HOUSEHOLD|. final packages wake slyly? blithely even deposits haggle carefully regular packages. unusual requests 870|Customer#000000870|6wGPZX1SbXLGtweqx8jK|2|12-953-532-5903|1970.76|BUILDING|es cajole slyly. furiously even asymptotes are furiously regular packages. special, final ideas s 871|Customer#000000871|KcLmBKitbx7NvU7bpu9clIyccxWG|20|30-933-714-8982|-395.89|HOUSEHOLD|ts. blithely silent courts doze. regular atta 872|Customer#000000872|vLP7iNZBK4B,HANFTKabVI3AO Y9O8H|17|27-357-139-7164|-858.61|BUILDING| detect. packages wake slyly express foxes. even deposits ru 873|Customer#000000873|XFnr9C2bANXL|6|16-375-385-5712|-797.38|AUTOMOBILE|lithely ironic, silent forges. furious 874|Customer#000000874|rdo knkGhtvpH6dbLkQon8QsrK1z4LFUpaVDTOn2|0|10-886-494-4217|5391.74|AUTOMOBILE|st the deposits. furiously even requests cajole slyly. bol 875|Customer#000000875|8pQ4YUYox0d|3|13-146-810-5423|-949.28|FURNITURE|ar theodolites snooze slyly. furiously express packages cajole blithely around the carefully r 876|Customer#000000876|NMzgtdV zCuRIMK0vV,DP9ynDd6Z9X3T|20|30-320-481-3076|4367.63|MACHINERY|ronic requests haggle blithely. slowly ironic ideas against the fluffily 877|Customer#000000877|uYO2BKogrHcOcHEgzjlmZAa1QYyR45i8|16|26-400-912-7812|1755.33|MACHINERY|packages cajole carefully. slyly regular pinto beans bo 878|Customer#000000878|hUCH2juGwk4OtThyY8p35Hi0,IfOGA|15|25-465-180-9022|8781.53|FURNITURE|the quickly pending deposits cajole care 879|Customer#000000879|EJcG18hFrS0SPT0yvl1b|2|12-878-466-6505|2235.20|HOUSEHOLD|ntegrate by the carefully special requests. dolphins sleep e 880|Customer#000000880|ogwHmUUFa1QB69pAoYAAoB0rjbdsVpAQ552e5Q,|8|18-763-990-8618|-77.63|FURNITURE|regular requests. regular deposits ar 881|Customer#000000881|XJ94RTR2oXI omeh|4|14-127-261-7876|2141.71|FURNITURE|pinto beans. asymptotes about the slowly even theodolites are pending requests 882|Customer#000000882|hsKaXwHCLD|2|12-437-842-6799|1650.12|HOUSEHOLD|ts. quickly regular packages alongside of the furiously silent theodolites nag slyly after 883|Customer#000000883|qVQ8rWNU5KZYDcS|3|13-526-239-6950|479.96|FURNITURE|uctions are carefully across the regular, regular asymptote 884|Customer#000000884|5KyisO0Tv9ZtlJhzyI7vAe88|21|31-483-489-6172|1601.60|FURNITURE| among the quickly express theodolites. 
accounts sleep furiously along the special, pending depende 885|Customer#000000885|nNUbC73nPBCKLg0|5|15-874-471-4903|-959.94|HOUSEHOLD|sits impress regular deposits. slyly silent excuses grow 886|Customer#000000886|QOTGbGPJjNPD7IrfAILA1da|12|22-771-691-7229|1194.33|AUTOMOBILE|slyly even foxes according to the pending, special accounts use carefully against the courts. regular, 887|Customer#000000887|CoInl1fmf5MjYn15AdA|1|11-136-651-8293|7009.42|MACHINERY|ording to the fluffily regular foxes nag fluffily instructions. thinly careful accounts around the furio 888|Customer#000000888|3vlJp0W8cniEXV|12|22-855-455-1154|6358.04|HOUSEHOLD|sleep carefully quick pinto beans. packages hinder beneath the instructions? ironic, unusual theodoli 889|Customer#000000889|pLvfd7drswfAcH8oh2seEct|13|23-625-369-6714|3635.35|FURNITURE|inal ideas. slowly pending frays are. fluff 890|Customer#000000890|rvsLCrRX9z,IPaaF9kqwvLLxueobbPiH4pz|4|14-938-708-4678|3329.21|BUILDING|ironic accounts cajole ruthlessly above the carefully pending accounts. quickly regular theodolites cajole. iro 891|Customer#000000891|AW0m6YSpe,BNPHvBj|11|21-439-958-7518|6032.18|FURNITURE|sits. final foxes run. quickly pendin 892|Customer#000000892|N KwiRAwIk6KL9WJ6vt0G|9|19-589-784-1249|4799.98|FURNITURE|ses are carefully. quickly regular theodolites cajole. carefully express accounts wake sly 893|Customer#000000893|W6m7LofOZoik72ku|13|23-827-724-6816|8250.87|BUILDING|ets. ironic instructions nag even, regular courts. slyly iro 894|Customer#000000894|5y7m8Ts4kDf|16|26-844-679-1540|4483.42|AUTOMOBILE|carefully regular accounts sleep carefully slyly ironic dependencies. blithely ironic accounts wa 895|Customer#000000895|MDaJr8ekGTS79bS7CH8f1WgWPU|0|10-933-819-2037|904.43|AUTOMOBILE|ggle final packages. slyly regular instructions affix fur 896|Customer#000000896|Tu1ZBNgiSEL9Ns|0|10-425-565-3199|7659.72|AUTOMOBILE|affix carefully unusual requests. furiously fin 897|Customer#000000897|nW1X1Hl9uWycuBEu3F3|6|16-988-776-4568|1999.42|MACHINERY|riously regular ideas sleep into the final, unusual 898|Customer#000000898|JrvrIEzAre,VJzJCi3SEmib1T2,YVXVvOGxaVZwR|3|13-265-738-4361|8137.24|AUTOMOBILE|s haggle around the special dependencies. slyly regular requests are according to the idly sly ideas! fluffily final 899|Customer#000000899|Th5XO5ImeCe9nHFQfQMCkNcmf5WHSeYQaR5TJ|2|12-594-534-9654|8605.53|FURNITURE|rding to the furiously unusual accounts. express, express accounts nag furiously. ide 900|Customer#000000900|kEhE1Y,OoZTDv,Auh d5G ALINN0rND|1|11-422-328-1612|3195.39|BUILDING| packages sleep slyly around the quickly special packages. final accounts are furiously. bol 901|Customer#000000901|QUyXt94YM6Ou6rDqK|12|22-202-667-4372|938.35|HOUSEHOLD| foxes maintain. theodolites sleep above the regular deposits. slyly bold excuses boost careful 902|Customer#000000902|A1hnMyYPSkXf7QgOPD2H|4|14-209-883-5797|5858.48|AUTOMOBILE| sometimes regular epitaphs. furiously regular gifts against 903|Customer#000000903|URTiQupkhObWG39,kZ3CfU|7|17-706-779-2078|509.23|FURNITURE|kly doggedly even instructions. regular, regular accounts along the even, bold packa 904|Customer#000000904|YdJEbNygDU6DrgWXQY6orasq|5|15-940-929-4572|9562.82|BUILDING|nic dolphins alongside of the slyly final ideas run 905|Customer#000000905|f iyVEgCU2lZZPCebx5bGp5|3|13-803-156-2231|-600.73|BUILDING|slyly closely ironic dolphins. blithely ironic asymptotes haggle carefully ironic theodolites. furiously 906|Customer#000000906|1Uavkms1A5z|6|16-594-569-6627|-613.45|HOUSEHOLD| accounts. 
furiously silent ins 907|Customer#000000907|UeVLwnnpccsG1pbQmN7pzD|10|20-501-816-7673|5751.31|HOUSEHOLD|sits haggle quickly above the excuses. slyly ironic packages print furiously. carefully pending reques 908|Customer#000000908|Fa5bchMKUMsaNKOXAiu9pX ME|12|22-814-669-9320|3215.96|AUTOMOBILE|quickly express packages. blithely fina 909|Customer#000000909|b 2X284A5AGpt8skuYwMvTyK68srMKikPst6X|24|34-717-350-9722|5565.58|MACHINERY|se carefully around the special, regular requests. ironic theodolites cajole quickly theodolites. regular, ev 910|Customer#000000910|Qg8TJTCT1mJ9H|9|19-899-463-4292|5794.69|BUILDING|uctions. silent requests after the regular theodolites haggle furiously across the ca 911|Customer#000000911|VS0fia,lJ RvUf68 l4Unv,Vx|13|23-121-746-7339|6364.60|BUILDING|gular foxes! permanently regular packages wake. quickly regular deposits detect blithely. carefully express 912|Customer#000000912|dQA12NEPQK1A5mvD|14|24-348-437-3105|3861.36|FURNITURE|sits use slyly carefully final dependen 913|Customer#000000913|aohNRUjsMbBNE1Ax|14|24-500-946-3315|6935.16|FURNITURE|y blithely final dugouts. accounts wake accounts. asymptotes above the even 914|Customer#000000914| LErnJFeOuDeMgvVzPKHS|5|15-915-758-7313|1230.79|MACHINERY|accounts. slyly final attainments boost slyly express, pen 915|Customer#000000915|mtGezp1BRzcfPVl,1,G8Wl|0|10-452-398-2445|3776.53|AUTOMOBILE|r ideas. final dependencies haggle fluffily. express ideas behin 916|Customer#000000916|9Zo7nkmzJla4Q4PE5mbw|23|33-511-587-8754|130.59|MACHINERY|sual ideas cajole carefully ironic excuses. final platelets use slyly. quickly special instructions nag. bold escapa 917|Customer#000000917|KZ TS0omSFmUQkIOmzlXhwQS,OcF3wzz5|1|11-100-917-5264|3679.65|HOUSEHOLD|arefully fluffy pinto beans. enticingly silent requests affix furiously busily regular ide 918|Customer#000000918|WSptkDdGQQyJ6|19|29-992-318-6425|-155.06|MACHINERY|s boost furiously slyly final dependencies! fluffily regular sheaves nag fluffily. slyly even courts sleep sl 919|Customer#000000919|cHGtsqVvXRiK|9|19-537-180-2200|9774.97|MACHINERY|lar instructions. blithely final deposits haggle furiously bold pinto bean 920|Customer#000000920|oDBFWKIP6M6OlYRPmqCBkVpVSj6uFa|2|12-905-464-3299|990.58|HOUSEHOLD|. ironic, pending frets haggle carefully. ironic, 921|Customer#000000921|XYBVDdDifSYrW gUeDPhITqMjpjtbnc|8|18-765-936-2316|3651.09|HOUSEHOLD|g the furiously unusual theodolites are carefully accounts. slyl 922|Customer#000000922|Az9RFaut7NkPnc5zSD2PwHgVwr4jRzq|7|17-945-916-9648|3869.25|BUILDING|luffily fluffy deposits. packages c 923|Customer#000000923|ckBLWkfYtn2VZXWWqUGbDgbP|11|21-476-142-5086|7462.20|HOUSEHOLD|s. furiously express depths boost. regular requests boost furiously. even, unusual deposits cajole blithely. expre 924|Customer#000000924|yKEtokQYXiuSSh8ZP5|15|25-518-232-9865|4212.53|BUILDING|ake slyly furiously even deposits. express pinto beans are carefully quic 925|Customer#000000925|jn Razhw70hWtHN4iRBWsf1UmrFUPn36Ni562ex|10|20-753-609-6699|1939.39|AUTOMOBILE|ironic accounts cajole carefully. even, expre 926|Customer#000000926| 3b8K2YhfbGDJOpSAUrvq82MnkhHBdwL|16|26-783-803-1329|274.50|AUTOMOBILE|ly quickly unusual foxes. fluffily regular ideas among the regular plat 927|Customer#000000927|Uy7xvOwo4Ndha1tSxDKrQ gXUTdS ,YDqwE2YSO|17|27-332-891-1391|2417.65|MACHINERY|es affix quickly carefully regular platelets. 
slyly special theodolites would sleep furiously after the special, 928|Customer#000000928|A9 UduEb48ffOe27FxMXF|21|31-508-509-6393|8330.70|HOUSEHOLD| instructions cajole fluffily alongside of the blithely regular re 929|Customer#000000929|c dPfaAmmoLjR3m|23|33-487-459-1026|4079.18|MACHINERY|ously silent foxes cajole slyly pe 930|Customer#000000930|84jHYR8u2XvhyT|19|29-562-904-5451|4787.20|FURNITURE|sual ideas according to the furiously regul 931|Customer#000000931|M,lWVafqdRIO, WnAyLLt|0|10-349-498-1720|2409.69|BUILDING|s the carefully regular packages: pending 932|Customer#000000932|HN9Ap0NsJG7Mb8O|13|23-300-708-7927|6553.37|BUILDING|packages boost slyly along the furiously express foxes. ev 933|Customer#000000933|V0SPv2VbrNo7Pj|14|24-623-803-8018|8541.16|MACHINERY| accounts haggle quickly against the blithely even accounts. deposits sleep blithely quickly p 934|Customer#000000934|UMAFCPYfCxn LhawyoEYoU9GZC7TORCX|12|22-119-576-7222|-592.69|AUTOMOBILE|fluffily requests. carefully even ideas snooze above the accounts. blithely bold platelets cajole 935|Customer#000000935|XkVT4jvetY4JV76IAkd91sSp9CqsICE|0|10-724-445-8323|2531.25|MACHINERY| furiously pending ideas. daringly 936|Customer#000000936|hwJIFpxofea6CLEbWZFsWUxNrGFLANp|11|21-100-538-9635|3650.90|MACHINERY| furiously enticingly final foxes. pending requests wake quickly according to the slyly regu 937|Customer#000000937|usrG6ohdPROyd98c9|23|33-869-990-3946|668.51|AUTOMOBILE|ickly alongside of the express, express ideas. sly 938|Customer#000000938|wrq9S3rEW8zXUVCXpa7uKi|12|22-157-321-7590|2584.52|BUILDING| the quickly special accounts are regular patt 939|Customer#000000939|jYaDdfxAlL1aVKPfN|19|29-627-844-1293|8059.51|FURNITURE|ages integrate carefully. sometimes even sheaves wake sometimes unusual ac 940|Customer#000000940|T7ROXBXdajS,vkwy3VuC8wNvA|19|29-958-573-1004|253.59|AUTOMOBILE|ent accounts. slyly even accounts breach across the dolphins. quickly regular pains dazzle carefully. sl 941|Customer#000000941|297w97UgOfpV3pv2QniJUWBKq0BRpcawOfpj|19|29-745-875-1061|2990.18|FURNITURE|e furiously along the slyly bold pains. ironic, ironic foxes affix quickly even deposits. packages caj 942|Customer#000000942|y0OKxFyfXeYuklJDY9RwujlNIC2ETXo9HxZCVhg1|18|28-560-449-7675|5898.17|HOUSEHOLD|lar depths! carefully regular pinto beans after the deposits wake about the packages. final, final instruc 943|Customer#000000943|74dBRGOKLFEQEqCgH2x8WGL9tubtgJAbHu|5|15-483-251-8603|7541.05|FURNITURE| quickly along the silent, express braids. quickly pending packages boost fluffily furiously regular foxes? a 944|Customer#000000944|8lO9F4WK6PKWXiocyE,ojIfPNfVY8|4|14-485-139-5142|9454.78|BUILDING|ilent pinto beans are according to the regular, final somas. idle, bold foxes was. som 945|Customer#000000945|300zKNJ9lg|15|25-542-662-1673|9615.39|BUILDING|. ironic deposits haggle among the carefully regular excuse 946|Customer#000000946|ufHQOmRhQoLSiyfQ|21|31-152-357-9762|2990.73|FURNITURE|ly after the furiously regular asymptotes. foxes play quickly ironic packages. dinos along the silently expre 947|Customer#000000947|JnzDRxqCwjRPyeq70wrxzKWLXI|22|32-838-393-6825|4092.24|BUILDING| blithely final instructions grow furiously ironic requests. furiously special theodolites sleep after 948|Customer#000000948|yxBr7nLGxxwECEk|17|27-125-968-3750|4346.90|AUTOMOBILE|he final, even deposits. furiously special pl 949|Customer#000000949|tOBbBIfhWM lNI3YxTYH8Or8Ki|2|12-391-316-1861|5340.67|AUTOMOBILE|pecial, pending dolphins. slyly final ideas boost quickly. 
carefull 950|Customer#000000950|mi3o6cp47mo8Miqh9d R1XWStjxatcQqHQZW|15|25-849-760-2017|9609.77|FURNITURE|s wake quickly after the carefully brave multipliers. regular excuses wake care 951|Customer#000000951|PnC4Xlds,v|0|10-813-916-8297|7499.47|FURNITURE|ts. permanently special accounts mold quickly. requests boost slyly bo 952|Customer#000000952|jg0YsHARdoULvVtP2vGHLVoAfWKFNz6QdTeAi|24|34-527-524-9172|3710.14|AUTOMOBILE|slyly regular ideas. even theodolite 953|Customer#000000953|5HJQ8UzSSl1PJv28MpZqWvNwUm|22|32-829-961-2870|6292.06|BUILDING| according to the carefully bold dependencies use boldly slyly express deposits. car 954|Customer#000000954|rr0Gz2iuYNuLgrIcLCdi5Zr2SnY8,wpzD9A|19|29-639-437-3775|5740.00|FURNITURE| haggle furiously. furiously ironic 955|Customer#000000955|FIis0dJhR5DwVCLy|0|10-918-863-8880|138.31|AUTOMOBILE|ts cajole quickly according to the pending, unusual dolphins. special, ironic c 956|Customer#000000956|aI12bsLSd1Y4dIx2Me5BLbGDCZPn |20|30-627-947-8311|1587.14|HOUSEHOLD|eans. regular, regular foxes wake furiously carefully even pinto beans. furiously r 957|Customer#000000957|9F8p,XsLLxyiZ3b8NN|19|29-941-553-8245|9076.68|FURNITURE|al excuses boost to the ideas. unusual requests are across the slyly 958|Customer#000000958|OrOUBBV7NlzVFXtuSOECmQFOkw8r|5|15-455-954-2914|1791.65|FURNITURE|s deposits. blithely even packages sleep carefully ironic deposits. quickly ironic accou 959|Customer#000000959|O FdrkZxCx PK|4|14-546-329-6898|3266.14|FURNITURE|press accounts wake busily after the furiously final theodolites: fluffily final deposits above the carefully iro 960|Customer#000000960|meekxaMlz5c1uE3wV7a,u h WcU,1OJz|4|14-664-604-8633|1932.59|HOUSEHOLD|s are carefully after the permanent foxes. fluffily 961|Customer#000000961|5,81YDLFuRR47KKzv8GXdmi3zyP37PlPn|12|22-989-463-6089|6963.68|MACHINERY|e final requests: busily final accounts believe a 962|Customer#000000962|lDp572JGdrL34kB YOQuC|4|14-792-232-1645|7557.00|BUILDING|. carefully brave foxes wake furiously final orbits. furiously pending theodolites along the bold the 963|Customer#000000963|40EdWkddaWhQyiQ6FfUo8VOZwgb MetJ2jV,QPL|13|23-921-332-7635|2557.49|HOUSEHOLD|ns snooze slyly daringly pending instructions. regular requests above the regular, even requests sleep blithely fin 964|Customer#000000964|ZnBNwMqvW7y3FSn6025pwkzgTDfsG2A|12|22-974-772-2802|4756.58|FURNITURE|al accounts haggle blithely! regular platelets haggle blit 965|Customer#000000965|UigBc,9d1iLtQAVatnWACSDc9mNx0mYl|2|12-893-735-6415|4768.80|MACHINERY|lly enticing pinto beans haggle requests. evenly express grouch 966|Customer#000000966|V9c8SR8WK7wEd|8|18-539-933-5176|1283.26|MACHINERY|ts. regular accounts cajole about the ideas. slyly unusual idea 967|Customer#000000967|xKdAl6HSWvAmptzHgQHX3cMmxZDhfyrMqx|23|33-687-917-3598|5710.41|BUILDING|iously quickly silent ideas. blithely pending pinto beans except t 968|Customer#000000968|eu 5FA1WHs9jq0pcdlVVA|0|10-470-740-2657|8921.97|BUILDING|ic foxes haggle slyly according to the dependencies. even, regular acc 969|Customer#000000969|N9NSGc0Bj6FlSw3d9k GI7VAd1jW|8|18-148-790-2039|8601.63|BUILDING|lithely according to the fluffily silent patterns. 
furiously fin 970|Customer#000000970|DXEgz7JHSFW401|14|24-266-486-1615|3623.60|MACHINERY|ronic requests sleep slyly at the slyl 971|Customer#000000971|z29DUY Utsi6mWKI|1|11-256-718-6928|3914.88|AUTOMOBILE|ular theodolites haggle carefully: f 972|Customer#000000972|ImKvHrrNc3rfWejksbCPyIQ|4|14-405-229-6174|4453.46|MACHINERY|deposits: slyly regular deposits among the furiously bold asymptotes are furiously along the slyly even 973|Customer#000000973|FT4jTOdVCpmYW|0|10-749-928-5415|3229.18|FURNITURE|sly special requests integrate carefully along the special foxes. regular, silent 974|Customer#000000974|7RcY6fOjTMbbOnVaFV,,6Dk5FIiHGrCpwXJNI|20|30-473-948-7149|7826.10|HOUSEHOLD|uickly. regular, ironic waters sleep blithely. blithely regular foxes are blithel 975|Customer#000000975|qPFceGMB0xDjY6BhTGdIxe2Z F4MVuKIXHqQ1|16|26-428-220-2070|4364.06|FURNITURE|furiously express packages. even, bold sheaves haggle fluffily. slyly ironic accounts wake slyly across the quickl 976|Customer#000000976|I78UJ2ks3sbcd0c2NQ7aH|19|29-436-660-4732|7772.85|BUILDING|special requests wake carefully regular somas. special theodolites wake regular, unusual pinto beans. furi 977|Customer#000000977|JcKxPwHPM7akg5IiCs,ZVAfO73KE3|2|12-602-807-5055|311.00|FURNITURE|lly against the busily unusual requests! busily even requests haggle blit 978|Customer#000000978|zpvQ6LYE89Inl40Yz,7NJ|24|34-261-243-2624|-50.51|BUILDING|ely unusual packages nag fluffily above the quickly regular requests. regular accounts run. blithely 979|Customer#000000979|DvzUxD35ohKtUnalLGO9kDsCzZxtfcjO|18|28-113-574-4962|7055.13|MACHINERY|. accounts wake carefully special accounts. de 980|Customer#000000980|UsrigSqZBnmbXhXNR6ibloq60qHBUj42kwX|1|11-572-281-8212|4586.33|AUTOMOBILE|st have to integrate above the regularly regular accounts. regular, final waters breach blithely 981|Customer#000000981|pM4DXkl6Y,7S6a6jlJf8dZogp9QOdv|18|28-202-962-8429|3383.26|MACHINERY|ts doze quickly. platelets are quickly agains 982|Customer#000000982|EN9aD5Xgh2q|23|33-460-986-9418|1437.55|FURNITURE|lithely express requests along the carefully pendi 983|Customer#000000983|9jgCxRufEbwbGwW0PmG1RDIOwCUYlHs8z|7|17-283-610-6143|2902.95|BUILDING|mas cajole furiously across the fluffily special deposits. pending foxes sleep regular, silent packages. ironic pla 984|Customer#000000984|fgAELFO9RS 6q9|11|21-247-588-5181|2811.97|AUTOMOBILE|t daringly against the even foxes. furiously silent forges sleep furiously busily ruthless requests. exp 985|Customer#000000985|0uAMe1ICB,wts4STD4eLL|23|33-408-194-5161|2701.21|HOUSEHOLD|gular deposits among the thin instructions haggle since the furiously final packages. ideas use. regular i 986|Customer#000000986|Cei2QidV0GC3OQWfJTNHLYPd|1|11-537-225-3800|178.73|BUILDING|cies impress blithely furiously final package 987|Customer#000000987|SO 0UTuH26eduKI|8|18-887-394-2506|9850.64|AUTOMOBILE|eposits. even packages cajole boldly bold ideas. even, regular accounts haggle. packages among 988|Customer#000000988|3BNYKEUyMbzfb40SEr 8OTb|3|13-862-722-3298|7746.97|FURNITURE|he courts. carefully silent courts impress. carefully close ideas run slyly pinto beans. even, ironic accounts are a 989|Customer#000000989|pKuixWbH6XZFJY0uZjGi0oRaH1Xl|10|20-646-819-6827|5453.74|AUTOMOBILE| accounts. fluffily regular requests use carefully after the furiously special instructions! 
slyly 990|Customer#000000990|uF idg4bq8Ij7ghxJ5KuTnU8w|0|10-403-137-1064|6988.49|BUILDING|dolites for the fluffily bold 991|Customer#000000991|dK1Gzw1glT|21|31-977-971-6175|6533.53|AUTOMOBILE|n somas. slyly ironic instructions solve quickly at the final dolphins. requests cajole carefully 992|Customer#000000992|Vbi1NGfPeKw,XU|5|15-262-535-3924|5027.75|MACHINERY| across the regular, pending requests. slyly ironic accounts wake furiously about the pending, regular 993|Customer#000000993|56K JjC bMcgbXlJA4KI Icu uggsRoviMQm,F|7|17-494-757-5759|8421.87|FURNITURE| according to the slyly bold accounts. 994|Customer#000000994|sZjdeW4LT9EKopmlv3M Xbnbe3gXQ9JkoxPv |16|26-638-159-5836|7461.27|HOUSEHOLD|ake furiously across the quickly idl 995|Customer#000000995|5tCSAsm4qL5OvHdRZsiwSlVTdqPZws3f|13|23-272-700-1002|-341.79|BUILDING|wake slyly fluffily unusual requests. stealthily regular pinto beans are along the slyly final dugouts. slyly 996|Customer#000000996|yjrSjcG z0Rm5PYrVMFTrU pFRMw|22|32-902-625-1946|6450.78|AUTOMOBILE|hely against the final, brave asymptotes. final ideas haggle slyly bold pinto beans. slyly unusu 997|Customer#000000997|85KMCT2D2RIGayG99ozpk85ppHE6i9gJE|17|27-218-645-5219|367.03|FURNITURE|ven asymptotes. carefully regular packages are blithely. special requests according to the care 998|Customer#000000998|fHRMFCGphazw9KvR1,EmNOUBG|17|27-951-935-6514|6679.20|MACHINERY|ular courts nag quickly unusual, sly pinto beans. special foxes thrash blithely up the foxes. 999|Customer#000000999|r2SFEmfqrRu3M7ouE4zvI2ApOAtD|16|26-876-956-1302|403.89|BUILDING|riously special instructions ac 1000|Customer#000001000|hzM1shTwWlLuk|22|32-730-275-2976|-881.70|BUILDING| closely against the slyly special deposits. regular, ironic p citus-7.0.3/src/test/regress/data/customer.2.data000066400000000000000000004720001317107136600216450ustar00rootroot000000000000006001|Customer#000006001|e,jhgkgWnN|3|13-247-356-9056|9139.66|FURNITURE|eans haggle blithely. requests hinder furiously alongside of the slyly final foxes. carefully even instructions wake 6002|Customer#000006002|ZnHuM0Y9nONIKR5TFrHGuJnmxq9GLULVLitL8|20|30-330-985-9161|4400.25|HOUSEHOLD|final instructions. quickly regular epitaphs are according to the sl 6003|Customer#000006003|TD5JS9ULaDBUHIy5J7FfT|16|26-402-596-3552|-30.04|MACHINERY|yly furiously regular instructions. requests sleep about the carefully special accounts. 6004|Customer#000006004|Oo0mrAFH,KrRuF0eCxbklLZC|4|14-523-907-2485|-987.86|FURNITURE|the regular theodolites. carefully silent deposits hagg 6005|Customer#000006005|Qhudh0ioaEafuJ Rfr6DAUqtKkZ33nby|0|10-930-477-2232|4911.63|MACHINERY|p fluffily fluffily pending notornis. requests ser 6006|Customer#000006006|5VfR3EWqbrt0zdkyy8 |8|18-760-236-6029|3751.22|FURNITURE| instructions wake about the slyly ironic deposits! regular excuses wake. furiously pendin 6007|Customer#000006007|eFTUQxFkePYCDkt2YiTlP1oUNww1lUPA|16|26-238-936-7519|3752.79|MACHINERY|quickly special pinto beans serve carefully. blithely ironic requests cajole 6008|Customer#000006008|S1yv Nmjxkhb2yQU7sEX ,poB5f0ijkeRWsY|2|12-448-135-5947|8049.94|FURNITURE|uses boost carefully throughout the blithely express theodolites. furiously i 6009|Customer#000006009|tJ3M102q9VHZ0jX|15|25-821-240-1667|4357.29|AUTOMOBILE|endencies. carefully silent attainments cajole according to the ironic multipliers. final dependencies integr 6010|Customer#000006010|Oyw3CVbhnakZby zVlRd9jZPUygik65nK6UCg|23|33-702-784-8705|3887.84|HOUSEHOLD|uickly final pinto beans kindle instructions. 
slyly regular instructions sleep carefully. slyly ironic 6011|Customer#000006011|wP2fviXCIC9E6kZ2StshJetAa9x0vNMBZ0I|5|15-600-545-9353|895.26|HOUSEHOLD|en requests wake against the fur 6012|Customer#000006012|mSI9TkywG,fJgIV,mr24o,JupCD36mw0|14|24-197-638-5900|7639.44|FURNITURE|ourts against the slyly final ideas sleep quickly pint 6013|Customer#000006013|NOTH5 kyVefl4bJA|20|30-137-646-6576|2051.61|MACHINERY|nal packages are quickly regular, even instructions. even, ironic deposits sleep; slyly 6014|Customer#000006014|ZB9wS3fmWrQi8cZil6mzcKYo0PQLXrKw2ZM Rv5|24|34-570-696-6422|5302.25|BUILDING|y blithely express accounts. furiousl 6015|Customer#000006015|AAc,PFYt20otDa85nT6wDwOVJ|21|31-376-506-1388|1950.33|FURNITURE|riously theodolites; fluffily enticing 6016|Customer#000006016|Of93IVKgMknchAkEf16|22|32-695-578-4478|8103.98|BUILDING|requests cajole packages: quickly express accoun 6017|Customer#000006017|013MNSbWeJML ohf|23|33-579-732-1955|-402.46|FURNITURE|iously. furiously bold theodolites cajole blithely regular 6018|Customer#000006018|XNLYh8cfpNGERz1O|0|10-261-311-5038|9806.55|AUTOMOBILE|ng the blithely pending platelets detect s 6019|Customer#000006019|5,lbJvlCV l8V|18|28-475-771-1730|3695.26|MACHINERY|carefully. furiously pending p 6020|Customer#000006020|SIdmPkG3HQKTwU4p5hGrJttj,km|16|26-990-181-4165|9256.73|BUILDING|cial deposits x-ray after the blithely regular requests. final requests 6021|Customer#000006021|r5k7syDoG41,Uhtpoii9Hp,oxJKKME9|21|31-358-419-1438|3002.73|AUTOMOBILE|ly pending accounts about the blithely ironic package 6022|Customer#000006022|z9Gbi2AkLS0s6HNIDyerHNGE7V98G|10|20-331-281-6929|7892.25|BUILDING|tructions detect among the bold, regular accounts. ironic ideas inte 6023|Customer#000006023|uz9mw3nZBoJ0j2zkMueLynANFVN|21|31-825-465-7964|5150.58|AUTOMOBILE| must have to sleep furiously at the pending, regular accounts. fluffily unusual deposits nag along the carefull 6024|Customer#000006024|LZ80EMXjRz7JULb75V5n3qg|3|13-664-584-4042|4732.68|AUTOMOBILE|cajole blithely about the furiously pending theodolit 6025|Customer#000006025|cawvuhdgRy KaqlhXcWab y31A37F8IPT|14|24-233-488-3262|-815.07|BUILDING| final packages boost slyly above the blithely regular requests. slyly ir 6026|Customer#000006026|ius6eByivZ4BVYaESvB9p0bWsZ|12|22-822-409-2653|4993.97|AUTOMOBILE|xpress requests. carefully unusual accounts cajole blithely even ideas. carefully regular deposits are quickly iro 6027|Customer#000006027|hST46enQLI8TzdOmvA8J|7|17-178-716-9690|1657.83|HOUSEHOLD|deposits sleep slyly regular deposits. furiously ironic accounts boost carefully according to the carefully regular 6028|Customer#000006028|mL,IJFVI1MA9|0|10-424-273-2141|9173.51|MACHINERY|ckly ironic theodolites cajole after the quickly regular ide 6029|Customer#000006029|Bhz6SV,wpp|2|12-230-673-1285|1247.91|FURNITURE|ites wake blithely final, unusual accounts? quietly final foxes hinder. furiously express theodoli 6030|Customer#000006030|thigRH2AJJ5ay1akT2MoIWgGS3iH|0|10-385-880-2306|5585.25|BUILDING|uickly final deposits: even packages after the deposits wake regularly under the blithely ironic theodolites. furio 6031|Customer#000006031|ovto GrnDjTqwxdA0L|24|34-257-441-6562|6268.52|BUILDING|oss the carefully even deposits. furiously ironic deposits haggle furiously furio 6032|Customer#000006032|U9Y1LGOIyhU1r|24|34-124-269-1297|6098.48|AUTOMOBILE|its. pending excuses detect among the carefully ironic packages. carefully final gifts are. 
ironic in 6033|Customer#000006033|CY9eVCYfqrI1WlamyV,2h2fAuwRof4Vi|20|30-194-447-2847|8203.11|HOUSEHOLD| regular deposits wake carefully final accounts. regular, thin 6034|Customer#000006034|9C1trxxcizUyNm,rADOUt5UB|15|25-369-506-8757|4765.57|HOUSEHOLD|ding, even gifts about the regular, final requests sleep fluffily by the carefully silent dolphins. even, s 6035|Customer#000006035|pzz5CQ3wWzqmawu811Zfm|2|12-929-596-2859|1907.14|FURNITURE|es sleep fluffily besides the furiously qui 6036|Customer#000006036|oArQJruOuhJxRvLWjeDX3 h6CspeJwV7U6by|0|10-960-488-8572|8282.11|HOUSEHOLD| after the carefully even theodolites. furiously unusual pinto beans among the ironic theodolites k 6037|Customer#000006037|SnqQMacIw1UlhWOZib7iEU|6|16-600-315-6012|1463.98|AUTOMOBILE|encies cajole carefully according to the final, pending packages. regular, ironic packages b 6038|Customer#000006038| bIKiOVO1JVZiEtTF4T7HxjimXgL5|9|19-485-680-7672|7087.20|AUTOMOBILE|quests are furiously besides the blithely ironic requests. slow asymptotes cajole about the furiously 6039|Customer#000006039|a5rSHRtBDeEFAd8f,suDA8ve|5|15-453-414-1942|4616.61|HOUSEHOLD| pending instructions must have to sleep carefully 6040|Customer#000006040|al9P9X5hZsq4GVzXaf45nmYGre S|18|28-602-749-7354|6309.48|BUILDING|nstructions along the blithely final foxes solve according to t 6041|Customer#000006041|BFIr9VI8QcoMd4mjXtXH|22|32-422-646-4800|1445.52|BUILDING|ests boost furiously blithely ironic pinto beans. daringly ironic pac 6042|Customer#000006042|5HJbh0QShvTfZretlUSjRk|0|10-526-919-1776|4839.54|FURNITURE| express requests boost carefully. deposits boost blithely quickly ironic deposits. ironic accounts x 6043|Customer#000006043|04Ln0amBet|9|19-335-750-5425|2143.97|AUTOMOBILE| asymptotes haggle above the final gifts. ironic accounts sleep. pending, regu 6044|Customer#000006044|DOpmBtl1j5gVB36A57vtsI6Z|10|20-311-967-3855|6649.41|HOUSEHOLD|thely special packages. express pinto beans sleep about the final, unu 6045|Customer#000006045|FH8AwQ54fTohmWWZEFluKjNP|0|10-579-571-4767|5587.35|HOUSEHOLD| instructions. special accounts hinder quickly unusual depend 6046|Customer#000006046|3pqu0wwA NYnUQAx|1|11-901-156-9794|8566.39|FURNITURE|telets sleep around the accounts. busily special theodolites along the fluffily silent deposits use fu 6047|Customer#000006047|WJIOtP1XJSsFYl7yCfPo2|4|14-601-562-8081|5439.08|FURNITURE|y express, ironic deposits. final deposits nag slyly slyly bold excuses; blithely regular accounts affix slyly. ir 6048|Customer#000006048|DNtLkGdov3MnFfAGa40Pi6c6Y1r5j NF|24|34-996-904-3634|2634.46|AUTOMOBILE| sleep blithely across the regular deposits. accounts wake. slyly ironic gifts above th 6049|Customer#000006049|b1Wlcr2IYgYvlZWW3V,JNkpDg,Msiwmm9|23|33-822-276-9791|357.21|AUTOMOBILE| instructions. instructions boost fu 6050|Customer#000006050|orhXeGjD4RRH5QU|0|10-472-892-7595|6612.70|AUTOMOBILE|ts cajole quickly furiously express packages. evenly final theodolites use quickly 6051|Customer#000006051|k7txuddD z|4|14-674-349-1580|4706.51|MACHINERY|platelets. slyly bold packages sleep permanently along the c 6052|Customer#000006052|5R8 L5TJey,G7Ta7YGFnezUXaI4eIZu|21|31-792-184-4320|-624.49|FURNITURE|gular, final dolphins. sauternes against the slyly final dependencies cajol 6053|Customer#000006053|Yzq6dtXjEfbY PEVyUkJ|15|25-363-543-1931|7102.91|MACHINERY|r, ironic foxes. packages wake 6054|Customer#000006054|bXUQWTwB29ox1ganSx,QcxemZ|6|16-472-362-3917|4601.79|HOUSEHOLD|uriously after the silent deposits. 
blithely express instructions integrate carefully alongsi 6055|Customer#000006055|FnwMVA8yGuU SgVGARNz22st,8|9|19-663-939-5531|-631.98|FURNITURE|g to the final, regular requests haggle furiously quick foxes. regular requests p 6056|Customer#000006056|nyg076hIRk|2|12-687-122-1788|2218.75|AUTOMOBILE|se accounts. ironic pearls are 6057|Customer#000006057|R4Bt0gWYUrIshQjPi7UaKDrFs|20|30-448-516-3117|9526.94|FURNITURE|ly regular ideas haggle furiously enticingly final packages. regular, bold dolphins cajole slyly 6058|Customer#000006058|noUsJ1sGwBlm9KfvZ|21|31-818-798-9205|1401.74|MACHINERY|y ironic requests. silent deposits grow fluffily. regular, ironic pearls among the dolphins integrate even senti 6059|Customer#000006059|hbYI6RCnsefLu,WWFcArgxqy5xQdygs8tv9|15|25-133-925-3453|6051.11|BUILDING|s detect slyly. even pinto beans across the quickly ironic forges use furiously slyly regular courts. furiou 6060|Customer#000006060|G ziIQMNWzroTjGIHiWM 0pG|15|25-174-729-5653|4030.26|BUILDING|express, regular multipliers haggle after the fluffily final requests. regular excuses ar 6061|Customer#000006061|FRN1 emJhjvn9yjnKN9HjoNG,X aW3kdDmBA|15|25-551-122-4107|7532.06|MACHINERY|during the blithely express requests. quickly ironic frays across the blithe 6062|Customer#000006062|UtdDwozzDvfMtgj3W|3|13-756-700-4918|1370.35|FURNITURE|ourts-- regular packages hang furious 6063|Customer#000006063|P8McFZy0XZk2tO0fd6e|13|23-185-189-2252|6385.75|BUILDING|, special foxes. regular asymptotes sleep slyly 6064|Customer#000006064|,izgs4ldAZyamftqvB|20|30-205-718-3890|7335.40|AUTOMOBILE|theodolites. carefully pending packages sleep fluffily blithe 6065|Customer#000006065|Gb72LtD9HVv9slwDCWiufPxBYW6qVgnfe|16|26-150-752-2624|7191.82|HOUSEHOLD| alongside of the furiously unusual instr 6066|Customer#000006066|s,Txrg7qYwMSykuMvxhA26sOs1KNm8t|6|16-166-390-9922|2967.12|BUILDING|arefully blithely regular courts. furiously express request 6067|Customer#000006067|wktXPx5LSL5ZSP0shdmYDauSiNMRiAHaaKVJ|4|14-386-119-4898|3407.75|AUTOMOBILE|inal dependencies nag blithely car 6068|Customer#000006068|RCTOrn8 qmIVd6qGMMLmjWCvz7|4|14-138-821-9164|6028.37|FURNITURE|arefully even instructions sleep slyly regular, silent pinto b 6069|Customer#000006069|lP56RbvJxUsPWR7AJ3nGdcXjPYhDzw|8|18-520-115-6373|6775.83|FURNITURE|le furiously carefully brave ideas. f 6070|Customer#000006070|0Pycomq4a6KgwcZYyaH5g6t5hdLgsKF|14|24-195-934-6766|7430.40|AUTOMOBILE|even asymptotes wake about the furiously express accounts. special platelets between 6071|Customer#000006071|bSImC9SNAZBpJfS|3|13-976-555-2947|8764.04|AUTOMOBILE|after the dependencies eat carefully against the blithely regular pinto beans. 6072|Customer#000006072|dJTs qvHOtIuFDNIsyGkzkuz|16|26-754-599-4979|5312.01|MACHINERY|theodolites nag evenly among the bold excuses. ironic accounts sleep along 6073|Customer#000006073|pfb,24MPwsE8,0LUyO|9|19-689-677-5011|1045.91|HOUSEHOLD|. furiously regular ideas are regularly ironic theodolites. slyly even packages sleep slyly fluffily express p 6074|Customer#000006074|nrcHwNPzOQ x|8|18-837-412-1792|9486.43|AUTOMOBILE|olve furiously about the furiously bold deposits. slyl 6075|Customer#000006075|DOsAAUjRKdmOtvgEMibSALCKR M5Nj|1|11-593-566-6284|2455.27|FURNITURE|s the furiously ironic requests a 6076|Customer#000006076|At1D9HHVnICLHSzLkebTdUFubbpiizOsfsnT1|7|17-850-828-6128|2626.39|FURNITURE| pending deposits solve. slowly bold ideas haggle fluffily packages. 
blithely 6077|Customer#000006077|uhQEW4hX BiYzeK vM4p5nmxPwa|9|19-473-215-4783|7755.29|MACHINERY|e the pinto beans. final somas affix slyly: fluffily special instructions are careful 6078|Customer#000006078|SBe5gejYZc lZ|9|19-127-604-4037|2143.99|MACHINERY|o beans. quickly ironic accounts 6079|Customer#000006079|JrsyZ3aOo7pyfy1Nfcu7wv2y9MssX9Fl2j|24|34-956-792-4754|7035.43|AUTOMOBILE|pinto beans cajole carefully special pinto beans. bold packages was across the carefully ironic ideas. even, regular 6080|Customer#000006080|TO8wL2kHE7DAZOlyh7U4aW|2|12-853-465-2990|3557.21|FURNITURE|as sleep carefully along the ironic deposits. furiously dogged deposits among the fluffily regular frets sleep blit 6081|Customer#000006081|g9sWPKJZrAa2yRmoTwz|14|24-898-630-6492|-956.69|MACHINERY| courts affix around the furiously stealthy deposits. packages wake evenly 6082|Customer#000006082|A VQviGoD71daDZOZv|11|21-155-663-3196|898.19|BUILDING|usual pinto beans after the carefully pending fox 6083|Customer#000006083|Zk4jDSMrlCCH,MIpgre hsHn8XZJn62|11|21-852-430-5106|1845.68|AUTOMOBILE|ial accounts wake slyly. furiously unusual depths cajole blithely furiously 6084|Customer#000006084|E0NXucb5MGwfV5BnCW1qPSpeMoqf|15|25-403-768-3899|9300.11|AUTOMOBILE|s are carefully. doggedly pending foxes boost carefully pending, ironic accounts. closely bold 6085|Customer#000006085|N280OwVf0BbPajbB89YmFrHEihif|12|22-247-826-9697|6041.35|MACHINERY|slyly permanent courts was blithely packages. furiously b 6086|Customer#000006086|kBBqMZHke10dGuoDBvzv|20|30-613-894-3303|5029.81|MACHINERY|ays? dugouts sleep carefully around the slyly regular attainments. iron 6087|Customer#000006087|HZvlNvQ41HBORsewqAXWc1i5z,V|17|27-176-985-1293|9991.42|FURNITURE|fluffily. furiously final deposits believe about the furiously final excuses. slyly ironic accoun 6088|Customer#000006088|yxbYU1ogtMNgiSUAFNsA5 aYG|24|34-944-696-5888|8895.30|MACHINERY|thely. packages at the final packages use carefully around the regular, even deposits. 6089|Customer#000006089|1xUlqn0cOHlIKlRHeBz8Mfn|18|28-312-896-5088|458.27|HOUSEHOLD|es. accounts cajole slyly ironic c 6090|Customer#000006090|NqM,WhTKaDb4uzuNsmHPiSvWFsEGHJA|8|18-417-809-3338|61.15|HOUSEHOLD| special sheaves. quickly final requests are. quickly fina 6091|Customer#000006091|xqRatn5FncOIxHo9oZ9lBbPrJQ0e aoISrI|1|11-922-360-2419|2753.48|MACHINERY|quests. furiously ironic requests are above the carefully special packages. 6092|Customer#000006092|Z,XTnww1rI6aWDjmfk7w0p8kxLcqCIfD7h8|5|15-941-440-2662|9761.72|FURNITURE| foxes are above the ironic excuses. special instructions dazzle blithely about the regular asymptotes. fur 6093|Customer#000006093|BOuFVeQZ,E|8|18-806-815-4057|9343.42|AUTOMOBILE|gedly regular pinto beans. express pinto beans nag slyly. regular ideas wake thi 6094|Customer#000006094|X7kFGROKaQpglhpv20A|12|22-421-694-6690|7983.44|FURNITURE|to beans boost. carefully special pinto beans wake alongside of the slyly regular accounts. furiously regular pinto 6095|Customer#000006095|qP9OXihxntW60ybk|17|27-151-511-8515|8802.47|BUILDING|thely. furiously ironic request 6096|Customer#000006096|wy UjJzsFOKGCMfqtaHK5NI|1|11-517-930-3964|2237.40|HOUSEHOLD|silent excuses detect slyly silent accounts. slyly final instructions nag furiously. fur 6097|Customer#000006097|KCW2zgHZRm36j90QVmQ9b5Ml|2|12-742-944-8759|3963.90|MACHINERY|ding to the carefully silent accounts. special excuses wake. furiously final foxes about t 6098|Customer#000006098|c Bp7dnCuklVVNOM7Gc11k2gCRv|3|13-223-820-6932|3312.28|HOUSEHOLD|ts! 
carefully final foxes haggle slyly: slyly express tithes wake furiously about the final, final deposits. 6099|Customer#000006099|Zw,1lluCeZSlbwLKZo2i37|23|33-797-420-9100|7413.61|HOUSEHOLD| deposits are slyly pinto beans. carefully express packages wake furiously. even deposits 6100|Customer#000006100|BFnRAdDK0EMZAr2zS3js6,Jsh|14|24-425-432-3048|1569.09|AUTOMOBILE|olphins after the requests wake furiously silent requests. quickly ironic dependencies are furiously around the reg 6101|Customer#000006101|KI6M7iuiqLHAb|1|11-152-108-8875|2651.06|AUTOMOBILE|efully even requests. ironic, unusual deposits wake furiously. blithely pending req 6102|Customer#000006102|s5ViRaDEjv2nKox7c6Y|3|13-533-564-6439|4257.41|HOUSEHOLD|cial asymptotes are slyly regular sauternes-- slyly ironic deposits integrate regular, final theodolites-- furi 6103|Customer#000006103|YYS9 3AI8tkKqUqMtWssXA44M|4|14-405-808-5807|8364.07|HOUSEHOLD| pinto beans sleep. carefully regular sentiments sleep blithely. final packages cajole 6104|Customer#000006104|jcKuAbr5WFuVxd2xt,4KqyvQ7kz|15|25-250-924-1873|4010.86|MACHINERY| final packages sleep above the slyly bold 6105|Customer#000006105|EfQPHmIraBqSvNY15deoIPsc|4|14-506-543-9059|506.68|BUILDING|he unusual deposits. furiously even a 6106|Customer#000006106|nfroWgkspby66r6BkrvQCMX,f,i4Myrykv3k pa|7|17-151-647-7940|-337.05|FURNITURE| special excuses cajole. carefully bold req 6107|Customer#000006107|,4I3WYJQN9ZAWX5I5J54dJna|19|29-717-849-8757|9064.29|FURNITURE|long the excuses. slyly special instructions unwind slyly beyond the regular, final pinto beans. requests use blit 6108|Customer#000006108|0SPg7CHlEjunN0dHuKF932w|10|20-564-384-5926|2570.78|BUILDING|asymptotes sleep carefully. fluffily regular pinto 6109|Customer#000006109|gpgBDs5krmN|7|17-896-737-8894|8151.07|HOUSEHOLD|uriously pending asymptotes. fluffily bold pinto beans are above the 6110|Customer#000006110|FAxMqfHBG36oB|8|18-793-720-8574|-888.62|BUILDING|iously final ideas. furiously regular packages nag. quickly regular pinto beans above the regular theo 6111|Customer#000006111|CLSQ2jkX5hmEPyKFJkKxemAH|18|28-252-831-5956|857.85|HOUSEHOLD|ly final deposits. slyly special accounts against the furious 6112|Customer#000006112|wPzFnLHJ Eg3EzH |24|34-654-913-4569|5894.96|HOUSEHOLD| final, dogged packages wake furiously slyly regular platelets. fluffily silent pinto bea 6113|Customer#000006113|YV9LsSwnYEjuQENdPBp8Sfl|18|28-599-409-3070|8646.86|MACHINERY| blithely at the blithely ironic deposits. carefully final ideas cajole fluffily. unusual pa 6114|Customer#000006114|87W0S9L9zjjL,lPnNxpXEJKSjaOY|12|22-957-665-4649|4860.40|HOUSEHOLD|haggle slyly above the slyly express deposits. slyly even ideas sleep fluffily above the slyly ironic accoun 6115|Customer#000006115|r3MSiTaTNNKpIXe1x,RDRqPx1s|21|31-465-527-1774|821.94|MACHINERY|er the slyly express platelets affix carefully above the careful requests. pending 6116|Customer#000006116|Q1HHnkKguo6sSYNWgNEk2tTeEe6db3s6osO|4|14-743-932-4071|7234.12|BUILDING|he blithely express requests haggle furiously regular, final excuses; carefully regular pinto beans unwind ca 6117|Customer#000006117|4CHkWzZ3fT|24|34-380-887-8865|9826.19|FURNITURE|ly final packages. fluffily bold escapades sleep fluffily after the carefully express 6118|Customer#000006118|GZJ6Dctbr0USrtx|11|21-581-376-6339|3810.27|HOUSEHOLD|e fluffily special packages integrate quickly final dependencies. 
regular 6119|Customer#000006119|p8I8iRWN3HIQZPoMD5y1qogRuycR7VnAiEyOZff9|1|11-137-505-5251|6246.11|FURNITURE|wake blithely fluffily ironic theodolites. bold deposits cajole about the blithely fin 6120|Customer#000006120|ZTjNH4Si5g9pTrul2fRGaHEB|6|16-141-186-1813|3351.67|HOUSEHOLD| excuses nag fluffily ironic requests. slyly 6121|Customer#000006121|uAXqrguis17T1SGF9Od0sJ|13|23-689-704-1607|9531.41|FURNITURE|ss packages are furiously final depths-- final, ev 6122|Customer#000006122|48JzmrY5BZiS2C5Ts,wgJkRR7z5SOkJhg7|18|28-417-497-5317|-485.36|BUILDING|yly express accounts. slyly regular f 6123|Customer#000006123|rriyD1ssl4dg,ur1WRPBG|19|29-912-115-6013|2397.49|BUILDING|ogged deposits breach blithely. blithely express theodolites use. packages about the fina 6124|Customer#000006124|1FAv28GTpQRD2Nw ULiQG3qCi6PTGFXC|7|17-962-756-6916|-383.24|MACHINERY|final courts. furiously ironic deposits run blithely furiously express instructions. slyly special deposits slee 6125|Customer#000006125|q4dT2taZKQmIDI8,V|11|21-143-295-8049|2811.99|MACHINERY| ideas. slyly even packages are fluffily instead of t 6126|Customer#000006126|7NRolRPvqN3QIrcKU|23|33-988-127-6540|3047.89|BUILDING|hely. quickly even braids haggle slyly. quickly even theodolites 6127|Customer#000006127|4xg9rQlpgsZ7NJl4k9IL47ODv3E26jIRCvnlgd|23|33-236-693-8622|7908.12|BUILDING|le carefully. even requests grow carefully carefully bold theodolites. furiously regul 6128|Customer#000006128|c88JZp1so3TfKB6o5|16|26-586-949-7031|7899.60|FURNITURE|efully enticing braids according to the furiously ironic accounts detect furiously around the fluffi 6129|Customer#000006129|MVydnT2OcBdbSLF|22|32-368-121-9588|7571.87|HOUSEHOLD|ously after the regularly pending excuses. accounts boost slyly final ideas. special account 6130|Customer#000006130|uzgA6cyeuysYLVKMv|6|16-405-646-2657|-555.94|BUILDING|efully regular deposits about the regular packages believe blithely alongside of the ideas. bl 6131|Customer#000006131|EwoT1zKxHYj8|13|23-237-602-7871|4223.57|BUILDING|e doggedly permanent platelets slee 6132|Customer#000006132|LMFK0bKFaBIX8tW74Yoxb8,bw9XS|1|11-890-884-8734|468.90|HOUSEHOLD|ly close courts among the express, even packages believe doggedly among the fluffily regular ins 6133|Customer#000006133|FwwrG68tR4k|13|23-849-670-9143|2698.12|HOUSEHOLD|final ideas detect furiously permanent accounts. slyly regular courts boost final instructions. theodolite 6134|Customer#000006134| DMIOsEg5VDDKmWzjjShHILJhBrFVea|8|18-349-804-2162|3214.77|FURNITURE|kages sleep. furiously thin requests affix ac 6135|Customer#000006135|IED4rGsufuEnT4NrO7KvhQfFfcwGzvMHgQiUnoP|1|11-117-608-4110|7552.48|MACHINERY| deposits. deposits along the final packages haggle furiously ironi 6136|Customer#000006136|pDoECZ7k3AwOSp9wDO|19|29-974-254-4381|3649.73|BUILDING|ely ironic tithes sleep finally after the 6137|Customer#000006137|Uv1p49Ppo7lO9zpW05z|5|15-958-508-2113|3037.82|BUILDING|riously express, regular hockey players. quickly final pinto beans use packages. carefully e 6138|Customer#000006138|Dm6,p6hst9ub9FiYivgUm3FapKRiEYPG|15|25-872-607-4602|3638.97|FURNITURE|snooze slyly quickly ironic packages. furiously final deposits sleep quickly specia 6139|Customer#000006139|3r3Uc,7NMqKxby7w641xEV3su|17|27-704-480-4139|9119.98|AUTOMOBILE|s cajole blithely regular packages. 
furiously ironic deposits are final, regular fo 6140|Customer#000006140|QPlaw,8zEWbcabsVf,wRBMC1hvedP0|11|21-263-388-7344|3593.93|AUTOMOBILE|ke furiously slyly bold warhors 6141|Customer#000006141|4QBCP6mT5dep|10|20-961-227-7319|247.76|BUILDING| alongside of the patterns. pending ideas doze 6142|Customer#000006142|6rJlz 6aiuufZqN8Ldf9|7|17-856-660-5846|4045.08|BUILDING|the carefully bold deposits-- requests haggle slyly 6143|Customer#000006143|zuh77l1EZ6xUn6H,sZIYq|1|11-253-915-7383|3706.32|FURNITURE| nag slyly. final, pending dependencies haggle slyly about the furiously ironic o 6144|Customer#000006144|zPKzjSPczgTN9Yb3ybnDmhplv6ymvG|1|11-306-913-9602|8057.08|FURNITURE|equests cajole carefully pending, even packages. regular platelets 6145|Customer#000006145|CybtvIEF1DXOFCddFGr|21|31-879-282-9414|3423.05|HOUSEHOLD|sits are slyly even requests. regular packages across t 6146|Customer#000006146|RNKo8PgfKqgW6hGWTJMp|19|29-723-257-4271|-832.84|FURNITURE|ld requests alongside of the regular packages wake fluff 6147|Customer#000006147|fr6iYAVpls5Zy8UF1qn|21|31-729-268-4470|3418.83|FURNITURE|accounts. silent, ironic orbits believe furiously escapades. slyly regular packag 6148|Customer#000006148|OiRtO4N,szTYNMjqyl,|14|24-345-723-7631|5246.25|AUTOMOBILE|s poach carefully after the furiously ironic packages. even pinto beans are fluffily. blithely unusual pinto beans 6149|Customer#000006149|Dv0FCsfLC8tg I,NGqcnL3uO|9|19-981-118-5032|6737.15|BUILDING|mong the blithely final requests sleep according to the b 6150|Customer#000006150|aX47rrqgyvzy74tFd0o Wn1wr5XK|2|12-895-769-4268|6249.24|BUILDING|o beans cajole doggedly above the slyly unusual deposits. enticingly regular theodolites snooze c 6151|Customer#000006151|ojCmk5FfGZm4Oset4R|1|11-178-162-9290|1016.84|BUILDING|inos should have to use furiously among the quickly express somas. ironic requests cajole furiously after the slyly 6152|Customer#000006152|Bqxwa70bCy,M8Vs6cWn|19|29-492-966-5351|4957.70|MACHINERY|y final foxes. express packages wake across the ruthlessly regular instructions. express 6153|Customer#000006153|,oEYlRJs0L76hU5MtnT17H5qULk|6|16-303-738-6232|8070.72|MACHINERY|ctions use slyly above the blithely express deposits. 6154|Customer#000006154|FI1U42PTLrAG n3a5JxzHabcH|10|20-372-167-3439|1571.98|AUTOMOBILE|tithes nag regular, final theodolites. deposits sleep along the ironic theodolites. carefully 6155|Customer#000006155|03gJRZx3TGwScCd8d1 MhitTEHY i|20|30-511-190-6866|7246.91|HOUSEHOLD|ess accounts. carefully even theodolites are silent requests. slyly unusual packages 6156|Customer#000006156|cVhwUHk5,b74V|12|22-119-245-3413|4873.83|HOUSEHOLD|ly daring, express deposits. special, regular packages haggle furiously ironic package 6157|Customer#000006157|3hTpojm17PUW3OvoLxeqQtz2|11|21-578-529-7649|6444.10|HOUSEHOLD|are slyly sometimes final deposits. special, final dependencies cajole quickly. slyly e 6158|Customer#000006158|GmqBG39QiPPqq1YA|10|20-393-733-8825|8966.55|FURNITURE| even asymptotes. fluffily pending requests against the quickly silent platelets cajo 6159|Customer#000006159|naaCoV9ztjZ2YVP4hmQAtTnFsb,DAWFoAJp5|6|16-192-111-5212|9877.27|FURNITURE|g the silent, ironic theodolite 6160|Customer#000006160|NrxXWav6LihuuSlWGszrc|13|23-830-408-5650|4333.77|FURNITURE|pending orbits sleep quickly. slyly unusual requests cajole. furiously regular requests 6161|Customer#000006161|pbRxOFTDqY0lPBucGOSzCy|18|28-703-556-9515|4706.39|FURNITURE|ing deposits? 
furiously regular accounts alongside of the fluffily even requests are blithely above the 6162|Customer#000006162|wFK59S80D3m iXYc96LznPltGTpDcB|3|13-525-195-7035|8277.40|HOUSEHOLD|ackages. requests use blithely according to the final, regular requests. even, special accounts 6163|Customer#000006163|tmxBpVae0jKAe9vXikmLaCRGPAoyiDVGz|21|31-674-263-4416|-678.87|AUTOMOBILE|requests sleep. quickly final depths affix quickly unusual excuses-- final packages af 6164|Customer#000006164|PSgfAO1LnbP8 5aNt2lWBD|14|24-782-729-4216|6171.41|FURNITURE| ironic courts solve closely according to the carefully ironic requests; special, 6165|Customer#000006165|31aZoMFnTFf|12|22-414-194-1058|1982.00|AUTOMOBILE| the even, unusual excuses. slyly special foxes nag quickly special, expres 6166|Customer#000006166|df2AQz9BJgmAj XoIOiyoUd|14|24-891-533-8945|1444.98|HOUSEHOLD|ng packages affix even, final pi 6167|Customer#000006167|oPGRaBt5IuSxtQhwgztJyv1miL|7|17-798-361-3738|6729.32|FURNITURE|ly final requests grow alongside of t 6168|Customer#000006168|TfeEEZ9Ds8x3eQljQUYaNzpu6tE1ap,JGRn2gZm|24|34-877-467-2443|6633.98|MACHINERY|ily. special platelets about the slyly 6169|Customer#000006169|UDeLABnhksFK7tfInLsDPRtynoWxesfiJ|7|17-656-209-2093|-681.05|HOUSEHOLD|y final pinto beans. slyly even dependencies boost 6170|Customer#000006170|PLJi9aPgRlkkIUsc8LGRVLOGrz1IBd|19|29-558-582-4733|2570.49|BUILDING|t requests cajole slyly. slyly ironic courts ca 6171|Customer#000006171|SNyHcZsmA5EydGjLu0MT7Y|19|29-222-112-6966|3125.83|MACHINERY|ts nag fluffily even instructions. furiously even pint 6172|Customer#000006172|uafeB6k3L MWgR6k8Wokrv0gtSwTLmWRW8,UVhv|5|15-750-174-4232|8749.91|BUILDING|ound the slyly regular theodolites wake slyly across the platelets. carefu 6173|Customer#000006173|jd3so7 leJO5Y0SF1YFrMJ|20|30-743-439-3998|9725.31|MACHINERY|es boost above the carefully regular 6174|Customer#000006174|UOkeiQv5WK1OBw5CjtHQDh84JAV|11|21-570-543-7869|2314.46|FURNITURE|ly bold foxes-- final ideas x-ray among the regular theodo 6175|Customer#000006175|4fRoxmxFa4n|23|33-387-822-4617|1065.86|MACHINERY|ress pinto beans. fluffily final ideas dazzle closely slyly ironic packages. furiousl 6176|Customer#000006176|kXM67uOjA5sj|24|34-919-259-2224|3918.54|FURNITURE|nts wake after the even packages. ironic requests alongside of the slyly regular excuses boost slyly regular reque 6177|Customer#000006177|pZZ8D,yKhfsFatXwk|24|34-161-190-1931|-149.78|MACHINERY|are across the slyly ironic requests; carefully dogged accounts run blithely bli 6178|Customer#000006178|ejgXNsz sI0Dl3F,FVziTAF4mPWXczkmXlu|9|19-567-173-3123|-369.66|FURNITURE| furiously alongside of the pending, ironic theodolites. ideas among the silent pinto beans dazzle silently alongsi 6179|Customer#000006179|0MU9 AVKw SeY6kbL5VJm|6|16-699-827-5744|38.38|AUTOMOBILE|l accounts lose furiously slyly final tithes. quickly stealthy attainm 6180|Customer#000006180|4csqeJ8yw1y r 6Bzi49uv|11|21-527-929-7958|4979.33|AUTOMOBILE|ymptotes solve beside the ironic packages. carefully special deposits ar 6181|Customer#000006181|cZ9B3p5D4poouVdTvh0Sol7ODKuWa|2|12-716-858-1804|8630.17|MACHINERY|es. unusual instructions among the slyly regular deposits affix carefully after the furiously ironic platelets. 6182|Customer#000006182|8U367QGaD8IUUdHyHtwSj3pmJoeLcVrMccGMZ5J|6|16-291-418-8009|-745.50|AUTOMOBILE|es serve blithely alongside of the regular instructions: unusual deposits cajole ruthlessly alo 6183|Customer#000006183|iDx,aeynoLw|20|30-808-423-5478|8330.40|MACHINERY|gular, express accounts. 
requests use. furiously pending instructions detect within the quickly 6184|Customer#000006184|dkynJLYBPBCkx 1paCFtwxmixcoPoqaFVyGQ|19|29-373-987-6278|-960.96|MACHINERY|hely blithely special deposits. furiously pending requests boost carefully instructions. excuses nag 6185|Customer#000006185|4qtODzt Kxhkagjgtc5U 6l|24|34-493-651-6114|6109.32|HOUSEHOLD|ests are final ideas-- ironic, pe 6186|Customer#000006186|0dwkPKvOkPIniv3,Fahd1rq9nwc|16|26-982-329-6333|3360.22|AUTOMOBILE|sly bold grouches. ironic pinto beans sleep furiously furiously regular depths. platelets breach t 6187|Customer#000006187|LJJpyf,OZivsv6IQBJG3gEisgc7d QC7oKRuXOBj|15|25-545-311-4634|-838.33|AUTOMOBILE| requests. slyly regular requests after the accounts hagg 6188|Customer#000006188|Z85HZ6fRUEl3|15|25-648-100-5980|-895.88|AUTOMOBILE|gle carefully. bold theodolites x-ray fluffily. bold gifts alongside of the sent 6189|Customer#000006189|X6edGVb,Osa emoLHSaQKn|14|24-556-862-5258|-213.89|AUTOMOBILE|ly regular deposits across the regular theodolites serve blithely express 6190|Customer#000006190|mV9CzSEQr,nE3CC, xJ1EsQOw|2|12-212-128-8305|33.63|MACHINERY| finally regular theodolites wa 6191|Customer#000006191|7XGJ0ugPk dVdCm1nJQ|22|32-229-609-5050|2167.56|BUILDING|s dugouts. carefully bold pinto beans 6192|Customer#000006192|oI8CXoK1w9PnZDUEbvj|0|10-276-595-2077|7179.63|BUILDING|ly bold decoys are slyly. slyly even epitaphs around the regular requests cajole at the blit 6193|Customer#000006193|RnHj1jACEqFgLpCQfzgsZtgoZu1Jck|16|26-200-646-6714|7033.00|MACHINERY|dependencies. furiously special foxes cajole 6194|Customer#000006194|9GsTKrC4NgB2bqd4ui9kuijhjxDlw IU1|9|19-886-790-6122|6639.12|AUTOMOBILE|ts boost after the carefully ex 6195|Customer#000006195|eVLaPYm6NRhqQzuMx3vk|0|10-891-840-4980|8707.05|FURNITURE|y. quickly pending accounts against the blithely thin ins 6196|Customer#000006196|KNWdwsj7hGyO0lrvqr6G1o|15|25-200-325-2383|4462.11|BUILDING|c courts. final instructions a 6197|Customer#000006197|Ce9LTBhp7GkCqZy|11|21-395-292-5975|5631.85|FURNITURE| unusual instructions wake always above the regular excuses 6198|Customer#000006198|OglD6pbHC9ovv2mfZ rDO iay cVjX5SRng2|23|33-381-544-1422|8515.39|FURNITURE|y unusual platelets are slyly across the carefully final pack 6199|Customer#000006199|3LH72AxUTOqvuazpB6dk5i80YVo3,H2YZiyir|22|32-514-462-3884|6178.78|MACHINERY|ideas wake fluffily. pending, pending p 6200|Customer#000006200|oGCR8cSGI,rHCpRMi2|3|13-609-502-2266|4767.80|FURNITURE|l dolphins. sometimes unusual instructi 6201|Customer#000006201|oK3Q7pkcEZaGXxOeNB4okAaAxNDbZB8K1y|0|10-385-198-4441|6397.29|HOUSEHOLD|ly. regular, slow packages along the carefully express foxes doze to the blithely even foxes-- furiou 6202|Customer#000006202|E73qW mbEQINh gPymXB,ed4O nKl|15|25-477-944-7482|3330.24|FURNITURE|l excuses. fluffily ironic packages use. quickly final Tiresias a 6203|Customer#000006203|xVMuglbV53zSLL4wb7Mxb,pkD8MrP6 R,CO ehMc|19|29-735-432-9939|1280.15|BUILDING| ideas atop the even, unusual excuses sleep against the furiously careful pearls. closely express 6204|Customer#000006204|6MBV7BG qK9BOmGQny|16|26-983-171-4809|8460.54|FURNITURE|eful requests. quickly ironic instructions use b 6205|Customer#000006205|Bd2A0KLWCYN1WZ1XJc,N |18|28-920-423-1966|8416.88|FURNITURE|osits over the pending instructions wa 6206|Customer#000006206|EZ6yws0GmtHJXSHiV|4|14-448-256-3507|486.36|HOUSEHOLD|nding platelets are after the pending foxes. 
unusual, regula 6207|Customer#000006207|Ba2gZAYGD 74QLT8T7,uRQwIi0rqbJ9|7|17-896-188-3890|-740.28|FURNITURE|foxes. blithely pending platelets haggle quickly bold, silent asymptotes 6208|Customer#000006208|m,PxS2pByk43RfabxsV6in7n|18|28-322-291-1770|8045.41|FURNITURE|deposits wake slyly. slyly even ideas use blithely. furiously iro 6209|Customer#000006209|C1Ls6INP7D9jHTugpjUUWD9kS9cpKclB|2|12-685-144-7597|5241.88|BUILDING|ges. final accounts breach slyly. furiously regular deposits according to the express instructions wake after 6210|Customer#000006210|d5fRx4ruNET9kj6LqGhVxCYwT|17|27-245-729-5781|-791.51|AUTOMOBILE|special pinto beans poach quickly careful theodolites. fluffily pending instructions cajo 6211|Customer#000006211|,bXG5MlIamKtG8mMbce|2|12-200-723-4029|5045.01|BUILDING| furiously special requests. carefully final deposits wake even requests. carefully final accounts are alongside o 6212|Customer#000006212|ArifsT45MN,N2HR,CoiDLwG0|5|15-251-873-7969|1570.87|AUTOMOBILE|n foxes. ironic, final foxes boost blithely bold instructions. furiously dogged deposi 6213|Customer#000006213|jpXKO9LktOMMIPpfE xyGA7uurVPqSzOn|5|15-600-409-7048|-445.21|AUTOMOBILE|nts use carefully above the b 6214|Customer#000006214| ,MAKZxDGF3QKwDERiLKFCaCOcIOlN|22|32-811-917-7230|6202.90|HOUSEHOLD|. carefully express theodolites sleep furiously even requests. quickly express instructions lose slyly silent idea 6215|Customer#000006215|fAfdqCTURbOu,|5|15-792-734-1509|3206.02|BUILDING|deposits. furiously even hockey players sleep carefully. quickly bold requests across the slyly spe 6216|Customer#000006216|bGTXGAHg72BEDM09QZEFI|0|10-585-359-5566|3548.03|MACHINERY|ickly. slyly final theodolites integrate fluffily quickly ironic asymptotes. 6217|Customer#000006217|k9NiqQlFJVv6|9|19-612-407-3150|9647.49|MACHINERY|tions wake quickly slyly regular pinto beans. ironic, final asymptotes affix blithely around the hockey players. 6218|Customer#000006218|4,z5xJL2IWWO5LMrF36cZISGaq77Q6 7DvAh5|16|26-501-652-8685|3504.23|HOUSEHOLD|slyly final instructions. final e 6219|Customer#000006219|lM 6tdVkyERY,wQ6n7ZHD11,G|8|18-384-857-8254|-443.88|FURNITURE|lyly regular theodolites doze furiously bold ac 6220|Customer#000006220|je,Ssek0XNsaWRGsiKF 0,hDjGVOaSNsN7TkZz|7|17-755-898-9664|6694.27|BUILDING|ffily express deposits. carefully ironic packages haggle furiously final excuses. pending dolphins af 6221|Customer#000006221|J2Zx,5YWGMpbyd9yupa1PMuDhhHCFtDqmtzx0CE|6|16-641-636-3853|712.36|HOUSEHOLD| the furiously even braids. instructions use blithely 6222|Customer#000006222|8p2FbclgqcvoFR29P,OwwCkWR|8|18-981-975-1436|2589.32|FURNITURE|usual pinto beans. slyly special deposits breach about the slyly i 6223|Customer#000006223|kOimz7buzOsZP8DPRQfB pa8a7bWyA0Axx|7|17-805-445-2530|644.72|BUILDING|each slyly quickly even requests. quickly regular instructions cajole blithely. deposits wake along 6224|Customer#000006224|LFoSAlF,JOEn4gVU0qGhIpu|5|15-952-723-9945|9794.49|FURNITURE|ly blithe dependencies use across the slow requests. 6225|Customer#000006225|cqOhtNVujJSTrsZlLLvt1k|21|31-221-435-4954|-847.96|BUILDING|; slyly unusual deposits cajole pinto beans. 
fluffily regular accounts above the accounts lose slyly 6226|Customer#000006226|8gPu8,NPGkfyQQ0hcIYUGPIBWc,ybP5g,|23|33-657-701-3391|2230.09|BUILDING|ending platelets along the express deposits cajole carefully final 6227|Customer#000006227|hQpDSUJLnjcvDZ4WbiVrWDSYBjCou1kJ|11|21-159-594-1232|2062.51|FURNITURE|uffily stealthy deposits haggle quickly carefully final request 6228|Customer#000006228|Zb2Vj1EhkIivE CSYkb936,JYTQaWYT0a|5|15-417-317-1397|1524.63|FURNITURE|ts. furiously regular requests wake carefully regular packages. furiously regular packages about 6229|Customer#000006229|6jBRlUNs3Q,XQZsgUuaWybaSY|17|27-996-380-7890|3480.92|HOUSEHOLD| packages. even asymptotes nag ironic excuses. slyly ironic deposits cajole. express requests 6230|Customer#000006230|MGXvPZQ6UMzc4PbZcSUl8kGfew|18|28-226-342-3356|3763.78|MACHINERY|ously pending platelets; busily special theodolites sleep carefully! express platelets sleep slyly acros 6231|Customer#000006231|DssbpsUtrcZVi81wG|11|21-183-229-4023|5805.58|HOUSEHOLD|ding accounts thrash carefully alongside of the even, regular ex 6232|Customer#000006232|T0JFOylApn8YQr,|18|28-847-729-1271|2455.95|HOUSEHOLD|ly pending packages are. regularly express accounts past the fluffily regular p 6233|Customer#000006233|4ZbcmGRlrqQ5|22|32-482-650-3134|3186.31|FURNITURE| instructions along the regular packages maintain above th 6234|Customer#000006234|lQaP7fCR8lVTQY7pkMi1BLv|23|33-658-619-9537|2250.65|BUILDING|dle packages boost slyly. quickly special theodolites wake furiou 6235|Customer#000006235|YpIuvasSAUFQ027rL|11|21-845-791-5239|6718.32|AUTOMOBILE|e the ironic, final instructions. carefully unusual somas haggle quickly quickly ironic requests. quickly eve 6236|Customer#000006236|zQjCvBPfacwUT0nrJ2uF,4FtSAC0ldB|17|27-557-761-6785|8836.84|MACHINERY|uriously bold accounts boost blithely ironic theodolites. quickly even packages mold along the 6237|Customer#000006237|jgXVEi0rAQCxBmRGM1vE1|0|10-312-679-5009|7806.93|AUTOMOBILE|oze slyly. thin, unusual depths integrate quickly alo 6238|Customer#000006238|AckKNmINwCbVw kC5bZ0u3Hh7C3,7Rdva0|15|25-749-568-5891|1052.60|HOUSEHOLD|hely special deposits. regular requests solve un 6239|Customer#000006239|PklZCqNbiyA9|22|32-305-456-4164|1007.44|AUTOMOBILE|ular packages. final, even ideas print slowly along the pending acco 6240|Customer#000006240|kxWW41iCbmQJFT3GxGtmHakhcHZ07nW0diOTG|8|18-432-692-3478|939.14|BUILDING| even dependencies. pending, regular packages wake carefully above the deposits. 6241|Customer#000006241|ucocla,JAAW amojIyO Ow |7|17-623-128-3126|8018.46|FURNITURE|tructions wake fluffily. enticingly regular requests sleep furiously silent packages. slyly special 6242|Customer#000006242|1J2Yz5f1lOLcsSkMTme|14|24-293-990-7292|3379.23|FURNITURE|uickly ironic packages sleep. fluffily regular accounts wake qui 6243|Customer#000006243|JotibBD2zs87K8|19|29-356-403-8790|-377.14|AUTOMOBILE|ly even accounts. final instructions haggle. silent, silent platelets are around the carefully even accounts. caref 6244|Customer#000006244|5XQ1QzJOWRsj2dkV0WnG2lFantHSML9hToTT2tw|21|31-123-516-6719|6385.54|FURNITURE| furiously even requests sleep quickly under the pinto beans. ironic warthogs thrash sp 6245|Customer#000006245|okJFjGfCET8m41TwZdswM4nqi2,wx8qP6tpM8GE|19|29-589-374-8352|3766.89|BUILDING|ounts cajole. deposits are. quickly final ideas across the carefully ironic notor 6246|Customer#000006246|5l,sIIFQVnUUtzs1|10|20-124-894-8392|3090.40|FURNITURE|ckly regular theodolites use. 
furiously regular instructions against the regular dolphins are sl 6247|Customer#000006247|22fOQj6uLxyE6L,ev4yMzPv8FuBvstp9JJAmez|3|13-961-966-3385|7408.81|HOUSEHOLD|s should have to haggle furiously even requests. furiously bold d 6248|Customer#000006248|F3NgALjt9qjhFKEDz|10|20-237-638-2221|8834.94|AUTOMOBILE|ckly according to the theodolites. foxes along the ironic, bold accou 6249|Customer#000006249|I3e rogGC4PkngfpXmtiw|15|25-311-257-8481|8946.03|MACHINERY|efully across the fluffily bold tithes. fluffily regular frays nag blithely along the sometimes eve 6250|Customer#000006250|swZXFAFQ2O|17|27-861-967-3557|5926.27|FURNITURE|after the furiously ironic asymptotes. pinto beans nag. fluffily regular ideas pro 6251|Customer#000006251|GWig6svLrx4Lcr0I tMBGMgd7HuyLGjbDJnFkGTl|8|18-245-505-4627|6920.15|AUTOMOBILE| unusual dependencies wake furiously even foxes. regular pinto beans mold furiously spe 6252|Customer#000006252|iGu23V6R50fBd4,WCMeGmcF9QaffCWOZiiY|22|32-311-215-9551|1928.88|AUTOMOBILE|packages cajole. carefully ironic packages nod along the even excuses. foxes wake against the packages. 6253|Customer#000006253|dHtDAyg0dAsmNZUJ6yonI6|9|19-583-938-5958|-356.48|BUILDING|ffix blithely. special, special packages believe. slyly regular platel 6254|Customer#000006254|XpXxuBjXYzFBXCvTp8sO4 0zwCiWD9ggiF|12|22-251-775-8243|1538.38|FURNITURE|. blithely final courts are quickly across the ironic deposit 6255|Customer#000006255|l6XtT3yvhZ,VzKzrXl2vQiIgwcrFB3qL7fuARSYk|5|15-472-424-3063|3495.68|BUILDING|even excuses. fluffily regular pa 6256|Customer#000006256|,fpJxiGODuy,EqJmiD9qM1DN|23|33-628-309-9349|9846.83|FURNITURE|lithely ironic, bold deposits. blithely bold 6257|Customer#000006257|etreRcLjc7uC|17|27-604-361-9761|9836.21|AUTOMOBILE| asymptotes wake slyly. bold excuses are furiously carefully furious accounts. slyly even packa 6258|Customer#000006258|CzJGapE7fxQx3x9eN A7O|18|28-429-156-3365|7221.20|FURNITURE| foxes haggle slyly slyly final asymptotes. slyly even packages wake furiously along the 6259|Customer#000006259|fpEAMihvplhOKyor ZRcf2bEUOwQgGz6SkilLk|16|26-449-972-6429|1723.42|AUTOMOBILE|instructions are evenly unusual ideas. furiously regular pinto beans among the furiously bold deposits wake 6260|Customer#000006260|oJLJxevvYZqj,n6Dq,L5V6C|4|14-214-869-6336|405.59|FURNITURE| haggle above the slyly regular platelets. pending theodolites wake carefully even 6261|Customer#000006261|rbTkXWn,HeQxxR8SLRuBS3m,LK|12|22-636-489-9192|5612.90|AUTOMOBILE|ructions. final, unusual pinto beans are furiously specia 6262|Customer#000006262|HlUg CpG1hqLgHf|14|24-761-458-3272|8970.62|FURNITURE| ironic requests believe quickly after the furious dependencies. express, fin 6263|Customer#000006263|nTwk5ECJ6elmDX8zLW7Fta9u9PlmTaqqRNja7|14|24-936-988-1040|1314.66|FURNITURE|ckages use furiously slyly final accounts. bravely bold dependencies about the carefully ironic asymptotes are 6264|Customer#000006264|pV82CJ4rNyOcMzCXNmdy|17|27-583-887-6592|5392.22|AUTOMOBILE|al, careful instructions affix blithely. ironic 6265|Customer#000006265|,8NdehNjF5ojMMEKikadmc2ng|23|33-192-952-7496|6977.71|BUILDING|y after the quickly pending requests. unusual 6266|Customer#000006266|9OKHx1,rXIAV0pq6Vj,uERU44LaT|22|32-443-596-1740|1468.66|BUILDING|g blithely. ironic foxes cajole blithely around the packages. 6267|Customer#000006267|Hxi,BwRbqRQUkum7Ts3R ugk4w58Ozmpp|20|30-657-865-4960|8650.75|HOUSEHOLD|l platelets sleep blithely. 
quickly pe 6268|Customer#000006268|cG,c4luyALcY|5|15-764-581-5523|4236.85|BUILDING|special hockey players wake fluffily express, bold pinto bean 6269|Customer#000006269|t3tcDR3QxuXh1Q5eHbzBls8jxxc4eCZSKKu Rh|16|26-790-547-6046|3793.75|FURNITURE|ccording to the furiously ironic deposits. final decoys wake along the patterns. blithely even 6270|Customer#000006270|PGn,pJmM gsA1tDtDbbuiiGra57c4FL|18|28-717-120-2144|3485.91|AUTOMOBILE|deposits. pending braids cajole fluffily pinto beans. carefully ir 6271|Customer#000006271|C61IFNXGXjOgzUAf8drHHKFksk,dASWbIXele|15|25-214-187-2123|-342.40|AUTOMOBILE|ptotes haggle slyly regular accounts-- regular pinto beans use carefully against the blithely reg 6272|Customer#000006272|HnzXtYtwH8Jco2wa,L|2|12-494-911-3342|7209.51|FURNITURE|ajole slyly. furiously final packages affix silent, final theodolites. even asymptotes x-ray bravely. reg 6273|Customer#000006273|ZD7bJedn3FdCd3p1SLGq4rZGqBCMlic|0|10-717-770-2411|3278.98|AUTOMOBILE|uffily among the fluffily regular accounts. even asymptotes haggle finally 6274|Customer#000006274|SgRPgKV3mB1oPuGe1ccFvjhDBRYiopGwIsWTNuOL|24|34-845-579-7944|8778.79|AUTOMOBILE|uriously regular requests. slyly regular instructions haggle. in 6275|Customer#000006275|vFGTLUjxiQu4HEiY16P1jSBrn380WinK,|0|10-194-385-3660|187.26|AUTOMOBILE| poach above the furiously unusual sauternes. accounts poach slyly. blithe 6276|Customer#000006276|qrFgLgA0RCdrQioauSbVb8g|11|21-790-836-6047|-777.89|AUTOMOBILE|uriously regular deposits. even deposits alongside of the slyly 6277|Customer#000006277|0EKQ4D5RMYQ6,NHCtnq14Es7OIWwNDPRtBEswyFP|0|10-237-523-6848|2907.96|BUILDING|s boost furiously final, special r 6278|Customer#000006278|F kMurdAhFU0C2KEiojmsS5gWlgxPZ5Q49iZl|5|15-299-327-8860|9996.76|AUTOMOBILE|detect slyly unusual hockey players. regular requests after the final 6279|Customer#000006279|Ft4nZfY7lsZ ws|5|15-206-922-2248|-319.90|FURNITURE|ial, unusual accounts. final asymptot 6280|Customer#000006280|eA0MCPdIfSK06GY JDS,GKrDGrr3e7ZqkB|19|29-971-689-4133|407.63|AUTOMOBILE|cuses. ironic deposits wake above the permanently ironic deposits. final ideas are slyly ca 6281|Customer#000006281|yORAuTtjrCJF9lOKTJtS9|4|14-318-809-5732|7055.03|BUILDING|refully even frays cajole during the carefully even acc 6282|Customer#000006282|fCqZiNSOZ46KUCaRvVFPG60DMq|16|26-750-142-4294|2969.47|AUTOMOBILE| furiously regular deposits. slyly ironic packages 6283|Customer#000006283|tmVqD0BrhOBHlA|14|24-539-196-5846|9860.06|HOUSEHOLD|regular, silent requests. blithely ironic theodolites sleep. blithely even deposits nag r 6284|Customer#000006284|zEh7eHXMGoSNP9h7Bk7G8axMfEkBVkj,NADgz9|13|23-583-198-6369|1178.44|BUILDING|fily alongside of the fluffily speci 6285|Customer#000006285|nf 4a5KU8QuLSMPZWmEkyVq6UAD065pgsgI|3|13-353-741-3596|3913.54|MACHINERY|rmanent deposits print furiously among the slyly e 6286|Customer#000006286|WbndcI8V39JL1oxVYtRHMvESj1|16|26-227-468-4312|3895.62|AUTOMOBILE|le blithely ironic packages. furiously bold instructions wake blithely ruthless, ironic pinto beans. even 6287|Customer#000006287|b1WalykIgCZUEIk,KPnxg7ytSfEtEHsVRCX98H5|20|30-755-248-6558|-296.05|BUILDING| beans wake slowly according to the regular, ironic deposits. furiously regular requests cajole. express 6288|Customer#000006288|iN4rgnOJ5RH8M5r6fvF75YNLBiT4loi|11|21-622-156-3974|5184.42|HOUSEHOLD|unts sleep carefully quickly even foxes. 
fluffily bold requests about the silent theodolites breach quickly above 6289|Customer#000006289|MuOF83xgQwBnj42OUkVScHj7RKGR3U7NkdzVdLc|17|27-437-457-4918|1509.92|MACHINERY|efully furiously pending pinto beans. pinto beans after the blithely express pinto beans haggle carefully packa 6290|Customer#000006290|Vi6,QwAcedleabbr0SEv6LeHEU9SluHi57,|3|13-927-921-7780|9223.20|HOUSEHOLD|ual, express deposits wake carefully final instructions. slow, ironic requests across the furiously regular package 6291|Customer#000006291|JwSPtW9LtBALXgfhtQ3H|15|25-657-320-9686|313.68|HOUSEHOLD|ess instructions wake slyly by the 6292|Customer#000006292|xR0ShtQF06IrULp,|20|30-313-386-9424|910.80|FURNITURE|ely regular requests cajole blithely regular, unusual accounts. ironic, ironic packages about the slyl 6293|Customer#000006293|40Q UY9xqRxuXTIF3Kh58iCsPTn6g6FXAU|3|13-828-651-7919|4785.81|BUILDING|uickly final excuses was slyly silent, express requests. carefully regular requests g 6294|Customer#000006294|TEYcUTvYiWvxYjLqLx1a7dI7nqlcLDSG6S732 |10|20-377-548-8347|1065.94|AUTOMOBILE|uriously. furiously bold platelets detect theodolites. quickly final accounts can cajole according to the bli 6295|Customer#000006295|YEDvY2dxIZA5AFzrYqM2R,Qu0BWeRzqZ|11|21-326-272-7171|5134.99|BUILDING|sts boost final, final packages. unusual pinto beans against the carefully final re 6296|Customer#000006296|igCv4BEwY9,779Tix1Jw|21|31-763-742-7377|6768.63|BUILDING|could sleep blithely inside the regular accounts. furiously ironic accounts slee 6297|Customer#000006297|DX,A9MX7Xpum|22|32-968-252-6956|7365.56|BUILDING|ctions haggle furiously carefully final pinto beans. pending deposits dazzle regular, regul 6298|Customer#000006298|v5JDDFsvutMitkNO|10|20-805-968-6774|8028.38|FURNITURE| furiously fluffily pending packages. silent pinto beans across the final foxes nag perm 6299|Customer#000006299|,4uQAq3HIX7qNb2tA yA|4|14-120-953-8397|4991.04|HOUSEHOLD|refully regular ideas wake carefully dependencies. slyly regular excuses serve. fur 6300|Customer#000006300|iFuTBsELWUD|14|24-935-162-6227|7456.50|AUTOMOBILE|bold ideas. depths hang ideas. slyly final requests boost fluffily above the quickly bold escapades 6301|Customer#000006301|JdPIarVVF5vd0laNpjFh|18|28-940-337-3490|7399.19|AUTOMOBILE|nal instructions eat slyly. pendin 6302|Customer#000006302|Pmy5pzuh1YeOHkpciY0bCwMtDvMEPy2816MEBz|11|21-127-281-7462|1273.35|AUTOMOBILE|o beans. bold requests promise carefully slyly regular pains-- boldly ironic accounts are blithely pending 6303|Customer#000006303|YlPFtsXInnJ9pylCP3WVnzHDGX,RbchWeoZ|21|31-241-146-4709|4079.40|HOUSEHOLD|ans run quickly slyly special deposits. carefully 6304|Customer#000006304|Zg6sBmlhMs9XVZeDS3D|13|23-243-641-3155|8426.13|FURNITURE|ccording to the furiously express instructions. asymptotes affix slyly carefully even do 6305|Customer#000006305|26EjrYGIc38wIM,rkf nwGVNIaWPfmsEpjmyyfP|0|10-141-833-6715|2088.12|FURNITURE|dencies. blithely final packages integrate furiously. pending platelets use across the quickly bold de 6306|Customer#000006306|Bp0L3gbYTOCVi9N3Tq6CtAMW6jFuqWTmOEB|7|17-595-455-5504|3048.38|BUILDING|luffily across the special requests. quickly special packages among 6307|Customer#000006307|FJlRZXhd4LA,uWwwa78AoylhQkOOJrNZ|19|29-453-201-8768|5047.86|AUTOMOBILE|eat. quickly pending courts lose furiously against the slyly special platelets. carefully special 6308|Customer#000006308|FmSPcC6tPeT6M6|10|20-499-286-6565|5329.24|BUILDING|s. stealthy, silent packages print. deposits are slyly even, pending accounts. 
blithely even 6309|Customer#000006309|pHdznY21xwitGpZp|7|17-525-121-8608|4575.56|AUTOMOBILE|deas. quickly quiet instructions can are. express accounts use after the 6310|Customer#000006310|n2yStHsmbyEE6P|6|16-432-150-5510|9468.61|AUTOMOBILE|e final packages haggle blithely above the qui 6311|Customer#000006311|wSwN,5PelFjtbAIv,SzZXy05,GTmiXBsXSTfI|17|27-553-115-9891|-71.60|HOUSEHOLD|inal, ironic accounts above the excuses use evenly among the bold pinto beans. carefully silent foxes above the r 6312|Customer#000006312|QMhVQC0PiraO8oEHoMJ14b8Gxf9gK4h5ATkCn|4|14-870-343-5163|4562.08|AUTOMOBILE|ily silent foxes above the ironically unusual packages cajole quickl 6313|Customer#000006313|g66sNXSi5LC9tvZ|6|16-911-713-4691|2933.72|FURNITURE|beans. boldly final deposits wake blithely. slyly ironic packag 6314|Customer#000006314|lqbO7daGUg0T0QppRJTLXXan2PJ4YiZUtA|24|34-155-178-2373|2996.03|HOUSEHOLD|uickly regular packages alongside of the slow, special requests wake slyly against the furiously regular accounts. 6315|Customer#000006315|H3lTGfGTxl|6|16-965-207-3063|5307.12|BUILDING|packages. packages cajole notornis. closely express accounts according to the id 6316|Customer#000006316|jbMHTjobBPcepRl|14|24-427-805-3364|6714.53|HOUSEHOLD|l, bold requests. fluffily unusual accounts s 6317|Customer#000006317|1i8b72gMGW4MBizzmt2G2j9FiUhxKUV2xcJ|20|30-302-599-4639|154.99|MACHINERY| with the silent accounts? theodolites affix. ironic asymptotes across the idle, express requests h 6318|Customer#000006318|iYPP2u47,ZVs pK,|21|31-463-681-6877|6923.15|BUILDING|quick packages wake slyly unusual 6319|Customer#000006319|j1p9Rogz9sN1u9g0cyHBNsV5Uj0sT,mYh2Z5|14|24-464-821-9726|8487.59|BUILDING|ic deposits integrate slyly across the quickly ironic pinto beans. furiously re 6320|Customer#000006320|utTqGM30xwZPpmVURyoh7jWE4emjju6JHYuUAWFs|3|13-381-184-9600|937.20|MACHINERY|ly ironic requests detect blithely inside the sentiments 6321|Customer#000006321|QQ3MOdoHCo9I6SvghQ10xY|19|29-754-952-3500|8433.20|AUTOMOBILE|odolites. even, final requests through 6322|Customer#000006322|NK2pKqhhwp LJnExfiTmLeCZm6bhLkXWRIfxBPQj|9|19-998-647-9970|-95.54|FURNITURE|counts wake quickly pending packages. silent instructions above the even, r 6323|Customer#000006323|CSFI2KxIkCJ7O,KVsI9rtPZZi1cYypJbtN|23|33-716-180-9533|9240.58|MACHINERY|to cajole about the slyly ironic foxes. i 6324|Customer#000006324|o7FUm0oOy,5Cz|24|34-225-267-1395|6470.97|HOUSEHOLD|furiously idle accounts wake among the closely bold foxes. requests grow fluffily abo 6325|Customer#000006325|eI IZBMs2Neixixf|14|24-113-347-2651|9653.04|MACHINERY|he quickly regular sentiments. ironic, ironic theodolites doze. final deposits run blithely ironi 6326|Customer#000006326|Vvne n,VXT ykC6eBZ202wy4ev360a7jWQugfmC|9|19-653-669-5014|8701.02|MACHINERY|yly. furiously express foxes sublate slyly. pinto beans grow. slyly final deposits cajole quickly f 6327|Customer#000006327|zfiwjLhIm3ykcc0PExS1enEBsWkf|16|26-366-282-8221|2114.22|BUILDING|gle. accounts might nag silent deposits. pending packages use fl 6328|Customer#000006328|wibapP1Bq,wY|23|33-499-740-7766|1651.24|FURNITURE|eposits wake carefully regular, final accounts. pending pinto beans after the unusu 6329|Customer#000006329|GgzjDBrJgBnzfhNzdcbe7XSOs5a9CVHoO|22|32-532-430-2910|8415.80|AUTOMOBILE|ully regular accounts. carefully unusual grouches are. slyly even requests boost b 6330|Customer#000006330|6c1MemjbKFOa41b5CGI4rox|16|26-223-365-1109|4490.78|HOUSEHOLD|counts haggle alongside of the careful requests. 
fluffily regular deposits are. ideas grow: slow braids sleep 6331|Customer#000006331|7,qAyD7LhheRuOcwIJEzmPI|9|19-824-332-5078|3583.47|MACHINERY| between the pinto beans cajole quickly among the slyly special foxes. furio 6332|Customer#000006332|s7FTXH37X2fnlxS|6|16-913-396-4738|436.62|MACHINERY|yly ironic theodolites cajole fu 6333|Customer#000006333|IExKkHfdJck,eoVnei8NhqREtBsDfpFKG6otE|4|14-571-939-5220|-856.26|HOUSEHOLD|hinder carefully. deposits cajole 6334|Customer#000006334|RwSGFTlf,AMKTl2|24|34-336-748-8274|5795.61|HOUSEHOLD|to haggle. furiously final packages haggle. fluffily final pinto beans a 6335|Customer#000006335|4V2jDP,swx9N|18|28-547-289-9779|-197.98|AUTOMOBILE|xes across the ideas wake above the requests. carefully unusual theodolites according to the slyly fina 6336|Customer#000006336|IZ8GS3783y5 K6zMxFP,wa9cvcmzVIe4noGXa|14|24-979-385-1940|3438.49|MACHINERY| foxes sleep alongside of the ironic, express gifts. de 6337|Customer#000006337|PiZCe6IVnzD4lUtquLcVYM0eH|22|32-187-888-4292|5610.32|BUILDING|s. carefully final requests use bli 6338|Customer#000006338|PUuGLhCv0G0OYIeBs|21|31-679-556-1647|9653.55|FURNITURE|ully regular accounts haggle. furiously ironic sheaves affix. furio 6339|Customer#000006339|tk70NFG92XanAXo,NySdfkSiQcpyXHW|12|22-301-127-4981|6226.35|BUILDING| pending packages. slyly ironic accounts wake across the furiously even instructions. pinto beans 6340|Customer#000006340|oxxavhy2E4A No|2|12-958-657-2432|1279.15|HOUSEHOLD|inal dinos use. ironic, regular theodolites sleep quickly theodolites. even pinto beans wake accord 6341|Customer#000006341|JPBISK7sJEEmLqyjH8gwtLCqIgByZEh|16|26-657-318-9584|2573.55|AUTOMOBILE|ites. special ideas are across the reg 6342|Customer#000006342|70kmTAuDuG9pOnUUXeUp en555nKm3lyh|11|21-100-162-4466|-326.28|BUILDING|n, regular ideas haggle blithely. bold, regular packages will boost blithely. car 6343|Customer#000006343|RxSzYVBobmzOfG7NTC3JnFIvUoIcgSo|20|30-972-622-2287|3104.63|HOUSEHOLD|ngside of the close accounts are final, e 6344|Customer#000006344|ad,0JVklm5JfxVGyc LkIojMtdbWpZnXT2a1aKSX|7|17-693-904-1827|4576.81|AUTOMOBILE|uffily special dependencies boost bl 6345|Customer#000006345|es2go,e,Lr4TRItOoUAQKzu1OJIS,8cq50Yb|19|29-627-959-8977|8513.41|BUILDING|he carefully final excuses. final pinto beans sleep 6346|Customer#000006346|50RfbxMaJICGgfSOXGA|6|16-291-905-7678|5910.74|FURNITURE| requests. ideas nag fluffily alongside of the slyly pending dependencies. quickly 6347|Customer#000006347|70IEncoPD8K5Vin9BRkBxdndJkIraSM|23|33-977-252-8569|9804.85|MACHINERY|egular requests sleep furiously fluffily thin somas. special instructions after th 6348|Customer#000006348|1W8CqVR0os|22|32-400-947-4328|8251.10|AUTOMOBILE|sits after the unusual, even pinto beans are sl 6349|Customer#000006349|mb,ZNqwU0WkZFYDX6hw|8|18-762-580-1517|8319.51|MACHINERY|mong the ironic dependencies. fluffily special excuses solve blithely. regul 6350|Customer#000006350|9PHlaqUJG84BxDkb8fe|9|19-525-320-9404|3019.75|FURNITURE|efully according to the quickly final requests. regular packages wake furiously after the quickly furious f 6351|Customer#000006351|mXVsAYZWVd8rduL2Ndnd20a bFxcZ39umb|22|32-148-461-6773|7852.79|BUILDING|uickly regular accounts cajole slyly around the fluffily ironic courts. unusual theodolites a 6352|Customer#000006352|RoiheUBTOtjUEm L2kUWHswZBOmhW|23|33-416-643-4777|1902.93|MACHINERY|beans detect ironic, express requests. furiously final asymptotes boost furiously. 
express acc 6353|Customer#000006353|TiJAQNquw5b57kjKHrf6RLX|19|29-104-330-1710|3465.89|AUTOMOBILE|equests. asymptotes are along 6354|Customer#000006354|NUj4uSOE6ZlEKd5dhc|1|11-652-847-7151|4231.75|HOUSEHOLD|ons wake quickly above the busily ironic deposit 6355|Customer#000006355|0gR84cQxuKr0m6V3z6YnGms2Kqj5 FT FHE5YRwg|3|13-206-334-2025|6066.98|HOUSEHOLD|ven, final deposits are carefully carefully regular excuses. quickly unusual excuses 6356|Customer#000006356|6xjYndpkg 0HArUyB56Xqv7EyDD7JrEi|4|14-568-481-2395|6800.46|AUTOMOBILE|ronic ideas integrate furiously careful platelets. blithely regular packages sleep carefully. 6357|Customer#000006357|,HVhNgTVe,T|22|32-202-348-8130|3312.51|HOUSEHOLD|e carefully final deposits. carefully regular deposits run 6358|Customer#000006358|aynGZ5F8MJIzxoEJc kCqh7J9vj|16|26-600-100-6775|297.48|AUTOMOBILE|ages sleep slyly decoys. sometimes bold accounts nag quickly despite the 6359|Customer#000006359|fhA9rzJfqM686ozX RLctv|22|32-890-744-3388|3722.61|MACHINERY|oost carefully. quickly ironic deposits eat carefully. ironic requests boost furiou 6360|Customer#000006360|WYPts48L0tn7iuLS58Cw8JUY2GUY0enx|13|23-508-750-7646|6814.61|AUTOMOBILE|boost slyly blithely bold accounts. careful 6361|Customer#000006361|VzLr6guzIzrZpZfDZjndwix,|23|33-598-626-9499|-780.01|AUTOMOBILE|ggle furiously regular requests. regular, special requests use. fluffily express asymptotes are slyly alon 6362|Customer#000006362|0H7VGv7MTGlMxF8igvaoXhElF8S bqj|15|25-667-257-1612|236.71|HOUSEHOLD|riously regular theodolites-- quickly unusual packages are fluffily after the quickly silent packages: 6363|Customer#000006363|M,pjP 1RfkcWiPc0wMlUmiaV4cmlz57JMV1BI,|15|25-466-653-4756|5382.91|AUTOMOBILE|carefully ironic requests cajole carefully across the carefully regular asymptotes. quickly pending packages slee 6364|Customer#000006364|P7n15pH0vDHA|3|13-693-752-4345|4585.65|AUTOMOBILE|according to the carefully final accounts maintain carefully among the even packages. ironic, i 6365|Customer#000006365|q9FqvI49NhkzJH6lwSPbM,8sD|23|33-576-256-7432|2994.15|HOUSEHOLD| use blithely pending courts. even, ironic deposits about the quickly special dep 6366|Customer#000006366|EAiyyzQp,q7GUp0wQzThB4OasmaTEnE4z|12|22-771-559-9705|6622.87|FURNITURE|s are according to the fluffily ironic instructions. quickly ironic packages haggle slyly among the furiously 6367|Customer#000006367|hGP9UlKdD2BN3LePLnOJ|2|12-744-594-5061|-340.83|MACHINERY|y final foxes believe ironically. carefully pending theodolites haggle. carefully final pinto beans haggle care 6368|Customer#000006368|9wlLx9qDuskAA5Lg3CgbgK2,RYX3|7|17-527-847-1825|591.04|HOUSEHOLD|arefully regular ideas use furiously after the special, special waters: accounts alongside of t 6369|Customer#000006369|2GoiGrH9dEHSKHX3Y8fbA|3|13-817-557-7983|9563.67|HOUSEHOLD|e carefully final accounts. quickly final d 6370|Customer#000006370|mpRkYWqUoJQncfc7Q3VEEgI3eyoGyKFzPJ|3|13-586-788-3167|2734.72|HOUSEHOLD|wake furiously carefully regular theodolites. unusual foxes haggle slyly among the regular depos 6371|Customer#000006371|sqDry7KOh6ztkEJrGoX Y7NPIBW3|16|26-326-418-1698|2443.10|MACHINERY|onic packages dazzle furiously. ironic, final foxes wake slyly. re 6372|Customer#000006372|cN5aX8yXPYruA4rITmdh8e1QaOEyjEeub|7|17-585-758-4417|7197.10|MACHINERY|ithely special requests are alongside of the quickly final requests? furiously regular instructions lose furi 6373|Customer#000006373|Ap2nqlR9SntNoWkk5DFSON84r|22|32-608-822-4087|3700.15|MACHINERY|he packages. 
unusual asymptotes haggle alongside of the final, 6374|Customer#000006374|6U,tnx3EQ6ymmLLtrRYD8FwwaQEiwvEqA2pL|20|30-879-852-8442|3388.38|AUTOMOBILE|s at the ironic, final pinto beans nag quickly after the packages. slyly even instructions wake furiously 6375|Customer#000006375|,f00Mk7z1TQ4lHuZQA|17|27-709-238-4692|-510.18|HOUSEHOLD|nal excuses. quickly bold packages across the express requests sleep slyly about the slyly final foxes. ironic de 6376|Customer#000006376|9xX7j8zvAb3rY,y2N6rakYVhkcNShJZ|6|16-888-795-8432|3376.54|AUTOMOBILE|lent foxes affix against the even deposits. fin 6377|Customer#000006377|fb5rLUH6Hn|18|28-929-173-8781|9538.01|HOUSEHOLD|lly. instructions are. regular realms after the deposits integra 6378|Customer#000006378|jk4,Yt5J6YsUnLKs3Fj8PWF8y|21|31-227-241-6464|330.88|BUILDING|. express pinto beans doubt quickly. silently bold platelets haggle. furiously regular packages sleep alongside 6379|Customer#000006379|fJOnWmoLfEKa8FDt9T6foVT2njtBKSEnnsQlV0|1|11-989-205-6094|4057.27|MACHINERY|lyly regular deposits. foxes cajole blithely regular, ironic accounts. reg 6380|Customer#000006380|A3CZEoXr2U3M9TJq|14|24-601-213-9207|-453.65|AUTOMOBILE|its nag quickly. furiously unusual platelets x-ray. car 6381|Customer#000006381|qgLG,UnydWWKKYvXEe6g,|7|17-877-502-9214|7346.88|HOUSEHOLD|onic instructions wake furiously according to the carefully express instructions; pending theodolites 6382|Customer#000006382|iT3z1RZJfuFhTVQWn gUQQ30J59FLhUtNa1y|5|15-884-325-7498|2101.57|MACHINERY|ely pending requests haggle s 6383|Customer#000006383|pZMLOQVPjfqg3JvDp|8|18-910-470-6748|9130.34|MACHINERY|its. blithely express asymptote 6384|Customer#000006384|eRUjfmK9XOnuIxULs,H3g2jasWFLc|23|33-767-423-3297|1756.89|FURNITURE|sleep alongside of the slyly idle packages. slyly si 6385|Customer#000006385|feb0lDE33xT7COEBl4CL|15|25-446-991-1683|1948.61|AUTOMOBILE|eposits haggle blithely regular, pending requests. carefully even 6386|Customer#000006386| ,6di,DsCLRoei7glq03Dv03xA|24|34-991-678-6036|1252.97|HOUSEHOLD|sts sleep carefully blithely bold packages-- the 6387|Customer#000006387|YU5Ai0APMax|10|20-581-220-3948|2536.86|MACHINERY|ckages use slyly regular, regular deposits. special, regular deposits 6388|Customer#000006388|p8xZE4LxJw,BhxqdqhnokSPs7TIAZyieKyyzse|14|24-525-264-8052|9925.28|HOUSEHOLD| according to the regular requests cajole carefully above the even depend 6389|Customer#000006389|h1DyF2DxWF0|10|20-749-986-1120|8270.04|BUILDING|slyly express packages affix. carefully ironic requests use among the bold foxes? blithely regular hockey p 6390|Customer#000006390|bn2EoZThG0 s3CAOW0V|17|27-231-144-4566|7571.48|AUTOMOBILE| bold asymptotes use fluffily special packages! slyly final theodolites unwind slyly about th 6391|Customer#000006391|tJFRcLobeDbMdgzr1EfSw7PtH8Uqx97Pd|10|20-447-685-8271|3923.68|HOUSEHOLD|yly thin braids haggle quickly unusual, pending deposits. ca 6392|Customer#000006392| Nn14Ugte47JXBLpdJj4NAXbnsmyk2ykL8v0B|6|16-236-622-1653|9277.66|FURNITURE|s use. bold accounts affix about the ironic theodolites. ironic, ironic packages according to the special pack 6393|Customer#000006393|LrSfeTLkBD8iJmD4,mkCP0awj6UnAxhohA|10|20-600-941-3910|2762.42|BUILDING|ial sentiments cajole quickly. slyly unusual accounts boost blithely regular 6394|Customer#000006394|Qsb,p1hXnullljxxWg7svdXJSXRT58Tjxm2|2|12-653-895-4516|5972.35|HOUSEHOLD|rs doubt even requests. slyly express requests haggle ironic packages. 
pending accounts nag fluffily after 6395|Customer#000006395|g qNyxByuAIIKgSdK8ye9mw B1ujFXueIu9|17|27-165-881-6729|379.03|MACHINERY|are furiously. carefully silent accounts snooze quickly along the bold, final depos 6396|Customer#000006396|OkUGWXg2r42,|21|31-720-445-5419|-695.45|HOUSEHOLD| even theodolites nag furiously. busy instructions affix slyly after the careful accounts. doggedl 6397|Customer#000006397|OupmJTZAYWzow1H3zbv,v|3|13-204-711-1078|2415.67|AUTOMOBILE|ly against the furiously express notorn 6398|Customer#000006398|nE3eEiDxvFRyoT|5|15-144-311-5947|8155.12|AUTOMOBILE|ly special requests. bold, ironic accounts nag furiously sly 6399|Customer#000006399|dQbyCKR,g2,MXLYPiFE1NlLxzL|20|30-783-132-6452|1103.67|MACHINERY|tes. pending dolphins nag blith 6400|Customer#000006400|B7M0M7FZy06iyPxs VmGJaRJeYgSiL|13|23-177-650-5024|1362.29|AUTOMOBILE|ctions are above the permanently ironic packages. 6401|Customer#000006401|yveLKpTUIx7K6WtnhXAcz5|23|33-945-761-8571|2891.62|HOUSEHOLD|yly final foxes integrate across the carefully expres 6402|Customer#000006402|GLZyBHPsfH,y|9|19-260-224-4337|-594.88|HOUSEHOLD|uctions cajole slyly according to the f 6403|Customer#000006403|9eX1E4GyIXptoHrL5db b7wqVYaUB55u|24|34-376-305-4671|9717.24|FURNITURE|ly unusual accounts cajole slyly even platelets. carefully even package 6404|Customer#000006404|s,kUf5HrSnq,g8un,UAcz7HvRQ MV|19|29-917-546-1913|9678.44|FURNITURE| furiously bold deposits eat ironic ideas. slyly regular theodolites across the even i 6405|Customer#000006405|I0BmHPY,nxSz0vA8a|24|34-382-195-4386|6618.54|AUTOMOBILE|sly pending braids. slyly even platelets sleep fluffily final packages. express patterns integrate finally pen 6406|Customer#000006406|lL3dzJv7lVqtqz,DiuUHcvH1EcpM9X8jba|21|31-342-422-4630|3830.68|AUTOMOBILE|es would haggle after the final pinto beans. slyly ironic deposits sleep quickly. asymptotes h 6407|Customer#000006407|BtK4gy01cd|6|16-862-127-3910|8151.95|MACHINERY| ironic platelets. carefully bold excuses mold slyly above the pending pinto beans. asympt 6408|Customer#000006408|oa9bZUw3jwpkK,Gd|9|19-912-624-3972|2836.65|AUTOMOBILE|boost against the quickly final requests. daring 6409|Customer#000006409|T0HCEe4pnQrvw|17|27-890-763-2774|-451.44|BUILDING|its about the accounts nag even foxes! slyly i 6410|Customer#000006410|GGGN,NDpImMdzCHOjMg6D,35uOrTr3yJ71fW|9|19-197-885-8569|530.65|FURNITURE|side of the special pinto beans. dependencies are. regular, ironic foxes cajole 6411|Customer#000006411|z3ej9PD1gmKm1uaQySQhjFndp0kz062lnUVt|0|10-759-591-4295|6576.83|FURNITURE|y bold foxes. foxes haggle slyly about the silent, special ideas. slyly regular requests haggle across the pendi 6412|Customer#000006412|wJVfQt,87Ckyca4rS4yh8PPGbzQyVxCXqil9ex|24|34-744-473-7572|6753.76|AUTOMOBILE| furious accounts after the furiously regular dependenci 6413|Customer#000006413|okFK1CC8ibk3ml7X4ZhRl|10|20-949-158-4934|-563.78|AUTOMOBILE|ades. frays wake slyly. carefully final realms sleep. qui 6414|Customer#000006414|3mdeAURniRPufSi6dz2|24|34-799-456-8937|4444.30|AUTOMOBILE| final pinto beans. furiously bold pinto beans nag. blithely fina 6415|Customer#000006415|0SOe8iGHkEtlpwV7 e|10|20-971-763-1702|3396.66|HOUSEHOLD|ependencies. quickly ironic pack 6416|Customer#000006416|WGVJj9TyQ5ac3qL7RHW8jSOYQH6XZV|2|12-415-551-2448|6601.37|FURNITURE|lar theodolites. accounts use sometimes fluffily ironic packages 6417|Customer#000006417|06dHuGAxyFTfEygMp6ZU|23|33-360-140-2353|1256.36|MACHINERY|c sauternes. 
carefully final fox 6418|Customer#000006418| ydC4OuGkly|1|11-186-687-6620|4592.96|AUTOMOBILE|sts. unusual deposits haggle against the regular, 6419|Customer#000006419|JisGDcZRiDHg4yLNKSc2HPy32KfTIL,|12|22-243-778-7315|4838.38|FURNITURE|ound the fluffily furious accounts. courts haggle furiously about the quickly final packages. carefully eve 6420|Customer#000006420|ROdVxbHD0,GVCY,9NwYfDO|22|32-469-891-2051|4658.51|BUILDING|sts cajole according to the dependencies. final requests cajole carefully. final courts are fluffily express, slow 6421|Customer#000006421|fv4AI3OzYxdrM3I4cuDUoxMfOubOlO1m6Oxmv|3|13-450-314-2988|6650.59|FURNITURE| according to the slyly bold accounts are fu 6422|Customer#000006422|ZltvyXcMiO|2|12-501-888-2104|3415.78|HOUSEHOLD|nto beans. fluffily unusual deposits boost slyly. quickly 6423|Customer#000006423|7nudsRA5wmNNjs4,FThBletUyOIq|19|29-421-996-4033|9077.24|FURNITURE|inal, special accounts. furiously final pinto beans a 6424|Customer#000006424|ieZS lxnQBTqJO8BOfz7mcPbUsnlS|6|16-525-665-8732|258.29|MACHINERY|lly even accounts sleep slyly alongside of the blithely final asymptotes. bl 6425|Customer#000006425|y4kyZJUM4AOCGcaIjuj8as6XMgEb,tZyXaWTY2yS|9|19-638-440-4991|5006.99|AUTOMOBILE|fully even requests integrate quickly final pinto beans. carefully silent ideas cajole 6426|Customer#000006426|MBbafnBIIVTjNxVB8,yZcLEMlp5FbyMCFozSR|16|26-200-900-4409|529.42|MACHINERY| bold requests wake slyly. even instructions are. slyly express accounts nag blithely 6427|Customer#000006427|Xa17u3du45DiLnp8SEaIgKaS8l|5|15-852-433-6135|7907.55|HOUSEHOLD|s integrate even, regular foxes. blith 6428|Customer#000006428|jiZdycWOv3UcTXmeQYadg7xOV6LRX|20|30-758-610-5037|9217.56|HOUSEHOLD|kages boost slyly carefully pending packages. b 6429|Customer#000006429|723rhJstUlnqt siNCyOV67ZVvTEXcJ0PqJ1uE|14|24-314-971-8033|4648.01|AUTOMOBILE|thely pending theodolites haggle furiously express requests. fluffily even requests are furiously pending requests. 6430|Customer#000006430|xXFJR6GUfqeCyN5PiAvrw7HV2U|11|21-549-125-1175|6077.54|BUILDING|quickly among the special accounts: even, special packages according to the quickly sp 6431|Customer#000006431|alr2AvVdnwHuR6MYiNcZvdfHfN seFOpZMS|4|14-847-121-5795|8310.76|MACHINERY|inal orbits about the final deposits wake furiously blithe instruc 6432|Customer#000006432|4PGhIJsN8,hNjTsk6|22|32-173-374-5893|9988.42|HOUSEHOLD|encies. special requests after the regular, regular packages use 6433|Customer#000006433|a3pPw8Sauu6hhR4k5uL7wg1H95kiZ64Tk|19|29-909-421-8085|2412.87|FURNITURE|ickly final deposits use carefully. blithely 6434|Customer#000006434|XEo,6jjEzKR0aOUcozaEu8 gisw|2|12-500-275-2120|5131.87|BUILDING|e slyly bold pinto beans. final decoys use against the special ideas. furiously ironic courts against t 6435|Customer#000006435|Bg7iFjWjLVQKT0AEclRbHPBTDpvlrEXKAD9K2|21|31-382-296-1618|9295.79|AUTOMOBILE| bold deposits cajole furiously theodolit 6436|Customer#000006436|yfj8DtQgQrb52|13|23-860-371-1102|-651.11|AUTOMOBILE|ular dependencies cajole evenly final the 6437|Customer#000006437|pZ639dzBsf14zfwauL0jOSYTGQ870YRctiD|23|33-994-170-5321|-282.52|AUTOMOBILE| about the final accounts. slyly bold instructions are iro 6438|Customer#000006438|RyVaFQyhkoASdbPjYYBI|3|13-147-903-2434|1684.90|BUILDING|lithely unusual instructions nag according to the slyly special theodolites. express, r 6439|Customer#000006439|JoIRrAaO8XlKsPF35eCO8Vcy0o5L|13|23-346-902-8254|2048.44|MACHINERY|cuses. final, special ideas boost furiously. 
furiously close dependencies above the daring deposits wake according 6440|Customer#000006440|EhPeQVf4268eEE81yT7L3kHeBFC1nsvYjF|22|32-963-990-1859|7040.39|MACHINERY|aggle quickly excuses. regular packages nag ruthlessly regular, idle sentiments. bold account 6441|Customer#000006441|XG8rZ2j868y,WH1VUBZ|19|29-490-926-8692|9951.53|BUILDING|es affix carefully. slyly regul 6442|Customer#000006442|KHfAHw,EYDgYxunkuoCi5UZb 0nZL6i868CET|19|29-922-394-2085|9442.52|AUTOMOBILE|osits haggle quickly slowly final requests. even, fina 6443|Customer#000006443|pHv34YqZSMUDlEyxk Mlvf6Ub,|6|16-852-419-2587|1478.34|BUILDING|nic courts haggle slyly blithely regular theodolites-- special excuses use slyly. ironic dolphins are slyly f 6444|Customer#000006444|7wOibQuMiSsum0zAfieMfvToucnh|15|25-268-268-9763|6469.44|HOUSEHOLD|sits use among the fluffily even foxes. furiously regular dependenc 6445|Customer#000006445|J8HlYAyAUaCQpMoj4cTlS,TDmGK8gcKnrDxmUPG,|5|15-192-490-3051|5966.09|BUILDING|, regular theodolites according to the quickly even pa 6446|Customer#000006446|V0Dd,KY57jzAlKGMwRdstqlBUgAj12janY O8|2|12-139-856-4622|5131.95|FURNITURE|bove the regular, even instruc 6447|Customer#000006447|2iQk,R9iPSQKkb3e7oQi |23|33-450-821-7164|-380.57|MACHINERY|after the ironic requests thrash slyly carefully special 6448|Customer#000006448|13Wg2FUg2Ffki TBsY517Ztb4fytGvRe8peQtJ0|0|10-486-606-3968|3677.31|MACHINERY|ing requests. ironic, ironic deposits cajole above the carefully bold deposits. pending i 6449|Customer#000006449|pE4YcxWffV,7kwV8BX4XyOY4S7|1|11-503-687-8564|4467.60|MACHINERY|ptotes sleep against the slyly regular deposits. express requests wake ironically across the blithely r 6450|Customer#000006450|Gc,ojHknX6MUSgmXpv8EmEhKPkU|24|34-272-661-2143|9888.45|FURNITURE|riously slyly silent courts. ironic, final excuses sleep after the regula 6451|Customer#000006451|E,ekihaEI,uN4kGRUwQ csQjx3hVRvUDh,|12|22-457-204-2046|5307.46|AUTOMOBILE|ts integrate carefully carefully pending theodolites. quic 6452|Customer#000006452|Qrin8a98Frzmr0kMzVeX9h6|18|28-426-597-9924|1512.79|HOUSEHOLD|ironic theodolites. requests detect carefully silent courts. slyly 6453|Customer#000006453|pZ0jI IY sh07aKPdmQ7uv3ta8eU,jCXFbL1|22|32-372-439-2517|2372.25|BUILDING|into beans sleep at the blithely brave accounts. unusual deposits wake 6454|Customer#000006454|ypyPdN8tQvcYERecN,3Oufizu|1|11-276-495-7172|4437.76|FURNITURE|ly special packages cajole ruthlessly around t 6455|Customer#000006455|unZZGWq2HBJ4EKR3UlbpcORxMlO|2|12-239-754-9487|3482.98|MACHINERY|c pinto beans sleep blithely carefully express accounts 6456|Customer#000006456|XF8NeBd23gN|23|33-589-473-6210|5405.47|FURNITURE|dependencies. final accounts are furiously. bold instructions use quickly. express, br 6457|Customer#000006457|RWwZBhG0WF8ixJJ9csB|2|12-292-531-2808|9754.94|MACHINERY|olites cajole blithely. blithely bold foxes wake blithely. regular deposits sleep blit 6458|Customer#000006458|9hveScYbQY iKmqJuNs9PetcC|9|19-701-840-4110|5425.87|MACHINERY|pending asymptotes are alongside of the ironic, pending dependencies. blithe 6459|Customer#000006459|zd0TdwWz,IhDrPeawC3G9SHV2Tr0r|16|26-264-700-6190|633.59|AUTOMOBILE|carefully regular requests are furiously about the packages. furiously special accou 6460|Customer#000006460|Pr5bp2A90JjbrjE1XN5HM1|12|22-504-912-3129|9685.49|BUILDING|ending ideas. regular platelets use quickly final theodolites. 
foxes affix above the regular epi 6461|Customer#000006461|vgpOxXZRI,,Y92 1NO7N98jKwnOY7|5|15-362-557-8104|4166.62|AUTOMOBILE| blithe packages boost slyly. blit 6462|Customer#000006462|3njV5Ft7NhNxW50neyjlryC0Ylq0fI0n,P|18|28-848-982-8505|433.66|AUTOMOBILE|l requests play carefully across the instructions. blithely 6463|Customer#000006463|M9Oy,Xx9CbN GSl3L6khL0LTRgCi8MgX0gTzVbCQ|23|33-939-251-7239|4447.04|HOUSEHOLD|final foxes. accounts should nod against the unusual, even instructions. blithely regular theodolites hag 6464|Customer#000006464|eF9E6ScHCw9,z8nF0py9 ySlB0 iHTIEEZRWl6H|1|11-870-572-9943|5468.53|FURNITURE|telets could are quickly regular packages. fluffily iro 6465|Customer#000006465|hh2iSvQ3ixAtfId4QsSzJOMORcy4t|24|34-206-617-3619|2544.19|FURNITURE|express packages. ironic requests wake carefully 6466|Customer#000006466|nxPwv4px0 1LW05BtXaT7m,b2a63Dq8s25b09|20|30-314-197-3871|1131.19|MACHINERY|believe furiously. unusual, pending deposits wake. quickly pen 6467|Customer#000006467|VKsGXyxDaRzjYny fn 1zOOjPmkA9ZDp1C6lu|11|21-298-482-2423|1798.41|BUILDING| slyly special instructions boost carefully. final packages 6468|Customer#000006468|NTWZxBhUwmLUSwoXnrTwKdFs2|8|18-741-953-3789|6840.45|MACHINERY|y ironic deposits are along the furiously ironic requests. theodolites can are. accounts use after the f 6469|Customer#000006469|kDCpN,X8RAKe6EHXL6zsygq57|7|17-884-283-2518|4429.67|MACHINERY|osits hinder slowly along the regular accounts. even, unusual foxes wi 6470|Customer#000006470|BHgwAOTI0WReuiU9Ub1y|24|34-790-284-2739|-501.95|HOUSEHOLD|c platelets. carefully regular platelets sleep carefully. blithely unusual dependencies haggle. packages use furio 6471|Customer#000006471|z64U8Zsf 1ItZlk|1|11-284-691-1178|9608.94|AUTOMOBILE|bove the slyly even requests haggle even ideas. slyly r 6472|Customer#000006472|sLyBV0ZtMvAH1QSOnxg6koHGsKzzjm7RWuEMRWQV|20|30-220-128-1332|4515.01|HOUSEHOLD|onic pinto beans. furiously final platelets promise never. fluffily ironic instructions p 6473|Customer#000006473|Ee0nC9W1Y9j6e4V|9|19-156-916-4569|3728.77|AUTOMOBILE|al theodolites boost across the even dependencies. express theodolites sleep enticingly fluffily ironic i 6474|Customer#000006474|AqmYPBgr9r vxl8sMzecS5ak3JJ4ByqN5 |9|19-500-356-4558|478.16|BUILDING|e blithely pending dolphins. packages 6475|Customer#000006475|itSIjP3CZMWKp3MjgSuPFim0l6T|8|18-427-310-9935|4338.27|FURNITURE|pades. blithe attainments impress quickly ab 6476|Customer#000006476|NbYct5LvnQfrajINpQm34t3yxD32UQED5bk5dNPG|16|26-531-511-8701|3327.81|AUTOMOBILE|side of the deposits are slyly around the ideas. quickly final accounts wake furiously. bol 6477|Customer#000006477|qylnoy8Yoou7au1IjGNmAR,amKok|1|11-336-397-1897|4554.64|MACHINERY|ly permanently final warhorses. deposits ca 6478|Customer#000006478|E1ZXZROw4YBwc4HhN2M|6|16-483-594-1126|9874.64|MACHINERY|lar accounts are quickly fluffily pending packages. special pinto beans above the slyly bold excuses hag 6479|Customer#000006479|r,YZ8Bqo9gHbHFfnEITjeU4riKFay4mvovDTmc|11|21-143-392-9914|4160.96|FURNITURE| quickly express accounts. slyly furio 6480|Customer#000006480|jZCx1qKm8AJtV|14|24-732-299-9650|4276.33|BUILDING|ending realms cajole at the caref 6481|Customer#000006481|QM0JCcwqe 2OSh5LpTjoQVL0dIbCJ3unJO5V|9|19-708-287-2759|7278.40|BUILDING|pecial packages are busily according to the ironic escapades. slyly 6482|Customer#000006482|dOVykuZo,txNWSMQncRnWc4Goi9sslnW5zv|2|12-270-253-5013|6111.54|MACHINERY|ncies. 
slyly unusual platelets haggle slyly above the even, express deposi 6483|Customer#000006483|ZT8TwDuABR77|11|21-679-759-6880|7606.73|FURNITURE| are. carefully bold instructions by the foxes are along the slyly express theodolites. eve 6484|Customer#000006484|VbH7IC5F ZR1CDcyyjxX|3|13-467-401-7503|2438.45|MACHINERY|ress whithout the blithely regular deposit 6485|Customer#000006485|sCWkSZkKOpsnFhyNUIccdfvFvipe|1|11-541-386-1396|5926.60|HOUSEHOLD|ly express foxes integrate after 6486|Customer#000006486|huOvXcWg648ez69Yyebn8|11|21-909-289-7137|1380.84|MACHINERY|l theodolites. slyly regular ideas sleep carefully slyly idle asymptotes. fluffily unusual ideas 6487|Customer#000006487|1JfLyCcXKGeHiEV|12|22-967-924-9865|6789.80|HOUSEHOLD|uests across the packages could have to nod furiously silent dependencies. final or 6488|Customer#000006488|fEUg4BCUhNIcaNVKNdkadz2N2lIDWybFm,quvpnO|8|18-915-822-5617|144.97|MACHINERY|final deposits serve carefully about the slyly ironic or 6489|Customer#000006489|z6IrxkffxNyMYDRx4qR,PQnTDYSMXG3bsdBD8mx|8|18-713-226-2794|3494.92|HOUSEHOLD|s haggle around the carefully unusual deposits. carefully even theodolites are enticingly ironic dep 6490|Customer#000006490|VjpZnXeSD2oIK0U,CNLNfRxZF1NDFadS W|1|11-122-897-9952|8180.23|FURNITURE|n instructions lose carefully alongside of the fu 6491|Customer#000006491|J3,jSVfCkxq91fHjycRrqx1mQ,sGEFBT7iCtgu|6|16-977-273-4547|5239.92|AUTOMOBILE|al foxes sleep fluffily along the 6492|Customer#000006492|nPlHrfRAlpx|0|10-440-853-6387|-723.71|FURNITURE|ruthlessly regular packages. pending platelets near the slyly ironic packages haggle quickly against the blith 6493|Customer#000006493|ZT2dynxWMfOTM|19|29-295-246-8539|500.11|MACHINERY|ending decoys. furiously thin ideas cajole according to the f 6494|Customer#000006494|Vu5i1odOtPFZVNY53su76yFnal |12|22-121-359-7339|-795.34|HOUSEHOLD|ts across the final packages are slyly ideas. blithely unusual ac 6495|Customer#000006495|Hs,w9KO2RR At|18|28-465-937-3117|6679.86|AUTOMOBILE|deposits. theodolites hang blithely after the carefully ironic 6496|Customer#000006496|tdAcewkJ,9IL7tYdP6xFSlR4sxhzqYx|14|24-755-676-4374|517.43|BUILDING|cies wake slyly according to the blithely ironic accounts. slyly even packages across the slyly ironic a 6497|Customer#000006497|0m1WM8YSt2E1jhOAohr3Zs9NJuVlgORZFF85yp|21|31-417-915-1172|4355.91|MACHINERY|refully pending packages. even pinto beans haggle across the pinto beans. final instructions after the foxes 6498|Customer#000006498|M,,F70BZbI7wFy Eij7bct0rFLaJkqFN5DS8vS8|3|13-779-226-9489|2765.60|MACHINERY|side of the pinto beans. slyly express theodolites sleep regular, special platelets. slyly bold depos 6499|Customer#000006499|zXN1H4vSEbL VEvBO5lT8IICF|22|32-496-645-5422|4390.89|FURNITURE|tegrate alongside of the final, final theodolite 6500|Customer#000006500|SQLIuXRcQ3Q7G38eUgMXV4ybIKPQxKS0|21|31-954-475-8701|7764.33|HOUSEHOLD|ggle against the silent excuses. frays doubt quickly. carefully pending deposits sleep quickly final 6501|Customer#000006501|9v8gAkVB KcQoBRKbQ2rykps4fKqYX|24|34-433-352-7689|184.55|FURNITURE|pliers. platelets eat slyly against the even accounts. fluffily express requests are furiously unusua 6502|Customer#000006502|mbRY4j5rudW|11|21-562-896-1594|8710.50|BUILDING|eep among the fluffily ironic req 6503|Customer#000006503|4p3W4XHBSKbswkOpK3jehDgW|11|21-476-545-9987|7492.32|FURNITURE|st the quickly careful instructions wake quickly according to 6504|Customer#000006504|,XoJJFtPqo6Hk7GxI4agFf9r5|8|18-223-523-7723|4395.75|HOUSEHOLD|al requests. 
carefully unusual ideas en 6505|Customer#000006505|,35tS 0nsQ,|10|20-116-997-7935|1320.29|MACHINERY|al theodolites: carefully bold dependencies haggle pending accou 6506|Customer#000006506|Mol5UoqoOCHvOfBHz3s07NcF7HoU2rIWZGlkcI|7|17-831-994-7711|1146.56|HOUSEHOLD|wake blithely after the idle, even packages. final pains c 6507|Customer#000006507|ewNKMjvFSXSHZRYQnPbMCYqB|21|31-762-523-7126|7134.11|FURNITURE|ar foxes around the carefully even deposits haggle quickly quickly final i 6508|Customer#000006508|Swm,c vbMg36c1z5,4fdBedBNG1OrfhsTPco|18|28-986-386-9035|952.00|FURNITURE|ters are above the express, ironic dependencies. express deposits according to the furiously furious packages pri 6509|Customer#000006509|70iy4ZLmA6,OjYW7tTtxlnxkJf N5|24|34-658-604-7430|4234.26|FURNITURE|ly final theodolites affix fl 6510|Customer#000006510|mHOy9Hl0Qj|0|10-992-585-3630|4211.77|MACHINERY|express accounts according to the quickly specia 6511|Customer#000006511|Hqpb ssH,HNi|0|10-987-779-8094|238.09|FURNITURE|ost slyly carefully even packages. slyly final theo 6512|Customer#000006512| 33vAzHrrXIgd68KBr4mD|7|17-511-867-8977|6571.18|BUILDING| after the final requests. carefully ironic packages nag slyly. fluffily eve 6513|Customer#000006513|0yWTa31spHkfkccVy,mRh|4|14-344-180-9079|-125.66|MACHINERY|ites doubt fluffily. furiously regular deposits 6514|Customer#000006514|F8eUA3o2KlPxQoOP42h,cNYShHe|10|20-816-198-3141|8911.35|AUTOMOBILE| sly deposits alongside of the requests are carefully carefully unusual a 6515|Customer#000006515|knMzD7lwT9|16|26-565-114-8675|6121.09|AUTOMOBILE|t ideas. excuses across the furiously fluffy accounts use quickly about the express, 6516|Customer#000006516|zL3M2qRYhxxz|4|14-657-773-6508|3208.09|HOUSEHOLD|ole furiously among the slyly express requests. foxes sleep blithely blithely bold ide 6517|Customer#000006517|tjhz2CB9lXX0TcmPRX|17|27-923-897-4118|5407.70|FURNITURE|the furiously ironic packages haggle blithely pending req 6518|Customer#000006518|1gxNOhqNUnOgcA8pfdSuYB1ROX|13|23-117-625-2924|3187.34|HOUSEHOLD|inal requests above the furiously final asymptotes are bold, 6519|Customer#000006519|NomK60Y3wXy06J7YEMGp 3,XRpg0j9IxcDtVEAM|21|31-885-482-7206|8593.58|MACHINERY|pinto beans. furiously unusual platelets integrate. furiously special packages are. fluffily express fox 6520|Customer#000006520|GgmUKbGO4YqccuU|2|12-135-446-1603|326.88|FURNITURE|p slyly about the silent requests. qui 6521|Customer#000006521|ocqjyIQB3nrDvcPfNK6YdWLJRV,aNNfYjBl|3|13-856-838-6626|2762.22|AUTOMOBILE|ironic braids above the regular r 6522|Customer#000006522|9lIS6iU8xfx2UB77M,cRbhUYe7WaawYE|2|12-106-450-9122|2611.92|MACHINERY|ress notornis integrate blithely inside the slyly thin deposits; ironic, regular platelets hinder 6523|Customer#000006523|ppBY8l3kJDLcOjqD0mF5H|21|31-855-623-3767|6472.78|BUILDING|counts wake blithely. carefully pending requests haggle carefully. slyly ruthless asymptotes wake furiou 6524|Customer#000006524|3NMnX6Cbi83z3Cul C|14|24-198-901-2901|7024.93|BUILDING|rges. even packages sleep across the furiously 6525|Customer#000006525|9TeyC0eRLeHUiJYR9EA8fimqlu6biVgxdxVx|18|28-268-185-9919|2973.93|MACHINERY|s promise furiously across the final grouches. 
furiously final accounts according to the ironically final re 6526|Customer#000006526|j2e,TUUVZxaFSuiJauFDS2eo|5|15-609-391-3715|7755.32|BUILDING| courts cajole blithely above the fluffily bol 6527|Customer#000006527|R0sIPtavn8fgJTfsLCVTfSuuFHkTVFh0ZscC6Rub|13|23-215-568-8678|2722.00|FURNITURE|furiously: furiously unusual packages cajole. pinto beans are furiously across the unusual accou 6528|Customer#000006528|5F4Zd7t2UCw COOvRxsTqSFYbNuT3LNWqAbE|15|25-123-454-7331|8627.24|FURNITURE|l requests boost carefully across the furiously regular dependencies 6529|Customer#000006529|z MsqjmOtVW8ynDudk6XiUsAurx57YfL9|6|16-605-167-6202|5032.68|BUILDING| orbits are? unusual, ironic accounts are slyly. unusual requests sleep after the ironic, ironic ex 6530|Customer#000006530|MV,sirXvlYHdNS|17|27-993-759-6289|3190.15|FURNITURE|e requests; quickly regular excuses nag 6531|Customer#000006531|mS8cUs YGyXTFQ6,raiT IVNEprioVqOkf02J|2|12-613-384-6941|889.74|MACHINERY|ng warthogs print furiously i 6532|Customer#000006532|TvhgzSH6z6mBFPH4PVuV8WYBdwaL|1|11-390-945-2159|2857.43|AUTOMOBILE|xes. quickly bold accounts detect accordi 6533|Customer#000006533|nlQQZE8X8Gcs0DTc4FbikdL|0|10-292-966-8076|7733.20|AUTOMOBILE|ions. requests wake slyly alongside of the even, even depths. always regular accounts after the blithely exp 6534|Customer#000006534|VxsPQuMbLvQ|14|24-272-292-9916|4916.15|HOUSEHOLD|ly alongside of the furiously unusual packages. special, ironic orbits 6535|Customer#000006535|x nBq56OShrH,akf9CsjKBDGZXYjPPD0JClsM|22|32-183-174-5882|2786.57|HOUSEHOLD|e blithely express requests. packages breach along the packages 6536|Customer#000006536|LCjRJ4bGqSQQ|7|17-209-711-6334|4800.56|HOUSEHOLD|ate slyly slyly regular accounts. furiously speci 6537|Customer#000006537|crEEScryYpiugrKGzY2vjFvO22Bq,wCnTwIzpYN|10|20-987-261-6336|7057.74|BUILDING|ymptotes. fluffily final packages above the packages cajole carefully blithely regular excuses. packag 6538|Customer#000006538|zKfgcSybmZ8|19|29-312-894-5415|4435.01|FURNITURE|nal, regular packages. furiou 6539|Customer#000006539|7Il6KjzRIE xHg8B2wsoIP5y 5t|19|29-262-259-8994|1175.83|AUTOMOBILE| with the carefully even requests. even, bo 6540|Customer#000006540|I8kQ2XzSVZ3sXkl|16|26-663-746-1303|4042.13|HOUSEHOLD|instructions integrate carefully. final requests sleep. quickly special de 6541|Customer#000006541|3ua8FZTnQ5aC|18|28-614-720-7659|9117.32|HOUSEHOLD|al dependencies. carefully final instructions cajole account 6542|Customer#000006542|0r NUZcHVeN5ZImNTuc2Gjf3st|4|14-681-824-2073|2821.51|FURNITURE|r packages. carefully even accounts cajole carefully along the regular, ironi 6543|Customer#000006543|vv4vWlUAksCLLGzokFhUypa0d67QFAE2pxiH,|18|28-439-480-5859|9050.06|FURNITURE|express requests wake slyly even dependencies. close requests around the car 6544|Customer#000006544|blMRmjCIk9qZDmu|8|18-430-406-1141|5344.32|BUILDING|totes sleep fluffily. ruthless, special accounts alongside of the carefully 6545|Customer#000006545|EVlv0HyoPBatXo6VRaH0uSmnfd0YQZggZBaeHrB0|3|13-174-449-5530|5016.87|BUILDING| thin pinto beans boost furiously ironic instructi 6546|Customer#000006546|zYQtqlAP5jGhQOtMELGmOF QYmKwL,|22|32-365-893-3285|1530.52|AUTOMOBILE|e carefully against the pending requ 6547|Customer#000006547|3zLebAX1KgiP7|7|17-710-138-4406|3321.43|HOUSEHOLD|ide of the slyly unusual requests. blithely ironic packages haggle. quickly silent pinto bea 6548|Customer#000006548|32TjRFavvtwh M|1|11-397-769-2069|7325.51|HOUSEHOLD| have to are always pending deposits. 
slyly silent requests wake carefully regular accoun 6549|Customer#000006549|qAhHr41OY7bbo|7|17-522-198-9893|5850.07|FURNITURE|encies are blithely. special theodolites above the slyly regular realms cajole fluffily foxes. unusual requests sl 6550|Customer#000006550|3bUibIk2 e0g9upYw7f|21|31-679-244-3796|2107.94|BUILDING|ending, bold dependencies boost. regular, silent foxes haggle. deposits h 6551|Customer#000006551|CFd12CDJGcvLDzKaWj SOIjHwSGuIjnqclcJwqJh|11|21-528-245-9647|6066.94|MACHINERY|ans? furiously ironic foxes hinder blithely pending, pending deposits. blith 6552|Customer#000006552|Nf6QCAunWDenuiZJmxzANjkcjzjl|20|30-889-967-1134|6162.01|AUTOMOBILE|. furiously express foxes according to the slyly idle accounts mold ruthlessly slyly slow packages. special, p 6553|Customer#000006553| ocLpu754,ol|16|26-166-724-4677|8985.90|HOUSEHOLD|refully regular courts. requests sleep: special, final accounts sle 6554|Customer#000006554|A9l1P,V5p9431yso381EbxGEBIrjzbQqe4 Hn|8|18-845-570-6654|5037.71|AUTOMOBILE|e along the warhorses. fluffily fina 6555|Customer#000006555|UMegOBlfpGA0IaM|21|31-923-419-2629|-120.55|AUTOMOBILE|alongside of the furiously special 6556|Customer#000006556|p8DBE,GLPulEItM,G,YMkdQ|10|20-224-737-2850|6794.45|AUTOMOBILE|lites. blithely special accounts use furiously deposits. blithely regular accounts use evenly pending accounts. 6557|Customer#000006557|wUknF8m7MjQL,,6nUI1gB LWN|0|10-666-886-5603|1405.40|MACHINERY|even attainments are carefully pending asymptotes. special deposits could have 6558|Customer#000006558|Kzbfegyh P0YRTxW9aCqgoNrx3jYa8j6j|13|23-111-200-2537|1525.10|MACHINERY|ages after the slyly regular r 6559|Customer#000006559|j4vrCbCwWy4ZdMUF, bHX58wklRdI0|0|10-180-831-4219|314.77|FURNITURE|ts. carefully bold packages cajole permanently besides the quickly final foxes. bol 6560|Customer#000006560|nicoCxzD22IH|3|13-393-157-4401|8302.04|AUTOMOBILE|silent deposits sleep quickly. platelets sleep furiously furiously bold ideas. carefully unusua 6561|Customer#000006561|U B1notuUA|24|34-800-483-7728|6486.47|BUILDING|nstructions. regular packages haggle against the bold packages. deposits cajole. blithely unusual pinto beans 6562|Customer#000006562|6XINODN,YblT3W5FrWSo2voo7MeU5kv8hTni|14|24-485-841-2292|6057.40|BUILDING|s accounts. regular, ironic theodolites haggle 6563|Customer#000006563|ckpsWGe2Xt2QnI05rzcbreoFdTEK,OwotWDbccxm|11|21-535-565-6266|-250.80|BUILDING|bold deposits after the ironic, regular asymptotes are regular dolphins. quickly regular requests are bra 6564|Customer#000006564|pqkVXBhs6SV,fGXfelR1l29,yit1uOZ|13|23-518-531-7793|6761.96|MACHINERY|can cajole blithely even foxes. dogged instructions wake after the bold requests. furiously regular pin 6565|Customer#000006565|hqYF09xT hXv3RHhuSgCCfHnZr7Sz, 2Zuy5XfRN|22|32-398-565-3295|5309.19|AUTOMOBILE|furiously express deposits. furiously even asymptotes cajole quickly. final dependencies sleep slyly. 6566|Customer#000006566|i9Aw4WrBV7UKH3JXXjsdz5 W5UKD1t,pSOo0v7r|11|21-404-346-3746|8548.38|FURNITURE|ages haggle furiously regular p 6567|Customer#000006567|R8iEyHwfq7JefvXy7woKlcnqbVN 0TYcPZu|5|15-714-627-6738|-379.87|FURNITURE|l ideas nag furiously bold hockey players. furiously unusual requests brea 6568|Customer#000006568|7KgPG F0nEcyBKpQJqL|22|32-338-335-4860|4514.95|FURNITURE|ly. regular, silent dependencies affix about the idly special excuses. co 6569|Customer#000006569|67iACh32SK|1|11-178-911-3792|2712.91|MACHINERY|al dependencies wake blithely against the ironic pinto beans. 
final 6570|Customer#000006570|Xvm7kfDpAmTyhISPbrqibCUopcCq1qqNCOE1pzlO|0|10-475-471-1307|9873.25|AUTOMOBILE|wake bravely against the blithely express theodolites. furiously silent deposits a 6571|Customer#000006571|fj4IX5Zk4vvfUuEIC|14|24-892-309-1142|2767.45|FURNITURE| pending accounts sleep above the blithely regul 6572|Customer#000006572|ar ADMGk0y2|19|29-771-561-9164|2540.69|MACHINERY|telets use blithely accounts. pending platelets integrate fur 6573|Customer#000006573|N58H8Hoy0XD216MOSnWysRXUlIsVqAUR6GZ1LTlF|18|28-312-121-4734|8367.22|FURNITURE|lyly slyly pending foxes. blithely final requests cajole slyly after the even, pending deposits. slyly i 6574|Customer#000006574|nzW785SZCqoQLHUqxecq,xzU0EyIxa,bwZ|17|27-392-453-6805|1972.26|AUTOMOBILE|ts above the carefully ironic packages cajole alongside of the sile 6575|Customer#000006575|GARTfwst7rRbB5|22|32-486-660-6159|2769.78|MACHINERY|uriously ironic accounts haggle blithely. ironic pinto beans use carefully furiously final multipliers. fl 6576|Customer#000006576|cWpLaNr2DIuZanI3i|12|22-167-100-5796|8636.08|HOUSEHOLD|ual packages. ironic, special deposits thrash slyl 6577|Customer#000006577|uEQPgw rPzHldPtHfUqU1r4K5|20|30-983-783-3040|-395.07|HOUSEHOLD|egular packages. furiously ironic fo 6578|Customer#000006578| 5L06W67,Mw8G|2|12-946-562-5905|1973.65|AUTOMOBILE| silent accounts haggle blithely blithe ideas. carefully special request 6579|Customer#000006579|tpu9XN6JdsLCO6nnqauXv3|21|31-593-816-5830|4037.08|FURNITURE|about the blithely silent foxes need to sleep ironic, fin 6580|Customer#000006580|AaTSoiFwZUkdYNegqMCCs|6|16-285-854-7551|1193.35|HOUSEHOLD| sleep requests. slyly even asymptotes sleep; sentiments affix according to the waters! final, final fra 6581|Customer#000006581|m7AxxAwDpU173tVX8AryB4bRTv|0|10-473-250-2099|1624.68|FURNITURE|l, special requests after the quickly regular requests are fluffily pending id 6582|Customer#000006582|roC81vpXtYqj6w2ofenW|3|13-543-506-5912|1920.90|BUILDING|st the slyly regular foxes. even 6583|Customer#000006583|xm9DySSRKsU04Oru|15|25-719-747-8483|8469.57|HOUSEHOLD|thely dependencies: quickly ironic tithes wake fluffily special, special requests. regular, spec 6584|Customer#000006584|KVHnlcQNk3RAGL9llr|7|17-892-368-3465|6059.87|MACHINERY|lyly special theodolites cajole fluffily even requests. even pinto beans among the iro 6585|Customer#000006585|FSuVskGB021iRHHJpNyQiBYx2S2eS R1g9|24|34-421-647-5744|8304.87|HOUSEHOLD|ronic packages haggle blithely packages. unusual excuses sleep busily. slyly careful dep 6586|Customer#000006586|rmyIMRGGnIp84hB9APjbpN3l2J4 lDcogPRb|13|23-442-685-1204|9955.66|HOUSEHOLD|oss the slyly regular requests. furiously even requests believe. quickly ex 6587|Customer#000006587|U2vx2k5HvCCh MNEYbIRD3BwkR|17|27-495-708-8832|1888.45|MACHINERY|the ironic, unusual accounts 6588|Customer#000006588|q4ECgmz0iqJlZeKE0U|1|11-899-895-2340|2153.86|FURNITURE|ans. even instructions wake at the slyly even packag 6589|Customer#000006589|OPaay XGwsQ5FPbgRupMO5|2|12-462-422-9223|7820.61|HOUSEHOLD|e ironic pinto beans should boost ironically furiously ironic asymptotes. blithely regular accounts despite the furi 6590|Customer#000006590|aMW6NjpCUVPZxoLFEQ3V75cZ5eVfmeGc|14|24-797-771-6036|106.60|BUILDING|y final requests are. foxes mold carefully regular depos 6591|Customer#000006591|5f8amVgHTYIC9LNg,oJ2358|10|20-937-427-5966|7511.62|FURNITURE|lly ironic deposits integrate regular ideas. packages are quickly. 
fu 6592|Customer#000006592|ICcURs4cxBOwwDlSm1N0Q3o2gBIhX|21|31-112-360-4445|9860.40|FURNITURE|cuses. furiously bold requests wake above the furiously unusual dinos. ironi 6593|Customer#000006593|bXigSMqStoMDk4bZGp7sSpBFr,KfbgZzZkU6x|19|29-550-277-9067|6893.52|MACHINERY|ake among the fluffily ironic foxes. dependencies sleep orbits 6594|Customer#000006594|2cDc15tGdriYteAK75|18|28-438-658-3673|5410.27|HOUSEHOLD|fully ruthless accounts sleep across the slow instructio 6595|Customer#000006595|uVa27rCZ,a|19|29-292-466-2278|2348.08|FURNITURE|s among the unusual forges integr 6596|Customer#000006596|v05Csj41kqY8c Z|3|13-153-530-7399|5083.03|AUTOMOBILE|ronic decoys. slyly special packages above the slyly regular deposits cajole past the regular, final platelets. 6597|Customer#000006597|xan0fBW83D27pugrU|2|12-820-261-1596|2468.03|BUILDING|y unusual packages. blithely express deposits run slyly. si 6598|Customer#000006598|gbSYPPXD xhYTY|13|23-340-258-3248|8171.34|HOUSEHOLD| blithely final braids; furiously express ideas cajole c 6599|Customer#000006599|zZJjOj,Fl38qicLtaaRFZmXBPrsOPu6K|0|10-297-776-2902|9848.52|HOUSEHOLD|sly ironic ideas sleep fluffily regular instructions. special, pending pinto b 6600|Customer#000006600|m3pLs7ZW2DQGLirHs2KrrsVG|3|13-922-914-9708|5090.20|HOUSEHOLD|ut the furiously ironic realms hang furiously final packages. slyly unusual instructions nod furiously furiously i 6601|Customer#000006601|8CksnofGDe3,eOr yd G1NJvNV8g|3|13-158-904-5841|1921.72|HOUSEHOLD|y against the slyly bold deposits. carefully regular sauternes above the furiously silent deposits 6602|Customer#000006602|xYS0xzAVCHivnaBFSkuHzezVfozYTop|3|13-468-842-3174|252.68|BUILDING|structions. instructions sleep blith 6603|Customer#000006603|G0VkNa06eg5whaAH6XUH|17|27-100-185-6322|9705.48|MACHINERY|ronic, idle foxes. bold, pending warhorses are: dependencies use boldly ironic requests. quickl 6604|Customer#000006604|kmwPS7a1rYHG3d2KI12OKOegpaHNwQitCvRbb|5|15-116-310-7342|-850.55|HOUSEHOLD| use above the final asymptotes. ironic, ironic instructio 6605|Customer#000006605|bJpsYu5HBIgwd3bPpcfTMme|5|15-797-674-4556|2748.15|BUILDING|o beans are after the packages. even pinto beans boost according to the final excuses. fluffil 6606|Customer#000006606|veGv5 O g1eNuMCQ8lbO0X0|18|28-322-896-9125|8130.69|FURNITURE|y special asymptotes. even, even requests above the enticing 6607|Customer#000006607|pm9R99glhFCWZzIEllz428 TqMQQmX|17|27-924-798-8911|8585.44|BUILDING|regular deposits. foxes are blithely carefull 6608|Customer#000006608|WZYAo7ClTq8j|1|11-275-467-1847|6611.72|FURNITURE|s pinto beans. furiously regular platelets according to the furiously regular hockey players slee 6609|Customer#000006609|9XLzC3FZ8xeSgZX2PiZ2JTpZP7uW,KsvvqY|15|25-264-475-4079|-773.01|HOUSEHOLD|theodolites nag quickly idly ironic accounts. ironically even foxes are. blithely ironic platelets c 6610|Customer#000006610| QFFI2olnw1MmJfAqwsm0iw4oNDjU6kITjOa53h|8|18-120-297-4174|7884.20|HOUSEHOLD|ently even packages sleep furiously. carefully even theodolites haggle quickly exc 6611|Customer#000006611|aQKv,MgJ9FK3NjvADMZ3rhl|3|13-452-843-5081|1801.07|HOUSEHOLD|carefully thin, express requests. carefully re 6612|Customer#000006612|gnqac7Ybh2kSoqKa3ASVAHvypm|17|27-106-926-4405|7620.71|AUTOMOBILE|ld asymptotes. carefully final requests haggle fluffily regular, regular platelets 6613|Customer#000006613|zPS5aK 66ca8nD|4|14-453-488-2934|6296.69|AUTOMOBILE|unts. 
ironic, even instructions cajole blithely a 6614|Customer#000006614|ICoJpB8v3 QU8AfZ|11|21-180-351-9946|6559.73|MACHINERY|eep blithely slyly pending platelets. ironic, express ideas cajole according to the regular ideas. slyly r 6615|Customer#000006615|GkKngc3qcaXlUc0oUGBVh,Ah|22|32-821-576-6664|5407.17|HOUSEHOLD|kindle. always regular accounts thrash furiously ab 6616|Customer#000006616|8ssRl7vqBcxVrQWh xQdt5U1zX34R5ga Txw,|11|21-804-310-8614|5341.58|HOUSEHOLD| silent packages ought to use 6617|Customer#000006617|gkz18C,mxfkSot1U zHcc8E|18|28-955-200-3871|3008.53|MACHINERY|ainst the blithely regular deposits boost carefully carefully unusual ideas. quickly u 6618|Customer#000006618|jUw3FhXzO0qfzz zXxWOhKwfmI3r|11|21-610-354-6624|6411.46|HOUSEHOLD|nal requests affix quickly pending packages. close pint 6619|Customer#000006619|clyImCIDigUVv1edDwAG34tr7MWfI|20|30-409-954-6902|-549.38|AUTOMOBILE|hely final instructions nag; carefully bold accounts maintain after the even requests. carefully unusual saut 6620|Customer#000006620|,paCQ5qIt,Ylr,iREwAefW8ys5k|13|23-766-421-3496|4441.83|FURNITURE| ironic accounts doze carefully according to the blithely regular deposits. slyly silent reques 6621|Customer#000006621|EuR3TpdBnWKm6OMxOW9yAuiT|8|18-801-593-9685|5852.43|MACHINERY|ymptotes sleep quickly regular platelets. furiously ironic fox 6622|Customer#000006622|fidCV6mKheF|16|26-334-959-1721|8847.45|MACHINERY|wake regularly final instructions. deposits are slyly. furiously pending pinto beans boost. care 6623|Customer#000006623|Xjw9Hy5h4Pgb3zcUEWm8|24|34-304-460-3284|-872.31|BUILDING|g the slyly final foxes. even, pending requests boost about the requests. r 6624|Customer#000006624|5VX6OjpQ Bro52jdZej8FlK0OFFtf7R3ESIJPK|1|11-660-742-2374|-7.37|BUILDING|inder blithely? regular platelets about the packages believe instructions. regular fox 6625|Customer#000006625|R3hdYck8WB F4No|0|10-367-922-8280|6076.36|HOUSEHOLD|ly regular requests cajole; blithely pending instructions haggle al 6626|Customer#000006626|ZdfCZEerdNphvEz5|8|18-208-761-1975|-595.84|HOUSEHOLD|have to doze permanent pinto beans. special requests sleep across the furio 6627|Customer#000006627| lMYWGolCqdPZwftuYfusc7pMOGgLUZdrI|6|16-598-408-8591|4678.15|FURNITURE|ckages. ironic pinto beans use carefully-- carefully eve 6628|Customer#000006628|xP MMDAoXhUc,s8N|9|19-364-386-8197|6607.71|HOUSEHOLD|s are. furiously express platelets boost. regular, regular accounts cajole. depo 6629|Customer#000006629|aLIf6koYS8OSSnvRGqhRFqIRnX|6|16-310-265-2013|8761.81|AUTOMOBILE|onic deposits hang slyly enticing packages: slyly regular courts was blithel 6630|Customer#000006630|mrADj6gHX6kvN8H9hV2TTZD|10|20-993-605-4157|844.42|FURNITURE|across the blithely special excuses. express accounts are about the packages. ironic re 6631|Customer#000006631|7iOXTPto,B5|12|22-688-971-9796|89.69|BUILDING|the ironic deposits serve slyly slow, regu 6632|Customer#000006632|WFDE,gGvSeFejvfMg|2|12-979-552-7277|9324.80|MACHINERY|onic theodolites haggle carefully carefully pending excuses! packages boost against the furiously ironic p 6633|Customer#000006633|MXIkYoyLJbpxhc|4|14-157-941-2531|2911.60|FURNITURE|deposits cajole even instructions! slyly final requests integrate enticingly furiously special 6634|Customer#000006634|v40njkdojYnRdVGwAW|5|15-916-327-8851|5261.30|BUILDING|lar instructions. accounts haggle permanently according to th 6635|Customer#000006635|dPKoh,uNPUKABQlR,WDK3pTzCp|24|34-785-680-5961|6618.78|AUTOMOBILE|packages sleep. 
slyly silent deposits x-ray furiously deposits: bold, e 6636|Customer#000006636|0yf ,8IEF6Ym5JFeZb1HfyYJG CDxU|24|34-625-369-8340|7912.37|BUILDING|ously ironic ideas integrate furiously ironic instructions. careful 6637|Customer#000006637|5RhMx2tf5k8u|13|23-455-236-7135|2317.68|BUILDING|e blithely. furiously even dependencies integrate slyly. car 6638|Customer#000006638|Mm2JAYCCHstTE|16|26-315-337-6748|191.55|FURNITURE|s packages sleep quickly. final ideas boost carefully 6639|Customer#000006639|auyhpm1qOcoflRfR4S35,7nPTFyM ZG04eGAMb,U|13|23-364-243-5030|5269.15|FURNITURE|urts cajole bold, even requests. furiously ironic exc 6640|Customer#000006640|LraQsOeV6d|7|17-233-212-8020|6693.51|AUTOMOBILE|ate after the special packages. carefully regular excuses are quickly quickly even decoys. ironic foxes detect bo 6641|Customer#000006641|gw3LD4q9DzXioZ37chuNxB2|8|18-212-860-9043|9799.19|BUILDING|e furiously according to the theodolites. carefully bold ideas wake fina 6642|Customer#000006642|LoE,WKpwd79h4TGVAJDgTYwzSwDmBfd|2|12-278-337-5916|1026.55|HOUSEHOLD|s. slyly bold excuses play above the furiously ironic requests. furiousl 6643|Customer#000006643|Iuv NTvS6dJ2io6Q76FiZeJI|23|33-173-903-7175|3109.73|HOUSEHOLD|are carefully blithe accounts. carefully unusual pi 6644|Customer#000006644|mf2MRu7b37tyk|21|31-369-661-7739|2257.43|AUTOMOBILE|bout the unusual requests. special, unusual epitaphs haggle furiously final deposits: even requests agains 6645|Customer#000006645|9wuMUJea1LYIsA0MDzoFxKARmIJvMpI|22|32-339-429-1573|-347.93|BUILDING|according to the permanently special packages. pen 6646|Customer#000006646|GioIRa7f673rsFRHt2e|9|19-556-383-9646|2206.85|MACHINERY| furiously special packages haggle busily. s 6647|Customer#000006647|gLBSZ8P6yXMpyGwCirn9HfN,|6|16-349-915-4350|7515.71|AUTOMOBILE|ncies are against the furiously express instructions. deposits above the bold theodolit 6648|Customer#000006648|0HJ,ghgxYfPYPC eCh6nDUgWLalajrwBRY4p|0|10-142-681-9531|1611.85|AUTOMOBILE|y unusual packages. express ideas sleep furiously along the final frets. furiousl 6649|Customer#000006649|q7et5s1SAXMlokVdGW3ZYueHV|20|30-484-404-2575|6300.33|BUILDING|nts. packages integrate blithely around t 6650|Customer#000006650|NxtKIJDfOaBn|16|26-568-235-1339|1611.97|FURNITURE|pite the ironic packages. silent requests across the regular theodolites solve care 6651|Customer#000006651|cAtLrZCfDjtH5vIpaqg,qBs6J|9|19-396-649-7221|8251.81|AUTOMOBILE|al, final pearls wake furiously regular grouches. packages wake bl 6652|Customer#000006652|4mz9d8cQw7Hh,KZYOWhHrH5NpLkhQL|14|24-741-346-4893|9296.36|FURNITURE| above the regular, unusual dependencies. final pinto beans dazzle fluffily. pending 6653|Customer#000006653|4Q4ARPm8n2f|10|20-532-650-2293|8927.69|MACHINERY|riously final packages integrate. ironic, thin accounts cajole among the carefully pending 6654|Customer#000006654|AHYF0lz1LiG,wC1WMH9L9pCe3PdUaO4Q|7|17-933-462-3572|5789.45|MACHINERY|s. blithely regular accounts engage slowly carefully final se 6655|Customer#000006655|FgGQl7KxO2rmmwE0rndJ|24|34-277-845-9539|6372.81|AUTOMOBILE|ermanently regular packages do are carefully express requests. pending accounts sleep. pac 6656|Customer#000006656|jgLvAdS6UQcyaUCSb|17|27-416-436-5518|9824.64|FURNITURE|eas serve furiously pending theodolites. f 6657|Customer#000006657|fDo1gqlFFrkkqjwSb9 9RD7DNbuPI59zt|3|13-538-227-3972|-146.22|BUILDING|ly. 
blithely stealthy asymptotes alongside of the fu 6658|Customer#000006658|qOYY,NL6MESHOBu1r8jx|21|31-302-918-2979|4384.07|MACHINERY|riously blithe accounts cajole slyly ideas 6659|Customer#000006659|WKPkuTEjDJc|10|20-393-281-4388|3053.98|BUILDING|around the blithely express dependencies. careful 6660|Customer#000006660|Mhf8lV21 BapxeXn9lz9k7b6tBd|23|33-164-539-3410|188.29|BUILDING|s. quickly close requests are quickly. asymptotes are against the carefully slow requests. pending foxes caj 6661|Customer#000006661|bB6EP7Tf3mVskZJ7tLHKQSGDifygpVm2|16|26-168-633-7265|5529.99|AUTOMOBILE|t requests nag. blithely regular courts wake quickly. thinly regular deposits according 6662|Customer#000006662|oZ6a dkzIqrldlL|10|20-378-737-1289|1621.83|AUTOMOBILE|tes use pending, ironic dependencies. even, ironic dependencies boost according to the deposits. regular 6663|Customer#000006663|tFHYxL YrhKpX|13|23-250-528-2581|9077.11|AUTOMOBILE|ckages after the pending, regular dolphins use quickly 6664|Customer#000006664|580X3yq552pkRg5sQwDbltI6XxFJb6c6rb|24|34-426-291-1081|9108.44|BUILDING|unusual dependencies sleep quickly accounts. ironic ideas sleep along the carefully bold accounts: furiousl 6665|Customer#000006665|,Y,EsUyItpokyz9XVi9jf3L7JuOwrQjNE3c1Mlij|0|10-163-117-8909|7628.89|FURNITURE|ickly regular theodolites use blithely among t 6666|Customer#000006666|9zJqKbGbRjPS|18|28-557-833-2670|792.26|AUTOMOBILE|lithely regular requests after t 6667|Customer#000006667|SXoMfAHkfO4b44Yr1Qz|19|29-738-728-6617|3385.51|MACHINERY|ual accounts. final excuses cajole car 6668|Customer#000006668|t,qsLMTcPSMSc I3,7LYW W0EwfqtOu,pmLc,|5|15-364-427-1235|6319.59|FURNITURE|unts boost. final deposits serve furiously according to the carefully regular accounts. b 6669|Customer#000006669|lT6gKsLMZJaBBXCtnGWroOoUkdbqwFyjMU Q|9|19-723-887-5480|6311.20|AUTOMOBILE|carefully even instructions haggle furiously. c 6670|Customer#000006670|naiDNB2uULTBT,321CFY9HYG0jFelpYg|16|26-694-136-5425|8297.83|HOUSEHOLD|gular pinto beans solve quickly carefully bold accounts. express pinto beans promise slyly acr 6671|Customer#000006671|WaqMrlZBfcDiT3n5KvOWt14jgw1m5ZARzqI85fY|18|28-145-184-9679|1603.96|MACHINERY|ans run fluffily blithely unusual dependencies. bold, ironic packages above the quickly pending foxes believe caref 6672|Customer#000006672|TAk1jzQy60fSpBRSLShvpTZae1797Bdve|10|20-897-841-4188|5700.11|AUTOMOBILE|unusual sheaves haggle quickly according to the carefully final som 6673|Customer#000006673|heXKQ2V3L0uxVhdWxCvr42|23|33-166-738-4873|751.90|MACHINERY|packages sleep fluffily final grouches? fur 6674|Customer#000006674|WfEiHyBjekCdDYT2Gbb0infBTvADc27M9R3 BWZ|9|19-629-336-3947|7179.20|BUILDING|y even deposits. accounts cajole furiously about th 6675|Customer#000006675|HAnz4SbD7 1s,9fqBkN8A4L,m8UZ|5|15-317-619-9609|-226.28|MACHINERY|ess accounts. even packages above the quickly regular requests mold a 6676|Customer#000006676|VtWsbzZD4qe8Z T02uSPbSuKiz|10|20-569-115-2865|816.03|FURNITURE|he ironic, bold courts are ac 6677|Customer#000006677|Wh55,5rAIfHnBZbN|0|10-283-641-1486|7035.29|BUILDING|ns cajole furiously beneath the carefully ironic waters. regular requests are according to the furi 6678|Customer#000006678|QDpY,eRLnl5,HsjFuCEmufwZadvV4|3|13-605-424-6382|1201.45|MACHINERY|warthogs. 
blithely ironic asymptotes 6679|Customer#000006679|VqLS2XSCc0GFgWN5 Ol|19|29-169-326-9045|5642.93|MACHINERY|, even deposits use fluffily regular pi 6680|Customer#000006680|8TEDCB7fdAUhgYRUU7ZfV1Ld3mB00gDQhtNB,oS|17|27-962-923-6320|7673.78|BUILDING|s cajole. fluffily express ideas wake furiously about the ironic 6681|Customer#000006681|th39LRNpVorKObh7TB|6|16-926-344-3556|7321.37|AUTOMOBILE|egrate furiously. slyly ironi 6682|Customer#000006682|47B7Wsx8E7991Y2|19|29-904-610-3132|2230.59|AUTOMOBILE|bout the boldly final requests. slyly special ideas h 6683|Customer#000006683|snrDJsmCFVQ4O3dveQpw5JIDvmtsZXdRtzNmP4O,|10|20-812-694-7415|-549.56|HOUSEHOLD|gular accounts. fluffily express 6684|Customer#000006684|qdqKwrzOb2|5|15-109-442-3321|1412.15|HOUSEHOLD|ily express excuses wake blithely. carefully bold packages sleep above the even, regular pinto beans. 6685|Customer#000006685|BkWlHQusdN1kzNDHvqnOV1SJ9|17|27-406-584-3296|4996.19|BUILDING|ly regular foxes boost blithely bold foxes. e 6686|Customer#000006686|DvvWufVBbpdB3nSVazM7|4|14-799-782-6111|5959.97|HOUSEHOLD|y unusual deposits play final requests. ironi 6687|Customer#000006687|Q0K48 G0mj|17|27-869-237-1381|1157.82|FURNITURE|ress asymptotes. quickly regular deposits haggle. final, close packages snooze fluffily according 6688|Customer#000006688|h61o1Amg8a2wnZYMAqd5gJM16PPB|15|25-369-725-3205|9401.81|AUTOMOBILE|fully regular platelets. ironic ideas hagg 6689|Customer#000006689|x5j,b7K9Irw9KnJmeCur|23|33-787-189-8082|-652.17|MACHINERY|blithely regular packages across the packages poach across the bold platelets. even accounts nag furiously against t 6690|Customer#000006690|B1nB5mFxDsSJQvm5lxn3nkyMl|16|26-671-890-1848|3868.69|FURNITURE|eas try to are fluffily. pending, t 6691|Customer#000006691|NvkXKfvzF9oErd6MzanMySOlVPNUCQ7tcV|5|15-503-903-6718|8999.87|FURNITURE|luffily. unusual dinos doubt slyly. accounts sleep fluffily after 6692|Customer#000006692|HkPGTwsyUV|9|19-145-218-4626|8817.45|AUTOMOBILE|boost permanently final, regular deposits. blithe, ironic requests sleep furiously express dep 6693|Customer#000006693|Bd7U3RV0lbRer WQY|24|34-882-944-4437|925.49|HOUSEHOLD|silent deposits. special foxes nag sometimes permanent, bold deposits. carefully special pinto beans sleep fluffily 6694|Customer#000006694|XyOYqBJqGspMZSdqQq5DvaX9CD|4|14-959-393-1619|9588.19|HOUSEHOLD| against the slyly ironic deposits cajole blithely even pinto beans. final 6695|Customer#000006695|XGxqrdbm4jLBmrqTRmd5dLwIR|18|28-628-769-5605|7779.06|BUILDING|y even dolphins. requests wake. fluffily express asymptotes sleep. slyly final acc 6696|Customer#000006696|xhoBDOYsLR89|21|31-975-638-1125|5667.73|MACHINERY|es. slyly regular accounts integrate! fluffily ruthless accounts wake furiously! quiet accounts 6697|Customer#000006697|0NTbi10hHKSxo|24|34-307-871-2967|6632.75|HOUSEHOLD|re carefully quickly final asymptotes. blithely pending packages cajole quickly along th 6698|Customer#000006698|0QyCW1acGdoAo59FdWV 3pZ|19|29-862-853-4688|620.84|MACHINERY|ges wake blithely. quickly pending braids sleep furiously: carefully regular pinto beans mold after th 6699|Customer#000006699|jGDulmO0cw9lFBN1jlR64OLiBqCJc|2|12-708-798-7379|4317.54|BUILDING|unts-- carefully regular instructions maintain s 6700|Customer#000006700|KUpYErT7tXcOM04gpSlDW4566SCbvBT dA6l|8|18-304-498-5307|6932.01|FURNITURE|structions. blithely final ideas affix. 
blithely unusual requests haggle slyly fluffily even instructions 6701|Customer#000006701|CVKLM V7ST0Nx,jr0e0gHcduEj5|9|19-659-528-9243|3257.42|AUTOMOBILE|lly final foxes. ironic requests boost carefully du 6702|Customer#000006702|Wi DrmUzjVKPCSBNG6Wok7io9QxVX7kN7Jd|7|17-216-230-3435|9392.14|AUTOMOBILE|t slyly according to the furiously fluffy theodolites. carefully regular requests are carefully int 6703|Customer#000006703|03PTXkGYGGxCThBmmgGy|21|31-804-154-5053|1836.91|BUILDING|. unusual asymptotes cajole carefully against the ironic packages. 6704|Customer#000006704|B1B1Ms3HDpnvU8cDBoMP3T4PkW,mHSWd|19|29-206-376-9599|4862.96|MACHINERY|rmanently even ideas haggle carefully. carefully unusual courts according to the c 6705|Customer#000006705|aDX0WTflMQRyU2JMFFlW|3|13-434-897-7312|-159.03|BUILDING| foxes. furiously silent pinto beans integrate slyly across the grouches. slyly special pinto beans in 6706|Customer#000006706|TBcc48ZL2vZv7afiGoSEhjbM|3|13-134-428-9567|9100.37|AUTOMOBILE| special, even requests are fluffily-- regular accounts a 6707|Customer#000006707|o6Nm3V8 Zj2tt4j3sMiTJ4|6|16-466-364-3185|3977.38|FURNITURE|uriously against the regular, ironic requests. regular packages wake never. quietly pending instructions believe. 6708|Customer#000006708|mUhDV6NCOCJghkAjEX0Jo|12|22-281-780-5651|9900.11|BUILDING|ly. ironic accounts wake carefully at the final pinto beans. slyly regular requests slee 6709|Customer#000006709|BXNfIHT5cfG6DtLlBKePFXUQbBeB4|8|18-207-447-8319|3199.11|BUILDING|ronic, even requests. slyly unusual realms sleep carefully. ironic asymptote 6710|Customer#000006710|CeyxEnNCzAuXmAId1vHI4kN5YuwNvV0rH8NhYz99|12|22-412-672-8270|8494.05|HOUSEHOLD|usly even requests. accounts boost after the bold deposits. slyl 6711|Customer#000006711|z9trQmIO3Y5z60O5ozSv877GQILvo|13|23-886-250-5460|-361.61|AUTOMOBILE|ounts! thinly even theodolites cajole furiousl 6712|Customer#000006712|T QGDjZA2gcd92poV ,1rQrRu9ZB7EzZ|17|27-884-283-7355|3268.82|FURNITURE|ke blithely bold deposits. final requests sleep furiously? blithely specia 6713|Customer#000006713|ynNA7HFYdk0KjYuVxo9R9NK8qw8lXJxDFhA|9|19-945-198-9475|3006.93|FURNITURE|gular asymptotes nag. sometimes final accounts integrate blithely blithely ironic ideas. caref 6714|Customer#000006714|m8zfcfvyi7w PulZPp,g|21|31-407-424-6606|1494.63|MACHINERY| blithely over the furiously regular 6715|Customer#000006715|wEEcGy ed7V nIyY|8|18-303-744-7177|5244.28|BUILDING|ep slyly-- fluffily regular pinto beans use silent accounts: sometimes regular foxes are along the regular, regular 6716|Customer#000006716|qBJ6Dx1ASZOZLrQ7FTreiGRWX|4|14-339-999-9145|6593.08|MACHINERY|ons. blithely bold instructions instead of the blithely even dependencies haggle according t 6717|Customer#000006717|A3TpI60MfBQNedRV 3DcYhf3GUWL|23|33-731-150-6538|4844.87|HOUSEHOLD|ependencies wake except the reques 6718|Customer#000006718|8m61NcNrEZzFo,NmKaBzQXjeX Va|12|22-820-610-1027|3658.95|BUILDING|ely. notornis try to nag. blithely pending package 6719|Customer#000006719|,SJoHSlIYiQZ3ebJw SpwZ8lg|7|17-221-621-5826|3965.07|BUILDING|ronic warthogs after the fluff 6720|Customer#000006720|luGQxDrBGnnftUVgjF|6|16-457-628-5807|843.36|FURNITURE|odolites cajole. ironic packages haggle regular instructions. slyly special 6721|Customer#000006721|OkHnnK98UeBILbX2bjAPwvOdKMBidiW|4|14-701-354-8135|4277.74|AUTOMOBILE|l requests are. furiously quick dependencies cajole. 
deposits cajole blithely after th 6722|Customer#000006722|1jnPUXu2iBwB7|1|11-451-400-5785|8919.68|MACHINERY|ronic asymptotes use. furiously pending packages boost furiously-- slyly regular deposits wake 6723|Customer#000006723|gAfgW13,GB|20|30-582-567-3761|3070.48|FURNITURE|sts. tithes wake quickly. carefully regular theodolites integrate. carefully final asymptotes 6724|Customer#000006724|bcpvOK,8gUO1|12|22-598-908-4189|2471.21|AUTOMOBILE|s. slyly final pains wake carefully. express, ironic deposits among the furiously final dinos ar 6725|Customer#000006725|if8lplIxBWNs6u2fgJKJLKDrRlfhEjwQcJVooOH|7|17-685-688-6979|6523.60|FURNITURE|n accounts poach silently after the regular 6726|Customer#000006726|hssVbd6,2x8YN7nJDem092|6|16-514-909-5495|3108.77|MACHINERY|es against the carefully regular courts are quickly furiously even th 6727|Customer#000006727|APG0BfGWOYnDkXgbcKdhJXzzJgLtQXYKCoXMPxJA|23|33-373-151-6436|7993.12|BUILDING|riously close foxes. fluffily final accounts haggle quickly across the accounts. bold theodolites engage furiously a 6728|Customer#000006728|v9cVptqbc0tKb9ZT q|8|18-216-645-1385|6449.93|MACHINERY|about the bravely even ideas are carefully ironic accounts. thinly even dependencies haggle 6729|Customer#000006729| NrrDioTuXtkJCh|13|23-575-217-4725|8051.37|AUTOMOBILE| furiously. quickly regular packages above the reg 6730|Customer#000006730|EmtyBtMxazeTZFx1zqBuNju5VmzsqOi930BE|11|21-223-976-8367|6413.26|MACHINERY| carefully final accounts. closely final deposits integrate carefully according to the dogged, regular ideas; platel 6731|Customer#000006731|HpEXXgDmsqIPyVb FXeWbadti|2|12-283-284-3677|7639.55|MACHINERY| foxes. carefully final asymptotes print excuses. furiously ironic waters engage. c 6732|Customer#000006732|sHH4w G8QMTtCVTjbkiEHTvL8bRr5Tu|16|26-779-843-5056|4018.93|FURNITURE|ions haggle quickly against the slyly regular accounts-- bli 6733|Customer#000006733|Kik6R2t1WP|11|21-569-230-8251|4219.13|AUTOMOBILE| boldly across the carefully ironic dugouts; slyly bold pinto beans haggle. 6734|Customer#000006734|yRF8my1jIZM1QuEGXxSuXT0P83w ljVko |14|24-157-448-3086|2971.38|FURNITURE|ly final deposits. excuses along the carefully special packages promise regular theodolites. ca 6735|Customer#000006735|eG5Pt6wlZpFUt0140cHxYox3roKuQU,V|9|19-117-148-6129|8904.94|HOUSEHOLD|onic, express packages wake carefully final asy 6736|Customer#000006736|TwL2M9L6iblvaG,93Efw2HP Glnm8jsH4EQP|17|27-421-117-6858|8363.54|AUTOMOBILE|inal dependencies nag. regular theodolites sleep. blithely unusual foxes boost slyly. final packages nag slyl 6737|Customer#000006737|TnIV3SSmxJFDBYUexZHw2w2m3vIAFgv38lwDY7Rb|4|14-372-941-6618|6015.13|FURNITURE|iously ironic theodolites nag slyly according to the even requests! slow, regular platelets doubt. 6738|Customer#000006738|eAylAy5pHWBnUK6q3v2cXNODweF4|3|13-699-399-9154|4394.53|BUILDING|ending accounts. slyly special pin 6739|Customer#000006739|3lsA2Q8pNR|5|15-493-613-7300|2838.35|BUILDING|uests nag furiously according to the quickly even dinos. ironic, bold requests 6740|Customer#000006740|VZIi3wMuEDjnmgAq4YwDGnf5BK e7Fs2|11|21-751-155-2705|6098.86|MACHINERY|lphins affix quickly alongside of the deposits. slyly pending pearls wake regular, regu 6741|Customer#000006741|ogMwwJa0CY9MZsdv7nCQi5HCOAwuE|7|17-567-242-2914|3514.90|BUILDING| are over the foxes. furiously even requests after the closely r 6742|Customer#000006742|u5lVPzNeS1z2TcfehzgZFHXtHyxNJHU|1|11-643-477-4053|2834.80|BUILDING|bold deposits wake. 
special deposits about the regular excuses boost furiously furiously final packages. slyly r 6743|Customer#000006743|uXzHSYlqjnoKSFFTrUjwPa4g9oIMRp4D,S,cNM|20|30-195-288-9509|1464.96|FURNITURE| unusual accounts wake slyly about the carefully regular packages: carefully pending dol 6744|Customer#000006744|PGmBpCi8fCF9caRqw|9|19-551-917-8841|1605.91|MACHINERY|ly. quickly special accounts dazzle around the ideas. blithely regular requests sleep quickly carefully regular 6745|Customer#000006745|F4AQlOnBqEtwLi85Fm0RzN3ZmaMdl3|18|28-602-954-3388|5454.54|FURNITURE| final pinto beans sleep pending, ironic deposits. busi 6746|Customer#000006746|M0 KJuw4qc9UBliw9 oD42VXakTKG0,6|18|28-204-291-9356|9415.95|MACHINERY|ges. slyly ironic patterns haggle slowly carefully regular requests. quickly regular t 6747|Customer#000006747|3ntn2pmdvucSKFlb|3|13-243-582-8819|5970.51|AUTOMOBILE|round the slyly bold deposits. slyly unusual dolphins boost bli 6748|Customer#000006748|a8b2paF9T94UrLTiSzHSE38ZmxHL|17|27-580-967-4556|2410.88|AUTOMOBILE| furiously ironic deposits haggle across the slyly silent instructions. quietly ironic 6749|Customer#000006749|sU3,BsMtoVz xNJJX1emkZJxZbOIRc|0|10-774-652-4046|4693.71|AUTOMOBILE|ts. unusual accounts are slyly furiously final accounts! ironic, even pains cajole furiously. 6750|Customer#000006750|V7UkwofaAeXZ7 0rXuHekMbN3mXZLK7koxDO66Tl|4|14-457-948-4761|4227.49|HOUSEHOLD|ding dependencies. carefully regular accounts cajole carefully about the ironic deposits. package 6751|Customer#000006751|Tdxt4GKZYz|7|17-895-936-3672|365.42|FURNITURE|ly for the fluffily silent foxes? slow deposits are. packages doze carefully. quickl 6752|Customer#000006752|80HtfjhjeYU|17|27-487-194-4578|1487.49|AUTOMOBILE|regular accounts. pinto beans sleep express foxes. special instructions wake. quickl 6753|Customer#000006753|yNkYKPtxuhgzqBBm7s6|8|18-860-208-5739|6195.82|HOUSEHOLD|s. bold, ironic packages wake c 6754|Customer#000006754|B6i9QUe1MogaHVLxdQy4QnH3nkb UaY0|7|17-897-648-1827|5143.81|BUILDING|ar ideas doze furiously final 6755|Customer#000006755|aJU5CHd5UZzrd80j1|6|16-237-199-3241|964.67|AUTOMOBILE|nic asymptotes. blithely ironic foxes are. blithely ev 6756|Customer#000006756|sRflkvpvQnc6q8wXS|7|17-105-972-5722|8127.58|HOUSEHOLD|ing accounts. unusual theodolites hag 6757|Customer#000006757|Ot2eNxssE43x0Ivhlzy 2|13|23-725-154-2356|227.89|BUILDING|breach blithely; furiously regular deposits according to the quickly regular packages nag slyly fluffily regular p 6758|Customer#000006758|YhRgjpezuIfxnPM3docKnH9Dr6Zs9dlMPmI r5X|18|28-730-164-7975|15.13|HOUSEHOLD|dolites hang carefully. slyly special accounts nod slyly express theodolites. pending depths according to the regula 6759|Customer#000006759|CyomHt7D6FLzXgN5Rff0fo0a |19|29-276-479-4436|-773.12|FURNITURE|ut the slowly final accounts sleep blithely slyly special foxes. quickly final ac 6760|Customer#000006760|ZXjHellHGmKAh|18|28-609-664-1095|6035.85|FURNITURE|. furiously unusual packages sleep quickly. accounts nod sly 6761|Customer#000006761|v1sTMXm97sEpzFo|1|11-894-413-9156|521.65|BUILDING|tructions cajole carefully against the ironic deposits. blithely final asymptotes wake furiously ironic ac 6762|Customer#000006762|Vha,VM8y w5TD0q7|18|28-388-163-7393|9311.76|BUILDING|against the final requests. p 6763|Customer#000006763|laxf,Ybtd4d2FGE9RCG|18|28-654-592-3290|3895.04|AUTOMOBILE|ts. busy pinto beans cajole carefully since the slow accounts. 
final pinto beans cajo 6764|Customer#000006764|Ehu6TNlFTGdkY|16|26-720-544-9325|921.28|AUTOMOBILE| forges snooze. carefully pending warhorses cajole carefully upon the carefully regular packages. thinly unusu 6765|Customer#000006765|K8S00oGkfyhG7,A08NTyZTfOItXThIZ|14|24-562-459-8122|4816.14|AUTOMOBILE| regular instructions haggle slyly accor 6766|Customer#000006766|KbP0 fFP7iJCWl4E|4|14-369-503-4420|4814.57|FURNITURE|express, final accounts. furiously u 6767|Customer#000006767|yEB,5rAwZ1Vi,u|21|31-217-839-6340|528.35|BUILDING| ironic requests cajole quickly slyly even foxes. fluffily pending accounts are according to the regul 6768|Customer#000006768|h Kgr0xrW9MkORlqUgwFsGgQZG3,Jks|9|19-647-976-4923|979.30|AUTOMOBILE|furiously idle packages are slyly fluffily quiet dinos. quickly even instructions about the regular reque 6769|Customer#000006769|v5TrRZTAAiD4i1eyVTLNA|20|30-186-784-8328|1029.38|AUTOMOBILE| slyly regular packages nag carefully. furiously bold theodolites along the 6770|Customer#000006770|fhS8YZLcFyBIeZSp2|12|22-300-803-6439|6756.19|BUILDING| express ideas sleep slyly ironic dolphins. u 6771|Customer#000006771|MMWc6i48BJhKrgu9ko Io|24|34-589-527-3110|7287.84|BUILDING|iously alongside of the theodolites! even realms above the furiously special orbits are slyly final ideas. car 6772|Customer#000006772|ggL8d6JSSQUkE9zg6F3|10|20-387-573-8593|-238.33|MACHINERY|eas: carefully regular packages about the furiously silent pains 6773|Customer#000006773|WO 4bouYuu |7|17-627-272-8854|7232.34|BUILDING|tes. furiously bold pains wake care 6774|Customer#000006774|tufe2Xi42Fdq6R4hf7drAFRCg3Rz4i|2|12-800-430-6064|1958.28|HOUSEHOLD|ounts. slyly even theodolites sleep slyly accounts. ironic foxes sleep carefully. blithely pend 6775|Customer#000006775|bzDrDnIGQLCgiExwO6VwqlW|15|25-386-188-6886|9781.70|BUILDING|old, ironic pinto beans wake fluffily. even, regu 6776|Customer#000006776|JXoJqh4JpcdvugREShRwNHCee4ltrxrb3OM1,E7l|18|28-994-492-7101|4054.12|BUILDING|hely among the ideas. slyly unu 6777|Customer#000006777|uT2JCbpTRH9Inj0wgbRuv,|16|26-447-185-5798|1597.51|AUTOMOBILE|cajole. slyly express asymptotes sleep above the final, bold realms. slyly special realms boost on the quickly re 6778|Customer#000006778|ACpbkEbCPYy|4|14-172-893-4202|7333.47|MACHINERY|r platelets affix. carefully ironic accounts arou 6779|Customer#000006779|UeGNbDKhSDW1MkcE,GnxRAiyjHHe0itTSj|18|28-974-892-3856|7630.58|MACHINERY|xcuses might sleep slyly silent, final deposits. regular pinto beans integra 6780|Customer#000006780|R20cDLZCvC,XeXxr3JVgS63kH9IYW41ql9|4|14-601-747-4629|7181.83|MACHINERY|blithely even escapades. even deposits are carefully pending accou 6781|Customer#000006781|hjgOEQCNYiCKQTwECBEJ6pMsKqhNJH1oRj|21|31-556-362-4055|-21.79|AUTOMOBILE|packages nag along the quickly express packages. ironic deposits are along the 6782|Customer#000006782|QfjRd7YtTJRio2K70XUp7w,WY3a9xlo,|18|28-152-315-2630|5711.06|MACHINERY|ss realms. blithely even ideas haggle furiously afte 6783|Customer#000006783|VcFpiOQxpgr2Q5w0,d|4|14-418-290-9278|8724.64|MACHINERY|. blithely regular ideas haggle fluffily against the ironic requests. final deposits sleep. even, regu 6784|Customer#000006784|t7bVQGTfY,PwEmIshCKmrWiNMj|10|20-926-379-4893|3026.77|FURNITURE|ful pinto beans nag final, even accounts. requests 6785|Customer#000006785|Zeb0QWx561VbdmAnNJwFX|5|15-334-822-1897|4849.38|FURNITURE|equests. 
blithely ironic dependencies about the slyly ironic accounts wake furiously iron 6786|Customer#000006786|y JAZymUDAm19ImpwXrDBhrJ2tkCXImzEU84NXe|14|24-639-124-4027|2564.13|MACHINERY| forges: fluffily bold packages boost about the sometimes final excuses. ironic deposit 6787|Customer#000006787|x3R8xwciGpAldSQtfrfQjKPVA5MK|4|14-221-614-7132|6276.35|BUILDING|ularly silent ideas along the regular, even asymptotes boost fluffily bold theodolites. blithely express packages 6788|Customer#000006788|tkzbRySfDjHBZuJU8xa9XXx4EeZ6L EmX|12|22-769-485-6232|5623.43|MACHINERY|ackages. carefully express platelets haggle blithely 6789|Customer#000006789|wQUHbVSc8YanGHPCDbK0,njoByEX7ThcX7|13|23-254-104-3764|-913.53|MACHINERY|yly: carefully ironic asymptotes are furiously; pending do 6790|Customer#000006790|DCD1tDMXhoUIaKhQPnCUVUKxiLdcGsNK|18|28-476-389-8594|222.98|AUTOMOBILE|rave deposits. regular, even instructions us 6791|Customer#000006791|w7qvbNTA3AUnviiYrUHQ3rrvxg|24|34-796-717-7454|5488.16|BUILDING|g the blithely express packages. u 6792|Customer#000006792|1eyzmigbKdS,uPvd,gTfV82Q0s|7|17-399-754-8075|2503.03|AUTOMOBILE|thely ironic pinto beans. accoun 6793|Customer#000006793|6iyy1xAWokEzS7vzwKebMn8uc9rnD|14|24-645-815-1178|2415.53|AUTOMOBILE|ies sleep slyly along the blithely bold deposits. sly 6794|Customer#000006794|eW4D8D nAkkGhhx|21|31-846-103-1877|5798.75|MACHINERY| integrate about the quickly dogged requests. furiously ironic pinto beans haggle above the never final 6795|Customer#000006795|uHQSwrVKQCUmmUjgTjuUo oR65yPIOzNuZ5j|11|21-520-971-6013|8574.87|MACHINERY|counts. pending packages haggle quickly alongside of the pending requests. ironic fo 6796|Customer#000006796|ry4xotfFJkV|21|31-204-934-2930|4956.27|FURNITURE|beans! furiously even deposits doubt fluffily carefully express pinto beans. furiously pending 6797|Customer#000006797|CbAgm9paksZYcQNM|2|12-443-241-4031|6051.25|FURNITURE|s ideas are carefully across the fur 6798|Customer#000006798|DRAvKxwGdh8qBsv6DlFfvsilS7,QuSOQfO4D|21|31-784-264-5102|6312.50|MACHINERY|kly ironic pinto beans. fluffily final escapades beyond the reg 6799|Customer#000006799|OmuxHQ,MFaA6IKDRJpI7rOq0 2|19|29-861-675-1912|8266.46|MACHINERY|tes. slyly permanent foxes sleep against the carefully regular deposits. ironic, ev 6800|Customer#000006800|dyQqJ sBw9RZiggYPODCddm2|24|34-746-736-6270|2438.35|HOUSEHOLD|al accounts sleep carefully accord 6801|Customer#000006801|h4zMf8BMKsgOf964TpkBtFenTIiHb|7|17-563-914-8922|4350.72|AUTOMOBILE| the furiously express accounts. even foxes sleep fluffily ironic ideas? 6802|Customer#000006802|XwZM0 CSn4C Pe|24|34-621-826-1804|3589.51|MACHINERY|nstructions wake quickly along the bold accounts. ironically ironic tithes sleep blithely unusual 6803|Customer#000006803|6xmvFf,9ifUkUXDtls|6|16-508-741-8182|2880.15|MACHINERY|ully slow asymptotes. special pinto beans use after the final ideas. furiously even ideas hinder. qui 6804|Customer#000006804|y3KkEdC7h4oc6BGM8r xsYpi V7ivtz9|16|26-988-381-6875|4647.66|MACHINERY|ccounts sleep slyly among the regular packages. quickly final ideas sleep furiousl 6805|Customer#000006805|kTi3eJA0j3wmK,PDjWGed8tzM|24|34-956-659-4290|147.84|MACHINERY|eep quickly carefully even courts. 6806|Customer#000006806|63GYGgLROgXoSKxwRPVwKdjdTFx|3|13-535-116-9961|7653.27|HOUSEHOLD|press theodolites. 
blithely pending accounts according to the quickly regular accounts nag alongside of the unusua 6807|Customer#000006807|zfbTyBPugePY1ea3MbBFWuXrhsT|0|10-318-291-1534|3571.22|BUILDING| blithely ironic deposits wake slyly about the fluffily pending dependencies-- bold, unusual theodolites above the 6808|Customer#000006808|roBxgLQjBCcu7 Yls8BnCT3Oum241ON43b|7|17-840-939-1028|3471.22|MACHINERY|gle idly. ironic instructions sleep carefully accounts. bold, even sentiments haggle. furiously 6809|Customer#000006809|D2OPM0RO7wjQB,y5lQJIhGKRAK|19|29-455-199-9534|5772.30|MACHINERY|efully according to the fluffily silent packages. carefully silent packages use furiously above the carefully even 6810|Customer#000006810|Vav VVRBQ3WYqKNtNTi39x7,kXBcfblF3|13|23-459-864-2903|4116.60|AUTOMOBILE|instructions cajole permanently furiously bold grouc 6811|Customer#000006811|OcI9j59VXpHJy8akRgo9QazL2FBJzr|2|12-527-163-1718|6619.05|FURNITURE|ans besides the carefully ironic pinto beans sleep slyly fluffily pending 6812|Customer#000006812|AhoTwIbKJQn1FD4f59Og02|12|22-522-190-4711|1433.83|HOUSEHOLD| slyly ironic instructions! pending 6813|Customer#000006813|7sU37tY2xghEmgmrmGFw|6|16-378-417-2821|6328.64|FURNITURE|ges. pinto beans are fluffily. blithely 6814|Customer#000006814|xKxM4j61kF6WrHJSYTo|0|10-143-621-7307|4498.28|AUTOMOBILE|ructions. carefully bold Tiresias haggle furiousl 6815|Customer#000006815|SwpQn8U2,7FIzgYG|23|33-553-747-6097|8623.37|BUILDING|dolites sleep. regular theodolites after the express ideas are a 6816|Customer#000006816|l9iMew1ckL2nrx5YiUnd0Bs7Z96,A|23|33-435-484-3984|5981.91|HOUSEHOLD|to beans are alongside of the final 6817|Customer#000006817|0GuFB2pIst2i5ku761SGYJF0YKbemV3oGBxuY|11|21-463-437-2941|6900.32|BUILDING| beans. carefully final theodolites haggle furiously unusual foxes. pin 6818|Customer#000006818|NsWBjqO16kujv3WRtKhhLH28|18|28-865-256-2071|9642.22|HOUSEHOLD|pecial packages sleep carefully furiously regular deposits. even, regular pinto beans cajole slyly about the blith 6819|Customer#000006819|BxmHawhcf5E|19|29-909-925-3216|2833.96|HOUSEHOLD|ts above the regular theodolites cajole at the pending, bold requests! slyly pending asymptotes would haggle. ir 6820|Customer#000006820|i1RJGpP9HqI4s6151jR1z8ZWpXdOU|10|20-589-342-9128|9377.01|AUTOMOBILE|ironic requests against the regular warthogs nag across the fluffily regular deposits: 6821|Customer#000006821|GpFN4sJSskru0JEsjNZLg,dcTKNsIgDO|14|24-956-317-4494|431.91|FURNITURE|l platelets. fluffily regular accounts across the silent pinto beans p 6822|Customer#000006822|bo tbD14X1LH254lRO|18|28-422-199-2677|8027.05|FURNITURE|sly regular ideas wake. fluffily do 6823|Customer#000006823|NxR5mcTtNqZad4WGlWudh PLP1itX0Y,xI4f|5|15-158-466-1756|4742.19|MACHINERY|onic deposits. carefully brave decoys along the blithely ironic asymptotes detect c 6824|Customer#000006824|ots Xj,zI6Lt,5CSMqtm0aoz7UeBs|14|24-918-571-1016|4911.60|HOUSEHOLD|encies cajole at the fluffily even pinto beans. express ep 6825|Customer#000006825|iCyuTflSjsWcDC|12|22-228-819-1874|3300.02|AUTOMOBILE|accounts. furiously unusual ideas are daringly alongside of the carefully unusu 6826|Customer#000006826|i6Gp3F X4bJxVZ,IOFWyvKHBzEWDK7Ao4B|16|26-414-821-5076|-532.24|BUILDING| slyly pending asymptotes! pending foxes nag. 
slyl 6827|Customer#000006827|pkyUJaeoz7jLGIrQ,kYUVF5loZj|5|15-181-335-1276|6647.51|AUTOMOBILE|press packages cajole carefully blithely bold pinto be 6828|Customer#000006828|i07aofApQ6Sg,gMjYzl ycOlqa sSi|11|21-232-607-3504|6718.53|FURNITURE|ickly fluffily permanent asymptotes. fluffily regular deposits use slyly carefu 6829|Customer#000006829|hLxbhkw8dFbPqPuELUIn39hrnisrNUfJv4F|20|30-413-747-6182|1323.84|BUILDING| the packages. regular dolphins s 6830|Customer#000006830|eVnzLRiUrpOj2zHTaq|6|16-527-593-5356|5679.21|AUTOMOBILE|endencies are slyly against the final, r 6831|Customer#000006831|s ZzU g4nFiJrvNg pE7cN1UGL7z3THVaXiuuY|16|26-690-485-7073|1470.27|MACHINERY|around the accounts integrate slyly according to the special accounts. blithely express deposits wak 6832|Customer#000006832|AggcVEoM1Dw2WfE|10|20-653-330-2367|2821.81|BUILDING|o beans use slyly ironic theodolites. ironic platelets boost carefully into the fina 6833|Customer#000006833|xpXLjnef5qfH2xlHXF9oMMzIjIsuex,F0uw|13|23-153-556-1341|3185.16|FURNITURE| quickly bold instructions. even somas wake blithely. blithely furious theodo 6834|Customer#000006834|2 OGzQbTM4PhDp6|12|22-607-237-9927|4970.18|FURNITURE|y after the notornis. fluffily regu 6835|Customer#000006835| 1Vwc1DHAl|18|28-336-747-3054|8825.71|FURNITURE|ss asymptotes about the bold ideas 6836|Customer#000006836|IPAnUQ7OdyUJ9HlXzSKYEp|9|19-944-807-7519|7724.27|HOUSEHOLD|e regular requests sublate regular platelets. ironic dolphins integrate carefully. slyly even 6837|Customer#000006837|kcS0yon,tgPVfGO7hrxyv5C,Su,gM|4|14-101-991-2132|8823.38|BUILDING|s. pending, express pinto beans w 6838|Customer#000006838|BITaY3dbOQVUe3i5g7,ewHd|23|33-156-718-4716|1578.52|FURNITURE| courts are after the fluffily express requests; c 6839|Customer#000006839|oYnqEjWQ9dZezLjJtHPNIXI8HoxtqANG Z7z zj1|19|29-332-980-9867|7140.01|HOUSEHOLD|tes sleep slyly furiously special requests. carefully even dolphins nod blith 6840|Customer#000006840|8enbm51YZ1P8 9WL56McRjcRLxC|18|28-421-550-7131|1256.89|HOUSEHOLD|ly blithely bold ideas. blithely regular excuses nag slyly even requests. slyly regular forges are packages. pendin 6841|Customer#000006841|g NzADDwQKOQedcgxkP3 vDTJ0OThsh0sqI,vb|9|19-631-468-3008|2459.12|MACHINERY| busily final packages across 6842|Customer#000006842|1xXUXh723lgE0wfY1d9vuOTXELqhEPFo5t|9|19-676-771-4155|1342.45|MACHINERY| above the regular theodolites wake ironically against the bold, regular foxes. unusual requ 6843|Customer#000006843|M5z1DoiXAJCQVVIHKjMbntcT1A3Lfc4XSkXVRY|19|29-764-509-2918|-563.00|FURNITURE|ove the carefully regular instructions. carefully r 6844|Customer#000006844|hgGc5KCVN3QDk0Lci|6|16-763-561-5338|6663.09|AUTOMOBILE|ng to the silently unusual deposits. slyly ironic excuses use around the slyly regular escapades. regul 6845|Customer#000006845|VzpxA2uz,A BwWkAvIJnXqZHGDlhXK8n3IX8|19|29-371-479-7196|4510.06|MACHINERY|g the slyly pending packages wake along the carefully final accounts. regular pin 6846|Customer#000006846|ggBnTO6OZXBMWuflbpz4yVEDFUa9n|20|30-223-579-2199|8524.19|AUTOMOBILE| carefully ironic requests. slyly ironic deposits sleep along the carefully special dep 6847|Customer#000006847|FgCB,v15gQzq6|16|26-489-731-4993|81.08|AUTOMOBILE|across the quickly final theodolites. slyly ironic d 6848|Customer#000006848|XETSPsMa,,KHVAAu|8|18-993-692-5687|7653.55|MACHINERY|. theodolites run carefully quietly even packages. furiously even pinto 6849|Customer#000006849|zwnB9JnyCV0B2O Ue6Jn78KBarQRx2m|19|29-921-924-3641|8217.84|HOUSEHOLD|requests. 
ironic instructions use blithely blithely special ideas. bold theodolites haggle a 6850|Customer#000006850|A0qvm6saCh|4|14-519-545-3578|-702.43|BUILDING| special ideas. final, regular platelets affix regularly according to the slyly bol 6851|Customer#000006851|hvFJsItQnymk8pxkrjnenWCJ9GAjSNh |12|22-860-161-7304|6230.56|FURNITURE| foxes. blithely enticing accounts above the furiously re 6852|Customer#000006852|C hC7N6xSqjDVmEQzEZ01NYj4SBl5nhu|24|34-681-524-9480|6121.02|HOUSEHOLD|ily silent pinto beans. even, ironic dependenc 6853|Customer#000006853|AbpZYqtK6c5EfpZtpF|10|20-422-853-4017|1986.65|HOUSEHOLD|bout the requests. pending deposits cajole carefully above the regular deposits. requ 6854|Customer#000006854|Ka5iBA43bmKVAjKqFya2HfFkurzVx3pbFn,|5|15-430-821-6024|2568.18|FURNITURE|ts cajole carefully across the regular deposits. furi 6855|Customer#000006855|7yJhlD4Ziy9 IFz4VFzAURNoVWzFj5zyYdKBKT|6|16-876-384-6010|8375.91|BUILDING|s across the slyly unusual requests cajole alongside of 6856|Customer#000006856|CQMnbODOwhI9toZ75vf|23|33-529-574-6366|4984.79|AUTOMOBILE|. blithely ruthless warthogs against the slyly final deposits wake along the furiously fi 6857|Customer#000006857|qnTjfF57SI5LrBzO|23|33-446-990-4978|1409.67|BUILDING|. slyly final packages poach bli 6858|Customer#000006858|pd6Motczl51ondujEyQ7367tYCT4NC6|0|10-799-377-4331|7239.92|BUILDING|olites cajole carefully requests. special, blithe gifts pr 6859|Customer#000006859|1zy,R99p9Dg|17|27-368-450-7892|8452.75|MACHINERY|ular requests. blithely regular requests nag! quickly unusual instructions cajole. carefully ironic ac 6860|Customer#000006860|j0c5d8lwjd X7fYicMKeDycv|4|14-580-483-9140|9414.14|AUTOMOBILE|ithely unusual pinto beans unwind special packages: quickly pending deposits cajo 6861|Customer#000006861|um,8dEPxPOIu4uCO5oF9C os|19|29-269-205-4055|8593.46|BUILDING|ole fluffily against the carefully unusual excuses. special, final accounts 6862|Customer#000006862|WHjtQl UwzncNIkejao3a5W|14|24-682-159-5973|9323.04|FURNITURE|heodolites wake idly against the packages. ironic theodolites c 6863|Customer#000006863|C7xp7Euj8Zatj|1|11-147-882-1449|6353.09|AUTOMOBILE|luffily above the ironic, express requests. sly, careful foxes are about the pending, fl 6864|Customer#000006864|sdyPu0LQ3RkTdoFy|23|33-998-134-8314|8447.64|HOUSEHOLD|ickly even theodolites cajole slyly blithely special pinto beans? slyly unusual depos 6865|Customer#000006865|5JPh7HJJGpVizXF|24|34-198-293-9623|527.61|AUTOMOBILE|l courts. slyly ironic foxes b 6866|Customer#000006866|P7lfQDiROc4qhR3Khflxr|6|16-366-687-4124|-567.46|MACHINERY|ly. slyly regular ideas integrate instructions: dependencies haggle furiously regular, regular dolp 6867|Customer#000006867|t1gpHaWFZqRKhV,raEaxdUJ,MU|14|24-146-780-7698|8539.69|MACHINERY|es doze furiously express accounts. slyly silent requests sleep fluffily fluf 6868|Customer#000006868|R13,6EjlPmdsl|23|33-647-349-2675|7639.96|MACHINERY|al packages. ideas boost slyly above the final requests. regular deposits haggle fur 6869|Customer#000006869|6ZYHNoZX5,Me|14|24-606-630-9319|-84.65|MACHINERY| pinto beans. carefully ironic accounts haggle caref 6870|Customer#000006870|i3GhNxcD0,NDHWYXq778CcKI4Rok57Rk4I50R|16|26-714-569-7040|5292.68|AUTOMOBILE|boost furiously. furiously unusual ideas nag along the ideas. requests cajole carefully among the sly 6871|Customer#000006871|4Aawh3u9VlvfLcp,2|4|14-497-499-6031|6002.37|AUTOMOBILE| regular requests. even, close deposits are. 
permanen 6872|Customer#000006872|seuInq8wsBrKZU6lBlb ro1lt1gMtWkI L6LEQS|14|24-644-733-1264|9852.24|AUTOMOBILE|y ironic packages after the quickly final accounts haggle foxes. ironic e 6873|Customer#000006873|KXt6OTXQyCYz46Kw5Ynz|0|10-948-162-7136|1599.50|BUILDING|ts boost requests. slyly quiet requests are slyly across th 6874|Customer#000006874|VbQ7fM46whu0cr31adJ|19|29-695-872-3712|7814.06|FURNITURE|en requests mold furiously express, final multipliers. car 6875|Customer#000006875|TjO3DIyAnZ5kX3 KOWj7m7d,3jC1dbZZNy9MoNVg|10|20-934-535-5633|28.31|AUTOMOBILE|lly regular instructions haggle fluffily accounts. carefully regular dependencies wake furiously. 6876|Customer#000006876|AzTnyu945JeYf4PLbjV7cSL3v|21|31-326-600-5503|812.97|MACHINERY|ccounts should nag until the ironic, express packages. ironic forges 6877|Customer#000006877|QR2VDDA94zJHPMmac86RN1Lg3SPxPzIt,u|16|26-767-249-9337|-532.13|BUILDING|rding to the quickly bold instructions? careful ideas cajole furiously against the final packages-- fluffily busy a 6878|Customer#000006878|OyrOaYhd68cPx9maycfwDGq R22|14|24-648-517-8694|8396.86|HOUSEHOLD|ic packages boost around the blithely final deposits. permanent, 6879|Customer#000006879|qGAxDp5CIcJWPNt zGFerq4kFZWQiX|12|22-942-165-1099|5044.93|FURNITURE| to the special foxes? fluffily special theodolites 6880|Customer#000006880|t3um3YPoJ17XQUXC8V3VBIFnoz1D|24|34-961-846-5811|225.45|BUILDING|ts print. unusual, special deposits against the blithely regular accounts serve fluffily after the deposits. even 6881|Customer#000006881|6VfVWZMuCYB8i4b 6dRhgUPFMnWADADr407sLL|18|28-406-320-1387|3156.70|AUTOMOBILE|ets across the ironic pinto beans should boost ironically furiously ironic asymptotes. blithely regular accounts des 6882|Customer#000006882|3lBEzGcwg1vKtSkqtvzMVQwqvwfGBGn|2|12-151-684-6027|-396.82|HOUSEHOLD|he unusual pinto beans. slyly regular accounts haggle furiously express excuses? slyly pending warh 6883|Customer#000006883|,OGuJ9Zt3XpAEONbIpIpal Ux3|11|21-873-538-3754|5579.14|FURNITURE|s frets nod blithely alongside of the deposits. quickly even theodol 6884|Customer#000006884|yRIvbIG72RGuz|11|21-789-993-5944|2597.78|BUILDING|uickly. quickly express deposits sleep slyly? q 6885|Customer#000006885|zgFxaDEtwdOvsa8eku1KI5f0Hq|17|27-266-722-7657|-188.30|AUTOMOBILE|sits wake carefully. ideas are carefully. final, furious packages impress slyly unusual requests. slyly re 6886|Customer#000006886|e1kh m79wZ2sAUFXAzmTAsYsG6bpc2SlJ|5|15-103-327-5461|1158.35|FURNITURE|lly. fluffily regular foxes alo 6887|Customer#000006887|Ts6FP r64sVYHPT|0|10-744-644-3355|7280.37|MACHINERY|lly even accounts. unusual, final foxes cajole express waters. carefully special 6888|Customer#000006888| O40aa4,Ip frcWNEh6XbZL5kktj,QFqRe|5|15-967-617-5600|6207.74|AUTOMOBILE|to beans. regular packages accor 6889|Customer#000006889|SvUWryqsTeYMUI8MoGwe23MxCJcgnk|15|25-961-943-9801|7526.00|FURNITURE|according to the accounts. foxes cajole carefully. ironic packages haggle blithely 6890|Customer#000006890|9h9 InEMYJ5P|7|17-911-310-5010|9514.17|MACHINERY|en packages. express, even inst 6891|Customer#000006891|6KWm7ap8xnKV2zYC7ejZifLwE|12|22-373-997-5030|6815.03|BUILDING| use carefully regular packages. blithely 6892|Customer#000006892|mNuuOriPB9 YdILhmvINOGd38jGBZ|13|23-151-643-6412|6203.78|BUILDING|s sleep carefully foxes. slyly pending requests sleep requests. 
requests nag furiously carefully 6893|Customer#000006893|IQKKtRNXWAsdbqynSPQ7P5Pd8X uF|3|13-947-844-8526|6929.65|AUTOMOBILE|y special foxes are quickly carefully final requests. unusual packages affix express, regular requests. quickly 6894|Customer#000006894|qP,e3crppnJlKzejVH b2D,|12|22-247-482-7189|7747.33|AUTOMOBILE|al accounts according to the pinto beans doubt among the final braids. blithely fina 6895|Customer#000006895|eYt JN3SRFOg,PHtmhhfqN|20|30-525-577-9213|361.67|BUILDING|kly even theodolites. packages integrate across the slyly pending accou 6896|Customer#000006896|MvVnnxEF29nDVdr WvzbHWkm|24|34-850-657-1872|4576.31|AUTOMOBILE| final, regular waters. final platelets wake furiously silent deposits. regular 6897|Customer#000006897|2y42Nhr2PYA54EHyciLjYpnVHJStojcN|0|10-920-317-5942|101.13|BUILDING| the express dependencies. boldly ironic foxes nag slyly express requests. final 6898|Customer#000006898|Iu VC1aV16HlqfvkfNd8snf4|20|30-234-165-4904|3546.09|MACHINERY|. carefully regular ideas cajole carefully brave, final asymptotes. regular, regular 6899|Customer#000006899|fI2lc9kOUESmGia2kXSJNNI, 5MMioi3cbGg1M97|0|10-535-357-3310|-909.78|BUILDING|e carefully regular ideas. pending warhorses use quickly according to the blithely even accounts. blithely regular 6900|Customer#000006900|XxzSLyP jdrxrFf,iyxf|5|15-836-566-6221|8277.05|HOUSEHOLD|ully final requests sleep carefully enticing, even theodolites. excuses cajole! f 6901|Customer#000006901|MF1Yh6821MDYo|17|27-691-188-7577|4462.87|AUTOMOBILE|sleep quickly even theodolites. furi 6902|Customer#000006902|bDR25t GnLkoK|6|16-378-380-3108|7453.93|HOUSEHOLD|e fluffily above the blithely even dept 6903|Customer#000006903|AtYnMz1ydb2y2yeKlQ6df1txYM1Ibs5u|9|19-471-505-9852|8241.38|MACHINERY|gouts cajole instructions? quickly unusual ideas caj 6904|Customer#000006904|Pm8 tKCreWStvS|22|32-451-759-6848|9979.02|AUTOMOBILE|lyly ironic asymptotes: fluffily ironic accounts cajole blithely pending 6905|Customer#000006905|R,hn e5kStdmzVDhYooDC7Z1tJ7|11|21-390-139-1440|9534.63|MACHINERY|ng requests. carefully silent theodolites 6906|Customer#000006906|NtpZ4oXphusYXOHppCd,DNX g65hon1kdIAGs233|6|16-847-442-4850|9552.15|AUTOMOBILE|inal theodolites wake slyly carefully dogged packages. reque 6907|Customer#000006907|xVxf5 SbjMNLGhAFzmW6|6|16-105-262-6169|7106.46|HOUSEHOLD|ccounts integrate against the even packages; blithely even pac 6908|Customer#000006908|PSFN752zobO,NnrXueh68DsNliR|18|28-352-804-7306|9321.69|HOUSEHOLD|y packages; carefully even theodolites boost carefully after the furiously ironic gifts. foxes kindle slyly. sly 6909|Customer#000006909|wTMW41vDDJ,rDvLBtcP2lAGPortmxNP18h|10|20-480-370-7421|5745.76|AUTOMOBILE|s about the ironic deposits detect blithely bravely ironic orbits. pending packages above the blithely iron 6910|Customer#000006910|wHUTQtKiSMGbGqKcC526p9Pfebuc7r|5|15-722-787-9962|8945.89|FURNITURE|the express, regular escapades. carefully unusual theodolites do wake quickly. quickly 6911|Customer#000006911|lx95MUKc7,CsN WHXRWUoVV|6|16-440-937-2210|3682.25|FURNITURE|riously pending accounts? carefully unusual accounts according to the quickly unusual re 6912|Customer#000006912|eZArhHGNgAZTIIs6m8JEFgvhhsybD77LXI8ScXH|8|18-789-906-8424|-383.69|AUTOMOBILE|quickly against the regular deposits. blithely regu 6913|Customer#000006913|jH27L,gZj9dws4sVD9pjQWjNM9Z3hjJhNEAHd|7|17-424-926-7617|6237.26|HOUSEHOLD|ses nag blithely slyly silent accounts. 
slyly ironic pinto beans alongside of the 6914|Customer#000006914|6ZnxYihMt7HmIQqiItpeVDq81omFb9S4mul|21|31-622-699-4865|8725.78|MACHINERY|. regular, ironic accounts among the furiously even pe 6915|Customer#000006915| VQo,H2BRkjoa0dYXLAA01mqwUytbBM|19|29-651-328-4337|319.47|BUILDING|lets after the carefully slow packages cajole blithely asy 6916|Customer#000006916|KGSD2t2VRVQ,n3EW4TiQH8i4L8VBRP1P|5|15-509-409-1957|-795.98|AUTOMOBILE|lites. regular pearls solve quickly. carefully regular sheaves against the always 6917|Customer#000006917|SwW3qHYL1ZuBlx17gCKpvcP|6|16-544-374-5520|6932.21|FURNITURE|d sleep. blithely special ideas haggle along the special pinto beans. regular, special warhorses are alongside o 6918|Customer#000006918|0HHnMHiko6CFQTroq5lGeFg0JDtZm6PUhIWU|3|13-658-888-3933|6671.40|HOUSEHOLD| fluffily carefully quick accounts: fluffily regular pinto beans cajole quickly according to the q 6919|Customer#000006919|hUBFeaV6cn41fFHMpvd7nuAMC1Q4|10|20-964-269-3393|1279.94|HOUSEHOLD|deas are carefully. regular, permanent instructions boost slyly. blithely unusual deposits caj 6920|Customer#000006920|DmTeHxRVLSe32KfxJp38lE|0|10-384-129-2868|4968.13|BUILDING|ounts are blithely above the furiously special sauternes-- quickly special 6921|Customer#000006921|nGFGmYctkA9IM0vxf2Y4GBPnT|12|22-642-260-2620|7388.91|HOUSEHOLD|ly unusual requests boost above the final pinto beans. careful 6922|Customer#000006922|3bCCt0S2wd6RQeq|4|14-163-216-8977|4434.20|HOUSEHOLD|kindle carefully. carefully unusual packages d 6923|Customer#000006923|v6BO5dXHqLFmOfPck0|7|17-733-662-9017|9600.79|FURNITURE|t about the pending requests. final, regular theodol 6924|Customer#000006924|z,OREBfNfUkRsj|16|26-289-764-7597|-408.30|BUILDING| the unusual packages. ironic, final requ 6925|Customer#000006925|2Gg4xMqjRAV0nAn|8|18-878-944-5385|-448.55|HOUSEHOLD|sits about the slyly regular accounts x-ray carefully over the unusual, final theodolites. blithel 6926|Customer#000006926|KOx lB nFGsdak3 Sc5zCjiRFoWq1mx5t|22|32-894-701-8844|5160.08|HOUSEHOLD|its haggle quickly. fluffily even packages sleep slyly along the theodolites. express, pending deposits above 6927|Customer#000006927|Ra0BshyuR,4uTcWjvs1PIqswCbIC6U l7A|6|16-135-283-6467|-509.64|FURNITURE|efully even depths. requests should have to w 6928|Customer#000006928|SOf,2mOUgK5PpW,C9US8oH61eNDG4|2|12-944-721-3451|1380.62|AUTOMOBILE|fully after the slyly dogged depos 6929|Customer#000006929|sQEFEgufg0ZT|12|22-151-910-6027|3000.98|HOUSEHOLD|lites haggle final platelets. express, special theodolites wake slyl 6930|Customer#000006930|RpPHMhjHQLhRR|11|21-832-212-8391|386.35|AUTOMOBILE|e blithely even accounts. ironic, even accounts about the furiously final 6931|Customer#000006931|DJcGXg0BrP1ibpV7UCxEaK5OXy yCckOACzF|24|34-282-643-3571|1402.40|HOUSEHOLD|nusual pinto beans across the ironic deposits are blithely alongside of the carefully brave foxes. quic 6932|Customer#000006932|XjlmI08 R3CXJ8JPYGwSihQUDir|19|29-930-104-7406|6064.87|AUTOMOBILE| courts cajole ironic, final ideas. ironic multipliers cajole carefully iron 6933|Customer#000006933|w0D2wXhqohW3rtxjXxIQvcxH75Fh1XAWZ3O|20|30-564-778-5516|4292.61|FURNITURE| accounts above the quickly final warhorses print blithely 6934|Customer#000006934|oVWWov tNz1fn|14|24-121-895-8013|5944.59|MACHINERY|efully express ideas cajole furiously slyly brave packages. 
closely bold requests haggle 6935|Customer#000006935|eqVuF6zTXrqK4BhmNUBAZQghbjY8uy0co|3|13-497-759-6170|6730.94|FURNITURE|ronic, regular requests after the ca 6936|Customer#000006936|XgPD s4bQqx5|9|19-923-718-1041|1006.58|HOUSEHOLD|he ironically even packages. regular dolphins are slyly by the pending excuses. slyly final requests haggle careful 6937|Customer#000006937|BlUCF0,i2jMXs70wxl3AWvd9frn|24|34-883-594-7672|8496.83|HOUSEHOLD|lly slyly ironic deposits. quickly special excus 6938|Customer#000006938|2ud1eHT7TvCAPub2UZ3bkcKT2Pq8M11eSpd|15|25-974-463-8064|2339.57|HOUSEHOLD|realms are busily carefully express requests. furiously pending foxes try to cajole blithe 6939|Customer#000006939|e2UhEWWPopLOyQ6G|10|20-425-357-4024|5165.17|BUILDING|ut the ironic ideas. regular courts according to the final instructions detect blithely theodolites. even packages 6940|Customer#000006940|37YCc8T1xgurTO|11|21-485-600-2482|8105.71|HOUSEHOLD|y along the carefully regular gifts. final accounts cajole. quickly ironic deposits kindle carefully f 6941|Customer#000006941|63Z3Qe6urA|4|14-210-435-4058|6757.19|MACHINERY|ular instructions cajole above the bold, ironic accounts. ironic ideas about the blithely even re 6942|Customer#000006942|N31XA0a7VOdIiIxuKpbYwE0l|10|20-899-337-6472|2059.91|HOUSEHOLD|ep blithely about the quickly regular dependencies. ironic 6943|Customer#000006943|qjFXg1Ia9FuM|24|34-704-691-5442|1872.41|HOUSEHOLD| carefully regular excuses. ruthlessly unusual instructions was ironic theodolites. pinto beans cajole carefully 6944|Customer#000006944|PlOgCXn qIsC96sqwi7jMKVcv|8|18-677-417-1382|7549.91|AUTOMOBILE|nding accounts. bold dolphins detect bold packages. reques 6945|Customer#000006945|X6nI1uOqfKBA,7V7iEb9PWg6hbM794B1ZmSsgk|10|20-763-130-4896|4363.99|BUILDING|nt accounts sleep quickly. theodol 6946|Customer#000006946|O142a46LKzb75,ggUsnA7yY MHZ7rxNi|24|34-108-937-2099|6626.82|HOUSEHOLD| final packages boost above the carefully bold pinto beans. pending sheaves sleep beneath the sp 6947|Customer#000006947|2q61zt4rUQY6JOVeKxh FcRNRqj|24|34-935-489-1820|-30.39|AUTOMOBILE| packages. furiously close deposits sublate across the final packages. regular, bold requests breach. car 6948|Customer#000006948|GWfdOFWXism9l,PQp8azQJO UPfcdMm2cb,rqU5|3|13-636-530-5983|4150.95|HOUSEHOLD| blithely unusual forges run quickly furiously final accounts. blithely regular accoun 6949|Customer#000006949|88E o,rjHZJVp|0|10-920-156-2564|999.02|AUTOMOBILE| even instructions. asymptotes use. blithe theodolites 6950|Customer#000006950|D7PZ9 FXDeoG9jzsIqiVm8i V|16|26-567-707-8729|2414.22|BUILDING|g to the express platelets. even account 6951|Customer#000006951|dswooF8dKb|21|31-661-344-4562|5751.69|MACHINERY|regular requests. carefully silent theodolites promise quickly regular accounts. platelets sleep among the fina 6952|Customer#000006952|tedolX6bU7a07kS,HSV1Kh2VartW|1|11-964-186-1638|9600.30|HOUSEHOLD|ckages play pending ideas. pending dependencies wake blithely along the unusual accounts. eve 6953|Customer#000006953|T6GNNKYnmp3KhK2a,n|4|14-200-221-1070|2384.73|HOUSEHOLD|ar dependencies boost across the regular, bold 6954|Customer#000006954|rGHTeIqYsbkLG4yOmrZWyvY17zxvWjCgvj|19|29-846-280-8291|5080.67|BUILDING|ross the ironic pinto beans affix after the packages. 
ironic, express theodolites after the furious 6955|Customer#000006955|7Pl77 AAMou0F56ufvqSGYm2dFZrsBSf|23|33-734-299-2759|6751.07|HOUSEHOLD|is against the carefully bold theodoli 6956|Customer#000006956|k 4M1dYnBY0s9tDIjnpZQ7QSRAsqv,dP4qip CBY|19|29-765-731-1061|9202.51|HOUSEHOLD|ial requests detect carefully express instruct 6957|Customer#000006957|sAt10Pog00qXxf3|1|11-448-447-2941|4672.45|HOUSEHOLD|idly pending pinto beans. carefully pending deposits cajole quickly across the unu 6958|Customer#000006958|pyyjzooPiwi2FUIz|5|15-554-805-5336|9829.41|AUTOMOBILE|ly final pinto beans. carefully express ideas about the furiously unusual pinto beans ha 6959|Customer#000006959|rorENMWClttRXEp|13|23-233-789-3757|2990.45|HOUSEHOLD|gular packages are slyly across the asymptotes? escapades boost blit 6960|Customer#000006960|OUKTB cNG030,aFLzyB|19|29-674-628-3972|-505.65|BUILDING|blithely final waters detect according to the even pinto beans. express 6961|Customer#000006961|T6oeN XNst4bY6QOIxFQAj,WN|16|26-346-690-5410|2474.82|MACHINERY|its use carefully blithely even pinto beans. carefully even acco 6962|Customer#000006962|4G9HL28bwq|5|15-357-936-1344|1281.15|MACHINERY|equests. regular deposits nag. foxes sleep quickly-- carefully regular requests across the deposits wake quick 6963|Customer#000006963|CW0iwpVyNqVJiJf0roU5OAoX|16|26-430-431-1058|3161.07|AUTOMOBILE|le quickly ironic excuses. slyly close ideas wake quickly wit 6964|Customer#000006964|IWYQGMU6rEz4GMdjsQAKCsnQT5|13|23-185-648-1303|7140.70|HOUSEHOLD|lly bold requests use carefully carefully bold 6965|Customer#000006965|d6gerbM9AWmOdDpp|15|25-939-405-4493|1815.68|AUTOMOBILE|ular accounts. carefully close accounts doze. final requests solve furiously furiously ironic requests. special asym 6966|Customer#000006966|Q3yAE1yoj,TKafWnhyfcR4WL322ME Pv9bkNR,FW|22|32-842-357-9534|144.58|FURNITURE|theodolites use. finally special excuses hagg 6967|Customer#000006967|uMPce8nER9v3PCIcsZmNlSrCKcau6tJd4qe|13|23-816-949-8373|7865.21|MACHINERY|r pinto beans. regular multipliers detect carefully. carefully final instructions affix quickly. packages boost af 6968|Customer#000006968|Ez1Rhj5Qi2,10Nug38BsPiacwskhzpT|5|15-858-442-4792|1651.44|AUTOMOBILE|deposits are blithely unusual foxes-- even requests do are to the blit 6969|Customer#000006969|HKsXzhiJwn0oWqic7outvp6ek5|24|34-608-122-1503|727.09|HOUSEHOLD|s. even foxes wake. requests haggle slyly carefully final deposits. even f 6970|Customer#000006970|PEtGiJUdTje1Iag6unPFdev|7|17-244-333-8174|8208.18|MACHINERY| regular accounts grow carefully 6971|Customer#000006971|OMyW3Rc1F9r9ixU|15|25-797-318-1684|1906.06|MACHINERY|ts. carefully even excuses mold blithely carefully final foxes. furiously unusual accounts sleep carefully above 6972|Customer#000006972|MjqRGeXURtvVOEY5u30KbgffSKBvJ2X2OI,Hm6Tj|10|20-340-512-1672|1096.88|FURNITURE|ges wake up the fluffily pending deposits: slow deposits cajole before the slyly regular frets. regular theodolites 6973|Customer#000006973|2kXQXsfJMUjQn|21|31-948-302-3253|8232.51|MACHINERY| the regular, final asymptotes ca 6974|Customer#000006974|TVPV8QjdvgxuewOviKUj8IrMvta yJ|23|33-695-299-2927|3894.63|FURNITURE|ounts above the blithely bold requests sleep furiously across the regular instructions. quickly 6975|Customer#000006975|bSjO03DW yuKgBg2ewRK6e47ixGfAYRuFT,GR9|23|33-652-640-1772|5049.82|BUILDING|ld accounts above the furiously 6976|Customer#000006976|Oj Pipp9GFv8FLDelp82C|9|19-115-502-9616|5348.14|HOUSEHOLD|structions wake carefully. furiously even requests detect blithely. 
fi 6977|Customer#000006977|IAhFYl42MDQOWl FBdXT8M o|8|18-423-650-1806|3164.56|FURNITURE|usly alongside of the bold accounts. packages haggle after the blithely ironic accounts. ironic pinto beans dete 6978|Customer#000006978| TOk2qS85CJpdrRpGsszF|12|22-707-108-2535|-325.02|MACHINERY|xpress patterns engage fluffily after the deposits. blithely final theodolites nag abo 6979|Customer#000006979|K1aueKP9rhN,OLMh6NB9MF9JC|3|13-179-679-2432|2270.82|BUILDING|ously regular pinto beans wake: ironic requests wak 6980|Customer#000006980|IzbhQ7AaNNSNkTjIGPsH1bZe53WXDupcQm|11|21-352-950-4327|4710.25|HOUSEHOLD|ing instructions: furiously bold dolphins about the furiously ironic instructions haggle furi 6981|Customer#000006981|BDM83,inyQX8VjjAAY8hrSlA8uu9h6zusf|22|32-465-600-2595|7194.73|MACHINERY|le quickly express deposits. final theodolites haggle quickly permanently even pac 6982|Customer#000006982|dTif54yxBO1KSK90YnlVp,j53YYC4D|8|18-481-928-6409|7891.67|BUILDING| alongside of the furiously regular accounts. quick 6983|Customer#000006983|E0AqrX3aIw4ZcINSIWDXD,7qxDykEj5odh8|22|32-796-697-5842|6348.19|BUILDING|ackages. special instructions wake alo 6984|Customer#000006984|ESRBQnnbjKMRydlNyoXn,|2|12-423-137-1791|3020.71|AUTOMOBILE|ckages at the blithely regular ideas wake blithely amon 6985|Customer#000006985|J9qvTE,Ic0Oxv2cbFwAynNxzDW5KY96qVpOQ3q|13|23-652-721-2985|2119.77|BUILDING|efully carefully final deposits: quickly pending dependencies wake al 6986|Customer#000006986|EBY0Sg3HMtzo8CzNhzaUtjiR1bAL|23|33-582-389-4321|6959.44|AUTOMOBILE|nts sleep slyly above the regular packages. slyly unusual packages na 6987|Customer#000006987|lGJOaGFNBmiIy3AIqwf5Az0aGvrXnhfakZBOQawR|17|27-320-420-9595|2282.79|AUTOMOBILE|latelets haggle quickly slyly regular ac 6988|Customer#000006988|PuokzRJ2EY5|3|13-830-329-5880|7733.90|BUILDING|inal platelets affix furiously. blithely regular deposits promise special, unusual requests. fluf 6989|Customer#000006989|VAdDFdOq9BAyUsOP8CaFBvTG8G2K0|23|33-283-282-3404|5712.90|AUTOMOBILE|ular asymptotes use even, regu 6990|Customer#000006990|z3zQwqsS8eg61oVV4e8UgdFDt|7|17-437-544-6391|6746.93|MACHINERY|ackages. express, unusual accounts cajole furiously toward the fluffily final acc 6991|Customer#000006991|xMyl,tcSuNEP L6z5VlZXB69GuDoBz1 FeQxHnb|0|10-948-457-2049|5747.55|AUTOMOBILE|s around the slyly bold deposits wake carefully among the bravely pending asymptotes. quick 6992|Customer#000006992|HvnRKHV1kJ|23|33-830-883-1031|6070.79|MACHINERY|ithely quick foxes cajole fluffily. even accounts 6993|Customer#000006993|VgyZ4hR3o2DDrqX2p9f1ao8GLn7Y el0|11|21-996-879-2346|4754.10|FURNITURE|l pinto beans boost silent pinto beans. carefully even accounts integrate evenly. d 6994|Customer#000006994|rV05MzBGNrLz|0|10-962-702-6979|7109.05|BUILDING|ctions unwind. bold foxes cajole quickly above the regular, i 6995|Customer#000006995|h1Is0,kANMXbCDqVHJav|16|26-594-133-2656|8768.51|HOUSEHOLD|slyly ironic, even accounts. furiou 6996|Customer#000006996|FFH0V1HOhdgfDs,kJV|20|30-658-795-5594|3335.98|MACHINERY|egular asymptotes cajole quickly regular requests. unusual courts use quietly unusual, pending packages. package 6997|Customer#000006997|twNLY6rDvngJvLOEiF|18|28-278-657-2446|9728.39|BUILDING|ep furiously along the grouches. regular, slow accounts use blithely regular, bold foxes. slyly slo 6998|Customer#000006998|R4GseYn1PbPAQkah9p5Bdqfe|15|25-779-557-3473|-870.34|AUTOMOBILE|after the blithely bold deposits impress quickly idle instructions. 
slyly silent instructions among the bra 6999|Customer#000006999|1T9GzwkkJp,giqi KBNPEXhslG|9|19-345-535-4019|93.74|BUILDING| are above the slowly pending platelets. 7000|Customer#000007000|GabU4EArz2WOSxBImz79QitlufnV,kWbFSrQD|16|26-901-104-2112|469.82|MACHINERY|after the carefully ironic accounts boost slyly regular accounts. express, regular a citus-7.0.3/src/test/regress/data/customer.3.data000066400000000000000000004727401317107136600216610ustar00rootroot000000000000001001|Customer#000001001|KbWTzGB3ZUymu nNCIuG5eCueaqu|21|31-389-986-4741|7140.81|MACHINERY|ever. fluffily special requests are. slyly final asymptotes are carefully quickly reg 1002|Customer#000001002|98bKmyr3jZWRLEY9WBtyUWOodVd|10|20-973-622-6579|3699.76|MACHINERY|ns. deposits along the ironic, regular packages wake furiously according to the carefully even excuses. slyly qui 1003|Customer#000001003|lE07lPMzVzMhG9CUC54uPwGw3BWO|21|31-716-397-1854|7894.00|AUTOMOBILE|quests sublate blithely blithely special dependencies. excuses use busily express pinto bea 1004|Customer#000001004|mBaNGEJoY2tgXD60V2DEO ajjoM3Zd,Jp|8|18-676-152-4849|1512.46|MACHINERY|ainst the ideas nag fluffily according to 1005|Customer#000001005|cTWPLcTvotjgrrcN3j|13|23-149-373-9093|7790.94|AUTOMOBILE|. furiously ironic accounts affix careful 1006|Customer#000001006|Q46palcsa4KwAMhPS|12|22-364-780-5932|7447.99|BUILDING|equests. regular pinto beans sleep furiously express, ironic accounts. special, 1007|Customer#000001007|PfH0lw8GzD7o|9|19-790-843-5283|7347.90|MACHINERY| theodolites. ironic requests wake. thinly silent 1008|Customer#000001008|AfP6tFNz1Eu4buoUd,HrZAld340 xz2wbQ2|2|12-115-571-7897|8191.74|HOUSEHOLD|press orbits affix furiously pending packages. courts alongsi 1009|Customer#000001009|cWONXs2Vx30bkgYoCkx7LrJH,E|12|22-132-906-1117|594.50|BUILDING|ng to the stealthy, final courts cajole carefully alongside of the gifts? regular ideas above the furiously express 1010|Customer#000001010|uasIK CZZ5|5|15-221-463-3776|1652.78|AUTOMOBILE|ing ideas doze carefully accounts. slyly regular theodolites poach. carefully pending de 1011|Customer#000001011|6m8KP FxT4nnHgoc4CN70TVLW1X5Q|5|15-736-809-3168|1188.94|BUILDING|uriously express asymptotes. u 1012|Customer#000001012|5Zsp rqM6oCmgqqFe|4|14-535-551-6255|4422.45|AUTOMOBILE|s above the carefully express r 1013|Customer#000001013|k5rfeOtchP1 w|15|25-725-599-1183|-951.53|BUILDING|idle packages cajole regular asymptotes. carefully express forg 1014|Customer#000001014|ZsiaboMOOV,aGwWUpfE|11|21-553-425-9152|-392.84|AUTOMOBILE|telets. ironic platelets cajole carefully; bold, special instructions unwind blithely regular somas. carefully 1015|Customer#000001015|RDJWEmcAk4GC8OT8WCsXB|10|20-134-926-5391|6392.00|HOUSEHOLD|g courts nag daringly brave pains. blithely special deposits use blithely carefully c 1016|Customer#000001016|8tzkhPXMFHKgmz|11|21-683-368-2994|2357.54|AUTOMOBILE|ular deposits. special foxes solve quickly idly special ideas. never final asymptotes nag. furiously even deposi 1017|Customer#000001017|OoVPZGR5hUp8oo|16|26-593-941-5690|-913.70|MACHINERY|integrate furiously furiously even pinto beans. ironically pending packages cajole quickly furiously special 1018|Customer#000001018|yldxLZOgQwzrXh3t4yktykZZV8v,vK2c6pVr|18|28-450-764-4871|8341.71|FURNITURE|e quickly around the quick, regular pinto beans. regular, regul 1019|Customer#000001019|VMFs38VlBt01g30PzPyliiAoGHazC4HG74JJ|21|31-502-683-3413|2114.53|FURNITURE|quickly special ideas about the courts use of the pending instructions. 
furiously final accounts c 1020|Customer#000001020|DHom,LSHKfYSwLSZv39AooYQHlvbaeztefjwR|3|13-692-286-8158|6914.87|MACHINERY|es. blithely unusual asymptotes sleep ironic accounts. fluffily express sheaves haggle fl 1021|Customer#000001021|m h2wQbujQnQOrcf109reW0 o|6|16-469-554-5196|1286.76|MACHINERY|courts could hang quickly express epitaphs. foxes haggle above the carefully unusual requ 1022|Customer#000001022|lP,9H6e6mQwLsWYYr2Y|8|18-733-553-2195|9605.83|AUTOMOBILE|ending packages. dependencies along the slyly pending dependencies wake carefu 1023|Customer#000001023|w8 oxHcOTUiF8dOr,ktZ05pO7qcHZ8ZeH7|17|27-960-306-5136|7188.35|AUTOMOBILE| express requests. slyly ironic asymptotes throughout the ironic, final packages nag careful 1024|Customer#000001024|9wLrRS78uOPy7CHW|11|21-508-779-7822|-425.09|FURNITURE| carefully regular instructions. furiously final deposits across the carefully special ideas cajole furi 1025|Customer#000001025|3T2A1uo8mCqTeO LTW8atjLBLO12nh6lyl|8|18-588-456-4616|3363.46|AUTOMOBILE|to beans sleep according to the fluffily regular instructions. 1026|Customer#000001026|ktKcS9tV2OC8T42KVqMem NjkNO 4pkXmu|17|27-169-221-8173|9699.28|MACHINERY|totes against the stealthy deposits haggle fluffily after the regular, regular deposi 1027|Customer#000001027|GNaw4RXXMr|2|12-278-154-5262|4946.21|BUILDING| final requests haggle. final, even sheaves maintain carefully above the even ac 1028|Customer#000001028|NxmOhIN,w45aogQ1hZSvqoz0 8nrbdkaiZOe|10|20-582-119-3249|1915.53|AUTOMOBILE|ly unusual, even packages. fluffily special foxes across the furiously final asymptotes main 1029|Customer#000001029|D3TLK5s,gc|15|25-602-810-8723|6252.18|BUILDING|arefully furiously final pinto beans. daringly express deposits 1030|Customer#000001030|Xpt1BiB5h9o|8|18-759-877-1870|6359.27|HOUSEHOLD|ding to the slyly unusual accounts. even requests among the evenly 1031|Customer#000001031|dwCYOftUgV5,EwGJc|21|31-946-641-1853|2226.80|AUTOMOBILE| theodolites. even theodolites sleep slyly. special, express excuses cajole among th 1032|Customer#000001032|6yoIzDrw5zLBO|18|28-449-227-3528|1853.64|FURNITURE| sleep quickly even somas. permanently regular grouches cajole blithely furiously ironic ide 1033|Customer#000001033|WOozPuOF8UdYMwjF5|8|18-470-380-2978|81.06|BUILDING| to the quickly final packages. carefully slow accounts use blithely slowly permanent requests. un 1034|Customer#000001034|Fn5qqb64TSKuJWz4f8GpPkF,c3WY3yqjsV,GgHu|5|15-370-179-6631|7349.82|HOUSEHOLD|bout the ironic requests-- packages wake. requests haggle silent, 1035|Customer#000001035|7yTbQ665G3Bi,6BK0EmQPw,Gc7bZOPk4ncXpo|10|20-376-345-3729|7499.36|AUTOMOBILE|ound the fluffily enticing foxes detect slyly at the furiously final deposits. quickly final instru 1036|Customer#000001036|fxujgj8DOFO6oKrH|8|18-791-577-7691|1766.23|BUILDING|ins sleep. slyly express platelets cajol 1037|Customer#000001037|dwgDZPKR5ZuU3HO2sDOS7Ym0oeC8c6Xm|23|33-855-960-2989|4936.25|BUILDING|lly final pinto beans. pending instructions boost careful 1038|Customer#000001038|yQCza56pNgcF9sxDR HCed22GeEq|17|27-511-101-1611|-509.92|AUTOMOBILE|uriously express accounts. even pinto beans wake. slyly regular requests according t 1039|Customer#000001039|F602TgKjElSWrZ|10|20-871-886-9220|7618.54|FURNITURE|nding packages use. blithely regular sheaves doze blithely. fluffi 1040|Customer#000001040|vbJmdHe6U9Pl|11|21-756-109-1482|2860.71|BUILDING|old requests wake slyly! 
slyly special deposits cajole above the unusua 1041|Customer#000001041|189f n2lA4|11|21-314-290-3052|7993.98|MACHINERY|uctions are busily along the furiously ironic instructions. blithely thin waters cajole slyly. 1042|Customer#000001042|S1sh9gyFn21m4zkb4J95GD5|5|15-215-652-3459|9849.87|FURNITURE|ly bold dugouts! pending asymptotes are blithel 1043|Customer#000001043|HJMn12xn4bl vWC7iVuTRsErYEzlyCO|7|17-266-334-8613|5847.76|HOUSEHOLD|iously ironic deposits cajole slyly busily final account 1044|Customer#000001044|Eh2e8gLyStrLE7A|0|10-451-459-9620|7291.30|BUILDING|ly across the slyly ironic accounts. even requests 1045|Customer#000001045|clvGUnQPLbzX 23hemPp24WS1MEtS4z|20|30-120-992-2121|2942.19|HOUSEHOLD|r deposits cajole blithely along the quickly silent pattern 1046|Customer#000001046|umgqzlyUW3AYz2C39YMhIgf|10|20-890-161-8958|2311.00|BUILDING| accounts. carefully regular theodolites run fluffily carefully e 1047|Customer#000001047|h5iBRMsym,y6LLSQU2DzNftiET qZ|9|19-146-399-4251|8918.99|BUILDING|lar packages haggle theodolites. thinly express deposits 1048|Customer#000001048|Mk0ebiw9SaFBTwoib|19|29-757-642-3735|2583.91|BUILDING|s are slyly regular foxes. slyly final pinto beans wake quickly among the regular, spe 1049|Customer#000001049|bZ1OcFhHaIZ5gMiH|9|19-499-258-2851|8747.99|MACHINERY|uriously according to the furiously silent packages 1050|Customer#000001050|KgVnjN7Y4HCN5f97HEUp7kYNNTrE3 O|11|21-448-313-4374|-517.65|FURNITURE|ely ironic packages. blithely regular foxes sublate furiously. special requests boost furiously agains 1051|Customer#000001051|iHS,UFudVOOe|2|12-869-221-1428|9776.39|HOUSEHOLD|cuses boost furiously silent deposits. quickly silent requests integrate quickly bold asymptotes; slyly regular ide 1052|Customer#000001052|OcXtKS,1Hvf2D0 rPvhw4qXViYOudQ3|13|23-496-475-9040|2837.96|BUILDING|s accounts haggle against the furiously final asymptotes. ironically regular accounts boost. furiously fina 1053|Customer#000001053|wDJTteyausmZswQAFQot|16|26-400-312-6496|-473.85|MACHINERY|efully enticing pinto beans. final pack 1054|Customer#000001054|Xgj6QVy2I9FVoSiIbgLf9LIE8XpWI2RtmbGUx|21|31-915-292-9727|8844.27|HOUSEHOLD|y pending ideas. dogged dependenci 1055|Customer#000001055|Z3AggyEMPME2hqqTfbMC76O0z|7|17-802-131-7180|639.93|HOUSEHOLD|dolphins: furiously ironic pinto beans above the carefully regular foxes nag slyly across th 1056|Customer#000001056|8u1rnDOcvU109|5|15-325-285-5215|6287.12|HOUSEHOLD|leep except the foxes. packages eat enticingly along the requests. even 1057|Customer#000001057|xyV8 FbW4xS,JhkxC0dY527tzcMKxM|24|34-750-735-1314|-377.11|AUTOMOBILE|s. furiously ironic deposits against the carefully bold accounts wake carefully even deposits. fluffily even 1058|Customer#000001058|R0NIEcSVDQ4rNUcCevDrap|19|29-818-620-9637|6807.55|MACHINERY|uctions. slyly express pinto beans are furiously. bold theodolites according to the fur 1059|Customer#000001059|OHwYMiDjmgeIQXhLlNW,8LIwIEr|23|33-683-418-9460|1547.50|HOUSEHOLD| wake carefully. carefully quick excuses cajole ruthlessly among the ideas. bold, ironic braids are 1060|Customer#000001060|aWJkU6JJJOvgaKPOAJJc|8|18-290-794-6133|2840.59|HOUSEHOLD|ter the bold, regular ideas! deposits eat. daringly unusual theodolites sleep alongside of the regular, fina 1061|Customer#000001061|CqLhg io1CpQKhrVHHDhWg1Omrx1hLcpKB6h|4|14-909-417-8324|-258.77|BUILDING|ticing packages maintain doggedly carefully regular instructio 1062|Customer#000001062|3OYrGEJC1YUa9DP|22|32-207-600-8684|4709.92|HOUSEHOLD|ag. 
carefully regular asymptotes a 1063|Customer#000001063|yHVWD7y1Oe1P|21|31-277-349-9036|1663.28|MACHINERY|ress attainments. furiously regular excu 1064|Customer#000001064|VmFhpV9 aIqPysMHRIWZl|15|25-391-998-4106|1666.07|MACHINERY|gular accounts. thin platelets promise fluffily. carefully express accounts haggle quickly qu 1065|Customer#000001065|qGBa7X0dOMsKLuYBpShpJVwGyU9rh|22|32-605-226-2449|4663.41|HOUSEHOLD|nts. quickly quick dolphins run 1066|Customer#000001066|2Ge 0Nk29FlBs1GuBiY84sLvn38mEkAKnM|0|10-333-463-4472|949.68|MACHINERY|requests. slyly final instructions sleep. fluffily even packages cajole pending, final 1067|Customer#000001067|g25CH,fhra|23|33-764-123-9568|9153.84|FURNITURE|ackages are furiously carefully even dependencies. idly final 1068|Customer#000001068|ElWdGnnKpmo0sA1Au teWwomSVgG,me|18|28-485-984-7299|737.40|AUTOMOBILE|nto beans. dinos sleep fluffily carefully regular sentiments. final, special packages wake blithely. ironic 1069|Customer#000001069|PdWrPGSArhnqWQ km65e|21|31-927-711-6278|5465.29|BUILDING|. ironic excuses after the special hockey pl 1070|Customer#000001070|m0sYmeYs5wLydSS qw542Et32|15|25-894-843-9171|3160.23|FURNITURE| carefully across the express foxes. carefully special accounts ca 1071|Customer#000001071|PgCAYL2LEwE7v7Pk4dYpRe Nn7MN8wVzYbA2qtj|16|26-350-231-6183|4033.56|MACHINERY|ar accounts. quickly regular packages sleep bold ideas. slyly pending asymptotes a 1072|Customer#000001072|HpCr1tM88WoELSld708ByJ|4|14-432-882-6163|7979.48|HOUSEHOLD|ic pinto beans are blithely across the grouches. furiously pending platelets sleep pe 1073|Customer#000001073|KEyFI2gYMZrSVbMMMIf|10|20-774-197-6595|8217.23|BUILDING|fter the blithely regular pinto beans. express asymptotes sleep special pa 1074|Customer#000001074|nG,eR,gjPr|10|20-176-839-1649|122.67|BUILDING|ly final courts haggle quickly boldly express excuses. dependencies eat. slyly even requests boost blithely 1075|Customer#000001075|hTIc2AUg pqhYh2W0yMUTQtrZV1KUutysIb6,nxb|21|31-724-234-4181|2714.50|BUILDING|regular patterns. unusual platelets try to are unusual theod 1076|Customer#000001076|C1gf0FyiU H88P0cpv4UOcdgaPRpVA|10|20-405-710-1902|3509.35|AUTOMOBILE|ainst the silent, silent ideas. sly theodolites use carefully express accounts. regular foxes boost carefully agai 1077|Customer#000001077|sjk1DTHWVMX53kG8AbTtTh1EcMvWeDO8gFDdpQOK|21|31-367-294-4048|8581.78|AUTOMOBILE|lithely regular deposits. carefully pending deposits sleep. quickl 1078|Customer#000001078|ZjRzAz8QbEeIkJxrUI,b|19|29-729-692-6790|70.93|MACHINERY|onic requests wake slyly furiously final attainments. ironic, even accounts cajo 1079|Customer#000001079|cOyd7wsHIQq2LNN|19|29-699-930-2250|2135.91|AUTOMOBILE|ter the express foxes nod slyly excuses. slyly speci 1080|Customer#000001080|Yux,gs14NpneiZEy9Rz|12|22-806-885-5347|3267.19|MACHINERY|ng dolphins cajole across the carefu 1081|Customer#000001081|eGGRjZex7YANvD1jfnPMcBK2JbM|12|22-866-942-1021|8647.42|MACHINERY|pendencies haggle after the quickly special instructions. furio 1082|Customer#000001082|vMX52A1zqDbGNzjfSzgsxSVU0GU6iFmrgiUE|5|15-646-384-2302|3247.92|BUILDING|ly. blithely final packages wake silent ideas. express, special theodo 1083|Customer#000001083|tnrpYmWGxwyaFmJy2Oq0Z|7|17-159-499-3318|3847.29|FURNITURE|luffily. slyly unusual accounts cajole furiously against the ironic asymptotes. slyly reg 1084|Customer#000001084|E091r836A8TPqn5|2|12-378-899-7136|1416.75|FURNITURE|nstructions. 
fluffily pending pinto beans affix slyly; carefully pending requ 1085|Customer#000001085|pZgtHRGIkUVwiEJLWZXs3KUNi6wLnQzJU1|21|31-831-702-3157|5275.88|BUILDING|he carefully regular courts use special excuses. ironic deposits along the blithely even sauternes nag slyly 1086|Customer#000001086|ECMZrONto2nI2TBv,k|8|18-399-482-6815|9726.83|FURNITURE|ymptotes cajole enticingly furiously silent ideas. furiously pending packages are al 1087|Customer#000001087|ETOH68urIxK839xmKEmfkjc|21|31-334-391-6403|5878.21|MACHINERY|s haggle above the slyly express requests. quickly regular packages after the quickly silent accoun 1088|Customer#000001088|YjXQtOJoM0nhClEy0,WFdNxvJ1g6xpn kL2ommEv|22|32-324-225-2635|2098.62|BUILDING|ly special ideas. slyly unusual requests haggle 1089|Customer#000001089|OO77 pLjaOe7bam1WnH9gtcZNCUlUPI|18|28-164-765-7462|3429.95|BUILDING|o beans affix carefully regular accounts. quickly even ideas sleep. pinto beans haggle fluffy courts. slyly regu 1090|Customer#000001090|P2JDHFVxjU|15|25-711-934-6343|5212.43|AUTOMOBILE|ffily even packages wake quickly 1091|Customer#000001091|4ye7wJ3gU92RZCpwTtDi8Ws,|17|27-336-955-4882|-710.53|BUILDING|y. carefully ironic excuses sleep quickly fluffily even requests. fi 1092|Customer#000001092|,oAq2L60hjb8|15|25-766-175-4580|2004.15|HOUSEHOLD|carefully silent somas can wake carefully aft 1093|Customer#000001093|LO,9qCPIjSXriBqQsAOXLrQKedQ8UO6gb|24|34-931-911-6156|-273.96|HOUSEHOLD|p furiously carefully bold packages. regular escapades breach. blithely unusual ideas integrate across t 1094|Customer#000001094|OFz0eedTmPmXk2 3XM9v9Mcp13NVC0PK|2|12-234-721-9871|2544.49|MACHINERY|tes serve blithely quickly pending foxes. express, quick accounts 1095|Customer#000001095|JtyQvLlCI ZPYQ6ygv,5q|9|19-881-259-2391|6221.26|MACHINERY|foxes. ironic, daring requests sleep regularly across the blithely 1096|Customer#000001096|ldbo6AfnCRjFW8rZnvG6UxbX6o7ISAJRDD7|4|14-368-827-9896|3687.37|FURNITURE|lyly even asymptotes cajole furiously. regular, ironic theodolite 1097|Customer#000001097|a wMc0lQutcHs6cRomoMCGjvM0MwEk4uyrxKI3|6|16-604-758-5574|8651.87|MACHINERY|p carefully. carefully special excuses haggle carefully about th 1098|Customer#000001098|XVJb1HxQeLu9x|22|32-206-732-5183|1009.22|FURNITURE|evenly unusual deposits. slyly even ideas according 1099|Customer#000001099|2ZiU64au LN0 GUxY8|1|11-128-186-5241|8990.07|AUTOMOBILE|ckages: blithely ironic theodolites cajole furiously. f 1100|Customer#000001100|PGXj,,vjAfMNLzd|12|22-880-206-7392|9189.75|BUILDING|ideas. furiously final sheaves integrate. pinto beans haggle slyly according to the furiously ironi 1101|Customer#000001101|h,UOEyoi1ZG4|3|13-528-469-6051|-842.72|MACHINERY|o beans; quickly express accounts slee 1102|Customer#000001102|F9fxZhJJhaR0P4Rgd7SE2PA58x|24|34-103-353-4822|2369.01|HOUSEHOLD|elets. regular requests sleep quickly. express ideas haggle. bold, regular ideas haggle. quickly regular accoun 1103|Customer#000001103|kbYrf d uR|16|26-307-423-8860|4878.10|AUTOMOBILE|n accounts cajole across the even pinto beans. quickly express pat 1104|Customer#000001104|,t,d8FlnmiECPa|8|18-644-507-8095|1230.47|AUTOMOBILE|ages haggle. slyly ironic foxes are idly among the furiously final pearls. slyly unusual reques 1105|Customer#000001105|cZhhOUzv6,Vbaa2bFT|22|32-885-298-6750|9491.46|FURNITURE|y final packages. furiously ironic packages was. fluffily ironic instructions integrate 1106|Customer#000001106|WZEExIU9g2smcowcinj|21|31-214-739-2409|9977.62|HOUSEHOLD|requests nag. 
fluffily regular packages haggle q 1107|Customer#000001107|yQBP1SLK2uzN4dzgaQ|1|11-720-869-9052|7961.62|AUTOMOBILE| along the final deposits. carefully express ideas wake? quickly regular instructions are furious 1108|Customer#000001108|9sPt6a66R0eCRVYh9QrF8zjxNWFFk8KU|7|17-408-450-8891|4997.35|BUILDING|rding to the final instructions. carefully final accounts wake along the carefully careful pinto beans. re 1109|Customer#000001109|BJCfTYEV9eCDraeyO3v|22|32-194-697-1794|3387.22|FURNITURE|r accounts. bold, final pinto beans wake carefully even Tiresias. quickly busy frays above the blithely ironic de 1110|Customer#000001110|BRnTy8RZ,1oHOg9ly8SsJLIyiuvhv|10|20-777-225-9349|2041.65|HOUSEHOLD|usual platelets along the quickly regul 1111|Customer#000001111|gavpg6eW5lEML|6|16-824-312-3537|2892.21|MACHINERY|s are slyly quiet requests. darin 1112|Customer#000001112|wFf 0nSvdJyk2GqRsqJrcr9 UPr0C3OT5zT|20|30-401-424-6458|9314.59|MACHINERY|fily quickly unusual theodolite 1113|Customer#000001113|jLtKZ0bRJyYL1k|12|22-412-216-1933|7392.30|HOUSEHOLD|ages among the furiously pending packages detect across the blithely unusual accounts. furiously ironic requests sh 1114|Customer#000001114|f7 he8eByBFy6z7vcOajC1gaUKqmRN|14|24-630-988-3843|6446.83|BUILDING|ularly ironic platelets. pinto beans along the slyly express packages wake unusual packa 1115|Customer#000001115|Elvb2a3FinAzxw |5|15-356-145-6356|-178.52|BUILDING|ending instructions thrash blithel 1116|Customer#000001116|aWuLgbu,8HZMbkI|14|24-116-214-4051|592.60|AUTOMOBILE|tes-- final, regular excuses sleep. sly 1117|Customer#000001117|80NfzBRWj5tUUaRdnsFE7Eg|23|33-461-439-5684|2829.07|FURNITURE| ironic deposits need to haggle furiously. furiously bold deposits use among the carefully ironic 1118|Customer#000001118|QHg,DNvEVXaYoCdrywazjAJ|11|21-583-715-8627|4130.18|HOUSEHOLD|y regular requests above the blithely ironic accounts use slyly bold packages: regular pinto beans eat carefully spe 1119|Customer#000001119|ER5vABifV766q5f0FN7l2eN7MIg2lO|20|30-789-716-6850|3971.65|AUTOMOBILE| pinto beans maintain slyly even instructions. regular acc 1120|Customer#000001120|UAG90slCmJS7JOP AhlV12tYD3yUiyB1p2hxZ|2|12-938-579-7156|1543.64|AUTOMOBILE|r theodolites boost. slyly final pinto beans sleep blithely unusual accounts. fluffily even multipliers 1121|Customer#000001121|o2uc3AHYz,m 3vYg8YxBwI0XuG|20|30-197-936-4724|3942.11|MACHINERY|usly? final theodolites are carefully 1122|Customer#000001122|9lxNEW0Rei4DFaT4vX,T551AwBzrZoOXsRNOm|0|10-257-957-3327|45.21|BUILDING|egular, regular instructions are slyly regular requests. deposits despite the regular, pendi 1123|Customer#000001123|pO80QGjK7S0Kmoh46dViD K4OSEVDyiJ53CN|16|26-983-192-5480|9786.36|MACHINERY|s carefully ironic packages. accounts boost boldly fluffily even gifts. slyly final fo 1124|Customer#000001124|EQNw9dNy63,|1|11-709-582-2006|5512.73|BUILDING|ctions wake. packages haggle furiously. express 1125|Customer#000001125|DrHkeaX6wshtuZOI2nLrME|3|13-807-542-3923|8427.55|MACHINERY|counts according to the carefully silent grouches haggl 1126|Customer#000001126|8J bzLWboPqySAWPgHrl4IK4roBvb|8|18-898-994-6389|3905.97|AUTOMOBILE|se carefully asymptotes. unusual accounts use slyly deposits; slyly regular pi 1127|Customer#000001127|nq1w3VhKie4I3ZquEIZuz1 5CWn|10|20-830-875-6204|8631.35|AUTOMOBILE|endencies. express instructions wake about th 1128|Customer#000001128|72XUL0qb4,NLmfyrtzyJlR0eP|0|10-392-200-8982|8123.99|BUILDING|odolites according to the regular courts detect quickly furiously pending foxes? 
unusual theodolites use p 1129|Customer#000001129|OMEqYv,hhyBAObDjIkoPL03BvuSRw02AuDPVoe|8|18-313-585-9420|6020.02|HOUSEHOLD|pades affix realms. pending courts haggle slowly fluffily final requests. quickly silent deposits are. iro 1130|Customer#000001130|60zzrBpFXjvHzyv0WObH3h8LhYbOaRID58e|22|32-503-721-8203|9519.36|HOUSEHOLD|s requests nag silently carefully special warhorses. special accounts hinder slyly. fluffily enticing 1131|Customer#000001131|KVAvB1lwuN qHWDDPNckenmRGULDFduxYRSBXv|20|30-644-540-9044|6019.10|MACHINERY|er the carefully dogged courts m 1132|Customer#000001132|6dcMOh60XVGcGYyEP|22|32-953-419-6880|4962.12|AUTOMOBILE|ges. final, special requests nag carefully carefully bold deposits. ironic requests boost slyly through th 1133|Customer#000001133|FfA0o cMP02Ylzxtmbq8DCOq|14|24-858-762-2348|5335.36|MACHINERY|g to the pending, ironic pinto beans. furiously blithe packages are fina 1134|Customer#000001134|79TYt94ty a|9|19-832-924-7391|8458.26|HOUSEHOLD|riously across the bold instructions. quickly 1135|Customer#000001135|cONv9cxslXOefPzhUQbGnMeRNKL1x,m2zlVOj|11|21-517-852-3282|3061.78|FURNITURE|regular frays about the bold, regular requests use quickly even pin 1136|Customer#000001136|GHCEiSK0TKsOncuJT3,2zSvlZW4Pz|24|34-440-798-1100|-723.49|FURNITURE|ular pinto beans. slyly special deposits according to the slyly ironic requests maintain quickly 1137|Customer#000001137|LJ3J3i0BlPLrhKi6VabXxNrtpLAGH|16|26-598-565-1269|4210.15|AUTOMOBILE|usly quickly unusual attainments. stealthily unusual requests cajole ironic reques 1138|Customer#000001138|8 9P,dIGWnrrDiVs0S|22|32-236-817-2959|6035.44|BUILDING| instructions cajole thinly ironic requests. regular packages affix. ironic, final pinto beans ac 1139|Customer#000001139|UDGG69rYgUGayNk 9vFytd5q3nZdeRZQNSfL6|22|32-182-662-9475|4604.83|BUILDING|y pending pinto beans haggle blit 1140|Customer#000001140|leG5nToZpjmWNeaOsVv|20|30-331-754-7359|6319.21|AUTOMOBILE| pinto beans. blithely regular packages sleep carefully blithely ironic requests. requests eat blithely aga 1141|Customer#000001141|A6uzuXpgRPp19ek8K8zd5O|22|32-330-618-9020|0.97|MACHINERY| accounts. furiously pending deposits cajole. c 1142|Customer#000001142|b7ytiiX7E9|16|26-191-682-8920|3273.08|AUTOMOBILE|doze slyly. furiously pending deposits cajole fluffily carefully pending packages. boldly regular 1143|Customer#000001143|9tfTGdYHyZXtXbbeboPIXwCT|4|14-568-471-9747|8655.98|AUTOMOBILE|e carefully final packages integrate against the furiously express platelets. ironic ideas wake above the e 1144|Customer#000001144|DGLUWG9evYLNbYhOXVzqZ LdfIMVfBjDf|1|11-336-453-4489|4189.04|BUILDING| ideas. even, regular excuses after the ironic requests cajole blithe 1145|Customer#000001145|6R rPD6SDQPpFuYxxwh,Dv1PeusmP,C6cNcI|2|12-270-756-2968|3249.25|HOUSEHOLD|e. asymptotes sleep fluffily quiet requests. even theodolites among the fluffily regular pinto 1146|Customer#000001146|DRBYvF0iBGsDC3iPNFsPHq3FkU,jCK8LJPX4W|12|22-720-237-1751|4204.36|FURNITURE|final, pending asymptotes. regular requests was 1147|Customer#000001147|AVjlczwVwL CT jO3sgWn|15|25-754-809-7107|7734.64|HOUSEHOLD|eposits. quickly express accounts are idly. slyly final platelets wak 1148|Customer#000001148|7PslyqtS1K2Pabjht 4qgaZ1BbSNFfz6QiK4K|19|29-393-445-2761|7129.84|AUTOMOBILE|c, even deposits. accounts do use. regular accounts haggle blithely special, express courts. 
blithely 1149|Customer#000001149|5JOAwCy8MD70TUZJDyxgEBMe|3|13-254-242-3889|6287.79|MACHINERY|ress requests haggle carefully across the fluffily regula 1150|Customer#000001150|fUJqzdkQg1|21|31-236-665-8430|-117.31|MACHINERY|usly final dolphins. fluffily bold platelets sleep. slyly unusual attainments lo 1151|Customer#000001151|ratQBQ4rYv TfhWfHe|7|17-948-135-2667|6354.89|BUILDING|l requests. furiously bold orbits after the furiously ironic excuses sleep 1152|Customer#000001152|QRmFl9ZkoBDQ7|12|22-471-341-5516|5680.15|HOUSEHOLD|oost along the quiet, bold foxes. ironic dinos nag fluffily final pinto beans. blithely regular deposit 1153|Customer#000001153|SYG3KMj1fMh7GwvIZ,pY7mGLR1NT6EmNjE|3|13-319-420-5160|6244.03|HOUSEHOLD|s. even packages use fluffily always express packages. regular, even asymptotes about the furiou 1154|Customer#000001154|7RqtNwcSPbaUKaC|19|29-797-132-6916|1498.46|BUILDING|thely. furiously regular accounts above the ironic platelets wake slyly blithely bold pint 1155|Customer#000001155|kEDBn1IQWyHyYjgGGs6FiXfm3|8|18-864-953-3058|3510.25|MACHINERY|ages? fluffily even accounts shall have to boost furiously alongside of the furiously pendin 1156|Customer#000001156|3ShFbt9dTbLOG4lUBvc1AZp0Tam0BNjYS qwTZ|14|24-637-724-1410|1799.67|HOUSEHOLD|ns. carefully regular foxes are quickly. furiously careful accounts accord 1157|Customer#000001157|3rchTZwilGpffMz1MfpnkFfWBtOIdgmvvS1E7sJj|20|30-741-794-9826|6013.09|HOUSEHOLD|equests. deposits cajole quickly slyly spe 1158|Customer#000001158|btAl2dQdvNV9cEzTwVRloTb08sLYKDopV2cK,p|10|20-487-747-8857|3081.79|MACHINERY| theodolites use stealthy asymptotes. frets integrate even instructions. car 1159|Customer#000001159|IAnWq4YFKs7|2|12-269-807-3861|5553.75|HOUSEHOLD|ages sleep fluffily. packages after the carefully express packages nag slyl 1160|Customer#000001160|v65g1aRCGA76ZHySoOBffL31n4vJ0nm,tK,UEA|24|34-103-942-4634|4976.24|AUTOMOBILE| pending, special packages against the blithely unusual packages eat quic 1161|Customer#000001161| QD7s2P6QpCC6g9t2aVzKg7y|19|29-213-663-3342|591.31|MACHINERY|ly alongside of the quickly blithe ideas. quickly ironic accounts haggle regul 1162|Customer#000001162|b5N12h9D6yJemoVx6OQf0uL|2|12-887-115-9986|3139.71|AUTOMOBILE|refully furious packages. furiously ironic ideas against the carefull 1163|Customer#000001163|54fBdElRYOjEH8S|2|12-204-803-1483|90.22|BUILDING|inments. carefully regular instructions haggle carefully slow packages. slyly even packages kindle blithely special 1164|Customer#000001164|XWfNRnO2S5KAW0VodNwaBDixCEtv nKzt2LVFiwm|0|10-828-178-5049|7341.35|HOUSEHOLD| ideas use. unusual packages sleep 1165|Customer#000001165|h7KTXGSqsn0|9|19-766-409-6769|8177.33|MACHINERY|jole slyly beside the quickly final accounts. silent, even requests are stealthily ironic, re 1166|Customer#000001166|W4FAGNPKcJFebzldtNp8SehhH3|17|27-869-223-7506|507.26|MACHINERY| before the platelets! carefully bold ideas lose carefully 1167|Customer#000001167|gNYGOcGkJu3ooSmsCh|19|29-721-479-1548|9510.87|FURNITURE|lly regular ideas grow furiously regular accounts. regular, special requests sleep blithely. slyly bold pla 1168|Customer#000001168|gmAnpwPPg0LX4|17|27-608-883-2632|6194.65|FURNITURE|ses run according to the regular instructions. slyly regular foxes around the furiously ironic theodolites use fl 1169|Customer#000001169|04YQNIYyRRFxUnJsTP36da|4|14-975-169-9356|7503.30|MACHINERY|into beans doubt about the slyly ironic multipliers. carefully regular requests breach theodolites. 
special packages 1170|Customer#000001170|BNhssVcV36vshEHUAc aPFJ|8|18-670-628-8499|2070.73|HOUSEHOLD|ronic instructions. express pinto beans poach blithely. quickly ironic accounts n 1171|Customer#000001171|GatOC LsLU9MkgyaNMYH|8|18-457-394-2863|7658.97|HOUSEHOLD|c dolphins. accounts are slyl 1172|Customer#000001172|r dreL8m8cRaiIz IqZ83oMo,AVxe2PbsHQzK|14|24-249-588-1969|420.97|FURNITURE|express asymptotes haggle furiously. fluffily special deposits haggle quietly even, special tithes. ironic foxes alo 1173|Customer#000001173|6Abj72jR5Z0GYQMZKBmiQxW|18|28-409-365-6392|182.59|FURNITURE| ironic accounts above the ironic excuses haggle fluffily furiously regular packages-- slyly regular gi 1174|Customer#000001174| b9zecNS,J97qi7Qk5|4|14-131-930-7154|8798.96|AUTOMOBILE| ironic packages. furiously regular excuses sleep about the fluffily unusual pinto beans? regular foxes kind 1175|Customer#000001175|olj7nLYgBZ526MNBg9CV7w LYo6F1uD,Hm54|4|14-756-259-6379|9207.16|FURNITURE| haggle pending requests. carefully regular ideas nag. ruthlessly final packages a 1176|Customer#000001176|V0xc0tXNMmObuUJ0rGARp6YIw4fo84CD|10|20-141-903-5936|5827.59|BUILDING|ven accounts boost after the accounts. slyly silent accounts use fluffily amon 1177|Customer#000001177|hZPNQ8a9QRM ,SYdTdoW9hw|14|24-200-701-8606|9281.72|FURNITURE|ng the quickly bold theodolites cajole carefully around the deposits. furi 1178|Customer#000001178|W,03Nl2iWQ94xYyCo3R8CTMNFhu|9|19-717-739-3103|4966.58|HOUSEHOLD| even requests cajole furiously after the quickly ironic accounts. even re 1179|Customer#000001179|JLtS3n1xDqnNFS5MZc5uZHOjDctAJEl|19|29-311-833-9211|3336.25|AUTOMOBILE|ress, special accounts sleep slyly about the carefully express packages. f 1180|Customer#000001180|jI4QtviiCs0,LOgUPH4aONMnoNt|13|23-188-767-6645|3367.36|BUILDING|uests poach carefully carefully final deposits: ironic, regular deposits are slyly busy excuses. regular 1181|Customer#000001181|ZFFYipzTg0vSjOhPbcBUgPK9se|10|20-330-305-8843|9180.50|AUTOMOBILE|gle about the busily special theodolites. furiously ironic deposits haggle beside the furiously special accounts. 1182|Customer#000001182|pLrF7F1,uoyGaU|6|16-229-473-7194|8814.39|AUTOMOBILE|jole carefully. furiously final pinto beans detect. f 1183|Customer#000001183|qdIqRUfpmvtWo0NGsyi4qyjkwzlImP9,NrSC|1|11-968-244-9275|4455.76|BUILDING|arefully regular dependencies. quick 1184|Customer#000001184|M0dd3R30k0YjIr4GVeo|11|21-661-875-1923|9032.89|BUILDING| excuses nag carefully even accounts. unusual platelets detect carefully bold acco 1185|Customer#000001185|z,dN83fETWpkJkoR|14|24-860-751-6688|2913.52|BUILDING|ndencies against the carefully even accounts cajole carefully quickly regular packages. even fox 1186|Customer#000001186|cj5EeLbJJ6MPdynzposq,Apbj9 2Jm|23|33-500-965-3385|4466.30|BUILDING|ding realms cajole after the even foxes 1187|Customer#000001187|W1GdUKr3vQMVAZIU|10|20-543-260-5157|-932.96|AUTOMOBILE| blithely unusual theodolites detect doggedly. bold dolphins was blithely. pinto beans use carefully at the furiou 1188|Customer#000001188|PtwoF3jNQ9r6 GbPIelt GvbNBuDH|15|25-108-989-8154|3698.86|MACHINERY|ts. quickly unusual ideas affix aft 1189|Customer#000001189|rbnZCbJSTE3qWLl|10|20-746-804-1553|3798.28|HOUSEHOLD|enticingly express platelets wake. regular requests boost even, regular instructions. 
express dependencies a 1190|Customer#000001190|JwzW9OtxFRXDnVo5hXl8 2A5VxH12|15|25-538-604-9042|2743.63|MACHINERY| regular deposits according to the pending packages wake blithely among the silent inst 1191|Customer#000001191|K9J7dhIXDB2kgubtIVdRC6RP0aF,GQXin|19|29-587-244-2901|9088.54|AUTOMOBILE|ts wake. waters detect fluffily carefully regu 1192|Customer#000001192|8DbtM3KloBZ4OO1dRrF99|20|30-904-936-4914|3231.33|BUILDING|efully final packages use. slyly pend 1193|Customer#000001193|gdKqrIp,yaMaQSFerrGGzc6Kpy|8|18-524-487-2547|-17.10|AUTOMOBILE|accounts sleep carefully. regular accounts wake slyly. carefully regular requests along the quickly pend 1194|Customer#000001194|NzWKbiPK1oFd7PNz|21|31-155-275-3981|7582.29|FURNITURE|lve quickly according to the carefully regu 1195|Customer#000001195|71mmXvaWKf|4|14-355-801-7486|9621.49|FURNITURE|l, regular gifts should have to x-ray blithely fluffily ironic 1196|Customer#000001196|S3sw3q SDWVuUoEFvwv9M|20|30-615-967-7758|6378.67|BUILDING| carefully alongside of the blithely even theodolites. carefully ironic instructions wake after the spec 1197|Customer#000001197|9A1LTDf0KbR|0|10-254-891-7835|9261.05|FURNITURE|ording to the slyly ironic accounts. carefully final instructions haggle. special, unusual foxes haggle enticing 1198|Customer#000001198|r0liwpMwaIBQ9 zQjojGylXkJuKUL|18|28-278-515-1034|9593.35|AUTOMOBILE|intain fluffily ironic instructions. regular requests nag fluffily carefully unu 1199|Customer#000001199|sQJZJRAgYrZY0gPo9fJp6KAbY|16|26-367-212-1737|6503.35|AUTOMOBILE|es. quickly slow foxes boost 1200|Customer#000001200|2PFysvL4pk80l|22|32-890-210-4199|3765.05|HOUSEHOLD|nent frets. blithely bold pearls thrash across the r 1201|Customer#000001201|LfCSVKWozyWOGDW02g9UX,XgH5YU2o5ql1zBrN|10|20-825-400-1187|5165.39|BUILDING|lyly pending packages. special requests sleep-- platelets use blithely after the instructions. sometimes even id 1202|Customer#000001202|xThQDiKdG,0sU IduCCPAgHJfx1PDJwtUQvfU|0|10-788-256-6117|702.73|BUILDING|accounts. fluffily regular requests are. packages among the final deposits haggle carefully arou 1203|Customer#000001203|9pTq4gggfKoSqQetn0yJR|16|26-370-660-6154|5787.69|MACHINERY|osits nag furiously final accounts. silent pack 1204|Customer#000001204|QxpCVhq2x0PwW,zgZ AEuFkgb50ryGM|20|30-117-472-8751|9777.19|HOUSEHOLD|ily final instructions. pending foxes detect doggedly accor 1205|Customer#000001205|1ALD7GN4Iw0Vl5toeM|8|18-185-307-2678|5390.34|HOUSEHOLD|ptotes. silent deposits above the bold warhorses boost 1206|Customer#000001206|dxzjW0gykcG2kJ gN8hZV02q6U5T6uVNfP|20|30-716-117-6066|8437.76|HOUSEHOLD|ng the ironic accounts. regular requests across the quickly bold deposits wake carefully across the 1207|Customer#000001207|tDZe2FlIxGjrf9x,n6N1tu0DbWyUkSSu|3|13-572-474-7362|-556.05|AUTOMOBILE|uriously by the slyly regular packages. fluffily final deposits across the quickly express epitaphs us 1208|Customer#000001208|M uLSFG6IrQkKQxrH5vCbPjglIpB3JC|5|15-789-973-6601|2426.52|BUILDING| blithely bold dependencies detect slyly. carefully silent platelets haggle along the pinto beans 1209|Customer#000001209|PW00geNpQOiug6dftXfBkpwdAfsmRYsve,b44uR8|4|14-664-771-9006|3551.21|HOUSEHOLD|unts. regular dolphins integrate slyly. regular, pending accounts sleep b 1210|Customer#000001210|bUTLW1KIHzzQkuOEwUMwEGCQfTQM7aBmUM0|16|26-202-315-9048|8137.66|AUTOMOBILE|luffily ironic accounts haggle about the regular theod 1211|Customer#000001211|HCACb3Al89h6NqHUJ8qIjhfGFyA4S0c2|18|28-280-785-7324|4723.37|HOUSEHOLD|posits. 
packages affix carefully after the carefully 1212|Customer#000001212|kjiVLfadsq6sU3A6MYwySu XZnWzgkiWSa9v3K 6|22|32-462-274-7707|7736.03|HOUSEHOLD|e quickly unusual pinto beans. packages need to sleep furiously. regular asymptotes are furiously. final packages 1213|Customer#000001213|4ATLYSTcqLfgAJLxL7U|7|17-548-151-7684|8555.12|HOUSEHOLD|ong the deposits. quickly express deposit 1214|Customer#000001214|EATpN6m rGunAAkWFNSpsqy|4|14-281-851-2904|2935.31|BUILDING|carefully across the carefully ironic asympto 1215|Customer#000001215|oAvLu8VcRg9FNS9sNmoqU9|16|26-405-743-5405|7795.87|MACHINERY|special packages against the slyly final pinto beans wake slyly furiously final 1216|Customer#000001216|CSf1BbFhJhucmvftOwYLQACMEqgXj|3|13-673-633-4561|2155.06|AUTOMOBILE|eposits. slyly ironic dependencies haggle quickly. slyly close orbits above 1217|Customer#000001217|ddk4YC7lmTM,Z3LbX|12|22-191-580-2887|6019.32|FURNITURE|ar instructions. furiously pending 1218|Customer#000001218|JYNvUpFG0dDy7aJNhl2zLyIxUGqZZ35ncUe|22|32-299-871-1751|8801.73|AUTOMOBILE|packages hang against the unusual, unusual accounts 1219|Customer#000001219|kP1xK5be a8tW5JRSq0nwJWgKbO|4|14-364-661-8744|774.23|FURNITURE| the quickly even packages wake fur 1220|Customer#000001220|tbyect2HMX47TzsKy5 ho5|18|28-379-869-1009|8429.33|FURNITURE|sual multipliers. furiously unusual theodolites are. 1221|Customer#000001221|4mmeiymVdRmz|22|32-185-876-3586|816.50|FURNITURE|express, bold pinto beans. packages would detect alongside of the quickly bold 1222|Customer#000001222|hn6SzlP4Dq8F89iOXH0tjIgsz0uBCiBM|11|21-709-519-4959|3883.18|AUTOMOBILE|riously special theodolites nag slyly. slyly special ideas sublate quickly across the slyly un 1223|Customer#000001223|,I0bRfCGE5ssaX4V3|11|21-659-745-8411|-413.03|BUILDING|oxes. bold foxes detect always furiously special platelets. fluffily bold depende 1224|Customer#000001224|PWOwgZKsBoFJQ7py4HJpdJoSO2,|8|18-794-312-9970|8124.15|HOUSEHOLD| furiously regular accounts. slyly regular 1225|Customer#000001225|CgaNokxe s|11|21-839-103-4411|8634.92|MACHINERY|elets. bold packages use blithely special foxes. quick 1226|Customer#000001226|HKR1zog fXW|0|10-251-221-9440|2135.92|FURNITURE|ns. furiously pending packages hinder special accounts. sl 1227|Customer#000001227|GiT5IrOJ1zJTZErbFt1Jy6Gj|23|33-468-642-3107|3335.72|FURNITURE|fily atop the bold, unusual theo 1228|Customer#000001228|fV,ZM6pj1nivvbnfseVaWRkB0UYwKgvv|12|22-778-955-6105|5392.30|AUTOMOBILE|s according to the carefully final ideas ha 1229|Customer#000001229|csvrfGKxtX|9|19-313-452-6076|8328.12|FURNITURE| instructions sleep. carefully regular accounts use furiously. ironic, even foxes wake: busily even deposits caj 1230|Customer#000001230|Pr7yxcRne6NiloD1oR,d28rwVFRnOoTWeYq9|23|33-786-129-3407|4787.85|MACHINERY|ackages cajole furiously quickly pending packages. ironic foxes 1231|Customer#000001231|qJWtxdKmKWcR5XgMDn|9|19-316-348-3289|2326.68|AUTOMOBILE|uickly regular foxes are after t 1232|Customer#000001232|yYXdTto04oLlk04SM|18|28-518-320-7417|8482.51|MACHINERY|. even deposits lose above the even, regular 1233|Customer#000001233|KdmXav1IIIo|15|25-522-912-6255|3649.49|MACHINERY|ymptotes according to the ironic deposits use around the reg 1234|Customer#000001234|B3OhbH0MRJE,F0Lc7Jq0Ttv3|1|11-742-434-6436|-982.32|FURNITURE|y ironic instructions are quickly about the slyly silent pinto beans. quickly final dependenci 1235|Customer#000001235|q 1E JKZqhvUzj48|24|34-549-333-8551|-982.05|BUILDING|ckly. 
furiously quick dependencie 1236|Customer#000001236|pTEEPYlnQBzi558CN7LJ5UTdvO|11|21-699-526-9355|3600.95|MACHINERY|, pending excuses wake slyly pending accounts. asymptotes wake fluffily against the ironic, bold pack 1237|Customer#000001237| CQEeqR ,cVU7Bby|20|30-415-666-7691|8156.62|BUILDING|the bold accounts wake blithely across the deposits 1238|Customer#000001238|HGCJI27,RIIQcS20,DcJbMQuUmN3Vhzdm|6|16-302-171-7578|4299.22|BUILDING|ly special requests. unusual, special asymptotes according to the blithely express pinto beans wake en 1239|Customer#000001239|,K7wNII9jhC ,|20|30-786-518-4678|6936.72|FURNITURE| are alongside of the requests. s 1240|Customer#000001240|XbvhyAXRkuujtESRmxeD9eQpYSkiCa|4|14-650-555-5310|5439.44|FURNITURE|ans above the slyly regular ideas cajole furiously across the regular, regular decoys. furiously final asymptotes s 1241|Customer#000001241|74mW8ipfvoVPR3PS|3|13-902-876-1609|8654.56|HOUSEHOLD|ructions affix against the evenly ironic packages. slyly regular accounts run carefully. accounts accor 1242|Customer#000001242|mA8bUqB6WqNEe2nsQXlaHqMqaACj|2|12-521-364-1211|2276.15|BUILDING| regular ideas cajole! blithely express excuses b 1243|Customer#000001243|g,qSvyYkgjDcCL cxx5qy8hAwhomRq9cYJRvXZ6|14|24-445-165-9851|6271.38|MACHINERY|s, regular packages through the carefully ruthless theodolites promise quickly blithely final pinto be 1244|Customer#000001244|I3DrbiKwP3dxs1jF0iDwXh|5|15-881-433-2257|-942.80|BUILDING|old deposits alongside of the packages are 1245|Customer#000001245| xLnSgzY70qTKPF753|4|14-500-764-3702|3196.66|FURNITURE|haggle slyly at the carefully pending excuses. slyly pending theodolites use re 1246|Customer#000001246|acguUq5BISzqjHB7Bt|4|14-882-141-9354|260.71|BUILDING|haggle furiously. blithely regular patterns sleep quickly slyly even 1247|Customer#000001247|q5,Og3ezW3jSUtbwK 6qLJPqPwCwdL|0|10-386-173-3167|1696.95|MACHINERY|s against the quickly unusual ideas are blithely packages. accounts sleep quickly. regular 1248|Customer#000001248|f0X68bItSl|8|18-692-669-1536|6539.15|AUTOMOBILE|theodolites for the unusual deposits cajole fluffily final patterns. caref 1249|Customer#000001249|x9ukZnNiUM5pBPXyE3epagewVQBPZzxGYD6sMH|7|17-866-269-1165|448.49|BUILDING| the ironic packages sleep carefully express theodolites. even, final deposits across the even deposits nag after t 1250|Customer#000001250|LBPszo9EVA88sbbdYl7E,MVm7UvoBjmjr|9|19-509-608-4350|780.05|AUTOMOBILE|und the fluffily bold requests. silent, final theodolites solve furiou 1251|Customer#000001251|4AjU4c4D4AMLwQx,lAJGwBIgmT7oSZwYUv0es3J|24|34-741-256-6399|7267.76|MACHINERY|ckly regular accounts affix slyly carefully unusual 1252|Customer#000001252|u2OUDBxaayX4WhrftcM,|12|22-604-782-7617|3279.84|MACHINERY|eans might impress about the bold requests. bl 1253|Customer#000001253|2rEfA2LR6LkUXjoMxIsv58YSHPMjlqr1YXhHSX|1|11-900-587-2067|1222.21|BUILDING|efully regular deposits. bravely ironi 1254|Customer#000001254|wdGz5Cm DrSdF|18|28-832-851-4673|2676.06|HOUSEHOLD|wind fluffily blithely regular pac 1255|Customer#000001255|UC6I32JjBU62t4WgDe e2pDYbuM3VAt4MPM|14|24-359-633-2713|6487.71|FURNITURE|s deposits sleep. blithely ironic dependencies wake. blithely even theodolites sleep. blithely 1256|Customer#000001256|sNx4HcJ35paZik,IN02G7p|15|25-306-342-4782|5012.07|MACHINERY|integrate carefully. blithely e 1257|Customer#000001257|kX6yufw5dfKrgwQPVwWE7|2|12-824-451-8526|8810.83|HOUSEHOLD|hins. 
furiously unusual foxes about the regular foxes wake blithel 1258|Customer#000001258|zK3TKgKVuFCBdjt|3|13-727-588-7092|-301.75|AUTOMOBILE|ove the carefully ironic asymptotes. quickly final Tiresias wake regular packages. s 1259|Customer#000001259| YQc2RRQJV7kl1zxWg4OiUVU 5GlpB|20|30-930-620-7210|8353.00|HOUSEHOLD|ages sleep blithely regular, final 1260|Customer#000001260|npdrgr5Yqp0znvQt,Cw07j4BS22RNIANcb3t|2|12-742-408-2980|4991.59|MACHINERY|kly express theodolites sleep blithely across the doggedly regular packages. final, unusual instruc 1261|Customer#000001261|mWs6m9QwmTOZ|20|30-372-895-4261|5579.81|BUILDING|uffily final pinto beans. ironic deposits according to th 1262|Customer#000001262|u39WRGDI6AKU|3|13-444-583-3984|2840.36|MACHINERY| final accounts sleep slyly Tiresias. packages are furiously idle platelets. slyly silent requests are acr 1263|Customer#000001263|MXA4v0xQ9Kt |9|19-690-614-5736|6975.90|MACHINERY|ress dependencies are carefully theodolites. blithely ironic foxes among the slyly bold packages sleep blith 1264|Customer#000001264|vC1Yg5q O9 Tt5SM7OF|16|26-617-707-6647|3959.28|MACHINERY|y, regular requests above the fluffily special deposits engage around the furiously 1265|Customer#000001265|CTbTIB ZTYyKUSY42Ksz F33fxKsSG|24|34-945-256-3226|2653.53|MACHINERY|yly pending deposits about the regular accounts pri 1266|Customer#000001266|LW7shrnoCLUjJKQI8EF7SIFofvvIUmiJzpdS|3|13-832-768-3873|1877.05|MACHINERY|egular ideas. blithely regular requests above the deposits unwind on the slyly dogged pinto 1267|Customer#000001267|o3dtauyIeWwFRok2whWam0MLjmOdlG1H|24|34-329-328-2500|8616.12|BUILDING| accounts nag fluffily blithely ironic pinto beans. carefully pending deposits dazzle along 1268|Customer#000001268|SHn6HpO2VXBw3RJFPxjQFGanrsndRwRR2LWdm|24|34-973-735-5374|5152.42|BUILDING|y pending accounts! blithely unusual ideas wake alongside of the regular, f 1269|Customer#000001269|j2hwJHCMprK8HQdK7DpeUx5SG8j4dfuNR|10|20-818-485-8060|2659.68|BUILDING|promise slyly against the carefully ironic deposits. fluffily unusual foxes cajole carefully 1270|Customer#000001270|HPcuKCEtUzP3np7 oDR|13|23-473-283-1422|6170.06|MACHINERY|oost along the unusual, permanent pinto beans. even packages integrate slyly according to the pendin 1271|Customer#000001271|S7fmHdkot3JAv|14|24-698-342-2768|1209.37|HOUSEHOLD|efully after the slyly regular accounts. carefully ironic theo 1272|Customer#000001272|hzMO9cmypW|21|31-659-617-1632|6865.14|FURNITURE|symptotes. carefully regular requests after the pending ideas affix fu 1273|Customer#000001273|6RglRQdIV9mF8Tn6ABFmSQl|12|22-594-567-9307|1499.56|AUTOMOBILE|t final packages. furiously unus 1274|Customer#000001274|eHJnE7ytBm|24|34-152-721-6307|126.97|AUTOMOBILE|nts are enticingly above the furiously 1275|Customer#000001275|KUtV3oFy2Kyuzs4zT DB,S|20|30-410-174-2034|8972.46|MACHINERY|uternes. blithely express accounts detect around the fluffily even theodolites. even, spe 1276|Customer#000001276|c5UAVe71MPvmerPafNlpvTBWCewT|16|26-809-582-2064|-761.70|AUTOMOBILE| the blithely regular packages boost blithely regular excuses. final dep 1277|Customer#000001277|2ETOoQWtvxqp|14|24-502-746-4128|-52.35|BUILDING|oss the packages. packages doze car 1278|Customer#000001278|OB JBXz5fghXsYEaClW8PZpDmxMVZct|5|15-253-270-5149|9038.43|HOUSEHOLD|foxes. even, special theodolites boost. furiously silent packages haggle? furiously 1279|Customer#000001279|fkrzLacsqCnwUwgjjttKmY|14|24-742-587-6985|7915.06|BUILDING|tes are. 
ideas above the carefully ironic d 1280|Customer#000001280|3AmBFWaqOYt7F|16|26-725-573-7255|3419.66|HOUSEHOLD|eodolites sleep according to the theodolites. slyly pending dolphins among the pending, express platel 1281|Customer#000001281|pekyJqzeIQKGO8TeLvXgH8HR|11|21-124-963-7614|8182.42|AUTOMOBILE|the furiously quick deposits. slyly regul 1282|Customer#000001282|qeYHABkf21,5C5OC5it6q|14|24-750-627-7414|8998.82|FURNITURE| even deposits sleep quickly regular 1283|Customer#000001283|6JeLWEtDERPB,0KzWB,I6Xs8rJXAC8ryFulW5NPC|0|10-203-771-2219|2222.71|AUTOMOBILE| blithely daringly final theodolites. foxes ha 1284|Customer#000001284|sdj PCsILD6mOJfEuIIbrN52hOHTYWwUUPT|18|28-750-346-1442|-911.40|BUILDING|pending packages cajole carefully! furiously final packages wake. special requests sleep along the caref 1285|Customer#000001285|5Hy,ajDzJPtZFeJedRSeLN7XGOJtyUy2FI93|19|29-424-835-1463|3061.58|HOUSEHOLD|ly bold ideas affix blithely about the slyly even pinto beans. slyly regular multip 1286|Customer#000001286|FP3aFvhRMSKfCz3l0h|12|22-374-932-9860|6906.08|MACHINERY|quests. quickly even packages wak 1287|Customer#000001287|8CaksGsCJOK3oUm1kUsQ|15|25-493-734-3918|7461.69|MACHINERY|e quickly silent courts. furiously even packages among the ironic ideas integrate slyl 1288|Customer#000001288|wQDTTCkSGxic2d66|3|13-533-256-9320|6603.43|HOUSEHOLD|equests detect atop the ironic deposits. final requests according to the blithely sp 1289|Customer#000001289|OGb4YMkool8QMr|24|34-409-591-4324|2925.52|MACHINERY|deas haggle carefully alongside of the always even ideas. never unusual as 1290|Customer#000001290|0Q9URl0Y3rJWt9GYZF|24|34-837-161-2672|8108.42|MACHINERY|s use across the express requests-- carefully bold foxes cajole slyly slyly express pinto beans. ironic request 1291|Customer#000001291|dg3hkdHiI9zqk7l3242Q28OLLFy,1vZ,|7|17-693-294-2656|8227.37|BUILDING|, final requests-- furiously careful ideas wake busily ironic, even a 1292|Customer#000001292|QVr2XTDOMzWcLKHtNgrLK|21|31-966-407-1575|5509.11|BUILDING|haggle. special foxes sleep blithely 1293|Customer#000001293|E79dBMCNl5xXBwtnSsjuBLa16VgrLsKz|12|22-517-223-6566|2565.67|HOUSEHOLD|heodolites boost blithely ironic packages. special, even ideas above the asymptotes wake quickly accordin 1294|Customer#000001294|EZVIKislr4L0PrBP8LShL|23|33-506-204-7796|-808.13|AUTOMOBILE|deas boost bravely final ideas. slyly even pearls are furiously 1295|Customer#000001295|kded3b,5e5|24|34-259-484-2624|753.62|BUILDING|slyly final accounts detect blithely regular, bold requests-- blithely final foxes wake blithel 1296|Customer#000001296|cLAyTJcfD3T4hKW52lIU9yk|5|15-130-485-4234|3034.69|MACHINERY|e slyly ironic requests. final requests 1297|Customer#000001297|4QnYEe0KXOP3yridKldXROs7jQdMu9tE|21|31-579-682-9907|6074.01|HOUSEHOLD| pinto beans! furiously regular courts ea 1298|Customer#000001298|ujAPYPBrLW,oIxGpuWmxoTDscSXFOP Tjk|15|25-765-244-1549|3903.54|AUTOMOBILE|er furiously despite the ironic, even ideas. slyly silent ideas boost 1299|Customer#000001299|vm2THnXrMKrn5xvPL88EMT9QntU|11|21-150-179-4763|808.39|HOUSEHOLD|sheaves promise furiously alongside of the slyly pending platelets. pending dolphins at the furi 1300|Customer#000001300|VmW1dNLVaQY0ud6Csa5WHWuV8|23|33-581-399-6027|-370.44|BUILDING| x-ray furiously regular deposits. final, silent theodolites are slyly pending ideas. 
final dependencies 1301|Customer#000001301|oR0kHfL6GWhF VPD,mM1Jxsd9l3nZEkfDn|10|20-339-347-9046|8966.63|MACHINERY|egular asymptotes along the even, express packages sleep express realms; carefully final packages haggle quickly 1302|Customer#000001302|vyImQ4AVgv,Rn|9|19-316-212-9313|197.90|MACHINERY| platelets engage carefully! furiously express ideas shall have to use. regular 1303|Customer#000001303|MarfB1lCCs2MZ8CWdWqCfb|5|15-658-234-7985|2020.15|MACHINERY|express deposits haggle slyly after the carefully unusual packages-- silently si 1304|Customer#000001304|1sXtodRtFvBd449a2aJ|11|21-638-815-3982|4548.46|AUTOMOBILE|orses boost blithely platelets. fur 1305|Customer#000001305|xHgwqc1p0eLf5F8JkE7zvYXPHhIOP5IgLRJgR|23|33-396-634-9150|4900.66|MACHINERY|efully. furiously ironic packages cajole slyly bold requests. quickly ruthless requests alongside of the iro 1306|Customer#000001306|0YRFIqAc5imIKGi9cEYtn6L|12|22-923-551-9639|6464.77|MACHINERY|le. quickly pending accounts detect furiously. packages 1307|Customer#000001307|L OAVSFQauP87kLdHouM8|3|13-970-299-8199|4344.52|MACHINERY|ts. brave, express packages boost. even, pending instructions nag blithely regular theodolite 1308|Customer#000001308|Ndovi7D9gJ u1gjQwYOkIARup6VzhQFCHHmSMw|18|28-560-833-2066|9290.53|MACHINERY|uickly even dependencies. unus 1309|Customer#000001309|xaOhk73bjekYrVc5zZ36c,GuZUxsMHjo7WH9WVe|10|20-821-905-5952|-922.69|AUTOMOBILE|nusual excuses. ironic deposits are furiously ironic frays. blithely ironic platelets are evenly regular package 1310|Customer#000001310|bN, XpseFnbjZRh3fryWogaudZB|17|27-538-338-3378|204.84|HOUSEHOLD|unts. silently bold ideas against the blithely regular deposits use furiously ironic fo 1311|Customer#000001311|rcff2L75vK5EOUaPK DiDz6atB|13|23-647-279-5735|8713.24|FURNITURE|nd the regularly unusual foxes. regular asympto 1312|Customer#000001312|f5zgMB4MHLMSHaX0tDduHAmVd4|3|13-153-492-9898|9459.50|BUILDING|odolites wake always packages. slyly slow orbits lose. regular depo 1313|Customer#000001313|Ax4TI4jbHvaYUaaFuEUQTiMWQvrjez G|23|33-623-834-3089|889.11|MACHINERY|ely. carefully pending foxes was furiously special, special 1314|Customer#000001314|auN4t99aykk1AlmJl|1|11-290-301-2722|3218.33|HOUSEHOLD|e special theodolites haggle furiously along the even deposits. final accounts haggle. furious 1315|Customer#000001315|5J941XxxkE|10|20-941-614-6433|1447.84|AUTOMOBILE|refully bold packages. final, regular pa 1316|Customer#000001316|nmbpR1rqOdlUDvT6C HXUhm2|5|15-642-801-1329|-158.39|BUILDING| might wake. sometimes unusual requests cajole carefully about the excuses. stealthily final requests wake quick 1317|Customer#000001317|a6M1wdC44LW|14|24-518-294-8197|8925.08|AUTOMOBILE|s haggle furiously slyly final accounts. slyly bold pac 1318|Customer#000001318|yrASJAqw67PQxFYVAVsGU|14|24-524-279-4270|5812.93|BUILDING|lyly blithely final depths. regularly even accounts haggle across the carefu 1319|Customer#000001319|c5M1KcH60UZPYsa|9|19-573-345-3305|4910.48|MACHINERY|se across the dependencies. express, 1320|Customer#000001320|8gman6hzpuKUsX7mKU9katXpP1ia|15|25-116-108-3791|6407.13|MACHINERY|lent, final accounts cajole fluffily special requests. deposits around the fluffily even packages 1321|Customer#000001321|dWd3MhPQY3|10|20-571-787-3958|3589.49|FURNITURE|express foxes are quickly blithely bold 1322|Customer#000001322|35jI39rgIHCI4Pwvpy1beKgL0|13|23-207-256-7245|2621.71|FURNITURE|tes cajole. 
blithely express request 1323|Customer#000001323|r9R6okXwQID,|23|33-476-768-7390|6006.81|HOUSEHOLD|uffily even packages. dependencies are. excuses cajole furiously regular foxes. special dep 1324|Customer#000001324|6qS1ZDpAYr9aED9Yh ggf8ACJcPi7sp|7|17-415-957-9976|7548.88|AUTOMOBILE|t the busy courts sleep quietly above the ideas. final accounts after the regular, ironic pl 1325|Customer#000001325|Agu uZvi6Xv77 nE7W8|7|17-687-303-1074|9108.61|HOUSEHOLD|n foxes integrate furiously ironic requests. furiously even theodolites use daringly pending deposits; even pl 1326|Customer#000001326|naLuK8XKUP72msE0e|21|31-373-307-4091|-468.49|HOUSEHOLD| pending accounts impress. regular, express accounts cajole ironically express deposits. slyly regular accoun 1327|Customer#000001327|LBVMBxjllZpTQd|12|22-920-576-6295|0.97|MACHINERY| are furiously according to the multipliers. pinto beans are thinly. special deposits haggle quickly express Ti 1328|Customer#000001328|fjKlKFyxTQRJjLeT1Md|10|20-305-428-9878|3264.99|AUTOMOBILE| deposits haggle ironic, bold packages. quickly unusual packages print furiously care 1329|Customer#000001329|Q3 pefFAcrEYPQ6J AC|17|27-945-826-8003|4645.91|BUILDING|quickly silent requests affix blithely slyly bold instructions. furiously even packages dazzle whith 1330|Customer#000001330|MGY4P7QIy3|1|11-353-524-1234|3893.14|BUILDING|to beans doze along the furiously final pinto beans. req 1331|Customer#000001331|mjaArHGsPWg|8|18-140-389-1328|2005.02|BUILDING|ic excuses. requests would promise according to the furiously ironic accounts. slyly final deposi 1332|Customer#000001332|1JMz4nbClfcxmzPyWyJK|18|28-560-351-6594|3323.04|FURNITURE|ing forges. foxes haggle fluffily. express, final excuses sleep slyly blithely express dependencies. 1333|Customer#000001333|o9o6lky2KYgFZ2cSx5lyFQYufM1i1d|2|12-154-975-6824|1330.85|AUTOMOBILE|beans integrate fluffily. carefully final pinto beans wake furiously even pint 1334|Customer#000001334|gZkxQY2Aa3o D6f1O 7nsPdg6BJ3|5|15-493-800-1041|2485.71|BUILDING| unusual dependencies cajole regular, r 1335|Customer#000001335|VeQAJlVqZgl0adTxSpZ6P2ZVIC0kWokJ|10|20-744-779-7057|8341.67|BUILDING|t slyly accounts. slyly express pinto beans nag. 1336|Customer#000001336|E4MeTLnSTIOlWkLDwmG7QPf 9Dq|16|26-350-110-5043|1490.21|AUTOMOBILE|ts after the deposits are quickly deposits! blithely regular theodolites integrate above the slyly pending 1337|Customer#000001337|ACAMJe2Xdw2BCgHrGMd0BX|22|32-528-594-1931|7882.44|MACHINERY|n, express gifts. express, fluffy pinto beans sleep. regula 1338|Customer#000001338|8Nx5v3cKF MK3ejHdMUgcY,FNZZs1|20|30-763-866-5779|5139.00|BUILDING|te quickly above the regular packages. thinly re 1339|Customer#000001339|QGiiQ1iMDmLKLAHsZa L68gZFyPXX18a38IS|4|14-904-963-2452|8167.50|MACHINERY|neath the carefully unusual plat 1340|Customer#000001340|dYRQ2tz0OdH|21|31-872-435-1900|280.29|HOUSEHOLD|against the final theodolites. slyly regular dependencies after the bo 1341|Customer#000001341|n5dnBrBUHnNEnaglCr9jNvONhG tMPb|18|28-701-221-9569|762.69|MACHINERY|nt requests. ironic, even excuse 1342|Customer#000001342|FD6UNqfsYMKkf3ZFZdI4EaYMZ|16|26-340-733-2096|1520.34|MACHINERY|y around the final, special foxes. 1343|Customer#000001343|WtCLJBdycxFOsHyv|18|28-393-594-5247|8303.09|AUTOMOBILE|accounts. blithely pending foxes wake among the carefully express forges. 
quickly ironic realms wake bl 1344|Customer#000001344|95XSwEZD22AZln3RB|5|15-307-682-9911|2113.32|AUTOMOBILE|after the furiously ironic foxes cajole slyly unusual, pending reques 1345|Customer#000001345|31zcobEB,6Li4YDZbnNX|9|19-913-651-7783|8593.83|BUILDING|of the express, express packages. final requests detect to the regular accou 1346|Customer#000001346|cuwz2Yvj VKYEXjZzfL|10|20-502-685-6183|4524.45|FURNITURE|heodolites after the quickly bold deposits wake according to the regular platelets. ca 1347|Customer#000001347|7oXery7shMx|24|34-956-232-6103|8476.43|HOUSEHOLD|ular accounts. furiously final t 1348|Customer#000001348|CgtcDDYMnvsgI1uozRj|23|33-360-732-3579|459.22|MACHINERY|s cajole furiously among the ironic deposits. carefully bold pinto beans wake carefully against the carefully 1349|Customer#000001349|HvlnFsKOdm39Ge4VPgzE,UN|18|28-950-527-8728|4967.24|AUTOMOBILE|ges. final ideas nag furiously against the fluffily express accounts. 1350|Customer#000001350|fc,TCo2zqB9T3C5IbaGkfV3,hLqLr|3|13-486-903-2349|3339.51|AUTOMOBILE| regular, ironic ideas are carefully against the silent packages. careful, 1351|Customer#000001351|NYMFfkNlCGoTeaDrNO9nn|1|11-916-210-6616|3106.00|FURNITURE| accounts after the final deposits sleep fluffily ironic accoun 1352|Customer#000001352|XW4X8ComPo5mlyrgLn|20|30-631-606-4317|5570.69|FURNITURE|en escapades after the furiously special accounts use slyly regular grouches. fluffily final pinto bean 1353|Customer#000001353|CzscM6Q8vW6|1|11-109-274-1421|3666.51|MACHINERY| quickly ironic packages. regular, bold asymptotes about the foxes haggle carefully regular pa 1354|Customer#000001354|rvGErAt5suIqpuxwtL QPAgN7n7Tyv|13|23-969-619-1363|-897.04|MACHINERY| blithely blithely pending packages. furiously pending accounts use slyly. bl 1355|Customer#000001355|c1r6G98ixzLQkvUV2KphsFwhYvpDo18oToGB|20|30-918-883-1662|2351.10|AUTOMOBILE|anent dependencies are blithely above the quickly silent escapades. requests sleep. final foxes sleep slyl 1356|Customer#000001356|3SLzAiW4PihnFUE243 AHKkwtL1PCj|5|15-656-712-5740|927.39|HOUSEHOLD|fully pending deposits. carefully unusual accounts 1357|Customer#000001357|S1bDHNFkDEi,Gbsc3|15|25-242-146-4223|8627.90|BUILDING|osits boost pending packages. slyly pending deposits along the requ 1358|Customer#000001358|t23gsl4TdVXqTZha DioEHIq5w7y|3|13-264-253-1258|5149.23|BUILDING|sy excuses. slyly express requests detect slyly quic 1359|Customer#000001359|F5XtTR5KeZ,wAL|11|21-124-833-5784|4069.82|FURNITURE|eposits sleep quickly. enticing packages sleep ironic, ironic accounts. daring, regular t 1360|Customer#000001360|xeaT6W6D569UKCKU86iK9b6aUanlra|19|29-574-552-4018|1422.57|MACHINERY|nt packages affix quickly furiously regular foxes; quickly 1361|Customer#000001361|OAHRbO5RS8,yFt16e7glYM4oVEZpf8BefK5DA,7|13|23-104-975-7608|4128.86|HOUSEHOLD| need to promise furiously quickly bold packages. finally express pinto beans alongside of the 1362|Customer#000001362|FKywgbtf4ib|7|17-801-385-5904|3718.92|MACHINERY|gouts. quickly silent foxes affix after the ironic, special accounts. carefully bold d 1363|Customer#000001363|mYa,yAtLmW2mCglfc7cZ8LrPuP0|13|23-964-365-7781|-112.46|HOUSEHOLD|silent packages. blithely regular instructions haggle carefully slyly ironic forges? thin, br 1364|Customer#000001364|INrMv02tUJWFSRMEbBl0oUTtCjry8qUcI8T|19|29-992-959-9626|-181.69|BUILDING|e furiously according to the slyly final ideas. 
blithely silent excuses cajole s 1365|Customer#000001365|DOifjgJKjlSgnpPJ3cHLl2yi EseDZbg3|17|27-358-301-5393|2207.81|MACHINERY|ironic requests use blithely according to the slyly ironic patterns. carefully regular excuses about the c 1366|Customer#000001366|v3YAa1hq4Qc7FdpLg4Jh0b7xo0soyvq1w,Yrb|20|30-193-707-6924|1634.70|HOUSEHOLD|the furiously final foxes. furiously bold depos 1367|Customer#000001367|gN803k703pZ1YizV5fp6S8|22|32-462-328-6604|5420.32|HOUSEHOLD|riously theodolites. slyly bold excuses thrash slyly final pinto beans. instructions use 1368|Customer#000001368|4PxJqZUIML EhegD7RXkLY8|15|25-801-622-7438|6376.18|BUILDING|iously regular packages wake according to the slyly final deposits. carefully even packages cajole. carefully 1369|Customer#000001369|rXTwOzU0a2ak4Nj5L5b1aLij|10|20-232-617-7418|498.77|AUTOMOBILE|ong the ironic ideas haggle slyly above the courts. packages engage blithely. pend 1370|Customer#000001370|WN7onCgcC,,Lt4dC4C f7SCgnHWSjeTUp|18|28-575-379-5893|9802.04|BUILDING|y across the regular dependencies. fur 1371|Customer#000001371|H,U3MSp1OTLGIQuW2|7|17-492-673-8157|4943.58|BUILDING|es are after the carefully ironic deposits. silent requests alongside of the furiously even dependencies 1372|Customer#000001372|WiWQk7DyBtI,hfP0CIZ|23|33-563-510-6488|1796.09|HOUSEHOLD|l theodolites. regular ideas are around the furiously iron 1373|Customer#000001373|fAfmAacTlPc|13|23-959-476-7310|909.84|FURNITURE|ckages cajole slyly even requests. express 1374|Customer#000001374|vRPteZtcyV|19|29-869-316-1166|-411.43|AUTOMOBILE|ckly permanent accounts wake fluffily regular packages. quickly express foxes cajole. carefully ironic packa 1375|Customer#000001375|lpKhW7g QK7Y13sxKlRvRYI7SItbTbcBxae|14|24-620-497-1489|2011.11|BUILDING|requests! even excuses are furiously express deposits: fluffily ironic 1376|Customer#000001376|VushDntQeYmYLT22vW09rlg5j06B|12|22-972-150-2900|6761.52|HOUSEHOLD|iously unusual ideas. ironic ideas use carefully about the foxes. slyly unusual pinto be 1377|Customer#000001377|P7aUKm47hbe14nVZSrwZ|17|27-398-963-9520|8839.15|MACHINERY|s sublate carefully alongside of the slyly express theodolites. furiously special instructions haggle 1378|Customer#000001378|zDULZOX6KrHF6aL1AMsIg0Ivv4Crz|17|27-806-173-2824|2675.73|HOUSEHOLD|ges haggle slyly alongside of the furiously final excuses. carefully regular foxes boost across the regular, ex 1379|Customer#000001379|rqYSBCMywMKnfcp2DwotVqI|6|16-695-982-9623|1008.26|MACHINERY|sly ironic requests cajole fluffi 1380|Customer#000001380|a,q fKSYFADxRtRQWSppP8YKp|17|27-641-565-1036|3723.53|HOUSEHOLD|lar instructions boost quickly. blithely regular 1381|Customer#000001381|HqKfFUD6Ib9yoFM5cIgMxjXaqdJAyKSN5w Od|22|32-418-900-6494|367.82|BUILDING|foxes thrash slyly express foxes. even th 1382|Customer#000001382|uiTMgqzTPqAPoKQwbnv|10|20-962-576-3853|8898.67|AUTOMOBILE| wake furiously through the pending platelets. furiously pending deposit 1383|Customer#000001383|bSLtrtrAaAky9GZuKhlQqp8BB|15|25-267-778-1591|2092.61|AUTOMOBILE|s. fluffily unusual accounts against the special theodolites print around the special the 1384|Customer#000001384|bQI5haTy6PHM8MyRtKSlvU4ixAUg|8|18-788-299-9227|1534.38|HOUSEHOLD|tes. regularly pending theodolites cajole even sheaves. stealthy, ironic ideas are furiously above the even p 1385|Customer#000001385|4jAtwsWIITPzhTIx7jblhjp9aAzejEGnu|3|13-693-138-5884|2326.75|BUILDING|ithely daring ideas? 
regular requests wake slyly ag 1386|Customer#000001386|uByG5EoybI5dNNLzU5uD4Ba|11|21-450-191-9064|9643.87|BUILDING|ages above the busily final packages grow blithely alongside of the blithely even f 1387|Customer#000001387|GQhAzCMyKiDoel3|19|29-444-890-8990|8541.87|BUILDING|foxes haggle furiously according to the stealthy ideas: slyly special accounts about the requests use caref 1388|Customer#000001388|WaKdgWEru70hsL8nyLeEkneHyM59Lboo5zfWv|13|23-185-747-9502|291.44|FURNITURE|longside of the ruthlessly regular dugouts. slyly ironic pinto beans wake. dogged de 1389|Customer#000001389|ORf,IQyXsXJ1svlQ,5U|19|29-865-304-6982|1111.02|FURNITURE|posits. accounts are carefully. carefully express deposits cajole-- slyly bold dugouts wake according to 1390|Customer#000001390|fQm,RnwO4Tt PMQIB|11|21-978-977-8988|3931.31|HOUSEHOLD|ress platelets poach carefully above the slyl 1391|Customer#000001391|7MqN5yFijW6Yua7LVU6i7QMjjiyJ2KTZEaQ|23|33-558-545-3053|5510.28|FURNITURE|ges haggle slyly across the carefully pending accounts. slyly regular accounts should sleep. slyly express packages 1392|Customer#000001392|iXmNoe7IBgjc|22|32-561-640-4912|249.82|HOUSEHOLD| sleep fluffily across the final, pendi 1393|Customer#000001393|zVp5Hbhro,9rTwCYys1HUk|24|34-953-819-7858|5672.05|FURNITURE|escapades. fluffily ironic packages nag among the slyly regular dolphins. special asymptot 1394|Customer#000001394|eE8wv lYYKLXB|3|13-580-581-6470|2233.10|MACHINERY|regular accounts cajole never above the even, final instructions. furiously regular foxes wake unusual requests. ca 1395|Customer#000001395|XJoxiYIaBYgEE|23|33-291-909-3901|8733.39|MACHINERY|tions sleep carefully. furiously final requests about the regular excuses a 1396|Customer#000001396|M4dHuyrttFfeBr|3|13-523-516-9742|7149.43|BUILDING| the even theodolites integrate fluffily regular dolphins. blithely 1397|Customer#000001397|1bk KBemIEsKhD3VyXa6IRLx 4GH u|8|18-294-992-6523|5466.83|AUTOMOBILE|onic packages across the bold, regular dolphins boost furiously furious multipliers; furiously specia 1398|Customer#000001398|K1rQq6exc3WcVCcgIjA4SaeqxtK2,HG1|8|18-377-181-4654|7004.90|HOUSEHOLD|the blithely silent dinos. even, special hockey pla 1399|Customer#000001399|FOuY,endAFj|0|10-775-919-7154|7352.14|AUTOMOBILE|foxes across the silent platelets haggle fluffily special requests. unusu 1400|Customer#000001400|BuouRkR7J f|0|10-217-180-5310|2432.73|BUILDING|etect fluffily final courts. carefully special instructions 1401|Customer#000001401|C4vlB8ENikVmaMizX3nH3zgds6|9|19-339-404-7859|8908.63|BUILDING|accounts use furiously unusual pinto bean 1402|Customer#000001402|F7 m0JwiCABmbJLPQpCJ2|6|16-713-144-2780|4396.25|AUTOMOBILE|g the carefully express dolphins: special, pending packages affix after the packa 1403|Customer#000001403|,ql804gtMc3uxTfP,lt4yRBWQ|12|22-458-624-2509|9782.34|HOUSEHOLD|tes are blithely carefully bold pac 1404|Customer#000001404|pIO5i3yjeODChGMHoVvrX,Ctpdj|12|22-320-701-5582|3828.46|MACHINERY|ven platelets use quickly pending requests. busily busy asymptotes sleep slyly across the 1405|Customer#000001405|i9khsGcg17kWI4q5LKTcc8U3aFojf403|16|26-285-488-6682|3987.39|BUILDING|haggle slyly; regular, final excuses are blithe 1406|Customer#000001406|g1xS4snd0fzl4R,JmPHfEzRD|5|15-767-155-6419|2023.59|HOUSEHOLD|uests are furiously carefully express packages. 
slyly permanen 1407|Customer#000001407|zZsTZ3nI1rG5X|14|24-529-300-1554|7424.99|BUILDING|nding, ironic instructions promise across the quickly regular r 1408|Customer#000001408|NMIb3p1DyU,Z4XOFUS0B,|11|21-901-381-6344|5920.09|HOUSEHOLD|express requests believe. pending, brave deposits sleep furiously. carefully regular deposit 1409|Customer#000001409|jzfaCksWUNlI|11|21-667-401-3780|1959.17|MACHINERY|ously ironic ideas are. unusual packages kindle along the dugouts. unus 1410|Customer#000001410|yEUlreh6mkGmg8SIwKZooUOJ42kuZwAptaR4HAJ|22|32-358-270-1819|2716.95|HOUSEHOLD|lar, express packages cajole bravely permanently final packages. blithely even dolphins nag finally special theo 1411|Customer#000001411|4iLVKtSmtJpU|21|31-898-640-7625|772.14|AUTOMOBILE|sly final ideas. carefully bold pinto beans wake: slyly regular packages must sleep. final platelets inte 1412|Customer#000001412|gfsI6WU i7kYypv09gGIqUFrUod9uhb|10|20-715-510-2804|6368.38|BUILDING|fully regular accounts print ironic, regular platelets. deposits promise slyly. express, ironic ideas 1413|Customer#000001413|9Yh cGpbCbrXZytNfH,dAEwX|15|25-624-816-9010|1387.83|AUTOMOBILE|onic foxes. quickly final dolphins are fluffily. quickly unusual ideas wake carefully. furiously 1414|Customer#000001414|2 HBoqTD0qCyMKtcBPVHbNna|20|30-323-797-7514|3136.15|BUILDING|ackages. blithely unusual accounts a 1415|Customer#000001415|x,hzUUAZ9w7ndksLyH0,fEpMfU|21|31-295-601-1598|6252.12|BUILDING|heodolites. furiously pending requests are above th 1416|Customer#000001416|ovOZcFGL31uxmA2ifIYudX6OuwNDz,B|7|17-306-898-9363|5348.40|MACHINERY|unts wake slyly excuses. bravely even pinto beans across the furiously final de 1417|Customer#000001417|1BDU8AvljnLmkM|1|11-242-612-1339|7543.01|FURNITURE|ag slyly-- furiously final accounts are ironic instruct 1418|Customer#000001418|S5uBtE hDxHcHunowPDXKSxP3csMFnhYt|17|27-773-818-3164|9359.01|AUTOMOBILE|ggle quickly blithely thin excuses. final, even accounts integrate slyly. carefully pending account 1419|Customer#000001419|JM4NV2pq4Ps0xJNRtUtlmQ8uuDvKx|4|14-533-796-5446|5912.72|FURNITURE|. idly express pinto beans sleep above the deposits. excuses use: furiously bold accounts cajole slyly across the ca 1420|Customer#000001420|mjkRUOEzdCWpNdpp5PKOObMmhpufeNGnO1VFdbpK|9|19-433-305-7356|-932.09|AUTOMOBILE|the instructions cajole carefully. slyly final requests nag carefully ac 1421|Customer#000001421|Qx9tZ9yiMo|12|22-139-990-1907|7292.93|MACHINERY|kly even ideas cajole carefully quickly ironic 1422|Customer#000001422|mzXw44ExYC8DAdeKBakiWy0II|17|27-270-833-4320|8389.50|MACHINERY|mong the evenly express asymptotes integrate slyly brave 1423|Customer#000001423|9BcCj8CLsqylKxRj0,lm|21|31-624-875-9135|2411.69|FURNITURE|he carefully express courts. regular instructions haggle. special, express accounts believe? packages cajole slyly. 1424|Customer#000001424| 3QsPgbVLZ|22|32-542-134-6212|7207.70|FURNITURE| instructions against the furiously express accounts doze fluffily unusual d 1425|Customer#000001425|I2UBZAPPdnA9oFKKJTGxaSQZb5QzNzTR4vN6d P|1|11-697-824-4418|5814.72|FURNITURE|uests boost carefully even foxes. accounts along the slyly ironic requests cajole express, final deposits. sl 1426|Customer#000001426|d1Tyzg,0ArKPuBln8CDH, 1Xsukm2nXVl|15|25-629-292-9022|1965.38|HOUSEHOLD|s are slyly according to the express, spe 1427|Customer#000001427|dDsmiig0T4oFKaf9ttFeh1etLvSIc9aV1xF2H|11|21-941-208-2485|8136.53|AUTOMOBILE|es detect accounts. slyly bold theodolites wake ironic, special accounts. 
instructions cajo 1428|Customer#000001428|3SdWi3lKPXk00UYT,hL|15|25-859-663-3690|7703.84|FURNITURE|iously final platelets sleep slyly ironic instructions. furious 1429|Customer#000001429|K5sID 6zGPrYdfoADUq4kidlPgF3|3|13-119-903-3814|6444.82|AUTOMOBILE|even requests among the blithely regular pinto beans use across the ironic accounts. slyly ironic 1430|Customer#000001430|mv 9MEDwd8yPeQj7N|0|10-209-317-6929|-920.40|BUILDING|nic deposits. bold, even accounts cajole blithely 1431|Customer#000001431|l3LM2d2T1n c7yI4sOfpEbbd540qO66A4MARk|13|23-640-395-7009|5805.81|AUTOMOBILE|eposits up the carefully bold requests mo 1432|Customer#000001432|,pbQM2fi642oAuel|21|31-831-635-9758|6314.25|MACHINERY|ts sleep. regular frets sleep carefully ironically special dolphins. carefully bold pinto beans 1433|Customer#000001433|gK7D76v78U iRA2YI2kxeKLlm4LZMH13,|17|27-558-375-8169|4605.87|BUILDING|ly bold deposits. furiously silent braids alongside 1434|Customer#000001434|V15TQAhSLp7YC3KdjuMwSV3cwg0lp|18|28-749-743-6583|1851.63|MACHINERY|lieve carefully ironic hockey players. special ide 1435|Customer#000001435|s0fqxkVVqLWR IaqibwwOf|20|30-309-437-9265|-729.09|AUTOMOBILE|usly final orbits are? unusual, ironic accounts are slyly. unusual requests sleep after the ironic, 1436|Customer#000001436|kV5m0jkgFEto,|10|20-918-593-1860|9158.91|MACHINERY|theodolites. final Tiresias after the quickly final packages dazzle carefully blithely bold 1437|Customer#000001437|DjHgTwtlzCmcQo|17|27-805-486-5768|8839.32|BUILDING|y. bold pinto beans affix carefully unusual pinto beans. carefully bold Tiresias mold 1438|Customer#000001438|Gz Aey8gzHxIIxtpJaG0lAqd82T|17|27-688-787-7928|5436.81|FURNITURE|odolites cajole slyly. furiously unusual requests boost furiously along the fluffily pending in 1439|Customer#000001439|IGGK4SXvT5ioAeT2fbVYDemsTTqqhsQu6|21|31-433-694-1822|8487.60|AUTOMOBILE|ely after the carefully bold accounts. carefully ironic packages are silent packages. blithely ironic pinto beans ar 1440|Customer#000001440|k3LXBO5QJrG94TBG77adB1HjqQkleDyUf2c|7|17-619-730-9883|1236.36|BUILDING|xpress, even accounts integrate. ironic, special requests doze. carefully express instructions doze furio 1441|Customer#000001441|u0YYZb46w,pwKo5H9vz d6B9zK4BOHhG jx|23|33-681-334-4499|9465.15|BUILDING|nts haggle quietly quickly final accounts. slyly regular accounts among the sl 1442|Customer#000001442|2fTQpX7N2kp31U|16|26-677-746-7145|7917.90|MACHINERY|ess theodolites. furiously express pinto beans alongside of the 1443|Customer#000001443|qtBPSM2NvmJXNePBT Ap3M6UqIZTvaF|10|20-959-383-4792|7141.87|HOUSEHOLD|ickly about the sly foxes. furiously bold patterns sleep regularly across 1444|Customer#000001444|8WcsyfQU5svH9miWvYbSTH9|7|17-107-228-8125|-501.37|HOUSEHOLD|inal platelets. quickly ironic requests do are carefully carefu 1445|Customer#000001445|5y7gtM75FOfTSBKx9gs9c9MkqJt|21|31-151-251-1931|8367.94|HOUSEHOLD|carefully regular accounts after the b 1446|Customer#000001446|p94EVXQW,Q3bhXDyhG1Gp96b5zbaW|23|33-873-120-5388|2981.48|BUILDING|gle ruthlessly. furiously express dolp 1447|Customer#000001447|pHkyNkViDja,dZVNg4bEEbicpoHIVDZvtQi8RPl|17|27-452-251-2941|2718.02|BUILDING|ously around the accounts. packages haggle blithely ironic, idle ac 1448|Customer#000001448|a45QD J55bo35zA4qR3v|24|34-969-612-1458|7756.35|AUTOMOBILE|y ironic instructions? slyly pending platelets hang quickly slyly ironic ideas. 
blithely ironic instructions a 1449|Customer#000001449|lNFczqF3TjlSO9BuO3jqXY,b|22|32-827-813-3340|9051.75|BUILDING|ructions wake slyly ironic notornis? slyly express courts wake along the slowly 1450|Customer#000001450|z7Pl iXBEstivMNf|3|13-443-688-6724|857.70|MACHINERY|ains; daringly dogged deposits across the furiously regular instructions breach furiously foxes. carefully unusual a 1451|Customer#000001451|Yt69m0Aw1LWZhisHJxL4iGEEzx6y,ehspkes|2|12-590-121-9328|3274.30|FURNITURE|ously regular packages. furiously final deposits boost. slyl 1452|Customer#000001452|51mhHAvPHZACedHYXVU5HXoDIQtBK9,pxuOIlJ|7|17-581-575-4538|7086.97|FURNITURE|silent theodolites. fluffily special 1453|Customer#000001453|FTfWkW1 8jVgOIIR9sMm2HpohiuR1v2278|0|10-852-397-3642|662.67|BUILDING| to wake above the blithely regular foxes. d 1454|Customer#000001454| wwPhUG35PiIVasu88,RvDA|24|34-478-555-5955|3366.61|FURNITURE|ss the blithely ironic deposits. regular deposits after t 1455|Customer#000001455|MrDN0cvhoLZ ioRLZCR hPcY4WvILz2|3|13-839-360-1866|7591.90|MACHINERY|ironic instructions: ironic pinto beans acros 1456|Customer#000001456|zKDB5elqlAQoUQp|13|23-171-834-8997|6123.69|AUTOMOBILE|unusual accounts wake. even, ironic packages wake carefully. regular p 1457|Customer#000001457|qmpteVs7H9WjRow7FDut9a77oFKRDOXxq0JmG|23|33-660-953-7656|2873.49|BUILDING|onic accounts nag blithely among the regular, regular pinto beans. carefully regular 1458|Customer#000001458|vsGifZH3fNgJjlgF6jJbmkSqGc|3|13-392-503-9207|2716.80|AUTOMOBILE|ests sleep bravely ironic accounts. quickly regular accounts cajole 1459|Customer#000001459|2sPwjFNEFf9dN4az|11|21-424-586-6295|9270.88|AUTOMOBILE|ounts use blithely. blithely pending packages use ironic deposits. final accounts boost slyly. care 1460|Customer#000001460|AEgBZGLmuMqe7Gqh1|20|30-151-388-7118|9680.51|BUILDING|accounts. ironic packages cajole furiously; quickly pending requests lose quickly carefully bold deposits. 1461|Customer#000001461|MMmT5l0zAilFCb2ZMqsUm3TXlRH|3|13-393-444-1533|8460.48|AUTOMOBILE|ress, unusual packages affix carefully carefully final ideas. blithely special instructions nag even deposits? f 1462|Customer#000001462|b9Ed,6BCKn5v37q1|17|27-153-195-4457|7305.88|FURNITURE|ously slyly express requests. spec 1463|Customer#000001463|WD3OuRpJ0NVj2qslrTkUPmeJqVx5|7|17-980-394-5868|6039.64|MACHINERY|s. blithely even courts wake quickly: quickly silent pains doubt slyly. slyl 1464|Customer#000001464|5kOAfK9s6goOZabgSzNLgD,CILowRxqC2OLnV|14|24-133-117-1577|9306.20|AUTOMOBILE|iously furiously regular tithes. boldly final requests use carefully at the f 1465|Customer#000001465|tDRaTC7UgFbBX7VF6cVXYQA0|8|18-807-487-1074|9365.93|FURNITURE|s lose blithely ironic, regular packages. regular, final foxes haggle c 1466|Customer#000001466|Fdm3uYarZ0Tnnh9R|17|27-360-496-5041|1268.69|FURNITURE|nts along the blithely bold instructions boost carefully after the unusual depos 1467|Customer#000001467|GE,jQi5oLlkzh4jIUct7r 3C5G|24|34-941-824-8063|2857.19|FURNITURE|. final, bold deposits sleep furiously. unusual instructions are final requests. quickly final sentiments 1468|Customer#000001468|APEd1ssFxDC9fhwosxxeQUul5EhwBczX|12|22-901-280-1023|3826.52|BUILDING|s. slyly regular theodolites aft 1469|Customer#000001469|yLW8qLv25wuMsibRd,1qJe9|7|17-961-583-4658|4329.98|FURNITURE|y even dependencies wake against the regular, final excuses. 
packages haggle slyly a 1470|Customer#000001470|8ufZxZ5IgwGrUM2CWfxYoRHuBi Vj8rY|17|27-350-836-5521|7033.49|HOUSEHOLD| excuses are slyly after the carefully bold accounts. unusual pinto beans boost. final accounts wak 1471|Customer#000001471|lbRP,tSo,eQT6rDDNNIBx|5|15-230-827-4758|3872.86|AUTOMOBILE|thely according to the carefully ironic foxes; packages according to the quickly special deposits wake fur 1472|Customer#000001472| Eayx9GAqjJEwrGy1Er5 ffNtLL|5|15-464-411-8342|2168.61|MACHINERY| haggle against the carefully bold theodolites. quietly regular ideas haggle. pending pinto beans engage sl 1473|Customer#000001473|UPkONG9dy4VYyGNJGHG|0|10-891-555-7734|2796.93|MACHINERY|uriously. quickly pending multipliers maintain slyly silent excuses. regular requests cajole qui 1474|Customer#000001474|KB83CaaM8DRjvAmEMg1fw|16|26-609-226-4269|2961.79|HOUSEHOLD|kages above the requests sleep furiously packages-- deposits detect fluffily. pending th 1475|Customer#000001475|4tUf4SaYTFV2H7ji|0|10-932-794-2009|1820.28|BUILDING|uctions sleep blithely bold packages. pending, silent deposits after the fluffily final pinto beans ar 1476|Customer#000001476|nsPnedR1dhWK,|16|26-621-638-1459|409.72|BUILDING|across the fluffily final requests. regular forges haggle furiously r 1477|Customer#000001477|nUT6kGEr7tmgpJaPgfFtXY|6|16-407-756-8079|9103.33|MACHINERY|ites nag blithely alongside of the ironic accounts. accounts use. carefully silent deposits 1478|Customer#000001478|x7HDvJDDpR3MqZ5vg2CanfQ1hF0j4|7|17-420-484-5959|9701.54|AUTOMOBILE|ng the furiously bold foxes. even notornis above the unusual 1479|Customer#000001479|KDZMMuMVSWQPkGpoTUE0G 1vXHd3mS4c,A,kFR|16|26-203-849-3685|9793.29|BUILDING|arefully final ideas. unusual accounts sleep. final packages wake. fluffily bold dependencies hang slyly. bl 1480|Customer#000001480|Hzjh65ZXBFSzflrjQgECkrp35gDha,2|7|17-573-775-8796|876.02|HOUSEHOLD|uriously pending courts are. deposits serve quickly blithely final excuses. slyly reg 1481|Customer#000001481|Vp7Um1Vy7MNVJvP 5cqUrz8scGtcaLJB3f5bZDW|12|22-674-694-9039|3204.67|AUTOMOBILE|lithely. idly ruthless packages wake above the bold, quick pinto beans. regular ex 1482|Customer#000001482|kTcr5JgkjFeLKIRcmtnCvOFr1feN59chP7|19|29-452-962-5934|2930.53|FURNITURE|are. slyly regular deposits mold carefully above the blithely regular ideas. carefully r 1483|Customer#000001483|ZjY1C b6cOnY3|7|17-202-113-4814|4409.70|BUILDING|nts sleep around the carefully express theodolites. requests nag 1484|Customer#000001484|WcOint654aJStnQWSgAAtI|18|28-987-505-1842|4883.17|FURNITURE|s against the furiously special packag 1485|Customer#000001485|oR6sZslMa7bPLxtHFhqdJt|24|34-329-123-7678|9412.02|BUILDING|pliers. ironic requests boost slyly carefully express ideas. blithely ironic foxes af 1486|Customer#000001486|7A2MhrNtsA|24|34-559-605-2237|5859.97|BUILDING|lithely ironic dependencies haggle quickly b 1487|Customer#000001487|AJXUi2qFVKfypmmpTEbkmjmz0gPKQ2|17|27-197-562-5547|3589.16|FURNITURE|y final instructions. regular, regular packages boost alongside of the b 1488|Customer#000001488|DtF2uJI8td2wqrumD|4|14-892-461-5341|7929.51|HOUSEHOLD|sits boost quickly fluffily even pinto beans. slyly e 1489|Customer#000001489|yM8biIU5IFKHODCGTCwdkUf|9|19-906-669-4354|4389.66|MACHINERY|ckages play carefully? permanently regular pinto beans 1490|Customer#000001490|vBUkY7eCyWP|20|30-326-598-2437|8997.60|MACHINERY| bold orbits boost slyly according to the carefully ironic accounts. 
slyly special packages whithout the 1491|Customer#000001491|GjZIP4Fv5lqDt|4|14-931-281-5631|3739.82|HOUSEHOLD|efully silent tithes. even deposits according to the unusual, even platelets haggle furiously a 1492|Customer#000001492|2QNz4Zy0UjjI|1|11-527-949-4092|-875.17|HOUSEHOLD| blithely even accounts. furiously final instructions across the decoys cajo 1493|Customer#000001493|FbV 8Ug9GkSfMde5b|24|34-947-154-7032|7014.12|MACHINERY|carefully quiet requests lose slyly. quickly final pinto beans haggle bli 1494|Customer#000001494|4V71P ku3jrqBfQp|11|21-248-166-9549|8292.21|MACHINERY|arefully furiously special ideas. pending deposits above the blithely regular excuses wake slyly car 1495|Customer#000001495|78w5H7VJSo0Ps,jqeoCWS4Kay17ygM4RtIH|10|20-416-910-7075|6227.55|FURNITURE|osely blithe, ironic foxes. regular dependencies use blithely about 1496|Customer#000001496|ZOyMxutVHpJy|3|13-802-978-9538|-496.49|AUTOMOBILE|counts wake slyly above the instructi 1497|Customer#000001497| D8e2U3gYd57H4grcOr,02|14|24-506-574-8552|2449.57|AUTOMOBILE|gular packages boost foxes. blithely bold escapades wake slyly special pack 1498|Customer#000001498|x XToT5oFi7oIsRG2mgIL3ncvYJoWBsufsQ7N,z|19|29-676-227-6356|5810.56|AUTOMOBILE|ackages are slyly unusual req 1499|Customer#000001499|4,6jWOEqfnuXkwhB7gs0M9TcWJlaJNv4bt|3|13-273-527-9609|9128.69|AUTOMOBILE|ole blithely permanent instructions. carefully even packages 1500|Customer#000001500|4zaoUzuWUTNFiNPbmu43|5|15-200-872-4790|6910.79|MACHINERY|s boost blithely above the fluffily ironic dolphins! ironic accounts 1501|Customer#000001501|tLJmtj5OgXCQM|10|20-489-284-9686|9734.53|BUILDING|longside of the furiously ruthless deposits slee 1502|Customer#000001502|FIsFVFApqxzRHQrRjAlODHWTDZc35,BD0c7CuyVy|13|23-873-733-3833|3361.88|FURNITURE| carefully express requests. quickly even in 1503|Customer#000001503|9fFMPuIIatxmXEDe4XCu4PRea9|2|12-957-226-3187|5164.52|FURNITURE|odolites. express notornis detect blithely unusual, regular d 1504|Customer#000001504|suueZs7bAberaafllLS|0|10-462-929-6039|8151.61|MACHINERY|ess theodolites. blithely speci 1505|Customer#000001505|SFczFxAak1xX,CmWAE|21|31-344-990-8260|8207.39|AUTOMOBILE|yly above the accounts. even deposi 1506|Customer#000001506|RUScjIPOHpz3it|1|11-381-308-9658|-373.24|MACHINERY|s. furiously unusual excuses sleep slyly. blithely unusual packages about the slyly regular accounts are accou 1507|Customer#000001507|KtVNuytlncvuV44YzpoB|10|20-694-294-7077|5801.76|BUILDING|symptotes around the blithely ironic requests may boost blithely during the bold, final ideas. ironic pinto beans sl 1508|Customer#000001508|E7fRkt7uXJHIR8akfmor42eTm5kZH|4|14-740-990-2746|4213.74|AUTOMOBILE| fluffily pending asymptotes. blithely even foxes nag slyly slyly pending platelets. carefully regular req 1509|Customer#000001509|LQY2i,MHY8czRV2Ize|9|19-226-262-5083|328.44|BUILDING|tipliers serve quickly furiously express excuses. furiously unusual deposits slee 1510|Customer#000001510|BVNRoS0TPt7yBxD|1|11-138-490-8934|7100.18|MACHINERY|ng the quickly bold deposits. regular, even deposits use silently. special, regular packages mold slyly regula 1511|Customer#000001511|Lh9VKgOjqeJ5P5veH6NKZG3We|4|14-230-666-6671|2757.29|MACHINERY|xes. ironic, special dolphins against the regular accounts haggle carefully across the f 1512|Customer#000001512| FhwT 40,zugIGQPtYUDkjvXct070xNX4Lze|16|26-502-737-9941|7729.48|HOUSEHOLD|l accounts. 
final deposits use carefully slyly regular deposit 1513|Customer#000001513|CkEgq3Yvj9kGkHvVeUELT1UP9HBnHwiEIFzRWNTA|10|20-670-367-4252|8434.13|FURNITURE|y regular accounts cajole blithely 1514|Customer#000001514|2dVI195Lf,EUjr1CY37GWxCxb0uUjEa|8|18-602-992-7324|4566.48|FURNITURE|ngage slyly alongside of the f 1515|Customer#000001515|QS USHJ02MP2yd7TIcCNGMyXjyQug0EIShDlUM|12|22-852-688-4287|-179.31|MACHINERY|ily: regular packages cajole furiously at the carefully dogged foxes. final grouches must are. silently daring de 1516|Customer#000001516|VFbEMU7LSQZPCZ3m73dNP2WH0ywr5loATV4r|14|24-797-943-8908|9263.27|FURNITURE|tructions integrate above the regular, regular somas. blithely ironic asymptote 1517|Customer#000001517|hJBcIv8Yc9ukY9Erz96RRKNR8upJ8IBJxgePjf9|23|33-993-734-9681|2875.95|FURNITURE|tes. carefully final packages against the p 1518|Customer#000001518|ulllJKhRl VkFwAIhlb |8|18-242-415-7477|37.80|MACHINERY|l, pending packages boost ironic, final theodolites. fluffy requests are carefully. ironic, regular theodoli 1519|Customer#000001519|ersLKVkITqd,b7yCM1td5h9Y1tQv|14|24-663-396-2927|6172.47|BUILDING|ost from the close accounts. r 1520|Customer#000001520|WuEf6uxQmSgTA1efbl24QhQ60WJoh2166RzzOiV|24|34-364-590-2076|8678.58|HOUSEHOLD|ges after the requests integrate slyly according to the ironic requ 1521|Customer#000001521| UAwhhVG066cebuZN6Wk7s|13|23-168-973-9213|9983.09|AUTOMOBILE|special deposits use quickly according to the furiously express packages. slyly unu 1522|Customer#000001522|aMbkFCcpuqN8YFzn8ctAhm skIIfd|4|14-712-410-2710|948.00|AUTOMOBILE|packages hang slyly alongside of the slyly silent foxes. closely express accounts alongside of the even Tir 1523|Customer#000001523| udI60hnJb0IMvB67xQFfkgamLpP2Bwynf5P|7|17-405-744-1455|2921.13|MACHINERY|oxes. slyly bold ideas snooze blithely. accounts sleep slyly agains 1524|Customer#000001524|nW8RCuzryVNcYMCvE ZKJC7apmhen|22|32-834-498-4224|425.06|BUILDING|ses are regular packages. even asymptotes believe. furiously regular platelets sleep carefully according to t 1525|Customer#000001525|NtS KugGxV4GMBxNAwZdR6wwq02 fd y5,M|0|10-178-851-9228|2915.69|AUTOMOBILE|cial dependencies. ironic accounts integrate regularly across the permanent accounts. quickly 1526|Customer#000001526|pAC6Yj2c5lyOIr5 IQpM0|8|18-679-265-7392|8012.73|MACHINERY|ructions. furiously ironic packages after the evenly express pinto beans 1527|Customer#000001527|486JIEEHa |24|34-219-462-2180|6025.59|FURNITURE|old excuses sleep furiously ironic packages-- final pinto beans cajol 1528|Customer#000001528|fa,9WdvoEW06FtLQ6bpXBYGORjOWt,w|21|31-594-709-9605|4114.07|AUTOMOBILE| bold deposits sleep slyly about the blithely express accounts. final accounts haggle furiously at the 1529|Customer#000001529|NYQrlaZMT2rOQadTbfSpAdPPTQwpWQEEWD|2|12-170-370-8690|8221.42|HOUSEHOLD|each blithely. ironic, even packages sleep slyly alongside of the furiousl 1530|Customer#000001530|KVYYmaQ7fGwFnhgBnot1zTnFa|2|12-845-483-5866|4404.87|MACHINERY|ckly furious deposits. furiously special instructions sleep furiously according to the regula 1531|Customer#000001531|OomxCS69ZBbyC99b6YHXYGvw1Fs|21|31-735-863-6916|728.83|AUTOMOBILE|. deposits use furiously. ironic accounts affix 1532|Customer#000001532|VHjtEO1OwfCkrTIj|1|11-301-550-1539|4311.62|BUILDING|ependencies. fluffily even instruction 1533|Customer#000001533|jVCPod3Ysz|7|17-511-289-3953|6323.48|HOUSEHOLD|cross the ironic, regular ideas. 
even, regular deposits haggle according to the blithely regular dep 1534|Customer#000001534|EJ1gh5MYQ7R xKH6RfqPU96So94cMHKHgnEVTgy|17|27-975-211-1327|5760.99|FURNITURE|ly final requests; blithely regular pinto beans haggle slyly along the quickly bold notornis. sentiments cajole c 1535|Customer#000001535|2l8xLuwaicTTg5RNA7mwyHhz|23|33-371-530-1740|1041.79|FURNITURE|g the blithely final accounts haggle carefully above the sly, express platelets. excuses eat slyly across the c 1536|Customer#000001536|HRUhB3D7LC6V ydQigaOZ10Y9Be1jN31|13|23-357-877-4041|6388.82|HOUSEHOLD|uriously against the slyly regular ideas. blithely ironic dependencies solve nev 1537|Customer#000001537|Fx1vbSLG90yTE3KF2VGDMOeny|15|25-482-334-6480|4000.55|HOUSEHOLD| courts. quickly regular instructions are blithely silent excuses. slyly special theodolites use finally 1538|Customer#000001538|ohSUJgfMxt2Hq9f0tv,MZaRsombSl,MU1d6,|2|12-766-442-8988|3245.69|FURNITURE| blithely carefully even instructions. blithe 1539|Customer#000001539|EFgodQ9F0u SUYZQcJCNzjDlte5 br0klLU|17|27-544-403-7594|-702.43|MACHINERY|e blithely express pinto beans. carefully regular pack 1540|Customer#000001540|c1kVCV43v2RpUwCoZJ2LBHWYt2BT7|17|27-352-357-7209|6102.98|HOUSEHOLD|ious foxes against the slyly regular deposits boost ideas. regular waters sleep slyly: unusual deposits 1541|Customer#000001541|3HbD4JaolktsAYU,OgPrar2|15|25-786-474-5957|6792.00|AUTOMOBILE|blithely. packages wake final accounts. carefully enticing asymptotes run quietly despite the slyl 1542|Customer#000001542|4whsFeeVSBH7Eq WSu gF5JCsJc|14|24-754-425-8980|4108.68|BUILDING|sual, regular deposits haggle blithely alongside of the quickly ironic foxes. slyly bold pinto beans det 1543|Customer#000001543|IKgaPQRsONfY1vAsPP|18|28-327-662-8527|5653.73|MACHINERY|ckages haggle. idly even deposits according to the regularly even ideas haggle blithely re 1544|Customer#000001544|R,hoHFlkusJ,1Kts,0QEixg|23|33-132-882-2925|2204.41|AUTOMOBILE|ular deposits. final, regular accounts nag carefully quickly regular pinto beans. unusual, unusual theodol 1545|Customer#000001545|08TtvYMUYuq6Hgi1T4IsV2fr1G90cnb D|1|11-287-870-3637|-487.86|BUILDING|ial requests wake. sometimes regular sentiments are. pinto beans use car 1546|Customer#000001546|kFu hXaTK2Vk|20|30-788-120-7833|4488.33|AUTOMOBILE|sts haggle furiously. even, regular packages sleep. idly idle somas affix furiousl 1547|Customer#000001547|RgRcB,v0ZS|4|14-683-809-4484|6387.31|AUTOMOBILE|ts after the accounts are above the fina 1548|Customer#000001548|0uaAwzhbw,1VFL|23|33-610-656-3668|562.75|AUTOMOBILE|eep slyly regular, final instructions. final dependencies engage a 1549|Customer#000001549|Bm8PVyaAYfS0IFPhkXiVGL|24|34-263-284-6757|7050.56|FURNITURE|sts about the quickly unusual dolphins integrate quickly slyly silent platelets. quickly final instructions 1550|Customer#000001550|NgbaaI8wjR|20|30-722-982-9755|4742.59|BUILDING|sits. regular requests cajole boldly after the slyly special ideas. quick t 1551|Customer#000001551|GSs9E1btXLKkSgkCAyLohk1bOLuJ6|14|24-667-589-4141|797.35|BUILDING|ely ironic dependencies. quickly iro 1552|Customer#000001552|eR6My q7YdhYeBH jVxHHC mYpeNFNBDuG10|21|31-902-185-9642|956.02|AUTOMOBILE|regular accounts eat furiously slyly slow ideas. 
carefully final accounts mold furiously after 1553|Customer#000001553|zS2t71h5ssFkRFiB4EvNtWPqjexC1FaO1MeNutf|1|11-879-323-7032|5853.10|AUTOMOBILE|he slyly unusual packages cajole slyly ab 1554|Customer#000001554|axGq6Zieq8sy|7|17-462-295-6567|8996.02|AUTOMOBILE|latelets cajole furiously final, regular packages. furiously even accounts cajole according to the even 1555|Customer#000001555|7V1UD h0 oKL04nnKVzu7UCmFSL56|5|15-722-660-7220|-740.02|BUILDING|ng deposits alongside of the express, bold deposits cajole blithely deposits. even packa 1556|Customer#000001556|0KThJm1X9rQH3Me,EI2QW8HzrUKnsU,gvw9BwzN|11|21-170-549-6376|1462.44|BUILDING|coys wake slyly along the ironic pinto beans. evenly ironic requests use quickly. final packages are slyly. dolph 1557|Customer#000001557|cbF7Kpmtk4w1vCoqB,3Ev3XNnr|19|29-927-226-6896|3144.14|BUILDING| according to the final, pending account 1558|Customer#000001558|hHKBdZXfRUbMjnlX i8sGWu6|9|19-532-314-9903|8473.41|AUTOMOBILE| thin packages against the even ideas sleep slyly according to the pending instructions: 1559|Customer#000001559|0rOzDCEPki4zpeqXx5nW3ajIGdLN15XHeS|0|10-700-486-1040|4630.52|FURNITURE|ronic sentiments doubt carefully slyly even deposits. e 1560|Customer#000001560|yNFoAP4UcMlluwL1uNYvUmCgrn7GfDiTo3H3mzV|15|25-187-156-3225|9146.01|MACHINERY|furiously ironic requests alongside of the deposits impre 1561|Customer#000001561|11hKNqixtqQsCgZKu3DYu0VEx28g04|13|23-445-875-1233|1083.49|AUTOMOBILE|slyly about the quickly expres 1562|Customer#000001562|Rj0aTQUqnb1u4qOvWzb3|13|23-883-927-3910|3102.27|FURNITURE|eposits cajole. final instructions alongsid 1563|Customer#000001563|cb7 vuR7o4Z5KQqgd5yllan5Evum5|4|14-146-791-5866|8838.33|MACHINERY|ourts-- slyly regular packages sleep car 1564|Customer#000001564| kQ06G,BN4KWou6DYH|5|15-898-126-9264|-184.33|BUILDING|ual foxes wake: theodolites sleep bravely after the furiously 1565|Customer#000001565|EWQO5Ck,nMuHVQimqL8dLrixRP6QKveXcz9QgorW|2|12-402-178-2007|1820.03|AUTOMOBILE|ously regular accounts wake slyly ironic idea 1566|Customer#000001566|NfBldfDRJyOWXbZ47UJP2hGn6HF1zOZGJaOa|23|33-480-441-5244|7256.21|FURNITURE| final frets are carefully against th 1567|Customer#000001567|D XMRaJOpRqLttO8yiMZ4tYU1L2nUr|24|34-146-945-2364|7209.94|BUILDING|thely ironic ideas. ruthlessly pending sauternes are furiously enticingly regular pinto 1568|Customer#000001568|uOMsOfJ0raeSGqW9PMPs1sL5D pcO,fUaYsY6|22|32-780-340-3819|-576.58|HOUSEHOLD|ts sublate carefully ironic orbits. final, even accounts sleep toward 1569|Customer#000001569|4vO9w7ixKJ 5od18LqLvr,|6|16-108-793-2841|9416.38|HOUSEHOLD| the unusual packages. even excuses against the fluffily regular idea 1570|Customer#000001570|RMBVeVOCt002J1|13|23-319-685-6601|2106.52|FURNITURE|unusual deposits unwind among the courts. silent packages 1571|Customer#000001571|akbtXy3o6igP3n8C|6|16-661-716-6605|4250.73|BUILDING|silent deposits sleep. silent foxes acro 1572|Customer#000001572|wS4p6kZ8dz8WyKfAbhXeBUO3QJj|5|15-262-124-6233|6070.44|FURNITURE|instructions affix furiously slyly regular foxe 1573|Customer#000001573|pcC2rrIA2bwtSXkcBy8X5eoQBrfoGb7gT|10|20-170-955-7287|9831.27|AUTOMOBILE|ests use of the brave accounts. excuses are 1574|Customer#000001574|hgovcHRlq4 y|8|18-753-101-5745|1204.82|HOUSEHOLD|e regular courts affix after the thinly ironic reque 1575|Customer#000001575|Ntyf,WOVz9hrnESfXT6gBxej1eZjbwgdSEVvmRw|0|10-455-580-7646|7283.99|AUTOMOBILE|dugouts. 
slyly even deposits about the quickly reg 1576|Customer#000001576|ec9dOjmCD0iicQEc4iIff88zX4kFGHPZUPYX1sBg|22|32-430-540-7796|1006.44|MACHINERY|y bold requests about the ironic, regular theodolites haggle slyly instructions: ironic courts wake carefully. 1577|Customer#000001577|eaMmfsWJ7 USPFwMH|22|32-267-732-1345|6204.00|MACHINERY|dolites boost slyly even deposits. furiously ironic asymptotes ha 1578|Customer#000001578|KGMw1t3in68W4|17|27-348-227-6667|-365.45|AUTOMOBILE|elets. special, express excuses after the accounts promise carefully requests. care 1579|Customer#000001579|TI4GCerFzw2UgqQdzJ94|3|13-255-948-9257|4763.17|AUTOMOBILE|efully across the quickly express deposits. slyly regular deposits sleep above the blithely ironic theodoli 1580|Customer#000001580|Uc5lBMkU8F1zW56P Bo,8fbVlyCKs|13|23-651-166-3240|5587.12|BUILDING|ts before the ironic packages sleep furiously regular, bold dependencies: dolphins 1581|Customer#000001581|fCDyGbFmnkclr,031ny|14|24-603-456-1171|4669.01|AUTOMOBILE|lithely final deposits. quickly express platelets unwind 1582|Customer#000001582|Tw 9wNgPjMmsy1brAYW0|11|21-998-418-6615|7119.80|BUILDING|even accounts. quickly brave deposits haggle. 1583|Customer#000001583|og6OTS,QKN2BidNDSZd0yB,Tn8ls6TGnKUz |13|23-136-310-3804|2540.51|BUILDING|bout the pinto beans. bold, e 1584|Customer#000001584|BWzLMEnPG7tsF54M8kdGVd7zQCxiXniOP|21|31-675-590-3473|5305.86|HOUSEHOLD|. quickly busy deposits haggle carefully under the even, unusual foxes. carefully blithe foxes snooze fluffily? qui 1585|Customer#000001585|kMDzNCvICH1j7sLp8g0CFB8cO12tCS70VTp5wM7|22|32-232-514-3624|7651.05|AUTOMOBILE| carefully regular packages are about the carefully silent foxes. fluffily regular Tiresias wake furiously across th 1586|Customer#000001586|I76G9G7dkkigm162L|2|12-221-668-7869|-808.05|FURNITURE|rs cajole silently. ideas doze furiously! spec 1587|Customer#000001587|ztyGKSLXBi6r,uNDAxxDeWuWWUdfR1WL4maTC|17|27-437-149-3006|2050.48|HOUSEHOLD|closely alongside of the furiously pending foxes. furiously final requests wake about the ironic, ironic depend 1588|Customer#000001588|TOCHdXfBa1nhv26OP|1|11-700-437-5542|8372.34|HOUSEHOLD|onic asymptotes sleep carefully-- furiously regular accounts against the quickly pending pinto beans mold boldl 1589|Customer#000001589|As9UC67KvgdnJcZWfdz,|13|23-189-857-8090|-101.68|HOUSEHOLD|s boost final excuses. slyly ironic deposits wake quickly blithely silent requests. car 1590|Customer#000001590|c9ykZTFqi2xpKpNedlJ5,v03aqCbT|19|29-736-744-4365|5065.00|AUTOMOBILE|nusual instructions sleep fluffily furiously bold realms. regular, quick platelets wake slyly. final asymptote 1591|Customer#000001591|ZLJNTInWmiv9a1|9|19-142-875-7741|7470.70|FURNITURE|yly final foxes should have to use carefully. even, bol 1592|Customer#000001592|Bf Y0y,RTCY4z|9|19-565-127-5247|4042.57|HOUSEHOLD|n foxes. foxes cajole daringly silent deposits. sentiments sleep flu 1593|Customer#000001593|IAhXngV2KlKAbAQh4y6S7Vd|7|17-767-583-7374|5447.72|AUTOMOBILE|he special pinto beans. silent accounts sleep furiously final packages; 1594|Customer#000001594|8No1IYGij7|13|23-416-484-3099|4796.94|FURNITURE| final packages wake idly. quickly regular pack 1595|Customer#000001595|bJ6tl8L3gexrf9rdD,Nn9ojzg92|3|13-153-638-7545|1151.78|MACHINERY|hely final ideas. regularly daring requests sleep. silent excuses s 1596|Customer#000001596|fpSMWvE3a |20|30-259-884-2046|6900.08|AUTOMOBILE|foxes integrate thinly. furious, special packages sleep furiously about the asymptotes. 
final accounts s 1597|Customer#000001597|6pS2oH twoOdcRPVMT13YQQA YIFu|22|32-621-493-2342|5728.91|HOUSEHOLD|s foxes eat furiously final foxes. slyly unusual packages are never against the deposits 1598|Customer#000001598|bz91jr1NNiJ|9|19-439-414-8308|835.29|AUTOMOBILE|onic packages about the ironic, bold ideas are packages. care 1599|Customer#000001599|DbZoJYdsMvL8hELLlgjAvUZ|11|21-556-967-2607|626.23|MACHINERY|ly unusual gifts. even, regular foxes use slyly along the ironic, regular dependencies. 1600|Customer#000001600|GFgAlTCNWGZU4Gyk9glu8uX2vZ|20|30-563-390-7858|7027.54|BUILDING| sleep blithely along the slyly ironic deposits. blithely permanent accounts nag. fluffily ironic accounts 1601|Customer#000001601|jiy,cXiM41u9yIb1Vy|9|19-152-934-8225|2884.08|MACHINERY|ly ironic, even accounts. special dependencies detect. c 1602|Customer#000001602|Lum76wozwPDwPGgk7yFzLnG|19|29-236-186-6698|4645.67|BUILDING|. blithely even requests use slyly. unusual platelets snooze carefully r 1603|Customer#000001603|JHju hD17jZDMXprwVfC|11|21-880-121-2298|-149.21|HOUSEHOLD|tect slyly quickly regular accounts. daringly bold deposits are blithely blithe 1604|Customer#000001604|DXn5Lr8KjjMebZznHhSsX3n7T6J8UkWdYw|6|16-960-140-9357|9079.75|HOUSEHOLD|odolites. final frets need to grow according to 1605|Customer#000001605|PLOEPrgnofqWl3|15|25-483-103-9669|9396.10|MACHINERY|sly unusual requests. furiously final theodolites are boldly unusual instructions. ironic, permanent acc 1606|Customer#000001606|rIhuh0JIXA caaG|19|29-275-181-3687|2244.46|FURNITURE|y slyly unusual accounts. even foxes print furiously daringly even waters. blithely unusual deposits boo 1607|Customer#000001607|JrtvTEYpFdqK68WSabydH6dz9Opj6X5orhrIYeHY|23|33-529-928-4089|1543.75|FURNITURE|ites sleep furiously quickly pending somas. fluffily special 1608|Customer#000001608|jGjdmzMbF05pXU5STryOYpL9orgJ6F|6|16-897-134-9884|5827.14|BUILDING|oys haggle never. regular ideas solve. express dependencies cajole furiously bold ac 1609|Customer#000001609|YuJ96cGZd lzZ5jo0HI6OAi,7b12GYDC,|20|30-784-105-5546|1710.71|HOUSEHOLD|unts alongside of the regular packages haggle carefull 1610|Customer#000001610|8T,m b4Gwjs9j|18|28-219-755-5479|6861.85|FURNITURE|ainst the slyly ironic instructions. blithely final deposits 1611|Customer#000001611|QX3yB3eqWVsGuy7WetBKk6U6s CXl|18|28-997-908-7044|1148.91|BUILDING|ests. furiously express instructions wake slyly! fluffily express frets haggle. quickly even instructions do 1612|Customer#000001612|oRmhlGYt71UyFdgI4KvPxF|16|26-493-547-6969|2613.29|AUTOMOBILE|oost blithely about the blithely express packages. reg 1613|Customer#000001613|grC4vU,xdQCWgrPJzj|19|29-636-508-4398|7539.75|HOUSEHOLD|onic excuses. regularly unusual deposits sleep. slyly ironic accounts nag carefully. furiousl 1614|Customer#000001614|7BofGHd,3lr2wda7i|1|11-986-549-9647|9609.43|HOUSEHOLD|to beans use blithely. unusual, regular waters along the slyly bold pinto beans wake sly 1615|Customer#000001615|AWqpPsmhK,yirQmha|18|28-449-655-8989|1764.25|MACHINERY|egular packages. carefully express ideas wake fluffily fluffily idle deposits. furiously final dinos n 1616|Customer#000001616|hZ7KvTsImg5hRUWmHXpkZGvhFe|20|30-752-506-2492|4635.15|FURNITURE| pending tithes. furiously blithe instructions boost. thinly pending ideas use careful 1617|Customer#000001617|SfX,PYtDB3h2gdmDD1JMN,gKIIVqo|2|12-677-936-3084|9911.51|AUTOMOBILE|y ironic requests above the regular requests haggle silent asymptotes. 
daringly special instructions detec 1618|Customer#000001618|efEU9gOnX05FfeJAyNMup|12|22-326-603-9101|7795.47|AUTOMOBILE|refully pending accounts haggle furiously dependencies. 1619|Customer#000001619|qM0OslnGfoXRS4YhYUDaUd6cXDDCPc5Ppke8CU|13|23-371-869-2433|6511.83|BUILDING|bout the slyly silent accounts. fluffily ironic deposits integrate carefully against the ironic, express pinto bea 1620|Customer#000001620|p2BIMDiWvXUWlb FXxIukQZI|5|15-151-404-4005|3324.77|FURNITURE|regular courts. carefully brave deposits 1621|Customer#000001621|5I2xwuWad5n73M5zM,Dj|16|26-615-141-1919|8358.51|AUTOMOBILE|efully silent deposits shall nag blithely a 1622|Customer#000001622|HEfEM41ad6Tar1EH526Q9cxe3Pi|5|15-120-999-7103|9617.34|HOUSEHOLD|e requests-- carefully bold packages are. 1623|Customer#000001623|azlsfbL,uqLb2T1wCn5yQ33YK5KvJ8Fo|23|33-467-523-3238|2746.27|HOUSEHOLD|ress carefully quickly special depend 1624|Customer#000001624|A8VfM,awG8VPydormLPcaw|17|27-822-330-4723|9566.20|AUTOMOBILE|accounts among the brave packages haggle finally careful 1625|Customer#000001625|qQ53P2z9Mnocb2HG9u|9|19-868-381-8072|1189.78|HOUSEHOLD|nts use furiously regular pinto beans. unusual, regular dependencies detect blithely 1626|Customer#000001626|Qqvd9BwVQQ133oxNXb8N1i6V3l9z7eu3A|22|32-751-259-8740|7564.80|FURNITURE|ar accounts haggle never. quietly regular instructions are carefu 1627|Customer#000001627|RV5yXOOv0tjeqxIoRtIw9lKU3UK|10|20-566-949-4093|1673.17|AUTOMOBILE|fully after the blithely regular warhorses-- quickly even 1628|Customer#000001628|xOOuECIqRpweZwxZRgQpb2guNYVE|18|28-241-420-5429|3002.83|MACHINERY|s use fluffily. slyly silent foxes 1629|Customer#000001629|eGVew4ADiILjquTPiTeVS9|18|28-413-295-5895|9601.48|HOUSEHOLD|sly express dependencies abou 1630|Customer#000001630|mFqtTXCA4QaCqP7yXsTlk|24|34-375-163-1478|214.28|FURNITURE|furiously final excuses. accounts cajole blithely ironic pinto 1631|Customer#000001631|TEZnHT8B6dqZw9,OoyNrOJs PlT2QfZKk|13|23-875-411-6115|2401.42|BUILDING|y ironic packages. fluffily express deposits sleep by the slyly ironic requests. regular depos 1632|Customer#000001632|9SoJbgMR23pwWXyE|24|34-183-653-4603|-378.16|AUTOMOBILE|p across the ironic foxes. furiously close dependencies det 1633|Customer#000001633|kO5Tq2Y W,NklARS,|11|21-575-247-9010|162.02|MACHINERY| ruthless pains alongside of the even platele 1634|Customer#000001634|mTRMQ9143TTe5kHsD2FdNE7proZ|24|34-186-980-9064|4030.37|FURNITURE|s are thinly fluffily ironic requests. pinto beans cajole blithely before the regular 1635|Customer#000001635|HjISwY7cr50HVcC81T7MnYJ7byRMXrMgB7RjsV|23|33-974-490-5943|9435.42|HOUSEHOLD| the blithely final dependencies. slyly ironic instructions are furiou 1636|Customer#000001636|t1VhiA5gssjGA4,o5b2e8WJHsaBAmCfm4G|20|30-559-573-5410|3228.88|FURNITURE|to beans are quickly carefully regular dolphins. regular, ironic dolphins de 1637|Customer#000001637|dqGWqAXF4JuL7FcYH7r9dnH3MiT0S08VS7KgD|15|25-177-814-7863|2844.51|HOUSEHOLD|the quickly ironic instructions. packages poach blithely express instructions. even packages haggle sl 1638|Customer#000001638|Gxgb1kyTwOAoVu0fqaRQk4KCyWzkULGTy6tkpdOx|17|27-548-377-6273|-411.15|FURNITURE|lly regular accounts! express attainments maintain carefully. 
furiously pending pi 1639|Customer#000001639|QWbeop69wQqRFQbySM7WqPGTSd7fW6QMFYIjL|1|11-304-833-1391|6651.19|AUTOMOBILE|nt deposits cajole slyly blithe pinto bea 1640|Customer#000001640|lGeZbMEg03r24lUuK|8|18-270-772-6060|2622.28|BUILDING| regular packages are slyly among th 1641|Customer#000001641|XT5DXFdGy4kjb|12|22-791-967-1788|4611.37|HOUSEHOLD|unts through the slyly pending platelets integrate carefully dependencies? blithely ironic 1642|Customer#000001642|UgLWC4Pw,XZX52b8hcEixGxk,J|9|19-134-303-4344|6248.64|FURNITURE|uick requests. furiously ironi 1643|Customer#000001643|,vdC1qp8aweR4z4 sTdnhujyZn,|13|23-553-752-7340|1982.44|BUILDING|ites. slyly pending accounts across the dogged excuses can are according to the bold packag 1644|Customer#000001644|la3oZuddBtIVanskRXO8|8|18-235-782-9940|8808.00|MACHINERY|thely final dependencies above the deposits boost fluffily above the ironic, express theodolites. ironic theodol 1645|Customer#000001645|2gNNcbkeFHKEgl4WSW7G8XpXL0VW,6MtTc0G|16|26-174-526-8279|7085.79|MACHINERY|ic theodolites. furiously regular theodolites nag furiously carefully pending pinto beans. blithely unusual de 1646|Customer#000001646|RQ,TryFh5loGPxszvCgRncdO5kM daRcgON|24|34-268-537-8282|2854.29|MACHINERY|re according to the ironic, ironic requests. ironic packages wake after the special requests. 1647|Customer#000001647|aLfdvxbHzfKz2CAdiOgKiJ|16|26-784-323-3431|9987.11|HOUSEHOLD| final deposits nag. sometimes final dependencies d 1648|Customer#000001648|e3oTXQ7OOTzwcRFXr|23|33-718-723-5373|2389.14|AUTOMOBILE|d sentiments eat carefully unusual instructions. unusual war 1649|Customer#000001649|7n8CvxEE4tthklLyRNZIMQds5rruRPiQLLdV|4|14-308-532-5953|2271.45|BUILDING|te across the blithely ironic packages. blithely final accounts among the quietl 1650|Customer#000001650|6mpXm8FQzetQ7wA1pzEmuYVcVp5 fnDk|24|34-295-469-8581|4183.71|FURNITURE|express ideas along the regular requests sleep furiously alongside of the fluffily ironic courts. idly final 1651|Customer#000001651|whCw6gMwEuls sCrsaB,DQI0,|3|13-593-198-5028|1614.35|AUTOMOBILE|eans. blithely final requests according to 1652|Customer#000001652|uDJ6cxL10W sEd4,O7,rdoxst2Sp1Ij72Jb1|7|17-670-200-2924|4335.18|MACHINERY|ainst the close pinto beans. furiously final asymptotes across the even deposits 1653|Customer#000001653|PQFMr5tmEgBCF7rww29Vc yMrHY9HJk|8|18-150-322-1853|6338.28|BUILDING|ic packages. platelets along the slyly pending dependencies sleep slyly alongside of the slyly 1654|Customer#000001654|igHSnmh 6yMC3vF|5|15-299-167-7023|1522.54|AUTOMOBILE|thely unusual requests boost slyly special, final requests. quickly exp 1655|Customer#000001655|DjCE7uReh1B,,ikdShz9W3PCfqkJow|5|15-306-607-3769|1214.81|BUILDING|s. special, bold requests haggle fluffily. carefull 1656|Customer#000001656|m3BvBNeQ1O eV1Bnn3y,MkEx7Io8GkfQ|19|29-904-708-2645|-664.93|AUTOMOBILE|furiously pending packages cajole regularly requests. fluffily even foxes wake blithely. furiously bold 1657|Customer#000001657|MSSbpflkYXCciBa|12|22-113-887-8875|9376.24|FURNITURE|ckages nag according to the regular 1658|Customer#000001658|5ZAXRv0hnyOcjObHR1ScVOZ77ncI,0|21|31-949-978-6932|9525.19|BUILDING|press instructions. carefully express requests are against the blithely final 1659|Customer#000001659|6,g4PcDD8cCUdAKNbhAmwyG3lqEKuUbq|11|21-545-972-9730|4982.96|MACHINERY|ake. regular, unusual instructions sleep quickly carefully even foxes. regular sauternes acco 1660|Customer#000001660|ClPcSJym47fEQW78Kt4|21|31-870-788-5315|3581.59|AUTOMOBILE| accounts breach slyly. 
permanent deposits are furi 1661|Customer#000001661|IRXXgB,YgRc078y2i1C,87 1wZ|0|10-582-676-4365|1760.90|HOUSEHOLD|s. ideas play slyly slyly bold theodolites. furiously ironic packages na 1662|Customer#000001662|fImm0 WZ JU39aNmhsh5WKcnCqXW|13|23-691-593-1242|3333.02|MACHINERY|nic accounts sleep carefully across the fluffily unusual pinto b 1663|Customer#000001663|7PGRXPj1HXVQUbcL S2|16|26-507-387-2886|4085.85|HOUSEHOLD| slyly ironic deposits. daringly regular requests haggle slyly. ironic dependencies cajole furiously 1664|Customer#000001664|LWrtr,G ifu9pwmSc2HknzWQS,o0FOMGucsq7Rdh|12|22-597-130-8584|6912.53|MACHINERY|thely ironic requests haggle slyly. quickly ruthless dolphins are slyly ironic requests. carefully special 1665|Customer#000001665|26NoCK4dbtU7jmEhrXSuq9rtQWM042UYODGFm,|9|19-542-708-2762|5869.13|HOUSEHOLD| wake furiously carefully even id 1666|Customer#000001666|AFCMGLIrCORZavTw7YX1dAVlJIk,aYlH|22|32-587-573-6083|3562.78|MACHINERY|fluffily regular ideas nag furiously. even accounts haggle carefully among the furiously final 1667|Customer#000001667|aEGS4v41BVwqZylqNvPj|16|26-528-257-9769|4649.03|AUTOMOBILE|nments; unusual deposits wake according to the regularly unusual deposits-- fluffily express requests sl 1668|Customer#000001668|CS067JF7eX,ax,vrx8wx|23|33-184-926-5421|1305.72|MACHINERY| ironic platelets must wake fur 1669|Customer#000001669|i38,,EDjrqVpk1UKXsl9cCdAwS,HpcFqPS|6|16-172-628-3560|9180.72|HOUSEHOLD|quickly unusual packages are. furiously express ins 1670|Customer#000001670|YP A2c1cFpn|18|28-571-377-3401|1472.96|AUTOMOBILE|ge silently. packages haggle quickly final packages! ruthless dependencies cajole ruthlessly q 1671|Customer#000001671|6yWHFFBO5YDpHN,YmYEpxulL|2|12-269-842-9419|4030.97|HOUSEHOLD|otes run slyly ironic deposits. carefully silent requests use slyly carefully ironic epita 1672|Customer#000001672|ZqEat15B3nCQI4MaRoxdfhN3WIH96vWpUs80z4|23|33-169-930-6985|8586.04|FURNITURE|sts. thinly final accounts are about the blithely regular foxes. blithely even dolphins promise blithely speci 1673|Customer#000001673|uQi2r9akwS4LNd7XQEa|3|13-713-161-3704|5714.73|HOUSEHOLD| slyly. dolphins wake carefully even deposits. quickly even dolphins grow slyly. express, regular deposits 1674|Customer#000001674|VDSnyhnkkFA1CKYDjdMx4Hpt1QaZ4g,1cy9Fnr|3|13-396-137-7834|4568.47|BUILDING|equests use furiously at the carefully ironic requests. unusual deposits boost slyly carefully even fox 1675|Customer#000001675|YvuT73pnh06wLlgAyTO1ZyZ4w,2e5Wk MGnbFO|24|34-888-827-2907|261.00|AUTOMOBILE|eep carefully. even deposits cajole furiously around the boldly final deposits. dep 1676|Customer#000001676|WgQKmlxIcGQz86n5sQMbWUu8cg7UG7W3r|5|15-612-997-6342|6504.47|AUTOMOBILE|s. even asymptotes boost blithely 1677|Customer#000001677|7PgxZfn 6hX3gSjJzRq|5|15-121-397-5027|650.89|MACHINERY|inal asymptotes haggle carefully packages. slyly ironic req 1678|Customer#000001678|xq R0 eIkV019MjY8yRdj r,Gfd|4|14-624-973-2343|3396.57|MACHINERY|sits. fluffily even instructions are again 1679|Customer#000001679|EBr12ymXS5u3,9Bh6Cd8VCsmJ9cOR8nuS|18|28-730-907-3502|5172.53|HOUSEHOLD|eep quickly. furiously express excuses haggl 1680|Customer#000001680|jYhr0a6R8XTw8RR3XJQ1kToU5H|18|28-465-621-9214|-203.01|HOUSEHOLD|ithely unusual patterns above the unusual ideas cajole blithely according to the fluffily expre 1681|Customer#000001681|QxfVn4jW30|16|26-375-137-8121|6996.10|HOUSEHOLD|tions among the ironically final accounts haggle according to the carefully unusual instructions. 
fluffi 1682|Customer#000001682|8TtqhjtXrYlzMxQ17N|17|27-438-398-2565|2459.75|MACHINERY|. blithely bold courts sleep carefully except the furiously unusual platelet 1683|Customer#000001683|ecOqgCbaUID9JwYZuvSrFxXH9dIDaV|22|32-209-661-3831|942.28|AUTOMOBILE| carefully unusual foxes doze according to the platelets. bo 1684|Customer#000001684|7t,Vo69PIG3t,ncWkzoLCJ8A,V28nMkK|6|16-197-588-1571|5928.82|AUTOMOBILE|d cajole about the ironic theodolites. fluffily special deposits about the enticingly blithe 1685|Customer#000001685|3Mg0g4AXNPa|8|18-694-638-1767|5621.33|MACHINERY| sheaves. regular packages nag slyly afte 1686|Customer#000001686|EYR2WxcOKG 4rIlcO9wbkAtID7PJOVkcPaC|12|22-241-190-8777|7782.48|HOUSEHOLD|osits sleep furiously about the closely ironic pac 1687|Customer#000001687|lNxhAZMB,t1bbxFz7UXI0gFWhw|23|33-345-542-8289|8151.36|AUTOMOBILE| boost blithely at the furiously special requests. ironic, unusual requests 1688|Customer#000001688|KE2TYjMt08|3|13-420-827-4701|3929.06|BUILDING|ckages use slyly. pinto beans haggle regular instructions. blithely even theodolites cajo 1689|Customer#000001689|oYgoEWSydzBD81VB3q20DEx8TgSUX6qio,zpL83p|23|33-974-102-7427|705.64|MACHINERY|y silent requests according to the slyly final deposits poach ac 1690|Customer#000001690|v2dVbJH3RxbBj5Wk5btJdzv9K35jXAoxhRYVthO|1|11-869-223-3212|-381.24|MACHINERY|riously throughout the slyly express accounts. slyl 1691|Customer#000001691|BvajZGLJDqzvJfZKsuVjdwaixCO|12|22-649-185-6921|3367.22|AUTOMOBILE|side of the ironic packages. fluffily special asymptotes among the slyly unusual foxes haggle slyly b 1692|Customer#000001692|C3n33KUNrCXK|7|17-625-330-9211|6921.50|BUILDING|. quickly unusual courts use furiously carefully ironic grouches. blithely unusual accounts sleep carefu 1693|Customer#000001693|k9j7wuuKPs8gE|12|22-402-777-3279|2747.22|MACHINERY|carefully special packages cajole. blithely bold 1694|Customer#000001694|jCLu0ZDrLdq7wFEJb|4|14-609-696-9902|3535.57|AUTOMOBILE|the pending, special Tiresias. fluffily even deposits wake carefully theo 1695|Customer#000001695|ihGFlPO39VGgr7xRcR7AM1BFKn9pDq3C|16|26-263-151-5237|9258.82|AUTOMOBILE|press, express packages nag. slyly special asymptotes sublate. regular, even 1696|Customer#000001696|HIJoLNtvjJbh5H0PturTaOBtSAQ3T,j7GSqq|16|26-115-967-9585|4978.11|BUILDING|eans across the even attainments haggle carefully above the furiously express req 1697|Customer#000001697|TQj18iiC1gziLOnTileoy|24|34-288-313-5272|-913.24|FURNITURE|nding dependencies wake alongside of the sly 1698|Customer#000001698|Xzkyij4D5OOWYsaWsucYV0|22|32-926-560-9683|5109.01|HOUSEHOLD|uriously ironic packages cajole slyly! regular deposits alon 1699|Customer#000001699|lGzEu5g4oOROn4QvjK8fEd Z,Y9Vn7IV7EnlE5|1|11-655-675-5843|-813.01|HOUSEHOLD|ngage ruthlessly alongside of the carefully 1700|Customer#000001700|DK3nJU9doE2BtBjQTFApwnLxOCSD |21|31-868-665-9539|6683.70|MACHINERY|ound the furiously final hockey pla 1701|Customer#000001701|IbyUBDvH,eRszYTbnEDHGu16B4UsJSbQaA7F |3|13-397-730-2856|9986.13|FURNITURE|gular ideas. deposits about the busily unusual deposits are furiously ironic theodolites. quickly e 1702|Customer#000001702|ZUf5SwR,j3HdY TBel7Mk|19|29-110-823-3729|8048.90|HOUSEHOLD|e quickly pending accounts-- carefully special deposi 1703|Customer#000001703|7qJL pH9GSS4BZ Nc31|19|29-687-882-9664|8889.69|BUILDING|the ironic, final accounts. 
qui 1704|Customer#000001704|G4lZ0VRWfLKldLDFR3,bA|16|26-425-543-6950|5145.40|HOUSEHOLD|fully express requests about the carefully regular deposits use carefully 1705|Customer#000001705|lvZ9qSNhUMiE0LTOzmU7,NgjBo6VvcGrs|11|21-470-157-6516|5688.31|AUTOMOBILE|s sleep carefully. fluffily regular accounts haggle furious 1706|Customer#000001706|FBx04exFTAFFRA3G,UR9Q2XSM1c8Uopaal2rEFv|2|12-442-364-1024|455.15|HOUSEHOLD| beans after the ironically pending accounts affix furiously quick pla 1707|Customer#000001707|MjHpj4aS20ftMyK5EMEk87p|11|21-859-412-6010|2714.86|AUTOMOBILE|. carefully even requests nag fluffily after the sometimes regular instructions. quickly bold foxes among the idly 1708|Customer#000001708|BdT2freRSXKa31JLnSBliCXmhi8J|2|12-857-766-2851|9702.83|AUTOMOBILE| always. requests are furiously packages. slyly regular tithes c 1709|Customer#000001709|x0 eZbj1iyKLgjF8qImD|11|21-877-394-8667|9588.88|AUTOMOBILE| deposits. blithely regular frets 1710|Customer#000001710|6kwF3TSVbf,JAh3PAjc0NSHHvdEGWjxVf2us|10|20-393-759-9313|8409.91|HOUSEHOLD|gular asymptotes. furiously silent excuses sleep never-- blith 1711|Customer#000001711|Mhg8c9IAFb8G|15|25-302-946-6337|4421.61|MACHINERY|gle carefully. final, even deposi 1712|Customer#000001712|Qdv0r7aA5tmEZw JkozgH|18|28-959-477-6941|7013.72|FURNITURE|le blithely. regular, regular dependencies integrate slowly a 1713|Customer#000001713|saqFezmCXyr2f2sGR3WexM8MSf03S|15|25-566-819-3545|1627.04|AUTOMOBILE|cial deposits. requests alongside of the 1714|Customer#000001714|6xIlLyed lGGxdneig8xdrvHZYHuLldIGeJu|23|33-251-747-7039|8569.50|BUILDING|ar packages. carefully final accounts wake carefully. slyly even ideas above the closely special reque 1715|Customer#000001715|TNwfsu3dIIti|12|22-283-174-5611|2587.36|BUILDING|slyly regular accounts. quickly furious accounts sleep blithely alongside of the carefully special packages. ironic 1716|Customer#000001716|FmPtChYDv7s7KX5Zi8Ug9SGMajKrQYRuv|20|30-299-232-8737|779.07|BUILDING|. final accounts use. furiously ironic asymptotes d 1717|Customer#000001717|HufIAAW0Xbs3qoYpAVxk4KKvm,F|3|13-450-115-4347|1715.40|MACHINERY|s doubt. blithely silent accounts try to haggle: blithely ironic pinto beans boost furiously. carefully unusual a 1718|Customer#000001718|z2SLViCW5QBh8FDiy3|24|34-842-694-7686|8697.88|FURNITURE|s sleep furiously about the carefully pending asymptotes. 1719|Customer#000001719|eOix0jv6gCP34oQBO2i2z1UugzE1hwWx28n7Uog0|9|19-261-141-3893|4257.24|BUILDING|ully furiously ironic courts. even asymptotes sleep! regular, ironic dependencies are. carefully silent theodolit 1720|Customer#000001720|CIRvBtD2pSJ2b2hoqOxhj|7|17-681-485-3576|5496.84|BUILDING|sly even accounts sleep carefully-- pending excuses sublate slyly. slyly unusual foxes al 1721|Customer#000001721|qRNwOW8G9E5fOvG,,W7HDgv|9|19-288-344-3668|5484.48|FURNITURE| furiously. foxes cajole. instructions after the bold instructions 1722|Customer#000001722|XqgWxT4hlg2EjS0HaV6XQwclVGN09kfwUJa iX|1|11-252-319-1249|6718.86|BUILDING|posits. furiously regular requests are fluffily ac 1723|Customer#000001723|riRitYAsJ0nPLmAnd5JX,8QpE2F9iZIKXAg|22|32-609-708-8185|6893.86|AUTOMOBILE|e the carefully unusual theodolites. bold, even deposits alongside of the pending packages s 1724|Customer#000001724|oCZhgKIkixlfOYiqSXqzeiUAV9ctBQSRsW55G6n|11|21-327-332-4432|123.03|AUTOMOBILE|ven packages. fluffily final deposits nag. carefully ironic dependencies unwind b 1725|Customer#000001725|deDnuDKB3fZczOWTxwxutP|6|16-227-800-7867|8778.07|FURNITURE|he furiously stealthy deposits. 
deposits haggle quickly among the final packages. carefully expres 1726|Customer#000001726|Gi5q77BDziKnHUbMcbOrja22sv|13|23-199-630-2326|-879.29|BUILDING|lithely final foxes around the ev 1727|Customer#000001727|g7iJ1HSEmxE1sd4lWLV XcE64HA2JHYqPEJBA|23|33-952-217-5029|3628.03|AUTOMOBILE|e regular excuses detect carefully spe 1728|Customer#000001728|ZbokjPCGrcUuysUjYtzQRK9gQSSNK CkFo3rh8i1|16|26-560-950-6812|1223.09|AUTOMOBILE|regular pinto beans; bold packages sleep. blithely ironic requests boost carefully furiously fin 1729|Customer#000001729|qc3FBSbJHHDIEkku69mWsz,KO|5|15-361-394-6195|6479.07|AUTOMOBILE|y even depths sleep silent accounts. evenly final epita 1730|Customer#000001730|p8wSGXOVmXO6rJBW3jgEjcFYRKW0jVY|1|11-216-159-3328|2743.27|HOUSEHOLD|efully final somas hang slyly. dependencies over the blithely regular requests haggle carefull 1731|Customer#000001731|YOLyjZXp0eenovcSOSR4cShVAvSLDG84VTtB7|2|12-671-588-4662|3075.42|MACHINERY|sual gifts above the blithely bold packages haggle slyly pending attainmen 1732|Customer#000001732|2ZHJonGizgo0QYzPxNF1PubW|2|12-505-455-1597|8512.55|BUILDING|arefully even deposits would cajole about the sometimes final requests. slyly silent ideas accordi 1733|Customer#000001733|RO7fzBGXbovRqHEQln,fPJza9UAmIOe,Q|21|31-150-385-3780|2568.57|BUILDING| pinto beans nag! furiously pending deposits grow blithely even tithes? pending, ironic requests impress. regular do 1734|Customer#000001734|Hj4hmdNc2sRBgG5YO4mz5q 9|8|18-264-978-2762|3888.89|AUTOMOBILE|inal asymptotes along the final platelets nag above the quickly regular requests. slyly final request 1735|Customer#000001735|59,bPx BNSZ1YJ9thet9N|10|20-343-415-9202|7495.94|HOUSEHOLD|efully along the special realms. ruthlessly brave courts cajole carefully carefully silent reques 1736|Customer#000001736|t3 1Dv36zNjBXauc2HKcSLJk,QRu4t|7|17-572-741-5408|-686.49|HOUSEHOLD|s! slyly sly requests above the expr 1737|Customer#000001737|Mdj2vXS04lbiu1hlBPSpA6XliT0yq,XL|5|15-973-473-2767|-917.33|MACHINERY|wake furiously even deposits. busy, bold grouches integrate q 1738|Customer#000001738|dKNfooI8lr6G0yb19Oug|8|18-263-208-3553|2290.69|AUTOMOBILE|mptotes. courts about the ideas nag requests. quickly unusual multipliers print after the pen 1739|Customer#000001739|lhga8XGNRXPfJTotgUTn5qc4ush|12|22-216-512-8595|8526.70|FURNITURE|ut the fluffily regular foxes. regular accounts haggle. special deposits affix carefully. even, final 1740|Customer#000001740|7SEO3ug3RMgpphEQ6Ozn2N22VGruDJA|19|29-148-722-9708|9323.31|FURNITURE|ronic foxes sleep carefully forges. regular, ironic pinto beans promise carefully across t 1741|Customer#000001741|W9G SogT0038gQBdoLtFsexbHqNl|20|30-605-828-6050|-0.28|FURNITURE|sits wake carefully final Tiresias. carefully unusual requests sleep 1742|Customer#000001742|t3PI5OUN02V0BSuoWRvpEcxhY6qX 3IxBOq|20|30-777-207-9522|4273.99|AUTOMOBILE| regular theodolites. carefully ironic packages are. even, express foxes h 1743|Customer#000001743|F 5AwQdqqG4K q Ra2AZ0DIsKLNwhtIgHVxIr|16|26-867-792-6806|985.87|HOUSEHOLD|riously regular attainments. furiously unusual pinto beans cajole slyly daringly r 1744|Customer#000001744|cUBf1 YMJEgbt2XDeQWD4WinTu4iFIF|17|27-864-312-2867|1436.96|FURNITURE|egularly bold, ironic packages. even theodolites nag. 
unusual theodolites would sleep furiously express 1745|Customer#000001745|pAo6p8Q,xr4Y|22|32-624-467-3275|3907.29|BUILDING|final instructions affix fluffily alongside of the blithely unusual pinto be 1746|Customer#000001746|f8Ku1TqpFJ 4wU,s6clle8G|17|27-110-285-2511|8774.31|HOUSEHOLD|ions cajole ironic theodolites. slyly even requests wake carefully. blithely regular frets integrate furiously agai 1747|Customer#000001747|xSJopUGOWIGwa2QJA9mbIzcLAA8OM|15|25-493-381-4100|1962.62|MACHINERY|furiously furiously ironic theodolites. stealthily reg 1748|Customer#000001748|4MaVm9Yox5xeu|13|23-496-856-9633|5779.19|AUTOMOBILE|y pending ideas. furiously regular instructions are. quickly ironic depo 1749|Customer#000001749|lgY9SnA8QnkA7x2LAje7MGsVYB|4|14-247-462-3766|9788.66|AUTOMOBILE| carefully ironic packages. d 1750|Customer#000001750|PBWOGiTcFiFucs8kN2h ,ACNS NC|16|26-538-454-1606|110.26|HOUSEHOLD|packages are stealthily final frets. regu 1751|Customer#000001751|8KIBa2IJPXquMkWYLAccUT|7|17-460-999-8173|3151.94|HOUSEHOLD|ithely regular, even accounts. unusual packages lose furiously; asymptotes nag si 1752|Customer#000001752|CZzZOOncqNdRX|5|15-542-683-9661|7585.99|MACHINERY|uriously unusual accounts. carefully unusual dependencies shall have to play sly 1753|Customer#000001753|XlL3uXUfcHDfQ 4unoq pbTPbUzO|11|21-214-570-7769|5787.81|MACHINERY| carefully regular asymptotes a 1754|Customer#000001754|wXsAA1ZgkeofaPGeZaIe|0|10-154-971-3056|759.00|BUILDING|requests. stealthily pending requ 1755|Customer#000001755|emfcQnwjix1Ul0eKbHP|3|13-333-888-8618|5389.05|MACHINERY|detect blithely about the fluffily regular foxes. slyly regular requests wake quickly about the final foxes 1756|Customer#000001756|q8WlGuUrrao7fWFp4pZE|19|29-962-368-3523|8817.44|MACHINERY|press pinto beans. accounts could are blithely carefully express dolphins. blithely bold deposits detect. eve 1757|Customer#000001757|jY0jJh ww3hLh8eLmmJvKhIL47ka2j1uAys|6|16-868-947-1151|830.36|HOUSEHOLD|, bold instructions integrate slyly final accounts! carefully final accounts caj 1758|Customer#000001758|xvGuRiXuKNuXABjdfERW8jl7b5FdFNo|16|26-263-427-8796|5746.38|FURNITURE|old bravely. ironic dugouts are slyly. carefull 1759|Customer#000001759|2hzwTzhbdHJlHbT0jWD6lKTGA|20|30-176-851-5565|8396.37|MACHINERY|tect furiously. permanently even requests ac 1760|Customer#000001760|HcnL7r33sIo1SAwc5|9|19-539-811-6674|7775.38|BUILDING|ts are after the regular theodolites. dar 1761|Customer#000001761|ypll07IxByF9atO nGkJo7R8Tds8Wq |21|31-376-476-3692|9846.97|BUILDING|en requests. accounts sleep quickly around the carefully e 1762|Customer#000001762|LD,Y0Xj io6wJYOZJI|15|25-319-745-1408|1171.92|AUTOMOBILE|ng accounts. thinly regular packages across the furiously ironic accounts wake fluffily accounts 1763|Customer#000001763|Co2ATzqcl61Cd|1|11-738-302-2342|4490.23|AUTOMOBILE|manent accounts use slyly regular requests. carefully special packages after the pending 1764|Customer#000001764|rlpLd5W4dwxpO7aAVKa7YlZHnwOQCziuByOrK|13|23-308-606-7773|5331.37|HOUSEHOLD|sits sleep slyly. even, ironic instructions breach permanently regular foxes. furiously silent platelets abo 1765|Customer#000001765|YYXRIFQJ4V9paDLFCd13|9|19-538-728-6104|7409.60|AUTOMOBILE|final, special deposits sleep carefully furiously special deposits. evenly ironic p 1766|Customer#000001766|n4uulup7FP1ZLXvSRHveY,TXBOTxs0pj67|15|25-155-396-7616|200.00|BUILDING|lly express foxes. 
blithely final deposits cajole doggedly regular theodol 1767|Customer#000001767|ddqHp2,Ylt8vN8Pf|12|22-421-403-9852|4369.15|AUTOMOBILE|e carefully atop the even theodolites. pending, ironic accounts hinder among the carefully regular foxes. c 1768|Customer#000001768|kId4M 0RG9dW9Po|17|27-746-769-7890|5356.96|AUTOMOBILE|s. carefully even requests are. spe 1769|Customer#000001769|mrche55tm5KGuJb6lGWqCeKKIlZ rj4|10|20-708-644-8998|8457.18|AUTOMOBILE|furiously silent packages. bold, bold theodolites sleep quickly against the quickly ironic theodolites. slyl 1770|Customer#000001770|IonoL5JlAeGYQFKq2k1sKH0QscvwaAYdrpFSxHV|18|28-976-259-9388|6876.37|BUILDING|ole. furiously regular pinto beans affix blithely inside 1771|Customer#000001771|evmQypmt DbfynZ4bXvm0KUNtyvynyDp3zjcXX|11|21-345-763-5234|3151.21|BUILDING|tes wake carefully according to the unusual accounts. fluffily regular theodolites na 1772|Customer#000001772|AQla93nCHVF6jkq,J|13|23-887-525-9315|6394.26|MACHINERY|e slyly ironic requests. furiously bold courts thrash blithely bold foxes. slyly express foxes 1773|Customer#000001773|StUAItIdmWQdpF6Gz|19|29-729-630-8987|7378.35|HOUSEHOLD|are furiously after the carefully unusual dependencies. ideas sleep furiously. slyly bold packages 1774|Customer#000001774|5pstbh4XaxP91 ,wNQFbR|8|18-753-365-9994|2922.61|AUTOMOBILE|furiously theodolites. blithely unusual ideas main 1775|Customer#000001775|RUY1tS8uCKV1DrB|2|12-544-332-4550|3221.15|MACHINERY| the furiously silent packages. pending, final accounts breach. instructions according to the quick 1776|Customer#000001776|,iyGlh4 Wrn2|22|32-425-716-3547|4801.42|BUILDING|l packages. slyly regular requests wake ruthlessly above the blithely bold dependencies. unusual, ironic in 1777|Customer#000001777|54GpDEcWWIDVMRjP3|24|34-469-776-4539|-535.08|MACHINERY|ar packages? blithely regular instructions along the fluffily ironic theodolites believe ironically aro 1778|Customer#000001778|XxR5jsS8OA1xjtocU,KS6F0Pte4Go5|4|14-504-368-5987|2772.31|BUILDING|eans. regular, pending packages haggle blithely regular p 1779|Customer#000001779|3iow2GjE85s8GnxfNO,fnr9T|23|33-986-427-8764|7233.18|AUTOMOBILE|ly pending instructions: carefully regular excuse 1780|Customer#000001780|ZIeOfVh8umRYig|17|27-213-387-3335|5137.48|FURNITURE|patterns use carefully about the slyly even foxes. furiously regular excuses about the furiously careful packages af 1781|Customer#000001781|JYtJY4OTZQUaEQlfDEeVkK4mtO|8|18-350-885-2317|4686.31|AUTOMOBILE|s cajole fluffily. platelets believe quickly. furiously even accounts wa 1782|Customer#000001782|ehV3 VRXVZ9SR3|21|31-246-927-6074|839.66|AUTOMOBILE|beans. requests was about the blithely regular foxes. unusual, final platelets sublate accounts. 1783|Customer#000001783|ey2RVFXAj5c1qisLEFJA43S2|3|13-239-528-2710|8131.81|BUILDING|dolites. ironic excuses shall wake blithely slyly even theodolites. quickly ironic asympt 1784|Customer#000001784|Zs8QpbcHZfcVJ6oujM8g69J|15|25-605-903-3007|5458.37|BUILDING|n frays. blithely ironic theodolites haggle carefully. blithel 1785|Customer#000001785|nnCUQ01AgIgYBDsdHteH4u0na6WiXvBv|23|33-475-488-7723|7805.55|FURNITURE|r packages was. permanent requests are special deposit 1786|Customer#000001786|pZAHd2LxDUbgcS1WQBN4vvoR5BeNpckOkl7DhG9|6|16-436-900-4501|-51.99|AUTOMOBILE|nts sleep among the quickly regular excuses. slyly even 1787|Customer#000001787|l9Rin,i89mx8LxSK0cC0BPi3OBcM3BXp|20|30-258-449-6408|4120.78|HOUSEHOLD|ts above the quickly regular instructions cajole about the attainments. 
carefully 1788|Customer#000001788|4,Lldd8YNe,DOiCq2dSW4JL0uqjdVpI2yW4fZMDI|24|34-826-793-6480|-45.96|BUILDING|tes promise. carefully final requests use above the blithely express packages. carefully even theodolites na 1789|Customer#000001789|lfHQVe9IPEKad|0|10-582-797-3122|6407.46|FURNITURE|he slyly express requests. silently daring packages sleep express courts! blithely pending foxes w 1790|Customer#000001790|z IQ1wwki0OLw7biMFHxPKNjp1JBOs7|5|15-805-568-6053|9177.60|MACHINERY| sleep along the special, bold requests. even accounts against the dogge 1791|Customer#000001791|nEuCQJ1,wiJUq7k05FED7j6EjZP0QdzriHsUWZ|8|18-937-127-3890|3917.82|HOUSEHOLD|he furiously final instructions sleep regular deposits. blithely ironic accounts boost among the ac 1792|Customer#000001792|IPqTE3D5cvxDmpveKD6WwW7Pb9ymeAgKs,rT|1|11-561-103-5122|9762.91|BUILDING|to the final instructions will wake quickly carefully special 1793|Customer#000001793|B13zlM7kKBKt|19|29-393-128-9130|7329.58|HOUSEHOLD|ringly furiously bold requests. 1794|Customer#000001794|col5dSe1MO8MyVdQ1f09bhAFsVjYwNH8R4I|7|17-670-112-5044|8243.80|HOUSEHOLD| instructions around the furious 1795|Customer#000001795|EaQbFX VR89kRgd6svC3NK8MSivUK8DZ3y|21|31-853-266-7057|6580.13|MACHINERY|; requests wake slyly after the furiously pending pa 1796|Customer#000001796|HGRGXJA8AwhfBNSyDNn8j3JMvbIwPUKjPTcvKBLs|10|20-905-459-3952|7327.39|BUILDING|ironic, regular accounts haggle care 1797|Customer#000001797|5v4QJxOVHNQ3J6NORJE2edRftfg8 HiGu|19|29-805-690-1846|4461.48|FURNITURE|long the accounts are furiously courts. quickly regular exc 1798|Customer#000001798|fOQAhX8wjDFg8tpeOa L ZdgFlOC69bvmZE|7|17-422-203-8428|6072.64|HOUSEHOLD|hely about the carefully bold deposits. quickly ruthless warthogs w 1799|Customer#000001799|07tCCGrmdFwcXYnolQabgAZW9yq|20|30-643-275-8135|2967.77|BUILDING|requests of the slyly ironic instructions believe slyly regular 1800|Customer#000001800|Cc 1QYWD8JeDlRuyLOEffaanH|16|26-566-785-6289|3323.37|FURNITURE|regular pearls. quickly even escapades alon 1801|Customer#000001801|8ZC3HFVDQGf23cjelZL0wa|3|13-994-265-8339|6806.86|MACHINERY|es sleep carefully along the quickly regular accounts. furiously unusual packages must are permanent accou 1802|Customer#000001802|fGUhRVo61nZfOPxAxzZLrp4z|23|33-720-109-4385|897.62|MACHINERY|c requests. slyly final deposits cajole bold packages. ironic deposits wake quickly around the ironic, special i 1803|Customer#000001803|D7E PteFioyOXeI 422,yZZIsEk|0|10-993-780-6774|3318.35|FURNITURE|its eat quickly around the slyly 1804|Customer#000001804|,TDgHBX9y5eC5ycEwVKbO3gJjXChWmj|15|25-332-547-7897|-516.28|HOUSEHOLD|quests cajole fluffily after the pending, special pinto beans. packages try to haggle. braids haggle fina 1805|Customer#000001805|ZERs4Cu5lQTYD|9|19-679-706-1096|-274.75|AUTOMOBILE|ding tithes. slyly ironic packages boost. final, stealthy requests wake. final, re 1806|Customer#000001806|BB6Vr7W,rSIpWKp|9|19-872-322-3433|254.17|MACHINERY|usly blithely regular instructions. slyly regular dugouts sleep carefully. 1807|Customer#000001807|jlGhIS6zaYIfu9tWFHAyDKVQpOvIluJ1RunV3X|6|16-707-959-9348|1720.94|MACHINERY| slyly final pinto beans sleep furious 1808|Customer#000001808|Z Losr9UXEWwm3RgetdFLr6Q|22|32-507-149-7712|2776.30|HOUSEHOLD|oost carefully ironic requests. fluffily regular foxes boost blithely blithely express ideas. 
car 1809|Customer#000001809|GqhON8SNyRv|20|30-390-140-3365|8306.29|MACHINERY|ld theodolites eat carefully special pinto 1810|Customer#000001810|RI0cwmW3gNVJiSnIGooUzA|0|10-274-496-6960|923.32|MACHINERY| the slyly special ideas. regular platelet 1811|Customer#000001811|GAuB2XYNF6YxAJgUQO1VcXal|14|24-892-235-8707|6104.69|MACHINERY|dazzle blithely. ironic asymptotes breach; carefully iron 1812|Customer#000001812|UqextI Ph0pve58|0|10-876-771-7164|2515.86|BUILDING| final orbits promise fluffily even waters. blithely 1813|Customer#000001813|xEoR4tsV2Bse527UyeFO8aFhmZ|3|13-604-485-5526|9074.92|FURNITURE|luffily ironic requests doze carefully after the express packages. slyly regular excuses sleep slyly carefu 1814|Customer#000001814|wrzLhEh9DAAHPrh19AGxqCxBWQjO52j4qA1fmqw6|13|23-588-705-2608|5215.32|AUTOMOBILE|ully special packages on the bold accounts sleep among the furiously ironic requests. final requests detect quickl 1815|Customer#000001815|wXx75IZkzX3hsqx5H,PBqxf1CB,cjS|13|23-954-607-5326|3940.94|BUILDING|, bold requests. ironically regular accounts sleep blithely carefully 1816|Customer#000001816|WemZ1IDB0sBS9yyqszdSxqRm8YtGGDme6 BW|4|14-307-306-8692|2328.09|BUILDING|e deposits maintain about the furiously thin theodolites. carefully iron 1817|Customer#000001817|,pNXvEI4pkygH 4wjAy,hPxNikXwej29WH8|7|17-246-120-5754|-862.51|FURNITURE|egular packages. carefully even Tiresias haggle. asymptotes haggle furiously across the quickly pending reque 1818|Customer#000001818|wSYuvOMT8YVZqRDS1YCGrALb|22|32-897-843-2620|-331.97|BUILDING|about the blithely regular requests. ironic requests sleep. blithely pending accounts are according to th 1819|Customer#000001819|pfaH03EbQcSE64yOEnKz7mwHHn|18|28-832-601-3029|6435.85|AUTOMOBILE|y even gifts haggle since the slyly bold warthogs. 1820|Customer#000001820|RsTONzasImDok,RBqc2J09pa29w8gSf6JDIuCBx|6|16-496-603-4437|2282.18|BUILDING|ymptotes affix fluffily doggedly express packages. ex 1821|Customer#000001821|QlxYI,DDZRUAyyVfdaF4q nurCpm,3FltjM|11|21-278-374-9593|8593.01|FURNITURE|tegrate slyly carefully final requests. ironic, regular package 1822|Customer#000001822|H79GEJ2,C0JTus4zVXNjEyMqEFe,7RmzxIGvU5|14|24-524-173-1344|1728.52|AUTOMOBILE|regular forges. ironic packages sleep furiously. slyly fina 1823|Customer#000001823|ci7lCqnRGp|19|29-787-260-1556|9240.38|FURNITURE| furiously. blithely final foxes integrate alongside of the instructions. quickly regular 1824|Customer#000001824|aN7aUMe22hd2LnIqVf7TnJLzSI8uggv,YpldbC|10|20-684-209-1084|3177.51|AUTOMOBILE|luffily regular, pending instructions. slyly pending foxes 1825|Customer#000001825|,E8l78G7k,u0eGXu,sGU1fta2Lg|8|18-969-538-4715|8447.18|FURNITURE|ing to the slyly silent deposits. 1826|Customer#000001826|tbWBFvYUZjBY,BH5r5CQsA71GJIQJNx|3|13-389-673-9030|3891.76|BUILDING|e. bold deposits among the slyly regular depths detect slyly a 1827|Customer#000001827|icXm8xlUo0Ca,T9MoUjkWej3|24|34-815-928-1369|800.31|BUILDING|se carefully alongside of the quickly special packages. regular requests run. pinto bean 1828|Customer#000001828|djbEIhlvmo1i8ZUgTNLFT2f1P|13|23-937-592-5811|6673.46|AUTOMOBILE|into beans alongside of the unusual foxes breach blithely above the quickly dog 1829|Customer#000001829|9TWqvF0jyqs5eJ O0OPAprl0chk8WGTf,|0|10-582-390-6176|2886.86|BUILDING|ccounts. boldly bold foxes use slyly even requests. never regular dolphins at th 1830|Customer#000001830|d6ZvjlxbHL6Kq5y,|17|27-148-316-6372|7363.56|AUTOMOBILE|around the fluffily regular notornis integrate above the final deposits. 
pending requests nag carefully packages 1831|Customer#000001831|DcW02etJU0N2SmSs3UgkmkPRvao7A|13|23-215-897-5399|7448.78|FURNITURE| thin ideas. furiously quiet pinto beans w 1832|Customer#000001832|VffJ90poBdj 9fq7Hpp0NvcEeeuHg |10|20-337-677-6483|9640.29|BUILDING|. furiously express ideas wake furiously. unusual, bold requests nag quickly. slyly regular 1833|Customer#000001833|KluDIF0rSvGj8mjBA vo7db1bA|3|13-425-435-3864|3418.04|BUILDING|y quickly slow requests? ironic, final pinto beans eat furiously. slyly regular deposits sleep 1834|Customer#000001834| oWU4qpGp8v5pjfHDGwIAh7z|24|34-976-916-8306|2985.37|HOUSEHOLD|slyly unusual packages play. unusual, ironic ideas integrate always according to the thin requests. ste 1835|Customer#000001835|XwMDWDz0zgJT7CpiEZ7Mak8kB6m|7|17-285-512-9067|2081.99|AUTOMOBILE|s. carefully unusual packages along the carefully idle pinto beans haggle carefull 1836|Customer#000001836|0oV7OBdPNyXRqIesxfDU0Ol4So|7|17-927-450-7081|9925.39|FURNITURE|ate slyly idle depths. silent, even theodolites are furiously ironic 1837|Customer#000001837|sWO0VNEY4GzuBq L0vJ5p2|14|24-908-704-5003|-886.94|AUTOMOBILE|unts after the bold, regular dependencies breach a 1838|Customer#000001838|4nF9mi2bj3s82Sv8KPgIsS|21|31-194-756-6301|7142.14|AUTOMOBILE| deposits. regular requests nag slyly-- regular accounts are furiously acr 1839|Customer#000001839|QKjHd6wOQJaxJCOqH2E5P n2tZb3Y|5|15-944-305-1436|3909.79|AUTOMOBILE|haggle furiously after the regular packages. ironic, regular foxes haggle 1840|Customer#000001840|cMNMPjKib0Ez0Bl,cesLvkOl39p4oiUH |3|13-833-461-5939|6879.93|BUILDING|the courts sleep blithely according to the slyly permanent ideas. 1841|Customer#000001841|xJLMIrO4OB|10|20-566-527-9390|8053.62|BUILDING|ts snooze furiously final waters. furiousl 1842|Customer#000001842|SYMcJ0iGluYT2pe6NR5jyErQBODPK|10|20-148-955-7830|233.23|BUILDING|y regular theodolites. packages sleep blithely according to the busy, final instructi 1843|Customer#000001843|5Op16OGLtq5hXrYvKBKPPu,B1pB|1|11-977-879-2346|1821.61|HOUSEHOLD| requests after the never bold ideas hang carefully bold sheaves. carefully pending deposits 1844|Customer#000001844|1BOo5no2lQ8Fa CLDWFaRC8j|18|28-705-205-8653|798.99|HOUSEHOLD| about the slyly final foxes. furiously pending asymptotes acco 1845|Customer#000001845|xHga IhnB2a,|10|20-777-704-2095|6567.69|MACHINERY| furiously requests. even instructions haggle. instructions haggle fluffily. requests along the carefu 1846|Customer#000001846|v0hJC6rlPBHdWAUGg7xm7SEUhpI3wWIO2|16|26-860-238-2408|7115.80|BUILDING|riously regular deposits wake regularly above the slyly unusual requests: unusual packages are furiously furiousl 1847|Customer#000001847|g12l6sqk5YNADO,NEnDg, prphdZp|16|26-677-971-4905|1240.90|MACHINERY|. furiously express packages wake quickly. sl 1848|Customer#000001848|g xIZPvP9AkmbUQa6e|4|14-161-419-6384|8684.90|HOUSEHOLD|as. final, regular asymptotes haggle in place of the slyly ironi 1849|Customer#000001849|vRc6mWLRoRf2ElUvz9byqx2Vt|22|32-730-179-7629|6259.04|BUILDING|ckages. bold epitaphs maintain slyly alongside of the carefully exp 1850|Customer#000001850|yY0Ihs9rn4Jt9qzowqGCKcGYUYVqrOpfKVArQK|10|20-451-644-7838|722.28|BUILDING|lly regular accounts. carefully regula 1851|Customer#000001851|n873H6Cz5A9uHamGSt iIs3|3|13-311-225-1271|4290.76|BUILDING|e slyly regular pains wake slyly final requests. quickly 1852|Customer#000001852|LCTu83UaCBLeatTuc|14|24-811-458-3601|7717.57|AUTOMOBILE|ar, final accounts. fluffily bold deposits cajole. 
ironic deposits above t 1853|Customer#000001853| z1i M6vmUfw|5|15-442-128-2785|6244.70|FURNITURE|ly pending instructions. blithely special pinto beans sleep carefully quickly final packages 1854|Customer#000001854|CYDzQ3P8qyP,o0ZCHt oAxFNfmlkY18|14|24-654-947-6633|1661.15|BUILDING| the fluffily silent deposits. quickly 1855|Customer#000001855|REHQRPduxffKW5vE8Laf|13|23-239-487-1955|9936.71|MACHINERY|nto beans cajole. pending packages nag quickly according to the packages. s 1856|Customer#000001856|FuvHERUFZy4lUPSGX9WzC6rM3dLy4DRPv21scFOR|21|31-242-270-7219|2381.48|AUTOMOBILE|ic requests. furiously silent requests wake slyly according to the never pending packages. slyly iro 1857|Customer#000001857|4BOgUDmgtH,RyMRb4jUc TDYqFB|12|22-974-598-1668|5485.71|HOUSEHOLD|sits doze among the ironic packages. carefully express requests wake blithely alongside of the theodolites. ca 1858|Customer#000001858|jtVS,lDzoPG7|18|28-940-298-3762|5263.13|MACHINERY| unusual theodolites affix according to the silent instructions. furiously darin 1859|Customer#000001859|MV0YXkg6XVwBzeXMljCiLp|1|11-147-516-2776|4508.50|AUTOMOBILE|t ideas. bold deposits across the quickly slow excuses haggle slyly unusual packages. furiously final ideas caj 1860|Customer#000001860|4,u2YoI2nnY|15|25-432-186-2226|4383.03|HOUSEHOLD|ackages wake on the pending sheaves. bold foxes nag slyly slyl 1861|Customer#000001861|E XEUZYUxRnh0Z1MKcU19Ff2kj7|15|25-966-583-8374|7584.92|HOUSEHOLD|blithely regular instructions detect fluffily abou 1862|Customer#000001862|jDYsot4wXtwiJ9W 7XqR,C|7|17-972-634-9470|9848.27|MACHINERY|s are after the blithely even instructions. slyly regular 1863|Customer#000001863|kfPxNgJute862KvopjfTItHJVC9Hu|16|26-777-313-6429|1024.85|FURNITURE|fily final accounts cajole furiously furiously pending accounts. slyly final requests acco 1864|Customer#000001864|ZTAtP8aj97X246rzbsglEXqogjhEcYybWE3W|22|32-582-138-8697|7669.96|FURNITURE|uriously. final packages detect slyly blithely r 1865|Customer#000001865|0wViSaeE08WI09xFxqr58|14|24-208-401-3922|9112.90|BUILDING|luffily regular dolphins. quickly regul 1866|Customer#000001866|bdPJelrGfDMOilIhvlZNmb3|6|16-520-541-7567|5563.66|HOUSEHOLD|ent accounts about the blithely unusual dolphins are carefully along the special deposits! final excuses u 1867|Customer#000001867|72gC1ZTlzFNQy9CvFosWzE|1|11-951-119-6203|6470.11|MACHINERY|final instructions are according to the carefully final accounts. caref 1868|Customer#000001868|kD u32wkAhHa FvvBM,y6oJVpfZQDcuDBQ|2|12-347-925-5872|6150.48|MACHINERY|onic, final ideas haggle blithely excuses. slyly regular ideas across the carefully bold dolphins cajole sly 1869|Customer#000001869|hOzuX9JGbYqPv677zUdWtUN3,LHdGYcEwqva|6|16-750-945-2045|2181.16|BUILDING|nts. special, unusual pinto beans haggle slyly 1870|Customer#000001870|iAG9OC,akOL06jR|1|11-950-597-6395|4739.19|BUILDING| epitaphs according to the slyly regular foxe 1871|Customer#000001871|,B2hG X848Trk9SZ|15|25-329-168-1291|9528.08|AUTOMOBILE|y express excuses. furiously regular warthogs integrate quickly after the regular packag 1872|Customer#000001872|CTHfC,m5kcnglbJawTVx5x1cJx3lynOTa7|22|32-985-585-5168|9479.72|FURNITURE|kages wake carefully after the ironic, special requests: regular foxes from the special asymptote 1873|Customer#000001873|fvkv57 kYzsIjzXH9,csoT7smnh pObla4,|3|13-581-396-9191|-201.93|BUILDING|yly. fluffily unusual theodolites among the ironic ac 1874|Customer#000001874|DVHIWiilJuI|17|27-133-279-5869|3121.93|FURNITURE|s. 
carefully final requests unwind furiously among the 1875|Customer#000001875|CiwSrmNO0dw3Bvf, v2NL,|10|20-222-437-7307|9319.39|MACHINERY|equests cajole above the slyly final requests. blithely special excuses nag blithely bold dependencies. blithely exp 1876|Customer#000001876|pUsteCJxfoVJ|7|17-965-399-4552|90.46|FURNITURE|s. ironic theodolites around the specia 1877|Customer#000001877|a1Rycy,Cni7sr7VUyLR|23|33-986-738-5501|341.10|MACHINERY|ss instructions. fluffily ironic packages boost alongside of the blithely final patterns. s 1878|Customer#000001878|EJqsZwacKoF5mzgYOzYM5Tt7tcuJ|6|16-201-771-9025|-120.65|MACHINERY|xes. furiously bold platelets sleep quickly among the slyly bold theodolites. special deposi 1879|Customer#000001879|DsH91sbfSoNl3Ohor9NQGBO94rGJeGRkc4|17|27-652-714-8086|5163.22|HOUSEHOLD|theodolites could cajole quickly about the ironically final requests. blithely even asymptotes use theodolit 1880|Customer#000001880|LRh,W2K0FBWZWB|16|26-331-549-2024|8247.29|HOUSEHOLD|aves are ironic, final accounts. regular asymptotes promise pending epitaphs-- ideas 1881|Customer#000001881|eo82SRJdBgn25wzgDGaYETk70Wj Y3NKEat|18|28-135-437-2467|-780.99|HOUSEHOLD|to beans haggle even, unusual packages. fluffily regular de 1882|Customer#000001882|U3c UftfresD4TjzRl8F|3|13-454-520-9987|5785.07|HOUSEHOLD|uickly regular, final accounts. furiously special theodolites cajole 1883|Customer#000001883|vyFP6B54fLxqmV3RkbkgNbb|18|28-359-399-4989|9692.28|MACHINERY|kly express packages haggle slyly according to the furiously even orbits. 1884|Customer#000001884|TFjb8jysqh0cMWSt36NLtlP|0|10-570-500-9765|8261.18|HOUSEHOLD|s over the ironic requests sleep fluffily carefully bold dependencies. 1885|Customer#000001885|WceTHrKwpwDq4AbQiFFO03R88q2|8|18-986-991-1839|1642.14|MACHINERY|. pinto beans wake along the theodolites. caref 1886|Customer#000001886|rEn7jolYiPkRQDuLU8lSbn|13|23-315-177-8462|9333.99|FURNITURE|ar pinto beans sleep even deposits. slyly special requests are. express platelets snooze blithely 1887|Customer#000001887|F9k42yQM7WJEwLS|10|20-704-266-2935|3370.69|AUTOMOBILE|s wake carefully against the carefully ironic deposits. finally regular attainments 1888|Customer#000001888|3psgNE6QfX,Iw2xTWauq9yL9EkSiD1fAUMXfXu8k|5|15-757-359-6861|-861.72|AUTOMOBILE|ntain ironic foxes. unusual accounts sleep quickly. quickly regular accounts are blithely. fin 1889|Customer#000001889|am4T 9xjY,|24|34-941-762-4541|1977.38|FURNITURE| ideas detect blithely. accounts haggle fluffily against the ironic, ironic accounts. slyly p 1890|Customer#000001890|QvxzOMYHMQUUfnfY3Fhydb|9|19-980-717-9488|704.54|BUILDING|nic sauternes. even pinto beans are carefully express requests. carefully express packages across the blithe 1891|Customer#000001891|xdhHnr2OrFVgZSEY190|9|19-323-409-2180|3069.79|HOUSEHOLD|ording to the blithely final requests. requests sleep quickly according to the ironic, even deposits. blithely ir 1892|Customer#000001892|Mm4BlT6Q10AlsxwZBAoJE,rXVDtArlKo3b,Ysk|14|24-677-485-8489|1917.06|AUTOMOBILE|luffily around the slyly ironic platelets. express deposits hang furiously among the r 1893|Customer#000001893|iFCRrPifE A|1|11-796-419-9244|9971.58|HOUSEHOLD|ccounts among the fluffily bold pinto beans 1894|Customer#000001894|X,KINka,nciwGbK|12|22-832-993-2803|5415.55|BUILDING| final deposits. regular frays was blithely carefully regular platelets. 
even dolphins run among the 1895|Customer#000001895|PAv4VeG04NXcldWKmT|12|22-350-120-2910|3216.87|HOUSEHOLD|onic pinto beans sleep quietly regular Tiresias: slyly regular excuses ha 1896|Customer#000001896|4QX6hX9fgTooTWKoy6jmMOEpNZHoNJ|16|26-210-412-7721|9849.67|MACHINERY|re among the slyly express packages. dolphins affix furiously across t 1897|Customer#000001897|bizbUJTTu896QHClLsGIeQmcIF5K,7pyN|8|18-397-783-2293|2415.01|BUILDING| fluffily final theodolites. blithely regular requests 1898|Customer#000001898|yK530x63pGQpzYZbGjsmt0INzw2f9oKfocxs0b|16|26-214-695-2065|7975.89|MACHINERY|ar requests. slyly pending accounts haggle blithely. busy account 1899|Customer#000001899|XuQb0moba ,XyN|10|20-243-672-4974|2869.40|MACHINERY| even, idle deposits about the busily final foxes 1900|Customer#000001900|lf78mUY8AHVrPld7M8Ysotn9WoXwKoN|21|31-565-138-3230|-57.96|MACHINERY|kly regular accounts. requests use furiously alongside of the carefu 1901|Customer#000001901|TJVv IWFTUUp ihC 7mzH,ru5s0J9Xb|24|34-736-824-5989|2749.93|FURNITURE|s use. express platelets beside 1902|Customer#000001902|03 VVAFfgEADO1Ert upreETF9E9i1QUgy53i|3|13-815-127-1409|5019.36|BUILDING|after the blithely regular packages. carefully regular gi 1903|Customer#000001903|oNu,vFSWInYzj5wIB5khmgObNMx61UfUap|15|25-489-905-9207|-679.30|FURNITURE|slyly regular requests sleep. blithely ironic packages c 1904|Customer#000001904|qtgYwanpIR3LfvbOPexVyJ8XHDWNn p95|24|34-565-694-9621|9913.42|BUILDING|. furiously special requests haggle finally carefully 1905|Customer#000001905|P xPYefatHsYxGtFYbzin3GqmqPWvR31YlXlOt|19|29-697-760-7269|6862.64|BUILDING|ajole about the pending, bold requests. furiously regular platelets above the q 1906|Customer#000001906|pByvzLPeSKa8Zlh5ncJwAuYK2U|5|15-956-730-8661|3448.10|MACHINERY| the fluffily special foxes. carefully pending deposits by the special, final accounts wake care 1907|Customer#000001907|Wjpx2yEVYfTUHPc9TgXzD64PBqW x|24|34-755-117-7537|2145.93|HOUSEHOLD|carefully silent platelets wake fluffily express deposits. forges against the ironic platelets are silent requests. 1908|Customer#000001908|eT6lmLbi11KA|17|27-275-292-5975|6551.65|HOUSEHOLD|ly unusual requests engage carefully blithely express depos 1909|Customer#000001909|wrQOWC 6kE|19|29-108-892-4321|1605.61|HOUSEHOLD| the regular, unusual accounts. fluffily final theodolites d 1910|Customer#000001910|LQ9x01e0v2qFuNr|5|15-112-377-2843|475.44|FURNITURE|. quickly even deposits sleep f 1911|Customer#000001911|F9kOxmOLLFyC|1|11-369-744-8780|2601.29|HOUSEHOLD|y idle requests poach across the quickly final theodolites. slyly ironic re 1912|Customer#000001912|1AHyJO5HDpo5AiczS 8zP9P9x7PUuoU5wwrTLEb|20|30-784-527-9935|3886.95|FURNITURE|uriously above the ironic requests. blithely careful packages use daringly furiously regular tithes. 1913|Customer#000001913|3Ya7Xzjzd,JayN,jN|8|18-831-659-1744|8031.55|BUILDING|y express platelets. quickly ironic requests hinder quickly against the carefully bold deposits. acc 1914|Customer#000001914|JqCaLg2nQOwrdP1DIHNQ9b,Wk|6|16-458-421-7729|3308.34|FURNITURE|ss pinto beans. quickly unusual packages doze carefully furiously final 1915|Customer#000001915|FMtQbAxpu5v3,0NVxEJwU24sV2TgF|16|26-683-446-1665|7268.98|AUTOMOBILE|s. furiously ironic ideas boost furiously among the ironic pinto beans. accounts thrash slyly about the 1916|Customer#000001916|jFGCdeXJVKzv7YDL|5|15-662-459-4512|1768.76|HOUSEHOLD|into beans. 
even deposits among the fluffily even platelets cajole ab 1917|Customer#000001917|gZW6epQmK,ISulI9 Qt IWytnY|7|17-940-989-5599|3470.08|AUTOMOBILE|silent packages boost quickly. regular, regular instructions nod. idle dependencies are slyly among the slyly i 1918|Customer#000001918|CRBet8MbYBwTgtdPOzX,ipUvTiOrp0vUQjnr|9|19-987-388-4413|8539.61|MACHINERY| ideas alongside of the blithely regular 1919|Customer#000001919|lnfRcSyQXSI|18|28-664-832-4585|6262.18|HOUSEHOLD|as outside the furiously expre 1920|Customer#000001920|FvwmRwSmKgzj7u4disg0ahJOtEfo|8|18-810-300-8541|9476.97|MACHINERY|uctions believe. furiously ironic accounts use fluffily. regula 1921|Customer#000001921|9AE6JuRlhkABh1m8ABQD4AsBk6x5rXlCQ5I|18|28-644-770-2274|8509.33|BUILDING|lar platelets was furiously above the slyly exp 1922|Customer#000001922|qMXL1bzgCtSj1GLMI3AMi1CNV,f, 0iCXt|9|19-532-924-9245|3398.55|MACHINERY|ake carefully even packages. ironic requests against the express, even asymptotes affi 1923|Customer#000001923|m6AVOkvy swPbt,pZ7b9MBfr8zo9vFUvappyyWjv|23|33-781-817-1440|5376.70|HOUSEHOLD|waters detect fluffily carefully regular pinto 1924|Customer#000001924|inc0YxJOJYunXhf|4|14-472-427-4447|-851.49|AUTOMOBILE|gular instructions boost among the blithely s 1925|Customer#000001925|qZ8m0grVeRJN9h6Bn|0|10-814-492-1424|8891.65|HOUSEHOLD|its sleep among the regular packages. fluffily final depende 1926|Customer#000001926|EMRuUDTA2m0|7|17-203-685-7821|4968.09|AUTOMOBILE|ve the even packages. blithely 1927|Customer#000001927|V4zstahq,OaYcbh2IeSkJfiRO7wtMPXOoV|1|11-876-774-5282|6756.49|AUTOMOBILE|side of the furiously final ideas are at the bold patterns. quickly pending requests thrash fluffily 1928|Customer#000001928|Ce02vQsU97|12|22-812-639-3782|1391.69|MACHINERY|t the carefully final instructions wake fluffily f 1929|Customer#000001929|imHqCzuePilpP5h UA2fwTIJMWVdj|20|30-949-561-2278|2157.13|AUTOMOBILE|osits are slyly across the even requests! busily special platelets cajole carefully 1930|Customer#000001930|kJD6LiynLuutDPKOcgIBUIdUQIWb14oGE jb|18|28-793-928-3242|7909.13|HOUSEHOLD|ular packages snooze blithely carefully express packages. quickly regular dolphins sleep about the slyl 1931|Customer#000001931|DUbNQry9jewVHI6GLNIbX0aIBGoWwsqWabbuXPy|19|29-893-435-8218|2762.06|BUILDING|heodolites. final ideas detect slyly furiously regular dugouts. even pinto beans haggle. slyly express foxes na 1932|Customer#000001932|VhBhkEXBrpSzYgxu1jmIJ3 tVomaDEr|15|25-252-625-7296|-55.92|FURNITURE|fully regular theodolites. quickly express requests may sleep carefully among the furio 1933|Customer#000001933|JGeZTkGT1uwbQa1C,sj|0|10-484-847-9516|4003.10|HOUSEHOLD|es the accounts. close, ironic dolphins wake quickly furiously ir 1934|Customer#000001934|REnXv9yHCBrQTv2i7j645dr5iSY5|10|20-656-890-6862|2038.31|HOUSEHOLD|ons. bold foxes haggle furiously blithely ironic ideas. blithely ironic deposits integrat 1935|Customer#000001935|2SwVYEBhbhWTTWCBsGow0Wu|9|19-674-562-9631|1775.61|HOUSEHOLD|ilent epitaphs wake slyly according to the slyly furious deposits. silent, special instructions past t 1936|Customer#000001936|PGu2Wen188BC|0|10-714-881-1338|8592.20|HOUSEHOLD|y regular packages. blithely even accounts are blithely. carefully regular accounts cajole blithely. quickly bol 1937|Customer#000001937|mXJvkSoAwpg1wVW45|3|13-166-456-2309|-866.47|FURNITURE|equests. 
final, ironic packages haggle: carefully regular packages mold fluffily accord 1938|Customer#000001938|IqaZSFbDxA,8a,xxnV,gHVOT|10|20-789-331-1347|-894.49|AUTOMOBILE|osits affix slow accounts. carefully final ideas according 1939|Customer#000001939|SBEYKDkrUBAt2RNh1z0dtG|0|10-417-450-5341|1306.86|HOUSEHOLD|usly unusual theodolites haggle slyly around the fluffily ironic asymptotes. express instructions cajo 1940|Customer#000001940|DIL GvUsP25WSgsshTdjULCPxy7l|24|34-475-738-8227|6310.63|BUILDING|e the furiously bold dependencies. slyly silent accounts grow carefully according to the 1941|Customer#000001941|,51rsqq9XV|0|10-365-133-1400|9710.06|FURNITURE|fluffily carefully regular foxes. fin 1942|Customer#000001942|5 eTPX2yTp49JPctgc1EeYMUSWm|3|13-500-263-4736|-947.24|MACHINERY| sauternes. quickly express accounts cajole carefully above the regular accounts. pinto beans 1943|Customer#000001943|sH4nc83Lh,6A2xe9ApH6WD|3|13-737-218-4651|5599.71|AUTOMOBILE|ckages above the final, final pinto beans haggle against t 1944|Customer#000001944|LbrsZ4CFBqJZ|14|24-492-922-6990|7279.93|HOUSEHOLD|s play boldly about the slyly iro 1945|Customer#000001945|ghlnLGw i,LV,MAuts8Ii7XE4WaoExoa|10|20-470-267-5365|9880.10|BUILDING|iously bold pinto beans cajole slyly warthogs. silent, special accounts detect evenly across the ironic pl 1946|Customer#000001946|Z3ceD0uA1BJsDmFl4botusq2SBPCJqIqzr9bHQM|20|30-764-188-4293|7851.87|FURNITURE| requests boost fluffily. ideas detect finally. quickly final deposits nag carefully. quick 1947|Customer#000001947|s 1fCYZ4AqqTJ,ZvJsLi5N|20|30-174-661-7603|8385.53|MACHINERY| ought to are furiously alongside of the blithel 1948|Customer#000001948|4aCCe9W1s4,VdYswJWigEOJ2sW6Thh|5|15-227-107-9153|1704.12|HOUSEHOLD|s about the blithely regular deposits cajole blithely 1949|Customer#000001949|fwIfk5BDREkV uapHbKzYrOCWUz|19|29-227-849-7731|6025.80|FURNITURE|nal platelets cajole whithout the silently regular ideas. furiously bold asympto 1950|Customer#000001950|sRX9I79UagRC3 sV9hO0H|6|16-524-582-1728|7698.52|HOUSEHOLD|nal requests. slyly pending pains according 1951|Customer#000001951|NOXYVa52HyF5lmpj9,WR0|0|10-334-921-5346|5002.24|MACHINERY|thely unusual accounts. furiously unusual deposits sleep along the blithely final packages-- f 1952|Customer#000001952|sZvM,CF54wYyQGhSuFVe|18|28-102-243-6615|8620.01|MACHINERY|y express pinto beans sleep carefully carefully regular pinto beans. fluffily final accounts 1953|Customer#000001953|vd2GjYHxhaYoyE0dU46shW|8|18-155-378-1656|4654.64|MACHINERY|ly slow foxes x-ray quickly even pinto beans. slyly permanent packages nag furiously. carefully 1954|Customer#000001954|ec2Cv6JlL3MFSnbBVWrE vljCiPiM6gFr4WE5PW8|22|32-872-341-1949|7575.28|BUILDING|ep furiously about the fluffily regular accounts. 1955|Customer#000001955|INxAk4G6dbQSijuSR,0t|7|17-649-408-5594|1754.99|BUILDING|usly. final packages doze carefully excuses. packages wake slyly according to the slyly ironic pi 1956|Customer#000001956|sOgO,FYO1KNY3Q45sj asSLH33rFk|21|31-426-823-2422|3007.04|HOUSEHOLD|ess platelets. express pinto beans wake furiously according to 1957|Customer#000001957|Hl2JPoWaDAeyGd|21|31-861-837-2140|3366.66|HOUSEHOLD|al platelets. sometimes final accounts 1958|Customer#000001958|P8lsQKRVNyrMTYEgXU7W|4|14-218-444-2269|8425.51|HOUSEHOLD|quests across the accounts need to haggle carefully regular deposits. 
careful accounts boost carefully fin 1959|Customer#000001959|NMDtn0GA,CLrZYGaMDmwb|7|17-992-958-9639|2608.12|AUTOMOBILE|ronic deposits: quickly bold packages along the fluffily regular depths sleep furiously thinly regular deposits 1960|Customer#000001960|DFG0YX 4bZQ40vdnfLc8zJfgdWY4iARmum|1|11-645-941-2149|8663.17|BUILDING|arefully carefully regular pains. carefully unusual ideas at the furio 1961|Customer#000001961|j7 XCftSKhgKscMoPW|23|33-428-682-4988|3918.30|MACHINERY|ut the fluffily regular theodolites. dependencies among the 1962|Customer#000001962|VxLrW,XnARNIdQgq1J |4|14-913-275-9532|6907.64|FURNITURE|os shall have to eat bold theodolites. regular dependencies sublate blithely alongside of the express, 1963|Customer#000001963|WFokQKiIqXjIiH|2|12-406-701-6501|680.33|BUILDING|ously fluffily express theodolites. ironic, final dolphins snooze regular ac 1964|Customer#000001964|xEoAWLcXGm|11|21-651-389-8060|3323.69|HOUSEHOLD|osits. furiously express ideas integrate 1965|Customer#000001965|Zf89ewXnl,RNe36J86yVxppUfr6VXEeKGYUa|15|25-361-492-1713|1223.48|BUILDING|ronic, even waters use along the final ideas. ruthlessly 1966|Customer#000001966|jPv1 UHra5JLALR5Isci5u0636RoAu7t vH|0|10-973-269-8886|1937.72|BUILDING|the blithely even accounts. final deposits cajole around the blithely final packages. 1967|Customer#000001967|F1wAzk2iYcb|14|24-936-670-3499|5250.63|BUILDING|ut the slyly ironic pearls. re 1968|Customer#000001968|dUAE71eduW|24|34-192-617-7717|3322.72|HOUSEHOLD|uriously regular deposits use car 1969|Customer#000001969|F7fcrNnwmFLr2fXyi58|11|21-978-965-1419|6864.72|FURNITURE|ckages sleep carefully. never bold instructions are even, final ideas. fluffily even id 1970|Customer#000001970|V9aSisZGbj8fo|14|24-949-639-9364|5469.10|MACHINERY|grate furiously ironic orbits? final, pending deposits affix carefully express ideas. special, express pack 1971|Customer#000001971|DZsft5O035OEx6ql2BKC3CHI3R5ZtlFgKsMs XZM|12|22-293-654-8027|1205.73|AUTOMOBILE|arefully. express packages are above the slyl 1972|Customer#000001972|0XaaQuagZ1K9E63qlSw5SBk|7|17-991-532-5136|689.12|AUTOMOBILE|ckly. regular, pending pinto beans solve around the blithely exp 1973|Customer#000001973|J8,WbeY81R4j4nAv5CEczBfn3XPh2dIcxnhLtdf|4|14-828-797-8149|7990.18|HOUSEHOLD|dolites affix bold, final requests. blithely special accounts integrate. bold, iron 1974|Customer#000001974|e4NzvGNJLIOkXfof8RF21qDH1O|24|34-740-820-4310|1007.21|AUTOMOBILE| sleep against the quickly ironic sentiments; ironic packages affix above the carefully final 1975|Customer#000001975|XzyjsmoapfpiWPo fn|5|15-174-926-1370|8125.51|MACHINERY|re slyly against the unusual dependencies. slyly 1976|Customer#000001976|M2WWTo3je0NgpIFOe55SvtJwnyxY8jp8Ikms4|15|25-703-881-1012|9548.13|MACHINERY| slyly regular deposits. fluffily fina 1977|Customer#000001977|TMAohw9bEg3eDJH|2|12-534-854-9706|5529.03|BUILDING| carefully bold requests up the final, speci 1978|Customer#000001978|e4sYh5qmltXfpKhnO fQF3E9dZuWXu,C0E|13|23-513-770-1875|7503.87|MACHINERY|ully regular dolphins. blithely express requests are furiously slyly regular accounts-- caref 1979|Customer#000001979|HSGMB5gHS1ieJ58hBBImFA9OxE|14|24-690-108-2317|631.63|MACHINERY|ns alongside of the bold, silent requests x-ray after the blithely silent p 1980|Customer#000001980|rhVLwo9PmcGj8oWJSAIYmcbPf 2lCekjg6V6|6|16-721-559-9819|9772.49|AUTOMOBILE|ckly regular packages. 
ironic packages after the special instructions breach fluffil 1981|Customer#000001981|wctyYavWwznYKAs4|13|23-867-283-2614|3283.90|FURNITURE|ding to the blithely bold deposits. ironic packages detect blithely deposits. regular, specia 1982|Customer#000001982|mpS3z3Av8c89RsaJyiet|12|22-891-773-1026|3567.30|HOUSEHOLD|e blithely courts. even, express re 1983|Customer#000001983|KFNKCUFOyS|0|10-305-434-9284|3640.90|FURNITURE|es sleep blithely regular instructions. regular instructions cajole carefully. slyly regular accounts 1984|Customer#000001984|MAqwYLxOBbMoyAWwvjEZK9QYgRMbhtFkdHbiR|13|23-768-636-1831|8661.08|HOUSEHOLD|y unusual requests. furiously ironic deposits haggle quickly a 1985|Customer#000001985|Cy7CMgihIn6lemMjU2d3SKiOmHXi0TrRZK|0|10-248-352-6397|1800.96|HOUSEHOLD| theodolites wake slyly fluffily regular pinto beans. furiously ironic packages according to t 1986|Customer#000001986|3ugRcAUZuXms2oHREuEsprYLg2u|0|10-971-309-8075|5749.20|FURNITURE|fter the slyly even deposits cajole pending accounts. 1987|Customer#000001987|lWwjammTGaKVKq9npAtfs3|19|29-745-306-2348|796.96|BUILDING|s. blithely even pinto beans boost always about the doggedly bold requests. regular requests use. 1988|Customer#000001988|MdIopEKT9C1|9|19-927-922-5628|5414.64|AUTOMOBILE|ve to are furiously bold theodolites. qu 1989|Customer#000001989|,WMIpzbXk5YrV7ZlH23grs5TeGhEtF|10|20-648-105-6440|9859.77|MACHINERY| unusual foxes wake slyly fluffily pending gifts. bold packages across the slyly 1990|Customer#000001990|M9VbFPNnktfGtPgnf3t1ptNYOzS3eCwlKpte|14|24-543-274-3227|7244.87|HOUSEHOLD|usual asymptotes around the packages haggle 1991|Customer#000001991|REzgRDc,2RF4ayaV3b3gZhIdMti9J6b8b90p|9|19-351-767-3188|3517.27|FURNITURE|nag blithely slyly special deposits? express accounts about the dolphins woul 1992|Customer#000001992|FNQZbT88wzGKhU9|1|11-760-815-8444|9679.43|BUILDING|e fluffily regular ideas haggle past the even requests. regular pinto beans nod acc 1993|Customer#000001993|Yi8QRnZDlVqZfW8SHbw,drKwsPBd6qXjavl|0|10-928-142-7350|1209.74|FURNITURE|regular pinto beans. furiously ironic epitaphs after the 1994|Customer#000001994|TRsalXgbwGxicJe1Ogt4th3BwgmrADqgILXd|1|11-384-723-1000|1971.60|HOUSEHOLD|taphs along the furiously regular dependencies must have to run blithely idle dependencies. slyly fi 1995|Customer#000001995|aIGN3hurONjCDOvfE JLx,5pmtqCPnXvgqV7VNy|2|12-339-453-3201|2674.92|BUILDING|ilent instructions sleep quickly quickly bold gifts-- final deposits grow after the final asymptotes. care 1996|Customer#000001996|SkYj5PWHdXD9|17|27-626-208-5273|9379.16|BUILDING|lyly final accounts are blithely final courts! even 1997|Customer#000001997|D7SUlJ,KUdYu|24|34-724-701-3880|3610.05|FURNITURE|accounts wake furiously. silently quick epitaphs are carefully slyly final pinto bea 1998|Customer#000001998|RpAD0CJxRxC2kjR|14|24-529-154-9925|7080.37|FURNITURE|p slyly alongside of the pains. 1999|Customer#000001999|y8mRnn6pJ0V|2|12-967-439-5391|-117.85|AUTOMOBILE|heodolites. 
furiously ironic excuses boost along the carefully f 2000|Customer#000002000|NabVtyaWao1l9Ss|13|23-122-829-3487|8176.83|AUTOMOBILE|s shall have to sleep quickly citus-7.0.3/src/test/regress/data/events_table.data000066400000000000000000014752771317107136600223430ustar00rootroot0000000000000060,2014-01-11 20:50:24.644039,141,967,903, 60,2014-01-19 02:07:05.295581,690,372,980, 20,2014-01-15 10:47:23.48223,843,510,21, 8,2014-01-18 06:11:47.966397,83,125,890, 8,2014-01-20 06:41:58.110297,415,726,435, 20,2014-01-11 05:46:47.906411,871,799,97, 8,2014-01-19 20:35:40.736028,605,502,859, 8,2014-01-19 09:07:03.159393,425,505,916, 60,2014-01-13 08:22:58.422199,789,865,151, 60,2014-01-17 11:41:13.789586,594,875,963, 20,2014-01-15 16:43:42.811215,442,859,834, 8,2014-01-17 13:34:43.250656,181,340,773, 20,2014-01-14 14:24:37.32708,780,339,411, 60,2014-01-16 14:56:45.688085,61,84,587, 20,2014-01-18 22:50:14.579619,915,194,653, 8,2014-01-21 04:50:18.970073,434,574,908, 20,2014-01-12 23:24:33.67177,455,766,229, 60,2014-01-16 00:15:17.583533,956,389,951, 8,2014-01-11 13:55:09.972465,445,648,799, 60,2014-01-20 07:44:39.013253,880,965,928, 60,2014-01-11 09:38:56.626627,745,674,935, 8,2014-01-20 14:48:21.53554,786,642,890, 20,2014-01-20 06:39:22.310415,715,937,448, 8,2014-01-16 23:20:26.577106,859,170,798, 8,2014-01-13 11:35:36.042404,238,454,738, 60,2014-01-13 05:48:36.617007,740,662,675, 8,2014-01-13 13:42:29.323378,148,111,802, 20,2014-01-17 20:09:36.980268,645,126,725, 8,2014-01-21 03:35:27.726329,564,441,407, 60,2014-01-18 08:29:55.800727,701,727,63, 60,2014-01-21 02:17:12.856401,685,216,602, 60,2014-01-11 18:25:52.370367,450,395,627, 8,2014-01-16 04:37:01.096307,215,762,768, 20,2014-01-15 07:04:36.33863,304,533,348, 60,2014-01-19 04:37:40.336765,68,987,970, 20,2014-01-15 05:18:59.206605,943,702,655, 60,2014-01-15 16:51:24.245461,360,886,916, 60,2014-01-17 19:13:21.072793,120,94,342, 8,2014-01-13 08:26:02.365998,455,404,699, 20,2014-01-18 05:14:22.66766,421,609,194, 20,2014-01-14 00:54:34.862378,241,107,238, 20,2014-01-18 05:09:45.177212,385,740,496, 8,2014-01-14 12:49:05.918317,633,4,226, 20,2014-01-17 19:19:12.189878,383,5,33, 20,2014-01-13 14:06:30.865298,565,623,587, 8,2014-01-12 05:39:20.497935,66,439,919, 20,2014-01-17 18:09:31.159951,622,589,636, 60,2014-01-15 17:31:04.537486,838,103,740, 20,2014-01-17 09:54:38.081888,467,670,636, 8,2014-01-18 12:04:41.132416,508,932,393, 20,2014-01-14 19:15:43.55074,973,193,250, 20,2014-01-17 23:33:34.708515,555,698,960, 20,2014-01-17 20:53:02.668374,658,971,38, 60,2014-01-13 19:55:05.861154,739,112,780, 8,2014-01-13 16:54:01.285621,436,64,996, 60,2014-01-20 12:28:38.710777,223,45,164, 8,2014-01-16 05:43:42.478161,645,830,149, 20,2014-01-16 10:36:30.608999,97,963,528, 20,2014-01-17 02:38:15.691045,718,542,979, 8,2014-01-16 03:19:10.204491,483,734,16, 60,2014-01-13 13:06:26.409726,886,177,968, 60,2014-01-16 22:55:28.547446,79,955,453, 20,2014-01-17 01:45:02.574857,613,521,17, 20,2014-01-18 21:43:27.506033,649,50,710, 60,2014-01-11 00:00:04.886076,96,17,57, 20,2014-01-15 00:22:42.911621,571,195,580, 20,2014-01-12 21:02:26.713058,187,520,36, 60,2014-01-15 20:51:29.131538,753,356,649, 8,2014-01-11 13:36:03.984832,851,529,392, 60,2014-01-15 09:28:29.079055,714,695,665, 20,2014-01-12 20:05:51.799198,426,23,695, 8,2014-01-14 18:30:38.847211,935,916,391, 20,2014-01-12 08:38:14.256686,179,229,31, 20,2014-01-16 12:54:57.717515,934,510,167, 8,2014-01-11 07:49:51.037089,440,323,596, 8,2014-01-15 02:44:45.121984,8,379,30, 60,2014-01-17 22:34:18.21587,685,238,838, 20,2014-01-18 
05:59:22.19704,459,251,924, 8,2014-01-20 00:15:49.659471,500,428,273, 8,2014-01-14 02:28:56.297757,33,631,116, 20,2014-01-15 12:04:03.329874,75,617,939, 8,2014-01-13 13:31:33.210629,104,115,122, 20,2014-01-10 20:02:31.006272,30,934,489, 20,2014-01-12 02:57:05.998248,459,932,965, 60,2014-01-16 13:26:39.071783,56,360,778, 20,2014-01-13 16:56:32.291893,498,856,828, 8,2014-01-11 09:25:44.709026,729,163,959, 60,2014-01-11 13:10:21.550364,668,317,766, 20,2014-01-19 07:33:02.900893,701,9,511, 60,2014-01-17 16:04:00.400071,747,393,311, 60,2014-01-16 20:29:31.754854,835,355,192, 8,2014-01-11 14:39:29.311037,289,109,621, 20,2014-01-13 08:59:28.947517,446,73,280, 20,2014-01-12 16:14:34.33013,903,213,823, 20,2014-01-19 16:22:56.817071,10,844,835, 20,2014-01-13 12:59:33.833593,817,336,636, 60,2014-01-16 20:37:17.241751,644,603,344, 20,2014-01-11 07:25:23.530129,392,961,746, 20,2014-01-18 13:51:02.96555,722,675,514, 20,2014-01-17 14:13:21.226583,174,952,370, 8,2014-01-15 20:53:52.609603,674,493,725, 8,2014-01-20 13:56:54.764748,747,799,301, 8,2014-01-11 02:44:00.074214,157,666,623, 20,2014-01-17 09:32:06.866291,162,601,542, 20,2014-01-15 20:51:52.482682,245,591,116, 8,2014-01-11 14:33:51.111302,849,957,861, 8,2014-01-11 06:16:51.988275,556,142,689, 60,2014-01-12 13:26:10.698551,671,223,361, 60,2014-01-19 00:33:13.308761,714,457,675, 60,2014-01-20 10:32:41.648165,243,991,947, 20,2014-01-15 19:55:06.996308,467,392,780, 20,2014-01-13 06:37:16.638636,627,587,793, 8,2014-01-12 18:04:14.858794,41,206,974, 60,2014-01-15 19:57:38.00258,730,568,785, 8,2014-01-14 13:34:22.636884,658,928,790, 8,2014-01-18 11:30:53.930576,215,222,183, 20,2014-01-18 16:54:10.294474,321,990,282, 20,2014-01-15 03:00:07.34591,41,358,702, 60,2014-01-19 04:41:15.480941,20,19,801, 20,2014-01-16 18:27:13.195366,103,822,138, 20,2014-01-11 13:04:07.74598,650,602,161, 60,2014-01-14 19:10:47.236214,995,9,943, 20,2014-01-17 13:06:42.506403,797,531,482, 20,2014-01-14 02:03:36.693497,993,584,366, 20,2014-01-16 15:25:21.566343,523,5,398, 8,2014-01-15 23:29:39.323894,123,572,960, 20,2014-01-16 19:03:10.52709,7,889,321, 60,2014-01-12 06:02:38.808094,939,981,568, 60,2014-01-16 10:55:02.854022,39,437,144, 20,2014-01-14 02:54:13.49264,708,14,827, 60,2014-01-19 00:16:00.035097,173,63,428, 8,2014-01-11 01:48:55.463626,151,485,200, 60,2014-01-13 10:51:08.257806,746,730,713, 60,2014-01-19 07:00:00.10931,825,287,265, 20,2014-01-17 15:21:02.329917,993,874,321, 20,2014-01-18 11:43:00.740488,187,411,364, 60,2014-01-20 01:33:51.220613,4,477,253, 8,2014-01-18 01:37:54.31861,216,674,750, 20,2014-01-20 05:09:11.43904,292,19,582, 20,2014-01-17 20:07:04.529374,430,844,999, 8,2014-01-17 06:10:35.966774,18,293,554, 8,2014-01-14 19:04:18.435348,992,293,292, 20,2014-01-20 06:44:21.16801,265,43,831, 60,2014-01-19 04:14:50.825568,228,757,431, 60,2014-01-19 19:01:56.437929,572,134,21, 8,2014-01-13 14:18:43.804895,566,680,658, 20,2014-01-16 09:45:44.756145,136,481,435, 20,2014-01-17 05:56:06.732402,170,719,424, 60,2014-01-17 21:18:51.150805,822,816,739, 60,2014-01-14 08:27:00.237503,437,76,202, 60,2014-01-12 18:23:19.927769,39,654,384, 20,2014-01-18 14:22:58.896785,458,565,632, 20,2014-01-18 07:37:47.473717,304,369,156, 60,2014-01-19 11:30:02.434591,66,521,342, 8,2014-01-11 10:26:35.590282,242,107,648, 20,2014-01-13 17:03:09.040059,216,526,518, 20,2014-01-14 04:59:41.758484,756,18,959, 60,2014-01-17 09:29:46.117792,332,659,964, 20,2014-01-15 03:05:47.848153,141,529,381, 20,2014-01-19 19:54:44.612506,519,448,847, 20,2014-01-20 16:23:59.61043,853,495,120, 60,2014-01-12 
21:21:47.88325,869,659,19, 8,2014-01-20 01:43:40.076552,748,630,481, 60,2014-01-12 21:15:07.868236,16,429,382, 60,2014-01-10 22:21:47.99256,746,982,377, 8,2014-01-16 11:04:42.406468,364,661,684, 8,2014-01-20 12:58:08.608725,95,850,849, 60,2014-01-20 03:55:39.213592,938,913,46, 20,2014-01-13 06:42:36.725078,576,962,742, 8,2014-01-19 12:07:20.047764,923,278,285, 20,2014-01-16 18:02:43.742967,519,698,629, 60,2014-01-19 16:53:12.691852,99,985,998, 60,2014-01-13 01:11:38.483112,705,32,387, 60,2014-01-15 18:47:04.911396,526,262,614, 60,2014-01-17 15:08:03.51742,382,940,948, 60,2014-01-11 14:13:34.921041,561,439,32, 8,2014-01-18 13:05:48.716291,671,190,93, 8,2014-01-12 18:53:48.273983,692,697,28, 20,2014-01-18 00:09:41.653444,704,646,525, 20,2014-01-15 04:24:39.867515,41,166,700, 60,2014-01-16 07:20:48.511487,234,532,108, 8,2014-01-19 22:33:01.581631,470,706,34, 60,2014-01-12 12:47:38.7643,710,264,376, 20,2014-01-13 08:58:35.985204,538,374,592, 20,2014-01-18 04:03:04.016222,569,872,74, 20,2014-01-13 03:14:14.355002,32,801,80, 20,2014-01-16 06:01:45.025264,368,263,117, 8,2014-01-11 03:02:45.774706,365,860,460, 60,2014-01-19 16:44:00.472793,814,509,922, 8,2014-01-20 13:07:32.873417,434,649,719, 8,2014-01-20 02:57:30.387213,492,15,820, 8,2014-01-19 03:08:00.083224,82,232,605, 60,2014-01-12 04:29:20.757086,139,45,472, 60,2014-01-18 22:41:10.463765,296,34,543, 8,2014-01-21 04:23:07.951879,462,728,453, 60,2014-01-12 06:51:08.749646,267,350,450, 8,2014-01-14 03:45:52.870651,562,336,896, 20,2014-01-20 11:21:16.560604,364,835,726, 20,2014-01-11 04:46:47.963238,466,330,431, 60,2014-01-16 14:28:29.595729,715,192,168, 8,2014-01-18 17:28:36.608787,772,534,214, 60,2014-01-17 02:49:31.706623,533,917,854, 60,2014-01-15 01:21:42.288001,51,127,971, 60,2014-01-20 22:40:15.091899,84,106,336, 20,2014-01-11 15:36:36.618019,148,213,373, 60,2014-01-11 10:29:45.805421,527,430,688, 60,2014-01-11 06:53:50.013359,934,525,115, 8,2014-01-19 08:42:25.334309,454,865,783, 20,2014-01-13 09:23:34.079405,392,995,940, 8,2014-01-18 11:03:31.666803,873,160,409, 8,2014-01-13 07:07:05.201824,629,875,999, 8,2014-01-18 20:44:22.590893,826,795,441, 20,2014-01-17 03:36:33.248433,635,742,32, 20,2014-01-14 23:54:43.966544,858,293,445, 60,2014-01-10 23:42:58.576096,281,457,845, 8,2014-01-14 01:39:37.264655,255,523,799, 60,2014-01-17 07:08:58.321546,654,928,587, 8,2014-01-16 09:44:43.601359,738,822,221, 60,2014-01-14 08:42:23.039361,504,297,764, 60,2014-01-15 17:52:58.794339,24,797,957, 60,2014-01-15 16:52:16.475196,473,248,72, 8,2014-01-13 05:39:53.426993,589,42,39, 60,2014-01-13 15:00:58.877562,512,212,668, 60,2014-01-17 01:21:37.232282,990,359,821, 8,2014-01-10 22:21:03.890757,156,163,513, 60,2014-01-13 13:24:06.829442,286,333,682, 60,2014-01-18 12:12:45.981928,227,280,360, 60,2014-01-14 06:06:56.761408,337,140,852, 20,2014-01-12 18:45:23.390466,466,646,620, 20,2014-01-18 20:59:33.945584,87,992,219, 8,2014-01-20 00:35:26.357557,935,746,728, 20,2014-01-20 16:13:59.999252,230,528,190, 60,2014-01-14 17:49:05.652208,112,242,463, 20,2014-01-13 19:57:08.645558,212,632,104, 20,2014-01-20 08:54:15.091152,406,746,441, 60,2014-01-15 13:25:42.270227,984,338,769, 8,2014-01-14 10:26:54.450979,644,917,121, 20,2014-01-20 19:48:05.104511,980,250,36, 20,2014-01-13 16:08:07.604536,206,643,210, 20,2014-01-16 23:50:28.530804,785,753,974, 20,2014-01-18 00:51:36.771733,265,500,154, 60,2014-01-16 03:15:12.80678,672,17,408, 20,2014-01-14 14:34:51.121696,366,656,7, 60,2014-01-13 22:28:10.020167,685,942,676, 20,2014-01-20 07:09:56.773324,450,537,965, 20,2014-01-14 
18:17:49.698211,426,434,619, 60,2014-01-17 04:07:47.285242,529,245,332, 8,2014-01-16 08:18:55.094869,527,239,265, 8,2014-01-20 08:02:33.299571,819,268,808, 8,2014-01-20 16:50:10.324602,709,562,571, 60,2014-01-10 20:11:53.889207,574,364,328, 8,2014-01-14 18:54:49.774767,704,718,627, 8,2014-01-12 16:30:03.751596,523,375,182, 20,2014-01-13 15:12:52.76677,795,627,654, 60,2014-01-21 00:16:27.007048,415,595,798, 60,2014-01-12 18:51:07.642772,72,469,640, 60,2014-01-16 08:36:59.596631,147,200,729, 20,2014-01-18 06:29:12.989396,378,454,820, 60,2014-01-16 04:58:04.404181,835,742,86, 20,2014-01-18 07:22:22.987096,988,784,878, 20,2014-01-15 21:28:46.93498,490,604,700, 20,2014-01-14 23:33:30.761739,381,681,566, 8,2014-01-17 17:36:22.986349,893,947,851, 8,2014-01-19 19:17:52.587189,621,339,132, 8,2014-01-17 23:30:39.407297,12,630,672, 20,2014-01-16 20:30:38.077501,327,622,711, 8,2014-01-14 02:43:34.857416,123,521,346, 60,2014-01-11 03:57:33.858695,680,861,73, 60,2014-01-16 10:18:43.182431,744,445,283, 8,2014-01-16 22:51:42.461952,866,690,803, 20,2014-01-17 07:48:02.389499,722,915,518, 20,2014-01-13 05:10:19.954165,101,690,746, 20,2014-01-11 20:06:55.268732,989,952,597, 8,2014-01-21 02:22:53.511195,66,509,349, 60,2014-01-16 07:38:29.974331,416,383,262, 8,2014-01-10 21:16:52.042056,496,964,832, 20,2014-01-14 14:40:43.209406,175,293,832, 60,2014-01-12 05:46:17.259572,290,118,795, 60,2014-01-16 09:35:47.136926,546,190,780, 8,2014-01-13 16:43:16.509396,655,75,249, 8,2014-01-11 16:36:27.584594,959,388,491, 8,2014-01-16 09:47:41.026133,451,931,382, 20,2014-01-17 15:38:06.284162,47,699,480, 60,2014-01-13 13:06:31.33619,655,389,489, 60,2014-01-19 05:00:33.793322,8,618,667, 60,2014-01-17 09:54:33.29163,283,633,74, 8,2014-01-15 11:57:38.978961,297,287,722, 20,2014-01-14 07:37:33.389953,48,664,920, 60,2014-01-14 10:23:46.281025,902,16,152, 20,2014-01-20 20:55:43.383561,481,816,693, 8,2014-01-11 08:59:56.37705,661,829,911, 60,2014-01-19 11:52:33.216005,353,342,233, 60,2014-01-14 14:29:14.1453,20,233,283, 60,2014-01-18 06:36:19.363399,56,435,896, 60,2014-01-18 01:10:25.803194,989,397,648, 60,2014-01-11 07:59:53.553016,331,800,728, 60,2014-01-13 21:06:57.440899,459,234,541, 20,2014-01-10 21:54:00.660609,793,754,95, 20,2014-01-11 15:57:27.41171,151,945,888, 20,2014-01-19 11:25:40.62333,934,139,146, 8,2014-01-17 00:45:43.122561,547,450,913, 20,2014-01-18 03:45:29.801209,282,996,329, 20,2014-01-11 10:36:00.577495,152,698,146, 20,2014-01-18 00:52:38.391714,95,138,821, 63,2014-01-18 00:08:23.312404,550,790,789, 1,2014-01-16 22:14:30.552245,527,196,64, 25,2014-01-18 02:09:30.43377,645,144,200, 63,2014-01-11 08:49:06.522229,729,799,501, 15,2014-01-18 08:00:47.811818,856,257,1000, 1,2014-01-13 05:45:17.571115,430,87,158, 82,2014-01-14 05:32:23.031625,847,722,113, 25,2014-01-19 04:37:15.396411,471,602,653, 25,2014-01-18 19:32:58.597248,385,486,206, 25,2014-01-21 01:10:29.315788,436,954,288, 63,2014-01-11 11:43:46.73302,111,359,799, 15,2014-01-16 18:33:32.39057,434,193,695, 15,2014-01-17 05:05:02.607418,225,551,896, 15,2014-01-16 03:41:25.711981,441,771,432, 63,2014-01-20 06:11:05.780523,622,278,326, 63,2014-01-20 19:28:48.888443,605,838,261, 82,2014-01-15 18:37:09.095544,412,352,73, 82,2014-01-20 19:11:02.157573,153,984,750, 15,2014-01-19 01:21:22.104449,398,797,824, 25,2014-01-19 13:06:23.241263,666,107,936, 15,2014-01-17 19:47:21.520972,118,680,456, 82,2014-01-15 20:31:47.907643,891,488,475, 1,2014-01-20 01:06:16.794278,956,354,713, 1,2014-01-20 20:54:18.962291,663,408,857, 63,2014-01-15 22:25:48.568252,998,461,643, 
1,2014-01-20 21:03:44.205989,74,902,865, 1,2014-01-19 02:19:59.585621,799,467,435, 1,2014-01-11 17:11:31.691232,578,462,655, 1,2014-01-17 18:49:14.007199,654,78,447, 82,2014-01-19 16:56:00.163535,996,190,26, 1,2014-01-18 22:04:10.082946,698,100,851, 63,2014-01-14 12:57:37.319603,123,558,911, 15,2014-01-15 09:10:30.71578,568,14,997, 15,2014-01-15 18:13:40.516716,628,608,623, 82,2014-01-15 01:46:43.841832,563,43,617, 15,2014-01-12 11:11:18.527598,562,888,18, 82,2014-01-18 03:58:58.08783,639,696,674, 25,2014-01-18 11:19:06.873458,656,394,26, 25,2014-01-20 19:48:33.924428,215,555,704, 1,2014-01-15 17:31:56.685497,134,246,256, 25,2014-01-18 06:29:36.189666,309,28,933, 25,2014-01-11 01:32:20.657448,757,416,631, 25,2014-01-11 06:05:29.076067,252,506,256, 63,2014-01-14 05:34:38.797083,779,616,136, 82,2014-01-16 09:13:46.36943,138,273,171, 15,2014-01-20 16:16:34.857009,773,471,32, 25,2014-01-13 19:03:27.685945,665,937,572, 25,2014-01-10 21:50:55.465393,77,233,486, 63,2014-01-20 05:27:37.014582,102,910,524, 25,2014-01-11 14:24:49.790394,910,267,195, 25,2014-01-19 14:57:18.706655,262,99,494, 25,2014-01-16 19:14:58.535973,806,133,669, 25,2014-01-16 14:56:37.698455,300,76,188, 63,2014-01-18 10:03:35.500933,679,21,867, 15,2014-01-16 10:09:17.498264,891,446,934, 82,2014-01-11 07:22:26.266707,402,706,438, 1,2014-01-18 01:07:19.706922,828,683,139, 15,2014-01-14 06:29:17.083884,841,768,173, 15,2014-01-12 04:33:57.95794,881,943,236, 82,2014-01-14 13:56:33.714121,325,596,970, 1,2014-01-12 17:25:17.24742,362,851,574, 25,2014-01-20 06:38:08.040885,472,717,21, 63,2014-01-18 06:54:11.033724,678,60,527, 82,2014-01-17 06:35:47.9632,434,537,208, 82,2014-01-14 18:51:48.55802,815,497,928, 82,2014-01-12 02:40:54.875976,646,742,276, 1,2014-01-18 21:47:06.491217,919,99,452, 1,2014-01-11 16:50:46.64585,131,253,525, 25,2014-01-19 18:00:01.749434,960,24,373, 15,2014-01-18 11:35:40.415645,851,473,925, 82,2014-01-16 14:22:43.331347,987,594,724, 15,2014-01-16 18:29:37.939099,6,273,704, 15,2014-01-18 17:08:01.073093,471,213,72, 25,2014-01-17 00:28:12.407832,267,973,622, 1,2014-01-20 04:04:16.736601,283,354,282, 25,2014-01-13 20:21:47.442523,475,122,99, 63,2014-01-16 10:44:47.264842,294,650,461, 1,2014-01-12 17:07:44.422546,236,531,452, 1,2014-01-13 22:12:42.907916,560,381,926, 63,2014-01-15 10:12:24.279424,310,991,544, 15,2014-01-13 11:32:34.212941,895,250,936, 25,2014-01-12 07:10:01.61457,246,331,389, 63,2014-01-10 23:27:22.815397,704,96,371, 1,2014-01-19 06:29:11.911396,813,852,355, 82,2014-01-19 21:13:37.115503,79,967,839, 1,2014-01-16 13:36:40.31366,569,560,521, 15,2014-01-19 17:51:38.178522,452,923,534, 82,2014-01-16 16:20:56.822426,838,726,708, 1,2014-01-20 00:05:57.397965,810,439,462, 15,2014-01-21 02:25:36.136461,350,936,166, 82,2014-01-20 10:17:30.536547,967,930,949, 63,2014-01-11 11:31:14.645384,339,238,531, 82,2014-01-20 03:03:44.177766,938,352,687, 82,2014-01-17 11:11:41.57069,496,831,967, 82,2014-01-17 22:07:02.609003,845,396,492, 63,2014-01-13 15:55:32.735785,484,873,987, 63,2014-01-18 17:52:36.446666,597,263,834, 25,2014-01-15 13:54:09.10022,674,9,711, 63,2014-01-14 12:46:19.381635,313,117,236, 15,2014-01-17 05:52:38.1961,220,976,601, 82,2014-01-12 19:29:49.515866,500,814,40, 82,2014-01-20 07:09:02.713401,553,959,708, 63,2014-01-12 18:22:16.135618,120,329,912, 82,2014-01-20 16:37:50.588958,192,768,657, 1,2014-01-16 01:37:15.121233,483,26,529, 82,2014-01-11 16:26:32.872219,504,208,678, 82,2014-01-13 06:59:38.031481,8,55,439, 25,2014-01-11 06:22:02.386075,645,861,380, 15,2014-01-13 13:34:17.294766,86,562,59, 
25,2014-01-16 09:12:20.939816,364,245,81, 15,2014-01-15 20:34:26.665499,473,267,806, 1,2014-01-16 05:06:51.507705,455,28,552, 82,2014-01-17 20:22:22.554385,283,678,801, 15,2014-01-16 00:01:49.481315,990,944,87, 25,2014-01-14 05:36:03.41952,632,941,466, 1,2014-01-16 11:35:59.670308,100,759,396, 25,2014-01-11 07:38:29.794975,129,311,898, 15,2014-01-12 17:27:41.598042,162,264,987, 1,2014-01-11 21:56:56.492734,632,366,10, 1,2014-01-20 11:44:27.19294,825,407,142, 25,2014-01-12 13:53:17.734503,464,39,539, 25,2014-01-11 02:14:27.0297,609,861,893, 63,2014-01-21 03:15:41.838743,353,171,240, 1,2014-01-11 10:57:01.912268,638,87,312, 82,2014-01-17 17:26:08.600391,172,586,289, 1,2014-01-17 19:22:44.447746,974,411,670, 25,2014-01-14 06:52:34.648054,810,396,418, 25,2014-01-15 05:18:45.047056,804,403,306, 1,2014-01-12 03:16:53.547966,854,743,862, 82,2014-01-17 23:38:54.030108,684,86,181, 15,2014-01-11 05:11:23.243156,267,372,148, 25,2014-01-14 02:46:43.063831,300,763,396, 15,2014-01-17 00:47:56.743509,815,178,884, 82,2014-01-13 03:33:39.378774,417,755,236, 1,2014-01-13 13:24:33.65279,291,251,343, 63,2014-01-11 20:25:11.864743,319,407,457, 63,2014-01-14 00:00:12.250993,637,159,470, 15,2014-01-16 00:24:11.684271,19,938,703, 25,2014-01-12 06:47:14.250818,202,843,140, 1,2014-01-16 17:34:29.545758,503,57,695, 15,2014-01-11 03:36:32.624085,88,585,986, 25,2014-01-17 07:21:40.916316,851,336,473, 15,2014-01-11 16:41:21.053883,274,502,388, 82,2014-01-18 03:58:55.178891,481,759,889, 63,2014-01-12 01:23:30.39763,667,904,752, 82,2014-01-15 02:17:24.473403,211,147,525, 82,2014-01-13 09:34:54.849199,340,74,371, 82,2014-01-12 13:02:00.193025,161,211,842, 82,2014-01-16 23:45:06.071445,72,444,997, 82,2014-01-14 11:31:51.341932,546,920,163, 82,2014-01-11 18:46:27.385964,657,197,777, 25,2014-01-18 17:38:23.805948,21,741,136, 82,2014-01-14 17:46:18.371633,926,787,301, 25,2014-01-11 16:02:09.224706,438,502,662, 25,2014-01-19 08:35:25.718635,927,292,653, 15,2014-01-11 05:12:26.972023,996,165,405, 25,2014-01-18 15:24:53.672452,932,894,666, 1,2014-01-12 09:28:00.366689,471,245,174, 25,2014-01-15 14:31:12.019079,890,823,737, 1,2014-01-19 22:41:47.220418,347,379,21, 82,2014-01-19 13:06:54.396797,624,194,64, 82,2014-01-15 23:42:35.262654,464,74,220, 1,2014-01-12 19:28:30.284249,924,938,250, 82,2014-01-15 07:54:51.140306,597,233,603, 63,2014-01-18 07:16:14.641428,589,713,987, 1,2014-01-15 12:53:03.937039,624,825,842, 1,2014-01-16 08:20:03.005049,906,596,105, 82,2014-01-11 01:16:26.89284,975,887,83, 25,2014-01-20 17:17:15.621728,807,413,221, 1,2014-01-17 19:07:17.255866,820,340,571, 1,2014-01-16 22:50:56.438598,104,769,185, 15,2014-01-21 00:53:48.245814,14,562,153, 82,2014-01-13 20:28:58.172601,893,341,279, 82,2014-01-17 19:32:17.492481,943,634,174, 1,2014-01-17 22:52:43.424704,719,843,915, 1,2014-01-15 01:52:28.570231,868,10,811, 1,2014-01-11 15:49:41.965885,219,603,243, 15,2014-01-20 12:27:38.273903,629,356,431, 82,2014-01-16 18:54:28.763256,854,275,574, 25,2014-01-17 19:34:48.03733,237,743,453, 82,2014-01-13 17:59:29.616255,874,900,974, 15,2014-01-17 17:40:56.14964,780,452,970, 63,2014-01-15 07:13:11.843697,274,793,285, 63,2014-01-17 15:45:47.987888,737,649,14, 82,2014-01-18 13:43:05.374346,292,797,21, 25,2014-01-13 09:48:37.562331,246,521,163, 82,2014-01-18 00:58:14.960329,638,648,568, 1,2014-01-15 23:07:59.047218,594,779,885, 1,2014-01-14 23:16:37.92902,645,371,186, 63,2014-01-12 09:29:26.979408,129,607,618, 15,2014-01-14 15:49:46.267635,888,824,6, 15,2014-01-13 06:23:32.326236,355,324,776, 25,2014-01-17 
13:12:02.242063,523,913,550, 15,2014-01-16 15:18:16.552303,439,805,490, 82,2014-01-17 18:18:23.466542,122,189,637, 15,2014-01-14 14:28:16.883909,648,66,703, 1,2014-01-10 22:11:20.489342,919,918,467, 1,2014-01-12 20:38:26.472009,524,997,625, 25,2014-01-14 19:44:43.776749,178,43,232, 63,2014-01-20 19:28:36.11107,567,120,81, 1,2014-01-19 19:45:43.727876,518,933,603, 15,2014-01-20 22:35:40.215348,754,97,892, 82,2014-01-20 14:22:24.357303,243,679,701, 15,2014-01-12 10:14:41.900477,128,61,75, 25,2014-01-17 12:07:57.708248,740,804,664, 63,2014-01-17 07:15:07.782007,257,402,164, 15,2014-01-16 16:07:10.471128,414,50,498, 1,2014-01-18 07:57:39.674133,304,755,155, 63,2014-01-16 13:42:46.056329,64,956,825, 25,2014-01-12 05:01:39.234384,57,910,766, 63,2014-01-14 21:32:27.711882,604,549,913, 15,2014-01-19 11:42:15.672583,481,732,649, 25,2014-01-19 02:42:35.384024,298,231,929, 15,2014-01-19 08:45:39.555579,278,946,397, 1,2014-01-15 21:28:03.660472,326,739,806, 63,2014-01-16 10:25:40.758789,200,404,768, 15,2014-01-11 12:34:17.11791,539,868,179, 63,2014-01-12 16:26:18.620801,378,45,118, 82,2014-01-11 03:33:39.806006,659,42,942, 1,2014-01-15 15:50:55.046931,443,623,389, 63,2014-01-14 05:55:45.600629,47,465,678, 25,2014-01-14 23:23:26.073642,200,860,1, 82,2014-01-18 02:14:27.373167,903,513,301, 25,2014-01-20 23:07:47.842692,147,830,805, 63,2014-01-20 18:41:42.625945,448,96,903, 82,2014-01-14 14:32:50.840128,33,347,238, 1,2014-01-14 07:36:04.726601,834,71,845, 1,2014-01-20 20:53:03.115706,928,191,850, 82,2014-01-16 15:11:17.312138,168,656,941, 25,2014-01-18 07:20:48.50335,209,769,196, 82,2014-01-20 10:21:39.226776,960,227,141, 1,2014-01-15 04:57:01.040014,900,729,3, 25,2014-01-17 23:56:28.719117,114,579,984, 63,2014-01-19 18:44:03.584078,655,973,945, 1,2014-01-16 19:11:42.94091,868,16,624, 1,2014-01-14 06:04:26.427366,969,908,605, 63,2014-01-15 19:59:11.366504,892,210,344, 25,2014-01-12 05:18:53.412038,624,695,430, 82,2014-01-11 08:02:06.101499,935,494,611, 63,2014-01-11 03:41:57.422833,184,185,331, 82,2014-01-13 14:20:32.646422,659,442,359, 1,2014-01-15 09:34:33.813381,885,155,37, 1,2014-01-19 19:24:13.095418,695,170,206, 15,2014-01-11 11:03:08.030865,779,36,208, 15,2014-01-13 12:20:13.36896,353,521,194, 63,2014-01-14 10:52:16.755889,779,171,427, 82,2014-01-17 01:28:48.789654,400,42,612, 1,2014-01-14 04:54:30.486871,870,192,583, 25,2014-01-16 07:18:35.377109,422,655,274, 1,2014-01-17 09:02:28.59566,14,231,253, 25,2014-01-19 00:45:25.534221,332,613,992, 82,2014-01-19 17:14:20.977738,245,350,113, 82,2014-01-11 02:25:54.669302,871,330,268, 63,2014-01-15 20:59:52.907388,713,177,745, 1,2014-01-19 10:22:08.820429,22,411,822, 25,2014-01-21 01:07:37.295666,972,867,274, 82,2014-01-19 15:32:43.747516,982,251,444, 63,2014-01-12 11:58:13.547449,443,303,297, 1,2014-01-20 16:00:40.411372,400,756,924, 25,2014-01-15 00:44:01.059654,356,33,207, 1,2014-01-19 23:19:02.050799,698,767,677, 1,2014-01-19 20:22:19.638147,149,864,321, 63,2014-01-19 09:41:02.100086,255,227,659, 82,2014-01-16 17:15:30.769917,807,250,360, 63,2014-01-18 09:06:23.222645,287,226,439, 82,2014-01-14 22:52:45.040997,192,8,19, 82,2014-01-20 03:19:57.197281,532,381,786, 15,2014-01-12 23:05:34.589149,865,583,356, 82,2014-01-16 08:11:38.453034,368,128,588, 63,2014-01-20 15:22:03.299199,409,555,893, 1,2014-01-13 06:47:32.011983,998,617,184, 63,2014-01-19 02:32:11.099456,774,998,166, 1,2014-01-14 18:56:37.11258,419,566,529, 25,2014-01-11 20:11:45.107401,629,755,83, 1,2014-01-19 17:35:19.130322,783,984,538, 1,2014-01-17 11:16:50.481959,273,133,513, 25,2014-01-15 
11:04:01.86329,788,614,758, 25,2014-01-15 13:04:07.919976,676,831,770, 15,2014-01-20 20:11:20.96883,989,74,443, 1,2014-01-20 22:22:37.240399,438,717,447, 63,2014-01-11 16:06:36.515635,145,92,645, 63,2014-01-18 14:56:46.503051,217,459,857, 63,2014-01-19 09:36:58.218136,517,515,57, 1,2014-01-11 22:32:31.185356,581,882,696, 82,2014-01-13 05:56:39.410858,379,778,850, 82,2014-01-17 13:59:07.038985,685,5,528, 63,2014-01-11 17:40:08.481022,776,517,453, 15,2014-01-11 15:29:23.158374,44,307,70, 1,2014-01-19 05:57:20.586434,910,444,821, 82,2014-01-11 03:40:48.892394,116,741,466, 63,2014-01-15 20:13:24.218447,385,100,1, 15,2014-01-17 23:16:22.637233,377,728,160, 82,2014-01-20 04:03:08.530541,487,791,611, 1,2014-01-13 23:54:26.318533,583,978,899, 15,2014-01-13 10:31:53.407569,75,793,603, 63,2014-01-17 07:09:31.753185,672,47,2, 25,2014-01-18 02:47:11.35953,515,778,844, 25,2014-01-12 07:51:50.604851,811,622,244, 63,2014-01-19 10:15:06.342334,643,305,913, 82,2014-01-13 04:58:49.812565,688,990,875, 25,2014-01-11 17:13:53.90405,890,216,718, 1,2014-01-11 11:02:38.354318,240,853,887, 1,2014-01-11 01:31:00.912021,978,360,749, 15,2014-01-15 16:10:31.017049,778,543,134, 63,2014-01-12 11:14:23.461719,845,14,555, 25,2014-01-19 23:06:20.042761,350,68,608, 63,2014-01-11 21:27:21.499009,851,803,805, 15,2014-01-17 02:18:25.325427,923,277,398, 25,2014-01-14 06:10:27.962738,48,376,628, 15,2014-01-11 11:38:42.46784,475,831,854, 1,2014-01-16 18:41:02.565825,604,62,930, 1,2014-01-15 02:17:04.478793,103,317,402, 63,2014-01-19 06:35:28.971309,360,146,327, 82,2014-01-14 22:18:00.784381,521,609,887, 82,2014-01-16 04:49:35.664149,27,400,652, 15,2014-01-11 06:32:08.382168,37,772,18, 15,2014-01-11 06:17:07.823367,416,164,982, 1,2014-01-17 02:29:44.145171,813,383,986, 63,2014-01-12 02:01:31.540542,987,53,682, 1,2014-01-19 16:14:28.409801,415,783,124, 63,2014-01-17 10:10:33.037565,726,822,25, 82,2014-01-17 02:14:55.758989,20,711,40, 25,2014-01-16 09:30:51.047034,899,167,594, 1,2014-01-16 08:13:41.568104,592,837,745, 15,2014-01-20 06:09:22.077522,450,917,396, 15,2014-01-19 00:02:44.454603,164,207,917, 15,2014-01-12 09:23:13.321709,479,391,294, 82,2014-01-17 02:56:33.437052,271,750,132, 63,2014-01-20 11:54:35.059873,483,843,515, 25,2014-01-20 23:38:19.664044,145,503,101, 82,2014-01-19 11:55:23.249617,307,856,2, 25,2014-01-10 23:08:28.963923,918,819,572, 15,2014-01-11 04:40:58.01878,432,174,18, 25,2014-01-19 17:26:24.162057,721,39,624, 82,2014-01-15 19:18:59.980972,442,398,846, 1,2014-01-12 19:55:21.480499,73,304,467, 15,2014-01-18 10:32:44.204819,967,754,233, 25,2014-01-16 20:46:21.479981,390,627,562, 25,2014-01-19 02:13:46.805926,802,140,925, 15,2014-01-11 10:43:12.167556,759,521,860, 25,2014-01-17 12:25:03.94824,938,759,169, 25,2014-01-14 14:54:49.372171,631,510,213, 15,2014-01-15 17:00:16.646349,726,632,493, 25,2014-01-15 13:00:32.919549,752,330,973, 82,2014-01-18 17:12:50.156551,48,172,250, 82,2014-01-21 01:49:52.310498,48,911,300, 15,2014-01-15 23:32:41.301717,163,147,906, 1,2014-01-19 03:29:57.979918,869,858,877, 15,2014-01-16 22:19:36.455669,775,494,305, 15,2014-01-17 05:34:12.842258,891,818,758, 15,2014-01-17 13:44:26.389719,905,673,754, 63,2014-01-13 02:30:09.493233,381,375,758, 25,2014-01-13 01:49:08.601666,247,571,269, 25,2014-01-12 17:15:17.436752,89,232,983, 82,2014-01-18 14:43:51.061756,627,8,154, 82,2014-01-12 01:58:30.679188,580,996,93, 63,2014-01-20 21:18:01.891775,894,42,911, 82,2014-01-20 04:07:04.383466,707,97,899, 15,2014-01-18 08:55:04.116659,157,696,508, 1,2014-01-20 03:12:36.951648,664,39,134, 1,2014-01-19 
21:45:24.047509,226,860,581, 63,2014-01-16 14:50:27.366276,983,432,740, 82,2014-01-20 06:21:05.915571,888,864,891, 25,2014-01-20 06:26:22.06629,756,698,566, 25,2014-01-15 02:16:51.528333,580,819,622, 25,2014-01-14 19:40:05.896961,782,478,730, 63,2014-01-11 20:21:43.547207,810,539,593, 25,2014-01-12 06:49:35.733152,279,59,327, 15,2014-01-20 20:26:27.376943,370,132,619, 15,2014-01-20 02:35:30.353133,419,533,667, 15,2014-01-12 21:32:47.901128,608,77,661, 25,2014-01-17 02:51:31.325182,813,855,381, 15,2014-01-13 11:30:19.725304,975,739,417, 1,2014-01-17 18:33:04.547477,523,71,164, 15,2014-01-11 09:52:04.244731,711,219,387, 25,2014-01-10 22:43:09.881855,517,5,175, 1,2014-01-17 14:22:56.858394,488,82,429, 25,2014-01-16 13:24:45.546449,938,502,835, 82,2014-01-19 06:13:07.861773,887,625,941, 1,2014-01-13 06:42:33.314063,224,503,653, 82,2014-01-12 12:58:58.389126,551,369,636, 63,2014-01-15 13:57:34.251492,682,801,193, 82,2014-01-15 13:12:42.807715,674,127,337, 82,2014-01-14 18:48:06.990792,842,564,304, 15,2014-01-17 11:12:51.688663,671,543,734, 25,2014-01-12 21:56:33.869471,68,710,243, 15,2014-01-16 00:46:37.669979,176,880,17, 15,2014-01-17 02:30:53.580438,504,508,269, 63,2014-01-11 20:03:38.252936,50,61,689, 1,2014-01-13 03:41:41.786639,892,226,927, 1,2014-01-15 23:43:30.532085,95,788,933, 25,2014-01-20 21:49:02.300446,654,157,985, 63,2014-01-18 22:32:09.153333,849,243,641, 15,2014-01-15 00:04:36.447655,180,3,34, 82,2014-01-19 22:15:24.367155,946,35,178, 25,2014-01-12 18:49:00.681666,280,910,686, 25,2014-01-18 23:44:42.344618,411,865,937, 25,2014-01-20 22:37:07.914362,870,789,396, 25,2014-01-14 05:38:36.415236,125,813,561, 82,2014-01-18 14:11:09.721979,81,252,24, 15,2014-01-19 19:12:38.267496,677,531,318, 25,2014-01-16 07:11:24.316365,897,53,893, 25,2014-01-14 11:02:41.047161,624,882,698, 25,2014-01-12 00:42:57.993219,244,932,829, 25,2014-01-12 19:44:28.863842,663,288,205, 1,2014-01-15 00:54:45.292313,237,78,843, 82,2014-01-12 03:26:07.875074,389,598,527, 82,2014-01-19 14:07:25.722235,32,122,899, 25,2014-01-20 18:19:30.838761,934,612,978, 63,2014-01-20 13:39:15.737266,512,76,738, 25,2014-01-11 14:49:59.036717,181,467,805, 25,2014-01-12 01:18:29.227886,864,623,981, 25,2014-01-14 21:36:49.988757,640,494,169, 25,2014-01-16 08:02:41.844431,819,739,917, 63,2014-01-16 00:06:36.218678,354,259,52, 1,2014-01-11 02:49:41.67742,136,342,117, 15,2014-01-18 09:59:15.713903,968,111,158, 82,2014-01-21 04:53:13.889077,750,996,945, 82,2014-01-17 09:20:35.257857,987,973,299, 15,2014-01-19 10:02:53.96684,419,824,554, 82,2014-01-13 02:34:55.675715,373,836,663, 82,2014-01-12 03:04:05.789943,859,474,553, 25,2014-01-19 01:51:56.267285,157,649,553, 82,2014-01-21 05:07:04.829048,964,270,142, 25,2014-01-16 07:08:42.238018,686,969,284, 25,2014-01-17 18:07:20.634441,965,459,369, 63,2014-01-12 17:56:05.511132,131,453,310, 25,2014-01-14 00:53:24.582635,889,944,879, 82,2014-01-17 10:44:28.548803,826,642,197, 1,2014-01-16 03:34:41.92637,677,315,400, 25,2014-01-11 09:04:34.304615,624,961,363, 15,2014-01-15 23:57:06.816718,103,664,705, 1,2014-01-11 04:46:06.242734,258,155,413, 1,2014-01-15 00:07:15.352195,771,899,8, 63,2014-01-17 04:40:04.809936,289,904,642, 63,2014-01-13 04:30:35.106575,223,432,689, 1,2014-01-19 05:02:00.644507,810,914,686, 82,2014-01-18 12:06:12.68501,178,31,26, 63,2014-01-11 12:38:00.82923,266,318,731, 25,2014-01-18 17:21:31.483269,155,877,543, 25,2014-01-17 19:45:28.422276,595,866,928, 1,2014-01-12 07:27:59.865947,636,153,891, 82,2014-01-19 22:40:00.711574,496,754,842, 25,2014-01-11 11:22:18.411452,908,746,513, 
82,2014-01-17 19:30:41.710378,931,163,552, 63,2014-01-14 16:46:36.930253,885,428,249, 1,2014-01-11 18:12:00.088872,1,518,447, 82,2014-01-14 23:29:57.424281,44,264,589, 15,2014-01-14 15:39:50.819329,26,250,470, 82,2014-01-18 07:32:35.347149,694,12,63, 82,2014-01-13 03:32:51.391121,432,63,202, 25,2014-01-16 22:14:46.495464,97,599,176, 63,2014-01-19 14:36:41.137092,357,31,295, 1,2014-01-10 23:24:47.658406,777,936,924, 1,2014-01-16 21:21:51.32451,794,52,985, 82,2014-01-14 15:45:23.375109,25,214,251, 15,2014-01-17 21:32:08.293266,598,258,535, 82,2014-01-18 19:17:56.835643,556,358,380, 82,2014-01-17 20:38:47.958164,928,514,964, 1,2014-01-14 02:16:36.842069,696,188,49, 15,2014-01-13 16:52:38.762012,408,613,616, 82,2014-01-18 09:43:22.262778,94,533,921, 15,2014-01-19 06:13:43.658786,592,130,365, 25,2014-01-14 01:38:45.004747,376,28,298, 15,2014-01-12 03:50:37.614973,849,293,679, 15,2014-01-15 04:53:48.468722,5,319,167, 63,2014-01-16 10:09:20.111741,874,274,821, 15,2014-01-20 12:52:38.25948,516,382,191, 63,2014-01-12 11:00:01.154151,793,717,553, 15,2014-01-17 02:47:20.940971,899,252,741, 82,2014-01-18 00:14:09.743168,243,770,629, 1,2014-01-19 10:45:29.576427,80,361,37, 25,2014-01-18 14:15:20.806918,773,458,979, 63,2014-01-16 16:54:10.454743,359,944,278, 82,2014-01-20 02:07:47.987879,687,98,363, 25,2014-01-15 03:46:02.517297,411,548,445, 82,2014-01-20 13:40:47.384995,190,812,720, 25,2014-01-21 00:19:48.077171,922,404,544, 25,2014-01-19 07:15:59.941578,931,961,861, 1,2014-01-13 23:20:38.204744,642,728,801, 15,2014-01-18 01:52:23.42432,10,627,157, 1,2014-01-11 04:48:51.332699,518,628,661, 15,2014-01-20 01:35:24.700207,916,1,820, 25,2014-01-16 10:29:04.561411,291,752,197, 1,2014-01-11 08:13:38.991105,758,75,556, 63,2014-01-15 16:57:16.024718,985,248,677, 15,2014-01-20 06:14:27.936941,216,75,272, 63,2014-01-18 09:45:47.284371,410,780,113, 63,2014-01-13 06:15:12.86078,891,177,567, 15,2014-01-16 20:53:15.895104,992,900,675, 25,2014-01-11 06:02:24.126858,9,732,525, 63,2014-01-16 03:07:51.622793,830,59,748, 82,2014-01-14 00:36:38.157882,114,32,124, 63,2014-01-19 16:16:07.785645,180,704,368, 15,2014-01-19 08:46:36.627957,676,71,550, 63,2014-01-15 08:27:15.773274,557,288,775, 15,2014-01-13 15:09:56.254785,448,94,465, 63,2014-01-14 12:55:56.739699,714,953,27, 63,2014-01-14 15:19:54.032754,669,664,310, 82,2014-01-15 06:09:57.408936,325,643,83, 15,2014-01-20 19:43:17.68067,563,232,824, 82,2014-01-11 09:34:03.775923,520,127,744, 63,2014-01-13 10:55:26.985364,799,616,722, 63,2014-01-18 03:58:38.487588,935,683,704, 63,2014-01-17 06:28:14.230665,93,881,29, 63,2014-01-12 07:03:14.973662,56,366,260, 25,2014-01-12 01:44:41.004884,902,604,719, 15,2014-01-16 14:09:01.616079,218,61,359, 25,2014-01-12 01:23:03.050833,756,650,205, 25,2014-01-20 13:00:40.946462,179,760,16, 25,2014-01-19 17:29:39.820823,787,649,233, 1,2014-01-19 07:15:26.475153,887,374,601, 15,2014-01-20 21:49:32.27916,235,350,9, 63,2014-01-18 13:05:04.52103,106,142,217, 15,2014-01-14 11:44:31.036565,923,734,175, 15,2014-01-11 00:03:11.270266,591,505,901, 1,2014-01-13 00:02:20.545748,796,661,94, 25,2014-01-13 11:58:58.973505,592,476,889, 15,2014-01-18 13:48:58.555056,644,901,249, 82,2014-01-15 10:17:33.406527,114,832,804, 15,2014-01-19 12:52:14.868609,807,1,970, 63,2014-01-18 23:51:22.681915,376,363,737, 25,2014-01-20 17:25:25.029739,14,23,924, 1,2014-01-12 07:28:53.02691,591,159,259, 63,2014-01-17 10:07:30.467559,154,66,643, 15,2014-01-18 20:12:01.657697,412,56,505, 25,2014-01-16 19:56:08.800184,795,648,245, 25,2014-01-20 05:17:26.722345,794,651,494, 
1,2014-01-12 03:07:58.397395,616,5,961, 15,2014-01-20 15:16:02.832939,929,661,192, 15,2014-01-14 05:27:24.131282,494,761,536, 1,2014-01-11 16:51:16.078065,837,127,915, 63,2014-01-21 04:50:06.60928,496,715,170, 82,2014-01-16 20:22:51.117065,288,950,877, 1,2014-01-19 00:49:54.565652,178,310,191, 15,2014-01-17 05:18:20.839945,342,964,194, 82,2014-01-18 07:26:06.090727,229,302,649, 1,2014-01-20 06:34:35.570536,989,669,559, 25,2014-01-12 13:27:22.456025,277,114,425, 15,2014-01-19 12:49:09.14156,67,797,931, 1,2014-01-19 13:35:16.516998,701,771,571, 63,2014-01-11 00:57:02.276848,905,520,687, 15,2014-01-17 14:04:35.616714,727,578,455, 1,2014-01-19 05:24:48.796159,188,149,737, 1,2014-01-18 18:02:06.797877,274,994,518, 82,2014-01-21 05:49:06.653279,120,289,226, 25,2014-01-19 09:28:00.066425,817,579,608, 1,2014-01-20 22:04:27.344044,315,626,781, 1,2014-01-13 11:48:05.627203,285,729,33, 63,2014-01-16 17:16:58.621481,715,688,978, 63,2014-01-15 02:22:00.750571,782,788,147, 63,2014-01-11 18:40:20.495813,918,40,794, 1,2014-01-14 11:08:21.303397,178,615,421, 82,2014-01-14 13:47:25.780309,179,912,990, 25,2014-01-13 06:09:13.522724,69,739,973, 88,2014-01-21 01:15:51.771375,136,912,16, 88,2014-01-12 03:59:27.438006,374,744,129, 88,2014-01-19 06:05:22.322908,879,599,134, 88,2014-01-20 00:33:18.49372,711,242,55, 31,2014-01-13 11:07:25.835401,64,650,605, 88,2014-01-18 15:21:25.156266,281,434,86, 31,2014-01-13 00:00:42.625421,586,939,863, 31,2014-01-14 07:58:41.913466,941,161,362, 31,2014-01-18 14:11:31.765547,103,74,87, 31,2014-01-19 00:23:33.742486,856,780,835, 88,2014-01-12 02:48:36.479118,617,784,801, 88,2014-01-14 13:29:52.605492,939,847,235, 31,2014-01-16 01:49:39.833214,96,121,207, 31,2014-01-11 03:23:12.049654,370,112,670, 88,2014-01-16 06:57:15.061935,748,994,62, 31,2014-01-14 08:38:48.974775,321,362,961, 31,2014-01-19 20:58:28.566653,608,453,139, 31,2014-01-16 11:54:17.338782,250,139,896, 31,2014-01-21 02:43:24.591489,272,501,908, 88,2014-01-17 20:23:17.362812,923,776,189, 31,2014-01-13 23:56:24.137079,579,443,534, 31,2014-01-21 02:32:31.244768,874,892,962, 88,2014-01-15 23:51:17.429656,466,678,936, 88,2014-01-13 16:00:51.481123,925,374,837, 88,2014-01-13 08:20:36.871971,93,345,434, 88,2014-01-11 11:08:16.051139,889,535,170, 88,2014-01-17 22:22:52.231694,600,886,430, 31,2014-01-14 07:00:57.368203,60,732,998, 88,2014-01-15 02:16:37.354954,742,19,949, 88,2014-01-11 06:10:18.012423,74,480,72, 88,2014-01-16 17:10:10.890926,412,926,1, 31,2014-01-14 21:32:29.126328,367,798,504, 31,2014-01-12 14:09:45.450429,529,378,467, 31,2014-01-14 17:15:33.214253,625,476,491, 88,2014-01-13 16:05:47.620048,430,443,934, 31,2014-01-15 05:17:11.286249,699,871,251, 31,2014-01-12 02:36:58.37052,679,63,886, 88,2014-01-15 20:06:30.24547,509,425,920, 88,2014-01-18 17:15:53.199715,421,309,720, 88,2014-01-19 20:48:30.136066,765,486,492, 88,2014-01-13 14:30:03.987955,675,843,615, 31,2014-01-20 00:04:29.679252,603,657,156, 88,2014-01-13 04:18:22.741557,640,126,551, 88,2014-01-18 20:19:43.821169,743,445,295, 31,2014-01-20 07:27:41.728906,52,931,530, 88,2014-01-18 15:15:37.803493,766,397,960, 31,2014-01-11 22:58:32.795944,331,146,521, 88,2014-01-18 22:26:10.295979,328,57,634, 31,2014-01-13 21:09:55.142695,549,812,950, 88,2014-01-11 19:41:57.387432,651,686,12, 88,2014-01-15 12:49:27.658791,730,927,337, 31,2014-01-17 01:06:19.279774,320,503,254, 88,2014-01-11 16:14:28.632619,327,680,47, 31,2014-01-20 16:40:45.088448,917,440,54, 88,2014-01-19 21:07:10.760897,420,461,77, 88,2014-01-14 04:35:05.50459,301,745,376, 31,2014-01-10 
21:49:01.140005,352,670,50, 31,2014-01-16 13:30:02.992591,37,361,785, 88,2014-01-17 15:36:02.872792,982,452,536, 31,2014-01-15 04:05:38.494958,729,906,627, 31,2014-01-16 23:40:21.005014,506,26,599, 31,2014-01-13 02:46:13.764138,945,163,247, 31,2014-01-19 05:38:07.621287,295,285,134, 88,2014-01-18 17:50:06.455861,130,157,551, 88,2014-01-17 00:01:46.978391,53,666,813, 31,2014-01-11 15:43:55.241335,496,820,681, 88,2014-01-12 17:07:17.74211,586,332,564, 31,2014-01-18 06:38:45.34891,893,497,505, 88,2014-01-16 15:50:25.487224,723,44,154, 88,2014-01-20 14:23:10.941825,674,319,467, 31,2014-01-16 21:27:15.484977,453,97,383, 88,2014-01-19 10:20:29.475179,281,819,875, 88,2014-01-19 08:27:40.621077,935,92,743, 88,2014-01-19 05:45:38.226534,249,344,489, 31,2014-01-17 00:40:13.296768,631,981,491, 31,2014-01-18 09:55:22.350403,509,529,220, 88,2014-01-16 15:01:16.030447,116,517,875, 88,2014-01-18 03:38:46.09271,246,685,794, 31,2014-01-16 02:21:32.646381,303,295,752, 88,2014-01-19 16:11:11.173142,174,635,842, 31,2014-01-19 03:20:43.480561,855,201,698, 88,2014-01-20 19:11:00.305173,100,299,166, 31,2014-01-15 11:17:30.452916,455,883,409, 31,2014-01-19 23:35:12.11318,459,826,121, 31,2014-01-20 05:51:45.39404,569,802,508, 88,2014-01-14 02:24:41.213813,9,210,231, 31,2014-01-12 22:10:17.617769,545,790,779, 88,2014-01-20 07:40:46.534045,165,389,876, 88,2014-01-19 19:54:44.206403,975,423,106, 31,2014-01-19 17:46:20.49098,699,374,873, 31,2014-01-14 05:46:25.029003,57,278,269, 88,2014-01-15 13:35:05.211836,547,57,336, 88,2014-01-11 14:32:34.255118,17,873,928, 31,2014-01-12 05:24:32.650291,78,579,648, 31,2014-01-13 01:25:11.667698,986,611,519, 88,2014-01-17 18:34:21.233509,650,524,995, 88,2014-01-13 01:08:27.891626,10,397,949, 88,2014-01-14 22:32:29.409808,996,118,332, 88,2014-01-14 19:13:06.58242,839,826,761, 88,2014-01-18 20:58:53.37885,493,855,512, 31,2014-01-14 06:55:40.351633,309,535,670, 88,2014-01-20 20:40:22.067396,804,336,834, 31,2014-01-17 01:19:22.854448,216,547,979, 31,2014-01-12 09:23:20.973129,730,345,174, 88,2014-01-18 20:26:00.293931,209,426,137, 31,2014-01-12 19:59:36.151216,272,728,971, 88,2014-01-19 23:18:43.323532,669,16,830, 31,2014-01-14 05:27:16.324377,298,65,186, 88,2014-01-20 03:38:22.243926,344,312,633, 31,2014-01-14 19:40:15.969914,464,305,94, 88,2014-01-12 15:38:27.497519,804,726,903, 31,2014-01-18 00:59:05.724489,677,380,127, 31,2014-01-14 08:51:16.275086,482,45,772, 31,2014-01-17 06:55:57.950434,51,981,889, 88,2014-01-16 18:34:17.837668,763,744,154, 31,2014-01-13 08:43:01.669126,733,125,67, 31,2014-01-20 13:20:39.164247,475,444,847, 31,2014-01-18 20:44:35.455856,330,479,528, 88,2014-01-12 10:23:48.20317,842,790,163, 88,2014-01-19 03:15:23.37107,812,579,963, 31,2014-01-17 08:30:55.946836,673,188,73, 88,2014-01-15 20:10:13.232173,169,567,183, 31,2014-01-13 10:50:28.582905,784,892,437, 88,2014-01-18 03:03:30.201954,567,758,673, 31,2014-01-17 05:34:45.882464,831,654,454, 88,2014-01-15 16:15:40.250603,517,246,443, 88,2014-01-14 15:37:51.435464,103,342,135, 31,2014-01-19 10:43:13.774508,138,272,725, 88,2014-01-19 18:48:09.660411,988,690,479, 88,2014-01-18 14:50:58.017883,344,441,703, 88,2014-01-17 01:42:07.153358,13,717,319, 31,2014-01-12 19:43:50.012044,365,572,124, 88,2014-01-18 05:31:20.085279,45,631,908, 88,2014-01-12 21:01:30.007806,347,929,306, 31,2014-01-14 09:07:10.985173,887,215,840, 31,2014-01-15 19:57:20.37921,673,439,381, 31,2014-01-14 21:01:06.159022,406,422,131, 88,2014-01-13 02:25:54.308705,215,617,899, 88,2014-01-19 05:24:36.704006,753,872,322, 31,2014-01-13 
18:39:28.403368,707,560,982, 88,2014-01-17 02:06:10.278619,77,942,775, 88,2014-01-21 01:03:04.201526,948,395,183, 31,2014-01-20 23:38:34.127856,555,601,532, 88,2014-01-20 14:57:26.554125,266,457,800, 31,2014-01-17 01:59:02.152379,541,83,296, 31,2014-01-16 12:12:51.965943,911,295,184, 31,2014-01-12 17:40:28.22325,281,174,335, 31,2014-01-16 09:19:41.316626,697,260,638, 88,2014-01-14 02:57:27.421799,783,173,927, 31,2014-01-14 08:04:16.42684,660,876,237, 31,2014-01-14 06:35:04.687696,387,310,579, 88,2014-01-20 15:28:23.368635,858,160,104, 88,2014-01-19 08:14:29.659013,780,641,715, 31,2014-01-16 21:25:33.270601,539,583,135, 88,2014-01-17 12:31:53.570591,74,490,551, 31,2014-01-15 07:49:15.541895,923,749,541, 31,2014-01-11 07:41:13.521204,650,710,238, 31,2014-01-10 22:09:45.006054,230,415,384, 31,2014-01-13 12:32:29.316403,80,110,525, 88,2014-01-20 06:29:23.181616,627,471,368, 31,2014-01-18 17:00:43.023936,410,536,807, 88,2014-01-19 18:14:36.469761,325,691,357, 31,2014-01-11 20:13:13.194078,400,120,907, 88,2014-01-15 16:32:03.109217,753,804,935, 31,2014-01-11 09:16:06.477987,617,307,284, 31,2014-01-15 09:20:24.179251,238,154,553, 88,2014-01-20 16:29:23.488426,325,770,117, 88,2014-01-15 10:17:12.637009,967,41,940, 31,2014-01-17 15:46:18.488376,195,759,727, 31,2014-01-18 15:54:00.192433,724,593,632, 88,2014-01-18 08:56:41.040377,332,599,379, 31,2014-01-13 11:52:28.766995,695,895,401, 31,2014-01-18 10:57:04.394378,871,2,921, 87,2014-01-18 02:35:15.168652,994,428,467, 26,2014-01-12 20:49:55.32112,7,541,890, 94,2014-01-14 06:56:06.546757,194,889,939, 87,2014-01-13 08:48:07.134595,321,631,961, 79,2014-01-14 18:30:23.54479,930,621,208, 24,2014-01-19 20:15:47.863383,32,138,958, 87,2014-01-16 15:45:34.556394,352,603,222, 24,2014-01-18 06:34:39.971629,908,603,348, 33,2014-01-12 20:50:52.551498,261,163,898, 87,2014-01-16 01:13:57.92503,286,684,118, 79,2014-01-16 08:49:09.630642,842,615,415, 87,2014-01-18 22:16:25.822099,678,298,190, 87,2014-01-12 07:45:51.49562,305,284,253, 79,2014-01-20 20:38:25.172536,774,73,586, 26,2014-01-19 09:57:39.343722,627,355,382, 24,2014-01-12 09:55:36.501674,807,441,60, 26,2014-01-13 03:10:54.48894,100,182,814, 79,2014-01-18 10:27:02.525337,711,810,56, 33,2014-01-20 06:56:19.52561,977,403,119, 33,2014-01-11 15:25:30.959121,695,517,687, 87,2014-01-19 10:40:15.719416,142,172,59, 87,2014-01-14 17:28:22.634827,826,277,916, 26,2014-01-12 04:41:37.437107,416,124,436, 26,2014-01-13 14:00:39.899086,844,420,950, 94,2014-01-14 03:57:46.123253,79,935,319, 94,2014-01-16 18:58:50.074117,715,942,82, 79,2014-01-20 09:46:58.387462,777,91,348, 94,2014-01-11 13:51:46.316105,774,634,710, 33,2014-01-13 21:55:31.114914,102,678,746, 26,2014-01-12 15:39:27.154457,103,102,593, 87,2014-01-19 04:48:50.710483,200,149,820, 87,2014-01-21 04:30:46.283565,413,379,520, 26,2014-01-14 16:29:22.475997,999,922,426, 94,2014-01-12 05:44:57.257241,364,133,675, 79,2014-01-13 07:18:53.41816,606,212,87, 87,2014-01-18 14:59:46.020786,255,751,678, 87,2014-01-10 20:00:45.120623,24,916,488, 33,2014-01-19 03:04:27.974554,722,95,195, 24,2014-01-15 15:34:25.992415,438,90,339, 79,2014-01-12 20:51:37.672121,324,336,181, 87,2014-01-13 22:18:25.899584,483,938,984, 33,2014-01-21 04:23:35.623056,98,760,149, 24,2014-01-20 23:08:03.494639,34,320,899, 26,2014-01-15 10:04:17.395204,260,455,788, 87,2014-01-20 19:02:00.796012,766,949,184, 33,2014-01-19 03:05:42.83836,343,949,229, 94,2014-01-16 23:59:53.896878,358,219,483, 24,2014-01-12 16:12:55.284952,229,941,210, 24,2014-01-16 07:32:45.363698,444,697,866, 24,2014-01-16 
00:56:13.422489,30,815,326, 24,2014-01-13 11:38:26.244072,423,30,991, 26,2014-01-14 12:13:01.083532,7,869,323, 87,2014-01-19 22:24:36.057316,276,474,193, 94,2014-01-14 20:20:03.68118,389,139,340, 94,2014-01-17 06:13:40.982619,642,618,7, 79,2014-01-12 20:22:22.18057,462,406,771, 94,2014-01-20 19:18:53.755716,695,195,553, 33,2014-01-16 10:00:39.370081,335,501,566, 94,2014-01-13 14:14:08.496675,724,875,667, 24,2014-01-13 11:14:24.870629,137,221,644, 87,2014-01-18 05:40:06.524539,449,594,248, 94,2014-01-11 13:02:59.207158,633,560,125, 33,2014-01-13 09:45:11.154194,46,429,99, 33,2014-01-11 16:09:29.000534,24,14,498, 26,2014-01-12 22:47:56.464399,460,422,672, 79,2014-01-15 21:04:04.572354,699,22,285, 33,2014-01-19 11:09:15.021321,735,687,235, 33,2014-01-12 22:48:41.585441,118,170,328, 94,2014-01-13 18:08:32.546908,915,451,105, 33,2014-01-13 20:43:41.013735,31,364,562, 33,2014-01-14 23:40:19.257561,672,360,389, 26,2014-01-16 20:26:58.446492,903,853,276, 79,2014-01-13 19:07:16.637211,88,32,802, 26,2014-01-14 16:48:22.752199,514,875,353, 33,2014-01-11 00:31:15.841696,10,384,349, 33,2014-01-13 08:09:17.433223,909,281,20, 94,2014-01-12 13:54:05.590559,676,957,742, 94,2014-01-17 04:31:09.738574,891,810,392, 24,2014-01-15 04:22:12.718175,784,349,359, 79,2014-01-18 01:26:50.954677,585,628,273, 33,2014-01-11 23:27:23.161064,681,404,636, 24,2014-01-17 20:00:38.962667,700,889,424, 24,2014-01-11 07:39:52.038209,575,255,90, 94,2014-01-21 01:51:59.218379,358,509,711, 26,2014-01-11 10:20:42.643847,24,505,498, 26,2014-01-17 17:53:33.020828,661,823,704, 94,2014-01-12 16:14:21.398949,956,668,200, 24,2014-01-10 23:39:36.399563,323,794,377, 26,2014-01-12 21:54:12.390908,981,939,187, 87,2014-01-15 10:28:29.896043,655,648,480, 94,2014-01-13 14:54:01.270192,499,518,169, 94,2014-01-20 07:34:18.915447,894,296,443, 33,2014-01-16 03:31:29.103201,122,736,338, 33,2014-01-16 04:39:12.424385,871,340,769, 33,2014-01-21 03:43:47.9164,408,881,382, 33,2014-01-18 06:19:25.568019,743,371,786, 26,2014-01-21 05:43:16.99674,957,691,660, 94,2014-01-19 08:53:02.937721,424,282,130, 33,2014-01-20 09:08:07.153459,793,989,765, 87,2014-01-13 17:51:49.543648,299,802,360, 33,2014-01-11 23:36:43.951876,542,807,786, 87,2014-01-14 02:48:26.411021,425,440,29, 79,2014-01-19 18:18:47.99014,611,121,929, 87,2014-01-14 22:44:00.589087,374,502,326, 79,2014-01-17 23:36:49.16364,728,479,747, 79,2014-01-19 22:50:03.831836,852,264,442, 24,2014-01-17 10:53:18.02231,712,71,121, 94,2014-01-19 17:30:54.754199,198,756,261, 94,2014-01-15 21:21:13.57041,646,534,696, 26,2014-01-11 09:15:30.740485,866,62,419, 26,2014-01-16 12:57:45.708876,50,785,440, 26,2014-01-17 00:48:36.731474,153,79,711, 33,2014-01-18 09:16:09.703152,732,681,156, 33,2014-01-17 00:37:37.747084,159,625,86, 24,2014-01-16 20:40:35.949853,640,122,689, 94,2014-01-18 23:36:52.347418,311,990,131, 87,2014-01-13 12:31:10.767912,357,2,710, 26,2014-01-18 16:54:57.349221,664,829,742, 33,2014-01-19 03:16:28.74698,669,486,305, 33,2014-01-15 14:25:23.158821,821,240,833, 24,2014-01-12 21:23:27.245264,750,624,246, 24,2014-01-11 12:10:30.017592,328,101,644, 33,2014-01-14 15:59:42.074686,735,966,248, 94,2014-01-18 04:54:56.348466,400,349,131, 94,2014-01-16 20:49:42.441977,738,527,244, 33,2014-01-14 13:43:29.991087,686,558,491, 79,2014-01-15 05:14:21.916484,222,44,706, 94,2014-01-16 20:32:59.439136,14,559,676, 33,2014-01-12 16:36:32.928808,922,15,347, 24,2014-01-14 08:22:29.069944,315,323,446, 33,2014-01-19 18:24:48.982784,553,176,89, 94,2014-01-13 20:13:16.880684,924,909,327, 87,2014-01-17 
15:10:55.481384,744,440,143, 26,2014-01-18 06:43:36.973343,443,153,279, 79,2014-01-17 22:57:17.469771,874,399,202, 87,2014-01-14 08:47:44.645023,150,489,182, 79,2014-01-16 23:33:40.805179,205,953,965, 94,2014-01-14 03:50:35.492081,911,338,351, 79,2014-01-12 20:18:39.399222,55,292,676, 26,2014-01-11 14:54:54.376009,959,883,437, 79,2014-01-14 17:06:06.232986,107,306,577, 79,2014-01-18 13:16:25.108517,892,447,577, 94,2014-01-17 19:43:31.107482,573,362,230, 33,2014-01-11 20:22:15.936139,946,562,48, 94,2014-01-14 07:54:02.855602,962,814,799, 24,2014-01-13 10:24:07.057755,970,700,649, 94,2014-01-19 23:59:08.283557,467,276,987, 87,2014-01-17 00:25:13.623933,913,267,509, 33,2014-01-10 21:19:04.406976,414,253,485, 94,2014-01-17 21:15:37.030956,788,713,291, 33,2014-01-11 08:50:36.782754,536,180,421, 87,2014-01-12 22:42:31.65224,656,549,197, 24,2014-01-18 13:26:07.048548,452,277,265, 26,2014-01-15 04:50:18.857441,850,406,436, 87,2014-01-20 07:37:28.000705,733,911,427, 79,2014-01-14 04:15:49.490525,221,98,307, 87,2014-01-18 22:33:48.848527,149,586,322, 94,2014-01-14 06:51:49.917608,284,989,487, 87,2014-01-20 04:48:48.92966,849,268,706, 24,2014-01-20 19:10:21.777754,135,973,309, 94,2014-01-17 19:14:18.987552,491,527,99, 87,2014-01-18 17:13:37.912864,552,554,517, 94,2014-01-13 09:23:38.658439,658,32,519, 79,2014-01-14 04:25:14.468936,442,900,550, 87,2014-01-15 17:57:14.886207,425,6,61, 87,2014-01-20 12:20:56.128629,29,338,957, 33,2014-01-17 17:12:59.113959,631,98,151, 24,2014-01-11 11:30:55.691386,168,138,584, 24,2014-01-13 10:11:31.62071,389,790,656, 24,2014-01-19 17:31:38.513601,503,109,226, 24,2014-01-12 06:25:50.067396,718,575,774, 33,2014-01-17 07:17:37.853697,449,339,54, 79,2014-01-17 00:48:03.622118,584,922,368, 87,2014-01-19 06:09:21.175297,618,849,191, 79,2014-01-18 07:39:53.790255,254,805,197, 26,2014-01-20 12:42:06.47772,934,40,730, 87,2014-01-11 10:33:28.233052,828,88,591, 87,2014-01-17 01:39:02.073811,542,930,279, 79,2014-01-16 07:07:20.101653,480,709,189, 87,2014-01-11 11:52:32.640028,752,282,236, 94,2014-01-13 16:54:39.104768,64,941,523, 24,2014-01-16 19:57:56.884826,487,617,360, 26,2014-01-13 14:35:04.292268,820,838,721, 24,2014-01-11 00:20:46.153316,919,308,751, 24,2014-01-21 04:48:15.742267,646,764,452, 79,2014-01-12 16:12:32.292973,453,500,24, 79,2014-01-14 08:36:35.644259,990,259,824, 26,2014-01-18 21:22:04.591213,129,790,145, 26,2014-01-16 03:04:22.21058,80,964,399, 87,2014-01-13 07:25:24.573921,681,93,106, 87,2014-01-18 10:32:26.368968,76,100,912, 87,2014-01-12 16:18:41.198552,376,806,514, 79,2014-01-21 04:39:02.486784,124,669,596, 87,2014-01-20 23:56:05.027406,909,949,880, 26,2014-01-16 00:43:55.667489,338,509,653, 94,2014-01-15 16:36:17.372992,494,202,985, 87,2014-01-20 06:17:01.156035,83,153,516, 24,2014-01-12 11:56:54.781867,91,680,892, 33,2014-01-16 08:07:13.064797,954,490,813, 94,2014-01-12 10:28:32.777165,969,21,614, 24,2014-01-10 23:28:33.295468,812,975,690, 33,2014-01-17 18:33:03.132193,954,553,435, 79,2014-01-18 21:46:10.630861,145,808,161, 24,2014-01-17 04:16:36.917586,193,303,587, 79,2014-01-15 18:42:24.30749,200,596,42, 24,2014-01-15 23:26:04.421117,294,142,54, 33,2014-01-16 10:58:43.395306,607,656,683, 79,2014-01-16 09:15:52.540542,159,888,829, 33,2014-01-11 19:05:06.495347,782,213,56, 87,2014-01-11 12:06:03.497378,379,347,304, 87,2014-01-17 01:08:25.18057,385,64,126, 24,2014-01-14 15:59:45.600115,40,966,211, 24,2014-01-17 12:04:00.382204,755,14,354, 94,2014-01-19 19:43:29.472837,741,882,861, 26,2014-01-14 20:20:31.75385,135,549,273, 79,2014-01-17 
10:52:16.12447,830,810,713, 24,2014-01-11 05:56:01.766229,683,334,925, 33,2014-01-18 08:57:07.398109,553,227,393, 94,2014-01-15 02:14:20.715682,922,575,111, 26,2014-01-16 13:00:23.976809,501,970,687, 26,2014-01-20 20:22:31.97203,870,926,275, 87,2014-01-12 06:46:47.08465,930,662,845, 87,2014-01-18 09:19:05.175361,564,581,528, 24,2014-01-20 19:01:34.459234,298,622,300, 33,2014-01-12 00:42:52.112475,909,155,812, 87,2014-01-13 04:03:00.843269,480,955,150, 87,2014-01-15 05:37:51.832226,580,708,146, 94,2014-01-11 00:59:53.26851,367,181,149, 94,2014-01-14 19:59:55.625136,537,420,428, 94,2014-01-20 17:45:04.897023,487,278,281, 33,2014-01-12 15:28:26.045675,972,258,460, 24,2014-01-14 23:28:28.920604,709,52,918, 24,2014-01-17 06:18:08.029634,967,769,902, 26,2014-01-20 17:14:36.676955,121,792,870, 24,2014-01-10 21:45:05.83819,90,835,137, 94,2014-01-11 19:00:32.337124,238,128,358, 79,2014-01-15 10:40:41.098072,306,15,481, 24,2014-01-16 12:43:49.233496,508,92,487, 24,2014-01-17 08:16:24.878085,925,742,923, 24,2014-01-16 09:45:47.593419,65,927,995, 79,2014-01-17 04:49:52.730874,694,354,490, 26,2014-01-13 03:24:50.058655,633,55,40, 24,2014-01-20 05:45:33.193952,823,914,248, 79,2014-01-13 10:53:53.113078,648,813,896, 79,2014-01-11 17:08:19.531911,476,461,157, 87,2014-01-13 20:06:04.947802,773,325,710, 24,2014-01-20 01:46:09.237967,763,4,139, 79,2014-01-12 03:04:21.298141,635,319,519, 94,2014-01-10 23:03:12.345912,947,776,170, 33,2014-01-13 22:00:29.95365,273,390,364, 26,2014-01-17 20:04:45.27495,737,413,38, 26,2014-01-20 23:25:44.317943,503,957,693, 87,2014-01-15 08:47:17.038719,176,303,137, 87,2014-01-14 23:23:50.45073,897,949,582, 94,2014-01-20 12:27:18.777177,55,654,387, 33,2014-01-16 13:30:09.151194,70,194,109, 26,2014-01-17 07:26:51.293999,710,439,107, 26,2014-01-14 12:05:10.609822,873,432,384, 94,2014-01-16 18:30:02.419704,568,902,849, 24,2014-01-10 21:26:46.919135,958,435,445, 33,2014-01-13 23:50:15.506845,732,748,271, 79,2014-01-18 13:58:28.465799,520,718,4, 79,2014-01-15 00:55:15.83974,11,389,485, 24,2014-01-20 10:08:23.536479,435,56,57, 33,2014-01-18 01:13:05.142754,765,460,262, 26,2014-01-15 02:40:21.677929,912,47,592, 33,2014-01-10 23:08:55.874023,765,29,849, 79,2014-01-12 05:53:46.240826,940,443,858, 87,2014-01-20 19:24:10.911425,228,117,684, 24,2014-01-17 11:25:20.752108,499,282,986, 87,2014-01-17 19:39:33.834664,904,23,86, 26,2014-01-16 18:14:03.642719,892,941,707, 79,2014-01-19 18:50:10.810763,977,31,397, 79,2014-01-16 19:25:07.028616,119,904,282, 33,2014-01-19 09:07:56.755796,784,305,734, 26,2014-01-20 15:58:30.342674,602,197,935, 26,2014-01-19 19:31:11.976419,603,854,283, 79,2014-01-18 04:54:05.993763,336,331,556, 26,2014-01-11 13:02:51.640815,349,734,655, 26,2014-01-19 22:34:24.322331,476,433,374, 26,2014-01-10 20:54:35.947831,625,225,904, 87,2014-01-18 13:07:36.916183,447,758,281, 24,2014-01-19 16:00:08.640274,833,50,494, 94,2014-01-15 09:41:52.98655,621,132,242, 33,2014-01-12 06:31:27.366914,73,726,160, 94,2014-01-18 22:27:27.41787,376,786,962, 94,2014-01-21 03:12:02.137745,201,664,578, 26,2014-01-18 17:58:18.660914,755,435,490, 24,2014-01-12 04:32:38.027692,377,72,220, 33,2014-01-16 15:42:04.557449,453,790,274, 24,2014-01-18 19:25:05.580049,309,943,687, 33,2014-01-15 08:22:53.534536,277,353,290, 79,2014-01-13 23:40:33.023248,267,962,19, 87,2014-01-12 14:20:21.419789,555,807,990, 24,2014-01-14 12:31:17.071435,271,6,38, 94,2014-01-21 04:53:38.166002,333,653,244, 26,2014-01-16 21:00:43.097718,678,677,616, 26,2014-01-14 15:40:12.945458,507,285,616, 79,2014-01-12 
04:47:24.407247,345,262,746, 87,2014-01-16 10:24:54.009143,461,121,639, 24,2014-01-10 21:05:33.697567,801,775,26, 87,2014-01-19 04:26:58.241909,536,171,790, 26,2014-01-11 22:38:57.651861,680,642,826, 24,2014-01-19 19:55:44.508329,844,256,83, 79,2014-01-14 17:52:05.270526,395,454,819, 26,2014-01-20 11:46:54.408076,587,127,651, 79,2014-01-19 05:54:14.851423,581,126,5, 26,2014-01-13 07:23:17.246944,662,772,516, 94,2014-01-17 10:41:00.40184,193,643,353, 87,2014-01-19 22:57:06.492237,113,492,762, 24,2014-01-11 23:57:41.569275,49,803,68, 79,2014-01-17 11:35:36.349671,477,991,977, 26,2014-01-17 06:04:43.408421,396,760,892, 94,2014-01-20 19:57:50.209549,593,864,253, 87,2014-01-11 15:17:29.336222,828,391,579, 26,2014-01-18 16:36:10.775334,62,645,201, 26,2014-01-18 12:25:17.627419,565,597,65, 26,2014-01-11 12:29:31.473967,445,35,408, 33,2014-01-16 04:34:29.436247,497,241,601, 26,2014-01-19 20:57:55.65511,369,822,872, 24,2014-01-17 08:11:36.031834,244,220,647, 33,2014-01-13 17:59:35.016296,415,918,811, 87,2014-01-13 23:20:49.190066,464,751,206, 94,2014-01-20 11:52:09.055082,805,899,621, 33,2014-01-15 12:19:56.436084,331,760,843, 79,2014-01-17 15:52:06.261501,801,301,890, 26,2014-01-20 10:45:47.221503,932,590,966, 87,2014-01-11 03:20:39.533802,409,39,485, 94,2014-01-11 01:32:19.206959,389,113,791, 26,2014-01-11 09:33:11.62875,675,639,814, 79,2014-01-16 17:45:33.543364,230,706,835, 79,2014-01-11 02:37:52.904526,961,979,217, 26,2014-01-19 18:00:09.87066,204,41,834, 24,2014-01-17 20:24:31.195225,568,10,485, 33,2014-01-20 02:33:37.412855,830,43,432, 87,2014-01-13 05:52:15.141185,147,25,971, 79,2014-01-17 02:11:25.603302,726,109,69, 33,2014-01-18 02:27:52.264278,924,660,234, 87,2014-01-15 17:15:32.38813,30,799,912, 94,2014-01-13 06:52:26.00556,640,210,169, 87,2014-01-16 19:24:58.756934,584,290,357, 94,2014-01-16 21:13:13.957405,640,548,539, 33,2014-01-19 22:28:02.355231,665,817,715, 26,2014-01-12 19:29:42.165355,992,116,817, 94,2014-01-16 11:11:04.166953,962,624,978, 26,2014-01-20 17:45:31.691872,832,662,552, 87,2014-01-20 16:05:52.941108,658,850,789, 87,2014-01-13 17:36:21.794791,98,589,132, 79,2014-01-11 00:15:03.165839,290,70,278, 33,2014-01-15 14:40:22.377356,471,272,754, 94,2014-01-12 08:34:17.449901,989,132,74, 26,2014-01-17 12:26:39.197673,375,648,12, 94,2014-01-18 12:39:57.394071,435,498,144, 26,2014-01-15 11:55:06.639967,83,377,625, 94,2014-01-16 18:18:48.253174,913,958,841, 94,2014-01-12 18:59:53.830155,782,401,763, 87,2014-01-11 21:47:12.901469,357,681,632, 94,2014-01-15 23:04:35.474678,769,529,110, 24,2014-01-13 02:20:33.364376,60,353,394, 87,2014-01-12 03:19:32.108428,682,102,873, 33,2014-01-16 12:37:47.103428,942,805,442, 26,2014-01-19 00:06:06.90774,64,990,739, 87,2014-01-12 09:57:25.012954,697,413,469, 26,2014-01-15 00:37:56.974088,881,730,717, 94,2014-01-15 14:30:38.102965,498,58,1, 79,2014-01-11 06:31:02.426229,614,178,107, 24,2014-01-17 10:30:12.115273,543,656,733, 79,2014-01-11 10:42:03.706686,302,87,672, 79,2014-01-18 12:58:54.690507,742,637,129, 33,2014-01-11 21:45:44.503403,496,948,196, 26,2014-01-13 21:34:29.712247,190,168,184, 94,2014-01-14 02:23:53.447442,343,637,83, 26,2014-01-17 22:58:58.460808,735,74,410, 33,2014-01-12 14:02:32.067478,821,187,588, 87,2014-01-16 01:53:35.613217,75,371,758, 87,2014-01-13 04:10:02.627761,908,480,214, 24,2014-01-12 01:48:03.75935,682,484,544, 24,2014-01-15 11:59:28.554325,461,967,430, 24,2014-01-16 01:46:24.422552,474,413,225, 33,2014-01-12 06:03:06.925607,893,672,569, 26,2014-01-20 06:39:50.93168,874,523,399, 87,2014-01-17 
14:20:41.872873,495,547,292, 87,2014-01-18 22:29:46.12328,340,340,737, 79,2014-01-17 13:19:48.325751,369,516,382, 79,2014-01-11 20:15:48.51284,434,128,331, 87,2014-01-14 10:48:34.376454,434,783,601, 79,2014-01-19 12:19:42.156325,266,691,989, 24,2014-01-12 22:03:01.414727,897,387,600, 24,2014-01-19 13:53:09.851131,220,478,890, 33,2014-01-11 08:40:15.520701,695,397,568, 79,2014-01-14 05:22:33.523155,605,465,641, 24,2014-01-14 20:30:56.954559,774,508,29, 87,2014-01-19 12:46:22.428441,58,778,122, 94,2014-01-15 19:19:58.536529,829,323,449, 79,2014-01-19 01:08:53.928647,300,756,335, 33,2014-01-13 21:17:00.531405,731,127,177, 94,2014-01-16 05:51:00.962757,31,574,666, 33,2014-01-15 05:39:06.04392,65,468,387, 79,2014-01-14 11:59:04.238092,903,148,869, 94,2014-01-13 12:49:55.653265,590,610,408, 94,2014-01-16 07:24:50.547323,783,285,335, 87,2014-01-17 13:33:33.950339,97,604,678, 33,2014-01-16 19:13:49.101126,283,352,624, 24,2014-01-13 00:23:49.008131,562,594,492, 79,2014-01-19 07:36:06.018235,408,363,394, 26,2014-01-11 15:07:24.714342,86,286,738, 94,2014-01-15 08:33:51.636311,833,228,642, 24,2014-01-20 13:24:09.777585,907,654,967, 79,2014-01-16 07:06:53.268667,957,758,483, 87,2014-01-20 14:20:16.058864,638,718,448, 79,2014-01-11 13:27:16.703193,445,157,906, 26,2014-01-15 07:46:44.200347,211,137,71, 26,2014-01-16 22:40:57.931736,408,680,583, 87,2014-01-19 15:57:02.826892,879,535,306, 26,2014-01-11 15:06:32.526517,441,81,393, 24,2014-01-17 22:56:46.444995,662,273,492, 79,2014-01-12 20:45:37.203346,244,99,315, 24,2014-01-20 07:26:14.682843,748,746,872, 24,2014-01-20 00:59:47.859722,888,557,810, 94,2014-01-11 04:38:47.054896,924,361,610, 87,2014-01-20 20:06:30.203543,808,293,624, 79,2014-01-13 00:22:21.382877,335,551,673, 87,2014-01-15 05:09:44.009455,773,971,678, 79,2014-01-19 02:52:52.631983,469,212,472, 79,2014-01-17 23:42:19.919406,512,665,108, 94,2014-01-13 00:18:37.938522,330,643,803, 79,2014-01-11 18:09:53.163389,862,961,276, 26,2014-01-12 23:33:20.882164,293,378,44, 79,2014-01-17 09:57:43.982442,94,701,65, 26,2014-01-15 10:08:57.4019,334,872,412, 87,2014-01-15 16:23:16.535847,63,759,314, 26,2014-01-12 11:22:34.530184,723,888,490, 79,2014-01-11 17:42:31.352238,766,724,40, 79,2014-01-11 05:37:05.636973,804,920,584, 26,2014-01-14 15:46:23.538315,450,400,834, 87,2014-01-20 05:18:37.370473,172,876,296, 24,2014-01-12 00:44:30.351315,370,145,43, 94,2014-01-19 04:20:15.174626,135,918,358, 24,2014-01-19 12:42:47.148477,520,478,93, 26,2014-01-17 11:51:23.61998,763,11,492, 87,2014-01-18 12:40:31.233909,863,362,189, 26,2014-01-20 06:10:03.85167,46,341,291, 24,2014-01-11 13:38:07.820327,639,199,291, 33,2014-01-14 05:21:29.165646,424,343,722, 24,2014-01-18 16:07:06.678561,677,24,447, 33,2014-01-12 08:44:40.346845,586,596,984, 87,2014-01-10 22:18:15.610641,640,564,6, 24,2014-01-20 16:52:43.882326,796,406,210, 87,2014-01-11 10:10:55.029686,892,241,411, 33,2014-01-20 03:18:03.470362,219,665,525, 26,2014-01-21 01:31:30.937222,271,348,977, 26,2014-01-11 00:17:25.233229,713,584,463, 33,2014-01-11 21:40:24.853658,538,801,724, 79,2014-01-15 00:41:14.946678,165,280,956, 33,2014-01-19 07:10:17.865213,856,724,42, 94,2014-01-19 01:22:44.773065,961,165,905, 87,2014-01-17 04:59:52.885199,679,163,33, 24,2014-01-20 05:20:11.029021,575,349,78, 26,2014-01-21 04:56:05.655647,209,819,165, 33,2014-01-13 08:57:36.86806,156,836,540, 94,2014-01-14 09:29:08.430921,210,451,332, 33,2014-01-15 15:19:22.191494,852,526,854, 94,2014-01-15 00:20:11.398244,296,67,276, 79,2014-01-15 07:11:39.783158,782,383,376, 33,2014-01-16 
00:56:27.828466,240,447,845, 94,2014-01-18 20:06:34.936559,608,26,846, 94,2014-01-14 06:30:17.154051,396,931,404, 24,2014-01-17 05:40:58.17978,283,924,835, 79,2014-01-16 18:26:50.111604,205,936,598, 26,2014-01-12 13:13:04.302528,108,21,907, 24,2014-01-13 11:32:21.79976,900,835,929, 26,2014-01-14 01:07:21.345514,880,952,982, 87,2014-01-11 13:23:08.154199,965,793,270, 33,2014-01-14 05:10:29.620088,668,895,734, 24,2014-01-17 10:28:50.511159,245,895,249, 33,2014-01-19 09:30:14.833179,313,214,11, 79,2014-01-15 17:55:09.967351,636,34,358, 94,2014-01-17 12:47:06.122219,231,940,531, 33,2014-01-18 20:22:58.715505,655,301,568, 87,2014-01-16 08:06:04.997038,587,591,988, 79,2014-01-16 10:05:09.592581,77,551,797, 79,2014-01-18 15:54:29.652727,207,675,268, 87,2014-01-16 12:23:30.230686,241,11,755, 87,2014-01-17 11:45:34.44624,206,891,799, 24,2014-01-12 10:35:44.599405,358,989,69, 33,2014-01-14 13:33:48.0959,477,891,97, 24,2014-01-15 07:08:19.219722,431,198,444, 26,2014-01-18 19:35:37.485023,932,890,752, 87,2014-01-13 12:53:59.124921,568,52,137, 94,2014-01-15 06:04:24.875369,295,258,603, 26,2014-01-10 22:33:14.353083,341,410,682, 24,2014-01-17 02:23:07.555842,835,963,235, 94,2014-01-20 01:23:47.066862,38,900,443, 94,2014-01-15 02:53:25.751327,724,708,407, 26,2014-01-11 03:34:47.339419,989,80,362, 24,2014-01-14 20:20:14.895328,674,312,121, 24,2014-01-12 17:00:00.687886,54,702,980, 87,2014-01-14 14:05:04.49347,993,94,487, 94,2014-01-10 20:01:13.075108,729,395,566, 24,2014-01-18 15:26:50.799491,313,97,796, 33,2014-01-16 07:18:08.795999,154,119,88, 79,2014-01-13 11:33:34.874869,614,305,228, 24,2014-01-11 10:34:12.145004,138,52,471, 94,2014-01-17 00:41:16.950197,657,427,681, 94,2014-01-16 20:44:04.494956,727,785,725, 87,2014-01-18 01:03:02.656583,851,340,812, 26,2014-01-15 04:11:31.783376,582,86,275, 79,2014-01-11 08:39:14.462308,903,518,654, 24,2014-01-14 07:50:08.778802,329,260,191, 79,2014-01-12 18:34:30.498881,397,815,625, 87,2014-01-16 20:45:19.459765,754,595,72, 26,2014-01-19 21:55:18.371382,824,321,425, 79,2014-01-20 14:29:00.151608,788,14,678, 33,2014-01-12 03:08:49.690451,424,867,257, 33,2014-01-16 03:40:52.818041,310,257,713, 94,2014-01-11 19:04:44.751432,532,433,246, 26,2014-01-15 20:42:37.786351,1,277,51, 79,2014-01-20 14:49:12.037764,291,729,117, 33,2014-01-19 18:40:22.236456,525,585,33, 94,2014-01-18 13:36:36.911691,914,511,625, 87,2014-01-14 14:53:36.913132,53,112,14, 26,2014-01-19 21:13:36.58954,435,0,96, 94,2014-01-14 09:59:44.467532,285,57,459, 79,2014-01-13 10:17:23.979994,825,6,419, 33,2014-01-13 18:07:02.340867,828,919,149, 33,2014-01-14 17:34:31.806951,925,274,434, 94,2014-01-17 10:37:38.875322,647,83,850, 24,2014-01-15 15:07:03.028752,72,951,288, 79,2014-01-18 11:39:36.300422,763,355,484, 26,2014-01-17 10:38:51.95043,118,73,953, 79,2014-01-13 00:33:53.828661,568,64,748, 79,2014-01-13 12:57:45.09642,22,764,559, 26,2014-01-20 02:12:26.825299,304,199,319, 26,2014-01-13 15:08:05.973667,378,419,940, 26,2014-01-19 17:39:02.047036,93,63,787, 87,2014-01-15 16:56:31.320256,789,221,533, 26,2014-01-20 20:11:08.63025,5,411,860, 94,2014-01-13 15:50:33.830413,366,815,411, 26,2014-01-16 05:35:45.782983,658,602,404, 94,2014-01-13 22:01:17.409052,231,427,168, 87,2014-01-15 14:25:04.329714,52,947,41, 24,2014-01-11 20:21:05.242748,962,128,188, 79,2014-01-12 13:56:35.780853,659,361,861, 79,2014-01-14 22:54:04.481322,484,324,882, 79,2014-01-13 03:29:54.9332,85,728,389, 87,2014-01-17 21:37:28.598895,665,934,579, 24,2014-01-15 21:58:49.232755,580,711,444, 94,2014-01-18 04:12:32.719551,292,662,740, 
87,2014-01-17 06:26:40.636657,863,723,362, 26,2014-01-14 10:39:11.46921,583,251,904, 26,2014-01-15 11:49:09.631242,226,718,941, 24,2014-01-21 01:20:17.54979,518,742,499, 87,2014-01-13 01:52:48.05875,920,41,368, 33,2014-01-19 01:48:54.099193,316,399,199, 24,2014-01-13 05:37:41.529784,440,567,720, 24,2014-01-15 23:59:50.399617,655,928,480, 24,2014-01-12 13:23:25.906144,206,645,513, 24,2014-01-19 20:15:20.405106,654,65,437, 33,2014-01-20 19:06:53.428788,376,242,227, 26,2014-01-20 05:03:02.206984,867,638,495, 24,2014-01-16 00:54:12.355536,787,187,12, 87,2014-01-12 13:40:47.257449,421,177,416, 94,2014-01-12 12:00:47.303404,71,836,460, 87,2014-01-14 21:06:39.180836,226,454,989, 94,2014-01-15 08:48:53.231116,198,216,496, 79,2014-01-10 23:39:49.350441,744,23,953, 33,2014-01-19 18:03:10.501093,251,780,547, 33,2014-01-14 23:00:01.861785,36,664,499, 87,2014-01-13 19:30:23.181273,723,97,314, 24,2014-01-14 17:38:56.284076,990,420,930, 87,2014-01-18 01:01:19.270836,696,828,137, 33,2014-01-18 13:55:27.510986,148,7,283, 79,2014-01-15 18:00:01.526824,848,285,16, 87,2014-01-19 18:57:55.051691,600,915,662, 94,2014-01-12 06:49:31.992308,56,863,21, 94,2014-01-18 01:29:56.460024,895,365,880, 24,2014-01-16 10:35:23.650584,830,864,997, 94,2014-01-17 08:48:21.225482,178,764,750, 26,2014-01-14 23:42:29.179994,101,632,624, 33,2014-01-12 11:02:04.287243,126,493,490, 33,2014-01-20 23:27:32.694694,143,235,462, 94,2014-01-19 15:31:38.811235,138,115,119, 94,2014-01-12 06:22:21.837032,698,658,948, 87,2014-01-12 19:20:20.753863,850,392,58, 33,2014-01-17 11:20:32.910428,431,426,863, 94,2014-01-14 16:00:03.366816,409,726,912, 79,2014-01-17 23:20:11.153479,567,252,560, 87,2014-01-19 04:43:58.816992,692,62,542, 26,2014-01-13 06:15:23.771922,614,855,354, 10,2014-01-17 12:27:04.582267,333,55,984, 92,2014-01-18 03:47:01.023976,781,625,607, 35,2014-01-18 11:09:36.127459,201,787,743, 92,2014-01-19 06:07:51.839716,235,8,497, 35,2014-01-19 19:47:48.327381,574,39,916, 10,2014-01-12 02:16:15.308296,169,356,495, 10,2014-01-13 08:56:45.071251,813,676,720, 92,2014-01-19 23:27:37.678242,731,963,326, 10,2014-01-21 00:19:25.809807,805,220,950, 35,2014-01-17 11:56:46.933036,365,184,374, 92,2014-01-12 12:58:00.859515,902,636,942, 35,2014-01-14 11:58:22.093882,370,562,266, 10,2014-01-14 06:58:06.203873,317,978,869, 35,2014-01-20 06:53:28.370501,355,792,797, 35,2014-01-19 09:58:23.621126,175,412,657, 35,2014-01-12 19:56:01.255982,801,960,210, 10,2014-01-11 07:43:00.362809,244,224,127, 92,2014-01-16 05:28:20.08115,490,903,39, 10,2014-01-18 10:31:24.906567,591,683,790, 35,2014-01-17 20:31:21.588292,693,340,882, 35,2014-01-20 09:10:49.261144,983,7,486, 10,2014-01-20 01:33:29.194228,685,481,459, 35,2014-01-17 13:58:54.283405,543,232,872, 35,2014-01-18 18:42:28.072798,525,234,236, 35,2014-01-11 01:55:51.03126,148,221,420, 92,2014-01-19 13:19:15.037268,886,36,49, 92,2014-01-15 00:03:00.983227,71,793,535, 92,2014-01-14 21:55:54.398496,289,458,466, 35,2014-01-16 06:39:26.190747,318,570,377, 35,2014-01-12 22:46:59.800219,324,428,953, 10,2014-01-17 08:11:18.170418,442,669,123, 35,2014-01-12 13:06:30.773432,204,570,146, 35,2014-01-20 06:34:00.824195,393,763,253, 10,2014-01-14 13:20:54.298296,806,174,976, 35,2014-01-20 23:14:22.613148,254,419,345, 92,2014-01-18 20:21:49.151995,467,679,738, 35,2014-01-15 19:37:09.606592,145,198,509, 35,2014-01-13 02:11:07.684399,612,154,938, 10,2014-01-17 13:49:26.830236,266,583,680, 35,2014-01-15 13:56:35.416398,27,347,665, 92,2014-01-19 18:07:54.617435,290,367,677, 92,2014-01-19 06:47:27.689751,968,682,606, 35,2014-01-19 
05:54:57.5107,753,331,603, 92,2014-01-12 19:06:00.821727,465,936,726, 92,2014-01-18 07:40:56.060252,627,965,124, 10,2014-01-17 09:53:21.131826,135,159,150, 35,2014-01-14 19:02:02.077709,788,237,503, 35,2014-01-18 19:23:56.423061,101,883,583, 10,2014-01-12 09:21:41.212975,975,376,764, 35,2014-01-11 23:33:26.984276,708,437,461, 10,2014-01-15 09:55:18.011772,843,367,143, 35,2014-01-11 12:32:30.474538,353,713,755, 10,2014-01-21 05:06:56.178504,202,711,512, 35,2014-01-11 17:54:12.295177,356,641,796, 35,2014-01-19 11:14:58.547335,240,281,921, 92,2014-01-11 01:02:47.210184,457,699,744, 35,2014-01-20 11:13:27.332445,843,347,805, 92,2014-01-13 05:17:59.530562,360,832,945, 35,2014-01-15 02:58:41.60868,244,794,659, 92,2014-01-15 11:52:53.523191,981,695,283, 35,2014-01-15 08:04:59.3312,249,454,633, 10,2014-01-11 05:09:59.779098,51,903,355, 35,2014-01-17 04:59:24.296623,47,621,795, 10,2014-01-14 08:39:00.155395,279,201,445, 10,2014-01-14 22:30:54.077394,851,93,323, 10,2014-01-16 22:13:46.91019,575,513,696, 92,2014-01-11 23:00:49.307391,18,510,238, 92,2014-01-19 22:08:03.684404,214,99,4, 10,2014-01-19 04:24:54.594589,423,227,347, 92,2014-01-18 16:50:16.137627,323,256,878, 10,2014-01-14 06:04:39.100803,220,381,857, 35,2014-01-17 16:32:49.212443,324,141,986, 35,2014-01-16 17:37:43.827377,67,283,459, 92,2014-01-12 05:59:36.611503,381,470,260, 10,2014-01-19 15:38:50.03417,720,414,380, 10,2014-01-13 19:18:39.887629,201,655,299, 92,2014-01-18 19:52:57.743328,197,278,131, 10,2014-01-13 04:40:52.111879,596,652,83, 92,2014-01-11 08:42:36.311109,252,362,254, 10,2014-01-20 09:14:38.956723,226,677,754, 35,2014-01-14 08:14:19.096155,616,101,393, 10,2014-01-15 22:37:54.322881,608,220,918, 92,2014-01-21 01:47:09.43126,364,993,19, 35,2014-01-14 07:21:15.275078,406,797,549, 10,2014-01-16 20:32:06.618058,497,611,848, 10,2014-01-19 07:02:07.978596,792,466,782, 35,2014-01-14 12:24:02.485262,599,525,39, 10,2014-01-16 01:45:33.950503,651,710,648, 10,2014-01-11 06:20:07.509158,908,336,170, 35,2014-01-18 19:22:44.093943,430,514,840, 35,2014-01-20 17:38:27.473694,507,325,527, 92,2014-01-15 18:25:06.840358,796,330,718, 35,2014-01-19 04:32:43.87304,998,800,653, 10,2014-01-16 16:37:51.770736,341,51,638, 10,2014-01-19 07:04:06.996173,61,322,42, 10,2014-01-12 21:03:37.950853,182,533,87, 35,2014-01-12 08:51:38.680926,422,183,568, 92,2014-01-20 10:04:56.303563,706,470,192, 10,2014-01-11 13:11:41.635257,285,585,359, 92,2014-01-20 17:16:33.275515,456,59,27, 35,2014-01-17 20:55:12.441608,670,352,168, 92,2014-01-14 23:16:20.73606,725,452,243, 92,2014-01-17 03:49:22.487958,81,157,6, 92,2014-01-13 08:32:56.268986,624,658,930, 35,2014-01-16 09:15:57.347982,6,887,585, 35,2014-01-15 13:28:12.522128,977,751,418, 10,2014-01-16 07:51:36.157034,720,798,61, 35,2014-01-13 23:08:55.09131,571,175,278, 10,2014-01-17 22:09:04.634007,264,476,699, 92,2014-01-16 20:34:12.468144,918,932,200, 35,2014-01-13 02:23:34.048033,515,334,921, 92,2014-01-11 00:23:23.730581,824,2,212, 35,2014-01-11 13:12:06.791025,352,702,684, 35,2014-01-12 22:10:43.479293,563,886,570, 92,2014-01-14 11:44:39.005659,749,692,921, 35,2014-01-17 13:44:13.409084,411,421,488, 35,2014-01-10 23:12:51.457889,441,374,701, 92,2014-01-18 04:08:41.490922,325,827,960, 35,2014-01-12 09:29:47.359587,312,907,643, 35,2014-01-11 09:32:58.967467,848,11,157, 35,2014-01-15 17:31:25.584864,290,540,585, 10,2014-01-11 21:08:14.833281,515,275,563, 92,2014-01-16 07:58:05.807825,819,386,928, 35,2014-01-13 16:04:09.457904,746,521,607, 35,2014-01-17 17:46:06.604018,177,289,838, 35,2014-01-14 
09:02:12.803997,471,546,388, 92,2014-01-15 17:07:47.408757,764,488,712, 92,2014-01-19 06:37:45.284944,376,540,89, 35,2014-01-13 13:07:09.107979,291,51,713, 35,2014-01-16 10:19:29.044015,652,181,57, 10,2014-01-18 17:54:18.560878,325,7,768, 92,2014-01-20 14:02:21.549588,123,477,366, 92,2014-01-20 13:35:49.780494,909,687,830, 10,2014-01-14 15:43:41.048835,153,890,129, 92,2014-01-12 16:35:17.818574,668,945,74, 92,2014-01-15 16:51:47.128476,614,446,867, 10,2014-01-19 09:11:53.571382,479,159,933, 10,2014-01-18 04:26:53.975608,864,579,350, 10,2014-01-18 20:00:42.219787,421,225,92, 35,2014-01-16 01:20:58.20539,415,377,643, 35,2014-01-13 19:01:06.443752,243,555,592, 35,2014-01-21 02:24:16.26824,830,951,284, 10,2014-01-16 05:44:21.935971,144,595,640, 92,2014-01-14 12:13:13.234777,588,870,309, 35,2014-01-12 18:34:59.747532,524,654,808, 92,2014-01-19 21:29:00.94163,237,422,769, 92,2014-01-21 05:57:26.643861,826,815,4, 92,2014-01-12 21:47:51.205841,369,813,527, 92,2014-01-16 19:37:42.432552,771,767,976, 92,2014-01-12 09:27:14.003449,579,663,510, 10,2014-01-13 11:20:50.173307,655,283,493, 10,2014-01-11 07:09:08.017416,518,942,568, 10,2014-01-13 10:35:28.837149,128,77,126, 10,2014-01-18 23:18:55.981131,654,693,542, 10,2014-01-14 03:13:17.475739,414,431,103, 35,2014-01-20 08:21:35.441167,987,694,121, 35,2014-01-12 02:21:08.785548,794,253,570, 35,2014-01-19 00:21:04.884496,0,63,902, 92,2014-01-18 08:59:20.726529,409,809,611, 10,2014-01-14 19:28:17.893526,413,389,753, 10,2014-01-14 04:40:33.928929,132,278,34, 92,2014-01-15 20:53:39.287406,29,360,693, 35,2014-01-14 03:30:39.443114,559,458,243, 35,2014-01-13 12:16:23.709423,989,448,988, 92,2014-01-19 16:37:20.336243,228,554,305, 35,2014-01-16 00:05:57.261688,577,84,285, 92,2014-01-18 09:08:10.8379,279,657,179, 92,2014-01-17 19:49:13.907625,627,188,172, 92,2014-01-12 22:32:51.237716,547,242,430, 92,2014-01-15 23:08:53.057686,47,551,242, 35,2014-01-12 15:10:12.113433,277,731,544, 35,2014-01-15 21:33:57.681468,79,975,305, 35,2014-01-15 19:33:09.325926,329,118,157, 92,2014-01-18 00:54:34.049404,881,507,440, 92,2014-01-19 13:47:10.916664,555,572,417, 92,2014-01-17 18:08:09.073878,748,374,539, 10,2014-01-16 16:23:34.991453,69,745,570, 35,2014-01-19 13:44:37.560525,85,989,210, 10,2014-01-19 19:56:00.279719,996,143,466, 35,2014-01-12 06:01:17.424005,602,956,53, 35,2014-01-21 03:11:51.564393,717,796,599, 92,2014-01-12 01:16:50.453025,996,993,590, 35,2014-01-12 17:10:25.44184,242,314,822, 35,2014-01-13 07:47:20.401542,212,833,192, 92,2014-01-20 04:35:46.434156,163,326,991, 92,2014-01-16 00:23:42.917579,180,269,588, 10,2014-01-12 10:08:55.842708,711,49,370, 10,2014-01-11 00:56:55.219704,768,55,554, 10,2014-01-13 18:44:47.802074,784,128,303, 35,2014-01-19 23:08:16.569237,552,429,893, 92,2014-01-15 00:25:13.11323,215,738,621, 10,2014-01-17 03:25:21.731004,732,264,123, 35,2014-01-14 14:01:55.856644,782,783,892, 35,2014-01-18 07:55:52.556345,362,169,126, 10,2014-01-19 19:41:45.440427,193,847,592, 92,2014-01-13 00:39:16.193305,255,375,963, 92,2014-01-13 02:01:49.818451,962,59,560, 10,2014-01-16 22:49:56.278327,245,600,595, 10,2014-01-20 00:28:30.10093,552,444,40, 35,2014-01-15 04:34:41.056167,676,490,348, 10,2014-01-11 15:58:49.336432,573,137,477, 10,2014-01-11 09:38:42.214364,80,957,282, 35,2014-01-20 06:08:38.737634,272,956,352, 10,2014-01-16 15:31:58.662358,160,871,922, 92,2014-01-18 14:33:16.264187,591,105,815, 92,2014-01-18 13:55:49.654298,343,840,681, 35,2014-01-13 03:40:07.736236,923,227,336, 35,2014-01-14 00:56:51.255641,639,999,960, 92,2014-01-16 
21:40:27.215243,586,712,979, 92,2014-01-11 17:36:08.015954,489,484,907, 10,2014-01-15 10:58:08.679646,70,254,867, 10,2014-01-16 18:52:18.779635,300,142,377, 92,2014-01-12 22:52:58.46898,764,50,720, 92,2014-01-17 08:08:34.121485,443,845,430, 92,2014-01-19 06:39:39.181177,205,707,350, 10,2014-01-11 21:28:44.903555,933,277,551, 92,2014-01-12 02:32:17.039065,628,458,20, 35,2014-01-20 20:48:35.023885,363,29,292, 35,2014-01-12 02:25:40.123259,642,370,178, 35,2014-01-15 01:17:04.841558,135,737,356, 10,2014-01-19 13:56:51.593542,250,565,604, 35,2014-01-16 06:50:53.236909,695,640,22, 10,2014-01-10 22:42:26.572561,10,871,202, 10,2014-01-12 21:58:47.450185,945,417,11, 92,2014-01-13 08:46:45.793254,59,50,497, 10,2014-01-19 22:24:12.013407,945,229,188, 92,2014-01-15 02:38:03.643491,394,40,478, 35,2014-01-15 14:48:35.611705,324,469,172, 35,2014-01-15 15:14:08.291734,912,511,92, 35,2014-01-13 21:06:33.74484,523,950,349, 92,2014-01-19 23:23:16.667872,426,65,279, 10,2014-01-16 11:12:57.628167,302,776,97, 10,2014-01-14 10:45:15.959204,355,885,352, 35,2014-01-18 23:31:55.405925,56,350,456, 10,2014-01-11 20:44:56.290944,5,924,126, 35,2014-01-11 19:18:32.223392,887,700,10, 35,2014-01-16 07:27:45.060224,465,94,507, 35,2014-01-14 04:25:04.02718,20,971,587, 10,2014-01-15 00:15:23.479032,100,160,700, 10,2014-01-11 23:08:12.275467,145,594,204, 35,2014-01-15 02:01:12.043554,205,863,41, 92,2014-01-19 15:13:32.159097,87,239,287, 35,2014-01-17 22:00:31.055102,413,3,145, 35,2014-01-17 04:54:10.512534,34,318,644, 92,2014-01-15 17:22:06.280582,351,380,196, 10,2014-01-15 22:40:10.236279,962,645,517, 10,2014-01-18 06:22:55.41609,387,262,958, 10,2014-01-16 23:54:23.320066,636,61,816, 10,2014-01-15 13:28:45.260584,32,907,24, 92,2014-01-19 12:48:35.539769,724,161,572, 35,2014-01-21 05:11:28.161623,347,344,245, 92,2014-01-13 21:25:36.854126,378,428,837, 10,2014-01-14 13:39:28.776677,589,409,783, 92,2014-01-10 21:53:54.734184,314,724,275, 35,2014-01-15 23:24:24.304311,501,311,905, 10,2014-01-17 02:26:14.569931,998,333,46, 35,2014-01-20 00:18:06.747593,923,208,193, 92,2014-01-20 06:02:27.948222,717,647,297, 35,2014-01-11 11:14:50.182055,24,755,869, 35,2014-01-14 09:32:15.039746,287,732,605, 92,2014-01-12 21:09:01.693061,645,509,596, 35,2014-01-20 14:38:06.849927,978,459,122, 10,2014-01-20 00:45:12.667912,883,253,798, 10,2014-01-16 11:54:17.652266,514,212,92, 10,2014-01-18 08:10:02.255853,298,34,412, 35,2014-01-21 01:30:08.958856,471,250,413, 10,2014-01-17 11:12:49.876076,682,11,172, 92,2014-01-13 09:37:47.316496,957,438,71, 92,2014-01-13 23:55:12.986456,13,505,628, 35,2014-01-11 05:28:13.355108,615,218,433, 10,2014-01-14 12:45:59.591962,133,93,727, 35,2014-01-18 05:56:25.03001,199,244,957, 35,2014-01-20 00:41:45.514204,637,874,856, 92,2014-01-11 04:46:30.647483,967,261,566, 92,2014-01-14 04:50:35.542544,37,482,852, 92,2014-01-14 12:03:51.795205,833,547,168, 92,2014-01-16 07:26:40.883763,992,373,490, 10,2014-01-11 05:13:30.959053,794,114,571, 10,2014-01-20 15:58:15.115271,742,723,357, 35,2014-01-21 00:55:26.144347,468,153,160, 92,2014-01-19 22:02:06.498821,998,406,837, 92,2014-01-20 15:09:43.276894,63,265,162, 10,2014-01-13 16:21:02.998474,981,46,695, 10,2014-01-13 05:41:35.275499,360,256,977, 10,2014-01-20 17:03:38.011078,712,155,845, 92,2014-01-18 19:45:27.303204,291,821,195, 92,2014-01-19 12:07:49.84543,596,759,876, 35,2014-01-19 11:21:44.75909,825,91,743, 35,2014-01-17 19:47:55.251426,60,225,907, 92,2014-01-20 03:22:40.027485,619,563,781, 92,2014-01-12 14:53:59.798835,478,303,512, 92,2014-01-19 20:56:56.944489,675,932,839, 
10,2014-01-19 12:00:46.877832,539,760,994, 92,2014-01-11 09:39:12.466748,937,392,404, 10,2014-01-15 02:51:14.596754,513,413,849, 92,2014-01-16 14:10:49.133684,864,226,941, 35,2014-01-11 05:09:21.426023,419,791,60, 92,2014-01-11 08:04:04.472831,109,223,367, 10,2014-01-19 03:48:36.45018,489,432,22, 92,2014-01-14 09:04:34.412479,444,832,783, 92,2014-01-11 17:32:17.828358,753,216,340, 10,2014-01-12 10:34:36.042561,915,396,39, 10,2014-01-11 09:00:59.44249,306,761,846, 92,2014-01-20 22:14:03.342562,410,235,954, 92,2014-01-12 19:21:06.690045,833,773,442, 10,2014-01-14 17:51:34.985033,828,824,693, 92,2014-01-14 04:17:55.137768,308,364,0, 35,2014-01-18 06:47:47.573807,252,971,641, 35,2014-01-15 03:05:05.944085,357,571,890, 10,2014-01-13 14:16:10.253039,902,666,172, 35,2014-01-18 01:43:13.718574,876,358,136, 10,2014-01-13 19:07:12.442907,495,201,675, 10,2014-01-12 23:25:53.529932,587,169,352, 10,2014-01-20 22:04:16.717048,433,495,333, 92,2014-01-16 04:48:47.718405,634,128,333, 92,2014-01-12 10:29:31.54143,69,522,311, 10,2014-01-18 11:49:44.020252,932,123,904, 35,2014-01-14 10:56:37.564254,148,431,306, 35,2014-01-21 01:51:16.300521,323,999,700, 35,2014-01-15 01:37:39.271679,583,121,360, 10,2014-01-13 08:19:17.59174,824,664,733, 48,2014-01-12 10:45:16.099356,279,638,539, 48,2014-01-13 16:34:36.216168,692,110,563, 48,2014-01-11 14:20:04.469572,294,909,709, 48,2014-01-13 00:24:28.566104,892,477,242, 48,2014-01-17 23:25:50.813341,465,846,135, 48,2014-01-17 08:30:53.603676,546,669,364, 48,2014-01-13 09:33:49.992127,720,763,939, 48,2014-01-18 11:29:55.286172,80,107,214, 48,2014-01-15 06:19:30.053856,2,621,657, 48,2014-01-16 22:38:24.404606,43,14,554, 48,2014-01-19 09:02:13.11453,904,851,314, 48,2014-01-16 20:54:06.096418,630,202,888, 48,2014-01-17 11:39:23.847096,658,329,692, 48,2014-01-19 01:16:16.457511,448,342,418, 48,2014-01-18 20:15:12.786463,302,291,66, 48,2014-01-10 23:30:58.832548,801,438,607, 48,2014-01-11 23:34:11.595279,243,820,87, 48,2014-01-15 21:03:00.36069,542,186,65, 48,2014-01-15 06:36:04.776634,311,847,819, 48,2014-01-14 17:50:21.848737,666,840,744, 48,2014-01-12 16:46:14.079264,804,50,485, 48,2014-01-18 05:43:17.21996,601,396,412, 48,2014-01-16 21:16:15.37867,478,279,159, 48,2014-01-12 08:50:30.796311,785,659,597, 48,2014-01-13 04:32:04.938366,757,840,380, 48,2014-01-18 11:45:46.9201,67,861,360, 48,2014-01-20 00:40:14.816983,519,62,988, 48,2014-01-16 19:28:42.50262,633,50,72, 48,2014-01-18 07:37:03.220621,141,138,78, 48,2014-01-13 20:17:54.088662,21,87,262, 48,2014-01-19 07:48:00.094778,279,275,627, 48,2014-01-19 22:22:19.319977,412,866,487, 48,2014-01-16 16:52:30.305249,894,151,736, 48,2014-01-20 02:08:04.56435,49,711,839, 48,2014-01-11 16:46:47.8865,952,9,439, 48,2014-01-13 10:18:21.11859,373,63,605, 48,2014-01-16 04:38:58.168026,20,491,927, 48,2014-01-14 06:20:37.878627,558,719,872, 48,2014-01-21 01:48:16.404761,765,522,991, 48,2014-01-20 14:58:28.222301,341,992,17, 48,2014-01-20 08:59:02.283653,95,573,422, 48,2014-01-19 04:50:29.51971,107,564,291, 48,2014-01-16 05:52:34.318719,981,564,638, 48,2014-01-16 14:38:26.130749,75,973,47, 48,2014-01-17 00:06:45.977222,243,580,21, 48,2014-01-13 20:07:47.1056,5,569,20, 48,2014-01-16 18:09:24.963297,620,955,893, 48,2014-01-18 03:40:57.572501,97,263,984, 48,2014-01-18 21:10:47.46629,414,964,981, 48,2014-01-21 04:45:29.74035,937,689,317, 48,2014-01-11 15:31:19.421239,44,81,962, 48,2014-01-20 17:57:01.545553,200,244,630, 48,2014-01-18 04:28:46.96031,1,649,509, 48,2014-01-17 16:47:34.800327,0,592,739, 48,2014-01-11 20:47:32.341865,786,779,622, 
48,2014-01-20 13:00:51.898676,535,485,710, 48,2014-01-14 22:33:21.720427,143,235,860, 48,2014-01-21 01:27:47.158848,660,351,699, 48,2014-01-16 02:29:34.401715,128,246,409, 48,2014-01-12 00:10:24.941467,244,7,756, 48,2014-01-13 15:45:41.247929,633,993,555, 48,2014-01-14 04:17:34.496493,964,897,254, 48,2014-01-21 02:32:44.261444,399,755,845, 48,2014-01-19 12:38:11.553178,834,101,671, 48,2014-01-13 00:25:39.060844,779,842,611, 48,2014-01-11 13:19:32.147944,473,713,439, 48,2014-01-11 16:56:32.671768,818,483,915, 48,2014-01-18 09:04:37.229289,385,210,313, 48,2014-01-14 23:40:10.026991,541,471,856, 48,2014-01-11 12:44:49.076529,591,529,394, 48,2014-01-17 18:03:05.45159,541,482,660, 48,2014-01-14 02:39:12.310644,678,521,542, 48,2014-01-19 21:35:18.596241,704,759,467, 48,2014-01-12 17:55:39.770307,86,962,770, 48,2014-01-19 21:17:38.441393,326,282,378, 48,2014-01-15 15:42:04.573462,473,575,750, 48,2014-01-15 18:03:26.875908,890,72,331, 48,2014-01-15 09:27:03.405108,565,601,158, 48,2014-01-12 13:23:02.146382,626,724,96, 48,2014-01-13 09:14:14.342198,392,45,38, 48,2014-01-15 08:12:33.145459,300,284,481, 48,2014-01-13 08:54:21.567621,844,246,847, 48,2014-01-12 21:11:15.887751,636,874,28, 48,2014-01-12 06:41:20.105769,683,765,479, 48,2014-01-20 05:41:56.367948,217,903,879, 48,2014-01-13 21:58:48.230034,51,955,192, 48,2014-01-11 13:42:12.004445,148,627,831, 48,2014-01-13 22:15:18.088794,136,765,483, 48,2014-01-13 17:26:35.388882,152,973,71, 48,2014-01-16 20:11:46.406159,661,93,387, 48,2014-01-15 02:25:43.030261,250,674,791, 48,2014-01-16 13:12:16.636811,873,946,71, 48,2014-01-20 04:29:20.902653,396,990,183, 48,2014-01-14 22:58:27.291705,847,147,988, 48,2014-01-14 19:50:28.189989,664,458,795, 48,2014-01-11 22:54:59.963916,54,414,571, 48,2014-01-15 16:17:59.440068,3,187,270, 48,2014-01-15 16:47:00.861756,41,224,793, 48,2014-01-19 11:59:37.193203,794,100,859, 48,2014-01-19 19:58:09.467058,993,387,239, 48,2014-01-16 09:31:49.938705,223,589,97, 48,2014-01-16 00:02:42.644793,500,571,922, 48,2014-01-12 16:37:21.777702,379,111,57, 48,2014-01-15 01:07:08.534945,139,175,285, 48,2014-01-17 21:58:22.4151,736,378,936, 48,2014-01-11 07:55:00.219094,57,546,745, 5,2014-01-19 20:49:13.108826,737,811,477, 53,2014-01-12 10:01:49.291427,283,644,144, 5,2014-01-15 21:22:03.624203,430,462,691, 53,2014-01-11 04:12:15.255208,242,281,532, 5,2014-01-14 23:16:03.633625,972,834,63, 53,2014-01-20 09:34:36.769661,857,409,676, 5,2014-01-13 17:06:36.822829,467,299,44, 5,2014-01-17 00:27:19.521795,786,764,834, 53,2014-01-11 10:15:56.875431,712,34,806, 5,2014-01-12 16:48:33.191196,52,79,496, 5,2014-01-20 02:26:07.75183,971,558,456, 53,2014-01-12 03:58:08.880295,863,163,645, 5,2014-01-15 19:03:51.27999,225,471,449, 53,2014-01-12 13:52:43.140711,921,843,288, 53,2014-01-18 04:09:55.286454,357,991,927, 5,2014-01-20 01:29:34.310251,570,307,912, 5,2014-01-18 07:04:59.777942,449,886,80, 53,2014-01-17 02:39:16.189527,442,314,207, 5,2014-01-13 18:28:01.601956,67,176,10, 53,2014-01-11 20:55:27.967931,975,404,366, 53,2014-01-18 05:34:16.153442,907,669,503, 5,2014-01-18 14:46:01.042025,284,546,244, 53,2014-01-16 17:42:28.830106,694,939,343, 5,2014-01-16 11:33:53.346646,739,184,842, 53,2014-01-17 04:44:10.509083,846,243,483, 5,2014-01-11 21:14:18.768811,591,534,737, 53,2014-01-11 05:36:35.991439,667,388,430, 5,2014-01-19 01:21:32.286784,642,581,244, 5,2014-01-16 02:21:27.303756,91,674,830, 53,2014-01-18 07:34:58.406539,94,664,969, 5,2014-01-19 13:16:32.506298,434,519,670, 5,2014-01-14 17:10:40.412583,378,218,690, 5,2014-01-19 
21:36:47.697967,352,109,401, 53,2014-01-14 04:38:36.1305,965,203,298, 53,2014-01-15 01:22:55.667792,877,206,615, 53,2014-01-13 14:52:51.33201,984,739,774, 5,2014-01-13 08:13:12.900581,902,131,575, 5,2014-01-17 22:29:32.49104,262,447,872, 5,2014-01-19 19:20:10.853806,550,910,250, 5,2014-01-13 22:29:09.776011,687,352,519, 53,2014-01-19 19:18:05.682235,68,394,920, 53,2014-01-18 15:46:18.605635,301,726,512, 5,2014-01-15 06:27:18.656306,923,803,700, 53,2014-01-14 08:21:56.962226,828,539,658, 5,2014-01-20 09:39:01.746348,484,33,837, 5,2014-01-12 04:37:13.94318,281,545,971, 53,2014-01-13 03:51:31.272477,265,951,306, 53,2014-01-17 10:44:01.52429,395,843,87, 5,2014-01-18 11:16:30.132707,847,260,438, 53,2014-01-16 02:19:32.874433,101,554,137, 5,2014-01-18 11:39:29.49264,529,543,256, 5,2014-01-15 10:50:46.286149,73,152,558, 5,2014-01-13 11:05:33.916877,130,823,696, 5,2014-01-13 23:21:58.322746,888,624,391, 53,2014-01-21 02:24:39.632795,178,769,310, 53,2014-01-19 19:49:44.42596,673,624,22, 5,2014-01-15 00:36:17.091557,394,417,359, 5,2014-01-11 02:01:15.624232,732,594,851, 53,2014-01-17 15:11:16.713164,285,617,266, 5,2014-01-20 06:57:44.395315,722,977,830, 5,2014-01-18 13:36:14.030772,523,187,496, 53,2014-01-15 22:27:49.219461,76,445,570, 5,2014-01-13 18:08:24.808316,146,817,772, 5,2014-01-17 05:13:01.729157,344,988,768, 53,2014-01-19 07:06:25.349961,37,517,866, 5,2014-01-17 23:31:20.476107,792,825,860, 53,2014-01-20 00:05:53.061168,254,35,511, 5,2014-01-11 09:19:38.250543,825,724,751, 5,2014-01-14 16:00:52.967147,871,702,794, 53,2014-01-18 13:26:03.914973,912,170,677, 53,2014-01-14 11:48:48.026974,974,781,312, 5,2014-01-13 05:18:58.649383,73,474,93, 53,2014-01-15 23:12:22.520608,211,335,377, 53,2014-01-18 22:16:06.68328,204,193,750, 53,2014-01-16 17:40:55.611608,54,287,130, 5,2014-01-15 02:51:24.266956,255,603,871, 5,2014-01-20 06:53:20.62646,679,180,55, 53,2014-01-19 01:32:26.884085,294,72,916, 5,2014-01-11 07:35:25.791665,510,183,585, 5,2014-01-17 12:09:50.759168,296,720,641, 5,2014-01-13 21:51:59.758939,148,812,687, 53,2014-01-18 23:14:55.284304,317,298,533, 53,2014-01-11 17:00:37.045317,42,61,246, 5,2014-01-16 12:57:33.675816,901,595,156, 53,2014-01-11 16:36:53.60705,441,963,220, 5,2014-01-11 13:25:16.67811,918,687,540, 53,2014-01-15 02:47:18.101776,274,874,460, 5,2014-01-15 21:13:10.698608,407,378,919, 53,2014-01-11 19:26:32.302344,593,151,977, 5,2014-01-11 11:58:34.81494,113,972,416, 53,2014-01-14 22:10:55.093922,883,753,689, 5,2014-01-19 13:02:46.333534,692,877,932, 5,2014-01-16 14:26:24.034401,628,666,312, 53,2014-01-17 20:19:19.902239,795,957,452, 53,2014-01-15 12:15:48.062691,367,80,1000, 53,2014-01-14 15:32:49.384781,654,946,418, 5,2014-01-14 13:50:40.378765,677,834,751, 53,2014-01-14 06:21:41.123858,425,50,110, 53,2014-01-15 04:52:27.635324,968,961,454, 5,2014-01-18 09:51:33.345911,35,250,971, 53,2014-01-11 13:47:45.038832,768,206,226, 5,2014-01-18 20:41:15.662298,195,838,237, 5,2014-01-20 19:10:31.995714,135,356,987, 5,2014-01-16 17:00:07.559441,859,575,955, 5,2014-01-16 12:57:22.345997,180,557,392, 53,2014-01-16 06:51:27.607322,832,209,16, 5,2014-01-20 23:51:31.826397,735,812,252, 5,2014-01-15 13:50:42.972457,228,326,548, 5,2014-01-14 02:23:54.491827,65,873,764, 5,2014-01-11 01:26:57.618061,818,164,137, 53,2014-01-11 20:00:33.732044,783,197,897, 5,2014-01-17 04:15:54.250766,783,195,837, 53,2014-01-19 04:41:52.902366,16,439,390, 5,2014-01-12 17:01:10.77736,939,552,834, 5,2014-01-12 11:13:27.92658,130,72,288, 53,2014-01-20 01:18:46.509417,812,507,648, 5,2014-01-13 
10:26:27.455891,919,784,584, 53,2014-01-16 18:00:46.028356,235,368,230, 5,2014-01-14 16:31:57.208025,693,43,519, 53,2014-01-14 09:52:59.758234,559,67,953, 53,2014-01-17 09:59:20.843715,574,111,327, 53,2014-01-18 18:42:52.302366,775,295,526, 53,2014-01-12 16:55:46.091768,837,643,48, 5,2014-01-12 18:25:44.878116,985,14,158, 5,2014-01-15 09:02:12.204604,358,565,296, 5,2014-01-17 09:11:34.154459,56,248,647, 53,2014-01-16 13:58:34.262897,667,453,626, 5,2014-01-19 02:52:52.583369,872,206,596, 5,2014-01-20 19:33:15.278318,356,85,554, 5,2014-01-20 22:51:01.898641,427,829,489, 5,2014-01-16 06:44:25.929699,650,260,510, 53,2014-01-11 03:21:00.31715,771,498,344, 5,2014-01-18 13:32:17.560939,708,344,735, 53,2014-01-15 19:54:57.925414,126,149,587, 5,2014-01-17 00:21:07.876591,186,188,689, 53,2014-01-13 20:29:39.906935,583,206,455, 5,2014-01-10 20:46:25.532735,562,141,184, 5,2014-01-16 18:12:39.703406,843,620,790, 53,2014-01-18 14:20:22.879811,350,218,767, 5,2014-01-14 03:10:20.024562,981,712,838, 53,2014-01-16 23:39:37.321468,664,827,42, 5,2014-01-19 14:20:56.611855,671,76,162, 53,2014-01-20 11:26:14.275327,810,179,132, 53,2014-01-14 22:21:30.224253,970,208,187, 53,2014-01-11 01:22:07.389216,937,362,763, 53,2014-01-11 16:39:42.201908,355,872,167, 5,2014-01-13 17:40:16.73367,404,538,819, 5,2014-01-13 15:48:34.845106,48,214,519, 5,2014-01-17 14:40:28.230683,90,329,41, 53,2014-01-17 14:12:13.942114,471,224,235, 5,2014-01-17 05:41:34.603759,507,503,224, 53,2014-01-13 18:39:49.074398,696,396,94, 5,2014-01-15 02:55:06.244479,294,758,236, 53,2014-01-19 02:37:20.695527,870,47,340, 5,2014-01-15 17:05:33.952514,614,212,53, 5,2014-01-19 15:57:18.449082,898,766,389, 5,2014-01-15 05:48:54.850406,540,614,850, 53,2014-01-11 01:04:08.21583,371,786,43, 53,2014-01-17 12:50:11.03287,446,51,880, 5,2014-01-14 19:22:10.128724,124,924,340, 53,2014-01-21 03:55:10.114471,284,961,900, 5,2014-01-12 13:34:36.96257,665,929,799, 5,2014-01-15 02:43:10.446293,156,736,796, 5,2014-01-18 11:27:27.675408,123,33,163, 5,2014-01-17 13:29:34.887982,179,1,757, 53,2014-01-10 21:04:18.322883,163,550,638, 53,2014-01-11 01:57:07.582764,847,241,256, 5,2014-01-17 14:16:00.420718,936,420,6, 5,2014-01-16 19:16:58.02629,89,830,579, 5,2014-01-18 20:17:30.462575,947,382,498, 53,2014-01-20 21:26:20.445699,675,975,966, 5,2014-01-12 12:56:35.348176,92,934,379, 53,2014-01-17 04:38:27.07443,254,461,834, 53,2014-01-20 02:52:34.721026,706,66,556, 5,2014-01-16 15:18:05.572429,764,160,311, 5,2014-01-17 10:00:34.463647,585,790,449, 53,2014-01-20 23:32:16.923354,944,801,240, 5,2014-01-19 12:58:22.306099,360,239,832, 53,2014-01-20 05:49:09.309172,89,783,782, 53,2014-01-17 08:12:45.154037,892,67,429, 77,2014-01-15 21:10:36.248211,624,608,746, 50,2014-01-16 05:30:43.912931,1,348,428, 50,2014-01-20 06:52:34.228435,945,795,85, 77,2014-01-20 04:05:42.492691,994,439,798, 77,2014-01-14 02:08:04.608459,552,216,211, 77,2014-01-14 17:58:08.180949,458,616,440, 50,2014-01-18 14:03:00.942193,753,966,504, 77,2014-01-18 11:56:59.458865,242,96,103, 77,2014-01-14 23:02:16.396779,933,678,525, 77,2014-01-14 20:53:11.975062,660,410,968, 50,2014-01-12 01:19:09.588008,779,298,756, 50,2014-01-14 20:57:26.51125,167,553,59, 50,2014-01-16 14:27:48.937631,100,584,670, 77,2014-01-16 08:02:20.0343,236,514,337, 77,2014-01-12 02:24:54.187077,398,682,344, 50,2014-01-12 21:57:23.825615,349,920,899, 77,2014-01-16 09:06:38.357183,223,284,339, 77,2014-01-12 08:22:01.769841,76,67,328, 77,2014-01-19 16:13:24.246331,425,580,363, 77,2014-01-11 22:23:36.383892,6,285,753, 77,2014-01-20 
08:39:32.232416,332,298,54, 77,2014-01-19 07:39:44.692031,134,44,293, 77,2014-01-13 15:20:11.732069,8,748,567, 50,2014-01-16 07:17:59.306846,807,241,793, 77,2014-01-18 04:32:19.413476,541,195,685, 77,2014-01-19 10:38:17.304498,244,851,176, 77,2014-01-12 11:18:33.770493,207,503,214, 77,2014-01-17 22:04:36.33683,873,235,380, 50,2014-01-17 17:36:39.610597,134,154,884, 77,2014-01-11 11:07:43.079665,381,463,834, 77,2014-01-14 00:17:21.490868,196,751,175, 50,2014-01-12 08:47:15.859227,554,212,405, 50,2014-01-16 20:38:26.992597,508,433,246, 50,2014-01-13 01:09:55.719303,986,989,350, 77,2014-01-11 06:52:58.351919,828,525,181, 50,2014-01-20 02:46:31.601475,354,581,51, 77,2014-01-16 23:08:03.900252,113,96,911, 50,2014-01-19 00:55:59.29411,423,27,863, 77,2014-01-17 08:43:31.06034,38,399,963, 77,2014-01-21 02:10:20.29745,570,564,295, 77,2014-01-12 15:49:11.269173,684,930,322, 77,2014-01-18 14:02:40.648348,866,213,199, 50,2014-01-14 17:07:46.8087,67,740,50, 77,2014-01-18 10:17:00.206804,801,63,613, 77,2014-01-13 16:05:00.682648,866,571,727, 50,2014-01-15 23:32:40.995777,959,168,941, 50,2014-01-20 12:14:24.032419,715,934,872, 77,2014-01-19 05:11:39.04025,942,212,235, 77,2014-01-17 11:54:42.765618,267,412,281, 50,2014-01-18 22:27:48.27917,160,778,718, 50,2014-01-20 07:35:15.424143,527,890,541, 50,2014-01-16 14:34:14.998033,533,396,488, 50,2014-01-17 00:07:32.9712,487,464,615, 77,2014-01-12 16:55:27.15621,803,367,218, 77,2014-01-11 15:52:14.304879,812,265,193, 50,2014-01-13 22:39:52.384677,101,718,491, 50,2014-01-10 21:33:44.460708,489,895,578, 77,2014-01-13 07:10:48.075792,712,514,123, 50,2014-01-21 00:44:28.721506,902,366,776, 50,2014-01-17 19:10:24.071305,645,950,157, 77,2014-01-13 22:18:31.155456,284,645,328, 50,2014-01-13 19:01:50.212374,561,214,616, 50,2014-01-19 07:57:39.930532,487,141,334, 77,2014-01-19 22:56:58.148472,310,220,102, 50,2014-01-16 00:11:45.931677,59,250,947, 50,2014-01-19 18:50:38.28287,755,787,706, 77,2014-01-18 19:43:29.749947,750,941,506, 77,2014-01-11 17:19:49.832348,692,657,177, 50,2014-01-17 13:46:37.57698,373,134,276, 77,2014-01-14 22:27:00.810286,408,160,957, 77,2014-01-11 13:30:10.129798,831,832,126, 77,2014-01-19 09:35:48.846572,349,170,853, 77,2014-01-12 06:29:41.459052,995,172,813, 77,2014-01-15 10:37:56.938498,245,421,786, 77,2014-01-16 13:52:49.053377,798,729,943, 77,2014-01-15 02:34:42.1417,39,920,758, 77,2014-01-20 14:10:37.934695,495,595,767, 77,2014-01-15 20:07:13.086215,91,673,732, 50,2014-01-13 01:46:21.181951,255,553,626, 50,2014-01-16 20:05:20.700312,509,591,369, 50,2014-01-13 12:35:01.365384,277,432,219, 50,2014-01-12 03:21:36.606094,311,681,578, 50,2014-01-12 04:39:35.698765,935,623,338, 77,2014-01-19 16:42:34.336584,689,294,255, 50,2014-01-14 00:17:03.762304,502,975,545, 50,2014-01-13 00:31:50.003645,510,255,99, 50,2014-01-12 09:22:26.72168,114,950,345, 50,2014-01-14 01:50:48.223012,710,417,123, 50,2014-01-15 11:42:38.079436,317,636,41, 50,2014-01-12 04:06:55.443186,846,512,453, 77,2014-01-21 01:01:12.294736,992,823,953, 77,2014-01-18 14:01:09.235311,842,851,239, 50,2014-01-15 03:08:45.65598,894,657,801, 77,2014-01-19 02:58:52.225269,350,995,796, 77,2014-01-17 06:58:07.383783,644,232,415, 77,2014-01-20 07:20:31.587657,471,373,86, 77,2014-01-17 15:49:30.508139,465,358,922, 77,2014-01-14 20:41:37.133729,156,234,677, 77,2014-01-21 04:40:21.420005,23,202,254, 77,2014-01-13 23:36:08.085538,184,631,793, 50,2014-01-18 23:08:37.944016,683,556,955, 77,2014-01-11 12:10:31.549803,18,880,68, 77,2014-01-12 03:11:56.93211,234,16,21, 77,2014-01-20 
09:38:19.403068,444,158,177, 50,2014-01-16 02:48:28.48872,84,337,890, 77,2014-01-17 21:04:45.985486,292,128,556, 77,2014-01-14 06:13:01.545188,750,369,870, 77,2014-01-15 10:59:06.423415,972,345,999, 50,2014-01-12 11:11:59.071701,577,606,515, 50,2014-01-16 11:59:22.727139,174,161,282, 50,2014-01-11 01:04:27.123727,222,917,677, 50,2014-01-15 03:47:00.437085,628,101,479, 77,2014-01-17 19:20:59.333651,512,393,296, 50,2014-01-12 09:44:02.822492,115,728,131, 50,2014-01-13 14:29:34.77409,810,581,347, 50,2014-01-20 23:38:03.095955,435,531,109, 77,2014-01-14 14:15:52.826556,213,480,911, 50,2014-01-15 03:52:01.495769,525,378,796, 50,2014-01-13 19:28:51.319386,599,897,446, 77,2014-01-19 05:58:30.905992,870,580,424, 50,2014-01-16 11:58:56.938956,962,264,10, 77,2014-01-13 14:30:03.614123,650,143,148, 50,2014-01-16 13:59:40.141303,320,263,681, 50,2014-01-20 19:07:42.594936,672,89,944, 77,2014-01-11 11:28:55.839391,919,401,140, 50,2014-01-12 14:57:47.525086,584,619,832, 77,2014-01-19 20:28:14.183012,832,30,262, 77,2014-01-18 07:18:26.347949,27,720,289, 77,2014-01-16 15:39:24.658817,929,345,263, 77,2014-01-19 19:08:35.603017,474,990,977, 50,2014-01-11 00:54:34.433486,505,796,31, 50,2014-01-14 08:48:02.603251,815,492,532, 77,2014-01-20 11:19:07.15324,725,585,318, 77,2014-01-12 08:06:31.365597,848,908,125, 50,2014-01-13 12:26:22.006319,807,651,606, 50,2014-01-15 08:07:35.641959,136,856,840, 77,2014-01-19 09:11:17.351083,958,999,210, 50,2014-01-16 22:39:23.551506,54,125,503, 50,2014-01-19 23:06:42.065374,127,63,73, 50,2014-01-21 00:23:16.423204,544,609,690, 77,2014-01-12 04:38:46.279064,639,464,937, 77,2014-01-20 04:11:09.189101,19,435,753, 77,2014-01-14 22:10:16.86029,80,651,882, 77,2014-01-19 03:59:45.612715,884,289,813, 50,2014-01-11 07:55:12.012013,835,473,510, 50,2014-01-17 16:39:51.634379,831,697,26, 77,2014-01-18 21:37:48.70867,28,828,997, 77,2014-01-15 02:11:04.838569,627,909,718, 77,2014-01-11 14:31:53.130148,741,18,45, 77,2014-01-11 11:06:40.028055,456,521,301, 77,2014-01-13 02:09:35.74456,49,455,471, 77,2014-01-17 06:30:50.069523,909,53,459, 50,2014-01-14 05:36:43.642178,941,889,422, 77,2014-01-18 20:09:15.886282,745,498,91, 50,2014-01-16 19:38:32.664459,192,981,966, 50,2014-01-14 21:05:39.481989,894,251,914, 77,2014-01-20 15:07:03.41137,956,9,488, 50,2014-01-15 10:06:46.847472,465,84,39, 77,2014-01-11 22:24:05.829936,562,181,329, 77,2014-01-16 00:46:28.070604,481,527,244, 77,2014-01-13 23:15:22.450907,674,828,954, 77,2014-01-12 03:18:40.263423,569,390,889, 50,2014-01-19 13:34:30.673855,316,801,677, 50,2014-01-13 04:34:29.604147,581,436,869, 50,2014-01-13 15:25:11.62902,139,17,162, 50,2014-01-11 20:00:52.680174,344,337,478, 77,2014-01-17 16:42:05.246106,494,24,365, 50,2014-01-11 18:36:28.980104,887,947,852, 50,2014-01-17 22:40:16.2321,563,98,807, 77,2014-01-16 09:48:47.311481,533,498,461, 77,2014-01-11 12:59:45.403308,61,845,758, 77,2014-01-19 07:19:02.511164,658,580,883, 77,2014-01-15 07:59:56.500582,828,221,422, 77,2014-01-15 15:10:02.264017,233,229,206, 77,2014-01-17 05:18:48.123878,128,354,422, 50,2014-01-15 19:55:08.512595,16,623,864, 50,2014-01-12 01:49:53.898396,430,88,950, 77,2014-01-14 20:56:36.832548,704,278,971, 50,2014-01-20 02:06:13.351164,562,492,706, 77,2014-01-12 20:21:47.028964,574,7,512, 4,2014-01-15 12:03:16.861022,346,529,541, 14,2014-01-11 22:15:49.096143,395,371,629, 61,2014-01-19 06:52:37.098486,668,998,655, 54,2014-01-18 21:40:00.5032,172,368,813, 14,2014-01-19 22:25:04.982426,382,623,463, 93,2014-01-14 20:31:09.762946,745,153,773, 7,2014-01-12 12:45:39.985189,923,659,306, 
4,2014-01-19 07:32:08.393795,380,491,276, 4,2014-01-19 10:37:56.610836,892,307,876, 93,2014-01-13 15:09:45.815125,317,525,325, 61,2014-01-14 02:18:36.4644,297,438,665, 7,2014-01-12 03:53:19.061744,355,152,674, 61,2014-01-14 22:28:26.078549,410,25,592, 4,2014-01-12 09:53:07.138255,285,709,918, 4,2014-01-14 12:27:48.665891,735,499,116, 7,2014-01-17 17:53:37.707569,141,508,511, 7,2014-01-13 09:53:59.818848,166,556,139, 4,2014-01-10 23:09:53.911997,154,714,235, 4,2014-01-18 16:30:06.688092,784,79,108, 14,2014-01-20 12:34:16.050948,441,362,165, 7,2014-01-16 12:58:41.223478,993,376,21, 93,2014-01-19 09:29:52.091399,773,862,564, 4,2014-01-18 13:53:18.562111,707,454,434, 93,2014-01-21 00:58:37.724059,847,314,304, 61,2014-01-13 18:39:54.355417,274,502,118, 7,2014-01-14 13:12:06.68599,122,467,545, 7,2014-01-15 14:53:46.237074,858,739,358, 61,2014-01-15 00:29:48.254231,755,48,611, 61,2014-01-18 14:08:43.518538,973,81,405, 4,2014-01-14 10:59:59.588237,152,744,455, 54,2014-01-17 00:51:35.283194,488,207,842, 61,2014-01-12 20:12:00.37956,685,231,392, 14,2014-01-15 13:15:48.68438,120,765,759, 7,2014-01-15 01:44:12.38168,40,252,898, 14,2014-01-20 21:52:00.883179,163,480,824, 4,2014-01-14 05:40:53.666806,239,5,130, 54,2014-01-19 02:15:22.144626,363,267,499, 54,2014-01-12 04:37:40.868368,862,675,497, 14,2014-01-12 07:13:02.06102,621,667,370, 14,2014-01-17 06:53:18.755462,122,827,311, 93,2014-01-14 23:47:26.683912,622,133,527, 54,2014-01-15 13:31:38.52542,726,249,59, 61,2014-01-18 14:46:37.817205,882,321,232, 7,2014-01-19 02:15:52.762461,491,478,60, 61,2014-01-17 03:24:45.664094,630,58,855, 7,2014-01-11 21:14:26.483096,755,721,353, 7,2014-01-15 14:09:30.470449,818,965,518, 14,2014-01-19 17:18:45.482942,120,357,485, 7,2014-01-12 00:24:20.395093,755,936,709, 7,2014-01-13 00:39:37.158541,695,933,292, 7,2014-01-18 23:53:01.53389,969,333,940, 61,2014-01-17 17:23:01.618571,951,947,856, 61,2014-01-11 04:09:29.24994,645,580,467, 61,2014-01-16 07:46:20.096001,73,337,576, 14,2014-01-17 12:21:39.34305,36,316,482, 93,2014-01-14 02:49:23.605775,673,132,71, 7,2014-01-20 00:58:26.781992,850,346,758, 93,2014-01-11 21:15:25.580124,295,456,11, 4,2014-01-18 07:19:11.860006,212,231,746, 4,2014-01-17 09:07:10.300529,672,715,865, 7,2014-01-15 12:15:25.16836,569,636,768, 4,2014-01-14 02:10:47.143201,973,275,786, 93,2014-01-19 09:19:10.680508,829,542,373, 7,2014-01-20 05:31:13.85316,968,135,229, 14,2014-01-18 07:54:59.524881,185,819,433, 54,2014-01-19 01:11:11.563688,982,422,693, 14,2014-01-13 05:12:07.519966,364,947,254, 93,2014-01-16 04:10:21.669507,818,808,491, 54,2014-01-20 09:48:52.432056,581,886,201, 4,2014-01-14 16:25:09.580985,497,54,278, 54,2014-01-12 05:03:40.425389,685,755,243, 14,2014-01-14 03:36:19.115968,326,676,661, 14,2014-01-19 09:56:48.106824,65,723,757, 14,2014-01-19 23:50:18.242594,56,280,420, 61,2014-01-11 23:52:11.878848,643,731,460, 61,2014-01-15 07:21:33.770918,362,494,253, 14,2014-01-21 01:04:44.726109,657,901,868, 93,2014-01-16 18:01:42.349297,237,748,580, 14,2014-01-13 18:40:19.253859,438,622,121, 93,2014-01-11 19:29:05.121202,984,27,453, 61,2014-01-18 22:41:19.507838,5,206,252, 7,2014-01-11 12:33:20.78775,708,100,450, 93,2014-01-18 16:52:06.740192,759,385,956, 4,2014-01-19 06:50:48.757778,462,975,723, 61,2014-01-17 00:19:40.883751,48,498,69, 4,2014-01-14 23:13:46.083242,682,86,655, 4,2014-01-12 03:40:12.363553,521,358,733, 14,2014-01-15 19:18:07.665743,443,501,178, 7,2014-01-16 00:29:11.663365,756,968,790, 7,2014-01-19 14:59:24.223559,673,675,618, 4,2014-01-11 22:25:17.966272,197,869,202, 54,2014-01-20 
16:44:36.831726,284,388,906, 7,2014-01-12 11:10:11.36676,103,963,413, 61,2014-01-20 11:44:28.64678,444,396,107, 4,2014-01-19 16:15:50.684885,234,283,756, 7,2014-01-19 23:05:10.891642,366,636,301, 61,2014-01-18 06:55:40.210469,761,999,645, 61,2014-01-11 15:27:58.205271,173,936,677, 54,2014-01-14 21:15:32.561568,571,978,416, 14,2014-01-17 10:44:32.642944,935,3,2, 7,2014-01-15 11:53:07.786256,784,286,399, 93,2014-01-16 06:19:12.986956,10,365,29, 61,2014-01-20 18:20:51.758911,457,63,895, 93,2014-01-13 15:49:55.893081,345,918,751, 54,2014-01-15 00:09:31.229551,767,768,850, 93,2014-01-11 12:13:03.637759,655,351,874, 61,2014-01-18 03:11:29.663999,984,477,539, 4,2014-01-14 19:14:15.95566,427,994,507, 7,2014-01-17 10:14:45.987057,67,901,679, 4,2014-01-21 01:51:48.917858,227,93,678, 7,2014-01-15 18:43:21.077281,527,881,924, 61,2014-01-15 02:56:05.494895,120,744,490, 14,2014-01-11 08:25:09.705608,561,626,837, 61,2014-01-13 05:35:27.817474,756,304,574, 14,2014-01-13 03:46:54.253092,122,415,603, 14,2014-01-17 12:44:50.589359,30,269,197, 4,2014-01-17 08:49:13.900715,396,585,725, 14,2014-01-14 11:27:06.616645,612,895,72, 93,2014-01-12 02:02:58.255102,585,142,563, 14,2014-01-12 03:18:25.56408,83,719,579, 61,2014-01-12 20:26:30.840624,592,164,445, 61,2014-01-13 04:28:16.221793,143,708,58, 14,2014-01-11 14:03:02.395805,493,926,307, 93,2014-01-14 11:36:42.207384,476,827,605, 14,2014-01-12 10:12:44.868573,833,522,452, 93,2014-01-20 10:18:53.08111,704,976,433, 4,2014-01-13 04:41:53.099445,721,998,712, 7,2014-01-19 21:08:25.07946,457,550,540, 14,2014-01-21 05:46:51.286381,430,57,898, 61,2014-01-17 05:57:25.661013,307,403,260, 7,2014-01-16 01:52:57.722404,783,570,322, 14,2014-01-15 11:39:59.072637,762,734,528, 93,2014-01-12 06:16:38.64797,126,424,930, 93,2014-01-15 14:13:49.481316,823,992,355, 4,2014-01-18 07:29:54.966137,337,512,534, 4,2014-01-16 10:26:09.877521,868,982,349, 7,2014-01-16 06:26:53.119494,981,523,22, 61,2014-01-15 04:41:24.630136,620,502,546, 7,2014-01-20 09:40:25.8336,68,426,415, 4,2014-01-12 10:41:39.106551,688,293,622, 14,2014-01-15 00:33:13.548414,362,490,920, 54,2014-01-14 22:23:46.910881,134,960,646, 4,2014-01-16 17:37:44.601866,779,929,207, 54,2014-01-15 12:58:23.254022,680,607,871, 4,2014-01-17 07:59:14.728355,404,270,791, 93,2014-01-19 01:24:38.854957,396,141,321, 93,2014-01-11 19:43:13.8438,591,199,468, 14,2014-01-13 10:48:28.62907,876,4,15, 93,2014-01-12 06:51:45.471602,327,939,215, 61,2014-01-13 01:46:12.098901,124,482,581, 61,2014-01-14 18:06:54.19315,274,513,965, 93,2014-01-14 07:18:16.312226,761,686,775, 61,2014-01-15 10:14:28.320694,355,123,854, 93,2014-01-15 12:09:56.588955,783,449,491, 54,2014-01-17 22:54:58.520028,428,544,978, 61,2014-01-17 00:27:13.189686,808,234,119, 54,2014-01-14 16:28:49.670066,691,930,185, 4,2014-01-20 07:36:51.619473,874,334,137, 4,2014-01-15 15:35:38.269147,562,404,229, 54,2014-01-14 16:15:40.956447,170,547,823, 7,2014-01-16 07:34:17.280486,241,683,768, 7,2014-01-20 21:28:35.991551,600,154,492, 93,2014-01-19 07:55:40.029503,414,734,108, 7,2014-01-17 17:50:55.928455,603,249,490, 7,2014-01-15 05:42:25.472866,570,847,170, 4,2014-01-16 09:25:34.995641,566,967,352, 93,2014-01-12 22:17:05.805976,141,533,218, 4,2014-01-20 16:09:18.59236,332,552,273, 54,2014-01-20 18:06:59.625777,546,451,231, 7,2014-01-12 01:57:31.639576,135,47,380, 14,2014-01-11 20:50:57.69933,231,309,815, 54,2014-01-14 12:40:13.174191,83,343,422, 7,2014-01-16 04:21:18.550458,795,958,307, 14,2014-01-17 18:28:42.301196,605,562,375, 93,2014-01-19 05:38:36.428631,779,696,829, 14,2014-01-12 
06:20:33.278814,621,600,643, 93,2014-01-15 13:53:21.156153,20,714,735, 4,2014-01-20 05:21:50.272431,188,896,843, 61,2014-01-14 21:09:01.907883,113,369,200, 7,2014-01-17 00:45:06.627755,890,2,865, 61,2014-01-12 01:08:02.371332,989,266,603, 93,2014-01-18 19:15:56.101033,696,958,799, 61,2014-01-20 12:03:22.9404,426,920,544, 93,2014-01-16 15:22:30.692446,234,247,265, 93,2014-01-13 01:25:52.690408,727,766,749, 4,2014-01-17 04:58:21.460428,536,425,780, 54,2014-01-12 09:49:43.882132,758,448,270, 54,2014-01-16 21:54:42.360474,945,1,531, 93,2014-01-16 06:35:13.079902,72,17,532, 54,2014-01-17 05:25:22.151279,544,423,823, 61,2014-01-20 18:10:23.31734,730,686,345, 14,2014-01-11 08:09:30.360388,303,339,195, 93,2014-01-16 20:53:58.14283,107,564,750, 4,2014-01-18 20:06:03.346843,828,346,632, 7,2014-01-18 06:00:26.288844,523,374,416, 7,2014-01-10 20:36:23.615696,305,229,92, 14,2014-01-13 23:31:38.342482,250,814,57, 4,2014-01-20 08:17:32.09482,54,244,631, 14,2014-01-20 16:45:42.208475,67,558,542, 93,2014-01-13 11:38:37.968259,402,610,63, 93,2014-01-11 04:15:03.734816,335,424,82, 61,2014-01-11 07:36:39.907805,172,20,58, 61,2014-01-17 04:18:51.142869,57,149,94, 4,2014-01-16 12:36:22.285273,872,347,991, 93,2014-01-18 06:05:22.209001,938,602,575, 4,2014-01-15 03:57:27.5715,711,4,427, 93,2014-01-17 22:56:55.564505,666,99,350, 93,2014-01-12 13:58:43.365154,624,702,565, 93,2014-01-14 03:19:17.843931,556,375,91, 7,2014-01-11 14:05:57.472389,531,436,884, 54,2014-01-18 18:43:49.992909,493,506,645, 4,2014-01-15 08:27:20.215264,760,719,944, 93,2014-01-19 13:21:53.573842,127,679,729, 54,2014-01-18 00:47:12.933309,821,755,501, 54,2014-01-21 03:49:50.907709,610,618,22, 4,2014-01-11 08:47:46.264249,539,758,367, 4,2014-01-13 23:45:34.393739,7,131,563, 14,2014-01-12 07:39:34.789842,991,404,44, 54,2014-01-17 10:42:28.624723,114,840,422, 14,2014-01-19 10:20:47.47364,693,600,868, 7,2014-01-18 17:04:56.941121,172,913,261, 54,2014-01-16 22:52:51.942062,746,30,351, 61,2014-01-19 22:30:17.834028,597,116,710, 14,2014-01-14 07:58:55.08395,751,110,151, 61,2014-01-14 12:58:55.288904,729,43,971, 61,2014-01-16 22:30:44.122872,932,17,284, 61,2014-01-14 08:35:18.700065,851,163,42, 61,2014-01-17 16:30:33.631387,196,836,97, 93,2014-01-16 00:48:16.21811,22,420,541, 54,2014-01-13 19:21:00.90854,587,839,655, 54,2014-01-20 08:09:11.600066,935,936,774, 7,2014-01-16 09:03:19.952925,213,651,393, 54,2014-01-14 06:57:40.816346,123,409,350, 61,2014-01-16 06:28:02.742934,630,933,144, 93,2014-01-11 15:39:42.238199,107,67,18, 54,2014-01-11 07:03:03.025347,595,307,726, 61,2014-01-20 14:25:30.314434,815,788,221, 93,2014-01-18 18:36:37.802704,790,382,354, 7,2014-01-13 01:01:46.390501,587,228,80, 4,2014-01-13 11:44:48.158366,325,462,71, 7,2014-01-19 12:42:35.275511,704,762,764, 14,2014-01-20 23:45:36.38341,694,322,307, 54,2014-01-18 00:12:08.373629,502,953,902, 7,2014-01-17 20:04:28.849353,316,950,611, 4,2014-01-17 18:32:49.31714,35,491,844, 54,2014-01-17 22:01:59.281757,213,465,173, 7,2014-01-18 08:52:15.113602,791,453,559, 93,2014-01-20 22:18:23.710878,172,558,444, 4,2014-01-19 09:41:34.071599,323,268,816, 93,2014-01-14 13:34:43.738326,571,295,397, 14,2014-01-19 02:39:11.184937,799,813,907, 14,2014-01-16 20:46:31.013139,521,968,95, 54,2014-01-20 16:27:35.680807,409,734,222, 61,2014-01-17 19:09:29.018965,56,863,621, 7,2014-01-20 08:45:26.097089,784,376,448, 7,2014-01-13 23:26:30.969711,414,618,136, 7,2014-01-13 11:40:13.142255,411,337,693, 14,2014-01-13 11:20:44.797154,268,629,904, 14,2014-01-20 19:57:04.601518,91,983,50, 7,2014-01-18 
16:28:29.360366,792,384,835, 54,2014-01-16 10:41:45.705694,885,751,240, 14,2014-01-19 22:06:16.201583,70,552,97, 7,2014-01-13 19:31:49.313291,376,521,62, 4,2014-01-19 21:39:26.52204,601,868,624, 4,2014-01-14 22:34:18.944519,179,227,254, 54,2014-01-14 15:11:31.55149,104,526,296, 93,2014-01-20 08:42:29.547386,30,996,93, 4,2014-01-14 06:59:49.258953,433,263,355, 93,2014-01-12 03:48:09.354612,377,958,191, 54,2014-01-12 03:44:15.937887,876,954,960, 61,2014-01-16 22:44:37.417319,546,103,721, 7,2014-01-20 20:30:44.630124,231,425,904, 93,2014-01-11 21:29:52.321718,914,426,34, 4,2014-01-13 16:56:45.790948,699,343,334, 93,2014-01-17 10:35:13.479477,867,309,142, 14,2014-01-18 20:02:41.638857,183,602,778, 14,2014-01-20 18:58:45.072706,928,190,92, 61,2014-01-14 13:27:28.593079,728,742,990, 54,2014-01-18 12:21:05.349735,921,266,625, 4,2014-01-18 22:40:19.144724,293,954,344, 7,2014-01-18 07:02:12.331405,800,676,446, 93,2014-01-16 09:00:16.534672,224,99,831, 61,2014-01-14 13:26:50.157863,506,631,62, 61,2014-01-17 17:29:48.012212,773,719,302, 61,2014-01-12 22:09:45.554056,681,341,412, 4,2014-01-13 16:12:16.254952,579,192,947, 4,2014-01-20 20:56:18.982343,438,562,525, 54,2014-01-15 13:49:58.696311,117,342,484, 61,2014-01-16 07:33:01.052525,820,920,247, 54,2014-01-20 10:53:23.58386,683,752,15, 54,2014-01-13 00:18:28.056677,731,81,794, 7,2014-01-11 12:14:46.758219,766,337,715, 93,2014-01-19 02:59:39.785445,467,82,576, 54,2014-01-15 23:50:17.369968,374,475,837, 93,2014-01-20 13:54:13.280678,785,750,325, 7,2014-01-12 19:33:58.729964,667,764,498, 7,2014-01-19 19:01:48.921457,604,941,489, 54,2014-01-19 16:36:42.828064,384,347,878, 14,2014-01-16 06:33:47.988917,354,301,480, 61,2014-01-21 02:49:58.27607,659,572,802, 7,2014-01-21 00:20:58.76637,540,564,679, 93,2014-01-11 23:18:25.406235,608,242,282, 54,2014-01-20 17:20:42.906194,116,984,751, 61,2014-01-11 15:50:51.088088,518,83,554, 61,2014-01-14 20:15:11.197183,632,806,98, 61,2014-01-16 21:55:56.385671,648,98,449, 7,2014-01-19 15:53:32.726945,794,899,849, 4,2014-01-14 09:13:56.270308,958,201,897, 7,2014-01-20 15:23:24.97875,549,644,867, 54,2014-01-16 22:14:38.076679,728,806,421, 14,2014-01-12 01:54:15.415032,950,468,368, 93,2014-01-17 16:25:37.310575,91,495,738, 61,2014-01-12 01:14:54.611771,915,823,738, 61,2014-01-15 19:21:05.572895,394,747,165, 4,2014-01-14 03:55:25.322786,639,126,205, 93,2014-01-14 03:24:40.165826,73,264,178, 61,2014-01-18 15:33:21.827847,555,172,347, 61,2014-01-13 18:51:44.305129,762,958,609, 7,2014-01-18 21:14:38.862138,948,130,249, 14,2014-01-13 17:06:22.880372,543,813,54, 7,2014-01-12 23:45:07.888991,951,676,202, 4,2014-01-21 01:33:06.918815,828,336,855, 93,2014-01-14 09:21:09.639009,480,431,499, 54,2014-01-10 20:44:47.674434,33,398,177, 93,2014-01-15 19:23:24.288782,75,313,438, 14,2014-01-13 17:15:22.919687,485,677,402, 7,2014-01-12 20:18:46.404398,463,591,178, 14,2014-01-14 08:25:13.21024,305,950,120, 54,2014-01-12 03:52:05.747751,518,302,539, 14,2014-01-18 06:52:34.393315,591,492,746, 61,2014-01-14 05:15:11.486728,941,451,855, 93,2014-01-11 22:13:04.514121,418,524,244, 61,2014-01-19 10:10:59.799549,115,874,800, 7,2014-01-13 16:35:54.392921,537,334,342, 4,2014-01-12 18:03:55.602209,924,433,936, 93,2014-01-13 00:26:10.997152,472,283,446, 4,2014-01-19 18:31:50.77901,214,864,356, 54,2014-01-11 03:57:28.329154,75,477,580, 93,2014-01-16 13:40:07.267459,300,900,796, 93,2014-01-19 03:55:15.757761,401,226,805, 61,2014-01-17 06:12:06.406252,915,865,903, 61,2014-01-17 19:34:22.682492,265,241,867, 7,2014-01-15 14:20:53.068335,381,792,864, 
54,2014-01-18 11:27:01.018023,118,855,775, 4,2014-01-12 08:55:28.255386,108,903,739, 54,2014-01-18 22:16:18.391542,791,255,801, 93,2014-01-11 08:51:41.18385,153,275,234, 4,2014-01-20 04:28:50.083233,47,740,634, 4,2014-01-11 11:08:02.696671,30,332,313, 61,2014-01-19 10:06:20.045987,2,674,774, 7,2014-01-12 15:35:12.964024,593,535,887, 4,2014-01-13 14:53:10.58608,478,622,813, 4,2014-01-19 05:39:26.964802,5,343,85, 4,2014-01-16 04:56:22.603033,56,444,86, 4,2014-01-13 15:37:58.260516,410,359,688, 54,2014-01-13 19:02:51.253584,368,992,601, 61,2014-01-19 02:11:45.522721,534,368,51, 93,2014-01-15 15:56:44.665332,325,487,76, 4,2014-01-17 07:28:04.463823,77,790,5, 4,2014-01-20 10:03:51.270472,415,982,420, 93,2014-01-12 16:49:19.058647,251,683,587, 4,2014-01-20 16:43:15.950551,473,717,378, 4,2014-01-11 02:16:55.785012,544,557,429, 14,2014-01-10 21:00:18.858615,145,668,841, 4,2014-01-13 03:19:10.343892,685,389,871, 61,2014-01-13 00:20:51.387221,804,653,708, 54,2014-01-13 01:26:29.855767,73,386,35, 93,2014-01-11 15:51:01.122902,933,346,483, 61,2014-01-13 08:18:19.716794,991,773,652, 54,2014-01-18 19:06:37.123226,135,310,443, 54,2014-01-19 23:46:16.880664,331,412,914, 7,2014-01-19 18:30:26.123046,294,138,464, 14,2014-01-15 08:40:59.805719,37,313,684, 7,2014-01-14 08:07:09.949418,316,705,981, 61,2014-01-16 23:57:27.141069,25,633,376, 54,2014-01-16 21:36:28.061105,27,671,880, 14,2014-01-12 00:23:28.340959,911,703,850, 7,2014-01-17 12:49:08.324919,993,711,16, 93,2014-01-15 20:05:18.144339,953,71,725, 93,2014-01-12 15:31:31.03763,195,34,504, 61,2014-01-15 16:55:28.370906,350,618,344, 61,2014-01-17 15:40:31.108361,624,876,563, 14,2014-01-15 10:24:41.623711,702,493,414, 7,2014-01-13 16:34:55.335708,532,843,407, 7,2014-01-12 14:36:53.711395,336,652,140, 7,2014-01-18 06:02:39.884646,397,773,350, 61,2014-01-16 15:37:46.589292,512,469,316, 14,2014-01-20 20:48:39.234116,388,787,439, 4,2014-01-12 15:59:24.549978,267,166,67, 54,2014-01-12 17:05:51.053115,226,459,399, 93,2014-01-20 00:52:30.505006,545,91,972, 14,2014-01-14 12:48:43.608624,195,480,683, 14,2014-01-12 03:49:07.004085,5,906,260, 54,2014-01-20 07:09:26.290019,351,225,504, 7,2014-01-14 13:49:02.46724,804,138,293, 14,2014-01-14 11:08:17.347976,757,577,11, 93,2014-01-12 01:30:17.67724,462,857,655, 61,2014-01-16 19:15:32.323006,394,730,660, 54,2014-01-15 06:59:18.470878,61,754,467, 7,2014-01-14 13:48:37.394033,5,379,960, 54,2014-01-14 08:22:09.446232,484,443,388, 93,2014-01-14 00:45:35.351542,812,237,470, 14,2014-01-13 02:19:03.517079,841,868,970, 14,2014-01-18 21:03:09.251951,664,757,688, 7,2014-01-17 12:52:45.300959,522,80,284, 4,2014-01-19 06:16:30.658148,610,547,640, 14,2014-01-14 12:39:37.313056,464,494,956, 4,2014-01-18 17:16:13.64192,891,280,358, 4,2014-01-15 13:05:38.983067,140,197,575, 93,2014-01-19 12:44:55.457814,757,232,662, 54,2014-01-20 12:47:44.679969,760,42,747, 4,2014-01-20 10:01:07.353973,812,130,937, 54,2014-01-15 22:25:26.566175,431,684,267, 14,2014-01-14 17:12:26.30368,714,537,157, 61,2014-01-12 20:36:02.689681,985,701,400, 4,2014-01-17 17:02:20.27799,760,250,835, 7,2014-01-11 17:15:06.188326,559,448,932, 14,2014-01-18 16:13:49.278973,79,827,396, 14,2014-01-17 07:50:59.512105,807,738,346, 7,2014-01-13 13:14:30.738303,934,887,176, 14,2014-01-20 13:19:40.332507,89,694,271, 14,2014-01-16 02:43:30.017111,265,243,844, 61,2014-01-17 06:03:14.347347,974,710,40, 93,2014-01-11 11:08:47.336592,244,677,106, 4,2014-01-15 03:52:56.307129,768,544,165, 61,2014-01-20 23:52:16.814585,234,749,892, 7,2014-01-15 02:17:04.684568,258,717,677, 54,2014-01-16 
09:23:13.984788,852,49,842, 7,2014-01-16 13:07:49.13801,343,580,863, 61,2014-01-19 13:16:23.155447,515,353,976, 61,2014-01-20 03:11:51.378821,781,188,676, 4,2014-01-20 01:29:58.584243,23,541,740, 7,2014-01-12 08:01:58.506989,16,173,720, 54,2014-01-11 23:30:54.89632,711,962,579, 93,2014-01-17 16:33:07.836613,267,742,571, 4,2014-01-19 00:54:43.808367,399,12,333, 54,2014-01-20 09:47:25.554468,952,623,858, 54,2014-01-10 23:12:45.149669,915,412,58, 54,2014-01-16 12:10:57.450286,424,636,79, 93,2014-01-14 16:53:04.537954,91,282,690, 4,2014-01-19 15:57:40.607481,955,576,156, 54,2014-01-15 18:58:42.130255,153,714,314, 4,2014-01-13 20:54:11.891927,764,881,246, 14,2014-01-14 08:23:07.174076,234,443,271, 93,2014-01-19 16:11:08.434354,449,835,489, 93,2014-01-15 21:30:14.582027,841,157,735, 61,2014-01-21 05:25:27.452065,392,472,925, 54,2014-01-20 13:26:14.62268,797,349,318, 4,2014-01-13 07:44:03.861,143,712,669, 4,2014-01-17 07:16:26.96417,886,362,883, 4,2014-01-12 20:40:45.361403,618,612,736, 54,2014-01-12 15:03:44.193506,183,113,438, 61,2014-01-12 03:59:56.9817,36,785,92, 93,2014-01-19 06:43:59.708749,892,991,495, 4,2014-01-13 06:12:31.530099,272,800,525, 4,2014-01-16 11:52:53.288829,779,875,871, 93,2014-01-19 00:36:16.523335,504,895,488, 14,2014-01-17 12:29:36.214667,403,755,74, 4,2014-01-11 15:16:07.273618,318,80,950, 7,2014-01-14 07:44:05.661346,791,994,312, 4,2014-01-15 19:45:59.370114,57,666,114, 61,2014-01-20 22:27:58.652857,622,219,979, 4,2014-01-13 03:14:04.246007,613,863,414, 54,2014-01-17 07:47:57.877521,388,667,575, 7,2014-01-11 15:58:53.549177,202,273,409, 4,2014-01-19 23:47:12.08262,546,835,68, 54,2014-01-15 02:42:41.685887,701,15,629, 54,2014-01-21 05:46:19.103645,595,477,996, 7,2014-01-20 02:59:57.232289,886,677,969, 93,2014-01-20 18:53:39.136174,289,622,292, 54,2014-01-14 16:39:23.641599,875,123,835, 54,2014-01-18 12:57:37.84019,950,712,596, 4,2014-01-15 07:52:21.266848,987,771,485, 7,2014-01-17 17:33:35.533945,671,581,439, 93,2014-01-11 15:20:45.014266,771,122,172, 14,2014-01-13 18:03:29.701203,831,852,539, 7,2014-01-12 09:03:50.115972,726,934,448, 54,2014-01-11 14:46:12.46633,343,532,312, 54,2014-01-13 01:29:44.323883,7,669,308, 93,2014-01-14 20:47:53.976972,926,621,714, 14,2014-01-18 02:02:39.4305,530,548,162, 61,2014-01-15 02:10:29.685285,630,478,339, 7,2014-01-16 15:51:38.170897,885,682,672, 4,2014-01-19 10:02:36.412201,886,106,609, 14,2014-01-13 02:54:29.394034,543,991,909, 4,2014-01-19 02:04:09.700996,722,965,205, 61,2014-01-14 15:55:29.701449,254,18,153, 14,2014-01-10 21:30:45.917789,472,430,697, 14,2014-01-15 08:33:45.916081,317,561,285, 61,2014-01-15 11:11:36.975067,593,863,925, 14,2014-01-14 09:14:51.579134,732,605,376, 93,2014-01-20 08:19:45.286195,793,493,276, 93,2014-01-15 03:39:35.627925,567,401,139, 4,2014-01-16 16:28:55.825142,731,79,35, 61,2014-01-16 10:07:43.163716,107,239,558, 54,2014-01-15 23:38:29.177101,625,318,726, 93,2014-01-15 10:16:07.907762,417,645,535, 54,2014-01-20 16:50:24.849602,413,88,826, 7,2014-01-15 23:24:48.281165,670,185,438, 61,2014-01-14 07:16:05.14047,5,147,653, 93,2014-01-20 05:44:03.986196,585,167,461, 54,2014-01-19 20:04:11.922764,645,570,532, 54,2014-01-11 14:13:42.980659,417,600,293, 54,2014-01-14 07:36:25.253044,505,392,80, 14,2014-01-16 07:37:47.456708,300,956,202, 4,2014-01-12 09:34:27.994925,788,362,815, 7,2014-01-17 05:39:54.954246,266,390,667, 61,2014-01-17 20:41:37.57268,72,947,269, 4,2014-01-13 04:20:40.461256,272,968,76, 14,2014-01-19 11:09:39.278129,874,814,664, 93,2014-01-11 11:29:31.550071,384,749,90, 4,2014-01-20 
10:23:19.891756,738,26,42, 54,2014-01-13 07:20:08.963834,442,777,589, 14,2014-01-17 07:21:09.720968,452,343,528, 14,2014-01-18 14:25:56.304376,901,684,24, 4,2014-01-15 14:14:38.357869,580,667,149, 4,2014-01-15 03:25:19.421964,945,756,711, 93,2014-01-12 00:21:26.005826,132,253,417, 7,2014-01-15 15:45:24.275658,238,907,586, 93,2014-01-19 15:59:05.338046,920,445,371, 93,2014-01-16 15:33:02.980893,750,544,794, 61,2014-01-19 05:00:15.855211,321,678,112, 4,2014-01-18 18:18:50.624659,781,665,290, 4,2014-01-20 23:12:38.609236,366,881,105, 61,2014-01-14 15:29:11.680353,921,898,824, 93,2014-01-13 22:26:33.788375,321,118,875, 61,2014-01-15 16:51:07.786337,649,882,190, 4,2014-01-19 05:45:19.588534,693,770,9, 54,2014-01-13 09:16:58.638396,859,455,146, 54,2014-01-20 20:15:56.067502,376,688,461, 14,2014-01-12 07:01:24.729004,107,18,200, 7,2014-01-12 09:01:02.624592,831,469,581, 54,2014-01-19 10:20:07.990685,933,863,731, 54,2014-01-13 01:15:07.709665,959,445,201, 54,2014-01-15 20:37:27.877635,966,361,293, 93,2014-01-14 11:57:55.447393,953,613,220, 61,2014-01-14 14:49:35.70459,235,44,245, 7,2014-01-11 20:17:22.831881,718,918,778, 7,2014-01-11 02:39:33.020492,587,689,91, 7,2014-01-16 23:10:16.165846,634,536,822, 93,2014-01-20 11:27:02.11001,999,643,100, 54,2014-01-11 18:09:04.570563,912,639,777, 7,2014-01-16 03:33:36.057602,791,934,234, 7,2014-01-12 12:47:11.073845,264,758,617, 54,2014-01-18 05:30:14.29153,449,750,822, 61,2014-01-13 11:59:32.361978,680,884,149, 93,2014-01-17 07:01:49.432133,216,647,451, 4,2014-01-12 02:55:33.713494,890,211,950, 4,2014-01-14 16:20:58.367804,214,787,516, 54,2014-01-11 16:47:13.707791,309,439,383, 54,2014-01-20 22:54:39.051958,219,261,426, 7,2014-01-20 11:54:01.349116,658,685,342, 61,2014-01-20 01:47:29.563002,561,977,254, 61,2014-01-18 11:13:29.676618,674,193,889, 93,2014-01-20 05:06:39.958352,779,643,844, 54,2014-01-13 11:16:41.243775,701,926,466, 14,2014-01-11 03:40:03.464993,868,623,686, 61,2014-01-14 15:57:47.744689,844,281,528, 93,2014-01-11 11:02:00.832309,260,549,642, 14,2014-01-13 16:57:02.103389,20,308,788, 14,2014-01-14 06:13:43.81261,513,589,611, 7,2014-01-12 22:03:25.561314,554,629,343, 7,2014-01-15 05:58:04.72798,621,63,741, 4,2014-01-12 10:33:51.803296,183,701,802, 4,2014-01-15 03:18:33.270978,689,976,943, 14,2014-01-20 06:35:32.605616,8,977,733, 4,2014-01-16 02:31:47.250689,259,442,787, 54,2014-01-18 22:08:08.975568,54,239,876, 61,2014-01-10 20:52:55.437497,181,183,809, 7,2014-01-16 09:11:20.271181,559,857,956, 4,2014-01-14 15:18:25.141414,872,489,780, 7,2014-01-20 12:19:57.547927,627,866,335, 54,2014-01-17 07:20:24.841744,898,655,828, 7,2014-01-19 22:52:01.199017,602,221,776, 7,2014-01-11 19:07:08.621772,175,220,310, 7,2014-01-14 06:50:39.133274,6,995,473, 61,2014-01-12 04:51:33.561414,812,919,983, 14,2014-01-18 06:08:58.053904,318,887,466, 61,2014-01-15 13:46:12.847188,273,180,590, 54,2014-01-16 01:12:31.929218,138,737,377, 4,2014-01-19 02:56:11.762115,222,833,568, 7,2014-01-15 06:40:51.899146,469,136,490, 54,2014-01-15 07:06:33.278333,359,216,24, 7,2014-01-17 22:43:41.325117,681,52,723, 93,2014-01-12 11:54:21.575765,49,2,901, 54,2014-01-14 06:13:13.236685,591,522,491, 61,2014-01-20 14:00:22.568892,500,658,917, 14,2014-01-12 19:34:25.040757,826,898,450, 14,2014-01-18 02:11:00.981794,430,926,572, 54,2014-01-21 05:02:23.4012,712,348,758, 7,2014-01-15 16:31:27.144566,277,282,509, 54,2014-01-11 02:24:44.794404,923,55,178, 93,2014-01-12 21:05:48.962515,960,983,785, 4,2014-01-20 02:29:31.872546,821,484,197, 54,2014-01-12 16:58:36.5977,342,698,264, 14,2014-01-17 
04:24:22.233493,578,308,612, 14,2014-01-19 03:05:04.47858,145,832,447, 61,2014-01-17 23:30:23.848389,910,116,21, 7,2014-01-14 20:32:31.20906,414,750,124, 61,2014-01-19 03:57:59.916077,452,879,466, 14,2014-01-13 02:41:44.119989,260,72,210, 93,2014-01-18 15:50:56.350474,67,157,749, 17,2014-01-18 10:17:57.464004,995,29,433, 17,2014-01-19 14:02:08.961732,359,928,12, 17,2014-01-17 08:42:57.549909,794,751,69, 51,2014-01-19 09:25:06.086195,144,796,502, 51,2014-01-12 14:52:48.095426,429,95,499, 51,2014-01-18 17:34:31.111324,920,547,836, 17,2014-01-16 09:34:04.140099,997,372,580, 17,2014-01-17 08:39:00.942614,357,578,15, 17,2014-01-13 12:47:03.040542,600,494,563, 51,2014-01-14 06:30:15.902214,32,630,851, 51,2014-01-11 09:19:52.841759,104,95,701, 17,2014-01-17 23:53:36.319295,112,622,106, 51,2014-01-10 23:13:57.227749,966,758,364, 51,2014-01-13 01:14:14.417524,442,640,580, 17,2014-01-11 00:06:49.555979,60,376,868, 51,2014-01-20 17:14:19.796641,352,403,79, 17,2014-01-15 00:48:39.458281,256,704,584, 17,2014-01-18 06:17:50.537773,456,416,825, 17,2014-01-20 16:16:43.19826,280,309,63, 17,2014-01-19 21:20:06.602847,881,16,537, 17,2014-01-18 12:42:35.332177,318,863,149, 17,2014-01-12 07:22:32.160774,366,690,595, 17,2014-01-18 17:49:38.475811,317,393,760, 17,2014-01-20 09:41:11.929876,403,254,190, 51,2014-01-18 15:46:54.394685,900,245,842, 17,2014-01-16 14:54:42.954392,368,130,298, 51,2014-01-17 03:11:35.778684,392,12,240, 51,2014-01-12 06:19:25.603746,850,847,142, 51,2014-01-14 12:52:42.87047,743,642,185, 17,2014-01-19 09:53:19.898673,599,424,614, 17,2014-01-20 02:10:21.95464,964,798,358, 51,2014-01-11 17:10:40.334893,875,720,103, 51,2014-01-17 17:55:28.860406,251,968,336, 17,2014-01-16 04:53:19.504549,132,953,272, 51,2014-01-20 06:35:46.421088,529,721,563, 51,2014-01-19 12:48:16.955831,718,408,338, 17,2014-01-13 16:27:50.615873,360,549,978, 17,2014-01-15 10:09:50.561186,974,362,503, 51,2014-01-15 15:27:17.898445,528,58,523, 17,2014-01-16 09:14:53.656415,129,800,220, 17,2014-01-18 20:40:06.46382,173,431,123, 51,2014-01-16 04:47:10.740205,46,201,477, 51,2014-01-13 03:08:29.975709,878,357,390, 17,2014-01-18 23:54:03.691569,481,587,222, 51,2014-01-18 10:01:25.157729,307,318,384, 17,2014-01-13 07:15:19.532107,747,766,915, 51,2014-01-18 11:08:23.488209,270,152,358, 51,2014-01-12 04:50:04.616429,729,592,939, 17,2014-01-20 17:33:10.06988,103,365,381, 51,2014-01-17 21:25:06.68647,418,511,791, 51,2014-01-10 20:10:11.219276,432,164,553, 17,2014-01-18 00:15:45.402476,688,534,809, 17,2014-01-19 08:47:38.847664,377,601,5, 17,2014-01-18 17:59:49.695087,828,740,907, 51,2014-01-17 03:56:57.332353,734,763,116, 17,2014-01-16 18:34:33.242349,54,943,132, 17,2014-01-14 02:54:32.649479,518,272,29, 51,2014-01-13 01:08:33.111037,363,727,501, 51,2014-01-18 04:53:58.846095,194,673,50, 17,2014-01-17 19:47:15.520368,729,370,587, 17,2014-01-11 05:01:53.00971,541,773,298, 17,2014-01-17 01:04:20.800734,802,543,557, 17,2014-01-18 16:57:55.85526,428,70,19, 51,2014-01-18 02:57:21.870535,917,92,909, 51,2014-01-11 23:57:40.305284,336,182,829, 17,2014-01-17 17:33:42.276348,417,39,160, 51,2014-01-16 09:45:38.826367,851,429,518, 51,2014-01-14 20:25:30.921157,825,167,668, 17,2014-01-11 21:43:32.837534,859,698,535, 17,2014-01-21 05:12:56.725231,478,352,990, 51,2014-01-20 09:40:24.577991,17,801,479, 51,2014-01-19 22:23:39.301354,799,569,346, 17,2014-01-16 04:00:07.465435,868,517,374, 51,2014-01-12 06:48:54.5537,755,800,592, 17,2014-01-17 16:17:42.992923,900,43,311, 51,2014-01-13 08:01:32.623584,360,564,8, 17,2014-01-14 18:04:14.085807,526,637,179, 
51,2014-01-14 21:26:06.481552,809,451,319, 17,2014-01-14 16:51:37.240014,139,705,934, 51,2014-01-14 05:37:24.156107,661,729,215, 17,2014-01-11 12:51:13.168022,64,134,896, 17,2014-01-14 17:01:48.459289,177,360,536, 17,2014-01-10 23:53:09.558583,710,495,863, 51,2014-01-20 01:38:52.015685,208,154,308, 17,2014-01-12 05:01:38.154795,19,392,527, 17,2014-01-17 07:50:06.890935,129,355,714, 17,2014-01-15 14:13:25.258034,563,445,380, 17,2014-01-15 11:56:10.804274,784,916,711, 17,2014-01-19 12:58:40.002392,177,242,274, 51,2014-01-12 13:07:24.104548,5,170,667, 51,2014-01-12 01:43:26.324642,159,351,710, 51,2014-01-19 22:00:33.012102,708,777,444, 17,2014-01-18 18:11:44.905282,359,940,288, 17,2014-01-19 22:41:22.179901,191,844,795, 51,2014-01-16 18:57:54.882637,592,650,532, 17,2014-01-19 22:09:25.210985,920,279,382, 51,2014-01-16 10:15:04.456249,445,161,513, 51,2014-01-11 22:43:33.709423,685,538,649, 17,2014-01-13 12:34:56.132143,539,99,22, 17,2014-01-17 11:58:37.294202,700,72,624, 17,2014-01-11 21:56:30.434654,590,277,993, 51,2014-01-12 16:15:20.710133,184,814,680, 17,2014-01-16 04:22:16.595556,800,531,474, 17,2014-01-17 05:56:37.900091,996,871,403, 17,2014-01-14 03:04:15.264253,317,522,131, 51,2014-01-12 14:39:59.588899,788,203,460, 51,2014-01-19 17:58:10.523674,388,854,998, 17,2014-01-18 01:08:29.35006,693,126,631, 17,2014-01-16 16:06:06.070451,901,125,176, 17,2014-01-13 04:49:47.763688,220,660,700, 17,2014-01-11 00:45:53.506168,20,765,273, 17,2014-01-17 08:57:19.238473,700,525,651, 17,2014-01-17 01:51:36.223396,228,782,329, 51,2014-01-11 04:39:03.06475,408,304,660, 51,2014-01-16 04:36:11.254578,979,177,190, 17,2014-01-18 10:53:14.378191,515,255,428, 17,2014-01-17 16:29:09.956105,603,321,760, 51,2014-01-20 22:49:36.512612,376,804,122, 51,2014-01-12 16:49:25.182465,593,457,518, 17,2014-01-15 23:27:49.958497,535,416,636, 51,2014-01-12 05:57:00.61716,747,706,272, 17,2014-01-13 22:32:51.507525,942,848,267, 51,2014-01-14 15:28:22.970599,878,217,179, 17,2014-01-20 04:08:45.522862,979,848,662, 17,2014-01-12 15:14:13.687427,35,114,285, 51,2014-01-20 14:26:17.853656,139,89,994, 51,2014-01-18 20:18:10.733848,871,293,279, 51,2014-01-18 05:29:18.144095,914,508,22, 17,2014-01-11 07:09:51.563079,494,410,563, 51,2014-01-11 02:53:06.865991,451,849,173, 51,2014-01-14 11:27:55.438297,475,312,547, 17,2014-01-12 09:06:21.997733,330,43,27, 17,2014-01-12 23:08:27.576544,571,785,479, 51,2014-01-19 19:50:12.033854,282,25,432, 51,2014-01-18 19:02:59.897824,56,782,782, 17,2014-01-16 06:12:42.840796,670,814,762, 51,2014-01-11 04:30:11.622753,500,983,892, 51,2014-01-17 07:01:10.421918,914,888,905, 51,2014-01-13 01:21:12.190856,183,576,858, 17,2014-01-17 00:36:17.693204,248,508,216, 17,2014-01-19 15:50:58.185605,271,690,742, 17,2014-01-13 06:07:05.697443,611,244,46, 17,2014-01-13 03:33:36.932096,670,373,417, 17,2014-01-15 11:42:34.409001,624,270,774, 17,2014-01-13 14:46:08.762194,552,955,136, 17,2014-01-18 12:09:48.186674,342,748,483, 17,2014-01-12 16:35:48.787192,678,766,482, 17,2014-01-20 11:15:18.718299,350,976,23, 51,2014-01-18 04:59:24.699286,39,723,73, 51,2014-01-14 13:25:13.970076,129,915,666, 51,2014-01-15 04:43:08.676796,592,319,297, 51,2014-01-19 14:56:25.316866,858,137,24, 51,2014-01-17 15:58:05.477601,273,253,792, 17,2014-01-19 00:11:31.647815,112,777,785, 17,2014-01-18 13:05:10.839727,921,171,326, 51,2014-01-19 11:12:19.165028,286,170,646, 51,2014-01-18 08:37:49.50147,425,770,351, 17,2014-01-16 03:23:21.573575,629,118,141, 17,2014-01-16 10:41:37.309123,294,265,948, 17,2014-01-18 19:47:41.064549,345,863,720, 
51,2014-01-16 10:16:28.439566,53,782,660, 51,2014-01-20 02:09:32.74742,826,169,217, 51,2014-01-20 08:54:03.062282,161,11,184, 17,2014-01-18 13:24:56.016109,682,341,232, 17,2014-01-18 15:59:44.781693,826,487,655, 17,2014-01-17 21:57:02.960106,618,744,472, 51,2014-01-13 13:37:38.856905,358,293,363, 51,2014-01-19 00:29:56.404445,656,184,944, 51,2014-01-13 22:58:13.382024,215,819,67, 17,2014-01-15 18:58:51.048181,923,557,907, 17,2014-01-14 19:06:14.098068,526,579,428, 51,2014-01-12 08:49:11.568048,659,790,500, 17,2014-01-18 05:05:56.745624,883,27,415, 51,2014-01-17 02:39:51.030165,509,934,660, 51,2014-01-17 00:31:45.977049,858,74,459, 17,2014-01-20 23:52:05.507818,141,913,282, 17,2014-01-14 08:49:39.216839,9,116,129, 51,2014-01-18 21:07:34.764659,992,106,410, 17,2014-01-20 05:07:24.226535,702,850,17, 51,2014-01-11 07:49:03.916125,576,79,714, 17,2014-01-12 04:32:48.734734,479,68,487, 51,2014-01-14 03:50:32.903331,337,336,664, 17,2014-01-20 02:45:29.232991,805,245,114, 17,2014-01-19 00:30:54.212335,459,949,49, 51,2014-01-11 22:02:04.551145,61,63,673, 17,2014-01-17 09:50:40.072718,82,348,497, 17,2014-01-17 05:43:13.377364,416,559,287, 17,2014-01-19 10:39:54.052615,657,49,855, 51,2014-01-12 07:14:01.646293,628,285,194, 51,2014-01-12 10:24:50.686905,373,977,395, 17,2014-01-17 00:27:35.117165,827,957,937, 51,2014-01-17 21:30:30.086278,546,888,888, 51,2014-01-11 06:34:23.434325,184,682,976, 17,2014-01-16 03:21:38.179447,860,764,641, 17,2014-01-15 04:55:26.102388,192,15,87, 51,2014-01-19 02:34:08.216018,222,171,814, 51,2014-01-12 19:18:41.139973,402,12,650, 17,2014-01-17 22:33:04.959712,299,789,350, 17,2014-01-16 21:04:04.620882,477,938,790, 51,2014-01-15 22:16:54.521997,258,648,688, 17,2014-01-12 11:31:56.007894,486,924,277, 51,2014-01-20 20:10:18.71895,156,619,351, 17,2014-01-17 11:06:06.090045,704,48,528, 17,2014-01-19 20:37:52.753518,535,185,765, 51,2014-01-16 16:50:09.749115,238,490,519, 51,2014-01-13 05:37:52.067513,936,193,185, 51,2014-01-19 14:29:58.261335,177,17,79, 51,2014-01-20 05:39:48.965953,60,188,342, 17,2014-01-10 20:45:26.832172,110,678,465, 17,2014-01-18 13:37:22.48787,186,125,396, 17,2014-01-20 17:28:52.882078,895,128,271, 51,2014-01-12 05:18:15.566907,813,998,510, 17,2014-01-11 11:27:55.39162,390,975,664, 51,2014-01-19 14:14:22.115487,77,184,367, 17,2014-01-20 09:49:09.779243,490,207,269, 51,2014-01-12 13:29:59.942765,396,529,32, 17,2014-01-15 18:05:02.188205,547,281,885, 16,2014-01-16 09:32:23.156606,906,631,563, 16,2014-01-21 04:09:53.995799,30,51,190, 16,2014-01-17 05:19:03.834918,117,930,210, 96,2014-01-17 23:57:13.843511,797,264,691, 96,2014-01-16 22:37:29.112964,602,355,401, 96,2014-01-13 20:49:33.921196,693,345,910, 96,2014-01-18 10:31:37.278256,425,810,887, 16,2014-01-11 19:59:07.292829,6,814,851, 96,2014-01-18 05:45:00.023583,750,51,45, 96,2014-01-16 07:05:45.494274,226,471,769, 16,2014-01-13 19:17:48.432802,443,484,343, 96,2014-01-14 22:18:04.983296,990,700,782, 96,2014-01-11 22:09:50.115155,809,116,562, 96,2014-01-18 21:34:42.954799,698,358,2, 16,2014-01-16 13:50:00.99119,883,6,676, 96,2014-01-11 12:20:08.834106,974,128,744, 96,2014-01-15 02:40:49.045262,24,18,456, 16,2014-01-15 04:27:53.744707,263,81,935, 16,2014-01-17 09:10:18.58322,347,389,769, 96,2014-01-17 12:18:41.112775,58,10,647, 96,2014-01-13 12:57:52.006461,5,811,534, 16,2014-01-16 08:50:07.549173,316,26,771, 96,2014-01-17 13:04:07.944947,606,237,684, 16,2014-01-10 20:35:14.494331,447,305,647, 96,2014-01-15 20:19:00.431671,505,401,23, 16,2014-01-18 22:22:23.511854,63,403,194, 16,2014-01-11 
12:03:09.885951,353,608,671, 16,2014-01-14 04:33:22.547158,260,278,993, 16,2014-01-18 02:11:33.291097,273,516,110, 96,2014-01-13 05:33:09.829135,426,552,444, 96,2014-01-19 02:38:24.735364,219,102,937, 96,2014-01-13 05:43:56.447703,966,639,615, 16,2014-01-13 03:43:03.824934,642,528,920, 96,2014-01-15 01:57:28.570281,860,911,940, 96,2014-01-20 09:41:10.291633,892,705,130, 16,2014-01-19 06:20:32.937898,359,497,232, 16,2014-01-18 02:47:02.491477,971,788,42, 96,2014-01-17 14:12:47.56989,60,79,130, 16,2014-01-20 06:19:40.230727,644,966,46, 16,2014-01-15 02:32:02.515479,767,58,175, 96,2014-01-12 15:18:33.064164,904,137,459, 16,2014-01-12 19:37:28.663529,955,964,820, 96,2014-01-19 04:50:07.498775,961,825,845, 96,2014-01-13 17:28:23.179738,427,339,175, 16,2014-01-20 21:12:11.618747,587,752,689, 16,2014-01-14 12:40:08.490383,660,922,320, 96,2014-01-14 09:48:32.013844,414,310,297, 16,2014-01-14 17:53:00.664009,438,955,166, 16,2014-01-18 21:08:02.23509,469,202,861, 96,2014-01-20 22:58:50.597483,128,168,194, 96,2014-01-11 00:11:41.776784,364,949,884, 96,2014-01-11 04:05:54.241551,263,803,401, 96,2014-01-16 01:48:58.146656,585,526,385, 16,2014-01-17 17:15:49.721731,591,719,143, 96,2014-01-11 04:41:08.736301,605,343,423, 16,2014-01-21 02:07:58.578327,839,988,759, 16,2014-01-15 09:38:13.233585,35,958,178, 16,2014-01-11 20:44:18.622252,552,993,808, 16,2014-01-14 00:41:21.125484,961,851,962, 16,2014-01-12 05:49:46.524681,598,253,638, 16,2014-01-14 06:17:28.451388,774,173,818, 16,2014-01-11 21:19:45.861267,138,563,22, 96,2014-01-14 15:33:42.972384,805,553,549, 16,2014-01-16 14:00:32.276322,941,346,40, 96,2014-01-16 03:17:14.431548,794,847,860, 16,2014-01-13 19:14:53.264018,272,874,432, 96,2014-01-14 14:21:05.214639,133,642,593, 96,2014-01-13 00:04:16.923444,32,315,214, 16,2014-01-20 13:27:40.833907,170,884,642, 16,2014-01-13 14:40:45.445366,543,655,199, 96,2014-01-17 06:36:19.438923,419,303,651, 96,2014-01-11 22:46:13.89849,679,197,506, 16,2014-01-15 14:18:14.109314,893,193,532, 96,2014-01-15 05:26:26.937698,684,819,289, 16,2014-01-14 20:14:37.078227,774,646,649, 96,2014-01-15 05:30:25.72806,955,0,982, 96,2014-01-18 22:06:35.42808,447,413,287, 16,2014-01-18 10:03:09.092071,425,193,67, 16,2014-01-19 03:23:26.39207,577,535,17, 96,2014-01-16 13:14:37.66359,33,468,467, 96,2014-01-18 03:01:59.689554,880,870,256, 16,2014-01-19 07:35:08.168854,786,506,388, 96,2014-01-16 21:20:31.90514,408,18,977, 16,2014-01-12 22:50:57.836629,836,135,964, 16,2014-01-15 18:50:57.890585,550,176,46, 16,2014-01-17 06:01:40.641441,411,856,411, 96,2014-01-12 18:58:56.414956,60,809,884, 16,2014-01-20 08:29:11.124171,982,902,120, 16,2014-01-18 06:45:59.264112,531,433,355, 96,2014-01-15 23:40:17.54086,989,914,254, 96,2014-01-11 08:18:57.648852,109,187,466, 96,2014-01-11 07:03:27.7155,80,663,325, 16,2014-01-17 01:00:03.402127,982,980,350, 16,2014-01-15 03:52:40.621235,803,270,844, 16,2014-01-17 01:03:59.992241,279,316,416, 96,2014-01-11 22:17:17.833675,635,858,360, 16,2014-01-18 03:07:33.885673,317,388,563, 96,2014-01-20 19:25:05.206879,789,582,332, 16,2014-01-14 02:21:34.757119,401,633,220, 96,2014-01-17 10:35:14.71958,3,538,141, 96,2014-01-13 04:05:50.652246,658,29,820, 16,2014-01-20 12:57:54.196042,660,917,375, 96,2014-01-18 13:21:28.618069,591,623,446, 16,2014-01-17 22:24:04.761559,120,305,458, 16,2014-01-14 12:24:21.134158,502,165,321, 16,2014-01-12 03:36:05.696297,235,281,131, 96,2014-01-11 21:54:30.48962,998,824,424, 96,2014-01-12 04:30:56.56224,902,160,198, 16,2014-01-19 17:39:14.788786,934,782,233, 16,2014-01-20 
05:17:56.88169,315,362,112, 96,2014-01-17 21:45:34.225828,721,817,115, 16,2014-01-16 14:41:14.478341,535,884,974, 16,2014-01-18 06:53:05.050544,685,836,769, 96,2014-01-13 13:06:06.131388,966,104,720, 96,2014-01-18 17:32:12.31497,406,582,821, 96,2014-01-12 19:44:02.941129,963,926,589, 96,2014-01-19 23:07:46.772828,530,645,536, 16,2014-01-20 16:31:08.730346,767,267,944, 16,2014-01-11 22:13:14.0653,438,387,790, 16,2014-01-16 23:53:46.036941,281,625,975, 16,2014-01-15 10:11:26.271205,118,766,653, 16,2014-01-12 10:32:11.714152,558,666,391, 96,2014-01-17 10:57:13.752859,100,35,113, 16,2014-01-11 05:11:29.673333,461,401,327, 96,2014-01-16 18:24:52.335807,637,638,323, 96,2014-01-13 06:01:13.745101,54,799,721, 16,2014-01-12 07:28:47.507008,470,865,366, 96,2014-01-13 15:32:26.221479,876,571,987, 16,2014-01-12 19:26:18.95198,121,859,876, 96,2014-01-15 13:50:22.264547,528,198,375, 96,2014-01-20 06:07:40.941059,773,96,449, 16,2014-01-15 03:32:09.604226,337,398,456, 16,2014-01-14 20:48:16.460589,284,79,570, 16,2014-01-17 13:29:09.559547,368,860,982, 16,2014-01-11 19:56:14.366204,758,858,851, 96,2014-01-18 13:12:37.594748,996,580,166, 16,2014-01-18 21:05:15.255844,261,559,255, 16,2014-01-12 21:50:44.855824,561,265,790, 96,2014-01-19 21:43:34.157406,195,930,853, 16,2014-01-17 08:44:30.04463,700,418,516, 96,2014-01-11 21:08:41.737933,458,227,312, 16,2014-01-16 13:29:08.383234,664,615,129, 16,2014-01-12 17:25:44.52339,940,860,806, 96,2014-01-19 08:01:46.788477,389,636,801, 96,2014-01-19 06:35:14.514622,992,104,659, 96,2014-01-20 14:57:56.83836,110,311,199, 96,2014-01-10 21:45:49.730026,456,482,595, 96,2014-01-17 23:43:01.28787,368,932,212, 96,2014-01-20 01:29:05.568705,368,81,383, 96,2014-01-11 23:59:03.795326,509,901,825, 96,2014-01-13 17:36:47.32481,512,826,930, 96,2014-01-14 05:40:31.83991,718,579,634, 16,2014-01-13 14:31:15.509897,974,146,982, 96,2014-01-20 08:34:01.077671,541,195,161, 96,2014-01-14 14:52:01.513244,142,730,281, 16,2014-01-19 12:56:07.845704,189,965,116, 16,2014-01-12 08:35:14.822771,319,747,538, 16,2014-01-16 02:20:49.020671,409,449,566, 96,2014-01-11 22:28:34.067183,795,535,921, 16,2014-01-14 08:01:33.774751,825,772,654, 16,2014-01-20 20:11:11.285218,984,401,931, 96,2014-01-10 22:36:15.008243,910,657,375, 96,2014-01-18 15:33:43.379396,945,700,67, 16,2014-01-14 10:59:27.745806,556,217,512, 96,2014-01-17 16:05:24.56779,415,131,5, 96,2014-01-19 15:29:57.7456,446,985,923, 96,2014-01-11 18:12:05.340973,747,955,358, 16,2014-01-15 07:10:39.824052,339,320,790, 96,2014-01-11 07:20:42.601424,204,201,367, 96,2014-01-20 19:55:39.498379,611,89,740, 16,2014-01-11 09:55:09.868683,437,72,683, 16,2014-01-12 08:29:24.339357,850,937,533, 96,2014-01-16 03:24:47.881614,827,155,958, 16,2014-01-13 07:20:54.392073,297,111,10, 96,2014-01-20 20:31:11.128253,537,445,18, 16,2014-01-14 04:00:02.396654,121,428,534, 16,2014-01-12 16:18:51.230852,427,377,81, 96,2014-01-20 22:17:00.858279,767,288,40, 96,2014-01-10 21:43:03.684524,32,334,579, 96,2014-01-11 11:47:56.799557,108,520,546, 16,2014-01-11 16:16:04.653605,233,740,974, 96,2014-01-13 19:19:51.009334,756,238,583, 96,2014-01-14 21:28:28.639887,265,464,171, 96,2014-01-14 10:47:20.163502,693,67,832, 96,2014-01-12 21:53:52.087424,74,185,913, 16,2014-01-18 16:20:30.153131,588,177,644, 16,2014-01-12 17:43:28.009206,391,885,525, 96,2014-01-14 10:29:06.910196,715,38,981, 16,2014-01-13 12:41:19.173802,339,179,453, 96,2014-01-13 20:12:02.07639,97,340,391, 16,2014-01-17 22:30:40.685366,661,189,234, 16,2014-01-13 02:52:30.45902,964,111,722, 16,2014-01-13 
22:48:17.08505,386,37,561, 96,2014-01-15 08:04:24.064761,445,407,640, 96,2014-01-16 17:51:58.205245,891,406,160, 96,2014-01-20 18:53:41.65284,944,566,128, 16,2014-01-13 17:34:21.810361,608,578,847, 16,2014-01-17 16:04:03.546218,130,215,177, 16,2014-01-14 20:04:21.476893,120,627,770, 16,2014-01-14 04:55:04.411785,795,258,817, 16,2014-01-17 05:59:43.044597,337,628,219, 96,2014-01-15 09:59:31.345995,410,418,843, 96,2014-01-15 17:24:28.751562,807,895,565, 16,2014-01-12 03:24:30.92621,533,621,912, 96,2014-01-17 21:20:25.738068,612,170,681, 96,2014-01-15 07:55:39.879816,209,90,28, 16,2014-01-15 11:24:33.322865,40,214,534, 16,2014-01-19 17:39:16.96892,411,118,314, 96,2014-01-15 00:12:40.738095,111,669,825, 96,2014-01-15 13:07:37.007389,114,987,3, 16,2014-01-20 09:27:13.768896,931,548,30, 16,2014-01-15 20:28:45.391701,2,19,544, 96,2014-01-18 12:27:28.017143,641,841,2, 16,2014-01-14 00:55:42.408783,178,369,877, 16,2014-01-19 11:16:05.555623,183,446,338, 96,2014-01-20 14:21:20.104567,395,911,391, 16,2014-01-11 11:16:12.561915,650,853,483, 16,2014-01-10 22:59:33.564828,900,737,24, 85,2014-01-13 18:50:27.014762,672,985,281, 85,2014-01-14 03:57:31.736135,686,519,14, 98,2014-01-13 23:11:35.641636,532,388,701, 85,2014-01-20 21:21:07.700128,344,470,407, 98,2014-01-16 10:50:02.195155,708,23,279, 85,2014-01-17 01:59:52.726687,839,565,582, 85,2014-01-14 23:25:31.76489,253,13,986, 98,2014-01-11 22:42:00.400401,21,371,987, 98,2014-01-16 14:53:34.379527,726,700,408, 85,2014-01-17 20:59:53.575251,660,173,356, 85,2014-01-18 18:46:03.946619,754,346,218, 85,2014-01-20 14:57:55.85684,991,913,368, 98,2014-01-21 05:54:57.987456,23,873,402, 85,2014-01-14 18:45:46.991216,179,532,254, 85,2014-01-14 18:57:27.202833,971,700,199, 98,2014-01-15 17:19:26.739018,726,77,214, 85,2014-01-16 02:10:17.917845,361,489,80, 98,2014-01-11 10:17:52.940901,601,207,644, 98,2014-01-20 05:15:06.618833,35,943,148, 98,2014-01-20 17:34:51.240709,663,73,11, 98,2014-01-20 07:57:09.91024,116,413,927, 85,2014-01-13 23:27:47.356929,423,952,206, 85,2014-01-15 00:42:28.248517,30,571,9, 98,2014-01-19 11:24:23.679136,574,537,217, 98,2014-01-18 23:56:32.748629,744,998,641, 85,2014-01-12 07:09:56.265658,995,959,371, 85,2014-01-12 06:20:06.08792,628,918,758, 85,2014-01-17 05:12:38.304252,155,172,551, 85,2014-01-11 15:31:16.370225,9,632,116, 85,2014-01-12 21:36:18.649833,125,483,874, 85,2014-01-17 08:12:11.8695,468,263,195, 85,2014-01-14 14:21:43.384988,409,860,73, 98,2014-01-16 05:33:50.38597,194,82,975, 98,2014-01-20 11:23:47.511136,869,1000,138, 85,2014-01-14 05:42:51.085534,189,506,257, 98,2014-01-11 10:23:52.581545,546,295,133, 98,2014-01-16 07:23:40.237824,564,316,431, 98,2014-01-18 09:08:22.850424,939,392,653, 98,2014-01-12 13:05:52.981945,22,12,549, 85,2014-01-11 16:17:14.617351,340,553,457, 85,2014-01-15 00:08:16.426095,437,730,671, 98,2014-01-20 11:51:56.928983,72,304,620, 85,2014-01-11 01:15:10.474191,652,229,277, 98,2014-01-15 00:03:14.41355,652,340,566, 98,2014-01-14 00:37:43.920199,676,561,315, 98,2014-01-15 00:12:37.677024,387,878,698, 98,2014-01-19 21:22:41.152568,124,831,909, 85,2014-01-19 06:48:01.838044,352,223,252, 85,2014-01-15 14:30:30.618345,846,987,514, 98,2014-01-18 20:37:47.771402,920,51,780, 98,2014-01-18 18:22:53.079172,324,604,113, 98,2014-01-14 16:27:40.528584,122,125,642, 85,2014-01-11 14:05:35.12833,593,776,735, 98,2014-01-12 13:05:21.327688,49,811,833, 85,2014-01-12 21:52:04.207721,754,881,261, 85,2014-01-19 18:02:07.87696,486,124,621, 98,2014-01-14 00:15:17.593347,791,298,512, 85,2014-01-14 08:12:10.29564,590,207,745, 
85,2014-01-15 17:14:46.181631,432,51,710, 85,2014-01-14 19:46:33.963572,688,867,255, 85,2014-01-16 09:48:28.945474,758,329,118, 98,2014-01-11 19:26:58.051131,432,951,800, 85,2014-01-18 14:08:17.348559,706,36,453, 85,2014-01-11 09:22:19.331862,260,152,735, 85,2014-01-11 00:50:45.562268,354,943,232, 98,2014-01-11 13:51:08.434094,168,750,796, 85,2014-01-11 23:46:11.913406,28,626,949, 85,2014-01-16 12:14:25.800092,546,342,748, 85,2014-01-19 02:59:31.284937,534,653,960, 98,2014-01-13 16:52:04.895352,503,815,712, 98,2014-01-17 08:31:40.417443,333,368,437, 98,2014-01-12 21:07:47.711032,672,608,866, 98,2014-01-12 22:44:01.824334,32,149,27, 85,2014-01-17 13:46:50.892052,454,453,620, 98,2014-01-17 01:11:02.124582,2,691,450, 85,2014-01-16 03:21:45.744953,625,249,313, 85,2014-01-11 07:59:28.569077,176,800,584, 85,2014-01-15 16:33:43.27715,296,463,781, 98,2014-01-14 04:09:47.582996,462,43,462, 85,2014-01-16 02:29:59.187422,795,189,514, 85,2014-01-13 07:11:31.048552,11,752,680, 98,2014-01-11 16:32:40.662168,39,971,63, 85,2014-01-19 22:57:39.716006,417,958,318, 98,2014-01-14 01:17:06.176882,450,524,888, 85,2014-01-13 09:38:01.989855,447,230,402, 85,2014-01-11 14:49:43.923726,140,421,25, 85,2014-01-12 13:19:14.053843,433,396,534, 85,2014-01-16 13:53:19.583202,77,714,15, 98,2014-01-15 03:01:54.219367,454,258,894, 85,2014-01-17 10:34:00.235474,263,391,720, 85,2014-01-20 13:39:53.546774,570,246,17, 98,2014-01-20 16:50:23.165259,735,909,647, 98,2014-01-18 10:00:58.286604,156,87,630, 98,2014-01-17 21:48:10.895753,419,435,674, 98,2014-01-10 20:12:42.497122,295,783,530, 85,2014-01-18 14:51:43.848872,825,96,990, 85,2014-01-18 15:39:19.329847,282,708,228, 85,2014-01-11 23:58:54.410528,807,399,245, 85,2014-01-13 21:06:09.648964,762,6,343, 85,2014-01-16 12:38:50.614784,168,668,878, 85,2014-01-14 20:50:59.30588,566,18,838, 85,2014-01-20 09:37:50.066826,384,759,72, 85,2014-01-18 13:46:38.325817,218,53,711, 98,2014-01-16 23:35:01.130633,656,755,840, 85,2014-01-16 17:24:40.958878,41,332,652, 98,2014-01-14 08:57:40.450398,543,652,427, 98,2014-01-11 20:56:46.875586,985,623,126, 98,2014-01-17 05:24:09.527955,257,779,583, 98,2014-01-19 05:31:23.727548,617,189,900, 85,2014-01-15 05:06:34.458582,583,698,435, 85,2014-01-12 01:54:08.715796,676,949,995, 98,2014-01-11 06:42:54.7761,974,12,468, 98,2014-01-16 01:39:15.121169,446,599,805, 85,2014-01-21 04:51:48.431802,622,184,683, 98,2014-01-14 12:00:00.952982,230,568,181, 85,2014-01-18 15:17:17.111025,310,634,476, 85,2014-01-11 13:41:32.355948,547,136,11, 98,2014-01-16 05:19:15.007244,338,736,824, 98,2014-01-13 23:10:36.694646,623,878,8, 85,2014-01-15 20:43:26.575314,659,922,459, 98,2014-01-12 09:53:15.242717,329,373,381, 98,2014-01-13 06:50:30.24142,207,3,309, 98,2014-01-15 07:33:49.740573,492,90,777, 85,2014-01-19 23:54:13.529741,218,512,126, 85,2014-01-20 08:38:41.137172,164,311,557, 98,2014-01-15 07:46:32.237695,877,374,832, 85,2014-01-17 08:45:57.378614,437,847,326, 98,2014-01-17 18:18:00.467438,859,480,447, 85,2014-01-16 11:45:26.648223,262,816,426, 98,2014-01-20 09:52:07.027996,935,677,858, 85,2014-01-13 00:56:51.082222,858,655,894, 85,2014-01-20 12:36:25.954522,596,855,310, 85,2014-01-19 13:29:57.094822,92,786,693, 85,2014-01-20 18:43:29.408457,520,527,980, 98,2014-01-16 06:11:27.085155,525,970,527, 85,2014-01-15 00:54:38.053701,946,101,385, 98,2014-01-13 21:41:09.858856,360,37,598, 98,2014-01-17 07:08:13.960742,0,428,127, 85,2014-01-11 00:18:47.581656,987,349,430, 85,2014-01-11 21:12:33.586823,141,880,203, 85,2014-01-11 06:14:48.419325,981,184,308, 85,2014-01-12 
06:12:56.297452,214,993,704, 85,2014-01-12 07:55:28.362923,264,33,204, 98,2014-01-16 11:54:03.540494,568,507,299, 85,2014-01-12 05:04:44.729673,410,554,350, 98,2014-01-15 23:55:29.316324,529,56,223, 85,2014-01-13 21:11:20.651937,165,601,172, 98,2014-01-12 22:46:17.085621,847,495,66, 98,2014-01-10 23:14:44.323568,2,369,577, 85,2014-01-17 00:21:57.346583,958,205,672, 98,2014-01-17 23:29:43.660934,467,198,714, 85,2014-01-12 13:07:59.566285,404,220,142, 85,2014-01-19 11:12:27.588002,660,805,876, 98,2014-01-12 01:03:33.401927,35,97,594, 85,2014-01-11 07:02:13.096026,660,426,435, 98,2014-01-18 13:51:08.725594,150,72,788, 98,2014-01-16 12:50:05.639622,992,490,459, 98,2014-01-17 19:48:10.475058,114,592,516, 98,2014-01-15 02:09:09.193031,328,657,780, 98,2014-01-11 18:35:32.288264,997,249,967, 85,2014-01-16 23:40:17.503054,562,470,325, 85,2014-01-17 07:06:00.275254,440,689,324, 85,2014-01-11 01:11:58.242785,460,163,309, 98,2014-01-15 07:10:14.597877,401,577,561, 85,2014-01-16 19:49:29.683711,890,407,583, 98,2014-01-16 11:23:25.327941,788,865,918, 98,2014-01-19 12:04:52.651578,221,67,660, 85,2014-01-19 21:30:39.542986,105,518,464, 98,2014-01-12 12:31:39.288682,400,405,334, 85,2014-01-19 16:23:40.233234,533,479,244, 85,2014-01-20 22:43:13.129809,205,386,541, 85,2014-01-12 22:46:27.708007,142,964,601, 98,2014-01-21 02:36:36.531105,488,422,644, 85,2014-01-12 00:38:41.492732,668,496,111, 98,2014-01-18 14:40:31.24892,483,590,16, 98,2014-01-12 01:41:21.260779,45,355,991, 98,2014-01-17 04:34:10.809056,407,238,764, 85,2014-01-11 05:51:51.900857,794,336,386, 98,2014-01-14 04:27:38.3464,692,922,964, 98,2014-01-17 07:48:55.132625,617,861,629, 85,2014-01-17 10:13:49.24744,62,515,981, 98,2014-01-10 21:57:22.007334,836,517,603, 98,2014-01-19 00:56:54.699329,727,414,187, 85,2014-01-15 15:26:16.835443,32,770,746, 98,2014-01-12 03:00:55.409261,906,648,399, 85,2014-01-19 11:59:07.795354,891,723,617, 85,2014-01-12 23:17:25.561035,931,194,771, 98,2014-01-17 19:51:01.049302,699,840,879, 98,2014-01-16 01:47:18.270413,390,349,847, 85,2014-01-17 05:26:34.754067,355,27,512, 84,2014-01-18 18:26:33.337566,125,351,626, 84,2014-01-11 19:27:35.773468,250,557,722, 36,2014-01-13 06:32:35.02974,644,223,381, 36,2014-01-18 23:38:31.580351,698,548,552, 36,2014-01-16 06:37:50.371344,331,72,970, 84,2014-01-19 06:22:04.713451,220,436,878, 36,2014-01-14 05:01:56.908291,618,405,330, 84,2014-01-14 12:42:43.022922,761,209,990, 84,2014-01-17 21:52:44.256437,83,638,537, 36,2014-01-15 21:33:36.196973,417,310,967, 36,2014-01-12 23:06:23.256575,806,94,687, 84,2014-01-17 14:35:57.386246,707,841,772, 89,2014-01-18 00:20:03.90498,256,816,117, 89,2014-01-12 19:42:59.78768,117,798,874, 89,2014-01-18 19:14:38.879398,238,30,550, 84,2014-01-15 09:00:35.1539,360,462,590, 84,2014-01-14 01:24:21.048459,804,241,509, 89,2014-01-14 17:48:49.688454,171,920,237, 36,2014-01-15 18:52:27.054757,96,582,607, 84,2014-01-17 09:51:59.394858,422,885,943, 89,2014-01-21 05:37:44.821498,163,776,13, 89,2014-01-11 23:06:16.302197,93,101,230, 89,2014-01-17 11:49:21.402193,527,714,589, 89,2014-01-19 00:34:39.520827,965,845,412, 84,2014-01-16 18:32:33.138058,854,680,832, 36,2014-01-18 18:50:16.811874,282,327,443, 84,2014-01-17 06:33:47.316181,221,756,210, 36,2014-01-18 21:49:58.699094,115,500,936, 89,2014-01-15 08:41:17.861176,561,205,591, 84,2014-01-12 02:21:05.587014,159,221,609, 84,2014-01-14 21:16:33.45358,888,621,996, 36,2014-01-12 21:07:51.198742,308,969,700, 36,2014-01-13 01:48:41.36048,774,13,716, 36,2014-01-17 07:49:08.48332,325,755,457, 36,2014-01-21 
00:46:22.779093,296,81,919, 36,2014-01-18 12:26:31.732244,137,689,771, 84,2014-01-15 08:11:13.196771,730,541,403, 36,2014-01-13 23:48:19.687383,515,700,473, 36,2014-01-11 19:09:14.755165,870,232,788, 84,2014-01-12 00:03:57.453627,948,884,188, 84,2014-01-19 01:21:55.884356,289,473,510, 36,2014-01-13 22:15:38.01174,469,343,818, 84,2014-01-18 18:39:54.839874,265,761,13, 84,2014-01-15 19:41:59.789336,702,273,249, 89,2014-01-15 21:58:37.79942,813,626,457, 89,2014-01-16 07:54:33.719271,362,621,839, 89,2014-01-20 08:42:34.943655,11,235,23, 89,2014-01-19 03:22:58.847879,337,598,270, 84,2014-01-20 05:43:23.408145,894,234,817, 89,2014-01-14 21:35:01.998412,500,749,326, 36,2014-01-15 07:14:58.242737,336,97,944, 84,2014-01-20 05:21:08.229643,753,108,901, 36,2014-01-16 00:41:18.301028,792,716,743, 89,2014-01-11 13:04:19.64535,805,4,679, 84,2014-01-17 23:55:47.75047,873,511,294, 36,2014-01-11 13:13:51.439086,116,476,203, 84,2014-01-19 11:54:36.457223,761,226,760, 84,2014-01-14 00:29:35.066651,688,51,159, 36,2014-01-19 15:03:50.13818,720,750,601, 89,2014-01-13 14:35:54.318399,870,349,543, 84,2014-01-15 06:50:40.653665,895,439,173, 36,2014-01-13 06:20:23.59176,862,399,485, 84,2014-01-15 15:43:45.517141,94,768,333, 84,2014-01-17 12:39:22.014564,645,817,968, 84,2014-01-19 18:09:32.07508,92,731,679, 89,2014-01-15 10:30:08.296233,10,399,35, 36,2014-01-14 19:05:53.746807,900,919,881, 36,2014-01-13 20:20:45.272271,905,14,789, 84,2014-01-18 14:18:27.983617,516,602,618, 36,2014-01-15 18:15:08.501972,861,120,353, 84,2014-01-15 00:24:42.725898,230,776,850, 89,2014-01-16 09:40:23.867973,852,650,89, 36,2014-01-18 20:30:46.514131,986,274,238, 84,2014-01-12 13:04:37.565771,595,643,744, 84,2014-01-10 23:22:23.65731,981,665,968, 84,2014-01-13 12:29:24.313551,207,142,375, 84,2014-01-18 00:59:11.285461,301,845,723, 89,2014-01-20 12:04:58.600966,512,714,836, 84,2014-01-11 09:52:23.161849,804,126,530, 89,2014-01-17 00:42:34.693606,806,660,917, 84,2014-01-14 03:40:00.599797,308,902,178, 89,2014-01-15 21:07:21.404586,470,606,697, 36,2014-01-16 00:03:42.923249,427,614,233, 84,2014-01-19 08:21:18.900825,825,336,776, 36,2014-01-16 14:11:41.049935,937,942,533, 89,2014-01-12 17:59:30.673719,891,826,800, 89,2014-01-20 01:35:10.339911,925,641,892, 36,2014-01-14 20:06:17.507159,130,179,753, 84,2014-01-15 22:29:05.74037,956,320,256, 36,2014-01-18 10:39:00.478092,586,187,56, 36,2014-01-17 14:42:11.825558,359,280,493, 89,2014-01-20 09:19:46.394453,110,291,671, 89,2014-01-20 20:59:24.070271,961,496,328, 89,2014-01-12 00:25:57.3427,7,600,475, 36,2014-01-16 15:59:08.409017,343,892,365, 89,2014-01-19 09:08:56.145352,936,666,958, 89,2014-01-16 14:56:05.638933,188,217,902, 89,2014-01-20 15:05:02.155824,69,466,277, 89,2014-01-11 23:29:41.417623,302,0,754, 84,2014-01-13 23:14:33.62255,577,998,608, 89,2014-01-15 03:20:10.657797,251,631,728, 36,2014-01-16 03:54:24.14352,951,967,333, 36,2014-01-19 12:54:57.490523,116,658,511, 36,2014-01-12 17:50:57.171928,498,369,972, 36,2014-01-17 20:59:01.70971,132,524,668, 84,2014-01-19 16:17:21.148252,125,718,951, 84,2014-01-15 10:20:21.485898,133,899,827, 84,2014-01-14 15:58:12.995172,852,908,181, 36,2014-01-18 22:22:19.749218,980,811,38, 89,2014-01-16 00:12:44.647747,788,63,740, 36,2014-01-20 20:40:47.688779,419,853,998, 89,2014-01-11 20:02:20.349015,270,624,818, 84,2014-01-21 01:20:06.052333,669,31,489, 89,2014-01-15 14:44:30.612028,456,799,355, 89,2014-01-20 08:23:39.249839,95,228,92, 36,2014-01-16 09:31:47.102689,877,889,45, 36,2014-01-17 12:44:01.285747,808,547,150, 89,2014-01-19 
03:58:49.58975,382,65,310, 89,2014-01-20 09:38:04.609847,557,525,594, 36,2014-01-12 05:13:07.026118,980,606,817, 84,2014-01-16 08:37:50.068261,722,838,181, 36,2014-01-16 18:20:16.435825,80,776,101, 36,2014-01-11 08:32:53.420571,465,968,67, 89,2014-01-15 23:37:14.138532,216,178,455, 84,2014-01-17 22:46:13.778524,737,52,474, 84,2014-01-17 04:32:01.829588,607,372,653, 84,2014-01-14 02:46:10.283884,361,343,850, 84,2014-01-13 07:42:19.417876,787,861,964, 89,2014-01-16 13:37:03.985412,356,505,661, 36,2014-01-15 06:15:51.701926,643,312,58, 89,2014-01-16 10:56:53.040426,452,957,931, 89,2014-01-20 20:57:14.643628,199,158,614, 84,2014-01-20 14:10:15.845446,145,501,528, 84,2014-01-14 17:51:50.531368,1,252,423, 36,2014-01-12 08:48:11.815556,163,165,592, 89,2014-01-17 05:09:17.555156,58,286,977, 36,2014-01-13 04:09:11.67962,114,129,764, 84,2014-01-16 23:08:33.301454,617,254,73, 89,2014-01-21 01:07:30.550329,668,945,404, 89,2014-01-10 20:31:31.428838,206,313,89, 89,2014-01-11 17:21:17.949201,483,988,640, 89,2014-01-20 15:48:18.239107,864,249,694, 36,2014-01-11 20:33:51.778271,397,628,807, 84,2014-01-11 12:41:24.001953,800,381,284, 36,2014-01-15 00:32:48.851136,836,877,42, 36,2014-01-10 22:57:31.028109,926,208,608, 84,2014-01-17 02:13:11.104641,352,452,498, 84,2014-01-11 07:16:50.136883,695,523,754, 89,2014-01-19 06:56:20.618279,120,868,381, 89,2014-01-16 05:51:15.714488,924,162,712, 89,2014-01-12 16:29:57.16342,616,192,820, 89,2014-01-14 09:34:10.68654,317,375,60, 36,2014-01-11 18:11:32.150313,822,583,910, 84,2014-01-13 05:02:50.583991,242,184,172, 89,2014-01-19 13:11:24.825073,416,766,367, 84,2014-01-18 20:57:45.929256,13,419,683, 84,2014-01-19 13:34:52.413998,931,410,414, 84,2014-01-12 09:57:35.108956,924,243,216, 89,2014-01-10 22:40:05.347132,301,749,481, 89,2014-01-14 21:11:56.39941,542,353,854, 36,2014-01-16 20:13:26.810882,797,134,765, 36,2014-01-16 13:36:58.387558,256,846,216, 84,2014-01-14 12:09:11.043039,724,513,109, 36,2014-01-16 04:23:42.656747,43,836,256, 89,2014-01-20 11:28:48.918926,750,634,674, 84,2014-01-16 00:57:22.859013,402,773,285, 89,2014-01-12 03:33:00.211904,775,252,740, 36,2014-01-12 09:38:00.598547,256,14,158, 84,2014-01-11 18:05:56.160467,166,557,300, 89,2014-01-11 22:40:30.762652,395,731,416, 36,2014-01-12 10:09:32.027803,291,909,98, 89,2014-01-12 15:27:14.110086,564,359,673, 36,2014-01-11 08:28:49.001759,307,474,355, 36,2014-01-13 10:43:23.806074,35,991,271, 84,2014-01-13 08:08:38.11204,689,247,234, 36,2014-01-15 13:01:37.853315,451,546,684, 89,2014-01-13 13:40:54.834183,140,165,670, 89,2014-01-19 14:21:49.21668,701,41,55, 36,2014-01-16 00:18:27.990198,843,701,217, 36,2014-01-11 14:37:15.452463,562,146,364, 84,2014-01-14 14:13:04.931168,791,28,973, 89,2014-01-17 20:48:25.153618,872,834,801, 36,2014-01-15 04:11:26.139003,908,709,518, 36,2014-01-15 12:24:37.0819,450,113,322, 84,2014-01-20 05:51:15.738028,623,96,284, 84,2014-01-13 11:22:50.964075,861,797,601, 84,2014-01-13 03:22:23.011155,409,701,824, 84,2014-01-18 13:26:08.152025,381,438,152, 84,2014-01-15 01:20:26.073451,397,965,74, 36,2014-01-13 06:02:28.358289,858,536,332, 36,2014-01-12 04:38:04.551435,790,511,57, 36,2014-01-21 01:33:52.884333,97,748,821, 36,2014-01-18 23:39:26.745846,654,797,921, 89,2014-01-15 20:47:15.594893,837,638,312, 84,2014-01-15 23:57:35.54108,832,350,307, 84,2014-01-18 05:08:15.665192,70,651,41, 84,2014-01-21 01:44:38.453905,837,595,482, 36,2014-01-17 07:30:35.753403,375,28,291, 89,2014-01-19 18:46:16.263739,103,93,514, 89,2014-01-11 13:50:34.614372,968,708,749, 36,2014-01-18 
10:11:06.516055,833,431,227, 36,2014-01-10 22:55:48.291542,858,96,94, 84,2014-01-13 09:17:48.724458,730,888,434, 84,2014-01-18 22:39:55.517816,173,408,614, 36,2014-01-13 13:39:12.097616,992,262,200, 36,2014-01-15 21:26:26.836498,232,919,591, 89,2014-01-13 05:41:33.37113,58,952,36, 36,2014-01-16 07:20:06.932219,510,802,864, 36,2014-01-14 05:48:16.053178,70,506,718, 84,2014-01-18 10:00:01.361748,153,976,165, 84,2014-01-17 01:57:22.384681,288,428,193, 84,2014-01-18 00:01:20.984765,444,316,882, 84,2014-01-15 00:48:26.515366,291,53,686, 84,2014-01-11 00:08:48.524103,865,531,88, 36,2014-01-12 06:25:58.066665,650,663,380, 36,2014-01-14 00:39:42.253393,591,70,17, 36,2014-01-13 15:31:39.488178,406,988,622, 89,2014-01-14 13:48:21.077821,0,215,213, 36,2014-01-11 08:05:50.405419,439,814,256, 89,2014-01-17 20:52:05.561629,762,83,925, 36,2014-01-16 23:50:49.436108,481,164,216, 36,2014-01-12 16:43:54.957273,471,170,337, 89,2014-01-17 16:25:58.446381,609,119,555, 89,2014-01-14 17:30:16.181955,420,831,99, 36,2014-01-17 17:31:10.552166,927,225,250, 84,2014-01-12 10:23:33.987462,435,540,806, 89,2014-01-11 16:38:31.847147,351,343,60, 36,2014-01-17 13:15:49.006071,510,894,640, 84,2014-01-18 21:54:09.740866,380,84,756, 89,2014-01-20 15:24:48.111304,615,267,844, 36,2014-01-18 07:06:23.620443,589,968,564, 36,2014-01-16 02:05:16.256922,146,346,451, 89,2014-01-20 18:20:36.402846,486,610,912, 36,2014-01-20 20:24:12.345321,492,31,837, 84,2014-01-13 18:45:11.775156,738,2,749, 84,2014-01-13 01:59:48.50088,1,187,175, 84,2014-01-15 11:50:39.181819,774,360,709, 36,2014-01-16 04:26:45.146285,493,540,321, 89,2014-01-18 13:19:55.433099,323,975,942, 84,2014-01-18 21:38:55.235416,452,2,652, 89,2014-01-13 08:26:46.508033,215,85,930, 84,2014-01-14 09:17:17.81778,568,949,381, 36,2014-01-15 15:40:16.22018,858,976,966, 36,2014-01-17 13:15:13.023818,571,183,881, 84,2014-01-14 13:26:06.341883,987,474,184, 89,2014-01-17 02:06:14.286846,814,563,319, 36,2014-01-20 17:54:55.277211,320,721,623, 84,2014-01-17 08:57:45.83048,68,278,466, 84,2014-01-20 19:54:35.364666,883,579,362, 84,2014-01-10 20:00:45.683049,751,941,169, 36,2014-01-13 23:49:51.392109,693,585,824, 36,2014-01-16 13:45:24.800774,400,321,150, 89,2014-01-12 16:44:40.640322,237,384,235, 89,2014-01-20 20:15:49.838492,377,986,763, 84,2014-01-20 11:15:40.982729,797,946,277, 36,2014-01-19 14:15:51.192487,81,648,80, 36,2014-01-12 00:39:23.826373,667,698,988, 36,2014-01-10 21:54:12.830295,496,565,694, 36,2014-01-15 21:31:40.198558,833,63,43, 84,2014-01-20 02:33:33.567239,967,625,58, 84,2014-01-20 17:19:00.941599,50,751,345, 89,2014-01-12 22:38:03.819001,743,637,744, 89,2014-01-14 22:38:49.824579,540,409,190, 36,2014-01-20 05:39:37.344445,671,16,683, 89,2014-01-12 13:02:16.164322,172,936,245, 36,2014-01-17 21:24:01.599734,639,95,789, 89,2014-01-12 01:39:25.845325,664,551,69, 84,2014-01-17 04:52:55.34656,813,169,639, 89,2014-01-12 19:50:46.746021,368,891,526, 89,2014-01-19 18:59:21.278424,956,556,807, 84,2014-01-14 20:31:50.581976,16,85,88, 89,2014-01-15 08:17:33.254473,338,283,58, 84,2014-01-12 22:16:39.096623,719,310,894, 84,2014-01-19 16:12:06.802156,234,233,739, 89,2014-01-11 15:32:46.278291,298,123,674, 89,2014-01-16 15:42:45.438505,831,149,296, 84,2014-01-15 12:18:21.089002,449,723,406, 84,2014-01-11 03:27:41.555502,956,363,46, 36,2014-01-12 18:40:31.268986,492,349,163, 36,2014-01-15 02:12:56.453668,146,476,494, 84,2014-01-11 03:28:27.23855,110,6,615, 89,2014-01-15 22:30:22.661515,41,218,26, 36,2014-01-20 19:58:21.254442,106,492,53, 84,2014-01-13 00:13:07.878872,283,202,818, 
84,2014-01-15 12:46:12.500006,843,830,400, 36,2014-01-20 01:14:02.237171,477,356,990, 89,2014-01-11 08:28:59.071359,430,269,35, 36,2014-01-16 17:25:36.326378,743,171,753, 36,2014-01-20 03:08:15.067465,766,897,572, 36,2014-01-16 10:00:39.269917,218,554,76, 89,2014-01-15 13:59:09.894035,958,799,199, 36,2014-01-19 14:27:16.009064,619,390,941, 0,2014-01-18 12:38:43.089337,570,323,558, 64,2014-01-19 16:37:59.718615,26,931,337, 0,2014-01-18 14:06:53.353927,42,574,543, 55,2014-01-20 05:40:59.253659,392,901,2, 0,2014-01-16 08:02:01.318349,101,35,51, 19,2014-01-19 19:46:19.199252,400,621,804, 19,2014-01-16 04:33:54.60022,641,258,968, 55,2014-01-18 07:52:48.064369,402,28,741, 55,2014-01-18 08:45:40.478095,328,356,30, 55,2014-01-20 05:05:45.182195,681,201,826, 19,2014-01-12 10:10:21.318842,76,766,979, 64,2014-01-20 11:02:19.574718,529,610,939, 55,2014-01-18 15:17:51.984352,139,910,363, 55,2014-01-13 05:43:07.597133,262,436,591, 64,2014-01-15 20:45:05.013223,466,218,800, 0,2014-01-12 21:36:13.073353,88,35,27, 0,2014-01-13 13:10:49.153054,606,400,620, 55,2014-01-17 19:25:36.282628,856,549,40, 55,2014-01-17 03:49:09.527021,809,729,725, 19,2014-01-13 20:39:16.391604,978,86,638, 55,2014-01-12 11:55:58.944143,192,626,994, 55,2014-01-16 17:47:30.781463,206,601,326, 55,2014-01-16 00:52:24.270896,433,939,313, 64,2014-01-17 04:42:11.444148,754,639,23, 0,2014-01-15 13:01:33.019052,349,966,984, 64,2014-01-16 13:21:23.342254,113,164,649, 55,2014-01-12 16:07:47.770946,767,307,286, 0,2014-01-14 10:09:48.086517,22,406,349, 55,2014-01-11 17:22:02.612591,614,413,397, 64,2014-01-17 10:06:57.664981,167,951,327, 19,2014-01-12 18:37:04.096,623,798,643, 19,2014-01-19 10:00:45.701928,125,356,304, 55,2014-01-15 20:44:57.383596,883,946,788, 0,2014-01-20 12:43:57.449927,656,83,947, 0,2014-01-18 09:41:44.955588,314,618,677, 0,2014-01-10 22:46:58.701945,924,502,387, 19,2014-01-19 02:30:16.649179,129,165,541, 19,2014-01-13 08:15:39.556226,103,170,777, 55,2014-01-18 10:39:46.766314,167,38,114, 64,2014-01-16 05:15:57.127274,475,237,361, 64,2014-01-12 07:21:24.738422,888,220,624, 19,2014-01-20 00:50:08.085575,5,890,460, 55,2014-01-15 10:18:16.701991,315,385,361, 55,2014-01-20 02:39:16.723193,915,194,975, 19,2014-01-12 00:33:15.682707,758,874,904, 19,2014-01-20 11:03:21.715633,277,97,535, 55,2014-01-11 18:15:29.796546,448,90,213, 0,2014-01-14 17:44:04.835761,54,836,513, 55,2014-01-17 00:28:57.99826,289,744,557, 64,2014-01-18 02:04:39.323986,460,8,881, 64,2014-01-17 18:23:21.227785,352,705,352, 0,2014-01-18 16:24:56.942403,92,323,636, 64,2014-01-13 13:52:10.105449,616,333,912, 0,2014-01-12 13:15:45.49868,208,135,2, 0,2014-01-14 15:07:08.386552,436,957,401, 19,2014-01-18 06:53:43.124501,527,468,69, 55,2014-01-18 06:37:08.840934,680,441,544, 55,2014-01-16 11:14:56.157498,93,520,132, 55,2014-01-11 11:03:31.211437,592,961,103, 0,2014-01-19 03:59:11.453526,784,304,120, 55,2014-01-12 15:21:53.822479,8,287,506, 19,2014-01-13 09:40:35.307437,368,529,447, 19,2014-01-17 07:59:57.155453,569,638,140, 0,2014-01-17 16:06:51.206074,394,857,734, 64,2014-01-12 16:24:32.757365,393,660,476, 19,2014-01-14 11:41:42.11146,723,68,806, 64,2014-01-17 18:53:49.908019,828,412,718, 64,2014-01-20 22:54:49.406543,103,658,41, 55,2014-01-16 23:57:21.667686,472,824,669, 64,2014-01-14 23:33:36.674752,884,654,68, 64,2014-01-15 22:10:46.534235,354,485,377, 55,2014-01-18 11:18:46.406108,686,956,508, 64,2014-01-13 18:23:44.760327,373,89,987, 55,2014-01-20 12:29:03.236226,366,255,126, 64,2014-01-17 07:58:03.1293,986,728,66, 55,2014-01-14 22:57:00.443034,28,940,815, 
55,2014-01-19 17:32:24.951859,538,245,844, 55,2014-01-18 06:13:32.926266,156,654,79, 0,2014-01-18 20:41:05.279216,21,458,226, 64,2014-01-15 12:01:22.95012,486,670,375, 0,2014-01-15 02:18:12.250251,176,72,640, 0,2014-01-15 09:04:26.507,389,704,688, 19,2014-01-12 22:26:19.892524,357,379,120, 64,2014-01-17 20:10:22.3557,542,784,405, 64,2014-01-17 02:20:12.00568,431,599,292, 64,2014-01-16 17:33:28.279495,587,568,745, 55,2014-01-14 21:04:05.480621,779,647,137, 55,2014-01-14 02:57:20.846616,795,191,296, 19,2014-01-11 22:48:24.436992,156,12,276, 55,2014-01-15 12:07:36.692058,284,431,208, 55,2014-01-12 00:56:32.30056,277,875,230, 19,2014-01-13 18:10:18.25989,518,294,358, 0,2014-01-18 01:48:11.999495,907,169,625, 0,2014-01-18 12:56:29.456014,545,972,120, 64,2014-01-20 14:17:09.465965,337,33,253, 64,2014-01-19 22:12:44.756859,281,30,612, 55,2014-01-11 18:38:11.567474,484,836,796, 64,2014-01-17 03:10:59.374402,980,40,197, 55,2014-01-19 15:07:34.163402,251,118,222, 19,2014-01-17 22:35:33.23516,729,618,36, 55,2014-01-10 20:44:36.049154,689,526,31, 55,2014-01-14 07:18:20.697637,34,456,714, 19,2014-01-15 03:54:19.641687,886,874,914, 55,2014-01-13 19:08:20.809481,732,929,327, 55,2014-01-13 13:47:23.934282,150,417,629, 64,2014-01-11 05:52:22.770987,875,352,572, 0,2014-01-17 22:05:21.252516,725,712,316, 55,2014-01-12 01:19:48.886142,801,808,715, 55,2014-01-18 16:05:55.697252,635,807,842, 19,2014-01-15 12:46:26.531731,514,906,36, 0,2014-01-16 17:21:11.836681,20,124,36, 55,2014-01-12 12:24:07.947504,820,468,285, 55,2014-01-20 01:50:53.038731,494,943,522, 55,2014-01-18 19:47:31.729204,79,331,724, 19,2014-01-19 12:34:30.303623,680,20,84, 55,2014-01-15 22:11:05.044411,255,955,989, 64,2014-01-14 07:21:00.008699,282,710,612, 64,2014-01-13 03:38:35.784244,640,641,202, 0,2014-01-19 05:08:25.891446,271,434,725, 64,2014-01-15 10:09:24.44611,976,186,322, 19,2014-01-17 19:46:12.476302,27,727,266, 55,2014-01-20 10:04:58.192005,108,364,565, 55,2014-01-18 08:19:42.706,375,481,829, 19,2014-01-14 15:34:24.475797,189,359,947, 19,2014-01-17 17:01:27.648019,673,756,529, 19,2014-01-17 16:36:52.171965,955,945,541, 0,2014-01-13 07:47:09.232656,804,331,189, 19,2014-01-18 15:39:39.215912,439,453,247, 55,2014-01-13 13:47:51.546367,766,775,345, 64,2014-01-11 16:54:43.396057,828,873,891, 64,2014-01-15 08:15:12.451073,862,837,258, 64,2014-01-13 14:32:27.595522,829,372,767, 64,2014-01-15 04:13:04.094115,346,414,48, 0,2014-01-19 16:09:32.092759,753,798,974, 55,2014-01-16 13:40:48.405002,320,66,309, 55,2014-01-17 22:00:28.028397,446,616,252, 64,2014-01-20 02:01:54.864166,88,797,782, 0,2014-01-13 05:46:09.657937,991,87,858, 0,2014-01-19 03:20:16.914539,839,539,642, 64,2014-01-17 12:07:50.561419,261,717,572, 55,2014-01-17 22:32:36.189668,464,1,863, 64,2014-01-14 14:41:28.75122,982,724,253, 55,2014-01-19 04:31:58.509342,79,811,217, 0,2014-01-16 18:23:29.228818,201,652,550, 64,2014-01-12 04:29:00.480843,964,318,751, 19,2014-01-17 11:06:28.812966,277,229,236, 64,2014-01-11 10:34:34.273229,811,477,461, 0,2014-01-15 15:50:00.489543,303,532,292, 0,2014-01-19 18:45:04.597209,326,217,464, 19,2014-01-19 19:43:00.164675,165,157,190, 55,2014-01-20 05:59:24.935652,372,12,278, 64,2014-01-16 08:31:17.07351,932,618,548, 0,2014-01-18 23:47:58.35668,661,717,181, 19,2014-01-17 08:19:07.641653,282,335,226, 19,2014-01-20 04:05:41.549307,284,31,748, 19,2014-01-15 10:49:26.005119,546,932,819, 19,2014-01-13 18:55:59.814036,774,914,43, 19,2014-01-12 05:52:50.781964,872,729,283, 64,2014-01-12 20:29:05.22103,357,863,137, 64,2014-01-16 12:43:51.360404,786,272,269, 
19,2014-01-13 02:47:34.178021,215,795,37, 55,2014-01-17 08:44:17.672103,756,598,960, 19,2014-01-19 07:16:18.956345,777,777,861, 19,2014-01-17 11:00:38.272135,482,418,757, 19,2014-01-15 18:53:49.765282,392,400,381, 64,2014-01-14 14:57:07.361347,233,292,726, 64,2014-01-14 03:01:06.300532,447,274,678, 55,2014-01-14 14:55:44.629448,457,190,214, 0,2014-01-17 00:43:17.019283,328,588,364, 64,2014-01-12 00:21:23.21549,754,375,630, 64,2014-01-10 21:03:35.190867,464,598,129, 19,2014-01-13 17:15:53.209371,415,695,636, 19,2014-01-15 19:02:51.966709,412,8,699, 19,2014-01-19 05:35:33.700208,422,76,849, 55,2014-01-19 15:39:22.438189,329,862,266, 19,2014-01-17 03:31:52.447553,792,187,252, 19,2014-01-15 10:42:02.513174,799,31,902, 55,2014-01-20 06:13:56.711418,94,382,685, 19,2014-01-11 13:21:52.937515,745,750,83, 19,2014-01-13 23:27:07.110382,781,734,93, 0,2014-01-18 19:56:56.876511,419,218,805, 64,2014-01-20 23:21:17.873167,64,546,325, 0,2014-01-19 11:58:24.183894,212,744,529, 55,2014-01-16 13:44:55.233192,837,282,118, 55,2014-01-17 01:40:25.515239,810,345,547, 64,2014-01-18 10:04:05.733201,487,650,904, 64,2014-01-21 04:34:21.23831,215,517,364, 64,2014-01-20 00:36:25.329275,534,233,373, 0,2014-01-19 19:56:56.515165,263,503,822, 0,2014-01-12 19:03:26.459341,285,616,889, 19,2014-01-15 07:20:16.689679,985,618,269, 64,2014-01-11 16:44:30.693604,544,478,382, 55,2014-01-19 07:47:44.131864,982,612,909, 64,2014-01-13 08:36:35.646023,712,667,854, 55,2014-01-18 07:45:08.96574,504,248,779, 0,2014-01-13 20:41:33.897145,335,834,259, 19,2014-01-17 03:33:43.00779,347,873,208, 19,2014-01-11 04:46:15.266691,852,477,51, 19,2014-01-17 15:37:18.526593,578,927,480, 19,2014-01-12 22:17:00.027073,792,809,805, 64,2014-01-12 09:07:38.482181,763,421,43, 19,2014-01-17 16:40:53.717459,138,553,942, 55,2014-01-15 19:32:53.236444,158,389,54, 55,2014-01-17 08:10:30.448891,432,228,712, 19,2014-01-15 16:16:27.418086,224,380,524, 19,2014-01-14 05:12:15.674632,573,356,395, 19,2014-01-13 05:42:22.896862,479,722,211, 19,2014-01-20 06:58:29.93126,610,866,309, 64,2014-01-13 05:26:12.386469,839,779,181, 64,2014-01-13 23:04:15.834376,772,497,38, 55,2014-01-13 00:25:37.042062,586,869,24, 19,2014-01-21 05:23:09.26298,202,888,640, 55,2014-01-13 16:25:33.707543,570,777,396, 0,2014-01-11 06:24:01.225955,754,138,549, 55,2014-01-16 13:08:04.496172,686,687,259, 19,2014-01-19 22:05:59.222781,249,224,7, 64,2014-01-18 20:28:06.959575,843,874,329, 64,2014-01-16 11:42:25.734901,430,833,877, 55,2014-01-18 16:42:24.552057,581,50,359, 64,2014-01-17 10:25:03.47474,124,172,358, 0,2014-01-18 10:45:52.194242,667,90,870, 0,2014-01-12 18:02:41.242155,196,304,602, 64,2014-01-18 07:09:34.168344,327,893,878, 19,2014-01-16 12:33:36.326105,115,145,583, 55,2014-01-15 06:39:16.888178,868,620,315, 55,2014-01-15 08:54:43.134502,560,551,492, 55,2014-01-19 13:15:10.22325,401,284,464, 19,2014-01-11 04:12:59.895968,414,164,437, 55,2014-01-15 17:40:58.401193,431,398,979, 64,2014-01-15 22:52:28.749843,829,230,510, 19,2014-01-13 06:29:59.923459,248,272,580, 64,2014-01-17 06:48:36.883374,249,491,574, 55,2014-01-12 09:33:22.467721,974,132,53, 0,2014-01-18 06:02:53.159904,959,404,694, 19,2014-01-13 08:59:07.332265,772,890,102, 0,2014-01-17 05:49:49.885807,425,851,159, 64,2014-01-11 05:15:08.834955,668,632,183, 55,2014-01-15 18:41:30.229127,354,916,454, 55,2014-01-16 06:48:19.817068,164,225,690, 19,2014-01-13 14:41:21.221424,879,911,328, 64,2014-01-18 21:45:46.063503,119,39,435, 64,2014-01-18 11:13:56.85913,919,788,212, 19,2014-01-13 14:04:30.484404,550,593,852, 64,2014-01-11 
08:11:19.771466,695,511,191, 19,2014-01-18 21:37:58.085504,145,657,446, 64,2014-01-19 07:12:34.980576,851,731,793, 19,2014-01-20 10:17:18.994247,360,612,533, 55,2014-01-16 12:06:05.045079,245,784,31, 0,2014-01-14 12:55:00.715477,883,829,593, 0,2014-01-17 20:59:43.546723,940,78,101, 55,2014-01-12 16:31:08.519819,987,359,776, 55,2014-01-11 17:40:52.910138,8,843,979, 0,2014-01-19 19:02:24.788878,997,204,838, 55,2014-01-20 03:40:42.688581,438,493,877, 64,2014-01-17 10:14:29.236243,819,248,582, 0,2014-01-13 19:41:41.677056,630,696,267, 64,2014-01-14 06:35:25.823083,580,273,389, 0,2014-01-15 17:29:39.459911,631,370,479, 64,2014-01-14 03:54:41.573443,866,35,77, 64,2014-01-19 04:16:24.224277,890,139,913, 19,2014-01-20 20:22:08.209754,389,495,924, 64,2014-01-16 14:24:41.496903,0,649,431, 64,2014-01-15 05:05:01.107651,363,358,64, 0,2014-01-11 23:55:30.677475,65,65,641, 64,2014-01-13 14:27:34.656806,853,583,47, 55,2014-01-17 18:04:08.439916,68,71,447, 19,2014-01-18 09:45:20.563283,250,158,741, 0,2014-01-13 23:42:43.491761,173,44,187, 19,2014-01-12 06:45:38.669461,915,322,794, 64,2014-01-13 10:33:40.38077,832,810,509, 19,2014-01-16 18:24:04.713185,343,659,471, 64,2014-01-20 08:31:24.732965,537,313,604, 55,2014-01-21 01:47:37.2399,569,310,561, 55,2014-01-19 12:28:35.19759,636,820,151, 0,2014-01-20 20:42:44.50443,523,989,9, 64,2014-01-18 17:25:35.325404,677,646,744, 64,2014-01-17 13:41:10.178585,698,466,174, 55,2014-01-20 01:00:03.499096,840,345,91, 19,2014-01-13 23:31:40.370484,83,133,311, 55,2014-01-10 20:36:10.894062,940,291,896, 55,2014-01-16 15:59:47.045819,631,907,713, 0,2014-01-15 20:02:48.890722,469,261,770, 19,2014-01-11 18:17:03.8042,145,128,301, 0,2014-01-15 05:02:11.834697,462,625,505, 64,2014-01-14 17:43:31.579303,860,400,233, 19,2014-01-18 08:31:33.040861,527,649,704, 55,2014-01-18 04:43:53.511753,738,782,536, 0,2014-01-18 04:18:57.402386,898,981,128, 0,2014-01-12 20:01:12.500774,530,989,838, 0,2014-01-11 02:38:35.085197,218,951,484, 19,2014-01-16 02:35:21.626664,854,536,749, 19,2014-01-12 10:23:20.710527,419,423,645, 64,2014-01-16 21:03:16.582099,271,3,303, 64,2014-01-20 11:40:22.734315,757,108,378, 55,2014-01-13 14:18:51.388422,824,865,578, 0,2014-01-19 15:30:51.239325,418,792,563, 55,2014-01-16 23:44:31.174229,557,46,304, 19,2014-01-21 04:04:11.951704,427,154,997, 55,2014-01-12 09:13:34.731086,453,860,208, 19,2014-01-18 10:30:09.843691,648,436,297, 19,2014-01-13 08:37:52.332475,22,66,510, 64,2014-01-18 07:37:39.444692,2,580,873, 64,2014-01-17 13:01:34.577074,921,218,141, 0,2014-01-13 04:25:29.572794,813,575,422, 64,2014-01-16 14:06:14.642281,890,782,686, 19,2014-01-17 03:44:19.081503,378,736,835, 0,2014-01-21 01:51:04.898199,908,130,96, 55,2014-01-12 21:47:24.820867,849,532,657, 64,2014-01-15 22:44:22.580599,868,484,229, 0,2014-01-13 19:22:45.269102,90,134,232, 55,2014-01-12 22:23:35.715348,933,104,697, 64,2014-01-11 08:44:09.626418,173,592,39, 19,2014-01-18 19:25:34.159824,108,966,753, 19,2014-01-13 20:40:39.519548,652,377,494, 19,2014-01-15 17:46:21.461116,961,629,119, 55,2014-01-12 07:09:05.739127,580,807,969, 55,2014-01-10 23:12:12.56041,526,811,276, 55,2014-01-12 16:30:14.973288,320,401,677, 19,2014-01-19 15:28:03.141515,370,347,353, 64,2014-01-12 23:13:25.061183,176,81,828, 55,2014-01-12 23:08:50.058484,428,180,301, 64,2014-01-14 12:03:24.768178,604,259,340, 19,2014-01-14 13:36:45.77213,278,850,581, 0,2014-01-19 00:12:06.641003,571,445,951, 55,2014-01-13 17:43:47.502492,866,774,736, 19,2014-01-17 07:55:37.160552,436,875,212, 0,2014-01-17 09:42:57.880328,244,487,235, 19,2014-01-19 
21:28:18.676722,328,483,775, 19,2014-01-17 05:59:49.112256,404,752,849, 64,2014-01-18 22:56:32.611415,893,487,894, 64,2014-01-17 01:58:28.520831,419,121,895, 19,2014-01-19 18:37:41.44515,962,246,827, 55,2014-01-16 00:34:12.056106,403,869,585, 64,2014-01-13 09:00:03.097904,54,425,39, 64,2014-01-11 17:03:11.017944,318,870,569, 55,2014-01-11 08:40:26.698807,50,241,540, 19,2014-01-19 16:44:22.179408,556,357,34, 64,2014-01-11 12:54:15.916143,423,775,226, 64,2014-01-13 10:27:51.519674,977,889,885, 55,2014-01-14 09:28:44.760426,384,47,247, 64,2014-01-14 12:17:01.185244,615,120,392, 0,2014-01-15 12:51:27.235022,401,630,59, 19,2014-01-14 22:12:54.386845,487,849,582, 64,2014-01-12 01:42:35.345068,383,336,604, 55,2014-01-18 13:32:06.754989,575,53,511, 19,2014-01-19 19:59:15.848379,868,259,101, 64,2014-01-13 12:51:41.084196,504,76,8, 55,2014-01-18 16:44:19.315399,303,500,319, 0,2014-01-11 06:29:30.821667,717,82,49, 0,2014-01-11 22:19:44.225709,354,599,3, 55,2014-01-20 19:57:44.377001,173,126,211, 64,2014-01-13 09:38:20.88015,179,5,443, 55,2014-01-15 14:23:08.993888,304,462,380, 0,2014-01-14 03:34:30.149131,631,320,870, 64,2014-01-11 03:50:27.521153,881,872,635, 55,2014-01-18 12:06:56.496379,252,738,274, 55,2014-01-20 15:30:07.309683,578,896,101, 0,2014-01-17 17:33:25.401481,930,534,11, 19,2014-01-17 03:35:15.17352,481,38,369, 40,2014-01-16 15:29:56.42194,921,221,688, 74,2014-01-15 10:29:58.013315,698,736,657, 45,2014-01-12 23:33:43.694351,211,515,983, 40,2014-01-15 04:07:37.86709,677,988,975, 40,2014-01-20 15:04:10.069421,694,848,270, 65,2014-01-15 12:33:46.792255,590,339,933, 74,2014-01-16 01:10:48.885034,98,563,860, 40,2014-01-21 03:44:36.768228,540,737,237, 65,2014-01-13 23:18:08.971664,4,234,820, 45,2014-01-16 18:05:04.801596,851,89,997, 74,2014-01-13 08:12:28.288321,752,788,633, 37,2014-01-17 12:46:53.73209,471,685,824, 65,2014-01-20 10:22:05.98684,991,747,527, 37,2014-01-18 01:03:55.523342,690,186,933, 45,2014-01-11 04:59:48.119353,713,1,833, 37,2014-01-11 06:04:41.332327,839,387,49, 65,2014-01-15 08:36:02.27833,845,620,402, 74,2014-01-20 04:59:03.967731,195,359,399, 45,2014-01-13 22:56:22.416523,866,589,512, 74,2014-01-12 19:20:21.594149,260,615,799, 45,2014-01-20 15:28:34.789398,909,942,722, 45,2014-01-15 01:16:06.642651,925,575,603, 45,2014-01-12 09:18:05.97115,645,839,792, 65,2014-01-12 19:06:55.669549,139,936,543, 45,2014-01-19 19:39:15.636539,813,219,689, 45,2014-01-15 16:52:36.120281,543,877,893, 40,2014-01-13 02:57:23.191121,610,539,28, 45,2014-01-17 01:46:12.133338,554,280,45, 74,2014-01-15 02:22:43.429965,86,20,973, 40,2014-01-20 00:30:48.592602,375,409,517, 65,2014-01-12 23:21:27.306858,803,844,630, 37,2014-01-20 21:52:39.851904,410,293,557, 37,2014-01-14 05:00:46.605917,473,810,627, 45,2014-01-15 02:55:11.001208,327,660,182, 40,2014-01-14 20:00:17.719414,896,716,525, 65,2014-01-13 14:04:56.675756,979,811,172, 45,2014-01-19 19:28:57.793465,561,983,496, 37,2014-01-20 01:11:06.604448,782,999,748, 45,2014-01-13 11:49:33.443984,557,283,703, 40,2014-01-12 12:47:06.765127,974,143,988, 40,2014-01-15 13:16:11.406044,632,743,174, 65,2014-01-16 00:02:01.732304,142,410,403, 40,2014-01-19 05:34:00.497636,766,788,895, 37,2014-01-14 17:38:17.393302,866,107,459, 37,2014-01-12 19:05:57.256066,796,813,887, 40,2014-01-19 14:33:48.616989,537,482,374, 74,2014-01-15 03:42:58.725629,678,932,630, 45,2014-01-17 07:41:59.534396,472,981,772, 74,2014-01-18 13:32:52.584721,100,670,799, 45,2014-01-18 06:39:21.142152,989,599,750, 45,2014-01-19 07:02:21.128545,215,620,519, 37,2014-01-17 23:01:27.374119,518,78,491, 
40,2014-01-12 01:55:27.784803,106,384,172, 65,2014-01-20 20:20:27.099695,11,167,108, 45,2014-01-19 22:08:23.043668,273,462,123, 65,2014-01-21 01:34:43.421761,51,612,850, 37,2014-01-15 07:13:03.219976,973,811,727, 37,2014-01-11 19:05:46.234788,251,761,727, 40,2014-01-16 21:20:55.555099,69,23,717, 65,2014-01-19 13:35:46.649941,466,251,129, 74,2014-01-20 23:36:34.827808,799,371,595, 45,2014-01-19 00:42:22.861956,394,884,149, 65,2014-01-19 05:28:26.502264,9,498,751, 74,2014-01-13 22:37:21.433725,476,865,504, 65,2014-01-12 21:37:33.863166,415,883,550, 40,2014-01-12 19:28:44.221678,411,148,436, 45,2014-01-16 16:42:18.109481,324,611,663, 37,2014-01-11 11:06:31.65663,110,732,408, 40,2014-01-11 14:39:50.826125,879,318,503, 40,2014-01-19 08:31:51.553884,669,961,126, 37,2014-01-13 03:53:38.421757,561,812,503, 37,2014-01-16 07:56:02.232588,601,974,715, 37,2014-01-14 02:33:53.286189,581,919,349, 45,2014-01-11 03:27:38.919394,889,589,912, 40,2014-01-20 05:34:19.625891,175,817,415, 74,2014-01-16 01:39:50.542254,377,936,358, 65,2014-01-19 22:01:27.536382,707,255,839, 65,2014-01-14 03:17:18.35152,500,491,695, 65,2014-01-12 03:21:50.077069,633,631,303, 45,2014-01-17 05:34:20.121103,917,226,755, 45,2014-01-11 03:56:39.493671,154,465,675, 74,2014-01-20 14:24:11.205614,739,741,91, 74,2014-01-13 22:35:47.495222,707,711,21, 45,2014-01-12 09:52:07.278894,477,935,404, 40,2014-01-20 04:44:38.305727,853,303,783, 37,2014-01-12 14:44:10.539308,83,358,737, 40,2014-01-12 05:26:50.700654,571,140,358, 45,2014-01-14 05:57:41.525702,922,341,970, 65,2014-01-13 13:49:56.774096,68,193,257, 45,2014-01-18 06:47:46.255753,215,743,231, 74,2014-01-12 13:33:28.176061,196,201,579, 45,2014-01-13 07:26:31.601905,445,120,557, 45,2014-01-16 01:30:09.11771,998,812,259, 65,2014-01-20 23:01:54.678326,145,724,275, 45,2014-01-16 10:03:53.036049,182,840,711, 45,2014-01-18 03:07:42.980876,215,529,540, 45,2014-01-12 12:30:38.900003,770,44,472, 74,2014-01-11 20:46:11.14553,548,647,757, 65,2014-01-18 18:14:14.637505,97,996,463, 74,2014-01-13 07:10:29.726129,113,566,847, 37,2014-01-20 09:18:02.699415,693,913,824, 45,2014-01-21 02:07:53.059263,460,798,716, 65,2014-01-18 19:06:31.958717,852,504,612, 40,2014-01-13 05:51:55.986022,673,950,379, 65,2014-01-10 23:35:31.979075,164,1,704, 65,2014-01-17 18:40:51.584607,825,235,722, 65,2014-01-18 11:31:46.528276,370,59,158, 74,2014-01-20 01:36:59.515457,675,921,344, 65,2014-01-21 01:58:09.936126,568,901,655, 37,2014-01-19 18:53:36.605345,504,148,900, 65,2014-01-16 01:11:19.636979,76,89,337, 37,2014-01-10 23:54:49.430217,682,91,141, 65,2014-01-19 03:17:47.811378,463,916,288, 40,2014-01-19 03:47:07.132201,380,880,423, 74,2014-01-12 13:46:56.70911,198,918,203, 65,2014-01-18 02:02:26.117104,77,74,589, 37,2014-01-20 22:31:17.671509,256,909,51, 37,2014-01-13 23:13:47.409765,905,299,528, 37,2014-01-11 02:00:07.642807,576,559,783, 37,2014-01-13 06:21:14.446024,648,88,983, 65,2014-01-11 00:01:33.665519,972,528,675, 45,2014-01-12 19:33:35.818869,287,338,509, 37,2014-01-15 17:47:46.047929,621,108,147, 74,2014-01-16 05:31:42.783228,327,243,617, 65,2014-01-12 12:35:30.497195,199,580,563, 45,2014-01-10 21:51:39.083978,709,689,68, 45,2014-01-13 02:39:25.764104,496,213,988, 40,2014-01-14 05:06:09.397198,589,243,273, 45,2014-01-11 22:37:50.229928,141,222,249, 45,2014-01-21 00:53:40.401609,401,48,485, 40,2014-01-16 16:16:39.123747,357,59,621, 40,2014-01-11 01:55:52.929343,878,62,800, 65,2014-01-20 21:01:33.460873,264,786,558, 37,2014-01-14 05:23:11.082464,822,867,476, 45,2014-01-13 11:47:48.915365,301,484,933, 37,2014-01-21 
00:37:05.439948,188,713,466, 37,2014-01-21 04:04:02.66749,246,553,553, 37,2014-01-21 03:19:35.44406,200,382,160, 65,2014-01-19 20:14:04.955824,908,939,204, 45,2014-01-21 00:02:12.603616,270,49,308, 74,2014-01-19 16:13:12.049405,84,831,380, 74,2014-01-14 15:25:24.592803,16,887,492, 37,2014-01-21 03:57:02.033832,694,702,931, 40,2014-01-17 13:30:59.860783,178,993,932, 74,2014-01-12 13:12:31.725004,644,386,463, 74,2014-01-12 11:43:58.742943,724,687,889, 45,2014-01-14 09:33:25.977888,614,214,698, 40,2014-01-12 05:43:49.396933,809,225,181, 65,2014-01-15 14:57:46.152707,259,51,573, 45,2014-01-14 15:33:33.621114,487,366,409, 65,2014-01-14 16:05:03.842957,972,268,120, 37,2014-01-15 18:59:19.818226,2,870,83, 37,2014-01-16 15:07:09.439982,443,842,810, 40,2014-01-19 13:52:49.890886,810,670,309, 37,2014-01-21 04:31:02.601454,711,54,660, 45,2014-01-18 07:42:39.937597,241,35,911, 37,2014-01-19 15:44:28.975284,959,930,485, 45,2014-01-13 01:10:28.365558,661,550,947, 40,2014-01-11 06:48:49.334795,969,304,762, 65,2014-01-20 18:22:19.205211,427,911,2, 74,2014-01-12 20:04:08.767168,3,333,668, 45,2014-01-17 03:05:28.458542,223,698,704, 65,2014-01-21 00:18:12.134554,357,873,844, 45,2014-01-12 11:05:42.228459,291,861,362, 74,2014-01-20 12:28:39.541424,359,874,513, 74,2014-01-13 06:06:01.050339,865,837,600, 37,2014-01-12 05:42:47.668407,19,983,39, 74,2014-01-20 10:32:42.208914,183,900,569, 37,2014-01-13 03:25:36.494399,61,33,694, 65,2014-01-21 05:56:52.624231,241,30,543, 65,2014-01-20 04:34:54.812529,352,944,364, 40,2014-01-11 13:38:48.543804,361,771,594, 37,2014-01-14 15:22:17.217034,615,429,243, 40,2014-01-20 02:31:56.846781,27,531,802, 37,2014-01-18 07:09:48.405006,944,850,761, 40,2014-01-16 08:34:48.942458,520,493,174, 74,2014-01-11 08:15:55.589724,523,164,737, 40,2014-01-11 10:43:14.382894,244,525,393, 40,2014-01-17 18:18:38.339391,555,448,85, 40,2014-01-16 03:13:41.742851,386,568,89, 37,2014-01-15 06:16:48.004007,854,61,638, 65,2014-01-11 04:23:42.182348,551,303,832, 37,2014-01-21 02:13:01.561076,950,156,324, 45,2014-01-21 01:23:57.444409,47,784,882, 45,2014-01-19 22:16:32.073235,649,685,745, 37,2014-01-21 00:44:04.16295,145,334,835, 45,2014-01-18 03:06:37.382006,425,807,375, 40,2014-01-18 08:01:01.048519,911,700,724, 45,2014-01-12 19:54:32.528508,133,165,756, 65,2014-01-18 13:55:26.716801,412,239,557, 65,2014-01-17 20:23:20.25373,300,252,94, 65,2014-01-14 19:58:41.296095,455,127,104, 74,2014-01-14 11:00:55.175761,92,407,393, 37,2014-01-17 14:41:32.388704,719,658,956, 45,2014-01-16 11:04:23.524554,706,228,209, 74,2014-01-13 17:29:34.717186,208,983,771, 74,2014-01-20 00:47:33.439043,534,405,880, 40,2014-01-17 20:47:11.192961,933,616,834, 37,2014-01-12 22:02:16.9261,681,785,263, 65,2014-01-11 22:13:09.933441,610,864,450, 65,2014-01-17 20:44:03.817611,221,973,872, 45,2014-01-11 20:37:11.738629,141,995,643, 40,2014-01-12 15:51:58.477245,283,810,585, 74,2014-01-11 06:06:21.034646,951,96,281, 40,2014-01-21 03:09:08.58541,536,45,833, 74,2014-01-20 03:01:46.882251,225,526,912, 40,2014-01-16 18:41:09.977104,937,163,574, 65,2014-01-11 05:25:04.175553,658,166,279, 40,2014-01-20 17:45:01.265564,966,645,597, 37,2014-01-13 06:59:48.316496,852,651,960, 74,2014-01-16 12:38:45.918404,600,238,201, 74,2014-01-14 18:01:49.269571,793,804,607, 40,2014-01-13 15:23:30.498844,144,522,231, 40,2014-01-16 08:51:47.47948,418,523,390, 45,2014-01-14 13:25:46.71398,324,787,736, 65,2014-01-12 07:40:02.572498,883,515,971, 65,2014-01-16 03:35:51.64243,386,656,49, 45,2014-01-11 10:32:24.095986,167,273,224, 74,2014-01-19 
19:41:03.621017,171,590,272, 45,2014-01-18 03:30:24.171357,720,640,121, 40,2014-01-19 04:27:50.813206,614,941,782, 65,2014-01-16 10:04:23.875167,649,594,315, 37,2014-01-11 17:29:05.467452,172,182,377, 40,2014-01-12 09:28:45.988969,183,33,295, 37,2014-01-12 18:45:56.263871,350,261,777, 74,2014-01-17 08:33:28.992006,620,779,990, 45,2014-01-15 06:58:20.706154,641,261,383, 37,2014-01-11 13:33:29.702913,418,372,879, 45,2014-01-13 23:20:40.184969,392,958,932, 65,2014-01-17 09:00:37.632254,738,795,64, 45,2014-01-12 15:46:39.636353,481,735,764, 40,2014-01-21 00:04:44.00258,658,117,657, 37,2014-01-18 09:37:49.370883,285,852,962, 40,2014-01-14 11:38:38.113598,7,897,775, 37,2014-01-11 00:11:05.037646,794,40,17, 40,2014-01-18 06:46:57.956713,54,576,998, 74,2014-01-13 08:40:24.996268,286,110,47, 45,2014-01-16 22:52:15.01475,511,841,833, 45,2014-01-18 16:12:02.132267,84,562,598, 37,2014-01-12 20:25:26.261831,482,482,799, 45,2014-01-19 09:52:03.331665,663,992,228, 74,2014-01-13 22:50:48.050671,899,772,194, 40,2014-01-16 18:27:15.531402,480,204,570, 65,2014-01-11 19:15:33.830508,204,812,660, 74,2014-01-19 11:42:35.530151,692,527,193, 45,2014-01-20 11:53:02.245802,149,273,187, 74,2014-01-13 06:55:36.403005,424,165,584, 74,2014-01-14 09:18:27.173,650,814,96, 37,2014-01-21 02:25:26.341789,197,572,374, 65,2014-01-11 20:36:40.024442,544,123,16, 65,2014-01-11 06:48:51.344356,20,633,937, 65,2014-01-19 00:53:17.154995,595,89,855, 40,2014-01-17 10:41:03.899607,428,610,561, 65,2014-01-12 04:17:56.811808,270,261,751, 74,2014-01-20 14:22:03.143964,342,327,420, 65,2014-01-19 09:27:00.163478,597,562,823, 37,2014-01-18 16:51:25.803815,403,840,443, 45,2014-01-14 15:20:23.850118,859,194,556, 45,2014-01-20 03:00:29.866391,540,422,308, 45,2014-01-11 10:12:05.988783,427,717,652, 40,2014-01-21 04:21:01.482372,404,268,622, 45,2014-01-11 12:47:09.502744,840,176,862, 74,2014-01-11 04:16:49.991782,872,681,582, 45,2014-01-18 07:58:50.853674,788,421,300, 37,2014-01-15 04:25:47.616343,456,444,709, 40,2014-01-11 08:27:55.029428,128,494,123, 74,2014-01-15 08:45:48.810387,453,171,521, 40,2014-01-17 17:06:12.61261,487,31,143, 37,2014-01-17 11:20:10.044178,572,802,340, 74,2014-01-12 18:57:50.942653,518,328,864, 65,2014-01-19 17:31:38.874442,614,413,528, 45,2014-01-15 15:12:13.375842,124,565,150, 45,2014-01-15 21:48:38.993324,335,399,83, 74,2014-01-15 05:58:54.406264,864,164,61, 37,2014-01-16 14:27:47.206769,133,512,700, 65,2014-01-14 03:31:14.523474,519,201,836, 65,2014-01-14 11:51:56.652066,694,815,307, 65,2014-01-19 01:23:23.609774,613,172,304, 45,2014-01-17 16:49:41.696474,397,966,248, 37,2014-01-14 08:17:22.993853,194,285,615, 45,2014-01-20 02:00:03.634216,922,513,257, 45,2014-01-18 03:38:33.04083,201,735,163, 74,2014-01-12 03:10:40.149267,984,830,98, 74,2014-01-16 06:41:07.533823,724,507,684, 40,2014-01-19 11:56:29.852639,116,189,884, 45,2014-01-11 11:32:43.29323,689,150,339, 40,2014-01-14 10:08:07.697301,165,472,845, 74,2014-01-16 22:47:55.656453,719,33,217, 65,2014-01-15 06:53:07.143767,612,370,559, 40,2014-01-13 07:08:37.563692,595,934,746, 65,2014-01-17 13:00:01.645656,509,454,978, 45,2014-01-15 05:14:08.626139,726,516,132, 65,2014-01-13 23:55:47.066436,241,251,126, 65,2014-01-17 21:16:51.637439,908,844,469, 37,2014-01-12 07:12:59.479813,381,771,384, 45,2014-01-18 08:21:34.683198,75,851,872, 45,2014-01-18 09:44:46.666867,694,489,154, 65,2014-01-16 19:58:48.2902,967,702,60, 45,2014-01-14 19:27:47.295808,85,171,803, 37,2014-01-14 15:04:56.711044,681,766,855, 45,2014-01-18 18:56:39.232853,204,998,869, 37,2014-01-13 
06:59:26.170669,187,259,114, 40,2014-01-19 10:17:10.087305,327,660,475, 65,2014-01-13 10:45:18.226177,395,859,767, 45,2014-01-17 16:58:20.576932,632,194,13, 40,2014-01-14 18:44:57.294074,249,566,431, 40,2014-01-16 18:16:32.75007,537,774,643, 74,2014-01-10 22:50:17.228998,80,958,722, 40,2014-01-12 14:08:20.903848,900,737,91, 45,2014-01-13 05:06:14.446544,845,940,888, 74,2014-01-14 11:07:40.222851,352,215,312, 45,2014-01-11 10:08:24.538482,179,506,269, 65,2014-01-20 12:44:47.487794,193,345,343, 40,2014-01-15 18:18:20.372118,119,491,748, 37,2014-01-16 20:49:32.072305,766,806,420, 74,2014-01-18 18:41:17.340432,109,74,673, 74,2014-01-16 09:51:03.665767,530,343,909, 74,2014-01-20 10:57:39.770026,58,35,333, 74,2014-01-14 11:29:12.997304,879,72,828, 65,2014-01-20 20:44:10.809534,39,552,69, 37,2014-01-12 12:06:17.333718,369,678,676, 74,2014-01-21 04:29:14.64296,279,587,643, 37,2014-01-14 19:58:19.435673,404,373,995, 65,2014-01-15 16:02:04.400573,540,919,36, 65,2014-01-17 19:46:06.280399,967,931,966, 37,2014-01-16 07:11:18.915904,753,676,326, 65,2014-01-12 18:23:39.08377,137,531,438, 74,2014-01-14 23:30:52.947266,27,301,960, 40,2014-01-11 21:10:07.206104,19,808,671, 74,2014-01-16 17:51:26.379578,854,908,407, 37,2014-01-18 18:35:49.658309,994,749,202, 37,2014-01-19 20:06:46.439377,601,118,619, 65,2014-01-19 04:50:52.550247,847,354,244, 65,2014-01-16 22:52:59.745615,151,99,71, 37,2014-01-12 00:52:04.665553,602,724,624, 45,2014-01-15 15:49:13.127179,961,837,342, 45,2014-01-20 21:37:57.039688,173,414,664, 65,2014-01-17 23:08:37.415624,629,118,965, 37,2014-01-15 18:39:30.356177,273,360,371, 37,2014-01-12 05:46:17.943954,976,303,44, 40,2014-01-20 08:14:51.862586,660,656,922, 74,2014-01-19 09:47:10.579447,211,354,316, 45,2014-01-12 19:54:42.482437,193,535,884, 65,2014-01-19 14:59:39.35038,457,484,45, 37,2014-01-13 22:05:30.951565,823,957,476, 74,2014-01-18 20:44:14.554743,63,583,874, 40,2014-01-17 03:40:56.690812,469,995,575, 65,2014-01-19 11:56:34.617331,236,209,338, 37,2014-01-18 01:41:54.324769,522,108,897, 37,2014-01-20 19:10:09.688117,110,698,801, 37,2014-01-19 02:40:45.426866,565,804,50, 74,2014-01-19 17:48:11.658905,757,759,195, 74,2014-01-20 17:39:24.331077,714,265,154, 65,2014-01-12 16:39:04.862957,340,257,340, 45,2014-01-14 03:50:16.059477,868,671,98, 37,2014-01-17 07:25:30.611476,807,370,65, 74,2014-01-18 03:50:23.778861,630,157,161, 45,2014-01-16 02:13:55.143247,624,362,234, 40,2014-01-11 00:56:23.558742,744,719,597, 65,2014-01-19 05:00:30.984966,8,174,91, 65,2014-01-11 14:05:21.522825,495,537,736, 74,2014-01-18 23:32:13.21747,857,320,938, 40,2014-01-17 19:07:17.424343,937,319,205, 74,2014-01-19 22:56:14.073492,21,531,172, 40,2014-01-14 16:25:12.963083,914,578,721, 45,2014-01-18 23:59:22.090315,283,793,912, 37,2014-01-14 08:45:27.200671,195,833,812, 65,2014-01-14 08:03:10.003191,524,538,242, 37,2014-01-15 17:07:59.505939,692,273,397, 65,2014-01-19 07:24:57.557267,777,657,440, 65,2014-01-15 17:49:27.947145,198,608,996, 65,2014-01-14 19:22:51.368526,124,982,297, 40,2014-01-17 11:12:08.136714,292,1,218, 40,2014-01-17 17:44:10.429582,209,745,808, 45,2014-01-13 04:22:30.718906,569,906,422, 65,2014-01-20 13:17:39.088277,820,197,290, 74,2014-01-15 08:28:24.984325,954,361,172, 74,2014-01-19 12:03:27.410137,632,381,312, 65,2014-01-18 19:14:13.706028,223,748,527, 74,2014-01-12 04:10:19.309512,893,142,783, 74,2014-01-19 01:13:37.098253,14,909,519, 37,2014-01-16 15:54:59.132894,156,488,883, 40,2014-01-21 01:58:30.968417,218,835,650, 37,2014-01-18 12:53:01.42975,586,570,96, 37,2014-01-18 
12:34:03.99585,231,868,147, 37,2014-01-13 23:48:47.027894,516,679,56, 37,2014-01-14 14:18:32.041226,616,588,926, 45,2014-01-15 10:24:27.774711,802,456,769, 37,2014-01-19 06:02:42.171142,582,293,41, 74,2014-01-14 19:14:55.600386,752,185,945, 37,2014-01-13 09:24:58.760097,67,382,529, 40,2014-01-20 00:08:03.693967,351,185,834, 40,2014-01-12 12:47:08.817855,650,576,672, 45,2014-01-20 08:32:16.184439,596,317,795, 37,2014-01-18 17:04:17.767458,534,243,642, 40,2014-01-16 09:12:21.78094,693,992,836, 40,2014-01-18 02:31:38.274753,428,57,219, 45,2014-01-11 19:49:44.968548,1000,911,253, 37,2014-01-19 21:15:31.78413,727,812,519, 40,2014-01-12 13:39:37.781111,392,958,532, 37,2014-01-20 07:14:42.525814,564,723,506, 37,2014-01-14 09:04:59.731275,68,865,302, 74,2014-01-16 13:02:29.149636,576,744,147, 65,2014-01-16 12:26:50.662527,912,25,610, 37,2014-01-10 20:49:10.160857,51,502,417, 37,2014-01-18 21:24:59.868961,710,725,322, 74,2014-01-15 19:44:29.750804,527,772,910, 45,2014-01-15 09:17:35.145601,262,111,159, 40,2014-01-17 03:28:27.279099,637,145,678, 74,2014-01-13 08:58:43.456832,84,173,703, 65,2014-01-16 17:27:54.455114,384,236,680, 45,2014-01-14 22:42:04.377352,127,387,18, 37,2014-01-19 04:53:42.590145,729,369,425, 40,2014-01-16 13:26:25.42353,554,863,790, 37,2014-01-12 05:35:05.807102,840,101,227, 65,2014-01-16 11:27:46.585995,242,891,464, 40,2014-01-19 17:15:12.451424,347,593,40, 40,2014-01-15 23:53:37.848747,289,798,332, 45,2014-01-21 01:52:14.361126,872,572,291, 65,2014-01-17 17:17:54.622566,478,798,178, 45,2014-01-19 23:08:33.449133,6,365,278, 74,2014-01-13 05:17:13.121222,297,224,408, 65,2014-01-16 11:25:58.316532,355,958,929, 45,2014-01-11 05:55:42.266989,993,290,533, 74,2014-01-12 07:49:29.30566,408,920,984, 40,2014-01-13 22:30:16.084409,41,53,232, 40,2014-01-16 19:08:04.047928,899,188,367, 65,2014-01-19 14:21:07.580833,126,903,116, 74,2014-01-14 22:20:01.052957,576,283,268, 45,2014-01-15 10:23:35.832477,297,113,492, 40,2014-01-10 22:00:45.361945,937,383,388, 65,2014-01-13 23:34:43.578771,685,782,375, 74,2014-01-18 23:28:35.563752,286,300,104, 74,2014-01-16 15:03:14.511581,235,935,607, 40,2014-01-19 16:01:34.241298,955,835,107, 65,2014-01-19 00:17:45.725028,148,978,306, 40,2014-01-14 06:28:14.380544,262,183,196, 45,2014-01-14 05:46:03.992102,566,970,74, 40,2014-01-13 03:35:20.870629,29,822,672, 40,2014-01-20 13:56:41.659642,2,895,848, 74,2014-01-16 18:44:47.449354,547,669,475, 74,2014-01-19 01:03:15.325743,985,132,266, 37,2014-01-14 06:38:46.037414,416,284,157, 65,2014-01-14 17:38:30.039499,219,530,821, 40,2014-01-14 08:29:40.749273,948,770,608, 65,2014-01-15 16:13:51.844516,592,435,678, 37,2014-01-20 09:06:16.625494,948,423,47, 40,2014-01-12 19:44:53.200697,13,393,871, 45,2014-01-20 20:07:29.693263,151,787,611, 40,2014-01-20 04:58:30.986619,754,207,232, 65,2014-01-19 17:02:47.823264,274,752,885, 74,2014-01-19 13:16:03.142396,495,481,476, 65,2014-01-12 04:15:44.107841,92,888,42, 40,2014-01-14 22:28:46.139796,268,751,53, 37,2014-01-19 23:11:45.409383,801,966,146, 37,2014-01-13 16:05:13.413921,375,590,415, 74,2014-01-18 00:59:02.224205,207,972,124, 40,2014-01-15 12:19:49.45773,94,167,466, 37,2014-01-12 00:26:20.994754,611,452,788, 65,2014-01-11 17:19:03.277162,901,927,794, 37,2014-01-20 02:43:25.290207,871,819,830, 45,2014-01-12 02:27:06.356699,255,699,228, 45,2014-01-14 20:53:46.855933,782,114,916, 65,2014-01-17 20:12:00.853959,508,138,263, 40,2014-01-17 21:30:20.86828,513,197,595, 37,2014-01-13 06:55:21.097231,360,462,874, 65,2014-01-15 14:29:46.578987,578,915,190, 74,2014-01-10 
21:58:35.249243,225,902,350, 65,2014-01-16 16:41:25.089753,900,599,286, 74,2014-01-17 22:05:07.449617,586,478,82, 74,2014-01-20 15:55:16.908884,460,440,849, 74,2014-01-12 05:26:12.539106,556,74,47, 40,2014-01-15 17:08:22.775359,927,256,729, 45,2014-01-13 16:34:02.946298,579,975,545, 37,2014-01-16 03:04:42.578605,72,714,612, 40,2014-01-19 05:38:03.524632,821,376,473, 37,2014-01-18 12:47:54.791233,85,565,156, 37,2014-01-15 06:10:59.204099,880,114,927, 65,2014-01-21 05:22:56.725329,519,457,259, 45,2014-01-18 02:55:24.484496,474,381,785, 74,2014-01-14 05:09:30.191137,838,168,271, 65,2014-01-19 16:25:44.548593,885,342,388, 65,2014-01-16 10:11:27.62731,599,719,813, 40,2014-01-15 13:25:14.298978,475,697,683, 45,2014-01-13 08:54:30.688807,808,605,477, 65,2014-01-15 03:23:13.036694,502,55,699, 37,2014-01-18 09:30:27.712899,342,707,995, 37,2014-01-20 13:53:32.913013,937,214,784, 74,2014-01-19 19:43:02.494425,924,484,268, 45,2014-01-19 13:56:48.707653,766,914,240, 65,2014-01-11 01:12:36.190595,618,779,949, 40,2014-01-18 16:26:27.784632,550,36,449, 65,2014-01-20 20:23:55.064351,876,276,898, 45,2014-01-15 02:06:23.046527,199,678,927, 74,2014-01-15 06:38:28.63901,990,216,630, 45,2014-01-17 11:54:15.93305,610,733,534, 45,2014-01-17 13:01:44.144178,649,396,669, 74,2014-01-20 01:08:15.217997,532,520,862, 74,2014-01-17 13:52:51.182293,494,407,239, 74,2014-01-12 23:43:09.233931,787,988,691, 3,2014-01-16 17:13:22.668032,217,625,610, 3,2014-01-16 23:48:08.091178,427,19,857, 91,2014-01-14 09:09:21.773038,133,777,144, 3,2014-01-11 04:21:45.443392,453,319,793, 3,2014-01-19 20:22:11.037896,514,331,180, 3,2014-01-19 16:14:04.351643,607,905,667, 3,2014-01-19 13:59:48.968443,803,6,613, 3,2014-01-17 03:10:05.829128,492,74,544, 91,2014-01-13 16:25:03.555742,699,533,82, 91,2014-01-19 13:22:45.693773,416,283,871, 3,2014-01-14 00:05:30.313624,874,956,866, 3,2014-01-17 01:34:33.746879,193,492,599, 3,2014-01-17 23:48:30.242784,192,563,881, 3,2014-01-19 14:16:57.940934,804,13,305, 3,2014-01-11 08:59:48.046276,623,932,885, 3,2014-01-20 12:43:00.931592,542,359,466, 3,2014-01-13 11:40:10.978048,396,113,862, 3,2014-01-18 22:30:15.759175,425,518,975, 91,2014-01-19 20:36:33.844605,553,989,373, 3,2014-01-12 01:23:13.472473,478,122,394, 3,2014-01-17 06:27:04.467247,486,278,256, 3,2014-01-20 01:49:10.035199,155,16,93, 3,2014-01-19 21:49:41.257524,410,494,567, 3,2014-01-16 20:50:59.531598,169,413,777, 3,2014-01-13 21:55:33.082146,538,423,263, 91,2014-01-13 22:28:09.896533,302,946,269, 91,2014-01-13 02:45:15.464648,375,207,675, 3,2014-01-20 14:57:17.226324,465,545,368, 91,2014-01-12 17:36:25.114531,683,79,381, 3,2014-01-19 20:38:06.646942,970,692,373, 3,2014-01-12 08:40:26.460254,138,615,836, 3,2014-01-18 14:49:47.782564,317,652,493, 3,2014-01-15 14:26:14.738539,450,327,895, 3,2014-01-15 21:49:48.233292,588,547,797, 3,2014-01-18 23:11:33.226375,172,850,799, 91,2014-01-14 04:48:25.776433,943,296,28, 3,2014-01-14 08:03:52.584935,104,573,230, 3,2014-01-17 07:11:22.194818,192,929,189, 3,2014-01-20 11:58:31.605562,657,820,488, 91,2014-01-17 04:28:56.140677,752,530,672, 3,2014-01-15 14:34:07.888591,326,160,597, 91,2014-01-13 06:04:01.919186,617,623,356, 91,2014-01-13 00:03:29.887975,782,263,737, 91,2014-01-12 08:22:38.131375,268,606,829, 3,2014-01-11 14:20:59.860541,46,166,950, 3,2014-01-13 13:03:17.934251,693,873,388, 91,2014-01-11 15:05:39.062967,140,88,605, 91,2014-01-14 06:01:10.838589,555,172,434, 3,2014-01-11 05:33:33.693426,826,481,479, 91,2014-01-20 15:42:12.907991,321,283,465, 3,2014-01-15 11:24:24.311061,503,142,574, 3,2014-01-17 
16:00:38.160673,880,991,115, 3,2014-01-19 11:31:22.94319,891,116,309, 91,2014-01-14 03:14:05.568584,898,808,536, 3,2014-01-13 06:51:37.69269,625,140,750, 91,2014-01-12 03:26:56.025335,291,34,911, 91,2014-01-17 05:42:15.465119,950,932,229, 3,2014-01-15 13:36:53.157338,620,243,260, 91,2014-01-11 12:24:13.251659,199,414,937, 3,2014-01-19 03:18:40.580069,930,53,703, 91,2014-01-14 04:14:59.804698,242,175,438, 91,2014-01-13 01:04:39.711915,304,988,145, 91,2014-01-16 12:08:28.362632,928,814,820, 91,2014-01-18 22:41:14.543236,804,928,716, 91,2014-01-18 02:54:27.945206,351,907,226, 91,2014-01-14 05:20:01.589006,630,58,570, 3,2014-01-11 21:29:40.31967,291,808,484, 91,2014-01-11 04:58:20.530142,373,724,648, 3,2014-01-20 16:31:23.783824,734,175,772, 3,2014-01-11 03:28:11.925232,996,659,615, 3,2014-01-17 13:27:16.671237,705,190,976, 91,2014-01-15 01:05:31.672834,4,901,919, 91,2014-01-13 13:32:13.844419,241,62,695, 3,2014-01-19 17:30:46.559212,156,997,40, 91,2014-01-16 13:28:09.804208,455,764,709, 3,2014-01-14 07:53:13.704959,672,764,92, 91,2014-01-12 00:34:04.493464,105,921,210, 3,2014-01-17 08:33:48.867595,86,346,558, 3,2014-01-17 17:54:24.543548,112,530,957, 91,2014-01-12 10:07:38.18689,335,68,184, 3,2014-01-16 18:16:01.775585,959,750,595, 3,2014-01-11 23:18:48.855028,851,468,696, 91,2014-01-19 06:08:16.347981,47,687,171, 91,2014-01-14 23:47:24.718775,828,207,140, 91,2014-01-15 06:32:54.423614,994,768,444, 91,2014-01-11 06:59:54.040672,926,753,102, 91,2014-01-16 07:14:20.74453,647,591,85, 91,2014-01-11 06:15:09.88915,155,950,406, 91,2014-01-16 00:36:47.198009,890,173,37, 91,2014-01-16 23:38:33.99619,879,325,318, 91,2014-01-19 13:33:50.469219,45,230,677, 3,2014-01-19 08:51:47.002707,639,79,948, 3,2014-01-19 04:43:13.708523,701,249,406, 91,2014-01-14 19:42:18.83185,91,706,586, 3,2014-01-17 01:33:01.545944,891,201,927, 3,2014-01-16 01:37:41.653729,844,115,977, 3,2014-01-18 05:02:20.421276,837,711,353, 3,2014-01-18 03:02:41.865615,614,879,919, 91,2014-01-16 10:36:02.18429,570,694,264, 91,2014-01-17 15:33:44.205101,716,175,742, 91,2014-01-18 10:30:53.790847,828,418,407, 3,2014-01-12 18:03:18.855526,567,295,816, 91,2014-01-11 10:39:15.877934,319,915,963, 91,2014-01-21 04:03:07.635685,137,549,60, 3,2014-01-11 05:34:05.414739,747,723,7, 3,2014-01-17 04:07:25.682562,91,936,792, 3,2014-01-14 05:56:21.340645,489,906,215, 91,2014-01-12 10:08:09.908202,480,488,228, 3,2014-01-13 06:41:14.550156,41,488,757, 3,2014-01-21 03:50:45.884612,92,669,702, 3,2014-01-14 00:15:48.095092,461,853,462, 91,2014-01-19 04:57:16.325742,318,541,681, 3,2014-01-11 21:09:34.739641,550,931,652, 91,2014-01-12 00:24:04.443074,275,318,264, 91,2014-01-12 22:44:41.044517,5,103,721, 3,2014-01-16 07:42:29.163253,903,752,513, 91,2014-01-12 11:23:58.483745,102,326,766, 3,2014-01-18 09:59:01.789047,44,491,467, 3,2014-01-16 17:57:39.052404,932,802,777, 3,2014-01-17 16:00:45.682174,31,748,405, 91,2014-01-14 03:37:35.785655,725,403,356, 91,2014-01-15 01:31:29.521623,808,712,437, 3,2014-01-15 18:52:32.684881,296,225,824, 3,2014-01-12 02:20:49.494178,470,713,634, 91,2014-01-19 01:13:48.353893,226,752,582, 91,2014-01-11 14:25:34.231245,837,186,395, 91,2014-01-17 07:58:31.147907,152,805,519, 91,2014-01-16 00:16:08.775169,478,536,510, 91,2014-01-18 21:28:16.096858,274,290,455, 91,2014-01-12 12:34:33.332196,56,383,240, 3,2014-01-12 09:49:52.980269,320,204,184, 91,2014-01-16 01:59:09.888125,555,992,432, 3,2014-01-14 10:37:52.187723,769,707,739, 91,2014-01-13 00:29:08.858623,590,256,983, 91,2014-01-16 00:02:17.523809,65,244,276, 91,2014-01-14 
20:11:57.602461,298,908,232, 3,2014-01-19 08:36:34.541184,621,672,101, 91,2014-01-19 09:58:38.864455,544,921,332, 91,2014-01-16 10:20:07.510664,94,306,407, 3,2014-01-11 09:17:49.091342,879,780,439, 91,2014-01-19 07:49:24.749067,825,132,574, 91,2014-01-19 14:35:55.606175,908,967,257, 91,2014-01-19 18:15:05.417083,306,246,277, 91,2014-01-20 08:58:59.488707,704,277,161, 91,2014-01-20 19:00:00.049249,4,944,795, 3,2014-01-11 10:59:46.4616,176,799,461, 3,2014-01-15 10:41:28.65238,952,534,637, 3,2014-01-12 00:23:58.533414,79,201,593, 3,2014-01-19 00:58:48.251067,108,543,381, 91,2014-01-10 22:39:07.704784,302,63,422, 3,2014-01-18 20:24:44.215586,439,891,938, 91,2014-01-11 22:36:24.036722,402,600,3, 3,2014-01-15 04:10:37.226826,82,43,715, 91,2014-01-13 09:17:16.900468,10,507,224, 91,2014-01-13 04:57:13.5309,156,880,658, 91,2014-01-12 23:24:25.580718,604,435,474, 91,2014-01-14 03:42:51.131712,654,811,123, 91,2014-01-19 16:55:44.678807,791,379,930, 91,2014-01-18 03:40:34.355886,551,136,732, 91,2014-01-11 19:11:07.22899,124,386,309, 3,2014-01-10 23:30:18.011423,784,594,600, 3,2014-01-19 17:30:27.336155,733,704,285, 91,2014-01-17 01:10:17.117115,650,562,149, 91,2014-01-14 14:08:10.199145,27,690,106, 3,2014-01-11 11:59:36.194778,984,339,515, 3,2014-01-11 19:12:34.640925,713,638,921, 3,2014-01-18 14:20:07.802025,971,518,428, 3,2014-01-20 00:36:10.735963,660,362,118, 3,2014-01-20 09:11:13.505379,470,616,969, 3,2014-01-13 18:40:15.312689,103,61,42, 91,2014-01-20 13:53:59.827304,9,691,93, 91,2014-01-18 11:00:38.254866,612,545,651, 3,2014-01-12 03:16:10.918864,53,439,151, 3,2014-01-19 02:09:05.244387,537,157,230, 3,2014-01-17 13:59:37.743573,688,105,109, 3,2014-01-11 16:16:10.968531,877,951,479, 91,2014-01-19 17:08:51.706406,666,561,246, 3,2014-01-11 18:41:06.395953,947,531,683, 91,2014-01-12 20:40:09.501945,267,646,63, 91,2014-01-17 12:07:39.957473,884,464,878, 3,2014-01-11 21:20:14.101156,816,472,797, 3,2014-01-20 21:04:53.717531,511,357,559, 91,2014-01-18 14:44:03.994195,226,143,187, 91,2014-01-16 05:30:51.327982,973,971,880, 91,2014-01-13 00:22:10.617999,693,686,871, 3,2014-01-20 23:41:17.525095,295,0,15, 91,2014-01-18 08:55:16.9087,533,669,155, 3,2014-01-16 08:05:01.75013,922,43,29, 91,2014-01-19 10:37:02.203902,878,850,783, 3,2014-01-15 06:35:51.264587,976,41,44, 3,2014-01-17 07:16:08.97912,659,677,615, 91,2014-01-19 14:07:20.215325,838,663,605, 91,2014-01-13 18:06:18.600743,362,390,47, 3,2014-01-13 02:26:26.096235,810,767,971, 52,2014-01-12 22:15:30.414889,322,752,691, 100,2014-01-14 10:05:54.79594,826,603,769, 57,2014-01-14 01:39:00.737159,334,95,861, 57,2014-01-20 16:35:38.216915,317,456,555, 57,2014-01-13 04:42:05.531902,582,828,477, 52,2014-01-13 04:50:14.242539,870,839,513, 100,2014-01-13 05:15:53.529603,951,974,955, 100,2014-01-12 12:36:05.359626,845,31,702, 52,2014-01-20 19:50:52.497404,242,661,425, 52,2014-01-14 12:32:04.448885,140,40,608, 57,2014-01-20 18:45:10.604013,229,359,647, 57,2014-01-17 03:50:30.241396,364,600,831, 52,2014-01-15 08:48:15.417418,626,609,949, 57,2014-01-19 05:54:02.31042,506,723,529, 57,2014-01-18 02:31:36.637349,815,641,647, 52,2014-01-17 09:28:24.919361,189,807,975, 52,2014-01-15 12:01:42.267893,656,821,250, 52,2014-01-19 03:51:50.738505,880,11,457, 52,2014-01-17 00:33:18.636893,678,322,539, 57,2014-01-12 20:45:46.262087,547,707,795, 52,2014-01-14 03:22:42.066487,684,190,734, 57,2014-01-19 04:55:29.254892,750,539,587, 57,2014-01-12 14:27:03.787182,718,153,261, 57,2014-01-11 06:17:58.975188,407,609,723, 52,2014-01-14 07:00:31.005022,108,659,820, 57,2014-01-21 
05:04:05.991504,901,950,392, 57,2014-01-15 16:53:50.239775,47,338,142, 57,2014-01-20 18:16:39.984142,685,883,438, 52,2014-01-19 13:11:26.206829,933,471,198, 52,2014-01-18 15:00:08.840936,684,119,216, 57,2014-01-12 14:43:06.080377,600,654,827, 57,2014-01-11 05:26:56.621718,577,33,153, 52,2014-01-11 19:06:03.636876,362,13,9, 52,2014-01-15 20:22:06.817536,886,28,892, 52,2014-01-21 02:02:34.838632,894,450,992, 52,2014-01-14 03:48:09.168779,966,553,649, 100,2014-01-18 05:12:21.060495,893,431,128, 52,2014-01-13 01:18:28.368654,317,291,768, 52,2014-01-15 20:24:14.528404,687,560,871, 52,2014-01-17 19:03:13.557899,103,68,485, 100,2014-01-16 17:50:32.817541,231,399,68, 100,2014-01-15 09:09:25.132836,33,463,942, 100,2014-01-13 16:53:43.799295,695,233,997, 57,2014-01-10 20:38:48.234957,707,131,261, 100,2014-01-13 09:03:27.443255,980,269,124, 52,2014-01-20 23:25:20.437063,454,797,255, 57,2014-01-17 10:07:13.154319,901,298,640, 52,2014-01-18 01:05:09.711567,82,175,850, 57,2014-01-18 21:17:11.175568,539,784,705, 52,2014-01-13 04:40:31.791212,252,564,952, 100,2014-01-20 01:50:55.973654,780,307,147, 100,2014-01-11 18:39:53.242474,45,176,179, 57,2014-01-11 03:36:01.046104,331,209,374, 57,2014-01-11 10:17:59.761256,489,295,769, 100,2014-01-12 04:57:52.217661,526,423,492, 52,2014-01-14 14:36:32.051545,726,727,223, 57,2014-01-11 09:22:05.75276,273,861,474, 57,2014-01-17 01:51:42.457855,294,169,679, 52,2014-01-14 02:53:12.035686,879,835,870, 100,2014-01-20 02:33:31.95959,536,317,373, 57,2014-01-14 10:51:51.298792,899,590,49, 52,2014-01-15 21:36:18.116483,322,553,985, 52,2014-01-20 12:00:28.581726,114,172,838, 100,2014-01-15 09:57:54.935668,26,510,951, 100,2014-01-20 21:58:24.93402,876,752,141, 57,2014-01-20 08:03:03.420359,853,600,666, 100,2014-01-18 17:46:04.104447,420,839,922, 57,2014-01-17 21:10:45.994515,328,411,608, 52,2014-01-12 03:21:31.789014,212,335,796, 100,2014-01-13 08:10:18.63327,753,154,160, 57,2014-01-14 10:13:59.552832,135,544,741, 52,2014-01-18 01:12:04.606554,43,786,426, 52,2014-01-17 21:19:43.766106,574,212,872, 52,2014-01-17 07:07:43.352127,291,518,457, 57,2014-01-18 01:50:52.841511,712,288,902, 100,2014-01-20 10:23:11.209362,439,186,187, 52,2014-01-17 00:33:03.78919,772,381,339, 57,2014-01-14 05:58:05.99583,677,238,416, 57,2014-01-17 05:28:20.920929,398,704,842, 57,2014-01-14 15:50:14.965176,846,884,661, 52,2014-01-16 14:38:37.787043,813,16,331, 100,2014-01-16 01:19:16.895002,56,513,728, 52,2014-01-15 14:30:08.20765,874,819,28, 52,2014-01-16 22:14:38.833566,643,954,898, 100,2014-01-16 15:37:16.656258,294,645,232, 100,2014-01-16 23:28:00.425311,633,988,714, 57,2014-01-20 16:51:10.88511,641,216,255, 100,2014-01-17 04:59:22.409019,926,780,344, 57,2014-01-12 19:19:42.883167,901,182,868, 100,2014-01-13 13:44:22.920797,376,694,375, 100,2014-01-16 01:32:54.368607,506,978,502, 100,2014-01-16 10:11:34.181958,873,80,789, 100,2014-01-18 15:20:41.03728,749,385,54, 57,2014-01-15 07:33:22.950334,967,800,317, 57,2014-01-21 00:09:29.117626,412,592,899, 57,2014-01-18 07:19:05.97172,204,535,193, 100,2014-01-14 09:36:26.371112,739,756,402, 52,2014-01-18 11:55:33.222493,354,287,290, 52,2014-01-14 22:29:51.966234,809,924,66, 52,2014-01-15 16:57:58.160126,611,943,9, 52,2014-01-21 00:05:51.855763,303,895,655, 52,2014-01-18 12:43:51.519067,535,315,656, 57,2014-01-12 12:10:02.767099,40,71,658, 57,2014-01-17 15:25:35.621869,728,342,446, 52,2014-01-14 13:51:34.871613,686,887,307, 57,2014-01-19 18:00:55.60861,771,342,389, 52,2014-01-16 19:48:46.831231,669,710,905, 52,2014-01-20 18:24:38.660802,426,224,527, 
100,2014-01-12 17:59:01.604859,669,242,907, 52,2014-01-12 19:17:07.752579,245,959,891, 57,2014-01-14 04:14:53.625978,708,527,227, 52,2014-01-18 12:37:39.391902,61,524,24, 100,2014-01-18 00:36:24.647581,536,635,505, 52,2014-01-18 22:45:01.833628,89,690,317, 52,2014-01-14 04:52:18.225468,407,462,861, 57,2014-01-13 10:13:41.30384,344,707,319, 57,2014-01-14 16:13:02.259359,124,24,506, 100,2014-01-13 15:43:29.110578,253,546,20, 57,2014-01-19 19:13:03.712858,533,846,872, 52,2014-01-16 15:32:45.142526,413,487,689, 52,2014-01-16 09:27:52.031795,978,712,353, 100,2014-01-14 14:45:58.081466,925,539,20, 52,2014-01-11 19:44:19.324904,245,881,594, 57,2014-01-13 18:48:33.069075,278,731,759, 57,2014-01-19 02:19:21.032219,693,714,47, 52,2014-01-11 13:53:48.44253,588,283,788, 57,2014-01-10 20:07:39.040795,547,645,614, 57,2014-01-12 05:55:47.403331,584,602,269, 57,2014-01-19 05:49:21.665023,905,515,829, 57,2014-01-14 22:37:31.007448,961,97,555, 57,2014-01-17 02:53:45.563457,143,33,254, 52,2014-01-18 23:55:13.520785,285,321,642, 52,2014-01-12 05:21:22.526515,97,158,839, 52,2014-01-18 19:03:48.330556,310,742,784, 57,2014-01-15 09:20:49.142654,717,622,846, 57,2014-01-15 23:12:57.398127,543,990,150, 57,2014-01-17 07:04:43.939585,293,919,497, 52,2014-01-10 23:09:35.974305,307,590,404, 100,2014-01-15 11:37:36.059348,653,600,735, 100,2014-01-19 05:03:45.544445,825,3,981, 57,2014-01-12 22:26:43.726884,268,377,545, 52,2014-01-18 19:52:29.685327,677,675,696, 52,2014-01-16 11:41:24.936347,7,244,978, 57,2014-01-20 03:03:08.374466,550,412,523, 52,2014-01-16 12:37:31.519374,934,874,117, 100,2014-01-19 20:33:43.162234,967,444,698, 57,2014-01-12 07:16:49.678306,113,966,239, 100,2014-01-20 08:50:33.778733,903,28,453, 57,2014-01-12 06:17:12.272812,918,82,54, 57,2014-01-10 20:29:53.391165,879,975,989, 100,2014-01-15 18:23:18.921259,44,97,733, 100,2014-01-17 19:45:04.304607,188,682,892, 57,2014-01-14 15:15:51.47263,687,695,411, 52,2014-01-16 18:07:38.246164,44,718,832, 57,2014-01-20 18:33:37.374101,249,832,639, 57,2014-01-12 11:35:12.50485,549,562,834, 52,2014-01-17 12:01:26.689113,88,748,358, 100,2014-01-20 18:41:16.414896,670,316,655, 57,2014-01-13 21:30:59.908181,550,537,176, 57,2014-01-15 11:50:48.354135,320,983,877, 52,2014-01-14 11:18:47.422343,86,846,998, 57,2014-01-20 04:24:45.472057,896,202,562, 52,2014-01-13 05:46:01.87534,802,949,427, 57,2014-01-15 20:40:09.949277,543,827,36, 57,2014-01-17 17:28:33.802614,84,756,624, 52,2014-01-17 19:06:51.017995,32,402,531, 52,2014-01-20 23:53:07.347403,657,801,457, 57,2014-01-13 18:33:17.742198,329,394,365, 52,2014-01-17 22:16:26.9923,120,167,929, 100,2014-01-15 05:30:43.406752,989,732,843, 57,2014-01-11 17:37:03.286644,945,581,874, 100,2014-01-20 00:43:10.719184,900,73,288, 52,2014-01-12 19:23:13.092498,333,760,875, 100,2014-01-17 09:18:28.223409,955,666,984, 52,2014-01-18 21:46:19.09365,384,255,944, 100,2014-01-18 12:00:44.611872,165,545,924, 57,2014-01-15 23:52:11.385644,937,618,174, 57,2014-01-20 09:03:08.771956,408,772,386, 100,2014-01-17 14:51:18.390605,811,69,991, 57,2014-01-17 10:09:23.658456,128,89,96, 100,2014-01-20 09:33:02.163121,616,555,247, 52,2014-01-12 03:14:37.311865,933,998,497, 52,2014-01-13 23:54:27.963482,822,974,141, 57,2014-01-13 18:48:53.63617,816,384,967, 57,2014-01-18 01:22:15.558447,92,45,7, 57,2014-01-13 12:28:05.337583,963,957,828, 52,2014-01-15 10:24:06.14102,791,86,723, 52,2014-01-14 07:23:42.247559,575,564,470, 57,2014-01-13 01:09:21.752478,644,624,932, 100,2014-01-18 11:55:06.04962,609,840,905, 57,2014-01-18 23:14:30.601695,924,442,568, 
57,2014-01-16 16:28:09.175241,973,202,874, 100,2014-01-17 10:19:51.521677,764,586,619, 100,2014-01-10 23:00:32.477035,695,195,828, 52,2014-01-11 07:08:19.124518,349,786,366, 57,2014-01-13 21:48:25.324291,88,713,179, 52,2014-01-17 22:07:23.495449,693,377,834, 52,2014-01-11 01:01:26.471921,353,496,138, 57,2014-01-16 20:21:43.066908,221,225,659, 57,2014-01-14 14:23:50.487748,177,606,854, 100,2014-01-15 10:32:09.879092,51,511,571, 52,2014-01-17 17:58:46.353551,364,846,124, 57,2014-01-13 09:07:01.207351,23,904,82, 100,2014-01-17 09:55:22.97159,167,219,793, 57,2014-01-13 21:17:14.57696,768,355,793, 52,2014-01-11 00:53:20.301001,197,840,130, 52,2014-01-14 15:56:07.58346,940,108,735, 57,2014-01-19 01:09:25.962604,253,871,405, 52,2014-01-20 13:56:29.073376,354,352,716, 57,2014-01-11 00:47:25.974066,960,122,986, 57,2014-01-15 05:18:49.621479,597,539,137, 57,2014-01-19 17:29:31.236497,624,698,882, 52,2014-01-12 08:02:03.28593,798,127,793, 100,2014-01-18 09:13:17.584961,547,155,11, 57,2014-01-12 06:18:24.872667,347,184,573, 57,2014-01-19 13:24:18.844377,448,273,308, 57,2014-01-21 01:41:22.922545,410,799,365, 57,2014-01-16 20:42:31.014105,570,72,539, 57,2014-01-12 14:48:01.091936,604,74,732, 57,2014-01-12 20:50:44.675023,865,922,965, 52,2014-01-14 02:37:37.063725,821,53,80, 52,2014-01-20 18:02:31.694051,393,569,736, 52,2014-01-18 17:18:53.850264,534,849,924, 52,2014-01-20 16:57:28.585402,641,711,474, 52,2014-01-20 21:03:04.171085,761,219,678, 100,2014-01-19 04:27:12.974781,570,687,465, 52,2014-01-13 08:45:53.909692,378,756,398, 52,2014-01-17 13:10:27.666534,71,567,376, 57,2014-01-19 09:28:39.447121,426,3,571, 57,2014-01-19 09:07:36.9766,723,763,518, 100,2014-01-10 21:34:18.154702,75,273,796, 52,2014-01-13 14:00:49.326213,323,538,922, 100,2014-01-15 21:06:23.33057,596,783,377, 52,2014-01-13 10:41:19.362053,798,594,467, 57,2014-01-20 03:56:12.297802,901,506,169, 100,2014-01-18 22:23:37.907531,854,153,172, 52,2014-01-13 15:34:39.663054,976,111,922, 52,2014-01-13 13:52:19.881263,328,126,587, 57,2014-01-16 17:33:03.870554,2,376,757, 100,2014-01-12 23:31:08.73643,303,928,301, 100,2014-01-13 18:39:45.855328,997,530,691, 52,2014-01-21 02:51:53.492034,549,693,740, 57,2014-01-11 11:00:39.973347,278,49,96, 52,2014-01-15 06:41:49.141258,595,529,351, 52,2014-01-18 06:05:11.076996,846,281,44, 52,2014-01-12 21:19:04.846014,229,388,9, 57,2014-01-13 14:06:07.986055,929,428,580, 99,2014-01-18 01:46:33.999541,710,495,124, 99,2014-01-18 22:01:35.860119,247,539,988, 99,2014-01-15 08:54:09.077992,112,31,694, 62,2014-01-20 02:37:18.674564,997,56,184, 99,2014-01-11 18:39:12.923844,739,458,925, 62,2014-01-14 20:56:40.772043,266,739,6, 99,2014-01-17 13:56:12.525247,60,750,169, 62,2014-01-11 05:36:41.509245,751,17,689, 62,2014-01-14 11:59:44.943547,816,314,176, 99,2014-01-15 12:23:25.500027,890,808,763, 99,2014-01-13 18:22:35.419356,634,674,547, 62,2014-01-21 05:10:12.610081,192,293,751, 62,2014-01-13 15:52:04.947149,494,52,655, 62,2014-01-11 21:30:12.395956,288,959,77, 99,2014-01-10 20:44:30.764783,310,461,362, 62,2014-01-16 09:52:54.273361,835,796,490, 62,2014-01-16 22:36:35.726526,492,36,646, 99,2014-01-13 11:25:50.126836,393,230,436, 99,2014-01-15 07:49:06.571164,767,153,932, 99,2014-01-14 15:00:13.634476,166,680,45, 62,2014-01-16 07:00:29.790309,543,386,374, 62,2014-01-18 01:41:26.452426,88,775,967, 99,2014-01-20 12:33:17.505031,885,522,997, 99,2014-01-18 10:31:38.526741,905,138,590, 62,2014-01-21 00:21:12.307754,491,905,166, 99,2014-01-20 09:25:10.997065,545,394,891, 62,2014-01-19 01:32:18.500086,540,164,882, 99,2014-01-15 
01:03:01.449431,146,6,997, 99,2014-01-17 09:30:22.07406,479,385,366, 62,2014-01-21 02:51:23.346519,662,683,41, 62,2014-01-17 19:09:09.435486,297,252,909, 62,2014-01-14 05:16:56.073601,246,570,39, 99,2014-01-18 18:52:59.206638,987,497,783, 62,2014-01-11 22:03:18.513478,834,832,679, 62,2014-01-13 01:54:14.748165,399,993,617, 62,2014-01-19 17:32:12.1309,921,110,55, 62,2014-01-15 22:59:59.285939,243,590,422, 99,2014-01-19 19:50:27.27341,433,309,998, 99,2014-01-20 03:08:53.640145,79,825,971, 99,2014-01-19 14:59:44.229486,618,480,868, 99,2014-01-14 02:13:52.773857,862,331,33, 62,2014-01-12 15:31:29.059501,444,158,169, 99,2014-01-19 14:09:56.839568,880,243,200, 62,2014-01-16 22:05:57.721006,47,217,799, 62,2014-01-13 17:01:41.455877,217,27,930, 62,2014-01-19 14:54:27.60477,579,258,466, 99,2014-01-12 01:58:51.994786,847,921,187, 99,2014-01-19 19:38:17.182403,253,77,998, 99,2014-01-11 20:20:17.731606,1,129,558, 62,2014-01-16 13:47:58.56595,24,603,401, 62,2014-01-13 04:38:30.81688,682,34,381, 99,2014-01-17 07:20:47.521917,471,553,305, 62,2014-01-13 09:29:25.018795,402,585,730, 62,2014-01-12 11:11:48.32191,362,400,541, 99,2014-01-14 11:52:26.049076,59,471,516, 99,2014-01-13 03:50:37.326548,901,917,343, 99,2014-01-11 14:36:59.318975,971,615,266, 62,2014-01-12 07:24:44.549163,107,28,325, 62,2014-01-17 08:53:38.77598,67,638,578, 62,2014-01-18 04:07:21.393036,372,677,467, 62,2014-01-12 04:16:07.895682,445,101,286, 62,2014-01-13 22:02:48.211466,839,481,283, 99,2014-01-11 03:24:17.466636,911,447,626, 62,2014-01-20 03:09:07.102739,6,627,190, 99,2014-01-15 00:06:06.725363,233,663,335, 99,2014-01-13 09:18:32.215221,693,957,688, 62,2014-01-18 14:41:19.233639,42,414,968, 99,2014-01-20 03:06:06.011302,927,456,236, 62,2014-01-11 23:08:59.488631,317,67,617, 62,2014-01-17 11:50:12.873784,599,908,65, 99,2014-01-18 12:05:50.240788,700,463,831, 62,2014-01-15 05:22:52.262489,270,699,214, 99,2014-01-19 07:21:41.933705,711,803,934, 99,2014-01-16 20:15:47.080355,312,207,196, 62,2014-01-10 21:28:49.983915,501,217,510, 99,2014-01-11 18:23:23.389581,621,810,399, 99,2014-01-15 05:10:14.685126,672,215,402, 99,2014-01-12 03:27:41.978701,955,48,619, 99,2014-01-20 18:01:40.571985,521,844,790, 99,2014-01-16 05:30:32.417151,589,418,546, 62,2014-01-17 21:15:40.54465,470,778,824, 99,2014-01-12 16:40:11.389284,783,222,314, 62,2014-01-12 06:51:19.939068,559,196,433, 62,2014-01-20 10:45:05.563445,827,348,33, 62,2014-01-14 07:51:59.711195,769,154,30, 99,2014-01-15 22:43:45.988144,65,264,776, 99,2014-01-12 08:35:42.890412,853,286,555, 99,2014-01-15 02:28:59.03059,103,276,52, 99,2014-01-17 10:08:30.537726,881,11,461, 99,2014-01-18 21:29:21.666392,831,536,663, 62,2014-01-12 00:36:20.423626,328,867,442, 62,2014-01-18 18:24:38.433408,327,96,770, 62,2014-01-11 13:32:09.878277,817,850,768, 99,2014-01-12 08:00:37.890681,894,103,403, 99,2014-01-17 15:33:45.536147,50,881,805, 99,2014-01-15 17:38:16.60364,704,485,374, 62,2014-01-14 21:19:10.105902,943,801,21, 62,2014-01-15 00:15:04.769785,670,12,482, 62,2014-01-14 14:44:22.614942,861,209,531, 99,2014-01-16 00:28:09.594533,149,231,807, 62,2014-01-11 06:05:17.643989,9,25,748, 99,2014-01-11 20:50:12.855729,120,625,135, 62,2014-01-20 09:51:01.857442,568,225,976, 62,2014-01-19 17:26:59.577694,577,190,272, 62,2014-01-17 21:05:59.936085,643,583,311, 62,2014-01-20 11:19:51.841356,586,928,958, 62,2014-01-20 15:50:22.967275,39,424,207, 99,2014-01-11 20:16:14.62163,295,635,35, 99,2014-01-11 08:47:33.820057,169,148,573, 62,2014-01-20 03:52:03.53926,473,364,740, 99,2014-01-17 05:46:47.038781,187,263,162, 
62,2014-01-18 10:03:14.364707,694,707,963, 99,2014-01-11 14:32:14.928545,673,928,7, 62,2014-01-18 16:38:06.977849,152,637,553, 62,2014-01-17 14:48:19.928571,865,113,350, 99,2014-01-15 02:24:14.640158,420,302,797, 99,2014-01-13 09:21:52.966411,63,268,662, 99,2014-01-19 03:24:02.818983,29,332,14, 99,2014-01-19 08:53:13.670748,147,333,319, 99,2014-01-19 23:30:23.504137,153,928,510, 99,2014-01-16 18:53:24.485375,954,925,456, 99,2014-01-20 13:29:34.094374,672,903,379, 62,2014-01-17 11:55:01.937545,704,829,973, 99,2014-01-17 12:25:34.363652,345,649,252, 99,2014-01-11 15:30:11.985055,221,355,827, 62,2014-01-13 21:28:47.474111,380,172,603, 62,2014-01-12 00:03:50.967292,617,892,650, 62,2014-01-15 16:49:22.090957,14,770,772, 62,2014-01-18 01:43:52.243897,125,694,228, 99,2014-01-15 18:48:13.582233,182,141,754, 99,2014-01-20 21:17:31.68591,701,543,583, 62,2014-01-18 11:49:09.887885,183,665,457, 62,2014-01-16 19:38:26.437963,205,263,279, 62,2014-01-20 01:08:33.543352,181,376,338, 99,2014-01-16 23:16:09.465579,944,722,229, 62,2014-01-13 10:44:26.374467,188,55,871, 99,2014-01-19 06:28:25.384708,673,527,749, 62,2014-01-16 09:06:32.432854,655,366,502, 62,2014-01-14 11:00:40.996096,745,843,242, 62,2014-01-19 19:15:59.204765,834,296,358, 99,2014-01-15 06:58:35.972533,792,438,89, 62,2014-01-20 20:47:28.034877,261,379,703, 62,2014-01-16 23:19:13.569472,711,683,993, 99,2014-01-16 01:30:50.901078,57,387,969, 62,2014-01-18 07:25:35.012726,957,215,986, 99,2014-01-13 08:07:33.498043,590,670,805, 62,2014-01-20 07:55:05.541237,213,448,477, 62,2014-01-20 20:47:27.979138,669,620,466, 62,2014-01-11 05:31:36.317026,94,100,178, 62,2014-01-18 10:48:19.211985,71,663,336, 99,2014-01-19 14:17:51.483275,569,334,215, 62,2014-01-17 04:25:00.80282,72,179,633, 99,2014-01-17 18:17:53.306778,521,775,679, 99,2014-01-15 20:12:53.42124,683,738,334, 99,2014-01-13 10:50:35.166472,500,484,96, 62,2014-01-18 13:48:05.291833,420,668,372, 62,2014-01-18 21:41:40.89535,717,940,541, 62,2014-01-14 14:54:26.134182,642,791,561, 99,2014-01-13 00:37:27.38321,121,483,435, 62,2014-01-15 17:25:33.139247,366,675,457, 99,2014-01-19 13:42:39.716415,386,683,100, 62,2014-01-12 15:54:59.069121,311,461,587, 62,2014-01-12 23:14:43.027551,393,220,884, 99,2014-01-15 03:21:06.154797,758,719,266, 99,2014-01-11 11:03:32.612473,471,527,915, 99,2014-01-19 02:30:52.49313,54,413,751, 99,2014-01-17 18:05:32.529264,58,587,960, 62,2014-01-19 21:31:57.99718,65,8,114, 62,2014-01-14 05:37:24.926404,180,789,540, 99,2014-01-10 23:06:13.525359,637,66,886, 62,2014-01-18 10:47:57.201946,50,952,361, 99,2014-01-18 16:36:00.898938,49,964,422, 99,2014-01-20 23:53:41.560236,596,725,64, 99,2014-01-14 04:07:10.771418,972,676,444, 99,2014-01-13 12:06:51.800016,672,547,190, 62,2014-01-18 01:19:16.572963,86,811,23, 99,2014-01-16 16:14:44.26988,584,753,789, 62,2014-01-12 14:01:57.341672,291,721,30, 99,2014-01-17 16:06:44.55252,773,520,397, 99,2014-01-17 01:46:20.586906,451,31,34, 99,2014-01-20 04:50:16.553657,59,371,978, 99,2014-01-16 00:24:36.035795,282,468,154, 62,2014-01-13 00:11:21.389726,869,314,181, 99,2014-01-16 17:08:09.860435,506,285,802, 62,2014-01-21 00:37:29.457033,737,384,904, 99,2014-01-15 15:01:56.556617,799,156,799, 99,2014-01-14 00:56:15.152689,899,707,385, 99,2014-01-18 16:19:10.352383,701,301,947, 62,2014-01-19 09:56:22.690799,819,224,553, 99,2014-01-16 05:33:42.535898,128,796,723, 99,2014-01-13 03:44:43.49205,745,217,439, 99,2014-01-17 17:39:02.407214,274,300,848, 46,2014-01-18 01:28:41.605019,421,352,633, 28,2014-01-15 06:59:26.519601,912,373,744, 46,2014-01-11 
15:00:08.56201,70,47,377, 46,2014-01-18 16:32:14.217491,393,28,619, 28,2014-01-13 03:30:19.012731,855,76,498, 97,2014-01-18 13:05:41.091273,911,249,663, 28,2014-01-17 08:04:12.214672,688,118,958, 28,2014-01-16 13:07:43.939135,458,891,63, 28,2014-01-18 16:11:54.616633,81,471,229, 97,2014-01-14 12:52:09.416618,890,25,765, 28,2014-01-13 23:43:44.838073,837,665,47, 28,2014-01-18 10:05:36.17687,993,672,311, 46,2014-01-17 20:59:20.188035,801,31,180, 28,2014-01-16 15:50:36.638508,61,896,339, 97,2014-01-15 05:24:52.750252,573,281,278, 28,2014-01-13 07:14:04.457916,309,207,283, 28,2014-01-18 09:52:33.98018,168,258,563, 28,2014-01-11 15:31:37.302772,376,472,583, 46,2014-01-19 13:00:25.045241,197,140,697, 28,2014-01-17 08:42:50.533838,367,400,768, 97,2014-01-16 19:56:13.338566,440,291,48, 46,2014-01-11 07:11:46.434967,708,482,423, 46,2014-01-12 19:51:00.394693,605,134,24, 97,2014-01-16 14:33:42.796019,903,477,45, 97,2014-01-16 02:13:42.991583,632,880,946, 28,2014-01-16 00:47:15.54738,493,356,880, 46,2014-01-14 00:52:53.148402,93,360,706, 97,2014-01-14 06:10:05.682381,319,516,340, 46,2014-01-11 00:20:58.083279,305,89,266, 97,2014-01-16 08:37:36.640452,689,195,1000, 46,2014-01-21 03:49:08.090015,366,123,284, 97,2014-01-18 05:49:39.688297,445,534,67, 46,2014-01-20 19:37:03.160053,939,371,194, 46,2014-01-11 12:49:16.652024,757,841,640, 97,2014-01-15 16:21:53.905789,424,4,398, 97,2014-01-12 17:07:22.173203,523,563,523, 28,2014-01-19 05:54:57.743717,485,562,791, 28,2014-01-11 18:26:06.120459,824,403,605, 28,2014-01-18 10:15:06.112339,107,392,555, 46,2014-01-16 16:06:52.36035,763,328,736, 46,2014-01-15 11:18:15.537077,553,826,654, 97,2014-01-11 03:58:50.95083,243,269,985, 46,2014-01-13 20:12:28.537639,371,922,943, 28,2014-01-12 02:17:35.725113,646,965,49, 46,2014-01-16 23:49:27.589339,288,543,808, 97,2014-01-18 05:37:21.28789,985,903,95, 46,2014-01-14 13:31:40.183447,514,586,905, 46,2014-01-14 03:42:01.569519,844,793,29, 46,2014-01-19 01:08:58.590661,781,628,977, 46,2014-01-12 20:32:05.228688,550,248,939, 97,2014-01-20 16:24:52.103356,689,120,413, 46,2014-01-14 15:05:11.929227,755,139,425, 46,2014-01-13 07:43:51.663654,470,474,122, 46,2014-01-12 06:15:52.498049,416,944,443, 46,2014-01-20 09:38:54.725246,298,946,322, 28,2014-01-18 13:57:34.655656,244,631,982, 97,2014-01-17 11:03:08.045429,387,818,841, 46,2014-01-13 04:31:47.874068,299,913,99, 46,2014-01-11 14:07:40.338039,958,304,406, 28,2014-01-17 15:24:06.128708,640,864,707, 28,2014-01-18 17:09:24.51452,183,382,519, 46,2014-01-11 11:56:48.428052,645,117,103, 28,2014-01-14 15:13:45.817005,588,944,237, 28,2014-01-18 06:46:27.674992,183,330,520, 28,2014-01-12 04:46:05.080496,785,262,963, 46,2014-01-19 11:35:39.722794,852,301,453, 97,2014-01-20 03:53:49.848196,673,786,417, 28,2014-01-20 14:41:02.824213,185,854,226, 46,2014-01-20 10:01:45.843253,610,650,697, 97,2014-01-17 08:08:55.960534,213,936,918, 97,2014-01-16 00:47:55.184562,399,730,754, 28,2014-01-14 15:20:01.38033,289,499,36, 46,2014-01-17 16:07:46.911365,20,744,787, 97,2014-01-19 01:00:23.722201,368,168,968, 46,2014-01-15 21:37:37.105862,634,217,326, 97,2014-01-13 09:57:14.500703,207,116,279, 28,2014-01-16 00:37:45.010091,294,919,705, 46,2014-01-19 15:09:17.289309,386,799,514, 46,2014-01-16 17:39:16.070222,996,854,716, 97,2014-01-13 19:46:43.600752,825,381,849, 97,2014-01-11 05:41:22.517997,869,993,420, 28,2014-01-16 04:04:08.173998,698,478,691, 97,2014-01-17 14:51:55.530399,131,613,471, 28,2014-01-13 17:25:14.181651,91,861,169, 46,2014-01-17 14:20:00.672047,133,892,880, 97,2014-01-16 
18:30:50.255645,801,461,271, 28,2014-01-11 01:22:48.837308,825,549,106, 46,2014-01-13 19:23:08.717895,356,557,327, 28,2014-01-19 03:02:38.129713,692,304,580, 46,2014-01-11 19:30:29.175766,369,77,133, 46,2014-01-20 14:47:14.846603,336,896,553, 46,2014-01-16 14:12:02.644652,812,568,73, 46,2014-01-12 11:27:17.603818,917,165,613, 28,2014-01-14 00:01:00.664027,192,892,931, 28,2014-01-13 14:58:30.319644,66,307,863, 28,2014-01-13 20:13:22.684314,968,902,585, 46,2014-01-12 05:36:40.386821,722,267,21, 28,2014-01-12 12:52:20.16784,126,800,896, 28,2014-01-13 04:54:25.508527,695,991,449, 97,2014-01-11 09:38:26.230494,322,507,255, 28,2014-01-19 01:01:16.128374,981,308,314, 97,2014-01-18 09:42:20.693508,427,507,633, 28,2014-01-15 04:58:27.610824,462,766,748, 46,2014-01-15 11:09:03.040158,659,435,931, 46,2014-01-16 04:42:44.415709,87,606,531, 97,2014-01-20 06:36:04.716686,332,479,509, 46,2014-01-18 01:06:17.540861,926,961,579, 97,2014-01-10 23:20:29.426219,723,855,117, 97,2014-01-18 15:45:22.005996,836,976,924, 28,2014-01-13 12:45:33.611502,896,939,228, 97,2014-01-13 23:07:13.026971,872,983,858, 46,2014-01-19 01:26:44.523994,944,9,107, 97,2014-01-18 20:49:41.7855,107,587,66, 97,2014-01-20 17:59:08.55737,411,175,783, 97,2014-01-11 12:51:58.706064,723,12,589, 97,2014-01-15 05:09:42.457546,386,834,675, 46,2014-01-16 06:29:58.813434,409,174,219, 46,2014-01-11 18:14:47.543372,233,173,799, 28,2014-01-18 04:32:51.175442,430,39,618, 28,2014-01-14 03:32:36.943147,380,583,607, 46,2014-01-12 17:45:16.719138,754,571,299, 28,2014-01-17 13:20:06.022464,72,841,605, 28,2014-01-19 21:44:39.587799,895,94,755, 28,2014-01-14 09:12:34.323375,130,915,420, 46,2014-01-20 17:21:06.686491,788,855,721, 97,2014-01-12 06:43:09.907444,788,143,917, 97,2014-01-17 09:25:57.007689,190,778,908, 46,2014-01-11 16:57:47.073312,839,484,374, 46,2014-01-13 23:35:30.075283,996,241,840, 46,2014-01-19 18:20:22.516634,453,512,319, 46,2014-01-12 06:36:13.303805,318,697,576, 97,2014-01-11 18:36:46.204076,334,822,702, 46,2014-01-16 22:02:43.210142,52,190,673, 97,2014-01-16 15:34:40.915049,469,904,830, 46,2014-01-16 09:45:49.244234,751,181,214, 46,2014-01-11 20:45:27.625851,50,548,376, 46,2014-01-15 16:10:45.631735,357,987,341, 28,2014-01-13 04:52:06.785096,730,629,392, 97,2014-01-12 00:05:57.05207,649,705,604, 46,2014-01-13 01:56:07.638151,523,981,4, 28,2014-01-15 21:37:40.396598,24,586,157, 46,2014-01-15 03:13:10.07946,3,85,31, 97,2014-01-10 21:22:52.162144,201,100,317, 97,2014-01-13 12:27:22.182097,161,616,874, 28,2014-01-14 15:12:18.63683,164,908,225, 46,2014-01-11 14:14:50.868208,658,84,193, 97,2014-01-17 21:37:04.640062,208,128,626, 97,2014-01-20 01:42:17.450265,975,609,145, 46,2014-01-12 12:29:38.411999,478,489,873, 97,2014-01-14 20:09:55.815503,911,444,376, 28,2014-01-12 23:14:54.393831,624,659,706, 97,2014-01-14 10:14:55.131136,561,323,746, 46,2014-01-11 03:30:01.837967,811,417,712, 46,2014-01-11 14:59:33.98163,231,54,737, 97,2014-01-17 23:27:29.454511,234,788,144, 46,2014-01-21 00:51:08.524458,908,999,676, 97,2014-01-13 01:42:43.889493,41,207,464, 46,2014-01-14 02:53:26.462619,138,576,249, 97,2014-01-11 11:48:55.598189,393,321,845, 46,2014-01-16 05:18:13.964776,429,814,634, 46,2014-01-12 15:13:48.979253,296,332,158, 46,2014-01-12 22:25:08.901995,346,388,700, 97,2014-01-17 03:55:00.168853,952,801,323, 28,2014-01-18 17:16:32.189395,54,615,807, 46,2014-01-18 17:59:49.817044,481,383,111, 97,2014-01-12 07:40:49.413087,399,117,915, 97,2014-01-19 18:01:59.815246,799,437,590, 97,2014-01-13 04:10:35.449199,850,762,412, 28,2014-01-14 
16:32:56.198602,61,386,620, 28,2014-01-20 22:07:56.867734,520,643,261, 28,2014-01-15 10:06:43.08735,584,850,446, 46,2014-01-19 18:10:36.595199,557,485,950, 46,2014-01-14 19:21:06.947195,560,12,671, 97,2014-01-15 11:29:35.249494,304,811,455, 46,2014-01-12 00:37:58.777716,962,630,398, 97,2014-01-18 14:33:25.584025,42,367,290, 28,2014-01-16 05:44:26.118121,271,168,217, 28,2014-01-19 02:15:03.417778,515,849,475, 46,2014-01-17 10:15:43.034709,401,881,281, 28,2014-01-17 22:14:04.530121,593,629,835, 28,2014-01-12 16:24:59.233281,927,665,187, 97,2014-01-19 13:30:37.428541,350,49,910, 46,2014-01-11 02:28:59.661257,482,383,817, 46,2014-01-12 23:55:01.071668,286,842,68, 97,2014-01-20 08:30:11.410171,869,246,929, 28,2014-01-18 05:56:29.116188,175,513,588, 97,2014-01-12 18:46:09.596126,206,864,366, 97,2014-01-12 04:12:55.299664,779,651,581, 97,2014-01-11 02:49:55.578806,765,466,640, 28,2014-01-13 10:35:05.194315,292,570,854, 28,2014-01-17 13:31:09.264859,630,996,357, 28,2014-01-12 22:03:44.558059,349,488,741, 46,2014-01-15 13:00:14.096728,772,419,49, 28,2014-01-13 11:26:09.433712,847,932,130, 46,2014-01-20 19:20:16.747454,604,349,844, 28,2014-01-13 01:00:03.913772,527,950,257, 28,2014-01-14 23:06:58.847219,346,168,39, 28,2014-01-19 07:22:16.562699,759,466,498, 97,2014-01-15 09:10:39.362971,807,515,206, 46,2014-01-18 19:39:55.04582,490,366,619, 46,2014-01-18 23:30:13.430434,257,228,690, 97,2014-01-19 23:17:22.450321,870,19,318, 28,2014-01-17 07:50:31.64102,197,361,633, 97,2014-01-12 12:51:20.377629,301,672,208, 97,2014-01-14 04:46:57.700233,112,606,933, 97,2014-01-18 12:28:30.418735,607,37,319, 97,2014-01-20 07:24:45.962074,225,745,28, 28,2014-01-19 14:31:23.818354,913,822,676, 46,2014-01-16 08:43:33.836514,251,918,577, 97,2014-01-16 11:40:28.996782,50,517,113, 28,2014-01-16 06:45:28.348475,589,109,582, 97,2014-01-18 05:08:33.070214,229,874,383, 46,2014-01-14 19:11:06.425323,807,461,859, 97,2014-01-16 13:14:28.009732,266,144,294, 28,2014-01-20 09:03:34.141881,825,19,295, 28,2014-01-13 21:41:17.835494,358,78,324, 28,2014-01-13 13:10:57.12592,91,557,671, 28,2014-01-11 21:49:43.738007,268,961,322, 28,2014-01-15 05:54:13.135577,465,967,937, 46,2014-01-13 20:00:52.704726,401,854,995, 46,2014-01-14 12:24:48.932741,528,418,930, 46,2014-01-11 13:25:22.400436,32,245,276, 28,2014-01-15 22:04:37.262785,139,826,464, 28,2014-01-19 05:25:03.029469,118,759,196, 46,2014-01-14 04:51:31.834148,834,773,728, 97,2014-01-15 11:24:54.01024,37,544,418, 97,2014-01-11 00:25:06.943241,642,99,295, 28,2014-01-18 07:58:30.681367,908,986,280, 97,2014-01-13 12:47:10.573358,608,880,175, 46,2014-01-15 13:35:46.306631,322,167,513, 46,2014-01-15 21:38:25.727187,434,267,730, 46,2014-01-11 06:17:24.003792,197,649,534, 97,2014-01-14 06:53:08.756951,398,352,476, 28,2014-01-11 23:28:57.368626,357,520,295, 46,2014-01-12 23:08:44.381841,724,597,599, 28,2014-01-17 15:40:06.457184,755,500,742, 28,2014-01-19 15:57:27.78736,927,317,485, 97,2014-01-12 00:33:30.343914,841,439,665, 28,2014-01-16 00:11:30.27554,952,641,542, 97,2014-01-14 18:41:01.624293,651,51,895, 46,2014-01-17 16:13:59.340697,900,405,708, 28,2014-01-11 00:56:58.624014,761,160,511, 97,2014-01-11 17:49:34.694507,926,734,135, 97,2014-01-11 05:25:05.76602,664,14,11, 46,2014-01-16 18:11:26.634166,931,28,230, 28,2014-01-10 20:53:08.836389,670,786,222, 46,2014-01-14 07:06:23.601514,668,677,665, 97,2014-01-19 11:22:23.760085,614,114,104, 46,2014-01-11 22:42:52.574396,324,381,922, 97,2014-01-18 17:00:36.737091,146,115,652, 28,2014-01-12 01:23:16.465231,822,384,713, 97,2014-01-15 
15:07:41.507136,367,140,233, 97,2014-01-19 10:25:59.137526,447,337,8, 97,2014-01-17 03:27:53.728017,898,475,685, 46,2014-01-13 14:32:44.536605,411,627,696, 28,2014-01-12 09:17:30.972094,126,192,766, 97,2014-01-11 08:52:47.738675,901,209,863, 28,2014-01-13 18:57:51.480266,975,136,848, 97,2014-01-19 21:16:01.65346,297,245,94, 46,2014-01-14 01:39:58.312033,850,830,381, 46,2014-01-18 12:33:37.786896,904,48,954, 28,2014-01-14 12:54:27.381068,81,194,428, 97,2014-01-14 11:57:22.315825,941,722,807, 97,2014-01-11 13:26:46.543848,455,388,33, 46,2014-01-15 16:23:24.749693,741,221,595, 28,2014-01-16 15:06:06.697666,631,524,254, 28,2014-01-18 09:06:53.001451,211,450,543, 28,2014-01-14 02:20:52.537054,424,602,164, 97,2014-01-17 19:39:37.041579,193,911,957, 97,2014-01-13 03:18:23.276991,461,625,659, 97,2014-01-18 01:01:54.161347,586,508,960, 46,2014-01-14 05:53:36.382276,967,862,296, 46,2014-01-13 08:15:21.901424,102,403,293, 28,2014-01-18 22:51:28.855854,846,511,223, 46,2014-01-14 15:18:42.148296,606,811,481, 97,2014-01-19 06:26:48.535589,730,201,427, 46,2014-01-18 23:44:37.692242,194,824,60, 46,2014-01-18 02:25:05.750228,286,751,627, 28,2014-01-17 11:49:12.295674,550,279,358, 97,2014-01-20 02:27:30.267056,71,160,828, 28,2014-01-15 13:25:42.487319,733,965,275, 46,2014-01-18 17:12:28.760905,350,812,478, 97,2014-01-14 11:35:11.774193,872,258,878, 28,2014-01-13 17:51:41.625265,984,790,514, 46,2014-01-14 14:40:22.489342,382,764,363, 86,2014-01-17 06:07:56.311217,206,115,75, 38,2014-01-15 07:09:12.597358,179,948,651, 6,2014-01-15 03:33:10.228017,217,676,230, 71,2014-01-20 05:05:47.791481,13,692,896, 71,2014-01-13 22:25:14.250819,18,761,423, 71,2014-01-18 09:13:08.54005,605,621,756, 6,2014-01-17 11:39:25.578378,983,250,453, 38,2014-01-17 15:19:41.631886,92,593,66, 38,2014-01-11 15:10:30.855875,764,858,255, 73,2014-01-18 05:06:12.122226,460,725,961, 38,2014-01-12 01:43:06.381579,700,825,606, 86,2014-01-17 10:16:37.55396,713,197,502, 66,2014-01-15 08:13:05.123677,271,263,988, 6,2014-01-15 08:03:58.919052,160,312,216, 71,2014-01-13 23:56:14.59554,638,37,181, 66,2014-01-17 15:31:28.401086,615,116,565, 38,2014-01-12 03:05:53.080399,420,8,501, 66,2014-01-17 09:49:50.978234,869,500,985, 86,2014-01-20 03:46:50.302509,516,210,467, 38,2014-01-20 05:57:21.936672,483,295,389, 86,2014-01-21 05:08:33.12653,341,183,797, 6,2014-01-18 04:13:38.838099,125,879,890, 38,2014-01-17 23:41:59.628914,660,466,792, 66,2014-01-18 01:33:38.876758,281,709,635, 66,2014-01-14 10:02:51.133773,173,267,29, 66,2014-01-16 20:09:29.89597,670,53,980, 6,2014-01-12 08:59:21.364496,298,294,495, 86,2014-01-11 21:15:19.895098,126,608,173, 6,2014-01-20 11:44:41.670581,625,502,999, 6,2014-01-15 06:51:02.989759,562,441,890, 86,2014-01-15 15:55:42.384439,129,237,4, 38,2014-01-16 11:52:37.981798,531,894,517, 6,2014-01-19 18:00:15.587118,429,324,735, 71,2014-01-19 23:28:52.612455,194,613,668, 86,2014-01-15 10:58:25.77328,395,496,556, 38,2014-01-12 10:25:29.837936,113,978,220, 73,2014-01-17 02:42:01.152505,234,715,120, 38,2014-01-11 16:37:51.351657,70,781,314, 71,2014-01-19 05:45:11.470243,746,231,271, 66,2014-01-17 21:52:32.008381,835,260,907, 73,2014-01-19 01:44:03.473883,39,847,475, 73,2014-01-20 11:28:17.851822,800,887,232, 66,2014-01-14 02:09:09.562342,508,407,350, 71,2014-01-13 03:57:08.597979,318,267,195, 38,2014-01-14 13:32:16.770874,70,309,838, 73,2014-01-17 06:05:24.1583,265,414,913, 86,2014-01-19 23:28:36.999066,821,156,395, 86,2014-01-15 20:38:09.851692,822,629,97, 38,2014-01-13 09:55:15.136534,947,14,509, 73,2014-01-18 21:15:27.301575,14,257,967, 
66,2014-01-14 20:35:31.788364,16,285,392, 71,2014-01-13 09:03:48.263065,154,97,345, 6,2014-01-15 19:29:06.139674,703,443,980, 66,2014-01-11 14:17:31.417277,165,838,620, 71,2014-01-20 14:37:27.140242,200,651,53, 66,2014-01-19 09:31:57.273447,712,580,741, 86,2014-01-17 14:27:01.313667,78,639,698, 73,2014-01-11 17:36:48.504738,989,935,892, 71,2014-01-20 10:47:17.168545,810,560,18, 38,2014-01-16 20:11:42.984248,505,948,506, 38,2014-01-16 04:27:51.494498,390,294,373, 6,2014-01-14 20:42:59.552983,72,923,445, 73,2014-01-12 02:04:20.966046,102,411,241, 6,2014-01-14 16:28:07.081616,76,499,574, 86,2014-01-13 14:11:52.16544,897,419,448, 71,2014-01-16 17:02:46.739326,466,355,565, 86,2014-01-16 06:53:36.919552,141,168,434, 38,2014-01-19 20:53:53.317945,216,44,796, 6,2014-01-17 13:40:38.090983,21,423,125, 6,2014-01-14 06:38:48.389795,816,60,389, 71,2014-01-16 12:46:25.326745,753,931,229, 73,2014-01-15 09:24:41.564867,584,523,256, 66,2014-01-13 12:07:06.242035,532,46,94, 71,2014-01-19 18:55:34.889086,308,719,476, 66,2014-01-17 17:21:50.162846,255,93,459, 86,2014-01-17 05:39:23.012908,597,358,201, 66,2014-01-15 19:00:59.047386,613,872,591, 66,2014-01-16 10:50:27.161911,15,182,785, 86,2014-01-11 20:17:32.8646,88,151,448, 71,2014-01-18 08:56:14.183921,30,391,294, 66,2014-01-14 02:05:54.463486,406,538,705, 6,2014-01-15 20:53:04.652964,126,123,960, 71,2014-01-20 22:00:02.447405,676,941,403, 6,2014-01-19 01:35:00.60316,947,871,43, 71,2014-01-16 15:10:36.070659,732,750,52, 73,2014-01-20 06:37:29.587647,693,887,369, 71,2014-01-17 05:06:57.877027,198,605,799, 6,2014-01-12 23:37:37.384326,758,112,891, 6,2014-01-21 04:14:18.092385,565,470,201, 38,2014-01-16 09:54:15.045571,881,504,442, 38,2014-01-18 23:49:20.368574,522,166,442, 73,2014-01-16 02:42:09.586883,965,726,525, 38,2014-01-20 10:37:14.598974,733,501,764, 66,2014-01-20 05:53:41.33462,854,732,281, 73,2014-01-19 23:10:16.668499,696,939,655, 73,2014-01-12 18:49:06.764413,585,724,103, 73,2014-01-15 16:56:28.073945,911,78,240, 6,2014-01-15 00:03:53.588051,190,438,464, 6,2014-01-11 09:43:00.082777,538,758,21, 66,2014-01-12 00:37:06.164928,266,430,98, 71,2014-01-18 10:42:41.978265,924,97,370, 86,2014-01-17 02:29:25.409522,469,127,682, 71,2014-01-16 14:01:47.729795,185,55,555, 71,2014-01-21 02:49:48.2203,726,777,323, 86,2014-01-15 15:25:00.298608,571,875,354, 73,2014-01-13 01:23:37.89306,182,334,615, 38,2014-01-17 02:29:11.233209,114,319,956, 38,2014-01-20 14:25:59.346414,577,733,286, 66,2014-01-18 16:14:05.054972,276,883,216, 86,2014-01-18 02:46:44.098228,804,912,64, 86,2014-01-17 17:22:13.530334,764,439,130, 66,2014-01-11 12:19:59.518878,313,730,356, 66,2014-01-12 17:39:48.751192,544,22,169, 6,2014-01-17 09:22:15.97774,386,453,725, 38,2014-01-19 17:55:00.122039,404,849,880, 38,2014-01-18 12:50:24.821852,37,502,220, 71,2014-01-16 09:59:45.565387,883,29,81, 86,2014-01-15 17:01:57.999065,247,573,368, 6,2014-01-20 16:28:02.206178,90,36,556, 66,2014-01-16 08:14:03.657772,974,151,408, 66,2014-01-10 20:56:13.045055,748,485,289, 6,2014-01-18 10:17:22.574751,602,525,213, 86,2014-01-11 04:56:13.244655,705,299,145, 38,2014-01-20 11:33:27.644029,206,367,639, 6,2014-01-17 10:11:03.909371,304,356,36, 38,2014-01-20 08:06:29.913154,660,656,821, 6,2014-01-12 00:22:34.408442,622,193,115, 66,2014-01-11 21:07:31.983316,788,398,23, 71,2014-01-14 02:10:23.501624,963,876,947, 71,2014-01-12 14:05:34.491219,500,192,919, 38,2014-01-13 01:44:38.148245,459,121,25, 38,2014-01-11 06:53:05.479889,288,273,633, 66,2014-01-18 20:34:59.900741,825,395,689, 66,2014-01-18 19:46:25.878458,741,16,823, 
6,2014-01-11 03:42:53.700189,222,126,967, 86,2014-01-13 06:00:00.199768,141,465,195, 66,2014-01-21 01:10:03.771519,318,217,706, 66,2014-01-17 10:12:04.933398,730,241,696, 6,2014-01-12 14:25:59.546181,33,923,138, 66,2014-01-18 11:24:08.826491,603,122,969, 86,2014-01-14 06:58:49.031626,838,691,764, 71,2014-01-19 11:48:13.076516,77,543,125, 66,2014-01-19 03:44:08.345369,831,987,73, 73,2014-01-16 04:38:37.783237,528,165,107, 38,2014-01-15 15:10:29.054675,813,826,197, 66,2014-01-17 15:39:08.467826,86,634,192, 86,2014-01-13 11:29:02.605089,807,10,340, 86,2014-01-21 05:10:14.620061,592,21,614, 6,2014-01-12 02:41:06.466891,435,157,411, 86,2014-01-12 21:57:04.811685,861,266,724, 66,2014-01-16 07:24:18.278253,857,380,601, 38,2014-01-12 03:37:19.511946,354,567,106, 86,2014-01-20 12:14:27.386437,496,680,53, 86,2014-01-16 16:20:31.522908,339,550,723, 86,2014-01-11 09:10:47.155975,822,752,693, 86,2014-01-16 16:25:31.296227,335,160,22, 38,2014-01-15 18:27:01.436481,799,80,824, 73,2014-01-12 13:33:21.564835,264,625,661, 73,2014-01-17 17:33:03.279543,735,92,410, 73,2014-01-19 00:37:24.938105,940,744,510, 71,2014-01-14 07:38:56.056054,283,4,669, 6,2014-01-19 23:17:41.428206,293,597,586, 71,2014-01-19 11:30:30.417994,627,15,734, 38,2014-01-11 22:13:55.957214,213,655,837, 71,2014-01-17 13:04:07.306665,868,825,375, 73,2014-01-19 19:13:24.118182,564,880,812, 6,2014-01-14 08:13:56.156981,932,606,22, 86,2014-01-17 08:14:11.078184,432,472,613, 73,2014-01-15 23:25:29.05158,841,53,306, 38,2014-01-16 02:39:55.703163,483,364,670, 73,2014-01-14 13:38:19.904675,58,191,817, 66,2014-01-19 10:24:18.083625,411,741,83, 73,2014-01-14 08:28:08.779678,220,97,682, 73,2014-01-12 11:22:28.250462,88,829,407, 86,2014-01-14 09:02:55.866863,401,347,271, 66,2014-01-19 03:38:37.834353,510,519,926, 73,2014-01-19 07:01:36.718288,407,620,164, 73,2014-01-17 00:31:58.471952,270,269,10, 73,2014-01-19 02:48:52.454834,742,574,78, 73,2014-01-20 13:42:43.185178,344,246,868, 71,2014-01-19 02:29:03.283638,153,172,11, 86,2014-01-14 04:13:10.733088,93,930,39, 38,2014-01-11 11:20:02.697544,716,193,212, 38,2014-01-18 08:43:30.670494,399,234,998, 66,2014-01-20 00:33:42.255996,601,275,139, 66,2014-01-12 00:30:49.853519,116,770,315, 86,2014-01-13 19:09:01.966721,933,825,547, 73,2014-01-14 13:00:43.692477,99,417,350, 86,2014-01-13 18:04:11.418354,865,620,63, 73,2014-01-20 16:42:05.246264,759,70,958, 6,2014-01-12 07:38:08.630582,735,905,107, 71,2014-01-17 05:43:07.474827,54,899,465, 73,2014-01-19 09:59:46.674471,61,513,159, 6,2014-01-20 23:08:39.048576,227,713,216, 38,2014-01-18 07:57:03.432042,890,484,256, 66,2014-01-15 17:03:53.981135,4,222,551, 38,2014-01-19 12:22:03.166758,67,923,762, 66,2014-01-11 10:10:59.589023,648,135,322, 86,2014-01-11 19:18:05.05932,940,715,685, 73,2014-01-14 05:47:32.218758,153,367,321, 73,2014-01-16 16:50:55.292185,598,160,903, 6,2014-01-15 12:56:24.964414,69,679,788, 86,2014-01-12 10:11:50.302383,674,442,563, 71,2014-01-20 05:19:04.071864,344,14,977, 38,2014-01-17 04:18:53.214875,303,595,756, 6,2014-01-15 23:14:46.169246,616,343,748, 71,2014-01-18 02:57:41.906636,155,145,686, 86,2014-01-15 05:20:29.933164,430,493,297, 71,2014-01-11 17:46:44.641198,35,938,359, 73,2014-01-15 23:46:34.361471,493,51,238, 66,2014-01-14 13:03:13.118342,441,645,517, 66,2014-01-20 00:15:47.925255,377,573,727, 38,2014-01-19 07:59:45.094558,633,900,404, 6,2014-01-15 04:23:15.815885,884,744,552, 66,2014-01-17 02:59:18.595749,236,545,661, 66,2014-01-18 02:33:27.350555,287,322,694, 71,2014-01-16 08:54:05.669822,830,327,998, 38,2014-01-20 
02:08:20.56247,474,966,92, 66,2014-01-11 09:34:11.043452,327,634,668, 38,2014-01-19 06:58:17.088176,639,273,6, 73,2014-01-19 12:50:25.808733,511,710,766, 6,2014-01-12 21:12:19.674034,474,363,969, 38,2014-01-15 06:41:24.563004,931,881,94, 73,2014-01-17 16:50:12.483623,499,681,466, 73,2014-01-12 14:20:58.723029,90,166,290, 71,2014-01-12 08:38:27.995045,664,887,485, 86,2014-01-12 03:54:06.464758,359,227,760, 6,2014-01-10 20:43:01.889788,94,809,15, 6,2014-01-12 22:49:27.584068,53,54,147, 6,2014-01-13 03:12:11.524497,186,724,702, 38,2014-01-14 06:30:34.108546,223,437,190, 38,2014-01-18 19:40:22.876253,920,739,452, 86,2014-01-17 20:08:36.48891,754,588,249, 66,2014-01-15 20:42:24.410929,434,189,528, 66,2014-01-17 18:59:26.948536,401,207,54, 6,2014-01-13 18:27:29.703785,415,128,336, 6,2014-01-20 23:57:10.580175,439,351,10, 71,2014-01-14 15:57:08.855173,751,636,625, 71,2014-01-18 03:47:59.636949,344,24,772, 6,2014-01-11 11:43:55.221793,668,684,729, 38,2014-01-19 19:43:43.216644,738,550,121, 66,2014-01-11 10:51:12.755291,739,851,143, 6,2014-01-20 15:59:43.147047,377,503,475, 71,2014-01-17 21:43:28.311203,423,249,111, 86,2014-01-15 19:14:28.571595,525,180,806, 86,2014-01-16 12:59:01.742796,193,401,895, 86,2014-01-14 18:16:55.661757,984,5,796, 66,2014-01-10 22:08:34.241417,514,410,429, 73,2014-01-15 09:07:22.305266,833,873,181, 71,2014-01-15 07:51:06.705209,1,110,992, 6,2014-01-19 09:06:51.329594,438,227,219, 86,2014-01-13 15:57:48.113999,575,117,90, 38,2014-01-17 09:03:26.379662,526,867,806, 38,2014-01-13 09:48:15.893017,783,22,611, 38,2014-01-20 12:48:00.597622,383,983,492, 73,2014-01-19 03:24:25.102692,780,792,214, 38,2014-01-14 22:26:43.888061,66,555,845, 38,2014-01-11 10:42:07.0628,786,407,835, 71,2014-01-19 04:07:26.992479,372,160,942, 71,2014-01-17 01:16:11.472549,939,184,254, 38,2014-01-13 17:54:18.587298,190,909,648, 66,2014-01-12 04:38:01.101444,412,258,240, 71,2014-01-14 14:56:34.348802,403,331,203, 66,2014-01-20 18:02:55.076208,252,637,911, 86,2014-01-17 05:20:25.512373,673,800,659, 86,2014-01-11 03:56:01.297339,347,344,905, 6,2014-01-13 06:30:24.779994,192,205,501, 86,2014-01-16 23:17:36.092968,235,633,780, 66,2014-01-14 23:53:10.152511,293,684,517, 38,2014-01-20 14:18:24.416943,66,734,471, 73,2014-01-17 15:01:31.314759,166,402,820, 66,2014-01-13 13:36:53.369155,827,835,30, 38,2014-01-21 05:09:37.172233,258,213,38, 73,2014-01-17 01:01:14.461807,61,184,195, 38,2014-01-20 15:20:21.680358,881,770,170, 66,2014-01-15 18:24:05.743828,943,511,464, 73,2014-01-12 08:00:16.204603,516,985,697, 73,2014-01-14 03:37:17.342535,501,60,581, 6,2014-01-15 20:32:39.985245,603,378,130, 38,2014-01-16 21:07:38.509868,419,383,266, 66,2014-01-18 15:28:24.047745,16,694,879, 71,2014-01-13 23:39:31.315257,614,159,998, 86,2014-01-19 17:05:26.623867,418,959,419, 86,2014-01-14 18:31:50.427407,723,355,612, 86,2014-01-16 13:27:47.208274,141,784,682, 38,2014-01-18 23:53:27.221908,142,424,831, 86,2014-01-12 15:56:15.530098,821,90,296, 71,2014-01-20 15:54:31.096335,549,464,9, 71,2014-01-19 14:35:34.284708,949,814,423, 86,2014-01-21 00:03:42.522997,814,461,825, 38,2014-01-16 11:10:42.568884,240,716,26, 73,2014-01-12 02:29:52.872006,963,694,361, 73,2014-01-11 22:41:43.624441,10,821,577, 6,2014-01-20 06:07:16.918105,38,138,239, 38,2014-01-11 14:32:47.948214,135,515,119, 71,2014-01-18 08:02:09.136814,822,366,890, 6,2014-01-20 14:03:18.215444,891,606,958, 66,2014-01-14 01:03:12.728207,704,890,90, 71,2014-01-14 01:19:45.229781,858,368,947, 86,2014-01-14 07:56:28.368375,266,808,852, 71,2014-01-13 09:21:37.14515,303,289,330, 
86,2014-01-20 20:21:16.544541,998,694,676, 38,2014-01-17 01:33:21.73753,177,171,51, 86,2014-01-13 08:31:14.317383,676,264,664, 38,2014-01-16 15:22:31.006348,464,125,230, 71,2014-01-16 10:53:43.418307,724,647,892, 6,2014-01-18 06:55:20.06163,262,610,668, 66,2014-01-18 03:22:47.210951,517,132,918, 86,2014-01-19 18:31:00.760842,989,68,277, 86,2014-01-12 21:28:00.046875,988,84,939, 6,2014-01-13 18:30:25.720819,156,940,534, 38,2014-01-17 03:59:24.808586,948,204,543, 66,2014-01-16 01:07:31.362133,471,428,672, 38,2014-01-12 05:35:52.345105,140,902,139, 73,2014-01-21 02:31:15.236412,418,213,884, 86,2014-01-11 08:35:18.570407,877,827,55, 38,2014-01-20 09:29:19.567014,824,748,336, 38,2014-01-12 12:27:30.76651,318,885,364, 66,2014-01-21 04:29:49.667161,999,133,765, 71,2014-01-18 18:04:53.851722,753,695,931, 38,2014-01-12 06:31:13.289507,369,740,252, 73,2014-01-16 09:40:32.236046,340,719,109, 71,2014-01-20 00:34:46.723728,460,361,632, 66,2014-01-13 09:12:56.913948,665,64,79, 71,2014-01-15 09:47:49.154152,407,654,823, 66,2014-01-20 19:07:34.671942,202,426,360, 66,2014-01-20 21:15:06.051181,734,915,346, 6,2014-01-14 17:51:07.370015,445,366,722, 6,2014-01-13 14:10:47.40015,233,580,623, 71,2014-01-13 16:34:51.280962,722,13,211, 66,2014-01-18 05:47:35.73839,887,433,663, 73,2014-01-16 03:32:24.5453,294,726,614, 86,2014-01-13 06:56:07.825922,911,836,793, 71,2014-01-14 01:20:57.47592,124,251,887, 71,2014-01-18 16:03:38.862683,976,351,549, 71,2014-01-19 02:18:38.832271,696,325,438, 86,2014-01-19 16:14:40.894226,716,858,44, 38,2014-01-15 16:58:58.924314,696,946,697, 86,2014-01-15 23:41:26.043641,299,699,318, 71,2014-01-18 04:45:41.655068,978,522,824, 6,2014-01-17 18:26:58.971608,237,940,717, 66,2014-01-18 22:11:51.764459,522,358,913, 66,2014-01-14 02:45:06.464073,754,899,636, 38,2014-01-12 13:34:30.333742,777,644,932, 38,2014-01-20 07:47:44.109564,271,369,715, 66,2014-01-13 23:16:21.700485,490,479,159, 6,2014-01-13 02:09:48.904568,858,784,143, 71,2014-01-19 11:17:03.676577,346,435,278, 71,2014-01-15 15:43:52.466996,939,281,705, 86,2014-01-13 00:39:38.571729,150,259,584, 73,2014-01-16 23:21:57.528299,646,570,763, 38,2014-01-17 02:15:05.756923,646,983,605, 73,2014-01-18 14:20:10.807776,291,427,18, 71,2014-01-15 17:56:44.252027,266,125,195, 6,2014-01-19 15:28:02.670871,787,171,545, 86,2014-01-12 18:07:59.962347,483,924,906, 86,2014-01-15 07:04:18.92397,632,431,706, 66,2014-01-19 06:43:08.722051,46,729,433, 38,2014-01-16 15:59:07.332361,549,916,881, 38,2014-01-18 01:15:06.324119,601,460,456, 66,2014-01-11 17:18:00.003432,531,58,214, 38,2014-01-13 15:46:43.070751,846,754,26, 73,2014-01-12 22:47:30.869419,639,916,160, 71,2014-01-14 04:14:07.829355,666,361,845, 71,2014-01-16 21:07:40.546671,844,655,869, 71,2014-01-20 18:51:09.732522,68,618,421, 86,2014-01-12 00:32:46.661625,48,740,490, 38,2014-01-15 07:22:21.441316,783,517,320, 73,2014-01-15 05:50:08.656835,785,909,834, 86,2014-01-17 04:14:12.705266,866,115,408, 6,2014-01-12 06:08:03.096384,652,840,932, 86,2014-01-11 18:17:07.628444,839,160,827, 71,2014-01-14 20:26:04.470144,405,756,966, 6,2014-01-15 12:53:09.560457,467,934,103, 86,2014-01-13 11:51:37.962604,988,525,517, 66,2014-01-13 22:13:48.579707,922,518,835, 38,2014-01-18 16:09:31.260943,347,167,668, 73,2014-01-15 18:01:26.867171,739,431,109, 73,2014-01-12 03:30:52.256704,533,906,458, 71,2014-01-13 01:53:23.728358,769,703,194, 73,2014-01-17 22:41:05.4389,248,196,165, 86,2014-01-18 06:52:49.785003,429,502,641, 71,2014-01-19 08:08:29.48528,228,963,577, 66,2014-01-15 07:01:16.246676,465,218,33, 71,2014-01-12 
18:49:34.037449,69,451,844, 6,2014-01-17 17:36:32.156151,205,656,537, 38,2014-01-17 05:09:16.209023,789,789,551, 86,2014-01-17 05:53:52.961419,74,559,418, 86,2014-01-15 18:19:40.878621,361,70,626, 86,2014-01-12 15:08:23.541384,216,861,813, 86,2014-01-14 01:08:59.285957,129,110,968, 38,2014-01-16 15:37:40.882054,274,116,611, 73,2014-01-15 10:55:06.612135,907,403,32, 66,2014-01-16 03:56:30.155376,773,670,554, 73,2014-01-19 23:51:48.711408,21,421,153, 6,2014-01-11 02:02:47.159225,800,183,217, 73,2014-01-15 16:47:39.887898,909,119,648, 38,2014-01-21 04:24:35.373034,973,670,76, 38,2014-01-15 13:25:08.600542,266,709,934, 38,2014-01-20 02:37:48.545152,6,930,995, 6,2014-01-17 02:38:48.078719,838,687,221, 6,2014-01-16 23:33:11.696925,330,996,626, 38,2014-01-21 00:54:56.173595,57,315,171, 38,2014-01-21 03:04:52.548862,62,709,733, 73,2014-01-11 06:26:21.257801,978,692,258, 73,2014-01-13 06:46:34.136199,291,232,644, 6,2014-01-13 19:18:41.12857,442,715,189, 71,2014-01-19 02:35:52.518745,729,496,834, 73,2014-01-18 04:48:01.00337,369,568,70, 73,2014-01-15 02:49:33.385274,586,227,466, 71,2014-01-10 22:29:16.247102,889,2,762, 66,2014-01-14 21:29:06.442271,173,857,158, 71,2014-01-12 03:42:23.170696,143,132,395, 73,2014-01-19 10:37:45.732383,771,204,834, 73,2014-01-19 08:30:22.689366,327,784,530, 71,2014-01-14 02:31:57.208145,727,599,182, 66,2014-01-15 22:14:17.888953,650,843,760, 66,2014-01-15 07:39:38.898389,997,898,349, 86,2014-01-20 12:25:50.169564,710,224,279, 73,2014-01-20 20:33:58.767574,862,369,699, 73,2014-01-17 02:48:02.440193,387,17,513, 6,2014-01-13 07:34:49.455521,306,0,357, 66,2014-01-16 06:11:39.649628,386,775,796, 6,2014-01-11 07:43:09.052328,514,276,503, 38,2014-01-18 15:31:19.611317,464,928,980, 66,2014-01-15 00:03:28.361037,284,834,400, 71,2014-01-11 13:45:56.211554,987,176,617, 86,2014-01-13 02:18:59.499214,546,144,24, 73,2014-01-14 22:28:03.734071,81,450,808, 6,2014-01-16 07:11:04.812095,751,926,524, 38,2014-01-11 22:56:48.044366,339,569,439, 71,2014-01-21 05:06:51.812791,186,788,187, 66,2014-01-12 00:44:16.50944,654,448,94, 73,2014-01-11 17:51:44.21838,503,320,31, 86,2014-01-21 02:11:44.361652,389,479,143, 38,2014-01-12 11:10:37.767241,259,938,918, 66,2014-01-14 04:38:18.354579,616,189,226, 73,2014-01-13 15:30:25.490642,355,970,431, 6,2014-01-20 17:46:30.285985,432,141,780, 86,2014-01-11 03:26:19.358369,101,511,370, 6,2014-01-17 22:19:58.875916,802,650,355, 66,2014-01-20 20:15:46.533506,956,995,912, 66,2014-01-15 04:55:25.800639,68,520,624, 38,2014-01-19 06:02:22.046611,847,872,125, 73,2014-01-19 00:53:32.26589,67,446,288, 73,2014-01-13 07:25:48.490005,509,934,898, 73,2014-01-12 02:34:19.254756,800,593,398, 73,2014-01-13 17:07:50.154843,182,994,69, 6,2014-01-17 19:05:27.388813,756,546,173, 66,2014-01-11 09:00:09.42474,210,942,908, 38,2014-01-13 07:41:48.922417,475,881,352, 73,2014-01-13 15:53:29.829005,364,860,815, 66,2014-01-13 20:34:58.88026,274,483,694, 86,2014-01-18 17:53:28.572046,79,679,912, 6,2014-01-14 03:36:38.881334,948,459,966, 38,2014-01-11 06:06:18.491577,224,589,588, 71,2014-01-12 11:56:56.933082,932,158,399, 71,2014-01-14 21:22:35.093307,827,750,747, 66,2014-01-13 12:25:17.991209,671,176,683, 38,2014-01-16 14:25:00.667573,667,624,710, 73,2014-01-20 08:33:39.905402,907,94,529, 73,2014-01-14 15:22:06.035576,793,785,891, 73,2014-01-16 13:31:52.480363,307,506,779, 6,2014-01-11 03:17:56.414842,508,485,53, 66,2014-01-15 13:13:50.253955,196,456,764, 73,2014-01-16 09:43:36.842434,819,967,564, 6,2014-01-12 18:28:34.182082,582,193,536, 71,2014-01-18 21:52:08.608954,3,90,956, 
38,2014-01-19 05:14:02.333076,118,254,942, 38,2014-01-12 06:15:04.468486,427,151,710, 73,2014-01-19 05:18:27.967322,39,521,884, 71,2014-01-15 21:34:01.208992,549,812,382, 71,2014-01-11 20:30:51.001993,361,545,874, 66,2014-01-13 04:13:53.767961,111,817,872, 71,2014-01-13 21:36:23.256022,879,361,271, 66,2014-01-20 01:24:23.267883,468,910,430, 6,2014-01-15 15:39:42.258385,738,813,197, 86,2014-01-15 04:10:42.510778,440,773,995, 6,2014-01-12 12:32:13.423145,713,745,197, 86,2014-01-12 04:45:09.647198,995,19,447, 6,2014-01-15 17:10:51.935517,662,450,553, 73,2014-01-15 00:14:02.345561,275,212,98, 66,2014-01-15 00:38:39.476204,860,102,969, 86,2014-01-18 17:45:50.816197,666,845,310, 6,2014-01-12 12:07:30.918026,470,671,707, 66,2014-01-18 08:15:18.357539,22,440,996, 73,2014-01-19 03:52:09.307773,88,551,1, 73,2014-01-14 04:04:27.851108,434,592,307, 86,2014-01-11 23:37:53.450845,621,194,611, 66,2014-01-11 10:17:27.298983,968,83,806, 73,2014-01-19 22:29:28.518681,561,855,969, 71,2014-01-11 02:11:33.356667,686,665,686, 6,2014-01-15 05:39:33.334978,931,346,733, 71,2014-01-15 06:01:20.999464,643,243,206, 86,2014-01-11 09:29:29.771507,689,628,176, 73,2014-01-19 22:53:23.588933,490,978,895, 73,2014-01-20 19:44:57.841898,886,881,812, 71,2014-01-13 07:58:03.954009,693,312,870, 66,2014-01-17 14:45:32.197887,423,592,766, 86,2014-01-18 18:59:00.174974,208,347,612, 86,2014-01-14 18:13:08.422496,948,15,825, 73,2014-01-15 14:04:00.165209,215,41,300, 86,2014-01-13 10:33:01.383966,700,337,897, 73,2014-01-15 18:43:59.424489,620,810,592, 6,2014-01-17 22:17:53.933591,847,469,12, 73,2014-01-16 12:09:24.639987,62,219,984, 71,2014-01-14 14:08:22.692372,596,375,460, 38,2014-01-12 07:57:36.191976,302,731,696, 71,2014-01-20 20:20:07.151184,12,274,422, 6,2014-01-16 06:40:36.115517,910,242,428, 6,2014-01-13 16:42:45.839174,896,497,129, 73,2014-01-15 07:30:59.086701,965,711,896, 71,2014-01-20 10:54:38.461497,748,743,845, 66,2014-01-17 21:21:25.315378,81,940,745, 66,2014-01-12 19:16:49.902898,722,635,323, 71,2014-01-11 17:02:09.379524,56,644,403, 71,2014-01-14 23:36:43.672916,804,635,311, 73,2014-01-21 03:08:59.21109,529,476,987, 66,2014-01-15 01:06:37.230632,693,955,400, 86,2014-01-16 03:14:37.12418,699,287,363, 86,2014-01-11 07:26:26.510073,628,906,934, 6,2014-01-13 17:36:05.749732,944,318,249, 73,2014-01-16 09:26:10.480847,68,473,438, 66,2014-01-15 17:05:59.845051,287,260,833, 86,2014-01-18 03:37:26.749196,358,925,641, 6,2014-01-16 22:55:40.252354,926,490,854, 38,2014-01-14 09:59:23.434403,800,66,307, 86,2014-01-17 17:22:24.591094,234,109,121, 38,2014-01-19 10:53:44.206364,862,686,561, 38,2014-01-21 04:44:55.632291,367,721,893, 86,2014-01-15 06:21:24.766069,12,81,15, 38,2014-01-12 23:06:52.628859,90,678,19, 6,2014-01-15 12:48:55.79792,1,354,189, 71,2014-01-17 20:54:26.150033,507,469,489, 86,2014-01-17 21:50:52.053348,842,940,790, 66,2014-01-12 05:06:49.73151,800,349,653, 73,2014-01-13 03:03:50.79044,962,897,234, 6,2014-01-11 05:59:14.746139,657,310,844, 71,2014-01-13 17:04:25.923485,235,969,585, 6,2014-01-12 17:23:57.941624,463,431,980, 86,2014-01-16 16:39:50.861654,903,762,943, 73,2014-01-16 13:47:11.762659,840,845,232, 66,2014-01-17 04:54:57.028325,999,502,760, 71,2014-01-15 21:34:29.323152,811,843,981, 38,2014-01-13 05:08:37.078038,865,823,433, 73,2014-01-19 04:11:46.931642,778,49,247, 38,2014-01-16 18:36:38.702675,820,846,831, 71,2014-01-17 08:45:20.750953,195,84,808, 73,2014-01-19 01:20:46.142731,315,886,559, 86,2014-01-20 23:43:15.933727,381,463,67, 38,2014-01-12 05:59:57.875134,189,935,849, 38,2014-01-19 
12:47:12.652804,759,909,566, 86,2014-01-13 11:19:21.683459,976,467,378, 38,2014-01-17 19:26:08.35598,384,206,505, 6,2014-01-13 23:53:12.498274,805,616,640, 71,2014-01-20 18:56:48.432656,563,810,436, 6,2014-01-13 12:21:48.608754,369,502,816, 66,2014-01-17 13:52:35.932677,452,693,13, 66,2014-01-17 06:19:13.02375,450,37,28, 86,2014-01-11 17:15:32.815118,234,731,93, 73,2014-01-17 12:37:31.564969,675,253,853, 38,2014-01-11 06:40:37.789819,942,199,679, 71,2014-01-13 20:22:25.444396,114,379,95, 6,2014-01-11 19:26:27.362889,578,775,199, 6,2014-01-18 07:35:03.939852,180,335,114, 73,2014-01-20 22:13:17.497743,748,259,256, 6,2014-01-13 04:33:17.094398,25,441,295, 86,2014-01-20 14:38:54.730292,305,293,795, 73,2014-01-10 22:12:32.243881,261,499,659, 86,2014-01-16 01:37:43.017883,389,976,15, 66,2014-01-12 02:02:52.671916,858,668,346, 86,2014-01-16 18:52:23.105955,125,870,645, 86,2014-01-11 09:24:54.780543,246,546,433, 66,2014-01-18 10:57:49.700659,730,224,990, 71,2014-01-11 10:26:52.429107,189,852,275, 6,2014-01-13 18:33:31.85858,899,945,770, 71,2014-01-16 09:09:36.632301,161,722,466, 86,2014-01-17 09:03:31.132201,972,514,292, 71,2014-01-20 07:18:52.609953,608,620,853, 73,2014-01-14 04:30:22.775032,192,875,882, 21,2014-01-17 02:46:47.065928,69,318,247, 70,2014-01-11 07:18:50.485087,139,99,714, 21,2014-01-12 11:17:35.427836,942,356,819, 21,2014-01-19 18:06:08.749388,696,864,77, 21,2014-01-18 06:44:58.841486,500,531,695, 70,2014-01-15 15:10:47.92611,352,840,98, 34,2014-01-19 07:02:57.182044,273,901,776, 70,2014-01-20 23:06:47.450241,896,862,714, 21,2014-01-11 23:03:23.858789,98,430,163, 70,2014-01-15 07:22:10.205794,597,618,194, 21,2014-01-11 10:22:20.265358,44,671,816, 70,2014-01-18 15:40:55.424176,475,212,441, 34,2014-01-15 18:02:47.995611,267,684,785, 21,2014-01-14 10:44:45.709753,190,74,803, 21,2014-01-19 15:07:22.787064,493,824,593, 70,2014-01-12 19:37:51.935465,839,826,864, 34,2014-01-14 02:58:03.207916,719,894,597, 34,2014-01-11 13:40:39.881881,610,361,302, 70,2014-01-12 04:16:46.666175,375,839,190, 70,2014-01-14 05:10:35.451797,722,562,588, 21,2014-01-16 19:18:22.899765,189,371,487, 34,2014-01-13 10:19:39.338092,49,800,586, 21,2014-01-20 04:02:58.557752,219,638,84, 34,2014-01-17 08:43:17.680727,91,301,388, 34,2014-01-21 01:17:29.038751,459,61,527, 70,2014-01-20 18:29:50.987278,457,994,682, 70,2014-01-20 07:16:49.539307,844,333,891, 34,2014-01-16 04:27:05.671052,926,643,811, 34,2014-01-16 21:33:22.11948,159,330,644, 21,2014-01-19 08:35:42.14926,539,207,526, 70,2014-01-19 12:57:28.446084,467,336,507, 21,2014-01-12 18:20:09.185408,271,615,111, 34,2014-01-19 19:54:32.634767,681,619,925, 34,2014-01-21 04:15:03.874341,124,261,350, 70,2014-01-11 06:26:17.934796,136,753,918, 34,2014-01-16 20:39:31.476254,802,923,39, 34,2014-01-15 13:25:51.800451,152,811,504, 70,2014-01-19 17:29:15.11684,929,905,12, 34,2014-01-16 13:46:18.926495,488,878,659, 70,2014-01-16 16:29:15.659658,139,938,528, 34,2014-01-13 18:51:25.322633,143,90,108, 34,2014-01-17 04:08:39.191853,176,527,90, 21,2014-01-14 02:10:11.083834,637,696,699, 34,2014-01-18 16:54:13.318246,871,620,698, 34,2014-01-20 18:53:24.902026,963,516,181, 34,2014-01-12 11:17:33.870898,883,893,679, 70,2014-01-20 16:32:05.253709,245,469,555, 70,2014-01-13 15:51:28.109942,507,106,15, 21,2014-01-13 04:58:13.752779,712,851,312, 34,2014-01-11 14:48:51.919884,647,435,188, 34,2014-01-17 01:02:03.561739,910,878,801, 34,2014-01-19 04:16:36.652963,873,583,881, 21,2014-01-14 05:08:31.258395,76,541,861, 34,2014-01-15 23:05:02.119909,476,34,286, 70,2014-01-15 
06:59:54.33369,433,807,827, 21,2014-01-14 00:26:00.297146,630,408,666, 70,2014-01-15 11:34:53.107188,755,895,441, 70,2014-01-14 08:16:43.872997,183,81,874, 34,2014-01-19 08:53:05.968618,920,422,384, 34,2014-01-11 03:08:15.226668,808,335,599, 34,2014-01-12 10:52:26.022676,361,933,495, 70,2014-01-17 15:50:34.414702,764,735,593, 21,2014-01-13 01:28:24.412075,881,287,959, 34,2014-01-11 00:46:58.657444,30,927,80, 34,2014-01-17 14:05:38.289043,774,899,483, 21,2014-01-13 11:54:42.346872,652,798,26, 34,2014-01-17 01:26:30.133697,307,221,689, 21,2014-01-11 21:31:30.089912,139,424,311, 34,2014-01-11 23:23:57.463711,702,442,195, 21,2014-01-12 09:12:49.060192,446,225,604, 21,2014-01-17 18:00:45.749571,477,879,20, 34,2014-01-14 22:15:22.786345,291,65,889, 70,2014-01-18 17:21:28.252045,502,212,306, 21,2014-01-21 00:10:56.833405,527,56,218, 34,2014-01-12 09:09:36.10459,244,917,193, 34,2014-01-18 06:14:53.154071,668,168,435, 70,2014-01-12 05:28:30.704303,752,920,748, 34,2014-01-11 19:41:41.358299,382,12,207, 70,2014-01-21 02:06:21.264011,313,351,688, 21,2014-01-14 14:26:44.457501,756,198,921, 34,2014-01-12 14:30:33.278603,178,350,122, 34,2014-01-16 21:08:24.82617,476,226,4, 70,2014-01-12 12:43:21.110464,820,479,615, 34,2014-01-15 23:39:04.536998,229,656,711, 70,2014-01-11 14:13:26.946079,864,364,793, 70,2014-01-16 23:43:15.444153,313,520,382, 34,2014-01-19 04:05:04.834144,263,56,474, 34,2014-01-16 05:48:20.053267,302,577,839, 70,2014-01-20 11:59:59.31715,550,434,543, 34,2014-01-17 06:58:10.802761,411,181,645, 70,2014-01-16 12:56:35.279934,909,352,943, 21,2014-01-11 16:52:25.339826,445,675,222, 21,2014-01-13 16:48:45.217882,953,983,941, 70,2014-01-18 18:24:59.692009,320,905,605, 70,2014-01-11 21:39:23.99727,527,4,510, 34,2014-01-20 10:54:23.506925,685,126,661, 70,2014-01-11 00:19:42.038881,324,96,262, 70,2014-01-18 03:05:54.130967,825,383,78, 34,2014-01-11 02:25:53.596837,870,88,7, 34,2014-01-12 03:43:39.502593,820,688,707, 70,2014-01-19 16:18:43.191158,438,196,716, 34,2014-01-18 00:26:39.346408,383,553,909, 21,2014-01-16 05:59:02.288937,393,86,198, 34,2014-01-17 03:40:11.443623,875,603,529, 21,2014-01-17 18:37:36.179813,799,619,587, 34,2014-01-17 19:08:38.393527,818,207,493, 21,2014-01-14 03:55:04.597693,812,253,365, 21,2014-01-19 04:06:06.884535,879,968,273, 70,2014-01-18 18:50:19.751826,36,787,238, 34,2014-01-14 00:01:25.862124,925,983,338, 34,2014-01-12 12:32:51.342036,231,602,272, 70,2014-01-20 13:20:53.030429,181,620,300, 70,2014-01-20 01:09:50.688294,631,957,824, 21,2014-01-14 05:16:12.4525,760,203,872, 21,2014-01-15 06:59:57.567427,987,543,421, 34,2014-01-20 19:23:17.634373,756,499,416, 34,2014-01-20 08:59:27.896654,788,322,170, 34,2014-01-13 05:05:02.401991,105,172,717, 34,2014-01-15 19:11:37.68764,465,328,91, 70,2014-01-19 14:59:27.213803,605,90,338, 21,2014-01-19 16:03:13.204752,471,557,903, 21,2014-01-11 02:08:12.967574,973,864,27, 70,2014-01-20 11:51:52.554049,674,515,177, 70,2014-01-12 02:51:58.422635,902,555,774, 70,2014-01-19 00:33:12.659583,538,882,474, 70,2014-01-11 03:31:16.551318,531,827,931, 34,2014-01-11 07:46:21.929979,380,567,155, 21,2014-01-19 04:52:54.698465,372,9,260, 21,2014-01-18 10:37:10.682704,770,178,508, 34,2014-01-11 14:12:15.526816,828,965,157, 21,2014-01-20 12:36:34.201057,665,851,965, 70,2014-01-16 20:55:53.873862,995,776,292, 21,2014-01-18 18:38:54.873643,252,503,815, 34,2014-01-15 12:35:36.489994,322,951,533, 34,2014-01-12 18:36:05.317484,10,776,23, 21,2014-01-15 07:16:31.053455,725,444,254, 21,2014-01-12 01:44:14.883521,831,64,227, 21,2014-01-16 
02:31:09.915597,924,116,517, 21,2014-01-13 05:22:37.937991,266,643,704, 34,2014-01-20 00:34:34.635347,192,256,754, 21,2014-01-19 06:32:35.777721,2,140,903, 70,2014-01-14 21:55:29.280027,806,989,292, 34,2014-01-19 07:55:27.666196,857,149,57, 21,2014-01-18 01:42:26.466015,450,537,372, 21,2014-01-18 07:11:41.732528,201,690,565, 34,2014-01-13 08:55:25.233623,976,923,845, 21,2014-01-17 15:05:44.100386,33,276,411, 70,2014-01-17 10:11:09.629181,229,527,118, 34,2014-01-15 18:00:27.635615,894,133,375, 21,2014-01-12 04:17:21.788026,441,663,942, 70,2014-01-15 19:10:36.843404,825,866,76, 34,2014-01-14 04:03:40.840785,533,976,524, 21,2014-01-12 10:25:34.7556,261,391,196, 70,2014-01-15 01:02:29.397453,663,374,987, 70,2014-01-15 10:55:39.263421,257,509,865, 21,2014-01-20 14:58:47.415183,16,143,619, 70,2014-01-15 08:33:45.948771,755,820,213, 70,2014-01-15 22:42:01.193399,349,260,326, 34,2014-01-18 13:51:42.113647,281,552,139, 21,2014-01-12 13:10:56.631475,183,272,296, 70,2014-01-16 16:54:16.720634,321,820,849, 70,2014-01-17 20:28:16.314704,140,137,239, 34,2014-01-18 14:06:50.505756,961,744,243, 34,2014-01-14 05:33:11.594277,863,502,854, 70,2014-01-12 03:03:52.804699,92,14,583, 21,2014-01-20 12:42:55.823241,918,91,955, 21,2014-01-18 16:49:42.647732,774,99,909, 21,2014-01-13 08:48:07.68822,803,822,20, 70,2014-01-15 09:14:05.738838,803,909,347, 34,2014-01-21 02:12:20.586143,328,372,15, 21,2014-01-12 03:22:42.323567,733,984,570, 70,2014-01-13 09:46:41.516558,157,394,358, 34,2014-01-14 18:07:49.86617,474,123,107, 21,2014-01-20 15:18:09.989763,712,766,269, 21,2014-01-20 15:29:07.982573,801,130,684, 34,2014-01-11 19:19:31.599117,117,554,802, 70,2014-01-12 18:13:35.223805,936,820,73, 70,2014-01-17 00:34:52.082959,482,760,300, 70,2014-01-18 09:30:41.228299,802,565,605, 21,2014-01-17 16:14:02.85942,996,901,985, 70,2014-01-18 08:52:13.870985,132,665,271, 34,2014-01-12 22:41:18.071703,683,694,338, 21,2014-01-21 00:17:43.700205,135,929,246, 70,2014-01-19 23:17:48.626584,173,732,452, 21,2014-01-17 03:43:47.469155,484,994,671, 70,2014-01-15 05:13:22.964045,590,793,6, 21,2014-01-19 08:16:36.041768,733,596,340, 70,2014-01-11 06:17:33.418346,656,289,660, 70,2014-01-20 07:55:24.157444,88,252,687, 34,2014-01-16 16:08:18.155415,489,756,214, 70,2014-01-12 23:28:30.049821,504,697,956, 21,2014-01-15 18:49:40.878078,509,529,904, 34,2014-01-13 06:36:34.47012,602,311,552, 34,2014-01-20 17:35:20.555577,54,679,441, 21,2014-01-19 04:22:52.472355,549,363,262, 70,2014-01-14 13:40:27.274818,260,651,38, 34,2014-01-20 00:18:16.379237,563,110,895, 34,2014-01-16 15:12:35.120507,529,133,311, 70,2014-01-17 02:28:34.963038,298,375,936, 34,2014-01-14 03:32:22.118075,167,591,252, 34,2014-01-16 11:24:55.70665,186,633,251, 70,2014-01-18 09:51:17.287024,251,334,520, 21,2014-01-16 17:19:03.635053,830,614,356, 34,2014-01-20 09:32:45.57282,597,239,445, 70,2014-01-17 19:09:27.276786,440,533,541, 21,2014-01-16 02:48:11.617626,608,335,476, 21,2014-01-10 22:52:17.171936,928,438,134, 34,2014-01-19 17:23:02.500592,867,545,925, 34,2014-01-11 21:23:03.701004,314,262,334, 21,2014-01-18 12:22:58.400654,862,43,570, 34,2014-01-16 03:37:05.36043,207,786,533, 21,2014-01-19 10:15:17.57199,689,248,486, 70,2014-01-20 15:04:16.472356,372,119,514, 70,2014-01-15 21:54:49.060635,424,554,926, 34,2014-01-18 03:33:06.198994,703,425,886, 70,2014-01-16 12:48:03.941931,273,486,514, 70,2014-01-20 07:08:12.02468,201,423,673, 21,2014-01-16 05:49:42.240761,290,690,909, 21,2014-01-16 23:05:37.360277,55,928,63, 70,2014-01-19 09:03:36.182543,681,892,797, 70,2014-01-11 
15:58:00.396595,967,928,400, 21,2014-01-19 02:34:07.410098,751,248,669, 70,2014-01-13 21:53:17.060621,735,736,609, 21,2014-01-14 02:34:34.866715,900,30,223, 34,2014-01-18 14:09:27.966094,221,958,468, 34,2014-01-11 20:16:09.533396,18,412,678, 70,2014-01-17 20:15:02.141952,521,550,890, 21,2014-01-17 08:27:44.345331,210,21,598, 34,2014-01-17 15:28:44.653902,277,684,797, 34,2014-01-13 16:43:37.10499,939,962,676, 21,2014-01-20 16:00:06.463406,490,156,692, 34,2014-01-12 20:53:40.360552,184,728,600, 34,2014-01-10 20:34:54.392013,489,523,974, 70,2014-01-16 03:19:10.098459,716,60,32, 21,2014-01-12 00:26:25.933371,592,435,627, 21,2014-01-17 19:44:21.669218,394,338,776, 21,2014-01-11 00:07:21.716504,612,491,653, 34,2014-01-12 03:18:43.105727,841,272,426, 34,2014-01-16 07:07:24.169809,34,888,549, 34,2014-01-12 01:30:25.417507,682,226,670, 70,2014-01-19 19:41:41.506381,608,12,38, 21,2014-01-11 04:44:29.53024,318,221,25, 70,2014-01-20 15:45:42.989497,745,164,59, 21,2014-01-19 04:45:57.978737,236,798,346, 21,2014-01-16 06:39:18.591294,243,894,122, 21,2014-01-17 13:18:49.188491,223,452,266, 70,2014-01-14 11:34:01.920669,519,632,907, 21,2014-01-15 07:47:30.615974,493,312,606, 21,2014-01-12 13:08:31.429671,848,464,782, 21,2014-01-20 14:39:39.280946,200,4,392, 70,2014-01-13 10:51:06.798517,748,207,497, 21,2014-01-13 09:06:31.826266,599,537,835, 34,2014-01-18 11:13:46.691462,654,7,540, 70,2014-01-16 12:44:23.859557,31,438,476, 70,2014-01-16 15:41:06.692981,537,394,566, 21,2014-01-15 19:23:14.657556,301,938,449, 21,2014-01-17 13:00:33.392952,466,630,862, 70,2014-01-13 05:56:08.834932,705,795,958, 34,2014-01-11 21:50:59.002887,872,668,989, 21,2014-01-13 22:29:18.046854,929,26,440, 70,2014-01-16 02:39:45.940341,800,3,843, 21,2014-01-11 07:51:05.466294,547,367,48, 21,2014-01-15 23:22:58.407406,350,116,83, 21,2014-01-16 03:14:40.332354,713,311,331, 70,2014-01-16 15:10:15.565172,500,487,141, 21,2014-01-17 03:49:24.340778,964,785,809, 21,2014-01-12 16:59:02.001572,604,269,960, 34,2014-01-16 19:17:37.281676,873,715,138, 70,2014-01-18 11:08:07.446504,251,874,975, 34,2014-01-18 04:06:26.171801,742,889,82, 70,2014-01-18 00:48:02.699183,954,910,105, 34,2014-01-17 00:49:48.952885,957,352,283, 21,2014-01-18 12:50:55.702041,117,239,175, 70,2014-01-17 10:33:45.68868,747,136,795, 34,2014-01-14 23:35:46.932042,12,909,500, 34,2014-01-13 13:30:14.293334,725,320,412, 21,2014-01-13 17:52:34.877591,459,648,939, 34,2014-01-18 15:09:48.85271,225,96,606, 34,2014-01-18 01:17:44.909727,737,844,63, 21,2014-01-15 11:01:06.307261,225,848,133, 34,2014-01-17 23:49:28.134075,714,926,881, 21,2014-01-20 16:08:51.708244,591,321,982, 34,2014-01-18 00:07:38.133527,365,995,198, 49,2014-01-15 05:03:14.825538,219,297,369, 83,2014-01-15 22:53:15.567801,493,761,769, 39,2014-01-13 09:48:44.826508,824,342,658, 83,2014-01-20 04:26:29.483094,679,10,581, 49,2014-01-12 05:53:48.960753,282,49,475, 39,2014-01-15 19:44:53.661859,80,169,523, 39,2014-01-10 20:17:28.485981,548,443,543, 39,2014-01-15 08:23:07.007608,90,647,602, 49,2014-01-21 02:24:39.602201,844,668,351, 39,2014-01-11 08:08:33.952275,881,257,228, 49,2014-01-20 11:46:05.415015,617,302,770, 49,2014-01-15 23:39:19.934554,847,264,313, 39,2014-01-17 03:18:49.517447,271,332,714, 83,2014-01-16 09:35:29.756211,861,506,486, 83,2014-01-17 20:38:21.936545,720,662,618, 39,2014-01-12 16:36:26.799122,525,713,241, 49,2014-01-13 14:43:37.202716,99,514,258, 83,2014-01-14 18:44:48.108346,884,468,52, 39,2014-01-19 21:24:29.498306,718,181,836, 39,2014-01-19 19:33:26.15602,578,78,210, 49,2014-01-12 
01:35:43.810386,1,479,881, 49,2014-01-16 01:58:15.187405,462,621,304, 83,2014-01-13 13:09:13.088061,21,71,931, 83,2014-01-14 19:05:58.10414,991,68,50, 39,2014-01-18 23:50:50.064996,124,207,696, 49,2014-01-10 22:19:01.940772,35,85,75, 83,2014-01-11 14:23:43.013867,197,945,518, 39,2014-01-13 04:51:56.372257,955,105,194, 39,2014-01-18 02:08:30.074847,464,307,568, 83,2014-01-11 00:32:34.72211,317,42,235, 39,2014-01-20 08:59:34.505784,436,894,336, 49,2014-01-12 01:11:44.900384,28,599,203, 83,2014-01-16 03:25:50.29033,494,124,893, 39,2014-01-12 12:48:19.332293,144,796,746, 49,2014-01-10 23:38:14.383478,423,481,495, 83,2014-01-17 13:19:39.251083,788,83,871, 49,2014-01-17 12:33:12.994152,333,389,209, 39,2014-01-10 23:55:42.869459,898,932,150, 39,2014-01-11 15:42:46.25911,169,914,187, 83,2014-01-17 08:57:52.596352,73,394,367, 49,2014-01-11 12:04:16.822153,420,778,336, 83,2014-01-10 21:28:51.674125,983,636,168, 49,2014-01-12 02:37:12.531326,728,461,523, 83,2014-01-17 19:23:06.3396,492,813,445, 83,2014-01-16 11:04:21.430336,923,188,595, 83,2014-01-19 03:15:34.467871,685,661,685, 83,2014-01-19 15:59:33.139142,144,314,284, 83,2014-01-19 05:47:58.633471,561,362,657, 83,2014-01-12 16:00:22.576217,113,954,646, 49,2014-01-18 07:24:02.637448,629,395,427, 49,2014-01-17 19:21:24.789491,803,192,766, 39,2014-01-13 21:36:06.386603,517,133,944, 83,2014-01-13 03:22:17.824853,516,533,756, 39,2014-01-20 12:30:37.877552,896,163,914, 83,2014-01-17 20:42:04.490743,108,45,221, 83,2014-01-10 21:13:07.889849,446,542,408, 49,2014-01-20 14:49:39.818324,56,232,990, 49,2014-01-18 15:05:47.504609,312,726,532, 49,2014-01-13 06:05:04.262106,5,303,76, 49,2014-01-17 10:58:09.89359,652,128,425, 83,2014-01-18 19:38:22.22714,500,848,524, 83,2014-01-12 09:04:38.767891,105,580,908, 49,2014-01-18 16:09:54.793974,585,940,659, 49,2014-01-13 17:04:12.51747,636,532,472, 39,2014-01-14 01:52:58.100602,838,230,716, 83,2014-01-18 19:48:09.177872,406,720,338, 83,2014-01-20 10:23:51.768553,468,140,911, 49,2014-01-20 18:26:11.094754,928,220,712, 39,2014-01-18 23:43:52.047331,38,259,322, 83,2014-01-10 20:06:38.027664,92,399,36, 83,2014-01-16 21:24:03.691526,807,341,955, 39,2014-01-19 15:48:08.869484,32,29,307, 83,2014-01-10 21:35:29.702208,845,367,380, 39,2014-01-18 04:01:16.222851,975,941,393, 83,2014-01-16 05:11:15.209504,902,814,896, 49,2014-01-16 12:39:51.132545,704,355,448, 39,2014-01-16 01:16:50.690723,567,401,147, 39,2014-01-14 15:10:48.348646,129,903,99, 83,2014-01-14 12:27:49.766016,105,27,486, 83,2014-01-17 21:17:13.26694,112,412,224, 49,2014-01-11 16:34:50.986094,13,229,729, 39,2014-01-11 01:49:14.555506,608,697,496, 39,2014-01-20 22:53:19.653543,530,450,877, 39,2014-01-13 23:57:08.811366,430,707,775, 83,2014-01-20 18:19:52.433058,836,715,784, 83,2014-01-17 13:35:24.144704,791,271,262, 39,2014-01-14 01:10:16.701215,860,320,933, 39,2014-01-20 03:09:32.251801,238,331,777, 83,2014-01-14 22:41:11.649315,953,941,116, 49,2014-01-16 11:15:20.96374,408,311,894, 49,2014-01-16 08:07:42.145391,763,745,250, 83,2014-01-12 12:19:33.876453,330,688,61, 49,2014-01-18 00:19:59.73163,882,35,972, 83,2014-01-13 18:17:36.939366,261,922,340, 39,2014-01-15 09:23:46.393923,924,826,450, 49,2014-01-10 20:12:57.832232,252,300,302, 83,2014-01-11 08:05:46.117236,975,388,476, 83,2014-01-14 13:47:38.162895,107,987,680, 49,2014-01-20 18:39:08.927405,78,16,585, 49,2014-01-19 11:49:38.164986,241,800,74, 39,2014-01-14 13:54:16.190559,607,545,934, 49,2014-01-16 10:03:12.618931,717,429,484, 49,2014-01-17 21:37:47.034472,75,133,264, 83,2014-01-14 15:29:45.892768,747,806,135, 
83,2014-01-13 08:04:28.841782,107,404,20, 49,2014-01-12 20:49:02.243975,918,192,183, 83,2014-01-20 08:09:37.025731,846,633,585, 83,2014-01-18 13:21:19.532505,841,112,328, 39,2014-01-16 15:59:50.592621,32,475,287, 49,2014-01-13 14:37:26.791746,555,144,876, 83,2014-01-15 04:38:32.799445,532,443,445, 39,2014-01-17 12:34:41.579134,881,723,663, 39,2014-01-13 20:26:41.347671,302,396,759, 83,2014-01-14 21:31:52.452988,629,336,116, 39,2014-01-20 16:31:50.3905,636,844,583, 83,2014-01-13 08:46:33.780729,303,684,645, 83,2014-01-11 05:07:16.597693,856,515,957, 83,2014-01-13 11:42:07.091715,985,154,662, 39,2014-01-12 05:56:06.03253,302,674,961, 49,2014-01-15 07:48:28.247425,325,271,375, 49,2014-01-19 02:57:28.055454,70,45,883, 83,2014-01-17 18:03:48.177922,402,316,466, 39,2014-01-17 00:08:02.123878,901,327,362, 49,2014-01-15 21:17:27.787504,159,972,450, 39,2014-01-20 16:21:25.117706,101,578,247, 83,2014-01-11 03:31:48.518221,918,807,712, 83,2014-01-15 21:30:25.619736,973,248,792, 39,2014-01-21 04:27:11.234943,780,36,628, 83,2014-01-14 21:19:26.681116,411,1000,820, 49,2014-01-15 10:09:34.547142,544,652,613, 83,2014-01-19 10:16:49.399929,678,510,117, 49,2014-01-18 15:13:42.871675,496,60,900, 49,2014-01-21 00:12:47.166073,623,773,692, 39,2014-01-16 01:54:36.43482,840,998,872, 49,2014-01-12 00:43:28.764861,469,508,763, 39,2014-01-13 02:17:16.007855,830,376,724, 49,2014-01-18 02:43:38.678796,147,548,144, 83,2014-01-11 02:53:05.790593,23,356,669, 83,2014-01-20 19:38:35.54036,648,848,660, 49,2014-01-13 12:43:29.271835,984,318,223, 39,2014-01-13 21:30:32.582339,779,894,288, 49,2014-01-14 18:17:08.339805,265,100,173, 39,2014-01-20 05:18:10.85097,74,829,298, 39,2014-01-16 21:57:13.930009,635,789,694, 49,2014-01-18 19:49:00.793212,50,311,59, 49,2014-01-19 15:50:01.24147,435,484,181, 83,2014-01-19 10:43:47.711158,443,113,691, 39,2014-01-19 04:56:17.390904,3,10,191, 49,2014-01-11 21:32:08.333183,978,461,539, 39,2014-01-20 20:39:53.743689,285,107,998, 83,2014-01-13 06:44:45.63833,342,148,229, 39,2014-01-20 04:29:36.389057,529,485,915, 83,2014-01-17 08:43:41.922029,86,770,325, 49,2014-01-19 10:52:47.762627,4,616,397, 83,2014-01-14 19:47:04.176561,34,803,170, 83,2014-01-16 19:05:07.039734,53,218,562, 49,2014-01-19 18:24:36.280848,691,138,295, 83,2014-01-19 21:17:29.796298,384,214,856, 83,2014-01-16 17:32:18.274677,744,891,274, 49,2014-01-13 09:44:02.961964,100,717,135, 39,2014-01-14 01:27:04.343858,991,648,67, 83,2014-01-14 21:49:07.675026,165,662,287, 49,2014-01-21 04:57:45.833639,530,729,181, 49,2014-01-13 19:39:51.509931,841,43,738, 39,2014-01-20 03:43:44.109846,726,815,989, 83,2014-01-11 23:41:14.5985,753,952,961, 83,2014-01-16 01:57:07.518205,678,121,906, 49,2014-01-17 00:27:22.788642,47,562,786, 39,2014-01-12 06:34:20.389093,776,332,122, 83,2014-01-15 15:35:43.058567,697,365,366, 49,2014-01-19 17:10:52.060478,490,256,918, 49,2014-01-15 08:04:52.97185,106,744,378, 83,2014-01-19 13:52:51.398791,944,92,32, 49,2014-01-18 16:29:02.911448,59,538,464, 39,2014-01-11 00:02:06.901859,823,759,26, 49,2014-01-17 03:41:52.192003,393,307,845, 39,2014-01-17 02:19:04.152917,758,834,92, 49,2014-01-19 14:45:54.613018,361,855,196, 49,2014-01-15 02:38:09.582907,106,83,245, 83,2014-01-18 03:51:12.48652,390,91,441, 83,2014-01-19 05:25:48.357125,315,9,961, 83,2014-01-17 13:22:55.221656,90,631,464, 49,2014-01-17 02:20:48.875577,631,918,317, 49,2014-01-15 08:09:30.279154,297,111,828, 49,2014-01-15 18:15:42.984283,279,598,373, 39,2014-01-21 02:07:53.052139,565,597,651, 49,2014-01-10 21:14:37.318889,408,470,557, 83,2014-01-14 
06:40:19.265131,929,476,151, 39,2014-01-19 17:25:22.848435,791,468,171, 49,2014-01-16 18:46:55.593566,862,65,596, 39,2014-01-16 20:24:22.227095,147,654,604, 83,2014-01-12 12:52:27.192294,977,107,284, 83,2014-01-20 20:36:03.268593,944,713,298, 39,2014-01-16 19:22:08.060734,471,72,222, 49,2014-01-15 12:32:18.702644,214,807,178, 49,2014-01-19 18:19:47.378439,644,758,644, 49,2014-01-17 23:03:22.659653,771,860,735, 49,2014-01-20 18:29:26.22085,38,87,108, 39,2014-01-15 12:47:10.1675,239,274,80, 49,2014-01-19 09:37:43.048746,80,299,400, 49,2014-01-15 04:05:09.279416,545,660,359, 39,2014-01-13 23:58:02.227977,186,82,487, 83,2014-01-13 11:42:36.020596,492,111,501, 83,2014-01-13 11:58:00.678207,486,444,913, 83,2014-01-11 10:27:05.139844,672,402,640, 49,2014-01-13 15:44:42.922875,929,573,369, 49,2014-01-19 19:39:52.87021,498,662,665, 39,2014-01-17 16:46:09.292761,592,766,604, 83,2014-01-12 00:30:37.535893,6,567,694, 39,2014-01-13 16:18:02.453536,254,216,452, 49,2014-01-14 14:37:21.779281,376,300,513, 39,2014-01-20 09:56:25.893018,229,138,801, 49,2014-01-20 09:40:57.675193,769,17,455, 39,2014-01-20 20:58:10.654858,202,689,102, 39,2014-01-14 12:05:56.172171,323,961,614, 83,2014-01-14 21:56:40.659477,707,549,173, 83,2014-01-20 17:06:03.706996,887,30,845, 83,2014-01-14 13:20:33.491059,890,983,128, 49,2014-01-18 08:36:59.924608,565,972,544, 83,2014-01-19 04:31:26.55585,419,762,107, 39,2014-01-20 12:07:29.085045,603,958,670, 83,2014-01-13 23:01:22.151703,712,765,559, 39,2014-01-20 21:23:53.748145,459,807,687, 39,2014-01-20 02:43:32.353637,473,327,61, 83,2014-01-19 22:23:30.212437,626,702,574, 49,2014-01-15 03:56:12.450789,586,566,160, 83,2014-01-18 15:03:19.732495,956,144,941, 49,2014-01-16 15:26:52.872091,997,872,442, 39,2014-01-14 16:25:38.671639,706,790,763, 83,2014-01-12 21:50:29.899995,405,881,803, 83,2014-01-14 19:04:35.921255,426,571,1000, 83,2014-01-19 00:30:47.951473,824,515,196, 49,2014-01-16 01:48:32.127972,62,970,952, 39,2014-01-17 10:47:11.941851,328,311,93, 49,2014-01-11 06:28:48.62968,97,679,795, 83,2014-01-16 16:15:37.267816,279,39,129, 49,2014-01-20 06:31:54.864726,466,569,315, 83,2014-01-20 06:08:41.500309,386,623,698, 49,2014-01-13 03:01:46.560996,113,582,957, 39,2014-01-11 01:02:32.401038,849,773,972, 39,2014-01-12 16:26:43.953846,441,822,919, 49,2014-01-16 21:39:08.340277,773,329,628, 49,2014-01-20 14:58:58.294055,748,560,989, 39,2014-01-11 20:07:41.629039,591,980,955, 49,2014-01-16 12:37:18.995553,96,574,120, 39,2014-01-13 21:04:54.466645,163,495,81, 39,2014-01-15 22:04:22.288516,66,73,654, 49,2014-01-15 23:43:22.702549,108,675,355, 49,2014-01-17 14:25:27.957705,710,149,838, 83,2014-01-13 00:41:22.213124,936,891,588, 49,2014-01-13 22:14:49.2584,583,221,686, 83,2014-01-16 20:32:57.04275,57,529,839, 83,2014-01-16 03:42:44.364827,465,764,328, 39,2014-01-13 13:38:43.006964,509,162,173, 83,2014-01-15 17:16:29.396806,149,579,532, 49,2014-01-14 20:06:14.577265,904,984,460, 39,2014-01-17 21:34:55.457752,876,827,198, 39,2014-01-13 02:19:49.129301,221,298,823, 49,2014-01-20 15:33:07.449775,265,836,781, 83,2014-01-11 08:00:34.129808,493,322,502, 39,2014-01-15 04:10:19.029295,568,333,970, 39,2014-01-14 04:37:43.37103,331,891,200, 83,2014-01-19 12:31:22.081282,259,173,209, 83,2014-01-20 09:58:51.157267,60,899,969, 49,2014-01-20 19:24:55.312881,835,624,162, 83,2014-01-19 23:00:10.710962,943,476,911, 39,2014-01-15 20:14:28.425501,645,420,999, 49,2014-01-19 19:56:50.178026,526,195,24, 49,2014-01-18 23:08:52.211272,257,587,930, 39,2014-01-18 03:16:14.986497,818,978,311, 83,2014-01-20 
00:59:22.579064,310,156,76, 49,2014-01-20 19:35:36.165118,574,214,241, 83,2014-01-13 18:55:23.326774,335,376,906, 83,2014-01-19 09:58:20.873119,160,274,391, 39,2014-01-11 09:43:17.794157,944,556,660, 49,2014-01-19 11:32:42.322327,991,109,655, 49,2014-01-12 01:03:15.339764,752,901,19, 39,2014-01-16 11:47:40.082673,144,225,843, 83,2014-01-14 05:16:05.025296,389,336,310, 39,2014-01-18 19:28:43.297889,422,465,322, 39,2014-01-18 16:29:02.295797,745,668,213, 83,2014-01-17 07:30:54.283696,319,210,725, 39,2014-01-14 10:01:40.340638,3,393,997, 39,2014-01-13 14:11:46.660624,843,520,174, 83,2014-01-20 01:09:37.290659,230,522,806, 83,2014-01-19 07:18:09.737444,877,886,649, 39,2014-01-17 14:18:01.238308,358,217,141, 39,2014-01-16 16:44:32.74883,338,521,476, 49,2014-01-11 03:37:58.866745,213,217,676, 49,2014-01-16 23:51:08.688082,9,644,736, 49,2014-01-17 04:45:06.878639,286,413,96, 83,2014-01-15 11:48:17.89604,932,197,305, 49,2014-01-20 08:28:52.059113,432,694,935, 49,2014-01-15 11:16:28.959921,618,12,131, 83,2014-01-14 15:47:09.053726,326,454,557, 39,2014-01-19 21:53:47.371994,7,252,307, 83,2014-01-14 04:16:39.670883,633,629,893, 49,2014-01-19 16:01:37.479227,101,889,808, 83,2014-01-18 11:50:37.55002,258,743,183, 39,2014-01-11 21:25:31.882575,535,841,540, 83,2014-01-16 13:17:52.465725,371,946,824, 39,2014-01-17 06:50:00.129083,131,215,658, 39,2014-01-11 11:01:08.047693,339,937,572, 39,2014-01-19 12:13:15.792499,890,838,40, 49,2014-01-15 10:48:21.002621,557,853,498, 39,2014-01-12 00:44:25.84227,980,413,109, 39,2014-01-17 17:45:58.115245,183,112,903, 83,2014-01-16 15:51:36.342385,114,922,335, 39,2014-01-17 16:32:05.924943,947,332,920, 83,2014-01-21 03:02:03.140541,456,980,192, 83,2014-01-14 05:20:19.640273,251,176,809, 83,2014-01-15 03:01:08.221158,634,375,829, 83,2014-01-17 04:32:57.424237,741,836,643, 49,2014-01-17 19:21:59.98133,864,842,914, 49,2014-01-17 21:12:54.881782,940,102,198, 43,2014-01-15 23:42:34.715316,889,739,314, 56,2014-01-15 20:40:09.718774,331,679,13, 43,2014-01-14 05:30:56.120091,333,58,292, 13,2014-01-11 10:27:07.464146,155,857,480, 56,2014-01-16 04:18:08.585939,995,543,913, 13,2014-01-20 09:22:04.808173,793,117,161, 56,2014-01-17 19:12:14.342784,650,268,51, 43,2014-01-20 20:06:26.481978,765,883,303, 13,2014-01-19 11:50:56.867285,767,736,265, 56,2014-01-12 00:28:43.302706,865,853,799, 13,2014-01-14 05:53:35.535704,919,587,508, 43,2014-01-18 03:44:44.239699,566,154,658, 13,2014-01-15 08:45:22.97359,9,334,396, 56,2014-01-12 15:55:13.014931,440,906,782, 56,2014-01-15 09:35:21.789719,566,168,119, 56,2014-01-16 10:10:54.856165,197,254,148, 13,2014-01-18 09:13:05.480655,331,524,400, 56,2014-01-11 10:25:21.919221,231,708,119, 43,2014-01-17 01:12:02.904276,962,267,738, 43,2014-01-16 15:26:21.273573,553,948,916, 56,2014-01-16 01:13:42.921843,492,74,424, 13,2014-01-18 05:56:28.746546,748,971,31, 13,2014-01-13 03:12:19.388818,170,250,760, 13,2014-01-11 11:05:19.264227,461,846,185, 13,2014-01-14 16:28:34.671489,569,760,821, 56,2014-01-13 00:14:22.529359,503,527,144, 13,2014-01-14 20:25:38.90492,620,945,538, 56,2014-01-18 23:29:42.892646,914,892,589, 13,2014-01-19 08:47:19.953596,866,449,29, 56,2014-01-11 09:47:38.88625,733,957,277, 43,2014-01-15 14:42:37.774848,669,343,209, 56,2014-01-14 02:29:54.668911,490,410,817, 56,2014-01-16 10:27:48.605024,504,972,720, 13,2014-01-19 00:13:33.894939,41,642,693, 13,2014-01-14 16:57:02.133057,290,408,521, 13,2014-01-11 08:45:57.190963,616,64,535, 56,2014-01-18 03:35:38.70353,830,181,597, 56,2014-01-11 06:09:16.475843,151,26,326, 13,2014-01-10 
22:52:23.673359,744,840,294, 13,2014-01-16 09:26:35.570817,90,889,776, 13,2014-01-12 10:37:59.778548,477,687,973, 56,2014-01-14 08:45:59.209064,864,566,232, 56,2014-01-13 07:11:19.810515,95,15,80, 13,2014-01-16 23:23:22.752557,987,188,272, 13,2014-01-16 04:41:12.224414,283,460,662, 56,2014-01-17 20:46:41.600652,241,901,503, 56,2014-01-12 03:34:17.608722,641,977,675, 13,2014-01-13 07:54:17.705069,912,196,345, 56,2014-01-18 11:12:03.519874,449,505,272, 13,2014-01-18 08:46:20.512998,427,330,454, 43,2014-01-19 03:20:38.978642,913,798,526, 56,2014-01-13 06:25:46.441716,348,324,463, 13,2014-01-15 08:42:49.259544,173,860,894, 13,2014-01-11 00:32:58.367459,884,267,314, 43,2014-01-13 21:31:05.705944,679,82,597, 43,2014-01-19 05:11:23.931451,902,792,619, 43,2014-01-13 04:47:20.896819,327,614,265, 13,2014-01-17 21:56:44.610864,5,121,470, 56,2014-01-16 22:41:06.824097,628,923,336, 13,2014-01-11 07:34:40.850833,122,573,243, 13,2014-01-18 11:44:23.497114,257,927,842, 13,2014-01-11 07:23:44.598945,601,547,994, 56,2014-01-14 14:04:35.519746,366,434,589, 13,2014-01-13 16:12:12.102557,214,482,644, 13,2014-01-19 11:37:18.494302,910,769,560, 13,2014-01-18 11:01:37.653222,509,75,877, 13,2014-01-14 04:58:09.29352,122,998,67, 13,2014-01-16 09:12:57.197833,363,97,343, 43,2014-01-18 21:10:54.129065,904,80,13, 13,2014-01-14 07:50:32.966879,687,96,290, 13,2014-01-11 12:39:32.768649,508,617,455, 56,2014-01-20 11:48:53.908032,264,884,552, 43,2014-01-17 20:36:32.176361,320,730,480, 56,2014-01-13 23:50:52.579583,127,147,603, 43,2014-01-16 05:12:16.660589,633,524,538, 13,2014-01-12 19:17:44.400775,312,277,497, 43,2014-01-21 00:37:34.180236,753,700,418, 43,2014-01-17 12:46:34.26931,820,723,555, 43,2014-01-15 07:12:02.105843,835,152,255, 13,2014-01-18 05:49:37.700109,374,467,477, 13,2014-01-14 15:32:54.782727,251,141,998, 56,2014-01-13 04:32:41.084904,430,793,895, 13,2014-01-20 16:15:24.141826,3,296,449, 13,2014-01-19 04:15:44.042271,638,982,413, 13,2014-01-13 09:05:39.452364,331,496,300, 43,2014-01-13 07:46:29.847769,553,400,805, 43,2014-01-17 03:27:07.973722,464,634,300, 43,2014-01-15 17:53:00.349601,742,830,292, 13,2014-01-20 09:43:14.459052,459,63,68, 56,2014-01-12 20:08:14.798239,727,915,235, 56,2014-01-16 05:27:41.200434,792,826,16, 43,2014-01-17 15:27:37.956165,42,342,973, 56,2014-01-13 07:31:59.397183,96,308,267, 13,2014-01-19 23:32:16.72018,232,43,915, 13,2014-01-20 11:39:50.058722,669,572,475, 43,2014-01-11 13:09:17.891485,113,245,393, 56,2014-01-17 04:33:54.373402,798,637,431, 56,2014-01-13 10:37:59.35266,837,374,513, 43,2014-01-17 02:22:15.089319,345,990,137, 43,2014-01-14 19:44:48.502885,10,293,90, 56,2014-01-16 22:28:32.31954,33,286,535, 13,2014-01-17 19:01:47.858387,421,225,330, 43,2014-01-14 01:33:42.410918,313,831,342, 56,2014-01-13 13:05:04.495901,251,550,430, 43,2014-01-20 22:52:40.437971,841,452,969, 13,2014-01-19 10:45:59.071506,683,345,32, 43,2014-01-15 12:22:48.896675,76,167,432, 56,2014-01-20 17:30:14.618206,853,375,195, 56,2014-01-15 17:32:33.340817,29,345,112, 13,2014-01-19 23:34:51.002938,602,380,24, 56,2014-01-17 17:19:52.318317,65,962,487, 13,2014-01-19 13:05:28.123544,426,140,835, 56,2014-01-11 22:07:32.087842,50,372,493, 13,2014-01-17 03:35:16.460143,154,404,961, 13,2014-01-17 11:21:12.166234,463,983,131, 56,2014-01-14 11:13:11.540205,627,461,593, 56,2014-01-19 15:21:46.307911,342,524,535, 56,2014-01-13 08:48:20.139956,368,189,225, 43,2014-01-19 09:06:11.889805,691,825,609, 13,2014-01-18 19:05:00.766964,150,241,290, 13,2014-01-15 08:56:34.938194,220,640,660, 13,2014-01-14 
08:33:53.09024,810,337,525, 56,2014-01-15 04:32:38.723547,341,625,173, 43,2014-01-17 20:28:34.335796,469,546,498, 56,2014-01-13 02:06:09.810839,355,513,171, 43,2014-01-14 10:12:28.78227,144,301,869, 56,2014-01-18 13:37:52.227281,224,446,328, 43,2014-01-19 10:40:04.184241,756,374,244, 56,2014-01-17 00:50:28.13493,485,162,382, 13,2014-01-14 10:00:07.317019,25,411,782, 56,2014-01-13 00:24:52.687126,66,968,110, 43,2014-01-12 17:19:00.454889,906,266,629, 13,2014-01-10 23:01:55.175406,759,476,557, 13,2014-01-16 05:58:35.098043,787,916,325, 13,2014-01-15 10:24:04.95079,819,830,298, 56,2014-01-21 01:54:35.613377,881,67,900, 13,2014-01-14 10:44:34.169549,137,796,227, 56,2014-01-20 02:46:53.847883,107,278,459, 13,2014-01-20 13:24:50.231583,770,9,84, 56,2014-01-19 08:17:07.510785,388,385,584, 13,2014-01-18 20:21:44.850822,936,432,993, 13,2014-01-17 00:44:42.5499,383,243,363, 43,2014-01-17 15:22:35.634329,443,413,661, 43,2014-01-19 22:29:16.938664,842,837,560, 43,2014-01-12 22:19:59.010043,232,467,15, 56,2014-01-13 20:43:47.800563,764,422,612, 13,2014-01-13 03:42:28.478868,32,624,214, 43,2014-01-11 07:41:45.317954,646,631,876, 56,2014-01-16 09:32:07.940938,680,703,784, 43,2014-01-11 06:48:40.369093,887,378,742, 43,2014-01-19 06:46:46.085336,168,521,682, 13,2014-01-20 22:28:42.879131,623,503,239, 43,2014-01-14 19:22:33.459332,876,550,606, 43,2014-01-13 05:19:24.808883,17,368,696, 43,2014-01-17 12:57:17.214928,22,690,439, 13,2014-01-17 01:28:43.270171,501,857,521, 43,2014-01-16 19:31:53.591153,752,967,326, 43,2014-01-14 20:35:09.442628,234,469,971, 13,2014-01-15 06:08:47.45483,627,43,454, 56,2014-01-12 14:22:21.726502,44,854,946, 43,2014-01-18 10:35:16.759648,788,951,146, 13,2014-01-17 10:33:40.141956,328,290,146, 13,2014-01-14 11:41:22.18139,793,729,414, 13,2014-01-18 13:37:11.935054,673,271,765, 13,2014-01-12 10:32:15.240419,619,151,439, 56,2014-01-19 02:05:27.132179,733,331,821, 43,2014-01-18 09:31:47.548431,174,937,753, 13,2014-01-16 01:16:49.409968,477,336,107, 13,2014-01-17 22:52:20.980063,102,677,199, 56,2014-01-17 16:56:37.780015,558,262,668, 13,2014-01-14 03:33:56.920753,371,340,887, 56,2014-01-15 13:14:05.830884,680,785,881, 43,2014-01-13 11:41:20.329915,875,5,214, 56,2014-01-20 22:56:32.555501,398,102,212, 43,2014-01-14 05:43:22.769548,947,804,269, 43,2014-01-15 14:01:19.339957,830,632,476, 43,2014-01-13 13:40:20.356064,935,368,827, 13,2014-01-16 13:25:51.248834,216,6,182, 43,2014-01-16 01:43:04.657911,561,749,650, 13,2014-01-19 03:12:28.297001,523,396,776, 56,2014-01-17 00:14:31.617927,948,845,807, 56,2014-01-14 02:29:50.743248,465,846,813, 13,2014-01-18 19:41:11.176551,837,360,37, 43,2014-01-20 23:37:05.077259,682,82,850, 56,2014-01-16 11:49:15.552131,15,175,818, 13,2014-01-15 02:38:28.391479,328,239,372, 13,2014-01-16 19:05:48.347849,685,640,773, 56,2014-01-12 01:21:09.143703,55,42,743, 43,2014-01-19 03:13:37.834107,647,478,395, 43,2014-01-21 05:14:35.802679,77,626,22, 56,2014-01-13 19:43:30.870204,261,998,710, 13,2014-01-16 07:48:54.593755,269,925,726, 13,2014-01-17 09:48:15.944635,159,910,85, 13,2014-01-17 11:24:53.051595,534,688,200, 56,2014-01-13 15:26:06.528809,950,180,640, 56,2014-01-19 00:20:31.185054,858,549,32, 43,2014-01-15 07:30:20.183774,616,812,221, 13,2014-01-21 04:57:54.077241,692,295,790, 56,2014-01-13 19:37:20.595441,584,54,698, 43,2014-01-12 00:22:41.164256,107,881,573, 13,2014-01-17 15:54:31.857255,488,365,148, 56,2014-01-17 03:11:17.516194,460,305,130, 43,2014-01-16 17:36:46.995141,220,895,170, 43,2014-01-20 07:35:52.18717,249,486,185, 43,2014-01-16 
20:07:50.071694,838,86,787, 13,2014-01-20 03:20:09.765107,812,436,156, 43,2014-01-14 15:37:11.527127,107,258,58, 13,2014-01-19 13:48:10.428176,215,450,713, 56,2014-01-15 10:46:01.013942,735,572,686, 13,2014-01-19 21:20:16.185457,908,927,181, 13,2014-01-17 11:00:38.725177,941,194,282, 13,2014-01-11 05:00:32.631869,710,66,269, 43,2014-01-12 17:50:06.928704,475,136,872, 13,2014-01-15 00:41:49.901728,127,842,250, 43,2014-01-10 22:37:37.709128,384,888,765, 13,2014-01-18 09:39:22.481255,573,57,941, 43,2014-01-19 07:20:18.293627,935,991,602, 43,2014-01-16 21:43:26.056977,787,972,98, 56,2014-01-19 15:00:31.624957,866,171,659, 43,2014-01-17 04:33:56.127734,858,306,73, 56,2014-01-16 20:58:01.859656,615,125,467, 13,2014-01-12 04:44:02.495162,434,690,984, 13,2014-01-12 06:22:50.721908,935,451,821, 43,2014-01-13 00:46:17.804711,46,382,786, 43,2014-01-18 20:08:55.546756,424,705,836, 43,2014-01-15 01:48:57.250718,798,219,824, 13,2014-01-21 05:06:48.989766,530,917,877, 43,2014-01-12 21:39:15.73053,205,665,467, 43,2014-01-15 00:46:51.328377,535,462,198, 56,2014-01-13 18:44:09.585206,386,384,280, 56,2014-01-14 02:01:56.894786,474,728,933, 13,2014-01-11 10:41:23.185632,579,362,29, 43,2014-01-20 01:55:27.101819,573,192,457, 13,2014-01-19 23:38:43.890346,768,80,718, 43,2014-01-20 22:17:15.373221,978,88,663, 56,2014-01-15 16:03:17.173514,459,566,111, 56,2014-01-18 20:58:53.655453,52,521,521, 56,2014-01-14 07:54:26.900348,108,756,781, 43,2014-01-13 23:51:27.60169,741,675,840, 13,2014-01-13 01:44:54.669395,582,162,483, 56,2014-01-12 23:14:43.085805,626,640,924, 56,2014-01-20 14:52:06.327285,650,472,787, 13,2014-01-13 10:45:27.301264,951,829,339, 13,2014-01-14 21:04:50.014929,250,404,745, 43,2014-01-14 09:33:56.229015,105,828,646, 13,2014-01-13 13:23:05.010392,754,102,154, 43,2014-01-12 00:44:12.496184,957,527,915, 43,2014-01-12 10:54:14.522641,261,437,385, 56,2014-01-19 15:06:31.067788,482,305,523, 56,2014-01-20 19:44:44.121142,253,924,577, 56,2014-01-18 19:28:10.650795,80,581,817, 56,2014-01-15 06:04:32.927444,159,844,404, 13,2014-01-11 18:28:46.616303,774,586,82, 43,2014-01-20 05:51:01.372703,782,873,424, 13,2014-01-17 10:50:50.732155,424,135,1, 13,2014-01-19 18:37:42.16306,810,346,360, 56,2014-01-14 01:39:58.623421,450,138,779, 56,2014-01-17 09:57:39.72234,216,986,506, 43,2014-01-11 10:16:57.893591,499,134,941, 56,2014-01-18 06:26:49.951799,331,363,940, 56,2014-01-20 08:41:49.307546,633,170,242, 13,2014-01-14 16:18:54.788796,850,219,310, 56,2014-01-18 21:08:13.13785,677,80,912, 56,2014-01-19 04:37:16.409365,441,183,848, 43,2014-01-13 09:57:38.679141,366,131,425, 43,2014-01-18 13:25:28.511072,953,923,295, 13,2014-01-13 14:40:33.583297,321,877,974, 56,2014-01-11 00:56:32.334594,845,264,32, 56,2014-01-11 15:19:55.41142,661,123,698, 56,2014-01-16 18:32:01.184986,350,941,429, 56,2014-01-13 06:41:27.003989,832,228,147, 56,2014-01-13 18:34:38.497645,203,378,967, 56,2014-01-16 03:24:07.512272,25,452,429, 43,2014-01-15 21:26:54.305252,943,605,364, 13,2014-01-17 19:39:28.512574,418,181,150, 56,2014-01-19 16:58:03.741705,876,555,581, 43,2014-01-18 14:49:59.315644,746,528,835, 56,2014-01-19 00:23:41.008758,144,435,832, 56,2014-01-10 21:52:18.264347,28,89,884, 13,2014-01-16 23:56:30.383431,427,716,104, 13,2014-01-18 14:08:25.129899,997,491,775, 43,2014-01-18 21:20:28.915141,647,536,65, 56,2014-01-11 00:01:03.311294,131,544,206, 13,2014-01-19 12:37:11.746203,373,202,132, 43,2014-01-17 21:11:30.287844,409,518,423, 56,2014-01-17 14:51:54.043449,759,932,626, 13,2014-01-18 01:14:53.909264,634,971,553, 13,2014-01-21 
02:51:28.911684,137,514,434, 56,2014-01-13 18:49:33.765789,247,10,783, 56,2014-01-18 15:31:51.802853,601,329,185, 13,2014-01-18 03:18:18.863482,101,319,154, 56,2014-01-12 21:31:23.073753,584,80,897, 13,2014-01-12 01:50:46.591649,41,917,515, 13,2014-01-15 18:26:32.001332,128,486,22, 56,2014-01-10 20:08:39.483118,200,392,434, 13,2014-01-14 15:48:25.270791,721,740,843, 43,2014-01-13 01:52:00.512404,266,223,250, 13,2014-01-13 14:49:13.066414,823,452,501, 43,2014-01-14 20:44:57.605384,899,574,777, 13,2014-01-13 21:11:55.924244,803,983,744, 56,2014-01-19 13:21:14.251819,340,313,971, 13,2014-01-17 07:26:24.609373,848,547,454, 43,2014-01-16 19:46:34.421888,786,772,86, 56,2014-01-14 10:45:21.764091,638,189,505, 43,2014-01-11 22:53:18.914624,132,444,933, 56,2014-01-13 09:26:02.934461,370,505,711, 13,2014-01-12 21:43:25.505797,840,155,887, 43,2014-01-19 17:43:18.230686,325,945,193, 13,2014-01-11 03:49:43.94322,579,250,218, 56,2014-01-12 23:35:43.770562,424,58,841, 56,2014-01-15 11:39:48.614118,43,17,861, 43,2014-01-18 21:58:09.073119,211,879,518, 43,2014-01-21 00:56:12.685703,679,144,545, 47,2014-01-15 15:40:51.925411,967,0,999, 2,2014-01-17 04:35:20.819322,887,329,306, 32,2014-01-17 16:07:42.973966,337,81,97, 2,2014-01-12 00:32:45.96928,129,401,227, 32,2014-01-13 23:50:14.728585,911,768,411, 2,2014-01-17 12:59:11.88565,956,384,921, 2,2014-01-14 23:22:19.735069,193,123,13, 32,2014-01-11 09:22:06.531439,173,935,22, 32,2014-01-14 10:17:30.749132,105,903,643, 47,2014-01-17 00:53:42.808822,31,651,135, 2,2014-01-12 15:12:53.123507,450,479,302, 2,2014-01-19 08:44:02.750883,332,304,86, 47,2014-01-17 01:02:22.292358,851,810,83, 47,2014-01-16 11:01:18.394298,82,530,802, 32,2014-01-11 04:36:03.263289,773,820,31, 2,2014-01-19 19:51:35.358772,618,1000,888, 32,2014-01-20 11:46:16.000101,416,325,923, 32,2014-01-14 05:47:59.187531,581,213,16, 47,2014-01-18 03:12:49.610592,258,693,389, 2,2014-01-16 13:12:40.609473,537,834,534, 32,2014-01-20 05:34:33.609419,762,531,800, 32,2014-01-11 07:58:11.375101,985,792,641, 47,2014-01-17 16:05:59.524517,352,448,814, 32,2014-01-12 09:00:36.543881,838,90,185, 2,2014-01-13 09:41:36.880898,443,106,762, 2,2014-01-16 03:49:17.755203,416,988,414, 32,2014-01-12 16:50:20.4871,625,356,452, 2,2014-01-15 13:17:20.65146,498,337,604, 32,2014-01-20 19:29:06.369321,772,837,916, 32,2014-01-20 18:48:29.560219,956,589,440, 32,2014-01-15 08:13:33.337582,949,945,749, 32,2014-01-15 05:09:58.295152,568,394,766, 2,2014-01-16 17:23:50.379542,6,637,127, 32,2014-01-11 18:21:16.311548,148,920,388, 2,2014-01-16 09:42:44.264432,520,908,611, 2,2014-01-19 21:14:05.108126,569,97,496, 2,2014-01-18 11:20:28.197197,166,431,693, 2,2014-01-20 13:05:03.999501,503,755,218, 32,2014-01-20 10:36:11.639984,875,847,444, 47,2014-01-11 15:37:58.946329,510,814,961, 2,2014-01-16 07:58:46.808742,355,889,808, 47,2014-01-11 19:49:04.763492,207,764,917, 2,2014-01-20 04:22:01.697214,859,751,307, 47,2014-01-12 03:01:09.1011,551,149,874, 32,2014-01-17 10:50:23.15779,49,328,875, 2,2014-01-20 12:58:04.960921,318,525,932, 47,2014-01-21 02:52:44.459873,297,666,618, 32,2014-01-16 16:36:39.15789,812,115,358, 32,2014-01-13 12:46:04.148452,748,696,597, 47,2014-01-18 00:05:34.070883,647,567,609, 32,2014-01-11 23:49:19.767782,391,114,463, 2,2014-01-12 12:20:37.75829,494,756,703, 2,2014-01-18 12:03:45.445984,505,349,792, 47,2014-01-18 19:55:19.292299,123,980,647, 32,2014-01-14 01:21:14.302171,221,107,457, 2,2014-01-21 01:45:22.326882,453,283,11, 32,2014-01-13 17:44:37.047503,681,105,613, 2,2014-01-15 22:11:34.789691,320,714,150, 2,2014-01-15 
09:02:42.978761,327,607,158, 32,2014-01-13 07:13:43.416824,757,394,961, 32,2014-01-15 11:00:04.349909,458,218,890, 32,2014-01-19 21:16:16.316344,136,441,696, 2,2014-01-17 16:23:41.711976,988,45,642, 2,2014-01-10 22:23:54.72987,284,445,642, 47,2014-01-20 19:37:32.627891,771,691,497, 32,2014-01-12 20:06:25.976408,323,496,63, 32,2014-01-19 23:37:59.837996,946,745,544, 47,2014-01-18 00:58:00.825089,714,321,430, 2,2014-01-12 03:11:29.976328,173,631,822, 47,2014-01-19 04:14:11.47798,225,671,23, 47,2014-01-18 20:35:59.771418,337,57,926, 2,2014-01-17 15:10:16.785071,31,109,248, 2,2014-01-20 04:03:16.241472,279,950,565, 2,2014-01-17 18:58:01.46905,909,96,66, 2,2014-01-18 22:11:25.886171,371,184,444, 32,2014-01-16 08:53:39.39968,817,910,548, 2,2014-01-17 01:56:06.42997,210,710,54, 47,2014-01-18 19:04:10.346044,619,469,3, 32,2014-01-11 19:30:18.557571,399,328,412, 32,2014-01-19 18:42:10.578422,381,89,252, 2,2014-01-15 13:09:44.416927,369,770,695, 47,2014-01-12 23:19:38.325353,185,628,458, 2,2014-01-11 01:02:48.336711,36,131,46, 32,2014-01-12 19:13:29.862911,154,700,206, 2,2014-01-20 23:14:57.617652,663,861,602, 2,2014-01-14 06:24:02.639302,273,154,799, 32,2014-01-12 14:58:52.190212,730,157,168, 2,2014-01-13 10:59:34.665154,676,444,234, 2,2014-01-19 08:35:37.428992,344,499,92, 32,2014-01-17 04:01:35.168974,3,290,331, 47,2014-01-15 22:13:18.082398,479,513,34, 32,2014-01-13 13:35:41.778901,137,149,123, 47,2014-01-15 19:17:51.485317,735,968,736, 47,2014-01-12 08:36:59.794374,458,427,403, 47,2014-01-13 15:59:36.508771,857,573,992, 47,2014-01-15 08:55:24.113207,978,263,258, 2,2014-01-14 08:43:25.771201,380,268,412, 2,2014-01-12 09:37:36.347186,641,807,747, 47,2014-01-12 03:53:24.938296,656,555,523, 2,2014-01-15 15:54:55.74753,97,620,493, 47,2014-01-20 17:51:47.825167,804,122,656, 32,2014-01-20 04:29:24.709714,578,795,180, 32,2014-01-12 01:05:12.5326,910,518,942, 47,2014-01-19 15:55:04.067058,949,976,115, 2,2014-01-16 17:27:26.178763,243,687,929, 2,2014-01-20 03:16:38.418772,498,703,123, 32,2014-01-14 18:48:43.466739,721,526,320, 47,2014-01-12 13:23:32.608733,840,259,710, 47,2014-01-17 16:20:48.764816,37,691,367, 47,2014-01-15 18:19:02.024309,29,964,917, 2,2014-01-11 02:05:43.187574,393,107,968, 47,2014-01-11 23:30:33.18216,827,998,643, 47,2014-01-17 21:38:40.350081,475,772,299, 32,2014-01-11 07:08:31.524285,542,433,441, 47,2014-01-13 22:44:03.045072,272,290,811, 32,2014-01-17 14:53:37.967733,68,542,290, 2,2014-01-14 17:32:34.163587,491,651,131, 2,2014-01-15 17:42:55.235284,618,411,136, 32,2014-01-20 05:53:12.633306,709,540,883, 32,2014-01-12 20:08:11.592579,878,667,296, 32,2014-01-11 15:44:30.404257,200,292,935, 2,2014-01-14 22:06:30.715704,80,156,460, 2,2014-01-15 13:43:53.3719,984,626,566, 47,2014-01-16 15:02:21.889574,593,219,494, 32,2014-01-16 10:43:30.510078,592,826,503, 32,2014-01-18 09:43:29.880671,866,696,816, 47,2014-01-21 03:57:46.002782,246,706,57, 32,2014-01-19 23:26:56.281279,310,907,105, 32,2014-01-19 23:21:06.227856,229,565,46, 32,2014-01-12 01:51:10.941078,536,880,285, 32,2014-01-14 09:21:52.028808,22,202,390, 32,2014-01-19 11:12:54.053443,101,599,643, 47,2014-01-11 00:20:35.65121,742,813,710, 47,2014-01-15 14:27:04.561409,942,313,171, 2,2014-01-17 21:07:58.120501,14,110,908, 2,2014-01-16 21:48:01.829973,220,573,30, 47,2014-01-14 11:43:42.98018,891,370,538, 47,2014-01-11 09:56:41.58724,769,750,833, 32,2014-01-18 15:11:34.439125,635,412,248, 2,2014-01-10 22:04:31.745415,90,76,550, 32,2014-01-16 08:15:43.611549,681,349,171, 2,2014-01-18 21:17:17.626699,171,132,726, 2,2014-01-12 
01:35:04.927576,472,896,945, 2,2014-01-12 23:54:23.961629,862,574,758, 47,2014-01-19 08:25:49.150985,706,887,52, 47,2014-01-15 04:19:07.972648,331,623,183, 32,2014-01-19 18:48:01.929781,95,189,757, 47,2014-01-12 19:58:23.314992,120,324,373, 2,2014-01-20 02:02:03.208351,476,812,798, 47,2014-01-18 18:41:14.563089,696,248,359, 32,2014-01-14 20:06:34.907571,480,544,892, 32,2014-01-20 21:46:33.612608,674,317,507, 47,2014-01-12 10:47:45.278793,25,95,40, 47,2014-01-19 13:50:28.279471,849,836,829, 2,2014-01-16 06:48:55.502602,936,859,251, 2,2014-01-18 01:31:15.78887,108,822,916, 32,2014-01-16 17:33:58.16056,783,861,506, 32,2014-01-16 04:46:41.505383,506,115,981, 2,2014-01-16 18:58:12.070568,511,757,391, 47,2014-01-15 10:55:04.388417,335,757,164, 2,2014-01-17 10:37:52.446881,369,738,957, 32,2014-01-20 08:20:04.099376,954,335,200, 32,2014-01-13 16:07:58.441859,606,142,573, 32,2014-01-17 14:58:28.098092,915,945,478, 47,2014-01-14 16:47:08.661204,291,653,916, 32,2014-01-20 17:15:56.56236,286,542,736, 47,2014-01-13 06:46:29.928065,224,99,437, 47,2014-01-18 08:30:51.641384,156,226,808, 2,2014-01-10 21:12:38.149599,420,648,436, 32,2014-01-21 01:58:04.36719,15,21,221, 47,2014-01-18 10:35:23.3868,665,549,156, 32,2014-01-16 09:28:21.761567,441,594,481, 32,2014-01-18 17:15:21.99389,554,199,807, 32,2014-01-19 16:10:28.314375,57,519,163, 32,2014-01-18 13:22:45.723197,209,427,352, 2,2014-01-16 19:41:11.145294,794,992,85, 47,2014-01-13 14:29:36.287442,146,250,774, 47,2014-01-17 02:10:47.652978,850,837,18, 32,2014-01-18 19:39:34.460285,584,438,357, 2,2014-01-12 10:31:39.495793,83,849,320, 32,2014-01-14 14:52:02.216067,734,466,702, 47,2014-01-12 09:46:09.367856,502,898,274, 47,2014-01-12 02:18:13.108401,387,408,968, 47,2014-01-16 05:39:47.494858,745,858,703, 2,2014-01-21 03:36:37.647746,342,328,412, 2,2014-01-17 13:07:08.611002,488,487,766, 2,2014-01-13 01:11:03.284148,345,775,499, 2,2014-01-16 15:10:35.808306,73,432,38, 2,2014-01-12 11:53:50.116805,431,4,754, 2,2014-01-19 00:09:15.354716,442,877,287, 32,2014-01-10 20:05:40.196723,998,10,764, 32,2014-01-19 02:31:42.563686,213,8,358, 32,2014-01-18 02:29:19.454093,53,85,146, 2,2014-01-13 16:13:38.639002,303,752,876, 32,2014-01-15 11:30:10.661777,459,815,604, 2,2014-01-11 13:16:28.115297,883,128,35, 47,2014-01-13 03:29:35.201361,350,45,207, 47,2014-01-17 22:16:40.589842,111,687,864, 2,2014-01-19 01:47:19.757101,559,778,267, 47,2014-01-13 04:42:13.35138,961,310,481, 47,2014-01-17 18:14:44.957451,377,981,520, 2,2014-01-16 06:22:43.1439,739,58,436, 47,2014-01-18 18:10:35.112947,107,169,547, 47,2014-01-15 05:30:06.951341,280,460,397, 32,2014-01-14 16:33:11.458695,878,754,751, 47,2014-01-16 01:33:20.836144,965,344,913, 32,2014-01-21 05:11:18.096633,96,167,107, 47,2014-01-17 11:02:47.746136,452,804,836, 2,2014-01-11 21:44:08.489541,635,699,428, 47,2014-01-18 18:50:52.556919,100,623,545, 32,2014-01-19 01:34:27.241929,220,362,965, 32,2014-01-15 16:36:10.705607,875,788,446, 47,2014-01-20 08:37:01.925194,575,457,953, 47,2014-01-20 07:52:40.350329,498,816,87, 47,2014-01-21 02:15:58.200884,932,573,374, 2,2014-01-20 06:13:39.57294,770,728,493, 2,2014-01-16 14:59:48.961751,152,101,561, 32,2014-01-12 21:27:01.485032,835,567,190, 47,2014-01-15 15:24:15.381246,285,638,896, 32,2014-01-18 06:53:39.078556,332,177,737, 32,2014-01-21 01:36:16.839748,403,628,958, 32,2014-01-15 15:29:55.57797,199,643,79, 47,2014-01-16 03:25:21.642243,599,464,551, 2,2014-01-17 22:05:36.293841,699,449,993, 32,2014-01-18 11:43:34.216971,225,617,959, 47,2014-01-20 18:55:32.30402,341,667,111, 32,2014-01-18 
15:22:04.409556,229,188,135, 32,2014-01-20 19:13:09.418752,955,393,489, 32,2014-01-17 11:12:12.893862,42,868,264, 2,2014-01-16 11:09:24.166657,770,646,321, 47,2014-01-12 17:55:22.770131,337,395,143, 2,2014-01-13 23:26:57.851314,928,846,664, 32,2014-01-11 11:32:07.310558,1,481,83, 2,2014-01-20 16:05:57.883078,489,654,145, 32,2014-01-18 08:57:04.803073,63,809,59, 47,2014-01-15 08:05:18.769253,567,966,734, 2,2014-01-15 11:39:18.719222,903,343,909, 32,2014-01-18 08:08:22.899706,961,267,995, 32,2014-01-11 13:08:06.515389,590,293,554, 2,2014-01-16 13:23:27.208762,397,190,563, 2,2014-01-15 20:59:15.457045,129,775,471, 32,2014-01-19 18:42:33.757318,898,831,969, 47,2014-01-10 23:59:37.914789,854,694,375, 47,2014-01-14 23:36:17.382239,677,670,62, 32,2014-01-18 20:35:14.108067,961,978,81, 2,2014-01-10 20:15:36.115673,966,278,403, 2,2014-01-13 23:49:56.955179,459,529,816, 2,2014-01-14 05:35:03.069818,929,868,206, 32,2014-01-12 21:42:37.600705,168,895,658, 32,2014-01-18 19:14:12.336425,584,441,267, 32,2014-01-11 06:28:42.148374,533,32,754, 2,2014-01-12 17:18:54.440454,733,7,521, 2,2014-01-13 04:44:07.914395,268,769,800, 2,2014-01-16 13:54:03.790617,917,342,142, 47,2014-01-19 19:24:30.734295,459,67,547, 47,2014-01-20 20:27:42.131784,57,564,572, 32,2014-01-16 02:49:36.094636,906,747,653, 2,2014-01-17 04:46:35.143851,683,720,340, 2,2014-01-20 09:40:51.550536,545,156,557, 32,2014-01-12 08:01:48.988499,303,158,154, 2,2014-01-12 09:55:59.310509,616,101,502, 47,2014-01-11 21:36:14.320668,771,895,83, 47,2014-01-15 11:28:46.840231,851,93,121, 32,2014-01-13 01:28:06.621486,3,551,670, 2,2014-01-11 07:42:12.203747,529,211,713, 47,2014-01-12 14:25:51.643304,56,895,351, 32,2014-01-17 13:33:25.390739,11,986,929, 47,2014-01-15 23:21:30.922969,77,703,228, 2,2014-01-20 02:34:14.54301,172,412,422, 32,2014-01-18 06:41:31.906547,301,727,349, 32,2014-01-11 06:44:58.13215,521,354,37, 32,2014-01-14 17:33:30.000054,999,890,471, 32,2014-01-16 19:24:05.663865,619,660,108, 47,2014-01-11 10:44:36.046939,118,710,38, 32,2014-01-18 21:09:47.382293,523,183,888, 47,2014-01-14 09:59:19.771931,529,604,557, 47,2014-01-11 11:00:12.16261,914,15,175, 47,2014-01-11 14:59:44.337472,409,746,60, 2,2014-01-17 19:34:22.841748,460,604,64, 2,2014-01-13 12:42:49.763317,545,916,190, 32,2014-01-19 14:13:56.673897,817,881,758, 32,2014-01-18 06:03:04.990122,849,400,279, 2,2014-01-15 10:01:44.20377,430,984,2, 2,2014-01-11 12:58:04.58871,104,593,384, 2,2014-01-13 13:57:08.780739,619,619,41, 2,2014-01-13 23:26:14.938065,646,295,590, 32,2014-01-11 03:25:46.720495,776,979,733, 47,2014-01-18 20:46:44.875375,304,421,877, 32,2014-01-20 08:12:50.082334,147,489,801, 32,2014-01-20 17:06:38.271031,723,156,156, 2,2014-01-20 08:48:33.864293,259,628,343, 32,2014-01-11 12:08:49.392844,815,342,535, 2,2014-01-11 08:42:52.591699,161,416,286, 47,2014-01-14 14:17:20.704524,740,965,285, 2,2014-01-13 17:36:56.01433,626,915,178, 2,2014-01-11 20:25:04.795446,957,463,309, 47,2014-01-16 08:43:12.347828,692,300,713, 32,2014-01-20 11:10:21.405488,348,413,371, 32,2014-01-16 23:46:35.718414,390,11,506, 47,2014-01-15 05:17:26.890839,954,170,586, 47,2014-01-17 11:51:53.312034,459,129,7, 2,2014-01-17 10:31:33.850564,917,984,249, 47,2014-01-19 02:50:56.891311,385,745,848, 47,2014-01-13 01:15:58.975899,423,867,883, 47,2014-01-18 01:16:09.897502,403,163,605, 47,2014-01-16 18:00:44.273605,896,843,191, 47,2014-01-16 15:15:18.748249,712,492,344, 32,2014-01-18 16:16:22.060113,672,705,597, 47,2014-01-17 13:00:28.611496,182,69,551, 2,2014-01-13 04:49:41.589997,897,756,119, 2,2014-01-10 
22:59:11.823429,559,229,733, 2,2014-01-15 21:14:25.285393,415,848,794, 2,2014-01-20 14:52:46.580538,691,718,653, 32,2014-01-15 13:00:56.027199,678,159,613, 2,2014-01-16 14:12:29.874103,657,177,191, 47,2014-01-12 22:49:55.361276,430,7,15, 2,2014-01-18 16:27:10.965682,905,752,6, 32,2014-01-16 21:38:16.594598,494,292,349, 32,2014-01-20 23:36:40.23707,881,386,748, 95,2014-01-17 18:40:01.048018,642,799,965, 27,2014-01-16 08:44:54.865628,433,322,559, 27,2014-01-20 02:25:14.101363,379,502,852, 27,2014-01-18 10:48:50.440861,90,203,122, 95,2014-01-16 21:27:47.457327,974,214,858, 27,2014-01-13 10:42:34.805887,700,563,32, 95,2014-01-10 22:25:46.455609,823,718,875, 95,2014-01-17 21:52:52.252773,102,612,476, 95,2014-01-18 23:25:47.153715,635,246,258, 27,2014-01-20 13:36:07.861097,124,191,819, 27,2014-01-13 15:39:27.971606,811,597,364, 27,2014-01-12 22:43:14.044553,163,5,292, 95,2014-01-16 19:28:01.173131,892,350,369, 27,2014-01-20 06:11:01.82217,198,207,591, 27,2014-01-21 05:34:10.935865,912,605,989, 27,2014-01-19 00:44:00.14903,725,533,375, 27,2014-01-17 01:27:11.720091,577,905,171, 95,2014-01-16 17:34:55.209889,513,139,513, 27,2014-01-14 09:59:18.897279,447,198,34, 95,2014-01-14 11:43:33.780203,34,479,520, 95,2014-01-13 00:35:23.821384,673,941,68, 27,2014-01-16 18:49:00.487695,83,576,652, 27,2014-01-14 14:42:45.604052,468,407,206, 95,2014-01-18 01:49:49.106777,567,773,363, 27,2014-01-16 03:41:47.068233,905,405,592, 27,2014-01-19 07:43:41.631251,795,928,328, 27,2014-01-13 10:02:18.981299,645,648,27, 27,2014-01-18 06:31:42.429508,402,648,98, 95,2014-01-16 18:10:52.596933,688,470,835, 95,2014-01-19 11:40:35.575896,829,354,181, 95,2014-01-18 00:08:22.666579,238,26,452, 95,2014-01-13 06:50:53.64495,214,797,587, 27,2014-01-14 14:25:30.441944,900,874,600, 95,2014-01-16 20:33:36.767942,342,2,948, 95,2014-01-20 21:39:44.08623,133,768,826, 95,2014-01-20 15:53:17.899271,530,711,572, 95,2014-01-19 11:16:11.573829,114,748,91, 27,2014-01-21 00:05:30.541841,825,790,228, 95,2014-01-17 07:46:10.152463,958,422,18, 95,2014-01-17 04:41:58.727544,40,993,368, 27,2014-01-20 07:41:38.402938,985,159,944, 95,2014-01-20 03:25:38.124069,919,447,542, 95,2014-01-19 07:25:12.772517,958,493,494, 95,2014-01-15 21:09:39.576069,632,98,481, 27,2014-01-19 03:36:39.946239,12,929,445, 27,2014-01-19 06:59:23.708381,805,339,503, 95,2014-01-13 15:53:39.7251,943,24,933, 27,2014-01-14 23:03:51.666329,129,956,269, 95,2014-01-14 18:34:18.91827,817,257,744, 27,2014-01-17 05:52:58.622798,118,436,368, 27,2014-01-18 14:47:25.446951,348,573,247, 27,2014-01-16 23:09:42.739654,943,882,766, 95,2014-01-12 18:41:59.110493,442,448,96, 95,2014-01-11 23:30:11.051003,161,932,415, 95,2014-01-13 18:59:31.84685,802,833,729, 27,2014-01-18 02:23:46.178726,524,266,698, 27,2014-01-20 11:13:52.682253,12,355,425, 95,2014-01-16 09:01:50.828149,95,962,21, 95,2014-01-15 02:55:28.608234,92,223,821, 95,2014-01-15 23:24:45.279605,201,958,38, 27,2014-01-14 14:42:26.404045,973,486,587, 27,2014-01-11 21:03:51.275232,300,952,149, 27,2014-01-18 10:15:38.924556,298,400,890, 27,2014-01-18 09:07:56.845989,644,748,457, 95,2014-01-17 21:37:28.043174,973,356,780, 27,2014-01-18 01:55:23.010786,326,670,390, 27,2014-01-17 19:01:14.745678,330,888,811, 27,2014-01-16 02:53:39.617422,796,490,665, 95,2014-01-17 20:00:53.552626,300,830,369, 95,2014-01-13 20:47:24.898142,212,375,109, 95,2014-01-12 01:35:38.344967,421,664,879, 95,2014-01-16 21:42:31.955564,202,252,903, 27,2014-01-12 18:13:03.022209,429,710,943, 95,2014-01-20 13:00:51.117482,409,81,986, 95,2014-01-11 12:52:11.531633,938,463,88, 
27,2014-01-21 01:49:42.968448,776,739,144, 95,2014-01-18 14:00:14.826282,514,920,28, 95,2014-01-14 08:45:51.257152,83,735,752, 27,2014-01-14 18:53:34.635196,497,700,872, 95,2014-01-12 02:34:33.744552,154,941,84, 27,2014-01-20 18:38:49.87995,363,290,329, 27,2014-01-12 03:41:00.082147,759,757,183, 95,2014-01-18 05:44:16.484625,174,425,284, 27,2014-01-12 07:20:48.990443,375,853,581, 95,2014-01-13 07:11:11.13315,394,120,808, 95,2014-01-21 04:43:48.331476,76,126,898, 95,2014-01-19 13:44:35.169169,800,341,811, 95,2014-01-12 12:25:03.815403,279,272,701, 27,2014-01-16 07:45:39.159625,963,870,388, 95,2014-01-13 10:40:03.777403,392,982,994, 95,2014-01-17 15:49:49.095008,784,598,200, 27,2014-01-20 02:28:05.56367,604,416,721, 27,2014-01-14 11:43:55.052635,43,791,59, 95,2014-01-14 20:05:28.019983,951,422,676, 95,2014-01-17 05:36:02.410077,449,876,642, 95,2014-01-11 03:21:23.096229,65,608,170, 95,2014-01-11 16:00:51.03077,145,954,67, 27,2014-01-13 18:37:17.155756,18,676,574, 95,2014-01-16 10:15:02.713651,987,940,177, 27,2014-01-18 16:01:44.583816,139,399,571, 95,2014-01-16 19:24:42.053897,331,301,205, 95,2014-01-17 15:50:41.058618,27,415,149, 95,2014-01-14 07:44:16.539379,269,174,575, 95,2014-01-18 17:37:45.076107,430,227,598, 95,2014-01-16 22:51:32.176519,64,886,156, 27,2014-01-15 00:36:28.071431,977,836,152, 95,2014-01-18 13:27:28.044554,817,872,609, 95,2014-01-14 06:51:47.002801,385,471,356, 27,2014-01-18 13:22:19.328583,89,961,284, 95,2014-01-12 02:21:02.67975,537,528,668, 27,2014-01-15 13:26:20.747354,25,435,74, 95,2014-01-18 02:01:09.208533,73,718,572, 95,2014-01-13 10:02:02.761897,403,520,505, 27,2014-01-12 13:10:37.231979,140,819,755, 95,2014-01-19 13:21:58.198976,634,991,65, 95,2014-01-15 21:13:13.895047,884,40,287, 95,2014-01-12 11:54:25.563455,330,59,473, 27,2014-01-17 21:06:33.368144,956,806,464, 27,2014-01-17 13:38:17.710869,439,537,745, 27,2014-01-17 23:40:04.723079,172,133,994, 27,2014-01-20 11:46:37.145547,649,550,374, 27,2014-01-13 23:28:06.805878,610,974,319, 27,2014-01-16 20:08:10.287169,948,996,878, 27,2014-01-13 17:30:32.198602,284,543,675, 95,2014-01-17 23:33:34.825861,751,834,281, 27,2014-01-12 19:44:12.697246,189,465,467, 27,2014-01-14 00:51:55.29483,337,567,358, 27,2014-01-18 19:34:25.857051,240,158,683, 95,2014-01-15 18:21:29.853002,0,425,528, 95,2014-01-19 15:06:58.008481,925,145,93, 27,2014-01-16 05:36:10.440866,862,983,268, 27,2014-01-11 07:46:11.906899,694,237,710, 27,2014-01-16 00:57:39.067099,155,20,640, 27,2014-01-19 17:20:26.980246,595,814,737, 27,2014-01-19 05:23:56.983005,611,298,410, 27,2014-01-11 17:49:11.243618,761,356,624, 27,2014-01-13 11:56:55.051677,475,806,595, 27,2014-01-16 12:51:25.02756,625,377,925, 27,2014-01-15 04:40:58.24642,510,185,987, 27,2014-01-21 05:19:14.38026,19,19,205, 95,2014-01-17 19:12:27.707729,162,44,446, 27,2014-01-19 22:07:18.994193,446,638,657, 95,2014-01-18 01:20:23.588794,304,702,988, 27,2014-01-20 09:14:30.469626,60,713,882, 27,2014-01-11 05:17:56.226172,275,111,73, 95,2014-01-16 08:42:21.787769,421,797,140, 95,2014-01-15 00:27:44.365093,739,93,765, 95,2014-01-12 21:12:21.790046,418,55,378, 95,2014-01-12 23:48:55.155913,106,631,997, 27,2014-01-11 08:06:02.075961,906,677,252, 27,2014-01-20 00:52:26.513125,494,417,719, 27,2014-01-12 05:35:32.301879,748,134,320, 27,2014-01-14 11:34:08.88184,286,908,856, 95,2014-01-15 15:00:36.800294,61,356,328, 95,2014-01-15 03:06:04.50048,942,742,154, 27,2014-01-11 05:07:43.70812,172,866,902, 27,2014-01-17 14:44:49.497539,811,180,25, 95,2014-01-18 07:57:59.795311,469,885,396, 27,2014-01-19 
04:42:09.565171,518,521,946, 95,2014-01-12 03:06:19.350541,495,776,208, 27,2014-01-16 17:04:57.803793,789,337,827, 27,2014-01-14 04:18:20.006036,28,108,354, 27,2014-01-12 14:52:31.25744,681,980,994, 95,2014-01-11 12:02:36.870892,686,97,589, 95,2014-01-12 15:38:46.986282,740,670,986, 95,2014-01-21 00:16:28.240446,136,731,138, 27,2014-01-12 09:51:48.114929,96,703,220, 95,2014-01-15 07:35:42.038378,46,302,154, 95,2014-01-16 07:07:53.268425,683,257,853, 27,2014-01-16 18:32:46.361349,827,318,226, 95,2014-01-15 06:54:56.418639,23,285,584, 95,2014-01-12 20:20:20.976154,200,703,178, 27,2014-01-15 10:40:05.355541,14,907,374, 95,2014-01-12 02:15:20.007851,441,934,893, 27,2014-01-11 23:34:51.44578,252,307,993, 27,2014-01-15 19:58:01.582131,347,613,205, 95,2014-01-17 14:57:41.79562,333,162,722, 95,2014-01-16 04:02:35.810873,725,390,19, 27,2014-01-17 21:10:23.372177,802,23,791, 95,2014-01-19 18:46:36.951952,143,597,272, 27,2014-01-16 16:08:37.886834,239,249,641, 27,2014-01-16 16:02:49.885302,531,277,904, 27,2014-01-21 04:22:09.253831,576,237,269, 27,2014-01-20 07:42:46.769093,667,928,118, 95,2014-01-11 01:03:26.686015,382,14,141, 95,2014-01-15 01:28:13.754311,898,45,851, 27,2014-01-20 16:50:30.477212,433,811,84, 27,2014-01-17 19:48:16.183553,626,966,909, 95,2014-01-12 03:26:13.549622,186,241,50, 95,2014-01-18 15:32:40.042382,262,320,809, 95,2014-01-19 02:54:35.534514,96,471,542, 95,2014-01-18 00:31:11.353415,31,908,387, 27,2014-01-11 13:51:00.048419,648,815,825, 95,2014-01-20 21:47:06.791953,16,688,771, 27,2014-01-18 16:33:48.224307,260,813,704, 95,2014-01-13 09:29:47.034701,228,902,198, 95,2014-01-20 16:03:35.032818,15,829,629, 95,2014-01-20 06:25:36.339236,620,439,750, 27,2014-01-17 21:05:29.073079,275,580,748, 95,2014-01-15 17:11:28.301242,882,89,475, 27,2014-01-15 18:58:22.701004,144,356,60, 95,2014-01-11 22:00:25.492136,30,191,221, 58,2014-01-17 17:31:49.277396,887,708,445, 58,2014-01-20 09:38:28.056545,604,242,60, 58,2014-01-13 04:15:45.499988,620,453,143, 58,2014-01-18 21:06:40.723177,884,661,293, 58,2014-01-14 23:36:29.638676,230,486,461, 58,2014-01-19 23:13:27.295607,375,993,116, 58,2014-01-13 19:09:16.534469,558,91,720, 58,2014-01-11 14:46:53.010853,97,484,685, 58,2014-01-18 12:00:04.247559,523,478,629, 58,2014-01-19 15:17:54.421303,870,951,316, 58,2014-01-17 10:49:42.896575,272,66,755, 58,2014-01-18 10:22:13.501391,900,597,49, 58,2014-01-18 17:00:41.190396,113,469,107, 58,2014-01-17 15:53:09.582589,588,412,279, 58,2014-01-12 05:50:27.255702,264,390,825, 58,2014-01-18 03:51:11.667608,38,366,19, 58,2014-01-14 05:41:25.766143,963,398,688, 58,2014-01-13 13:16:40.805324,316,631,470, 58,2014-01-15 13:23:51.709991,191,349,769, 58,2014-01-12 02:36:01.300655,986,257,585, 58,2014-01-20 17:47:52.158739,950,819,238, 58,2014-01-16 07:14:51.758409,451,541,717, 58,2014-01-11 18:23:08.093027,610,592,139, 58,2014-01-18 04:21:40.383465,4,448,832, 58,2014-01-18 20:44:38.79353,615,133,752, 58,2014-01-11 04:26:43.125845,266,675,641, 58,2014-01-17 04:47:16.722701,95,202,976, 58,2014-01-15 11:50:07.866609,596,269,957, 58,2014-01-16 01:38:11.427087,508,945,494, 58,2014-01-11 17:45:39.423705,511,998,95, 58,2014-01-16 13:50:33.358747,162,129,693, 58,2014-01-12 13:10:00.704483,340,756,945, 58,2014-01-10 21:24:07.480669,447,906,401, 58,2014-01-18 22:06:18.858734,554,683,194, 58,2014-01-20 14:16:41.42766,113,454,195, 58,2014-01-15 01:00:37.119346,931,464,466, 58,2014-01-17 15:19:46.154761,668,846,504, 58,2014-01-13 03:25:57.962129,34,46,575, 58,2014-01-15 19:47:30.130618,856,495,371, 58,2014-01-14 
21:19:50.402321,846,61,317, 58,2014-01-11 12:43:52.383851,843,765,674, 58,2014-01-12 00:37:13.027193,490,236,633, 58,2014-01-12 01:42:03.90413,523,443,206, 58,2014-01-19 09:44:33.574247,163,433,412, 58,2014-01-18 20:30:22.609781,502,922,759, 58,2014-01-13 11:32:31.159832,814,91,660, 58,2014-01-16 07:35:45.242273,810,92,908, 58,2014-01-11 20:11:48.375924,440,696,358, 58,2014-01-16 04:49:11.965575,49,648,209, 58,2014-01-21 00:59:36.952265,870,235,45, 58,2014-01-13 02:47:49.676998,685,787,135, 58,2014-01-15 16:37:04.124314,653,314,551, 58,2014-01-16 02:14:28.711093,723,688,845, 58,2014-01-14 01:10:57.770026,836,67,69, 58,2014-01-12 14:58:44.507779,345,958,420, 58,2014-01-13 16:59:07.504623,140,530,806, 58,2014-01-14 09:37:40.895871,135,267,383, 58,2014-01-18 23:46:01.230899,521,30,996, 58,2014-01-18 08:49:15.371232,272,582,762, 58,2014-01-19 15:15:52.322957,376,893,660, 58,2014-01-19 21:31:40.654604,16,814,571, 58,2014-01-13 16:39:48.730398,920,161,184, 58,2014-01-10 22:25:53.027441,308,582,689, 58,2014-01-19 22:55:48.135273,45,280,207, 58,2014-01-11 08:46:07.589132,120,717,834, 58,2014-01-20 16:42:34.455101,785,827,341, 58,2014-01-13 17:56:25.255037,88,584,457, 58,2014-01-18 04:05:53.743894,13,616,507, 58,2014-01-12 14:08:32.417649,181,75,241, 58,2014-01-18 17:43:55.385655,164,924,16, 58,2014-01-11 19:25:44.146633,504,707,929, 58,2014-01-13 06:52:24.8015,208,747,329, 58,2014-01-19 22:21:08.412848,429,487,122, 58,2014-01-13 01:07:48.050764,421,518,282, 58,2014-01-11 10:36:58.375747,960,442,110, 58,2014-01-17 12:51:31.022629,860,302,391, 58,2014-01-15 16:40:19.210596,713,418,320, 58,2014-01-16 22:12:43.61802,586,992,758, 58,2014-01-18 13:03:19.398553,125,536,170, 58,2014-01-21 01:29:31.176171,507,726,388, 58,2014-01-16 17:12:20.570703,633,689,204, 58,2014-01-20 19:51:09.075551,257,504,63, 58,2014-01-15 12:06:35.300903,742,5,971, 58,2014-01-11 13:26:49.281796,306,496,608, 58,2014-01-13 15:02:06.845577,862,83,465, 58,2014-01-17 07:05:19.808682,729,270,717, 58,2014-01-14 10:25:56.78642,923,533,114, 42,2014-01-17 04:39:47.741448,79,809,880, 41,2014-01-15 00:51:21.039581,621,379,436, 41,2014-01-11 13:15:12.157652,857,212,899, 41,2014-01-15 13:55:40.064405,608,191,542, 41,2014-01-13 16:23:01.694604,5,815,917, 42,2014-01-14 09:55:00.88805,655,723,893, 41,2014-01-15 16:21:33.091846,105,815,78, 42,2014-01-12 09:18:49.829877,245,813,111, 41,2014-01-14 22:41:08.477602,650,450,990, 41,2014-01-15 03:04:07.547367,122,398,216, 41,2014-01-15 07:15:15.084915,534,727,425, 41,2014-01-11 20:47:02.221496,884,917,84, 41,2014-01-16 21:12:39.965016,14,834,79, 41,2014-01-12 18:59:10.47057,552,817,481, 41,2014-01-12 20:12:46.368129,315,60,674, 42,2014-01-19 08:05:04.766516,406,708,87, 42,2014-01-11 11:20:18.883418,394,582,267, 41,2014-01-15 01:20:34.418893,53,970,340, 41,2014-01-19 22:42:03.142262,411,10,343, 41,2014-01-18 04:11:49.906047,711,785,746, 41,2014-01-19 22:00:53.629907,977,791,502, 42,2014-01-15 14:54:46.760701,384,437,605, 42,2014-01-15 11:15:09.305019,934,714,560, 42,2014-01-19 17:30:24.806078,103,290,775, 41,2014-01-11 02:07:07.331404,604,107,399, 41,2014-01-15 01:06:18.38057,940,95,290, 41,2014-01-13 23:37:00.106981,151,602,538, 41,2014-01-11 19:33:56.613201,529,380,590, 42,2014-01-17 20:08:25.226567,755,765,956, 42,2014-01-20 10:42:19.916083,447,996,743, 42,2014-01-15 09:59:53.39962,125,608,587, 42,2014-01-13 18:48:12.968014,328,702,822, 42,2014-01-14 05:33:40.955665,308,251,40, 42,2014-01-16 03:15:05.557272,359,254,749, 42,2014-01-18 12:43:53.032839,535,385,106, 42,2014-01-17 
01:56:42.650269,611,859,136, 41,2014-01-19 17:10:06.445742,713,620,354, 42,2014-01-12 23:05:26.124684,243,376,428, 42,2014-01-18 15:15:32.480566,851,154,285, 42,2014-01-13 09:51:14.923344,322,837,755, 41,2014-01-17 06:09:33.672051,910,210,845, 42,2014-01-12 16:30:47.56548,920,466,696, 41,2014-01-14 10:38:17.14484,643,342,761, 41,2014-01-12 21:22:13.637067,122,21,637, 41,2014-01-14 15:29:58.036469,159,969,382, 41,2014-01-16 10:51:03.513387,198,936,996, 41,2014-01-10 23:27:18.403582,693,741,604, 42,2014-01-15 06:50:16.919886,844,669,348, 41,2014-01-20 16:11:37.93228,872,860,659, 42,2014-01-20 02:09:21.546264,876,867,717, 41,2014-01-12 05:02:06.826352,300,418,787, 42,2014-01-19 08:12:31.562188,72,198,304, 41,2014-01-14 11:04:08.306965,854,308,62, 42,2014-01-16 20:17:16.131371,187,959,459, 42,2014-01-17 19:42:56.368685,730,118,427, 41,2014-01-14 17:11:15.638369,901,741,37, 42,2014-01-21 01:23:34.511942,590,732,382, 42,2014-01-20 23:19:56.475667,164,522,712, 41,2014-01-15 16:45:12.25157,382,7,102, 42,2014-01-17 15:31:59.738507,343,846,472, 42,2014-01-20 04:02:16.39175,184,944,156, 41,2014-01-20 06:45:05.65119,294,523,438, 41,2014-01-20 14:20:12.706941,322,83,258, 41,2014-01-13 03:35:57.347833,997,843,328, 42,2014-01-15 04:00:11.208461,913,254,938, 42,2014-01-17 21:04:05.739779,86,782,778, 41,2014-01-19 09:32:39.998103,925,246,705, 41,2014-01-13 15:10:17.654203,129,546,347, 41,2014-01-20 00:09:31.864464,210,564,880, 41,2014-01-16 18:48:12.478669,854,880,391, 42,2014-01-16 05:01:32.577547,627,712,518, 42,2014-01-16 00:19:05.536933,951,459,42, 42,2014-01-18 15:19:00.044568,64,286,290, 42,2014-01-19 19:39:49.722805,440,454,912, 41,2014-01-18 01:41:19.174,150,984,399, 42,2014-01-12 00:48:58.081036,662,847,945, 41,2014-01-15 00:30:53.236193,865,689,41, 42,2014-01-18 05:08:37.578001,621,913,394, 41,2014-01-16 11:39:15.000923,416,186,956, 42,2014-01-14 10:42:31.168473,611,248,898, 41,2014-01-17 01:17:59.124265,564,415,988, 41,2014-01-17 20:41:21.827275,180,662,115, 41,2014-01-12 12:55:02.73108,428,206,823, 41,2014-01-20 16:22:07.431229,802,887,475, 41,2014-01-13 10:58:37.958646,491,977,480, 42,2014-01-19 12:37:59.099765,930,656,300, 41,2014-01-14 03:33:23.069599,365,466,280, 42,2014-01-13 06:22:12.471006,716,842,246, 42,2014-01-19 05:57:55.575432,963,879,531, 42,2014-01-19 00:18:35.321169,328,887,248, 42,2014-01-20 01:54:12.209514,141,268,559, 41,2014-01-18 04:00:11.967601,695,842,211, 41,2014-01-18 01:03:40.972358,252,685,543, 42,2014-01-19 10:14:24.916454,413,541,128, 41,2014-01-20 11:36:09.315434,312,105,531, 42,2014-01-11 23:03:52.18082,677,745,208, 41,2014-01-16 01:18:30.656233,135,476,994, 42,2014-01-18 15:08:49.313537,916,966,696, 42,2014-01-14 18:14:09.835443,730,824,540, 41,2014-01-14 19:28:02.521116,953,323,838, 41,2014-01-14 03:57:01.792625,520,603,543, 41,2014-01-20 03:15:42.41299,293,122,977, 42,2014-01-19 23:47:08.058049,581,462,714, 42,2014-01-11 13:16:01.837193,638,575,29, 42,2014-01-18 16:55:32.135796,548,402,650, 41,2014-01-16 19:28:27.23205,605,870,385, 42,2014-01-12 18:04:59.918228,335,407,736, 41,2014-01-12 11:26:25.371989,310,247,355, 41,2014-01-13 18:37:04.810051,864,990,564, 41,2014-01-18 09:44:14.919151,530,428,158, 42,2014-01-16 02:08:56.54088,170,659,434, 41,2014-01-19 23:55:03.934316,127,156,244, 41,2014-01-15 00:25:36.746845,698,980,236, 41,2014-01-17 19:03:59.27196,8,437,131, 42,2014-01-19 10:17:11.365545,240,644,476, 42,2014-01-17 15:24:14.705491,320,494,615, 41,2014-01-16 01:41:58.371725,224,195,438, 42,2014-01-12 07:50:34.435144,387,21,623, 41,2014-01-20 
01:46:27.176497,141,183,590, 42,2014-01-14 01:39:53.947157,686,160,982, 42,2014-01-20 12:09:09.756312,280,905,129, 41,2014-01-18 21:40:39.38643,865,400,821, 41,2014-01-10 23:40:05.914758,703,852,71, 42,2014-01-17 07:12:50.72867,692,167,8, 41,2014-01-17 01:55:04.302884,388,510,773, 42,2014-01-20 15:16:15.230193,673,346,443, 41,2014-01-18 10:16:42.909909,109,243,107, 42,2014-01-11 21:13:34.959537,617,857,850, 41,2014-01-18 00:25:04.544149,331,104,275, 42,2014-01-11 22:30:52.745352,185,809,117, 42,2014-01-15 20:41:37.480653,207,536,163, 42,2014-01-10 22:22:06.336774,659,565,606, 42,2014-01-21 05:46:35.158342,761,877,335, 42,2014-01-14 14:28:45.538703,744,719,718, 42,2014-01-11 15:38:08.173967,321,301,547, 41,2014-01-18 16:42:07.294556,299,694,408, 41,2014-01-20 13:57:12.770752,677,845,961, 42,2014-01-13 13:43:08.092195,126,107,535, 42,2014-01-20 08:08:32.666545,885,197,676, 42,2014-01-13 02:34:17.581222,515,976,779, 41,2014-01-21 03:27:23.011346,101,9,908, 42,2014-01-15 04:17:29.207425,160,765,925, 41,2014-01-11 20:29:21.515538,908,93,663, 42,2014-01-14 21:52:59.758191,98,839,430, 42,2014-01-11 17:21:28.479385,939,82,8, 42,2014-01-20 10:46:32.881502,543,163,901, 41,2014-01-11 07:17:14.4641,729,293,834, 42,2014-01-16 23:03:26.851111,785,811,795, 41,2014-01-11 12:37:07.316646,579,222,337, 41,2014-01-20 13:03:41.640597,338,814,237, 42,2014-01-20 04:43:20.798687,903,115,19, 41,2014-01-21 04:46:17.073377,345,765,644, 41,2014-01-18 04:44:21.027027,569,87,233, 42,2014-01-20 08:23:26.713446,910,274,523, 42,2014-01-17 05:59:07.802047,391,873,517, 42,2014-01-14 00:39:25.33033,545,789,675, 42,2014-01-19 17:39:41.944057,866,458,5, 42,2014-01-14 10:15:50.711956,23,637,90, 42,2014-01-15 01:53:00.289868,40,414,496, 42,2014-01-16 12:04:46.488206,669,440,397, 41,2014-01-15 12:46:43.457308,741,850,381, 41,2014-01-20 02:34:37.770521,222,29,28, 41,2014-01-16 14:26:52.824981,695,485,24, 42,2014-01-15 12:33:18.616069,208,284,82, 42,2014-01-13 11:03:23.309224,907,269,442, 42,2014-01-17 10:05:00.998948,964,958,909, 41,2014-01-12 23:15:25.910625,787,494,188, 41,2014-01-12 19:00:36.079976,237,825,98, 42,2014-01-20 03:48:09.091143,999,102,438, 41,2014-01-12 01:23:58.57717,779,609,468, 42,2014-01-15 01:34:53.661198,634,990,441, 41,2014-01-20 01:15:32.102488,30,400,707, 41,2014-01-16 09:41:27.785015,78,256,323, 41,2014-01-16 02:04:15.177155,801,79,467, 41,2014-01-13 17:08:31.861098,478,118,427, 41,2014-01-17 07:02:56.2644,924,716,457, 42,2014-01-15 06:50:48.058656,844,942,510, 41,2014-01-14 04:25:46.325198,628,921,467, 41,2014-01-13 00:06:23.11593,946,620,699, 42,2014-01-15 23:27:55.375302,31,609,135, 42,2014-01-13 11:29:27.965795,836,903,376, 41,2014-01-11 22:49:43.914617,344,602,525, 42,2014-01-15 22:14:12.448679,180,752,827, 42,2014-01-20 20:13:48.993241,425,335,816, 42,2014-01-11 01:13:10.628063,979,337,466, 42,2014-01-11 22:13:20.251145,640,353,41, 42,2014-01-13 14:53:14.323571,332,774,106, 41,2014-01-19 22:52:52.57212,164,630,914, 42,2014-01-15 12:29:10.9631,807,716,788, 41,2014-01-17 20:46:14.613439,860,29,812, 41,2014-01-15 04:57:39.060327,296,91,815, 41,2014-01-20 05:15:54.420827,342,994,275, 42,2014-01-16 17:20:52.38396,886,670,159, 42,2014-01-20 23:24:31.885307,630,88,396, 42,2014-01-14 11:49:13.036896,490,88,971, 41,2014-01-19 08:24:15.693183,548,588,505, 42,2014-01-17 03:29:32.884255,472,393,952, 41,2014-01-16 15:04:38.947522,739,574,970, 41,2014-01-10 21:24:51.773159,115,779,726, 41,2014-01-16 01:17:41.975397,989,382,557, 41,2014-01-17 20:28:37.524692,85,159,68, 41,2014-01-15 02:59:45.434777,334,812,11, 
42,2014-01-14 20:33:14.078304,585,702,588, 41,2014-01-13 00:10:05.309705,886,671,261, 42,2014-01-20 09:04:00.611931,939,195,431, 42,2014-01-17 17:41:45.939402,307,99,261, 42,2014-01-19 11:13:01.574524,799,710,700, 41,2014-01-14 09:54:48.670589,395,362,281, 41,2014-01-21 02:07:32.264599,974,136,401, 41,2014-01-11 05:19:24.690454,660,184,398, 41,2014-01-19 13:22:44.045891,400,836,350, 41,2014-01-13 07:37:00.230394,663,11,393, 42,2014-01-12 08:09:08.605071,489,481,85, 41,2014-01-14 05:36:56.494989,849,356,39, 42,2014-01-12 21:50:49.223634,393,758,608, 41,2014-01-12 13:22:19.233133,248,64,723, 41,2014-01-15 07:50:16.746133,472,791,607, 90,2014-01-15 16:44:03.547205,2,571,653, 76,2014-01-11 06:15:11.805673,212,498,665, 9,2014-01-20 00:19:27.709233,321,395,970, 80,2014-01-12 07:30:18.160644,432,318,428, 80,2014-01-15 15:12:50.865999,449,224,349, 23,2014-01-18 23:35:22.13006,329,527,486, 76,2014-01-18 04:51:10.544604,589,573,514, 90,2014-01-15 08:37:22.751726,569,947,478, 90,2014-01-12 05:24:35.166956,109,869,268, 81,2014-01-16 07:15:26.237788,416,819,905, 30,2014-01-11 06:06:55.63598,506,478,252, 23,2014-01-18 00:29:14.114478,91,182,884, 9,2014-01-16 08:40:18.011367,770,457,647, 90,2014-01-16 11:24:37.611797,968,348,43, 76,2014-01-14 14:57:51.639589,173,115,266, 90,2014-01-20 15:40:03.446144,283,641,674, 90,2014-01-20 11:57:51.690101,190,104,458, 30,2014-01-16 19:07:56.949294,952,543,519, 9,2014-01-19 18:44:04.058075,985,727,127, 80,2014-01-16 23:39:37.629503,957,386,139, 23,2014-01-15 00:20:58.523818,287,915,268, 23,2014-01-12 22:38:52.728664,116,587,746, 30,2014-01-16 19:47:09.894102,290,543,45, 80,2014-01-15 09:40:23.214272,101,36,985, 81,2014-01-11 06:01:36.774554,992,870,172, 30,2014-01-19 07:24:10.124496,914,123,550, 80,2014-01-16 21:49:31.819343,677,924,829, 90,2014-01-14 15:38:33.269542,722,295,897, 23,2014-01-10 23:14:59.348548,999,75,700, 30,2014-01-18 15:11:51.052896,175,408,275, 90,2014-01-19 03:28:50.015675,108,683,193, 30,2014-01-15 19:59:02.895753,314,636,844, 80,2014-01-19 01:27:02.858569,843,685,466, 76,2014-01-17 21:48:17.725327,734,173,309, 23,2014-01-17 07:29:21.056817,250,150,641, 76,2014-01-13 10:39:53.724568,914,515,256, 81,2014-01-15 15:23:39.855388,155,188,697, 81,2014-01-14 06:20:31.60142,993,498,473, 23,2014-01-17 23:17:16.476293,377,935,865, 9,2014-01-17 00:48:15.022764,498,190,229, 90,2014-01-19 17:35:57.839627,453,944,163, 23,2014-01-18 09:24:12.112693,410,755,93, 81,2014-01-13 19:17:29.137242,291,137,512, 23,2014-01-14 20:16:15.850994,828,277,291, 30,2014-01-13 14:48:49.72449,930,470,714, 23,2014-01-17 14:15:20.77683,244,588,381, 81,2014-01-14 05:56:19.297136,602,378,559, 76,2014-01-12 20:46:41.414591,845,627,105, 90,2014-01-13 03:23:17.726544,292,640,644, 30,2014-01-12 18:40:23.355212,322,606,523, 23,2014-01-19 00:26:19.044094,818,464,103, 30,2014-01-17 07:44:16.250361,402,87,571, 90,2014-01-14 21:19:16.083876,484,76,205, 76,2014-01-14 14:13:28.938196,397,113,301, 81,2014-01-11 11:24:39.465052,912,57,985, 23,2014-01-15 07:20:52.858429,710,1,399, 80,2014-01-12 15:37:39.062692,940,422,520, 90,2014-01-17 13:14:11.284396,54,947,487, 9,2014-01-19 02:59:26.128391,623,164,485, 90,2014-01-12 18:52:38.41124,942,773,151, 76,2014-01-14 22:26:02.337292,804,32,685, 23,2014-01-17 00:28:16.144066,293,459,938, 81,2014-01-17 18:51:41.307412,86,634,584, 81,2014-01-12 17:53:05.195859,657,314,176, 76,2014-01-13 16:16:33.869393,80,526,958, 9,2014-01-13 20:21:02.364228,591,986,901, 23,2014-01-15 08:32:58.920846,299,179,455, 23,2014-01-18 11:40:13.7252,539,472,631, 30,2014-01-17 
06:41:33.965649,260,525,420, 76,2014-01-12 01:50:15.39714,354,336,98, 80,2014-01-14 06:28:28.747964,534,132,490, 30,2014-01-15 18:17:31.805275,395,909,315, 30,2014-01-19 15:14:27.509832,55,657,207, 30,2014-01-17 05:45:57.885205,961,663,266, 30,2014-01-19 18:33:47.656269,657,830,778, 23,2014-01-12 00:03:17.234322,677,373,393, 23,2014-01-13 14:01:18.662455,612,511,64, 23,2014-01-12 18:30:06.953405,285,798,811, 9,2014-01-14 00:49:58.648914,440,932,610, 90,2014-01-15 21:24:36.388998,929,717,260, 90,2014-01-14 17:10:30.308618,297,385,7, 9,2014-01-11 19:16:17.693007,350,165,955, 76,2014-01-11 23:08:52.639359,487,237,764, 80,2014-01-18 18:29:46.392493,855,441,702, 90,2014-01-15 13:29:46.631203,591,431,935, 80,2014-01-12 14:33:32.104411,283,436,317, 30,2014-01-12 19:50:39.251342,522,144,892, 30,2014-01-17 09:07:25.694314,620,448,226, 23,2014-01-19 07:47:43.389226,816,970,500, 90,2014-01-21 02:50:05.379732,546,152,198, 90,2014-01-19 08:00:04.105553,84,203,664, 9,2014-01-13 00:13:45.726518,488,417,262, 23,2014-01-16 21:18:21.523798,328,774,126, 90,2014-01-15 20:51:45.412966,988,288,742, 76,2014-01-14 22:06:50.922796,604,583,470, 76,2014-01-19 17:34:55.39361,224,748,670, 23,2014-01-18 21:12:47.777194,603,505,899, 9,2014-01-19 10:39:49.843643,789,338,227, 80,2014-01-16 23:15:09.11881,957,676,437, 80,2014-01-14 21:54:21.743261,434,610,129, 90,2014-01-20 16:30:05.240781,758,791,543, 81,2014-01-20 09:43:37.866774,36,869,58, 76,2014-01-19 20:11:53.548536,731,386,300, 90,2014-01-19 01:44:32.751033,593,983,101, 76,2014-01-16 09:29:35.751979,975,193,1000, 76,2014-01-18 08:45:41.205225,536,777,637, 90,2014-01-20 05:47:49.985355,382,595,448, 81,2014-01-19 03:30:54.414433,879,931,876, 23,2014-01-20 07:15:48.15863,779,877,573, 80,2014-01-13 00:37:48.634687,383,72,724, 23,2014-01-13 18:55:30.803431,453,702,859, 81,2014-01-13 18:26:18.467667,3,711,841, 81,2014-01-13 23:54:06.327695,558,77,987, 23,2014-01-14 22:04:23.44321,276,193,853, 90,2014-01-11 06:56:04.86016,220,479,344, 23,2014-01-18 17:23:52.959317,729,633,949, 90,2014-01-16 16:37:55.547622,232,257,253, 90,2014-01-13 06:46:44.111502,646,683,920, 80,2014-01-14 20:31:18.65363,419,6,976, 9,2014-01-14 18:25:38.936848,636,643,91, 23,2014-01-13 03:36:49.491234,266,892,317, 90,2014-01-12 22:31:22.759183,534,408,629, 76,2014-01-16 22:39:24.663785,409,486,666, 76,2014-01-19 04:55:11.015451,369,285,932, 23,2014-01-17 23:23:08.172569,290,987,426, 30,2014-01-21 00:46:15.586581,657,494,787, 23,2014-01-17 16:30:06.409061,510,253,234, 81,2014-01-15 14:35:55.949762,663,592,924, 76,2014-01-19 05:26:05.430224,986,909,567, 30,2014-01-13 09:45:15.527871,667,725,64, 90,2014-01-19 16:30:17.693024,881,48,342, 9,2014-01-18 15:56:10.671005,434,4,584, 23,2014-01-12 13:28:53.394645,231,680,262, 90,2014-01-18 06:42:11.24198,327,332,870, 23,2014-01-16 11:40:43.422039,590,158,465, 90,2014-01-18 02:58:29.146624,467,629,644, 81,2014-01-15 09:27:52.447205,867,174,631, 90,2014-01-15 11:28:33.407812,843,180,431, 81,2014-01-16 00:29:23.561476,384,729,601, 81,2014-01-14 10:43:40.605835,651,333,465, 76,2014-01-17 16:06:22.0425,193,619,707, 9,2014-01-18 23:24:54.364908,207,830,713, 80,2014-01-17 09:09:59.073502,887,661,720, 76,2014-01-20 20:00:28.370194,89,115,145, 76,2014-01-12 15:29:17.808118,262,310,95, 90,2014-01-17 20:06:03.934082,2,378,504, 23,2014-01-18 07:24:21.32951,937,454,998, 23,2014-01-18 12:07:13.356158,749,645,790, 9,2014-01-20 06:52:48.045583,245,482,190, 81,2014-01-11 21:55:39.983141,637,405,760, 80,2014-01-12 00:32:52.293007,781,288,625, 76,2014-01-12 
04:29:37.537237,916,867,612, 76,2014-01-14 00:27:02.742744,854,126,433, 80,2014-01-18 03:12:16.956792,223,711,954, 76,2014-01-20 13:24:48.552689,728,251,241, 76,2014-01-21 03:50:10.915312,121,534,812, 23,2014-01-17 21:58:32.543373,318,214,892, 76,2014-01-16 23:54:54.96175,740,278,916, 30,2014-01-15 12:26:06.865075,599,780,978, 23,2014-01-15 21:24:37.973597,491,946,955, 81,2014-01-19 13:40:10.489621,567,900,910, 76,2014-01-13 22:56:24.558518,548,51,588, 9,2014-01-13 07:20:48.645021,634,265,303, 30,2014-01-10 21:09:03.884266,752,343,395, 23,2014-01-10 23:38:35.800498,783,960,333, 81,2014-01-18 23:01:32.067059,337,723,107, 81,2014-01-18 04:07:33.03089,355,966,948, 23,2014-01-15 13:06:28.247703,971,964,851, 9,2014-01-13 04:30:05.474872,256,185,660, 90,2014-01-12 22:36:56.592366,582,442,501, 81,2014-01-19 03:50:08.853539,719,569,647, 23,2014-01-20 00:36:27.517371,178,371,430, 30,2014-01-21 02:01:50.957693,277,764,911, 81,2014-01-15 07:00:07.927459,440,749,507, 30,2014-01-19 14:36:55.887984,974,821,150, 9,2014-01-12 11:31:08.76581,486,347,431, 81,2014-01-11 21:06:11.861542,703,291,181, 80,2014-01-16 16:01:17.217495,569,890,548, 90,2014-01-20 03:38:22.12197,405,20,597, 76,2014-01-10 21:58:59.907125,922,394,656, 9,2014-01-17 17:56:57.201055,309,801,245, 90,2014-01-10 22:11:14.415396,522,535,228, 90,2014-01-12 06:28:37.444362,827,870,123, 23,2014-01-20 22:23:59.943799,618,644,402, 30,2014-01-18 05:23:31.372188,270,669,381, 76,2014-01-11 13:53:25.997051,266,81,231, 9,2014-01-20 20:14:10.859111,344,441,471, 23,2014-01-14 21:22:03.91598,70,901,182, 90,2014-01-17 17:48:20.958801,178,574,354, 9,2014-01-15 02:40:17.724185,324,188,115, 76,2014-01-19 22:46:41.889577,134,693,910, 80,2014-01-16 01:28:31.448422,309,179,800, 80,2014-01-18 05:36:42.282704,601,549,344, 80,2014-01-12 00:07:30.534598,788,698,668, 23,2014-01-16 02:37:35.332687,121,422,439, 76,2014-01-18 09:15:18.083202,986,871,233, 30,2014-01-20 03:09:02.602076,275,212,702, 81,2014-01-13 00:45:08.363996,30,353,471, 9,2014-01-12 16:21:46.330905,158,691,592, 9,2014-01-12 01:39:08.076948,948,991,808, 30,2014-01-15 03:22:04.956361,551,348,818, 81,2014-01-21 00:11:55.184862,355,602,114, 23,2014-01-10 20:15:35.594738,338,176,42, 30,2014-01-14 23:23:55.914055,973,484,870, 76,2014-01-15 01:12:03.112322,528,231,63, 90,2014-01-19 14:52:31.482722,646,338,273, 9,2014-01-16 14:55:04.680284,240,737,909, 90,2014-01-16 02:18:14.973864,934,685,973, 9,2014-01-15 00:53:48.700217,621,570,982, 9,2014-01-15 12:33:26.802254,576,691,735, 30,2014-01-16 04:17:14.880989,762,881,745, 23,2014-01-11 12:50:45.901272,314,494,466, 23,2014-01-15 14:44:41.21765,589,154,197, 9,2014-01-17 14:45:52.325771,504,431,321, 81,2014-01-11 05:14:45.845071,471,75,530, 30,2014-01-12 14:08:12.590257,499,74,747, 30,2014-01-18 08:39:18.322822,535,19,414, 76,2014-01-21 05:28:56.704182,855,48,963, 30,2014-01-16 15:30:16.506237,67,532,303, 80,2014-01-14 20:27:39.282042,918,195,865, 90,2014-01-15 02:09:14.428787,752,341,688, 76,2014-01-15 08:16:58.395814,998,314,810, 30,2014-01-20 01:56:10.730464,573,340,168, 9,2014-01-12 01:45:56.711491,867,603,993, 76,2014-01-16 12:24:28.930831,782,358,252, 80,2014-01-14 22:33:46.06357,580,342,625, 80,2014-01-19 15:01:14.795111,819,728,982, 9,2014-01-15 09:33:31.532907,569,814,262, 81,2014-01-17 03:18:54.427566,468,109,181, 23,2014-01-11 01:23:01.126017,68,455,697, 9,2014-01-16 15:12:39.609854,74,435,45, 23,2014-01-11 00:40:59.383928,802,965,604, 81,2014-01-21 05:34:56.310878,852,188,54, 30,2014-01-16 15:28:15.204592,285,657,381, 9,2014-01-15 
04:04:55.298402,553,110,47, 81,2014-01-15 00:46:59.423201,421,543,451, 23,2014-01-15 00:20:46.687314,994,770,79, 9,2014-01-20 22:59:59.978686,336,81,946, 90,2014-01-20 07:05:14.397065,953,754,990, 80,2014-01-19 05:14:35.38795,292,776,695, 23,2014-01-15 05:33:26.780941,442,653,899, 23,2014-01-15 05:22:29.278472,159,287,242, 90,2014-01-19 22:05:21.289222,965,127,688, 80,2014-01-20 00:18:07.99901,326,91,463, 9,2014-01-11 14:08:21.604243,866,808,584, 76,2014-01-20 07:20:07.134711,725,234,781, 81,2014-01-11 08:26:20.589268,552,286,599, 81,2014-01-19 02:47:39.927065,813,105,66, 30,2014-01-20 06:49:03.838894,923,110,928, 23,2014-01-17 03:56:37.095505,514,725,230, 30,2014-01-12 17:15:19.209106,521,349,490, 90,2014-01-14 02:58:18.267681,446,544,377, 90,2014-01-11 06:13:35.491319,947,143,775, 80,2014-01-11 13:11:29.939989,933,215,148, 76,2014-01-15 08:44:14.979591,820,184,505, 81,2014-01-16 22:38:04.42215,635,312,496, 30,2014-01-15 15:45:16.003557,293,90,114, 30,2014-01-13 17:45:29.774703,350,170,603, 30,2014-01-11 02:11:35.955056,25,458,235, 81,2014-01-11 13:04:10.431123,796,953,161, 81,2014-01-13 23:08:30.900719,294,251,816, 81,2014-01-16 21:24:15.565329,663,591,796, 90,2014-01-11 17:45:09.815051,254,132,977, 9,2014-01-13 22:43:27.212017,808,446,895, 23,2014-01-12 06:52:30.769922,810,251,70, 81,2014-01-16 01:50:05.113453,771,847,791, 81,2014-01-18 03:30:26.635218,818,854,657, 30,2014-01-16 11:13:17.457654,255,137,918, 80,2014-01-15 18:50:05.09214,443,258,930, 30,2014-01-17 04:35:41.032701,10,825,502, 90,2014-01-14 10:27:52.845603,912,592,554, 80,2014-01-20 04:23:31.873499,346,519,762, 80,2014-01-11 03:58:10.311173,283,472,255, 30,2014-01-13 02:33:14.134825,225,685,446, 90,2014-01-18 22:41:39.872509,329,964,524, 90,2014-01-11 22:06:31.915416,551,710,796, 80,2014-01-12 03:53:21.269536,339,74,13, 9,2014-01-19 11:08:00.461777,432,159,964, 81,2014-01-20 04:54:11.84248,506,268,881, 81,2014-01-11 04:42:25.10885,782,599,131, 90,2014-01-15 09:04:37.557282,436,230,333, 80,2014-01-11 16:09:31.052005,457,93,39, 30,2014-01-14 11:40:43.376531,375,768,239, 90,2014-01-15 19:18:13.048601,368,665,236, 9,2014-01-12 09:21:00.991994,569,780,505, 76,2014-01-19 00:24:58.356122,500,340,85, 81,2014-01-11 11:56:17.47117,64,806,444, 9,2014-01-17 05:06:16.995551,318,688,355, 81,2014-01-11 12:10:28.130825,102,763,102, 80,2014-01-11 18:07:53.426226,521,508,238, 23,2014-01-17 22:10:27.426675,268,657,439, 76,2014-01-14 15:18:59.031964,829,123,403, 80,2014-01-17 19:32:08.991555,472,514,326, 80,2014-01-18 19:55:37.241726,690,627,766, 81,2014-01-17 18:02:26.24398,716,220,173, 23,2014-01-19 06:24:39.761896,414,471,328, 80,2014-01-13 15:45:42.355597,634,184,800, 30,2014-01-14 15:32:52.879197,426,707,898, 30,2014-01-14 11:37:57.21955,82,218,547, 81,2014-01-18 14:35:47.447736,109,1,164, 90,2014-01-21 00:08:33.911898,739,188,218, 81,2014-01-18 02:05:50.065153,668,786,879, 81,2014-01-17 12:59:19.321235,217,633,398, 76,2014-01-10 22:06:44.223071,111,437,651, 90,2014-01-20 08:39:04.199978,135,985,37, 76,2014-01-15 05:40:59.193746,727,860,124, 81,2014-01-12 00:13:16.138486,680,945,196, 23,2014-01-11 06:32:25.469934,251,609,212, 9,2014-01-13 10:48:59.655523,133,907,813, 9,2014-01-10 23:07:27.981386,724,274,227, 76,2014-01-11 15:14:50.578784,968,655,478, 90,2014-01-17 23:53:37.212805,265,945,87, 76,2014-01-11 19:16:59.033392,600,846,794, 23,2014-01-15 06:55:33.955316,367,619,428, 80,2014-01-12 13:11:50.261825,255,501,289, 90,2014-01-13 08:38:00.025385,447,359,47, 80,2014-01-13 01:20:32.311856,880,241,831, 23,2014-01-13 
05:08:07.732995,181,420,124, 90,2014-01-19 17:44:17.020936,494,873,902, 81,2014-01-13 17:31:00.442681,800,495,593, 9,2014-01-14 03:16:01.159221,396,102,319, 30,2014-01-16 09:54:44.447611,614,633,961, 23,2014-01-17 12:49:59.474645,804,283,70, 81,2014-01-21 02:48:10.151195,224,561,45, 80,2014-01-13 23:50:21.689336,2,146,338, 81,2014-01-14 00:52:25.718624,642,841,815, 80,2014-01-19 03:12:49.913091,857,225,442, 23,2014-01-16 19:36:04.044934,939,490,691, 23,2014-01-17 20:25:18.598241,677,327,396, 30,2014-01-12 08:50:47.132641,497,865,283, 90,2014-01-14 04:11:51.49309,148,78,573, 9,2014-01-17 14:33:52.510139,296,380,657, 9,2014-01-19 14:56:37.197794,938,12,610, 90,2014-01-20 21:11:10.814326,896,824,696, 80,2014-01-17 16:40:36.733628,721,340,905, 9,2014-01-18 17:35:41.397773,47,68,197, 81,2014-01-14 20:52:10.008491,29,223,944, 80,2014-01-18 20:53:52.872115,231,28,635, 76,2014-01-19 04:08:06.867707,635,721,567, 81,2014-01-17 11:41:09.664014,296,20,701, 76,2014-01-19 00:01:20.853501,847,822,719, 81,2014-01-19 23:22:57.446492,927,439,413, 23,2014-01-14 05:34:46.877238,427,18,870, 30,2014-01-19 23:18:19.886893,936,579,873, 90,2014-01-14 00:18:31.402226,372,906,646, 81,2014-01-15 22:46:37.139063,344,3,809, 76,2014-01-12 01:56:19.912278,134,877,774, 23,2014-01-16 05:39:03.714083,125,942,451, 23,2014-01-18 07:54:44.872057,986,429,176, 80,2014-01-20 23:40:36.933215,945,67,143, 30,2014-01-19 03:10:04.156765,172,745,563, 90,2014-01-11 05:10:46.031278,282,622,514, 30,2014-01-16 03:35:21.380826,637,202,166, 30,2014-01-15 10:00:03.631828,326,501,150, 9,2014-01-11 01:58:56.182473,507,493,128, 23,2014-01-19 07:25:43.070581,576,662,443, 30,2014-01-18 14:52:29.350453,106,698,605, 80,2014-01-19 09:11:46.095564,148,658,923, 76,2014-01-14 21:01:47.115516,726,62,365, 23,2014-01-15 05:17:47.948694,192,930,605, 81,2014-01-20 22:02:33.228204,522,786,821, 30,2014-01-18 05:13:38.608605,770,359,682, 90,2014-01-11 13:51:40.459251,874,801,464, 90,2014-01-19 06:59:10.425998,575,514,366, 90,2014-01-17 20:24:49.42335,280,665,168, 30,2014-01-18 10:32:17.192879,650,121,148, 80,2014-01-16 18:34:51.82419,539,391,127, 81,2014-01-11 11:16:59.431841,834,908,939, 23,2014-01-16 01:26:10.064994,541,838,969, 80,2014-01-14 16:42:58.691897,479,944,56, 90,2014-01-18 02:58:09.095854,456,841,694, 90,2014-01-13 19:27:30.918914,368,818,94, 81,2014-01-13 10:05:56.138808,561,252,427, 76,2014-01-11 02:32:55.973092,275,651,828, 81,2014-01-12 12:45:50.805806,215,507,13, 81,2014-01-16 14:24:27.541034,870,449,818, 80,2014-01-16 05:19:33.112154,489,964,118, 23,2014-01-13 18:42:10.718503,855,627,731, 23,2014-01-11 14:03:31.255117,362,168,935, 9,2014-01-13 07:14:17.984211,815,827,35, 30,2014-01-13 12:22:47.651718,27,186,384, 9,2014-01-19 21:13:35.411881,760,498,154, 81,2014-01-13 16:25:04.015908,187,684,329, 76,2014-01-18 19:58:09.032963,886,110,570, 76,2014-01-14 01:13:39.04371,16,814,206, 23,2014-01-13 22:24:00.198381,25,145,995, 81,2014-01-16 21:23:52.103545,129,388,590, 80,2014-01-11 10:06:08.394582,91,996,637, 80,2014-01-12 01:35:46.294364,143,419,334, 90,2014-01-20 22:25:39.21906,990,162,669, 23,2014-01-15 19:23:56.343275,913,72,172, 76,2014-01-11 17:38:19.522567,166,260,698, 76,2014-01-17 21:39:17.828084,403,859,364, 23,2014-01-16 13:15:36.802526,220,85,543, 30,2014-01-20 04:37:29.948984,665,191,910, 80,2014-01-14 12:04:07.251435,179,391,928, 80,2014-01-13 17:47:53.995405,543,367,404, 30,2014-01-15 17:12:21.773174,325,13,820, 9,2014-01-15 03:21:06.683276,340,82,465, 90,2014-01-18 23:14:04.060818,847,914,378, 80,2014-01-19 
13:55:20.465072,121,804,355, 81,2014-01-12 00:19:15.779129,491,614,900, 80,2014-01-11 12:41:34.979731,595,459,258, 81,2014-01-11 18:01:16.603879,750,534,380, 30,2014-01-12 06:52:11.752641,973,878,370, 9,2014-01-13 05:27:25.785538,743,207,135, 80,2014-01-17 12:25:44.144914,587,152,398, 90,2014-01-17 16:11:44.864795,344,270,688, 81,2014-01-16 04:09:36.504042,812,707,475, 90,2014-01-18 06:29:15.40003,971,331,71, 23,2014-01-20 03:26:02.849426,248,931,115, 81,2014-01-18 20:32:24.156178,290,787,956, 76,2014-01-16 21:42:50.812331,662,73,989, 23,2014-01-12 13:51:06.865334,76,526,584, 81,2014-01-16 10:30:33.189142,436,119,282, 90,2014-01-20 02:56:29.856042,401,22,6, 90,2014-01-15 16:15:07.063715,350,417,237, 81,2014-01-12 01:54:25.292685,898,982,889, 76,2014-01-20 17:02:38.250624,449,966,970, 81,2014-01-16 21:50:53.358078,913,403,527, 80,2014-01-11 18:20:04.511746,149,543,774, 30,2014-01-15 06:26:34.594318,117,119,47, 76,2014-01-17 19:29:12.881065,207,130,935, 30,2014-01-18 19:59:22.339831,514,545,982, 23,2014-01-20 23:42:11.396844,19,383,910, 76,2014-01-16 18:06:42.830049,617,523,921, 76,2014-01-12 02:03:29.591266,816,392,640, 90,2014-01-13 11:30:05.392669,847,801,80, 30,2014-01-11 05:19:04.603223,4,995,131, 9,2014-01-16 09:24:36.274542,845,451,20, 76,2014-01-11 04:44:09.453486,196,763,844, 80,2014-01-19 23:14:25.068295,851,859,725, 23,2014-01-17 13:43:52.054091,650,589,382, 76,2014-01-11 21:25:44.433218,112,53,161, 81,2014-01-20 21:15:41.672593,601,870,781, 30,2014-01-19 00:36:03.80673,827,932,769, 9,2014-01-14 06:53:10.218755,718,933,834, 80,2014-01-17 03:41:25.817506,891,556,206, 90,2014-01-15 10:47:48.671525,791,601,285, 23,2014-01-19 15:02:46.723216,992,495,512, 23,2014-01-14 04:10:41.217956,678,519,105, 81,2014-01-14 08:13:51.520951,401,634,261, 23,2014-01-17 05:35:10.879394,528,443,219, 90,2014-01-20 05:53:32.030288,118,751,126, 23,2014-01-16 02:04:58.386284,946,370,565, 90,2014-01-12 10:05:44.068535,274,641,51, 23,2014-01-19 02:50:01.886329,48,1,776, 9,2014-01-20 22:20:05.450419,608,699,32, 23,2014-01-13 16:00:09.361221,934,700,743, 9,2014-01-18 13:52:40.137372,426,661,934, 80,2014-01-16 14:10:58.808497,259,972,891, 23,2014-01-14 14:20:13.872968,965,793,596, 90,2014-01-12 14:19:14.731691,80,681,121, 76,2014-01-13 03:40:11.68956,565,787,486, 80,2014-01-12 04:19:36.213217,957,501,652, 90,2014-01-12 08:01:26.128534,903,623,155, 81,2014-01-19 01:46:54.519609,29,686,611, 76,2014-01-13 10:23:05.804483,123,153,469, 30,2014-01-14 23:31:31.521203,595,770,38, 90,2014-01-19 11:05:59.122832,24,354,172, 81,2014-01-18 23:47:42.079444,493,429,384, 9,2014-01-15 08:15:40.974689,465,288,104, 30,2014-01-18 04:20:24.191546,684,326,571, 9,2014-01-15 07:31:34.133533,177,94,965, 76,2014-01-16 09:41:25.407906,122,114,13, 30,2014-01-17 19:36:05.864139,747,24,922, 90,2014-01-13 02:07:37.940264,661,475,4, 76,2014-01-19 20:34:35.62708,383,693,225, 9,2014-01-13 17:17:31.681646,598,777,280, 23,2014-01-17 16:55:26.61179,672,37,581, 81,2014-01-18 05:37:22.350296,548,477,854, 80,2014-01-17 01:28:12.899602,40,753,796, 76,2014-01-21 05:09:18.133159,847,235,744, 23,2014-01-14 05:12:33.229691,367,244,341, 30,2014-01-16 01:21:44.92989,460,71,216, 81,2014-01-16 01:14:16.519444,257,39,818, 81,2014-01-15 19:18:17.298226,408,454,289, 30,2014-01-13 22:11:46.816639,213,87,126, 76,2014-01-15 17:34:21.969863,456,762,572, 23,2014-01-18 15:18:26.659447,299,204,372, 30,2014-01-11 06:04:26.95401,110,633,934, 90,2014-01-11 01:45:20.778778,30,886,284, 30,2014-01-11 23:38:40.532415,2,895,876, 90,2014-01-13 00:23:41.6857,675,503,860, 
23,2014-01-13 09:25:32.468339,956,213,732, 76,2014-01-13 07:58:16.745631,483,694,635, 80,2014-01-14 12:25:07.814234,61,369,42, 80,2014-01-11 05:12:26.988368,148,589,624, 90,2014-01-15 22:21:22.550533,580,317,283, 23,2014-01-18 15:56:39.335856,658,139,360, 76,2014-01-19 20:18:26.1112,499,120,388, 30,2014-01-13 16:09:04.629977,679,857,69, 80,2014-01-12 18:12:20.310546,24,534,635, 81,2014-01-16 18:38:50.302746,827,539,567, 81,2014-01-18 03:40:38.76351,126,383,592, 9,2014-01-18 07:53:45.718871,886,915,77, 23,2014-01-13 08:14:56.166885,563,329,661, 30,2014-01-20 09:48:16.703775,994,177,239, 23,2014-01-16 22:28:21.345952,247,17,145, 90,2014-01-16 05:32:27.84895,882,166,202, 80,2014-01-16 20:43:43.315564,455,193,298, 90,2014-01-13 22:05:43.696248,779,904,142, 76,2014-01-12 01:00:40.748552,468,797,541, 30,2014-01-16 19:53:01.448723,530,892,701, 81,2014-01-17 07:18:16.925939,400,848,630, 23,2014-01-17 06:22:25.67886,503,819,848, 76,2014-01-11 15:07:17.968586,47,538,388, 80,2014-01-11 20:36:34.224165,593,273,115, 30,2014-01-20 08:34:12.495499,585,420,474, 81,2014-01-16 12:41:39.938449,54,927,868, 23,2014-01-19 15:55:00.883611,267,390,470, 81,2014-01-20 18:38:39.449509,416,146,889, 90,2014-01-16 18:27:00.717227,749,303,389, 23,2014-01-20 19:33:41.416026,102,885,50, 9,2014-01-12 13:02:21.135209,858,350,966, 90,2014-01-19 07:52:33.185984,980,874,900, 81,2014-01-12 21:31:58.162076,915,184,7, 30,2014-01-16 05:27:28.949445,141,972,869, 76,2014-01-19 17:05:00.174352,138,728,857, 30,2014-01-17 23:53:20.712609,329,196,620, 76,2014-01-13 15:24:08.285301,791,219,588, 30,2014-01-18 07:23:26.285553,59,740,302, 30,2014-01-20 20:02:25.342586,385,204,496, 30,2014-01-15 13:36:28.596266,526,289,728, 9,2014-01-13 20:02:16.588718,495,963,648, 81,2014-01-17 17:43:04.106097,297,372,902, 81,2014-01-12 15:30:14.315137,779,968,159, 81,2014-01-16 08:17:12.755603,511,283,627, 90,2014-01-16 21:31:20.809872,74,162,509, 76,2014-01-18 17:58:35.66109,656,173,365, 30,2014-01-11 07:49:40.604553,27,255,81, 80,2014-01-12 12:15:04.125435,396,155,817, 76,2014-01-11 10:04:19.357337,679,887,254, 30,2014-01-12 12:50:21.353524,178,737,284, 76,2014-01-18 12:08:05.574577,90,658,478, 23,2014-01-17 21:22:36.283276,926,640,444, 76,2014-01-18 23:12:47.032384,530,373,676, 9,2014-01-19 07:15:23.543164,580,115,260, 76,2014-01-18 21:59:10.50744,280,759,363, 23,2014-01-18 01:46:59.527883,579,370,903, 81,2014-01-14 13:57:03.481611,600,472,496, 90,2014-01-17 07:54:11.391051,905,904,719, 30,2014-01-17 14:25:38.977393,225,755,171, 30,2014-01-20 12:24:04.199257,690,303,283, 23,2014-01-16 21:27:52.807077,601,73,488, 76,2014-01-19 07:28:00.112603,951,31,426, 81,2014-01-18 14:16:37.385243,150,529,553, 90,2014-01-18 22:59:50.969153,610,691,482, 23,2014-01-14 06:55:29.062048,591,496,52, 9,2014-01-17 01:21:37.559595,249,894,550, 9,2014-01-15 16:53:11.681763,24,765,326, 80,2014-01-17 02:19:37.347768,120,951,531, 23,2014-01-14 02:45:03.845148,965,485,428, 80,2014-01-15 06:55:37.024349,983,651,937, 23,2014-01-11 09:56:05.944034,56,683,520, 90,2014-01-17 02:47:20.433866,509,95,851, 76,2014-01-11 18:38:41.130446,797,474,272, 23,2014-01-13 05:26:20.259172,695,280,87, 81,2014-01-12 05:04:33.189889,443,80,555, 9,2014-01-17 20:10:01.940318,655,112,615, 9,2014-01-21 03:24:55.92026,48,374,123, 80,2014-01-12 16:54:13.794442,536,595,83, 9,2014-01-19 12:25:06.066172,185,699,92, 80,2014-01-11 07:29:15.277598,720,728,12, 9,2014-01-14 09:44:35.147966,385,20,418, 76,2014-01-16 18:33:11.64075,920,998,278, 9,2014-01-18 08:51:51.560874,733,514,709, 80,2014-01-12 
02:57:22.18035,177,45,8, 9,2014-01-14 19:48:35.183913,35,46,689, 80,2014-01-16 00:51:02.068314,677,409,178, 90,2014-01-19 08:44:21.708234,575,756,298, 80,2014-01-18 13:45:38.665525,347,67,206, 23,2014-01-12 02:45:13.459365,118,425,762, 90,2014-01-15 17:10:00.685627,605,961,296, 81,2014-01-17 20:09:42.864782,780,461,986, 81,2014-01-18 04:13:06.266442,441,748,264, 80,2014-01-13 18:38:00.79823,1,499,56, 76,2014-01-15 04:26:20.250025,514,650,426, 81,2014-01-15 21:12:57.236015,786,876,127, 76,2014-01-17 05:33:29.860697,174,771,314, 81,2014-01-10 23:47:57.80962,941,556,727, 9,2014-01-20 18:06:08.917777,16,656,841, 30,2014-01-13 01:53:07.208466,428,818,669, 76,2014-01-14 06:33:01.655187,334,280,590, 80,2014-01-14 19:01:45.942546,67,358,555, 23,2014-01-13 15:49:13.152499,576,348,634, 23,2014-01-20 13:20:22.089053,277,376,286, 81,2014-01-15 17:40:27.072992,905,891,119, 80,2014-01-16 01:15:33.411671,548,675,656, 76,2014-01-11 12:24:55.278941,566,299,352, 23,2014-01-12 07:50:29.013729,694,168,334, 80,2014-01-15 22:40:29.331931,10,429,294, 76,2014-01-13 09:19:09.073801,636,790,295, 81,2014-01-21 00:15:35.079901,659,582,359, 30,2014-01-16 10:09:44.609529,264,535,887, 81,2014-01-16 23:03:44.221767,862,446,16, 90,2014-01-16 12:48:46.72065,353,987,974, 76,2014-01-13 13:01:36.170403,691,650,672, 90,2014-01-18 06:01:06.402536,152,145,223, 23,2014-01-20 12:37:21.904564,876,660,82, 90,2014-01-18 17:52:38.238717,488,79,446, 9,2014-01-16 08:45:28.110771,387,265,429, 23,2014-01-17 20:23:00.570089,166,380,960, 30,2014-01-20 00:37:51.698082,394,525,336, 23,2014-01-21 05:55:28.796818,573,495,790, 23,2014-01-14 10:32:43.43529,618,488,769, 81,2014-01-16 22:50:57.964524,393,469,34, 76,2014-01-13 18:33:29.595048,788,186,80, 23,2014-01-18 18:59:03.685316,471,217,709, 76,2014-01-11 14:03:55.200539,821,154,300, 30,2014-01-20 04:06:59.455745,582,884,812, 9,2014-01-18 22:47:01.495354,580,996,208, 90,2014-01-11 02:10:04.118734,97,821,995, 23,2014-01-12 00:00:06.664209,434,358,328, 9,2014-01-11 23:20:03.150542,275,418,665, 76,2014-01-15 01:11:50.061279,384,914,97, 23,2014-01-14 19:49:19.816709,381,865,443, 81,2014-01-11 06:40:25.239595,94,398,67, 23,2014-01-19 22:52:17.134272,25,20,862, 90,2014-01-20 01:04:53.22838,523,289,513, 90,2014-01-11 23:05:20.518955,651,498,849, 23,2014-01-11 00:42:46.148,359,325,910, 30,2014-01-14 17:45:22.560311,389,912,928, 30,2014-01-14 12:24:29.592756,729,569,30, 81,2014-01-21 04:58:21.227901,185,346,656, 80,2014-01-20 07:55:07.169841,574,68,951, 81,2014-01-20 15:28:13.814524,14,304,494, 76,2014-01-16 11:47:07.948552,139,835,784, 9,2014-01-12 14:56:43.340244,366,467,626, 23,2014-01-17 15:29:20.21706,135,108,144, 23,2014-01-15 18:24:29.853115,659,92,222, 90,2014-01-20 12:49:21.57896,343,52,590, 90,2014-01-12 18:14:48.32825,607,673,801, 23,2014-01-12 08:47:30.423623,262,886,742, 23,2014-01-19 07:27:13.277043,161,152,158, 80,2014-01-12 18:10:17.125068,601,977,162, 76,2014-01-15 23:20:13.858913,251,1,451, 30,2014-01-15 00:18:11.241985,382,766,272, 23,2014-01-15 16:43:46.720116,451,997,407, 80,2014-01-13 12:19:17.544228,254,408,914, 76,2014-01-15 18:22:06.442524,448,190,612, 80,2014-01-14 14:50:46.176279,380,456,948, 30,2014-01-11 05:06:19.039582,191,122,43, 81,2014-01-16 00:32:10.561258,54,769,195, 76,2014-01-15 18:50:52.840489,921,586,732, 23,2014-01-12 08:26:22.190124,375,672,349, 9,2014-01-20 05:44:00.622538,912,287,543, 90,2014-01-19 18:40:12.657617,497,728,158, 90,2014-01-12 19:06:47.430138,161,857,633, 76,2014-01-18 22:36:17.757228,439,385,9, 80,2014-01-18 13:45:05.885997,471,905,521, 
90,2014-01-13 22:12:07.949093,903,163,887, 90,2014-01-19 03:19:03.905228,341,818,769, 80,2014-01-12 01:30:28.446728,383,527,718, 76,2014-01-17 14:36:37.541849,75,985,409, 81,2014-01-19 02:17:25.133549,218,242,285, 30,2014-01-11 03:25:35.616569,317,752,122, 81,2014-01-17 00:04:51.356373,262,208,222, 9,2014-01-14 08:04:33.0821,794,763,860, 9,2014-01-12 22:22:18.957231,372,507,502, 81,2014-01-13 09:34:11.573852,111,707,272, 80,2014-01-19 06:29:02.935634,422,291,302, 76,2014-01-12 05:11:40.536193,896,52,499, 80,2014-01-15 07:48:59.902102,252,360,557, 90,2014-01-20 19:16:33.359257,405,668,507, 30,2014-01-20 16:38:53.813654,713,358,764, 81,2014-01-17 05:59:17.02717,761,57,175, 90,2014-01-15 12:36:47.21817,483,139,235, 23,2014-01-14 10:57:05.05564,853,364,614, 23,2014-01-11 16:43:03.747284,185,605,745, 81,2014-01-18 04:56:04.762397,540,581,45, 23,2014-01-19 09:19:11.498164,306,897,690, 90,2014-01-15 11:33:49.923564,362,361,470, 23,2014-01-18 14:02:23.802399,539,637,3, 76,2014-01-14 03:51:22.059841,987,975,540, 76,2014-01-20 10:24:42.764471,665,285,496, 23,2014-01-20 02:28:45.992522,998,729,886, 23,2014-01-13 03:35:22.682378,460,978,681, 23,2014-01-18 23:04:55.422089,892,921,737, 23,2014-01-11 15:35:33.42266,765,644,459, 23,2014-01-10 20:11:40.439606,622,140,452, 30,2014-01-16 06:50:01.308085,448,373,991, 81,2014-01-14 17:47:41.371753,265,848,322, 30,2014-01-19 03:30:44.345253,642,661,250, 90,2014-01-17 12:20:29.754813,122,84,567, 30,2014-01-11 02:24:18.913601,427,986,210, 80,2014-01-16 23:48:09.478802,49,575,936, 90,2014-01-17 19:46:05.371801,122,389,423, 23,2014-01-17 06:29:10.270393,631,452,362, 9,2014-01-20 11:52:42.560902,190,414,93, 30,2014-01-19 22:08:24.329032,201,231,247, 30,2014-01-19 20:03:21.844245,784,800,708, 9,2014-01-18 12:21:45.496536,728,9,530, 76,2014-01-10 21:20:04.865224,110,313,766, 81,2014-01-13 21:52:21.746347,85,337,882, 80,2014-01-18 01:38:18.855794,927,812,988, 30,2014-01-20 17:58:58.678878,862,367,505, 80,2014-01-20 07:51:38.773516,505,651,349, 76,2014-01-12 08:15:06.073963,25,234,89, 76,2014-01-13 22:56:03.734518,686,329,801, 9,2014-01-21 04:34:42.5208,992,28,831, 90,2014-01-19 17:11:10.836779,135,742,338, 23,2014-01-12 02:15:15.2331,648,925,173, 30,2014-01-15 10:08:32.444783,59,319,251, 30,2014-01-17 01:13:34.639177,809,338,166, 9,2014-01-15 10:06:37.29294,251,502,246, 90,2014-01-14 14:33:15.209254,356,730,45, 76,2014-01-15 21:42:20.6317,471,60,322, 9,2014-01-17 17:41:59.975318,797,45,701, 9,2014-01-12 07:38:10.631343,612,906,685, 22,2014-01-16 17:17:54.054779,546,241,737, 59,2014-01-17 17:53:40.415343,439,346,870, 72,2014-01-17 18:28:11.939428,704,560,778, 72,2014-01-20 15:05:35.426531,638,406,49, 59,2014-01-15 15:24:24.760596,175,400,432, 59,2014-01-14 00:48:41.69466,66,601,270, 22,2014-01-20 21:29:54.340552,45,302,512, 72,2014-01-11 09:12:34.239398,669,520,992, 72,2014-01-21 00:34:47.06646,800,758,685, 22,2014-01-16 21:59:04.610946,419,611,428, 59,2014-01-21 01:05:16.8003,112,332,792, 22,2014-01-19 16:43:11.395493,392,909,421, 22,2014-01-15 12:02:26.455191,631,124,242, 22,2014-01-18 07:27:02.297255,209,239,869, 22,2014-01-19 18:03:16.260717,921,832,567, 59,2014-01-18 13:54:48.201537,879,543,637, 59,2014-01-15 03:05:21.153049,676,538,419, 72,2014-01-19 06:02:14.940015,617,294,373, 72,2014-01-17 15:46:26.975053,905,895,433, 72,2014-01-16 15:20:27.227431,754,919,531, 59,2014-01-11 22:58:18.674532,74,94,155, 22,2014-01-17 14:21:09.495853,849,267,349, 59,2014-01-15 02:31:38.06421,107,643,654, 22,2014-01-13 05:13:33.907632,935,360,17, 72,2014-01-11 
18:29:41.940636,158,350,110, 59,2014-01-10 21:45:12.703388,244,446,725, 22,2014-01-17 19:20:11.200572,493,498,523, 72,2014-01-15 13:02:57.149891,965,795,319, 72,2014-01-15 23:27:33.335088,219,165,22, 59,2014-01-14 07:02:11.176308,621,330,115, 59,2014-01-17 00:41:07.781233,456,905,980, 72,2014-01-11 10:45:27.389867,201,587,195, 59,2014-01-21 04:55:51.591651,151,100,632, 59,2014-01-13 13:09:19.72108,59,893,626, 59,2014-01-21 05:51:02.816818,23,364,100, 22,2014-01-15 14:20:16.352247,366,989,833, 72,2014-01-16 17:58:01.415741,619,551,977, 22,2014-01-20 21:20:57.15737,668,681,692, 59,2014-01-16 03:32:50.591645,701,167,424, 72,2014-01-16 12:32:48.482201,520,950,499, 59,2014-01-16 13:20:01.768316,953,346,607, 59,2014-01-15 22:38:07.391945,731,340,92, 72,2014-01-14 23:15:59.877695,146,537,477, 22,2014-01-21 05:22:28.223506,901,859,757, 59,2014-01-13 00:05:09.689201,614,999,301, 59,2014-01-13 11:19:16.138411,184,397,523, 72,2014-01-18 13:17:16.425043,685,646,246, 22,2014-01-17 07:10:30.842669,340,187,453, 59,2014-01-11 11:21:31.078426,708,331,49, 22,2014-01-14 23:03:43.400096,924,504,171, 59,2014-01-12 16:30:58.0701,852,143,728, 72,2014-01-12 14:19:49.753377,679,564,640, 22,2014-01-11 07:24:52.895949,851,469,662, 22,2014-01-16 23:02:36.13431,788,861,867, 22,2014-01-14 23:33:23.661009,693,180,281, 72,2014-01-12 05:54:34.837005,498,133,701, 72,2014-01-17 00:47:48.837698,886,724,43, 22,2014-01-11 12:53:34.862,472,867,315, 72,2014-01-16 22:57:31.986896,349,505,813, 22,2014-01-11 18:15:22.172785,453,495,598, 59,2014-01-14 23:55:46.038308,629,581,399, 22,2014-01-12 17:38:39.768548,48,998,324, 72,2014-01-12 09:00:49.563072,517,669,787, 72,2014-01-14 22:51:37.629959,490,103,856, 59,2014-01-15 10:47:59.489628,300,836,597, 22,2014-01-12 08:51:52.37989,805,307,302, 59,2014-01-19 17:11:53.982206,389,965,745, 72,2014-01-10 22:46:00.90537,495,920,19, 72,2014-01-12 00:12:49.537259,63,292,695, 22,2014-01-14 14:44:44.573851,499,666,57, 22,2014-01-16 15:18:49.387571,148,494,493, 72,2014-01-17 17:32:51.305575,374,976,493, 22,2014-01-19 17:22:51.965797,278,896,571, 22,2014-01-20 18:34:49.265266,847,875,889, 72,2014-01-17 16:55:19.529081,287,151,284, 72,2014-01-11 11:28:01.655417,354,561,540, 72,2014-01-12 23:54:05.403677,430,529,592, 22,2014-01-15 00:12:35.954124,874,579,313, 72,2014-01-17 22:38:32.498085,472,582,162, 59,2014-01-13 15:15:36.482522,322,151,886, 72,2014-01-19 03:16:19.35422,413,226,215, 72,2014-01-19 19:09:30.568185,87,21,157, 72,2014-01-15 09:35:26.235899,852,294,642, 22,2014-01-19 14:41:12.250169,961,532,952, 22,2014-01-15 12:12:06.702496,704,314,212, 72,2014-01-19 13:08:49.896908,786,385,722, 59,2014-01-21 00:35:47.087174,980,705,236, 72,2014-01-11 06:59:55.540194,44,454,339, 59,2014-01-20 06:02:24.758907,578,514,979, 59,2014-01-16 17:33:19.074489,906,4,682, 59,2014-01-12 05:15:17.71298,778,704,696, 72,2014-01-13 23:58:10.797216,220,562,95, 22,2014-01-18 15:11:58.843036,432,490,125, 59,2014-01-13 18:16:07.276052,650,80,465, 59,2014-01-18 02:49:48.427176,291,860,977, 59,2014-01-12 19:59:58.332666,534,938,99, 59,2014-01-15 07:07:59.655942,366,688,665, 59,2014-01-16 14:01:42.409382,339,337,94, 72,2014-01-12 22:45:59.238034,805,654,310, 72,2014-01-16 11:20:49.193201,71,176,725, 22,2014-01-20 08:46:26.983233,965,573,60, 22,2014-01-18 18:04:48.625605,256,638,574, 22,2014-01-12 22:53:40.498776,215,523,547, 59,2014-01-18 20:09:18.949449,26,71,373, 59,2014-01-18 06:39:37.890871,232,390,246, 59,2014-01-19 19:49:00.027857,292,94,442, 59,2014-01-19 11:37:20.604866,521,702,103, 22,2014-01-20 
10:33:43.294548,881,492,359, 22,2014-01-13 14:01:35.981981,32,774,19, 59,2014-01-16 04:15:53.102951,894,524,208, 72,2014-01-12 19:49:19.77707,809,692,211, 59,2014-01-11 11:17:55.336201,947,227,757, 72,2014-01-14 17:25:23.671136,22,409,549, 22,2014-01-17 09:24:46.012969,522,311,628, 72,2014-01-20 05:59:07.58679,57,359,631, 72,2014-01-19 09:37:30.373631,67,805,256, 59,2014-01-15 16:33:35.910296,387,270,961, 22,2014-01-20 00:34:54.673964,505,185,40, 59,2014-01-19 20:37:25.913826,115,984,286, 72,2014-01-14 16:36:00.669203,54,457,696, 22,2014-01-15 12:08:13.748452,153,677,349, 72,2014-01-21 05:52:43.626805,151,231,16, 59,2014-01-17 20:34:11.466839,590,921,362, 22,2014-01-12 21:20:12.591489,173,154,834, 72,2014-01-13 18:08:50.903276,41,638,109, 59,2014-01-14 17:23:59.894014,715,463,922, 22,2014-01-14 21:20:10.924153,689,920,281, 22,2014-01-18 05:16:50.559218,48,649,826, 22,2014-01-20 11:25:42.303816,79,923,733, 59,2014-01-17 00:06:10.162188,478,82,449, 72,2014-01-13 10:37:39.752837,209,572,397, 22,2014-01-19 14:12:09.287049,545,868,734, 22,2014-01-14 12:10:58.787794,570,518,558, 22,2014-01-15 13:31:20.251614,77,149,425, 22,2014-01-17 04:21:28.236498,138,986,886, 22,2014-01-11 12:50:36.678664,295,146,339, 22,2014-01-14 03:20:20.27989,435,102,392, 22,2014-01-15 09:58:48.841364,242,913,974, 72,2014-01-21 03:24:19.97363,809,88,508, 22,2014-01-16 21:21:56.261871,802,765,654, 22,2014-01-20 18:14:41.944315,441,126,296, 59,2014-01-12 17:13:39.7507,687,301,401, 72,2014-01-17 12:39:51.598491,265,212,616, 72,2014-01-14 05:40:05.61545,641,499,700, 22,2014-01-19 06:38:25.764088,853,675,51, 59,2014-01-16 12:38:59.18528,94,504,106, 22,2014-01-12 09:17:35.989082,363,256,638, 59,2014-01-13 17:12:01.674383,117,559,74, 72,2014-01-15 07:13:53.859244,513,873,611, 22,2014-01-10 23:55:01.902907,589,124,185, 22,2014-01-17 13:48:02.343586,79,358,704, 22,2014-01-19 23:22:07.608115,677,536,768, 72,2014-01-10 23:47:45.530132,682,726,624, 72,2014-01-14 04:22:13.810424,510,449,660, 72,2014-01-11 14:42:20.199603,549,781,936, 72,2014-01-13 21:56:36.433408,653,771,99, 72,2014-01-18 01:46:13.704857,346,659,118, 72,2014-01-15 16:02:31.124175,881,330,971, 59,2014-01-10 21:13:26.992625,609,347,537, 59,2014-01-17 07:11:56.008673,769,62,703, 72,2014-01-11 10:08:41.286364,697,400,338, 22,2014-01-13 11:51:06.745463,660,481,700, 59,2014-01-15 15:24:05.295721,581,735,456, 22,2014-01-15 02:19:40.074576,305,834,640, 72,2014-01-18 05:22:26.997495,936,895,514, 22,2014-01-11 13:45:33.532639,49,902,28, 72,2014-01-15 19:10:16.75324,64,501,363, 22,2014-01-11 02:42:47.277384,265,547,423, 22,2014-01-16 03:44:22.374002,639,407,467, 59,2014-01-15 16:34:36.72687,186,109,53, 72,2014-01-17 04:04:43.539255,666,375,431, 59,2014-01-15 15:59:04.318735,866,851,862, 59,2014-01-17 13:48:16.47799,921,859,235, 59,2014-01-13 10:44:35.137746,529,25,161, 22,2014-01-19 01:39:09.934186,22,752,691, 22,2014-01-15 14:26:42.242077,699,21,120, 59,2014-01-19 03:23:34.323027,231,465,780, 59,2014-01-20 14:56:45.923687,76,313,137, 59,2014-01-18 11:38:43.91646,784,168,348, 59,2014-01-13 04:37:28.18269,129,605,855, 22,2014-01-20 18:51:47.826595,4,596,149, 72,2014-01-14 19:26:46.260047,246,953,283, 22,2014-01-11 21:59:35.790804,72,160,204, 59,2014-01-20 22:39:33.356727,260,771,844, 72,2014-01-18 03:49:00.07089,341,149,378, 59,2014-01-12 16:41:55.990406,968,366,510, 72,2014-01-13 14:36:09.790135,321,565,785, 72,2014-01-14 23:35:13.775747,39,580,678, 59,2014-01-17 12:44:27.114582,515,485,299, 72,2014-01-13 15:49:36.78318,560,540,130, 72,2014-01-11 00:47:09.78442,122,946,467, 
22,2014-01-18 02:53:08.401365,530,463,153, 22,2014-01-16 07:40:43.528643,537,478,821, 72,2014-01-15 20:11:15.080141,27,259,74, 59,2014-01-11 23:12:48.47594,240,178,437, 72,2014-01-13 07:03:10.526138,33,157,465, 22,2014-01-16 13:56:48.612779,661,200,868, 22,2014-01-16 22:23:05.229179,801,400,99, 22,2014-01-13 13:45:57.803522,646,473,327, 59,2014-01-11 11:41:10.987201,401,522,25, 59,2014-01-11 08:57:41.956469,153,343,789, 72,2014-01-19 21:50:41.343197,132,703,321, 72,2014-01-16 07:40:15.305936,988,365,607, 22,2014-01-18 02:45:58.434458,162,973,291, 72,2014-01-12 02:35:16.480943,202,784,443, 22,2014-01-14 03:19:25.240122,811,71,40, 22,2014-01-12 11:12:40.676535,372,978,155, 59,2014-01-20 09:58:50.804389,467,993,652, 59,2014-01-13 12:16:11.163809,447,131,666, 59,2014-01-20 02:51:24.592996,210,829,16, 22,2014-01-12 08:36:18.987078,442,711,27, 59,2014-01-13 01:07:58.990824,663,395,816, 59,2014-01-13 16:18:10.853042,645,161,347, 59,2014-01-13 10:35:54.777882,550,327,559, 72,2014-01-12 17:47:32.347551,749,407,962, 72,2014-01-21 00:07:10.923931,704,521,623, 59,2014-01-15 07:17:50.768289,989,532,738, 59,2014-01-15 12:23:42.138105,347,772,887, 59,2014-01-14 17:42:24.699679,254,14,265, 22,2014-01-11 14:02:17.88329,356,397,346, 59,2014-01-18 08:13:18.921285,180,458,467, 59,2014-01-14 22:29:34.484099,989,460,462, 59,2014-01-18 20:55:26.284654,640,890,563, 59,2014-01-13 09:54:02.449928,533,978,649, 22,2014-01-19 22:40:49.564658,846,545,692, 72,2014-01-20 00:08:14.760594,10,932,916, 72,2014-01-15 20:57:12.976484,734,197,962, 59,2014-01-15 06:37:38.177439,583,157,142, 72,2014-01-15 16:31:19.989773,212,425,659, 72,2014-01-18 14:43:10.780006,187,191,469, 22,2014-01-15 22:18:49.16464,367,129,932, 72,2014-01-16 05:29:01.946242,756,739,255, 22,2014-01-17 06:33:52.123203,17,889,987, 22,2014-01-10 23:59:04.470575,21,595,894, 59,2014-01-13 02:15:00.380699,192,51,75, 59,2014-01-18 13:09:08.604565,166,166,12, 59,2014-01-14 07:18:29.711117,377,930,598, 59,2014-01-14 17:27:41.057234,152,951,847, 72,2014-01-17 17:07:59.408953,95,677,907, 72,2014-01-16 23:34:40.874927,659,561,582, 72,2014-01-13 14:19:05.65023,246,670,398, 22,2014-01-19 05:44:18.396031,609,292,142, 22,2014-01-19 04:42:39.86575,557,461,122, 72,2014-01-16 10:37:16.503691,318,755,840, 72,2014-01-11 10:20:13.173914,61,948,776, 59,2014-01-21 02:30:12.213721,140,830,268, 22,2014-01-16 04:44:27.427623,300,160,121, 72,2014-01-15 21:38:03.942202,196,92,527, 72,2014-01-15 08:53:54.351826,186,790,65, 59,2014-01-20 02:26:52.127721,648,318,581, 72,2014-01-16 15:40:21.825492,510,659,700, 72,2014-01-12 11:07:13.27311,977,698,560, 72,2014-01-13 18:56:26.611819,677,735,855, 59,2014-01-14 06:35:48.110145,885,279,527, 59,2014-01-15 01:01:15.723039,769,764,189, 22,2014-01-12 11:37:16.176478,288,811,975, 72,2014-01-13 00:44:02.870739,675,404,723, 59,2014-01-20 01:58:28.699523,233,71,324, 22,2014-01-16 22:14:54.354335,287,955,170, 72,2014-01-17 21:15:22.86093,22,565,261, 22,2014-01-17 10:41:39.479948,482,444,486, 59,2014-01-11 14:33:43.518975,945,960,384, 22,2014-01-12 20:44:24.807172,268,428,548, 72,2014-01-13 11:15:31.603151,856,585,732, 59,2014-01-11 18:32:47.989969,799,566,106, 72,2014-01-15 02:59:25.18829,145,79,466, 29,2014-01-21 04:24:40.207716,943,165,456, 29,2014-01-15 05:51:17.701086,943,857,389, 69,2014-01-19 00:27:06.245524,650,708,450, 29,2014-01-17 15:32:39.616669,344,158,814, 29,2014-01-10 23:25:58.576013,421,368,91, 78,2014-01-11 08:46:11.896173,722,135,579, 69,2014-01-15 15:16:58.012701,231,134,960, 29,2014-01-19 08:08:38.442182,901,813,742, 11,2014-01-16 
23:23:28.399865,327,726,179, 12,2014-01-16 05:37:11.186614,195,516,639, 78,2014-01-19 04:38:50.655903,235,453,275, 12,2014-01-11 22:07:55.827486,48,871,333, 29,2014-01-10 21:15:15.129234,659,50,328, 69,2014-01-13 07:32:45.007728,656,457,412, 69,2014-01-21 04:34:47.955207,214,174,498, 29,2014-01-16 16:55:36.954726,144,349,677, 69,2014-01-14 22:39:58.28084,940,693,767, 11,2014-01-13 17:31:14.567446,827,654,284, 78,2014-01-20 03:31:25.06529,48,489,889, 29,2014-01-19 03:41:14.003878,854,67,858, 29,2014-01-15 09:08:30.743924,11,403,138, 78,2014-01-11 22:15:27.936028,724,268,796, 69,2014-01-17 23:39:42.70382,803,207,992, 12,2014-01-11 01:23:25.098259,844,836,920, 69,2014-01-18 23:30:50.796958,521,386,144, 78,2014-01-14 04:21:22.183768,949,767,69, 69,2014-01-11 19:57:08.617653,87,471,824, 78,2014-01-21 00:15:15.604549,565,857,290, 78,2014-01-16 19:36:53.78692,529,437,50, 69,2014-01-12 18:29:56.607623,442,342,393, 69,2014-01-14 21:14:40.792839,155,504,968, 78,2014-01-16 18:01:33.995054,782,165,944, 78,2014-01-17 04:21:14.308708,700,914,194, 29,2014-01-12 15:41:47.038364,414,304,764, 29,2014-01-13 03:34:13.611724,808,671,830, 78,2014-01-17 07:47:12.884722,305,630,992, 69,2014-01-13 04:27:58.934536,639,378,401, 12,2014-01-17 22:51:11.624844,175,197,445, 69,2014-01-15 09:55:51.326904,615,536,849, 11,2014-01-19 07:51:27.334401,688,840,773, 11,2014-01-12 22:28:22.811457,725,716,898, 69,2014-01-13 08:34:41.982806,166,474,894, 69,2014-01-20 09:59:23.162306,915,841,269, 11,2014-01-12 23:43:37.940692,786,787,549, 78,2014-01-15 20:07:26.990534,721,551,417, 69,2014-01-20 08:34:11.117515,10,249,470, 12,2014-01-18 20:39:14.895836,954,689,244, 78,2014-01-19 22:47:25.271374,791,261,925, 12,2014-01-12 20:05:25.684959,362,360,505, 69,2014-01-17 18:10:39.961127,341,52,360, 11,2014-01-17 20:28:39.275671,47,812,760, 69,2014-01-17 09:13:56.429303,68,734,193, 11,2014-01-18 20:26:07.897155,708,793,96, 69,2014-01-14 14:08:21.979491,837,664,323, 69,2014-01-17 14:37:21.527562,878,587,401, 78,2014-01-16 13:56:58.694532,17,107,55, 11,2014-01-17 22:29:44.163259,906,121,817, 12,2014-01-18 14:34:30.145216,234,731,426, 11,2014-01-16 08:12:14.299081,635,957,672, 69,2014-01-13 12:06:37.950178,881,976,799, 12,2014-01-20 13:04:26.752838,51,826,688, 12,2014-01-20 09:26:55.09192,496,427,63, 69,2014-01-19 10:08:11.945233,719,254,896, 11,2014-01-16 11:25:41.061547,796,965,158, 78,2014-01-11 19:08:42.130283,158,961,39, 11,2014-01-11 07:42:25.557376,252,72,709, 29,2014-01-12 13:12:53.946687,171,477,648, 11,2014-01-14 03:36:41.065239,479,273,579, 69,2014-01-18 10:33:37.182219,37,497,109, 12,2014-01-17 03:08:45.273591,533,993,475, 12,2014-01-12 05:28:08.39964,250,254,807, 29,2014-01-20 13:01:59.993676,751,916,708, 29,2014-01-19 15:43:27.256817,849,2,624, 78,2014-01-11 09:27:31.561947,791,345,625, 12,2014-01-12 06:45:37.934787,944,239,898, 69,2014-01-14 05:50:54.247351,732,607,201, 11,2014-01-20 22:01:42.679879,567,871,553, 11,2014-01-20 07:24:52.830624,115,6,752, 78,2014-01-12 22:38:19.519144,698,442,270, 12,2014-01-12 12:07:08.36484,9,236,270, 12,2014-01-16 19:35:32.791751,503,877,548, 78,2014-01-19 23:06:58.794815,807,731,905, 29,2014-01-19 01:21:04.794142,856,775,389, 12,2014-01-14 10:01:40.688906,358,68,976, 78,2014-01-13 07:15:20.774305,648,723,629, 78,2014-01-15 09:58:26.322124,802,257,171, 69,2014-01-20 03:58:39.383439,227,77,515, 69,2014-01-20 09:45:04.937565,52,238,24, 11,2014-01-12 18:32:56.467339,420,685,112, 11,2014-01-15 06:10:53.68252,918,109,843, 69,2014-01-12 15:51:42.887743,455,511,267, 11,2014-01-12 
01:37:23.220596,918,527,686, 69,2014-01-14 09:37:48.774441,306,537,957, 78,2014-01-21 05:59:54.833395,815,269,466, 12,2014-01-17 17:03:04.282143,806,423,829, 12,2014-01-15 08:46:30.905143,341,607,649, 29,2014-01-11 07:42:20.390771,557,354,419, 12,2014-01-19 10:15:58.22883,382,842,595, 78,2014-01-18 16:23:11.970382,796,700,857, 29,2014-01-18 22:15:57.57299,894,293,746, 69,2014-01-15 07:24:43.50284,831,873,297, 29,2014-01-20 01:51:20.370022,936,192,639, 29,2014-01-18 05:17:57.566666,833,435,188, 12,2014-01-13 17:08:10.759656,494,807,809, 69,2014-01-20 15:18:51.932387,636,701,808, 12,2014-01-19 16:03:35.501453,106,136,952, 69,2014-01-17 02:59:05.007007,206,346,732, 11,2014-01-20 07:20:34.612267,718,245,786, 69,2014-01-18 17:28:28.332077,862,365,806, 11,2014-01-19 05:37:24.526151,207,348,973, 12,2014-01-11 13:27:42.977106,42,905,923, 78,2014-01-14 07:04:01.123828,281,611,351, 11,2014-01-17 22:44:23.320966,295,683,464, 12,2014-01-19 18:48:47.771668,305,753,563, 69,2014-01-17 21:05:41.813153,439,103,326, 11,2014-01-20 09:59:44.095272,879,338,662, 12,2014-01-13 22:47:14.093791,996,123,841, 29,2014-01-16 19:04:21.196592,675,907,222, 12,2014-01-19 13:44:49.032836,629,529,735, 29,2014-01-15 21:20:10.56113,312,601,143, 11,2014-01-21 05:15:14.879531,459,545,80, 69,2014-01-10 23:36:31.920998,691,949,62, 78,2014-01-17 02:57:33.781726,521,19,505, 78,2014-01-14 08:53:03.653972,495,598,445, 11,2014-01-10 23:36:26.754393,298,593,754, 11,2014-01-13 14:00:38.064288,144,322,154, 29,2014-01-18 21:39:34.559115,650,140,224, 29,2014-01-11 11:18:47.145164,40,317,694, 78,2014-01-11 18:16:36.293118,899,92,123, 78,2014-01-16 08:02:46.529497,467,377,200, 12,2014-01-19 13:34:44.718153,148,985,583, 12,2014-01-16 05:41:19.795957,196,554,944, 69,2014-01-15 03:54:06.899937,957,704,860, 69,2014-01-16 12:52:42.284819,74,447,485, 78,2014-01-19 02:49:30.555614,599,192,351, 78,2014-01-14 13:12:58.832324,961,661,510, 12,2014-01-14 22:56:17.786691,979,128,879, 29,2014-01-14 23:48:35.56304,21,266,0, 12,2014-01-13 14:33:33.444591,198,773,353, 29,2014-01-12 10:24:46.118768,947,534,573, 12,2014-01-12 23:26:00.089191,81,395,359, 78,2014-01-14 08:01:16.421697,217,873,505, 78,2014-01-15 21:28:47.243016,753,317,688, 69,2014-01-20 02:10:23.410576,222,754,461, 29,2014-01-12 20:50:04.193365,86,680,130, 69,2014-01-12 12:34:29.056168,652,31,891, 69,2014-01-19 06:10:07.505848,788,410,937, 29,2014-01-15 23:37:18.287155,164,794,706, 29,2014-01-18 11:38:50.25276,857,813,681, 78,2014-01-17 13:54:56.539103,640,131,637, 78,2014-01-21 00:57:28.848704,793,874,599, 11,2014-01-18 10:54:05.132291,622,552,995, 12,2014-01-17 17:31:28.460101,859,534,168, 29,2014-01-16 21:55:02.63043,311,769,266, 78,2014-01-11 13:47:08.786681,576,874,827, 69,2014-01-17 21:07:55.214495,778,156,599, 11,2014-01-19 15:55:40.694718,26,145,677, 69,2014-01-19 15:26:43.345796,707,476,61, 78,2014-01-18 12:26:42.359658,808,799,422, 78,2014-01-20 14:12:16.988254,985,744,979, 11,2014-01-14 17:29:29.875711,288,445,660, 29,2014-01-16 20:01:27.077811,546,232,808, 29,2014-01-15 13:53:36.784211,622,325,562, 69,2014-01-19 01:23:36.775649,926,764,137, 78,2014-01-12 02:54:09.362631,160,549,755, 11,2014-01-13 10:43:07.340245,778,431,388, 78,2014-01-12 08:36:35.607973,504,823,699, 78,2014-01-16 05:50:27.149322,898,537,397, 29,2014-01-17 14:31:42.903284,184,538,109, 69,2014-01-15 03:10:09.052564,564,709,849, 12,2014-01-17 20:15:13.268509,758,58,773, 29,2014-01-19 17:57:42.992475,379,499,451, 12,2014-01-18 15:11:25.47426,517,340,716, 12,2014-01-12 11:44:00.511524,878,342,570, 12,2014-01-18 
14:08:06.403051,377,481,696, 29,2014-01-20 16:01:29.668045,835,936,490, 29,2014-01-14 04:18:29.567693,33,292,704, 11,2014-01-16 14:18:13.909319,824,131,659, 12,2014-01-15 09:38:47.9552,25,924,696, 78,2014-01-11 09:57:19.820452,348,883,633, 69,2014-01-12 22:13:10.448422,231,306,84, 11,2014-01-15 04:36:16.803904,847,859,763, 29,2014-01-19 00:51:24.953162,512,731,442, 69,2014-01-19 19:44:38.908523,498,290,884, 69,2014-01-10 20:31:19.434753,748,858,50, 29,2014-01-19 18:38:33.739844,19,47,847, 11,2014-01-16 10:52:34.123018,860,804,438, 11,2014-01-19 16:27:00.129471,553,262,449, 29,2014-01-18 04:05:17.085641,822,344,218, 69,2014-01-13 17:19:16.482676,399,848,764, 11,2014-01-19 00:39:17.117725,343,627,254, 12,2014-01-11 15:34:46.961352,53,489,945, 29,2014-01-19 17:20:43.560489,141,997,263, 12,2014-01-13 08:32:53.902356,760,822,696, 11,2014-01-19 20:58:23.737,335,115,348, 11,2014-01-21 00:14:52.923119,807,327,271, 12,2014-01-15 23:16:01.2426,650,100,979, 78,2014-01-10 23:34:59.344973,832,844,280, 11,2014-01-16 00:05:20.07286,640,724,412, 11,2014-01-12 07:47:44.145884,650,843,679, 69,2014-01-15 06:45:08.397537,914,337,298, 78,2014-01-12 14:20:33.341369,97,432,339, 12,2014-01-21 05:45:27.138359,883,81,467, 29,2014-01-12 15:56:33.872217,509,52,686, 78,2014-01-14 06:04:33.852894,599,976,92, 12,2014-01-18 13:53:33.54183,939,771,518, 78,2014-01-12 01:58:03.540261,375,781,377, 12,2014-01-17 14:23:03.420585,837,86,937, 29,2014-01-13 22:11:47.451148,737,419,539, 69,2014-01-16 15:36:51.495461,165,907,695, 78,2014-01-18 04:20:23.241039,65,572,183, 29,2014-01-16 00:24:57.89957,534,700,483, 12,2014-01-21 00:13:08.299784,785,144,401, 78,2014-01-15 23:11:48.194201,347,607,305, 78,2014-01-14 14:09:36.808093,440,783,638, 11,2014-01-21 00:44:27.734538,738,114,355, 12,2014-01-14 11:50:21.934045,98,848,567, 29,2014-01-20 05:02:10.931111,168,256,661, 12,2014-01-19 11:11:27.864008,801,33,560, 11,2014-01-11 09:55:39.020104,158,606,564, 12,2014-01-12 16:21:27.413787,485,95,998, 69,2014-01-17 05:50:44.982153,364,657,850, 69,2014-01-12 05:30:25.981455,939,609,214, 78,2014-01-11 03:42:10.974276,648,676,408, 12,2014-01-19 18:23:38.884508,92,385,215, 78,2014-01-10 20:28:49.718875,20,904,449, 78,2014-01-10 21:57:03.897395,609,768,873, 69,2014-01-14 11:39:40.127108,106,831,85, 29,2014-01-11 00:03:49.063848,464,53,217, 12,2014-01-16 02:02:23.970254,211,192,69, 29,2014-01-15 23:27:24.272993,495,88,104, 69,2014-01-15 10:48:57.461804,427,189,619, 69,2014-01-17 20:22:57.311624,940,208,918, 12,2014-01-15 23:12:51.411353,914,350,791, 69,2014-01-17 06:45:31.334022,647,796,751, 12,2014-01-10 20:27:31.164517,795,442,354, 11,2014-01-13 07:06:24.953182,547,885,579, 12,2014-01-18 12:43:34.874283,147,12,665, 29,2014-01-17 14:50:34.585103,473,577,945, 78,2014-01-16 09:18:12.404331,891,23,994, 29,2014-01-13 22:20:26.369745,288,642,565, 69,2014-01-14 13:10:57.82656,604,955,467, 69,2014-01-11 03:43:10.303901,645,420,738, 11,2014-01-13 16:33:34.669529,575,951,636, 12,2014-01-19 16:22:46.02076,473,939,743, 11,2014-01-14 21:52:47.111994,687,993,987, 12,2014-01-13 11:18:02.404067,172,826,751, 29,2014-01-12 22:13:07.955225,298,411,696, 78,2014-01-13 20:54:58.043105,291,724,696, 12,2014-01-11 16:29:30.268494,86,171,875, 69,2014-01-13 12:08:46.975328,894,506,599, 78,2014-01-15 17:16:25.456893,387,415,149, 78,2014-01-18 02:20:15.250647,59,479,183, 78,2014-01-14 21:39:12.956784,143,637,955, 69,2014-01-16 00:58:36.431168,474,199,599, 29,2014-01-16 14:43:54.135156,123,146,750, 11,2014-01-14 22:08:02.675659,219,193,284, 78,2014-01-16 
02:55:40.328981,675,782,14, 29,2014-01-20 06:23:34.262265,609,282,637, 11,2014-01-15 02:11:51.739927,478,373,747, 69,2014-01-10 22:58:04.299236,832,605,989, 29,2014-01-14 23:50:58.535257,986,948,945, 69,2014-01-19 17:00:49.201731,838,35,271, 78,2014-01-17 23:21:01.611279,230,46,571, 29,2014-01-20 03:03:49.94661,424,4,108, 12,2014-01-15 17:46:20.535753,284,904,309, 78,2014-01-17 23:48:32.775796,398,393,273, 69,2014-01-12 04:10:14.900212,644,291,584, 12,2014-01-13 00:29:55.410036,785,420,341, 29,2014-01-14 08:39:07.361318,172,981,758, 11,2014-01-17 17:28:27.304543,587,347,865, 12,2014-01-16 02:50:21.779781,986,200,212, 78,2014-01-18 01:50:05.187877,966,117,965, 29,2014-01-18 01:11:37.608863,673,478,948, 11,2014-01-18 23:23:56.449729,686,27,27, 78,2014-01-16 12:12:51.208638,929,769,350, 69,2014-01-11 17:04:24.720857,611,168,738, 78,2014-01-11 04:41:58.853796,184,601,976, 29,2014-01-18 14:25:59.163862,858,134,176, 12,2014-01-14 17:59:22.764381,671,734,157, 69,2014-01-12 01:11:29.12229,916,593,904, 11,2014-01-10 20:34:46.139191,991,571,720, 29,2014-01-19 15:15:48.221273,742,936,260, 12,2014-01-19 07:31:44.372936,952,569,814, 12,2014-01-14 22:13:59.096394,52,231,825, 11,2014-01-14 10:14:24.65286,966,540,462, 69,2014-01-14 16:15:38.508093,619,164,628, 78,2014-01-19 00:22:01.772053,603,695,444, 11,2014-01-19 17:10:04.981843,978,41,635, 29,2014-01-13 16:39:12.770357,891,677,70, 11,2014-01-12 20:33:53.51198,517,855,421, 78,2014-01-19 20:08:09.281078,992,937,7, 78,2014-01-17 20:30:11.305615,582,632,242, 78,2014-01-11 07:34:42.713711,341,414,319, 11,2014-01-16 13:29:10.892357,266,9,82, 11,2014-01-16 17:34:01.252645,744,824,835, 11,2014-01-16 05:21:03.249465,370,589,567, 69,2014-01-13 07:17:43.668572,251,215,453, 11,2014-01-18 01:44:16.152856,730,917,644, 69,2014-01-18 09:50:58.659501,692,748,449, 11,2014-01-16 19:56:51.029889,309,301,729, 11,2014-01-14 13:12:43.457399,601,619,63, 29,2014-01-13 06:41:20.439701,340,936,238, 78,2014-01-13 15:46:56.217767,673,891,145, 69,2014-01-11 08:24:21.066262,817,518,462, 12,2014-01-11 00:05:16.88943,28,758,90, 69,2014-01-19 07:59:47.426404,310,398,838, 69,2014-01-12 05:28:45.787538,571,0,426, 78,2014-01-11 08:47:15.743225,808,604,708, 29,2014-01-16 16:25:46.590685,918,56,976, 78,2014-01-16 03:28:08.551919,138,349,51, 69,2014-01-12 13:58:44.865934,968,95,556, 78,2014-01-16 17:00:32.729877,567,156,361, 12,2014-01-14 12:43:56.773192,188,682,988, 29,2014-01-21 01:30:29.238871,825,868,506, 69,2014-01-20 19:14:31.826271,633,825,509, 78,2014-01-18 02:58:21.426053,741,801,778, 69,2014-01-14 11:46:07.747383,317,523,971, 12,2014-01-18 13:36:33.598744,579,20,219, 11,2014-01-16 14:08:26.407895,953,32,197, 78,2014-01-17 08:25:20.51774,843,900,849, 11,2014-01-20 14:10:27.110724,841,456,124, 29,2014-01-15 04:16:35.689393,485,682,455, 11,2014-01-13 22:55:31.823774,666,635,102, 11,2014-01-21 01:45:09.824854,708,278,672, 29,2014-01-20 21:45:46.58175,848,383,925, 78,2014-01-19 20:29:33.076419,946,99,477, 11,2014-01-16 01:06:13.074319,9,545,448, 12,2014-01-12 23:03:30.250322,5,343,766, 12,2014-01-16 16:13:49.229275,624,776,541, 78,2014-01-13 04:57:11.73424,352,836,853, 29,2014-01-18 23:00:21.28021,388,687,448, 78,2014-01-20 09:26:32.686674,423,307,578, 12,2014-01-15 15:38:32.173941,64,576,487, 11,2014-01-11 08:47:17.497977,699,310,537, 12,2014-01-20 21:50:53.753355,100,38,273, 12,2014-01-15 19:43:49.06337,198,316,403, 69,2014-01-19 20:47:04.9248,286,448,463, 69,2014-01-11 21:19:39.540893,234,954,311, 12,2014-01-16 08:31:04.806596,138,313,770, 29,2014-01-15 07:12:51.515486,33,523,164, 
11,2014-01-17 04:47:48.092811,643,323,149, 78,2014-01-18 02:29:49.67253,716,907,461, 11,2014-01-21 04:13:24.245363,178,429,116, 29,2014-01-20 21:31:44.866004,958,178,427, 69,2014-01-17 22:00:18.91182,587,500,893, 69,2014-01-20 17:27:56.072053,670,796,156, 69,2014-01-17 18:30:06.292476,480,881,374, 29,2014-01-11 03:46:26.659203,774,364,887, 69,2014-01-18 01:04:29.670797,301,90,978, 11,2014-01-13 02:38:32.700371,460,43,183, 78,2014-01-17 16:11:47.176943,511,182,688, 11,2014-01-17 09:14:56.78152,818,54,262, 69,2014-01-17 10:55:08.389764,296,139,570, 69,2014-01-20 19:07:19.000717,558,109,365, 78,2014-01-17 05:00:06.606375,977,827,821, 11,2014-01-17 02:40:54.971932,186,521,792, 12,2014-01-19 09:36:52.077135,197,709,129, 69,2014-01-12 00:06:19.681113,925,496,691, 11,2014-01-19 05:44:25.222253,395,628,540, 29,2014-01-14 19:50:41.30641,767,871,944, 78,2014-01-14 09:03:31.415352,530,410,552, 11,2014-01-16 22:44:46.502883,78,585,995, 69,2014-01-13 23:17:13.993503,704,550,828, 69,2014-01-19 04:42:03.589293,944,396,705, 12,2014-01-17 11:32:04.000859,568,68,396, 12,2014-01-13 15:08:07.746858,856,637,210, 11,2014-01-13 18:25:52.652663,854,216,865, 29,2014-01-16 02:19:08.925659,251,786,772, 12,2014-01-14 16:27:47.287751,768,538,802, 78,2014-01-19 06:56:57.459258,769,141,74, 69,2014-01-20 13:32:00.441146,613,101,229, 78,2014-01-21 01:15:35.380564,512,684,549, 12,2014-01-16 03:26:47.132207,960,808,373, 29,2014-01-20 11:45:24.686928,322,548,207, 69,2014-01-20 16:47:20.246567,738,704,957, 12,2014-01-12 19:27:06.044027,643,415,203, 78,2014-01-19 23:13:20.75898,855,962,1, 12,2014-01-17 05:17:26.539043,137,319,396, 78,2014-01-13 03:13:32.703229,143,422,816, 29,2014-01-16 18:17:50.429777,887,17,74, 69,2014-01-19 11:55:59.239832,667,429,720, 11,2014-01-19 23:25:19.880172,320,659,846, 11,2014-01-12 21:32:47.211297,719,270,581, 69,2014-01-15 16:51:07.629596,20,362,800, 69,2014-01-19 12:32:38.881308,991,373,156, 78,2014-01-19 06:32:53.818091,240,794,394, 29,2014-01-11 13:32:02.601528,515,484,981, 78,2014-01-17 16:09:30.958444,311,290,688, 78,2014-01-20 10:39:13.499204,505,666,45, 11,2014-01-19 23:16:27.8242,611,40,397, 78,2014-01-11 06:00:12.265273,891,483,267, 12,2014-01-13 13:42:44.914556,352,414,111, 11,2014-01-15 16:01:14.327083,457,525,6, 29,2014-01-14 09:17:26.258775,189,838,958, 11,2014-01-11 12:24:48.503849,857,334,323, 11,2014-01-11 21:33:18.327943,870,390,62, 29,2014-01-17 04:25:34.005633,445,440,663, 78,2014-01-14 10:50:41.156511,46,576,610, 12,2014-01-17 03:52:27.254022,146,76,538, 69,2014-01-21 00:53:21.293385,601,508,53, 78,2014-01-12 11:47:38.61577,44,143,495, 29,2014-01-16 11:24:27.695168,708,490,941, 29,2014-01-20 20:08:56.673948,528,305,222, 69,2014-01-17 19:14:25.747976,928,225,232, 78,2014-01-15 17:09:52.382095,124,425,131, 29,2014-01-20 06:56:16.920516,237,707,827, 78,2014-01-19 18:41:31.792003,80,299,775, 29,2014-01-14 10:23:13.141076,657,954,665, 29,2014-01-16 06:13:43.459977,229,505,939, 11,2014-01-11 15:55:04.495232,205,387,939, 29,2014-01-20 08:41:03.570852,653,678,708, 11,2014-01-14 12:09:42.69981,858,745,48, 29,2014-01-20 19:20:24.375824,298,345,923, 11,2014-01-12 00:13:50.782569,828,716,676, 11,2014-01-19 09:00:50.329406,145,981,727, 12,2014-01-19 01:53:03.257132,226,487,166, 11,2014-01-20 10:46:44.600659,107,843,95, 69,2014-01-20 02:32:52.931353,915,407,841, 11,2014-01-15 12:02:34.215575,313,324,415, 29,2014-01-19 15:25:58.099864,488,940,59, 12,2014-01-18 19:49:20.755553,848,776,211, 12,2014-01-15 22:02:46.480848,498,494,61, 12,2014-01-11 23:08:43.01442,118,440,450, 69,2014-01-13 
05:50:35.082635,365,974,649, 11,2014-01-19 11:20:12.739623,996,141,705, 69,2014-01-12 15:33:31.518269,180,254,539, 29,2014-01-14 07:23:53.410578,772,3,456, 11,2014-01-15 09:45:46.745257,259,130,796, 12,2014-01-16 06:24:12.67478,975,33,694, 12,2014-01-20 15:16:20.6646,898,668,250, 11,2014-01-15 04:39:08.038642,794,219,971, 78,2014-01-17 22:11:51.29055,865,798,432, 11,2014-01-15 20:40:48.360186,546,554,349, 12,2014-01-14 18:48:04.712591,97,239,676, 29,2014-01-14 11:26:17.038945,887,968,372, 69,2014-01-20 17:50:40.742281,305,402,975, 12,2014-01-13 19:44:21.633525,446,763,636, 69,2014-01-13 00:07:48.830948,429,794,163, 69,2014-01-13 22:13:53.883776,72,45,18, 78,2014-01-19 05:58:05.093502,213,376,322, 12,2014-01-13 20:02:53.3266,370,755,177, 29,2014-01-13 00:54:57.454628,859,402,84, 11,2014-01-12 12:07:47.793311,274,900,859, 69,2014-01-13 09:23:17.702424,726,535,100, 12,2014-01-14 05:08:48.237197,334,319,805, 69,2014-01-21 01:08:38.122718,163,341,544, 11,2014-01-11 05:16:20.959555,218,770,192, 69,2014-01-13 09:55:32.837857,13,681,805, 78,2014-01-19 21:41:31.054071,308,746,275, 78,2014-01-15 21:18:55.175131,36,289,6, 69,2014-01-11 19:21:30.937721,845,338,465, 12,2014-01-17 11:30:51.809623,510,40,277, 12,2014-01-20 23:21:41.656398,686,723,901, 12,2014-01-12 22:30:13.952141,608,22,956, 78,2014-01-19 21:21:26.892258,867,784,112, 12,2014-01-19 04:41:54.396021,483,340,732, 29,2014-01-14 18:03:45.470409,656,598,149, 78,2014-01-12 22:45:20.303256,209,190,72, 29,2014-01-13 08:27:41.141697,773,573,389, 12,2014-01-20 04:27:58.14519,754,866,223, 78,2014-01-12 08:01:40.967856,447,556,572, 11,2014-01-17 17:06:49.18034,612,695,634, 69,2014-01-16 20:39:49.436157,105,801,344, 69,2014-01-17 08:42:29.328042,154,244,655, 11,2014-01-11 05:54:53.893349,777,659,974, 29,2014-01-20 12:06:06.475103,787,225,801, 69,2014-01-16 20:33:10.070742,880,629,730, 11,2014-01-14 05:39:15.526874,347,671,858, 78,2014-01-12 06:13:55.306051,143,539,620, 29,2014-01-19 22:47:03.954518,776,823,847, 11,2014-01-12 05:37:20.620375,928,146,837, 78,2014-01-15 06:16:48.63265,253,160,164, 69,2014-01-11 17:42:01.409146,832,383,653, 11,2014-01-13 21:45:08.413687,430,811,162, 69,2014-01-17 19:40:06.335074,129,693,552, 29,2014-01-15 02:50:49.646762,859,947,896, 11,2014-01-13 16:53:46.536405,958,129,961, 78,2014-01-18 04:56:27.294629,197,201,165, 12,2014-01-17 16:46:22.48462,679,599,111, 11,2014-01-12 08:35:17.590895,112,248,338, 78,2014-01-12 20:15:22.470179,235,751,925, 78,2014-01-18 16:07:53.422759,647,521,176, 12,2014-01-19 00:06:09.400518,162,729,123, 29,2014-01-12 13:37:04.126577,939,415,469, 69,2014-01-20 18:38:07.3749,992,34,551, 29,2014-01-17 15:27:36.293196,352,500,64, 69,2014-01-20 22:18:58.522598,721,677,708, 12,2014-01-14 06:41:52.845728,458,595,694, 69,2014-01-19 18:12:56.596451,503,673,890, 29,2014-01-13 00:46:39.664296,131,939,849, 78,2014-01-13 05:09:50.990917,520,436,140, 69,2014-01-10 20:14:37.564307,59,495,750, 12,2014-01-19 21:53:28.845054,554,470,704, 12,2014-01-19 05:49:40.427075,684,747,566, 78,2014-01-17 08:57:06.892768,325,862,895, 69,2014-01-20 07:48:22.738403,101,727,565, 78,2014-01-18 11:55:46.902177,300,619,836, 12,2014-01-12 23:30:16.96351,66,643,443, 11,2014-01-13 07:27:38.265276,43,1,534, 18,2014-01-19 22:09:42.208647,799,194,608, 75,2014-01-11 16:17:20.918028,31,646,972, 75,2014-01-14 17:04:58.885652,501,201,740, 18,2014-01-13 22:26:30.841298,750,897,677, 75,2014-01-12 13:59:22.327174,245,106,497, 75,2014-01-17 18:50:07.299339,307,237,958, 68,2014-01-20 22:06:37.176791,98,723,657, 18,2014-01-16 
20:50:11.973936,248,359,299, 44,2014-01-20 15:43:53.835744,876,939,508, 68,2014-01-17 21:03:04.47142,522,817,959, 75,2014-01-13 07:36:34.458975,799,405,488, 68,2014-01-11 18:19:11.426639,225,58,779, 44,2014-01-19 21:18:26.941599,156,444,32, 75,2014-01-21 03:44:27.881734,640,229,817, 18,2014-01-19 22:25:20.827575,801,963,28, 67,2014-01-11 04:55:31.068176,294,859,633, 44,2014-01-20 16:22:35.256633,286,545,998, 44,2014-01-16 07:52:57.120771,216,552,464, 18,2014-01-10 21:14:29.591193,232,299,625, 44,2014-01-13 17:04:28.102361,276,804,421, 18,2014-01-14 20:05:53.717223,556,961,461, 44,2014-01-13 02:01:09.255489,320,478,101, 67,2014-01-16 02:14:19.093697,952,992,751, 67,2014-01-14 20:20:31.28153,482,760,220, 44,2014-01-11 17:54:38.100543,426,157,250, 18,2014-01-14 02:03:59.520772,558,865,623, 68,2014-01-10 23:17:38.174298,596,296,591, 44,2014-01-21 05:43:00.838945,682,641,448, 75,2014-01-11 07:59:46.423369,323,440,974, 44,2014-01-13 02:47:55.137808,215,58,908, 67,2014-01-13 07:10:39.104222,171,619,900, 75,2014-01-20 10:09:28.632016,472,421,878, 67,2014-01-13 23:05:16.055836,43,938,23, 67,2014-01-17 04:15:37.989873,347,327,505, 75,2014-01-13 02:35:59.473314,546,251,48, 68,2014-01-15 17:04:38.383429,918,31,57, 75,2014-01-13 17:05:45.289212,11,682,963, 44,2014-01-12 18:42:36.650103,717,726,444, 68,2014-01-11 07:54:50.357366,315,466,119, 67,2014-01-13 02:49:39.125375,101,484,563, 44,2014-01-19 19:45:41.121942,319,898,932, 67,2014-01-13 19:31:24.816341,488,945,219, 18,2014-01-14 01:08:50.552014,402,619,500, 18,2014-01-18 11:04:08.063541,298,76,497, 67,2014-01-13 17:15:52.698075,976,516,34, 18,2014-01-12 17:34:11.37959,148,323,490, 75,2014-01-18 19:59:39.132135,716,90,365, 68,2014-01-13 03:38:27.955127,959,665,737, 44,2014-01-18 05:27:08.50036,287,958,575, 75,2014-01-18 21:14:08.723328,23,943,721, 18,2014-01-16 00:42:56.057489,688,257,930, 67,2014-01-11 19:33:02.217582,702,17,901, 68,2014-01-21 03:15:17.978817,295,814,600, 67,2014-01-10 20:57:15.151185,320,945,245, 68,2014-01-15 19:53:33.499531,81,370,912, 18,2014-01-11 15:09:56.079778,511,476,190, 68,2014-01-14 03:01:14.672377,586,312,40, 75,2014-01-15 23:11:11.673829,772,824,884, 67,2014-01-11 14:52:56.918723,244,524,495, 18,2014-01-14 15:01:01.095746,755,748,214, 68,2014-01-18 05:59:06.812056,631,268,803, 44,2014-01-14 02:03:36.023364,682,226,521, 44,2014-01-13 19:10:29.727761,599,867,101, 18,2014-01-10 23:04:22.867893,810,456,443, 18,2014-01-20 10:19:14.013237,41,116,36, 44,2014-01-16 01:46:29.201075,994,478,771, 18,2014-01-15 20:09:01.251322,607,461,147, 67,2014-01-12 21:24:59.302868,673,105,649, 67,2014-01-18 00:29:05.851598,63,138,644, 68,2014-01-16 08:03:51.609107,285,793,516, 75,2014-01-15 04:14:38.428244,352,320,154, 44,2014-01-16 14:14:46.973541,891,474,668, 18,2014-01-19 07:35:16.425448,647,560,709, 44,2014-01-18 09:23:28.980257,888,466,39, 44,2014-01-13 19:18:55.037081,136,367,406, 68,2014-01-11 18:51:09.123942,53,926,271, 68,2014-01-20 06:57:40.360266,487,438,379, 75,2014-01-11 09:18:34.169217,281,336,999, 18,2014-01-14 02:29:37.079069,961,874,528, 44,2014-01-17 06:24:48.860626,533,71,378, 44,2014-01-19 10:32:42.892545,136,763,780, 44,2014-01-19 07:12:33.136557,614,36,580, 18,2014-01-18 05:57:51.078628,24,212,371, 75,2014-01-19 07:48:00.871781,719,857,741, 44,2014-01-19 08:09:48.288162,976,439,994, 44,2014-01-12 19:51:24.578159,270,618,506, 18,2014-01-20 02:57:56.951559,387,66,554, 44,2014-01-12 05:11:02.96054,638,204,222, 44,2014-01-17 23:02:36.251989,307,660,895, 44,2014-01-20 21:50:53.870282,448,465,471, 44,2014-01-16 
00:12:04.056285,540,685,536, 18,2014-01-14 23:01:43.064046,644,460,521, 67,2014-01-13 17:54:29.893647,650,316,678, 75,2014-01-18 23:22:33.784047,357,338,287, 67,2014-01-15 02:06:05.932357,839,882,361, 18,2014-01-12 22:13:43.907303,95,162,756, 44,2014-01-13 19:09:02.985122,617,607,269, 75,2014-01-20 02:15:07.18368,388,569,342, 68,2014-01-14 23:38:43.210171,687,481,714, 75,2014-01-20 23:38:08.83672,491,78,199, 44,2014-01-15 04:18:58.792787,312,191,260, 18,2014-01-19 07:53:21.638415,405,397,400, 44,2014-01-16 07:52:55.81026,356,891,259, 44,2014-01-13 05:54:15.218654,611,838,296, 18,2014-01-16 11:16:50.618672,424,585,142, 44,2014-01-19 07:11:50.84776,394,606,626, 75,2014-01-14 04:45:24.342596,702,717,488, 67,2014-01-15 12:14:30.978938,555,500,555, 18,2014-01-19 20:30:25.016977,438,685,279, 18,2014-01-17 11:15:01.421665,798,59,895, 68,2014-01-11 12:39:19.839983,782,938,390, 75,2014-01-18 01:03:07.909941,814,337,459, 44,2014-01-15 12:27:34.558222,668,901,575, 44,2014-01-18 22:37:10.91861,850,91,965, 68,2014-01-16 02:51:08.781722,398,128,329, 67,2014-01-13 14:37:22.846384,382,202,669, 67,2014-01-20 22:28:35.49677,113,847,656, 68,2014-01-14 23:49:05.733281,221,563,444, 68,2014-01-14 23:48:25.806924,383,219,932, 68,2014-01-17 15:31:11.748759,77,633,369, 67,2014-01-14 15:39:59.603564,146,641,666, 44,2014-01-20 04:00:29.863209,495,901,118, 68,2014-01-11 08:32:54.813224,754,288,305, 75,2014-01-17 13:34:29.497629,20,650,675, 68,2014-01-17 21:23:03.647256,659,455,942, 75,2014-01-15 14:39:00.745581,778,137,115, 18,2014-01-19 15:48:13.404932,238,730,84, 68,2014-01-20 20:32:06.632797,274,295,934, 75,2014-01-14 10:54:07.929261,573,93,985, 75,2014-01-13 09:26:56.615103,464,414,543, 68,2014-01-20 14:10:15.469518,310,514,735, 44,2014-01-18 19:13:06.722468,856,833,864, 44,2014-01-11 11:20:18.253518,950,606,885, 68,2014-01-15 16:03:11.280197,263,965,8, 75,2014-01-21 05:07:21.941122,686,709,556, 18,2014-01-17 02:37:08.87219,321,404,310, 44,2014-01-13 17:15:02.127957,879,58,680, 75,2014-01-14 03:52:46.283718,760,729,563, 75,2014-01-11 08:51:39.851546,41,918,686, 67,2014-01-12 07:45:27.144934,360,419,421, 68,2014-01-20 19:07:47.705382,317,39,413, 68,2014-01-12 01:30:59.691529,790,335,281, 67,2014-01-19 12:48:35.054875,970,382,444, 67,2014-01-15 01:35:22.263604,263,550,711, 67,2014-01-20 04:08:10.61014,760,811,784, 18,2014-01-14 09:39:43.836597,39,106,278, 75,2014-01-17 20:12:45.110407,570,121,940, 18,2014-01-19 20:36:46.10691,14,249,685, 67,2014-01-18 13:28:49.569878,116,158,426, 67,2014-01-11 14:01:10.917331,372,128,939, 44,2014-01-16 06:07:57.85567,93,554,36, 44,2014-01-11 23:08:49.173861,419,471,537, 68,2014-01-20 22:01:40.780959,89,397,234, 68,2014-01-16 18:40:52.668894,367,974,382, 18,2014-01-18 16:43:18.671491,226,642,759, 18,2014-01-17 13:24:44.428216,662,646,861, 67,2014-01-11 03:19:53.414475,396,579,395, 44,2014-01-17 02:31:32.076423,278,633,265, 75,2014-01-17 03:56:51.061013,215,427,2, 68,2014-01-14 18:14:01.344156,955,780,570, 68,2014-01-19 15:58:28.691527,219,841,405, 67,2014-01-16 12:07:06.530949,387,374,220, 18,2014-01-12 07:27:08.066624,16,119,651, 75,2014-01-20 07:18:46.945044,86,387,129, 18,2014-01-10 22:10:17.811147,574,228,935, 67,2014-01-12 06:34:30.007746,991,283,147, 44,2014-01-16 03:55:55.817233,532,830,798, 18,2014-01-13 19:25:19.939104,396,718,279, 18,2014-01-15 14:27:16.291464,657,481,379, 75,2014-01-16 16:47:35.668779,841,37,201, 67,2014-01-15 07:10:47.084038,571,66,943, 67,2014-01-15 03:35:03.996846,597,905,962, 67,2014-01-17 22:18:35.360308,165,463,681, 75,2014-01-13 
13:59:22.139332,175,17,234, 67,2014-01-19 09:10:26.26045,839,691,403, 18,2014-01-16 20:26:45.970448,456,958,308, 68,2014-01-17 03:39:05.975929,816,733,419, 68,2014-01-15 23:23:11.370858,248,86,745, 67,2014-01-15 11:03:32.077358,852,822,823, 44,2014-01-14 11:07:55.546227,114,265,599, 44,2014-01-16 17:24:22.288608,505,574,882, 75,2014-01-20 21:11:29.933447,999,225,28, 75,2014-01-15 14:16:44.720089,799,258,620, 44,2014-01-16 09:26:03.069568,906,188,611, 18,2014-01-16 09:52:22.602341,111,449,997, 75,2014-01-13 01:00:03.391579,97,565,186, 68,2014-01-12 16:50:47.497783,14,519,715, 75,2014-01-16 17:12:16.017234,694,487,5, 44,2014-01-19 07:31:35.468003,201,834,3, 18,2014-01-19 00:47:38.559215,58,755,317, 67,2014-01-20 15:26:17.36139,389,30,42, 44,2014-01-17 17:30:04.159529,179,27,369, 68,2014-01-14 06:54:45.090165,980,400,50, 44,2014-01-11 16:53:25.428014,528,554,918, 44,2014-01-16 18:48:51.104574,657,737,819, 75,2014-01-14 09:05:02.90131,137,623,700, 67,2014-01-13 03:27:55.435759,311,498,40, 67,2014-01-11 16:44:46.921807,930,644,726, 44,2014-01-17 08:30:22.840415,245,525,789, 18,2014-01-17 21:55:11.727223,141,622,894, 68,2014-01-17 13:32:22.591005,988,893,683, 67,2014-01-11 09:41:09.924452,852,776,348, 18,2014-01-11 19:30:15.724068,500,458,423, 68,2014-01-14 05:50:57.951314,181,141,133, 68,2014-01-14 03:40:32.063784,296,55,163, 67,2014-01-20 08:40:41.984519,118,767,192, 44,2014-01-20 06:17:43.922181,774,959,865, 44,2014-01-20 11:19:38.040133,806,810,633, 68,2014-01-15 02:03:53.355795,802,141,573, 68,2014-01-14 11:21:15.99954,30,619,574, 44,2014-01-13 16:27:33.58636,413,653,374, 44,2014-01-20 23:28:15.644403,73,915,654, 68,2014-01-14 02:32:45.932986,345,702,899, 44,2014-01-18 10:44:18.306449,722,365,48, 44,2014-01-16 02:54:18.713972,822,723,364, 67,2014-01-19 16:25:08.535746,60,578,991, 18,2014-01-20 15:44:21.698028,931,767,87, 67,2014-01-17 23:45:06.211754,327,702,298, 75,2014-01-15 03:37:24.55298,746,294,940, 67,2014-01-18 17:15:57.166031,875,818,223, 75,2014-01-15 18:32:44.77097,525,627,957, 18,2014-01-14 13:03:41.91437,999,789,447, 18,2014-01-15 04:46:01.32556,208,626,882, 18,2014-01-19 05:27:29.861134,187,240,68, 44,2014-01-15 09:57:07.342383,444,498,765, 68,2014-01-21 03:34:52.430134,305,266,888, 68,2014-01-12 08:32:32.762445,69,388,865, 67,2014-01-17 17:25:02.778143,958,819,969, 75,2014-01-11 14:19:39.351942,791,512,674, 18,2014-01-18 21:02:55.602859,654,491,133, 44,2014-01-14 09:20:14.505366,819,526,635, 67,2014-01-18 07:52:01.942947,197,597,301, 67,2014-01-19 10:44:05.527731,203,329,481, 67,2014-01-15 08:50:30.229433,742,186,579, 68,2014-01-11 07:42:59.89468,496,627,688, 18,2014-01-12 08:24:37.591516,568,104,601, 44,2014-01-14 11:31:12.213951,785,838,155, 75,2014-01-20 18:00:43.816861,612,531,63, 68,2014-01-11 13:44:15.63165,309,625,696, 67,2014-01-18 17:35:05.569747,772,789,905, 67,2014-01-13 23:21:59.816401,719,93,909, 68,2014-01-14 10:11:49.21801,946,997,553, 68,2014-01-18 11:03:21.21415,990,602,488, 68,2014-01-17 05:54:45.749807,804,872,849, 44,2014-01-11 14:56:07.524458,874,581,332, 67,2014-01-13 07:57:39.928122,252,682,199, 75,2014-01-15 16:19:54.285552,726,678,520, 75,2014-01-11 00:40:29.222487,780,465,507, 75,2014-01-20 11:42:46.140296,262,297,470, 75,2014-01-19 23:57:18.838532,140,786,173, 67,2014-01-18 21:56:26.388519,165,468,674, 68,2014-01-15 00:15:30.911266,702,256,21, 44,2014-01-13 07:01:00.752902,177,922,275, 44,2014-01-12 20:42:27.714079,249,576,5, 18,2014-01-12 23:43:00.772819,282,137,720, 44,2014-01-17 20:58:08.095286,528,686,646, 44,2014-01-12 
18:17:20.144214,708,408,919, 18,2014-01-14 12:15:33.535264,870,782,611, 18,2014-01-14 08:23:10.873429,488,455,912, 68,2014-01-13 12:36:59.496575,973,645,744, 18,2014-01-12 03:18:29.138543,468,757,256, 67,2014-01-17 21:43:25.378794,293,891,664, 44,2014-01-21 00:29:01.439522,887,626,241, 68,2014-01-20 18:02:34.666274,650,504,116, 67,2014-01-12 00:33:55.608227,194,555,273, 68,2014-01-11 02:12:01.334202,506,854,908, 75,2014-01-11 20:27:12.25779,908,237,617, 75,2014-01-15 16:05:07.822179,869,30,35, 75,2014-01-21 00:12:45.151064,135,264,197, 67,2014-01-12 14:11:27.889858,686,505,213, 68,2014-01-13 03:40:13.392345,178,942,576, 44,2014-01-13 17:34:44.967883,403,693,911, 67,2014-01-16 04:23:17.107869,419,740,924, 75,2014-01-20 18:43:34.606495,698,366,768, 75,2014-01-20 03:29:30.717691,201,992,994, 18,2014-01-16 23:19:24.632746,151,127,515, 67,2014-01-12 20:41:14.535035,507,663,958, 18,2014-01-14 13:49:25.003243,412,57,488, 44,2014-01-17 03:59:53.855233,2,135,892, 18,2014-01-12 02:24:00.675331,888,329,592, 75,2014-01-13 07:46:43.841776,319,156,539, 18,2014-01-14 19:56:20.244171,281,68,532, 18,2014-01-16 06:39:31.586597,469,109,395, 44,2014-01-15 18:47:44.594678,778,948,313, 68,2014-01-16 20:38:47.958251,48,901,923, 18,2014-01-18 10:22:32.359416,751,212,141, 68,2014-01-12 09:45:52.689965,216,943,699, 18,2014-01-18 18:56:08.102884,636,411,187, 67,2014-01-11 16:38:05.895098,597,784,470, 18,2014-01-15 22:09:03.563393,392,409,965, 67,2014-01-11 01:33:07.599458,125,781,393, 67,2014-01-12 23:56:35.033641,562,329,26, 18,2014-01-12 13:52:28.942187,884,570,169, 18,2014-01-10 20:02:09.03898,951,540,758, 44,2014-01-12 11:59:09.699915,276,566,52, 67,2014-01-13 18:26:24.550415,716,984,932, 75,2014-01-11 02:14:10.373601,394,695,464, 18,2014-01-13 12:26:21.957704,26,482,698, 68,2014-01-18 14:31:32.372593,752,271,647, 75,2014-01-10 20:26:55.524665,492,821,403, 67,2014-01-15 06:37:49.847562,921,725,119, 68,2014-01-20 22:11:45.764937,65,109,332, 68,2014-01-13 18:01:40.492548,742,234,614, 68,2014-01-20 15:01:06.95585,154,97,959, 18,2014-01-20 10:55:20.371851,403,588,547, 67,2014-01-12 15:31:11.210239,995,710,934, 75,2014-01-16 08:20:31.588596,538,438,285, 67,2014-01-12 01:36:34.906887,793,129,529, 67,2014-01-16 09:20:36.213482,855,833,198, 44,2014-01-12 06:20:25.443829,712,636,689, 44,2014-01-13 08:00:35.582219,584,890,744, 75,2014-01-18 21:07:20.055257,660,244,656, 68,2014-01-16 06:16:45.688,544,501,509, 75,2014-01-18 18:40:07.168815,68,799,465, 44,2014-01-13 09:55:04.649935,159,600,44, 67,2014-01-11 20:55:33.646671,358,383,685, 67,2014-01-15 23:02:39.528649,546,432,56, 44,2014-01-14 23:40:57.3399,959,185,166, 68,2014-01-19 19:51:41.749554,829,102,508, 18,2014-01-16 19:40:45.423747,141,940,859, 18,2014-01-20 01:50:00.903293,13,777,4, 18,2014-01-20 01:24:49.349013,803,934,105, 18,2014-01-18 23:37:20.457388,463,177,29, 75,2014-01-11 09:42:29.84548,339,169,70, 18,2014-01-20 01:26:58.387993,471,805,126, 18,2014-01-20 15:36:30.157302,464,272,146, 67,2014-01-14 08:08:54.395895,136,512,896, 67,2014-01-20 07:41:08.761594,811,910,253, 67,2014-01-12 22:02:52.115005,463,514,535, 18,2014-01-11 16:40:26.768906,136,700,933, 67,2014-01-20 08:08:04.286678,505,443,118, 18,2014-01-17 08:40:41.962986,662,983,670, 44,2014-01-11 08:52:12.533844,787,441,812, 67,2014-01-12 20:09:44.779226,169,368,499, 75,2014-01-16 17:41:48.918836,181,352,902, 68,2014-01-20 23:47:32.905695,682,83,389, 67,2014-01-14 15:40:55.989883,482,325,458, 18,2014-01-11 20:02:20.507432,965,480,562, 44,2014-01-11 19:24:07.812582,807,622,594, 75,2014-01-20 
05:01:32.203366,46,558,37, 18,2014-01-13 06:22:45.951261,909,354,425, 68,2014-01-14 07:24:43.3948,128,537,855, 67,2014-01-17 20:08:52.259043,240,390,983, 68,2014-01-18 16:39:31.639681,901,770,406, 18,2014-01-11 20:04:50.564034,499,818,571, 75,2014-01-20 10:03:56.908978,322,506,515, 68,2014-01-19 17:35:05.286351,859,801,909, 44,2014-01-16 23:07:30.092683,815,858,31, 67,2014-01-14 03:44:54.249297,944,232,929, 44,2014-01-18 07:26:47.035905,29,469,639, 44,2014-01-12 12:48:15.516431,983,238,347, 67,2014-01-12 23:34:55.15259,781,915,11, 67,2014-01-17 02:51:36.384919,532,970,139, 44,2014-01-20 16:25:35.973819,917,522,716, 75,2014-01-13 13:17:24.99849,865,20,442, 18,2014-01-15 22:18:34.772911,375,507,559, 18,2014-01-20 02:02:06.131121,675,170,806, 67,2014-01-17 01:26:19.394383,492,847,674, 67,2014-01-14 23:59:43.534925,891,861,513, 44,2014-01-11 18:04:58.246126,158,464,536, 67,2014-01-17 22:06:46.163291,859,306,111, 68,2014-01-14 02:07:47.821602,915,762,506, 68,2014-01-18 06:45:40.209113,479,979,993, 68,2014-01-18 10:58:58.697553,131,536,360, 44,2014-01-16 02:17:32.600828,361,40,560, 68,2014-01-13 18:27:29.127949,548,692,872, 68,2014-01-18 04:46:31.603248,36,349,559, 75,2014-01-19 21:58:28.590712,557,467,504, 68,2014-01-14 18:29:49.635381,412,971,326, 44,2014-01-19 04:10:39.416249,937,867,382, 44,2014-01-18 21:00:00.794497,495,195,170, 67,2014-01-17 04:52:35.587061,974,266,917, 75,2014-01-12 05:35:22.81105,472,436,854, 67,2014-01-15 11:08:53.05354,192,202,301, 67,2014-01-14 15:32:07.226743,12,264,345, 67,2014-01-13 05:40:13.375084,218,281,175, 44,2014-01-14 15:12:49.962517,629,232,60, 67,2014-01-13 03:07:12.513094,362,524,175, 75,2014-01-19 08:47:43.467768,796,282,386, 68,2014-01-17 22:57:44.211815,883,402,3, 68,2014-01-20 14:33:59.548999,156,484,359, 68,2014-01-21 01:35:58.984199,608,364,601, 18,2014-01-20 02:32:39.364823,773,829,347, 67,2014-01-16 11:25:35.933918,895,928,152, 68,2014-01-20 12:01:34.958018,45,971,416, 44,2014-01-12 09:50:04.363313,547,422,844, 44,2014-01-11 03:44:10.706829,601,801,568, 67,2014-01-19 08:03:41.089139,659,120,247, 67,2014-01-18 15:16:23.757696,676,753,300, 44,2014-01-15 07:43:54.241753,244,207,433, 44,2014-01-20 06:08:39.335683,505,505,666, 44,2014-01-15 07:23:09.921405,378,136,934, 44,2014-01-18 13:51:42.063356,976,118,421, 44,2014-01-17 06:54:19.544796,581,363,883, 44,2014-01-12 12:22:08.618958,358,436,993, 44,2014-01-13 10:09:14.664603,17,810,231, 18,2014-01-20 05:21:48.673164,91,187,468, 68,2014-01-19 21:08:40.222207,253,66,161, 44,2014-01-12 02:07:43.255316,639,720,626, 18,2014-01-13 17:51:38.308545,963,809,619, 44,2014-01-17 19:19:19.638456,340,861,21, 18,2014-01-20 03:07:44.049814,401,49,357, 68,2014-01-20 02:44:13.895606,959,533,669, 67,2014-01-19 04:54:42.449925,877,405,310, 75,2014-01-14 08:16:37.103352,790,304,182, 67,2014-01-13 12:16:21.122349,598,555,961, 75,2014-01-11 04:34:55.825009,156,503,508, 67,2014-01-18 03:29:27.066289,153,934,782, 67,2014-01-15 19:23:33.635443,1000,223,556, 75,2014-01-19 17:22:39.293197,866,985,306, 18,2014-01-14 20:27:11.278103,914,265,344, 68,2014-01-15 03:57:33.184442,287,843,263, 18,2014-01-19 12:58:38.277396,906,955,462, 67,2014-01-13 16:59:50.642926,378,612,636, 44,2014-01-20 19:23:09.118359,350,573,460, 44,2014-01-18 19:00:13.235833,676,830,715, 68,2014-01-15 06:49:55.006239,223,816,659, 75,2014-01-21 03:07:19.825606,151,653,578, 68,2014-01-16 21:03:54.324971,891,167,670, 68,2014-01-12 16:06:18.764354,144,501,494, 67,2014-01-15 04:51:14.06736,494,412,626, 67,2014-01-15 21:12:33.660654,684,218,676, 75,2014-01-17 
03:29:28.685759,865,380,603, 68,2014-01-12 12:42:56.131135,526,321,944, 67,2014-01-11 22:06:53.205869,700,64,188, 68,2014-01-18 19:51:37.304718,2,778,885, 18,2014-01-15 02:52:10.795739,20,193,172, 44,2014-01-10 21:28:41.879033,384,335,724, 18,2014-01-17 11:00:17.526925,281,1,336, 44,2014-01-16 08:59:54.051055,338,334,317, 67,2014-01-13 19:20:20.187578,0,930,386, 68,2014-01-14 00:19:37.1658,426,697,630, 44,2014-01-15 06:07:38.101287,751,881,77, 67,2014-01-12 16:04:34.083603,326,448,831, 75,2014-01-11 23:14:19.615726,570,615,109, 68,2014-01-18 18:24:15.20464,911,223,257, 68,2014-01-15 08:20:55.205952,607,820,254, 75,2014-01-12 07:49:15.440735,935,906,770, 75,2014-01-15 15:53:42.270928,995,375,302, 68,2014-01-20 07:44:28.841814,484,342,904, 44,2014-01-21 05:11:54.733932,869,45,81, 68,2014-01-19 16:20:53.54945,966,398,246, 75,2014-01-14 05:42:02.026255,245,40,793, 68,2014-01-19 12:10:33.011748,623,865,63, 68,2014-01-12 03:20:44.192376,479,870,190, 67,2014-01-13 19:05:11.145033,522,902,794, 68,2014-01-17 01:10:46.247581,625,195,92, 68,2014-01-16 14:10:39.199034,80,524,484, 75,2014-01-13 16:12:30.97064,851,964,350, 68,2014-01-12 16:14:40.572552,408,503,222, 67,2014-01-18 10:16:57.963389,256,289,303, 75,2014-01-18 01:03:45.038,269,779,176, 75,2014-01-17 17:27:14.233626,358,156,507, 75,2014-01-14 07:46:26.649148,542,949,101, 18,2014-01-19 17:46:41.169135,846,168,437, 75,2014-01-18 19:34:07.439495,772,156,62, 68,2014-01-11 21:38:03.953866,379,485,831, 18,2014-01-13 14:38:51.964874,308,883,314, 68,2014-01-18 21:02:49.318528,85,446,626, 75,2014-01-18 12:38:21.480791,878,92,464, 68,2014-01-19 03:38:46.016348,727,775,32, 18,2014-01-11 10:23:09.506105,397,393,250, 18,2014-01-11 06:57:58.646592,158,683,478, 67,2014-01-13 03:46:24.117635,671,916,984, 18,2014-01-13 06:27:43.589709,709,972,112, 68,2014-01-12 10:12:18.262317,136,127,949, 18,2014-01-21 02:10:39.322275,411,845,716, 75,2014-01-17 18:48:38.79608,852,808,942, 18,2014-01-13 22:01:33.703472,699,994,984, 44,2014-01-15 12:04:21.593622,313,843,257, 75,2014-01-16 20:33:07.637895,537,192,113, 18,2014-01-13 21:13:28.437404,660,244,678, 75,2014-01-13 22:25:15.143073,388,316,480, 75,2014-01-20 06:15:09.66415,614,724,665, 67,2014-01-12 03:24:01.449152,679,164,212, 68,2014-01-15 05:45:59.335449,223,734,765, 67,2014-01-12 19:20:20.809183,262,262,950, 75,2014-01-18 08:34:47.696733,292,522,787, 68,2014-01-20 23:56:38.534483,683,284,564, 75,2014-01-15 15:32:51.779823,293,942,260, 68,2014-01-20 04:49:28.269704,189,458,892, 18,2014-01-18 04:13:36.497871,182,711,221, 44,2014-01-12 10:36:36.818243,153,7,992, 75,2014-01-16 16:16:42.50333,119,510,211, 18,2014-01-11 06:00:03.147019,220,708,295, 18,2014-01-10 22:23:17.987378,503,346,629, 18,2014-01-14 05:50:49.942825,106,423,774, 18,2014-01-12 07:38:07.100885,130,447,695, 68,2014-01-13 17:02:09.952672,571,983,832, 18,2014-01-11 20:53:39.261352,393,836,311, 44,2014-01-20 00:16:28.582095,406,542,666, 44,2014-01-11 14:40:55.96902,450,54,609, 44,2014-01-12 11:16:48.767458,853,765,769, 67,2014-01-20 11:14:27.228686,568,637,110, 68,2014-01-13 22:27:20.086655,282,841,883, 18,2014-01-14 21:44:32.357586,83,652,472, 68,2014-01-11 15:26:45.491423,392,831,554, 44,2014-01-13 18:37:59.40893,984,106,849, 44,2014-01-11 10:33:11.153667,302,642,24, 18,2014-01-14 17:28:19.194894,800,935,270, 18,2014-01-18 10:42:21.002552,825,741,812, 44,2014-01-17 11:06:18.791561,810,395,777, 18,2014-01-17 18:41:47.632298,51,854,699, 18,2014-01-11 03:07:36.145625,393,810,645, 67,2014-01-16 11:21:28.455711,822,334,987, 
citus-7.0.3/src/test/regress/data/large_records.data000066400000000000000000010000071317107136600224520ustar00rootroot000000000000001|aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 2|a citus-7.0.3/src/test/regress/data/lineitem.1.data000066400000000000000000026360631317107136600216260ustar00rootroot000000000000001|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|egular courts above the 1|67310|7311|2|36|45983.16|0.09|0.06|N|O|1996-04-12|1996-02-28|1996-04-20|TAKE BACK RETURN|MAIL|ly final dependencies: slyly bold 1|63700|3701|3|8|13309.60|0.10|0.02|N|O|1996-01-29|1996-03-05|1996-01-31|TAKE BACK RETURN|REG AIR|riously. regular, express dep 1|2132|4633|4|28|28955.64|0.09|0.06|N|O|1996-04-21|1996-03-30|1996-05-16|NONE|AIR|lites. fluffily even de 1|24027|1534|5|24|22824.48|0.10|0.04|N|O|1996-03-30|1996-03-14|1996-04-01|NONE|FOB| pending foxes. 
slyly re 1|15635|638|6|32|49620.16|0.07|0.02|N|O|1996-01-30|1996-02-07|1996-02-03|DELIVER IN PERSON|MAIL|arefully slyly ex 2|106170|1191|1|38|44694.46|0.00|0.05|N|O|1997-01-28|1997-01-14|1997-02-02|TAKE BACK RETURN|RAIL|ven requests. deposits breach a 3|4297|1798|1|45|54058.05|0.06|0.00|R|F|1994-02-02|1994-01-04|1994-02-23|NONE|AIR|ongside of the furiously brave acco 3|19036|6540|2|49|46796.47|0.10|0.00|R|F|1993-11-09|1993-12-20|1993-11-24|TAKE BACK RETURN|RAIL| unusual accounts. eve 3|128449|3474|3|27|39890.88|0.06|0.07|A|F|1994-01-16|1993-11-22|1994-01-23|DELIVER IN PERSON|SHIP|nal foxes wake. 3|29380|1883|4|2|2618.76|0.01|0.06|A|F|1993-12-04|1994-01-07|1994-01-01|NONE|TRUCK|y. fluffily pending d 3|183095|650|5|28|32986.52|0.04|0.00|R|F|1993-12-14|1994-01-10|1994-01-01|TAKE BACK RETURN|FOB|ages nag slyly pending 3|62143|9662|6|26|28733.64|0.10|0.02|A|F|1993-10-29|1993-12-18|1993-11-04|TAKE BACK RETURN|RAIL|ges sleep after the caref 4|88035|5560|1|30|30690.90|0.03|0.08|N|O|1996-01-10|1995-12-14|1996-01-18|DELIVER IN PERSON|REG AIR|- quickly regular packages sleep. idly 5|108570|8571|1|15|23678.55|0.02|0.04|R|F|1994-10-31|1994-08-31|1994-11-20|NONE|AIR|ts wake furiously 5|123927|3928|2|26|50723.92|0.07|0.08|R|F|1994-10-16|1994-09-25|1994-10-19|NONE|FOB|sts use slyly quickly special instruc 5|37531|35|3|50|73426.50|0.08|0.03|A|F|1994-08-08|1994-10-13|1994-08-26|DELIVER IN PERSON|AIR|eodolites. fluffily unusual 6|139636|2150|1|37|61998.31|0.08|0.03|A|F|1992-04-27|1992-05-15|1992-05-02|TAKE BACK RETURN|TRUCK|p furiously special foxes 7|182052|9607|1|12|13608.60|0.07|0.03|N|O|1996-05-07|1996-03-13|1996-06-03|TAKE BACK RETURN|FOB|ss pinto beans wake against th 7|145243|7758|2|9|11594.16|0.08|0.08|N|O|1996-02-01|1996-03-02|1996-02-19|TAKE BACK RETURN|SHIP|es. instructions 7|94780|9799|3|46|81639.88|0.10|0.07|N|O|1996-01-15|1996-03-27|1996-02-03|COLLECT COD|MAIL| unusual reques 7|163073|3074|4|28|31809.96|0.03|0.04|N|O|1996-03-21|1996-04-08|1996-04-20|NONE|FOB|. slyly special requests haggl 7|151894|9440|5|38|73943.82|0.08|0.01|N|O|1996-02-11|1996-02-24|1996-02-18|DELIVER IN PERSON|TRUCK|ns haggle carefully ironic deposits. bl 7|79251|1759|6|35|43058.75|0.06|0.03|N|O|1996-01-16|1996-02-23|1996-01-22|TAKE BACK RETURN|FOB|jole. excuses wake carefully alongside of 7|157238|2269|7|5|6476.15|0.04|0.02|N|O|1996-02-10|1996-03-26|1996-02-13|NONE|FOB|ithely regula 32|82704|7721|1|28|47227.60|0.05|0.08|N|O|1995-10-23|1995-08-27|1995-10-26|TAKE BACK RETURN|TRUCK|sleep quickly. req 32|197921|441|2|32|64605.44|0.02|0.00|N|O|1995-08-14|1995-10-07|1995-08-27|COLLECT COD|AIR|lithely regular deposits. fluffily 32|44161|6666|3|2|2210.32|0.09|0.02|N|O|1995-08-07|1995-10-07|1995-08-23|DELIVER IN PERSON|AIR| express accounts wake according to the 32|2743|7744|4|4|6582.96|0.09|0.03|N|O|1995-08-04|1995-10-01|1995-09-03|NONE|REG AIR|e slyly final pac 32|85811|8320|5|44|79059.64|0.05|0.06|N|O|1995-08-28|1995-08-20|1995-09-14|DELIVER IN PERSON|AIR|symptotes nag according to the ironic depo 32|11615|4117|6|6|9159.66|0.04|0.03|N|O|1995-07-21|1995-09-23|1995-07-25|COLLECT COD|RAIL| gifts cajole carefully. 33|61336|8855|1|31|40217.23|0.09|0.04|A|F|1993-10-29|1993-12-19|1993-11-08|COLLECT COD|TRUCK|ng to the furiously ironic package 33|60519|5532|2|32|47344.32|0.02|0.05|A|F|1993-12-09|1994-01-04|1993-12-28|COLLECT COD|MAIL|gular theodolites 33|137469|9983|3|5|7532.30|0.05|0.03|A|F|1993-12-09|1993-12-25|1993-12-23|TAKE BACK RETURN|AIR|. 
stealthily bold exc 33|33918|3919|4|41|75928.31|0.09|0.00|R|F|1993-11-09|1994-01-24|1993-11-11|TAKE BACK RETURN|MAIL|unusual packages doubt caref 34|88362|871|1|13|17554.68|0.00|0.07|N|O|1998-10-23|1998-09-14|1998-11-06|NONE|REG AIR|nic accounts. deposits are alon 34|89414|1923|2|22|30875.02|0.08|0.06|N|O|1998-10-09|1998-10-16|1998-10-12|NONE|FOB|thely slyly p 34|169544|4577|3|6|9681.24|0.02|0.06|N|O|1998-10-30|1998-09-20|1998-11-05|NONE|FOB|ar foxes sleep 35|450|2951|1|24|32410.80|0.02|0.00|N|O|1996-02-21|1996-01-03|1996-03-18|TAKE BACK RETURN|FOB|, regular tithe 35|161940|4457|2|34|68065.96|0.06|0.08|N|O|1996-01-22|1996-01-06|1996-01-27|DELIVER IN PERSON|RAIL|s are carefully against the f 35|120896|8433|3|7|13418.23|0.06|0.04|N|O|1996-01-19|1995-12-22|1996-01-29|NONE|MAIL| the carefully regular 35|85175|7684|4|25|29004.25|0.06|0.05|N|O|1995-11-26|1995-12-25|1995-12-21|DELIVER IN PERSON|SHIP| quickly unti 35|119917|4940|5|34|65854.94|0.08|0.06|N|O|1995-11-08|1996-01-15|1995-11-26|COLLECT COD|MAIL|. silent, unusual deposits boost 35|30762|3266|6|28|47397.28|0.03|0.02|N|O|1996-02-01|1995-12-24|1996-02-28|COLLECT COD|RAIL|ly alongside of 36|119767|9768|1|42|75043.92|0.09|0.00|N|O|1996-02-03|1996-01-21|1996-02-23|COLLECT COD|SHIP| careful courts. special 37|22630|5133|1|40|62105.20|0.09|0.03|A|F|1992-07-21|1992-08-01|1992-08-15|NONE|REG AIR|luffily regular requests. slyly final acco 37|126782|1807|2|39|70542.42|0.05|0.02|A|F|1992-07-02|1992-08-18|1992-07-28|TAKE BACK RETURN|RAIL|the final requests. ca 37|12903|5405|3|43|78083.70|0.05|0.08|A|F|1992-07-10|1992-07-06|1992-08-02|DELIVER IN PERSON|TRUCK|iously ste 38|175839|874|1|44|84252.52|0.04|0.02|N|O|1996-09-29|1996-11-17|1996-09-30|COLLECT COD|MAIL|s. blithely unusual theodolites am 39|2320|9821|1|44|53782.08|0.09|0.06|N|O|1996-11-14|1996-12-15|1996-12-12|COLLECT COD|RAIL|eodolites. careful 39|186582|4137|2|26|43383.08|0.08|0.04|N|O|1996-11-04|1996-10-20|1996-11-20|NONE|FOB|ckages across the slyly silent 39|67831|5350|3|46|82746.18|0.06|0.08|N|O|1996-09-26|1996-12-19|1996-10-26|DELIVER IN PERSON|AIR|he carefully e 39|20590|3093|4|32|48338.88|0.07|0.05|N|O|1996-10-02|1996-12-19|1996-10-14|COLLECT COD|MAIL|heodolites sleep silently pending foxes. ac 39|54519|9530|5|43|63360.93|0.01|0.01|N|O|1996-10-17|1996-11-14|1996-10-26|COLLECT COD|MAIL|yly regular i 39|94368|6878|6|40|54494.40|0.06|0.05|N|O|1996-12-08|1996-10-22|1997-01-01|COLLECT COD|AIR|quickly ironic fox 64|85951|5952|1|21|40675.95|0.05|0.02|R|F|1994-09-30|1994-09-18|1994-10-26|DELIVER IN PERSON|REG AIR|ch slyly final, thin platelets. 65|59694|4705|1|26|42995.94|0.03|0.03|A|F|1995-04-20|1995-04-25|1995-05-13|NONE|TRUCK|pending deposits nag even packages. ca 65|73815|8830|2|22|39353.82|0.00|0.05|N|O|1995-07-17|1995-06-04|1995-07-19|COLLECT COD|FOB| ideas. special, r 65|1388|3889|3|21|27076.98|0.09|0.07|N|O|1995-07-06|1995-05-14|1995-07-31|DELIVER IN PERSON|RAIL|bove the even packages. 
accounts nag carefu 66|115118|7630|1|31|35126.41|0.00|0.08|R|F|1994-02-19|1994-03-11|1994-02-20|TAKE BACK RETURN|RAIL|ut the unusual accounts sleep at the bo 66|173489|3490|2|41|64061.68|0.04|0.07|A|F|1994-02-21|1994-03-01|1994-03-18|COLLECT COD|AIR| regular de 67|21636|9143|1|4|6230.52|0.09|0.04|N|O|1997-04-17|1997-01-31|1997-04-20|NONE|SHIP| cajole thinly expres 67|20193|5198|2|12|13358.28|0.09|0.05|N|O|1997-01-27|1997-02-21|1997-02-22|NONE|REG AIR| even packages cajole 67|173600|6118|3|5|8368.00|0.03|0.07|N|O|1997-02-20|1997-02-12|1997-02-21|DELIVER IN PERSON|TRUCK|y unusual packages thrash pinto 67|87514|7515|4|44|66066.44|0.08|0.06|N|O|1997-03-18|1997-01-29|1997-04-13|DELIVER IN PERSON|RAIL|se quickly above the even, express reques 67|40613|8126|5|23|35733.03|0.05|0.07|N|O|1997-04-19|1997-02-14|1997-05-06|DELIVER IN PERSON|REG AIR|ly regular deposit 67|178306|824|6|29|40144.70|0.02|0.05|N|O|1997-01-25|1997-01-27|1997-01-27|DELIVER IN PERSON|FOB|ultipliers 68|7068|9569|1|3|2925.18|0.05|0.02|N|O|1998-07-04|1998-06-05|1998-07-21|NONE|RAIL|fully special instructions cajole. furious 68|175180|2732|2|46|57738.28|0.02|0.05|N|O|1998-06-26|1998-06-07|1998-07-05|NONE|MAIL| requests are unusual, regular pinto 68|34980|7484|3|46|88089.08|0.04|0.05|N|O|1998-08-13|1998-07-08|1998-08-29|NONE|RAIL|egular dependencies affix ironically along 68|94728|2256|4|20|34454.40|0.07|0.01|N|O|1998-06-27|1998-05-23|1998-07-02|NONE|REG AIR| excuses integrate fluffily 68|82758|5267|5|27|47000.25|0.03|0.06|N|O|1998-06-19|1998-06-25|1998-06-29|DELIVER IN PERSON|SHIP|ccounts. deposits use. furiously 68|102561|5072|6|30|46906.80|0.05|0.06|N|O|1998-08-11|1998-07-11|1998-08-14|NONE|RAIL|oxes are slyly blithely fin 68|139247|1761|7|41|52735.84|0.09|0.08|N|O|1998-06-24|1998-06-27|1998-07-06|NONE|SHIP|eposits nag special ideas. furiousl 69|115209|7721|1|48|58761.60|0.01|0.07|A|F|1994-08-17|1994-08-11|1994-09-08|NONE|TRUCK|regular epitaphs. carefully even ideas hag 69|104180|9201|2|32|37893.76|0.08|0.06|A|F|1994-08-24|1994-08-17|1994-08-31|NONE|REG AIR|s sleep carefully bold, 69|137267|4807|3|17|22172.42|0.09|0.00|A|F|1994-07-02|1994-07-07|1994-07-03|TAKE BACK RETURN|AIR|final, pending instr 69|37502|2509|4|3|4318.50|0.09|0.04|R|F|1994-06-06|1994-07-27|1994-06-15|NONE|MAIL| blithely final d 69|92070|7089|5|42|44606.94|0.07|0.04|R|F|1994-07-31|1994-07-26|1994-08-28|DELIVER IN PERSON|REG AIR|tect regular, speci 69|18504|1006|6|23|32717.50|0.05|0.00|A|F|1994-10-03|1994-08-06|1994-10-24|NONE|SHIP|nding accounts ca 70|64128|9141|1|8|8736.96|0.03|0.08|R|F|1994-01-12|1994-02-27|1994-01-14|TAKE BACK RETURN|FOB|ggle. carefully pending dependenc 70|196156|1195|2|13|16277.95|0.06|0.06|A|F|1994-03-03|1994-02-13|1994-03-26|COLLECT COD|AIR|lyly special packag 70|179809|7361|3|1|1888.80|0.03|0.05|R|F|1994-01-26|1994-03-05|1994-01-28|TAKE BACK RETURN|RAIL|quickly. fluffily unusual theodolites c 70|45734|743|4|11|18477.03|0.01|0.05|A|F|1994-03-17|1994-03-17|1994-03-27|NONE|MAIL|alongside of the deposits. fur 70|37131|2138|5|37|39520.81|0.09|0.04|R|F|1994-02-13|1994-03-16|1994-02-21|COLLECT COD|MAIL|n accounts are. q 70|55655|3171|6|19|30602.35|0.06|0.03|A|F|1994-01-26|1994-02-17|1994-02-06|TAKE BACK RETURN|SHIP| packages wake pending accounts. 71|61931|1932|1|25|47323.25|0.09|0.07|N|O|1998-04-10|1998-04-22|1998-04-11|COLLECT COD|FOB|ckly. slyly 71|65916|3435|2|3|5645.73|0.09|0.07|N|O|1998-05-23|1998-04-03|1998-06-02|COLLECT COD|SHIP|y. 
pinto beans haggle after the 71|34432|1942|3|45|61489.35|0.00|0.07|N|O|1998-02-23|1998-03-20|1998-03-24|DELIVER IN PERSON|SHIP| ironic packages believe blithely a 71|96645|9155|4|33|54174.12|0.00|0.01|N|O|1998-04-12|1998-03-20|1998-04-15|NONE|FOB| serve quickly fluffily bold deposi 71|103255|5766|5|39|49071.75|0.08|0.06|N|O|1998-01-29|1998-04-07|1998-02-18|DELIVER IN PERSON|RAIL|l accounts sleep across the pack 71|195635|674|6|34|58841.42|0.04|0.01|N|O|1998-03-05|1998-04-22|1998-03-30|DELIVER IN PERSON|TRUCK|s cajole. 96|123076|613|1|23|25278.61|0.10|0.06|A|F|1994-07-19|1994-06-29|1994-07-25|DELIVER IN PERSON|TRUCK|ep-- carefully reg 96|135390|5391|2|30|42761.70|0.01|0.06|R|F|1994-06-03|1994-05-29|1994-06-22|DELIVER IN PERSON|TRUCK|e quickly even ideas. furiou 97|119477|1989|1|13|19454.11|0.00|0.02|R|F|1993-04-01|1993-04-04|1993-04-08|NONE|TRUCK|ayers cajole against the furiously 97|49568|2073|2|37|56149.72|0.02|0.06|A|F|1993-04-13|1993-03-30|1993-04-14|DELIVER IN PERSON|SHIP|ic requests boost carefully quic 97|77699|5221|3|19|31857.11|0.06|0.08|R|F|1993-05-14|1993-03-05|1993-05-25|TAKE BACK RETURN|RAIL|gifts. furiously ironic packages cajole. 98|40216|217|1|28|32373.88|0.06|0.07|A|F|1994-12-24|1994-10-25|1995-01-16|COLLECT COD|REG AIR| pending, regular accounts s 98|109743|7274|2|1|1752.74|0.00|0.00|A|F|1994-12-01|1994-12-12|1994-12-15|DELIVER IN PERSON|TRUCK|. unusual instructions against 98|44706|4707|3|14|23109.80|0.05|0.02|A|F|1994-12-30|1994-11-22|1995-01-27|COLLECT COD|AIR| cajole furiously. blithely ironic ideas 98|167180|7181|4|10|12471.80|0.03|0.03|A|F|1994-10-23|1994-11-08|1994-11-09|COLLECT COD|RAIL| carefully. quickly ironic ideas 99|87114|4639|1|10|11011.10|0.02|0.01|A|F|1994-05-18|1994-06-03|1994-05-23|COLLECT COD|RAIL|kages. requ 99|123766|3767|2|5|8948.80|0.02|0.07|R|F|1994-05-06|1994-05-28|1994-05-20|TAKE BACK RETURN|RAIL|ests cajole fluffily waters. blithe 99|134082|1622|3|42|46875.36|0.02|0.02|A|F|1994-04-19|1994-05-18|1994-04-20|NONE|RAIL|kages are fluffily furiously ir 99|108338|849|4|36|48467.88|0.09|0.02|A|F|1994-07-04|1994-04-17|1994-07-30|DELIVER IN PERSON|AIR|slyly. slyly e 100|62029|2030|1|28|27748.56|0.04|0.05|N|O|1998-05-08|1998-05-13|1998-06-07|COLLECT COD|TRUCK|sts haggle. slowl 100|115979|8491|2|22|43889.34|0.00|0.07|N|O|1998-06-24|1998-04-12|1998-06-29|DELIVER IN PERSON|SHIP|nto beans alongside of the fi 100|46150|8655|3|46|50422.90|0.03|0.04|N|O|1998-05-02|1998-04-10|1998-05-22|TAKE BACK RETURN|SHIP|ular accounts. even 100|38024|3031|4|14|13468.28|0.06|0.03|N|O|1998-05-22|1998-05-01|1998-06-03|COLLECT COD|MAIL|y. furiously ironic ideas gr 100|53439|955|5|37|51519.91|0.05|0.00|N|O|1998-03-06|1998-04-16|1998-03-31|TAKE BACK RETURN|TRUCK|nd the quickly s 101|118282|5816|1|49|63713.72|0.10|0.00|N|O|1996-06-21|1996-05-27|1996-06-29|DELIVER IN PERSON|REG AIR|ts-- final packages sleep furiousl 101|163334|883|2|36|50303.88|0.00|0.01|N|O|1996-05-19|1996-05-01|1996-06-04|DELIVER IN PERSON|AIR|tes. blithely pending dolphins x-ray f 101|138418|5958|3|12|17476.92|0.06|0.02|N|O|1996-03-29|1996-04-20|1996-04-12|COLLECT COD|MAIL|. quickly regular 102|88914|3931|1|37|70407.67|0.06|0.00|N|O|1997-07-24|1997-08-02|1997-08-07|TAKE BACK RETURN|SHIP|ully across the ideas. final deposit 102|169238|6787|2|34|44445.82|0.03|0.08|N|O|1997-08-09|1997-07-28|1997-08-26|TAKE BACK RETURN|SHIP|eposits cajole across 102|182321|4840|3|25|35083.00|0.01|0.01|N|O|1997-07-31|1997-07-24|1997-08-17|NONE|RAIL|bits. 
ironic accoun 102|61158|8677|4|15|16787.25|0.07|0.07|N|O|1997-06-02|1997-07-13|1997-06-04|DELIVER IN PERSON|SHIP|final packages. carefully even excu 103|194658|2216|1|6|10515.90|0.03|0.05|N|O|1996-10-11|1996-07-25|1996-10-28|NONE|FOB|cajole. carefully ex 103|10426|2928|2|37|49447.54|0.02|0.07|N|O|1996-09-17|1996-07-27|1996-09-20|TAKE BACK RETURN|MAIL|ies. quickly ironic requests use blithely 103|28431|8432|3|23|31266.89|0.01|0.04|N|O|1996-09-11|1996-09-18|1996-09-26|NONE|FOB|ironic accou 103|29022|4027|4|32|30432.64|0.01|0.07|N|O|1996-07-30|1996-08-06|1996-08-04|NONE|RAIL|kages doze. special, regular deposit 128|106828|9339|1|38|69723.16|0.06|0.01|A|F|1992-09-01|1992-08-27|1992-10-01|TAKE BACK RETURN|FOB| cajole careful 129|2867|5368|1|46|81413.56|0.08|0.02|R|F|1993-02-15|1993-01-24|1993-03-05|COLLECT COD|TRUCK|uietly bold theodolites. fluffil 129|185164|5165|2|36|44969.76|0.01|0.02|A|F|1992-11-25|1992-12-25|1992-12-09|TAKE BACK RETURN|REG AIR|packages are care 129|39444|1948|3|33|45653.52|0.04|0.06|A|F|1993-01-08|1993-02-14|1993-01-29|COLLECT COD|SHIP|sts nag bravely. fluffily 129|135137|164|4|34|39852.42|0.00|0.01|R|F|1993-01-29|1993-02-14|1993-02-10|COLLECT COD|MAIL|quests. express ideas 129|31373|8883|5|24|31304.88|0.06|0.00|A|F|1992-12-07|1993-01-02|1992-12-11|TAKE BACK RETURN|FOB|uests. foxes cajole slyly after the ca 129|77050|4572|6|22|22595.10|0.06|0.01|R|F|1993-02-15|1993-01-31|1993-02-24|COLLECT COD|SHIP|e. fluffily regular 129|168569|3602|7|1|1637.56|0.05|0.04|R|F|1993-01-26|1993-01-08|1993-02-24|DELIVER IN PERSON|FOB|e carefully blithely bold dolp 130|128816|8817|1|14|25827.34|0.08|0.05|A|F|1992-08-15|1992-07-25|1992-09-13|COLLECT COD|RAIL| requests. final instruction 130|1739|4240|2|48|78755.04|0.03|0.02|R|F|1992-07-01|1992-07-12|1992-07-24|NONE|AIR|lithely alongside of the regu 130|11860|1861|3|18|31893.48|0.04|0.08|A|F|1992-07-04|1992-06-14|1992-07-29|DELIVER IN PERSON|MAIL| slyly ironic decoys abou 130|115635|3169|4|13|21458.19|0.09|0.02|R|F|1992-06-26|1992-07-29|1992-07-05|NONE|FOB| pending dolphins sleep furious 130|69130|4143|5|31|34073.03|0.06|0.05|R|F|1992-09-01|1992-07-18|1992-09-02|TAKE BACK RETURN|RAIL|thily about the ruth 131|167505|22|1|45|70762.50|0.10|0.02|R|F|1994-09-14|1994-09-02|1994-10-04|NONE|FOB|ironic, bold accounts. careful 131|44255|9264|2|50|59962.50|0.02|0.04|A|F|1994-09-17|1994-08-10|1994-09-21|NONE|SHIP|ending requests. final, ironic pearls slee 131|189021|1540|3|4|4440.08|0.04|0.03|A|F|1994-09-20|1994-08-30|1994-09-23|COLLECT COD|REG AIR| are carefully slyly i 132|140449|2964|1|18|26809.92|0.00|0.08|R|F|1993-07-10|1993-08-05|1993-07-13|NONE|TRUCK|ges. 
platelets wake furio 132|119053|9054|2|43|46098.15|0.01|0.08|R|F|1993-09-01|1993-08-16|1993-09-22|NONE|TRUCK|y pending theodolites 132|114419|4420|3|32|45869.12|0.04|0.04|A|F|1993-07-12|1993-08-05|1993-08-05|COLLECT COD|TRUCK|d instructions hagg 132|28082|5589|4|23|23231.84|0.10|0.00|A|F|1993-06-16|1993-08-27|1993-06-23|DELIVER IN PERSON|AIR|refully blithely bold acco 133|103432|5943|1|27|38756.61|0.00|0.02|N|O|1997-12-21|1998-02-23|1997-12-27|TAKE BACK RETURN|MAIL|yly even gifts after the sl 133|176279|3831|2|12|16263.24|0.02|0.06|N|O|1997-12-02|1998-01-15|1997-12-29|DELIVER IN PERSON|REG AIR|ts cajole fluffily quickly i 133|117350|4884|3|29|39653.15|0.09|0.08|N|O|1998-02-28|1998-01-30|1998-03-09|DELIVER IN PERSON|RAIL| the carefully regular theodoli 133|89855|7380|4|11|20293.35|0.06|0.01|N|O|1998-03-21|1998-01-15|1998-04-04|DELIVER IN PERSON|REG AIR|e quickly across the dolphins 134|641|642|1|21|32374.44|0.00|0.03|A|F|1992-07-17|1992-07-08|1992-07-26|COLLECT COD|SHIP|s. quickly regular 134|164645|9678|2|35|59837.40|0.06|0.07|A|F|1992-08-23|1992-06-01|1992-08-24|NONE|MAIL|ajole furiously. instructio 134|188252|3289|3|26|34846.50|0.09|0.06|A|F|1992-06-20|1992-07-12|1992-07-16|NONE|RAIL| among the pending depos 134|144002|4003|4|47|49162.00|0.05|0.00|A|F|1992-08-16|1992-07-06|1992-08-28|NONE|REG AIR|s! carefully unusual requests boost careful 134|35172|5173|5|12|13286.04|0.05|0.02|A|F|1992-07-03|1992-06-01|1992-07-11|COLLECT COD|TRUCK|nts are quic 134|133103|5617|6|12|13633.20|0.00|0.00|A|F|1992-08-08|1992-07-07|1992-08-20|TAKE BACK RETURN|FOB|lyly regular pac 135|108205|8206|1|47|57020.40|0.06|0.08|N|O|1996-02-18|1996-01-01|1996-02-25|COLLECT COD|RAIL|ctions wake slyly abo 135|198344|5902|2|21|30289.14|0.00|0.07|N|O|1996-02-11|1996-01-12|1996-02-13|DELIVER IN PERSON|SHIP| deposits believe. furiously regular p 135|157510|5056|3|33|51727.83|0.02|0.00|N|O|1996-01-03|1995-11-21|1996-02-01|TAKE BACK RETURN|MAIL|ptotes boost slowly care 135|67005|9512|4|34|33048.00|0.02|0.03|N|O|1996-01-12|1996-01-19|1996-02-05|NONE|TRUCK|counts doze against the blithely ironi 135|136248|1275|5|20|25684.80|0.01|0.04|N|O|1996-01-25|1995-11-20|1996-02-09|NONE|MAIL|theodolites. quickly p 135|115000|2534|6|13|13195.00|0.04|0.02|N|O|1995-11-12|1995-12-22|1995-11-17|NONE|FOB|nal ideas. final instr 160|14785|9788|1|36|61192.08|0.07|0.01|N|O|1997-03-11|1997-03-11|1997-03-20|COLLECT COD|MAIL|old, ironic deposits are quickly abov 160|86382|8891|2|22|30104.36|0.00|0.04|N|O|1997-02-18|1997-03-05|1997-03-05|COLLECT COD|RAIL|ncies about the request 160|20080|5085|3|34|34002.72|0.01|0.05|N|O|1997-01-31|1997-03-13|1997-02-14|NONE|FOB|st sleep even gifts. dependencies along 161|102810|341|1|19|34443.39|0.01|0.01|A|F|1994-12-13|1994-11-19|1994-12-26|DELIVER IN PERSON|TRUCK|, regular sheaves sleep along 162|189288|4325|1|2|2754.56|0.02|0.01|N|O|1995-09-02|1995-06-17|1995-09-08|COLLECT COD|FOB|es! final somas integrate 163|167545|5094|1|43|69339.22|0.01|0.00|N|O|1997-09-19|1997-11-19|1997-10-03|COLLECT COD|REG AIR|al, bold dependencies wake. iron 163|120702|703|2|13|22395.10|0.01|0.04|N|O|1997-11-11|1997-10-18|1997-12-07|DELIVER IN PERSON|TRUCK|inal requests. 
even pinto beans hag 163|36818|9322|3|27|47379.87|0.04|0.08|N|O|1997-12-26|1997-11-28|1998-01-05|COLLECT COD|REG AIR|ously express dependen 163|192642|5162|4|5|8673.20|0.02|0.00|N|O|1997-11-17|1997-10-09|1997-12-05|DELIVER IN PERSON|TRUCK| must belie 163|126090|8603|5|12|13393.08|0.10|0.00|N|O|1997-12-18|1997-10-26|1997-12-22|COLLECT COD|TRUCK|ly blithe accounts cajole 163|190825|5864|6|20|38316.40|0.00|0.07|N|O|1997-09-27|1997-11-15|1997-10-07|TAKE BACK RETURN|FOB|tructions integrate b 164|91309|3819|1|26|33807.80|0.09|0.04|A|F|1993-01-04|1992-11-21|1993-01-07|NONE|RAIL|s. blithely special courts are blithel 164|18488|3491|2|24|33755.52|0.05|0.05|R|F|1992-12-22|1992-11-27|1993-01-06|NONE|AIR|side of the slyly unusual theodolites. f 164|125509|3046|3|38|58311.00|0.03|0.06|R|F|1992-12-04|1992-11-23|1993-01-02|TAKE BACK RETURN|AIR|counts cajole fluffily regular packages. b 164|17526|28|4|32|46192.64|0.05|0.01|R|F|1992-12-21|1992-12-23|1992-12-28|COLLECT COD|RAIL|ts wake again 164|147505|2534|5|43|66757.50|0.06|0.01|R|F|1992-11-26|1993-01-03|1992-12-08|COLLECT COD|RAIL|y carefully regular dep 164|108896|8897|6|27|51432.03|0.10|0.04|R|F|1992-12-23|1993-01-16|1993-01-10|DELIVER IN PERSON|AIR|ayers wake carefully a 164|3037|5538|7|23|21620.69|0.09|0.04|A|F|1992-11-03|1992-12-02|1992-11-12|NONE|REG AIR|ress packages haggle ideas. blithely spec 165|33175|8182|1|3|3324.51|0.01|0.08|R|F|1993-03-29|1993-03-06|1993-04-12|DELIVER IN PERSON|REG AIR|riously requests. depos 165|161627|9176|2|43|72610.66|0.08|0.05|R|F|1993-02-27|1993-04-19|1993-03-03|DELIVER IN PERSON|TRUCK|jole slyly according 165|58520|6036|3|15|22177.80|0.00|0.05|R|F|1993-04-10|1993-03-29|1993-05-01|COLLECT COD|SHIP| bold packages mainta 165|139190|4217|4|49|60230.31|0.07|0.06|A|F|1993-02-20|1993-04-02|1993-03-10|COLLECT COD|REG AIR|uses sleep slyly ruthlessly regular a 165|155084|7600|5|27|30755.16|0.01|0.04|R|F|1993-04-27|1993-03-04|1993-05-13|NONE|MAIL|around the ironic, even orb 166|64888|9901|1|37|68556.56|0.09|0.03|N|O|1995-11-16|1995-10-17|1995-12-13|NONE|MAIL|lar frays wake blithely a 166|166366|6367|2|13|18620.68|0.09|0.05|N|O|1995-11-09|1995-11-18|1995-11-14|COLLECT COD|SHIP|fully above the blithely fina 166|99652|2162|3|41|67717.65|0.07|0.03|N|O|1995-11-13|1995-11-07|1995-12-08|COLLECT COD|FOB|hily along the blithely pending fo 166|45027|7532|4|8|7776.16|0.05|0.02|N|O|1995-12-30|1995-11-29|1996-01-29|DELIVER IN PERSON|RAIL|e carefully bold 167|101171|1172|1|28|32820.76|0.06|0.01|R|F|1993-02-19|1993-02-16|1993-03-03|DELIVER IN PERSON|TRUCK|sly during the u 167|171555|4073|2|27|43916.85|0.09|0.00|R|F|1993-05-01|1993-03-31|1993-05-31|TAKE BACK RETURN|FOB|eans affix furiously-- packages 192|97017|2036|1|23|23322.23|0.00|0.00|N|O|1998-02-05|1998-02-06|1998-03-07|TAKE BACK RETURN|AIR|ly pending theodolites haggle quickly fluf 192|161368|8917|2|20|28587.20|0.07|0.01|N|O|1998-03-13|1998-02-02|1998-03-31|TAKE BACK RETURN|REG AIR|tes. carefu 192|110252|5275|3|15|18933.75|0.09|0.01|N|O|1998-01-30|1998-02-10|1998-02-23|TAKE BACK RETURN|TRUCK|he ironic requests haggle about 192|196400|3958|4|2|2992.80|0.06|0.02|N|O|1998-03-06|1998-02-03|1998-03-24|COLLECT COD|SHIP|s. dependencies nag furiously alongside 192|82915|7932|5|25|47447.75|0.02|0.03|N|O|1998-02-15|1998-01-11|1998-03-17|COLLECT COD|TRUCK|. carefully regular 192|141003|3518|6|45|46980.00|0.00|0.05|N|O|1998-03-11|1998-01-09|1998-04-03|NONE|MAIL|equests. 
ideas sleep idea 193|92638|5148|1|9|14675.67|0.06|0.06|A|F|1993-09-17|1993-10-08|1993-09-30|COLLECT COD|TRUCK|against the fluffily regular d 193|153954|1500|2|15|30119.25|0.02|0.07|R|F|1993-11-22|1993-10-09|1993-12-05|TAKE BACK RETURN|SHIP|ffily. regular packages d 193|93878|6388|3|23|43053.01|0.06|0.05|A|F|1993-08-21|1993-10-11|1993-09-02|DELIVER IN PERSON|TRUCK|ly even accounts wake blithely bold 194|2594|5095|1|17|25442.03|0.05|0.04|R|F|1992-05-24|1992-05-22|1992-05-30|COLLECT COD|AIR| regular deposi 194|183523|6042|2|1|1606.52|0.04|0.06|R|F|1992-04-30|1992-05-18|1992-05-23|NONE|REG AIR| regular theodolites. regular, iron 194|65994|3513|3|13|25479.87|0.08|0.08|A|F|1992-05-07|1992-06-18|1992-05-10|NONE|AIR|about the blit 194|145146|5147|4|36|42881.04|0.00|0.05|R|F|1992-05-21|1992-05-18|1992-05-27|TAKE BACK RETURN|RAIL|pecial packages wake after the slyly r 194|56176|1187|5|8|9057.36|0.04|0.00|R|F|1992-07-06|1992-06-25|1992-07-11|COLLECT COD|FOB|uriously unusual excuses 194|148984|1499|6|16|32527.68|0.06|0.03|A|F|1992-05-14|1992-06-14|1992-05-21|TAKE BACK RETURN|TRUCK|y regular requests. furious 194|167828|345|7|21|39812.22|0.02|0.01|R|F|1992-05-06|1992-05-20|1992-05-07|COLLECT COD|REG AIR|accounts detect quickly dogged 195|84590|9607|1|6|9447.54|0.04|0.02|A|F|1994-01-09|1994-03-27|1994-01-28|COLLECT COD|REG AIR|y, even deposits haggle carefully. bli 195|93847|1375|2|41|75474.44|0.05|0.07|A|F|1994-02-24|1994-02-11|1994-03-20|NONE|TRUCK|rts detect in place of t 195|85446|7955|3|34|48668.96|0.08|0.08|R|F|1994-01-31|1994-02-11|1994-02-12|NONE|TRUCK| cajole furiously bold i 195|85442|7951|4|41|58525.04|0.06|0.04|R|F|1994-03-14|1994-03-13|1994-04-09|COLLECT COD|RAIL|ggle fluffily foxes. fluffily ironic ex 196|135052|79|1|19|20653.95|0.03|0.02|R|F|1993-04-17|1993-05-27|1993-04-30|NONE|SHIP|sts maintain foxes. furiously regular p 196|9852|2353|2|15|26427.75|0.03|0.04|A|F|1993-07-05|1993-05-08|1993-07-06|TAKE BACK RETURN|SHIP|s accounts. furio 197|98494|1004|1|39|58207.11|0.02|0.04|N|O|1995-07-21|1995-07-01|1995-08-14|TAKE BACK RETURN|AIR|press accounts. daringly sp 197|177103|9621|2|8|9440.80|0.09|0.02|A|F|1995-04-17|1995-07-01|1995-04-27|DELIVER IN PERSON|SHIP|y blithely even deposits. blithely fina 197|155829|8345|3|17|32041.94|0.06|0.02|N|O|1995-08-02|1995-06-23|1995-08-03|COLLECT COD|REG AIR|ts. careful 197|17936|2939|4|25|46348.25|0.04|0.01|N|F|1995-06-13|1995-05-23|1995-06-24|TAKE BACK RETURN|FOB|s-- quickly final accounts 197|41466|3971|5|14|19704.44|0.09|0.01|R|F|1995-05-08|1995-05-24|1995-05-12|TAKE BACK RETURN|RAIL|use slyly slyly silent depo 197|105880|901|6|1|1885.88|0.07|0.05|N|O|1995-07-15|1995-06-21|1995-08-11|COLLECT COD|RAIL| even, thin dependencies sno 198|56061|6062|1|33|33562.98|0.07|0.02|N|O|1998-01-05|1998-03-20|1998-01-10|TAKE BACK RETURN|TRUCK|carefully caref 198|15229|7731|2|20|22884.40|0.03|0.00|N|O|1998-01-15|1998-03-31|1998-01-25|DELIVER IN PERSON|FOB|carefully final escapades a 198|148058|3087|3|15|16590.75|0.04|0.02|N|O|1998-04-12|1998-02-26|1998-04-15|COLLECT COD|MAIL|es. 
quickly pending deposits s 198|10371|2873|4|35|44847.95|0.08|0.02|N|O|1998-02-27|1998-03-23|1998-03-14|TAKE BACK RETURN|RAIL|ests nod quickly furiously sly pinto be 198|101952|1953|5|33|64480.35|0.02|0.01|N|O|1998-03-22|1998-03-12|1998-04-14|DELIVER IN PERSON|SHIP|ending foxes acr 199|132072|9612|1|50|55203.50|0.02|0.00|N|O|1996-06-12|1996-06-03|1996-07-04|DELIVER IN PERSON|MAIL|essly regular ideas boost sly 199|133998|3999|2|30|60959.70|0.08|0.05|N|O|1996-03-27|1996-05-29|1996-04-14|NONE|TRUCK|ilent packages doze quickly. thinly 224|150111|112|1|16|18577.76|0.04|0.00|A|F|1994-08-01|1994-07-30|1994-08-27|DELIVER IN PERSON|MAIL|y unusual foxes 224|108609|1120|2|34|54998.40|0.04|0.08|R|F|1994-07-13|1994-08-25|1994-07-31|COLLECT COD|TRUCK| carefully. final platelets 224|189967|7522|3|41|84335.36|0.07|0.04|A|F|1994-09-01|1994-09-15|1994-09-02|TAKE BACK RETURN|SHIP|after the furiou 224|166377|1410|4|12|17320.44|0.08|0.06|R|F|1994-10-12|1994-08-29|1994-10-20|DELIVER IN PERSON|MAIL|uriously regular packages. slyly fina 224|93857|8876|5|45|83288.25|0.07|0.07|R|F|1994-08-14|1994-09-02|1994-08-27|COLLECT COD|AIR|leep furiously regular requests. furiousl 224|50010|7526|6|4|3840.04|0.02|0.00|R|F|1994-09-08|1994-08-24|1994-10-04|DELIVER IN PERSON|FOB|tructions 225|171925|1926|1|4|7987.68|0.09|0.07|N|O|1995-08-05|1995-08-19|1995-09-03|TAKE BACK RETURN|SHIP|ng the ironic packages. asymptotes among 225|130565|8105|2|3|4786.68|0.00|0.08|N|O|1995-07-25|1995-07-08|1995-08-17|DELIVER IN PERSON|REG AIR| fluffily about the carefully bold a 225|198212|3251|3|45|58959.45|0.06|0.01|N|O|1995-08-17|1995-08-20|1995-08-30|TAKE BACK RETURN|FOB|the slyly even platelets use aro 225|146071|8586|4|24|26809.68|0.00|0.06|N|O|1995-09-23|1995-08-05|1995-10-16|COLLECT COD|MAIL|ironic accounts are final account 225|7589|5090|5|31|46393.98|0.04|0.06|N|O|1995-06-21|1995-07-24|1995-07-04|TAKE BACK RETURN|FOB|special platelets. quickly r 225|131835|9375|6|12|22401.96|0.00|0.00|A|F|1995-06-04|1995-07-15|1995-06-08|COLLECT COD|MAIL| unusual requests. bus 225|141233|8776|7|44|56066.12|0.10|0.06|N|O|1995-09-22|1995-08-16|1995-10-22|NONE|REG AIR|leep slyly 226|96909|9419|1|4|7623.60|0.00|0.00|R|F|1993-03-31|1993-04-30|1993-04-10|NONE|TRUCK|c foxes integrate carefully against th 226|137802|5342|2|46|84630.80|0.06|0.01|A|F|1993-07-06|1993-04-24|1993-07-13|COLLECT COD|FOB|s. carefully bold accounts cajol 226|37309|4819|3|35|43620.50|0.09|0.03|A|F|1993-03-31|1993-05-18|1993-04-01|NONE|RAIL|osits cajole. final, even foxes a 226|40633|8146|4|45|70813.35|0.10|0.02|R|F|1993-04-17|1993-05-27|1993-05-11|DELIVER IN PERSON|AIR| carefully pending pi 226|117956|5490|5|2|3947.90|0.07|0.02|R|F|1993-03-26|1993-04-13|1993-04-20|TAKE BACK RETURN|SHIP|al platelets. express somas 226|82937|7954|6|48|92156.64|0.02|0.00|A|F|1993-06-11|1993-05-15|1993-06-19|NONE|REG AIR|efully silent packages. final deposit 226|117961|5495|7|14|27705.44|0.09|0.00|R|F|1993-05-20|1993-06-05|1993-05-27|COLLECT COD|MAIL|ep carefully regular accounts. ironic 227|165335|2884|1|19|26606.27|0.05|0.06|N|O|1995-12-10|1996-01-30|1995-12-26|NONE|RAIL|s cajole furiously a 227|174102|1654|2|24|28226.40|0.07|0.07|N|O|1996-02-03|1995-12-24|1996-02-12|COLLECT COD|SHIP|uses across the blithe dependencies cajol 228|4039|6540|1|3|2829.09|0.10|0.08|A|F|1993-05-20|1993-04-08|1993-05-26|DELIVER IN PERSON|SHIP|ckages. sly 229|83580|8597|1|20|31271.60|0.02|0.03|R|F|1994-01-11|1994-01-31|1994-01-26|DELIVER IN PERSON|REG AIR|le. 
instructions use across the quickly fin 229|128904|8905|2|29|56054.10|0.07|0.00|A|F|1994-03-15|1994-03-02|1994-03-26|COLLECT COD|SHIP|s, final request 229|78526|8527|3|28|42126.56|0.02|0.02|R|F|1994-02-10|1994-02-02|1994-03-10|DELIVER IN PERSON|FOB| final, regular requests. platel 229|176948|1983|4|3|6074.82|0.02|0.08|R|F|1994-03-22|1994-03-24|1994-04-04|DELIVER IN PERSON|REG AIR|posits. furiously regular theodol 229|155180|211|5|33|40760.94|0.03|0.06|R|F|1994-03-25|1994-02-11|1994-04-13|NONE|FOB| deposits; bold, ruthless theodolites 229|105393|7904|6|29|40553.31|0.04|0.00|R|F|1994-01-14|1994-02-16|1994-01-22|NONE|FOB|uriously pending 230|185863|900|1|46|89647.56|0.09|0.00|R|F|1994-02-03|1994-01-15|1994-02-23|TAKE BACK RETURN|SHIP|old packages ha 230|194908|7428|2|6|12017.40|0.03|0.08|A|F|1994-01-26|1994-01-25|1994-02-13|NONE|REG AIR| sleep furiously about the p 230|7367|4868|3|1|1274.36|0.07|0.06|R|F|1994-01-22|1994-01-03|1994-02-05|TAKE BACK RETURN|RAIL|blithely unusual dolphins. bold, ex 230|9164|1665|4|44|47219.04|0.08|0.06|R|F|1994-02-09|1994-01-18|1994-03-11|NONE|MAIL|deposits integrate slyly sile 230|18923|6427|5|8|14735.36|0.09|0.06|R|F|1993-11-03|1994-01-20|1993-11-11|TAKE BACK RETURN|TRUCK|g the instructions. fluffil 230|33927|1437|6|8|14887.36|0.00|0.05|R|F|1993-11-21|1994-01-05|1993-12-19|TAKE BACK RETURN|FOB|nal ideas. silent, reg 231|158356|8357|1|16|22629.60|0.04|0.08|R|F|1994-11-20|1994-10-29|1994-12-17|TAKE BACK RETURN|AIR|e furiously ironic pinto beans. 231|83359|884|2|46|61748.10|0.04|0.05|R|F|1994-12-13|1994-12-02|1994-12-14|DELIVER IN PERSON|SHIP|affix blithely. bold requests among the f 231|198124|644|3|50|61106.00|0.09|0.01|A|F|1994-12-11|1994-12-14|1994-12-13|NONE|RAIL|onic packages haggle fluffily a 231|56760|6761|4|31|53219.56|0.08|0.02|A|F|1994-11-05|1994-12-27|1994-11-30|TAKE BACK RETURN|SHIP|iously special decoys wake q 256|88233|742|1|22|26867.06|0.09|0.02|R|F|1994-01-12|1993-12-28|1994-01-26|COLLECT COD|FOB|ke quickly ironic, ironic deposits. reg 256|118399|3422|2|40|56695.60|0.10|0.01|A|F|1993-11-30|1993-12-13|1993-12-02|NONE|FOB|nal theodolites. deposits cajole s 256|129111|4136|3|45|51304.95|0.02|0.08|R|F|1994-01-14|1994-01-17|1994-02-10|COLLECT COD|SHIP| grouches. ideas wake quickly ar 257|146229|6230|1|7|8926.54|0.05|0.02|N|O|1998-06-18|1998-05-15|1998-06-27|COLLECT COD|FOB|ackages sleep bold realms. f 258|106194|3725|1|8|9601.52|0.00|0.07|R|F|1994-01-20|1994-03-21|1994-02-09|NONE|REG AIR|ully about the fluffily silent dependencies 258|196119|3677|2|40|48604.40|0.10|0.01|A|F|1994-03-13|1994-02-23|1994-04-05|DELIVER IN PERSON|FOB|silent frets nod daringly busy, bold 258|161762|1763|3|45|82069.20|0.07|0.07|R|F|1994-03-04|1994-02-13|1994-03-30|DELIVER IN PERSON|TRUCK|regular excuses-- fluffily ruthl 258|132912|5426|4|31|60292.21|0.02|0.05|A|F|1994-04-20|1994-03-20|1994-04-28|COLLECT COD|REG AIR| slyly blithely special mul 258|35959|8463|5|25|47373.75|0.08|0.02|A|F|1994-04-13|1994-02-26|1994-04-29|TAKE BACK RETURN|TRUCK|leep pending packages. 258|146467|8982|6|36|54484.56|0.09|0.04|A|F|1994-01-11|1994-03-04|1994-01-18|DELIVER IN PERSON|AIR|nic asymptotes. slyly silent r 259|98779|8780|1|14|24888.78|0.00|0.08|A|F|1993-12-17|1993-12-09|1993-12-31|COLLECT COD|SHIP|ons against the express acco 259|161982|4499|2|14|28615.72|0.03|0.05|R|F|1993-11-10|1993-11-20|1993-11-17|DELIVER IN PERSON|FOB|ully even, regul 259|23514|3515|3|42|60375.42|0.09|0.00|R|F|1993-10-20|1993-11-18|1993-11-12|NONE|TRUCK|the slyly ironic pinto beans. 
fi 259|195335|2893|4|3|4290.99|0.08|0.06|R|F|1993-10-04|1993-11-07|1993-10-14|TAKE BACK RETURN|SHIP|ng slyly at the accounts. 259|192201|7240|5|6|7759.20|0.00|0.05|R|F|1993-12-05|1993-12-22|1993-12-21|COLLECT COD|TRUCK| requests sleep 260|155887|5888|1|50|97144.00|0.07|0.08|N|O|1997-03-24|1997-02-09|1997-04-20|TAKE BACK RETURN|REG AIR|c deposits 260|182736|2737|2|26|47286.98|0.02|0.07|N|O|1996-12-12|1997-02-06|1996-12-15|NONE|TRUCK|ld theodolites boost fl 260|41222|8735|3|27|31406.94|0.05|0.08|N|O|1997-03-23|1997-02-15|1997-04-22|TAKE BACK RETURN|RAIL|ions according to the 260|5337|338|4|29|36027.57|0.10|0.06|N|O|1997-03-15|1997-01-14|1997-04-13|NONE|MAIL|fluffily even asymptotes. express wa 260|95286|305|5|44|56376.32|0.01|0.05|N|O|1997-03-26|1997-02-03|1997-04-19|DELIVER IN PERSON|MAIL|above the blithely ironic instr 261|1349|6350|1|34|42511.56|0.05|0.08|R|F|1993-08-18|1993-09-24|1993-08-20|COLLECT COD|REG AIR|c packages. asymptotes da 261|65662|5663|2|20|32553.20|0.00|0.06|R|F|1993-10-21|1993-08-02|1993-11-04|DELIVER IN PERSON|RAIL|ites hinder 261|173959|8994|3|28|56922.60|0.08|0.03|R|F|1993-07-24|1993-08-20|1993-08-05|COLLECT COD|AIR|ironic packages nag slyly. carefully fin 261|118455|967|4|49|72199.05|0.04|0.05|R|F|1993-09-12|1993-08-31|1993-10-07|COLLECT COD|SHIP|ions. bold accounts 261|60469|7988|5|49|70043.54|0.01|0.08|A|F|1993-09-29|1993-09-08|1993-10-01|COLLECT COD|SHIP| pinto beans haggle slyly furiously pending 261|96989|9499|6|20|39719.60|0.06|0.06|A|F|1993-10-15|1993-09-05|1993-11-07|NONE|AIR|ing to the special, ironic deposi 262|191186|1187|1|39|49810.02|0.01|0.05|N|O|1996-01-15|1996-02-18|1996-01-28|COLLECT COD|RAIL|usual, regular requests 262|60074|7593|2|33|34124.31|0.09|0.03|N|O|1996-03-10|1996-01-31|1996-03-27|TAKE BACK RETURN|AIR|atelets sleep furiously. requests cajole. b 262|58695|6211|3|35|57879.15|0.05|0.08|N|O|1996-03-12|1996-02-14|1996-04-11|COLLECT COD|MAIL|lites cajole along the pending packag 263|23960|6463|1|22|41447.12|0.06|0.08|R|F|1994-08-24|1994-06-20|1994-09-09|NONE|FOB|efully express fo 263|84557|9574|2|9|13873.95|0.08|0.00|A|F|1994-07-21|1994-07-16|1994-08-08|TAKE BACK RETURN|TRUCK|lms wake bl 263|142891|434|3|50|96694.50|0.06|0.04|R|F|1994-08-18|1994-07-31|1994-08-22|NONE|TRUCK|re the packages. special 288|50641|8157|1|31|49340.84|0.00|0.03|N|O|1997-03-17|1997-04-28|1997-04-06|TAKE BACK RETURN|AIR|instructions wa 288|116386|8898|2|49|68716.62|0.08|0.05|N|O|1997-04-19|1997-05-19|1997-05-18|TAKE BACK RETURN|TRUCK|ic excuses sleep always spe 288|98833|8834|3|36|65945.88|0.02|0.02|N|O|1997-02-22|1997-05-07|1997-03-07|TAKE BACK RETURN|TRUCK|yly pending excu 288|78406|8407|4|19|26303.60|0.07|0.07|N|O|1997-03-14|1997-04-04|1997-03-26|NONE|MAIL|deposits. blithely quick courts ar 288|161894|6927|5|31|60632.59|0.10|0.04|N|O|1997-05-29|1997-04-24|1997-06-20|TAKE BACK RETURN|RAIL|ns. fluffily 289|173280|832|1|25|33832.00|0.07|0.05|N|O|1997-03-18|1997-05-05|1997-04-15|DELIVER IN PERSON|FOB|out the quickly bold theodol 289|111800|9334|2|6|10870.80|0.06|0.05|N|O|1997-02-18|1997-05-08|1997-03-19|DELIVER IN PERSON|SHIP|d packages use fluffily furiously 289|16996|1999|3|44|84171.56|0.10|0.08|N|O|1997-06-05|1997-04-20|1997-07-02|COLLECT COD|MAIL|ly ironic foxes. asymptotes 289|39439|1943|4|48|66164.64|0.01|0.08|N|O|1997-03-14|1997-03-30|1997-03-24|DELIVER IN PERSON|RAIL|sits cajole. bold pinto beans x-ray fl 289|46285|8790|5|13|16006.64|0.10|0.03|N|O|1997-06-08|1997-04-06|1997-06-18|TAKE BACK RETURN|REG AIR|ts. 
quickly bold deposits alongside 290|5351|352|1|35|43972.25|0.01|0.02|R|F|1994-04-01|1994-02-05|1994-04-27|NONE|MAIL|ove the final foxes detect slyly fluffily 290|128923|1436|2|2|3903.84|0.05|0.04|A|F|1994-01-30|1994-02-13|1994-02-21|TAKE BACK RETURN|TRUCK|. permanently furious reques 290|1888|4389|3|5|8949.40|0.03|0.05|A|F|1994-01-19|1994-02-24|1994-01-27|NONE|MAIL|ans integrate. requests sleep. fur 290|123741|6254|4|23|40589.02|0.05|0.08|R|F|1994-03-14|1994-02-21|1994-04-09|NONE|AIR|refully unusual packages. 291|122565|102|1|21|33338.76|0.05|0.07|A|F|1994-05-26|1994-05-10|1994-06-23|COLLECT COD|TRUCK|y quickly regular theodolites. final t 291|137316|7317|2|19|25712.89|0.08|0.02|R|F|1994-06-14|1994-04-25|1994-06-19|NONE|REG AIR|e. ruthlessly final accounts after the 291|60874|5887|3|30|55046.10|0.10|0.02|R|F|1994-03-22|1994-04-30|1994-03-24|DELIVER IN PERSON|FOB| fluffily regular deposits. quickl 292|153561|3562|1|8|12916.48|0.10|0.03|R|F|1992-02-18|1992-03-30|1992-03-18|DELIVER IN PERSON|RAIL|sily bold deposits alongside of the ex 292|99249|9250|2|24|29957.76|0.08|0.04|R|F|1992-03-24|1992-03-06|1992-04-20|COLLECT COD|TRUCK| bold, pending theodolites u 293|8960|6461|1|14|26165.44|0.02|0.05|R|F|1992-10-19|1992-12-23|1992-11-10|DELIVER IN PERSON|SHIP|es. packages above the 293|186406|6407|2|11|16416.40|0.10|0.04|R|F|1992-12-24|1992-12-01|1993-01-12|COLLECT COD|MAIL| affix carefully quickly special idea 293|117267|4801|3|13|16695.38|0.04|0.02|A|F|1992-12-17|1992-12-26|1992-12-22|COLLECT COD|RAIL| wake after the quickly even deposits. bli 294|59620|7136|1|31|48968.22|0.00|0.01|R|F|1993-08-06|1993-08-19|1993-08-13|TAKE BACK RETURN|AIR|le fluffily along the quick 295|197507|27|1|29|46530.50|0.02|0.07|A|F|1994-11-09|1994-12-08|1994-12-07|COLLECT COD|MAIL|inst the carefully ironic pinto beans. blit 295|91344|8872|2|26|34718.84|0.04|0.03|R|F|1994-12-13|1994-11-30|1995-01-06|DELIVER IN PERSON|AIR|ts above the slyly regular requests x-ray q 295|15283|7785|3|8|9586.24|0.10|0.07|R|F|1995-01-13|1994-11-17|1995-01-25|NONE|TRUCK| final instructions h 295|60621|3128|4|26|41122.12|0.10|0.04|A|F|1995-01-12|1994-11-22|1995-01-22|DELIVER IN PERSON|MAIL| carefully iron 320|4415|1916|1|30|39582.30|0.05|0.01|N|O|1997-12-04|1998-01-21|1997-12-13|NONE|RAIL| ironic, final accounts wake quick de 320|192158|4678|2|13|16251.95|0.03|0.00|N|O|1997-12-16|1997-12-26|1997-12-17|TAKE BACK RETURN|AIR|he furiously regular pinto beans. car 321|318|7819|1|21|25584.51|0.01|0.08|A|F|1993-07-18|1993-04-24|1993-08-13|TAKE BACK RETURN|REG AIR|hockey players sleep slyly sl 321|140433|5462|2|41|60410.63|0.08|0.07|R|F|1993-06-21|1993-06-07|1993-07-09|NONE|REG AIR|special packages shall have to doze blit 322|152499|7530|1|12|18617.88|0.08|0.07|A|F|1992-06-29|1992-05-30|1992-07-11|NONE|AIR|ular theodolites promise qu 322|43662|3663|2|48|77071.68|0.02|0.07|A|F|1992-06-11|1992-06-16|1992-06-26|COLLECT COD|RAIL|dolites detect qu 322|12673|177|3|20|31713.40|0.04|0.01|R|F|1992-04-26|1992-05-04|1992-05-22|DELIVER IN PERSON|MAIL|ckly toward 322|183246|5765|4|10|13292.40|0.06|0.03|R|F|1992-04-12|1992-05-13|1992-04-14|DELIVER IN PERSON|AIR| deposits grow slyly according to th 322|11605|9109|5|35|53081.00|0.07|0.06|A|F|1992-07-17|1992-05-03|1992-08-14|TAKE BACK RETURN|RAIL|egular accounts cajole carefully. 
even d 322|33310|8317|6|3|3729.93|0.08|0.05|A|F|1992-07-03|1992-05-10|1992-07-28|NONE|AIR|ending, ironic deposits along the blith 322|37435|4945|7|5|6862.15|0.01|0.02|A|F|1992-04-15|1992-05-12|1992-04-26|COLLECT COD|REG AIR| special grouches sleep quickly instructio 323|163628|1177|1|50|84581.00|0.05|0.04|A|F|1994-04-20|1994-04-25|1994-05-12|DELIVER IN PERSON|REG AIR|cial requests 323|95136|7646|2|18|20360.34|0.06|0.07|R|F|1994-04-13|1994-06-02|1994-05-10|DELIVER IN PERSON|TRUCK|posits cajole furiously pinto beans. 323|142725|2726|3|9|15909.48|0.07|0.04|A|F|1994-06-26|1994-06-10|1994-07-13|COLLECT COD|TRUCK|nic accounts. regular, regular pack 324|199475|4514|1|26|40936.22|0.07|0.01|R|F|1992-04-19|1992-05-28|1992-05-12|DELIVER IN PERSON|RAIL|ross the slyly regular s 325|158791|6337|1|34|62892.86|0.09|0.04|A|F|1993-10-28|1993-12-13|1993-11-17|TAKE BACK RETURN|MAIL|ly bold deposits. always iron 325|185139|7658|2|5|6120.65|0.07|0.08|A|F|1994-01-02|1994-01-05|1994-01-04|TAKE BACK RETURN|MAIL| theodolites. 325|18788|1290|3|35|59737.30|0.07|0.07|A|F|1993-12-06|1994-01-03|1993-12-26|DELIVER IN PERSON|REG AIR|packages wa 326|179094|4129|1|41|48096.69|0.06|0.03|N|O|1995-08-30|1995-07-09|1995-09-12|DELIVER IN PERSON|TRUCK|ily quickly bold ideas. 326|19480|1982|2|38|53180.24|0.02|0.08|N|O|1995-09-12|1995-08-23|1995-09-14|COLLECT COD|RAIL|es sleep slyly. carefully regular inst 326|183739|8776|3|25|45568.25|0.03|0.04|N|O|1995-08-03|1995-07-27|1995-08-16|NONE|AIR|ily furiously unusual accounts. 326|84836|9853|4|5|9104.15|0.03|0.08|N|O|1995-07-29|1995-07-13|1995-08-12|NONE|REG AIR|deas sleep according to the sometimes spe 326|34543|9550|5|31|45803.74|0.04|0.08|N|O|1995-09-27|1995-07-06|1995-10-22|NONE|TRUCK|cies sleep quick 326|156712|4258|6|41|72517.11|0.02|0.00|N|O|1995-07-05|1995-07-23|1995-07-20|TAKE BACK RETURN|REG AIR|to beans wake before the furiously re 326|42134|4639|7|47|50578.11|0.04|0.04|N|O|1995-09-16|1995-07-04|1995-10-04|NONE|REG AIR| special accounts sleep 327|143503|1046|1|16|24744.00|0.03|0.01|N|O|1995-07-05|1995-06-07|1995-07-09|TAKE BACK RETURN|TRUCK|cial ideas sleep af 327|41715|4220|2|9|14910.39|0.09|0.05|A|F|1995-05-24|1995-07-11|1995-06-05|NONE|AIR| asymptotes are fu 352|63762|3763|1|17|29337.92|0.07|0.05|R|F|1994-06-02|1994-05-31|1994-06-29|NONE|FOB|pending deposits sleep furiously 353|119305|4328|1|41|54296.30|0.00|0.06|A|F|1994-03-25|1994-03-31|1994-03-30|DELIVER IN PERSON|AIR|refully final theodoli 353|147542|7543|2|29|46096.66|0.09|0.00|A|F|1994-01-11|1994-03-19|1994-02-09|COLLECT COD|FOB|ctions impr 353|134318|1858|3|12|16227.72|0.06|0.01|R|F|1994-01-02|1994-03-26|1994-01-19|DELIVER IN PERSON|RAIL|g deposits cajole 353|77071|2086|4|46|48211.22|0.00|0.04|A|F|1994-04-14|1994-01-31|1994-05-05|DELIVER IN PERSON|FOB| ironic dolphins 353|116803|1826|5|9|16378.20|0.02|0.02|A|F|1994-03-15|1994-03-20|1994-03-18|TAKE BACK RETURN|RAIL|ual accounts! carefu 353|102699|2700|6|39|66365.91|0.02|0.05|A|F|1994-01-15|1994-03-30|1994-02-01|NONE|MAIL|losely quickly even accounts. c 354|49480|1985|1|14|20012.72|0.08|0.04|N|O|1996-04-12|1996-06-03|1996-05-08|NONE|SHIP|quickly regular grouches will eat. careful 354|193864|1422|2|24|46988.64|0.01|0.01|N|O|1996-05-08|1996-05-17|1996-06-07|DELIVER IN PERSON|AIR|y silent requests. 
regular, even accounts 354|58125|8126|3|50|54156.00|0.08|0.05|N|O|1996-03-21|1996-05-20|1996-04-04|COLLECT COD|TRUCK|to beans s 354|106672|4203|4|7|11750.69|0.06|0.01|N|O|1996-05-07|1996-04-18|1996-05-24|NONE|MAIL|ously idly ironic accounts-- quickl 354|30527|528|5|18|26235.36|0.04|0.08|N|O|1996-03-31|1996-05-13|1996-04-27|DELIVER IN PERSON|RAIL| about the carefully unusual 354|61082|3589|6|36|37550.88|0.03|0.02|N|O|1996-03-19|1996-05-29|1996-03-30|NONE|AIR|onic requests thrash bold g 354|4660|9661|7|14|21905.24|0.01|0.07|N|O|1996-07-06|1996-06-08|1996-07-10|TAKE BACK RETURN|MAIL|t thinly above the ironic, 355|113959|8982|1|31|61161.45|0.09|0.07|A|F|1994-07-13|1994-08-18|1994-07-18|DELIVER IN PERSON|FOB|y unusual, ironic 355|96030|3558|2|41|42067.23|0.05|0.00|A|F|1994-08-15|1994-07-19|1994-09-06|DELIVER IN PERSON|TRUCK| deposits. carefully r 356|45214|5215|1|4|4636.84|0.10|0.01|A|F|1994-07-28|1994-08-01|1994-08-04|DELIVER IN PERSON|REG AIR| the dependencies nod unusual, final ac 356|107463|2484|2|48|70582.08|0.02|0.03|R|F|1994-08-12|1994-07-31|1994-08-26|NONE|FOB|unusual packages. furiously 356|118002|514|3|35|35700.00|0.08|0.07|R|F|1994-10-14|1994-07-31|1994-10-23|COLLECT COD|TRUCK|s. unusual, final 356|55342|353|4|41|53190.94|0.07|0.05|A|F|1994-09-28|1994-09-20|1994-10-07|COLLECT COD|SHIP| according to the express foxes will 356|124271|1808|5|37|47924.99|0.05|0.03|A|F|1994-07-15|1994-08-24|1994-08-09|DELIVER IN PERSON|FOB|ndencies are since the packag 357|113143|3144|1|26|30059.64|0.06|0.03|N|O|1996-12-28|1996-11-26|1997-01-13|NONE|FOB| carefully pending accounts use a 357|185814|8333|2|36|68393.16|0.07|0.06|N|O|1996-12-28|1996-11-13|1997-01-24|DELIVER IN PERSON|AIR|d the carefully even requests. 357|164807|9840|3|32|59897.60|0.05|0.07|N|O|1997-01-28|1996-12-29|1997-02-14|NONE|MAIL|y above the carefully final accounts 358|190028|2548|1|41|45838.82|0.06|0.01|A|F|1993-11-18|1993-11-14|1993-11-28|NONE|TRUCK|ely frets. furious deposits sleep 358|189955|7510|2|32|65438.40|0.05|0.08|A|F|1993-10-18|1993-12-12|1993-10-31|NONE|TRUCK|y final foxes sleep blithely sl 358|168710|3743|3|40|71148.40|0.09|0.01|A|F|1993-12-05|1993-11-04|1994-01-01|COLLECT COD|MAIL|ng the ironic theo 358|96557|1576|4|15|23303.25|0.08|0.08|A|F|1993-10-04|1993-12-17|1993-10-23|TAKE BACK RETURN|MAIL|out the blithely ironic deposits slee 358|28629|6136|5|18|28037.16|0.01|0.02|R|F|1993-10-07|1993-11-01|1993-10-26|COLLECT COD|SHIP|olphins haggle ironic accounts. f 358|161283|1284|6|32|43016.96|0.03|0.05|R|F|1993-12-21|1993-11-06|1994-01-17|DELIVER IN PERSON|RAIL|lyly express deposits 358|82916|7933|7|45|85450.95|0.05|0.02|A|F|1993-12-08|1993-10-29|1993-12-30|NONE|REG AIR|to beans. regular, unusual deposits sl 359|165980|5981|1|30|61379.40|0.00|0.08|A|F|1995-01-06|1995-02-20|1995-01-20|TAKE BACK RETURN|AIR|uses detect spec 359|11158|6161|2|18|19244.70|0.00|0.03|A|F|1995-01-27|1995-03-18|1995-01-31|DELIVER IN PERSON|RAIL|unusual warthogs. ironically sp 359|131463|3977|3|17|25405.82|0.07|0.06|A|F|1995-01-31|1995-03-18|1995-02-10|COLLECT COD|SHIP|sts according to the blithely 359|89985|2494|4|38|75049.24|0.10|0.08|R|F|1995-03-30|1995-01-20|1995-04-25|DELIVER IN PERSON|RAIL|g furiously. regular, sile 359|167239|2272|5|11|14368.53|0.01|0.03|A|F|1995-02-15|1995-01-27|1995-02-18|NONE|FOB|rets wake blithely. 
slyly final dep 359|182663|218|6|23|40150.18|0.04|0.07|R|F|1995-01-31|1995-03-11|1995-02-16|DELIVER IN PERSON|REG AIR|ic courts snooze quickly furiously final fo 384|178442|3477|1|38|57776.72|0.07|0.01|R|F|1992-06-02|1992-04-18|1992-06-10|DELIVER IN PERSON|TRUCK|totes cajole blithely against the even 384|63342|5849|2|49|63961.66|0.09|0.07|A|F|1992-04-01|1992-04-25|1992-04-18|COLLECT COD|AIR|refully carefully ironic instructions. bl 384|181502|6539|3|11|17418.50|0.02|0.08|A|F|1992-04-02|1992-04-21|1992-04-15|COLLECT COD|MAIL|ash carefully 384|92053|7072|4|11|11495.55|0.00|0.06|R|F|1992-06-24|1992-05-29|1992-07-22|COLLECT COD|TRUCK|nic excuses are furiously above the blith 384|131403|8943|5|14|20081.60|0.08|0.06|R|F|1992-06-14|1992-05-29|1992-07-05|DELIVER IN PERSON|TRUCK|ckages are slyly after the slyly specia 385|166446|8963|1|7|10587.08|0.05|0.06|N|O|1996-05-23|1996-05-09|1996-06-06|DELIVER IN PERSON|REG AIR| special asymptote 385|53025|8036|2|46|44988.92|0.08|0.07|N|O|1996-03-29|1996-05-17|1996-04-18|NONE|REG AIR|lthily ironic f 386|152405|9951|1|39|56838.60|0.10|0.07|A|F|1995-05-10|1995-02-28|1995-05-25|NONE|SHIP|hely. carefully regular accounts hag 386|68123|5642|2|16|17457.92|0.06|0.01|A|F|1995-04-12|1995-04-18|1995-05-11|DELIVER IN PERSON|MAIL|lithely fluffi 386|130081|82|3|37|41109.96|0.09|0.04|A|F|1995-05-23|1995-03-01|1995-05-25|TAKE BACK RETURN|MAIL|ending pearls breach fluffily. slyly pen 387|136667|1694|1|1|1703.66|0.08|0.03|N|O|1997-05-06|1997-04-23|1997-05-10|NONE|SHIP| pinto beans wake furiously carefu 387|152800|2801|2|42|77817.60|0.07|0.05|N|O|1997-05-25|1997-02-25|1997-05-29|DELIVER IN PERSON|RAIL|lithely final theodolites. 387|96392|1411|3|40|55535.60|0.09|0.02|N|O|1997-03-08|1997-04-18|1997-03-31|COLLECT COD|TRUCK| quickly ironic platelets are slyly. fluff 387|55927|5928|4|19|35775.48|0.08|0.00|N|O|1997-03-14|1997-04-21|1997-04-04|NONE|REG AIR|gular dependencies 387|148313|828|5|32|43561.92|0.08|0.06|N|O|1997-05-02|1997-04-11|1997-05-11|DELIVER IN PERSON|TRUCK|gle. silent, fur 388|32590|100|1|42|63948.78|0.05|0.06|R|F|1993-02-21|1993-02-26|1993-03-15|COLLECT COD|FOB|accounts sleep furiously 388|127808|7809|2|46|84446.80|0.07|0.01|A|F|1993-03-22|1993-01-26|1993-03-24|COLLECT COD|FOB|to beans nag about the careful reque 388|64486|9499|3|40|58019.20|0.06|0.01|A|F|1992-12-24|1993-01-28|1993-01-19|TAKE BACK RETURN|REG AIR|quests against the carefully unusual epi 389|189295|1814|1|2|2768.58|0.09|0.00|R|F|1994-04-13|1994-04-10|1994-04-25|TAKE BACK RETURN|RAIL|fts. courts eat blithely even dependenc 390|106523|9034|1|10|15295.20|0.02|0.05|N|O|1998-05-26|1998-07-06|1998-06-23|TAKE BACK RETURN|SHIP| requests. final accounts x-ray beside the 390|123353|890|2|17|23397.95|0.09|0.06|N|O|1998-06-07|1998-06-14|1998-07-07|COLLECT COD|SHIP|ending, pending pinto beans wake slyl 390|183266|8303|3|46|62065.96|0.07|0.04|N|O|1998-06-06|1998-05-20|1998-06-14|DELIVER IN PERSON|SHIP|cial excuses. bold, pending packages 390|141937|1938|4|42|83115.06|0.01|0.05|N|O|1998-06-06|1998-06-22|1998-07-05|COLLECT COD|SHIP|counts nag across the sly, sil 390|127657|170|5|13|21900.45|0.02|0.06|N|O|1998-07-08|1998-05-10|1998-07-18|DELIVER IN PERSON|SHIP|sleep carefully idle packages. blithely 390|124632|9657|6|11|18222.93|0.09|0.06|N|O|1998-05-05|1998-05-15|1998-06-01|DELIVER IN PERSON|SHIP|according to the foxes are furiously 390|84937|2462|7|24|46126.32|0.05|0.02|N|O|1998-04-18|1998-05-19|1998-04-28|TAKE BACK RETURN|AIR|y. 
enticingly final depos 391|121586|6611|1|14|22506.12|0.09|0.02|R|F|1995-02-11|1995-02-03|1995-02-13|TAKE BACK RETURN|TRUCK| escapades sleep furiously about 416|93563|6073|1|25|38914.00|0.00|0.05|A|F|1993-10-11|1993-11-26|1993-10-21|DELIVER IN PERSON|TRUCK|y final theodolites about 416|110869|8403|2|22|41356.92|0.10|0.00|R|F|1993-12-27|1993-12-17|1994-01-09|COLLECT COD|RAIL|rint blithely above the pending sentim 416|174101|6619|3|25|29377.50|0.07|0.01|R|F|1993-10-16|1993-12-03|1993-10-29|NONE|AIR|ses boost after the bold requests. 417|39560|9561|1|39|58482.84|0.01|0.02|A|F|1994-05-31|1994-05-02|1994-06-06|NONE|SHIP|y regular requests wake along 417|69212|4225|2|18|21261.78|0.00|0.01|R|F|1994-03-29|1994-04-10|1994-04-26|TAKE BACK RETURN|FOB|- final requests sle 417|44192|6697|3|41|46583.79|0.10|0.01|R|F|1994-04-11|1994-03-08|1994-05-06|COLLECT COD|RAIL|tes. regular requests across the 417|131087|1088|4|2|2236.16|0.01|0.03|R|F|1994-02-13|1994-04-19|1994-03-15|DELIVER IN PERSON|SHIP|uriously bol 418|18552|1054|1|31|45587.05|0.00|0.03|N|F|1995-06-05|1995-06-18|1995-06-26|COLLECT COD|FOB|final theodolites. fluffil 418|1062|3563|2|1|963.06|0.04|0.07|N|O|1995-06-23|1995-06-16|1995-07-23|DELIVER IN PERSON|AIR|regular, silent pinto 418|34829|7333|3|3|5291.46|0.04|0.06|N|O|1995-06-29|1995-07-12|1995-07-01|COLLECT COD|AIR|ly furiously regular w 419|152691|7722|1|33|57541.77|0.05|0.02|N|O|1996-11-06|1996-12-25|1996-11-20|TAKE BACK RETURN|TRUCK|y above the bli 419|64192|9205|2|32|36998.08|0.01|0.06|N|O|1996-12-04|1996-12-04|1996-12-24|COLLECT COD|SHIP|blithely regular requests. special pinto 419|70495|3003|3|15|21982.35|0.07|0.04|N|O|1996-12-17|1996-11-28|1996-12-19|TAKE BACK RETURN|REG AIR| sleep final, regular theodolites. fluffi 419|8756|6257|4|15|24971.25|0.01|0.02|N|O|1997-01-09|1996-12-22|1997-01-25|COLLECT COD|FOB|of the careful, thin theodolites. quickly s 419|148401|3430|5|17|24639.80|0.01|0.00|N|O|1997-01-13|1996-12-20|1997-02-01|COLLECT COD|REG AIR|lar dependencies: carefully regu 420|100885|5906|1|5|9429.40|0.04|0.03|N|O|1995-11-04|1996-01-02|1995-11-30|NONE|REG AIR|cajole blit 420|161079|8628|2|22|25081.54|0.05|0.04|N|O|1996-01-25|1995-12-16|1996-02-03|TAKE BACK RETURN|AIR|ly against the blithely re 420|47557|2566|3|45|67704.75|0.09|0.08|N|O|1996-01-14|1996-01-01|1996-01-26|COLLECT COD|FOB| final accounts. furiously express forges 420|74795|4796|4|12|21237.48|0.08|0.08|N|O|1996-02-05|1996-01-03|1996-02-12|TAKE BACK RETURN|REG AIR|c instructions are 420|72918|7933|5|37|69963.67|0.02|0.00|N|O|1995-11-16|1995-12-13|1995-11-19|DELIVER IN PERSON|SHIP|rbits. bold requests along the quickl 420|123736|1273|6|40|70389.20|0.01|0.05|N|O|1995-11-26|1995-12-26|1995-12-20|TAKE BACK RETURN|FOB| after the special 420|15978|5979|7|39|73864.83|0.00|0.08|N|O|1995-12-09|1995-12-16|1995-12-31|DELIVER IN PERSON|REG AIR|s. ironic waters about the car 421|133070|3071|1|1|1103.07|0.02|0.07|R|F|1992-05-29|1992-04-27|1992-06-09|NONE|TRUCK|oldly busy deposit 422|151816|4332|1|25|46695.25|0.10|0.07|N|O|1997-07-01|1997-08-17|1997-07-09|DELIVER IN PERSON|SHIP|carefully bold theodolit 422|170666|3184|2|10|17366.60|0.02|0.03|N|O|1997-06-15|1997-08-04|1997-07-08|TAKE BACK RETURN|AIR|he furiously ironic theodolite 422|175984|3536|3|46|94759.08|0.09|0.00|N|O|1997-06-21|1997-07-14|1997-06-27|DELIVER IN PERSON|RAIL| ideas. 
qu 422|161622|9171|4|25|42090.50|0.10|0.04|N|O|1997-08-24|1997-07-09|1997-09-22|NONE|FOB|ep along the furiousl 423|131890|6917|1|27|51891.03|0.06|0.03|N|O|1996-08-20|1996-08-01|1996-08-23|TAKE BACK RETURN|SHIP|ccounts. blithely regular pack 448|125197|5198|1|4|4888.76|0.00|0.04|N|O|1995-11-25|1995-10-20|1995-11-26|TAKE BACK RETURN|MAIL|nts thrash quickly among the b 448|172359|9911|2|46|65842.10|0.05|0.00|N|O|1995-08-31|1995-09-30|1995-09-09|COLLECT COD|SHIP| to the fluffily ironic packages. 448|26809|1814|3|35|60753.00|0.10|0.08|N|O|1995-09-27|1995-11-19|1995-10-20|COLLECT COD|REG AIR|ses nag quickly quickly ir 448|169045|9046|4|8|8912.32|0.10|0.00|N|O|1995-11-02|1995-10-16|1995-11-15|COLLECT COD|TRUCK|ounts wake blithely. furiously pending 448|137283|7284|5|23|30366.44|0.02|0.05|N|O|1995-09-26|1995-11-02|1995-10-17|NONE|SHIP|ious, final gifts 449|151908|6939|1|12|23518.80|0.02|0.08|N|O|1995-11-06|1995-08-25|1995-11-18|TAKE BACK RETURN|SHIP|ly. blithely ironic 449|108408|5939|2|4|5665.60|0.10|0.06|N|O|1995-10-27|1995-09-14|1995-11-21|DELIVER IN PERSON|FOB|are fluffily. requests are furiously 449|9982|9983|3|3|5675.94|0.07|0.08|N|O|1995-07-28|1995-09-11|1995-08-01|NONE|RAIL| bold deposits. express theodolites haggle 449|157659|2690|4|22|37766.30|0.07|0.00|N|O|1995-08-17|1995-09-04|1995-09-10|COLLECT COD|FOB|furiously final theodolites eat careful 450|161582|9131|1|42|69030.36|0.03|0.00|N|F|1995-06-07|1995-05-29|1995-06-23|TAKE BACK RETURN|SHIP|y asymptotes. regular depen 450|106298|6299|2|5|6521.45|0.03|0.02|A|F|1995-04-02|1995-05-06|1995-04-13|TAKE BACK RETURN|TRUCK|the pinto bea 450|142528|7557|3|32|50256.64|0.06|0.03|N|O|1995-07-02|1995-04-25|1995-07-30|TAKE BACK RETURN|SHIP| accounts nod fluffily even, pending 450|56267|3783|4|40|48930.40|0.05|0.03|R|F|1995-03-20|1995-05-25|1995-04-14|NONE|RAIL|ve. asymptote 450|78048|8049|5|2|2052.08|0.09|0.00|A|F|1995-03-11|1995-05-21|1995-03-16|COLLECT COD|AIR|y even pinto beans; qui 450|152726|5242|6|33|58697.76|0.08|0.05|R|F|1995-05-18|1995-05-22|1995-05-23|TAKE BACK RETURN|REG AIR|ily carefully final depo 451|129532|4557|1|36|56215.08|0.02|0.06|N|O|1998-06-18|1998-08-14|1998-06-20|TAKE BACK RETURN|AIR|rges can haggle carefully ironic, dogged 451|32028|7035|2|42|40320.84|0.05|0.01|N|O|1998-08-01|1998-08-05|1998-08-30|DELIVER IN PERSON|TRUCK|express excuses. blithely ironic pin 451|86136|6137|3|1|1122.13|0.07|0.05|N|O|1998-07-13|1998-07-03|1998-08-04|DELIVER IN PERSON|AIR| carefully ironic packages solve furiously 451|76558|4080|4|28|42967.40|0.04|0.05|N|O|1998-06-16|1998-07-09|1998-06-17|DELIVER IN PERSON|SHIP| theodolites. even cou 452|114639|4640|1|2|3307.26|0.04|0.03|N|O|1997-12-26|1998-01-03|1998-01-12|COLLECT COD|FOB|y express instru 453|197917|2956|1|45|90670.95|0.01|0.00|N|O|1997-06-30|1997-08-20|1997-07-19|COLLECT COD|REG AIR|ifts wake carefully. 453|175131|2683|2|38|45832.94|0.08|0.04|N|O|1997-06-30|1997-07-08|1997-07-16|DELIVER IN PERSON|REG AIR| furiously f 453|13144|8147|3|38|40171.32|0.10|0.01|N|O|1997-08-10|1997-07-24|1997-09-07|NONE|SHIP|sts cajole. furiously un 453|95748|5749|4|45|78468.30|0.10|0.01|N|O|1997-09-18|1997-06-29|1997-10-14|TAKE BACK RETURN|AIR|ironic foxes. slyly pending depos 453|25722|8225|5|32|52727.04|0.04|0.01|N|O|1997-07-15|1997-06-27|1997-07-18|NONE|REG AIR|s. fluffily bold packages cajole. unu 453|94318|6828|6|28|36744.68|0.07|0.07|N|O|1997-08-16|1997-08-12|1997-08-27|NONE|MAIL|final dependencies. slyly special pl 454|117595|5129|1|24|38702.16|0.06|0.01|N|O|1996-04-26|1996-03-23|1996-05-20|NONE|TRUCK|le. 
deposits after the ideas nag unusual pa 455|156485|4031|1|42|64742.16|0.10|0.02|N|O|1997-01-26|1997-01-10|1997-02-22|DELIVER IN PERSON|REG AIR|around the quickly blit 455|27230|7231|2|44|50918.12|0.05|0.08|N|O|1997-01-17|1997-02-22|1997-02-12|TAKE BACK RETURN|TRUCK| accounts sleep slyly ironic asymptote 455|48360|3369|3|45|58876.20|0.04|0.06|N|O|1996-12-20|1997-01-31|1997-01-07|TAKE BACK RETURN|SHIP|thrash ironically regular packages. qui 455|170012|7564|4|11|11902.11|0.01|0.02|N|O|1997-03-15|1997-02-14|1997-03-26|DELIVER IN PERSON|MAIL|g deposits against the slyly idle foxes u 480|52148|2149|1|22|24203.08|0.04|0.02|A|F|1993-06-16|1993-07-28|1993-07-09|NONE|MAIL|into beans cajole furiously. accounts s 481|18649|6153|1|17|26649.88|0.07|0.05|A|F|1992-10-21|1992-12-09|1992-11-19|DELIVER IN PERSON|MAIL|. quickly final accounts among the 481|20646|647|2|19|29766.16|0.08|0.01|R|F|1993-01-09|1992-11-27|1993-01-14|TAKE BACK RETURN|AIR|p blithely after t 481|185785|5786|3|42|78572.76|0.08|0.08|A|F|1992-11-27|1992-11-11|1992-12-08|COLLECT COD|RAIL|mptotes are furiously among the iron 481|81009|6026|4|11|10890.00|0.05|0.06|A|F|1993-01-12|1992-11-17|1993-02-05|NONE|FOB|eful attai 481|111956|6979|5|31|61006.45|0.05|0.01|A|F|1993-01-15|1992-12-31|1993-01-21|DELIVER IN PERSON|AIR|usly final packages believe. quick 482|137343|7344|1|32|44170.88|0.00|0.02|N|O|1996-05-22|1996-05-14|1996-05-29|NONE|SHIP|usual deposits affix against 482|121382|8919|2|1|1403.38|0.05|0.08|N|O|1996-05-29|1996-05-20|1996-05-31|COLLECT COD|AIR|es. quickly ironic escapades sleep furious 482|61141|6154|3|31|34166.34|0.04|0.03|N|O|1996-06-01|1996-05-06|1996-06-17|NONE|MAIL| blithe pin 482|195826|5827|4|8|15374.56|0.02|0.05|N|O|1996-04-19|1996-05-05|1996-04-21|NONE|TRUCK|tructions near the final, regular ideas de 482|38215|3222|5|46|53047.66|0.01|0.06|N|O|1996-07-19|1996-06-05|1996-08-10|NONE|MAIL|furiously thin realms. final, fina 482|78696|8697|6|19|31819.11|0.04|0.00|N|O|1996-03-27|1996-04-25|1996-04-15|NONE|FOB|ts hinder carefully silent requests 483|32694|5198|1|8|13013.52|0.00|0.08|N|O|1995-08-22|1995-08-23|1995-09-18|COLLECT COD|RAIL|osits. carefully fin 483|79758|9759|2|23|39968.25|0.04|0.06|N|O|1995-07-20|1995-08-11|1995-08-04|DELIVER IN PERSON|MAIL|requests was quickly against th 483|87745|254|3|9|15594.66|0.04|0.03|N|O|1995-09-10|1995-09-02|1995-09-13|NONE|AIR| carefully express ins 484|30133|5140|1|49|52093.37|0.10|0.02|N|O|1997-03-06|1997-02-28|1997-03-23|COLLECT COD|TRUCK|ven accounts 484|31950|9460|2|45|84687.75|0.06|0.07|N|O|1997-04-09|1997-03-20|1997-04-19|DELIVER IN PERSON|TRUCK|usly final excuses boost slyly blithe 484|183351|5870|3|50|71717.50|0.06|0.05|N|O|1997-01-24|1997-03-27|1997-02-22|DELIVER IN PERSON|MAIL|uctions wake. final, silent requests haggle 484|164805|4806|4|22|41135.60|0.07|0.03|N|O|1997-04-29|1997-03-26|1997-05-17|TAKE BACK RETURN|SHIP|es are pending instructions. furiously unu 484|76308|1323|5|48|61646.40|0.00|0.05|N|O|1997-03-05|1997-02-08|1997-03-22|TAKE BACK RETURN|MAIL|l, bold packages? even mult 484|96871|9381|6|10|18678.70|0.01|0.08|N|O|1997-04-06|1997-02-14|1997-04-16|COLLECT COD|FOB|x fluffily carefully regular 485|149523|9524|1|50|78626.00|0.01|0.00|N|O|1997-03-28|1997-05-26|1997-04-18|TAKE BACK RETURN|MAIL|iously quick excuses. 
carefully final f 485|27973|2978|2|40|76038.80|0.08|0.01|N|O|1997-04-29|1997-05-08|1997-04-30|TAKE BACK RETURN|TRUCK|al escapades 485|136884|4424|3|22|42259.36|0.00|0.05|N|O|1997-04-06|1997-04-27|1997-05-01|DELIVER IN PERSON|TRUCK|refully final notornis haggle according 486|75437|5438|1|36|50847.48|0.00|0.01|N|O|1996-06-25|1996-05-06|1996-07-07|COLLECT COD|AIR|deposits around the quickly regular packa 486|67040|7041|2|40|40281.60|0.03|0.08|N|O|1996-05-21|1996-06-06|1996-06-07|COLLECT COD|SHIP|ts nag quickly among the slyl 486|135912|8426|3|26|50645.66|0.04|0.03|N|O|1996-03-16|1996-05-25|1996-03-31|NONE|RAIL|forges along the 486|71865|6880|4|38|69800.68|0.08|0.05|N|O|1996-05-07|1996-04-26|1996-05-26|TAKE BACK RETURN|TRUCK| blithely final pinto 486|28099|5606|5|3|3081.27|0.07|0.05|N|O|1996-07-07|1996-04-20|1996-07-23|DELIVER IN PERSON|RAIL|ccounts ha 486|46543|9048|6|46|68518.84|0.00|0.03|N|O|1996-04-18|1996-05-02|1996-04-20|COLLECT COD|AIR|theodolites eat carefully furious 487|91896|1897|1|47|88730.83|0.06|0.06|R|F|1992-09-30|1992-10-08|1992-10-24|NONE|TRUCK|tions. blithely reg 487|82099|2100|2|2|2162.18|0.02|0.06|R|F|1992-10-19|1992-11-04|1992-11-11|COLLECT COD|TRUCK|oss the unusual pinto beans. reg 512|188804|1323|1|19|35963.20|0.08|0.05|N|O|1995-07-12|1995-07-11|1995-08-04|COLLECT COD|MAIL| sleep. requests alongside of the fluff 512|22847|7852|2|37|65484.08|0.01|0.04|N|O|1995-06-20|1995-07-05|1995-07-16|NONE|RAIL|nic depths cajole? blithely b 512|179419|9420|3|40|59936.40|0.05|0.02|N|O|1995-07-06|1995-07-08|1995-07-08|COLLECT COD|TRUCK|quests are da 512|82470|7487|4|10|14524.70|0.09|0.02|N|O|1995-09-16|1995-07-29|1995-10-07|NONE|AIR|xes. pinto beans cajole carefully; 512|64154|4155|5|6|6708.90|0.03|0.05|R|F|1995-06-10|1995-06-21|1995-06-16|DELIVER IN PERSON|FOB|en ideas haggle 512|32014|4518|6|12|11352.12|0.04|0.00|R|F|1995-05-21|1995-08-03|1995-06-09|COLLECT COD|FOB|old furiously express deposits. specia 512|50769|3275|7|2|3439.52|0.09|0.08|N|O|1995-06-19|1995-08-13|1995-06-24|NONE|TRUCK|e slyly silent accounts serve with 513|61732|9251|1|20|33874.60|0.09|0.07|N|O|1995-07-12|1995-05-31|1995-07-31|NONE|AIR|efully ironic ideas doze slyl 513|121628|9165|2|44|72583.28|0.01|0.01|N|O|1995-07-14|1995-07-14|1995-08-12|NONE|MAIL|kages sleep boldly ironic theodolites. acco 514|78713|1221|1|21|35525.91|0.06|0.02|N|O|1996-06-09|1996-05-15|1996-07-07|DELIVER IN PERSON|RAIL|s sleep quickly blithely 514|117452|9964|2|34|49961.30|0.08|0.02|N|O|1996-04-14|1996-06-03|1996-04-23|COLLECT COD|REG AIR|ily even patterns. bold, silent instruc 514|12812|5314|3|6|10348.86|0.06|0.01|N|O|1996-05-30|1996-06-04|1996-06-28|COLLECT COD|SHIP|as haggle blithely; quickly s 514|115362|5363|4|43|59226.48|0.00|0.08|N|O|1996-06-07|1996-05-14|1996-07-01|TAKE BACK RETURN|FOB|thely regular 515|104014|6525|1|10|10180.10|0.03|0.02|A|F|1993-10-04|1993-11-03|1993-10-08|NONE|FOB|ar deposits th 515|147605|2634|2|38|62798.80|0.10|0.07|A|F|1993-09-19|1993-11-12|1993-10-03|DELIVER IN PERSON|SHIP|ays. furiously express requests haggle furi 515|182145|9700|3|11|13498.54|0.00|0.02|R|F|1993-09-04|1993-10-02|1993-09-05|DELIVER IN PERSON|FOB|ly pending accounts haggle blithel 515|108606|8607|4|34|54896.40|0.09|0.03|R|F|1993-10-03|1993-10-26|1993-10-15|DELIVER IN PERSON|REG AIR|ic dependencie 515|130881|3395|5|32|61180.16|0.01|0.07|R|F|1993-10-10|1993-10-08|1993-11-02|TAKE BACK RETURN|FOB|r sauternes boost. 
final theodolites wake a 515|108692|3713|6|25|42517.25|0.04|0.08|R|F|1993-11-14|1993-11-07|1993-12-03|DELIVER IN PERSON|MAIL|e packages engag 516|24974|9979|1|11|20888.67|0.01|0.06|N|O|1998-05-02|1998-05-23|1998-05-12|DELIVER IN PERSON|FOB|ongside of the blithely final reque 517|44551|4552|1|28|41875.40|0.03|0.02|N|O|1997-04-30|1997-05-18|1997-05-17|COLLECT COD|MAIL| requests. special, fi 517|155391|7907|2|15|21695.85|0.02|0.00|N|O|1997-04-09|1997-06-26|1997-05-01|NONE|TRUCK| slyly. express requests ar 517|40932|3437|3|9|16856.37|0.04|0.00|N|O|1997-05-03|1997-06-16|1997-05-24|COLLECT COD|SHIP| slyly stealthily express instructions. 517|132197|2198|4|11|13521.09|0.06|0.02|N|O|1997-06-20|1997-06-01|1997-06-27|NONE|REG AIR|ly throughout the fu 517|23349|8354|5|23|29263.82|0.00|0.01|N|O|1997-04-19|1997-05-07|1997-05-12|COLLECT COD|RAIL| kindle. furiously bold requests mus 518|164711|4712|1|30|53271.30|0.07|0.05|N|O|1998-02-18|1998-03-27|1998-03-16|COLLECT COD|TRUCK|slyly by the packages. carefull 518|83164|689|2|23|26384.68|0.05|0.07|N|O|1998-02-20|1998-05-05|1998-03-11|COLLECT COD|TRUCK| special requests. fluffily ironic re 518|133178|8205|3|12|14534.04|0.01|0.06|N|O|1998-03-08|1998-03-31|1998-04-06|NONE|AIR| packages thrash slyly 518|121990|1991|4|46|92551.54|0.07|0.02|N|O|1998-04-07|1998-04-17|1998-04-29|NONE|MAIL|. blithely even ideas cajole furiously. b 518|70019|20|5|16|15824.16|0.01|0.01|N|O|1998-03-15|1998-03-24|1998-04-08|NONE|MAIL|use quickly expre 518|196358|1397|6|39|56719.65|0.09|0.08|N|O|1998-02-26|1998-03-17|1998-03-21|DELIVER IN PERSON|FOB| the bold, special deposits are carefully 518|185956|8475|7|48|98013.60|0.03|0.07|N|O|1998-03-06|1998-04-22|1998-03-14|NONE|FOB| slyly final platelets; quickly even deposi 519|158970|4001|1|1|2028.97|0.07|0.07|N|O|1997-12-01|1998-01-26|1997-12-23|COLLECT COD|REG AIR|bold requests believe furiou 519|2946|2947|2|38|70259.72|0.05|0.08|N|O|1998-02-19|1997-12-15|1998-03-19|DELIVER IN PERSON|FOB|gular excuses detect quickly furiously 519|105900|921|3|19|36212.10|0.00|0.02|N|O|1998-01-09|1998-01-03|1998-02-06|COLLECT COD|AIR|asymptotes. p 519|46267|3780|4|27|32758.02|0.08|0.06|N|O|1997-11-20|1997-12-06|1997-12-16|DELIVER IN PERSON|REG AIR|le. even, final dependencies 519|9041|4042|5|13|12350.52|0.06|0.08|N|O|1998-02-06|1997-12-02|1998-03-03|TAKE BACK RETURN|TRUCK|c accounts wake along the ironic so 519|150926|5957|6|3|5930.76|0.04|0.00|N|O|1998-02-01|1998-01-25|1998-02-27|TAKE BACK RETURN|FOB|erve blithely blithely ironic asymp 544|138474|8475|1|47|71086.09|0.08|0.06|R|F|1993-03-14|1993-03-27|1993-03-27|COLLECT COD|SHIP|ecial pains. deposits grow foxes. 545|169547|9548|1|4|6466.16|0.02|0.00|N|O|1996-02-23|1995-12-16|1996-03-21|DELIVER IN PERSON|FOB|, ironic grouches cajole over 545|170188|5223|2|18|22647.24|0.00|0.00|N|O|1996-02-21|1996-01-17|1996-02-26|NONE|RAIL|al, final packages affix. even a 546|84585|2110|1|16|25113.28|0.08|0.02|N|O|1997-02-04|1996-12-30|1997-02-25|DELIVER IN PERSON|TRUCK|de of the orbits. sometimes regula 547|70789|5804|1|44|77430.32|0.08|0.08|N|O|1996-10-18|1996-08-17|1996-10-27|TAKE BACK RETURN|FOB|thely express dependencies. qu 547|136347|1374|2|48|66400.32|0.01|0.04|N|O|1996-10-21|1996-08-04|1996-11-20|COLLECT COD|SHIP|thely specia 547|181345|6382|3|3|4279.02|0.05|0.02|N|O|1996-09-04|1996-08-01|1996-09-21|COLLECT COD|SHIP|pinto beans. 
ironi 548|196550|6551|1|2|3293.10|0.06|0.05|A|F|1994-11-26|1994-11-06|1994-12-06|COLLECT COD|MAIL|ests haggle quickly eve 548|4641|4642|2|6|9273.84|0.00|0.08|A|F|1995-01-18|1994-12-08|1995-02-10|NONE|TRUCK|sits wake furiously regular 548|182|7683|3|21|22725.78|0.03|0.08|A|F|1995-01-13|1994-12-18|1995-01-25|NONE|AIR|ideas. special accounts above the furiou 548|56720|4236|4|21|35211.12|0.08|0.03|A|F|1994-10-27|1994-12-04|1994-11-21|DELIVER IN PERSON|AIR| engage quickly. regular theo 548|92995|523|5|19|37771.81|0.00|0.02|A|F|1994-09-24|1994-11-24|1994-10-01|DELIVER IN PERSON|MAIL|courts boost care 548|152753|7784|6|32|57784.00|0.06|0.04|A|F|1994-12-16|1994-11-20|1994-12-29|NONE|REG AIR|c instruction 549|195061|100|1|18|20809.08|0.07|0.04|R|F|1992-10-19|1992-08-12|1992-11-13|COLLECT COD|REG AIR|furiously according to the ironic, regular 549|188735|8736|2|38|69301.74|0.07|0.05|A|F|1992-08-17|1992-08-28|1992-09-05|COLLECT COD|RAIL|the regular, furious excuses. carefu 549|65213|5214|3|36|42415.56|0.08|0.04|R|F|1992-09-11|1992-10-11|1992-09-12|DELIVER IN PERSON|AIR|ts against the ironic, even theodolites eng 549|20101|7608|4|18|18379.80|0.09|0.01|A|F|1992-07-31|1992-09-11|1992-08-08|NONE|RAIL|ely regular accounts above the 549|23480|987|5|38|53332.24|0.06|0.02|R|F|1992-08-23|1992-08-12|1992-08-25|COLLECT COD|REG AIR|eposits. carefully regular depos 550|190307|2827|1|31|43316.30|0.04|0.02|N|O|1995-10-24|1995-09-27|1995-11-04|COLLECT COD|AIR|thely silent packages. unusual 551|23786|6289|1|8|13678.24|0.08|0.02|N|O|1995-07-29|1995-07-18|1995-08-02|NONE|REG AIR| wake quickly slyly pending platel 551|158813|3844|2|20|37436.20|0.00|0.07|N|O|1995-09-18|1995-08-25|1995-10-11|COLLECT COD|TRUCK|r ideas. final, even ideas hinder alongside 551|161089|6122|3|16|18401.28|0.07|0.06|N|O|1995-07-29|1995-08-19|1995-08-10|COLLECT COD|MAIL|y along the carefully ex 576|86490|1507|1|2|2952.98|0.07|0.01|N|O|1997-05-15|1997-06-30|1997-05-28|NONE|RAIL|ccounts along the ac 576|33096|8103|2|6|6174.54|0.06|0.05|N|O|1997-05-15|1997-07-26|1997-06-03|DELIVER IN PERSON|TRUCK|al deposits. slyly even sauternes a 576|36565|9069|3|6|9009.36|0.08|0.07|N|O|1997-08-28|1997-06-16|1997-09-25|DELIVER IN PERSON|FOB|ts. ironic multipliers 576|137608|2635|4|5|8228.00|0.03|0.07|N|O|1997-06-11|1997-06-17|1997-07-05|NONE|REG AIR|l foxes boost slyly. accounts af 577|25886|891|1|25|45297.00|0.06|0.01|A|F|1995-04-09|1995-02-20|1995-05-09|TAKE BACK RETURN|AIR|ve slyly of the frets. careful 577|63233|8246|2|14|16747.22|0.08|0.03|R|F|1995-03-19|1995-02-25|1995-04-09|DELIVER IN PERSON|RAIL|l accounts wake deposits. ironic packa 578|155542|5543|1|40|63901.60|0.02|0.08|N|O|1997-02-10|1997-03-18|1997-02-11|NONE|SHIP|usly even platel 578|187025|2062|2|23|25576.46|0.05|0.08|N|O|1997-03-06|1997-03-03|1997-03-20|TAKE BACK RETURN|FOB|nstructions. ironic deposits 579|150618|5649|1|9|15017.49|0.00|0.05|N|O|1998-06-20|1998-04-28|1998-07-19|DELIVER IN PERSON|RAIL|e ironic, express deposits are furiously 579|32145|7152|2|39|42008.46|0.02|0.01|N|O|1998-06-21|1998-06-03|1998-06-26|COLLECT COD|REG AIR|ncies. furiously final r 579|59048|4059|3|6|6042.24|0.03|0.00|N|O|1998-04-24|1998-05-03|1998-05-08|TAKE BACK RETURN|TRUCK|ickly final requests-- bold accou 579|6189|8690|4|41|44902.38|0.04|0.05|N|O|1998-05-28|1998-05-01|1998-06-04|COLLECT COD|REG AIR|bold, express requests sublate slyly. 
blith 579|12612|5114|5|28|42689.08|0.00|0.03|N|O|1998-07-10|1998-05-24|1998-07-19|NONE|RAIL|ic ideas until th 579|166717|9234|6|5|8918.55|0.05|0.08|N|O|1998-05-02|1998-04-25|1998-05-05|COLLECT COD|REG AIR|refully silent ideas cajole furious 580|84916|2441|1|33|62730.03|0.03|0.05|N|O|1997-10-11|1997-09-19|1997-10-16|TAKE BACK RETURN|FOB|y express theodolites cajole carefully 580|173320|3321|2|31|43192.92|0.04|0.08|N|O|1997-10-04|1997-09-08|1997-10-15|COLLECT COD|FOB|ose alongside of the sl 580|184444|6963|3|19|29040.36|0.04|0.04|N|O|1997-07-23|1997-09-21|1997-08-15|NONE|FOB|mong the special packag 581|63384|8397|1|41|55242.58|0.09|0.07|N|O|1997-05-26|1997-04-06|1997-06-10|TAKE BACK RETURN|MAIL|nts. quickly 581|92527|5037|2|14|21273.28|0.06|0.08|N|O|1997-05-17|1997-04-14|1997-06-08|NONE|MAIL|. deposits s 581|100106|5127|3|49|54198.90|0.10|0.02|N|O|1997-02-27|1997-04-24|1997-03-10|TAKE BACK RETURN|MAIL|. slyly regular pinto beans acr 581|74925|9940|4|30|56997.60|0.10|0.08|N|O|1997-06-19|1997-05-21|1997-06-22|TAKE BACK RETURN|TRUCK| regular ideas grow furio 582|56409|3925|1|7|9557.80|0.07|0.00|N|O|1997-11-16|1997-11-29|1997-12-10|TAKE BACK RETURN|FOB|ithely unusual t 582|50262|263|2|49|59400.74|0.05|0.03|N|O|1997-12-17|1998-01-12|1997-12-31|COLLECT COD|REG AIR|nts according to the furiously regular pin 582|140309|5338|3|42|56670.60|0.07|0.00|N|O|1997-11-15|1997-12-21|1997-12-03|COLLECT COD|SHIP|iously beside the silent de 582|167750|7751|4|36|65439.00|0.06|0.01|N|O|1997-12-09|1997-11-27|1997-12-26|TAKE BACK RETURN|SHIP|lar requests. quickly 583|144364|4365|1|1|1408.36|0.07|0.07|N|O|1997-06-17|1997-04-29|1997-06-28|NONE|TRUCK| regular, regular ideas. even, bra 583|119625|2137|2|47|77297.14|0.10|0.06|N|O|1997-07-14|1997-05-12|1997-08-11|DELIVER IN PERSON|AIR|nts are fluffily. furiously even re 583|129431|1944|3|34|49654.62|0.01|0.02|N|O|1997-05-11|1997-04-24|1997-06-03|DELIVER IN PERSON|MAIL|express req 583|141250|8793|4|33|42611.25|0.10|0.01|N|O|1997-05-28|1997-04-25|1997-06-24|NONE|AIR|kages cajole slyly across the 583|188537|6092|5|13|21131.89|0.04|0.06|N|O|1997-06-23|1997-05-29|1997-07-08|COLLECT COD|TRUCK|y sly theodolites. ironi 608|153579|1125|1|19|31018.83|0.08|0.06|N|O|1996-04-19|1996-05-02|1996-05-03|DELIVER IN PERSON|RAIL|ideas. the 608|197310|2349|2|40|56292.40|0.03|0.01|N|O|1996-05-21|1996-04-11|1996-06-02|NONE|AIR| alongside of the regular tithes. sly 609|65533|8040|1|21|31469.13|0.01|0.05|R|F|1994-08-24|1994-08-23|1994-08-27|DELIVER IN PERSON|FOB|de of the special warthogs. excu 610|110792|5815|1|49|88336.71|0.10|0.07|N|O|1995-08-29|1995-10-26|1995-09-12|TAKE BACK RETURN|SHIP|ular instruc 610|67896|5415|2|11|20502.79|0.07|0.08|N|O|1995-10-31|1995-10-25|1995-11-18|NONE|MAIL|blithely final 610|117617|7618|3|26|42499.86|0.09|0.04|N|O|1995-11-22|1995-09-09|1995-12-04|TAKE BACK RETURN|AIR|cross the furiously even theodolites sl 610|185206|7725|4|17|21950.40|0.03|0.03|N|O|1995-11-01|1995-10-30|1995-11-04|COLLECT COD|FOB|p quickly instead of the slyly pending foxe 610|145743|5744|5|39|69760.86|0.08|0.05|N|O|1995-10-30|1995-10-21|1995-11-11|TAKE BACK RETURN|REG AIR|counts. ironic warhorses are 610|94365|6875|6|5|6796.80|0.00|0.07|N|O|1995-08-11|1995-10-22|1995-08-26|TAKE BACK RETURN|FOB|n pinto beans. iro 610|189280|4317|7|27|36970.56|0.06|0.03|N|O|1995-09-02|1995-09-19|1995-09-15|NONE|REG AIR| ironic pinto beans haggle. 
blithe 611|16855|4359|1|39|69102.15|0.05|0.06|R|F|1993-05-06|1993-04-09|1993-05-22|TAKE BACK RETURN|SHIP|nto beans 611|80676|677|2|1|1656.67|0.08|0.07|R|F|1993-05-17|1993-02-26|1993-06-15|DELIVER IN PERSON|MAIL|ts. pending platelets aff 611|119545|2057|3|39|61017.06|0.09|0.02|A|F|1993-03-10|1993-03-10|1993-03-17|TAKE BACK RETURN|TRUCK|the evenly bold requests. furious 612|184959|9996|1|5|10219.75|0.07|0.00|R|F|1992-11-08|1992-11-20|1992-12-03|TAKE BACK RETURN|RAIL|structions. q 612|194864|7384|2|28|54848.08|0.07|0.06|R|F|1993-01-02|1992-12-11|1993-01-30|DELIVER IN PERSON|TRUCK|regular instructions affix bl 612|66130|1143|3|49|53710.37|0.00|0.08|A|F|1993-01-08|1992-11-25|1993-01-17|TAKE BACK RETURN|REG AIR|theodolite 612|38942|1446|4|28|52666.32|0.05|0.00|A|F|1992-11-12|1992-12-05|1992-12-02|TAKE BACK RETURN|REG AIR|lyly regular asym 612|87737|246|5|1|1724.73|0.08|0.04|R|F|1992-12-18|1992-12-13|1992-12-20|TAKE BACK RETURN|FOB| requests. 612|188203|5758|6|33|42609.60|0.10|0.03|R|F|1992-11-30|1992-12-01|1992-12-12|COLLECT COD|MAIL|bove the blithely even ideas. careful 613|90027|7555|1|17|17289.34|0.06|0.06|N|O|1995-09-23|1995-08-04|1995-10-15|NONE|SHIP|ar dependencie 613|78348|5870|2|6|7958.04|0.05|0.05|N|O|1995-08-05|1995-08-09|1995-08-08|TAKE BACK RETURN|MAIL|y ironic deposits eat 613|185016|7535|3|3|3303.03|0.03|0.01|N|O|1995-09-27|1995-09-11|1995-10-05|NONE|TRUCK|ccounts cajole. 613|158304|8305|4|7|9536.10|0.02|0.04|N|O|1995-09-07|1995-08-02|1995-09-16|DELIVER IN PERSON|MAIL|ously blithely final pinto beans. regula 614|194109|9148|1|21|25265.10|0.00|0.03|R|F|1993-03-29|1993-01-06|1993-04-16|TAKE BACK RETURN|TRUCK|arefully. slyly express packag 614|186897|9416|2|48|95226.72|0.07|0.07|A|F|1993-03-09|1993-01-19|1993-03-19|DELIVER IN PERSON|SHIP|riously special excuses haggle along the 614|166963|4512|3|43|87288.28|0.05|0.00|A|F|1993-03-07|1993-02-22|1993-03-18|DELIVER IN PERSON|SHIP| express accounts wake. slyly ironic ins 614|146951|4494|4|14|27971.30|0.04|0.06|A|F|1992-12-03|1993-02-14|1992-12-27|DELIVER IN PERSON|SHIP|ular packages haggle about the pack 614|195308|7828|5|30|42099.00|0.08|0.07|R|F|1993-01-16|1993-02-08|1993-02-12|TAKE BACK RETURN|FOB|tructions are f 614|136241|1268|6|48|61307.52|0.04|0.08|A|F|1992-12-14|1993-01-22|1993-01-11|NONE|TRUCK| regular platelets cajole quickly eve 615|104545|4546|1|36|55783.44|0.10|0.01|A|F|1992-06-01|1992-07-14|1992-06-27|NONE|FOB| packages. carefully final pinto bea 640|92997|525|1|49|97509.51|0.09|0.02|R|F|1993-03-27|1993-04-17|1993-04-15|NONE|RAIL|s haggle slyly 640|416|2917|2|40|52656.40|0.09|0.05|A|F|1993-05-11|1993-04-11|1993-05-15|COLLECT COD|TRUCK|oach according to the bol 640|179475|7027|3|22|34198.34|0.05|0.07|A|F|1993-05-07|1993-04-14|1993-05-21|TAKE BACK RETURN|TRUCK|osits across the slyly regular theodo 640|31474|1475|4|45|63246.15|0.07|0.07|R|F|1993-04-15|1993-04-23|1993-04-21|DELIVER IN PERSON|REG AIR|ong the qui 641|125192|2729|1|18|21909.42|0.01|0.08|R|F|1993-10-17|1993-10-11|1993-10-29|DELIVER IN PERSON|AIR|p blithely bold packages. quick 641|99477|1987|2|1|1476.47|0.09|0.02|R|F|1993-12-03|1993-10-28|1993-12-26|TAKE BACK RETURN|RAIL| nag across the regular foxes. 641|94311|6821|3|40|52212.40|0.05|0.06|R|F|1993-11-22|1993-10-20|1993-12-11|DELIVER IN PERSON|REG AIR|lets. furiously regular requests cajo 641|70043|5058|4|25|25326.00|0.03|0.02|A|F|1993-12-04|1993-11-18|1993-12-18|TAKE BACK RETURN|FOB|d, regular d 641|3794|8795|5|41|69609.39|0.07|0.04|R|F|1993-11-29|1993-10-27|1993-12-04|TAKE BACK RETURN|FOB| asymptotes are quickly. 
bol 642|53624|3625|1|26|41018.12|0.10|0.03|A|F|1994-04-16|1994-02-01|1994-04-27|COLLECT COD|REG AIR|quests according to the unu 643|12260|9764|1|28|32823.28|0.00|0.08|A|F|1995-04-13|1995-05-12|1995-04-14|TAKE BACK RETURN|TRUCK|ly regular requests nag sly 643|50168|169|2|48|53671.68|0.01|0.02|N|O|1995-07-10|1995-06-07|1995-08-01|NONE|FOB|ly ironic accounts 643|162447|4964|3|23|34717.12|0.05|0.03|N|O|1995-07-09|1995-05-18|1995-07-31|COLLECT COD|RAIL|sits are carefully according to the e 643|44743|2256|4|39|65821.86|0.08|0.04|A|F|1995-06-08|1995-06-16|1995-06-13|COLLECT COD|RAIL| the pains. carefully s 643|189459|4496|5|47|72777.15|0.10|0.03|R|F|1995-04-05|1995-06-14|1995-04-26|DELIVER IN PERSON|RAIL|y against 644|133143|5657|1|46|54102.44|0.02|0.01|A|F|1992-05-20|1992-06-14|1992-06-14|DELIVER IN PERSON|RAIL| special requests was sometimes expre 644|129821|7358|2|11|20359.02|0.05|0.02|A|F|1992-08-20|1992-07-21|1992-09-11|TAKE BACK RETURN|TRUCK|ealthy pinto beans use carefu 644|100047|5068|3|44|46069.76|0.04|0.04|R|F|1992-08-17|1992-07-26|1992-08-20|COLLECT COD|REG AIR|iously ironic pinto beans. bold packa 644|79744|7266|4|7|12066.18|0.01|0.02|A|F|1992-05-18|1992-07-01|1992-06-07|COLLECT COD|RAIL| regular requests are blithely. slyly 644|49295|9296|5|23|28618.67|0.02|0.04|R|F|1992-07-31|1992-07-28|1992-08-13|DELIVER IN PERSON|TRUCK|uctions nag quickly alongside of t 644|84932|2457|6|33|63258.69|0.00|0.07|R|F|1992-08-26|1992-07-27|1992-08-28|NONE|AIR|ages sleep. bold, bo 644|50239|2745|7|38|45190.74|0.08|0.06|R|F|1992-05-17|1992-07-10|1992-06-06|TAKE BACK RETURN|MAIL| packages. blithely slow accounts nag quic 645|159694|2210|1|33|57871.77|0.01|0.02|A|F|1994-12-09|1995-02-21|1995-01-03|NONE|TRUCK|heodolites b 645|169422|9423|2|47|70096.74|0.07|0.05|R|F|1995-02-16|1995-02-15|1995-02-25|COLLECT COD|TRUCK|hely regular instructions alon 645|69227|4240|3|46|55026.12|0.10|0.01|A|F|1995-01-04|1995-02-21|1995-01-21|COLLECT COD|REG AIR| regular dependencies across the speci 645|95402|421|4|49|68472.60|0.05|0.03|R|F|1995-01-24|1995-01-06|1995-02-17|NONE|TRUCK|y. slyly iron 645|4703|7204|5|43|69131.10|0.06|0.02|A|F|1995-02-12|1995-02-27|1995-03-06|TAKE BACK RETURN|REG AIR| furiously accounts. slyly 645|33631|8638|6|18|28163.34|0.10|0.08|A|F|1995-03-02|1995-02-08|1995-03-03|COLLECT COD|RAIL|ep. slyly even 645|27031|7032|7|9|8622.27|0.03|0.03|A|F|1994-12-25|1995-01-04|1995-01-15|COLLECT COD|REG AIR|special deposits. regular, final th 646|108975|6506|1|31|61503.07|0.00|0.05|R|F|1994-12-17|1995-02-16|1995-01-04|COLLECT COD|MAIL|ag furiousl 646|126723|6724|2|1|1749.72|0.07|0.01|A|F|1994-12-05|1995-01-07|1994-12-31|TAKE BACK RETURN|MAIL|t blithely regular deposits. quic 646|29744|4749|3|24|40169.76|0.06|0.02|A|F|1995-02-20|1994-12-30|1995-03-16|TAKE BACK RETURN|TRUCK|regular accounts haggle dog 646|98738|3757|4|34|59048.82|0.01|0.00|R|F|1994-12-28|1994-12-27|1994-12-31|COLLECT COD|SHIP|slow accounts. fluffily idle instructions 646|89173|4190|5|17|19756.89|0.04|0.01|A|F|1994-12-31|1994-12-26|1995-01-01|DELIVER IN PERSON|REG AIR|inal packages haggle carefully 646|114481|9504|6|40|59819.20|0.10|0.01|R|F|1995-01-01|1995-01-13|1995-01-11|COLLECT COD|TRUCK|ronic packages sleep across th 647|16310|8812|1|41|50278.71|0.08|0.08|N|O|1997-11-19|1997-09-24|1997-12-15|COLLECT COD|REG AIR|r instructions. 
quickly unusu 647|112177|7200|2|5|5945.85|0.10|0.00|N|O|1997-09-25|1997-09-22|1997-10-25|TAKE BACK RETURN|AIR|ly express packages haggle caref 647|152882|7913|3|15|29023.20|0.08|0.00|N|O|1997-09-23|1997-10-09|1997-10-21|NONE|MAIL|ve the even, bold foxes sleep 672|172190|2191|1|41|51749.79|0.06|0.06|R|F|1994-06-20|1994-07-03|1994-06-22|COLLECT COD|REG AIR| dependencies in 672|189656|9657|2|9|15710.85|0.03|0.04|R|F|1994-06-25|1994-06-06|1994-07-19|TAKE BACK RETURN|TRUCK|haggle carefully carefully reg 672|142390|9933|3|35|50133.65|0.02|0.01|R|F|1994-07-13|1994-06-04|1994-07-14|COLLECT COD|RAIL| dependencies haggle quickly. theo 673|70495|5510|1|22|32240.78|0.03|0.02|R|F|1994-03-15|1994-04-27|1994-03-29|TAKE BACK RETURN|TRUCK| the regular, even requests. carefully fin 674|101366|3877|1|23|31449.28|0.06|0.07|A|F|1992-10-25|1992-10-15|1992-11-03|COLLECT COD|SHIP|ve the quickly even deposits. blithe 674|58285|3296|2|4|4973.12|0.02|0.07|R|F|1992-10-05|1992-11-22|1992-10-22|NONE|RAIL|ly express pinto beans sleep car 675|156455|4001|1|1|1511.45|0.04|0.08|N|O|1997-11-27|1997-09-30|1997-12-12|DELIVER IN PERSON|REG AIR|ide of the slyly regular packages. unus 675|136633|4173|2|35|58437.05|0.08|0.07|N|O|1997-08-19|1997-10-16|1997-09-17|DELIVER IN PERSON|REG AIR|s. furiously expre 675|175802|8320|3|34|63845.20|0.10|0.04|N|O|1997-11-17|1997-10-07|1997-11-27|NONE|FOB|y final accounts unwind around the 675|99269|6797|4|15|19023.90|0.09|0.05|N|O|1997-10-18|1997-09-28|1997-11-13|COLLECT COD|TRUCK|posits after the furio 675|4669|7170|5|46|72388.36|0.09|0.05|N|O|1997-09-18|1997-10-14|1997-10-01|DELIVER IN PERSON|AIR| deposits along the express foxes 676|50972|8488|1|9|17306.73|0.09|0.02|N|O|1997-04-03|1997-02-02|1997-04-08|COLLECT COD|REG AIR|aintain sl 676|77668|5190|2|20|32913.20|0.07|0.07|N|O|1997-02-02|1997-02-01|1997-02-11|NONE|REG AIR|riously around the blithely 676|162330|2331|3|35|48731.55|0.05|0.01|N|O|1996-12-30|1997-01-13|1997-01-19|DELIVER IN PERSON|RAIL|into beans. blithe 676|72825|347|4|24|43147.68|0.01|0.06|N|O|1997-02-05|1997-01-16|1997-03-07|TAKE BACK RETURN|TRUCK|ress, regular dep 676|165127|2676|5|31|36955.72|0.01|0.06|N|O|1997-02-06|1997-02-28|1997-03-08|COLLECT COD|TRUCK|ial deposits cajo 676|75930|5931|6|33|62895.69|0.09|0.05|N|O|1997-03-02|1997-02-22|1997-03-19|TAKE BACK RETURN|TRUCK|as wake slyly furiously close pinto b 676|142123|7152|7|11|12816.32|0.07|0.02|N|O|1997-03-09|1997-03-06|1997-03-31|TAKE BACK RETURN|MAIL|he final acco 677|58986|1492|1|32|62239.36|0.04|0.08|R|F|1994-01-06|1994-01-31|1994-02-02|NONE|RAIL|slyly final 677|167361|7362|2|39|55706.04|0.00|0.07|R|F|1993-12-19|1994-02-11|1994-01-05|TAKE BACK RETURN|SHIP|ges. furiously regular packages use 677|23226|3227|3|46|52864.12|0.01|0.02|R|F|1993-12-02|1994-02-12|1993-12-06|COLLECT COD|RAIL|ng theodolites. furiously unusual theodo 677|147638|5181|4|1|1685.63|0.06|0.05|R|F|1993-12-01|1994-01-14|1993-12-26|DELIVER IN PERSON|MAIL|ly. regular 677|149613|7156|5|25|41565.25|0.00|0.05|A|F|1994-03-12|1994-02-02|1994-03-28|DELIVER IN PERSON|AIR| packages integrate blithely 678|145537|5538|1|20|31650.60|0.05|0.08|R|F|1993-06-21|1993-04-07|1993-07-10|TAKE BACK RETURN|MAIL|furiously express excuses. foxes eat fu 678|36553|4063|2|22|32770.10|0.01|0.02|A|F|1993-05-10|1993-04-29|1993-06-08|NONE|REG AIR|de of the carefully even requests. 
bl 678|142489|5004|3|16|24503.68|0.06|0.02|R|F|1993-03-20|1993-04-13|1993-04-16|DELIVER IN PERSON|REG AIR|equests cajole around the carefully regular 678|198067|8068|4|48|55922.88|0.08|0.08|R|F|1993-02-28|1993-04-04|1993-03-24|NONE|REG AIR|ithely. slyly express foxes 678|97451|7452|5|16|23175.20|0.06|0.04|R|F|1993-03-09|1993-04-18|1993-04-07|NONE|AIR| about the 678|42888|2889|6|11|20139.68|0.09|0.00|R|F|1993-04-28|1993-05-16|1993-05-11|COLLECT COD|TRUCK|ess deposits dazzle f 679|191759|1760|1|9|16656.75|0.09|0.00|N|O|1995-12-20|1996-01-27|1996-01-07|COLLECT COD|REG AIR|leep slyly. entici 704|189981|9982|1|40|82839.20|0.05|0.05|N|O|1997-01-30|1997-01-10|1997-02-20|COLLECT COD|AIR|ggle quickly. r 704|3839|3840|2|14|24399.62|0.07|0.08|N|O|1997-02-02|1996-12-26|1997-02-19|DELIVER IN PERSON|REG AIR|ve the quickly final forges. furiously p 705|188322|841|1|46|64874.72|0.05|0.06|N|O|1997-04-18|1997-05-06|1997-05-05|DELIVER IN PERSON|SHIP|ss deposits. ironic packa 705|116218|3752|2|35|43197.35|0.10|0.04|N|O|1997-03-25|1997-03-20|1997-04-23|TAKE BACK RETURN|FOB|carefully ironic accounts 706|196629|9149|1|23|39689.26|0.05|0.00|N|O|1995-12-06|1995-12-02|1995-12-16|COLLECT COD|SHIP|ckey players. requests above the 707|154736|4737|1|34|60884.82|0.01|0.02|R|F|1994-12-08|1995-01-15|1995-01-02|NONE|RAIL| dependencies 707|42642|5147|2|22|34862.08|0.00|0.06|A|F|1995-01-12|1994-12-28|1995-01-16|DELIVER IN PERSON|REG AIR| kindle ironically 708|123805|1342|1|3|5486.40|0.05|0.02|N|O|1998-10-09|1998-09-22|1998-11-07|COLLECT COD|FOB|e slyly pending foxes. 708|179124|9125|2|19|22859.28|0.06|0.00|N|O|1998-10-28|1998-09-23|1998-11-25|COLLECT COD|SHIP| requests. even, thin ideas 708|121298|8835|3|33|43536.57|0.09|0.06|N|O|1998-09-10|1998-09-20|1998-09-22|COLLECT COD|RAIL|s boost carefully ruthless theodolites. f 708|55176|5177|4|5|5655.85|0.07|0.07|N|O|1998-07-22|1998-08-15|1998-07-28|TAKE BACK RETURN|REG AIR|c pinto beans nag after the account 708|142490|33|5|36|55169.64|0.08|0.01|N|O|1998-07-16|1998-09-04|1998-08-11|NONE|SHIP|ests. even, regular hockey p 708|22352|9859|6|7|8920.45|0.10|0.03|N|O|1998-08-16|1998-08-15|1998-09-10|COLLECT COD|REG AIR|lly express ac 709|86203|1220|1|7|8324.40|0.00|0.00|N|O|1998-06-14|1998-06-08|1998-06-18|TAKE BACK RETURN|RAIL| special orbits cajole 709|197250|9770|2|15|20208.75|0.08|0.00|N|O|1998-07-10|1998-06-26|1998-08-09|NONE|RAIL|ily regular deposits. sauternes was accor 709|168496|1013|3|10|15644.90|0.01|0.02|N|O|1998-06-04|1998-06-30|1998-06-11|NONE|REG AIR|ts cajole boldly 709|107229|7230|4|40|49448.80|0.10|0.08|N|O|1998-08-12|1998-06-20|1998-08-20|DELIVER IN PERSON|RAIL|ggle fluffily carefully ironic 710|162111|9660|1|47|55136.17|0.06|0.08|A|F|1993-01-18|1993-03-24|1993-01-24|TAKE BACK RETURN|MAIL|usual ideas into th 710|192916|2917|2|38|76338.58|0.07|0.02|R|F|1993-04-18|1993-03-12|1993-05-15|COLLECT COD|FOB|sts boost fluffily aft 710|138984|6524|3|7|14160.86|0.04|0.06|R|F|1993-01-20|1993-03-28|1993-02-15|TAKE BACK RETURN|REG AIR|xpress, special ideas. bl 710|89308|9309|4|25|32432.50|0.00|0.05|R|F|1993-03-31|1993-02-05|1993-04-22|COLLECT COD|FOB|eas detect do 710|185454|491|5|12|18473.40|0.01|0.02|A|F|1993-02-18|1993-02-27|1993-03-07|DELIVER IN PERSON|MAIL|ions. slyly express theodolites al 710|113665|1199|6|21|35251.86|0.04|0.06|R|F|1993-03-22|1993-03-05|1993-03-27|DELIVER IN PERSON|SHIP|es. 
furiously p 710|159288|6834|7|46|61974.88|0.03|0.07|R|F|1993-04-16|1993-03-27|1993-05-05|COLLECT COD|MAIL|ges use; blithely pending excuses inte 711|145767|8282|1|2|3625.52|0.10|0.04|R|F|1993-12-01|1993-12-09|1993-12-16|DELIVER IN PERSON|REG AIR|ely across t 711|102501|7522|2|27|40594.50|0.00|0.08|A|F|1993-10-02|1993-10-26|1993-10-08|DELIVER IN PERSON|MAIL|slyly. ironic asy 711|127086|2111|3|46|51201.68|0.10|0.00|R|F|1993-12-26|1993-11-19|1994-01-21|TAKE BACK RETURN|MAIL|deposits. permanen 711|127682|7683|4|20|34193.60|0.09|0.00|A|F|1994-01-17|1993-11-10|1994-01-31|DELIVER IN PERSON|TRUCK|kly regular acco 736|157380|7381|1|46|66119.48|0.05|0.01|N|O|1998-07-16|1998-09-01|1998-08-09|NONE|AIR|uctions cajole 736|79423|9424|2|23|32255.66|0.02|0.05|N|O|1998-10-08|1998-08-27|1998-10-19|TAKE BACK RETURN|AIR|k accounts are carefully 736|56626|4142|3|13|20574.06|0.00|0.03|N|O|1998-08-16|1998-07-26|1998-08-19|DELIVER IN PERSON|FOB|st furiously among the 736|97851|5379|4|14|25883.90|0.06|0.04|N|O|1998-10-04|1998-08-14|1998-10-16|COLLECT COD|REG AIR|nstructions. 736|168002|3035|5|32|34240.00|0.04|0.03|N|O|1998-07-30|1998-08-22|1998-08-12|DELIVER IN PERSON|RAIL|iously final accoun 737|181815|9370|1|12|22761.72|0.01|0.01|R|F|1992-04-28|1992-06-30|1992-05-08|COLLECT COD|RAIL|posits after the slyly bold du 738|197203|2242|1|34|44206.80|0.00|0.06|R|F|1993-06-09|1993-04-15|1993-07-09|TAKE BACK RETURN|TRUCK|s against the ironic exc 738|187016|7017|2|4|4412.04|0.00|0.03|A|F|1993-06-20|1993-04-08|1993-07-09|NONE|AIR|ar packages. fluffily bo 738|169700|9701|3|23|40703.10|0.04|0.08|A|F|1993-03-17|1993-04-02|1993-04-05|TAKE BACK RETURN|SHIP|nic, final excuses promise quickly regula 738|140786|8329|4|12|21921.36|0.04|0.08|A|F|1993-06-16|1993-05-05|1993-06-22|NONE|SHIP|ove the slyly regular p 738|174837|9872|5|30|57354.90|0.02|0.00|A|F|1993-06-12|1993-05-29|1993-06-25|NONE|AIR|ecial instructions haggle blithely regula 739|84489|6998|1|28|41257.44|0.00|0.03|N|O|1998-06-03|1998-08-04|1998-06-29|TAKE BACK RETURN|RAIL|elets about the pe 739|3502|6003|2|50|70275.00|0.07|0.06|N|O|1998-08-26|1998-07-16|1998-09-02|COLLECT COD|MAIL|ndencies. blith 739|48733|3742|3|12|20180.76|0.05|0.00|N|O|1998-08-20|1998-07-24|1998-08-22|NONE|MAIL|le slyly along the close i 739|43470|983|4|47|66433.09|0.09|0.07|N|O|1998-08-12|1998-07-09|1998-08-28|NONE|REG AIR|deas according to the theodolites sn 739|187523|5078|5|30|48315.60|0.07|0.06|N|O|1998-06-19|1998-08-26|1998-07-02|DELIVER IN PERSON|REG AIR|above the even deposits. ironic requests 740|1206|8707|1|22|24358.40|0.10|0.02|N|O|1995-07-24|1995-09-11|1995-08-11|TAKE BACK RETURN|FOB|odolites cajole ironic, pending instruc 740|65211|2730|2|35|41167.35|0.00|0.00|N|O|1995-09-06|1995-08-22|1995-10-02|NONE|TRUCK|p quickly. fu 740|198037|8038|3|29|32915.87|0.06|0.05|N|O|1995-10-26|1995-09-17|1995-10-29|DELIVER IN PERSON|FOB|ntly bold pinto beans sleep quickl 741|186404|1441|1|25|37260.00|0.03|0.06|N|O|1998-07-15|1998-08-27|1998-08-12|DELIVER IN PERSON|MAIL|accounts. blithely bold pa 741|90396|5415|2|22|30500.58|0.09|0.01|N|O|1998-09-07|1998-09-28|1998-09-12|COLLECT COD|AIR|ven deposits about the regular, ironi 742|101309|1310|1|46|60273.80|0.04|0.08|A|F|1995-03-12|1995-03-20|1995-03-16|TAKE BACK RETURN|SHIP|e slyly bold deposits cajole according to 742|95395|7905|2|15|20855.85|0.08|0.05|A|F|1995-02-26|1995-03-20|1995-03-03|NONE|SHIP|blithely unusual pinto 742|101553|4064|3|24|37309.20|0.08|0.08|A|F|1995-02-12|1995-03-12|1995-02-14|DELIVER IN PERSON|SHIP|affix slyly. 
furiously i 742|191891|4411|4|16|31726.24|0.01|0.05|A|F|1995-01-15|1995-02-25|1995-01-24|COLLECT COD|AIR|eodolites haggle carefully regul 742|100006|2517|5|48|48288.00|0.09|0.08|R|F|1995-03-24|1995-01-23|1995-04-08|TAKE BACK RETURN|TRUCK| platelets 742|191966|9524|6|49|100840.04|0.02|0.07|A|F|1995-01-13|1995-02-13|1995-01-26|TAKE BACK RETURN|RAIL| carefully bold foxes sle 743|191460|6499|1|21|32580.66|0.01|0.04|N|O|1996-10-26|1996-11-05|1996-11-11|COLLECT COD|MAIL|d requests. packages afte 768|195813|5814|1|39|74443.59|0.06|0.08|N|O|1996-09-25|1996-10-27|1996-10-20|NONE|SHIP|out the ironic 768|17862|7863|2|2|3559.72|0.00|0.04|N|O|1996-11-13|1996-10-03|1996-11-25|DELIVER IN PERSON|SHIP|ular courts. slyly dogged accou 768|5964|965|3|30|56098.80|0.06|0.05|N|O|1996-09-22|1996-11-03|1996-10-13|NONE|MAIL| furiously fluffy pinto beans haggle along 768|24493|2000|4|37|52447.13|0.10|0.00|N|O|1996-10-02|1996-09-23|1996-10-14|TAKE BACK RETURN|REG AIR|ending requests across the quickly 768|46500|1509|5|47|67985.50|0.06|0.05|N|O|1996-11-28|1996-10-30|1996-12-12|NONE|TRUCK|foxes. slyly ironic deposits a 768|111635|6658|6|43|70805.09|0.10|0.06|N|O|1996-09-22|1996-11-03|1996-10-22|TAKE BACK RETURN|AIR|sual ideas wake quickly 768|48062|8063|7|33|33331.98|0.01|0.04|N|O|1996-09-06|1996-09-29|1996-10-01|COLLECT COD|RAIL|sly ironic instructions. excuses can hagg 769|175068|7586|1|36|41150.16|0.02|0.02|A|F|1993-10-01|1993-08-07|1993-10-15|NONE|AIR|es. furiously iro 769|159077|1593|2|4|4544.28|0.01|0.04|R|F|1993-06-25|1993-08-12|1993-07-15|DELIVER IN PERSON|FOB| ideas. even 770|180119|2638|1|39|46765.29|0.09|0.06|N|O|1998-07-19|1998-08-09|1998-08-04|NONE|REG AIR|osits. foxes cajole 770|53281|5787|2|25|30857.00|0.03|0.02|N|O|1998-05-26|1998-07-23|1998-06-04|TAKE BACK RETURN|AIR| deposits dazzle fluffily alongside of 771|6983|4484|1|12|22679.76|0.10|0.08|N|O|1995-07-18|1995-08-02|1995-08-07|COLLECT COD|TRUCK|carefully. pending in 771|160505|3022|2|38|59489.00|0.03|0.08|N|O|1995-07-22|1995-09-10|1995-07-29|TAKE BACK RETURN|REG AIR| quickly final requests are final packages. 771|6393|6394|3|14|18191.46|0.02|0.05|N|O|1995-07-31|1995-08-13|1995-08-07|DELIVER IN PERSON|AIR|r, final packages are slyly iro 771|41262|1263|4|7|8422.82|0.06|0.02|N|O|1995-06-18|1995-08-31|1995-06-20|NONE|REG AIR|theodolites after the fluffily express 771|77402|4924|5|13|17932.20|0.09|0.01|N|O|1995-08-10|1995-08-21|1995-08-30|NONE|FOB|packages affix slyly about the quickly 771|81921|9446|6|23|43767.16|0.08|0.03|N|O|1995-06-19|1995-09-07|1995-07-09|COLLECT COD|FOB|cajole besides the quickly ironic pin 772|52698|214|1|35|57774.15|0.10|0.06|R|F|1993-07-05|1993-06-05|1993-08-02|NONE|SHIP|kly thin packages wake slowly 772|83007|8024|2|10|9900.00|0.05|0.01|R|F|1993-05-20|1993-05-19|1993-06-15|DELIVER IN PERSON|MAIL| deposits cajole carefully instructions. t 772|85119|5120|3|35|38643.85|0.03|0.04|R|F|1993-04-18|1993-06-13|1993-05-01|COLLECT COD|MAIL|ng ideas. special packages haggle alon 772|179834|7386|4|10|19138.30|0.08|0.02|A|F|1993-05-17|1993-06-09|1993-05-29|COLLECT COD|AIR|o the furiously final deposits. furi 772|53649|3650|5|42|67310.88|0.02|0.07|A|F|1993-06-09|1993-07-16|1993-06-12|DELIVER IN PERSON|MAIL| express foxes abo 773|99317|9318|1|5|6581.55|0.06|0.04|A|F|1993-11-21|1993-12-19|1993-12-21|COLLECT COD|MAIL|ar requests. 
regular, thin packages u 773|10005|2507|2|31|28365.00|0.02|0.06|A|F|1993-12-30|1993-11-02|1994-01-01|TAKE BACK RETURN|MAIL|e slyly unusual deposit 773|150082|7628|3|39|44151.12|0.06|0.05|A|F|1994-01-04|1993-12-23|1994-01-26|DELIVER IN PERSON|FOB|quickly eve 773|28056|3061|4|28|27553.40|0.10|0.06|R|F|1994-01-19|1993-11-05|1994-01-23|NONE|TRUCK|he furiously slow deposits. 773|133007|3008|5|9|9360.00|0.09|0.02|R|F|1993-10-09|1993-12-25|1993-11-04|TAKE BACK RETURN|FOB|ent orbits haggle fluffily after the 773|39505|9506|6|43|62113.50|0.07|0.03|A|F|1993-11-06|1993-11-20|1993-11-08|TAKE BACK RETURN|SHIP|furiously bold dependencies. blithel 774|182652|2653|1|49|84997.85|0.08|0.03|N|O|1995-12-06|1996-01-07|1995-12-14|DELIVER IN PERSON|SHIP|ess accounts are carefully 774|16038|1041|2|3|2862.09|0.10|0.06|N|O|1996-02-13|1996-01-14|1996-03-04|COLLECT COD|FOB| slyly even courts nag blith 774|147809|5352|3|34|63131.20|0.02|0.07|N|O|1996-03-16|1996-01-03|1996-03-22|COLLECT COD|FOB|lar excuses are furiously final instr 774|14993|4994|4|8|15263.92|0.00|0.02|N|O|1996-01-24|1996-01-15|1996-02-13|COLLECT COD|RAIL|ully ironic requests c 774|176160|3712|5|44|54391.04|0.09|0.07|N|O|1996-02-29|1996-01-16|1996-03-06|NONE|REG AIR|s according to the deposits unwind ca 774|119813|9814|6|2|3665.62|0.07|0.03|N|O|1995-12-11|1996-02-10|1995-12-14|TAKE BACK RETURN|SHIP|accounts; slyly regular 775|31640|6647|1|16|25146.24|0.10|0.06|N|F|1995-05-23|1995-05-07|1995-06-19|NONE|TRUCK|un quickly slyly 775|173244|796|2|21|27662.04|0.01|0.06|R|F|1995-05-01|1995-06-02|1995-05-13|DELIVER IN PERSON|FOB| quickly sile 775|107093|4624|3|20|22001.80|0.01|0.08|N|F|1995-06-17|1995-05-22|1995-07-13|COLLECT COD|AIR|en dependencies nag slowly 800|71833|6848|1|38|68583.54|0.00|0.05|N|O|1998-07-21|1998-09-25|1998-08-07|TAKE BACK RETURN|TRUCK|according to the bold, final dependencies 800|84262|6771|2|21|26171.46|0.04|0.05|N|O|1998-07-23|1998-10-01|1998-08-20|TAKE BACK RETURN|RAIL|ckly even requests after the carefully r 800|175028|63|3|26|28678.52|0.01|0.02|N|O|1998-07-23|1998-10-08|1998-07-25|DELIVER IN PERSON|FOB|bove the pending requests. 801|5111|2612|1|13|13209.43|0.10|0.02|R|F|1992-04-25|1992-04-24|1992-05-16|COLLECT COD|RAIL|s are fluffily stealthily expres 801|94616|9635|2|21|33822.81|0.05|0.02|A|F|1992-03-14|1992-04-01|1992-04-05|COLLECT COD|AIR|wake silently furiously idle deposits. 801|2700|2701|3|21|33656.70|0.05|0.03|A|F|1992-04-25|1992-03-20|1992-05-04|COLLECT COD|REG AIR|cial, special packages. 801|163556|1105|4|12|19434.60|0.08|0.04|A|F|1992-06-06|1992-04-14|1992-06-12|TAKE BACK RETURN|RAIL|s. ironic pinto b 801|73144|666|5|45|50271.30|0.01|0.06|R|F|1992-03-22|1992-03-22|1992-03-25|COLLECT COD|REG AIR| even asymptotes 801|121868|4381|6|10|18898.60|0.08|0.01|A|F|1992-06-05|1992-05-15|1992-06-21|DELIVER IN PERSON|MAIL|al accounts. carefully regular foxes wake 801|25641|646|7|11|17233.04|0.01|0.03|A|F|1992-05-09|1992-04-19|1992-05-15|DELIVER IN PERSON|REG AIR|y special pinto beans cajole 802|142568|7597|1|40|64422.40|0.08|0.08|A|F|1995-01-07|1995-04-03|1995-01-23|DELIVER IN PERSON|RAIL|y bold accou 802|132751|2752|2|34|60647.50|0.08|0.06|A|F|1995-03-01|1995-03-15|1995-03-12|COLLECT COD|AIR|instructions cajole carefully. 
quietl 802|130920|921|3|44|85840.48|0.07|0.04|R|F|1995-01-09|1995-02-04|1995-01-18|TAKE BACK RETURN|SHIP|rmanently idly special requ 802|156598|1629|4|18|29782.62|0.09|0.02|R|F|1995-03-06|1995-02-07|1995-03-19|TAKE BACK RETURN|RAIL|y regular requests engage furiously final d 802|131708|1709|5|19|33054.30|0.08|0.06|A|F|1995-04-01|1995-02-20|1995-04-23|DELIVER IN PERSON|REG AIR|old, furious 803|53401|8412|1|8|10835.20|0.07|0.01|N|O|1997-08-04|1997-06-19|1997-08-12|NONE|SHIP|ronic theodo 803|98771|8772|2|21|37165.17|0.08|0.06|N|O|1997-08-25|1997-06-30|1997-09-10|TAKE BACK RETURN|AIR|ironic packages cajole slyly. un 804|125293|5294|1|30|39548.70|0.08|0.04|A|F|1993-03-29|1993-05-07|1993-04-14|COLLECT COD|REG AIR|ehind the quietly regular pac 804|198375|5933|2|2|2946.74|0.02|0.00|A|F|1993-06-23|1993-04-30|1993-06-25|NONE|TRUCK|slyly silent 804|75519|534|3|44|65758.44|0.04|0.05|R|F|1993-07-06|1993-04-13|1993-07-28|DELIVER IN PERSON|TRUCK|ly final deposits? special 804|37511|2518|4|21|30418.71|0.01|0.00|A|F|1993-04-12|1993-06-06|1993-04-20|DELIVER IN PERSON|RAIL|ular, ironic foxes. quickly even accounts 805|197826|346|1|25|48095.50|0.07|0.06|N|O|1995-08-05|1995-09-30|1995-08-06|NONE|AIR|ide of the pending, sly requests. quickly f 805|56623|9129|2|29|45808.98|0.07|0.01|N|O|1995-08-24|1995-08-15|1995-09-16|TAKE BACK RETURN|AIR|dolites according to the slyly f 805|46414|6415|3|12|16324.92|0.01|0.06|N|O|1995-07-13|1995-09-27|1995-08-02|TAKE BACK RETURN|REG AIR| regular foxes. furio 805|75351|7859|4|26|34485.10|0.08|0.07|N|O|1995-08-28|1995-09-24|1995-09-11|TAKE BACK RETURN|RAIL|. ironic deposits sleep across 806|104752|2283|1|1|1756.75|0.04|0.07|N|O|1996-07-14|1996-09-12|1996-07-25|COLLECT COD|RAIL|ar accounts? pending, pending foxes a 806|159749|4780|2|22|39792.28|0.08|0.02|N|O|1996-10-03|1996-08-11|1996-10-20|DELIVER IN PERSON|REG AIR|fily pending 806|90335|2845|3|4|5301.32|0.04|0.03|N|O|1996-08-09|1996-09-18|1996-08-13|COLLECT COD|TRUCK|eans. quickly ironic ideas 807|116568|4102|1|49|77643.44|0.00|0.00|R|F|1993-12-05|1994-01-13|1993-12-25|COLLECT COD|REG AIR| furiously according to the un 807|154875|9906|2|49|94563.63|0.01|0.06|A|F|1994-01-17|1994-01-24|1994-01-22|COLLECT COD|TRUCK|y regular requests haggle. 807|180228|7783|3|48|62794.56|0.07|0.07|A|F|1994-01-08|1994-02-02|1994-01-15|DELIVER IN PERSON|SHIP|kly across the f 807|79226|9227|4|10|12052.20|0.09|0.00|R|F|1994-01-19|1994-02-12|1994-01-28|NONE|TRUCK|furiously final depths sleep a 807|142418|7447|5|30|43812.30|0.02|0.01|R|F|1994-01-19|1994-01-09|1994-01-27|NONE|RAIL|cial accoun 807|11627|9131|6|11|16924.82|0.02|0.04|R|F|1994-03-25|1994-01-26|1994-04-14|NONE|FOB|unts above the slyly final ex 807|149|5150|7|19|19933.66|0.08|0.05|A|F|1994-02-10|1994-02-20|1994-03-06|NONE|SHIP|ns haggle quickly across the furi 832|102949|5460|1|45|87837.30|0.01|0.02|A|F|1992-05-08|1992-06-06|1992-06-04|COLLECT COD|MAIL|foxes engage slyly alon 832|47411|2420|2|24|32601.84|0.05|0.06|A|F|1992-06-15|1992-07-14|1992-06-17|NONE|TRUCK|ully. carefully speci 833|53556|3557|1|1|1509.55|0.04|0.04|R|F|1994-04-26|1994-04-05|1994-04-29|COLLECT COD|MAIL|ffily ironic theodolites 833|111833|4345|2|38|70103.54|0.05|0.05|A|F|1994-04-05|1994-04-21|1994-05-01|COLLECT COD|TRUCK| platelets promise furiously. 833|161174|8723|3|9|11116.53|0.05|0.07|A|F|1994-02-28|1994-04-26|1994-03-20|TAKE BACK RETURN|FOB|ecial, even requests. 
even, bold instructi 834|144119|6634|1|36|41871.96|0.06|0.04|R|F|1994-06-28|1994-07-25|1994-07-07|TAKE BACK RETURN|SHIP|ccounts haggle after the furiously 834|6951|1952|2|11|20437.45|0.03|0.00|A|F|1994-09-18|1994-08-03|1994-10-02|DELIVER IN PERSON|TRUCK|inst the regular packa 835|106431|1452|1|33|47435.19|0.09|0.06|N|O|1995-11-01|1995-12-02|1995-11-24|DELIVER IN PERSON|RAIL|onic instructions among the carefully iro 835|184562|9599|2|28|46103.68|0.02|0.02|N|O|1995-12-27|1995-12-11|1996-01-21|NONE|SHIP| fluffily furious pinto beans 836|187915|5470|1|6|12017.46|0.09|0.03|N|O|1996-12-09|1997-01-31|1996-12-29|COLLECT COD|TRUCK|fully bold theodolites are daringly across 836|83534|1059|2|18|27315.54|0.03|0.05|N|O|1997-02-27|1997-02-11|1997-03-22|NONE|REG AIR|y pending packages use alon 836|140671|3186|3|46|78736.82|0.05|0.07|N|O|1997-03-21|1997-02-06|1997-04-05|NONE|REG AIR|boldly final pinto beans haggle furiously 837|56565|9071|1|39|59340.84|0.03|0.08|A|F|1994-07-22|1994-08-10|1994-08-11|NONE|RAIL|ecial pinto bea 837|87341|4866|2|24|31880.16|0.08|0.00|R|F|1994-06-27|1994-09-02|1994-07-27|DELIVER IN PERSON|FOB|p carefully. theodolites use. bold courts a 838|133708|6222|1|20|34834.00|0.10|0.07|N|O|1998-04-11|1998-03-25|1998-04-19|COLLECT COD|TRUCK| furiously final ideas. slow, bold 838|28244|8245|2|27|31650.48|0.05|0.07|N|O|1998-02-15|1998-04-03|1998-02-20|DELIVER IN PERSON|SHIP| pending pinto beans haggle about t 838|94785|7295|3|23|40934.94|0.10|0.07|N|O|1998-03-26|1998-04-17|1998-04-02|COLLECT COD|AIR|ets haggle furiously furiously regular r 838|43152|3153|4|18|19712.70|0.09|0.00|N|O|1998-03-28|1998-04-06|1998-03-31|TAKE BACK RETURN|AIR|hely unusual foxes. furio 839|157077|4623|1|23|26083.61|0.07|0.02|N|O|1995-10-17|1995-11-03|1995-11-04|COLLECT COD|AIR|ng ideas haggle accord 839|188315|8316|2|47|65955.57|0.08|0.00|N|O|1995-10-17|1995-11-06|1995-11-10|NONE|AIR|refully final excuses about 864|129655|2168|1|34|57278.10|0.03|0.04|N|O|1997-12-16|1997-10-23|1998-01-12|TAKE BACK RETURN|SHIP|gside of the furiously special 864|97941|2960|2|7|13572.58|0.01|0.02|N|O|1997-11-13|1997-10-07|1997-12-13|TAKE BACK RETURN|MAIL|ven requests should sleep along 864|79304|1812|3|34|43632.20|0.03|0.00|N|O|1997-09-14|1997-11-04|1997-09-21|TAKE BACK RETURN|REG AIR|to the furiously ironic platelets! 865|197300|9820|1|16|22356.80|0.07|0.03|R|F|1993-08-24|1993-06-26|1993-08-28|TAKE BACK RETURN|TRUCK|y even accounts. quickly bold decoys 865|19188|4191|2|3|3321.54|0.02|0.05|A|F|1993-07-17|1993-07-14|1993-08-01|NONE|MAIL|fully regular the 865|86034|1051|3|15|15300.45|0.00|0.06|R|F|1993-07-05|1993-06-25|1993-07-26|NONE|SHIP| deposits sleep quickl 865|168398|5947|4|34|49857.26|0.09|0.06|A|F|1993-05-09|1993-07-28|1993-05-18|DELIVER IN PERSON|REG AIR|furiously fluffily unusual account 866|135703|5704|1|5|8693.50|0.08|0.00|R|F|1993-01-22|1993-01-14|1993-02-07|TAKE BACK RETURN|AIR|tegrate fluffily. carefully f 867|138798|8799|1|7|12857.53|0.04|0.07|A|F|1994-02-19|1993-12-25|1994-02-25|DELIVER IN PERSON|TRUCK|pendencies-- slyly unusual packages hagg 868|167908|7909|1|8|15807.20|0.06|0.03|R|F|1992-10-07|1992-08-01|1992-10-16|NONE|MAIL|l deposits. blithely regular pint 868|28659|3664|2|13|20639.45|0.05|0.07|R|F|1992-07-25|1992-08-26|1992-08-04|NONE|AIR|gged instructi 868|67432|2445|3|19|26589.17|0.09|0.06|R|F|1992-06-22|1992-08-27|1992-07-04|COLLECT COD|SHIP|lyly ironic platelets wake. 
rut 868|121059|6084|4|43|46442.15|0.02|0.04|A|F|1992-07-02|1992-07-22|1992-07-21|COLLECT COD|SHIP|kly silent deposits wake dar 868|24498|2005|5|27|38407.23|0.04|0.01|R|F|1992-08-01|1992-08-25|1992-08-12|TAKE BACK RETURN|RAIL|oss the fluffily unusual pinto 868|124220|4221|6|19|23640.18|0.02|0.05|R|F|1992-09-20|1992-07-18|1992-10-04|NONE|FOB|ely even deposits lose blithe 869|62275|4782|1|27|33406.29|0.07|0.07|N|O|1997-01-30|1997-02-17|1997-02-26|TAKE BACK RETURN|TRUCK|uffily even excuses? slyly even deposits 869|46068|8573|2|36|36506.16|0.04|0.01|N|O|1997-05-03|1997-03-17|1997-05-24|NONE|RAIL|ong the furiously bold instructi 870|49110|6623|1|36|38127.96|0.04|0.07|A|F|1993-10-18|1993-09-16|1993-11-15|COLLECT COD|MAIL|fily. furiously final accounts are 870|185850|5851|2|5|9679.25|0.06|0.05|A|F|1993-08-13|1993-09-11|1993-08-24|COLLECT COD|FOB|e slyly excuses. ironi 871|96392|6393|1|48|66642.72|0.10|0.03|N|O|1996-02-25|1996-02-09|1996-03-18|NONE|AIR|coys dazzle slyly slow notornis. f 871|54591|9602|2|47|72642.73|0.07|0.03|N|O|1995-12-25|1996-02-01|1996-01-24|TAKE BACK RETURN|RAIL|ss, final dep 871|107967|5498|3|13|25674.48|0.09|0.01|N|O|1996-01-25|1996-01-24|1996-02-03|NONE|REG AIR| haggle furiou 871|189917|9918|4|29|58200.39|0.06|0.07|N|O|1995-11-16|1996-01-27|1995-12-16|DELIVER IN PERSON|RAIL|ests are carefu 871|127480|2505|5|8|12059.84|0.00|0.01|N|O|1995-11-25|1996-01-12|1995-12-12|DELIVER IN PERSON|AIR|lar ideas-- slyly even accou 871|142499|42|6|26|40078.74|0.00|0.06|N|O|1996-02-07|1996-01-05|1996-02-25|COLLECT COD|AIR|symptotes use quickly near the 871|173371|8406|7|4|5777.48|0.00|0.07|N|O|1996-03-09|1996-01-20|1996-03-26|COLLECT COD|FOB|l, regular dependencies w 896|38675|8676|1|47|75842.49|0.07|0.08|R|F|1993-05-28|1993-05-15|1993-06-15|DELIVER IN PERSON|TRUCK|ly even pinto beans integrate. b 896|197621|5179|2|10|17186.20|0.03|0.07|A|F|1993-07-07|1993-06-03|1993-07-24|COLLECT COD|SHIP| quickly even theodolites. carefully regu 896|1925|9426|3|7|12788.44|0.09|0.02|A|F|1993-05-02|1993-05-24|1993-05-31|DELIVER IN PERSON|MAIL| requests 896|151663|1664|4|11|18861.26|0.08|0.04|A|F|1993-05-19|1993-05-22|1993-06-08|COLLECT COD|MAIL|the multipliers sleep 896|187481|5036|5|34|53328.32|0.00|0.05|R|F|1993-05-21|1993-06-01|1993-05-23|NONE|TRUCK|ular, close requests cajo 896|176862|1897|6|44|85309.84|0.09|0.08|R|F|1993-05-19|1993-04-14|1993-06-02|DELIVER IN PERSON|FOB|lar, pending packages. deposits are q 896|108889|1400|7|11|20876.68|0.01|0.07|A|F|1993-05-01|1993-04-09|1993-05-06|TAKE BACK RETURN|FOB|rding to the pinto beans wa 897|90450|5469|1|15|21606.75|0.07|0.04|R|F|1995-05-25|1995-05-09|1995-06-07|COLLECT COD|REG AIR|r ideas. slyly spec 897|183170|725|2|26|32582.42|0.05|0.08|N|O|1995-07-01|1995-06-10|1995-07-14|COLLECT COD|MAIL|tions sleep according to the special 897|125704|8217|3|13|22486.10|0.07|0.00|A|F|1995-03-30|1995-05-17|1995-04-21|TAKE BACK RETURN|MAIL|bold accounts mold carefully! braids 897|101182|6203|4|2|2366.36|0.08|0.08|R|F|1995-05-22|1995-05-07|1995-06-16|COLLECT COD|RAIL|into beans. 
slyly special fox 898|160159|160|1|9|10972.35|0.07|0.08|A|F|1993-07-04|1993-07-09|1993-07-25|NONE|AIR|e slyly across the blithe 898|178512|6064|2|37|58848.87|0.03|0.05|A|F|1993-08-17|1993-08-04|1993-09-01|DELIVER IN PERSON|REG AIR|packages sleep furiously 898|48242|5755|3|11|13092.64|0.01|0.00|A|F|1993-09-13|1993-08-31|1993-09-25|TAKE BACK RETURN|MAIL|etly bold accounts 898|192166|7205|4|36|45293.76|0.04|0.07|R|F|1993-08-04|1993-07-25|1993-08-23|DELIVER IN PERSON|REG AIR| after the carefully 899|60085|2592|1|18|18811.44|0.04|0.05|N|O|1998-08-06|1998-05-09|1998-09-05|DELIVER IN PERSON|AIR|re daring, pending deposits. blit 899|46070|8575|2|25|25401.75|0.00|0.07|N|O|1998-07-21|1998-05-12|1998-08-16|NONE|REG AIR|rly final sentiments. bold pinto beans 899|84976|2501|3|4|7843.88|0.09|0.05|N|O|1998-06-02|1998-06-28|1998-06-14|TAKE BACK RETURN|REG AIR|ter the carefully regular deposits are agai 899|179704|4739|4|14|24971.80|0.05|0.03|N|O|1998-05-21|1998-05-28|1998-06-03|TAKE BACK RETURN|FOB|ades impress carefully 899|70776|5791|5|4|6987.08|0.06|0.02|N|O|1998-04-11|1998-05-14|1998-04-27|NONE|TRUCK|ges. blithe, ironic waters cajole care 899|119083|1595|6|47|51797.76|0.00|0.04|N|O|1998-04-14|1998-05-30|1998-05-13|DELIVER IN PERSON|TRUCK|furiously final foxes after the s 899|13423|8426|7|11|14700.62|0.02|0.08|N|O|1998-06-03|1998-06-15|1998-06-20|COLLECT COD|REG AIR|t the ironic 900|198532|1052|1|44|71743.32|0.01|0.06|R|F|1994-12-15|1994-12-03|1994-12-27|COLLECT COD|MAIL| detect quick 900|114624|4625|2|48|78653.76|0.08|0.04|A|F|1994-12-22|1994-11-08|1995-01-19|COLLECT COD|TRUCK|cial pinto beans nag 900|74130|4131|3|24|26499.12|0.03|0.00|R|F|1994-10-21|1994-12-25|1994-10-22|TAKE BACK RETURN|SHIP|-ray furiously un 901|21113|3616|1|36|37227.96|0.01|0.01|N|O|1998-08-11|1998-10-09|1998-08-27|DELIVER IN PERSON|REG AIR|. accounts are care 901|45222|5223|2|2|2334.44|0.09|0.02|N|O|1998-10-25|1998-09-27|1998-11-01|DELIVER IN PERSON|AIR|d foxes use slyly 901|42615|5120|3|37|57631.57|0.04|0.08|N|O|1998-11-01|1998-09-13|1998-11-05|NONE|AIR|ickly final deposits 901|17758|7759|4|11|18433.25|0.00|0.06|N|O|1998-11-13|1998-10-19|1998-11-14|TAKE BACK RETURN|TRUCK|ourts among the quickly expre 902|110509|510|1|3|4558.50|0.06|0.00|R|F|1994-10-01|1994-10-25|1994-10-28|COLLECT COD|MAIL|into beans thrash blithely about the flu 902|117211|9723|2|8|9825.68|0.06|0.07|R|F|1994-10-25|1994-09-20|1994-11-07|COLLECT COD|RAIL| orbits al 902|164836|9869|3|24|45619.92|0.02|0.05|R|F|1994-11-08|1994-10-12|1994-11-26|NONE|FOB|. blithely even accounts poach furiously i 903|64367|1886|1|27|35946.72|0.04|0.03|N|O|1995-09-18|1995-09-20|1995-10-02|TAKE BACK RETURN|SHIP|lly pending foxes. 
furiously 903|8890|1391|2|35|62961.15|0.06|0.05|N|O|1995-09-18|1995-08-21|1995-10-12|TAKE BACK RETURN|TRUCK|rets wake fin 903|8238|739|3|33|37825.59|0.02|0.03|N|O|1995-09-24|1995-09-01|1995-10-12|COLLECT COD|MAIL|ely ironic packages wake blithely 903|55408|419|4|9|12270.60|0.09|0.00|N|O|1995-10-06|1995-09-14|1995-10-24|NONE|TRUCK|he slyly ev 903|41041|1042|5|1|982.04|0.04|0.00|N|O|1995-10-22|1995-09-13|1995-11-03|NONE|AIR|y final platelets sublate among the 903|167067|7068|6|13|14742.78|0.07|0.02|N|O|1995-09-11|1995-10-04|1995-10-03|COLLECT COD|SHIP|sleep along the final 928|168148|8149|1|29|35268.06|0.07|0.02|R|F|1995-05-17|1995-05-12|1995-05-21|NONE|REG AIR|ly alongside of the s 928|47775|5288|2|24|41346.48|0.05|0.08|A|F|1995-04-06|1995-05-08|1995-04-24|DELIVER IN PERSON|AIR|s the furiously regular warthogs im 928|151774|4290|3|46|83985.42|0.08|0.00|A|F|1995-05-09|1995-04-09|1995-06-01|DELIVER IN PERSON|REG AIR| beans sleep against the carefully ir 928|51313|8829|4|43|54365.33|0.10|0.05|A|F|1995-04-14|1995-04-21|1995-05-09|NONE|REG AIR|blithely. express, silent requests doze at 928|11725|1726|5|38|62195.36|0.02|0.08|N|F|1995-06-08|1995-04-15|1995-06-30|TAKE BACK RETURN|SHIP|xpress grouc 928|54038|4039|6|50|49601.50|0.05|0.00|N|F|1995-06-07|1995-04-15|1995-07-01|DELIVER IN PERSON|TRUCK| slyly slyly special request 928|10337|2839|7|11|13720.63|0.00|0.01|A|F|1995-04-29|1995-04-16|1995-04-30|NONE|AIR|longside of 929|128672|3697|1|45|76530.15|0.09|0.01|R|F|1993-01-24|1992-12-06|1993-02-16|DELIVER IN PERSON|REG AIR|ges haggle careful 929|174218|6736|2|44|56857.24|0.02|0.00|A|F|1992-10-09|1992-11-20|1992-10-22|DELIVER IN PERSON|SHIP|s. excuses cajole. carefully regu 929|73036|3037|3|14|14126.42|0.06|0.07|A|F|1992-10-21|1992-11-17|1992-11-15|NONE|FOB|gainst the 929|101639|4150|4|7|11484.41|0.06|0.01|A|F|1992-12-24|1992-12-19|1993-01-08|TAKE BACK RETURN|TRUCK|ithely. slyly c 930|44804|2317|1|36|62956.80|0.10|0.04|R|F|1994-12-21|1995-02-20|1994-12-24|COLLECT COD|RAIL|quickly regular pinto beans sle 930|17295|4799|2|47|56977.63|0.08|0.00|A|F|1995-03-20|1995-02-04|1995-04-04|DELIVER IN PERSON|AIR|ackages. fluffily e 930|64230|1749|3|10|11942.30|0.07|0.08|A|F|1994-12-18|1995-01-27|1995-01-16|COLLECT COD|AIR|ckly regular requests: regular instructions 930|99635|2145|4|21|34327.23|0.06|0.02|A|F|1995-02-16|1995-03-03|1995-03-13|DELIVER IN PERSON|SHIP|foxes. regular deposits integrate carefu 930|163239|788|5|50|65111.50|0.03|0.06|A|F|1995-04-03|1995-01-29|1995-04-22|COLLECT COD|MAIL| excuses among the furiously express ideas 930|144557|2100|6|10|16015.50|0.00|0.04|A|F|1995-02-09|1995-02-17|1995-02-16|NONE|SHIP|blithely bold i 930|166196|1229|7|30|37865.70|0.07|0.08|R|F|1995-01-20|1995-02-28|1995-02-04|TAKE BACK RETURN|RAIL|g accounts sleep along the platelets. 931|39783|9784|1|18|31010.04|0.00|0.05|A|F|1993-04-04|1993-01-11|1993-04-13|NONE|RAIL|slyly ironic re 931|16788|4292|2|10|17047.80|0.05|0.07|A|F|1993-03-01|1993-01-09|1993-03-07|TAKE BACK RETURN|SHIP|ajole quickly. slyly sil 931|146052|3595|3|48|52706.40|0.01|0.08|A|F|1993-02-03|1993-03-02|1993-02-09|TAKE BACK RETURN|FOB|ep alongside of the fluffy 931|81324|6341|4|38|49602.16|0.08|0.08|A|F|1993-03-06|1993-02-24|1993-03-27|DELIVER IN PERSON|RAIL|usly final packages integrate carefully 932|43763|6268|1|41|69977.16|0.01|0.05|N|O|1997-06-05|1997-07-22|1997-06-26|COLLECT COD|RAIL|foxes. ironic pl 933|48267|5780|1|23|27950.98|0.02|0.04|R|F|1992-08-13|1992-09-18|1992-08-25|DELIVER IN PERSON|MAIL| the furiously bold dinos. 
sly 933|12132|2133|2|27|28191.51|0.02|0.01|R|F|1992-10-03|1992-10-02|1992-10-26|DELIVER IN PERSON|RAIL|ests. express 933|99192|1702|3|26|30970.94|0.05|0.00|A|F|1992-11-09|1992-11-03|1992-11-16|DELIVER IN PERSON|AIR| the deposits affix slyly after t 934|117900|2923|1|18|34522.20|0.07|0.01|N|O|1996-09-10|1996-09-20|1996-09-25|COLLECT COD|RAIL|y unusual requests dazzle above t 935|27410|9913|1|23|30760.43|0.05|0.00|N|O|1997-11-11|1997-11-22|1997-11-29|COLLECT COD|REG AIR|ular accounts about 935|64553|2072|2|23|34903.65|0.02|0.01|N|O|1998-01-11|1997-11-25|1998-02-05|COLLECT COD|TRUCK|hes haggle furiously dolphins. qu 935|134031|6545|3|36|38341.08|0.06|0.00|N|O|1997-11-05|1997-12-05|1997-11-25|TAKE BACK RETURN|AIR|leep about the exp 935|57192|2203|4|13|14939.47|0.08|0.04|N|O|1998-01-13|1997-11-30|1998-02-08|DELIVER IN PERSON|TRUCK|ld platelet 935|12130|4632|5|8|8337.04|0.02|0.05|N|O|1998-01-12|1997-11-02|1998-02-05|NONE|TRUCK|cept the quickly regular p 935|58299|5815|6|1|1257.29|0.01|0.08|N|O|1997-12-14|1997-11-22|1998-01-08|DELIVER IN PERSON|TRUCK| instructions. ironic acc 960|106999|9510|1|1|2005.99|0.07|0.00|A|F|1994-12-24|1994-10-26|1995-01-20|DELIVER IN PERSON|AIR|y ironic packages. quickly even 960|116212|3746|2|25|30705.25|0.06|0.08|R|F|1994-12-01|1994-10-29|1994-12-27|DELIVER IN PERSON|RAIL|ts. fluffily regular requests 960|174776|2328|3|32|59224.64|0.01|0.08|R|F|1995-01-19|1994-12-17|1995-02-04|DELIVER IN PERSON|FOB|around the blithe, even pl 961|117921|2944|1|7|13572.44|0.10|0.00|N|O|1995-07-23|1995-07-20|1995-08-11|TAKE BACK RETURN|RAIL|usual dolphins. ironic pearls sleep blit 961|90469|470|2|18|26270.28|0.09|0.05|N|O|1995-07-01|1995-08-14|1995-07-04|DELIVER IN PERSON|AIR|rmanent foxes haggle speci 961|96938|6939|3|42|81267.06|0.06|0.01|N|O|1995-08-24|1995-08-21|1995-09-10|TAKE BACK RETURN|SHIP|ests do cajole blithely. furiously bo 961|33476|5980|4|29|40874.63|0.00|0.07|N|F|1995-06-10|1995-08-20|1995-06-26|TAKE BACK RETURN|TRUCK|l accounts use blithely against the 961|25276|5277|5|38|45648.26|0.03|0.05|N|O|1995-08-21|1995-07-19|1995-08-27|NONE|RAIL|he blithely special requests. furiousl 961|196988|6989|6|30|62549.40|0.09|0.03|N|O|1995-07-06|1995-07-20|1995-07-26|DELIVER IN PERSON|MAIL|warhorses slee 962|56598|6599|1|36|55965.24|0.01|0.03|R|F|1994-08-09|1994-07-10|1994-09-02|COLLECT COD|TRUCK|al foxes. iron 962|35276|7780|2|27|32704.29|0.09|0.02|A|F|1994-05-11|1994-07-10|1994-06-03|TAKE BACK RETURN|SHIP|y slyly express deposits. final i 962|79649|9650|3|3|4885.92|0.07|0.08|A|F|1994-05-08|1994-07-06|1994-06-02|DELIVER IN PERSON|FOB|ag furiously. even pa 962|56308|8814|4|20|25286.00|0.04|0.02|R|F|1994-08-26|1994-06-27|1994-09-11|DELIVER IN PERSON|SHIP| deposits use fluffily according to 962|151996|7027|5|12|24575.88|0.02|0.00|A|F|1994-06-09|1994-06-07|1994-06-11|COLLECT COD|TRUCK|across the furiously regular escapades daz 962|187354|2391|6|5|7206.75|0.02|0.05|A|F|1994-08-29|1994-07-15|1994-09-19|COLLECT COD|TRUCK|efully bold packages run slyly caref 963|193152|710|1|7|8716.05|0.01|0.00|R|F|1994-09-12|1994-07-18|1994-09-17|DELIVER IN PERSON|REG AIR|s. slyly regular depe 963|97483|9993|2|48|71063.04|0.10|0.06|R|F|1994-08-25|1994-08-12|1994-09-21|DELIVER IN PERSON|RAIL|ages. quickly express deposits cajole pe 964|198936|8937|1|39|79362.27|0.04|0.01|N|O|1995-06-21|1995-07-24|1995-06-24|NONE|AIR|se furiously regular instructions. blith 964|112231|2232|2|1|1243.23|0.02|0.05|N|O|1995-08-20|1995-07-29|1995-09-10|DELIVER IN PERSON|REG AIR|unts. 
quickly even platelets s 964|56790|9296|3|49|85592.71|0.01|0.03|N|O|1995-09-06|1995-08-10|1995-10-05|NONE|MAIL|ounts. blithely regular packag 964|54514|7020|4|44|64614.44|0.05|0.02|N|O|1995-09-18|1995-08-02|1995-10-17|TAKE BACK RETURN|TRUCK|ronic deposit 965|107207|9718|1|20|24284.00|0.04|0.05|N|F|1995-06-16|1995-07-20|1995-07-06|COLLECT COD|MAIL|kly. carefully pending requ 965|17015|2018|2|23|21436.23|0.06|0.08|N|O|1995-07-12|1995-07-08|1995-08-11|COLLECT COD|MAIL|ld kindle carefully across th 966|179337|6889|1|19|26910.27|0.07|0.01|N|O|1998-05-26|1998-07-15|1998-05-29|COLLECT COD|FOB|efully final pinto beans. quickly 966|116560|1583|2|42|66215.52|0.02|0.06|N|O|1998-06-28|1998-06-20|1998-07-05|NONE|TRUCK|tions boost furiously car 966|21218|6223|3|42|47846.82|0.06|0.08|N|O|1998-06-15|1998-06-08|1998-07-05|NONE|RAIL|sly ironic asymptotes hagg 966|4189|1690|4|20|21863.60|0.04|0.01|N|O|1998-07-19|1998-07-15|1998-07-27|NONE|TRUCK|pecial ins 967|58957|3968|1|41|78553.95|0.05|0.05|R|F|1992-09-21|1992-08-15|1992-10-21|NONE|MAIL|ld foxes wake closely special 967|84402|1927|2|4|5545.60|0.01|0.02|A|F|1992-07-15|1992-07-27|1992-07-18|DELIVER IN PERSON|REG AIR|platelets hang carefully along 967|131550|4064|3|10|15815.50|0.00|0.02|A|F|1992-09-18|1992-08-06|1992-09-19|DELIVER IN PERSON|MAIL|old pinto beans alongside of the exp 967|147690|5233|4|49|85146.81|0.01|0.04|A|F|1992-09-28|1992-09-15|1992-10-14|NONE|SHIP|the slyly even ideas. carefully even 967|16893|9395|5|41|74205.49|0.08|0.04|A|F|1992-07-23|1992-08-07|1992-08-13|TAKE BACK RETURN|FOB|efully special ide 967|105311|7822|6|17|22377.27|0.05|0.06|A|F|1992-10-02|1992-08-19|1992-10-25|NONE|MAIL|y ironic foxes caj 967|160878|5911|7|18|34899.66|0.00|0.02|A|F|1992-10-06|1992-08-05|1992-10-15|DELIVER IN PERSON|RAIL|ngage blith 992|59906|7422|1|14|26122.60|0.10|0.03|N|O|1998-01-29|1997-12-29|1998-02-18|TAKE BACK RETURN|MAIL|the unusual, even dependencies affix fluff 992|37967|7968|2|34|64768.64|0.02|0.00|N|O|1997-11-29|1998-01-21|1997-11-30|NONE|RAIL|s use silently. blithely regular ideas b 992|104722|4723|3|30|51801.60|0.10|0.00|N|O|1997-12-15|1998-02-02|1998-01-12|NONE|SHIP|nic instructions n 992|47212|9717|4|21|24343.41|0.06|0.06|N|O|1997-11-13|1997-12-28|1997-12-10|NONE|TRUCK|fily. quickly special deposit 992|91050|3560|5|7|7287.35|0.09|0.05|N|O|1997-11-30|1997-12-24|1997-12-16|DELIVER IN PERSON|TRUCK|ideas haggle. special theodolit 992|74101|1623|6|41|44079.10|0.10|0.01|N|O|1997-11-14|1998-02-04|1997-11-23|TAKE BACK RETURN|AIR|eodolites cajole across the accounts. 993|174476|6994|1|33|51165.51|0.01|0.05|N|O|1996-01-03|1995-11-28|1996-01-23|DELIVER IN PERSON|AIR| the deposits affix agains 993|2145|4646|2|28|29319.92|0.06|0.08|N|O|1995-10-24|1995-11-20|1995-11-06|DELIVER IN PERSON|RAIL|lites. even theodolite 993|39924|4931|3|10|18639.20|0.03|0.08|N|O|1995-12-17|1995-11-13|1995-12-20|NONE|RAIL|encies wake fur 993|190751|5790|4|40|73670.00|0.01|0.01|N|O|1995-11-16|1995-11-01|1995-12-05|TAKE BACK RETURN|RAIL|gle above the furiously 993|145484|5485|5|33|50472.84|0.09|0.08|N|O|1995-09-28|1995-10-24|1995-10-03|COLLECT COD|RAIL|fluffily. quiet excuses sleep furiously sly 993|136979|4519|6|35|70558.95|0.04|0.02|N|O|1995-10-26|1995-10-20|1995-11-05|DELIVER IN PERSON|FOB|es. ironic, ironic requests 993|4967|2468|7|15|28079.40|0.09|0.03|N|O|1995-09-27|1995-10-21|1995-10-17|DELIVER IN PERSON|MAIL|sits. pending pinto beans haggle? 
ca 994|64486|4487|1|4|5801.92|0.07|0.03|R|F|1994-07-05|1994-05-21|1994-07-20|COLLECT COD|SHIP|aggle carefully acc 994|9177|1678|2|11|11947.87|0.01|0.00|R|F|1994-05-03|1994-06-10|1994-05-22|NONE|AIR|ular accounts sleep 994|30745|8255|3|5|8378.70|0.08|0.08|A|F|1994-06-24|1994-06-14|1994-06-26|NONE|MAIL|ainst the pending requests. packages sl 994|130471|8011|4|25|37536.75|0.10|0.00|A|F|1994-06-03|1994-06-02|1994-06-06|COLLECT COD|RAIL|usual pinto beans. 995|172294|9846|1|15|20494.35|0.08|0.05|N|O|1995-06-30|1995-08-04|1995-07-27|NONE|REG AIR|uses. fluffily fina 995|128707|1220|2|28|48599.60|0.08|0.03|N|F|1995-06-12|1995-07-20|1995-06-19|DELIVER IN PERSON|SHIP|pades. quick, final frays use flu 995|165067|100|3|45|50942.70|0.00|0.05|N|O|1995-08-02|1995-07-21|1995-08-03|DELIVER IN PERSON|SHIP|lar packages detect blithely above t 995|65736|749|4|25|42543.25|0.01|0.08|N|O|1995-09-08|1995-08-05|1995-09-28|NONE|TRUCK|lyly even 995|23196|3197|5|18|20145.42|0.06|0.03|N|O|1995-07-03|1995-07-29|1995-07-22|TAKE BACK RETURN|AIR| even accounts unwind c 996|172396|7431|1|43|63140.77|0.03|0.06|N|O|1998-03-27|1998-03-25|1998-04-06|COLLECT COD|SHIP| the blithely ironic foxes. slyly silent d 997|162614|2615|1|11|18442.71|0.00|0.02|N|O|1997-06-16|1997-07-21|1997-07-14|DELIVER IN PERSON|TRUCK|p furiously according to t 997|47438|7439|2|17|23552.31|0.03|0.00|N|O|1997-07-28|1997-07-26|1997-08-20|DELIVER IN PERSON|SHIP|aggle quickly furiously 998|9924|7425|1|22|40346.24|0.04|0.05|A|F|1994-12-03|1995-02-17|1994-12-19|TAKE BACK RETURN|RAIL|lites. qui 998|180410|5447|2|7|10432.87|0.10|0.05|R|F|1995-03-24|1995-01-18|1995-04-03|NONE|MAIL|nic deposits. even asym 998|141752|4267|3|30|53812.50|0.05|0.07|A|F|1994-12-02|1995-01-23|1994-12-23|NONE|SHIP|lyly idle Tir 998|10143|5146|4|6|6318.84|0.09|0.05|R|F|1995-03-20|1994-12-27|1995-04-13|DELIVER IN PERSON|MAIL|refully accounts. carefully express ac 998|72407|7422|5|1|1379.40|0.04|0.00|R|F|1995-01-05|1995-01-06|1995-01-13|NONE|SHIP|es sleep. regular dependencies use bl 999|60071|7590|1|34|35056.38|0.00|0.08|R|F|1993-10-30|1993-10-17|1993-10-31|TAKE BACK RETURN|SHIP|its. daringly final instruc 999|198758|1278|2|41|76126.75|0.08|0.01|A|F|1993-10-16|1993-12-04|1993-11-03|DELIVER IN PERSON|REG AIR|us depths. carefully ironic instruc 999|117178|2201|3|15|17927.55|0.07|0.06|A|F|1993-12-12|1993-10-18|1994-01-08|COLLECT COD|REG AIR|y ironic requests. carefully regu 999|2643|2644|4|10|15456.40|0.05|0.02|A|F|1993-11-23|1993-12-02|1993-11-29|NONE|MAIL|efully pending 999|18337|8338|5|3|3765.99|0.03|0.00|R|F|1993-09-17|1993-10-22|1993-10-13|NONE|FOB|nic, pending ideas. bl 999|180934|8489|6|37|74552.41|0.00|0.04|R|F|1994-01-03|1993-10-28|1994-01-12|DELIVER IN PERSON|TRUCK|ckly slyly unusual packages: packages hagg 1024|198068|3107|1|49|57136.94|0.03|0.05|N|O|1998-03-06|1998-01-26|1998-03-29|TAKE BACK RETURN|FOB|ts. 
asymptotes nag fur 1024|125344|369|2|34|46557.56|0.00|0.01|N|O|1998-01-06|1998-02-05|1998-01-26|COLLECT COD|SHIP|des the slyly even 1024|43565|1078|3|28|42239.68|0.04|0.01|N|O|1998-03-04|1998-03-12|1998-03-15|TAKE BACK RETURN|TRUCK|e blithely regular pi 1024|183259|5778|4|13|17449.25|0.02|0.04|N|O|1998-04-11|1998-02-26|1998-04-18|NONE|FOB|e slyly around the slyly special instructi 1024|20829|8336|5|49|85741.18|0.02|0.04|N|O|1998-02-27|1998-03-10|1998-03-27|COLLECT COD|FOB| carefully bold 1025|149335|9336|1|36|49835.88|0.03|0.04|A|F|1995-05-15|1995-07-05|1995-06-10|COLLECT COD|FOB|e unusual, regular instr 1025|68247|8248|2|23|27950.52|0.08|0.03|N|F|1995-06-02|1995-07-29|1995-06-23|COLLECT COD|RAIL| regular platelets nag carefu 1025|22445|7450|3|25|34186.00|0.06|0.05|R|F|1995-05-29|1995-06-21|1995-06-13|DELIVER IN PERSON|REG AIR|xpress foxes. furiousl 1026|37213|4723|1|36|41407.56|0.10|0.02|N|O|1997-06-14|1997-07-20|1997-06-23|NONE|SHIP|st the ide 1026|36855|1862|2|6|10751.10|0.10|0.08|N|O|1997-07-07|1997-08-16|1997-07-14|TAKE BACK RETURN|TRUCK|to beans. special, regular packages hagg 1027|155942|973|1|43|85911.42|0.07|0.08|R|F|1992-06-17|1992-08-28|1992-07-10|DELIVER IN PERSON|MAIL|oxes. carefully regular deposits 1027|112414|7437|2|20|28528.20|0.01|0.02|A|F|1992-06-08|1992-08-29|1992-06-14|NONE|TRUCK|ar excuses eat f 1027|125692|3229|3|2|3435.38|0.01|0.02|R|F|1992-08-28|1992-07-09|1992-09-10|NONE|FOB|s. quickly unusual waters inside 1027|99229|6757|4|13|15966.86|0.08|0.01|R|F|1992-08-22|1992-07-10|1992-09-12|DELIVER IN PERSON|RAIL|ily ironic ideas use 1027|135427|7941|5|22|32173.24|0.02|0.00|A|F|1992-09-03|1992-08-14|1992-10-01|DELIVER IN PERSON|FOB|the furiously express ex 1027|104958|7469|6|10|19629.50|0.06|0.08|R|F|1992-08-28|1992-08-06|1992-09-03|COLLECT COD|REG AIR|ilent, express foxes near the blithely sp 1028|127811|324|1|2|3677.62|0.09|0.03|A|F|1994-01-10|1994-03-22|1994-01-26|COLLECT COD|FOB|s alongside of the regular asymptotes sleep 1028|111876|6899|2|39|73626.93|0.06|0.05|R|F|1994-02-18|1994-03-22|1994-03-06|TAKE BACK RETURN|MAIL| final dependencies affix a 1028|99556|4575|3|8|12444.40|0.03|0.07|A|F|1994-02-14|1994-03-28|1994-02-22|NONE|AIR|e carefully final packages. furiously fi 1028|31239|3743|4|26|30425.98|0.07|0.02|A|F|1994-03-18|1994-02-08|1994-03-19|TAKE BACK RETURN|RAIL|ronic platelets. carefully f 1028|28550|6057|5|27|39920.85|0.00|0.04|A|F|1994-04-03|1994-02-07|1994-04-26|NONE|REG AIR|ial accounts nag. slyly 1028|25111|7614|6|39|40408.29|0.03|0.02|A|F|1994-02-27|1994-02-16|1994-03-02|DELIVER IN PERSON|AIR|c theodoli 1028|30960|5967|7|22|41601.12|0.03|0.00|R|F|1994-04-24|1994-02-27|1994-05-08|NONE|REG AIR| Tiresias alongside of the carefully spec 1029|136137|8651|1|45|52790.85|0.05|0.07|R|F|1994-07-21|1994-08-30|1994-07-29|TAKE BACK RETURN|FOB|sits boost blithely 1030|64397|1916|1|17|23143.63|0.06|0.06|R|F|1994-10-13|1994-08-01|1994-11-10|DELIVER IN PERSON|RAIL|ly. carefully even packages dazz 1031|45551|5552|1|15|22448.25|0.10|0.08|A|F|1994-11-07|1994-10-29|1994-11-09|TAKE BACK RETURN|FOB|about the carefully bold a 1031|164788|7305|2|28|51877.84|0.05|0.01|A|F|1994-12-10|1994-10-29|1994-12-18|COLLECT COD|FOB|ly ironic accounts across the q 1031|186928|4483|3|27|54402.84|0.07|0.02|R|F|1994-09-20|1994-10-18|1994-10-10|DELIVER IN PERSON|SHIP|gular deposits cajole. blithely unus 1031|87154|2171|4|7|7988.05|0.03|0.03|R|F|1994-12-07|1994-11-11|1994-12-30|COLLECT COD|FOB|r instructions. 
car 1031|190193|7751|5|44|56460.36|0.01|0.07|R|F|1994-11-20|1994-11-24|1994-12-11|NONE|AIR|re slyly above the furio 1056|120970|3483|1|37|73665.89|0.04|0.06|R|F|1995-02-18|1995-04-01|1995-03-20|NONE|TRUCK| special packages. qui 1057|192240|4760|1|29|38634.96|0.10|0.01|A|F|1992-05-05|1992-05-05|1992-06-03|TAKE BACK RETURN|SHIP|es wake according to the q 1057|168078|595|2|11|12606.77|0.00|0.02|R|F|1992-03-31|1992-04-18|1992-04-18|COLLECT COD|AIR|yly final theodolites. furi 1057|84143|9160|3|21|23669.94|0.03|0.04|A|F|1992-02-28|1992-05-01|1992-03-10|NONE|REG AIR|ar orbits boost bli 1057|181895|6932|4|20|39537.80|0.06|0.03|R|F|1992-03-02|1992-05-19|1992-03-13|DELIVER IN PERSON|TRUCK|s wake bol 1057|96404|3932|5|7|9802.80|0.06|0.05|R|F|1992-06-05|1992-04-30|1992-06-20|NONE|TRUCK|y slyly express theodolites. slyly bo 1057|51461|6472|6|19|26836.74|0.04|0.07|A|F|1992-05-31|1992-05-09|1992-06-02|DELIVER IN PERSON|FOB|r-- packages haggle alon 1058|139555|7095|1|24|38269.20|0.08|0.04|A|F|1993-07-09|1993-05-28|1993-07-22|DELIVER IN PERSON|TRUCK|fully ironic accounts. express accou 1058|88088|5613|2|5|5380.40|0.04|0.07|R|F|1993-05-11|1993-05-29|1993-05-27|COLLECT COD|TRUCK|refully even requests boost along 1058|89249|9250|3|44|54482.56|0.10|0.01|R|F|1993-06-26|1993-06-21|1993-07-20|COLLECT COD|TRUCK|uriously f 1058|4119|1620|4|25|25577.75|0.09|0.01|A|F|1993-05-27|1993-06-10|1993-06-20|TAKE BACK RETURN|MAIL| the final requests believe carefully 1059|177583|7584|1|16|26569.28|0.07|0.02|A|F|1994-04-24|1994-03-31|1994-04-28|DELIVER IN PERSON|SHIP|y ironic pinto 1059|28839|6346|2|7|12374.81|0.07|0.06|R|F|1994-03-30|1994-04-01|1994-04-24|DELIVER IN PERSON|MAIL|the furiously silent excuses are e 1059|87935|444|3|45|86531.85|0.00|0.02|R|F|1994-06-10|1994-05-08|1994-06-21|COLLECT COD|RAIL|riously even theodolites. slyly regula 1059|109784|7315|4|26|46638.28|0.09|0.01|A|F|1994-03-17|1994-04-18|1994-03-26|DELIVER IN PERSON|TRUCK|ar pinto beans at the furiously 1059|138061|575|5|37|40665.22|0.09|0.04|R|F|1994-03-31|1994-05-08|1994-04-06|COLLECT COD|RAIL| packages lose in place of the slyly unusu 1059|189215|1734|6|50|65210.50|0.00|0.03|A|F|1994-06-15|1994-05-11|1994-06-29|NONE|MAIL|s impress furiously about 1059|122187|2188|7|13|15719.34|0.01|0.03|R|F|1994-06-12|1994-05-11|1994-07-02|COLLECT COD|TRUCK|usly regular theodo 1060|195479|3037|1|8|12595.76|0.07|0.04|R|F|1993-05-21|1993-05-06|1993-06-10|DELIVER IN PERSON|FOB|iously. furiously regular in 1060|7614|5115|2|26|39561.86|0.06|0.08|R|F|1993-04-12|1993-04-01|1993-04-20|DELIVER IN PERSON|TRUCK|counts; even deposits are carefull 1060|163141|5658|3|11|13245.54|0.01|0.07|A|F|1993-05-13|1993-05-08|1993-05-17|TAKE BACK RETURN|MAIL|e regular deposits: re 1060|109815|7346|4|16|29196.96|0.03|0.06|A|F|1993-06-15|1993-04-18|1993-07-05|COLLECT COD|SHIP|ccounts. foxes maintain care 1060|52852|7863|5|1|1804.85|0.04|0.06|A|F|1993-06-19|1993-05-10|1993-06-21|COLLECT COD|RAIL|posits detect carefully abo 1060|71383|3891|6|26|35213.88|0.01|0.03|A|F|1993-02-28|1993-04-01|1993-03-09|TAKE BACK RETURN|FOB|quickly abo 1060|120102|5127|7|36|40395.60|0.09|0.01|R|F|1993-03-14|1993-03-24|1993-04-02|TAKE BACK RETURN|FOB|r the quickly 1061|150016|5047|1|7|7462.07|0.04|0.04|N|O|1998-08-09|1998-08-12|1998-08-16|COLLECT COD|FOB|es are slyly expr 1061|118529|1041|2|2|3095.04|0.06|0.02|N|O|1998-08-15|1998-08-05|1998-08-24|COLLECT COD|MAIL|. 
regular accounts impre 1061|110165|5188|3|26|30554.16|0.08|0.02|N|O|1998-06-18|1998-07-25|1998-06-24|TAKE BACK RETURN|AIR|ave to slee 1061|135309|5310|4|41|55116.30|0.00|0.05|N|O|1998-06-29|1998-07-02|1998-07-27|NONE|MAIL|s are. ironic theodolites cajole. dep 1061|130014|15|5|50|52200.50|0.04|0.08|N|O|1998-05-25|1998-07-22|1998-06-22|COLLECT COD|AIR|nding excuses are around the e 1061|143492|6007|6|35|53742.15|0.09|0.05|N|O|1998-07-05|1998-07-07|1998-07-30|TAKE BACK RETURN|MAIL|ending requests nag careful 1062|136066|6067|1|38|41878.28|0.00|0.01|N|O|1997-01-27|1997-03-07|1997-02-16|DELIVER IN PERSON|TRUCK|deas. pending acc 1063|95042|61|1|42|43555.68|0.03|0.02|A|F|1994-07-10|1994-05-25|1994-07-26|NONE|RAIL|tructions about the blithely ex 1088|106899|6900|1|30|57176.70|0.07|0.03|A|F|1992-05-22|1992-06-25|1992-06-11|TAKE BACK RETURN|SHIP|long the packages snooze careful 1088|36841|4351|2|11|19556.24|0.06|0.00|A|F|1992-08-30|1992-07-25|1992-09-10|TAKE BACK RETURN|AIR|inal requests. fluffily express theod 1088|180349|2868|3|5|7146.70|0.03|0.07|A|F|1992-07-01|1992-07-25|1992-07-02|NONE|AIR|refully ironic packages. r 1088|123962|3963|4|3|5957.88|0.09|0.03|A|F|1992-06-15|1992-08-02|1992-06-18|DELIVER IN PERSON|MAIL|pecial theodolites 1089|150036|7582|1|47|51043.41|0.05|0.06|N|O|1996-06-26|1996-06-25|1996-07-11|NONE|TRUCK|aggle furiously among the bravely eve 1089|49934|2439|2|35|65937.55|0.03|0.00|N|O|1996-08-14|1996-07-10|1996-08-26|NONE|TRUCK|ly express deposits haggle 1089|25189|5190|3|23|25626.14|0.10|0.05|N|O|1996-06-24|1996-07-25|1996-07-20|DELIVER IN PERSON|AIR|g dolphins. deposits integrate. s 1089|140435|7978|4|1|1475.43|0.01|0.03|N|O|1996-07-08|1996-07-07|1996-07-17|COLLECT COD|RAIL|n courts among the caref 1090|21522|1523|1|5|7217.60|0.02|0.05|N|O|1998-02-19|1997-12-25|1998-02-24|DELIVER IN PERSON|AIR|s above the 1090|112475|7498|2|28|41649.16|0.08|0.08|N|O|1998-02-20|1998-01-03|1998-03-19|NONE|FOB|s cajole above the regular 1091|37233|7234|1|40|46809.20|0.10|0.06|N|O|1996-12-17|1996-10-14|1996-12-24|TAKE BACK RETURN|REG AIR|platelets. regular packag 1092|183493|3494|1|48|75671.52|0.04|0.04|N|O|1995-06-25|1995-04-06|1995-07-18|DELIVER IN PERSON|AIR|unusual accounts. fluffi 1092|152637|183|2|1|1689.63|0.01|0.06|A|F|1995-03-10|1995-04-21|1995-04-06|COLLECT COD|RAIL|lent, pending requests-- requests nag accor 1092|160701|5734|3|28|49327.60|0.05|0.08|R|F|1995-04-08|1995-05-01|1995-05-02|DELIVER IN PERSON|FOB|affix carefully. u 1092|85157|5158|4|2|2284.30|0.05|0.07|R|F|1995-04-09|1995-05-12|1995-05-03|TAKE BACK RETURN|TRUCK|ans. slyly eve 1093|86927|1944|1|7|13397.44|0.04|0.02|N|O|1997-11-24|1997-09-23|1997-11-25|TAKE BACK RETURN|SHIP|bold deposits. blithely ironic depos 1093|176300|3852|2|37|50923.10|0.08|0.04|N|O|1997-11-06|1997-10-08|1997-11-22|COLLECT COD|FOB|le furiously across the carefully sp 1093|60434|435|3|34|47410.62|0.01|0.06|N|O|1997-11-07|1997-09-06|1997-11-28|TAKE BACK RETURN|REG AIR|sits. express accounts play carefully. bol 1094|114492|4493|1|9|13558.41|0.07|0.06|N|O|1997-12-28|1998-03-16|1998-01-18|DELIVER IN PERSON|AIR|as. slyly pe 1095|136083|8597|1|33|36929.64|0.01|0.02|N|O|1995-10-03|1995-09-22|1995-10-13|NONE|MAIL|slyly around the iron 1095|135953|3493|2|24|47734.80|0.04|0.06|N|O|1995-08-24|1995-10-20|1995-09-09|COLLECT COD|TRUCK|packages nod furiously above the carefully 1095|155206|7722|3|13|16395.60|0.06|0.01|N|O|1995-08-24|1995-10-19|1995-09-02|TAKE BACK RETURN|REG AIR|ously even accounts. 
slyly bold a 1095|134380|1920|4|28|39602.64|0.08|0.03|N|O|1995-09-20|1995-11-18|1995-10-02|DELIVER IN PERSON|SHIP| regular pac 1095|111798|9332|5|40|72391.60|0.09|0.03|N|O|1995-10-18|1995-11-14|1995-11-09|NONE|MAIL| bold accounts haggle slyly furiously even 1095|180389|2908|6|37|54367.06|0.07|0.08|N|O|1995-10-04|1995-11-13|1995-10-12|NONE|SHIP|. quickly even dolphins sle 1120|177951|469|1|10|20289.50|0.08|0.05|N|O|1997-12-17|1998-01-21|1997-12-23|DELIVER IN PERSON|MAIL|dependencies. blithel 1120|19045|9046|2|49|47237.96|0.01|0.07|N|O|1998-01-03|1998-02-02|1998-01-09|TAKE BACK RETURN|RAIL|heodolites. quick re 1120|75472|7980|3|21|30396.87|0.06|0.01|N|O|1998-01-11|1998-02-04|1998-01-19|COLLECT COD|REG AIR|s: fluffily even packages c 1120|45653|662|4|22|35170.30|0.09|0.08|N|O|1997-11-15|1998-01-25|1997-12-07|TAKE BACK RETURN|REG AIR|ons. slyly silent requests sleep silent 1120|82435|2436|5|10|14174.30|0.07|0.08|N|O|1997-11-10|1998-02-01|1997-11-28|TAKE BACK RETURN|AIR|ages haggle furiously 1121|167726|5275|1|42|75336.24|0.04|0.05|N|O|1997-03-05|1997-03-18|1997-03-14|DELIVER IN PERSON|SHIP|nts are slyly special packages. f 1121|160276|2793|2|27|36079.29|0.08|0.00|N|O|1997-05-08|1997-03-28|1997-05-14|NONE|MAIL|ly ironic accounts cajole slyly abou 1121|156826|9342|3|10|18828.20|0.00|0.04|N|O|1997-04-17|1997-03-18|1997-05-02|TAKE BACK RETURN|RAIL|dencies. quickly regular theodolites n 1121|165255|2804|4|29|38287.25|0.02|0.01|N|O|1997-03-07|1997-04-02|1997-04-01|DELIVER IN PERSON|REG AIR| use furiously. quickly silent package 1121|29726|4731|5|47|77818.84|0.09|0.03|N|O|1997-04-27|1997-03-28|1997-05-14|COLLECT COD|FOB|ly idle, i 1121|199422|9423|6|50|76071.00|0.06|0.03|N|O|1997-04-21|1997-02-16|1997-04-25|NONE|TRUCK|odolites. slyly even accounts 1121|79080|6602|7|37|39185.96|0.06|0.01|N|O|1997-02-27|1997-03-04|1997-03-02|COLLECT COD|RAIL|special packages. fluffily final requests s 1122|91176|8704|1|8|9337.36|0.10|0.06|N|O|1997-02-02|1997-04-03|1997-02-22|TAKE BACK RETURN|RAIL|c foxes are along the slyly r 1122|181767|6804|2|29|53614.04|0.05|0.04|N|O|1997-05-07|1997-04-07|1997-05-15|COLLECT COD|SHIP|ptotes. quickl 1122|146422|3965|3|25|36710.50|0.09|0.01|N|O|1997-03-21|1997-03-03|1997-04-07|TAKE BACK RETURN|RAIL|d furiously. pinto 1122|105680|8191|4|40|67427.20|0.08|0.08|N|O|1997-02-07|1997-03-25|1997-02-25|NONE|REG AIR|packages sleep after the asym 1122|150352|353|5|15|21035.25|0.05|0.03|N|O|1997-04-15|1997-03-15|1997-05-07|COLLECT COD|SHIP|olve blithely regular, 1122|161814|9363|6|24|45019.44|0.04|0.01|N|O|1997-03-08|1997-02-20|1997-04-05|NONE|RAIL|blithely requests. slyly pending r 1122|299|5300|7|38|45573.02|0.00|0.08|N|O|1997-01-23|1997-04-02|1997-02-16|NONE|TRUCK|t theodolites sleep. even, ironic 1123|11173|8677|1|10|10841.70|0.05|0.08|N|O|1996-11-12|1996-10-04|1996-11-30|NONE|MAIL|ckages are above the depths. 
slyly ir 1123|177599|117|2|39|65387.01|0.03|0.08|N|O|1996-08-25|1996-10-21|1996-09-04|DELIVER IN PERSON|REG AIR|rding to the furiously ironic requests: r 1123|100807|3318|3|38|68696.40|0.03|0.08|N|O|1996-09-23|1996-10-04|1996-09-27|DELIVER IN PERSON|FOB| blithely carefully unusual reques 1124|197704|5262|1|1|1801.70|0.09|0.08|N|O|1998-10-06|1998-10-02|1998-10-30|NONE|REG AIR| instructions cajole qu 1124|5301|302|2|13|15681.90|0.05|0.04|N|O|1998-09-05|1998-10-03|1998-09-30|DELIVER IN PERSON|SHIP|t the slyly 1124|92298|4808|3|35|45160.15|0.10|0.05|N|O|1998-11-25|1998-10-08|1998-12-25|TAKE BACK RETURN|AIR|ut the slyly bold pinto beans; fi 1124|49104|9105|4|25|26327.50|0.08|0.05|N|O|1998-08-05|1998-10-14|1998-08-11|NONE|MAIL|ggle slyly according 1124|74888|7396|5|33|61475.04|0.05|0.04|N|O|1998-10-19|1998-09-17|1998-10-26|TAKE BACK RETURN|SHIP|eposits sleep slyly. stealthily f 1124|26745|1750|6|43|71884.82|0.01|0.03|N|O|1998-09-19|1998-10-28|1998-10-10|COLLECT COD|MAIL|across the 1124|94195|4196|7|1|1189.19|0.09|0.01|N|O|1998-10-07|1998-08-31|1998-10-12|NONE|TRUCK|ly bold accou 1125|132073|7100|1|4|4420.28|0.08|0.02|A|F|1994-12-10|1994-12-28|1994-12-30|NONE|MAIL| quickly express packages a 1125|137772|7773|2|24|43434.48|0.10|0.03|R|F|1995-01-31|1994-12-02|1995-02-20|COLLECT COD|AIR|es about the slyly s 1125|121039|3552|3|26|27560.78|0.05|0.04|A|F|1995-02-24|1995-01-18|1995-03-05|COLLECT COD|TRUCK|l instruction 1125|97823|2842|4|29|52803.78|0.06|0.00|A|F|1994-11-29|1994-12-20|1994-12-10|DELIVER IN PERSON|RAIL| platelets wake against the carefully i 1126|35025|2535|1|44|42240.88|0.08|0.03|N|O|1998-05-07|1998-04-02|1998-05-29|NONE|TRUCK|es. carefully special 1126|57176|2187|2|7|7932.19|0.06|0.01|N|O|1998-05-02|1998-03-22|1998-05-21|COLLECT COD|MAIL|ons. final, unusual 1126|146694|1723|3|14|24369.66|0.07|0.07|N|O|1998-04-17|1998-04-15|1998-05-12|DELIVER IN PERSON|TRUCK|nstructions. blithe 1127|42596|5101|1|35|53850.65|0.02|0.03|N|O|1995-11-25|1995-11-03|1995-12-17|NONE|TRUCK|l instructions boost blithely according 1127|109765|4786|2|38|67440.88|0.09|0.05|N|O|1995-11-07|1995-11-11|1995-11-26|DELIVER IN PERSON|RAIL|. never final packages boost acro 1127|19646|9647|3|29|45403.56|0.09|0.07|N|O|1995-09-20|1995-11-21|1995-10-11|DELIVER IN PERSON|REG AIR|y. blithely r 1127|174304|4305|4|7|9648.10|0.07|0.05|N|O|1995-11-05|1995-11-02|1995-11-11|DELIVER IN PERSON|FOB| idly pending pains 1152|8646|8647|1|23|35756.72|0.06|0.04|A|F|1994-10-14|1994-10-22|1994-10-21|DELIVER IN PERSON|MAIL|equests alongside of the unusual 1152|99604|2114|2|25|40090.00|0.04|0.08|R|F|1994-10-20|1994-09-18|1994-10-28|DELIVER IN PERSON|REG AIR|efully ironic accounts. sly instructions wa 1152|41846|4351|3|6|10727.04|0.07|0.03|A|F|1994-12-07|1994-11-05|1994-12-25|DELIVER IN PERSON|FOB|p furiously; packages above th 1153|85058|2583|1|15|15645.75|0.00|0.08|N|O|1996-04-24|1996-07-17|1996-04-29|TAKE BACK RETURN|SHIP|uctions boost fluffily according to 1153|168992|1509|2|50|103049.50|0.00|0.07|N|O|1996-06-27|1996-07-13|1996-07-05|COLLECT COD|REG AIR|ronic asymptotes nag slyly. 1153|43393|3394|3|25|33409.75|0.00|0.05|N|O|1996-06-18|1996-06-28|1996-07-09|NONE|TRUCK| theodolites 1153|91126|1127|4|43|48036.16|0.01|0.00|N|O|1996-06-09|1996-06-01|1996-07-04|DELIVER IN PERSON|MAIL|special instructions are. unusual, final du 1153|141212|6241|5|45|56394.45|0.00|0.02|N|O|1996-06-18|1996-06-20|1996-07-03|TAKE BACK RETURN|AIR|oss the ex 1153|135360|5361|6|26|36279.36|0.02|0.03|N|O|1996-08-16|1996-07-12|1996-09-08|NONE|MAIL|kages haggle carefully. 
f 1153|191834|4354|7|5|9629.15|0.02|0.03|N|O|1996-05-03|1996-06-12|1996-05-28|TAKE BACK RETURN|FOB|special excuses promi 1154|142724|5239|1|31|54768.32|0.06|0.06|A|F|1992-04-17|1992-04-26|1992-05-17|COLLECT COD|AIR|ithely. final, blithe 1154|147409|4952|2|50|72820.00|0.07|0.06|A|F|1992-04-22|1992-04-21|1992-05-01|NONE|TRUCK|ove the furiously bold Tires 1154|96103|3631|3|5|5495.50|0.09|0.04|A|F|1992-06-07|1992-05-07|1992-07-05|DELIVER IN PERSON|MAIL|the furiously 1154|574|575|4|35|51609.95|0.00|0.07|A|F|1992-03-30|1992-04-02|1992-04-21|DELIVER IN PERSON|TRUCK|the carefully regular pinto beans boost 1154|35462|7966|5|18|25154.28|0.02|0.03|A|F|1992-02-26|1992-03-24|1992-03-20|TAKE BACK RETURN|REG AIR|y regular excuses cajole blithely. fi 1154|195101|7621|6|50|59805.00|0.06|0.03|A|F|1992-03-04|1992-04-01|1992-04-01|TAKE BACK RETURN|TRUCK| even, special 1155|69329|9330|1|4|5193.28|0.09|0.05|N|O|1997-10-19|1997-12-09|1997-11-02|DELIVER IN PERSON|SHIP|ic foxes according to the carefully final 1155|195749|788|2|39|71944.86|0.08|0.05|N|O|1998-01-29|1998-01-03|1998-02-01|COLLECT COD|TRUCK|ckly final pinto beans was. 1155|146710|9225|3|23|40404.33|0.08|0.03|N|O|1997-11-24|1997-11-28|1997-12-06|DELIVER IN PERSON|FOB|ly unusual packages. iro 1155|139205|9206|4|12|14930.40|0.01|0.06|N|O|1997-11-01|1998-01-03|1997-11-19|DELIVER IN PERSON|RAIL|packages do 1155|4313|1814|5|49|59648.19|0.04|0.08|N|O|1997-12-07|1997-12-30|1997-12-08|NONE|AIR|ccounts are alongside of t 1156|86609|1626|1|15|23934.00|0.07|0.06|N|O|1996-12-21|1997-01-03|1997-01-10|TAKE BACK RETURN|AIR|the furiously pen 1156|32488|2489|2|21|29830.08|0.02|0.08|N|O|1996-11-07|1997-01-14|1996-12-03|NONE|AIR|dolphins. fluffily ironic packages sleep re 1156|11149|8653|3|29|30744.06|0.09|0.06|N|O|1997-01-24|1996-12-26|1997-02-04|DELIVER IN PERSON|TRUCK|ts sleep sly 1156|171422|1423|4|42|62723.64|0.02|0.00|N|O|1997-01-18|1997-01-12|1997-02-13|NONE|REG AIR|s. quickly bold pains are 1156|73755|6263|5|49|84708.75|0.04|0.01|N|O|1996-11-16|1996-12-02|1996-12-05|COLLECT COD|AIR|ithely unusual in 1156|194006|1564|6|42|46200.00|0.02|0.06|N|O|1997-01-27|1997-01-09|1997-01-28|DELIVER IN PERSON|MAIL|even requests boost ironic deposits. pe 1156|46115|3628|7|20|21222.20|0.08|0.07|N|O|1997-01-01|1997-01-06|1997-01-16|COLLECT COD|MAIL|deposits sleep bravel 1157|48128|3137|1|16|17217.92|0.06|0.00|N|O|1998-04-12|1998-03-09|1998-04-23|DELIVER IN PERSON|MAIL|tions hang 1157|82041|7058|2|4|4092.16|0.10|0.05|N|O|1998-02-24|1998-03-30|1998-03-24|DELIVER IN PERSON|SHIP|ounts. ironic deposits 1157|47985|5498|3|8|15463.84|0.02|0.00|N|O|1998-03-25|1998-03-16|1998-03-29|NONE|REG AIR|blithely even pa 1157|76907|6908|4|46|86659.40|0.07|0.08|N|O|1998-04-19|1998-03-13|1998-04-23|NONE|FOB|slyly regular excuses. accounts 1157|159850|4881|5|14|26737.90|0.03|0.03|N|O|1998-04-17|1998-03-03|1998-05-01|NONE|FOB|theodolites. 
fluffily re 1158|44218|6723|1|5|5811.05|0.02|0.04|N|O|1996-10-20|1996-07-30|1996-11-14|COLLECT COD|AIR|symptotes along the care 1158|156084|3630|2|23|26221.84|0.00|0.08|N|O|1996-10-21|1996-08-19|1996-10-31|COLLECT COD|MAIL|ularly ironic requests use care 1159|108033|8034|1|39|40600.17|0.01|0.00|A|F|1992-11-20|1992-10-28|1992-12-18|TAKE BACK RETURN|FOB| blithely express reques 1159|95490|509|2|7|10398.43|0.08|0.00|A|F|1992-11-25|1992-10-27|1992-12-20|NONE|AIR|olve somet 1159|97431|9941|3|11|15712.73|0.10|0.03|R|F|1992-12-09|1992-12-07|1992-12-18|DELIVER IN PERSON|MAIL|h furiousl 1184|46728|9233|1|27|45217.44|0.01|0.00|N|O|1998-01-10|1997-12-02|1998-02-06|TAKE BACK RETURN|REG AIR|s wake fluffily. fl 1184|146616|1645|2|4|6650.44|0.04|0.03|N|O|1997-12-25|1998-01-24|1998-01-18|DELIVER IN PERSON|RAIL| express packages. slyly expres 1184|163940|3941|3|7|14027.58|0.05|0.00|N|O|1998-02-14|1998-01-06|1998-03-11|COLLECT COD|TRUCK|ckly warthogs. blithely bold foxes hag 1184|125544|3081|4|3|4708.62|0.02|0.05|N|O|1998-01-15|1997-12-19|1998-02-02|NONE|REG AIR|ar packages. final packages cajol 1185|71925|6940|1|8|15175.36|0.01|0.06|A|F|1992-12-05|1992-10-05|1992-12-28|DELIVER IN PERSON|MAIL|ely according to the furiously regular r 1185|30749|750|2|28|47032.72|0.07|0.06|A|F|1992-09-24|1992-10-07|1992-10-10|DELIVER IN PERSON|REG AIR|ke. slyly regular t 1185|189350|1869|3|12|17272.20|0.05|0.06|R|F|1992-10-12|1992-09-26|1992-11-11|NONE|REG AIR|instructions. daringly pend 1186|2457|2458|1|28|38064.60|0.08|0.07|N|O|1996-12-08|1996-10-17|1996-12-15|TAKE BACK RETURN|TRUCK|ffily spec 1186|91123|6142|2|11|12255.32|0.07|0.05|N|O|1996-10-03|1996-10-21|1996-10-17|DELIVER IN PERSON|AIR|s haggle furiously; slyl 1186|100774|775|3|20|35495.40|0.07|0.07|N|O|1996-08-20|1996-10-23|1996-09-05|COLLECT COD|FOB|ely alongside of the blithel 1186|105917|5918|4|27|51918.57|0.06|0.04|N|O|1996-10-08|1996-11-06|1996-10-09|TAKE BACK RETURN|SHIP|accounts. express, e 1187|177779|5331|1|29|53846.33|0.01|0.04|R|F|1992-12-10|1993-02-09|1992-12-29|TAKE BACK RETURN|RAIL|riously express ac 1187|130451|2965|2|15|22221.75|0.03|0.04|A|F|1992-12-22|1993-01-13|1993-01-01|NONE|TRUCK|ests. foxes wake. carefu 1187|77243|9751|3|40|48809.60|0.08|0.06|R|F|1993-03-05|1992-12-31|1993-03-12|NONE|TRUCK|ar, brave deposits nag blithe 1188|114959|7471|1|2|3947.90|0.00|0.04|N|O|1996-05-22|1996-05-23|1996-06-06|COLLECT COD|RAIL|its breach blit 1188|112024|2025|2|9|9324.18|0.01|0.08|N|O|1996-08-04|1996-06-04|1996-08-19|NONE|REG AIR|ow carefully ironic d 1188|178737|8738|3|41|74444.93|0.07|0.04|N|O|1996-06-29|1996-05-21|1996-07-21|TAKE BACK RETURN|TRUCK|althy packages. fluffily unusual ideas h 1189|50676|677|1|23|37413.41|0.06|0.00|R|F|1994-07-25|1994-06-07|1994-08-02|COLLECT COD|FOB|s. fluffy Tiresias run quickly. bra 1189|104889|2420|2|32|60604.16|0.09|0.02|R|F|1994-05-06|1994-07-03|1994-05-15|TAKE BACK RETURN|FOB|e regular deposits. quickly quiet deposi 1189|56505|9011|3|22|32153.00|0.05|0.03|R|F|1994-06-09|1994-06-29|1994-06-23|DELIVER IN PERSON|TRUCK|quickly unusual platelets lose forges. ca 1190|83788|1313|1|32|56696.96|0.07|0.06|N|O|1997-05-08|1997-04-17|1997-06-01|COLLECT COD|FOB|y final packages? 
slyly even 1191|48288|793|1|29|35852.12|0.00|0.04|N|O|1996-01-24|1996-01-28|1996-02-17|COLLECT COD|AIR| regular pin 1216|96917|4445|1|8|15311.28|0.03|0.04|R|F|1993-02-01|1993-03-06|1993-02-08|TAKE BACK RETURN|TRUCK| of the carefully express 1216|74067|1589|2|48|49970.88|0.10|0.01|R|F|1993-01-17|1993-02-01|1993-02-13|COLLECT COD|SHIP|symptotes use against th 1216|41738|1739|3|18|30235.14|0.00|0.03|A|F|1993-01-20|1993-01-28|1993-02-02|COLLECT COD|MAIL|y final packages nod 1217|59148|4159|1|45|49821.30|0.07|0.02|A|F|1992-07-01|1992-06-23|1992-07-06|COLLECT COD|AIR|riously close ideas 1218|139055|1569|1|16|17504.80|0.04|0.07|A|F|1994-06-26|1994-08-07|1994-06-30|TAKE BACK RETURN|FOB|ven realms be 1218|93393|5903|2|41|56841.99|0.06|0.06|R|F|1994-08-04|1994-08-05|1994-08-11|TAKE BACK RETURN|SHIP|dolphins. theodolites beyond th 1218|47852|5365|3|44|79193.40|0.07|0.06|A|F|1994-10-05|1994-09-03|1994-10-30|COLLECT COD|TRUCK|thely ironic accounts wake slyly 1218|41364|3869|4|1|1305.36|0.01|0.08|R|F|1994-09-15|1994-09-07|1994-10-03|COLLECT COD|TRUCK|press furio 1219|131882|6909|1|6|11483.28|0.08|0.04|N|O|1995-11-13|1995-12-24|1995-11-18|NONE|MAIL|pecial, ironic requ 1219|128599|1112|2|4|6510.36|0.01|0.04|N|O|1995-11-24|1995-11-22|1995-12-07|TAKE BACK RETURN|SHIP|lly quick requests. blithely even h 1220|168916|6465|1|25|49622.75|0.10|0.03|N|O|1996-10-15|1996-11-07|1996-11-06|COLLECT COD|REG AIR| regular orbi 1220|159710|4741|2|36|63709.56|0.01|0.02|N|O|1996-12-10|1996-11-14|1997-01-07|COLLECT COD|SHIP|ar packages. blithely final acc 1220|36807|6808|3|3|5231.40|0.08|0.06|N|O|1996-09-06|1996-11-03|1996-09-10|COLLECT COD|REG AIR| final theodolites. blithely silent 1220|5144|145|4|36|37769.04|0.07|0.03|N|O|1996-12-12|1996-10-03|1996-12-15|TAKE BACK RETURN|TRUCK|unusual, silent pinto beans aga 1220|48992|4001|5|25|48524.75|0.03|0.08|N|O|1996-09-11|1996-10-09|1996-09-25|DELIVER IN PERSON|RAIL|packages affi 1221|80049|2558|1|43|44248.72|0.05|0.05|R|F|1992-06-22|1992-07-15|1992-07-20|DELIVER IN PERSON|FOB|y slyly above the slyly unusual ideas 1221|169470|9471|2|12|18473.64|0.00|0.08|R|F|1992-08-07|1992-06-24|1992-08-13|COLLECT COD|AIR|yly ironic 1221|68229|3242|3|3|3591.66|0.10|0.08|R|F|1992-07-01|1992-06-04|1992-07-27|COLLECT COD|TRUCK|ing to the fluffily 1221|119731|7265|4|41|71779.93|0.06|0.02|A|F|1992-04-28|1992-07-02|1992-05-19|NONE|RAIL|ns. bold deposit 1221|107651|162|5|13|21562.45|0.10|0.00|R|F|1992-08-01|1992-06-29|1992-08-27|TAKE BACK RETURN|AIR|ajole furiously. blithely expres 1221|84249|4250|6|7|8632.68|0.08|0.06|A|F|1992-06-27|1992-06-16|1992-07-23|TAKE BACK RETURN|RAIL|xpress accounts 1222|71266|8788|1|12|14847.12|0.09|0.02|A|F|1993-02-12|1993-03-14|1993-03-12|TAKE BACK RETURN|RAIL|s print permanently unusual packages. 1222|158637|1153|2|12|20347.56|0.08|0.01|A|F|1993-05-05|1993-03-27|1993-05-18|TAKE BACK RETURN|REG AIR| furiously bold instructions 1222|7554|55|3|26|38000.30|0.02|0.08|R|F|1993-02-13|1993-03-20|1993-02-22|TAKE BACK RETURN|MAIL|, even accounts are ironic 1223|99460|9461|1|28|40864.88|0.10|0.06|N|O|1996-08-07|1996-07-24|1996-08-13|TAKE BACK RETURN|MAIL| quickly ironic requests. furious 1248|163061|3062|1|45|50582.70|0.00|0.08|A|F|1992-04-17|1992-03-31|1992-05-13|NONE|RAIL|ter the pending pl 1248|150275|2791|2|37|49034.99|0.06|0.06|R|F|1992-01-26|1992-02-05|1992-02-13|COLLECT COD|TRUCK|. final requests integrate quickly. 
blit 1248|55368|2884|3|26|34407.36|0.09|0.06|A|F|1992-01-16|1992-03-01|1992-02-06|TAKE BACK RETURN|AIR| ironic dependen 1248|155169|5170|4|49|59983.84|0.02|0.01|A|F|1992-04-24|1992-02-18|1992-05-03|TAKE BACK RETURN|AIR|beans run quickly according to the carefu 1248|121465|3978|5|20|29729.20|0.08|0.00|A|F|1992-03-12|1992-03-23|1992-04-07|TAKE BACK RETURN|AIR|nal foxes cajole carefully slyl 1248|61457|6470|6|30|42553.50|0.10|0.01|R|F|1992-02-01|1992-03-24|1992-02-08|TAKE BACK RETURN|MAIL|fily special foxes kindle am 1249|58157|3168|1|49|54642.35|0.07|0.05|A|F|1994-03-03|1994-02-28|1994-03-08|NONE|RAIL|ffily express theodo 1250|1035|1036|1|15|14040.45|0.10|0.06|A|F|1992-11-05|1992-12-17|1992-12-03|TAKE BACK RETURN|SHIP| regular, i 1251|3325|3326|1|37|45447.84|0.08|0.08|N|O|1997-12-21|1998-01-12|1997-12-26|COLLECT COD|AIR|. furiously 1251|77902|7903|2|36|67676.40|0.07|0.04|N|O|1997-11-29|1998-01-07|1997-12-03|TAKE BACK RETURN|RAIL|y ironic Tiresias are slyly furio 1251|98778|6306|3|37|65740.49|0.09|0.02|N|O|1998-01-11|1997-12-01|1998-01-23|DELIVER IN PERSON|RAIL|finally bold requests 1251|149818|7361|4|7|13074.67|0.07|0.00|N|O|1998-01-08|1997-12-27|1998-01-18|COLLECT COD|MAIL|riously pe 1251|187484|2521|5|1|1571.48|0.02|0.03|N|O|1997-12-08|1998-01-06|1998-01-01|DELIVER IN PERSON|REG AIR| use quickly final packages. iron 1252|86435|8944|1|13|18478.59|0.10|0.01|N|O|1997-09-07|1997-09-12|1997-10-01|COLLECT COD|REG AIR|sts dazzle 1252|110885|5908|2|27|51188.76|0.00|0.08|N|O|1997-10-22|1997-10-10|1997-11-10|TAKE BACK RETURN|REG AIR|packages hag 1252|39144|9145|3|19|20579.66|0.07|0.02|N|O|1997-10-13|1997-10-23|1997-10-18|NONE|AIR|ts wake carefully-- packages sleep. quick 1252|91193|3703|4|11|13026.09|0.10|0.01|N|O|1997-10-16|1997-09-22|1997-10-28|COLLECT COD|AIR|s are. slyly final requests among the 1252|78748|8749|5|26|44895.24|0.05|0.05|N|O|1997-08-05|1997-10-24|1997-08-07|DELIVER IN PERSON|SHIP|onic pinto beans haggle furiously 1253|179988|7540|1|14|28951.72|0.00|0.06|R|F|1993-04-03|1993-04-16|1993-04-27|TAKE BACK RETURN|MAIL|lar foxes sleep furiously final, final pack 1253|53958|8969|2|13|24855.35|0.01|0.06|A|F|1993-03-05|1993-04-26|1993-03-08|DELIVER IN PERSON|FOB|al packages 1253|69375|9376|3|22|29576.14|0.05|0.06|A|F|1993-02-23|1993-04-06|1993-03-07|TAKE BACK RETURN|SHIP|telets cajole alongside of the final reques 1253|175589|624|4|23|38285.34|0.09|0.02|R|F|1993-04-18|1993-04-18|1993-05-07|COLLECT COD|FOB| the slyly silent re 1253|113918|6430|5|19|36706.29|0.05|0.05|A|F|1993-04-01|1993-04-22|1993-04-14|TAKE BACK RETURN|AIR|al pinto bea 1254|192385|4905|1|6|8864.28|0.08|0.01|N|O|1996-02-02|1996-03-21|1996-02-29|NONE|REG AIR|lithely even deposits eat! 1254|199414|4453|2|47|71130.27|0.05|0.06|N|O|1996-03-07|1996-02-20|1996-04-05|COLLECT COD|MAIL| platelets cajol 1254|134485|4486|3|35|53181.80|0.05|0.06|N|O|1996-04-08|1996-02-29|1996-04-18|DELIVER IN PERSON|FOB|ckages boost. 
furious warhorses cajole 1255|191801|4321|1|12|22713.60|0.00|0.02|A|F|1994-08-17|1994-06-29|1994-09-04|TAKE BACK RETURN|REG AIR| regular, express accounts are 1255|193712|1270|2|46|83062.66|0.07|0.05|R|F|1994-07-06|1994-07-14|1994-08-05|NONE|MAIL|ons nag qui 1280|128696|3721|1|17|29319.73|0.01|0.01|A|F|1993-02-04|1993-04-10|1993-02-07|NONE|FOB|ructions integrate across the th 1280|188305|5860|2|6|8359.80|0.05|0.06|R|F|1993-03-30|1993-02-16|1993-04-18|DELIVER IN PERSON|AIR|gular deposits 1280|32176|7183|3|13|14406.21|0.03|0.02|R|F|1993-03-06|1993-03-11|1993-03-18|DELIVER IN PERSON|TRUCK|blithely final accounts use evenly 1280|174752|2304|4|5|9133.75|0.06|0.03|R|F|1993-02-03|1993-02-11|1993-02-23|DELIVER IN PERSON|AIR|beans haggle. quickly bold instructions h 1280|51971|4477|5|24|46151.28|0.07|0.02|R|F|1993-03-20|1993-03-01|1993-04-09|COLLECT COD|RAIL|y pending orbits boost after the slyly 1280|65229|242|6|9|10747.98|0.00|0.05|R|F|1993-04-18|1993-03-28|1993-05-04|DELIVER IN PERSON|FOB|usual accou 1280|91590|9118|7|19|30050.21|0.02|0.06|A|F|1993-02-07|1993-02-28|1993-02-12|NONE|TRUCK|lyly along the furiously regular 1281|137813|327|1|33|61076.73|0.07|0.08|R|F|1995-02-01|1995-01-18|1995-03-03|NONE|REG AIR|dencies. thinly final pinto beans wake 1281|6372|1373|2|37|47299.69|0.08|0.03|A|F|1995-03-19|1995-02-02|1995-03-27|NONE|AIR|ounts detect 1281|93625|8644|3|2|3237.24|0.05|0.06|A|F|1994-12-27|1995-01-26|1995-01-21|TAKE BACK RETURN|FOB|ly unusual requests. final reques 1281|153005|5521|4|38|40204.00|0.04|0.06|R|F|1995-03-28|1995-01-11|1995-04-14|TAKE BACK RETURN|MAIL| ideas-- blithely regular 1281|151357|3873|5|13|18308.55|0.03|0.07|A|F|1995-02-06|1995-02-13|1995-02-18|DELIVER IN PERSON|TRUCK|fully final platelets wa 1281|49270|6783|6|4|4877.08|0.07|0.04|R|F|1995-03-15|1995-02-21|1995-03-20|NONE|SHIP|ggle against the even requests. requests 1281|77886|5408|7|43|80146.84|0.10|0.02|R|F|1995-01-28|1995-02-08|1995-02-10|DELIVER IN PERSON|AIR|final accounts. final packages slee 1282|22493|2494|1|14|19816.86|0.04|0.02|R|F|1992-06-29|1992-04-05|1992-07-21|TAKE BACK RETURN|REG AIR|ecial deposit 1282|29129|4134|2|10|10581.20|0.09|0.06|R|F|1992-04-10|1992-04-16|1992-05-01|DELIVER IN PERSON|SHIP|r theodolite 1282|159521|9522|3|19|30029.88|0.01|0.03|R|F|1992-05-07|1992-04-07|1992-05-13|NONE|RAIL|ts x-ray across the furi 1282|58316|8317|4|19|24211.89|0.00|0.05|A|F|1992-06-20|1992-04-17|1992-07-05|DELIVER IN PERSON|REG AIR|nto beans. carefully close theodo 1283|92682|210|1|47|78709.96|0.05|0.03|N|O|1996-10-21|1996-10-29|1996-11-12|DELIVER IN PERSON|TRUCK|even instructions boost slyly blithely 1283|105334|355|2|1|1339.33|0.00|0.08|N|O|1996-10-07|1996-10-12|1996-10-08|NONE|RAIL|d the sauternes. slyly ev 1283|137074|4614|3|18|19999.26|0.02|0.01|N|O|1996-10-14|1996-11-07|1996-10-22|DELIVER IN PERSON|AIR|equests use along the fluff 1283|191999|4519|4|40|83639.60|0.07|0.03|N|O|1996-11-09|1996-11-23|1996-11-28|NONE|MAIL|riously. even, ironic instructions after 1283|123473|5986|5|43|64348.21|0.01|0.04|N|O|1996-09-29|1996-11-19|1996-10-26|TAKE BACK RETURN|RAIL|requests sleep slyly about the 1283|7533|5034|6|30|43215.90|0.06|0.07|N|O|1996-11-22|1996-11-22|1996-12-15|COLLECT COD|TRUCK|t the fluffily 1283|196227|6228|7|21|27787.62|0.04|0.03|N|O|1996-09-12|1996-10-02|1996-10-12|NONE|REG AIR|fully regular 1284|177427|2462|1|49|73716.58|0.00|0.06|N|O|1996-04-11|1996-03-04|1996-04-16|NONE|MAIL|lar packages. 
special packages ac 1284|5304|5305|2|4|4837.20|0.07|0.06|N|O|1996-02-29|1996-02-11|1996-03-01|TAKE BACK RETURN|TRUCK| regular asymptotes. 1284|132592|2593|3|39|63359.01|0.08|0.00|N|O|1996-01-11|1996-02-07|1996-02-05|COLLECT COD|MAIL|even accoun 1284|58252|8253|4|1|1210.25|0.01|0.07|N|O|1996-04-28|1996-04-02|1996-05-08|DELIVER IN PERSON|SHIP|al packages use carefully express de 1284|33328|3329|5|9|11351.88|0.05|0.06|N|O|1996-03-03|1996-03-19|1996-04-01|DELIVER IN PERSON|REG AIR|after the pending 1285|21548|1549|1|12|17634.48|0.00|0.06|A|F|1992-06-21|1992-08-16|1992-07-12|COLLECT COD|MAIL|ss foxes. blithe theodolites cajole slyly 1285|142274|4789|2|45|59232.15|0.01|0.02|R|F|1992-09-05|1992-08-08|1992-10-02|COLLECT COD|REG AIR| special requests haggle blithely. 1285|188148|5703|3|4|4944.56|0.09|0.06|A|F|1992-07-20|1992-08-17|1992-07-26|DELIVER IN PERSON|FOB|l packages sleep slyly quiet i 1285|187439|9958|4|39|59530.77|0.05|0.01|A|F|1992-09-15|1992-08-05|1992-10-05|DELIVER IN PERSON|TRUCK|uctions. car 1285|83541|8558|5|33|50309.82|0.00|0.08|R|F|1992-09-08|1992-08-25|1992-09-16|NONE|SHIP|ites affix 1286|177013|7014|1|49|53410.49|0.08|0.01|R|F|1993-06-24|1993-08-12|1993-06-26|DELIVER IN PERSON|SHIP|gged accoun 1286|48230|735|2|48|56555.04|0.01|0.04|A|F|1993-07-11|1993-07-11|1993-08-01|COLLECT COD|TRUCK|unts alongs 1286|188576|3613|3|11|18310.27|0.03|0.04|R|F|1993-08-08|1993-07-30|1993-09-05|DELIVER IN PERSON|FOB| slyly even packages. requ 1286|183767|3768|4|37|68478.12|0.00|0.02|R|F|1993-05-27|1993-07-11|1993-06-01|COLLECT COD|SHIP|lyly ironic pinto beans cajole furiously s 1286|164048|1597|5|14|15568.56|0.00|0.01|R|F|1993-05-23|1993-08-09|1993-06-01|NONE|REG AIR|blithely bo 1286|145926|3469|6|41|80848.72|0.04|0.05|R|F|1993-08-02|1993-08-06|1993-08-07|TAKE BACK RETURN|FOB| the furiously expre 1287|173172|8207|1|35|43580.95|0.09|0.06|A|F|1994-09-07|1994-09-12|1994-09-30|TAKE BACK RETURN|FOB|s wake unusual grou 1287|94556|9575|2|10|15505.50|0.08|0.03|R|F|1994-07-08|1994-08-28|1994-07-10|TAKE BACK RETURN|RAIL|thely alongside of the unusual, ironic pa 1287|278|279|3|30|35348.10|0.00|0.07|R|F|1994-07-12|1994-09-23|1994-08-07|NONE|RAIL|ar packages. even, even 1287|61652|9171|4|10|16136.50|0.01|0.05|A|F|1994-09-03|1994-08-12|1994-09-16|TAKE BACK RETURN|REG AIR|ding, regular accounts 1287|178903|3938|5|21|41619.90|0.06|0.02|A|F|1994-10-06|1994-09-25|1994-10-16|TAKE BACK RETURN|TRUCK|y quickly bold theodoli 1287|20481|5486|6|26|36438.48|0.03|0.08|R|F|1994-10-03|1994-09-27|1994-10-30|DELIVER IN PERSON|RAIL|egular foxes. theodolites nag along t 1312|80689|5706|1|9|15027.12|0.04|0.08|R|F|1994-07-19|1994-06-29|1994-07-24|TAKE BACK RETURN|MAIL|. furiously 1312|135990|5991|2|28|56727.72|0.06|0.06|A|F|1994-09-09|1994-08-01|1994-10-02|TAKE BACK RETURN|FOB|uriously final frays should use quick 1312|172935|487|3|18|36142.74|0.03|0.07|A|F|1994-09-13|1994-07-08|1994-09-22|TAKE BACK RETURN|MAIL|. slyly ironic 1313|51361|8877|1|48|62993.28|0.01|0.03|A|F|1994-12-20|1994-10-29|1995-01-07|COLLECT COD|MAIL|s are quick 1314|197722|242|1|5|9098.60|0.03|0.01|A|F|1994-05-26|1994-08-06|1994-05-31|TAKE BACK RETURN|AIR|equests nag across the furious 1314|109303|4324|2|39|51179.70|0.01|0.03|R|F|1994-08-09|1994-06-14|1994-08-31|TAKE BACK RETURN|TRUCK| unusual accounts slee 1314|40911|912|3|11|20371.01|0.01|0.04|A|F|1994-05-16|1994-07-30|1994-05-31|COLLECT COD|REG AIR|tegrate furious 1315|95509|8019|1|27|40621.50|0.01|0.03|N|O|1998-07-04|1998-06-13|1998-07-28|NONE|SHIP|latelets. 
fluffily ironic account 1315|15742|3246|2|15|24866.10|0.05|0.01|N|O|1998-07-12|1998-06-10|1998-08-07|COLLECT COD|AIR|. foxes integrate carefully special 1315|167190|4739|3|25|31429.75|0.01|0.08|N|O|1998-06-26|1998-06-10|1998-07-06|TAKE BACK RETURN|FOB|lites. unusual foxes affi 1315|160919|8468|4|19|37618.29|0.02|0.05|N|O|1998-07-05|1998-05-23|1998-08-04|TAKE BACK RETURN|SHIP|nal, regular warhorses about the fu 1315|158532|1048|5|32|50896.96|0.10|0.05|N|O|1998-03-30|1998-06-12|1998-04-25|NONE|SHIP|neath the final p 1316|126429|1454|1|46|66949.32|0.05|0.04|A|F|1994-01-13|1994-01-24|1994-02-03|COLLECT COD|TRUCK|ges haggle of the 1316|78158|666|2|15|17042.25|0.02|0.01|R|F|1994-03-12|1994-03-02|1994-03-14|COLLECT COD|FOB|se. furiously final depo 1316|197244|7245|3|33|44260.92|0.10|0.06|R|F|1994-03-31|1994-01-23|1994-04-20|TAKE BACK RETURN|AIR|manently; blithely special deposits 1316|65047|60|4|15|15180.60|0.00|0.06|R|F|1993-12-17|1994-02-04|1993-12-20|NONE|RAIL|fully express dugouts. furiously silent ide 1316|40746|747|5|40|67469.60|0.01|0.03|R|F|1994-02-04|1994-02-09|1994-02-27|NONE|REG AIR|l dugouts. co 1316|3390|5891|6|7|9053.73|0.05|0.04|A|F|1993-12-09|1994-01-12|1993-12-30|TAKE BACK RETURN|MAIL|. furiously even accounts a 1316|162096|9645|7|8|9264.72|0.10|0.04|A|F|1994-03-26|1994-02-08|1994-04-19|NONE|SHIP|packages against the express requests wa 1317|133799|3800|1|34|62314.86|0.08|0.04|N|O|1995-08-13|1995-08-08|1995-09-10|COLLECT COD|RAIL|deposits boost thinly blithely final id 1317|159586|7132|2|7|11519.06|0.05|0.01|A|F|1995-06-08|1995-08-03|1995-06-16|TAKE BACK RETURN|SHIP| pinto beans according to the final, pend 1317|157179|7180|3|26|32140.42|0.01|0.02|N|O|1995-07-13|1995-06-26|1995-08-06|COLLECT COD|RAIL|leep along th 1317|105156|2687|4|35|40640.25|0.05|0.02|N|O|1995-07-16|1995-07-07|1995-07-22|TAKE BACK RETURN|FOB|r packages impress blithely car 1317|149415|6958|5|36|52718.76|0.02|0.00|N|O|1995-09-03|1995-07-06|1995-09-04|DELIVER IN PERSON|AIR| deposits. quic 1318|113682|1216|1|24|40696.32|0.08|0.06|N|O|1998-09-27|1998-09-15|1998-10-12|TAKE BACK RETURN|AIR|ual, unusual packages. fluffy, iro 1318|45822|8327|2|26|45963.32|0.01|0.03|N|O|1998-09-26|1998-08-09|1998-10-07|DELIVER IN PERSON|FOB|ly. regular, u 1318|128344|857|3|31|42542.54|0.01|0.04|N|O|1998-08-25|1998-07-31|1998-08-31|COLLECT COD|AIR|ve the carefully expr 1319|60918|5931|1|21|39457.11|0.03|0.04|N|O|1996-10-05|1996-12-02|1996-10-28|COLLECT COD|FOB|s: carefully express 1319|36685|1692|2|12|19460.16|0.09|0.05|N|O|1996-11-05|1996-12-12|1996-11-29|DELIVER IN PERSON|TRUCK|packages integrate furiously. expres 1344|140001|5030|1|15|15615.00|0.10|0.07|A|F|1992-06-22|1992-06-24|1992-06-23|TAKE BACK RETURN|MAIL|rding to the blithely ironic theodolite 1344|189888|4925|2|29|57358.52|0.09|0.00|A|F|1992-07-17|1992-06-07|1992-07-21|NONE|REG AIR|ffily quiet foxes wake blithely. slyly 1345|197110|7111|1|49|59148.39|0.08|0.00|A|F|1992-12-27|1993-01-23|1993-01-06|NONE|FOB|sly. furiously final accounts are blithely 1345|11261|6264|2|37|43373.62|0.10|0.07|A|F|1992-11-27|1992-12-11|1992-12-07|COLLECT COD|FOB|e slyly express requests. ironic accounts c 1345|56666|6667|3|31|50302.46|0.08|0.07|R|F|1992-12-02|1992-12-29|1992-12-14|COLLECT COD|REG AIR|. 
slyly silent accounts sublat 1346|159096|1612|1|29|33497.61|0.07|0.05|A|F|1992-08-18|1992-09-15|1992-09-17|TAKE BACK RETURN|REG AIR|the pinto 1346|124499|4500|2|48|73127.52|0.06|0.03|A|F|1992-09-28|1992-07-22|1992-10-13|TAKE BACK RETURN|REG AIR| along the carefully spec 1346|53819|3820|3|13|23046.53|0.10|0.04|A|F|1992-07-22|1992-08-10|1992-08-06|NONE|SHIP|arefully brave deposits into the slyly iro 1346|123555|3556|4|6|9471.30|0.02|0.02|R|F|1992-09-13|1992-07-21|1992-09-27|TAKE BACK RETURN|AIR|inst the furiously final theodolites. caref 1346|186043|6044|5|30|33871.20|0.01|0.07|R|F|1992-10-01|1992-07-22|1992-10-24|NONE|SHIP| nag blithely. unusual, ru 1346|15002|2506|6|45|41265.00|0.02|0.04|A|F|1992-09-11|1992-08-06|1992-09-12|COLLECT COD|FOB|press deposits. 1347|80130|7655|1|45|49955.85|0.02|0.05|N|O|1997-08-24|1997-09-03|1997-09-08|COLLECT COD|AIR|ages wake around t 1347|142381|7410|2|34|48394.92|0.07|0.04|N|O|1997-06-25|1997-09-08|1997-07-24|COLLECT COD|FOB|r packages. f 1347|184871|4872|3|23|44985.01|0.03|0.04|N|O|1997-07-31|1997-08-25|1997-08-21|COLLECT COD|SHIP|ronic pinto beans. express reques 1347|112077|4589|4|28|30493.96|0.01|0.00|N|O|1997-07-30|1997-07-22|1997-08-18|TAKE BACK RETURN|FOB|foxes after the blithely special i 1347|64173|4174|5|9|10234.53|0.01|0.03|N|O|1997-08-28|1997-09-16|1997-09-26|DELIVER IN PERSON|AIR| detect blithely above the fina 1347|152357|7388|6|21|29596.35|0.06|0.04|N|O|1997-10-10|1997-08-16|1997-11-02|NONE|FOB|g pinto beans affix car 1347|50283|7799|7|10|12332.80|0.02|0.07|N|O|1997-07-04|1997-07-23|1997-07-05|DELIVER IN PERSON|SHIP|y ironic pin 1348|94296|6806|1|13|16773.77|0.01|0.01|N|O|1998-04-28|1998-06-05|1998-05-12|TAKE BACK RETURN|SHIP| blithely r 1348|21908|9415|2|41|75025.90|0.07|0.03|N|O|1998-05-02|1998-05-26|1998-05-09|COLLECT COD|RAIL|kages. platelets about the ca 1348|198221|8222|3|40|52768.80|0.07|0.05|N|O|1998-08-14|1998-07-10|1998-08-27|COLLECT COD|AIR|fter the regu 1348|97544|2563|4|2|3083.08|0.01|0.04|N|O|1998-05-30|1998-06-20|1998-06-05|COLLECT COD|MAIL|lly final packages use fluffily express ac 1349|180575|3094|1|1|1655.57|0.06|0.03|N|O|1998-01-07|1998-01-14|1998-02-03|COLLECT COD|REG AIR| express inst 1349|117686|198|2|45|76665.60|0.03|0.02|N|O|1997-12-24|1998-01-17|1997-12-28|NONE|AIR| ironic, unusual deposits wake carefu 1350|53683|8694|1|21|34370.28|0.04|0.04|A|F|1993-12-17|1993-10-17|1993-12-25|COLLECT COD|REG AIR|lyly above the evenly 1350|43604|3605|2|32|49523.20|0.03|0.00|R|F|1993-11-18|1993-09-30|1993-12-16|COLLECT COD|MAIL|ic, final 1351|107227|7228|1|25|30855.50|0.06|0.04|N|O|1998-06-02|1998-05-25|1998-06-22|COLLECT COD|SHIP|iously regul 1376|168528|1045|1|22|35123.44|0.01|0.03|N|O|1997-08-05|1997-07-08|1997-09-03|NONE|REG AIR|inst the final, pending 1377|153593|1139|1|5|8232.95|0.06|0.05|N|O|1998-05-06|1998-07-08|1998-06-01|TAKE BACK RETURN|FOB| final, final grouches. accoun 1377|32599|5103|2|3|4594.77|0.10|0.04|N|O|1998-04-30|1998-07-02|1998-05-14|DELIVER IN PERSON|REG AIR|yly enticing requ 1377|83378|8395|3|26|35395.62|0.07|0.07|N|O|1998-05-28|1998-06-11|1998-06-25|COLLECT COD|SHIP|egular deposits. quickly regular acco 1377|120024|7561|4|39|40716.78|0.00|0.03|N|O|1998-07-27|1998-07-18|1998-08-13|DELIVER IN PERSON|SHIP|e ironic, regular requests. 
carefully 1377|32294|4798|5|19|23299.51|0.10|0.00|N|O|1998-06-20|1998-06-27|1998-07-20|NONE|AIR|ught to are bold foxes 1377|153755|1301|6|17|30748.75|0.03|0.04|N|O|1998-06-19|1998-07-20|1998-07-14|NONE|REG AIR|s must have to mold b 1378|196057|1096|1|34|39203.70|0.09|0.07|N|O|1996-07-08|1996-04-23|1996-07-09|COLLECT COD|RAIL|le furiously slyly final accounts. careful 1378|123358|5871|2|18|24864.30|0.05|0.02|N|O|1996-06-19|1996-05-16|1996-06-21|DELIVER IN PERSON|RAIL| theodolites. i 1378|72873|2874|3|11|20304.57|0.10|0.03|N|O|1996-06-07|1996-05-09|1996-07-05|COLLECT COD|TRUCK| blithely express hoc 1378|170689|690|4|12|21116.16|0.02|0.06|N|O|1996-06-16|1996-05-23|1996-07-09|COLLECT COD|SHIP|notornis. b 1378|155233|5234|5|9|11594.07|0.06|0.05|N|O|1996-04-20|1996-04-13|1996-05-09|COLLECT COD|REG AIR|e carefully. carefully iron 1378|193453|5973|6|29|44847.05|0.05|0.05|N|O|1996-04-15|1996-04-23|1996-05-14|NONE|REG AIR|ual packages are furiously blith 1379|72894|5402|1|13|24269.57|0.04|0.01|N|O|1998-06-08|1998-07-13|1998-06-16|NONE|AIR|ully across the furiously iron 1379|117107|9619|2|50|56205.00|0.07|0.08|N|O|1998-08-31|1998-07-13|1998-09-02|TAKE BACK RETURN|FOB|olphins. ca 1379|12350|4852|3|24|30296.40|0.05|0.02|N|O|1998-07-06|1998-07-09|1998-07-29|DELIVER IN PERSON|MAIL|ages cajole carefully idly express re 1380|148197|3226|1|6|7471.14|0.00|0.04|N|O|1996-08-06|1996-10-01|1996-08-14|NONE|RAIL|e foxes. slyly specia 1380|140792|5821|2|40|73311.60|0.02|0.02|N|O|1996-10-01|1996-08-14|1996-10-20|COLLECT COD|RAIL|ly final frets. ironic, 1380|77746|7747|3|15|25856.10|0.05|0.02|N|O|1996-07-14|1996-08-12|1996-08-03|NONE|FOB|riously ironic foxes aff 1380|60776|3283|4|33|57313.41|0.04|0.07|N|O|1996-08-23|1996-10-01|1996-09-18|TAKE BACK RETURN|SHIP|e ironic, even excuses haggle 1381|143074|5589|1|47|52502.29|0.08|0.04|N|O|1998-09-22|1998-08-12|1998-10-12|DELIVER IN PERSON|AIR|ly ironic deposits 1381|33128|5632|2|12|12733.44|0.07|0.08|N|O|1998-08-13|1998-08-12|1998-08-28|TAKE BACK RETURN|AIR| furiously regular package 1382|161373|1374|1|18|25818.66|0.08|0.03|R|F|1993-08-30|1993-10-19|1993-09-03|DELIVER IN PERSON|AIR|hely regular deposits. fluffy s 1382|180829|830|2|29|55384.78|0.08|0.04|A|F|1993-10-08|1993-11-11|1993-10-10|COLLECT COD|FOB| haggle: closely even asymptot 1382|177866|2901|3|43|83585.98|0.10|0.04|A|F|1993-09-02|1993-10-06|1993-09-15|DELIVER IN PERSON|AIR|ress deposits. slyly ironic foxes are blit 1382|180589|8144|4|11|18365.38|0.04|0.04|R|F|1993-09-17|1993-09-29|1993-09-21|NONE|SHIP|furiously unusual packages play quickly 1382|156162|6163|5|31|37762.96|0.07|0.03|R|F|1993-10-26|1993-10-15|1993-11-09|TAKE BACK RETURN|FOB|hely regular dependencies. f 1382|9855|4856|6|38|67064.30|0.07|0.07|R|F|1993-11-17|1993-09-28|1993-11-20|COLLECT COD|SHIP|ake pending pinto beans. s 1382|22788|2789|7|5|8553.90|0.07|0.01|R|F|1993-10-02|1993-09-29|1993-10-12|DELIVER IN PERSON|REG AIR|ter the carefully final excuses. blit 1383|192894|452|1|14|27816.46|0.07|0.06|A|F|1993-08-25|1993-07-09|1993-09-12|DELIVER IN PERSON|RAIL|ole carefully silent requests. car 1383|160122|2639|2|19|22460.28|0.06|0.04|R|F|1993-05-24|1993-07-07|1993-06-14|NONE|AIR|lyly unusual accounts sle 1408|147902|5445|1|29|56547.10|0.03|0.04|N|O|1998-03-12|1998-02-14|1998-03-17|COLLECT COD|MAIL|en accounts grow furiousl 1408|172326|7361|2|7|9788.24|0.05|0.06|N|O|1998-01-14|1998-03-21|1998-01-29|COLLECT COD|AIR|fully final instructions. 
theodolites ca 1408|75549|8057|3|11|16769.94|0.00|0.03|N|O|1998-04-04|1998-01-29|1998-04-18|NONE|REG AIR|y even accounts thrash care 1408|147092|9607|4|20|22781.80|0.06|0.00|N|O|1998-04-21|1998-01-25|1998-05-12|DELIVER IN PERSON|TRUCK| blithely fluffi 1408|169688|9689|5|41|72064.88|0.02|0.06|N|O|1998-02-25|1998-02-03|1998-03-13|COLLECT COD|REG AIR|ep along the fina 1408|133484|1024|6|42|63734.16|0.05|0.08|N|O|1998-01-30|1998-02-07|1998-02-18|TAKE BACK RETURN|REG AIR|even packages. even accounts cajole 1408|54519|4520|7|26|38311.26|0.00|0.00|N|O|1998-03-19|1998-03-14|1998-04-01|COLLECT COD|RAIL|ic foxes ca 1409|98868|1378|1|23|42937.78|0.01|0.03|A|F|1993-04-18|1993-02-25|1993-05-06|DELIVER IN PERSON|FOB|ions. slyly ironic packages wake quick 1409|64412|9425|2|36|49550.76|0.09|0.02|A|F|1993-01-27|1993-01-31|1993-02-07|COLLECT COD|FOB|ncies sleep carefully r 1409|159012|9013|3|17|18207.17|0.07|0.00|R|F|1993-04-15|1993-03-01|1993-04-29|NONE|REG AIR|pending accounts poach. care 1410|120271|5296|1|15|19369.05|0.06|0.05|N|O|1997-05-25|1997-07-08|1997-06-15|NONE|SHIP| bold packages are fluf 1410|178206|724|2|18|23115.60|0.03|0.00|N|O|1997-06-03|1997-05-17|1997-06-07|TAKE BACK RETURN|RAIL|gle furiously fluffily regular requests 1410|108109|3130|3|37|41332.70|0.02|0.01|N|O|1997-04-17|1997-06-18|1997-04-19|COLLECT COD|TRUCK|to beans b 1410|187510|5065|4|22|35145.22|0.10|0.00|N|O|1997-07-31|1997-05-17|1997-08-19|TAKE BACK RETURN|RAIL|gular account 1410|65802|3321|5|25|44195.00|0.09|0.02|N|O|1997-05-07|1997-07-10|1997-05-16|NONE|REG AIR|unts haggle against the furiously fina 1411|16321|3825|1|9|11135.88|0.06|0.04|A|F|1995-03-08|1995-03-04|1995-03-11|DELIVER IN PERSON|AIR|accounts. furiou 1411|106434|6435|2|26|37451.18|0.02|0.02|A|F|1995-04-12|1995-01-24|1995-05-03|TAKE BACK RETURN|TRUCK|c packages. 1411|26013|1018|3|37|34743.37|0.00|0.06|A|F|1995-02-27|1995-03-02|1995-03-24|NONE|MAIL|d excuses. furiously final pear 1411|199946|4985|4|20|40918.80|0.01|0.03|R|F|1995-04-06|1995-03-16|1995-04-17|COLLECT COD|FOB|s against the 1411|82816|7833|5|46|82745.26|0.08|0.05|A|F|1995-04-03|1995-01-20|1995-04-05|DELIVER IN PERSON|REG AIR|ly daring instructions 1411|76148|1163|6|30|33724.20|0.09|0.04|A|F|1995-01-12|1995-02-01|1995-01-23|DELIVER IN PERSON|MAIL|ious foxes wake courts. caref 1412|57200|2211|1|37|42816.40|0.06|0.01|A|F|1993-04-10|1993-04-19|1993-04-12|DELIVER IN PERSON|RAIL|hely express excuses are 1412|155021|52|2|20|21520.40|0.10|0.05|A|F|1993-07-04|1993-05-18|1993-07-22|DELIVER IN PERSON|REG AIR|odolites sleep ironically 1412|22620|7625|3|2|3085.24|0.10|0.07|R|F|1993-04-01|1993-05-03|1993-04-12|DELIVER IN PERSON|REG AIR|s among the requests are a 1412|166155|6156|4|11|13432.65|0.05|0.07|R|F|1993-05-27|1993-05-30|1993-06-07|DELIVER IN PERSON|MAIL|en packages. regular packages dete 1412|157647|163|5|11|18751.04|0.08|0.06|A|F|1993-03-30|1993-05-25|1993-04-21|NONE|FOB|se slyly. special, unusual accounts nag bl 1413|177936|7937|1|18|36250.74|0.08|0.05|N|O|1997-10-11|1997-08-17|1997-10-25|NONE|FOB|yly bold packages haggle quickly acr 1413|164292|1841|2|49|66458.21|0.07|0.06|N|O|1997-08-28|1997-08-23|1997-09-12|DELIVER IN PERSON|MAIL|nstructions br 1413|41545|4050|3|6|8919.24|0.04|0.02|N|O|1997-09-07|1997-07-30|1997-09-21|TAKE BACK RETURN|MAIL|lithely excuses. 
f 1414|37241|4751|1|39|45951.36|0.10|0.03|N|O|1995-09-22|1995-09-30|1995-10-07|NONE|MAIL|quickly aro 1414|106423|6424|2|4|5717.68|0.02|0.05|N|O|1995-09-16|1995-11-01|1995-10-02|COLLECT COD|AIR| haggle quickly 1415|148235|8236|1|25|32080.75|0.06|0.00|A|F|1994-09-03|1994-07-12|1994-09-13|DELIVER IN PERSON|RAIL|ect never fluff 1440|192213|7252|1|3|3915.63|0.06|0.01|N|O|1995-10-30|1995-10-17|1995-11-08|COLLECT COD|SHIP|instructions boost. fluffily regul 1440|113748|1282|2|46|81040.04|0.02|0.03|N|O|1995-09-21|1995-10-19|1995-10-19|NONE|RAIL|blithely even instructions. 1441|143130|8159|1|5|5865.65|0.04|0.01|N|O|1997-05-17|1997-05-11|1997-05-30|COLLECT COD|MAIL|egular courts. fluffily even grouches 1441|176636|9154|2|5|8563.15|0.02|0.05|N|O|1997-04-25|1997-04-16|1997-05-23|COLLECT COD|FOB|he quickly enticing pac 1441|117735|2758|3|14|24538.22|0.01|0.03|N|O|1997-06-30|1997-04-29|1997-07-24|DELIVER IN PERSON|REG AIR|special requests ha 1441|159879|2395|4|37|71738.19|0.01|0.00|N|O|1997-04-26|1997-04-27|1997-04-29|NONE|REG AIR|accounts. slyly special dolphins b 1441|71377|8899|5|34|45844.58|0.09|0.00|N|O|1997-06-12|1997-05-11|1997-06-29|TAKE BACK RETURN|RAIL|e carefully. blithely ironic dep 1441|24329|9334|6|15|18799.80|0.09|0.08|N|O|1997-05-21|1997-05-06|1997-06-04|NONE|REG AIR| dependencies-- cour 1441|95795|3323|7|50|89539.50|0.03|0.01|N|O|1997-06-07|1997-05-12|1997-06-08|NONE|SHIP| requests. blithely e 1442|25080|85|1|8|8040.64|0.05|0.01|A|F|1994-10-31|1994-09-04|1994-11-25|COLLECT COD|AIR|c deposits haggle after the even 1443|33679|6183|1|47|75795.49|0.04|0.06|N|O|1997-02-05|1997-02-02|1997-03-03|NONE|RAIL|carefully ironic requests sl 1444|169552|7101|1|42|68105.10|0.01|0.02|R|F|1994-12-22|1995-03-03|1994-12-31|NONE|SHIP|ly bold packages boost regular ideas. spe 1444|56665|1676|2|34|55136.44|0.04|0.08|A|F|1995-02-22|1995-02-15|1995-03-19|TAKE BACK RETURN|AIR|y. doggedly pend 1444|154729|7245|3|34|60646.48|0.02|0.07|R|F|1994-12-17|1995-01-12|1995-01-03|COLLECT COD|AIR|ular accounts 1444|118714|3737|4|6|10396.26|0.06|0.03|A|F|1995-01-07|1995-03-05|1995-01-17|COLLECT COD|RAIL|al accounts. br 1444|19173|9174|5|35|38225.95|0.02|0.05|A|F|1995-02-25|1995-03-05|1995-03-24|DELIVER IN PERSON|SHIP|aggle furiou 1444|32266|7273|6|42|50326.92|0.00|0.02|A|F|1994-12-16|1995-02-18|1994-12-22|DELIVER IN PERSON|RAIL|ss requests. ironic ideas wake above 1444|81925|1926|7|12|22883.04|0.00|0.03|R|F|1994-12-23|1995-01-15|1995-01-13|COLLECT COD|TRUCK|ly among the bol 1445|99368|9369|1|24|32816.64|0.01|0.00|A|F|1995-02-21|1995-02-22|1995-03-18|DELIVER IN PERSON|SHIP|al accounts use furiously a 1445|66877|6878|2|48|88505.76|0.10|0.02|A|F|1995-02-28|1995-03-16|1995-03-12|COLLECT COD|MAIL|. final ideas are carefully dar 1445|191125|3645|3|7|8512.84|0.10|0.04|A|F|1995-04-25|1995-02-25|1995-05-10|NONE|SHIP|structions: slyly regular re 1445|27878|5385|4|17|30699.79|0.04|0.07|A|F|1995-04-02|1995-04-04|1995-05-01|COLLECT COD|FOB|ges. furiously regular pint 1445|134431|6945|5|24|35170.32|0.10|0.06|R|F|1995-04-23|1995-02-16|1995-05-18|NONE|REG AIR|rate after the carefully reg 1445|167046|7047|6|39|43408.56|0.03|0.02|A|F|1995-02-05|1995-02-20|1995-02-06|NONE|MAIL|ully unusual reques 1446|71867|1868|1|31|57004.66|0.10|0.02|N|O|1998-05-01|1998-05-17|1998-05-30|NONE|REG AIR|. slyly reg 1447|166076|1109|1|19|21699.33|0.06|0.04|A|F|1993-01-31|1992-12-07|1993-02-04|COLLECT COD|MAIL|. quickly ironic 1447|31049|6056|2|6|5880.24|0.01|0.05|A|F|1992-10-24|1992-12-10|1992-11-05|DELIVER IN PERSON|AIR|as! 
regular packages poach above the 1447|38700|6210|3|9|14748.30|0.04|0.00|R|F|1992-11-15|1993-01-07|1992-11-29|DELIVER IN PERSON|MAIL|counts wake s 1447|21725|9232|4|8|13173.76|0.09|0.08|R|F|1992-11-20|1993-01-12|1992-12-14|COLLECT COD|FOB|ost carefully 1447|129435|9436|5|23|33681.89|0.02|0.07|A|F|1992-12-07|1992-12-25|1993-01-06|TAKE BACK RETURN|AIR| dazzle quickly deposits. f 1447|199588|4627|6|41|69190.78|0.08|0.02|R|F|1993-01-06|1993-01-05|1993-01-13|TAKE BACK RETURN|MAIL|rts boost s 1472|7243|4744|1|36|41408.64|0.04|0.05|N|O|1996-11-06|1996-11-13|1996-11-12|COLLECT COD|SHIP|riously silent deposits to the pending d 1472|132029|7056|2|26|27586.52|0.03|0.05|N|O|1996-11-08|1996-11-13|1996-12-02|DELIVER IN PERSON|FOB|ic packages w 1472|567|8068|3|6|8805.36|0.08|0.01|N|O|1996-10-24|1996-11-19|1996-11-23|COLLECT COD|FOB|onic theodolites hinder slyly slyly r 1473|53482|8493|1|50|71774.00|0.04|0.03|N|O|1997-05-05|1997-05-20|1997-05-09|NONE|TRUCK|requests wake express deposits. special, ir 1473|67401|4920|2|32|43788.80|0.00|0.08|N|O|1997-04-18|1997-05-12|1997-05-10|DELIVER IN PERSON|REG AIR|out the packages lose furiously ab 1474|14918|2422|1|5|9164.55|0.05|0.04|A|F|1995-04-22|1995-02-20|1995-05-06|COLLECT COD|SHIP|ully final a 1474|122645|5158|2|30|50029.20|0.04|0.02|A|F|1995-03-23|1995-02-11|1995-04-17|DELIVER IN PERSON|TRUCK|usly. evenly express 1474|91052|6071|3|18|18774.90|0.06|0.02|A|F|1995-01-23|1995-03-28|1995-02-03|NONE|RAIL|after the special 1475|167997|5546|1|15|30974.85|0.08|0.06|N|O|1998-02-12|1997-12-17|1998-03-02|TAKE BACK RETURN|SHIP|xpress requests haggle after the final, fi 1475|117282|7283|2|18|23387.04|0.07|0.00|N|O|1998-03-08|1998-01-18|1998-03-10|TAKE BACK RETURN|AIR|al deposits use. ironic packages along the 1475|143964|6479|3|30|60238.80|0.03|0.02|N|O|1998-03-11|1997-12-30|1998-03-15|COLLECT COD|REG AIR| regular theodolites mold across th 1475|186972|6973|4|50|102948.50|0.03|0.05|N|O|1997-12-14|1997-12-13|1997-12-21|COLLECT COD|AIR|. slyly bold re 1475|31490|6497|5|33|46909.17|0.01|0.06|N|O|1998-01-02|1998-01-27|1998-01-11|NONE|FOB|quickly fluffy 1475|49379|1884|6|12|15940.44|0.04|0.04|N|O|1998-01-09|1997-12-30|1998-01-23|NONE|TRUCK|arefully-- excuses sublate 1475|111789|1790|7|23|41417.94|0.02|0.00|N|O|1998-02-13|1998-02-05|1998-03-08|NONE|TRUCK|hely regular hocke 1476|30547|3051|1|20|29550.80|0.02|0.03|N|O|1996-08-11|1996-09-18|1996-08-26|TAKE BACK RETURN|AIR|. bold deposits are carefully amo 1477|71965|6980|1|31|60045.76|0.00|0.06|N|O|1997-12-16|1997-09-30|1997-12-17|COLLECT COD|RAIL| requests. fluffily final 1477|109605|7136|2|8|12916.80|0.09|0.05|N|O|1997-10-25|1997-10-18|1997-11-16|COLLECT COD|MAIL|ironic realms wake unusual, even ac 1477|124479|4480|3|42|63145.74|0.06|0.00|N|O|1997-11-02|1997-11-02|1997-11-20|DELIVER IN PERSON|SHIP|lithely after the ir 1477|106201|6202|4|32|38630.40|0.05|0.08|N|O|1997-09-12|1997-10-26|1997-10-12|TAKE BACK RETURN|AIR|; quickly regula 1477|114850|4851|5|41|76458.85|0.04|0.06|N|O|1997-12-16|1997-10-31|1998-01-12|DELIVER IN PERSON|REG AIR|y. final pearls kindle. 
accounts 1477|68931|3944|6|49|93096.57|0.06|0.00|N|O|1997-11-18|1997-11-06|1997-11-27|COLLECT COD|FOB|ise according to the sly, bold p 1477|119012|1524|7|33|34023.33|0.06|0.00|N|O|1997-11-12|1997-11-06|1997-11-24|DELIVER IN PERSON|TRUCK|yly regular p 1478|33650|3651|1|21|33256.65|0.00|0.06|N|O|1997-09-20|1997-10-25|1997-10-06|TAKE BACK RETURN|MAIL| fluffily pending acc 1479|148818|1333|1|33|61604.73|0.10|0.01|N|O|1996-03-12|1996-02-28|1996-03-31|DELIVER IN PERSON|FOB| carefully special courts affix. fluff 1504|81389|3898|1|42|57555.96|0.02|0.03|R|F|1992-10-18|1992-10-14|1992-11-10|TAKE BACK RETURN|FOB|ep. carefully ironic excuses haggle quickl 1504|102385|9916|2|22|30522.36|0.04|0.03|A|F|1992-09-09|1992-10-29|1992-09-10|NONE|REG AIR| accounts sleep. furiou 1504|177148|9666|3|9|11026.26|0.07|0.02|R|F|1992-11-02|1992-10-12|1992-11-15|TAKE BACK RETURN|RAIL|y slyly regular courts. 1504|114073|9096|4|10|10870.70|0.04|0.07|A|F|1992-09-22|1992-10-22|1992-10-13|TAKE BACK RETURN|TRUCK|final theodolites. furiously e 1504|19772|7276|5|7|11842.39|0.02|0.00|R|F|1992-11-20|1992-11-23|1992-12-13|COLLECT COD|MAIL|y final packa 1505|119898|4921|1|4|7671.56|0.09|0.00|A|F|1992-12-14|1992-11-11|1993-01-02|COLLECT COD|SHIP|side of the s 1505|122702|5215|2|50|86235.00|0.00|0.02|R|F|1992-11-22|1992-09-24|1992-11-26|TAKE BACK RETURN|FOB|lyly special platelets. requests ar 1506|132024|2025|1|46|48576.92|0.04|0.05|R|F|1993-01-18|1992-11-11|1993-02-09|COLLECT COD|REG AIR|sits whithout the blithely ironic packages 1506|113893|1427|2|30|57206.70|0.07|0.02|A|F|1992-11-22|1992-10-25|1992-12-04|DELIVER IN PERSON|FOB|deposits cajole 1506|190697|3217|3|28|50055.32|0.10|0.06|A|F|1992-09-22|1992-11-19|1992-10-09|TAKE BACK RETURN|AIR| unwind carefully: theodolit 1506|27798|2803|4|37|63854.23|0.00|0.03|R|F|1992-11-04|1992-12-01|1992-11-23|TAKE BACK RETURN|TRUCK|carefully bold dolphins. accounts su 1506|194708|9747|5|15|27040.50|0.05|0.00|R|F|1992-09-24|1992-11-11|1992-10-05|NONE|REG AIR| carefully fluffy packages-- caref 1506|49549|4558|6|38|56944.52|0.05|0.02|R|F|1992-12-02|1992-12-19|1992-12-29|NONE|REG AIR|xpress, regular excuse 1506|168077|3110|7|4|4580.28|0.07|0.00|R|F|1993-01-03|1992-12-06|1993-01-05|COLLECT COD|REG AIR|posits. furiou 1507|67094|2107|1|25|26527.25|0.01|0.08|R|F|1994-01-07|1994-01-06|1994-01-11|NONE|RAIL|xes. slyly busy de 1507|39977|2481|2|33|63260.01|0.04|0.02|A|F|1993-10-29|1993-12-23|1993-11-14|DELIVER IN PERSON|REG AIR| asymptotes nag furiously above t 1507|85770|8279|3|39|68475.03|0.03|0.07|R|F|1993-11-04|1993-12-16|1993-12-03|TAKE BACK RETURN|REG AIR|ly even instructions. 1508|50350|7866|1|16|20805.60|0.02|0.06|N|O|1998-06-21|1998-05-30|1998-07-11|COLLECT COD|MAIL|riously across the ironic, unusua 1508|24829|9834|2|20|35076.40|0.06|0.01|N|O|1998-04-17|1998-06-11|1998-05-17|DELIVER IN PERSON|MAIL|nic platelets. carefully final fra 1508|92173|9701|3|43|50102.31|0.01|0.02|N|O|1998-06-01|1998-06-24|1998-06-03|TAKE BACK RETURN|TRUCK|ndencies h 1508|147102|4645|4|1|1149.10|0.02|0.02|N|O|1998-07-13|1998-06-03|1998-07-17|TAKE BACK RETURN|AIR|s the blithely bold instruction 1508|134671|9698|5|29|49464.43|0.02|0.00|N|O|1998-08-03|1998-07-08|1998-08-22|COLLECT COD|RAIL|r instructions. 
carefully 1508|2673|174|6|5|7878.35|0.06|0.08|N|O|1998-05-22|1998-07-06|1998-06-04|COLLECT COD|REG AIR|cording to the furiously ironic depe 1508|116953|6954|7|38|74858.10|0.03|0.06|N|O|1998-04-30|1998-06-23|1998-05-18|DELIVER IN PERSON|RAIL|tes wake furiously regular w 1509|27109|2114|1|14|14505.40|0.04|0.01|A|F|1993-10-04|1993-09-25|1993-10-21|NONE|TRUCK|nal realms 1509|10883|884|2|46|82518.48|0.08|0.02|A|F|1993-10-15|1993-10-04|1993-11-01|TAKE BACK RETURN|FOB|uriously regula 1509|106243|6244|3|17|21237.08|0.06|0.05|A|F|1993-07-25|1993-08-28|1993-08-19|DELIVER IN PERSON|AIR| furiously. blithely regular ideas haggle c 1509|19486|1988|4|11|15460.28|0.03|0.08|R|F|1993-11-04|1993-10-03|1993-11-14|TAKE BACK RETURN|FOB|ily ironic packages nod carefully. 1509|89534|7059|5|37|56370.61|0.01|0.08|A|F|1993-08-31|1993-09-10|1993-09-24|NONE|FOB|he slyly even deposits wake a 1509|186349|3904|6|31|44495.54|0.04|0.03|A|F|1993-07-14|1993-08-21|1993-08-06|COLLECT COD|SHIP|ic deposits cajole carefully. quickly bold 1509|156167|1198|7|27|33025.32|0.01|0.01|A|F|1993-09-29|1993-09-08|1993-10-04|TAKE BACK RETURN|FOB|lithely after the 1510|97465|4993|1|11|16087.06|0.09|0.04|N|O|1996-09-23|1996-12-03|1996-10-01|DELIVER IN PERSON|RAIL|e of the unusual accounts. stealthy deposit 1510|83096|621|2|24|25898.16|0.05|0.04|N|O|1996-10-07|1996-10-22|1996-11-03|DELIVER IN PERSON|REG AIR|yly brave theod 1510|189992|2511|3|36|74951.64|0.07|0.02|N|O|1996-10-02|1996-11-23|1996-10-05|NONE|SHIP|old deposits along the carefully 1510|181428|8983|4|8|12075.36|0.01|0.08|N|O|1996-10-26|1996-11-07|1996-10-30|TAKE BACK RETURN|RAIL|blithely express 1510|58453|8454|5|27|38109.15|0.08|0.06|N|O|1996-10-20|1996-12-05|1996-11-02|NONE|MAIL|he blithely regular req 1510|13698|3699|6|3|4835.07|0.05|0.02|N|O|1996-10-31|1996-12-03|1996-11-13|COLLECT COD|RAIL|along the slyly regular pin 1510|21462|6467|7|50|69173.00|0.04|0.05|N|O|1996-11-01|1996-10-17|1996-11-28|NONE|MAIL|even packages. carefully regular fo 1511|97166|4694|1|29|33731.64|0.01|0.04|N|O|1997-03-17|1997-02-11|1997-03-27|DELIVER IN PERSON|AIR|s cajole furiously against 1511|61718|6731|2|32|53750.72|0.04|0.01|N|O|1997-01-06|1997-03-21|1997-01-26|TAKE BACK RETURN|REG AIR| deposits. carefully ironi 1536|193719|3720|1|5|9063.55|0.08|0.03|N|O|1997-02-08|1997-03-11|1997-03-02|COLLECT COD|MAIL|requests sleep pe 1537|17492|9994|1|17|23961.33|0.01|0.03|A|F|1992-04-12|1992-04-19|1992-04-13|NONE|TRUCK|he regular pack 1537|178379|3414|2|50|72868.50|0.08|0.00|R|F|1992-05-30|1992-05-14|1992-06-23|TAKE BACK RETURN|MAIL|special packages haggle slyly at the silent 1537|12480|2481|3|44|61269.12|0.05|0.04|R|F|1992-04-01|1992-03-31|1992-04-21|NONE|TRUCK|lar courts. 1537|139612|2126|4|3|4954.83|0.08|0.07|R|F|1992-03-20|1992-04-14|1992-03-21|TAKE BACK RETURN|SHIP|s, final ideas detect sl 1538|101743|4254|1|32|55831.68|0.05|0.05|N|O|1995-07-08|1995-07-29|1995-08-01|TAKE BACK RETURN|RAIL|uses maintain blithely. fluffily 1538|191738|1739|2|27|49402.71|0.05|0.01|N|O|1995-09-19|1995-08-03|1995-09-24|DELIVER IN PERSON|TRUCK|ngly even packag 1538|129762|7299|3|36|64503.36|0.08|0.04|N|O|1995-07-11|1995-09-10|1995-07-26|DELIVER IN PERSON|MAIL|al deposits mo 1538|103730|1261|4|28|48544.44|0.10|0.04|N|O|1995-09-19|1995-08-27|1995-10-10|COLLECT COD|RAIL|bout the fluffily unusual 1538|177315|2350|5|13|18100.03|0.01|0.05|N|O|1995-06-26|1995-07-30|1995-07-25|NONE|SHIP|ly. 
packages sleep f 1538|127205|9718|6|42|51752.40|0.08|0.08|N|O|1995-10-10|1995-09-12|1995-11-08|DELIVER IN PERSON|TRUCK|equests cajole blithely 1539|195722|761|1|21|38172.12|0.08|0.02|R|F|1995-04-19|1995-05-10|1995-04-27|COLLECT COD|TRUCK|ounts haggle. busy 1539|85981|998|2|11|21636.78|0.01|0.08|A|F|1995-05-27|1995-04-13|1995-06-10|TAKE BACK RETURN|TRUCK|ly express requests. furiously 1539|67462|2475|3|7|10006.22|0.09|0.04|R|F|1995-05-14|1995-04-16|1995-05-30|DELIVER IN PERSON|AIR|. fluffily reg 1540|172385|9937|1|38|55380.44|0.03|0.01|R|F|1992-09-30|1992-10-27|1992-10-12|TAKE BACK RETURN|SHIP| final grouches bo 1540|59905|7421|2|35|65271.50|0.02|0.07|R|F|1992-10-31|1992-09-04|1992-11-05|TAKE BACK RETURN|SHIP|e blithely a 1540|7166|2167|3|25|26829.00|0.08|0.04|R|F|1992-11-15|1992-10-24|1992-12-14|DELIVER IN PERSON|SHIP|ironic deposits amo 1540|24603|2110|4|6|9165.60|0.09|0.03|R|F|1992-08-28|1992-09-17|1992-09-14|COLLECT COD|MAIL|ing to the slyly express asymptote 1540|86906|9415|5|27|51108.30|0.10|0.08|R|F|1992-12-02|1992-10-18|1992-12-31|NONE|SHIP|carefully final packages; b 1541|63380|5887|1|44|59108.72|0.10|0.05|N|O|1995-08-24|1995-07-13|1995-08-26|TAKE BACK RETURN|MAIL|o beans boost fluffily abou 1541|25684|5685|2|8|12877.44|0.10|0.08|N|F|1995-06-05|1995-08-07|1995-06-21|TAKE BACK RETURN|TRUCK|y pending packages. blithely fi 1542|57750|7751|1|37|63186.75|0.07|0.06|A|F|1993-12-15|1993-10-17|1994-01-07|TAKE BACK RETURN|REG AIR|e blithely unusual accounts. quic 1542|2488|4989|2|12|16685.76|0.09|0.06|R|F|1993-10-29|1993-11-02|1993-11-09|TAKE BACK RETURN|RAIL|carefully 1542|5566|5567|3|18|26488.08|0.05|0.05|R|F|1993-10-17|1993-11-15|1993-10-26|TAKE BACK RETURN|FOB|pending instr 1542|142665|5180|4|21|35860.86|0.01|0.05|R|F|1993-10-13|1993-12-13|1993-11-12|NONE|RAIL|y pending foxes nag blithely 1542|154955|2501|5|46|92457.70|0.00|0.00|R|F|1993-09-28|1993-11-03|1993-10-15|COLLECT COD|FOB|ial instructions. ironically 1543|70883|5898|1|34|63031.92|0.02|0.08|N|O|1997-05-25|1997-03-30|1997-06-04|NONE|AIR|ic requests are ac 1543|114492|7004|2|6|9038.94|0.09|0.01|N|O|1997-04-16|1997-05-20|1997-05-16|DELIVER IN PERSON|MAIL| among the carefully bold or 1543|66020|6021|3|42|41412.84|0.06|0.01|N|O|1997-05-26|1997-03-30|1997-06-12|DELIVER IN PERSON|FOB|its sleep until the fur 1543|188591|3628|4|42|70542.78|0.05|0.06|N|O|1997-04-11|1997-04-11|1997-04-23|TAKE BACK RETURN|MAIL|xpress instructions. regular acc 1543|39868|9869|5|9|16270.74|0.08|0.06|N|O|1997-03-14|1997-05-19|1997-03-26|DELIVER IN PERSON|FOB|ravely special requests 1543|48782|6295|6|3|5192.34|0.10|0.04|N|O|1997-03-29|1997-05-10|1997-04-22|COLLECT COD|MAIL|sleep along the furiou 1543|67763|270|7|3|5192.28|0.00|0.02|N|O|1997-03-22|1997-04-06|1997-03-30|NONE|AIR|quickly. final accounts haggle slyl 1568|89369|1878|1|36|48900.96|0.02|0.03|N|O|1997-05-31|1997-04-22|1997-06-21|TAKE BACK RETURN|RAIL|platelets-- furiously sly excu 1568|8928|1429|2|46|84498.32|0.04|0.00|N|O|1997-04-06|1997-04-08|1997-04-23|TAKE BACK RETURN|MAIL|g the blithely even acco 1569|74460|1982|1|5|7172.30|0.07|0.00|N|O|1998-04-16|1998-06-21|1998-04-18|COLLECT COD|REG AIR| packages. ironic, even excuses a 1569|38749|8750|2|16|27003.84|0.01|0.08|N|O|1998-04-26|1998-06-16|1998-05-26|COLLECT COD|MAIL|deposits. blithely final asymptotes ac 1569|48818|8819|3|43|75972.83|0.10|0.03|N|O|1998-06-05|1998-05-31|1998-06-28|DELIVER IN PERSON|FOB| instructions. 1569|69193|9194|4|30|34865.70|0.02|0.03|N|O|1998-07-19|1998-06-04|1998-08-10|NONE|SHIP|packages. 
excuses lose evenly carefully reg 1570|182632|5151|1|25|42865.75|0.00|0.06|N|O|1998-05-03|1998-06-02|1998-06-02|DELIVER IN PERSON|REG AIR|its. slyly regular sentiments 1570|85017|34|2|7|7014.07|0.05|0.05|N|O|1998-07-10|1998-06-01|1998-07-23|TAKE BACK RETURN|MAIL|requests boost quickly re 1571|51371|1372|1|47|62151.39|0.00|0.05|R|F|1992-12-07|1993-02-24|1993-01-01|TAKE BACK RETURN|REG AIR|ng to the fluffily unusual 1571|182725|280|2|6|10846.32|0.03|0.00|A|F|1993-01-08|1993-02-13|1993-02-07|COLLECT COD|SHIP| special, ironic depo 1571|58030|536|3|18|17784.54|0.05|0.08|A|F|1993-01-09|1993-01-12|1993-01-31|COLLECT COD|AIR| pending grouches 1571|100330|2841|4|48|63855.84|0.05|0.05|A|F|1992-12-28|1993-01-04|1993-01-04|DELIVER IN PERSON|RAIL|slyly pending p 1571|41509|6518|5|10|14505.00|0.03|0.06|R|F|1992-12-12|1993-02-13|1992-12-29|DELIVER IN PERSON|AIR|lets. carefully regular ideas wake 1571|33276|786|6|24|29022.48|0.05|0.07|A|F|1993-03-22|1993-01-31|1993-04-09|NONE|TRUCK|warthogs wake carefully acro 1572|23675|3676|1|41|65545.47|0.02|0.00|N|O|1996-05-16|1996-04-09|1996-05-28|TAKE BACK RETURN|REG AIR|. pinto beans alongside 1572|92330|9858|2|10|13223.30|0.04|0.06|N|O|1996-05-17|1996-03-26|1996-05-19|NONE|AIR| accounts affix slyly. 1573|185330|5331|1|5|7076.65|0.05|0.01|A|F|1993-04-24|1993-03-13|1993-05-17|TAKE BACK RETURN|MAIL|ymptotes could u 1573|30683|684|2|17|27432.56|0.00|0.06|R|F|1993-02-24|1993-02-16|1993-03-08|TAKE BACK RETURN|TRUCK|carefully regular deposits. 1573|82204|2205|3|16|18979.20|0.04|0.03|A|F|1993-03-15|1993-03-16|1993-03-31|COLLECT COD|AIR|ely. furiously final requests wake slyl 1573|193749|8788|4|11|20270.14|0.09|0.01|R|F|1993-03-23|1993-03-24|1993-04-12|TAKE BACK RETURN|RAIL|nently pending 1573|136810|1837|5|7|12927.67|0.00|0.01|R|F|1993-01-30|1993-03-14|1993-02-27|DELIVER IN PERSON|SHIP|eodolites sleep slyly. slyly f 1573|153063|609|6|30|33481.80|0.03|0.01|A|F|1992-12-29|1993-03-06|1993-01-02|DELIVER IN PERSON|TRUCK|. blithely even theodolites boos 1574|47706|5219|1|41|67801.70|0.06|0.02|N|O|1997-03-08|1997-02-09|1997-04-01|COLLECT COD|AIR|s. slyly regular depen 1574|190353|7911|2|50|72167.50|0.00|0.05|N|O|1996-12-14|1997-02-14|1996-12-16|TAKE BACK RETURN|FOB|le regular, regular foxes. blithely e 1574|54013|6519|3|25|24175.25|0.06|0.02|N|O|1997-01-16|1997-02-14|1997-02-12|DELIVER IN PERSON|TRUCK|ly silent accounts. 1574|190880|5919|4|6|11825.28|0.03|0.05|N|O|1997-02-24|1997-02-03|1997-03-01|NONE|AIR|e silent, final packages. speci 1574|108301|3322|5|6|7855.80|0.05|0.05|N|O|1997-02-09|1997-03-02|1997-02-14|COLLECT COD|MAIL|nic, final ideas snooze. 1574|4733|2234|6|42|68784.66|0.07|0.01|N|O|1996-12-19|1997-01-13|1996-12-28|NONE|FOB|o beans according t 1574|135755|782|7|14|25070.50|0.04|0.01|N|O|1996-12-30|1997-01-19|1997-01-20|NONE|AIR|ily bold a 1575|28443|8444|1|42|57600.48|0.05|0.08|N|O|1995-10-21|1995-11-25|1995-10-24|DELIVER IN PERSON|RAIL|ly pending pinto beans. 1575|35236|243|2|39|45677.97|0.00|0.06|N|O|1995-10-30|1995-10-15|1995-11-10|COLLECT COD|TRUCK| ironic requests snooze ironic, regular acc 1575|1887|4388|3|12|21466.56|0.01|0.05|N|O|1995-12-27|1995-11-11|1996-01-23|TAKE BACK RETURN|AIR| bold accounts. furi 1575|110162|7696|4|39|45714.24|0.07|0.00|N|O|1995-09-23|1995-11-05|1995-09-25|TAKE BACK RETURN|TRUCK| after the unusual asym 1575|82215|4724|5|10|11972.10|0.09|0.00|N|O|1996-01-10|1995-11-20|1996-01-13|DELIVER IN PERSON|RAIL|k excuses. 
pinto beans wake a 1575|177660|5212|6|14|24327.24|0.08|0.06|N|O|1995-10-31|1995-12-06|1995-11-30|NONE|AIR|beans breach among the furiously specia 1575|116986|9498|7|48|96143.04|0.08|0.04|N|O|1995-11-19|1995-10-25|1995-12-07|DELIVER IN PERSON|TRUCK|cies. regu 1600|171408|8960|1|20|29588.00|0.02|0.01|R|F|1993-06-16|1993-04-23|1993-07-02|COLLECT COD|FOB|pths sleep blithely about the 1600|43128|641|2|48|51413.76|0.07|0.02|R|F|1993-04-17|1993-04-14|1993-05-03|DELIVER IN PERSON|FOB|furiously silent foxes could wake. car 1600|38465|3472|3|8|11227.68|0.04|0.07|R|F|1993-03-07|1993-04-22|1993-03-26|TAKE BACK RETURN|FOB|cajole furiously fluf 1600|68481|988|4|25|36237.00|0.00|0.06|A|F|1993-05-25|1993-04-07|1993-06-05|TAKE BACK RETURN|REG AIR|press packages. ironic excuses bo 1600|146041|6042|5|30|32611.20|0.03|0.08|R|F|1993-06-03|1993-05-03|1993-06-07|DELIVER IN PERSON|RAIL|al escapades alongside of the depo 1601|166156|6157|1|6|7332.90|0.00|0.00|A|F|1994-10-19|1994-09-28|1994-10-23|COLLECT COD|SHIP| bold sheaves. furiously per 1601|174374|1926|2|50|72418.50|0.03|0.02|R|F|1994-12-24|1994-10-23|1995-01-11|COLLECT COD|FOB|ideas doubt 1601|89887|2396|3|14|26276.32|0.04|0.08|R|F|1994-09-17|1994-11-22|1994-10-03|DELIVER IN PERSON|RAIL|he special, fin 1602|182806|2807|1|4|7555.20|0.08|0.06|R|F|1993-10-31|1993-09-05|1993-11-21|NONE|RAIL|y. even excuses 1603|38191|5701|1|1|1129.19|0.08|0.00|R|F|1993-08-17|1993-09-04|1993-08-22|TAKE BACK RETURN|REG AIR|d accounts. special warthogs use fur 1603|65209|7716|2|29|34051.80|0.06|0.08|A|F|1993-09-28|1993-09-20|1993-10-28|NONE|SHIP|ses wake furiously. theodolite 1604|41563|1564|1|15|22568.40|0.09|0.08|R|F|1993-09-22|1993-09-03|1993-09-29|TAKE BACK RETURN|MAIL| instructions haggle 1604|140413|5442|2|37|53776.17|0.06|0.06|A|F|1993-08-22|1993-09-21|1993-09-10|COLLECT COD|SHIP|requests. blithely ironic somas s 1604|113214|5726|3|19|23316.99|0.09|0.07|A|F|1993-10-15|1993-10-04|1993-11-09|COLLECT COD|RAIL| ideas. bol 1604|174239|9274|4|15|19698.45|0.03|0.00|R|F|1993-09-10|1993-08-31|1993-09-30|TAKE BACK RETURN|RAIL|ending realms along the special, p 1604|20091|7598|5|23|23255.07|0.08|0.05|A|F|1993-10-11|1993-08-30|1993-10-18|DELIVER IN PERSON|RAIL|en requests. blithely fin 1605|141821|9364|1|47|87552.54|0.00|0.01|N|O|1998-04-29|1998-06-12|1998-05-20|DELIVER IN PERSON|AIR|. carefully r 1605|179691|7243|2|18|31872.42|0.10|0.00|N|O|1998-05-13|1998-06-17|1998-06-03|COLLECT COD|REG AIR|ly regular foxes wake carefully. bol 1605|58546|8547|3|39|58677.06|0.02|0.03|N|O|1998-07-12|1998-06-05|1998-08-09|DELIVER IN PERSON|MAIL|nal dependencies-- quickly final frets acc 1605|182574|129|4|25|41414.25|0.06|0.02|N|O|1998-05-26|1998-06-14|1998-06-05|COLLECT COD|AIR|ole carefully car 1606|114082|4083|1|21|23017.68|0.04|0.00|N|O|1997-06-02|1997-07-02|1997-06-27|DELIVER IN PERSON|RAIL| pending theodolites prom 1606|173565|8600|2|35|57349.60|0.00|0.02|N|O|1997-06-20|1997-06-19|1997-06-22|COLLECT COD|TRUCK|carefully sil 1606|99477|7005|3|23|33958.81|0.00|0.06|N|O|1997-04-19|1997-06-26|1997-04-30|NONE|MAIL|ously final requests. slowly ironic ex 1606|96286|8796|4|20|25645.60|0.02|0.04|N|O|1997-05-01|1997-05-26|1997-05-28|TAKE BACK RETURN|TRUCK|fily carefu 1606|70181|5196|5|14|16116.52|0.10|0.01|N|O|1997-05-19|1997-07-05|1997-06-10|COLLECT COD|FOB|structions haggle f 1607|189131|6686|1|2|2440.26|0.02|0.00|N|O|1996-01-11|1996-02-15|1996-01-19|DELIVER IN PERSON|MAIL|packages haggle. 
regular requests boost s 1607|118923|1435|2|37|71851.04|0.05|0.02|N|O|1996-02-27|1996-02-18|1996-03-16|NONE|AIR|alongside 1607|122680|2681|3|39|66404.52|0.00|0.00|N|O|1996-02-01|1996-02-12|1996-02-16|NONE|FOB|uches cajole. accounts ar 1607|75587|8095|4|34|53127.72|0.05|0.06|N|O|1996-01-06|1996-02-24|1996-01-10|DELIVER IN PERSON|SHIP| quickly above the 1607|177948|466|5|48|97245.12|0.00|0.05|N|O|1996-02-22|1996-02-13|1996-03-09|TAKE BACK RETURN|MAIL|ular forges. deposits a 1632|190513|8071|1|47|75364.97|0.08|0.00|N|O|1997-01-25|1997-02-09|1997-02-19|TAKE BACK RETURN|RAIL|g to the closely special no 1632|147914|5457|2|14|27466.74|0.08|0.05|N|O|1997-01-15|1997-02-25|1997-01-28|NONE|RAIL|oxes. deposits nag slyly along the slyly 1632|176169|1204|3|47|58522.52|0.03|0.04|N|O|1997-01-29|1997-03-03|1997-02-21|NONE|MAIL|sts. blithely regular 1632|56164|3680|4|33|36965.28|0.09|0.02|N|O|1997-04-01|1997-02-24|1997-04-29|TAKE BACK RETURN|REG AIR|ructions! slyly 1632|141072|8615|5|43|47862.01|0.10|0.03|N|O|1997-02-24|1997-02-19|1997-03-25|DELIVER IN PERSON|FOB|ts. blithe, bold ideas cajo 1633|177903|2938|1|35|69331.50|0.01|0.02|N|O|1996-01-09|1995-12-02|1996-01-21|COLLECT COD|REG AIR|ly against the dolph 1633|4124|4125|2|15|15421.80|0.00|0.05|N|O|1995-12-13|1995-11-13|1996-01-04|TAKE BACK RETURN|FOB|ges wake fluffil 1634|47063|7064|1|21|21211.26|0.00|0.00|N|O|1996-10-04|1996-10-22|1996-11-01|NONE|MAIL|counts alo 1634|171632|1633|2|44|74959.72|0.05|0.01|N|O|1996-09-17|1996-11-09|1996-10-03|COLLECT COD|SHIP|requests affix slyly. quickly even pack 1634|18568|8569|3|21|31217.76|0.06|0.07|N|O|1996-11-16|1996-10-21|1996-11-27|NONE|TRUCK|y along the excuses. 1634|67465|4984|4|17|24351.82|0.08|0.07|N|O|1996-10-29|1996-10-15|1996-11-02|TAKE BACK RETURN|SHIP|cial, bold platelets alongside of the f 1634|75908|5909|5|2|3767.80|0.07|0.04|N|O|1996-11-22|1996-10-28|1996-12-17|NONE|SHIP|ly. carefully regular asymptotes wake 1634|169681|2198|6|11|19257.48|0.01|0.08|N|O|1996-10-04|1996-12-06|1996-10-14|DELIVER IN PERSON|SHIP|final requests 1634|12416|4918|7|35|46494.35|0.06|0.02|N|O|1996-11-25|1996-11-25|1996-12-12|TAKE BACK RETURN|RAIL|cies. regular, special de 1635|70167|2675|1|3|3411.48|0.06|0.08|N|O|1997-03-13|1997-03-25|1997-03-27|COLLECT COD|FOB| quickly ironic r 1635|89018|9019|2|8|8056.08|0.04|0.05|N|O|1997-04-30|1997-04-21|1997-05-09|DELIVER IN PERSON|AIR|ravely carefully express 1635|113791|3792|3|20|36095.80|0.07|0.01|N|O|1997-05-19|1997-04-01|1997-06-17|TAKE BACK RETURN|FOB|oost according to the carefully even accou 1635|76063|3585|4|40|41562.40|0.01|0.04|N|O|1997-02-25|1997-03-20|1997-03-12|TAKE BACK RETURN|RAIL|uriously up the ironic deposits. slyly i 1636|84083|1608|1|2|2134.16|0.09|0.03|N|O|1997-09-26|1997-08-22|1997-10-05|NONE|TRUCK|nal foxes cajole above the blithely reg 1636|168925|8926|2|45|89726.40|0.03|0.01|N|O|1997-07-14|1997-08-08|1997-07-27|COLLECT COD|RAIL|ely express reque 1636|107869|380|3|24|45044.64|0.07|0.08|N|O|1997-10-07|1997-08-12|1997-11-04|TAKE BACK RETURN|MAIL|e carefully unusual ideas are f 1636|152367|4883|4|43|61032.48|0.06|0.00|N|O|1997-08-23|1997-08-10|1997-09-17|NONE|REG AIR|blithely special r 1636|18378|3381|5|22|28520.14|0.05|0.02|N|O|1997-07-22|1997-08-18|1997-08-03|COLLECT COD|AIR|ular, regu 1636|62910|5417|6|34|63678.94|0.10|0.01|N|O|1997-08-11|1997-09-09|1997-08-23|NONE|TRUCK|ular depos 1636|113018|8041|7|7|7217.07|0.04|0.00|N|O|1997-07-28|1997-09-10|1997-07-31|NONE|MAIL|ronic instructions. 
final 1637|85755|5756|1|49|85296.75|0.02|0.03|N|F|1995-06-08|1995-04-19|1995-07-01|COLLECT COD|REG AIR|. blithely i 1637|72269|7284|2|1|1241.26|0.10|0.02|A|F|1995-02-14|1995-03-26|1995-03-09|TAKE BACK RETURN|AIR|ly final pinto beans. furiously 1637|21652|6657|3|10|15736.50|0.02|0.05|R|F|1995-02-21|1995-03-17|1995-03-11|NONE|AIR|uriously? blithely even sauternes wake. 1637|92629|5139|4|42|68108.04|0.06|0.01|A|F|1995-03-18|1995-04-24|1995-03-31|COLLECT COD|SHIP|blithely a 1637|4005|6506|5|25|22725.00|0.05|0.00|R|F|1995-06-07|1995-03-26|1995-06-08|COLLECT COD|RAIL| haggle carefully silent accou 1637|108780|3801|6|38|67973.64|0.02|0.08|R|F|1995-03-20|1995-05-05|1995-04-14|DELIVER IN PERSON|SHIP|even, pending foxes nod regular 1637|51705|4211|7|21|34790.70|0.07|0.08|A|F|1995-04-30|1995-04-30|1995-05-05|COLLECT COD|SHIP|ly ironic theodolites use b 1638|5840|5841|1|46|80308.64|0.03|0.02|N|O|1997-10-16|1997-10-28|1997-11-09|COLLECT COD|MAIL|otes haggle before the slyly bold instructi 1638|148976|8977|2|30|60749.10|0.00|0.04|N|O|1997-12-05|1997-09-17|1997-12-06|NONE|REG AIR|s cajole boldly bold requests. closely 1638|30158|2662|3|5|5440.75|0.08|0.07|N|O|1997-10-15|1997-11-01|1997-11-08|DELIVER IN PERSON|FOB|xcuses sleep furiou 1638|55974|3490|4|19|36669.43|0.00|0.08|N|O|1997-10-15|1997-10-27|1997-11-03|DELIVER IN PERSON|MAIL| quickly expres 1638|142867|7896|5|25|47746.50|0.05|0.03|N|O|1997-10-06|1997-09-30|1997-11-02|DELIVER IN PERSON|REG AIR|gle final, ironic pinto beans. 1638|154436|9467|6|46|68559.78|0.07|0.08|N|O|1997-08-20|1997-10-10|1997-09-09|COLLECT COD|AIR|ckages are carefully even instru 1639|186473|6474|1|24|37427.28|0.07|0.00|N|O|1995-08-24|1995-10-06|1995-08-31|COLLECT COD|REG AIR| the regular packages. courts dou 1639|42945|7954|2|38|71741.72|0.01|0.04|N|O|1995-08-23|1995-11-09|1995-08-29|TAKE BACK RETURN|FOB|y regular packages. b 1639|170376|5411|3|41|59301.17|0.04|0.02|N|O|1995-12-19|1995-11-11|1996-01-12|DELIVER IN PERSON|FOB|structions w 1664|117307|2330|1|48|63566.40|0.04|0.02|N|O|1996-06-21|1996-05-01|1996-07-19|TAKE BACK RETURN|RAIL| use. ironic deposits integrate. slyly unu 1664|172152|7187|2|30|36724.50|0.06|0.05|N|O|1996-04-04|1996-05-04|1996-05-03|COLLECT COD|FOB|ess multip 1664|150899|900|3|10|19498.90|0.00|0.06|N|O|1996-04-10|1996-05-13|1996-05-07|TAKE BACK RETURN|RAIL|instructions up the acc 1664|154273|6789|4|35|46454.45|0.00|0.04|N|O|1996-03-06|1996-05-16|1996-03-09|DELIVER IN PERSON|REG AIR|y regular ide 1664|56747|6748|5|9|15333.66|0.07|0.04|N|O|1996-04-15|1996-05-14|1996-05-11|DELIVER IN PERSON|TRUCK|ges. fluffil 1664|140115|2630|6|40|46204.40|0.09|0.07|N|O|1996-04-02|1996-04-22|1996-04-17|COLLECT COD|REG AIR|se blithely unusual pains. carefully 1665|46402|3915|1|4|5393.60|0.02|0.03|A|F|1994-09-01|1994-06-07|1994-09-12|DELIVER IN PERSON|TRUCK|ely final requests. requests 1665|77098|4620|2|1|1075.09|0.03|0.05|R|F|1994-05-22|1994-07-06|1994-05-24|TAKE BACK RETURN|TRUCK|sly final p 1666|184344|1899|1|30|42850.20|0.04|0.03|N|O|1995-10-28|1995-11-30|1995-11-18|TAKE BACK RETURN|AIR| breach evenly final accounts. r 1666|63994|9007|2|20|39159.80|0.01|0.00|N|O|1996-01-27|1995-12-12|1996-01-31|NONE|REG AIR|uietly regular foxes wake quick 1666|133479|1019|3|31|46886.57|0.05|0.07|N|O|1996-02-11|1996-01-11|1996-02-28|COLLECT COD|RAIL|ding to the express, bold accounts. 
fu 1666|168802|1319|4|41|76702.80|0.06|0.08|N|O|1995-11-29|1996-01-04|1995-12-24|NONE|TRUCK|ly regular excuses; regular ac 1667|20267|7774|1|6|7123.56|0.04|0.02|N|O|1997-12-07|1997-11-16|1998-01-02|COLLECT COD|FOB|riously busy requests. blithely final a 1667|21164|6169|2|29|31469.64|0.06|0.07|N|O|1997-10-15|1997-11-09|1997-11-11|TAKE BACK RETURN|MAIL|l accounts. furiously final courts h 1667|94941|9960|3|48|92925.12|0.05|0.01|N|O|1998-01-27|1998-01-06|1998-02-09|TAKE BACK RETURN|SHIP|tes sleep furiously. carefully eve 1667|58119|5635|4|24|25850.64|0.04|0.01|N|O|1997-10-14|1997-12-01|1997-11-09|TAKE BACK RETURN|MAIL|hrash final requests. care 1667|194315|1873|5|2|2818.62|0.07|0.00|N|O|1997-12-17|1997-11-22|1998-01-16|NONE|SHIP|pecial requests hag 1667|47733|5246|6|6|10084.38|0.01|0.03|N|O|1998-01-21|1997-12-19|1998-01-28|NONE|TRUCK| nag quickly above th 1667|39956|2460|7|19|36023.05|0.09|0.03|N|O|1998-01-23|1997-11-24|1998-01-26|DELIVER IN PERSON|SHIP|around the pinto beans. express, special 1668|131636|4150|1|8|13341.04|0.06|0.01|N|O|1997-07-23|1997-10-09|1997-08-06|DELIVER IN PERSON|FOB|arefully regular tithes! slyl 1668|945|8446|2|25|46148.50|0.01|0.06|N|O|1997-08-08|1997-09-28|1997-09-01|NONE|TRUCK|y ironic requests. bold, final ideas a 1668|74160|6668|3|42|47634.72|0.08|0.01|N|O|1997-08-09|1997-09-08|1997-08-31|NONE|FOB|ole carefully excuses. final 1668|190774|8332|4|9|16782.93|0.05|0.03|N|O|1997-10-17|1997-09-05|1997-11-01|COLLECT COD|RAIL|wake furiously even instructions. sil 1668|127924|7925|5|25|48798.00|0.01|0.02|N|O|1997-10-08|1997-09-20|1997-10-11|DELIVER IN PERSON|REG AIR|even platelets across the silent 1668|9920|2421|6|38|69536.96|0.07|0.01|N|O|1997-08-26|1997-09-17|1997-09-05|DELIVER IN PERSON|TRUCK|ep slyly across the furi 1669|78373|8374|1|24|32432.88|0.04|0.08|N|O|1997-09-04|1997-07-30|1997-09-20|DELIVER IN PERSON|RAIL| regular, final deposits use quick 1670|31411|6418|1|41|55038.81|0.07|0.01|N|O|1997-07-19|1997-08-20|1997-07-23|DELIVER IN PERSON|TRUCK|thely according to the sly 1670|121378|1379|2|10|13993.70|0.07|0.03|N|O|1997-09-14|1997-08-16|1997-09-23|NONE|SHIP|fily special ideas 1670|185284|321|3|41|56140.48|0.07|0.07|N|O|1997-07-19|1997-08-05|1997-07-26|COLLECT COD|SHIP|al gifts. speci 1671|148885|3914|1|21|40611.48|0.02|0.07|N|O|1996-07-28|1996-09-28|1996-08-08|TAKE BACK RETURN|AIR|s accounts slee 1671|95111|2639|2|4|4424.44|0.05|0.00|N|O|1996-08-30|1996-09-19|1996-09-23|DELIVER IN PERSON|TRUCK|lyly regular ac 1671|123591|8616|3|11|17760.49|0.06|0.08|N|O|1996-09-16|1996-10-21|1996-09-18|NONE|SHIP|tes sleep blithely 1671|177303|2338|4|5|6901.50|0.00|0.00|N|O|1996-11-14|1996-10-20|1996-11-25|TAKE BACK RETURN|FOB|luffily regular deposits 1671|126590|6591|5|12|19399.08|0.07|0.04|N|O|1996-11-17|1996-09-02|1996-12-17|COLLECT COD|RAIL|special, ironic 1671|196217|8737|6|46|60407.66|0.08|0.05|N|O|1996-09-13|1996-10-14|1996-09-28|TAKE BACK RETURN|REG AIR|. slyly bold instructions boost. 
furiousl 1696|15419|422|1|8|10675.28|0.04|0.02|N|O|1998-04-28|1998-02-07|1998-05-10|NONE|TRUCK|the blithely 1696|138312|826|2|13|17554.03|0.08|0.06|N|O|1998-03-01|1998-03-25|1998-03-24|TAKE BACK RETURN|TRUCK|tructions play slyly q 1696|1107|3608|3|19|19153.90|0.08|0.05|N|O|1998-05-03|1998-03-13|1998-05-28|TAKE BACK RETURN|REG AIR|its maintain alongside of the f 1696|192712|2713|4|21|37898.91|0.05|0.00|N|O|1998-05-04|1998-02-18|1998-05-07|NONE|MAIL|y players sleep along the final, pending 1696|93788|8807|5|43|76616.54|0.03|0.06|N|O|1998-02-14|1998-03-29|1998-02-20|COLLECT COD|FOB|arefully regular dep 1697|74452|6960|1|6|8558.70|0.05|0.00|N|O|1997-01-28|1996-11-27|1997-01-31|NONE|FOB|accounts breach slyly even de 1697|103417|5928|2|24|34089.84|0.00|0.08|N|O|1996-12-29|1996-12-19|1997-01-10|NONE|SHIP|ts cajole carefully above the carefully 1697|123693|6206|3|27|46350.63|0.06|0.00|N|O|1997-01-20|1996-12-02|1997-02-05|COLLECT COD|MAIL|ly regular packages across the silent, b 1697|93083|3084|4|49|52727.92|0.08|0.04|N|O|1996-12-07|1997-01-02|1996-12-31|COLLECT COD|TRUCK|lar foxes. fluffily furious ideas doubt qu 1697|34808|7312|5|19|33113.20|0.03|0.07|N|O|1997-01-08|1996-11-12|1997-01-11|DELIVER IN PERSON|FOB|ons? special, special accounts after 1698|96537|6538|1|44|67475.32|0.05|0.05|N|O|1997-05-16|1997-07-05|1997-05-27|NONE|RAIL|ts wake slyly after t 1698|92106|4616|2|6|6588.60|0.08|0.00|N|O|1997-08-21|1997-06-08|1997-09-03|DELIVER IN PERSON|RAIL| pending packages affix ne 1698|20546|3049|3|22|32263.88|0.03|0.04|N|O|1997-08-07|1997-05-28|1997-08-24|DELIVER IN PERSON|TRUCK|oward the furiously iro 1698|111909|4421|4|19|36497.10|0.00|0.07|N|O|1997-07-04|1997-06-21|1997-08-01|NONE|RAIL| fluffily e 1698|52417|2418|5|37|50668.17|0.00|0.03|N|O|1997-05-16|1997-05-29|1997-05-27|NONE|AIR|ly regular ideas. deposit 1698|165824|5825|6|15|28347.30|0.10|0.01|N|O|1997-07-20|1997-06-07|1997-07-21|TAKE BACK RETURN|RAIL|final ideas. even, ironic 1699|37350|2357|1|50|64367.50|0.00|0.06|A|F|1994-03-26|1994-03-23|1994-04-20|NONE|FOB|to the final requests are carefully silent 1699|134825|4826|2|17|31616.94|0.07|0.02|R|F|1994-01-12|1994-03-12|1994-02-08|NONE|AIR|haggle blithely slyly 1700|139702|4729|1|38|66184.60|0.04|0.04|N|O|1996-10-03|1996-07-27|1996-10-22|NONE|RAIL|ular dependencies engage slyly 1700|155943|5944|2|49|97948.06|0.04|0.00|N|O|1996-09-26|1996-07-28|1996-10-16|NONE|TRUCK|kly even dependencies haggle fluffi 1701|149418|6961|1|47|68968.27|0.08|0.05|R|F|1992-05-25|1992-06-29|1992-06-15|NONE|RAIL|slyly final requests cajole requests. f 1701|53004|3005|2|2|1914.00|0.01|0.04|R|F|1992-06-24|1992-07-12|1992-06-29|COLLECT COD|SHIP|ween the pending, final accounts. 1701|34683|7187|3|26|42059.68|0.10|0.06|R|F|1992-06-04|1992-07-11|1992-07-04|DELIVER IN PERSON|FOB| accounts. blithely pending pinto be 1702|66974|4493|1|19|36878.43|0.02|0.01|N|F|1995-06-02|1995-06-30|1995-06-29|NONE|REG AIR|ies haggle blith 1702|29762|2265|2|38|64286.88|0.00|0.00|N|O|1995-09-01|1995-06-10|1995-09-10|DELIVER IN PERSON|REG AIR|as believe blithely. bo 1702|194425|4426|3|46|69893.32|0.00|0.08|N|O|1995-07-14|1995-06-30|1995-07-20|NONE|FOB|y even foxes. 
carefully final dependencies 1702|92039|2040|4|28|28868.84|0.07|0.05|R|F|1995-06-10|1995-07-26|1995-06-16|TAKE BACK RETURN|AIR|nts haggle along the packa 1702|88038|8039|5|34|34885.02|0.01|0.06|N|O|1995-07-04|1995-06-08|1995-07-28|DELIVER IN PERSON|AIR|y careful packages; dogged acco 1702|41506|4011|6|28|40530.00|0.10|0.00|N|O|1995-08-14|1995-07-31|1995-09-08|COLLECT COD|RAIL|ackages sleep. furiously even excuses snooz 1703|165404|7921|1|36|52898.40|0.09|0.01|R|F|1993-04-22|1993-03-05|1993-04-24|DELIVER IN PERSON|SHIP|riously express 1703|136118|1145|2|35|40393.85|0.01|0.08|R|F|1993-04-14|1993-03-31|1993-04-27|NONE|RAIL|he carefully 1703|123460|3461|3|48|71206.08|0.06|0.02|R|F|1993-02-07|1993-04-20|1993-02-24|TAKE BACK RETURN|AIR|ggle slyly furiously regular theodol 1728|125205|230|1|1|1230.20|0.07|0.04|N|O|1996-09-16|1996-08-19|1996-09-18|COLLECT COD|FOB|lly. carefully ex 1728|104383|6894|2|23|31909.74|0.05|0.02|N|O|1996-09-08|1996-07-24|1996-09-20|NONE|FOB|ns. pending, final ac 1728|164065|1614|3|44|49678.64|0.08|0.07|N|O|1996-07-31|1996-06-22|1996-08-06|COLLECT COD|FOB|ide of the slyly blithe 1728|26347|6348|4|34|43293.56|0.08|0.05|N|O|1996-08-28|1996-07-20|1996-09-12|DELIVER IN PERSON|MAIL|special req 1728|198088|3127|5|31|36768.48|0.09|0.02|N|O|1996-07-26|1996-06-28|1996-08-14|NONE|REG AIR|kly sly theodolites. 1729|156948|6949|1|12|24059.28|0.08|0.04|A|F|1992-08-11|1992-07-24|1992-08-16|COLLECT COD|RAIL|y pending packages detect. carefully re 1730|165429|7946|1|41|61271.22|0.01|0.03|N|O|1998-08-11|1998-08-29|1998-09-02|TAKE BACK RETURN|TRUCK| instructions. unusual, even Tiresi 1730|161312|1313|2|15|20599.65|0.07|0.04|N|O|1998-09-07|1998-09-12|1998-09-30|TAKE BACK RETURN|AIR|pinto beans cajole. bravely bold 1730|161029|3546|3|9|9810.18|0.10|0.00|N|O|1998-09-18|1998-09-15|1998-09-21|DELIVER IN PERSON|FOB|gular dependencies wake. blithely final e 1730|9457|6958|4|40|54658.00|0.02|0.03|N|O|1998-10-02|1998-10-06|1998-10-03|NONE|SHIP|ven dinos slee 1730|140291|5320|5|43|57245.47|0.04|0.06|N|O|1998-10-26|1998-10-22|1998-11-02|DELIVER IN PERSON|TRUCK|ng deposits cajo 1731|183932|3933|1|36|72573.48|0.10|0.00|N|O|1996-04-18|1996-04-03|1996-04-29|TAKE BACK RETURN|MAIL|ngside of the even instruct 1731|138267|3294|2|7|9136.82|0.04|0.07|N|O|1996-04-11|1996-02-13|1996-04-30|DELIVER IN PERSON|REG AIR|fily quick asymptotes 1731|50920|3426|3|50|93546.00|0.05|0.04|N|O|1996-01-14|1996-03-13|1996-01-29|COLLECT COD|RAIL|ly slyly speci 1731|195874|3432|4|23|45307.01|0.10|0.04|N|O|1996-04-22|1996-02-25|1996-05-16|TAKE BACK RETURN|RAIL|rays? bold, express pac 1731|52355|2356|5|37|48371.95|0.10|0.05|N|O|1996-04-30|1996-03-17|1996-05-27|TAKE BACK RETURN|RAIL| beans use furiously slyly b 1731|123416|953|6|41|59015.81|0.03|0.08|N|O|1996-04-05|1996-02-28|1996-05-01|TAKE BACK RETURN|RAIL|haggle across the blithely ironi 1732|4397|4398|1|50|65069.50|0.02|0.01|R|F|1993-12-05|1994-01-23|1993-12-20|TAKE BACK RETURN|FOB|fily final asymptotes according 1732|98183|8184|2|36|42522.48|0.01|0.03|A|F|1994-03-15|1994-02-09|1994-04-02|DELIVER IN PERSON|TRUCK|ve the accounts. slowly ironic multip 1732|160895|5928|3|41|80191.49|0.00|0.04|R|F|1994-02-20|1994-01-07|1994-02-27|TAKE BACK RETURN|AIR|quests sublate against the silent 1732|151257|1258|4|9|11774.25|0.04|0.04|A|F|1994-02-25|1994-01-29|1994-03-16|TAKE BACK RETURN|FOB|ular platelets. deposits wak 1732|168899|1416|5|25|49197.25|0.02|0.05|A|F|1994-02-15|1994-01-07|1994-02-21|COLLECT COD|REG AIR|nag slyly. 
even, special de 1732|72930|452|6|16|30446.88|0.01|0.05|R|F|1994-01-07|1994-01-02|1994-01-25|COLLECT COD|SHIP|ix carefully at the furiously regular pac 1733|110444|2956|1|41|59632.04|0.08|0.01|N|O|1996-06-13|1996-07-08|1996-07-07|TAKE BACK RETURN|AIR|ess notornis. fur 1733|23938|1445|2|16|29790.88|0.00|0.04|N|O|1996-08-28|1996-07-25|1996-09-27|COLLECT COD|MAIL|slyly express deposits sleep abo 1733|119673|7207|3|29|49087.43|0.10|0.06|N|O|1996-07-16|1996-08-08|1996-07-28|NONE|TRUCK|ns detect among the special accounts. qu 1733|135517|544|4|38|58995.38|0.01|0.03|N|O|1996-08-26|1996-07-23|1996-08-28|NONE|FOB| deposits 1733|33586|8593|5|22|33430.76|0.06|0.07|N|O|1996-07-16|1996-07-24|1996-07-30|COLLECT COD|AIR|gainst the final deposits. carefully final 1733|65422|5423|6|9|12486.78|0.06|0.08|N|O|1996-05-25|1996-07-23|1996-06-10|COLLECT COD|TRUCK|ven foxes was according to t 1733|145889|918|7|13|25153.44|0.02|0.03|N|O|1996-08-03|1996-08-02|1996-08-18|NONE|MAIL|olites sleep furious 1734|154883|7399|1|38|73639.44|0.03|0.03|R|F|1994-08-09|1994-09-07|1994-08-12|COLLECT COD|FOB|ts doubt b 1734|117726|238|2|4|6974.88|0.06|0.03|A|F|1994-08-20|1994-07-17|1994-08-25|DELIVER IN PERSON|AIR|final warhorses. 1735|155456|5457|1|43|64992.35|0.02|0.06|A|F|1993-01-14|1993-03-25|1993-02-02|DELIVER IN PERSON|FOB|iously after the 1735|138514|1028|2|49|76072.99|0.03|0.04|A|F|1992-12-31|1993-02-03|1993-01-25|TAKE BACK RETURN|TRUCK|y express accounts above the exp 1760|95414|433|1|38|53557.58|0.09|0.03|N|O|1996-06-15|1996-06-29|1996-07-11|NONE|MAIL|tions. blithely regular orbits against the 1760|7256|7257|2|3|3489.75|0.00|0.06|N|O|1996-07-18|1996-07-01|1996-08-01|NONE|RAIL|lyly bold dolphins haggle carefully. sl 1760|136818|1845|3|44|81611.64|0.05|0.01|N|O|1996-06-11|1996-06-16|1996-07-02|COLLECT COD|REG AIR|instructions poach slyly ironic theodolites 1761|51042|8558|1|33|32770.32|0.09|0.03|R|F|1994-01-03|1994-01-23|1994-01-31|NONE|FOB|s. excuses a 1761|51137|1138|2|37|40260.81|0.02|0.07|R|F|1994-02-17|1994-03-08|1994-03-16|NONE|RAIL| integrate. quickly unusual 1761|48901|1406|3|37|68446.30|0.06|0.04|R|F|1994-01-02|1994-03-12|1994-01-25|DELIVER IN PERSON|TRUCK|regular packages wake after 1761|72503|25|4|49|72299.50|0.06|0.07|R|F|1994-01-08|1994-03-03|1994-02-05|TAKE BACK RETURN|FOB|y even packages promise 1761|156877|9393|5|37|71553.19|0.03|0.04|R|F|1994-04-24|1994-03-14|1994-04-29|TAKE BACK RETURN|MAIL|express requests print blithely around the 1761|23419|926|6|12|16108.92|0.01|0.05|A|F|1994-04-16|1994-03-08|1994-04-21|DELIVER IN PERSON|AIR| sleep furiously. deposits are acco 1761|811|5812|7|13|22253.53|0.03|0.08|R|F|1994-03-06|1994-03-18|1994-03-22|DELIVER IN PERSON|TRUCK|ons boost fu 1762|25243|248|1|15|17523.60|0.04|0.08|A|F|1994-12-18|1994-10-29|1995-01-17|TAKE BACK RETURN|REG AIR|old packages thrash. care 1762|49506|4515|2|39|56764.50|0.10|0.02|A|F|1994-09-12|1994-11-09|1994-10-08|DELIVER IN PERSON|MAIL| ironic platelets sleep along t 1762|31865|4369|3|7|12578.02|0.05|0.01|R|F|1994-09-03|1994-10-02|1994-09-10|NONE|REG AIR|uickly express packages wake slyly-- regul 1762|144131|6646|4|24|28203.12|0.03|0.03|A|F|1994-11-30|1994-11-02|1994-12-20|NONE|REG AIR|accounts solve alongside of the fluffily 1762|7408|7409|5|49|64454.60|0.08|0.05|A|F|1994-10-20|1994-11-02|1994-11-10|TAKE BACK RETURN|SHIP| packages sleep fluffily pen 1762|93430|8449|6|35|49820.05|0.05|0.05|A|F|1994-11-25|1994-10-21|1994-11-28|COLLECT COD|AIR|ind quickly. 
accounts ca 1762|72971|5479|7|47|91366.59|0.03|0.01|A|F|1994-11-02|1994-10-07|1994-11-08|NONE|SHIP| blithely brave 1763|11731|6734|1|22|36140.06|0.09|0.06|N|O|1997-01-17|1997-01-15|1997-02-03|TAKE BACK RETURN|SHIP|ld. fluffily final ideas boos 1763|156496|9012|2|43|66757.07|0.04|0.04|N|O|1996-11-04|1996-12-09|1996-11-28|DELIVER IN PERSON|FOB|r deposits integrate blithely pending, quic 1763|24137|6640|3|16|16978.08|0.06|0.02|N|O|1996-12-12|1996-12-04|1996-12-25|DELIVER IN PERSON|RAIL|ously pending asymptotes a 1763|60187|7706|4|44|50475.92|0.04|0.05|N|O|1996-12-04|1997-01-06|1996-12-25|DELIVER IN PERSON|REG AIR| instructions need to integrate deposits. 1763|146555|9070|5|13|20820.15|0.03|0.05|N|O|1996-11-23|1997-01-24|1996-12-05|TAKE BACK RETURN|SHIP|s sleep carefully. fluffily unusua 1763|142048|2049|6|3|3270.12|0.05|0.03|N|O|1996-12-10|1996-12-06|1997-01-04|TAKE BACK RETURN|FOB|ut the slyly pending deposi 1763|183931|1486|7|2|4029.86|0.05|0.07|N|O|1997-02-27|1996-12-04|1997-03-27|COLLECT COD|FOB|even pinto beans snooze fluffi 1764|120300|301|1|20|26406.00|0.09|0.02|A|F|1992-06-09|1992-05-22|1992-07-06|COLLECT COD|MAIL|y quickly regular packages. car 1764|66465|1478|2|3|4294.38|0.07|0.07|R|F|1992-05-13|1992-06-07|1992-05-26|COLLECT COD|RAIL|es wake slowly. 1764|77176|4698|3|27|31135.59|0.07|0.04|A|F|1992-05-06|1992-05-11|1992-05-23|COLLECT COD|TRUCK|ly final foxes wake blithely even requests 1765|160566|567|1|36|58556.16|0.08|0.04|N|O|1996-03-02|1996-02-17|1996-03-14|DELIVER IN PERSON|SHIP|he blithely pending accou 1766|86716|6717|1|32|54486.72|0.08|0.01|N|O|1997-01-08|1996-11-11|1997-01-31|TAKE BACK RETURN|AIR|ess accounts. stealthily ironic accou 1766|33969|6473|2|12|22835.52|0.05|0.01|N|O|1996-10-28|1996-12-18|1996-11-15|DELIVER IN PERSON|AIR|heodolites above the final, regular acc 1766|110761|8295|3|1|1771.76|0.10|0.02|N|O|1997-01-21|1997-01-07|1997-02-19|NONE|TRUCK|ly blithely pending accounts. reg 1767|24933|9938|1|32|59453.76|0.08|0.04|A|F|1995-05-22|1995-05-14|1995-05-23|COLLECT COD|SHIP|to the bravely ironic requests i 1767|41434|8947|2|1|1375.43|0.09|0.05|N|O|1995-06-23|1995-05-25|1995-07-03|TAKE BACK RETURN|RAIL|ing to the slyly fin 1767|173977|3978|3|24|49223.28|0.06|0.03|R|F|1995-03-16|1995-04-29|1995-04-11|DELIVER IN PERSON|RAIL|luffy theodolites need to detect furi 1767|22387|4890|4|50|65469.00|0.01|0.02|R|F|1995-05-29|1995-04-14|1995-06-15|NONE|REG AIR|y unusual foxe 1767|51549|4055|5|40|60021.60|0.06|0.00|R|F|1995-04-16|1995-05-06|1995-04-21|TAKE BACK RETURN|AIR|ep. accounts nag blithely fu 1792|87191|2208|1|9|10603.71|0.09|0.04|R|F|1994-02-28|1993-12-11|1994-03-12|TAKE BACK RETURN|AIR|final packages s 1792|8700|6201|2|5|8043.50|0.04|0.02|R|F|1994-02-13|1994-01-03|1994-02-28|DELIVER IN PERSON|TRUCK|ely regular accounts are slyly. pending, bo 1792|8139|640|3|8|8377.04|0.01|0.04|A|F|1994-02-21|1994-01-26|1994-02-27|DELIVER IN PERSON|RAIL|nts. fluffily special instructions integr 1792|190135|2655|4|45|55130.85|0.00|0.01|A|F|1994-02-27|1993-12-24|1994-03-07|DELIVER IN PERSON|MAIL|ests are. ironic, regular asy 1792|198786|3825|5|35|65967.30|0.06|0.05|R|F|1994-01-31|1994-01-20|1994-02-17|NONE|FOB|e against the quic 1793|47119|9624|1|29|30917.19|0.01|0.06|R|F|1992-10-24|1992-09-20|1992-11-23|NONE|MAIL|ar excuses. 
1793|125194|2731|2|4|4876.76|0.07|0.05|A|F|1992-07-28|1992-08-26|1992-08-21|COLLECT COD|RAIL|nic foxes along the even 1793|130060|2574|3|6|6540.36|0.01|0.05|R|F|1992-09-21|1992-09-05|1992-10-01|DELIVER IN PERSON|REG AIR|uctions; depo 1793|117679|5213|4|4|6786.68|0.00|0.08|R|F|1992-09-27|1992-09-21|1992-10-07|DELIVER IN PERSON|AIR|equests nod ac 1793|24521|4522|5|42|60711.84|0.03|0.03|A|F|1992-10-13|1992-10-02|1992-11-06|NONE|RAIL|uctions sleep carefully special, fl 1794|167389|7390|1|36|52429.68|0.09|0.08|N|O|1997-11-07|1997-11-01|1997-11-18|TAKE BACK RETURN|FOB|ely fluffily ironi 1794|94575|9594|2|3|4708.71|0.02|0.03|N|O|1997-11-15|1997-12-16|1997-11-20|DELIVER IN PERSON|FOB| sentiments according to the q 1794|116160|6161|3|23|27051.68|0.08|0.04|N|O|1997-10-13|1997-11-30|1997-10-28|TAKE BACK RETURN|AIR|usly unusual theodolites doze about 1794|84627|9644|4|34|54795.08|0.06|0.08|N|O|1997-09-29|1997-11-13|1997-10-07|TAKE BACK RETURN|SHIP|rs above the accoun 1794|116434|1457|5|47|68170.21|0.10|0.06|N|O|1998-01-15|1997-11-30|1998-02-14|DELIVER IN PERSON|TRUCK| haggle slyly. furiously express orbit 1794|90185|2695|6|37|43481.66|0.01|0.01|N|O|1998-01-12|1997-12-21|1998-01-17|DELIVER IN PERSON|MAIL|ackages. pinto 1795|136200|6201|1|44|54392.80|0.08|0.08|A|F|1994-04-28|1994-05-24|1994-05-27|NONE|AIR|ites sleep carefully slyly p 1795|113154|3155|2|34|39683.10|0.08|0.00|A|F|1994-04-24|1994-06-01|1994-05-08|DELIVER IN PERSON|SHIP|closely regular instructions wake. 1795|167545|5094|3|25|40313.50|0.07|0.01|A|F|1994-05-18|1994-05-22|1994-05-20|TAKE BACK RETURN|RAIL|he always express accounts ca 1795|124863|2400|4|32|60411.52|0.03|0.06|R|F|1994-05-10|1994-04-21|1994-05-17|DELIVER IN PERSON|SHIP| asymptotes across the bold, 1795|162306|9855|5|11|15051.30|0.08|0.02|R|F|1994-06-19|1994-04-24|1994-07-02|TAKE BACK RETURN|TRUCK|slyly. special pa 1796|9888|9889|1|28|50340.64|0.08|0.04|A|F|1992-12-01|1993-01-01|1992-12-24|DELIVER IN PERSON|FOB|y quickly ironic accounts. 1796|184834|9871|2|8|15350.64|0.00|0.08|R|F|1993-01-07|1993-01-04|1993-01-10|NONE|SHIP|slyly bold accounts are furiously agains 1797|30639|3143|1|17|26683.71|0.01|0.02|N|O|1996-08-06|1996-07-11|1996-08-29|NONE|TRUCK| cajole carefully. unusual Tiresias e 1797|144215|6730|2|16|20147.36|0.01|0.00|N|O|1996-06-03|1996-07-21|1996-06-07|NONE|FOB|o beans wake regular accounts. blit 1797|11709|6712|3|21|34034.70|0.02|0.01|N|O|1996-08-05|1996-08-05|1996-08-06|DELIVER IN PERSON|AIR|ns. regular, regular deposit 1798|108858|8859|1|43|80274.55|0.01|0.08|N|O|1997-08-27|1997-10-23|1997-09-09|DELIVER IN PERSON|MAIL|ld packages sleep furiously. depend 1799|51159|3665|1|8|8881.20|0.04|0.08|R|F|1994-06-14|1994-05-27|1994-06-27|TAKE BACK RETURN|MAIL|ealms upon the special, ironic waters 1799|26318|3825|2|42|52261.02|0.02|0.02|R|F|1994-04-05|1994-04-28|1994-04-09|DELIVER IN PERSON|FOB|es pending 1824|119669|7203|1|45|75989.70|0.03|0.02|R|F|1994-08-21|1994-06-21|1994-09-19|NONE|RAIL|ent Tiresias. quickly express 1824|68198|5717|2|40|46647.60|0.10|0.03|A|F|1994-05-08|1994-07-24|1994-06-06|NONE|FOB|es mold furiously final instructions. s 1825|155040|71|1|43|47086.72|0.05|0.05|A|F|1994-02-18|1994-02-19|1994-03-02|TAKE BACK RETURN|RAIL| accounts breach fluffily spe 1825|147333|9848|2|39|53832.87|0.00|0.00|R|F|1994-04-01|1994-01-12|1994-04-21|DELIVER IN PERSON|REG AIR|ual, bold ideas haggle above the quickly ir 1825|16370|1373|3|7|9004.59|0.04|0.03|A|F|1994-01-02|1994-01-30|1994-01-30|TAKE BACK RETURN|REG AIR|fully ironic requests. 
requests cajole ex 1825|120223|5248|4|23|28594.06|0.05|0.01|R|F|1994-01-08|1994-02-08|1994-01-19|NONE|MAIL| wake express, even r 1825|177840|7841|5|33|63288.72|0.04|0.04|A|F|1993-12-07|1994-03-01|1993-12-16|TAKE BACK RETURN|RAIL|about the ne 1826|26601|4108|1|4|6110.40|0.06|0.00|R|F|1992-07-05|1992-06-12|1992-08-04|DELIVER IN PERSON|MAIL|alongside of the quickly unusual re 1826|67102|4621|2|9|9621.90|0.07|0.07|R|F|1992-07-12|1992-07-11|1992-07-15|DELIVER IN PERSON|TRUCK| blithely special 1826|175212|2764|3|14|18020.94|0.05|0.01|A|F|1992-04-28|1992-05-31|1992-05-25|COLLECT COD|TRUCK|uriously bold pinto beans are carefully ag 1826|179826|4861|4|6|11434.92|0.05|0.04|R|F|1992-06-30|1992-05-17|1992-07-30|DELIVER IN PERSON|RAIL|kages. blithely silent 1826|134617|2157|5|46|75974.06|0.05|0.06|R|F|1992-05-02|1992-06-25|1992-05-26|TAKE BACK RETURN|FOB|ously? quickly pe 1826|107352|2373|6|43|58452.05|0.02|0.03|A|F|1992-07-28|1992-06-14|1992-08-03|NONE|MAIL|ss tithes use even ideas. fluffily final t 1827|89460|6985|1|47|68124.62|0.00|0.01|N|O|1996-08-01|1996-08-07|1996-08-23|TAKE BACK RETURN|RAIL|. pending courts about the even e 1827|153091|8122|2|48|54916.32|0.03|0.05|N|O|1996-08-28|1996-09-15|1996-09-01|COLLECT COD|RAIL|oxes. special, final asymptote 1827|199818|9819|3|37|70958.97|0.01|0.07|N|O|1996-07-20|1996-08-18|1996-08-08|DELIVER IN PERSON|REG AIR|ously ironic theodolites serve quickly af 1827|126488|4025|4|4|6057.92|0.04|0.04|N|O|1996-07-22|1996-09-10|1996-08-11|DELIVER IN PERSON|RAIL|special requests. blithely 1827|79528|2036|5|24|36180.48|0.00|0.08|N|O|1996-08-07|1996-09-01|1996-09-04|DELIVER IN PERSON|SHIP|al gifts! re 1827|20469|470|6|7|9726.22|0.10|0.02|N|O|1996-08-28|1996-08-07|1996-08-31|DELIVER IN PERSON|AIR|egular foxes 1827|5933|5934|7|38|69879.34|0.05|0.01|N|O|1996-10-17|1996-08-29|1996-11-07|TAKE BACK RETURN|SHIP| blithely. express, bo 1828|99555|7083|1|33|51300.15|0.05|0.04|R|F|1994-06-27|1994-06-10|1994-07-24|COLLECT COD|FOB|s boost carefully. pending d 1828|12436|9940|2|40|53937.20|0.08|0.07|R|F|1994-05-05|1994-07-02|1994-05-19|COLLECT COD|REG AIR|s use above the quietly fin 1828|195860|5861|3|11|21514.46|0.07|0.08|R|F|1994-07-21|1994-05-28|1994-08-13|DELIVER IN PERSON|FOB| wake blithely 1828|7764|2765|4|45|75229.20|0.02|0.05|R|F|1994-05-15|1994-05-29|1994-05-28|COLLECT COD|RAIL| accounts run slyly 1828|78153|5675|5|14|15836.10|0.01|0.08|A|F|1994-05-20|1994-06-02|1994-05-25|TAKE BACK RETURN|SHIP|. final packages along the carefully bold 1829|149926|2441|1|12|23711.04|0.05|0.06|A|F|1994-08-23|1994-07-13|1994-09-04|DELIVER IN PERSON|FOB|ges wake furiously express pinto 1829|4612|4613|2|11|16682.71|0.04|0.05|A|F|1994-05-18|1994-06-13|1994-06-07|COLLECT COD|MAIL|ding orbits 1829|103502|8523|3|49|73769.50|0.09|0.08|A|F|1994-08-26|1994-08-01|1994-09-16|NONE|TRUCK|ound the quickly 1829|152994|2995|4|14|28657.86|0.03|0.06|A|F|1994-08-15|1994-06-08|1994-08-30|TAKE BACK RETURN|AIR|regular deposits alongside of the flu 1829|165795|8312|5|6|11164.74|0.02|0.07|A|F|1994-08-09|1994-08-05|1994-09-05|DELIVER IN PERSON|MAIL|s haggle! slyl 1829|114621|2155|6|36|58882.32|0.09|0.04|R|F|1994-06-10|1994-06-23|1994-06-22|NONE|FOB|ackages-- express requests sleep; pen 1830|119595|2107|1|38|61354.42|0.00|0.07|R|F|1995-04-20|1995-05-22|1995-04-24|TAKE BACK RETURN|TRUCK|ely even a 1830|24483|6986|2|9|12667.32|0.05|0.07|R|F|1995-03-09|1995-05-24|1995-03-14|NONE|SHIP|st furiously among 1830|81005|3514|3|36|35496.00|0.07|0.07|R|F|1995-04-21|1995-04-14|1995-05-10|DELIVER IN PERSON|SHIP| slowly unusual orbits. 
carefull 1831|135126|7640|1|9|10450.08|0.02|0.03|A|F|1993-12-17|1994-01-27|1993-12-26|NONE|TRUCK|mptotes. furiously regular dolphins al 1831|47035|7036|2|9|8838.27|0.07|0.06|R|F|1994-03-22|1994-01-07|1994-04-06|COLLECT COD|MAIL|ent deposits. regular saute 1831|114911|2445|3|17|32740.47|0.02|0.08|R|F|1994-01-18|1994-02-12|1994-01-30|TAKE BACK RETURN|MAIL|s boost ironic foxe 1831|94173|9192|4|23|26844.91|0.06|0.02|R|F|1993-12-21|1994-02-08|1994-01-04|NONE|SHIP|ests. express pinto beans abou 1856|54971|9982|1|10|19259.70|0.05|0.07|R|F|1992-05-11|1992-05-20|1992-06-02|TAKE BACK RETURN|FOB|he furiously even theodolites. account 1856|96908|1927|2|47|89530.30|0.07|0.07|R|F|1992-03-22|1992-06-09|1992-04-17|DELIVER IN PERSON|FOB|ingly blithe theodolites. slyly pending 1856|116508|4042|3|20|30490.00|0.04|0.06|R|F|1992-05-04|1992-05-06|1992-05-11|DELIVER IN PERSON|MAIL|ost carefully. slyly bold accounts 1856|149295|9296|4|22|29574.38|0.08|0.02|A|F|1992-05-02|1992-05-26|1992-05-20|TAKE BACK RETURN|REG AIR|platelets detect slyly regular packages. ca 1856|189673|9674|5|14|24677.38|0.01|0.01|A|F|1992-04-14|1992-05-02|1992-05-11|COLLECT COD|SHIP|ans are even requests. deposits caj 1856|22677|184|6|36|57588.12|0.03|0.05|A|F|1992-06-19|1992-05-12|1992-06-28|TAKE BACK RETURN|TRUCK|ly even foxes kindle blithely even realm 1856|129838|7375|7|42|78448.86|0.04|0.00|R|F|1992-05-23|1992-06-06|1992-06-19|COLLECT COD|RAIL|usly final deposits 1857|173807|3808|1|15|28212.00|0.10|0.03|R|F|1993-04-05|1993-02-28|1993-04-13|COLLECT COD|RAIL|egular, regular inst 1857|166343|8860|2|40|56373.60|0.10|0.00|R|F|1993-02-15|1993-03-08|1993-02-21|NONE|AIR|slyly close d 1857|118981|1493|3|8|15999.84|0.01|0.07|R|F|1993-01-27|1993-04-04|1993-02-20|TAKE BACK RETURN|AIR|slyly about the fluffily silent req 1857|99627|4646|4|41|66691.42|0.07|0.07|A|F|1993-04-16|1993-02-16|1993-04-18|NONE|REG AIR| the slyly 1858|13932|6434|1|33|60915.69|0.01|0.02|N|O|1997-12-28|1998-02-03|1998-01-13|NONE|RAIL|tect along the slyly final 1859|74969|4970|1|18|34991.28|0.10|0.00|N|O|1997-08-08|1997-06-30|1997-08-26|TAKE BACK RETURN|SHIP|e carefully a 1859|187741|5296|2|36|65834.64|0.02|0.01|N|O|1997-05-05|1997-07-08|1997-05-25|TAKE BACK RETURN|REG AIR|regular requests. carefully unusual theo 1859|157708|5254|3|5|8828.50|0.06|0.03|N|O|1997-06-20|1997-05-20|1997-07-19|TAKE BACK RETURN|AIR|across the p 1859|190876|877|4|21|41304.27|0.00|0.03|N|O|1997-08-06|1997-05-29|1997-08-26|TAKE BACK RETURN|REG AIR|lar packages wake quickly exp 1859|45333|7838|5|11|14061.63|0.06|0.06|N|O|1997-07-15|1997-06-05|1997-07-29|TAKE BACK RETURN|SHIP|ffily ironic pac 1859|104305|6816|6|12|15711.60|0.08|0.03|N|O|1997-05-22|1997-06-08|1997-06-07|COLLECT COD|TRUCK|es. unusual, silent request 1860|112942|2943|1|9|17594.46|0.04|0.04|N|O|1996-08-03|1996-05-31|1996-08-04|DELIVER IN PERSON|TRUCK|c realms print carefully car 1861|67093|2106|1|7|7420.63|0.08|0.05|A|F|1994-01-14|1994-04-03|1994-01-16|COLLECT COD|RAIL|s foxes. slyly 1861|26765|6766|2|31|52444.56|0.10|0.05|R|F|1994-01-29|1994-03-07|1994-02-15|TAKE BACK RETURN|RAIL|arefully unusual 1861|23676|6179|3|23|36792.41|0.00|0.08|A|F|1994-04-09|1994-03-04|1994-04-11|DELIVER IN PERSON|MAIL|in packages sleep silent dolphins; sly 1861|115108|2642|4|38|42677.80|0.10|0.05|R|F|1994-02-26|1994-02-05|1994-03-01|NONE|RAIL|pending deposits cajole quic 1861|15110|113|5|2|2050.22|0.03|0.08|R|F|1994-04-26|1994-03-15|1994-05-15|TAKE BACK RETURN|MAIL|e final, regular requests. 
carefully 1862|29629|2132|1|41|63903.42|0.10|0.00|N|O|1998-06-05|1998-05-17|1998-07-04|COLLECT COD|FOB| carefully along 1862|165414|5415|2|37|54738.17|0.06|0.02|N|O|1998-04-15|1998-05-15|1998-05-14|TAKE BACK RETURN|MAIL|l deposits. carefully even dep 1862|103981|1512|3|26|51609.48|0.02|0.01|N|O|1998-03-25|1998-05-17|1998-04-17|TAKE BACK RETURN|TRUCK|g carefully: thinly ironic deposits af 1863|62472|4979|1|48|68854.56|0.09|0.04|A|F|1993-10-10|1993-12-09|1993-10-19|NONE|FOB|ans hinder furiou 1863|156951|1982|2|48|96381.60|0.04|0.08|A|F|1993-11-08|1993-11-05|1993-12-08|COLLECT COD|AIR|onic theodolites alongside of the pending a 1888|97643|153|1|27|44297.28|0.03|0.06|R|F|1994-02-13|1994-01-16|1994-02-25|NONE|REG AIR|. carefully special dolphins sle 1888|73403|3404|2|38|52303.20|0.03|0.03|R|F|1993-11-29|1994-01-16|1993-12-08|TAKE BACK RETURN|TRUCK|dazzle carefull 1888|79623|9624|3|49|78528.38|0.07|0.05|A|F|1994-02-27|1994-01-14|1994-03-28|DELIVER IN PERSON|FOB|lar accounts haggle carefu 1888|18538|8539|4|9|13108.77|0.01|0.04|A|F|1994-02-09|1994-01-22|1994-02-19|NONE|AIR| packages are blithely. carefu 1888|159509|9510|5|4|6274.00|0.03|0.06|R|F|1993-12-28|1993-12-19|1994-01-11|COLLECT COD|FOB|lphins. ironically special theodolit 1888|52536|7547|6|48|71449.44|0.08|0.08|R|F|1994-02-28|1993-12-16|1994-03-15|COLLECT COD|TRUCK|ar ideas cajole. regular p 1888|166333|8850|7|50|69966.50|0.04|0.07|R|F|1993-12-22|1994-01-10|1994-01-06|DELIVER IN PERSON|FOB|ependencies affix blithely regular warhors 1889|151174|8720|1|41|50231.97|0.10|0.02|N|O|1997-06-15|1997-05-10|1997-07-08|NONE|AIR|s! furiously pending r 1889|171604|1605|2|13|21782.80|0.05|0.00|N|O|1997-06-12|1997-04-28|1997-06-23|NONE|REG AIR|to the regular accounts. carefully express 1889|137156|2183|3|36|42953.40|0.05|0.07|N|O|1997-05-19|1997-06-14|1997-05-23|NONE|SHIP|l pinto beans kindle 1889|167656|2689|4|5|8618.25|0.02|0.07|N|O|1997-06-26|1997-06-09|1997-07-21|COLLECT COD|AIR|ording to the blithely silent r 1890|140281|2796|1|26|34353.28|0.03|0.07|N|O|1997-04-02|1997-03-13|1997-04-22|DELIVER IN PERSON|FOB|ngage. slyly ironic 1890|99403|9404|2|43|60303.20|0.07|0.03|N|O|1996-12-30|1997-01-31|1997-01-19|DELIVER IN PERSON|FOB|p ironic, express accounts. fu 1890|58873|6389|3|24|43964.88|0.06|0.04|N|O|1997-02-09|1997-02-10|1997-02-12|COLLECT COD|MAIL|is wake carefully above the even id 1890|67265|7266|4|43|52987.18|0.09|0.04|N|O|1997-04-08|1997-02-19|1997-04-30|TAKE BACK RETURN|FOB|lyly. instructions across the furiously 1890|121175|1176|5|45|53827.65|0.08|0.05|N|O|1997-04-15|1997-03-16|1997-04-19|COLLECT COD|FOB|he carefully regular sauternes. ironic fret 1890|180789|3308|6|16|29916.48|0.08|0.02|N|O|1997-02-13|1997-02-18|1997-03-12|TAKE BACK RETURN|TRUCK|ged pinto beans. regular, regular id 1890|120205|7742|7|10|12252.00|0.01|0.04|N|O|1996-12-24|1997-02-19|1997-01-01|DELIVER IN PERSON|AIR|. 
even, unusual inst 1891|76099|6100|1|45|48379.05|0.07|0.04|A|F|1994-12-20|1995-01-16|1995-01-05|NONE|RAIL|ests along 1891|183091|5610|2|18|21133.62|0.06|0.00|A|F|1995-01-24|1995-01-29|1995-02-14|NONE|RAIL| foxes above the carefu 1891|197159|7160|3|15|18842.25|0.03|0.00|R|F|1995-03-11|1995-03-05|1995-03-18|TAKE BACK RETURN|MAIL| accounts are furiou 1892|112777|5289|1|48|85908.96|0.02|0.01|A|F|1994-06-16|1994-06-16|1994-06-28|NONE|RAIL|tornis detect regul 1892|42003|9516|2|35|33075.00|0.04|0.08|R|F|1994-04-05|1994-05-09|1994-05-03|NONE|MAIL|hes nod furiously around the instruc 1892|133278|8305|3|37|48516.99|0.10|0.03|R|F|1994-04-11|1994-06-04|1994-04-24|TAKE BACK RETURN|SHIP|nts. slyly regular asymptot 1892|196964|9484|4|14|28853.44|0.06|0.07|R|F|1994-04-08|1994-06-12|1994-04-27|DELIVER IN PERSON|FOB|furiously about the furiously 1893|98266|776|1|43|54363.18|0.10|0.00|N|O|1998-01-25|1998-01-06|1998-02-14|COLLECT COD|SHIP|he carefully regular 1893|147335|7336|2|49|67734.17|0.03|0.05|N|O|1998-01-19|1998-01-28|1998-02-02|TAKE BACK RETURN|FOB|y final foxes bo 1893|44119|4120|3|3|3189.33|0.03|0.02|N|O|1998-02-10|1998-01-18|1998-02-25|DELIVER IN PERSON|MAIL|gular, even ideas. fluffily bol 1893|100605|5626|4|18|28900.80|0.07|0.06|N|O|1998-01-24|1998-01-12|1998-02-13|TAKE BACK RETURN|RAIL|g packages. fluffily final reques 1893|52070|2071|5|6|6132.42|0.10|0.02|N|O|1998-01-23|1997-12-22|1998-02-09|DELIVER IN PERSON|TRUCK|ar accounts use. daringly ironic packag 1894|168292|8293|1|40|54411.60|0.03|0.07|R|F|1992-06-07|1992-05-11|1992-07-01|DELIVER IN PERSON|FOB|ily furiously bold packages. flu 1895|160192|7741|1|43|53844.17|0.09|0.07|R|F|1994-07-26|1994-07-19|1994-08-11|NONE|AIR| carefully eve 1920|95392|5393|1|24|33297.36|0.04|0.05|N|O|1998-09-27|1998-08-23|1998-10-15|DELIVER IN PERSON|AIR|thely. bold, pend 1920|50707|5718|2|31|51388.70|0.05|0.06|N|O|1998-08-01|1998-08-30|1998-08-17|COLLECT COD|SHIP|lly. ideas wa 1920|17189|9691|3|6|6637.08|0.01|0.05|N|O|1998-10-01|1998-08-20|1998-10-24|COLLECT COD|SHIP|l ideas boost slyly pl 1920|83349|3350|4|50|66617.00|0.09|0.06|N|O|1998-10-03|1998-08-04|1998-10-29|DELIVER IN PERSON|MAIL|e blithely unusual foxes. brave packages 1920|33789|1299|5|14|24118.92|0.08|0.05|N|O|1998-10-22|1998-08-10|1998-10-27|DELIVER IN PERSON|AIR|ickly ironic d 1921|20643|5648|1|9|14072.76|0.08|0.00|R|F|1994-02-01|1994-03-20|1994-03-01|DELIVER IN PERSON|FOB|to beans. even excuses integrate specia 1921|139685|2199|2|21|36218.28|0.02|0.06|R|F|1994-02-08|1994-03-28|1994-02-15|COLLECT COD|FOB|ckly regula 1921|70822|823|3|27|48406.14|0.00|0.04|A|F|1994-04-26|1994-04-07|1994-04-30|TAKE BACK RETURN|FOB|ing pinto beans above the pend 1922|9775|4776|1|13|21902.01|0.05|0.03|N|O|1996-10-24|1996-09-21|1996-11-15|NONE|SHIP|quests. furiously 1923|36198|6199|1|9|10207.71|0.01|0.08|N|O|1997-08-29|1997-09-13|1997-09-07|NONE|FOB|lites. ironic instructions integrate bravel 1923|177330|9848|2|23|32368.59|0.07|0.05|N|O|1997-09-08|1997-08-11|1997-09-14|TAKE BACK RETURN|MAIL|aggle carefully. furiously permanent 1923|179628|9629|3|11|18783.82|0.03|0.03|N|O|1997-07-12|1997-09-04|1997-08-01|TAKE BACK RETURN|REG AIR|ages wake slyly about the furiously regular 1923|192622|5142|4|49|84016.38|0.06|0.05|N|O|1997-07-21|1997-08-08|1997-07-26|NONE|AIR|de of the carefully expre 1923|183057|8094|5|25|28501.25|0.10|0.08|N|O|1997-08-18|1997-08-20|1997-09-12|DELIVER IN PERSON|TRUCK|the ideas: slyly pendin 1923|36955|9459|6|50|94597.50|0.03|0.03|N|O|1997-11-04|1997-08-08|1997-11-25|NONE|TRUCK|uickly along the bold courts. 
bold the 1924|72567|89|1|7|10776.92|0.06|0.07|N|O|1997-01-01|1996-12-02|1997-01-08|COLLECT COD|SHIP|osits. even accounts nag furious 1924|17366|4870|2|47|60317.92|0.02|0.06|N|O|1996-11-24|1996-10-18|1996-12-13|COLLECT COD|REG AIR|silent requests cajole blithely final pack 1924|56788|6789|3|40|69791.20|0.04|0.08|N|O|1996-10-31|1996-11-30|1996-11-21|NONE|REG AIR|ains sleep carefully 1924|33179|8186|4|31|34477.27|0.03|0.03|N|O|1996-09-20|1996-10-19|1996-10-19|DELIVER IN PERSON|SHIP| the slyly regular foxes. ruthle 1924|35419|426|5|17|23024.97|0.04|0.05|N|O|1996-12-31|1996-11-12|1997-01-25|COLLECT COD|TRUCK|e carefully theodolites. ironically ironic 1924|75709|3231|6|15|25270.50|0.02|0.04|N|O|1997-01-04|1996-11-13|1997-01-27|NONE|SHIP|he package 1924|39531|9532|7|21|30881.13|0.09|0.03|N|O|1996-09-21|1996-11-12|1996-10-02|TAKE BACK RETURN|AIR| blithely reg 1925|183718|8755|1|50|90085.50|0.01|0.02|R|F|1992-04-12|1992-04-23|1992-05-08|TAKE BACK RETURN|TRUCK|usual pinto 1925|134273|1813|2|35|45754.45|0.06|0.06|R|F|1992-05-11|1992-04-10|1992-05-14|TAKE BACK RETURN|AIR|counts. carefully ironic packages boost ab 1925|115014|7526|3|40|41160.40|0.08|0.08|A|F|1992-05-17|1992-05-20|1992-06-08|TAKE BACK RETURN|AIR|e carefully regul 1925|29757|2260|4|17|28674.75|0.06|0.02|R|F|1992-05-18|1992-04-06|1992-06-16|TAKE BACK RETURN|MAIL|instructions sleep. pinto bea 1926|50307|2813|1|24|30175.20|0.06|0.05|N|O|1996-05-04|1996-03-14|1996-06-01|DELIVER IN PERSON|RAIL|e theodolites. 1926|105027|7538|2|29|29928.58|0.09|0.08|N|O|1996-02-26|1996-03-14|1996-03-14|TAKE BACK RETURN|TRUCK|es. dependencies according to the fl 1926|177645|5197|3|10|17226.40|0.02|0.03|N|O|1996-05-23|1996-03-02|1996-06-04|NONE|AIR|usly bold accounts. express accounts 1926|67772|7773|4|13|22617.01|0.04|0.02|N|O|1996-04-26|1996-04-13|1996-05-08|DELIVER IN PERSON|MAIL|eans wake bli 1926|39928|4935|5|29|54169.68|0.06|0.00|N|O|1996-02-29|1996-03-13|1996-03-24|DELIVER IN PERSON|MAIL|hily unusual packages are fluffily am 1927|67893|2906|1|3|5582.67|0.00|0.05|N|O|1995-10-06|1995-12-08|1995-11-05|COLLECT COD|FOB|ccounts affi 1927|72462|7477|2|15|21516.90|0.08|0.08|N|O|1995-12-25|1995-12-26|1995-12-31|COLLECT COD|RAIL| carefully regular requests sleep car 1927|64783|2302|3|6|10486.68|0.05|0.05|N|O|1995-11-29|1995-11-20|1995-12-08|TAKE BACK RETURN|TRUCK|furiously even wat 1952|52059|7070|1|7|7077.35|0.04|0.05|A|F|1994-05-06|1994-06-11|1994-05-12|NONE|RAIL|about the express, even requ 1952|141675|6704|2|6|10300.02|0.06|0.05|A|F|1994-05-09|1994-05-21|1994-05-26|DELIVER IN PERSON|AIR|packages haggle. 1953|127615|5152|1|25|41065.25|0.07|0.06|A|F|1994-01-07|1994-01-28|1994-01-29|TAKE BACK RETURN|RAIL|ular, regular i 1953|13880|3881|2|35|62785.80|0.06|0.06|R|F|1994-02-03|1994-02-25|1994-02-14|DELIVER IN PERSON|FOB|among the fur 1954|151270|6301|1|31|40959.37|0.06|0.06|N|O|1997-08-18|1997-07-07|1997-09-03|DELIVER IN PERSON|RAIL|against the packages. bold, ironic e 1954|181175|6212|2|1|1256.17|0.03|0.01|N|O|1997-09-16|1997-07-08|1997-10-07|COLLECT COD|MAIL|te. furiously final deposits hag 1954|198665|3704|3|11|19400.26|0.07|0.07|N|O|1997-08-07|1997-07-23|1997-08-25|DELIVER IN PERSON|TRUCK|y carefully ironi 1954|158218|3249|4|12|15314.52|0.02|0.08|N|O|1997-07-19|1997-07-04|1997-08-06|COLLECT COD|AIR|ongside of the slyly unusual requests. 
reg 1954|169702|4735|5|29|51379.30|0.08|0.08|N|O|1997-08-25|1997-07-15|1997-09-02|DELIVER IN PERSON|RAIL|use thinly furiously regular asy 1954|176272|6273|6|13|17527.51|0.00|0.07|N|O|1997-06-15|1997-08-22|1997-06-20|TAKE BACK RETURN|MAIL|y ironic instructions cajole 1954|193476|3477|7|49|76904.03|0.05|0.06|N|O|1997-06-04|1997-08-29|1997-06-14|COLLECT COD|TRUCK|eans. final pinto beans sleep furiousl 1955|136052|8566|1|32|34817.60|0.02|0.02|A|F|1992-07-05|1992-06-29|1992-08-03|TAKE BACK RETURN|TRUCK|g to the carefully sile 1955|17074|4578|2|2|1982.14|0.03|0.01|R|F|1992-07-06|1992-07-06|1992-08-01|COLLECT COD|TRUCK|ickly aroun 1955|157697|213|3|41|71942.29|0.08|0.06|A|F|1992-08-01|1992-06-04|1992-08-07|COLLECT COD|AIR| carefully against the furiously reg 1955|8245|3246|4|16|18451.84|0.03|0.07|A|F|1992-04-30|1992-06-23|1992-05-23|TAKE BACK RETURN|FOB|odolites eat s 1955|158432|8433|5|11|16394.73|0.09|0.01|A|F|1992-06-03|1992-07-04|1992-06-07|NONE|REG AIR|ously quickly pendi 1956|176350|6351|1|8|11410.80|0.02|0.04|A|F|1992-12-25|1992-11-24|1993-01-12|TAKE BACK RETURN|AIR|efully about the ironic, ironic de 1956|102954|5465|2|16|31311.20|0.00|0.05|R|F|1992-11-11|1992-11-11|1992-11-30|NONE|FOB|es cajole blithely. pen 1956|138920|1434|3|39|76397.88|0.08|0.02|A|F|1992-09-24|1992-11-26|1992-10-15|DELIVER IN PERSON|REG AIR|r theodolites sleep above the b 1956|28284|8285|4|11|13335.08|0.10|0.00|A|F|1992-12-19|1992-10-29|1993-01-07|TAKE BACK RETURN|AIR| the braids slee 1956|154900|9931|5|16|31278.40|0.08|0.02|R|F|1992-09-28|1992-10-21|1992-09-30|TAKE BACK RETURN|FOB| wake after the 1957|78284|792|1|50|63114.00|0.09|0.05|N|O|1998-08-08|1998-09-28|1998-08-27|COLLECT COD|FOB|gainst the re 1957|118027|539|2|31|32395.62|0.10|0.08|N|O|1998-08-13|1998-08-31|1998-08-16|NONE|REG AIR|express packages maintain fluffi 1958|72052|7067|1|9|9216.45|0.01|0.05|N|O|1995-12-08|1995-12-17|1995-12-18|DELIVER IN PERSON|REG AIR|ickly. slyly bold 1958|175065|5066|2|29|33061.74|0.05|0.06|N|O|1996-01-19|1995-12-05|1996-02-14|COLLECT COD|SHIP|d pinto beans 1958|101773|1774|3|4|7099.08|0.04|0.02|N|O|1995-10-24|1995-12-09|1995-10-28|DELIVER IN PERSON|AIR|he slyly even dependencies 1958|82603|7620|4|38|60252.80|0.09|0.07|N|O|1995-10-09|1995-11-26|1995-11-05|COLLECT COD|TRUCK|yly. slyly regular courts use silentl 1958|100912|8443|5|31|59300.21|0.08|0.01|N|O|1995-10-31|1995-11-12|1995-11-07|TAKE BACK RETURN|TRUCK|r deposits c 1958|16175|1178|6|44|48011.48|0.08|0.04|N|O|1995-12-17|1995-11-30|1996-01-15|TAKE BACK RETURN|RAIL|c theodolites after the unusual deposit 1958|38399|5909|7|29|38784.31|0.02|0.05|N|O|1995-10-14|1995-11-06|1995-11-01|NONE|REG AIR|final requests nag according to the 1959|168675|8676|1|46|80208.82|0.04|0.00|N|O|1997-05-05|1997-03-03|1997-05-24|TAKE BACK RETURN|AIR| furiously ex 1959|119439|4462|2|15|21876.45|0.08|0.07|N|O|1997-01-20|1997-02-18|1997-02-08|DELIVER IN PERSON|MAIL| quickly sp 1984|52054|9570|1|45|45272.25|0.03|0.04|N|O|1998-04-09|1998-06-11|1998-05-01|COLLECT COD|AIR|p. quickly final ideas sle 1984|69990|5003|2|35|68599.65|0.01|0.07|N|O|1998-05-18|1998-05-04|1998-06-01|COLLECT COD|RAIL|tes. quickly pending packages haggle boldl 1985|27157|4664|1|33|35776.95|0.10|0.03|R|F|1994-12-04|1994-11-01|1994-12-05|DELIVER IN PERSON|FOB|s are express packages. pendin 1985|20393|2896|2|50|65669.50|0.04|0.02|R|F|1994-09-30|1994-10-18|1994-10-12|COLLECT COD|AIR|ate carefully. carefully 1985|133207|5721|3|20|24804.00|0.07|0.03|R|F|1994-10-29|1994-11-12|1994-11-27|NONE|TRUCK|regular requests. 
furiously express 1985|198140|8141|4|30|37144.20|0.05|0.07|R|F|1994-09-06|1994-10-10|1994-09-26|NONE|RAIL|uickly. instr 1985|123819|6332|5|42|77398.02|0.05|0.05|R|F|1994-10-25|1994-11-03|1994-11-19|DELIVER IN PERSON|SHIP| patterns? final requests after the sp 1985|19676|4679|6|2|3191.34|0.02|0.00|A|F|1994-11-25|1994-10-09|1994-12-25|TAKE BACK RETURN|FOB| silent inst 1986|91481|1482|1|12|17669.76|0.06|0.05|A|F|1994-08-17|1994-06-28|1994-09-02|COLLECT COD|RAIL|sleep furiously fluffily final 1986|104675|7186|2|10|16796.70|0.10|0.03|R|F|1994-05-14|1994-06-21|1994-06-02|COLLECT COD|REG AIR|yly into the carefully even 1986|62714|5221|3|14|23473.94|0.04|0.02|R|F|1994-07-14|1994-06-19|1994-08-08|NONE|SHIP|the packages. pending, unusual 1987|15018|2522|1|7|6531.07|0.03|0.03|A|F|1994-07-30|1994-07-06|1994-08-29|NONE|REG AIR| regular a 1988|71007|6022|1|36|35208.00|0.09|0.04|N|O|1996-01-21|1995-11-24|1996-01-27|NONE|RAIL|gular theodolites. 1988|198445|6003|2|19|29325.36|0.08|0.08|N|O|1996-02-03|1995-12-10|1996-02-14|COLLECT COD|FOB|lly about the slyly thin instructions. f 1988|53790|1306|3|8|13950.32|0.06|0.01|N|O|1995-10-20|1995-11-11|1995-11-18|DELIVER IN PERSON|AIR|le quickly ac 1988|35669|8173|4|27|43325.82|0.08|0.00|N|O|1996-01-27|1995-12-24|1996-02-24|TAKE BACK RETURN|TRUCK|uests. regular requests are according to t 1988|78175|3190|5|26|29982.42|0.08|0.04|N|O|1996-01-25|1995-12-15|1996-01-26|COLLECT COD|SHIP| ironic dolphins haggl 1988|85943|3468|6|9|17360.46|0.08|0.03|N|O|1995-12-26|1996-01-02|1996-01-25|DELIVER IN PERSON|MAIL|lar platelets. slyly ironic packa 1989|9871|7372|1|47|83700.89|0.10|0.02|R|F|1994-06-21|1994-05-27|1994-06-22|TAKE BACK RETURN|REG AIR|final deposits s 1990|100961|962|1|46|90250.16|0.01|0.07|R|F|1994-12-29|1995-03-14|1995-01-13|NONE|TRUCK|ar sentiments. 1991|109006|1517|1|39|39585.00|0.06|0.02|A|F|1993-01-01|1992-11-29|1993-01-10|TAKE BACK RETURN|TRUCK|ckages? carefully bold depos 1991|52810|5316|2|49|86377.69|0.08|0.06|R|F|1992-10-19|1992-11-29|1992-10-25|NONE|SHIP|nd the ideas affi 1991|173555|3556|3|6|9771.30|0.02|0.01|A|F|1992-11-02|1992-10-08|1992-11-14|TAKE BACK RETURN|REG AIR|hes nag slyly 1991|137375|2402|4|6|8474.22|0.10|0.06|A|F|1992-11-21|1992-11-03|1992-11-27|NONE|RAIL|uickly blithely final de 1991|59310|1816|5|49|62196.19|0.06|0.00|R|F|1992-09-10|1992-11-30|1992-10-07|NONE|AIR|quests cajole blithely 2016|146003|8518|1|2|2098.00|0.02|0.07|N|O|1996-10-12|1996-11-09|1996-10-31|DELIVER IN PERSON|TRUCK|carefully according to the 2016|62672|191|2|15|24520.05|0.04|0.05|N|O|1996-09-24|1996-10-05|1996-10-21|TAKE BACK RETURN|MAIL|uests haggle carefully furiously regul 2016|121423|3936|3|8|11555.36|0.09|0.02|N|O|1996-09-19|1996-10-21|1996-10-13|TAKE BACK RETURN|SHIP|mptotes haggle ideas. packages wake flu 2017|102545|2546|1|49|75829.46|0.10|0.06|N|O|1998-05-26|1998-07-01|1998-06-06|COLLECT COD|TRUCK| after the unusual instructions. 
sly 2017|70072|73|2|14|14588.98|0.07|0.04|N|O|1998-06-28|1998-06-15|1998-07-11|NONE|TRUCK|ily final w 2017|83446|8463|3|11|15723.84|0.05|0.02|N|O|1998-05-22|1998-07-13|1998-05-26|TAKE BACK RETURN|TRUCK|gside of the slyly dogged dolp 2018|194201|4202|1|2|2590.40|0.02|0.07|N|O|1995-06-25|1995-06-20|1995-07-04|NONE|TRUCK|ly ironic accounts against the slyly sly 2018|128111|8112|2|23|26199.53|0.05|0.01|R|F|1995-05-05|1995-05-12|1995-05-22|TAKE BACK RETURN|RAIL|ingly even theodolites s 2019|3228|8229|1|31|35067.82|0.07|0.03|R|F|1992-11-18|1992-12-26|1992-11-24|DELIVER IN PERSON|FOB|l ideas across the slowl 2019|51852|6863|2|18|32469.30|0.04|0.03|R|F|1993-01-24|1992-12-22|1993-02-02|NONE|MAIL|are carefully furiously regular requ 2020|33521|1031|1|50|72726.00|0.06|0.01|R|F|1993-07-12|1993-08-28|1993-08-02|COLLECT COD|TRUCK|ts against the pending ideas serve along 2020|175666|3218|2|40|69666.40|0.09|0.00|A|F|1993-10-17|1993-09-14|1993-10-29|TAKE BACK RETURN|RAIL|ently across the 2020|13808|1312|3|30|51654.00|0.07|0.04|A|F|1993-09-08|1993-08-11|1993-09-29|TAKE BACK RETURN|AIR|ly about the blithely ironic foxes. bold 2020|60852|5865|4|27|48946.95|0.05|0.06|A|F|1993-07-14|1993-09-02|1993-08-03|NONE|FOB|e of the bold foxes haggle 2021|84701|2226|1|7|11799.90|0.08|0.04|N|O|1995-10-17|1995-09-29|1995-10-20|NONE|MAIL| accounts boost blithely. blithely reg 2021|165276|309|2|19|25484.13|0.04|0.05|N|O|1995-08-14|1995-09-05|1995-08-23|NONE|RAIL| above the slyly fl 2022|168410|927|1|38|56179.58|0.00|0.08|R|F|1992-07-05|1992-04-20|1992-07-13|TAKE BACK RETURN|REG AIR| against the express accounts wake ca 2022|54766|7272|2|38|65388.88|0.05|0.04|R|F|1992-06-17|1992-05-15|1992-06-28|COLLECT COD|SHIP|instructions dazzle carefull 2022|48896|8897|3|48|88554.72|0.10|0.02|A|F|1992-06-14|1992-06-04|1992-07-12|DELIVER IN PERSON|SHIP|counts. slyly enticing accounts are during 2022|181336|1337|4|16|22677.28|0.05|0.03|R|F|1992-06-23|1992-05-22|1992-07-07|NONE|TRUCK|ages wake slyly care 2022|99490|9491|5|36|53621.64|0.05|0.02|R|F|1992-03-24|1992-05-07|1992-04-13|NONE|MAIL|ly after the foxes. regular, final inst 2022|128142|5679|6|20|23402.80|0.08|0.08|A|F|1992-03-31|1992-04-17|1992-04-02|NONE|SHIP|r deposits kindle 2022|77154|7155|7|13|14704.95|0.06|0.08|R|F|1992-04-04|1992-05-30|1992-04-21|NONE|FOB| orbits haggle fluffily fl 2023|126248|3785|1|9|11468.16|0.05|0.04|R|F|1992-06-04|1992-06-30|1992-06-10|NONE|AIR|ly regular pinto beans poa 2023|37736|5246|2|2|3347.46|0.01|0.00|R|F|1992-08-27|1992-07-16|1992-08-29|DELIVER IN PERSON|RAIL|ing packages. fluffily silen 2023|18539|3542|3|25|36438.25|0.10|0.03|A|F|1992-07-19|1992-07-07|1992-08-15|NONE|REG AIR| wake furiously among the slyly final 2023|184818|9855|4|9|17125.29|0.02|0.00|A|F|1992-07-23|1992-07-04|1992-08-20|TAKE BACK RETURN|AIR|nts maintain blithely alongside of the 2023|19435|6939|5|22|29797.46|0.04|0.06|A|F|1992-06-15|1992-07-13|1992-06-21|TAKE BACK RETURN|SHIP|ronic attainments. 2023|42824|337|6|29|51237.78|0.02|0.06|A|F|1992-08-29|1992-07-28|1992-09-18|COLLECT COD|RAIL|usual instructions. bli 2023|133971|6485|7|50|100248.50|0.00|0.03|R|F|1992-06-20|1992-07-04|1992-06-23|DELIVER IN PERSON|FOB|its! carefully ex 2048|34296|6800|1|7|8612.03|0.06|0.01|R|F|1993-12-07|1994-01-31|1994-01-05|TAKE BACK RETURN|REG AIR|lent platelets boost deposits. 
carefully sp 2048|7372|4873|2|5|6396.85|0.04|0.04|A|F|1994-01-18|1994-02-01|1994-01-29|TAKE BACK RETURN|TRUCK|affix carefully against 2048|100536|537|3|12|18438.36|0.01|0.05|R|F|1994-01-28|1994-01-19|1994-02-08|NONE|AIR| even theodoli 2048|96292|3820|4|11|14171.19|0.10|0.03|R|F|1993-12-20|1994-01-19|1994-01-04|TAKE BACK RETURN|MAIL|totes. idly ironic packages nag 2049|188067|8068|1|25|28876.50|0.08|0.00|N|O|1996-03-31|1996-02-29|1996-04-15|DELIVER IN PERSON|MAIL| excuses above the 2049|34609|7113|2|31|47851.60|0.10|0.05|N|O|1995-12-25|1996-02-25|1995-12-29|TAKE BACK RETURN|MAIL| packages are slyly alongside 2049|66556|9063|3|18|27405.90|0.05|0.05|N|O|1996-01-09|1996-01-22|1996-01-25|TAKE BACK RETURN|AIR| sleep fluffily. dependencies use never 2049|5410|5411|4|39|51300.99|0.02|0.05|N|O|1996-01-17|1996-01-21|1996-02-03|TAKE BACK RETURN|MAIL|the even pinto beans 2049|125275|7788|5|30|39008.10|0.04|0.06|N|O|1995-12-16|1996-02-04|1995-12-22|NONE|TRUCK|ial accounts are among the furiously perma 2049|83083|8100|6|17|18123.36|0.07|0.00|N|O|1996-02-04|1996-03-01|1996-02-24|NONE|FOB|al, regular foxes. pending, 2050|72622|7637|1|47|74947.14|0.05|0.03|A|F|1994-08-25|1994-07-18|1994-09-15|DELIVER IN PERSON|TRUCK|tside the blithely pending packages eat f 2050|151587|1588|2|48|78651.84|0.05|0.01|A|F|1994-09-30|1994-08-23|1994-10-29|COLLECT COD|AIR| final packages. pinto 2050|112099|2100|3|41|45554.69|0.10|0.04|A|F|1994-06-08|1994-08-27|1994-06-23|NONE|AIR| final theodolites. depende 2050|31521|9031|4|11|15977.72|0.02|0.01|A|F|1994-07-27|1994-08-18|1994-08-02|DELIVER IN PERSON|REG AIR|ns. bold, final ideas cajole among the fi 2050|167826|7827|5|16|30301.12|0.07|0.01|R|F|1994-08-17|1994-07-28|1994-09-05|DELIVER IN PERSON|REG AIR|al accounts. closely even 2050|48412|3421|6|29|39451.89|0.00|0.05|A|F|1994-09-23|1994-08-01|1994-10-23|TAKE BACK RETURN|MAIL|oxes alongsid 2050|47268|9773|7|25|30381.50|0.10|0.00|R|F|1994-08-18|1994-07-04|1994-09-04|TAKE BACK RETURN|RAIL|y according to 2051|24284|4285|1|43|51956.04|0.08|0.04|N|O|1996-04-22|1996-06-16|1996-04-28|COLLECT COD|RAIL|ounts sleep fluffily even requ 2051|129342|9343|2|48|65824.32|0.01|0.02|N|O|1996-05-04|1996-06-14|1996-05-19|NONE|TRUCK|unts. pending platelets believe about 2052|67826|333|1|50|89691.00|0.09|0.08|R|F|1992-06-22|1992-06-03|1992-07-19|DELIVER IN PERSON|AIR|wake after the decoy 2052|134863|7377|2|35|66425.10|0.09|0.05|A|F|1992-05-29|1992-05-24|1992-06-11|NONE|TRUCK|ts according t 2052|42203|9716|3|16|18323.20|0.01|0.08|A|F|1992-06-30|1992-07-09|1992-07-12|NONE|SHIP|y final deposits cajole according 2052|95911|5912|4|47|89624.77|0.08|0.01|A|F|1992-06-18|1992-05-16|1992-07-02|TAKE BACK RETURN|REG AIR|final requests. stealt 2053|100687|3198|1|20|33753.60|0.09|0.00|A|F|1995-04-25|1995-04-12|1995-05-13|NONE|TRUCK|ly ironic foxes haggle slyly speci 2053|32300|2301|2|34|41898.20|0.07|0.00|A|F|1995-03-15|1995-03-20|1995-04-09|TAKE BACK RETURN|TRUCK|ions. unusual dependencies 2053|64279|9292|3|46|57190.42|0.01|0.03|R|F|1995-04-01|1995-04-02|1995-04-18|NONE|RAIL|tions. furiously even requests hagg 2053|120953|3466|4|31|61192.45|0.06|0.08|R|F|1995-03-23|1995-03-13|1995-04-16|DELIVER IN PERSON|SHIP|ts. fluffily final mul 2054|112318|2319|1|11|14633.41|0.03|0.05|R|F|1992-08-13|1992-08-26|1992-08-22|NONE|AIR|ular accou 2054|119303|4326|2|31|40991.30|0.05|0.08|A|F|1992-08-18|1992-09-04|1992-08-24|NONE|FOB|se bold, regular accounts. unusual depos 2054|120984|985|3|32|64159.36|0.06|0.00|A|F|1992-06-23|1992-07-08|1992-07-22|NONE|FOB| packages thrash. 
carefully final 2054|173988|9023|4|14|28867.72|0.10|0.05|R|F|1992-06-25|1992-09-05|1992-07-14|DELIVER IN PERSON|SHIP|uickly final 2054|5970|971|5|40|75038.80|0.08|0.06|R|F|1992-06-23|1992-08-09|1992-07-04|TAKE BACK RETURN|RAIL|n pinto beans. ironic courts are iro 2054|133446|5960|6|17|25150.48|0.08|0.01|A|F|1992-06-09|1992-08-28|1992-06-16|NONE|AIR|ges nag acc 2054|10356|7860|7|4|5065.40|0.00|0.08|R|F|1992-08-12|1992-08-31|1992-08-15|DELIVER IN PERSON|AIR|lyly careful requests wake fl 2055|44447|4448|1|15|20871.60|0.04|0.06|A|F|1993-09-15|1993-10-06|1993-10-07|NONE|REG AIR|furiously bold 2055|8279|8280|2|15|17809.05|0.06|0.05|R|F|1993-10-30|1993-11-21|1993-11-22|COLLECT COD|RAIL|gular foxes. b 2055|134333|6847|3|12|16407.96|0.00|0.02|A|F|1993-10-26|1993-11-23|1993-11-22|COLLECT COD|TRUCK|al pains. acco 2055|133702|6216|4|16|27771.20|0.02|0.02|A|F|1993-11-16|1993-11-12|1993-11-28|NONE|TRUCK|arefully daringly regular accounts. 2080|6855|4356|1|5|8809.25|0.08|0.05|R|F|1993-08-26|1993-08-07|1993-09-02|DELIVER IN PERSON|TRUCK|refully unusual theo 2080|196959|9479|2|39|80182.05|0.07|0.04|A|F|1993-08-22|1993-09-09|1993-08-23|COLLECT COD|FOB|ic deposits haggle slyly carefully eve 2081|88449|5974|1|26|37373.44|0.03|0.08|N|O|1997-10-21|1997-10-03|1997-11-10|NONE|FOB|among the slyly express accounts. silen 2081|148309|3338|2|13|17644.90|0.07|0.05|N|O|1997-08-23|1997-08-22|1997-09-09|TAKE BACK RETURN|MAIL|fter the even deposi 2081|12941|7944|3|32|59326.08|0.09|0.07|N|O|1997-09-05|1997-09-26|1997-10-03|TAKE BACK RETURN|SHIP|e. final, regular dependencies sleep slyly! 2081|84662|4663|4|23|37873.18|0.03|0.08|N|O|1997-07-06|1997-09-11|1997-07-21|TAKE BACK RETURN|MAIL|ual requests wake blithely above the 2081|112141|4653|5|19|21909.66|0.02|0.06|N|O|1997-10-01|1997-08-12|1997-10-18|COLLECT COD|SHIP|s affix sometimes express requests. quickly 2081|141545|4060|6|31|49182.74|0.03|0.06|N|O|1997-09-19|1997-09-13|1997-09-27|NONE|AIR| silent, spe 2082|74452|1974|1|36|51352.20|0.00|0.00|R|F|1995-01-20|1995-03-18|1995-01-31|COLLECT COD|MAIL|haggle furiously silent pinto beans 2082|104536|9557|2|12|18486.36|0.08|0.05|A|F|1995-01-27|1995-02-11|1995-02-07|NONE|FOB| ironic instructions. carefull 2083|23917|8922|1|37|68113.67|0.07|0.00|R|F|1993-09-07|1993-09-30|1993-09-18|TAKE BACK RETURN|MAIL|ng the special foxes wake packages. f 2084|181162|8717|1|42|52212.72|0.03|0.05|A|F|1993-03-29|1993-05-05|1993-04-22|COLLECT COD|REG AIR|y fluffily even foxes. 2084|179838|2356|2|23|44110.09|0.09|0.08|A|F|1993-06-05|1993-05-26|1993-06-06|DELIVER IN PERSON|AIR|es against 2084|135354|2894|3|37|51405.95|0.07|0.05|A|F|1993-07-16|1993-04-20|1993-08-06|NONE|AIR|y careful courts. 2084|93773|1301|4|9|15900.93|0.02|0.02|A|F|1993-03-18|1993-06-08|1993-03-30|NONE|TRUCK|heaves boost slyly after the pla 2084|26853|4360|5|28|49835.80|0.07|0.02|R|F|1993-05-04|1993-05-14|1993-05-31|COLLECT COD|TRUCK|cajole quickly carefu 2084|114964|7476|6|15|29684.40|0.09|0.04|A|F|1993-06-23|1993-04-25|1993-07-23|COLLECT COD|SHIP|tithes. bravely pendi 2084|193410|5930|7|34|51115.94|0.09|0.02|R|F|1993-06-20|1993-05-28|1993-06-25|DELIVER IN PERSON|RAIL| carefully ironic requests. fluffil 2085|40196|2701|1|45|51128.55|0.00|0.07|R|F|1994-02-27|1994-01-11|1994-03-29|TAKE BACK RETURN|MAIL|. 
carefully e 2086|59829|9830|1|22|39354.04|0.03|0.07|R|F|1994-12-04|1994-12-16|1994-12-20|DELIVER IN PERSON|RAIL|idly busy acc 2086|140912|8455|2|32|62493.12|0.04|0.06|A|F|1994-11-15|1995-01-05|1994-12-09|TAKE BACK RETURN|TRUCK|e carefully along th 2086|104938|4939|3|44|85488.92|0.02|0.01|A|F|1994-12-04|1994-11-30|1994-12-21|DELIVER IN PERSON|FOB|latelets s 2086|83891|8908|4|27|50622.03|0.02|0.00|A|F|1994-11-04|1995-01-14|1994-11-25|COLLECT COD|REG AIR|theodolites haggle blithely blithe p 2086|155067|98|5|33|37027.98|0.04|0.00|A|F|1995-02-06|1994-11-25|1995-02-15|NONE|SHIP| slyly regular foxes. un 2086|199165|4204|6|20|25283.20|0.01|0.03|R|F|1994-11-30|1994-12-28|1994-12-07|COLLECT COD|FOB|lithely ironic acc 2086|155955|3501|7|7|14076.65|0.04|0.05|R|F|1994-12-27|1994-12-10|1995-01-05|COLLECT COD|RAIL| beans haggle car 2087|126684|6685|1|1|1710.68|0.05|0.04|N|O|1998-03-27|1998-03-24|1998-04-18|DELIVER IN PERSON|REG AIR|the quickly idle acco 2087|167256|4805|2|46|60869.50|0.10|0.03|N|O|1998-02-24|1998-04-02|1998-03-04|DELIVER IN PERSON|AIR|ter the dolphins. 2087|61740|1741|3|1|1701.74|0.02|0.05|N|O|1998-05-27|1998-04-11|1998-06-12|COLLECT COD|REG AIR|hely final acc 2087|58738|6254|4|6|10180.38|0.03|0.08|N|O|1998-04-23|1998-03-27|1998-05-18|DELIVER IN PERSON|REG AIR|dazzle after the slyly si 2112|70095|96|1|18|19171.62|0.02|0.05|N|O|1997-05-02|1997-03-16|1997-05-25|TAKE BACK RETURN|TRUCK|lphins solve ideas. even, special reque 2113|122730|5243|1|40|70109.20|0.04|0.06|N|O|1998-01-16|1997-12-11|1998-02-06|TAKE BACK RETURN|TRUCK|bout the quickly ironic t 2113|111214|8748|2|24|29405.04|0.03|0.02|N|O|1998-02-19|1998-01-08|1998-03-16|COLLECT COD|MAIL|kly regular accounts hinder about the 2114|167477|7478|1|50|77223.50|0.05|0.05|A|F|1995-02-05|1995-03-18|1995-02-13|COLLECT COD|RAIL|pecial pinto bean 2114|185076|113|2|26|30187.82|0.02|0.02|A|F|1995-04-30|1995-04-16|1995-05-28|NONE|SHIP|ar asymptotes sleep 2114|161760|4277|3|25|45544.00|0.07|0.01|A|F|1995-02-15|1995-03-13|1995-02-22|COLLECT COD|AIR|unts. regular, express accounts wake. b 2115|195180|7700|1|27|34429.86|0.06|0.03|N|O|1998-09-01|1998-07-29|1998-09-04|NONE|AIR|de of the carefully bold accounts 2115|183023|578|2|43|47558.86|0.06|0.02|N|O|1998-07-14|1998-07-25|1998-07-24|COLLECT COD|FOB| carefully pending requests alongs 2115|50851|8367|3|3|5405.55|0.03|0.04|N|O|1998-07-23|1998-07-30|1998-08-14|DELIVER IN PERSON|FOB|quickly ironic dolphin 2115|48123|8124|4|47|50342.64|0.06|0.07|N|O|1998-08-29|1998-07-30|1998-09-05|TAKE BACK RETURN|REG AIR|regular accounts integrate brav 2115|198956|6514|5|13|26714.35|0.04|0.00|N|O|1998-08-07|1998-08-06|1998-08-13|DELIVER IN PERSON|REG AIR|into beans. even accounts abou 2116|130050|51|1|2|2160.10|0.00|0.02|R|F|1994-10-16|1994-11-24|1994-11-09|DELIVER IN PERSON|TRUCK|r theodolites use blithely about the ir 2116|139382|9383|2|47|66804.86|0.10|0.06|R|F|1994-09-01|1994-11-18|1994-09-25|COLLECT COD|MAIL|iously ironic dependencies around the iro 2116|183247|5766|3|11|14632.64|0.03|0.05|R|F|1994-09-15|1994-10-21|1994-09-21|NONE|FOB| pinto beans. 
final, final sauternes play 2117|164531|9564|1|36|57439.08|0.10|0.01|N|O|1997-08-06|1997-07-15|1997-08-07|DELIVER IN PERSON|SHIP|ronic accounts wake 2117|60193|7712|2|19|21910.61|0.04|0.00|N|O|1997-07-30|1997-06-18|1997-08-13|DELIVER IN PERSON|REG AIR|s between the slyly regula 2117|57434|2445|3|43|59831.49|0.04|0.03|N|O|1997-06-27|1997-06-12|1997-07-22|DELIVER IN PERSON|SHIP| foxes sleep furiously 2117|90507|5526|4|24|35940.00|0.00|0.07|N|O|1997-06-15|1997-05-27|1997-06-18|COLLECT COD|SHIP|thely slyly pending platelets. ironic, 2117|146773|6774|5|3|5459.31|0.02|0.05|N|O|1997-05-05|1997-07-20|1997-05-26|TAKE BACK RETURN|TRUCK|tes cajole 2117|179|2680|6|27|29137.59|0.09|0.08|N|O|1997-06-30|1997-06-27|1997-07-11|TAKE BACK RETURN|REG AIR| the carefully ironic ideas 2118|159552|9553|1|24|38677.20|0.10|0.03|N|O|1997-01-06|1996-12-14|1997-01-14|TAKE BACK RETURN|RAIL|about the slyly bold depende 2118|183278|3279|2|4|5445.08|0.08|0.01|N|O|1996-10-25|1996-11-10|1996-11-22|COLLECT COD|AIR|theodolites affix according 2118|144382|1925|3|11|15690.18|0.05|0.04|N|O|1996-12-23|1996-12-20|1997-01-01|COLLECT COD|RAIL|y ironic accounts sleep upon the packages. 2119|101751|6772|1|36|63099.00|0.04|0.00|N|O|1996-11-10|1996-10-25|1996-12-03|TAKE BACK RETURN|RAIL|ly bold foxes. ironic accoun 2144|91477|9005|1|33|48459.51|0.00|0.07|R|F|1994-04-04|1994-06-20|1994-04-23|NONE|AIR| ironic excuses haggle final dependencies. 2144|50884|3390|2|46|84404.48|0.03|0.08|R|F|1994-04-08|1994-04-29|1994-05-07|COLLECT COD|SHIP| foxes haggle blithel 2144|3496|8497|3|29|40585.21|0.00|0.07|R|F|1994-05-03|1994-05-16|1994-06-01|DELIVER IN PERSON|FOB|ns wake carefully carefully ironic 2144|157171|7172|4|10|12281.70|0.00|0.04|R|F|1994-06-16|1994-05-03|1994-07-05|COLLECT COD|AIR| furiously unusual ideas. carefull 2145|77370|9878|1|13|17515.81|0.04|0.05|A|F|1992-11-12|1992-12-13|1992-12-07|TAKE BACK RETURN|MAIL|alongside of the slyly final 2145|153894|1440|2|6|11687.34|0.05|0.01|A|F|1992-10-10|1992-11-29|1992-10-14|NONE|AIR|s. fluffily express accounts sleep. slyl 2146|56891|9397|1|42|77611.38|0.10|0.01|A|F|1992-09-21|1992-11-02|1992-09-23|NONE|AIR|ns according to the doggedly 2146|156627|9143|2|6|10101.72|0.07|0.05|A|F|1993-01-03|1992-10-24|1993-01-24|DELIVER IN PERSON|RAIL|ing to the requests. dependencies boost 2146|24361|1868|3|14|17995.04|0.03|0.01|R|F|1992-09-16|1992-10-16|1992-09-20|COLLECT COD|SHIP|ecial, express a 2146|25952|3459|4|31|58216.45|0.02|0.00|A|F|1993-01-04|1992-10-24|1993-01-15|DELIVER IN PERSON|TRUCK|lly even deposit 2146|168542|6091|5|28|45095.12|0.02|0.05|R|F|1993-01-03|1992-10-17|1993-01-08|COLLECT COD|MAIL|r accounts sleep furio 2146|70175|7697|6|32|36645.44|0.07|0.03|R|F|1993-01-10|1992-10-19|1993-02-05|COLLECT COD|TRUCK|y regular foxes wake among the final 2146|24278|4279|7|39|46888.53|0.07|0.06|R|F|1993-01-05|1992-11-06|1993-01-14|DELIVER IN PERSON|TRUCK|uickly regular excuses detect. regular c 2147|28768|3773|1|50|84838.00|0.04|0.06|R|F|1992-11-18|1992-11-30|1992-11-30|NONE|RAIL|al accounts. 
even, even foxes wake 2147|100604|605|2|4|6418.40|0.01|0.04|A|F|1992-09-27|1992-11-15|1992-10-22|NONE|AIR|mong the blithely special 2147|43483|8492|3|34|48500.32|0.10|0.04|R|F|1992-11-29|1992-11-08|1992-12-22|TAKE BACK RETURN|REG AIR|egular deposits hang car 2147|10236|5239|4|11|12608.53|0.06|0.07|A|F|1992-09-27|1992-11-16|1992-10-16|NONE|AIR| the fluffily 2148|115933|3467|1|21|40927.53|0.09|0.01|R|F|1995-05-28|1995-05-26|1995-06-15|NONE|FOB|deposits ag 2149|18213|5717|1|12|13574.52|0.05|0.07|R|F|1993-06-01|1993-05-06|1993-06-11|TAKE BACK RETURN|TRUCK|riously bl 2149|98974|8975|2|10|19729.70|0.06|0.01|R|F|1993-06-09|1993-04-17|1993-06-16|DELIVER IN PERSON|TRUCK|eposits sleep above 2149|48678|3687|3|47|76453.49|0.00|0.04|R|F|1993-06-27|1993-05-12|1993-07-11|COLLECT COD|AIR|hely final depo 2149|128105|3130|4|18|20395.80|0.06|0.00|A|F|1993-04-05|1993-05-11|1993-04-23|DELIVER IN PERSON|REG AIR|uriously final pac 2149|59360|4371|5|22|29025.92|0.06|0.04|R|F|1993-05-24|1993-04-23|1993-06-20|TAKE BACK RETURN|SHIP|ptotes sleep along the blithely ir 2150|77886|2901|1|26|48460.88|0.00|0.03|A|F|1994-06-21|1994-08-05|1994-06-23|NONE|TRUCK|. always unusual packages 2150|17738|5242|2|29|48016.17|0.04|0.03|A|F|1994-09-02|1994-08-04|1994-10-02|TAKE BACK RETURN|RAIL|y ironic theodolites. foxes ca 2150|106769|1790|3|29|51497.04|0.04|0.08|R|F|1994-06-10|1994-07-31|1994-06-26|COLLECT COD|RAIL|arefully final att 2150|53727|1243|4|39|65548.08|0.05|0.02|R|F|1994-07-31|1994-08-17|1994-08-11|TAKE BACK RETURN|TRUCK|ess accounts nag. unusual asymptotes haggl 2150|182496|2497|5|35|55247.15|0.01|0.01|A|F|1994-09-27|1994-08-17|1994-10-13|COLLECT COD|RAIL|refully pending dependen 2150|6331|8832|6|12|14847.96|0.09|0.03|A|F|1994-08-27|1994-08-22|1994-09-18|COLLECT COD|AIR|press platelets haggle until the slyly fi 2151|166944|4493|1|23|46251.62|0.06|0.02|N|O|1996-11-20|1996-12-17|1996-11-30|DELIVER IN PERSON|AIR| silent dependencies about the slyl 2151|14480|6982|2|29|40439.92|0.00|0.02|N|O|1997-03-04|1996-12-27|1997-03-21|TAKE BACK RETURN|SHIP| bold packages acro 2151|164838|9871|3|49|93238.67|0.07|0.01|N|O|1997-01-20|1997-02-09|1997-02-18|NONE|FOB| packages. f 2151|17670|2673|4|28|44454.76|0.10|0.08|N|O|1996-12-11|1996-12-26|1996-12-12|DELIVER IN PERSON|AIR|y special packages. carefully ironic instru 2176|190213|5252|1|38|49521.98|0.02|0.08|R|F|1992-11-29|1993-01-14|1992-12-22|DELIVER IN PERSON|REG AIR|lithely ironic pinto beans. furious 2176|94137|9156|2|14|15835.82|0.00|0.06|A|F|1992-11-17|1993-01-07|1992-12-03|DELIVER IN PERSON|SHIP|ely ironic platelets 2176|159466|9467|3|25|38136.50|0.02|0.02|R|F|1993-02-23|1993-01-05|1993-03-07|COLLECT COD|RAIL| ruthless deposits according to the ent 2176|142226|7255|4|2|2536.44|0.05|0.06|A|F|1993-02-26|1993-01-08|1993-03-23|DELIVER IN PERSON|AIR|s pinto beans 2177|128562|8563|1|45|71575.20|0.02|0.01|N|O|1997-02-11|1997-02-27|1997-02-17|NONE|SHIP|. theodolites haggle carefu 2177|138448|962|2|27|40133.88|0.04|0.08|N|O|1997-01-29|1997-03-20|1997-02-04|DELIVER IN PERSON|SHIP|even, regula 2177|80066|2575|3|23|24059.38|0.07|0.05|N|O|1997-01-28|1997-03-02|1997-02-13|DELIVER IN PERSON|AIR|he silent foxes. iro 2177|54933|7439|4|34|64189.62|0.05|0.07|N|O|1997-02-03|1997-04-10|1997-02-21|COLLECT COD|REG AIR|tes are doggedly quickly 2177|56437|3953|5|46|64097.78|0.09|0.05|N|O|1997-05-10|1997-02-23|1997-05-28|COLLECT COD|RAIL|ending asymptotes. 
2177|121616|4129|6|11|18013.71|0.02|0.04|N|O|1997-03-20|1997-03-07|1997-04-09|DELIVER IN PERSON|MAIL|gainst the ca 2178|156127|1158|1|15|17746.80|0.10|0.01|N|O|1997-03-27|1997-03-10|1997-04-18|NONE|REG AIR|l accounts. quickly expr 2178|15804|8306|2|27|46434.60|0.01|0.02|N|O|1997-02-26|1997-02-19|1997-03-25|NONE|MAIL| across the ironic reques 2178|4825|2326|3|40|69192.80|0.00|0.03|N|O|1997-03-17|1997-02-09|1997-04-15|COLLECT COD|RAIL|foxes are slowly regularly specia 2178|77639|5161|4|3|4849.89|0.07|0.07|N|O|1997-04-07|1997-01-23|1997-04-18|COLLECT COD|MAIL| permanentl 2179|129479|4504|1|22|33186.34|0.05|0.08|N|O|1996-11-16|1996-11-03|1996-11-25|DELIVER IN PERSON|FOB|lphins cajole acr 2179|138056|570|2|20|21881.00|0.03|0.01|N|O|1996-09-30|1996-11-10|1996-10-30|NONE|REG AIR|ncies. fin 2179|103363|8384|3|5|6831.80|0.03|0.02|N|O|1996-11-09|1996-10-08|1996-11-11|DELIVER IN PERSON|REG AIR|ts haggle blithely. ironic, careful theodol 2179|5267|2768|4|24|28134.24|0.04|0.04|N|O|1996-10-26|1996-11-05|1996-11-16|COLLECT COD|RAIL| cajole carefully. 2179|107337|4868|5|7|9410.31|0.00|0.02|N|O|1996-10-24|1996-11-14|1996-11-21|TAKE BACK RETURN|RAIL|gular dependencies. ironic packages haggle 2180|15125|128|1|31|32243.72|0.06|0.04|N|O|1996-10-20|1996-11-21|1996-11-06|COLLECT COD|REG AIR|n requests are furiously at the quickly 2180|192230|9788|2|39|51566.97|0.01|0.00|N|O|1997-01-03|1996-10-29|1997-01-25|NONE|RAIL|ep furiously furiously final request 2180|196985|9505|3|24|49967.52|0.03|0.00|N|O|1997-01-03|1996-10-24|1997-01-19|NONE|SHIP|uriously f 2180|110249|2761|4|47|59184.28|0.07|0.02|N|O|1996-09-23|1996-12-08|1996-10-12|NONE|FOB|pending, regular ideas. iron 2180|142825|368|5|23|42959.86|0.02|0.06|N|O|1996-11-08|1996-10-25|1996-11-28|NONE|TRUCK|ggle alongside of the fluffily speci 2180|54140|4141|6|48|52518.72|0.09|0.03|N|O|1996-12-30|1996-11-22|1997-01-16|DELIVER IN PERSON|RAIL|nic instructions haggle careful 2181|177848|7849|1|4|7703.36|0.05|0.04|N|O|1995-09-25|1995-11-12|1995-09-28|COLLECT COD|FOB|tes. slyly silent packages use along th 2181|87791|2808|2|46|81824.34|0.00|0.02|N|O|1995-11-28|1995-10-17|1995-12-26|COLLECT COD|AIR|osits. final packages sleep 2181|90740|741|3|15|25961.10|0.08|0.05|N|O|1995-10-05|1995-10-27|1995-11-03|DELIVER IN PERSON|FOB|e above the fluffily regul 2181|54662|9673|4|28|45266.48|0.04|0.05|N|O|1995-12-21|1995-10-23|1996-01-04|TAKE BACK RETURN|AIR|s excuses sleep car 2181|95783|5784|5|9|16009.02|0.06|0.07|N|O|1996-01-05|1995-12-05|1996-01-08|COLLECT COD|TRUCK|ward the quietly even requests. ir 2182|131671|9211|1|27|45972.09|0.02|0.07|R|F|1994-05-10|1994-07-04|1994-06-04|DELIVER IN PERSON|SHIP|en platele 2182|189760|7315|2|3|5549.28|0.05|0.03|R|F|1994-04-20|1994-07-04|1994-04-24|TAKE BACK RETURN|SHIP|y bold theodolites wi 2182|93343|5853|3|34|45435.56|0.02|0.03|R|F|1994-05-28|1994-06-02|1994-06-10|COLLECT COD|MAIL| slow tithes. ironi 2182|6068|3569|4|12|11688.72|0.04|0.07|A|F|1994-05-08|1994-06-02|1994-05-09|COLLECT COD|REG AIR|ments are fu 2182|178869|1387|5|37|72070.82|0.06|0.02|A|F|1994-04-08|1994-06-29|1994-04-18|TAKE BACK RETURN|TRUCK|ges. blithely ironic 2183|70665|3173|1|29|47434.14|0.05|0.01|N|O|1996-07-21|1996-08-24|1996-08-15|TAKE BACK RETURN|RAIL|ly unusual deposits sleep carefully 2183|51094|1095|2|25|26127.25|0.06|0.02|N|O|1996-07-06|1996-08-21|1996-08-05|NONE|RAIL|he quickly f 2208|57232|2243|1|48|57083.04|0.08|0.07|A|F|1995-05-13|1995-06-30|1995-05-20|COLLECT COD|MAIL|sits. 
idly permanent request 2208|96798|4326|2|11|19742.69|0.08|0.01|A|F|1995-05-06|1995-07-19|1995-05-22|COLLECT COD|TRUCK|ding waters lose. furiously regu 2208|73389|5897|3|41|55857.58|0.08|0.02|N|O|1995-08-18|1995-06-19|1995-09-05|COLLECT COD|RAIL|nd the furious, express dependencies. 2208|42342|9855|4|50|64217.00|0.07|0.07|N|F|1995-06-11|1995-05-31|1995-06-29|TAKE BACK RETURN|FOB|al foxes will hav 2208|29570|2073|5|43|64481.51|0.03|0.06|A|F|1995-05-10|1995-06-02|1995-06-09|TAKE BACK RETURN|MAIL|es. accounts cajole. fi 2208|166229|3778|6|18|23313.96|0.02|0.08|R|F|1995-06-06|1995-06-10|1995-06-11|TAKE BACK RETURN|TRUCK|packages are quickly bold de 2208|6106|1107|7|45|45544.50|0.00|0.08|A|F|1995-05-05|1995-06-10|1995-05-11|NONE|SHIP|e fluffily regular theodolites caj 2209|22676|7681|1|40|63946.80|0.05|0.01|R|F|1992-11-01|1992-09-25|1992-11-08|DELIVER IN PERSON|SHIP|ully special sheaves serve 2209|102771|2772|2|10|17737.70|0.00|0.02|R|F|1992-09-02|1992-09-24|1992-09-21|DELIVER IN PERSON|AIR|players. carefully reg 2209|63521|8534|3|11|16329.72|0.01|0.01|A|F|1992-07-12|1992-08-24|1992-08-10|DELIVER IN PERSON|REG AIR|express, regular pinto be 2209|180897|3416|4|39|77137.71|0.08|0.07|R|F|1992-11-04|1992-09-02|1992-11-11|TAKE BACK RETURN|MAIL|ly around the final packages. deposits ca 2209|123364|901|5|24|33296.64|0.08|0.06|R|F|1992-08-09|1992-08-18|1992-08-25|COLLECT COD|AIR| along the bol 2209|177413|2448|6|7|10432.87|0.09|0.07|A|F|1992-08-18|1992-09-09|1992-09-12|DELIVER IN PERSON|AIR| quickly regular pack 2210|77764|2779|1|36|62703.36|0.10|0.00|A|F|1992-03-04|1992-03-24|1992-03-21|DELIVER IN PERSON|AIR| requests wake enticingly final 2211|47822|2831|1|25|44245.50|0.04|0.01|A|F|1994-10-09|1994-08-04|1994-11-03|TAKE BACK RETURN|RAIL|deas. carefully special theodolites along 2211|139773|2287|2|40|72510.80|0.09|0.06|A|F|1994-09-30|1994-09-10|1994-10-26|NONE|MAIL|posits among the express dolphins 2211|159820|7366|3|25|46995.50|0.00|0.07|A|F|1994-08-13|1994-08-17|1994-08-16|NONE|AIR|ly regular, express 2211|84711|9728|4|23|39001.33|0.03|0.02|R|F|1994-10-05|1994-09-13|1994-10-17|DELIVER IN PERSON|AIR|ependencies 2211|134301|6815|5|3|4005.90|0.02|0.04|A|F|1994-08-28|1994-09-10|1994-09-06|TAKE BACK RETURN|SHIP|pendencies after the regular f 2211|186189|6190|6|18|22953.24|0.05|0.08|A|F|1994-08-31|1994-09-07|1994-09-22|NONE|TRUCK|c grouches. slyly express pinto 2211|78139|647|7|3|3351.39|0.06|0.05|R|F|1994-09-21|1994-08-10|1994-10-19|TAKE BACK RETURN|RAIL|y slyly final 2212|70810|5825|1|18|32054.58|0.07|0.06|R|F|1994-06-22|1994-06-18|1994-06-25|TAKE BACK RETURN|FOB| cajole. final, pending ideas should are bl 2213|117158|4692|1|20|23503.00|0.01|0.00|A|F|1993-01-21|1993-04-14|1993-01-29|COLLECT COD|REG AIR|iously express accounts; 2213|59646|9647|2|4|6422.56|0.09|0.05|R|F|1993-04-15|1993-04-15|1993-05-05|COLLECT COD|SHIP| affix carefully furiously 2213|69541|7060|3|1|1510.54|0.05|0.05|A|F|1993-04-25|1993-04-06|1993-04-28|TAKE BACK RETURN|AIR|s along the ironic reques 2213|173204|8239|4|39|49810.80|0.09|0.05|A|F|1993-05-12|1993-04-07|1993-05-23|TAKE BACK RETURN|SHIP|the blithely 2213|37976|2983|5|43|82300.71|0.04|0.03|A|F|1993-04-18|1993-03-11|1993-05-11|TAKE BACK RETURN|RAIL|r packages are along the carefully bol 2213|47675|180|6|41|66529.47|0.01|0.00|R|F|1993-01-31|1993-03-31|1993-02-28|COLLECT COD|FOB| carefully pend 2213|63836|1355|7|3|5399.49|0.02|0.04|A|F|1993-03-09|1993-03-17|1993-04-07|TAKE BACK RETURN|AIR|o wake. 
ironic platel 2214|75623|638|1|27|43162.74|0.04|0.04|N|O|1998-05-31|1998-06-07|1998-06-19|DELIVER IN PERSON|REG AIR|x fluffily along the even packages-- 2214|193950|3951|2|50|102197.50|0.00|0.02|N|O|1998-07-06|1998-06-16|1998-07-16|TAKE BACK RETURN|MAIL|accounts. blith 2214|112406|4918|3|42|59572.80|0.04|0.08|N|O|1998-05-26|1998-07-13|1998-06-22|COLLECT COD|FOB|ons. deposi 2214|195389|428|4|22|32656.36|0.01|0.01|N|O|1998-05-30|1998-07-02|1998-06-09|DELIVER IN PERSON|RAIL|t the blithely 2215|72044|9566|1|33|33529.32|0.00|0.00|N|O|1996-07-19|1996-08-10|1996-07-30|COLLECT COD|RAIL|dolites cajole b 2215|32526|5030|2|30|43755.60|0.01|0.00|N|O|1996-08-15|1996-09-10|1996-08-25|NONE|FOB|ckages caj 2215|56652|9158|3|30|48259.50|0.07|0.03|N|O|1996-09-09|1996-07-20|1996-09-28|COLLECT COD|TRUCK|against the carefu 2215|145112|7627|4|20|23142.20|0.02|0.02|N|O|1996-09-09|1996-08-10|1996-09-19|NONE|MAIL| unusual deposits haggle carefully. ide 2240|163898|6415|1|6|11771.34|0.01|0.00|A|F|1992-06-23|1992-05-17|1992-07-20|COLLECT COD|AIR|ymptotes boost. furiously bold p 2240|27897|5404|2|37|67520.93|0.03|0.07|R|F|1992-03-16|1992-05-31|1992-04-05|COLLECT COD|FOB| quickly after the packages? blithely si 2240|52103|9619|3|39|41148.90|0.08|0.06|A|F|1992-05-22|1992-05-10|1992-06-08|NONE|FOB|y orbits. final depos 2240|85867|884|4|10|18528.60|0.09|0.00|A|F|1992-05-25|1992-04-14|1992-06-23|DELIVER IN PERSON|REG AIR|are across the ironic packages. 2240|160522|3039|5|29|45893.08|0.02|0.06|A|F|1992-03-29|1992-05-08|1992-04-09|COLLECT COD|MAIL|lyly even ideas w 2240|80721|722|6|32|54455.04|0.06|0.06|R|F|1992-04-11|1992-04-18|1992-04-22|NONE|MAIL|ss thinly deposits. blithely bold package 2240|77504|2519|7|24|35556.00|0.04|0.05|R|F|1992-05-13|1992-04-09|1992-05-14|DELIVER IN PERSON|FOB|ng the silent accounts. slyly ironic t 2241|4034|4035|1|25|23450.75|0.00|0.08|R|F|1993-08-11|1993-07-23|1993-09-01|DELIVER IN PERSON|MAIL| final deposits use fluffily. even f 2241|194616|9655|2|38|65003.18|0.04|0.06|A|F|1993-08-04|1993-07-31|1993-08-06|TAKE BACK RETURN|TRUCK| silent, unusual d 2241|96673|1692|3|48|80144.16|0.08|0.04|A|F|1993-05-14|1993-07-30|1993-05-26|TAKE BACK RETURN|RAIL|ss accounts engage furiously. slyly even re 2241|166305|1338|4|19|26054.70|0.10|0.00|A|F|1993-06-01|1993-08-05|1993-06-07|TAKE BACK RETURN|TRUCK| are furiously quickl 2241|81485|1486|5|2|2932.96|0.04|0.03|A|F|1993-08-16|1993-08-02|1993-08-24|NONE|REG AIR|, express deposits. pear 2241|115401|424|6|22|31160.80|0.02|0.08|R|F|1993-08-13|1993-06-15|1993-08-16|DELIVER IN PERSON|TRUCK|, ironic depen 2241|141592|1593|7|9|14702.31|0.09|0.03|A|F|1993-05-14|1993-07-12|1993-05-29|NONE|AIR|lyly final 2242|122099|2100|1|15|16816.35|0.09|0.08|N|O|1997-08-04|1997-09-21|1997-08-11|COLLECT COD|FOB|its. carefully express packages cajole. 
bli 2243|126988|6989|1|10|20149.80|0.04|0.06|N|O|1995-07-26|1995-07-18|1995-08-03|NONE|RAIL|express, daring foxes affix fur 2244|50086|5097|1|3|3108.24|0.02|0.02|A|F|1993-04-30|1993-03-15|1993-05-19|TAKE BACK RETURN|FOB| beans for the regular platel 2244|192426|7465|2|16|24294.72|0.01|0.06|R|F|1993-02-12|1993-03-09|1993-02-28|COLLECT COD|FOB|rate around the reques 2245|75664|5665|1|44|72145.04|0.03|0.03|A|F|1993-06-12|1993-06-10|1993-06-16|NONE|TRUCK|refully even sheaves 2245|73630|8645|2|28|44901.64|0.05|0.03|R|F|1993-08-19|1993-07-27|1993-09-04|COLLECT COD|TRUCK|e requests sleep furiou 2245|85310|7819|3|33|42745.23|0.03|0.01|R|F|1993-06-26|1993-06-11|1993-07-17|TAKE BACK RETURN|AIR|ing to the carefully ruthless accounts 2245|188474|8475|4|14|21874.58|0.02|0.04|R|F|1993-05-06|1993-07-21|1993-05-19|DELIVER IN PERSON|RAIL|nts. always unusual dep 2245|79264|6786|5|33|41027.58|0.03|0.07|R|F|1993-06-16|1993-06-05|1993-07-07|NONE|MAIL| across the express reques 2246|52330|2331|1|22|28211.26|0.02|0.01|N|O|1996-07-25|1996-08-03|1996-08-24|DELIVER IN PERSON|SHIP|ructions wake carefully fina 2246|103090|3091|2|43|47002.87|0.07|0.06|N|O|1996-08-25|1996-08-23|1996-09-19|DELIVER IN PERSON|AIR|ainst the ironic theodolites haggle fi 2246|17158|4662|3|11|11826.65|0.10|0.00|N|O|1996-06-21|1996-07-24|1996-07-18|TAKE BACK RETURN|TRUCK|quests alongside o 2246|162721|270|4|13|23188.36|0.08|0.05|N|O|1996-09-15|1996-07-21|1996-10-08|DELIVER IN PERSON|AIR|equests. fluffily special epitaphs use 2247|171922|4440|1|12|23927.04|0.02|0.07|A|F|1992-09-06|1992-09-18|1992-09-26|NONE|MAIL|final accounts. requests across the furiou 2272|89989|9990|1|18|35621.64|0.04|0.00|R|F|1993-08-01|1993-07-06|1993-08-25|NONE|MAIL|ons along the blithely e 2272|33466|976|2|40|55978.40|0.07|0.00|A|F|1993-04-25|1993-07-12|1993-05-15|DELIVER IN PERSON|FOB|lithely ir 2272|55426|7932|3|36|49731.12|0.03|0.02|A|F|1993-05-25|1993-05-23|1993-06-09|TAKE BACK RETURN|RAIL|about the ironic packages; quickly iron 2272|137602|116|4|30|49188.00|0.09|0.07|A|F|1993-07-27|1993-05-15|1993-08-13|NONE|RAIL|quests at the foxes haggle evenly pack 2272|75858|3380|5|12|22006.20|0.03|0.03|A|F|1993-04-19|1993-05-14|1993-04-23|NONE|RAIL| accounts cajole. quickly b 2273|183596|8633|1|34|57106.06|0.02|0.03|N|O|1997-01-08|1997-02-02|1997-01-23|COLLECT COD|MAIL| furiously carefully bold de 2273|84638|9655|2|35|56792.05|0.00|0.05|N|O|1997-01-02|1997-01-19|1997-01-14|NONE|REG AIR|arefully f 2273|94138|9157|3|8|9057.04|0.00|0.04|N|O|1996-12-15|1997-02-27|1997-01-10|NONE|FOB|dependencies. slyly ir 2273|160895|8444|4|20|39117.80|0.06|0.04|N|O|1997-03-05|1997-02-25|1997-04-01|NONE|RAIL|cuses. quickly enticing requests wake 2273|161026|8575|5|18|19566.36|0.07|0.00|N|O|1996-12-16|1997-01-21|1997-01-03|COLLECT COD|TRUCK| beans. doggedly final packages wake 2273|154366|1912|6|16|22725.76|0.10|0.03|N|O|1997-01-10|1997-02-03|1997-02-01|TAKE BACK RETURN|RAIL|furiously above the ironic requests. 2273|19493|9494|7|7|9887.43|0.05|0.05|N|O|1997-02-19|1997-01-22|1997-02-21|TAKE BACK RETURN|TRUCK|ts. furiou 2274|11038|3540|1|18|17082.54|0.04|0.03|R|F|1993-09-06|1993-12-03|1993-09-22|COLLECT COD|SHIP|usly final re 2274|110510|5533|2|23|34971.73|0.04|0.03|R|F|1993-10-28|1993-11-03|1993-11-05|NONE|MAIL|kly special warhorse 2274|128795|8796|3|18|32828.22|0.03|0.06|R|F|1993-09-28|1993-11-22|1993-10-12|DELIVER IN PERSON|SHIP| express packages. 
even accounts hagg 2275|33183|8190|1|30|33485.40|0.08|0.05|R|F|1993-01-10|1992-11-21|1993-01-22|NONE|REG AIR|re slyly slyly special idea 2275|90399|5418|2|11|15283.29|0.08|0.03|A|F|1993-01-16|1992-12-10|1993-01-25|COLLECT COD|REG AIR|ost across the never express instruction 2276|118185|5719|1|5|6015.90|0.07|0.08|N|O|1996-05-09|1996-06-18|1996-05-13|DELIVER IN PERSON|FOB|ias instea 2276|134871|2411|2|13|24776.31|0.08|0.04|N|O|1996-07-24|1996-06-18|1996-08-16|COLLECT COD|RAIL|arefully ironic foxes cajole q 2276|170995|996|3|27|55781.73|0.07|0.08|N|O|1996-07-30|1996-06-10|1996-07-31|DELIVER IN PERSON|RAIL|the carefully unusual accoun 2276|108060|5591|4|38|40586.28|0.06|0.03|N|O|1996-07-07|1996-06-28|1996-07-17|COLLECT COD|RAIL|ans. pinto beans boost c 2276|152402|9948|5|50|72720.00|0.03|0.05|N|O|1996-07-13|1996-06-25|1996-07-22|DELIVER IN PERSON|REG AIR| accounts dete 2276|5041|7542|6|4|3784.16|0.10|0.03|N|O|1996-07-05|1996-06-30|1996-08-04|COLLECT COD|FOB|s. deposits 2277|136849|6850|1|38|71661.92|0.03|0.07|R|F|1995-04-23|1995-03-25|1995-05-20|TAKE BACK RETURN|TRUCK|fully bold 2277|7914|415|2|2|3643.82|0.10|0.08|A|F|1995-02-01|1995-02-04|1995-03-02|TAKE BACK RETURN|AIR|endencies sleep idly pending p 2277|197832|352|3|4|7719.32|0.05|0.06|R|F|1995-04-27|1995-03-16|1995-04-29|TAKE BACK RETURN|SHIP|. quickly unusual deposi 2277|158467|3498|4|31|47289.26|0.02|0.00|R|F|1995-03-07|1995-03-19|1995-03-26|TAKE BACK RETURN|MAIL|ic instructions detect ru 2278|44247|6752|1|36|42884.64|0.04|0.05|N|O|1998-06-04|1998-06-06|1998-06-30|NONE|TRUCK|y ironic pinto beans br 2278|44868|7373|2|50|90643.00|0.02|0.00|N|O|1998-08-09|1998-07-08|1998-09-05|DELIVER IN PERSON|RAIL|into beans. blit 2278|96343|8853|3|22|29465.48|0.03|0.00|N|O|1998-05-15|1998-07-14|1998-06-04|TAKE BACK RETURN|REG AIR|ep regular accounts. blithely even 2279|13676|3677|1|12|19076.04|0.07|0.08|A|F|1993-05-10|1993-03-25|1993-06-02|COLLECT COD|REG AIR|lets across the excuses nag quickl 2279|40972|973|2|38|72692.86|0.08|0.07|R|F|1993-06-09|1993-04-06|1993-06-26|COLLECT COD|TRUCK|s above the furiously express dep 2279|3443|5944|3|3|4039.32|0.09|0.04|A|F|1993-05-31|1993-05-07|1993-06-05|COLLECT COD|REG AIR|ing foxes above the even accounts use slyly 2279|51132|8648|4|42|45491.46|0.02|0.00|R|F|1993-02-28|1993-04-25|1993-03-02|TAKE BACK RETURN|REG AIR| above the furiously ironic deposits. 2279|168844|1361|5|9|17215.56|0.05|0.04|R|F|1993-05-21|1993-03-29|1993-06-17|DELIVER IN PERSON|MAIL|ns cajole after the final platelets. s 2279|146161|1190|6|12|14485.92|0.02|0.00|R|F|1993-05-04|1993-04-26|1993-05-28|DELIVER IN PERSON|FOB|ccounts. slyl 2279|118915|6449|7|32|61885.12|0.05|0.05|A|F|1993-04-20|1993-05-22|1993-05-18|DELIVER IN PERSON|RAIL|re quickly. 
furiously ironic ide 2304|199490|2010|1|42|66758.58|0.00|0.01|A|F|1994-01-20|1994-03-04|1994-02-05|COLLECT COD|RAIL|quests are blithely alongside of 2304|18186|5690|2|48|53000.64|0.00|0.00|R|F|1994-02-12|1994-02-16|1994-03-10|COLLECT COD|REG AIR| deposits cajole blithely e 2304|47934|7935|3|3|5645.79|0.00|0.05|R|F|1994-03-19|1994-03-04|1994-03-20|DELIVER IN PERSON|AIR|l excuses after the ev 2305|173979|6497|1|3|6158.91|0.00|0.01|A|F|1993-03-24|1993-04-05|1993-03-29|NONE|AIR|kages haggle quickly across the blithely 2305|59018|1524|2|39|38103.39|0.07|0.00|R|F|1993-04-16|1993-04-17|1993-04-22|COLLECT COD|MAIL|ms after the foxes 2305|101013|1014|3|32|32448.32|0.03|0.06|A|F|1993-04-02|1993-03-18|1993-04-03|NONE|AIR| haggle caref 2305|111463|1464|4|17|25065.82|0.00|0.05|A|F|1993-02-21|1993-03-30|1993-03-19|TAKE BACK RETURN|MAIL| carefully alongside of 2305|154380|1926|5|26|37293.88|0.06|0.07|A|F|1993-05-14|1993-02-28|1993-06-04|NONE|SHIP|arefully final theodo 2305|50969|8485|6|7|13439.72|0.06|0.00|R|F|1993-05-15|1993-04-25|1993-06-09|DELIVER IN PERSON|RAIL|gular deposits boost about the foxe 2306|195761|800|1|50|92838.00|0.09|0.01|N|O|1995-07-27|1995-09-26|1995-08-06|DELIVER IN PERSON|FOB|y quickly 2306|148556|3585|2|39|62577.45|0.04|0.00|N|O|1995-09-07|1995-09-13|1995-10-03|COLLECT COD|SHIP|f the slyly unusual accounts. furiousl 2306|177684|5236|3|35|61658.80|0.01|0.07|N|O|1995-08-18|1995-08-30|1995-08-20|TAKE BACK RETURN|RAIL|raids along the furiously unusual asympto 2306|118936|1448|4|21|41053.53|0.06|0.01|N|O|1995-10-07|1995-09-18|1995-10-17|COLLECT COD|MAIL| ironic pinto 2306|141746|4261|5|42|75085.08|0.04|0.07|N|O|1995-09-05|1995-08-25|1995-09-28|COLLECT COD|MAIL|furiously final acco 2306|123507|3508|6|29|44384.50|0.00|0.03|N|O|1995-11-01|1995-09-01|1995-11-22|DELIVER IN PERSON|REG AIR|uld have to mold. s 2306|175400|2952|7|19|28032.60|0.07|0.01|N|O|1995-11-17|1995-09-06|1995-11-30|DELIVER IN PERSON|AIR|tainments nag furiously carefull 2307|141367|3882|1|24|33800.64|0.10|0.05|R|F|1993-10-07|1993-08-05|1993-10-20|COLLECT COD|AIR|stealthily special packages nag a 2307|139516|2030|2|2|3111.02|0.01|0.00|A|F|1993-09-21|1993-08-22|1993-10-03|COLLECT COD|SHIP|ously. furiously furious requ 2307|33306|5810|3|7|8675.10|0.07|0.04|R|F|1993-08-03|1993-09-04|1993-08-28|DELIVER IN PERSON|AIR|ven instructions wake fluffily 2307|164357|4358|4|19|27005.65|0.08|0.06|R|F|1993-10-23|1993-09-09|1993-11-09|TAKE BACK RETURN|TRUCK|olites haggle furiously around the 2307|142685|2686|5|7|12093.76|0.01|0.06|R|F|1993-09-01|1993-08-08|1993-09-29|NONE|AIR| packages cajo 2308|117514|7515|1|24|36756.24|0.06|0.04|R|F|1993-02-23|1992-12-24|1993-03-10|NONE|MAIL|ts sleep. busy excuses along the s 2308|55630|641|2|36|57082.68|0.05|0.06|A|F|1992-11-11|1992-11-27|1992-11-23|NONE|MAIL|ong the pending hockey players. blithe 2309|169856|4889|1|14|26961.90|0.10|0.03|N|O|1996-01-01|1995-10-22|1996-01-23|NONE|AIR|asymptotes. furiously pending acco 2309|168253|770|2|1|1321.25|0.01|0.05|N|O|1995-12-08|1995-11-03|1995-12-31|COLLECT COD|RAIL|eposits alongside of the final re 2309|14764|9767|3|5|8393.80|0.01|0.00|N|O|1995-12-10|1995-10-29|1996-01-06|TAKE BACK RETURN|SHIP|s. requests wake blithely specia 2309|138046|3073|4|46|49865.84|0.08|0.04|N|O|1995-10-02|1995-10-30|1995-10-30|NONE|REG AIR|sly according to the carefully 2309|136205|3745|5|9|11170.80|0.00|0.07|N|O|1995-12-21|1995-10-10|1996-01-20|COLLECT COD|AIR|ding, unusual instructions. 
dep 2309|194880|9919|6|21|41472.48|0.09|0.00|N|O|1995-11-05|1995-11-07|1995-11-22|NONE|AIR|unts around the dolphins ar 2309|137744|258|7|48|85523.52|0.03|0.05|N|O|1995-10-21|1995-11-21|1995-11-09|NONE|MAIL|ccounts. id 2310|57879|385|1|36|66127.32|0.03|0.03|N|O|1996-10-09|1996-10-28|1996-10-29|TAKE BACK RETURN|RAIL|iously against the slyly special accounts 2310|170444|445|2|6|9086.64|0.07|0.01|N|O|1996-11-08|1996-12-09|1996-12-07|COLLECT COD|REG AIR|e slyly about the quickly ironic theodo 2310|41031|8544|3|48|46657.44|0.08|0.02|N|O|1996-10-04|1996-11-20|1996-10-25|TAKE BACK RETURN|FOB|ep slyly alongside of the 2311|140068|2583|1|18|19945.08|0.01|0.01|N|F|1995-06-11|1995-06-18|1995-07-02|NONE|FOB| fluffily even patterns haggle blithely. re 2311|121572|6597|2|49|78084.93|0.09|0.02|R|F|1995-05-14|1995-07-11|1995-05-20|COLLECT COD|FOB|ideas sleep 2311|53793|3794|3|15|26201.85|0.08|0.04|N|O|1995-06-23|1995-06-06|1995-07-09|COLLECT COD|AIR|ve the blithely pending accounts. furio 2311|89740|4757|4|42|72649.08|0.01|0.06|R|F|1995-06-03|1995-06-27|1995-06-11|DELIVER IN PERSON|MAIL|gle furiously. bold 2311|46446|1455|5|1|1392.44|0.05|0.02|A|F|1995-06-07|1995-06-20|1995-06-10|NONE|AIR|ptotes. furiously regular theodolite 2311|11993|6996|6|32|60959.68|0.01|0.03|N|O|1995-07-19|1995-06-26|1995-07-26|NONE|RAIL|sts along the slyly 2336|192161|4681|1|20|25063.20|0.01|0.03|N|O|1996-03-12|1996-02-25|1996-03-18|NONE|REG AIR|across the fi 2337|44792|7297|1|49|85102.71|0.06|0.05|N|O|1997-08-08|1997-08-15|1997-08-31|TAKE BACK RETURN|FOB| along the packages. furiously p 2338|51054|6065|1|30|30151.50|0.07|0.06|N|O|1997-12-10|1997-10-15|1997-12-11|TAKE BACK RETURN|REG AIR|ould have to nag quickly 2339|191542|1543|1|22|35937.88|0.03|0.03|A|F|1994-01-06|1994-03-06|1994-01-10|NONE|FOB| furiously above 2339|29709|2212|2|28|45883.60|0.00|0.00|R|F|1994-01-25|1994-01-22|1994-01-28|DELIVER IN PERSON|RAIL|e bold, even packag 2339|116749|1772|3|13|22954.62|0.06|0.08|R|F|1994-03-10|1994-02-18|1994-03-20|TAKE BACK RETURN|REG AIR|ges. blithely special depend 2340|137788|5328|1|9|16432.02|0.08|0.02|N|O|1996-05-01|1996-02-24|1996-05-16|COLLECT COD|RAIL|. carefully ironic 2340|192904|5424|2|21|41934.90|0.06|0.02|N|O|1996-01-17|1996-03-04|1996-01-29|DELIVER IN PERSON|SHIP| asymptotes. unusual theo 2341|46009|1018|1|12|11460.00|0.08|0.03|R|F|1993-06-06|1993-07-08|1993-06-17|DELIVER IN PERSON|FOB|. quickly final deposits sl 2341|70364|5379|2|37|49371.32|0.07|0.08|A|F|1993-09-23|1993-07-25|1993-10-14|DELIVER IN PERSON|RAIL|was blithel 2341|194541|9580|3|8|13084.32|0.03|0.07|R|F|1993-06-08|1993-07-09|1993-06-10|COLLECT COD|FOB|ns affix above the iron 2342|41169|8682|1|12|13321.92|0.00|0.08|N|O|1996-07-31|1996-07-26|1996-08-14|NONE|TRUCK|print blithely even deposits. carefull 2342|116462|8974|2|24|35483.04|0.10|0.06|N|O|1996-09-30|1996-07-22|1996-10-28|TAKE BACK RETURN|AIR|nstructions c 2342|169920|9921|3|50|99496.00|0.10|0.01|N|O|1996-08-28|1996-07-18|1996-09-22|COLLECT COD|RAIL|cial asymptotes pr 2342|35809|5810|4|1|1744.80|0.04|0.06|N|O|1996-08-31|1996-08-09|1996-09-07|DELIVER IN PERSON|REG AIR|ffily. unusual pinto beans wake c 2342|26756|9259|5|22|37020.50|0.08|0.01|N|O|1996-08-10|1996-08-02|1996-08-31|DELIVER IN PERSON|AIR|s. ironic 2343|109899|9900|1|27|51540.03|0.00|0.00|N|O|1995-11-10|1995-11-17|1995-12-10|TAKE BACK RETURN|SHIP|old theodolites. 
2343|65555|3074|2|35|53219.25|0.03|0.06|N|O|1995-10-24|1995-11-09|1995-10-26|COLLECT COD|TRUCK|ges haggle furiously carefully regular req 2343|178754|6306|3|21|38487.75|0.00|0.03|N|O|1995-09-07|1995-10-26|1995-10-07|TAKE BACK RETURN|RAIL|osits. unusual theodolites boost furio 2368|151986|1987|1|16|32607.68|0.04|0.03|R|F|1993-10-31|1993-10-22|1993-11-06|NONE|REG AIR|telets wake carefully iro 2368|13052|3053|2|32|30881.60|0.03|0.00|R|F|1993-09-23|1993-10-07|1993-09-27|COLLECT COD|TRUCK|gular courts use blithely around the 2368|148805|1320|3|39|72298.20|0.08|0.03|R|F|1993-09-03|1993-09-20|1993-09-28|COLLECT COD|RAIL|ng the doggedly ironic requests are blithe 2368|155900|3446|4|17|33250.30|0.10|0.08|A|F|1993-10-03|1993-09-27|1993-10-05|NONE|FOB|fily. slyly final ideas alongside o 2369|23834|1341|1|30|52734.90|0.05|0.04|N|O|1997-04-23|1997-02-12|1997-05-21|COLLECT COD|REG AIR|pecial deposits sleep. blithely unusual w 2369|168416|8417|2|47|69767.27|0.10|0.02|N|O|1997-01-02|1997-02-18|1997-01-13|COLLECT COD|RAIL| to the regular dep 2370|45041|7546|1|3|2958.12|0.03|0.07|R|F|1994-03-24|1994-03-26|1994-04-15|COLLECT COD|SHIP|ly regular Tiresia 2370|1051|3552|2|24|22849.20|0.00|0.05|A|F|1994-05-15|1994-04-09|1994-06-12|NONE|REG AIR|final depen 2370|60844|845|3|32|57754.88|0.05|0.02|A|F|1994-04-24|1994-03-03|1994-05-15|DELIVER IN PERSON|MAIL|ies since the final deposits 2370|5044|2545|4|21|19929.84|0.04|0.01|R|F|1994-02-01|1994-02-19|1994-02-09|TAKE BACK RETURN|MAIL|ecial dependencies must have to 2371|158603|3634|1|37|61479.20|0.05|0.05|N|O|1998-02-11|1998-03-24|1998-02-27|DELIVER IN PERSON|TRUCK|s boost fluffil 2371|34147|6651|2|21|22703.94|0.00|0.05|N|O|1998-04-14|1998-02-14|1998-04-18|COLLECT COD|AIR|gle furiously regu 2371|100521|3032|3|11|16736.72|0.05|0.02|N|O|1998-02-25|1998-04-06|1998-03-23|TAKE BACK RETURN|TRUCK|requests. regular pinto beans wake. car 2371|42076|7085|4|33|33596.31|0.05|0.08|N|O|1998-03-30|1998-02-06|1998-04-05|DELIVER IN PERSON|AIR|deas are. express r 2371|164508|9541|5|22|34595.00|0.02|0.05|N|O|1998-03-26|1998-03-19|1998-04-16|DELIVER IN PERSON|REG AIR|y daring accounts. regular ins 2371|85027|5028|6|39|39468.78|0.05|0.03|N|O|1998-04-01|1998-03-13|1998-04-27|NONE|REG AIR|tructions. regular, stealthy packages wak 2371|35714|8218|7|32|52790.72|0.07|0.07|N|O|1998-02-15|1998-04-03|1998-02-23|NONE|REG AIR|the ruthless accounts. 2372|42661|2662|1|42|67353.72|0.08|0.02|N|O|1998-01-04|1998-01-02|1998-02-02|COLLECT COD|REG AIR|lar packages. regular 2372|2319|9820|2|17|20762.27|0.07|0.01|N|O|1997-12-17|1998-01-17|1997-12-25|NONE|RAIL|xcuses. slyly ironic theod 2372|163333|8366|3|12|16755.96|0.04|0.04|N|O|1998-03-21|1997-12-21|1998-04-12|DELIVER IN PERSON|SHIP|lyly according to 2372|121677|6702|4|4|6794.68|0.00|0.07|N|O|1997-12-14|1997-12-28|1997-12-16|TAKE BACK RETURN|REG AIR|e carefully blithely even epitaphs. r 2372|19420|4423|5|5|6697.10|0.02|0.04|N|O|1998-02-08|1998-01-18|1998-03-02|TAKE BACK RETURN|RAIL|ets against the 2372|188437|5992|6|11|16779.73|0.02|0.06|N|O|1998-02-14|1998-01-18|1998-03-10|TAKE BACK RETURN|FOB| silent, pending de 2372|56012|6013|7|19|18392.19|0.01|0.06|N|O|1997-12-26|1998-02-19|1998-01-02|COLLECT COD|SHIP| beans haggle sometimes 2373|190025|7583|1|17|18955.34|0.02|0.01|R|F|1994-03-29|1994-05-19|1994-04-20|COLLECT COD|AIR|auternes. 
blithely even pinto bea 2373|135214|2754|2|3|3747.63|0.08|0.08|R|F|1994-05-15|1994-06-10|1994-06-04|COLLECT COD|TRUCK|dependencies wake ironical 2373|140804|3319|3|29|53499.20|0.05|0.02|A|F|1994-06-01|1994-05-14|1994-06-17|NONE|TRUCK|yly silent ideas affix furiousl 2373|90692|8220|4|5|8413.45|0.10|0.01|R|F|1994-06-02|1994-05-03|1994-06-21|NONE|REG AIR|uffily blithely ironic requests 2374|117116|9628|1|41|46457.51|0.07|0.00|A|F|1994-01-27|1993-12-11|1994-02-12|TAKE BACK RETURN|RAIL|heodolites. requests 2374|159783|7329|2|24|44226.72|0.07|0.08|A|F|1994-02-02|1994-01-12|1994-02-04|DELIVER IN PERSON|TRUCK|. requests are above t 2374|60682|5695|3|2|3285.36|0.06|0.02|R|F|1993-12-30|1994-01-24|1994-01-02|COLLECT COD|FOB|, unusual ideas. deposits cajole quietl 2374|73993|3994|4|28|55075.72|0.04|0.08|R|F|1994-02-19|1993-12-16|1994-03-15|COLLECT COD|MAIL|ets cajole fu 2374|309|310|5|25|30232.50|0.08|0.00|A|F|1993-11-26|1993-12-15|1993-12-10|COLLECT COD|RAIL|refully pending d 2375|167272|7273|1|3|4017.81|0.02|0.08|N|O|1997-02-14|1996-12-25|1997-02-22|COLLECT COD|RAIL|slyly across the furiously e 2375|131282|3796|2|9|11819.52|0.09|0.02|N|O|1997-02-17|1996-12-27|1997-02-27|DELIVER IN PERSON|MAIL|ly against the packages. bold pinto bean 2375|46470|8975|3|26|36828.22|0.02|0.06|N|O|1997-03-18|1997-02-02|1997-03-29|TAKE BACK RETURN|TRUCK|rate across the 2375|4801|7302|4|5|8529.00|0.01|0.00|N|O|1997-01-31|1997-01-25|1997-02-22|COLLECT COD|REG AIR|final packages cajole according to the furi 2375|87210|4735|5|42|50282.82|0.01|0.08|N|O|1997-01-24|1997-02-15|1997-02-07|DELIVER IN PERSON|FOB|apades. idea 2375|125524|5525|6|20|30990.40|0.09|0.08|N|O|1996-12-01|1996-12-26|1996-12-19|TAKE BACK RETURN|SHIP|ckages! blithely enticing deposi 2400|102546|5057|1|48|74329.92|0.01|0.02|N|O|1998-10-07|1998-08-30|1998-11-03|DELIVER IN PERSON|MAIL|fore the car 2400|89966|2475|2|1|1955.96|0.04|0.07|N|O|1998-08-18|1998-09-12|1998-09-11|NONE|MAIL|silent deposits serve furious 2400|52503|19|3|23|33476.50|0.02|0.08|N|O|1998-08-05|1998-08-28|1998-08-30|NONE|SHIP|tions. fluffily ironic platelets cajole c 2400|16820|4324|4|23|39946.86|0.09|0.04|N|O|1998-10-04|1998-10-04|1998-10-31|NONE|RAIL|ages lose carefully around the regula 2401|181615|4134|1|39|66167.79|0.00|0.03|N|O|1997-09-29|1997-10-21|1997-10-17|DELIVER IN PERSON|FOB|ould affix 2401|2221|7222|2|49|55037.78|0.05|0.07|N|O|1997-09-02|1997-09-11|1997-09-13|TAKE BACK RETURN|AIR|lites cajole carefully 2402|85533|8042|1|43|65296.79|0.03|0.08|N|O|1996-09-17|1996-11-20|1996-09-22|DELIVER IN PERSON|RAIL|slyly slyly blithe sheaves 2402|151328|8874|2|24|33103.68|0.02|0.05|N|O|1996-11-21|1996-10-19|1996-11-29|DELIVER IN PERSON|SHIP|as; blithely ironic requ 2403|82868|393|1|34|62929.24|0.04|0.07|N|O|1998-05-30|1998-06-19|1998-06-05|NONE|REG AIR| slyly bold re 2403|151901|9447|2|19|37105.10|0.08|0.07|N|O|1998-04-20|1998-07-02|1998-05-13|DELIVER IN PERSON|FOB|sits. ironic in 2403|192434|2435|3|27|41213.61|0.05|0.03|N|O|1998-07-27|1998-07-08|1998-08-03|NONE|SHIP|deposits sleep slyly special theodolit 2403|30422|423|4|30|40572.60|0.05|0.06|N|O|1998-08-08|1998-06-17|1998-08-20|NONE|TRUCK|ackages sleep furiously pendin 2404|146703|1732|1|36|62989.20|0.07|0.00|N|O|1997-03-27|1997-05-16|1997-04-06|COLLECT COD|REG AIR|s nag furi 2404|35641|8145|2|1|1576.64|0.02|0.04|N|O|1997-05-22|1997-06-06|1997-05-28|DELIVER IN PERSON|MAIL|from the final orbits? 
even pinto beans hag 2404|17378|2381|3|41|53110.17|0.02|0.06|N|O|1997-06-12|1997-05-03|1997-07-12|NONE|AIR| dolphins are 2404|56668|6669|4|19|30868.54|0.09|0.03|N|O|1997-05-07|1997-05-24|1997-05-24|TAKE BACK RETURN|SHIP|cuses. quickly even in 2404|3058|8059|5|18|17298.90|0.00|0.04|N|O|1997-06-25|1997-05-06|1997-07-02|NONE|RAIL|packages. even requests according to 2405|88560|3577|1|18|27874.08|0.09|0.07|N|O|1997-01-23|1997-03-10|1997-02-03|COLLECT COD|REG AIR|carefully ironic accounts. slyly 2405|26169|3676|2|30|32854.80|0.10|0.08|N|O|1997-03-24|1997-03-10|1997-04-14|TAKE BACK RETURN|AIR|y final deposits are slyly caref 2405|16135|6136|3|49|51505.37|0.00|0.06|N|O|1996-12-24|1997-03-23|1997-01-01|TAKE BACK RETURN|FOB|cial requests. ironic, regu 2405|176791|9309|4|23|42959.17|0.08|0.05|N|O|1996-12-28|1997-01-29|1997-01-07|NONE|AIR|t wake blithely blithely regular idea 2406|169075|6624|1|18|20593.26|0.07|0.05|N|O|1997-02-17|1996-12-25|1997-02-19|COLLECT COD|MAIL|azzle furiously careful 2406|40475|2980|2|40|56618.80|0.02|0.07|N|O|1997-01-09|1996-12-02|1997-01-16|NONE|SHIP|gular accounts caj 2406|49072|9073|3|16|16337.12|0.07|0.03|N|O|1996-10-31|1996-11-28|1996-11-08|TAKE BACK RETURN|SHIP| special accou 2406|145035|64|4|34|36721.02|0.07|0.06|N|O|1996-12-01|1996-12-07|1996-12-16|NONE|AIR|hinly even accounts are slyly q 2406|186950|9469|5|25|50923.75|0.08|0.02|N|O|1996-12-03|1996-12-14|1996-12-26|COLLECT COD|MAIL|al, regular in 2406|58277|3288|6|22|27175.94|0.05|0.02|N|O|1996-11-22|1997-01-17|1996-12-15|NONE|TRUCK|hely even foxes unwind furiously aga 2406|59491|7007|7|30|43514.70|0.07|0.07|N|O|1997-01-17|1997-01-12|1997-01-22|TAKE BACK RETURN|TRUCK| final pinto beans han 2407|63032|5539|1|14|13930.42|0.04|0.02|N|O|1998-10-10|1998-08-25|1998-10-27|NONE|FOB|l dependencies s 2407|165166|5167|2|9|11080.44|0.07|0.05|N|O|1998-08-06|1998-08-11|1998-08-20|TAKE BACK RETURN|TRUCK|ts. special deposits are closely. 2407|130261|262|3|39|50359.14|0.02|0.02|N|O|1998-08-20|1998-09-12|1998-08-22|DELIVER IN PERSON|MAIL|iously final deposits solv 2407|90439|5458|4|10|14294.30|0.01|0.07|N|O|1998-08-14|1998-09-10|1998-08-29|COLLECT COD|FOB| pending instructions. theodolites x- 2407|197553|2592|5|14|23107.70|0.04|0.05|N|O|1998-09-24|1998-08-18|1998-10-06|DELIVER IN PERSON|FOB|tructions wake stealt 2407|70290|7812|6|18|22685.22|0.04|0.01|N|O|1998-10-03|1998-08-30|1998-10-19|TAKE BACK RETURN|MAIL| wake carefully. fluffily 2407|160877|5910|7|7|13565.09|0.07|0.03|N|O|1998-09-11|1998-08-15|1998-09-30|TAKE BACK RETURN|MAIL|totes are carefully accordin 2432|49371|4380|1|30|39611.10|0.03|0.02|N|O|1996-09-05|1996-10-10|1996-10-05|TAKE BACK RETURN|TRUCK| requests wake alongside of 2432|161797|1798|2|8|14870.32|0.07|0.01|N|O|1996-10-16|1996-10-01|1996-11-13|COLLECT COD|RAIL|s about the bold, close deposit 2432|108837|1348|3|13|23995.79|0.07|0.06|N|O|1996-09-03|1996-10-10|1996-10-03|NONE|RAIL|arefully about the caref 2432|12405|2406|4|14|18443.60|0.00|0.06|N|O|1996-08-18|1996-09-04|1996-08-27|TAKE BACK RETURN|RAIL|riously regular packages. p 2433|86794|6795|1|39|69450.81|0.01|0.04|R|F|1994-11-20|1994-09-23|1994-12-10|DELIVER IN PERSON|SHIP|ly final asy 2433|133323|8350|2|20|27126.40|0.05|0.06|A|F|1994-12-09|1994-10-20|1994-12-15|COLLECT COD|REG AIR|lithely blithely final ide 2433|156347|1378|3|38|53326.92|0.08|0.03|A|F|1994-10-15|1994-10-23|1994-11-06|DELIVER IN PERSON|SHIP|. slyly regular requests sle 2433|120254|2767|4|43|54792.75|0.01|0.05|A|F|1994-10-16|1994-10-23|1994-11-08|DELIVER IN PERSON|RAIL|ular requests. 
slyly even pa 2433|107164|9675|5|3|3513.48|0.06|0.02|A|F|1994-11-08|1994-09-24|1994-11-17|COLLECT COD|AIR|usly pending depos 2434|94433|4434|1|1|1427.43|0.01|0.06|N|O|1997-08-02|1997-05-28|1997-08-19|TAKE BACK RETURN|MAIL| furiously express packages. ironic, pend 2434|126777|4314|2|39|70347.03|0.09|0.05|N|O|1997-06-10|1997-06-08|1997-07-03|COLLECT COD|RAIL|r deposits sleep furiou 2434|129445|6982|3|28|41284.32|0.02|0.05|N|O|1997-06-28|1997-06-26|1997-07-15|COLLECT COD|RAIL|ven theodolites around the slyly 2434|167173|7174|4|49|60768.33|0.00|0.05|N|O|1997-08-08|1997-07-23|1997-08-27|DELIVER IN PERSON|FOB| after the requests haggle bold, fina 2435|38868|8869|1|8|14454.88|0.08|0.03|A|F|1993-06-08|1993-04-04|1993-06-29|COLLECT COD|SHIP|e fluffily quickly final accounts. care 2435|48350|3359|2|43|55829.05|0.03|0.08|A|F|1993-03-27|1993-05-20|1993-04-18|DELIVER IN PERSON|TRUCK|alongside of the s 2435|11457|6460|3|24|32842.80|0.07|0.08|R|F|1993-03-14|1993-05-20|1993-03-26|DELIVER IN PERSON|SHIP|s. carefully regular d 2435|155868|8384|4|22|42324.92|0.02|0.05|R|F|1993-05-23|1993-04-14|1993-06-04|NONE|SHIP|e final, final deposits. carefully regular 2435|71676|4184|5|3|4943.01|0.07|0.07|R|F|1993-06-01|1993-03-25|1993-06-27|DELIVER IN PERSON|FOB| final accounts ar 2435|45524|533|6|17|24981.84|0.02|0.02|A|F|1993-06-05|1993-05-05|1993-06-14|NONE|TRUCK|cajole aft 2435|120686|5711|7|8|13653.44|0.07|0.02|R|F|1993-05-03|1993-04-02|1993-05-17|COLLECT COD|SHIP|ng the fluffily special foxes nag 2436|154062|4063|1|48|53570.88|0.04|0.02|N|O|1995-10-22|1995-10-22|1995-11-16|DELIVER IN PERSON|FOB|he furiously 2436|116509|4043|2|18|27459.00|0.05|0.03|N|O|1995-10-14|1995-11-21|1995-11-12|TAKE BACK RETURN|TRUCK|y ironic accounts. furiously even packa 2436|163647|6164|3|6|10263.84|0.06|0.08|N|O|1995-10-25|1995-11-30|1995-11-24|DELIVER IN PERSON|RAIL|odolites. ep 2437|93699|6209|1|46|77863.74|0.07|0.04|A|F|1993-08-12|1993-06-16|1993-08-29|NONE|RAIL|e of the bold, dogged requests 2437|189398|1917|2|26|38672.14|0.00|0.04|A|F|1993-06-25|1993-05-22|1993-07-07|DELIVER IN PERSON|REG AIR|lyly regular accounts. 2437|1745|6746|3|23|37875.02|0.01|0.00|A|F|1993-08-15|1993-06-28|1993-08-23|TAKE BACK RETURN|SHIP|s deposits. pendi 2437|115739|8251|4|12|21056.76|0.03|0.08|A|F|1993-04-27|1993-07-01|1993-05-18|TAKE BACK RETURN|FOB|thely regular deposits. ironic fray 2437|16697|4201|5|29|46797.01|0.02|0.06|A|F|1993-05-12|1993-06-10|1993-05-25|NONE|FOB|ress dolphins. furiously fin 2437|18571|1073|6|10|14895.70|0.10|0.06|A|F|1993-05-20|1993-06-23|1993-05-22|TAKE BACK RETURN|MAIL|unts. even, ironic pl 2438|164492|9525|1|45|70042.05|0.01|0.00|A|F|1993-10-27|1993-09-24|1993-11-02|COLLECT COD|REG AIR|en theodolites w 2438|12785|2786|2|31|52631.18|0.08|0.01|R|F|1993-10-16|1993-08-31|1993-11-10|COLLECT COD|REG AIR|t. slyly ironic sh 2438|67446|9953|3|10|14134.40|0.10|0.00|R|F|1993-08-18|1993-08-28|1993-09-08|NONE|SHIP|engage car 2438|160408|5441|4|27|39646.80|0.01|0.02|R|F|1993-07-27|1993-10-01|1993-08-06|TAKE BACK RETURN|FOB|inal accounts. slyly final reques 2438|165750|783|5|28|50841.00|0.07|0.06|R|F|1993-11-05|1993-08-22|1993-11-22|TAKE BACK RETURN|TRUCK|ctions. 
bli 2438|148426|941|6|23|33911.66|0.09|0.02|R|F|1993-10-06|1993-08-17|1993-10-16|DELIVER IN PERSON|MAIL|ely; blithely special pinto beans breach 2438|182589|7626|7|46|76892.68|0.02|0.05|R|F|1993-10-27|1993-08-30|1993-11-14|COLLECT COD|SHIP| ironic requests cajole f 2439|163036|8069|1|2|2198.06|0.09|0.03|N|O|1997-04-14|1997-06-11|1997-05-09|COLLECT COD|MAIL|courts boos 2439|143488|3489|2|5|7657.40|0.07|0.01|N|O|1997-04-23|1997-04-26|1997-04-28|DELIVER IN PERSON|FOB|ites. furiously 2439|194676|7196|3|33|58432.11|0.08|0.05|N|O|1997-06-01|1997-05-15|1997-06-07|TAKE BACK RETURN|FOB|asymptotes wake packages-- furiously 2464|48367|5880|1|10|13153.60|0.05|0.03|N|O|1998-02-04|1997-12-29|1998-02-16|TAKE BACK RETURN|RAIL|slyly final pinto bean 2464|100451|5472|2|20|29029.00|0.01|0.07|N|O|1997-12-26|1998-01-02|1998-01-24|DELIVER IN PERSON|FOB|sts. slyly close ideas shall h 2465|67190|2203|1|27|31244.13|0.05|0.02|N|O|1995-09-05|1995-09-07|1995-09-17|DELIVER IN PERSON|FOB|posits boost carefully unusual instructio 2465|50636|8152|2|34|53945.42|0.02|0.05|N|O|1995-10-02|1995-08-04|1995-10-09|COLLECT COD|RAIL|posits wake. regular package 2465|31032|6039|3|8|7704.24|0.10|0.00|N|O|1995-10-16|1995-08-26|1995-11-07|TAKE BACK RETURN|FOB|s across the express deposits wak 2465|147716|5259|4|45|79366.95|0.03|0.01|N|O|1995-09-27|1995-08-25|1995-10-06|NONE|TRUCK|y silent foxes. final pinto beans above 2465|46762|9267|5|50|85438.00|0.01|0.04|N|O|1995-09-01|1995-09-06|1995-09-18|TAKE BACK RETURN|TRUCK|the pending th 2465|123555|3556|6|20|31571.00|0.03|0.03|N|O|1995-08-16|1995-08-13|1995-09-02|COLLECT COD|FOB|uriously? furiously ironic excu 2466|185322|2877|1|16|22517.12|0.00|0.02|R|F|1994-04-20|1994-04-20|1994-05-09|COLLECT COD|FOB|to beans sl 2466|104696|7207|2|10|17006.90|0.00|0.00|A|F|1994-05-08|1994-04-06|1994-06-05|DELIVER IN PERSON|AIR|sly regular deposits. regular, regula 2466|13198|8201|3|29|32224.51|0.10|0.07|A|F|1994-06-11|1994-04-27|1994-07-10|DELIVER IN PERSON|FOB|ckages. bold requests nag carefully. 2466|10394|5397|4|29|37827.31|0.04|0.04|A|F|1994-04-01|1994-04-20|1994-04-23|DELIVER IN PERSON|MAIL|es boost fluffily ab 2466|78716|8717|5|30|50841.30|0.02|0.01|A|F|1994-04-11|1994-05-02|1994-05-02|DELIVER IN PERSON|REG AIR|. fluffily even pinto beans are idly. f 2466|172628|7663|6|19|32311.78|0.10|0.07|R|F|1994-06-12|1994-04-18|1994-07-12|NONE|MAIL|ccounts cajole a 2466|154218|1764|7|35|44527.35|0.10|0.00|A|F|1994-06-01|1994-05-27|1994-06-21|COLLECT COD|AIR| packages detect carefully: ironically sl 2467|132883|423|1|7|13411.16|0.00|0.00|N|O|1995-07-28|1995-10-04|1995-08-27|NONE|REG AIR|gular packages cajole 2468|93522|8541|1|46|69713.92|0.00|0.04|N|O|1997-07-16|1997-08-09|1997-08-07|COLLECT COD|SHIP|unusual theodolites su 2468|20527|5532|2|43|62243.36|0.00|0.04|N|O|1997-08-17|1997-08-21|1997-08-30|DELIVER IN PERSON|FOB|uriously eve 2468|194115|4116|3|44|53200.84|0.00|0.03|N|O|1997-10-01|1997-08-02|1997-10-09|TAKE BACK RETURN|RAIL|egular, silent sheave 2468|81607|1608|4|5|7943.00|0.08|0.00|N|O|1997-06-28|1997-08-02|1997-07-22|NONE|MAIL| sleep fluffily acc 2468|158353|869|5|18|25404.30|0.07|0.00|N|O|1997-07-25|1997-08-26|1997-08-14|DELIVER IN PERSON|REG AIR|cies. 
fluffily r 2469|165688|3237|1|11|19290.48|0.00|0.04|N|O|1997-02-09|1997-01-26|1997-02-16|NONE|TRUCK|ies wake carefully b 2469|113359|8382|2|16|21957.60|0.07|0.06|N|O|1997-02-19|1997-02-04|1997-03-18|NONE|MAIL|ing asymptotes 2469|10263|2765|3|48|56316.48|0.05|0.06|N|O|1997-01-11|1997-01-03|1997-01-15|TAKE BACK RETURN|AIR|riously even theodolites u 2469|87876|5401|4|35|65235.45|0.06|0.06|N|O|1997-02-04|1997-02-02|1997-02-17|DELIVER IN PERSON|RAIL|ld packages haggle regular frets. fluffily 2469|120671|8208|5|30|50750.10|0.09|0.01|N|O|1996-12-21|1997-01-29|1997-01-02|COLLECT COD|SHIP| accounts. regular theodolites affix fu 2469|103013|3014|6|49|49784.49|0.02|0.02|N|O|1997-03-03|1996-12-26|1997-03-13|NONE|AIR| requests are car 2469|126264|3801|7|8|10322.08|0.02|0.00|N|O|1997-03-15|1997-01-20|1997-04-13|NONE|TRUCK|s. regular 2470|109485|4506|1|12|17933.76|0.06|0.06|N|O|1997-07-12|1997-05-24|1997-07-17|TAKE BACK RETURN|FOB|l accounts. deposits nag daringly. express, 2470|99927|7455|2|50|96346.00|0.03|0.03|N|O|1997-06-02|1997-06-01|1997-06-09|COLLECT COD|AIR| packages 2470|63936|6443|3|10|18999.30|0.05|0.08|N|O|1997-06-20|1997-06-19|1997-06-24|TAKE BACK RETURN|FOB| ironic requests a 2470|161046|1047|4|30|33211.20|0.04|0.08|N|O|1997-08-04|1997-07-13|1997-08-14|DELIVER IN PERSON|AIR|s across the furiously fina 2471|83845|3846|1|37|67667.08|0.05|0.01|N|O|1998-05-28|1998-04-17|1998-06-08|COLLECT COD|TRUCK|ounts mold blithely carefully express depo 2496|140193|2708|1|38|46861.22|0.02|0.07|R|F|1994-03-26|1994-04-06|1994-04-23|COLLECT COD|RAIL| bold accounts. furi 2496|22609|2610|2|39|59732.40|0.03|0.00|R|F|1994-03-23|1994-02-18|1994-04-10|TAKE BACK RETURN|FOB|arefully special dependencies abo 2496|188554|3591|3|36|59131.80|0.09|0.04|R|F|1994-03-27|1994-03-15|1994-04-17|TAKE BACK RETURN|SHIP|ully ironic f 2496|23236|5739|4|30|34776.90|0.04|0.01|A|F|1994-01-27|1994-03-11|1994-01-31|DELIVER IN PERSON|RAIL|ake. ironic foxes cajole quickly. fu 2497|11060|8564|1|34|33016.04|0.02|0.03|R|F|1992-09-02|1992-10-19|1992-09-12|COLLECT COD|AIR|ronic accounts. p 2497|76887|9395|2|15|27958.20|0.09|0.02|A|F|1992-12-23|1992-11-20|1993-01-18|DELIVER IN PERSON|SHIP|sly against the 2497|33578|8585|3|28|42323.96|0.02|0.08|A|F|1992-12-02|1992-11-21|1992-12-04|DELIVER IN PERSON|REG AIR|ouches. special, regular requests 2497|143832|3833|4|48|90039.84|0.06|0.05|A|F|1992-09-29|1992-11-13|1992-10-19|TAKE BACK RETURN|AIR| even, regular requests across 2497|174813|7331|5|28|52858.68|0.04|0.05|A|F|1992-11-10|1992-09-30|1992-11-18|DELIVER IN PERSON|MAIL|hely bold ideas. unusual instructions ac 2497|70598|599|6|19|29803.21|0.05|0.08|A|F|1992-11-10|1992-11-20|1992-12-05|TAKE BACK RETURN|TRUCK| instructions? carefully daring accounts 2498|142210|9753|1|48|60106.08|0.10|0.01|R|F|1993-11-25|1994-01-09|1993-12-24|DELIVER IN PERSON|RAIL|onic requests wake 2499|149310|4339|1|15|20389.65|0.04|0.06|N|O|1995-12-21|1995-12-06|1996-01-19|DELIVER IN PERSON|FOB| slyly across the slyly 2499|45085|7590|2|48|49443.84|0.09|0.03|N|O|1995-10-14|1995-12-12|1995-11-11|DELIVER IN PERSON|AIR|ronic ideas cajole quickly requests. 
caref 2499|132086|4600|3|31|34660.48|0.09|0.05|N|O|1995-12-09|1995-10-28|1996-01-05|COLLECT COD|AIR|to beans across the carefully ironic theodo 2499|158406|922|4|39|57111.60|0.06|0.02|N|O|1995-10-26|1995-10-27|1995-11-07|TAKE BACK RETURN|SHIP|otes sublat 2499|129132|4157|5|6|6966.78|0.02|0.01|N|O|1995-11-19|1995-12-14|1995-12-08|NONE|SHIP|cording to the 2499|118583|1095|6|12|19218.96|0.04|0.05|N|O|1995-11-18|1995-12-13|1995-11-23|COLLECT COD|REG AIR|le furiously along the r 2500|191268|1269|1|40|54370.40|0.00|0.02|A|F|1992-09-02|1992-09-30|1992-09-06|DELIVER IN PERSON|SHIP|efully unusual dolphins s 2500|36252|6253|2|34|40400.50|0.06|0.02|R|F|1992-10-03|1992-11-11|1992-10-29|DELIVER IN PERSON|TRUCK| stealthy a 2500|79210|1718|3|41|48757.61|0.02|0.00|R|F|1992-09-02|1992-11-11|1992-09-06|DELIVER IN PERSON|RAIL|s could have to integrate after the 2500|68182|689|4|17|19553.06|0.01|0.02|A|F|1992-09-30|1992-10-16|1992-10-05|DELIVER IN PERSON|REG AIR|encies-- ironic, even packages 2501|83413|938|1|4|5585.64|0.10|0.06|N|O|1997-07-17|1997-07-27|1997-07-22|COLLECT COD|RAIL|quests. furiously final 2501|105961|982|2|33|64909.68|0.01|0.04|N|O|1997-07-14|1997-08-09|1997-07-26|NONE|MAIL|leep furiously packages. even sauternes 2501|71879|4387|3|20|37017.40|0.10|0.06|N|O|1997-09-23|1997-07-01|1997-10-03|DELIVER IN PERSON|RAIL|equests. furiou 2501|57411|4927|4|26|35578.66|0.09|0.01|N|O|1997-07-15|1997-08-15|1997-07-28|DELIVER IN PERSON|SHIP|c accounts. express, iron 2502|162155|2156|1|33|40165.95|0.10|0.06|R|F|1993-08-12|1993-07-22|1993-09-04|COLLECT COD|REG AIR|have to print 2503|122548|7573|1|33|51827.82|0.06|0.01|R|F|1993-07-06|1993-08-14|1993-08-02|NONE|SHIP|nal courts integrate according to the 2503|64060|1579|2|28|28673.68|0.06|0.01|R|F|1993-08-08|1993-08-31|1993-08-10|NONE|SHIP|s wake quickly slyly 2503|45308|5309|3|50|62665.00|0.09|0.01|A|F|1993-09-22|1993-08-17|1993-09-29|DELIVER IN PERSON|TRUCK|s around the slyly 2503|90061|7589|4|27|28378.62|0.09|0.00|A|F|1993-07-12|1993-07-24|1993-07-22|DELIVER IN PERSON|TRUCK|lly even p 2503|47679|184|5|3|4880.01|0.04|0.02|A|F|1993-07-10|1993-09-17|1993-07-19|TAKE BACK RETURN|TRUCK|s cajole. slyly close courts nod f 2503|127722|2747|6|39|68239.08|0.05|0.05|R|F|1993-10-11|1993-09-09|1993-10-16|NONE|MAIL|d carefully fluffily 2503|18588|3591|7|17|25611.86|0.09|0.08|R|F|1993-09-04|1993-07-31|1993-09-23|DELIVER IN PERSON|SHIP|c accounts haggle blithel 2528|195|196|1|10|10951.90|0.02|0.03|R|F|1994-12-12|1994-12-29|1994-12-28|COLLECT COD|REG AIR|ely. fluffily even re 2528|73433|8448|2|13|18283.59|0.00|0.03|A|F|1994-11-27|1995-01-20|1994-12-03|TAKE BACK RETURN|REG AIR|ggle furiously. slyly final asympt 2528|174348|4349|3|35|49781.90|0.10|0.00|R|F|1994-12-19|1995-02-04|1995-01-15|NONE|MAIL|, even excuses. 
even, 2528|64125|6632|4|37|40297.44|0.00|0.01|A|F|1994-12-25|1995-02-02|1994-12-31|COLLECT COD|AIR|ng the pending excuses haggle after the bl 2529|130636|8176|1|4|6666.52|0.07|0.07|N|O|1996-10-19|1996-11-18|1996-10-24|DELIVER IN PERSON|SHIP|al dependencies haggle slyly alongsi 2530|20319|320|1|9|11153.79|0.09|0.03|R|F|1994-05-10|1994-04-30|1994-05-24|TAKE BACK RETURN|REG AIR|lyly ironic 2530|92397|9925|2|42|58354.38|0.04|0.08|R|F|1994-03-27|1994-05-20|1994-03-29|NONE|RAIL|ng platelets wake s 2530|107824|335|3|8|14654.56|0.10|0.08|A|F|1994-05-02|1994-05-08|1994-05-24|DELIVER IN PERSON|MAIL|ial asymptotes snooze slyly regular 2531|147722|5265|1|9|15927.48|0.03|0.07|N|O|1996-07-27|1996-07-03|1996-08-01|DELIVER IN PERSON|AIR|t the dogged, un 2531|157000|2031|2|3|3171.00|0.07|0.06|N|O|1996-07-20|1996-06-20|1996-08-10|NONE|MAIL|he quickly ev 2531|85917|934|3|20|38058.20|0.06|0.04|N|O|1996-07-18|1996-06-25|1996-07-29|TAKE BACK RETURN|TRUCK|into beans. furious 2531|190889|8447|4|36|71275.68|0.08|0.01|N|O|1996-06-11|1996-07-26|1996-06-27|NONE|MAIL|y ironic, bold packages. blithely e 2531|55891|8397|5|28|51712.92|0.03|0.07|N|O|1996-07-06|1996-07-31|1996-07-19|TAKE BACK RETURN|REG AIR|its. busily 2531|144342|1885|6|46|63771.64|0.10|0.08|N|O|1996-07-03|1996-06-27|1996-07-12|TAKE BACK RETURN|REG AIR|e final, bold pains. ir 2532|52631|2632|1|3|4750.89|0.06|0.07|N|O|1995-12-14|1995-11-28|1995-12-15|COLLECT COD|FOB|unusual sentiments. even pinto 2532|159799|7345|2|33|61340.07|0.06|0.05|N|O|1995-11-23|1996-01-04|1995-12-16|DELIVER IN PERSON|TRUCK|rve carefully slyly ironic accounts! fluf 2532|134223|1763|3|1|1257.22|0.00|0.06|N|O|1996-01-27|1995-11-23|1996-01-29|DELIVER IN PERSON|REG AIR|ely final ideas cajole despite the ca 2532|77534|42|4|50|75576.50|0.02|0.02|N|O|1995-11-13|1996-01-01|1995-11-26|NONE|TRUCK|yly after the fluffily regul 2532|113046|8069|5|9|9531.36|0.09|0.04|N|O|1995-11-30|1995-11-23|1995-12-12|DELIVER IN PERSON|TRUCK|cial ideas haggle slyly pending request 2532|149281|9282|6|20|26605.60|0.09|0.05|N|O|1995-12-02|1995-11-26|1995-12-08|TAKE BACK RETURN|AIR|er the slyly pending 2533|53585|8596|1|36|55388.88|0.06|0.04|N|O|1997-06-10|1997-04-28|1997-07-01|NONE|REG AIR|ss requests sleep neve 2533|197072|9592|2|5|5845.35|0.10|0.04|N|O|1997-05-26|1997-06-02|1997-06-24|NONE|FOB|ccounts. ironic, special accounts boo 2533|182962|517|3|37|75663.52|0.00|0.08|N|O|1997-05-10|1997-04-26|1997-05-28|COLLECT COD|SHIP| haggle carefully 2533|29341|1844|4|17|21595.78|0.06|0.02|N|O|1997-05-23|1997-05-10|1997-06-18|NONE|FOB|ackages. blith 2533|125554|8067|5|38|60022.90|0.09|0.00|N|O|1997-05-10|1997-06-02|1997-05-28|TAKE BACK RETURN|REG AIR|of the regular accounts. even packages caj 2533|183334|3335|6|20|28346.60|0.05|0.08|N|O|1997-07-04|1997-04-30|1997-07-05|COLLECT COD|FOB|thless excuses are b 2533|93767|8786|7|14|24650.64|0.06|0.04|N|O|1997-07-06|1997-05-08|1997-08-03|COLLECT COD|FOB|ut the pending, special depos 2534|138552|6092|1|29|46125.95|0.07|0.07|N|O|1996-08-09|1996-09-29|1996-08-11|COLLECT COD|TRUCK|ugouts haggle slyly. final 2534|26716|1721|2|49|80492.79|0.08|0.08|N|O|1996-09-01|1996-08-20|1996-09-06|NONE|SHIP|sometimes regular requests. blithely unus 2534|802|3303|3|50|85140.00|0.10|0.06|N|O|1996-09-25|1996-10-07|1996-10-09|TAKE BACK RETURN|AIR|ideas. deposits use. 
slyly regular pa 2534|74217|1739|4|43|51222.03|0.09|0.02|N|O|1996-10-25|1996-09-30|1996-11-05|TAKE BACK RETURN|REG AIR|ngly final depos 2534|164808|9841|5|14|26219.20|0.05|0.02|N|O|1996-08-12|1996-09-26|1996-08-28|COLLECT COD|MAIL|eposits doze quickly final 2534|115962|8474|6|12|23735.52|0.02|0.02|N|O|1996-07-29|1996-10-12|1996-08-14|TAKE BACK RETURN|AIR|sual depos 2534|172536|5054|7|17|27345.01|0.02|0.07|N|O|1996-07-22|1996-09-15|1996-08-03|NONE|SHIP|riously regular 2535|198027|3066|1|5|5625.10|0.06|0.01|A|F|1993-09-07|1993-07-25|1993-09-29|DELIVER IN PERSON|REG AIR|, unusual reque 2535|38012|516|2|12|11400.12|0.08|0.05|A|F|1993-07-17|1993-08-17|1993-07-31|TAKE BACK RETURN|FOB|uses sleep among the packages. excuses 2535|53012|3013|3|5|4825.05|0.09|0.06|R|F|1993-07-28|1993-08-14|1993-08-11|DELIVER IN PERSON|SHIP| across the express requests. silent, eve 2535|159290|4321|4|19|25636.51|0.01|0.02|A|F|1993-06-01|1993-08-01|1993-06-19|DELIVER IN PERSON|FOB|ructions. final requests 2535|173753|8788|5|25|45668.75|0.07|0.04|A|F|1993-07-19|1993-08-07|1993-07-27|NONE|REG AIR|ions believe ab 2560|168823|8824|1|41|77564.62|0.07|0.01|R|F|1992-10-23|1992-11-11|1992-11-22|NONE|SHIP| after the accounts. regular foxes are be 2560|3764|8765|2|27|45029.52|0.00|0.01|R|F|1992-12-03|1992-11-16|1992-12-30|NONE|MAIL| against the carefully 2560|45914|3427|3|31|57657.21|0.01|0.05|A|F|1992-11-14|1992-10-14|1992-12-11|DELIVER IN PERSON|AIR|to beans. blithely regular Tiresias int 2560|71128|6143|4|36|39568.32|0.01|0.02|A|F|1992-10-18|1992-10-30|1992-11-05|TAKE BACK RETURN|MAIL|accounts alongside of the excuses are 2560|41835|9348|5|9|15991.47|0.04|0.02|A|F|1992-10-23|1992-10-29|1992-11-02|COLLECT COD|REG AIR| deposits affix quickly. unusual, eve 2560|107511|7512|6|13|19740.63|0.03|0.06|A|F|1992-09-07|1992-10-21|1992-09-24|COLLECT COD|FOB|slyly final accoun 2561|24579|9584|1|32|48114.24|0.02|0.01|N|O|1998-01-05|1997-12-28|1998-01-26|DELIVER IN PERSON|REG AIR|bold packages wake slyly. slyly 2561|97066|2085|2|5|5315.30|0.07|0.04|N|O|1997-12-27|1998-01-23|1998-01-13|TAKE BACK RETURN|AIR|p ironic, regular pinto beans. 2561|172518|2519|3|47|74753.97|0.04|0.02|N|O|1997-11-19|1998-01-21|1997-12-03|DELIVER IN PERSON|REG AIR|larly pending t 2561|107536|7537|4|39|60197.67|0.08|0.06|N|O|1998-01-20|1997-12-16|1998-02-05|TAKE BACK RETURN|MAIL|equests are furiously against the 2561|149929|4958|5|2|3957.84|0.04|0.08|N|O|1998-03-14|1998-01-21|1998-03-27|DELIVER IN PERSON|TRUCK|s are. silently silent foxes sleep about 2561|50268|5279|6|14|17055.64|0.02|0.03|N|O|1998-03-07|1998-02-04|1998-03-21|COLLECT COD|RAIL|ep unusual, ironic accounts 2562|52202|9718|1|28|32317.60|0.04|0.03|R|F|1992-10-04|1992-09-24|1992-10-09|COLLECT COD|MAIL|ans haggle special, special packages. 2562|147205|7206|2|1|1252.20|0.01|0.06|R|F|1992-10-16|1992-09-18|1992-10-17|NONE|TRUCK| slyly final ideas haggle car 2562|65577|5578|3|25|38564.25|0.05|0.03|A|F|1992-11-23|1992-10-08|1992-12-19|DELIVER IN PERSON|REG AIR| accounts-- silent, unusual ideas a 2562|147127|2156|4|37|43442.44|0.08|0.03|R|F|1992-10-29|1992-10-06|1992-11-09|COLLECT COD|FOB|. slyly regular ideas according to the fl 2562|159068|1584|5|29|32684.74|0.05|0.08|A|F|1992-11-01|1992-09-29|1992-11-13|TAKE BACK RETURN|MAIL|eep against the furiously r 2562|49696|2201|6|17|27976.73|0.01|0.06|A|F|1992-10-15|1992-10-08|1992-10-26|DELIVER IN PERSON|TRUCK|lar pinto beans. 
blithely ev 2563|64353|6860|1|10|13173.50|0.07|0.04|A|F|1994-01-26|1993-12-19|1994-01-28|DELIVER IN PERSON|AIR|tealthily abo 2563|166759|1792|2|28|51121.00|0.04|0.03|R|F|1994-03-17|1994-02-04|1994-04-13|TAKE BACK RETURN|RAIL|hely regular depe 2563|118456|5990|3|39|57503.55|0.07|0.00|R|F|1994-02-10|1993-12-31|1994-02-19|COLLECT COD|FOB|lent requests should integrate; carefully e 2563|89071|6596|4|50|53003.50|0.01|0.01|A|F|1994-01-26|1994-01-03|1994-02-09|DELIVER IN PERSON|SHIP|ly regular, regular excuses. bold plate 2563|14990|4991|5|42|80009.58|0.06|0.08|R|F|1994-02-21|1994-02-14|1994-03-04|DELIVER IN PERSON|AIR|ymptotes nag furiously slyly even inst 2563|120830|831|6|5|9254.15|0.10|0.00|R|F|1993-12-27|1993-12-19|1994-01-02|DELIVER IN PERSON|REG AIR| the quickly final theodolite 2564|111220|1221|1|4|4924.88|0.02|0.00|R|F|1994-11-12|1994-10-29|1994-12-04|NONE|MAIL|y express requests sleep furi 2565|143630|3631|1|42|70292.46|0.04|0.08|N|O|1998-04-07|1998-04-02|1998-05-04|NONE|AIR|ngly silent 2565|188845|8846|2|26|50279.84|0.05|0.08|N|O|1998-05-07|1998-04-09|1998-05-15|DELIVER IN PERSON|TRUCK| pinto beans about the slyly regula 2565|114394|1928|3|34|47885.26|0.06|0.06|N|O|1998-03-19|1998-04-12|1998-04-17|DELIVER IN PERSON|SHIP|nstructions was carefu 2565|16224|3728|4|25|28505.50|0.10|0.08|N|O|1998-06-27|1998-05-20|1998-07-13|DELIVER IN PERSON|RAIL|, express accounts. final id 2565|75465|5466|5|26|37451.96|0.08|0.03|N|O|1998-03-05|1998-04-11|1998-03-11|TAKE BACK RETURN|AIR|ites wake. ironic acco 2565|140191|5220|6|48|59097.12|0.08|0.07|N|O|1998-06-18|1998-05-06|1998-07-13|DELIVER IN PERSON|TRUCK|r instructions sleep qui 2566|147224|9739|1|19|24153.18|0.06|0.07|R|F|1992-12-21|1992-11-24|1992-12-22|DELIVER IN PERSON|MAIL|ests. silent 2566|180474|8029|2|42|65287.74|0.08|0.02|R|F|1992-12-20|1992-12-22|1992-12-29|COLLECT COD|MAIL|ously ironic accounts 2566|22648|5151|3|18|28271.52|0.09|0.02|A|F|1992-11-16|1992-12-24|1992-12-16|COLLECT COD|FOB| braids according t 2566|41449|3954|4|3|4171.32|0.05|0.02|A|F|1992-11-04|1992-12-30|1992-12-04|TAKE BACK RETURN|FOB|ckages are ironic Tiresias. furious 2566|21214|1215|5|9|10216.89|0.04|0.03|R|F|1992-12-14|1992-12-28|1992-12-16|NONE|FOB|blithely bold accounts? quickl 2566|127422|9935|6|1|1449.42|0.07|0.03|A|F|1992-10-28|1992-11-20|1992-11-22|TAKE BACK RETURN|AIR|theodolites wake pending 2567|25711|3218|1|39|63831.69|0.03|0.04|N|O|1998-05-10|1998-05-10|1998-05-21|NONE|SHIP|ns. furiously final dependencies cajo 2567|111886|1887|2|50|94894.00|0.06|0.05|N|O|1998-05-05|1998-04-18|1998-05-09|DELIVER IN PERSON|TRUCK|. carefully pending foxes are furi 2567|51679|4185|3|6|9784.02|0.03|0.06|N|O|1998-04-21|1998-04-14|1998-05-11|NONE|RAIL|s cajole regular, final acco 2567|157535|51|4|50|79626.50|0.05|0.03|N|O|1998-03-27|1998-05-25|1998-04-23|DELIVER IN PERSON|FOB|pinto beans? r 2567|80318|7843|5|46|59722.26|0.07|0.02|N|O|1998-06-02|1998-04-30|1998-06-13|COLLECT COD|AIR|efully pending epitaphs. carefully reg 2567|99455|4474|6|32|46542.40|0.01|0.07|N|O|1998-05-24|1998-04-30|1998-06-14|NONE|RAIL| the even, iro 2567|134307|9334|7|43|57675.90|0.06|0.02|N|O|1998-05-11|1998-04-15|1998-05-29|NONE|RAIL|requests. 
final courts cajole 2592|89634|9635|1|7|11365.41|0.10|0.04|R|F|1993-03-13|1993-04-25|1993-04-01|NONE|REG AIR| carefully special theodolites integrate 2592|65700|3219|2|2|3331.40|0.10|0.00|A|F|1993-03-24|1993-04-05|1993-04-16|DELIVER IN PERSON|RAIL|side of the b 2593|104022|1553|1|37|37962.74|0.08|0.06|R|F|1993-12-14|1993-10-08|1994-01-04|NONE|SHIP|s wake bravel 2593|89185|6710|2|28|32877.04|0.08|0.03|A|F|1993-10-30|1993-10-18|1993-11-06|DELIVER IN PERSON|SHIP|y even escapades shall 2593|127040|9553|3|6|6402.24|0.04|0.05|A|F|1993-11-28|1993-10-04|1993-12-28|TAKE BACK RETURN|REG AIR|ular packages. re 2593|160005|2522|4|44|46860.00|0.02|0.08|A|F|1993-09-05|1993-10-23|1993-09-29|NONE|RAIL|ents impress furiously; unusual theodoli 2593|3498|3499|5|3|4204.47|0.03|0.00|A|F|1993-12-16|1993-11-01|1993-12-29|COLLECT COD|SHIP|the furiously 2593|174570|4571|6|1|1644.57|0.08|0.08|A|F|1993-11-23|1993-10-25|1993-12-04|DELIVER IN PERSON|RAIL| accounts wake slyly 2593|191389|6428|7|11|16284.18|0.00|0.07|R|F|1993-11-01|1993-11-19|1993-11-28|TAKE BACK RETURN|RAIL|express packages sleep bold re 2594|71490|1491|1|7|10230.43|0.06|0.02|R|F|1993-03-26|1993-03-05|1993-04-24|DELIVER IN PERSON|FOB|arls cajole 2594|123171|5684|2|13|15524.21|0.10|0.05|R|F|1993-02-06|1993-03-01|1993-02-23|TAKE BACK RETURN|TRUCK|fully special accounts use courts 2594|125492|8005|3|24|36419.76|0.03|0.00|A|F|1993-01-31|1993-03-10|1993-02-04|COLLECT COD|REG AIR|lar accounts sleep fur 2594|143957|8986|4|46|92043.70|0.00|0.08|R|F|1993-04-17|1993-03-06|1993-04-21|TAKE BACK RETURN|SHIP|beans. instructions across t 2595|60945|946|1|42|80049.48|0.08|0.02|N|O|1996-03-24|1996-01-28|1996-04-10|DELIVER IN PERSON|MAIL|ggle furiou 2595|87061|9570|2|30|31441.80|0.05|0.01|N|O|1996-03-05|1996-02-23|1996-03-19|NONE|AIR|ctions. regula 2595|23369|8374|3|19|24554.84|0.01|0.05|N|O|1995-12-23|1996-03-02|1996-01-17|COLLECT COD|MAIL|ns are neve 2595|158785|6331|4|29|53469.62|0.07|0.05|N|O|1996-01-01|1996-02-13|1996-01-18|TAKE BACK RETURN|RAIL|ronic accounts haggle carefully fin 2595|85077|94|5|30|31862.10|0.09|0.07|N|O|1996-03-16|1996-01-31|1996-04-05|TAKE BACK RETURN|FOB|. final orbits cajole 2595|81859|9384|6|31|57066.35|0.06|0.04|N|O|1996-02-07|1996-02-10|1996-03-05|DELIVER IN PERSON|AIR|tipliers w 2596|169486|7035|1|6|9332.88|0.05|0.01|N|O|1996-12-15|1996-11-02|1996-12-29|TAKE BACK RETURN|TRUCK|ily special re 2596|138885|3912|2|43|82726.84|0.07|0.03|N|O|1996-09-03|1996-10-26|1996-09-15|NONE|FOB|ial packages haggl 2596|38443|947|3|19|26247.36|0.10|0.00|N|O|1996-09-02|1996-11-03|1996-09-06|COLLECT COD|AIR|ias mold! sp 2596|104266|4267|4|10|12702.60|0.06|0.05|N|O|1996-08-25|1996-11-05|1996-09-13|DELIVER IN PERSON|REG AIR| instructions shall have 2597|83782|8799|1|24|42378.72|0.07|0.00|A|F|1993-05-15|1993-03-06|1993-05-25|TAKE BACK RETURN|FOB|pending packages. enticingly fi 2598|6983|4484|1|12|22679.76|0.00|0.01|N|O|1996-06-17|1996-04-12|1996-06-24|COLLECT COD|TRUCK|express packages nag sly 2598|147005|4548|2|40|42080.00|0.07|0.02|N|O|1996-05-11|1996-05-19|1996-06-08|TAKE BACK RETURN|AIR|the enticing 2598|103709|8730|3|4|6850.80|0.03|0.03|N|O|1996-05-23|1996-05-13|1996-05-25|COLLECT COD|AIR| across the furiously fi 2598|22081|7086|4|19|19058.52|0.02|0.00|N|O|1996-04-09|1996-05-30|1996-04-17|TAKE BACK RETURN|RAIL|nic packages. 
even accounts 2598|105924|3455|5|12|23159.04|0.01|0.08|N|O|1996-04-14|1996-04-24|1996-04-21|TAKE BACK RETURN|REG AIR|eposits cajol 2599|100735|3246|1|11|19093.03|0.08|0.08|N|O|1997-02-01|1996-12-14|1997-02-27|TAKE BACK RETURN|FOB| express accoun 2599|41370|6379|2|26|34095.62|0.03|0.04|N|O|1996-11-08|1996-12-21|1996-11-24|TAKE BACK RETURN|AIR|nag carefully 2599|98819|8820|3|29|52716.49|0.09|0.03|N|O|1997-01-10|1996-12-10|1997-02-02|COLLECT COD|RAIL|ly express dolphins. special, 2624|62426|7439|1|15|20826.30|0.03|0.07|N|O|1997-02-28|1997-02-19|1997-03-21|DELIVER IN PERSON|AIR|le. quickly pending requests 2624|188685|8686|2|12|21284.16|0.07|0.00|N|O|1997-02-24|1997-02-22|1997-02-27|DELIVER IN PERSON|SHIP|er the quickly unu 2625|19346|9347|1|42|53144.28|0.02|0.04|R|F|1992-10-18|1992-11-17|1992-10-23|DELIVER IN PERSON|AIR| even accounts haggle furiously 2626|21431|8938|1|45|60859.35|0.09|0.04|N|O|1995-11-22|1995-11-01|1995-11-23|NONE|AIR|deposits wake blithely according to 2626|174354|1906|2|2|2856.70|0.05|0.07|N|O|1995-10-19|1995-11-09|1995-10-24|TAKE BACK RETURN|FOB|uffy accounts haggle furiously above 2626|153317|5833|3|40|54812.40|0.05|0.07|N|O|1995-09-28|1995-12-03|1995-10-10|NONE|REG AIR|eans. ironic deposits haggle. depo 2627|130582|8122|1|28|45152.24|0.09|0.02|R|F|1992-05-14|1992-05-09|1992-05-31|COLLECT COD|SHIP|ggedly final excuses nag packages. f 2628|105867|8378|1|44|82405.84|0.07|0.03|R|F|1994-01-11|1994-01-14|1994-01-13|DELIVER IN PERSON|SHIP|lyly final, pending ide 2628|105425|7936|2|14|20025.88|0.01|0.03|A|F|1994-01-28|1993-11-30|1994-02-20|TAKE BACK RETURN|SHIP|g the furiously unusual pi 2628|63884|1403|3|42|77610.96|0.00|0.00|A|F|1993-11-20|1994-01-04|1993-12-19|DELIVER IN PERSON|TRUCK|ld notornis alongside 2628|94014|6524|4|23|23184.23|0.08|0.04|A|F|1993-10-27|1994-01-08|1993-11-12|DELIVER IN PERSON|TRUCK|usual packages sleep about the fina 2628|89996|2505|5|50|99299.50|0.07|0.01|A|F|1994-01-13|1993-12-11|1994-01-14|NONE|AIR|posits serve carefully toward 2629|117875|7876|1|6|11357.22|0.06|0.05|N|O|1998-06-10|1998-05-29|1998-06-13|DELIVER IN PERSON|SHIP|dolites hinder bli 2629|123076|613|2|31|34071.17|0.08|0.03|N|O|1998-05-24|1998-05-26|1998-06-10|COLLECT COD|AIR|ate blithely bold, regular deposits. bold 2629|127428|7429|3|29|42207.18|0.08|0.07|N|O|1998-07-09|1998-06-17|1998-07-12|TAKE BACK RETURN|AIR|eposits serve unusual, express i 2629|69303|6822|4|33|41985.90|0.06|0.03|N|O|1998-05-29|1998-05-14|1998-05-30|NONE|TRUCK|es. slowly express accounts are along the 2630|28480|3485|1|46|64790.08|0.05|0.03|R|F|1992-11-05|1992-12-17|1992-12-05|TAKE BACK RETURN|MAIL|uests cajole. e 2630|56285|1296|2|8|9930.24|0.09|0.07|A|F|1992-11-16|1993-01-01|1992-12-07|DELIVER IN PERSON|TRUCK|indle fluffily silent, ironic pi 2630|172605|7640|3|45|75492.00|0.08|0.07|A|F|1993-01-04|1993-01-11|1993-01-09|NONE|FOB|edly express ideas. carefully final 2630|161536|6569|4|29|46328.37|0.08|0.07|A|F|1992-12-03|1993-01-04|1992-12-12|DELIVER IN PERSON|SHIP|efully unusual dependencies. even i 2631|121656|4169|1|42|70461.30|0.00|0.03|A|F|1994-01-04|1993-12-01|1994-01-16|TAKE BACK RETURN|SHIP|ect carefully at the furiously final the 2631|66551|1564|2|4|6070.20|0.07|0.06|R|F|1993-11-03|1993-12-17|1993-11-05|COLLECT COD|AIR|special theodolites. a 2631|117876|5410|3|15|28408.05|0.06|0.05|A|F|1993-09-30|1993-11-06|1993-10-13|DELIVER IN PERSON|SHIP|y. furiously even pinto be 2656|180671|3190|1|10|17516.70|0.02|0.06|R|F|1993-06-28|1993-07-04|1993-07-12|TAKE BACK RETURN|TRUCK|s nag regularly about the deposits. 
slyly 2656|136884|6885|2|38|72993.44|0.07|0.02|A|F|1993-06-25|1993-06-04|1993-07-24|NONE|RAIL|structions wake along the furio 2656|1209|3710|3|19|21093.80|0.03|0.02|R|F|1993-08-03|1993-07-25|1993-08-20|TAKE BACK RETURN|MAIL|ts serve deposi 2656|109246|1757|4|40|50209.60|0.05|0.04|R|F|1993-06-09|1993-07-24|1993-06-21|DELIVER IN PERSON|RAIL|refully final pearls. final ideas wake. qu 2657|114332|6844|1|22|29619.26|0.02|0.03|N|O|1995-12-08|1995-12-28|1995-12-21|TAKE BACK RETURN|MAIL|r ideas. furiously special dolphins 2657|164877|9910|2|15|29128.05|0.08|0.05|N|O|1995-12-09|1995-12-16|1995-12-18|NONE|RAIL|ole carefully above the ironic ideas. b 2657|78233|741|3|25|30280.75|0.02|0.04|N|O|1995-10-21|1995-12-12|1995-11-09|COLLECT COD|FOB|lly pinto beans. final 2657|54839|2355|4|11|19732.13|0.04|0.08|N|O|1995-11-19|1995-12-11|1995-11-24|COLLECT COD|TRUCK|ckly enticing requests. fur 2657|77611|7612|5|42|66721.62|0.06|0.03|N|O|1996-01-23|1995-11-22|1996-01-25|COLLECT COD|RAIL|ckly slyly even accounts. platelets x-ray 2657|193153|8192|6|31|38630.65|0.01|0.03|N|O|1995-11-10|1995-11-27|1995-12-06|COLLECT COD|RAIL|re blithely 2658|131174|1175|1|41|49411.97|0.05|0.04|N|O|1995-11-07|1995-11-04|1995-12-04|NONE|MAIL|eposits. furiously final theodolite 2658|28573|1076|2|22|33034.54|0.08|0.05|N|O|1995-11-12|1995-11-18|1995-11-14|DELIVER IN PERSON|TRUCK|ts cajole. pending packages affix 2658|17832|2835|3|13|22747.79|0.07|0.06|N|O|1995-10-24|1995-12-12|1995-11-14|COLLECT COD|FOB|s kindle blithely regular accounts. 2658|91939|6958|4|22|42480.46|0.04|0.04|N|O|1995-12-02|1995-11-03|1995-12-26|DELIVER IN PERSON|SHIP| dependencies. blithely pending foxes abou 2658|6721|6722|5|45|73247.40|0.03|0.01|N|O|1995-11-02|1995-11-08|1995-11-29|DELIVER IN PERSON|MAIL|e special requests. quickly ex 2658|146785|9300|6|27|49458.06|0.05|0.07|N|O|1995-09-26|1995-12-08|1995-09-30|NONE|AIR|ecial packages use abov 2659|41783|9296|1|28|48293.84|0.08|0.05|A|F|1994-03-17|1994-01-24|1994-03-19|NONE|FOB|idle tithes 2659|42253|9766|2|21|25100.25|0.00|0.00|A|F|1993-12-23|1994-02-10|1994-01-17|DELIVER IN PERSON|RAIL|y beyond the furiously even co 2659|134547|2087|3|24|37956.96|0.04|0.03|R|F|1994-03-28|1994-02-20|1994-04-05|DELIVER IN PERSON|REG AIR| haggle carefully 2659|118095|3118|4|2|2226.18|0.00|0.08|R|F|1994-02-19|1994-03-12|1994-02-21|NONE|MAIL|sts above the fluffily express fo 2659|6161|3662|5|9|9604.44|0.08|0.03|A|F|1994-02-07|1994-03-17|1994-03-04|DELIVER IN PERSON|AIR|ly final packages sleep ac 2660|47703|5216|1|17|28061.90|0.00|0.05|N|O|1995-08-18|1995-09-13|1995-09-17|NONE|SHIP|al pinto beans wake after the furious 2661|177159|7160|1|31|38320.65|0.03|0.02|N|O|1997-04-07|1997-03-10|1997-04-23|TAKE BACK RETURN|AIR|e ironicall 2661|102402|7423|2|22|30896.80|0.08|0.02|N|O|1997-03-14|1997-03-17|1997-04-08|COLLECT COD|REG AIR| foxes affix quickly ironic request 2661|66183|8690|3|11|12640.98|0.00|0.08|N|O|1997-04-14|1997-02-11|1997-05-05|TAKE BACK RETURN|FOB|equests are a 2661|136127|1154|4|41|47687.92|0.06|0.02|N|O|1997-03-06|1997-03-27|1997-03-15|DELIVER IN PERSON|AIR|iously ironically ironic requests. 2662|101609|4120|1|43|69255.80|0.09|0.07|N|O|1996-11-24|1996-11-04|1996-12-08|NONE|RAIL|. slyly specia 2662|127424|7425|2|8|11611.36|0.02|0.07|N|O|1996-09-10|1996-10-09|1996-09-21|TAKE BACK RETURN|REG AIR|ajole carefully. 
sp 2662|1049|3550|3|6|5700.24|0.02|0.00|N|O|1996-11-30|1996-09-20|1996-12-03|DELIVER IN PERSON|REG AIR|olites cajole quickly along the b 2662|29760|9761|4|34|57451.84|0.06|0.07|N|O|1996-10-04|1996-11-05|1996-10-19|NONE|SHIP|ding theodolites use carefully. p 2663|113221|755|1|35|43197.70|0.02|0.01|N|O|1995-12-11|1995-10-16|1996-01-07|TAKE BACK RETURN|REG AIR|tect. slyly fina 2688|17768|2771|1|45|75859.20|0.08|0.08|R|F|1992-05-21|1992-04-14|1992-05-28|NONE|FOB|sits run carefully 2688|14500|4501|2|46|65067.00|0.01|0.01|R|F|1992-05-24|1992-04-01|1992-05-26|COLLECT COD|TRUCK|elets. regular reque 2688|88810|1319|3|30|53964.30|0.05|0.04|A|F|1992-04-18|1992-03-18|1992-05-18|TAKE BACK RETURN|RAIL|ithely final 2688|24410|6913|4|3|4003.23|0.00|0.03|R|F|1992-02-04|1992-03-18|1992-02-24|DELIVER IN PERSON|RAIL|e fluffily 2688|58449|8450|5|22|30963.68|0.02|0.05|R|F|1992-02-09|1992-04-09|1992-02-11|DELIVER IN PERSON|RAIL|press, ironic excuses wake carefully id 2688|148297|8298|6|42|56502.18|0.01|0.01|R|F|1992-04-29|1992-04-04|1992-05-17|TAKE BACK RETURN|FOB|lly even account 2689|5731|732|1|45|73652.85|0.02|0.04|R|F|1992-04-29|1992-06-22|1992-04-30|COLLECT COD|SHIP|e quickly. carefully silent 2690|139065|4092|1|44|48578.64|0.05|0.06|N|O|1996-05-30|1996-05-19|1996-06-26|NONE|REG AIR|ly alongside of th 2690|50963|964|2|50|95698.00|0.03|0.03|N|O|1996-06-13|1996-05-22|1996-06-14|DELIVER IN PERSON|MAIL| doubt careful 2690|124448|4449|3|45|66259.80|0.02|0.07|N|O|1996-05-23|1996-06-02|1996-05-29|DELIVER IN PERSON|MAIL|ounts. slyly regular dependencies wa 2690|194436|4437|4|12|18365.16|0.04|0.07|N|O|1996-07-18|1996-06-03|1996-07-25|NONE|AIR|nal, regular atta 2690|85190|2715|5|30|35255.70|0.01|0.08|N|O|1996-05-20|1996-06-01|1996-06-04|TAKE BACK RETURN|SHIP|d accounts above the express req 2690|188034|5589|6|3|3366.09|0.07|0.01|N|O|1996-07-04|1996-05-28|1996-07-06|TAKE BACK RETURN|RAIL|. final reques 2690|78616|6138|7|35|55811.35|0.05|0.06|N|O|1996-07-25|1996-05-14|1996-08-03|COLLECT COD|FOB|y silent pinto be 2691|90549|3059|1|11|16934.94|0.04|0.07|R|F|1992-06-21|1992-06-08|1992-07-09|COLLECT COD|FOB|leep alongside of the accounts. slyly ironi 2691|47716|5229|2|2|3327.42|0.00|0.07|R|F|1992-05-10|1992-06-04|1992-05-11|TAKE BACK RETURN|TRUCK|s cajole at the blithely ironic warthog 2691|161253|1254|3|16|21028.00|0.09|0.03|R|F|1992-06-11|1992-07-29|1992-06-29|NONE|RAIL|bove the even foxes. unusual theodoli 2691|165770|803|4|1|1835.77|0.08|0.00|A|F|1992-08-11|1992-06-07|1992-08-16|NONE|SHIP|egular instructions b 2692|16131|8633|1|3|3141.39|0.10|0.04|N|O|1998-02-25|1998-01-29|1998-03-27|TAKE BACK RETURN|MAIL|equests. bold, even foxes haggle slyl 2692|113501|8524|2|21|31804.50|0.03|0.05|N|O|1998-03-11|1998-02-11|1998-03-19|NONE|SHIP|posits. final, express requests nag furi 2693|8670|8671|1|26|41045.42|0.04|0.00|N|O|1996-09-14|1996-10-07|1996-10-03|COLLECT COD|MAIL|cajole alo 2693|101842|1843|2|43|79285.12|0.03|0.04|N|O|1996-10-24|1996-10-24|1996-11-03|TAKE BACK RETURN|TRUCK|as are according to th 2694|152518|5034|1|30|47115.30|0.02|0.06|N|O|1996-06-20|1996-06-01|1996-07-15|NONE|TRUCK|oxes. never iro 2694|156889|1920|2|35|68105.80|0.07|0.03|N|O|1996-05-24|1996-06-01|1996-05-25|NONE|RAIL|atelets past the furiously final deposits 2694|18030|532|3|15|14220.45|0.08|0.02|N|O|1996-06-30|1996-05-01|1996-07-25|TAKE BACK RETURN|REG AIR|e blithely even platelets. 
special wa 2694|19451|6955|4|12|16445.40|0.00|0.05|N|O|1996-04-24|1996-04-22|1996-05-14|DELIVER IN PERSON|RAIL|foxes atop the hockey pla 2694|107185|7186|5|10|11921.80|0.08|0.08|N|O|1996-06-23|1996-05-28|1996-06-27|COLLECT COD|REG AIR|fluffily fluffy accounts. even packages hi 2695|183259|8296|1|21|28187.25|0.07|0.00|N|O|1996-10-04|1996-11-02|1996-10-21|NONE|MAIL|y regular pinto beans. evenly regular packa 2695|18835|6339|2|44|77168.52|0.09|0.07|N|O|1996-10-05|1996-10-10|1996-11-01|NONE|MAIL|ts. busy platelets boost 2695|143480|8509|3|21|31993.08|0.02|0.07|N|O|1996-09-13|1996-09-25|1996-10-13|NONE|TRUCK|s. furiously ironic platelets ar 2695|57398|9904|4|16|21686.24|0.08|0.08|N|O|1996-11-16|1996-10-05|1996-11-22|NONE|TRUCK|its. theodolites sleep slyly 2695|85282|2807|5|40|50691.20|0.02|0.03|N|O|1996-11-02|1996-10-26|1996-11-14|NONE|FOB|ructions. pending 2720|44483|4484|1|5|7137.40|0.10|0.06|A|F|1993-06-24|1993-08-08|1993-07-08|NONE|FOB|ously ironic foxes thrash 2720|16655|6656|2|42|66009.30|0.09|0.03|R|F|1993-07-25|1993-07-23|1993-08-23|COLLECT COD|REG AIR|fter the inst 2720|119065|9066|3|50|54203.00|0.10|0.02|A|F|1993-08-10|1993-07-29|1993-09-06|NONE|SHIP|l requests. deposits nag furiously 2720|108938|1449|4|49|95399.57|0.06|0.02|A|F|1993-07-09|1993-07-14|1993-07-13|NONE|REG AIR| accounts. fluffily bold pack 2720|120884|3397|5|27|51431.76|0.04|0.00|R|F|1993-06-29|1993-08-06|1993-07-28|NONE|TRUCK|eas. carefully regular 2721|182460|4979|1|49|75580.54|0.00|0.08|N|O|1996-02-14|1996-04-26|1996-03-02|DELIVER IN PERSON|AIR|ounts poach carefu 2721|2633|2634|2|2|3071.26|0.02|0.05|N|O|1996-02-13|1996-03-14|1996-02-28|TAKE BACK RETURN|TRUCK| slyly final requests against 2722|123803|1340|1|21|38362.80|0.09|0.01|A|F|1994-07-29|1994-06-26|1994-08-09|NONE|RAIL|e carefully around the furiously ironic pac 2722|145739|5740|2|15|26770.95|0.05|0.03|R|F|1994-07-02|1994-06-01|1994-07-13|COLLECT COD|AIR|refully final asympt 2722|33709|6213|3|16|26283.20|0.04|0.06|R|F|1994-05-25|1994-06-09|1994-05-26|NONE|MAIL|ts besides the fluffy, 2723|12688|5190|1|47|75231.96|0.09|0.07|N|O|1995-12-05|1995-11-19|1995-12-11|TAKE BACK RETURN|AIR|furiously r 2723|31913|6920|2|10|18449.10|0.06|0.08|N|O|1995-11-27|1995-11-29|1995-12-12|DELIVER IN PERSON|MAIL|al, special r 2723|161747|4264|3|2|3617.48|0.10|0.01|N|O|1995-11-09|1995-11-10|1995-11-14|TAKE BACK RETURN|FOB| courts boost quickly about th 2723|81212|6229|4|12|14318.52|0.01|0.05|N|O|1995-12-24|1995-11-15|1996-01-17|DELIVER IN PERSON|RAIL|bold foxes are bold packages. regular, fin 2723|128077|8078|5|40|44202.80|0.09|0.05|N|O|1995-11-17|1995-11-22|1995-11-18|TAKE BACK RETURN|MAIL|unwind fluffily carefully regular realms. 2724|91316|3826|1|47|61443.57|0.09|0.01|A|F|1994-11-23|1994-11-13|1994-12-03|COLLECT COD|TRUCK|unusual patterns nag. special p 2724|146966|6967|2|21|42272.16|0.09|0.02|A|F|1994-11-25|1994-10-15|1994-12-07|COLLECT COD|RAIL|as. carefully regular dependencies wak 2724|49839|4848|3|22|39354.26|0.04|0.06|A|F|1994-09-19|1994-11-18|1994-10-17|TAKE BACK RETURN|TRUCK|express fo 2724|34495|9502|4|1|1429.49|0.07|0.03|A|F|1994-12-26|1994-11-27|1995-01-07|NONE|MAIL|lyly carefully blithe theodolites-- pl 2724|148071|3100|5|29|32453.03|0.05|0.06|A|F|1995-01-10|1994-11-17|1995-02-04|COLLECT COD|MAIL|l requests hagg 2725|117595|107|1|23|37089.57|0.10|0.08|R|F|1994-08-25|1994-06-22|1994-08-28|TAKE BACK RETURN|REG AIR|y regular deposits. 
brave foxes 2725|4819|7320|2|41|70676.21|0.01|0.00|R|F|1994-07-05|1994-06-29|1994-08-02|DELIVER IN PERSON|TRUCK|ns sleep furiously c 2725|188318|5873|3|15|21094.65|0.07|0.03|R|F|1994-08-06|1994-08-09|1994-08-15|TAKE BACK RETURN|AIR|? furiously regular a 2726|655|5656|1|50|77782.50|0.00|0.06|R|F|1993-03-04|1993-01-29|1993-03-28|COLLECT COD|TRUCK| furiously bold theodolites 2727|150928|5959|1|3|5936.76|0.03|0.01|N|O|1998-06-18|1998-06-06|1998-06-23|NONE|RAIL| the carefully regular foxes u 2752|30702|5709|1|41|66940.70|0.02|0.05|A|F|1994-03-02|1994-01-31|1994-03-06|DELIVER IN PERSON|AIR|tructions hag 2752|6810|1811|2|29|49787.49|0.02|0.04|R|F|1994-01-22|1994-01-08|1994-01-28|COLLECT COD|TRUCK|gly blithely re 2752|55050|5051|3|4|4020.20|0.08|0.00|A|F|1993-12-14|1994-02-13|1994-01-05|DELIVER IN PERSON|TRUCK|telets haggle. regular, final 2752|23153|660|4|40|43046.00|0.09|0.06|A|F|1994-01-24|1994-01-18|1994-02-22|DELIVER IN PERSON|MAIL|into beans are after the sly 2752|125934|959|5|22|43118.46|0.03|0.04|A|F|1994-03-20|1994-02-08|1994-04-01|TAKE BACK RETURN|TRUCK|equests nag. regular dependencies are furio 2752|169347|6896|6|21|29743.14|0.09|0.05|R|F|1994-01-01|1994-01-24|1994-01-24|COLLECT COD|SHIP| along the quickly 2752|198918|8919|7|38|76642.58|0.08|0.00|R|F|1994-02-23|1993-12-23|1994-03-24|DELIVER IN PERSON|SHIP|es boost. slyly silent ideas 2753|12855|359|1|6|10607.10|0.10|0.04|A|F|1993-12-30|1994-01-28|1994-01-29|COLLECT COD|TRUCK|s accounts 2753|47010|4523|2|40|38280.40|0.03|0.05|A|F|1994-01-06|1994-02-13|1994-02-03|DELIVER IN PERSON|SHIP|latelets kindle slyly final depos 2753|88608|3625|3|30|47898.00|0.00|0.07|A|F|1994-01-26|1994-01-29|1994-02-02|NONE|RAIL|ans wake fluffily blithely iro 2753|30853|3357|4|7|12486.95|0.07|0.03|R|F|1994-02-11|1994-01-22|1994-03-10|DELIVER IN PERSON|AIR|xpress ideas detect b 2753|136899|6900|5|36|69692.04|0.04|0.08|R|F|1994-03-15|1994-01-03|1994-04-03|DELIVER IN PERSON|SHIP|gle slyly final c 2753|49203|9204|6|17|19587.40|0.01|0.08|A|F|1994-03-08|1994-01-17|1994-03-11|TAKE BACK RETURN|REG AIR| carefully bold deposits sublate s 2753|147138|7139|7|20|23702.60|0.01|0.06|R|F|1994-02-24|1994-02-04|1994-03-23|DELIVER IN PERSON|FOB| express pack 2754|148092|607|1|4|4560.36|0.05|0.08|A|F|1994-07-13|1994-05-15|1994-08-02|NONE|REG AIR|blithely silent requests. regular depo 2754|176662|4214|2|19|33034.54|0.01|0.07|A|F|1994-06-27|1994-05-06|1994-06-28|NONE|FOB|latelets hag 2755|91404|3914|1|19|26512.60|0.10|0.00|R|F|1992-02-11|1992-03-15|1992-02-14|TAKE BACK RETURN|MAIL|furiously special deposits 2755|23483|8488|2|11|15471.28|0.03|0.08|A|F|1992-04-12|1992-05-07|1992-04-21|COLLECT COD|RAIL|egular excuses sleep carefully. 2755|63720|6227|3|21|35358.12|0.08|0.04|R|F|1992-02-13|1992-04-20|1992-03-02|NONE|AIR|furious re 2755|130096|7636|4|5|5630.45|0.01|0.00|A|F|1992-02-27|1992-04-07|1992-03-09|TAKE BACK RETURN|AIR|e the furi 2755|115039|5040|5|48|50593.44|0.05|0.06|R|F|1992-03-22|1992-03-10|1992-04-14|DELIVER IN PERSON|MAIL|yly even epitaphs for the 2756|117254|7255|1|35|44493.75|0.03|0.02|R|F|1994-06-08|1994-06-01|1994-06-21|TAKE BACK RETURN|AIR| deposits grow bold sheaves; iro 2756|79212|4227|2|47|55986.87|0.06|0.01|R|F|1994-05-10|1994-05-25|1994-05-13|NONE|AIR|e final, f 2756|104806|7317|3|31|56134.80|0.01|0.07|A|F|1994-07-27|1994-07-06|1994-08-22|TAKE BACK RETURN|TRUCK|en instructions use quickly. 2756|71763|4271|4|30|52042.80|0.00|0.04|A|F|1994-06-05|1994-06-30|1994-06-14|DELIVER IN PERSON|TRUCK|ular packages. 
regular deposi 2757|147369|9884|1|26|36825.36|0.07|0.00|N|O|1995-08-19|1995-10-02|1995-09-06|DELIVER IN PERSON|MAIL|around the blithely 2757|21865|4368|2|12|21442.32|0.07|0.08|N|O|1995-08-01|1995-09-04|1995-08-08|TAKE BACK RETURN|SHIP| regular, eve 2757|72740|5248|3|17|29116.58|0.10|0.04|N|O|1995-09-06|1995-09-27|1995-09-22|DELIVER IN PERSON|AIR|er the furiously silent 2757|139197|4224|4|25|30904.75|0.08|0.01|N|O|1995-11-09|1995-09-12|1995-11-23|NONE|AIR|uickly regular 2757|69696|4709|5|14|23319.66|0.04|0.05|N|O|1995-09-01|1995-08-24|1995-09-03|TAKE BACK RETURN|SHIP|special deposits u 2758|120880|5905|1|20|38017.60|0.02|0.04|N|O|1998-07-27|1998-09-10|1998-08-21|TAKE BACK RETURN|AIR|ptotes sleep furiously 2758|22897|5400|2|17|30938.13|0.10|0.06|N|O|1998-09-25|1998-10-03|1998-10-25|NONE|MAIL| accounts! qui 2758|25560|565|3|1|1485.56|0.06|0.02|N|O|1998-10-09|1998-09-15|1998-10-16|NONE|TRUCK|ake furious 2759|58995|6511|1|10|19539.90|0.10|0.03|R|F|1993-12-14|1994-01-08|1994-01-01|COLLECT COD|FOB|s. busily ironic theodo 2759|112674|7697|2|37|62406.79|0.00|0.06|R|F|1994-03-05|1994-02-22|1994-03-18|DELIVER IN PERSON|REG AIR|lar Tiresias affix ironically carefully sp 2759|111646|6669|3|11|18234.04|0.03|0.08|A|F|1994-01-24|1994-01-16|1994-02-21|DELIVER IN PERSON|TRUCK|hely regular 2759|22777|7782|4|31|52692.87|0.02|0.05|A|F|1994-01-11|1994-01-15|1994-01-23|NONE|SHIP|ithely aft 2784|32335|7342|1|45|57029.85|0.03|0.01|N|O|1998-02-15|1998-04-07|1998-02-26|COLLECT COD|AIR|yly along the asymptotes. reque 2784|53524|3525|2|23|33982.96|0.03|0.05|N|O|1998-03-28|1998-02-07|1998-04-17|DELIVER IN PERSON|AIR|uests lose after 2784|174715|9750|3|40|71588.40|0.07|0.01|N|O|1998-04-28|1998-03-19|1998-05-03|DELIVER IN PERSON|TRUCK|deas nag furiously never unusual 2784|28004|8005|4|3|2796.00|0.04|0.03|N|O|1998-01-19|1998-04-05|1998-02-05|TAKE BACK RETURN|AIR|n packages. foxes haggle quickly sile 2785|99049|4068|1|34|35633.36|0.08|0.06|N|O|1995-08-07|1995-09-09|1995-09-05|NONE|RAIL|ly final packages haggl 2785|109744|7275|2|37|64888.38|0.08|0.04|N|O|1995-07-25|1995-09-12|1995-08-06|DELIVER IN PERSON|TRUCK|tructions. furiously 2785|64524|2043|3|33|49121.16|0.08|0.06|N|O|1995-10-16|1995-08-24|1995-11-02|DELIVER IN PERSON|MAIL|fter the furiously final p 2785|47167|2176|4|34|37881.44|0.00|0.02|N|O|1995-09-16|1995-09-09|1995-10-11|COLLECT COD|SHIP|kages wake carefully silent 2786|135566|3106|1|15|24023.40|0.03|0.04|A|F|1992-05-19|1992-05-08|1992-05-28|COLLECT COD|TRUCK|low deposits are ironic 2786|50144|7660|2|42|45953.88|0.10|0.04|R|F|1992-05-15|1992-04-22|1992-05-30|DELIVER IN PERSON|AIR|unts are against the furious 2786|155318|349|3|41|56305.71|0.04|0.05|R|F|1992-07-01|1992-06-04|1992-07-13|COLLECT COD|RAIL|ix requests. bold requests a 2786|22696|2697|4|24|38848.56|0.05|0.02|A|F|1992-04-04|1992-06-09|1992-05-02|DELIVER IN PERSON|MAIL|ans. slyly unusual platelets detect. unus 2786|49478|4487|5|43|61381.21|0.06|0.03|R|F|1992-04-22|1992-05-13|1992-04-29|NONE|RAIL|ons. theodolites after 2786|161228|3745|6|21|27073.62|0.08|0.00|A|F|1992-05-03|1992-05-01|1992-05-14|COLLECT COD|AIR|slow instructi 2787|32479|9989|1|4|5645.88|0.04|0.04|N|O|1996-01-26|1995-11-26|1996-02-20|TAKE BACK RETURN|SHIP|ts. instructions nag furiously according 2788|176196|6197|1|16|20355.04|0.06|0.06|A|F|1994-10-04|1994-11-25|1994-10-18|DELIVER IN PERSON|AIR| requests wake carefully. 
carefully si 2789|162410|9959|1|16|23558.56|0.03|0.02|N|O|1998-04-18|1998-05-25|1998-05-12|DELIVER IN PERSON|REG AIR|o beans use carefully 2789|22835|2836|2|41|72071.03|0.02|0.05|N|O|1998-03-20|1998-05-15|1998-03-21|COLLECT COD|MAIL|d packages-- fluffily specia 2789|175892|927|3|33|64940.37|0.06|0.02|N|O|1998-04-21|1998-05-02|1998-04-30|COLLECT COD|TRUCK|deposits. ironic 2789|15982|985|4|47|89205.06|0.02|0.04|N|O|1998-03-29|1998-05-05|1998-04-07|NONE|RAIL|usly busy packages wake against the unusual 2789|196629|4187|5|23|39689.26|0.02|0.07|N|O|1998-03-25|1998-05-10|1998-04-24|COLLECT COD|RAIL|cording to the careful de 2789|143603|3604|6|16|26345.60|0.07|0.03|N|O|1998-05-11|1998-05-08|1998-05-24|TAKE BACK RETURN|RAIL|d the carefully iron 2789|132206|7233|7|42|52004.40|0.01|0.00|N|O|1998-04-28|1998-05-17|1998-05-24|TAKE BACK RETURN|AIR|ending packages shoul 2790|184300|4301|1|27|37376.10|0.06|0.08|R|F|1994-09-04|1994-09-27|1994-09-16|TAKE BACK RETURN|MAIL|ilent packages cajole. quickly ironic requ 2790|116975|9487|2|50|99598.50|0.00|0.06|A|F|1994-12-08|1994-11-17|1994-12-19|NONE|RAIL|fter the regular ideas. f 2790|183138|8175|3|19|23201.47|0.06|0.00|R|F|1994-10-23|1994-10-03|1994-10-26|TAKE BACK RETURN|RAIL|uffily even excuses. furiously thin 2790|196404|6405|4|24|36009.60|0.07|0.01|A|F|1994-12-04|1994-10-10|1994-12-25|NONE|MAIL|ments. slyly f 2790|147955|7956|5|11|22032.45|0.08|0.03|A|F|1994-09-28|1994-11-14|1994-10-04|TAKE BACK RETURN|AIR|lar requests poach slyly foxes 2790|72637|5145|6|13|20925.19|0.08|0.00|R|F|1994-09-20|1994-10-10|1994-10-20|COLLECT COD|SHIP|n deposits according to the regul 2790|3931|1432|7|32|58717.76|0.08|0.02|A|F|1994-09-25|1994-10-26|1994-10-01|NONE|SHIP|ully pending 2791|58694|8695|1|49|80981.81|0.10|0.04|A|F|1995-01-11|1994-11-10|1995-02-08|COLLECT COD|MAIL| accounts sleep at the bold, regular pinto 2791|62804|2805|2|4|7067.20|0.10|0.08|A|F|1995-01-02|1994-12-28|1995-01-29|NONE|SHIP|slyly bold packages boost. slyly 2791|132300|4814|3|44|58621.20|0.08|0.06|R|F|1994-11-17|1994-11-12|1994-12-14|NONE|FOB|heodolites use furio 2791|155894|3440|4|24|46797.36|0.04|0.02|R|F|1995-01-30|1994-11-20|1995-02-08|DELIVER IN PERSON|TRUCK|ilent forges. quickly special pinto beans 2791|104521|2052|5|8|12204.16|0.02|0.04|R|F|1995-01-30|1994-11-24|1995-02-13|NONE|FOB|se. close ideas alongs 2791|74435|1957|6|9|12684.87|0.08|0.02|R|F|1994-11-19|1994-12-14|1994-12-10|TAKE BACK RETURN|AIR|pendencies. blithely bold patterns acr 2791|28635|6142|7|26|40654.38|0.06|0.03|R|F|1995-02-06|1994-12-07|1995-02-23|DELIVER IN PERSON|AIR|uriously special instructio 2816|58258|8259|1|33|40136.25|0.00|0.07|R|F|1994-10-19|1994-11-10|1994-11-09|NONE|REG AIR|s; slyly even theodo 2816|141342|1343|2|4|5533.36|0.05|0.04|R|F|1994-12-11|1994-12-07|1995-01-03|NONE|FOB|. blithely pending id 2816|120406|2919|3|4|5705.60|0.02|0.06|R|F|1994-12-12|1994-12-05|1994-12-30|NONE|RAIL| requests print above the final deposits 2817|59597|2103|1|25|38914.75|0.07|0.01|R|F|1994-04-21|1994-06-20|1994-05-07|DELIVER IN PERSON|FOB|doze blithely. 2817|31271|3775|2|5|6011.35|0.03|0.04|A|F|1994-05-07|1994-05-31|1994-05-12|TAKE BACK RETURN|AIR|furiously unusual theodolites use furiou 2817|171177|8729|3|35|43685.95|0.01|0.07|A|F|1994-05-20|1994-06-03|1994-05-22|COLLECT COD|FOB|gular foxes 2817|160884|885|4|4|7779.52|0.00|0.05|R|F|1994-06-04|1994-06-11|1994-06-10|NONE|TRUCK|n accounts wake across the fluf 2818|120379|7916|1|12|16792.44|0.10|0.03|A|F|1995-02-01|1995-03-10|1995-02-16|NONE|AIR|lms. 
quickly bold asymp 2818|198766|3805|2|22|41024.72|0.06|0.07|R|F|1995-02-28|1995-03-10|1995-03-06|TAKE BACK RETURN|RAIL|egrate toward the carefully iron 2818|44234|4235|3|11|12960.53|0.01|0.06|R|F|1995-02-18|1995-02-11|1995-03-19|TAKE BACK RETURN|TRUCK|ggle across the carefully blithe 2818|39711|7221|4|32|52822.72|0.08|0.08|R|F|1995-02-04|1995-03-05|1995-02-18|COLLECT COD|REG AIR|arefully! ac 2818|17380|4884|5|42|54489.96|0.08|0.04|A|F|1995-02-12|1995-02-19|1995-03-13|COLLECT COD|MAIL|ar accounts wake carefully a 2818|90636|8164|6|7|11386.41|0.06|0.03|R|F|1995-03-24|1995-03-09|1995-04-06|TAKE BACK RETURN|TRUCK|ly according to the r 2819|69083|9084|1|17|17885.36|0.08|0.08|A|F|1994-07-16|1994-07-15|1994-07-17|TAKE BACK RETURN|RAIL|en deposits above the f 2819|66461|3980|2|12|17129.52|0.03|0.08|R|F|1994-07-18|1994-06-24|1994-07-28|NONE|MAIL| regular, regular a 2819|4924|2425|3|28|51209.76|0.03|0.08|R|F|1994-05-09|1994-07-02|1994-05-15|NONE|RAIL|ckages sublate carefully closely regular 2819|152830|2831|4|5|9414.15|0.00|0.02|R|F|1994-05-29|1994-06-12|1994-06-28|NONE|TRUCK| fluffily unusual foxes sleep caref 2819|199179|4218|5|6|7669.02|0.03|0.01|A|F|1994-07-22|1994-08-02|1994-07-29|NONE|REG AIR|eas after the carefully express pack 2820|173150|702|1|23|28132.45|0.04|0.08|R|F|1994-07-10|1994-08-08|1994-07-21|NONE|MAIL| was furiously. deposits among the ironic 2820|125718|3255|2|33|57542.43|0.08|0.06|A|F|1994-07-07|1994-08-17|1994-08-02|DELIVER IN PERSON|AIR|carefully even pinto beans. 2820|140013|7556|3|38|40014.38|0.03|0.08|A|F|1994-09-10|1994-08-07|1994-10-07|TAKE BACK RETURN|MAIL|ests despite the carefully unusual a 2820|196270|8790|4|40|54650.80|0.06|0.06|A|F|1994-08-08|1994-07-30|1994-08-21|TAKE BACK RETURN|REG AIR|g multipliers. final c 2821|180834|8389|1|4|7659.32|0.00|0.00|A|F|1993-09-15|1993-10-02|1993-09-17|TAKE BACK RETURN|TRUCK|nding foxes. 2821|71159|6174|2|4|4520.60|0.09|0.00|A|F|1993-11-19|1993-09-20|1993-11-27|TAKE BACK RETURN|TRUCK|ual multipliers. final deposits cajol 2821|163639|8672|3|27|45971.01|0.01|0.01|A|F|1993-11-27|1993-10-11|1993-12-08|COLLECT COD|TRUCK|requests. blit 2822|150765|3281|1|39|70814.64|0.04|0.02|R|F|1993-09-11|1993-08-29|1993-09-18|NONE|MAIL|kly about the sly 2823|85712|8221|1|45|76396.95|0.03|0.04|N|O|1995-12-28|1995-11-27|1996-01-02|DELIVER IN PERSON|SHIP|furiously special idea 2823|159624|4655|2|18|30305.16|0.00|0.03|N|O|1995-11-11|1995-10-30|1995-12-08|TAKE BACK RETURN|TRUCK| final deposits. furiously regular foxes u 2823|185277|7796|3|11|14984.97|0.07|0.02|N|O|1995-12-10|1995-11-24|1995-12-21|DELIVER IN PERSON|SHIP|bold requests nag blithely s 2823|138806|3833|4|48|88550.40|0.09|0.03|N|O|1995-11-21|1995-10-30|1995-11-27|NONE|SHIP|ously busily slow excus 2823|98943|3962|5|18|34954.92|0.04|0.06|N|O|1995-11-09|1995-10-30|1995-11-19|NONE|AIR|eas. decoys cajole deposi 2823|122443|7468|6|20|29308.80|0.07|0.00|N|O|1995-11-13|1995-12-06|1995-12-07|NONE|MAIL|its sleep between the unusual, ironic pac 2823|85076|2601|7|12|12732.84|0.02|0.04|N|O|1995-12-22|1995-11-20|1996-01-13|NONE|REG AIR|the slyly ironic dolphins; fin 2848|64222|6729|1|44|52193.68|0.01|0.05|R|F|1992-04-14|1992-05-09|1992-04-19|DELIVER IN PERSON|MAIL|ions. slyly express instructions n 2848|164452|4453|2|8|12131.60|0.07|0.01|A|F|1992-03-21|1992-05-18|1992-04-07|DELIVER IN PERSON|TRUCK|. silent, final ideas sublate packages. ir 2848|137332|9846|3|8|10954.64|0.07|0.08|A|F|1992-06-20|1992-04-12|1992-07-09|NONE|SHIP|sly regular foxes. 
2848|124948|4949|4|34|67079.96|0.02|0.08|A|F|1992-03-15|1992-04-24|1992-04-12|TAKE BACK RETURN|RAIL|ts along the blithely regu 2848|194197|6717|5|18|23241.42|0.07|0.03|R|F|1992-04-10|1992-06-01|1992-05-05|DELIVER IN PERSON|TRUCK|osits haggle. stealthily ironic packa 2849|153805|6321|1|16|29740.80|0.09|0.08|N|O|1996-05-20|1996-07-23|1996-06-18|NONE|TRUCK|. furiously regular requ 2849|186843|1880|2|39|75263.76|0.10|0.03|N|O|1996-05-22|1996-07-18|1996-06-05|TAKE BACK RETURN|SHIP|s sleep furiously silently regul 2849|59872|9873|3|24|43964.88|0.01|0.05|N|O|1996-06-12|1996-07-10|1996-06-27|TAKE BACK RETURN|AIR|e slyly even asymptotes. slo 2849|54230|1746|4|48|56843.04|0.05|0.02|N|O|1996-05-03|1996-06-05|1996-05-28|NONE|AIR|mong the carefully regular theodol 2849|27255|2260|5|30|35467.50|0.10|0.06|N|O|1996-08-24|1996-07-08|1996-09-03|TAKE BACK RETURN|SHIP|ly. carefully silent 2849|68651|6170|6|30|48589.50|0.06|0.07|N|O|1996-06-20|1996-07-23|1996-07-06|NONE|FOB|yly furiously even id 2850|96931|4459|1|43|82900.99|0.02|0.05|N|O|1997-01-11|1996-11-03|1997-02-01|COLLECT COD|REG AIR|unusual accounts 2850|109283|6814|2|30|38768.40|0.09|0.01|N|O|1996-12-14|1996-11-29|1997-01-03|COLLECT COD|AIR|even ideas. busy pinto beans sleep above t 2850|104731|4732|3|49|85050.77|0.09|0.04|N|O|1996-10-07|1996-12-12|1996-10-12|TAKE BACK RETURN|MAIL| slyly unusual req 2850|198372|5930|4|4|5881.48|0.04|0.04|N|O|1996-10-28|1996-12-26|1996-11-07|COLLECT COD|RAIL|al deposits cajole carefully quickly 2851|147452|9967|1|8|11995.60|0.09|0.03|N|O|1997-11-12|1997-11-22|1997-12-11|NONE|REG AIR|y special theodolites. carefully 2852|176179|1214|1|6|7531.02|0.01|0.01|R|F|1993-03-02|1993-04-11|1993-03-11|TAKE BACK RETURN|RAIL| accounts above the furiously un 2852|40065|7578|2|24|24121.44|0.05|0.07|R|F|1993-01-18|1993-03-13|1993-02-14|DELIVER IN PERSON|MAIL| the blithe 2852|163403|952|3|29|42525.60|0.09|0.05|R|F|1993-04-21|1993-03-22|1993-05-02|COLLECT COD|SHIP|lyly ironi 2852|99183|4202|4|12|14186.16|0.08|0.02|A|F|1993-02-25|1993-03-24|1993-03-07|TAKE BACK RETURN|TRUCK|le. request 2852|153182|5698|5|28|34585.04|0.05|0.03|R|F|1993-02-08|1993-03-30|1993-02-11|NONE|MAIL|e accounts. caref 2853|138855|1369|1|14|26513.90|0.07|0.05|R|F|1994-05-16|1994-07-01|1994-05-27|NONE|TRUCK|oach slyly along t 2853|133051|591|2|26|28185.30|0.06|0.01|R|F|1994-06-26|1994-06-05|1994-07-02|TAKE BACK RETURN|MAIL|dolphins wake slyly. blith 2853|172674|5192|3|40|69866.80|0.06|0.04|A|F|1994-08-06|1994-06-24|1994-08-29|NONE|RAIL|lyly. pearls cajole. final accounts ca 2853|131257|8797|4|20|25765.00|0.02|0.04|A|F|1994-08-30|1994-06-16|1994-09-06|TAKE BACK RETURN|TRUCK|e slyly silent foxes. express deposits sno 2853|35872|879|5|1|1807.87|0.08|0.05|R|F|1994-09-01|1994-06-27|1994-09-12|TAKE BACK RETURN|FOB|refully slyly quick packages. final c 2854|180193|7748|1|46|58566.74|0.00|0.04|A|F|1994-09-22|1994-08-02|1994-09-30|COLLECT COD|AIR|. furiously regular deposits across th 2854|87581|7582|2|29|45488.82|0.09|0.07|R|F|1994-07-06|1994-08-26|1994-07-09|COLLECT COD|SHIP|y slyly ironic accounts. foxes haggle slyl 2854|159170|1686|3|20|24583.40|0.08|0.01|R|F|1994-09-18|1994-08-03|1994-10-12|COLLECT COD|AIR|rs impress after the deposits. 
2854|169448|9449|4|34|51592.96|0.06|0.03|A|F|1994-09-06|1994-08-07|1994-09-22|NONE|REG AIR|age carefully 2854|101316|1317|5|7|9221.17|0.03|0.06|A|F|1994-09-23|1994-08-14|1994-10-10|DELIVER IN PERSON|REG AIR| the pending 2854|17762|264|6|13|21836.88|0.04|0.03|R|F|1994-09-15|1994-08-18|1994-09-19|DELIVER IN PERSON|SHIP| excuses wak 2855|32224|7231|1|50|57811.00|0.03|0.07|A|F|1993-05-20|1993-06-28|1993-06-16|TAKE BACK RETURN|TRUCK|beans. deposits 2880|34078|9085|1|40|40482.80|0.09|0.00|A|F|1992-05-26|1992-06-01|1992-05-31|COLLECT COD|TRUCK|even requests. quick 2880|138854|6394|2|26|49214.10|0.07|0.07|R|F|1992-04-12|1992-04-15|1992-04-28|NONE|RAIL|ully among the regular warthogs 2880|114490|7002|3|42|63188.58|0.01|0.01|R|F|1992-06-17|1992-05-29|1992-07-11|NONE|REG AIR|ions. carefully final accounts are unusual, 2880|17656|158|4|46|72387.90|0.02|0.02|A|F|1992-04-21|1992-06-05|1992-05-16|COLLECT COD|RAIL|eep quickly according to t 2881|179343|1861|1|16|22757.44|0.02|0.06|A|F|1992-06-21|1992-06-27|1992-07-03|TAKE BACK RETURN|TRUCK|usly bold 2881|9181|9182|2|1|1090.18|0.09|0.03|A|F|1992-05-13|1992-07-21|1992-05-18|COLLECT COD|MAIL|final theodolites. quickly 2881|92018|7037|3|21|21210.21|0.07|0.03|A|F|1992-05-28|1992-07-03|1992-06-02|TAKE BACK RETURN|SHIP|hely express Tiresias. final dependencies 2881|139814|7354|4|7|12976.67|0.06|0.01|R|F|1992-08-03|1992-07-10|1992-08-27|NONE|REG AIR|ironic packages are carefully final ac 2882|3763|6264|1|14|23334.64|0.09|0.02|N|O|1995-09-28|1995-11-11|1995-10-18|TAKE BACK RETURN|MAIL|kly. even requests w 2882|41007|8520|2|30|28440.00|0.00|0.00|N|O|1995-10-15|1995-10-13|1995-10-25|NONE|REG AIR|among the furiously even theodolites. regu 2882|196935|9455|3|29|58925.97|0.10|0.08|N|O|1995-09-10|1995-11-01|1995-10-02|NONE|TRUCK|kages. furiously ironic 2882|77298|4820|4|27|34432.83|0.06|0.02|N|O|1995-09-04|1995-11-11|1995-09-12|DELIVER IN PERSON|MAIL|rding to the regu 2882|133878|3879|5|32|61179.84|0.07|0.03|N|O|1995-10-21|1995-11-10|1995-11-01|COLLECT COD|RAIL|sts. quickly regular e 2882|86425|6426|6|47|66336.74|0.06|0.03|N|O|1995-09-13|1995-09-21|1995-09-14|NONE|REG AIR|l, special 2883|91|2592|1|33|32705.97|0.08|0.07|R|F|1995-02-26|1995-03-04|1995-03-01|NONE|RAIL|s. final i 2883|124341|4342|2|27|36864.18|0.00|0.02|A|F|1995-03-12|1995-03-10|1995-04-04|TAKE BACK RETURN|REG AIR|s. brave pinto beans nag furiously 2883|188690|6245|3|47|83598.43|0.05|0.04|R|F|1995-01-29|1995-04-19|1995-02-05|DELIVER IN PERSON|SHIP|ep carefully ironic 2883|97483|5011|4|23|34051.04|0.00|0.02|R|F|1995-02-03|1995-03-17|1995-02-19|TAKE BACK RETURN|AIR| even requests cajole. special, regular 2883|194465|9504|5|36|56140.56|0.07|0.06|A|F|1995-05-02|1995-03-14|1995-05-30|COLLECT COD|MAIL|ests detect slyly special packages 2884|70492|493|1|41|59962.09|0.03|0.00|N|O|1998-01-02|1997-12-17|1998-01-20|DELIVER IN PERSON|TRUCK|ep. slyly even accounts a 2884|145132|2675|2|25|29428.25|0.09|0.08|N|O|1998-01-18|1997-12-06|1998-02-16|TAKE BACK RETURN|MAIL|onic theodolites with the instructi 2884|25106|5107|3|8|8248.80|0.08|0.08|N|O|1997-11-30|1997-11-28|1997-12-14|COLLECT COD|TRUCK|pending accounts about 2885|3054|8055|1|6|5742.30|0.10|0.01|A|F|1993-01-05|1992-12-12|1993-01-19|COLLECT COD|FOB|ctions solve. slyly regular requests n 2885|111966|1967|2|4|7911.84|0.07|0.00|A|F|1992-10-09|1992-12-17|1992-11-04|TAKE BACK RETURN|SHIP| pending packages wake. 2885|716|5717|3|45|72751.95|0.10|0.04|A|F|1992-12-24|1992-10-30|1993-01-04|NONE|SHIP|ess ideas. 
regular, silen 2885|31307|6314|4|15|18574.50|0.03|0.04|R|F|1992-10-31|1992-11-24|1992-11-21|DELIVER IN PERSON|MAIL|odolites. boldly pending packages han 2885|174189|6707|5|43|54316.74|0.06|0.00|R|F|1992-11-17|1992-10-30|1992-12-04|DELIVER IN PERSON|SHIP|cial deposits use bold 2885|189615|7170|6|5|8523.05|0.01|0.02|R|F|1993-01-06|1992-11-13|1993-02-05|TAKE BACK RETURN|TRUCK|s. slyly express th 2885|49234|6747|7|40|47329.20|0.05|0.03|A|F|1992-09-23|1992-11-15|1992-10-07|TAKE BACK RETURN|AIR| express depos 2886|59748|9749|1|1|1707.74|0.09|0.05|A|F|1995-02-01|1994-12-18|1995-02-28|COLLECT COD|REG AIR|eposits fr 2886|183832|3833|2|38|72801.54|0.02|0.04|A|F|1995-01-21|1995-01-08|1995-01-30|NONE|SHIP|old requests along the fur 2886|62934|453|3|2|3793.86|0.04|0.07|A|F|1994-11-18|1995-01-31|1994-12-05|COLLECT COD|REG AIR|ar theodolites. e 2886|129991|7528|4|46|92965.54|0.03|0.08|A|F|1995-02-02|1995-01-26|1995-02-15|TAKE BACK RETURN|SHIP|ously final packages sleep blithely regular 2887|65504|517|1|11|16164.50|0.06|0.00|N|O|1997-07-08|1997-07-17|1997-07-15|COLLECT COD|SHIP|ackages. unusual, speci 2887|111534|4046|2|17|26274.01|0.00|0.08|N|O|1997-08-31|1997-07-04|1997-09-17|DELIVER IN PERSON|SHIP|fily final packages. regula 2912|121445|6470|1|8|11731.52|0.06|0.04|A|F|1992-04-09|1992-04-19|1992-04-26|NONE|RAIL|hs cajole over the slyl 2912|114577|7089|2|18|28648.26|0.00|0.08|R|F|1992-03-13|1992-04-19|1992-03-30|TAKE BACK RETURN|RAIL|unts cajole reg 2913|122736|273|1|39|68590.47|0.06|0.04|N|O|1997-08-28|1997-09-27|1997-09-02|TAKE BACK RETURN|AIR|. final packages a 2913|21727|9234|2|22|36271.84|0.10|0.07|N|O|1997-09-18|1997-08-11|1997-10-02|COLLECT COD|MAIL|riously pending realms. blithely even pac 2913|165309|2858|3|17|23363.10|0.07|0.04|N|O|1997-10-21|1997-09-25|1997-11-20|NONE|FOB|requests doze quickly. furious 2913|142502|2503|4|5|7722.50|0.10|0.07|N|O|1997-10-07|1997-08-25|1997-10-09|TAKE BACK RETURN|RAIL|haggle. even, bold instructi 2913|14885|7387|5|13|23398.44|0.03|0.01|N|O|1997-10-02|1997-08-20|1997-10-26|COLLECT COD|MAIL|inos are carefully alongside of the bol 2913|167718|2751|6|35|62499.85|0.06|0.08|N|O|1997-08-30|1997-08-21|1997-09-03|COLLECT COD|MAIL|es. quickly even braids against 2914|65685|5686|1|22|36314.96|0.05|0.06|R|F|1993-05-11|1993-04-09|1993-05-22|DELIVER IN PERSON|FOB| carefully about the fluffily ironic gifts 2914|162197|7230|2|25|31479.75|0.03|0.04|A|F|1993-05-14|1993-04-04|1993-05-22|NONE|SHIP|cross the carefully even accounts. 2914|34588|7092|3|4|6090.32|0.00|0.05|R|F|1993-06-11|1993-04-09|1993-06-14|TAKE BACK RETURN|SHIP|s integrate. bold deposits sleep req 2914|120324|325|4|9|12098.88|0.06|0.01|R|F|1993-06-17|1993-05-26|1993-06-19|NONE|REG AIR|s. carefully final foxes ar 2915|174124|6642|1|28|33547.36|0.10|0.02|R|F|1994-04-17|1994-06-09|1994-05-10|NONE|MAIL|yly special 2915|93386|8405|2|12|16552.56|0.00|0.03|A|F|1994-07-18|1994-06-11|1994-07-27|TAKE BACK RETURN|RAIL|accounts. 
slyly final 2915|135401|7915|3|15|21546.00|0.07|0.00|A|F|1994-05-01|1994-06-12|1994-05-15|DELIVER IN PERSON|TRUCK|al requests haggle furiousl 2915|80116|2625|4|43|47132.73|0.06|0.05|R|F|1994-06-02|1994-05-24|1994-06-06|DELIVER IN PERSON|SHIP|into beans dazzle alongside of 2916|82501|5010|1|21|31153.50|0.06|0.04|N|O|1996-03-11|1996-02-21|1996-03-30|NONE|REG AIR|uickly express ideas over the slyly even 2917|92630|2631|1|36|58414.68|0.10|0.01|N|O|1998-04-07|1998-02-23|1998-05-01|DELIVER IN PERSON|RAIL|usly ironic d 2917|20711|712|2|20|32634.20|0.06|0.03|N|O|1997-12-31|1998-01-22|1998-01-12|NONE|MAIL|slyly even ideas wa 2917|89342|6867|3|4|5325.36|0.02|0.07|N|O|1998-01-10|1998-01-18|1998-02-08|TAKE BACK RETURN|REG AIR|s. unusual instruct 2917|166826|4375|4|5|9464.10|0.05|0.01|N|O|1997-12-16|1998-01-26|1998-01-07|NONE|RAIL|bove the furiously silent packages. pend 2917|40412|7925|5|37|50039.17|0.04|0.01|N|O|1997-12-12|1998-02-03|1997-12-23|COLLECT COD|RAIL|dependencies. express 2917|193921|1479|6|7|14104.44|0.05|0.01|N|O|1998-03-21|1998-03-03|1998-03-25|NONE|REG AIR|ly about the regular accounts. carefully pe 2918|77412|2427|1|24|33345.84|0.10|0.03|N|O|1996-12-20|1996-10-28|1996-12-26|DELIVER IN PERSON|FOB| quickly. express requests haggle careful 2919|101323|3834|1|2|2648.64|0.03|0.05|R|F|1993-12-28|1994-02-23|1994-01-18|COLLECT COD|TRUCK|re slyly. regular ideas detect furiousl 2919|120203|7740|2|49|59936.80|0.07|0.02|R|F|1993-12-16|1994-02-28|1993-12-19|COLLECT COD|FOB|hely final inst 2919|45595|3108|3|44|67785.96|0.07|0.07|A|F|1994-04-01|1994-01-12|1994-04-07|TAKE BACK RETURN|TRUCK|final ideas haggle carefully fluff 2919|101408|3919|4|44|62013.60|0.00|0.05|R|F|1994-02-04|1994-02-03|1994-03-02|TAKE BACK RETURN|AIR|es doze around the furiously 2944|119834|9835|1|44|81568.52|0.08|0.05|N|O|1997-12-25|1997-10-28|1998-01-21|COLLECT COD|AIR|ickly special theodolit 2944|41779|4284|2|44|75713.88|0.06|0.02|N|O|1997-10-28|1997-11-22|1997-11-10|NONE|SHIP|ickly. regular requests haggle. idea 2944|169657|7206|3|2|3453.30|0.06|0.07|N|O|1997-12-13|1997-12-01|1998-01-08|DELIVER IN PERSON|REG AIR|luffily expr 2944|16331|3835|4|23|28688.59|0.02|0.03|N|O|1998-01-12|1997-12-03|1998-01-17|TAKE BACK RETURN|MAIL| excuses? regular platelets e 2944|74247|9262|5|18|21982.32|0.10|0.01|N|O|1998-01-07|1997-10-26|1998-01-27|TAKE BACK RETURN|FOB| furiously slyl 2944|59267|6783|6|17|20846.42|0.00|0.03|N|O|1997-10-18|1997-11-27|1997-10-29|TAKE BACK RETURN|SHIP|slyly final dolphins sleep silent the 2944|89264|1773|7|7|8772.82|0.01|0.06|N|O|1997-10-30|1997-11-03|1997-11-03|DELIVER IN PERSON|FOB|fluffily blithely express pea 2945|58944|8945|1|37|70408.78|0.00|0.02|N|O|1996-02-10|1996-03-20|1996-02-12|COLLECT COD|SHIP|l instructions. regular, regular 2945|71252|3760|2|30|36697.50|0.05|0.01|N|O|1996-01-19|1996-02-11|1996-01-26|NONE|TRUCK|ular instructions 2945|126197|6198|3|28|34249.32|0.06|0.02|N|O|1996-03-17|1996-03-13|1996-04-15|COLLECT COD|FOB|le slyly along the eve 2945|187861|7862|4|34|66261.24|0.08|0.06|N|O|1996-02-03|1996-03-17|1996-02-29|COLLECT COD|REG AIR|at the unusual theodolite 2945|172280|9832|5|10|13522.80|0.09|0.05|N|O|1996-03-13|1996-03-10|1996-04-06|COLLECT COD|FOB|thely. 
final courts could hang qu 2945|96154|8664|6|45|51756.75|0.07|0.00|N|O|1996-03-01|1996-03-25|1996-03-08|TAKE BACK RETURN|MAIL|ainst the final packages 2945|51230|3736|7|47|55517.81|0.07|0.05|N|O|1996-01-05|1996-02-11|1996-01-12|DELIVER IN PERSON|MAIL|quests use 2946|9193|4194|1|25|27554.75|0.05|0.02|N|O|1996-05-06|1996-04-23|1996-05-16|DELIVER IN PERSON|SHIP|ic deposits. furiously 2946|93176|3177|2|48|56120.16|0.03|0.07|N|O|1996-06-02|1996-03-31|1996-06-16|COLLECT COD|TRUCK|oss the platelets. furi 2946|2968|5469|3|35|65483.60|0.03|0.00|N|O|1996-03-15|1996-04-02|1996-03-26|NONE|REG AIR| sublate along the fluffily iron 2947|9960|9961|1|37|69188.52|0.09|0.07|N|O|1995-08-09|1995-07-05|1995-08-20|DELIVER IN PERSON|RAIL|e accounts: expres 2947|185207|7726|2|10|12922.00|0.09|0.07|A|F|1995-06-07|1995-06-26|1995-06-08|NONE|MAIL|lly special 2948|117469|7470|1|48|71350.08|0.00|0.04|R|F|1994-08-29|1994-10-23|1994-09-23|NONE|TRUCK|unusual excuses use about the 2948|91710|1711|2|49|83383.79|0.04|0.07|R|F|1994-12-16|1994-11-08|1995-01-07|DELIVER IN PERSON|MAIL|ress requests. furiously blithe foxes 2949|20139|2642|1|4|4236.52|0.06|0.06|A|F|1994-06-07|1994-06-17|1994-07-04|TAKE BACK RETURN|REG AIR|gular pinto beans wake alongside of the reg 2949|69423|6942|2|50|69621.00|0.05|0.04|A|F|1994-08-04|1994-06-23|1994-08-17|TAKE BACK RETURN|FOB|gular courts cajole across t 2949|179465|4500|3|38|58689.48|0.02|0.06|R|F|1994-05-22|1994-05-25|1994-05-27|COLLECT COD|REG AIR|se slyly requests. carefull 2950|129486|9487|1|32|48495.36|0.01|0.05|N|O|1997-09-21|1997-08-25|1997-10-08|DELIVER IN PERSON|REG AIR|its wake carefully slyly final ideas. 2950|65786|5787|2|18|31532.04|0.10|0.01|N|O|1997-07-19|1997-08-29|1997-08-17|COLLECT COD|TRUCK|uests cajole furio 2950|52680|2681|3|14|22857.52|0.01|0.02|N|O|1997-07-29|1997-08-05|1997-07-31|TAKE BACK RETURN|MAIL|ccounts haggle carefully according 2950|186367|6368|4|45|65401.20|0.08|0.00|N|O|1997-09-05|1997-09-23|1997-09-11|NONE|FOB|ides the b 2950|60950|951|5|46|87903.70|0.02|0.05|N|O|1997-07-15|1997-09-30|1997-07-25|COLLECT COD|RAIL|to the regular accounts are slyly carefu 2950|173548|3549|6|27|43781.58|0.01|0.03|N|O|1997-10-01|1997-09-13|1997-10-08|NONE|TRUCK|are alongside of the carefully silent 2951|2079|7080|1|5|4905.35|0.03|0.03|N|O|1996-03-27|1996-04-16|1996-03-30|NONE|REG AIR|to beans wake ac 2951|135955|8469|2|24|47782.80|0.07|0.03|N|O|1996-03-24|1996-04-16|1996-04-08|NONE|SHIP| ironic multipliers. express, regular 2951|186046|3601|3|40|45281.60|0.02|0.07|N|O|1996-05-03|1996-04-20|1996-05-22|COLLECT COD|REG AIR|ial deposits wake fluffily about th 2951|72188|4696|4|21|24363.78|0.06|0.08|N|O|1996-04-12|1996-04-27|1996-04-14|DELIVER IN PERSON|REG AIR|nt instructions toward the f 2951|50131|5142|5|15|16216.95|0.07|0.00|N|O|1996-03-25|1996-04-23|1996-03-27|COLLECT COD|REG AIR|inal account 2951|137099|4639|6|18|20449.62|0.06|0.00|N|O|1996-04-04|1996-04-27|1996-04-06|COLLECT COD|FOB|ep about the final, even package 2976|8521|3522|1|32|45744.64|0.06|0.00|A|F|1994-01-26|1994-02-13|1994-02-10|NONE|MAIL|nding, ironic deposits sleep f 2976|3059|3060|2|24|23089.20|0.00|0.03|A|F|1994-03-19|1994-01-26|1994-04-18|COLLECT COD|TRUCK|ronic pinto beans. slyly bol 2976|9775|4776|3|35|58966.95|0.10|0.07|R|F|1993-12-19|1994-02-14|1994-01-11|NONE|RAIL|boost slyly about the regular, regular re 2976|81070|6087|4|22|23123.54|0.00|0.04|A|F|1994-02-08|1994-03-03|1994-02-12|TAKE BACK RETURN|FOB|ncies kindle furiously. 
carefull 2976|133261|8288|5|13|16825.38|0.00|0.06|A|F|1994-02-06|1994-02-02|1994-02-19|NONE|FOB| furiously final courts boost 2976|108348|859|6|30|40690.20|0.08|0.03|R|F|1994-03-27|1994-02-01|1994-04-26|TAKE BACK RETURN|RAIL|c ideas! unusual 2977|69769|7288|1|25|43469.00|0.03|0.07|N|O|1996-09-21|1996-10-06|1996-10-13|TAKE BACK RETURN|RAIL|furiously pe 2978|89697|9698|1|29|48914.01|0.00|0.08|A|F|1995-06-03|1995-07-25|1995-06-06|NONE|SHIP|ecial ideas promise slyly 2978|126998|9511|2|42|85049.58|0.01|0.06|N|O|1995-08-19|1995-07-18|1995-09-07|DELIVER IN PERSON|MAIL|ial requests nag blithely alongside of th 2978|42972|485|3|26|49789.22|0.07|0.05|N|O|1995-07-29|1995-07-22|1995-08-20|COLLECT COD|REG AIR|as haggle against the carefully express dep 2978|27039|4546|4|7|6762.21|0.00|0.00|N|O|1995-07-18|1995-07-03|1995-07-23|NONE|FOB|. final ideas are blithe 2978|28403|5910|5|33|43936.20|0.09|0.03|R|F|1995-05-06|1995-07-23|1995-05-16|COLLECT COD|FOB|s. blithely unusual pack 2978|167063|9580|6|4|4520.24|0.08|0.04|N|O|1995-07-06|1995-07-31|1995-07-19|COLLECT COD|AIR|ffily unusual 2979|8022|5523|1|8|7440.16|0.00|0.08|N|O|1996-06-18|1996-05-21|1996-07-06|COLLECT COD|REG AIR|st blithely; blithely regular gifts dazz 2979|10662|663|2|47|73915.02|0.05|0.00|N|O|1996-03-25|1996-05-13|1996-04-04|TAKE BACK RETURN|SHIP|iously unusual dependencies wake across 2979|187872|5427|3|35|68595.45|0.04|0.03|N|O|1996-05-25|1996-06-11|1996-06-24|DELIVER IN PERSON|MAIL|old ideas beneath the blit 2979|164037|6554|4|28|30828.84|0.05|0.08|N|O|1996-06-04|1996-04-23|1996-06-24|DELIVER IN PERSON|FOB|ing, regular pinto beans. blithel 2980|36367|8871|1|2|2606.72|0.09|0.03|N|O|1996-11-18|1996-10-22|1996-11-27|TAKE BACK RETURN|SHIP|enly across the special, pending packag 2980|9576|7077|2|48|71307.36|0.04|0.05|N|O|1996-09-25|1996-12-09|1996-10-12|NONE|REG AIR|totes. regular pinto 2980|132090|4604|3|27|30296.43|0.08|0.08|N|O|1996-12-08|1996-12-03|1996-12-14|NONE|REG AIR| theodolites cajole blithely sl 2980|24644|7147|4|49|76863.36|0.03|0.02|N|O|1996-10-04|1996-12-04|1996-10-06|NONE|RAIL|hy packages sleep quic 2980|186048|6049|5|24|27216.96|0.05|0.04|N|O|1997-01-12|1996-10-27|1997-01-14|NONE|MAIL|elets. fluffily regular in 2980|108666|3687|6|43|72010.38|0.01|0.01|N|O|1996-12-07|1996-11-10|1997-01-02|COLLECT COD|AIR|sts. slyly regu 2981|13505|1009|1|17|24114.50|0.03|0.05|N|O|1998-10-17|1998-10-02|1998-10-21|DELIVER IN PERSON|RAIL|, unusual packages x-ray. furious 2981|175430|2982|2|8|12043.44|0.06|0.03|N|O|1998-08-21|1998-09-28|1998-09-05|DELIVER IN PERSON|MAIL|ng to the f 2981|36615|4125|3|14|21722.54|0.03|0.07|N|O|1998-08-30|1998-10-04|1998-09-04|DELIVER IN PERSON|MAIL|kages detect furiously express requests. 2982|111936|4448|1|21|40906.53|0.00|0.01|A|F|1995-04-03|1995-06-08|1995-04-18|DELIVER IN PERSON|AIR|ironic deposits. furiously ex 2982|98071|3090|2|13|13897.91|0.02|0.08|R|F|1995-03-31|1995-05-07|1995-04-18|TAKE BACK RETURN|RAIL|regular deposits unwind alongside 2982|69479|6998|3|21|30417.87|0.01|0.01|R|F|1995-04-19|1995-06-03|1995-04-28|COLLECT COD|SHIP|egular ideas use furiously? bl 2983|162174|2175|1|44|54391.48|0.03|0.06|R|F|1992-02-09|1992-03-07|1992-03-09|TAKE BACK RETURN|AIR|ly regular instruct 2983|48039|5552|2|11|10857.33|0.09|0.06|A|F|1992-04-29|1992-02-27|1992-05-26|NONE|MAIL|aids integrate s 3008|131565|6592|1|8|12772.48|0.10|0.04|N|O|1995-12-06|1996-01-12|1995-12-22|TAKE BACK RETURN|FOB|yly ironic foxes. 
regular requests h 3008|199716|4755|2|31|56287.01|0.05|0.06|N|O|1995-12-14|1995-12-11|1995-12-31|TAKE BACK RETURN|AIR| bold packages. quic 3008|23054|3055|3|40|39082.00|0.01|0.03|N|O|1995-12-18|1996-01-06|1996-01-11|COLLECT COD|AIR|esias. theodolites detect blithely 3008|59018|9019|4|48|46896.48|0.07|0.06|N|O|1996-01-23|1996-01-07|1996-02-09|COLLECT COD|SHIP|ld theodolites. fluffily bold theodolit 3008|104156|9177|5|31|35964.65|0.03|0.02|N|O|1995-12-01|1996-01-20|1995-12-28|COLLECT COD|RAIL|nts use thinly around the carefully iro 3009|44862|9871|1|48|86729.28|0.10|0.02|N|O|1997-03-19|1997-05-13|1997-04-11|TAKE BACK RETURN|TRUCK| dependencies sleep quickly a 3009|184598|7117|2|38|63938.42|0.00|0.01|N|O|1997-05-01|1997-04-10|1997-05-17|TAKE BACK RETURN|AIR|nal packages should haggle slyly. quickl 3009|129770|7307|3|26|46794.02|0.08|0.02|N|O|1997-05-15|1997-05-10|1997-06-13|TAKE BACK RETURN|SHIP|uriously specia 3010|137941|455|1|23|45515.62|0.04|0.00|N|O|1996-03-08|1996-02-29|1996-03-27|NONE|TRUCK|ounts. pendin 3010|173015|5533|2|22|23936.22|0.09|0.06|N|O|1996-03-06|1996-04-06|1996-03-18|COLLECT COD|REG AIR| final deposit 3010|57050|9556|3|24|24169.20|0.04|0.07|N|O|1996-05-09|1996-03-14|1996-05-15|DELIVER IN PERSON|RAIL|ar, even reques 3010|23675|1182|4|28|44762.76|0.09|0.06|N|O|1996-03-05|1996-03-28|1996-04-03|DELIVER IN PERSON|FOB|ake carefully carefully even request 3010|103499|3500|5|9|13522.41|0.02|0.02|N|O|1996-04-28|1996-03-17|1996-05-18|NONE|SHIP|inal packages. quickly even pinto 3010|91955|1956|6|38|73984.10|0.05|0.07|N|O|1996-04-15|1996-03-16|1996-04-21|DELIVER IN PERSON|RAIL|accounts ar 3011|197592|112|1|5|8447.95|0.02|0.04|R|F|1992-04-21|1992-02-23|1992-05-15|NONE|TRUCK|nusual sentiments. carefully bold idea 3011|122709|2710|2|42|72731.40|0.05|0.00|A|F|1992-02-01|1992-03-18|1992-02-29|NONE|TRUCK|osits haggle quickly pending, 3012|194274|6794|1|49|67045.23|0.00|0.00|A|F|1993-08-07|1993-07-01|1993-08-08|NONE|MAIL| quickly furious packages. silently unusua 3012|160226|227|2|37|47590.14|0.06|0.03|A|F|1993-08-16|1993-06-07|1993-08-24|TAKE BACK RETURN|REG AIR|uickly permanent packages sleep caref 3013|93613|6123|1|31|49804.91|0.08|0.08|N|O|1997-05-03|1997-04-05|1997-05-25|NONE|AIR|y furious depen 3013|138866|6406|2|30|57145.80|0.05|0.06|N|O|1997-05-02|1997-03-09|1997-05-12|TAKE BACK RETURN|MAIL|ronic packages. slyly even 3013|119628|7162|3|35|57666.70|0.00|0.03|N|O|1997-04-02|1997-05-04|1997-04-16|COLLECT COD|MAIL|ely accord 3013|180929|3448|4|17|34168.64|0.01|0.07|N|O|1997-02-26|1997-05-02|1997-03-27|DELIVER IN PERSON|SHIP|fully unusual account 3013|59896|4907|5|20|37117.80|0.00|0.04|N|O|1997-05-06|1997-03-18|1997-05-12|COLLECT COD|RAIL|unts boost regular ideas. slyly pe 3013|71557|4065|6|19|29042.45|0.08|0.07|N|O|1997-05-11|1997-04-18|1997-05-15|COLLECT COD|REG AIR|fluffily pending packages nag furiously al 3014|162368|2369|1|36|51492.96|0.05|0.03|A|F|1992-11-16|1993-01-20|1992-11-28|TAKE BACK RETURN|FOB|ding accounts boost fu 3014|105025|46|2|36|37080.72|0.00|0.08|R|F|1992-12-28|1992-12-29|1993-01-24|COLLECT COD|MAIL|iously ironic r 3014|150885|3401|3|48|92922.24|0.06|0.02|A|F|1992-12-19|1993-01-08|1992-12-25|DELIVER IN PERSON|REG AIR|y pending theodolites wake. reg 3014|113767|8790|4|14|24930.64|0.10|0.02|R|F|1992-11-19|1993-01-01|1992-12-17|DELIVER IN PERSON|SHIP|. slyly brave platelets nag. careful, 3014|74210|6718|5|28|33157.88|0.02|0.08|R|F|1993-01-09|1992-12-18|1993-01-10|TAKE BACK RETURN|FOB|es are. final braids nag slyly. 
fluff 3014|37443|9947|6|30|41413.20|0.04|0.01|R|F|1993-02-28|1993-01-02|1993-03-20|TAKE BACK RETURN|AIR| final foxes. 3015|2643|7644|1|5|7728.20|0.09|0.00|A|F|1993-01-10|1992-12-02|1993-01-19|TAKE BACK RETURN|RAIL| the furiously pendi 3015|17510|12|2|17|24267.67|0.03|0.01|R|F|1992-10-16|1992-11-20|1992-10-28|COLLECT COD|AIR|s above the fluffily final t 3015|90543|5562|3|23|35271.42|0.03|0.05|A|F|1992-12-03|1992-11-19|1992-12-23|DELIVER IN PERSON|FOB|s are slyly carefully special pinto bea 3015|155005|5006|4|7|7420.00|0.10|0.03|A|F|1992-12-07|1992-12-17|1992-12-30|DELIVER IN PERSON|REG AIR| after the evenly special packages ca 3015|164614|7131|5|42|70501.62|0.04|0.02|R|F|1993-01-21|1992-11-07|1993-02-11|DELIVER IN PERSON|AIR|encies haggle furious 3015|65123|5124|6|18|19586.16|0.02|0.03|R|F|1992-10-10|1992-11-19|1992-10-18|TAKE BACK RETURN|MAIL|equests wake fluffil 3040|15347|2851|1|18|22722.12|0.08|0.04|R|F|1993-06-25|1993-07-06|1993-07-19|TAKE BACK RETURN|SHIP|ly thin accou 3040|132715|255|2|9|15729.39|0.00|0.01|A|F|1993-06-12|1993-05-16|1993-06-14|NONE|RAIL|ges. pending packages wake. requests 3040|125608|633|3|30|49008.00|0.01|0.01|A|F|1993-08-06|1993-05-18|1993-08-19|NONE|MAIL|x furiously bold packages. expres 3040|82065|7082|4|14|14658.84|0.05|0.04|A|F|1993-05-13|1993-05-18|1993-05-19|TAKE BACK RETURN|REG AIR| haggle carefully. express hocke 3040|51516|1517|5|43|63102.93|0.04|0.04|R|F|1993-05-21|1993-05-25|1993-05-26|NONE|MAIL|sts nag slyly alongside of the depos 3040|17878|2881|6|10|17958.70|0.08|0.04|R|F|1993-05-16|1993-06-24|1993-06-11|DELIVER IN PERSON|MAIL|ely regular foxes haggle dari 3041|180636|8191|1|5|8583.15|0.07|0.04|N|O|1997-07-20|1997-07-15|1997-08-17|COLLECT COD|FOB|posits dazzle special p 3041|145837|866|2|9|16945.47|0.03|0.03|N|O|1997-06-29|1997-08-14|1997-07-19|COLLECT COD|AIR|iously across the silent pinto beans. furi 3041|67448|2461|3|9|12738.96|0.09|0.06|N|O|1997-08-28|1997-07-23|1997-09-16|TAKE BACK RETURN|FOB|scapades after the special 3042|104915|2446|1|30|57597.30|0.08|0.06|A|F|1995-01-12|1995-02-15|1995-01-24|DELIVER IN PERSON|SHIP|the requests detect fu 3042|101459|1460|2|28|40892.60|0.05|0.03|A|F|1994-11-24|1995-01-02|1994-12-06|TAKE BACK RETURN|MAIL|ng the furiously r 3042|13664|6166|3|34|53640.44|0.04|0.00|R|F|1994-12-11|1995-02-03|1994-12-21|TAKE BACK RETURN|TRUCK|can wake after the enticingly stealthy i 3042|47338|2347|4|19|24421.27|0.02|0.01|A|F|1995-03-05|1995-01-24|1995-03-17|COLLECT COD|TRUCK|e carefully. regul 3043|45387|396|1|23|30644.74|0.07|0.04|R|F|1992-05-08|1992-07-22|1992-05-18|COLLECT COD|TRUCK|uickly above the pending, 3043|5894|3395|2|15|26998.35|0.03|0.05|A|F|1992-05-27|1992-06-03|1992-06-09|COLLECT COD|FOB|usly furiously 3043|59231|9232|3|42|49989.66|0.10|0.07|R|F|1992-07-15|1992-06-19|1992-07-23|NONE|MAIL|ide of the un 3043|90453|454|4|5|7217.25|0.10|0.01|A|F|1992-05-22|1992-07-02|1992-06-20|TAKE BACK RETURN|TRUCK|ake blithely re 3044|100992|993|1|10|19929.90|0.07|0.08|N|O|1996-07-13|1996-05-06|1996-07-21|TAKE BACK RETURN|REG AIR| slyly ironic requests. s 3044|167015|9532|2|3|3246.03|0.06|0.02|N|O|1996-07-27|1996-05-26|1996-08-15|TAKE BACK RETURN|AIR|ecoys haggle furiously pending requests. 3044|18478|980|3|47|65634.09|0.09|0.00|N|O|1996-05-24|1996-06-22|1996-05-30|NONE|REG AIR|ly around the car 3045|87670|7671|1|41|67964.47|0.05|0.01|N|O|1995-09-30|1995-11-24|1995-10-03|TAKE BACK RETURN|MAIL|ely final foxes. 
carefully ironic pinto b 3045|68109|3122|2|48|51700.80|0.02|0.03|N|O|1995-10-01|1995-12-16|1995-10-10|TAKE BACK RETURN|MAIL|ole quickly outside th 3046|73235|3236|1|44|53162.12|0.03|0.03|N|O|1996-03-03|1996-02-25|1996-04-01|NONE|AIR| are quickly. blithe 3046|53645|3646|2|46|73537.44|0.03|0.08|N|O|1996-03-22|1996-02-28|1996-04-07|TAKE BACK RETURN|AIR|sits sleep furious 3046|1537|9038|3|31|44594.43|0.03|0.07|N|O|1996-03-24|1996-01-30|1996-03-26|NONE|RAIL|y pending somas alongside of the slyly iro 3047|103994|3995|1|17|33965.83|0.08|0.02|N|O|1997-06-14|1997-04-20|1997-06-23|COLLECT COD|FOB|onic instruction 3047|13957|8960|2|23|43031.85|0.00|0.04|N|O|1997-05-20|1997-06-14|1997-05-28|TAKE BACK RETURN|REG AIR| slyly ironi 3072|56531|4047|1|6|8925.18|0.09|0.05|R|F|1994-02-09|1994-03-24|1994-02-28|DELIVER IN PERSON|REG AIR|gular requests abov 3072|107152|2173|2|36|41729.40|0.07|0.02|R|F|1994-04-14|1994-04-22|1994-05-06|COLLECT COD|AIR| theodolites. blithely e 3072|96715|6716|3|7|11981.97|0.04|0.07|R|F|1994-05-09|1994-03-31|1994-05-19|COLLECT COD|TRUCK|uests. ironic, ironic depos 3072|82745|5254|4|39|67381.86|0.05|0.08|A|F|1994-05-27|1994-04-20|1994-06-14|COLLECT COD|MAIL|es; slyly spe 3072|87380|7381|5|1|1367.38|0.01|0.08|R|F|1994-02-26|1994-03-14|1994-03-19|NONE|AIR| slyly ironic attainments. car 3073|193992|9031|1|16|33375.84|0.07|0.01|R|F|1994-03-02|1994-03-23|1994-03-31|DELIVER IN PERSON|AIR|n requests. ironi 3073|21735|9242|2|47|77866.31|0.09|0.00|R|F|1994-03-26|1994-02-12|1994-04-21|NONE|REG AIR|eposits. fluffily 3073|86050|8559|3|10|10360.50|0.03|0.00|R|F|1994-02-11|1994-03-24|1994-02-26|COLLECT COD|FOB| furiously caref 3073|28574|1077|4|14|21035.98|0.09|0.07|R|F|1994-03-24|1994-04-01|1994-04-07|NONE|RAIL|ilently quiet epitaphs. 3073|40433|7946|5|25|34335.75|0.00|0.07|R|F|1994-04-14|1994-03-07|1994-04-22|NONE|TRUCK|nag asymptotes. pinto beans sleep 3073|146763|6764|6|39|70580.64|0.09|0.02|R|F|1994-05-01|1994-02-16|1994-05-12|DELIVER IN PERSON|AIR|lar excuses across the furiously even 3073|43770|3771|7|11|18851.47|0.08|0.07|A|F|1994-05-01|1994-03-06|1994-05-08|COLLECT COD|SHIP|instructions sleep according to the 3074|36636|1643|1|50|78631.50|0.08|0.08|A|F|1993-01-31|1992-12-15|1993-02-20|NONE|AIR|furiously pending requests haggle s 3074|138309|5849|2|39|52544.70|0.03|0.00|R|F|1992-12-08|1993-01-28|1992-12-09|DELIVER IN PERSON|TRUCK|iously throu 3075|8905|6406|1|39|70742.10|0.02|0.03|A|F|1994-06-10|1994-06-21|1994-06-20|NONE|FOB|ing deposits nag 3075|51164|3670|2|2|2230.32|0.07|0.08|R|F|1994-06-14|1994-06-10|1994-06-25|TAKE BACK RETURN|AIR|. unusual, unusual accounts haggle furious 3076|84436|4437|1|44|62498.92|0.00|0.05|A|F|1993-09-14|1993-10-04|1993-09-17|TAKE BACK RETURN|FOB| instructions h 3076|105266|287|2|22|27967.72|0.08|0.00|A|F|1993-09-05|1993-09-10|1993-09-27|NONE|REG AIR|packages wake furiou 3076|4172|6673|3|31|33361.27|0.06|0.06|A|F|1993-08-10|1993-09-17|1993-08-17|TAKE BACK RETURN|SHIP|regular depos 3077|71785|4293|1|25|43919.50|0.06|0.01|N|O|1997-09-14|1997-10-16|1997-10-06|NONE|TRUCK|lent account 3077|90110|2620|2|40|44004.40|0.05|0.06|N|O|1997-10-22|1997-09-19|1997-11-19|DELIVER IN PERSON|AIR|to the enticing packag 3077|77820|2835|3|13|23371.66|0.03|0.07|N|O|1997-09-09|1997-10-15|1997-09-19|NONE|TRUCK|luffily close depende 3077|114603|2137|4|23|37204.80|0.03|0.02|N|O|1997-11-05|1997-09-16|1997-11-20|NONE|MAIL|lly. fluffily pending dinos across 3078|131450|1451|1|25|37036.25|0.01|0.03|A|F|1993-04-22|1993-05-01|1993-04-28|TAKE BACK RETURN|AIR|express dinos. 
carefully ironic 3078|77158|9666|2|21|23838.15|0.09|0.07|A|F|1993-03-20|1993-03-21|1993-04-01|COLLECT COD|AIR|e fluffily. 3079|69026|6545|1|20|19900.40|0.05|0.00|N|O|1997-10-18|1997-10-26|1997-11-14|NONE|RAIL|ets are according to the quickly dari 3079|116489|9001|2|38|57208.24|0.08|0.07|N|O|1997-11-07|1997-11-25|1997-12-06|NONE|RAIL|e carefully regular realms 3079|16662|6663|3|40|63146.40|0.02|0.08|N|O|1997-09-26|1997-12-11|1997-10-09|NONE|RAIL|ide of the pending, special deposi 3079|23216|3217|4|2|2278.42|0.00|0.08|N|O|1998-01-05|1997-11-17|1998-01-28|NONE|FOB|ly busy requests believ 3079|187516|35|5|2|3207.02|0.10|0.00|N|O|1997-12-27|1997-10-25|1998-01-08|COLLECT COD|SHIP|y regular asymptotes doz 3079|165391|2940|6|46|66993.94|0.00|0.00|N|O|1997-11-19|1997-11-04|1997-11-25|DELIVER IN PERSON|REG AIR|es. final, regula 3104|50589|5600|1|20|30791.60|0.01|0.08|A|F|1993-12-31|1993-11-24|1994-01-12|DELIVER IN PERSON|REG AIR|s are. furiously s 3104|47520|2529|2|47|68973.44|0.02|0.05|A|F|1993-12-25|1993-11-02|1994-01-12|COLLECT COD|RAIL|ily daring acc 3104|62977|2978|3|11|21339.67|0.02|0.03|A|F|1993-10-05|1993-11-30|1993-10-27|NONE|TRUCK| special deposits u 3104|37868|7869|4|26|46952.36|0.02|0.08|R|F|1994-01-02|1993-12-05|1994-01-31|TAKE BACK RETURN|TRUCK|es boost carefully. slyly 3105|183486|8523|1|11|17264.28|0.01|0.06|N|O|1997-02-07|1997-02-09|1997-03-01|NONE|FOB|kly bold depths caj 3105|44436|4437|2|9|12423.87|0.08|0.08|N|O|1996-12-25|1997-02-04|1997-01-09|COLLECT COD|SHIP|es wake among t 3105|24472|9477|3|48|67030.56|0.02|0.05|N|O|1997-02-28|1997-01-31|1997-03-18|DELIVER IN PERSON|REG AIR|ending platelets wake carefully ironic inst 3105|90456|7984|4|23|33268.35|0.04|0.07|N|O|1997-03-08|1996-12-14|1997-03-18|COLLECT COD|REG AIR| detect slyly. blithely unusual requests ar 3105|89347|4364|5|8|10690.72|0.07|0.07|N|O|1996-12-28|1996-12-28|1997-01-25|NONE|FOB|s. blithely unusual ideas was after 3105|46261|3774|6|30|36217.80|0.08|0.05|N|O|1997-03-03|1997-02-03|1997-03-05|NONE|FOB|ess accounts boost among t 3106|85699|8208|1|22|37063.18|0.03|0.02|N|O|1997-02-28|1997-02-12|1997-03-03|DELIVER IN PERSON|FOB|structions atop the blithely 3106|135816|3356|2|49|90738.69|0.06|0.06|N|O|1997-02-27|1997-03-11|1997-03-12|NONE|TRUCK|lets. quietly regular courts 3106|51764|6775|3|42|72061.92|0.09|0.07|N|O|1997-04-05|1997-03-17|1997-04-22|COLLECT COD|REG AIR|nstructions wake. furiously 3106|195649|3207|4|6|10467.84|0.10|0.07|N|O|1997-02-02|1997-04-11|1997-02-27|COLLECT COD|REG AIR|symptotes. slyly bold platelets cajol 3106|64580|9593|5|16|24713.28|0.09|0.08|N|O|1997-02-25|1997-04-10|1997-03-16|NONE|AIR|sits wake slyl 3107|148150|665|1|16|19170.40|0.05|0.04|N|O|1997-08-30|1997-10-20|1997-09-20|TAKE BACK RETURN|REG AIR|regular pinto beans. ironic ideas haggle 3107|141690|1691|2|35|60609.15|0.05|0.06|N|O|1997-08-27|1997-11-19|1997-09-14|COLLECT COD|TRUCK|ets doubt furiously final ideas. final 3107|169733|2250|3|23|41462.79|0.03|0.06|N|O|1997-12-10|1997-11-11|1997-12-14|TAKE BACK RETURN|SHIP|atelets must ha 3107|86080|8589|4|27|28784.16|0.00|0.08|N|O|1997-11-15|1997-10-31|1997-11-28|DELIVER IN PERSON|FOB|furiously final 3108|108989|1500|1|37|73925.26|0.06|0.04|A|F|1993-10-16|1993-10-01|1993-11-09|DELIVER IN PERSON|RAIL| final requests. 
3108|165850|3399|2|26|49812.10|0.08|0.05|A|F|1993-11-12|1993-10-05|1993-12-09|COLLECT COD|TRUCK| slyly slow foxes wake furious 3109|17514|16|1|32|45808.32|0.08|0.03|A|F|1993-09-05|1993-10-06|1993-09-18|DELIVER IN PERSON|FOB|ecial orbits are furiou 3109|144736|2279|2|49|87255.77|0.08|0.06|R|F|1993-10-24|1993-09-30|1993-11-21|TAKE BACK RETURN|AIR| even pearls. furiously pending 3109|175265|2817|3|43|57631.18|0.04|0.07|R|F|1993-09-29|1993-09-06|1993-10-13|COLLECT COD|MAIL|ding to the foxes. 3109|78107|8108|4|26|28212.60|0.01|0.05|R|F|1993-11-16|1993-10-18|1993-12-06|TAKE BACK RETURN|TRUCK| sleep slyly according to t 3109|142097|9640|5|50|56954.50|0.01|0.08|A|F|1993-09-17|1993-10-16|1993-10-11|NONE|FOB| regular packages boost blithely even, re 3109|14933|7435|6|10|18479.30|0.10|0.04|A|F|1993-10-26|1993-10-03|1993-11-09|NONE|TRUCK|sits haggle carefully. regular, unusual ac 3110|88455|3472|1|1|1443.45|0.02|0.07|A|F|1995-01-15|1995-01-20|1995-01-30|DELIVER IN PERSON|REG AIR|c theodolites a 3110|56453|1464|2|31|43692.95|0.01|0.06|R|F|1995-03-31|1995-03-07|1995-04-21|TAKE BACK RETURN|REG AIR|en deposits. ironic 3110|2097|9598|3|34|33969.06|0.02|0.02|A|F|1995-02-23|1995-01-27|1995-03-09|TAKE BACK RETURN|FOB|ly pending requests ha 3110|39004|9005|4|16|15088.00|0.04|0.04|A|F|1995-01-10|1995-02-06|1995-01-26|NONE|MAIL|across the regular acco 3110|139664|7204|5|39|66442.74|0.09|0.01|A|F|1995-02-09|1995-01-21|1995-02-21|NONE|MAIL|side of the blithely unusual courts. slyly 3111|136959|1986|1|22|43910.90|0.06|0.05|N|O|1995-09-21|1995-11-09|1995-10-17|COLLECT COD|REG AIR|quests. regular dolphins against the 3111|57194|4710|2|30|34535.70|0.06|0.05|N|O|1995-10-05|1995-11-15|1995-11-01|TAKE BACK RETURN|TRUCK|eas are furiously slyly special deposits. 3111|51332|1333|3|10|12833.30|0.02|0.02|N|O|1995-11-10|1995-11-02|1995-12-04|NONE|FOB|ng the slyly ironic inst 3111|131080|6107|4|31|34443.48|0.00|0.08|N|O|1995-10-26|1995-09-26|1995-11-02|TAKE BACK RETURN|MAIL|kages detect express attainments 3111|53483|999|5|14|20110.72|0.05|0.04|N|O|1995-10-17|1995-10-19|1995-10-19|TAKE BACK RETURN|SHIP|re. pinto 3111|85509|8018|6|5|7472.50|0.03|0.08|N|O|1995-08-30|1995-10-16|1995-09-04|DELIVER IN PERSON|TRUCK|. carefully even ideas 3111|147305|7306|7|41|55444.30|0.09|0.05|N|O|1995-11-22|1995-11-01|1995-12-01|TAKE BACK RETURN|FOB|fily slow ideas. 3136|141341|6370|1|30|41470.20|0.02|0.08|R|F|1994-08-13|1994-10-02|1994-09-02|TAKE BACK RETURN|RAIL|leep blithel 3136|102634|2635|2|7|11456.41|0.05|0.07|A|F|1994-10-08|1994-09-14|1994-10-11|TAKE BACK RETURN|SHIP|ic pinto beans are slyly. f 3136|157204|2235|3|43|54231.60|0.00|0.07|A|F|1994-09-05|1994-09-25|1994-09-11|NONE|RAIL|. special theodolites ha 3136|115183|2717|4|26|31152.68|0.04|0.05|A|F|1994-10-13|1994-11-07|1994-11-05|TAKE BACK RETURN|AIR|eep fluffily. daringly silent attainments d 3136|66751|6752|5|2|3435.50|0.08|0.07|R|F|1994-11-21|1994-11-03|1994-11-26|DELIVER IN PERSON|TRUCK|? special, silent 3136|79827|9828|6|29|52397.78|0.08|0.07|A|F|1994-11-16|1994-10-03|1994-12-14|NONE|FOB|latelets. final 3137|2357|2358|1|6|7556.10|0.02|0.02|N|O|1995-09-19|1995-10-23|1995-10-16|NONE|SHIP|ly express as 3137|5178|2679|2|4|4332.68|0.06|0.04|N|O|1995-10-01|1995-09-11|1995-10-30|COLLECT COD|RAIL|posits wake. silent excuses boost about 3138|92417|4927|1|7|9865.87|0.05|0.05|R|F|1994-03-04|1994-03-14|1994-03-20|NONE|AIR|lithely quickly even packages. 
packages 3138|43554|3555|2|27|40433.85|0.09|0.01|R|F|1994-03-24|1994-03-23|1994-04-18|DELIVER IN PERSON|FOB|counts cajole fluffily carefully special i 3138|196512|6513|3|32|51472.32|0.00|0.01|R|F|1994-02-24|1994-05-07|1994-02-28|TAKE BACK RETURN|MAIL|inal foxes affix slyly. fluffily regul 3138|171358|1359|4|38|54315.30|0.07|0.04|R|F|1994-02-21|1994-03-21|1994-03-13|COLLECT COD|FOB|lithely fluffily un 3138|9594|9595|5|12|18043.08|0.09|0.02|A|F|1994-03-04|1994-04-11|1994-03-21|COLLECT COD|FOB|. bold pinto beans haggl 3138|43107|8116|6|25|26252.50|0.05|0.08|A|F|1994-05-19|1994-04-07|1994-06-17|TAKE BACK RETURN|AIR|dolites around the carefully busy the 3139|39310|1814|1|46|57468.26|0.08|0.03|R|F|1992-04-28|1992-03-04|1992-05-19|TAKE BACK RETURN|FOB|of the unusual, unusual re 3140|6539|4040|1|21|30356.13|0.08|0.02|R|F|1992-04-12|1992-05-31|1992-04-21|NONE|REG AIR| furiously sly excuses according to the 3140|88674|1183|2|10|16626.70|0.07|0.01|A|F|1992-05-30|1992-05-09|1992-06-09|COLLECT COD|RAIL|accounts. expres 3140|132668|2669|3|28|47618.48|0.06|0.00|R|F|1992-06-08|1992-07-07|1992-07-08|TAKE BACK RETURN|SHIP|lar ideas. slyly ironic d 3141|176416|1451|1|32|47757.12|0.06|0.00|N|O|1995-11-21|1995-12-18|1995-11-26|DELIVER IN PERSON|FOB|oxes are quickly about t 3141|9358|6859|2|37|46891.95|0.10|0.05|N|O|1996-01-24|1995-12-16|1996-01-27|DELIVER IN PERSON|AIR|press pinto beans. bold accounts boost b 3141|78185|5707|3|9|10468.62|0.09|0.02|N|O|1995-11-11|1995-12-10|1995-12-02|DELIVER IN PERSON|MAIL|uickly ironic, pendi 3141|45956|965|4|47|89391.65|0.03|0.01|N|O|1995-11-29|1996-01-13|1995-12-10|TAKE BACK RETURN|TRUCK| are slyly pi 3142|119225|4248|1|15|18663.30|0.03|0.08|R|F|1992-08-15|1992-08-18|1992-08-22|DELIVER IN PERSON|AIR|instructions are. ironic packages doz 3143|89831|7356|1|22|40058.26|0.02|0.00|A|F|1993-05-11|1993-03-26|1993-05-20|TAKE BACK RETURN|MAIL|l, special instructions nag 3143|182257|9812|2|40|53570.00|0.03|0.08|A|F|1993-05-07|1993-03-29|1993-05-17|COLLECT COD|FOB|sly unusual theodolites. slyly ev 3143|182464|4983|3|22|34022.12|0.05|0.03|A|F|1993-03-18|1993-05-09|1993-04-14|DELIVER IN PERSON|MAIL|beans. fluf 3143|65651|5652|4|46|74365.90|0.05|0.08|R|F|1993-04-19|1993-03-21|1993-05-05|COLLECT COD|REG AIR|low forges haggle. even packages use bli 3168|59901|2407|1|46|85601.40|0.08|0.08|R|F|1992-02-14|1992-03-02|1992-03-02|TAKE BACK RETURN|SHIP|y across the express accounts. fluff 3168|153658|3659|2|1|1711.65|0.06|0.08|A|F|1992-05-27|1992-03-12|1992-06-09|TAKE BACK RETURN|SHIP|pinto beans. slyly regular courts haggle 3168|127053|9566|3|13|14040.65|0.09|0.02|A|F|1992-03-05|1992-04-29|1992-03-15|NONE|SHIP|ironic somas haggle quick 3168|164440|1989|4|11|16548.84|0.02|0.05|R|F|1992-04-12|1992-03-17|1992-05-12|COLLECT COD|SHIP|ously furious dependenc 3169|191052|3572|1|12|13716.60|0.01|0.04|R|F|1994-01-05|1994-03-18|1994-01-21|COLLECT COD|REG AIR| regular d 3169|199869|4908|2|17|33470.62|0.05|0.04|R|F|1994-03-02|1994-01-21|1994-03-03|DELIVER IN PERSON|TRUCK|usly regular packages. ironi 3169|187322|7323|3|12|16911.84|0.08|0.07|A|F|1994-04-18|1994-03-12|1994-05-08|TAKE BACK RETURN|FOB|atelets. pac 3169|104999|5000|4|26|52103.74|0.10|0.04|R|F|1994-04-08|1994-03-21|1994-04-29|NONE|TRUCK|ter the regular ideas. slyly iro 3169|107704|7705|5|6|10270.20|0.09|0.01|A|F|1994-03-24|1994-02-22|1994-04-04|TAKE BACK RETURN|AIR|ular instructions. 
ca 3169|176053|6054|6|46|51936.30|0.02|0.07|A|F|1994-02-01|1994-01-22|1994-02-24|DELIVER IN PERSON|RAIL|thely bold theodolites are fl 3170|39883|7393|1|12|21874.56|0.03|0.03|N|O|1998-02-12|1998-01-17|1998-02-24|NONE|TRUCK|ing accounts along the speci 3170|99060|1570|2|21|22240.26|0.01|0.00|N|O|1997-12-09|1998-01-31|1997-12-21|DELIVER IN PERSON|MAIL|o beans. carefully final requests dou 3170|88383|3400|3|27|37027.26|0.00|0.05|N|O|1998-02-25|1998-01-29|1998-02-27|COLLECT COD|AIR|efully bold foxes. regular, ev 3170|40846|847|4|34|60752.56|0.05|0.04|N|O|1998-02-01|1998-01-11|1998-02-20|TAKE BACK RETURN|TRUCK|s about the fluffily final de 3170|89767|9768|5|32|56216.32|0.02|0.04|N|O|1997-11-24|1997-12-12|1997-12-15|COLLECT COD|SHIP|ggle about the furiously r 3170|109150|4171|6|43|49843.45|0.08|0.05|N|O|1998-01-05|1998-01-04|1998-01-14|NONE|REG AIR|. express dolphins use sly 3170|83873|3874|7|26|48278.62|0.10|0.05|N|O|1998-02-12|1997-12-22|1998-02-28|COLLECT COD|TRUCK|s engage furiously. 3171|46382|8887|1|34|45164.92|0.04|0.00|A|F|1993-05-30|1993-05-27|1993-06-06|DELIVER IN PERSON|REG AIR|r the final, even packages. quickly 3171|138340|8341|2|50|68917.00|0.01|0.04|A|F|1993-07-19|1993-05-15|1993-07-31|TAKE BACK RETURN|REG AIR|riously final foxes about the ca 3172|95767|786|1|4|7051.04|0.06|0.07|A|F|1992-09-26|1992-08-15|1992-10-20|DELIVER IN PERSON|TRUCK|s are slyly thin package 3172|147816|5359|2|43|80143.83|0.05|0.07|R|F|1992-08-22|1992-07-07|1992-08-26|COLLECT COD|MAIL| final packages. 3172|131989|7016|3|13|26272.74|0.03|0.01|R|F|1992-07-06|1992-08-06|1992-08-05|DELIVER IN PERSON|MAIL|inal deposits haggle along the 3172|134555|9582|4|28|44507.40|0.08|0.04|R|F|1992-07-09|1992-07-14|1992-07-16|NONE|MAIL|regular ideas. packages are furi 3172|63775|3776|5|31|53901.87|0.05|0.08|A|F|1992-09-01|1992-08-27|1992-09-23|NONE|SHIP|. slyly regular dependencies haggle quiet 3173|194121|4122|1|35|42529.20|0.01|0.08|N|O|1996-09-09|1996-10-15|1996-10-04|TAKE BACK RETURN|RAIL| across the slyly even requests. 3173|177060|2095|2|5|5685.30|0.09|0.07|N|O|1996-12-06|1996-09-17|1996-12-07|DELIVER IN PERSON|REG AIR|express depo 3173|45832|841|3|16|28445.28|0.06|0.01|N|O|1996-08-12|1996-09-21|1996-08-22|NONE|SHIP|e special, 3173|93949|3950|4|2|3885.88|0.00|0.00|N|O|1996-10-15|1996-11-06|1996-10-18|COLLECT COD|MAIL|ular pearls 3173|184484|4485|5|2|3136.96|0.00|0.06|N|O|1996-08-18|1996-09-21|1996-09-07|DELIVER IN PERSON|MAIL|fluffily above t 3174|185221|7740|1|6|7837.32|0.04|0.08|N|O|1996-03-13|1996-02-09|1996-03-22|DELIVER IN PERSON|AIR| furiously ironic 3174|193509|8548|2|4|6410.00|0.01|0.05|N|O|1995-11-17|1996-01-08|1995-11-27|DELIVER IN PERSON|RAIL|deas sleep thi 3174|91355|3865|3|21|28273.35|0.08|0.05|N|O|1996-02-20|1995-12-28|1996-03-17|NONE|MAIL|iously. idly bold theodolites a 3174|191449|9007|4|13|20025.72|0.08|0.06|N|O|1996-01-11|1996-01-26|1996-02-01|DELIVER IN PERSON|SHIP|leep quickly? slyly special platelets 3174|71002|3510|5|39|37947.00|0.02|0.06|N|O|1995-12-02|1996-02-08|1995-12-12|TAKE BACK RETURN|TRUCK| wake slyly foxes. bold requests p 3174|119325|4348|6|8|10754.56|0.07|0.08|N|O|1995-12-07|1996-01-08|1995-12-29|DELIVER IN PERSON|TRUCK|nic deposits among t 3175|119225|6759|1|28|34838.16|0.10|0.01|R|F|1994-09-27|1994-10-05|1994-10-04|NONE|FOB|ore the even, silent foxes. b 3175|532|3033|2|38|54436.14|0.01|0.07|R|F|1994-10-10|1994-08-25|1994-10-28|NONE|MAIL|the quickly even dolph 3175|128833|1346|3|12|22341.96|0.09|0.07|R|F|1994-10-16|1994-09-15|1994-10-18|NONE|AIR|ter the pending deposits. 
slyly e 3175|84978|2503|4|14|27481.58|0.02|0.05|R|F|1994-10-21|1994-09-05|1994-11-15|NONE|MAIL|nt dependencies are quietly even 3175|17028|4532|5|47|44415.94|0.08|0.03|R|F|1994-08-08|1994-09-10|1994-08-21|COLLECT COD|REG AIR| final requests x-r 3175|174487|4488|6|44|68705.12|0.01|0.00|R|F|1994-09-26|1994-08-30|1994-10-24|TAKE BACK RETURN|MAIL|are carefully furiously ironic accounts. e 3175|864|3365|7|32|56475.52|0.01|0.02|R|F|1994-09-29|1994-09-20|1994-10-10|TAKE BACK RETURN|SHIP|lites sleep 3200|115903|3437|1|17|32621.30|0.10|0.00|N|O|1996-06-06|1996-04-21|1996-06-14|DELIVER IN PERSON|AIR|side of the furiously pendin 3200|165487|3036|2|27|41916.96|0.03|0.00|N|O|1996-05-07|1996-05-01|1996-05-09|TAKE BACK RETURN|REG AIR|as haggle furiously against the fluff 3200|130983|6010|3|36|72503.28|0.01|0.01|N|O|1996-03-22|1996-03-19|1996-03-30|DELIVER IN PERSON|FOB|f the carefu 3200|29905|4910|4|11|20183.90|0.10|0.02|N|O|1996-03-18|1996-03-21|1996-04-14|COLLECT COD|RAIL|osits sleep fur 3200|197667|7668|5|16|28234.56|0.05|0.00|N|O|1996-02-28|1996-03-13|1996-03-11|NONE|RAIL|ly against the quiet packages. blith 3200|174336|1888|6|25|35258.25|0.10|0.01|N|O|1996-02-08|1996-04-11|1996-03-06|COLLECT COD|FOB| slyly regular hockey players! pinto beans 3201|45786|5787|1|11|19049.58|0.10|0.06|A|F|1993-09-27|1993-08-29|1993-10-18|NONE|TRUCK|ing to the furiously expr 3201|117792|2815|2|27|48864.33|0.08|0.02|R|F|1993-08-31|1993-08-24|1993-09-08|TAKE BACK RETURN|FOB|deposits are slyly along 3201|118958|3981|3|50|98847.50|0.00|0.08|R|F|1993-10-27|1993-09-30|1993-11-16|COLLECT COD|TRUCK| deposits. express, ir 3202|182676|7713|1|30|52760.10|0.09|0.02|A|F|1993-03-18|1993-03-10|1993-03-23|COLLECT COD|SHIP|ven platelets. furiously final 3202|19748|2250|2|22|36690.28|0.01|0.02|R|F|1993-02-16|1993-02-16|1993-03-16|TAKE BACK RETURN|MAIL|the express packages. fu 3203|143488|3489|1|23|35224.04|0.01|0.07|N|O|1998-01-04|1998-01-12|1998-01-24|COLLECT COD|SHIP|uses. fluffily ironic pinto bea 3203|187707|2744|2|22|39483.40|0.03|0.03|N|O|1998-02-12|1998-01-01|1998-02-18|TAKE BACK RETURN|REG AIR|e the blithely regular accounts boost f 3204|11924|9428|1|10|18359.20|0.10|0.07|R|F|1993-01-27|1993-03-08|1993-01-29|COLLECT COD|SHIP|counts. bold 3204|6474|8975|2|39|53838.33|0.10|0.03|R|F|1993-02-11|1993-03-19|1993-02-28|TAKE BACK RETURN|MAIL|sits sleep theodolites. slyly bo 3205|67629|2642|1|7|11176.34|0.09|0.00|R|F|1992-07-05|1992-06-17|1992-07-07|NONE|SHIP|ly alongsi 3205|28792|8793|2|32|55065.28|0.08|0.03|A|F|1992-06-01|1992-07-10|1992-06-06|TAKE BACK RETURN|RAIL|lar accoun 3205|102663|5174|3|38|63295.08|0.10|0.08|A|F|1992-07-31|1992-06-03|1992-08-20|DELIVER IN PERSON|AIR|usly quiet accounts. slyly pending pinto 3205|55351|5352|4|10|13063.50|0.01|0.07|A|F|1992-06-18|1992-07-04|1992-07-16|COLLECT COD|RAIL| deposits cajole careful 3205|69090|1597|5|18|19063.62|0.03|0.03|A|F|1992-07-04|1992-06-14|1992-08-03|TAKE BACK RETURN|RAIL|symptotes. slyly even deposits ar 3205|194388|9427|6|19|28165.22|0.07|0.08|R|F|1992-05-28|1992-05-30|1992-06-05|COLLECT COD|AIR|yly pending packages snooz 3205|68994|1501|7|36|70667.64|0.06|0.03|A|F|1992-05-31|1992-06-19|1992-06-03|TAKE BACK RETURN|SHIP|s. 
ironic platelets above the s 3206|175184|2736|1|1|1259.18|0.07|0.05|N|O|1996-11-22|1996-10-16|1996-12-07|TAKE BACK RETURN|FOB|y unusual foxes cajole ab 3206|110775|3287|2|37|66073.49|0.07|0.01|N|O|1996-09-06|1996-10-31|1996-09-25|COLLECT COD|SHIP| quick theodolites hagg 3206|185591|5592|3|24|40238.16|0.00|0.08|N|O|1996-08-25|1996-10-01|1996-09-04|COLLECT COD|TRUCK|encies sleep deposits-- 3207|112470|4|1|2|2964.94|0.10|0.03|N|O|1998-06-15|1998-04-20|1998-06-21|COLLECT COD|MAIL|among the ironic, even packages 3207|70504|8026|2|42|61929.00|0.00|0.00|N|O|1998-05-02|1998-05-10|1998-06-01|NONE|SHIP|to the quickly special accounts? ironically 3207|151474|6505|3|17|25932.99|0.03|0.04|N|O|1998-03-27|1998-04-06|1998-03-28|COLLECT COD|RAIL|eep against the instructions. gifts hag 3207|18890|3893|4|32|57884.48|0.00|0.03|N|O|1998-06-17|1998-04-26|1998-07-07|TAKE BACK RETURN|SHIP|y across the slyly express foxes. bl 3207|82724|5233|5|8|13653.76|0.00|0.06|N|O|1998-06-13|1998-04-26|1998-07-11|COLLECT COD|SHIP|y. final pint 3207|133661|8688|6|32|54229.12|0.03|0.05|N|O|1998-04-19|1998-05-01|1998-05-08|COLLECT COD|FOB|l deposits wake beyond the carefully 3232|13217|3218|1|22|24864.62|0.10|0.01|A|F|1992-11-30|1992-12-09|1992-12-04|NONE|RAIL|thely. furio 3232|134360|6874|2|34|47408.24|0.07|0.04|R|F|1993-01-09|1992-11-14|1993-02-03|NONE|SHIP|old packages integrate quickly 3232|180226|7781|3|3|3918.66|0.04|0.06|R|F|1992-12-14|1992-12-11|1992-12-29|DELIVER IN PERSON|FOB|ily blithely ironic acco 3233|50584|585|1|23|35295.34|0.04|0.05|A|F|1994-12-07|1995-01-11|1994-12-26|NONE|AIR|pending instructions use after the carefu 3233|153443|989|2|6|8978.64|0.02|0.08|A|F|1994-12-06|1994-12-05|1994-12-07|TAKE BACK RETURN|REG AIR|requests are quickly above the slyly p 3233|99840|7368|3|2|3679.68|0.04|0.06|R|F|1995-01-03|1995-01-02|1995-01-21|TAKE BACK RETURN|AIR| across the bold packages 3233|8532|1033|4|25|36013.25|0.04|0.07|A|F|1994-11-24|1995-01-07|1994-12-11|NONE|RAIL|oss the pl 3234|78973|8974|1|45|87838.65|0.01|0.04|N|O|1996-05-15|1996-05-09|1996-06-02|DELIVER IN PERSON|TRUCK| express packages are carefully. f 3234|83220|745|2|23|27674.06|0.03|0.00|N|O|1996-05-29|1996-05-15|1996-06-17|DELIVER IN PERSON|AIR|d-- fluffily special packag 3234|74096|9111|3|16|17121.44|0.06|0.05|N|O|1996-06-10|1996-05-30|1996-06-18|COLLECT COD|RAIL|ithely ironic accounts wake along t 3234|121031|6056|4|50|52601.50|0.09|0.05|N|O|1996-06-11|1996-05-19|1996-06-18|NONE|MAIL|ly regular ideas according to the regula 3234|164274|9307|5|14|18735.78|0.01|0.07|N|O|1996-04-06|1996-05-30|1996-04-13|NONE|REG AIR|lithely regular f 3235|108187|698|1|9|10756.62|0.07|0.00|N|O|1995-11-17|1995-12-24|1995-11-30|COLLECT COD|AIR|l courts sleep quickly slyly 3235|94886|4887|2|43|80877.84|0.10|0.07|N|O|1995-12-25|1996-01-23|1996-01-09|COLLECT COD|MAIL|ckly final instru 3235|137620|7621|3|29|48070.98|0.06|0.06|N|O|1996-01-28|1995-12-26|1996-02-12|DELIVER IN PERSON|RAIL|e fluffy pinto bea 3235|177363|7364|4|23|33128.28|0.00|0.01|N|O|1996-02-16|1996-01-05|1996-03-07|DELIVER IN PERSON|SHIP|ldly ironic pinto beans 3236|116245|1268|1|10|12612.40|0.06|0.05|N|O|1996-11-15|1996-12-14|1996-11-29|TAKE BACK RETURN|AIR|arefully. fluffily reg 3236|121623|4136|2|21|34537.02|0.01|0.07|N|O|1996-12-23|1996-12-12|1997-01-21|NONE|AIR| final pinto 3236|117099|9611|3|7|7812.63|0.07|0.01|N|O|1996-12-27|1996-12-18|1997-01-24|DELIVER IN PERSON|SHIP|dolites. slyly unus 3237|10711|3213|1|11|17838.81|0.02|0.07|A|F|1992-08-03|1992-07-31|1992-08-13|TAKE BACK RETURN|AIR|es. 
permanently express platelets besid 3238|71612|1613|1|12|19003.32|0.06|0.01|R|F|1993-03-06|1993-05-08|1993-04-01|DELIVER IN PERSON|AIR|ackages affix furiously. furiously bol 3238|172398|7433|2|26|38230.14|0.01|0.06|A|F|1993-02-25|1993-04-04|1993-03-20|TAKE BACK RETURN|REG AIR|g accounts sleep furiously ironic attai 3238|80096|2605|3|1|1076.09|0.00|0.04|R|F|1993-05-17|1993-04-18|1993-05-27|NONE|SHIP|wake alongs 3239|44485|9494|1|50|71474.00|0.05|0.01|N|O|1998-02-09|1998-04-02|1998-02-22|NONE|FOB|d blithely stea 3239|44840|9849|2|43|76748.12|0.01|0.06|N|O|1998-01-15|1998-03-12|1998-01-29|COLLECT COD|REG AIR|y. bold pinto beans use 3239|12947|5449|3|13|24179.22|0.01|0.05|N|O|1998-02-10|1998-02-19|1998-02-25|DELIVER IN PERSON|MAIL|r deposits solve fluf 3239|194825|4826|4|26|49915.32|0.03|0.05|N|O|1998-01-21|1998-03-21|1998-02-08|DELIVER IN PERSON|SHIP|ngly pending platelets are fluff 3239|11739|6742|5|31|51172.63|0.10|0.08|N|O|1998-04-14|1998-03-24|1998-04-17|DELIVER IN PERSON|FOB|foxes. pendin 3264|199532|9533|1|39|63629.67|0.06|0.06|N|O|1996-11-07|1996-12-12|1996-11-20|TAKE BACK RETURN|REG AIR|sleep carefully after the slyly final 3264|130251|252|2|34|43562.50|0.00|0.01|N|O|1997-01-03|1997-01-06|1997-01-29|TAKE BACK RETURN|REG AIR|rns haggle carefully. blit 3264|124205|1742|3|11|13521.20|0.09|0.03|N|O|1996-12-11|1996-12-19|1996-12-15|DELIVER IN PERSON|SHIP|regular packages 3264|108626|8627|4|24|39230.88|0.09|0.07|N|O|1997-01-07|1996-12-13|1997-01-11|TAKE BACK RETURN|RAIL|ctions. quick 3264|62126|2127|5|6|6528.72|0.04|0.03|N|O|1996-11-10|1996-12-05|1996-11-22|TAKE BACK RETURN|SHIP|press packages. ironical 3264|140345|346|6|43|59569.62|0.06|0.06|N|O|1997-01-17|1997-01-24|1997-02-01|TAKE BACK RETURN|TRUCK|leep at the blithely bold 3265|24887|9892|1|8|14495.04|0.06|0.02|A|F|1992-09-01|1992-09-12|1992-09-27|DELIVER IN PERSON|TRUCK|thely ironic requests sleep slyly-- i 3265|71780|4288|2|7|12262.46|0.09|0.00|R|F|1992-09-16|1992-09-04|1992-10-14|DELIVER IN PERSON|MAIL|he forges. fluffily regular asym 3265|190046|5085|3|28|31809.12|0.09|0.08|A|F|1992-10-22|1992-08-23|1992-10-25|NONE|RAIL|n requests. quickly final dinos 3266|63179|8192|1|31|35407.27|0.09|0.02|N|O|1995-06-19|1995-05-04|1995-07-06|COLLECT COD|MAIL|grate among the quickly express deposits 3266|37805|309|2|43|74940.40|0.06|0.07|R|F|1995-05-04|1995-05-30|1995-05-11|COLLECT COD|AIR|ular asymptotes use careful 3267|184283|1838|1|33|45120.24|0.06|0.01|N|O|1997-03-30|1997-03-25|1997-04-23|TAKE BACK RETURN|AIR|es boost. 3268|95677|5678|1|1|1672.67|0.06|0.08|A|F|1994-09-12|1994-08-31|1994-09-16|NONE|TRUCK|. ironic, bold requests use carefull 3268|41914|4419|2|40|74236.40|0.08|0.01|R|F|1994-06-30|1994-08-22|1994-07-25|COLLECT COD|FOB|ly. bold, eve 3269|160745|3262|1|40|72229.60|0.02|0.07|N|O|1996-06-11|1996-05-06|1996-06-15|DELIVER IN PERSON|TRUCK|es. pending d 3269|37202|4712|2|46|52403.20|0.00|0.02|N|O|1996-04-21|1996-04-12|1996-05-10|DELIVER IN PERSON|MAIL|final asymptotes nag 3269|43059|572|3|39|39079.95|0.02|0.03|N|O|1996-03-13|1996-05-26|1996-03-19|COLLECT COD|MAIL|he express packages? 3269|82008|7025|4|37|36630.00|0.07|0.05|N|O|1996-06-14|1996-04-27|1996-07-07|NONE|MAIL|egular requests. carefully un 3269|92446|9974|5|42|60414.48|0.09|0.05|N|O|1996-03-19|1996-04-24|1996-04-18|COLLECT COD|TRUCK| the special packages. 3269|130091|7631|6|16|17937.44|0.01|0.08|N|O|1996-03-03|1996-04-06|1996-03-06|NONE|RAIL|s cajole. 
silent deposits are f 3270|34060|6564|1|11|10934.66|0.07|0.06|N|O|1997-07-29|1997-08-11|1997-08-05|TAKE BACK RETURN|AIR| solve at the regular deposits. 3270|37156|4666|2|44|48098.60|0.10|0.05|N|O|1997-07-20|1997-08-15|1997-08-04|DELIVER IN PERSON|SHIP| accounts. carefully even 3270|64294|6801|3|20|25165.80|0.01|0.02|N|O|1997-08-26|1997-07-31|1997-08-30|DELIVER IN PERSON|FOB|en accounts among the c 3270|188091|5646|4|29|34193.61|0.06|0.05|N|O|1997-07-01|1997-07-23|1997-07-10|TAKE BACK RETURN|MAIL|sly regular asymptotes. slyly dog 3270|33740|1250|5|32|53559.68|0.03|0.00|N|O|1997-09-23|1997-08-17|1997-09-27|NONE|REG AIR|promise carefully. 3270|56984|9490|6|29|56288.42|0.01|0.04|N|O|1997-08-22|1997-08-17|1997-09-06|COLLECT COD|RAIL|ptotes nag above the quickly bold deposits 3270|116753|9265|7|9|15927.75|0.06|0.08|N|O|1997-08-14|1997-08-11|1997-09-09|DELIVER IN PERSON|SHIP|ual packages 3271|56433|3949|1|30|41682.90|0.01|0.04|A|F|1992-01-16|1992-03-20|1992-01-17|DELIVER IN PERSON|AIR|r the unusual Tiresia 3271|53210|3211|2|18|20937.78|0.09|0.06|R|F|1992-05-01|1992-03-28|1992-05-29|DELIVER IN PERSON|FOB| packages eat around the furiously regul 3271|94870|4871|3|14|26108.18|0.05|0.01|A|F|1992-02-24|1992-02-14|1992-03-23|NONE|AIR|ending, even packa 3271|63369|8382|4|29|38638.44|0.07|0.04|A|F|1992-03-10|1992-02-05|1992-03-14|COLLECT COD|MAIL|lar instructions. carefully regular 3296|83475|1000|1|12|17501.64|0.06|0.07|R|F|1994-12-08|1994-12-14|1994-12-24|COLLECT COD|AIR|y about the slyly bold pinto bea 3296|148438|5981|2|31|46079.33|0.08|0.00|R|F|1995-01-26|1994-12-25|1995-02-16|NONE|REG AIR|ainst the furi 3296|184182|4183|3|29|36719.22|0.02|0.04|A|F|1995-01-12|1994-11-26|1995-02-06|DELIVER IN PERSON|SHIP|ss ideas are reg 3296|139691|4718|4|47|81342.43|0.06|0.00|A|F|1994-11-08|1994-12-20|1994-11-30|NONE|FOB|egular deposits. quic 3296|176498|1533|5|16|25191.84|0.06|0.02|R|F|1995-01-11|1994-12-27|1995-01-12|DELIVER IN PERSON|SHIP|kages cajole carefully 3296|196504|4062|6|40|64020.00|0.00|0.04|A|F|1994-12-28|1994-12-08|1995-01-13|COLLECT COD|REG AIR|ronic ideas across the 3296|35755|3265|7|6|10144.50|0.02|0.01|R|F|1995-01-03|1994-12-23|1995-01-27|TAKE BACK RETURN|AIR|carefully fur 3297|133262|5776|1|10|12952.60|0.10|0.04|A|F|1992-12-14|1993-01-21|1992-12-26|NONE|SHIP|ironic idea 3298|148414|929|1|9|13161.69|0.01|0.06|N|O|1996-08-15|1996-05-24|1996-09-12|COLLECT COD|REG AIR|ly final accou 3298|185925|5926|2|27|54294.84|0.06|0.06|N|O|1996-07-10|1996-05-21|1996-07-15|DELIVER IN PERSON|FOB|lar packages. regular deposit 3298|28881|6388|3|25|45247.00|0.10|0.08|N|O|1996-06-30|1996-05-31|1996-07-23|COLLECT COD|SHIP|ly express f 3298|190710|8268|4|1|1800.71|0.10|0.03|N|O|1996-07-31|1996-05-23|1996-08-24|TAKE BACK RETURN|FOB|refully regular requ 3299|182485|40|1|40|62699.20|0.03|0.02|A|F|1994-03-21|1994-03-23|1994-04-12|COLLECT COD|AIR|lyly even request 3300|128765|1278|1|3|5381.28|0.07|0.02|N|O|1995-11-01|1995-10-02|1995-11-20|NONE|REG AIR|g according to the dugouts. 
caref 3300|148043|8044|2|23|25093.92|0.02|0.02|N|O|1995-08-17|1995-09-03|1995-09-04|COLLECT COD|TRUCK|he fluffily final a 3301|168995|1512|1|45|92879.55|0.04|0.05|A|F|1994-11-19|1994-10-27|1994-11-24|TAKE BACK RETURN|FOB|nusual, final excuses after the entici 3302|35376|7880|1|45|59011.65|0.09|0.00|N|O|1996-01-24|1995-12-16|1996-02-13|COLLECT COD|FOB|counts use quickl 3303|183830|3831|1|25|47845.75|0.06|0.01|N|O|1998-03-25|1998-01-31|1998-04-12|NONE|SHIP|lly regular pi 3303|20879|880|2|15|26998.05|0.04|0.06|N|O|1998-01-29|1998-01-22|1998-02-21|COLLECT COD|SHIP| detect sly 3303|98533|8534|3|37|56666.61|0.05|0.02|N|O|1998-02-16|1998-03-07|1998-02-18|TAKE BACK RETURN|TRUCK| carefully ironic asympt 3303|35272|2782|4|26|31389.02|0.09|0.00|N|O|1998-01-18|1998-03-11|1998-02-11|DELIVER IN PERSON|REG AIR|ickly permanent requests w 3328|112585|5097|1|6|9585.48|0.03|0.08|A|F|1993-03-07|1993-01-25|1993-03-29|COLLECT COD|TRUCK|ffily even instructions detect b 3328|4400|1901|2|23|30001.20|0.01|0.06|R|F|1993-01-12|1993-02-07|1993-01-30|TAKE BACK RETURN|MAIL|y. careful 3328|138924|8925|3|44|86368.48|0.05|0.00|R|F|1992-12-03|1992-12-19|1992-12-09|TAKE BACK RETURN|FOB|dly quickly final foxes? re 3328|94005|1533|4|42|41958.00|0.01|0.05|R|F|1992-11-24|1992-12-20|1992-12-06|DELIVER IN PERSON|AIR|ronic requests 3328|130322|7862|5|25|33808.00|0.05|0.00|R|F|1993-01-28|1993-01-04|1993-01-31|NONE|RAIL|e unusual, r 3329|137884|398|1|36|69187.68|0.09|0.08|N|O|1995-08-06|1995-08-03|1995-08-14|DELIVER IN PERSON|TRUCK|ts at the re 3329|5898|3399|2|9|16235.01|0.00|0.02|N|O|1995-07-24|1995-08-02|1995-08-01|COLLECT COD|MAIL|lly final depo 3329|122108|2109|3|1|1130.10|0.04|0.08|N|O|1995-08-22|1995-09-28|1995-09-09|COLLECT COD|REG AIR|regular packages are carefull 3330|19095|4098|1|49|49690.41|0.05|0.01|R|F|1995-03-02|1995-03-03|1995-03-16|DELIVER IN PERSON|TRUCK|haggle carefully alongside of the bold r 3331|63261|780|1|9|11018.34|0.08|0.07|A|F|1993-07-18|1993-07-03|1993-08-16|TAKE BACK RETURN|AIR|odolites. bold accounts 3331|20445|446|2|38|51886.72|0.06|0.04|R|F|1993-07-24|1993-06-22|1993-08-23|NONE|AIR|ymptotes haggle across the ca 3331|2546|47|3|26|37662.04|0.09|0.05|A|F|1993-08-05|1993-07-17|1993-08-29|DELIVER IN PERSON|MAIL|p asymptotes. carefully unusual in 3332|83771|3772|1|28|49133.56|0.10|0.02|R|F|1994-12-30|1995-01-16|1995-01-16|COLLECT COD|FOB|s against the carefully special multipl 3332|135964|3504|2|21|41999.16|0.08|0.04|R|F|1995-02-04|1995-01-08|1995-02-06|COLLECT COD|MAIL| quick packages sle 3332|133508|8535|3|27|41620.50|0.03|0.02|A|F|1994-12-10|1995-01-14|1994-12-11|TAKE BACK RETURN|FOB|ording to the slyly regula 3333|149926|7469|1|27|53349.84|0.06|0.08|A|F|1992-12-06|1992-10-26|1992-12-07|COLLECT COD|SHIP|s dazzle fluffil 3333|198900|6458|2|36|71960.40|0.08|0.07|R|F|1992-11-20|1992-11-06|1992-12-16|TAKE BACK RETURN|FOB|foxes sleep neve 3333|107224|9735|3|38|46786.36|0.05|0.05|A|F|1992-10-30|1992-11-03|1992-11-04|NONE|MAIL|ccounts promise bl 3333|112230|2231|4|49|60869.27|0.07|0.07|R|F|1992-10-02|1992-11-30|1992-10-12|DELIVER IN PERSON|MAIL|riously ironic r 3333|42929|442|5|45|84236.40|0.07|0.08|A|F|1992-10-04|1992-11-08|1992-10-27|COLLECT COD|SHIP|dolites. quickly r 3334|186434|1471|1|20|30408.60|0.04|0.03|N|O|1996-05-21|1996-04-08|1996-05-26|TAKE BACK RETURN|AIR|uses nag furiously. 
instructions are ca 3334|189013|4050|2|7|7714.07|0.09|0.07|N|O|1996-04-28|1996-04-08|1996-05-25|NONE|SHIP|nts sublate slyly express pack 3335|104399|9420|1|13|18244.07|0.06|0.07|N|O|1996-01-20|1995-12-20|1996-02-09|COLLECT COD|REG AIR|out the special asymptotes 3335|30734|735|2|44|73248.12|0.07|0.02|N|O|1996-01-05|1995-12-25|1996-01-18|DELIVER IN PERSON|SHIP|r packages cajole ac 3335|139710|2224|3|16|27995.36|0.01|0.06|N|O|1995-10-18|1995-12-08|1995-11-03|DELIVER IN PERSON|SHIP|g packages. carefully regular reque 3335|89353|6878|4|47|63090.45|0.10|0.03|N|O|1995-12-02|1995-11-19|1995-12-27|NONE|MAIL| quickly special ideas. 3360|173707|6225|1|31|55201.70|0.08|0.04|N|O|1998-04-24|1998-04-12|1998-05-23|COLLECT COD|REG AIR|quests. carefully even deposits wake acros 3360|90182|2692|2|29|33993.22|0.00|0.06|N|O|1998-04-15|1998-02-25|1998-05-13|TAKE BACK RETURN|FOB|press asymptotes. furiously final 3360|81355|1356|3|39|52117.65|0.08|0.03|N|O|1998-04-09|1998-04-20|1998-05-05|DELIVER IN PERSON|REG AIR|s. blithely express pinto bean 3360|116741|4275|4|29|50974.46|0.10|0.01|N|O|1998-05-19|1998-03-03|1998-06-09|TAKE BACK RETURN|FOB|hely gifts. spe 3360|57076|9582|5|4|4132.28|0.08|0.07|N|O|1998-02-27|1998-03-23|1998-03-28|COLLECT COD|SHIP|ly busy inst 3360|70055|2563|6|42|43052.10|0.04|0.01|N|O|1998-05-07|1998-04-18|1998-06-04|DELIVER IN PERSON|FOB|ages cajole. pending, 3361|143748|3749|1|6|10750.44|0.02|0.02|R|F|1992-10-02|1992-10-25|1992-10-05|DELIVER IN PERSON|FOB| packages sleep. furiously unus 3361|170012|5047|2|33|35706.33|0.01|0.02|R|F|1992-11-09|1992-10-15|1992-11-11|TAKE BACK RETURN|MAIL|uriously ironic accounts. ironic, ir 3361|190085|7643|3|31|36427.48|0.06|0.04|R|F|1992-08-29|1992-10-13|1992-09-08|NONE|FOB|ts. pending, regular accounts sleep fur 3362|21372|8879|1|14|18107.18|0.06|0.05|N|O|1995-08-01|1995-09-06|1995-08-22|NONE|FOB|even Tires 3362|194902|4903|2|41|81872.90|0.05|0.03|N|O|1995-10-31|1995-09-04|1995-11-17|COLLECT COD|REG AIR|ake alongside of the 3362|114822|7334|3|40|73472.80|0.05|0.06|N|O|1995-08-19|1995-10-17|1995-09-05|TAKE BACK RETURN|FOB|packages haggle furi 3362|1127|6128|4|3|3084.36|0.03|0.01|N|O|1995-08-26|1995-09-02|1995-09-17|NONE|SHIP|its cajole blithely excuses. de 3362|137109|7110|5|36|41259.60|0.06|0.00|N|O|1995-10-05|1995-08-28|1995-11-03|TAKE BACK RETURN|RAIL|es against the quickly permanent pint 3362|187847|7848|6|46|89002.64|0.09|0.05|N|O|1995-08-02|1995-10-12|1995-08-28|COLLECT COD|REG AIR|ly bold packages. regular deposits cajol 3363|9770|2271|1|42|70550.34|0.00|0.08|N|O|1995-11-09|1995-11-25|1995-11-15|TAKE BACK RETURN|RAIL| blithely final ideas nag after 3363|190181|5220|2|21|26694.78|0.08|0.08|N|O|1995-12-10|1995-10-28|1995-12-28|COLLECT COD|RAIL|he regular, brave deposits. f 3363|158620|1136|3|2|3357.24|0.01|0.07|N|O|1996-01-22|1995-12-01|1996-02-18|TAKE BACK RETURN|SHIP|uickly bold ide 3363|112620|154|4|20|32652.40|0.07|0.06|N|O|1995-12-11|1995-11-15|1995-12-21|COLLECT COD|MAIL|carefully quiet excuses wake. sl 3363|199176|6734|5|4|5100.68|0.00|0.08|N|O|1995-10-30|1995-11-17|1995-11-22|COLLECT COD|FOB| ironic dependencie 3364|89208|6733|1|49|58662.80|0.03|0.05|N|O|1997-09-17|1997-08-23|1997-10-06|NONE|SHIP|d accounts? caref 3364|110107|108|2|38|42449.80|0.02|0.02|N|O|1997-08-30|1997-09-12|1997-09-27|COLLECT COD|REG AIR| slyly express 3364|155969|8485|3|10|20249.60|0.00|0.01|N|O|1997-08-10|1997-08-24|1997-08-15|TAKE BACK RETURN|SHIP|g the accounts. 
final, busy accounts wi 3364|159356|4387|4|7|9907.45|0.10|0.05|N|O|1997-07-09|1997-08-01|1997-07-16|NONE|TRUCK|furiously regular ideas haggle furiously b 3364|80467|5484|5|3|4342.38|0.01|0.00|N|O|1997-10-19|1997-08-15|1997-10-28|TAKE BACK RETURN|TRUCK|c theodolites. blithely ir 3365|150138|5169|1|37|43960.81|0.02|0.08|R|F|1994-12-22|1995-02-07|1995-01-20|TAKE BACK RETURN|SHIP|requests. quickly pending instructions a 3365|166378|3927|2|37|53441.69|0.07|0.08|A|F|1994-11-24|1995-01-09|1994-11-27|NONE|REG AIR|oze blithely. furiously ironic theodolit 3365|114118|4119|3|13|14717.43|0.09|0.02|R|F|1995-02-25|1995-01-31|1995-03-16|NONE|RAIL|pths wake r 3365|175036|2588|4|49|54440.47|0.02|0.07|R|F|1995-01-03|1995-01-01|1995-01-18|COLLECT COD|MAIL|lyly unusual asymptotes. final 3365|15264|267|5|2|2358.52|0.00|0.03|R|F|1995-02-04|1994-12-30|1995-03-06|TAKE BACK RETURN|FOB|es cajole fluffily pe 3365|125386|411|6|24|33873.12|0.01|0.00|R|F|1995-02-27|1995-01-09|1995-03-27|DELIVER IN PERSON|REG AIR|into beans? carefully regula 3366|39754|9755|1|4|6775.00|0.07|0.01|N|O|1997-05-20|1997-06-25|1997-06-03|DELIVER IN PERSON|AIR| carefully about 3366|135173|7687|2|9|10873.53|0.00|0.08|N|O|1997-06-02|1997-07-05|1997-06-26|COLLECT COD|REG AIR|ackages sleep carefully across the bli 3367|40397|7910|1|27|36109.53|0.01|0.03|A|F|1993-04-13|1993-03-16|1993-04-26|NONE|RAIL|kly even instructions caj 3367|140913|8456|2|34|66432.94|0.04|0.08|A|F|1993-03-30|1993-02-23|1993-04-11|COLLECT COD|MAIL| accounts wake slyly 3367|119437|4460|3|38|55344.34|0.03|0.03|R|F|1993-03-13|1993-02-12|1993-03-31|NONE|RAIL|even packages sleep blithely slyly expr 3392|170472|5507|1|40|61698.80|0.01|0.01|N|O|1996-02-18|1995-12-16|1996-02-26|COLLECT COD|MAIL|ress instructions affix carefully. fur 3392|122112|7137|2|13|14743.43|0.09|0.02|N|O|1995-11-26|1996-01-17|1995-12-01|NONE|MAIL|across the fluffily bold deposits. 3392|126515|4052|3|34|52411.34|0.10|0.08|N|O|1996-01-20|1996-01-21|1996-01-24|DELIVER IN PERSON|MAIL|e carefully even braids. 3392|123260|8285|4|7|8982.82|0.08|0.05|N|O|1995-12-07|1996-01-09|1995-12-29|TAKE BACK RETURN|RAIL|as. express, final accounts dou 3393|116495|4029|1|16|24183.84|0.01|0.00|N|O|1995-07-17|1995-08-19|1995-08-04|COLLECT COD|TRUCK|uses. instructions after the blithely 3393|124847|9872|2|44|82360.96|0.08|0.04|N|O|1995-10-16|1995-08-05|1995-11-01|NONE|AIR|ld requests hag 3393|96369|3897|3|25|34134.00|0.07|0.02|N|O|1995-10-17|1995-08-12|1995-11-11|DELIVER IN PERSON|MAIL|ng excuses 3393|71005|3513|4|48|46848.00|0.06|0.06|N|O|1995-07-12|1995-09-15|1995-08-02|NONE|FOB| blithely final reques 3393|177894|2929|5|37|72959.93|0.07|0.02|N|O|1995-10-16|1995-08-19|1995-10-19|COLLECT COD|AIR|ss the slyly ironic pinto beans. ironic, 3393|61866|9385|6|17|31073.62|0.04|0.01|N|O|1995-08-15|1995-09-07|1995-09-10|COLLECT COD|MAIL|kly ironic deposits could 3394|154659|4660|1|33|56550.45|0.07|0.08|N|O|1996-08-07|1996-07-17|1996-09-02|TAKE BACK RETURN|SHIP|ideas alongside of th 3394|145731|8246|2|43|76399.39|0.08|0.03|N|O|1996-08-23|1996-07-20|1996-08-25|COLLECT COD|RAIL|hockey players. slyly regular requests afte 3394|87945|454|3|26|50256.44|0.01|0.00|N|O|1996-08-08|1996-06-12|1996-09-05|TAKE BACK RETURN|RAIL|its use furiously. even, even account 3394|80118|119|4|14|15373.54|0.08|0.00|N|O|1996-06-02|1996-07-02|1996-06-19|COLLECT COD|MAIL|e furiously final theodolites. 
furio 3394|126483|6484|5|30|45284.40|0.04|0.06|N|O|1996-05-12|1996-07-24|1996-05-19|COLLECT COD|REG AIR|t ideas according to the fluffily iro 3394|183447|8484|6|14|21426.16|0.05|0.05|N|O|1996-06-18|1996-06-24|1996-07-17|NONE|REG AIR|arefully regular do 3395|141634|1635|1|21|35188.23|0.03|0.06|R|F|1994-12-19|1995-01-13|1994-12-25|TAKE BACK RETURN|SHIP| careful dep 3395|35345|2855|2|38|48652.92|0.01|0.07|R|F|1995-01-13|1995-01-13|1995-01-25|COLLECT COD|SHIP| silent accounts are blithely 3395|42169|2170|3|43|47779.88|0.06|0.07|A|F|1994-12-13|1995-01-07|1994-12-14|COLLECT COD|AIR|ckages above the furiously regu 3395|121513|6538|4|39|59845.89|0.05|0.07|R|F|1994-12-03|1995-01-17|1994-12-10|NONE|AIR|riously unusual theodolites. fur 3396|127938|2963|1|34|66841.62|0.00|0.06|A|F|1994-05-30|1994-08-16|1994-06-11|NONE|AIR|. slyly unusual packages wak 3396|48322|827|2|43|54623.76|0.03|0.08|A|F|1994-07-03|1994-08-09|1994-07-14|TAKE BACK RETURN|MAIL|cial packages cajole blithely around the 3396|137579|93|3|9|14549.13|0.01|0.06|R|F|1994-07-01|1994-08-18|1994-07-21|DELIVER IN PERSON|AIR|usly special foxes. accounts wake careful 3396|74334|1856|4|32|41866.56|0.06|0.02|R|F|1994-08-07|1994-08-10|1994-09-05|COLLECT COD|TRUCK|osits are slyly. final, bold foxes s 3396|125022|47|5|27|28269.54|0.02|0.01|A|F|1994-09-14|1994-07-26|1994-09-28|DELIVER IN PERSON|FOB| theodolites 3396|38687|8688|6|18|29262.24|0.10|0.00|A|F|1994-07-27|1994-06-26|1994-08-25|TAKE BACK RETURN|REG AIR|l requests haggle furiously along the fur 3396|197228|4786|7|31|41081.82|0.05|0.06|A|F|1994-06-07|1994-06-23|1994-06-19|TAKE BACK RETURN|REG AIR|l, express pinto beans. quic 3397|194289|9328|1|8|11066.24|0.07|0.01|A|F|1994-08-05|1994-08-11|1994-08-08|DELIVER IN PERSON|RAIL|y final foxes 3397|12042|9546|2|11|10494.44|0.00|0.07|A|F|1994-07-29|1994-09-18|1994-08-12|DELIVER IN PERSON|REG AIR|iously careful packages. s 3397|183905|6424|3|1|1988.90|0.07|0.05|R|F|1994-08-03|1994-07-30|1994-08-28|NONE|RAIL| regular packag 3397|85592|5593|4|33|52060.47|0.05|0.01|R|F|1994-09-04|1994-08-06|1994-09-22|COLLECT COD|RAIL|gular accounts. blithely re 3397|131561|1562|5|28|44591.68|0.05|0.05|R|F|1994-07-13|1994-08-26|1994-07-17|NONE|TRUCK|counts around the final reques 3398|172957|2958|1|1|2029.95|0.01|0.08|N|O|1996-11-22|1996-11-16|1996-12-09|COLLECT COD|MAIL| blithely final deposits. 3399|133853|8880|1|28|52831.80|0.09|0.05|N|O|1995-06-29|1995-05-19|1995-07-12|COLLECT COD|AIR|oggedly final theodolites grow. fi 3399|54517|4518|2|8|11772.08|0.01|0.05|A|F|1995-05-15|1995-04-19|1995-06-05|COLLECT COD|TRUCK|s use carefully carefully ir 3399|66486|1499|3|3|4357.44|0.03|0.00|N|F|1995-06-16|1995-04-04|1995-06-23|NONE|SHIP|hely pending dugouts 3399|13659|3660|4|21|33025.65|0.09|0.06|A|F|1995-03-12|1995-05-18|1995-03-28|TAKE BACK RETURN|MAIL|se final courts. exc 3424|180866|8421|1|39|75927.54|0.06|0.07|N|O|1996-11-03|1996-11-08|1996-11-23|DELIVER IN PERSON|MAIL|bits boost closely slyly p 3425|119527|9528|1|11|17011.72|0.03|0.08|N|O|1996-04-24|1996-05-29|1996-05-23|DELIVER IN PERSON|FOB|ckly final deposits use quickly? 3425|78421|5943|2|37|51778.54|0.06|0.03|N|O|1996-06-04|1996-05-09|1996-06-12|NONE|SHIP|as sleep carefully into the caref 3425|13460|964|3|8|10987.68|0.06|0.08|N|O|1996-07-22|1996-06-07|1996-07-26|TAKE BACK RETURN|AIR|iously regular theodolites wake. 
s 3425|18914|8915|4|37|67817.67|0.04|0.01|N|O|1996-07-10|1996-05-10|1996-08-02|NONE|SHIP|ngside of the furiously thin dol 3425|78322|830|5|48|62415.36|0.08|0.04|N|O|1996-04-14|1996-05-25|1996-04-23|TAKE BACK RETURN|AIR|uctions wake fluffily. care 3425|147931|7932|6|24|47494.32|0.05|0.04|N|O|1996-04-22|1996-06-24|1996-04-25|TAKE BACK RETURN|AIR|ajole blithely sl 3426|109706|4727|1|20|34314.00|0.05|0.04|N|O|1996-11-10|1996-12-24|1996-12-01|COLLECT COD|FOB|sits cajole blit 3426|13356|860|2|19|24117.65|0.10|0.08|N|O|1996-11-02|1997-01-13|1996-11-15|DELIVER IN PERSON|RAIL|slyly special packages oug 3426|66426|8933|3|19|26455.98|0.08|0.05|N|O|1996-12-07|1996-12-15|1996-12-14|DELIVER IN PERSON|FOB|c accounts cajole carefu 3426|5263|5264|4|9|10514.34|0.09|0.05|N|O|1996-12-24|1997-01-14|1997-01-13|NONE|FOB|pecial theodolites haggle fluf 3426|48579|1084|5|31|47354.67|0.07|0.08|N|O|1996-11-11|1996-12-10|1996-12-10|DELIVER IN PERSON|SHIP| even sentiment 3427|53394|3395|1|41|55242.99|0.10|0.01|N|O|1997-09-11|1997-07-03|1997-10-04|COLLECT COD|RAIL|s the carefully 3427|188918|3955|2|24|48165.84|0.02|0.04|N|O|1997-07-01|1997-07-28|1997-07-30|NONE|SHIP|y bold, sly deposits. pendi 3427|138997|1511|3|40|81439.60|0.06|0.05|N|O|1997-06-12|1997-08-19|1997-06-23|COLLECT COD|MAIL|patterns cajole ca 3427|118088|3111|4|31|34288.48|0.08|0.04|N|O|1997-08-12|1997-07-26|1997-08-25|COLLECT COD|RAIL|s are carefull 3428|197651|7652|1|4|6994.60|0.00|0.03|N|O|1996-05-09|1996-06-13|1996-06-02|NONE|REG AIR|sly pending requests int 3428|117436|7437|2|35|50870.05|0.02|0.03|N|O|1996-05-01|1996-06-07|1996-05-20|COLLECT COD|TRUCK|ly regular pinto beans sleep 3428|135307|5308|3|47|63088.10|0.07|0.05|N|O|1996-04-16|1996-06-08|1996-05-05|NONE|REG AIR|y final pinto 3429|136768|1795|1|48|86628.48|0.06|0.02|N|O|1997-04-08|1997-03-09|1997-04-25|TAKE BACK RETURN|SHIP| haggle furiously ir 3429|58421|927|2|15|20691.30|0.03|0.04|N|O|1997-02-04|1997-03-09|1997-03-01|TAKE BACK RETURN|TRUCK|beans are fu 3429|68561|6080|3|10|15295.60|0.05|0.07|N|O|1997-01-19|1997-02-22|1997-01-25|TAKE BACK RETURN|REG AIR|ackages. quickly e 3429|88366|875|4|28|37922.08|0.10|0.07|N|O|1997-01-30|1997-03-18|1997-02-17|TAKE BACK RETURN|AIR|nstructions boost. thin 3429|164054|4055|5|45|50312.25|0.10|0.00|N|O|1997-04-21|1997-03-08|1997-05-05|COLLECT COD|REG AIR|ites poach a 3430|188117|3154|1|2|2410.22|0.07|0.06|R|F|1995-03-07|1995-01-28|1995-03-30|TAKE BACK RETURN|MAIL|sh furiously according to the evenly e 3430|80604|5621|2|32|50707.20|0.08|0.00|R|F|1995-01-17|1995-01-28|1995-02-06|NONE|TRUCK|egular instruction 3430|96381|6382|3|41|56472.58|0.06|0.04|R|F|1995-02-18|1995-02-21|1995-03-11|TAKE BACK RETURN|AIR|cuses. silent excuses h 3430|64955|9968|4|50|95997.50|0.01|0.00|R|F|1994-12-15|1995-03-03|1994-12-24|COLLECT COD|REG AIR|ironic theodolites. carefully regular pac 3430|94745|2273|5|5|8698.70|0.05|0.05|A|F|1995-04-02|1995-02-12|1995-04-08|DELIVER IN PERSON|FOB|even accounts haggle slyly bol 3430|170718|5753|6|15|26830.65|0.08|0.07|A|F|1995-02-01|1995-03-12|1995-02-04|COLLECT COD|SHIP|cajole around the accounts. 
qui 3430|51323|6334|7|23|29309.36|0.09|0.08|A|F|1995-03-06|1995-03-01|1995-03-10|COLLECT COD|MAIL|eas according to the 3431|179230|6782|1|41|53678.43|0.03|0.06|A|F|1993-09-26|1993-10-13|1993-10-22|NONE|AIR| sleep carefully ironically special 3456|110400|5423|1|34|47953.60|0.10|0.06|A|F|1993-08-29|1993-08-26|1993-09-07|TAKE BACK RETURN|SHIP|usy pinto beans b 3457|181229|6266|1|29|37996.38|0.03|0.02|R|F|1995-05-12|1995-07-13|1995-06-05|NONE|TRUCK|refully final excuses wake 3457|105915|5916|2|22|42260.02|0.06|0.01|N|O|1995-06-23|1995-06-16|1995-06-29|NONE|SHIP|packages nag furiously against 3457|108003|514|3|7|7077.00|0.07|0.08|N|O|1995-08-14|1995-07-06|1995-08-18|COLLECT COD|SHIP| pending accounts along the 3457|983|984|4|24|45215.52|0.07|0.07|N|O|1995-08-03|1995-05-30|1995-08-14|TAKE BACK RETURN|REG AIR|tructions haggle alongsid 3457|108595|3616|5|42|67350.78|0.05|0.01|A|F|1995-06-12|1995-06-14|1995-06-14|COLLECT COD|MAIL|riously final instruc 3457|143724|6239|6|45|79547.40|0.08|0.01|N|O|1995-08-12|1995-07-18|1995-08-23|TAKE BACK RETURN|SHIP| packages. care 3457|166641|1674|7|9|15368.76|0.04|0.00|R|F|1995-05-29|1995-06-30|1995-06-12|DELIVER IN PERSON|FOB|quests. foxes sleep quickly 3458|132484|7511|1|48|72791.04|0.06|0.04|R|F|1995-03-17|1995-01-25|1995-03-28|TAKE BACK RETURN|AIR|iously pending dep 3458|49179|4188|2|46|51895.82|0.06|0.06|R|F|1995-03-08|1995-01-21|1995-03-10|TAKE BACK RETURN|SHIP|nod across the boldly even instruct 3458|142061|2062|3|36|39710.16|0.01|0.06|R|F|1995-04-20|1995-02-14|1995-05-09|TAKE BACK RETURN|REG AIR|s lose. blithely ironic requests boost 3458|15253|7755|4|16|18692.00|0.09|0.03|R|F|1995-03-01|1995-02-25|1995-03-06|TAKE BACK RETURN|AIR|s grow carefully. express, final grouc 3458|156396|8912|5|2|2904.78|0.09|0.03|A|F|1995-02-05|1995-02-01|1995-03-07|COLLECT COD|FOB|ironic packages haggle past the furiously 3458|141853|9396|6|6|11369.10|0.09|0.04|A|F|1995-03-10|1995-02-02|1995-03-23|TAKE BACK RETURN|AIR|dolites; regular theodolites cajole 3459|178065|5617|1|31|35434.86|0.06|0.01|A|F|1994-09-05|1994-10-20|1994-10-03|NONE|REG AIR|y regular pain 3459|129270|4295|2|30|38978.10|0.04|0.08|R|F|1994-11-22|1994-09-12|1994-12-11|NONE|REG AIR|nic theodolites; evenly i 3459|40035|2540|3|45|43876.35|0.04|0.05|A|F|1994-07-31|1994-09-09|1994-08-02|TAKE BACK RETURN|REG AIR|ntly speci 3459|68031|8032|4|10|9990.30|0.05|0.06|A|F|1994-10-06|1994-09-16|1994-11-03|TAKE BACK RETURN|REG AIR| furiously silent dolphi 3459|188574|6129|5|10|16625.70|0.02|0.02|R|F|1994-08-01|1994-10-17|1994-08-11|TAKE BACK RETURN|FOB|. blithely ironic pinto beans above 3460|10655|8159|1|40|62626.00|0.10|0.06|N|O|1995-12-28|1995-12-14|1996-01-02|NONE|REG AIR|odolites are slyly bold deposits 3460|73365|5873|2|3|4015.08|0.06|0.00|N|O|1996-01-19|1995-12-28|1996-01-31|COLLECT COD|AIR|er quickly 3460|34658|7162|3|40|63706.00|0.08|0.07|N|O|1995-10-29|1995-11-10|1995-11-24|TAKE BACK RETURN|REG AIR|o the even deposits 3460|94647|9666|4|50|82082.00|0.02|0.07|N|O|1996-01-30|1995-12-10|1996-02-06|DELIVER IN PERSON|SHIP|e slyly about the sly 3460|129391|9392|5|47|66758.33|0.08|0.05|N|O|1995-12-09|1995-11-12|1995-12-22|TAKE BACK RETURN|SHIP|es haggle slyly regular accounts. fi 3460|62084|7097|6|46|48119.68|0.03|0.07|N|O|1996-01-27|1996-01-01|1996-02-01|NONE|TRUCK|uses run among the carefully even deposits 3460|44512|7017|7|28|40782.28|0.00|0.01|N|O|1995-10-28|1995-11-13|1995-11-17|COLLECT COD|SHIP|inal, ironic instructions. 
carefully 3461|99796|7324|1|49|87993.71|0.06|0.06|A|F|1993-03-09|1993-04-16|1993-03-13|DELIVER IN PERSON|RAIL|ual request 3461|62417|2418|2|27|37244.07|0.06|0.06|A|F|1993-02-10|1993-03-02|1993-03-04|COLLECT COD|SHIP|ely unusual deposits. quickly ir 3461|38930|1434|3|44|82232.92|0.09|0.06|A|F|1993-05-20|1993-04-03|1993-05-27|COLLECT COD|RAIL| haggle quickly even ideas. fin 3461|94288|6798|4|41|52573.48|0.09|0.02|R|F|1993-02-19|1993-04-20|1993-02-21|NONE|TRUCK|heodolites. blithely ironi 3461|89771|4788|5|16|28172.32|0.08|0.06|A|F|1993-05-09|1993-04-29|1993-05-26|TAKE BACK RETURN|TRUCK| pending deposi 3461|166821|4370|6|24|45307.68|0.10|0.00|A|F|1993-06-01|1993-03-12|1993-06-20|TAKE BACK RETURN|MAIL|thely. carefully re 3462|150880|8426|1|4|7723.52|0.09|0.04|N|O|1997-06-12|1997-07-31|1997-06-16|COLLECT COD|RAIL|ackages. fu 3462|39432|4439|2|43|58971.49|0.08|0.03|N|O|1997-08-01|1997-07-18|1997-08-29|NONE|RAIL| carefully. final, final ideas sleep slyly 3462|128298|811|3|6|7957.74|0.05|0.04|N|O|1997-06-02|1997-08-09|1997-06-30|NONE|RAIL|iously regular fo 3462|98093|5621|4|2|2182.18|0.09|0.07|N|O|1997-09-10|1997-08-08|1997-09-19|NONE|AIR|nic packages. even accounts alongside 3462|37353|4863|5|14|18064.90|0.01|0.02|N|O|1997-05-31|1997-07-05|1997-06-24|COLLECT COD|MAIL|yly. blithely bold theodolites wa 3463|60779|3286|1|45|78289.65|0.02|0.02|A|F|1993-10-30|1993-11-04|1993-11-08|DELIVER IN PERSON|FOB|nts are slyly 3463|97924|2943|2|43|82642.56|0.04|0.02|A|F|1993-10-28|1993-09-24|1993-11-03|DELIVER IN PERSON|FOB| across the 3488|159702|4733|1|1|1761.70|0.04|0.01|A|F|1995-03-06|1995-02-16|1995-03-23|DELIVER IN PERSON|FOB| final excuses. carefully even waters hagg 3488|103455|8476|2|48|70005.60|0.00|0.03|A|F|1995-03-29|1995-03-26|1995-04-28|COLLECT COD|SHIP|sly? final requests 3488|159080|9081|3|11|12529.88|0.03|0.08|R|F|1995-03-25|1995-02-08|1995-04-16|COLLECT COD|TRUCK|unusual re 3488|41028|3533|4|12|11628.24|0.05|0.07|R|F|1995-04-27|1995-02-16|1995-05-09|DELIVER IN PERSON|RAIL|e slyly; furiously final packages wak 3488|155132|163|5|18|21368.34|0.09|0.06|A|F|1995-03-18|1995-03-19|1995-03-29|DELIVER IN PERSON|FOB|s the carefully r 3489|185663|8182|1|19|33224.54|0.09|0.05|A|F|1993-07-31|1993-10-26|1993-08-15|NONE|SHIP|c deposits alongside of the pending, fu 3489|28091|594|2|46|46878.14|0.00|0.00|A|F|1993-08-02|1993-10-09|1993-08-10|TAKE BACK RETURN|TRUCK|xcuses? quickly stealthy dependenci 3490|91360|8888|1|43|58108.48|0.05|0.05|N|O|1997-08-04|1997-08-06|1997-08-14|TAKE BACK RETURN|SHIP|. even requests cajol 3490|85447|2972|2|50|71622.00|0.05|0.07|N|O|1997-06-27|1997-08-15|1997-06-28|NONE|RAIL| haggle carefu 3490|92453|9981|3|8|11563.60|0.10|0.04|N|O|1997-08-11|1997-07-25|1997-08-28|COLLECT COD|MAIL|inal deposits use furiousl 3491|153491|6007|1|28|43245.72|0.04|0.03|N|O|1998-09-29|1998-09-08|1998-10-23|COLLECT COD|FOB|ccounts. sly 3491|121773|1774|2|22|39484.94|0.08|0.02|N|O|1998-08-19|1998-08-22|1998-09-03|TAKE BACK RETURN|REG AIR| grow against the boldly pending pinto bea 3492|155860|5861|1|3|5747.58|0.02|0.08|R|F|1994-11-26|1994-12-28|1994-12-19|COLLECT COD|REG AIR|the deposits. carefully 3492|125526|3063|2|7|10860.64|0.04|0.00|R|F|1995-03-10|1995-01-03|1995-03-16|COLLECT COD|FOB|thely regular dolphi 3492|108869|8870|3|34|63847.24|0.05|0.06|A|F|1994-12-07|1994-12-29|1994-12-24|COLLECT COD|AIR| unusual requests. 
ir 3492|146899|4442|4|30|58376.70|0.02|0.06|A|F|1995-01-29|1995-01-02|1995-02-13|DELIVER IN PERSON|MAIL| detect furiously permanent, unusual accou 3492|121499|6524|5|47|71463.03|0.09|0.07|R|F|1995-03-24|1994-12-28|1995-03-29|NONE|REG AIR|deposits. quickly express 3492|21118|3621|6|47|48838.17|0.04|0.07|R|F|1994-12-12|1995-01-18|1994-12-26|COLLECT COD|RAIL|ronic instructions u 3493|92506|7525|1|31|46453.50|0.06|0.07|R|F|1993-10-22|1993-10-12|1993-11-07|DELIVER IN PERSON|REG AIR|ructions. slyly regular accounts across the 3493|131994|1995|2|10|20259.90|0.02|0.06|R|F|1993-08-27|1993-10-07|1993-09-23|COLLECT COD|TRUCK|hall have to integ 3494|116147|8659|1|40|46525.60|0.05|0.04|R|F|1993-07-10|1993-06-01|1993-07-25|TAKE BACK RETURN|TRUCK|lites haggle furiously about the fin 3494|74124|4125|2|23|25256.76|0.10|0.01|A|F|1993-06-19|1993-06-04|1993-07-14|NONE|FOB|osits nag 3494|197161|4719|3|40|50326.40|0.02|0.08|A|F|1993-05-30|1993-07-02|1993-06-20|TAKE BACK RETURN|MAIL|uests cajole blithely 3494|76076|6077|4|30|31562.10|0.04|0.03|R|F|1993-07-01|1993-06-08|1993-07-15|TAKE BACK RETURN|TRUCK|ns are quickly regular, 3495|27321|9824|1|20|24966.40|0.10|0.03|N|O|1996-04-24|1996-05-18|1996-05-01|TAKE BACK RETURN|RAIL|posits are carefully; forges cajole qui 3495|172119|9671|2|24|28586.64|0.05|0.02|N|O|1996-03-22|1996-04-10|1996-04-07|DELIVER IN PERSON|RAIL|ic, final pains along the even request 3495|198597|8598|3|16|27129.44|0.08|0.02|N|O|1996-03-30|1996-04-02|1996-04-12|TAKE BACK RETURN|AIR|y bold dependencies; blithely idle sautern 3520|27999|5506|1|30|57809.70|0.04|0.02|N|O|1997-11-11|1997-10-02|1997-12-06|COLLECT COD|SHIP|deas should solve blithely among the ironi 3520|166759|1792|2|38|69378.50|0.00|0.04|N|O|1997-08-14|1997-10-26|1997-09-09|NONE|RAIL|yly final packages according to the quickl 3520|105258|7769|3|5|6316.25|0.01|0.02|N|O|1997-11-13|1997-09-22|1997-12-09|NONE|MAIL|ly even ideas haggle 3520|63745|3746|4|41|70058.34|0.01|0.01|N|O|1997-08-06|1997-09-20|1997-08-20|TAKE BACK RETURN|AIR| carefully pendi 3520|162174|7207|5|35|43265.95|0.02|0.02|N|O|1997-09-16|1997-09-03|1997-09-24|DELIVER IN PERSON|FOB|s nag carefully. sometimes unusual account 3521|58448|3459|1|48|67509.12|0.09|0.03|A|F|1993-01-03|1992-12-31|1993-01-22|NONE|AIR|ses use. furiously express ideas wake f 3521|130897|898|2|2|3855.78|0.05|0.06|R|F|1993-01-29|1992-12-20|1993-02-23|NONE|MAIL|refully duri 3521|177971|489|3|38|77860.86|0.00|0.08|A|F|1993-02-15|1992-12-10|1993-03-10|COLLECT COD|FOB|ges hang q 3521|143978|9007|4|26|52571.22|0.02|0.08|R|F|1993-01-04|1993-01-20|1993-01-17|DELIVER IN PERSON|AIR|onic dependencies haggle. fur 3521|35052|59|5|28|27637.40|0.10|0.01|A|F|1993-01-06|1993-01-22|1993-02-02|TAKE BACK RETURN|FOB|e slyly above the slyly final 3522|3671|8672|1|6|9448.02|0.08|0.03|A|F|1995-01-21|1994-12-09|1995-01-23|NONE|SHIP|tes snooze 3522|86438|8947|2|48|68372.64|0.00|0.03|R|F|1994-12-05|1994-10-30|1994-12-26|TAKE BACK RETURN|SHIP|ve the quickly special packages 3522|156693|1724|3|46|80485.74|0.09|0.02|A|F|1994-11-12|1994-11-30|1994-11-20|NONE|AIR|d the express, silent foxes. blit 3522|129818|4843|4|7|12934.67|0.10|0.02|A|F|1994-10-31|1994-11-19|1994-11-28|NONE|TRUCK|e stealthil 3522|49144|6657|5|27|29514.78|0.02|0.05|R|F|1994-11-29|1994-12-15|1994-12-08|COLLECT COD|REG AIR|ic tithes. 
car 3522|157970|5516|6|18|36503.46|0.01|0.03|A|F|1994-11-16|1994-10-29|1994-11-29|COLLECT COD|RAIL|sits wake carefully pen 3523|24474|4475|1|15|20977.05|0.06|0.02|N|O|1998-06-26|1998-05-22|1998-07-24|COLLECT COD|REG AIR|se slyly pending, sp 3523|132127|9667|2|4|4636.48|0.03|0.06|N|O|1998-05-08|1998-05-18|1998-05-25|TAKE BACK RETURN|MAIL|ts. final accounts detect furiously along 3523|49146|1651|3|24|26283.36|0.07|0.04|N|O|1998-08-02|1998-06-22|1998-08-27|COLLECT COD|FOB|ke according to the doggedly re 3523|191238|3758|4|36|47852.28|0.06|0.08|N|O|1998-05-26|1998-06-04|1998-06-25|DELIVER IN PERSON|SHIP|accounts. fluffily regu 3523|133109|3110|5|48|54820.80|0.00|0.01|N|O|1998-07-22|1998-06-25|1998-08-19|DELIVER IN PERSON|AIR| regular requests 3524|136119|6120|1|5|5775.55|0.01|0.04|R|F|1992-05-23|1992-07-25|1992-06-19|DELIVER IN PERSON|RAIL|ts whithout the bold depende 3524|142948|7977|2|17|33845.98|0.09|0.08|A|F|1992-09-01|1992-07-17|1992-09-05|DELIVER IN PERSON|FOB|g, final epitaphs about the pinto 3525|45478|5479|1|12|17081.64|0.01|0.03|N|O|1996-03-08|1996-03-18|1996-03-16|NONE|TRUCK|lar excuses wake carefull 3525|137210|2237|2|27|33674.67|0.03|0.03|N|O|1995-12-30|1996-01-23|1996-01-02|DELIVER IN PERSON|SHIP|y slyly special asymptotes 3525|74397|6905|3|31|42513.09|0.00|0.03|N|O|1996-03-08|1996-02-27|1996-03-13|COLLECT COD|TRUCK|he careful 3525|183399|954|4|28|41506.92|0.03|0.02|N|O|1996-01-22|1996-02-08|1996-01-27|COLLECT COD|FOB| nag according 3526|97645|7646|1|11|18069.04|0.02|0.03|R|F|1995-05-23|1995-05-28|1995-05-24|NONE|TRUCK|ges. furiously regular d 3526|116383|3917|2|23|32185.74|0.03|0.04|A|F|1995-05-01|1995-05-31|1995-05-25|DELIVER IN PERSON|FOB|special, regular packages cajole. 3526|32541|51|3|20|29470.80|0.05|0.08|N|F|1995-06-16|1995-04-26|1995-06-22|DELIVER IN PERSON|REG AIR|kages. bold, special requests detect sl 3527|101388|6409|1|47|65300.86|0.07|0.02|N|O|1997-07-14|1997-07-29|1997-07-21|DELIVER IN PERSON|RAIL|unts. express re 3527|25083|2590|2|33|33266.64|0.01|0.02|N|O|1997-09-25|1997-09-17|1997-10-12|NONE|FOB|kly alongside of 3527|161298|8847|3|50|67964.50|0.09|0.07|N|O|1997-07-17|1997-08-03|1997-07-29|DELIVER IN PERSON|SHIP|e even accounts was about th 3527|127163|9676|4|17|20232.72|0.02|0.05|N|O|1997-07-30|1997-09-01|1997-08-17|COLLECT COD|MAIL|ular instruction 3552|196483|6484|1|18|28430.64|0.01|0.07|N|O|1997-08-11|1997-07-14|1997-08-15|DELIVER IN PERSON|TRUCK|s deposits against the blithely unusual pin 3552|89318|1827|2|44|57521.64|0.01|0.00|N|O|1997-08-08|1997-06-15|1997-08-29|COLLECT COD|FOB|ns after the blithely reg 3552|160455|8004|3|36|54556.20|0.04|0.08|N|O|1997-06-29|1997-06-24|1997-07-21|COLLECT COD|TRUCK|ly regular theodolites. fin 3553|142076|4591|1|4|4472.28|0.05|0.01|R|F|1994-06-13|1994-07-10|1994-07-03|COLLECT COD|RAIL|olites boost bli 3553|64118|6625|2|26|28134.86|0.05|0.08|A|F|1994-08-06|1994-07-30|1994-08-23|DELIVER IN PERSON|MAIL|fily special p 3553|21291|8798|3|18|21821.22|0.04|0.03|A|F|1994-07-03|1994-06-30|1994-07-07|COLLECT COD|RAIL|. quickly ironic 3553|31009|3513|4|40|37600.00|0.06|0.00|A|F|1994-09-14|1994-06-26|1994-09-25|NONE|RAIL| slyly pending asymptotes against the furi 3553|156020|1051|5|36|38736.72|0.06|0.08|R|F|1994-08-12|1994-06-25|1994-09-06|DELIVER IN PERSON|TRUCK| realms. pending, bold theodolites 3554|174151|6669|1|32|39204.80|0.01|0.05|N|O|1995-09-28|1995-09-01|1995-10-07|NONE|RAIL|. blithely ironic t 3554|144071|4072|2|18|20071.26|0.03|0.00|N|O|1995-09-11|1995-08-12|1995-10-04|DELIVER IN PERSON|REG AIR| haggle. 
furiously fluffy requests ac 3554|191547|1548|3|41|67180.14|0.02|0.01|N|O|1995-07-13|1995-08-28|1995-07-27|DELIVER IN PERSON|MAIL|ent dependencies. sly 3555|165543|576|1|11|17693.94|0.05|0.02|N|O|1996-09-25|1996-10-01|1996-10-03|NONE|FOB|oost caref 3555|78411|8412|2|15|20841.15|0.03|0.08|N|O|1996-07-13|1996-09-01|1996-08-02|TAKE BACK RETURN|RAIL|y across the pending a 3555|42078|9591|3|25|25501.75|0.09|0.07|N|O|1996-10-01|1996-08-23|1996-10-24|TAKE BACK RETURN|MAIL|sual packages. quickly 3555|4429|4430|4|19|25334.98|0.00|0.05|N|O|1996-09-08|1996-09-14|1996-10-01|COLLECT COD|REG AIR|leep special theodolit 3555|32976|7983|5|29|55360.13|0.07|0.04|N|O|1996-08-02|1996-09-04|1996-08-08|DELIVER IN PERSON|TRUCK|deas. carefully s 3555|27342|9845|6|33|41888.22|0.04|0.08|N|O|1996-09-20|1996-09-23|1996-10-05|TAKE BACK RETURN|AIR|fluffily regular a 3555|125254|279|7|9|11513.25|0.07|0.02|N|O|1996-10-13|1996-10-02|1996-10-22|NONE|SHIP|are. slyly final foxes acro 3556|141443|3958|1|45|66799.80|0.05|0.06|A|F|1992-10-14|1992-12-21|1992-10-16|NONE|TRUCK|ckages boost quickl 3556|30350|5357|2|43|55055.05|0.02|0.06|R|F|1993-01-18|1992-11-09|1993-02-04|NONE|FOB|wake carefull 3556|86856|6857|3|28|51599.80|0.10|0.04|A|F|1993-01-06|1992-11-27|1993-01-16|NONE|MAIL|refully final instructions? ironic packa 3557|174245|1797|1|41|54088.84|0.01|0.07|R|F|1993-01-30|1992-12-31|1993-02-18|COLLECT COD|FOB|ideas breach c 3557|128842|8843|2|37|69221.08|0.03|0.05|R|F|1993-02-16|1993-01-05|1993-03-15|DELIVER IN PERSON|RAIL|gside of the ca 3558|86334|6335|1|8|10562.64|0.01|0.03|N|O|1996-05-31|1996-05-26|1996-06-25|COLLECT COD|AIR|? even requests sle 3558|9567|7068|2|28|41343.68|0.02|0.08|N|O|1996-06-02|1996-04-18|1996-06-24|COLLECT COD|TRUCK|l deposits 3558|186452|1489|3|3|4615.35|0.03|0.06|N|O|1996-05-19|1996-04-28|1996-05-26|DELIVER IN PERSON|RAIL|l, final deposits haggle. fina 3558|90560|8088|4|22|34112.32|0.06|0.03|N|O|1996-04-27|1996-04-19|1996-04-30|DELIVER IN PERSON|SHIP|refully ironic theodolites are fu 3558|28850|3855|5|38|67596.30|0.03|0.08|N|O|1996-05-29|1996-05-02|1996-06-09|COLLECT COD|RAIL|refully permanently iron 3558|71821|6836|6|17|30477.94|0.07|0.07|N|O|1996-03-14|1996-05-04|1996-04-05|NONE|RAIL|ithely unusual packa 3559|89591|9592|1|29|45837.11|0.00|0.07|R|F|1992-12-10|1992-12-03|1992-12-20|COLLECT COD|REG AIR|l, regular accounts wake flu 3584|10196|5199|1|4|4424.76|0.04|0.08|N|O|1997-08-16|1997-10-31|1997-08-28|DELIVER IN PERSON|TRUCK|nal packag 3584|159495|2011|2|23|35753.27|0.00|0.03|N|O|1997-09-10|1997-10-15|1997-09-30|COLLECT COD|TRUCK|l platelets until the asymptotes 3584|23297|3298|3|6|7321.74|0.03|0.06|N|O|1997-10-28|1997-11-09|1997-11-24|TAKE BACK RETURN|MAIL|deposits across the 3584|145596|3139|4|11|18057.49|0.06|0.02|N|O|1997-11-27|1997-10-15|1997-12-08|NONE|REG AIR|lithely slyly 3584|17924|2927|5|39|71834.88|0.09|0.07|N|O|1997-09-20|1997-10-31|1997-10-06|COLLECT COD|AIR|eposits. carefu 3585|121713|6738|1|21|36428.91|0.05|0.04|A|F|1994-12-04|1994-12-25|1995-01-01|TAKE BACK RETURN|TRUCK|ounts use. express, final platelets us 3585|18535|8536|2|40|58141.20|0.03|0.00|R|F|1995-01-22|1995-01-17|1995-02-07|TAKE BACK RETURN|RAIL|elets affix. even asymptotes play care 3585|111350|8884|3|11|14974.85|0.01|0.04|R|F|1995-01-04|1995-02-14|1995-01-15|NONE|MAIL|even packages 3585|47911|2920|4|33|61344.03|0.08|0.08|A|F|1994-12-14|1995-01-19|1994-12-22|NONE|RAIL|ironic dependencies serve furi 3585|24543|2050|5|13|19078.02|0.06|0.07|R|F|1995-03-15|1995-01-22|1995-03-17|DELIVER IN PERSON|AIR|ccording to the foxes. 
slyly iro 3585|93452|8471|6|7|10118.15|0.10|0.02|A|F|1994-12-13|1995-01-20|1995-01-05|TAKE BACK RETURN|TRUCK|dependencies sleep un 3585|41685|9198|7|45|73200.60|0.03|0.00|A|F|1995-01-20|1995-02-19|1995-02-11|DELIVER IN PERSON|MAIL|are blithely c 3586|193699|8738|1|2|3585.38|0.03|0.08|R|F|1994-02-10|1994-01-07|1994-03-03|DELIVER IN PERSON|RAIL|he even, unusual decoy 3586|83314|3315|2|29|37621.99|0.04|0.07|R|F|1994-03-06|1994-03-02|1994-03-13|DELIVER IN PERSON|RAIL| slyly unusual i 3586|57422|2433|3|2|2758.84|0.03|0.06|R|F|1994-03-22|1994-02-20|1994-04-08|NONE|REG AIR|unts. slyly final ideas agai 3586|83722|3723|4|33|56288.76|0.06|0.01|R|F|1994-01-24|1994-02-09|1994-02-07|NONE|TRUCK|refully across the fur 3586|107949|460|5|8|15655.52|0.06|0.02|A|F|1994-03-29|1994-02-26|1994-04-02|NONE|FOB|theodolites hagg 3586|98598|1108|6|8|12772.72|0.09|0.01|A|F|1994-03-18|1994-01-17|1994-04-06|DELIVER IN PERSON|RAIL| ironic pinto beans cajole carefully theo 3586|122699|2700|7|33|56815.77|0.05|0.04|A|F|1994-02-11|1994-01-15|1994-03-03|NONE|REG AIR|iously regular pinto beans integrate 3587|196712|1751|1|5|9043.55|0.09|0.07|N|O|1996-09-03|1996-07-05|1996-09-11|DELIVER IN PERSON|SHIP|ithely regular decoys above the 3587|131240|8780|2|48|61019.52|0.00|0.03|N|O|1996-08-02|1996-07-02|1996-08-05|TAKE BACK RETURN|MAIL|beans. blithely final depe 3587|150350|7896|3|36|50412.60|0.05|0.05|N|O|1996-07-26|1996-06-16|1996-08-23|TAKE BACK RETURN|MAIL|ully regular excuse 3587|123620|6133|4|31|50952.22|0.03|0.01|N|O|1996-07-21|1996-07-01|1996-07-23|COLLECT COD|SHIP|press fluffily regul 3587|69585|4598|5|12|18654.96|0.06|0.03|N|O|1996-08-30|1996-07-04|1996-09-22|DELIVER IN PERSON|RAIL|g the even pinto beans. special, 3587|106080|1101|6|16|17377.28|0.01|0.03|N|O|1996-05-11|1996-06-19|1996-06-04|COLLECT COD|FOB|y ruthless dolphins to 3587|73225|747|7|23|27559.06|0.07|0.05|N|O|1996-08-30|1996-07-01|1996-09-10|COLLECT COD|FOB|l multipliers sleep theodolites-- slyly 3588|90135|7663|1|28|31503.64|0.04|0.08|R|F|1995-05-03|1995-05-03|1995-05-14|DELIVER IN PERSON|TRUCK|special pinto beans cajole slyly. slyly 3588|87759|268|2|6|10480.50|0.06|0.08|A|F|1995-04-09|1995-05-30|1995-04-10|TAKE BACK RETURN|MAIL|s. fluffily fluf 3588|158716|8717|3|45|79861.95|0.04|0.02|R|F|1995-05-07|1995-05-04|1995-05-28|TAKE BACK RETURN|TRUCK|ecial pains integrate blithely. reques 3588|126284|3821|4|22|28826.16|0.05|0.00|A|F|1995-04-08|1995-05-06|1995-04-27|NONE|RAIL|inal accounts. pending, bo 3588|54948|7454|5|28|53282.32|0.03|0.03|A|F|1995-04-23|1995-05-25|1995-04-28|DELIVER IN PERSON|TRUCK| express sheaves. unusual theodo 3588|109433|1944|6|37|53369.91|0.08|0.04|N|F|1995-06-17|1995-05-25|1995-06-24|TAKE BACK RETURN|RAIL|xcuses sleep quickly along th 3588|38463|967|7|46|64467.16|0.08|0.07|A|F|1995-06-06|1995-05-08|1995-06-08|NONE|AIR| slyly ironic deposits sublate ab 3589|36519|4029|1|42|61131.42|0.08|0.08|R|F|1994-08-11|1994-07-17|1994-08-23|DELIVER IN PERSON|AIR|he blithely unusual pac 3590|175980|8498|1|10|20559.80|0.08|0.00|N|O|1995-07-17|1995-06-26|1995-08-12|TAKE BACK RETURN|SHIP|t the quickly ironic 3590|94336|4337|2|19|25276.27|0.03|0.03|N|O|1995-08-02|1995-06-20|1995-08-08|NONE|SHIP|special pinto beans. 
blithely reg 3590|95583|602|3|43|67878.94|0.07|0.06|N|O|1995-07-12|1995-07-25|1995-07-16|DELIVER IN PERSON|SHIP|s could have to use 3590|55334|2850|4|26|33522.58|0.01|0.03|N|O|1995-07-08|1995-06-17|1995-08-02|DELIVER IN PERSON|SHIP|arefully along th 3590|190226|227|5|37|48700.14|0.00|0.08|N|O|1995-09-01|1995-06-29|1995-09-10|NONE|SHIP|ccounts above the silent waters thrash f 3590|118843|8844|6|31|57717.04|0.03|0.01|N|O|1995-06-24|1995-07-12|1995-06-25|DELIVER IN PERSON|REG AIR|ve furiously final instructions. slyly regu 3590|193517|8556|7|44|70862.44|0.05|0.04|N|F|1995-06-07|1995-06-15|1995-06-27|NONE|MAIL|s sleep after the regular platelets. blit 3591|28873|3878|1|21|37839.27|0.03|0.03|A|F|1994-02-25|1994-02-02|1994-03-05|DELIVER IN PERSON|TRUCK|structions against 3591|68232|3245|2|24|28805.52|0.04|0.04|R|F|1993-12-26|1994-01-07|1994-01-25|COLLECT COD|FOB|ages. slyly regular dependencies cajo 3591|163239|788|3|4|5208.92|0.01|0.03|A|F|1994-04-04|1994-02-19|1994-05-02|DELIVER IN PERSON|RAIL|he final packages. deposits serve quick 3591|152523|2524|4|49|77200.48|0.01|0.00|A|F|1994-03-21|1994-01-26|1994-03-28|COLLECT COD|AIR| mold slyly. bl 3616|196546|9066|1|30|49276.20|0.01|0.00|A|F|1994-05-05|1994-04-24|1994-05-12|TAKE BACK RETURN|FOB|ly ironic accounts unwind b 3616|137984|7985|2|28|56615.44|0.08|0.06|R|F|1994-02-20|1994-04-18|1994-03-05|DELIVER IN PERSON|REG AIR|ironic packages. furiously ev 3617|116065|6066|1|46|49728.76|0.03|0.02|N|O|1996-05-19|1996-05-14|1996-06-11|NONE|RAIL|ar theodolites. regu 3617|97942|7943|2|16|31039.04|0.05|0.02|N|O|1996-05-08|1996-06-03|1996-05-19|COLLECT COD|RAIL| slyly on th 3617|97724|5252|3|32|55095.04|0.00|0.06|N|O|1996-04-20|1996-06-07|1996-05-19|DELIVER IN PERSON|MAIL|uriously against the express accounts. ex 3617|40706|8219|4|22|36227.40|0.10|0.05|N|O|1996-07-11|1996-05-02|1996-07-25|NONE|REG AIR|uffily even accounts. packages sleep blithe 3617|136286|1313|5|11|14545.08|0.08|0.05|N|O|1996-07-16|1996-04-23|1996-07-28|COLLECT COD|MAIL|ly quickly even requests. final 3618|139641|4668|1|38|63864.32|0.08|0.00|N|O|1997-12-22|1998-02-23|1998-01-03|TAKE BACK RETURN|TRUCK|nts haggle fluffily above the regular 3618|143397|3398|2|48|69138.72|0.04|0.00|N|O|1998-03-12|1998-02-13|1998-03-29|DELIVER IN PERSON|TRUCK|tructions atop the ironi 3618|62032|4539|3|24|23856.72|0.01|0.04|N|O|1998-01-26|1998-01-15|1998-02-17|TAKE BACK RETURN|AIR|xpress acc 3618|160418|419|4|26|38438.66|0.01|0.05|N|O|1998-03-23|1998-01-24|1998-04-15|DELIVER IN PERSON|AIR|iously regular deposits cajole ruthless 3619|95741|5742|1|49|85100.26|0.01|0.08|N|O|1997-01-22|1996-12-21|1997-02-17|TAKE BACK RETURN|MAIL| waters. furiously even deposits 3619|115622|8134|2|27|44215.74|0.08|0.04|N|O|1996-12-12|1997-01-18|1996-12-18|TAKE BACK RETURN|SHIP|pecial accounts haggle care 3619|47022|4535|3|46|44574.92|0.08|0.03|N|O|1997-01-31|1997-01-27|1997-02-11|NONE|SHIP|press, expres 3619|92969|7988|4|18|35315.28|0.04|0.02|N|O|1997-03-18|1996-12-24|1997-03-21|COLLECT COD|AIR|eodolites 3619|119269|6803|5|38|48953.88|0.05|0.08|N|O|1996-12-08|1997-02-03|1997-01-07|NONE|RAIL|theodolites detect abo 3619|151951|1952|6|43|86126.85|0.01|0.01|N|O|1997-01-25|1997-01-06|1997-02-07|COLLECT COD|RAIL| bold, even 3620|58688|1194|1|41|67513.88|0.03|0.08|N|O|1997-03-21|1997-04-20|1997-03-30|COLLECT COD|FOB|t attainments cajole qui 3620|166574|1607|2|16|26249.12|0.00|0.06|N|O|1997-05-17|1997-05-08|1997-06-03|COLLECT COD|SHIP|s. 
even, pending in 3621|16475|6476|1|29|40352.63|0.02|0.06|A|F|1993-08-03|1993-07-08|1993-08-10|DELIVER IN PERSON|FOB|al requests. fl 3621|92329|4839|2|13|17177.16|0.09|0.04|R|F|1993-08-30|1993-06-30|1993-09-01|NONE|REG AIR|r the unusual packages. brave theodoli 3621|163518|1067|3|45|71167.95|0.07|0.07|R|F|1993-08-09|1993-06-18|1993-09-05|DELIVER IN PERSON|AIR| doubt about the bold deposits. carefully 3621|43242|755|4|20|23704.80|0.05|0.04|R|F|1993-05-27|1993-07-04|1993-06-22|TAKE BACK RETURN|SHIP|gular accounts use carefully with 3622|174754|4755|1|47|85951.25|0.09|0.00|N|O|1996-02-24|1996-02-22|1996-03-12|TAKE BACK RETURN|TRUCK|are careful 3622|88392|901|2|4|5521.56|0.04|0.04|N|O|1996-02-03|1996-02-19|1996-02-16|TAKE BACK RETURN|TRUCK|lithely brave foxes. furi 3622|189480|9481|3|46|72196.08|0.07|0.07|N|O|1995-12-18|1996-01-23|1996-01-12|TAKE BACK RETURN|AIR|sits wake. blithe 3622|176517|6518|4|9|14341.59|0.08|0.05|N|O|1995-12-12|1996-02-09|1995-12-13|TAKE BACK RETURN|SHIP|arefully. furiously regular ideas n 3623|79831|2339|1|32|57946.56|0.05|0.00|N|O|1997-04-18|1997-03-15|1997-05-09|COLLECT COD|SHIP| courts. furiously regular ideas b 3623|116203|1226|2|33|40233.60|0.08|0.01|N|O|1997-03-17|1997-02-13|1997-04-02|TAKE BACK RETURN|TRUCK|odolites. blithely spe 3623|23025|532|3|21|19908.42|0.02|0.02|N|O|1997-01-19|1997-03-18|1997-01-24|NONE|FOB|ress ideas are furio 3623|164747|9780|4|42|76093.08|0.05|0.06|N|O|1997-01-11|1997-03-24|1997-01-21|COLLECT COD|RAIL|g to the slyly regular packa 3623|87761|5286|5|30|52462.80|0.10|0.04|N|O|1997-04-04|1997-03-03|1997-05-01|NONE|RAIL| ironic somas sleep fluffily 3623|185158|195|6|7|8702.05|0.01|0.02|N|O|1997-01-05|1997-03-26|1997-01-26|NONE|TRUCK|aves. slyly special packages cajole. fu 3623|139856|2370|7|13|24646.05|0.03|0.08|N|O|1997-01-02|1997-02-26|1997-01-26|DELIVER IN PERSON|SHIP|deas. furiously expres 3648|143090|3091|1|16|18129.44|0.02|0.06|A|F|1993-08-14|1993-08-14|1993-08-15|COLLECT COD|FOB|s nag packages. 3648|104769|2300|2|30|53212.80|0.00|0.01|R|F|1993-08-31|1993-09-06|1993-09-06|DELIVER IN PERSON|FOB| above the somas boost furious 3648|45851|5852|3|34|61092.90|0.10|0.00|A|F|1993-08-21|1993-07-25|1993-09-15|DELIVER IN PERSON|FOB| deposits are furiously. careful, 3648|12026|7029|4|16|15008.32|0.06|0.03|R|F|1993-07-27|1993-08-26|1993-08-24|DELIVER IN PERSON|FOB|uriously stealthy deposits haggle furi 3648|116962|4496|5|25|49474.00|0.06|0.03|R|F|1993-08-15|1993-08-25|1993-09-09|TAKE BACK RETURN|TRUCK|s requests. silent asymp 3648|168406|8407|6|14|20641.60|0.08|0.06|R|F|1993-10-02|1993-08-26|1993-10-09|COLLECT COD|AIR|sly pending excuses. carefully i 3648|194110|4111|7|49|59001.39|0.09|0.03|R|F|1993-06-27|1993-07-27|1993-07-24|TAKE BACK RETURN|FOB|egular instructions. slyly regular pinto 3649|4730|4731|1|25|40868.25|0.10|0.04|A|F|1994-10-27|1994-08-23|1994-11-05|TAKE BACK RETURN|TRUCK|special re 3649|88968|3985|2|23|45010.08|0.08|0.00|R|F|1994-09-26|1994-10-01|1994-09-28|NONE|REG AIR|rs promise blithe 3649|69238|4251|3|14|16901.22|0.02|0.04|A|F|1994-09-19|1994-08-17|1994-10-12|DELIVER IN PERSON|TRUCK|ithely bold accounts wake 3649|75499|3021|4|40|58979.60|0.00|0.08|R|F|1994-07-20|1994-08-30|1994-08-14|TAKE BACK RETURN|RAIL|luffy somas sleep quickly-- ironic de 3649|99669|9670|5|24|40047.84|0.05|0.03|A|F|1994-07-07|1994-08-20|1994-07-27|TAKE BACK RETURN|FOB|c accounts. 
quickly final theodo 3649|121063|1064|6|3|3252.18|0.10|0.04|A|F|1994-07-17|1994-08-10|1994-08-03|NONE|FOB|lly bold requests nag; 3650|135535|8049|1|30|47115.90|0.10|0.00|A|F|1992-08-26|1992-07-05|1992-09-01|DELIVER IN PERSON|SHIP|ckly special platelets. furiously sil 3650|127935|7936|2|43|84405.99|0.05|0.05|A|F|1992-09-07|1992-08-12|1992-09-10|COLLECT COD|TRUCK|gside of the quick 3650|1634|9135|3|1|1535.63|0.04|0.06|A|F|1992-06-23|1992-07-18|1992-07-08|NONE|REG AIR|re about the pinto 3650|62057|4564|4|31|31590.55|0.10|0.08|R|F|1992-06-15|1992-07-01|1992-07-15|DELIVER IN PERSON|RAIL| against the ironic accounts cajol 3650|186345|6346|5|19|27195.46|0.05|0.04|R|F|1992-08-29|1992-08-09|1992-09-21|DELIVER IN PERSON|AIR|y even forges. fluffily furious accounts 3650|93525|1053|6|27|41000.04|0.07|0.08|A|F|1992-07-03|1992-07-23|1992-07-13|COLLECT COD|MAIL|ular requests snooze fluffily regular pi 3650|69009|4022|7|43|42054.00|0.10|0.07|A|F|1992-06-25|1992-07-09|1992-07-22|DELIVER IN PERSON|RAIL|structions use caref 3651|18097|5601|1|20|20301.80|0.01|0.04|N|O|1998-06-10|1998-06-06|1998-06-23|NONE|SHIP|tect quickly among the r 3651|154336|1882|2|24|33367.92|0.09|0.04|N|O|1998-06-22|1998-07-17|1998-07-10|DELIVER IN PERSON|RAIL|excuses haggle according to th 3651|112460|7483|3|41|60370.86|0.00|0.05|N|O|1998-05-10|1998-07-09|1998-05-13|NONE|RAIL|blithely. furiously 3651|109126|4147|4|27|30648.24|0.05|0.03|N|O|1998-05-03|1998-06-30|1998-05-05|DELIVER IN PERSON|RAIL| sleep blithely furiously do 3652|179779|7331|1|24|44610.48|0.05|0.03|N|O|1997-06-07|1997-04-07|1997-06-12|COLLECT COD|MAIL|the final p 3652|136489|6490|2|37|56442.76|0.02|0.05|N|O|1997-05-11|1997-04-06|1997-06-05|COLLECT COD|MAIL|osits haggle carefu 3652|162640|189|3|39|66402.96|0.01|0.02|N|O|1997-03-10|1997-04-03|1997-03-21|NONE|REG AIR|y express instructions. un 3652|79671|4686|4|1|1650.67|0.01|0.04|N|O|1997-04-20|1997-05-03|1997-05-18|DELIVER IN PERSON|SHIP| bold dependencies sublate. r 3653|144753|2296|1|38|68314.50|0.08|0.05|A|F|1994-06-26|1994-05-13|1994-07-13|NONE|REG AIR|ainst the 3653|63021|8034|2|29|28536.58|0.07|0.01|A|F|1994-04-11|1994-06-11|1994-04-29|COLLECT COD|RAIL|ording to the special, final 3653|180579|3098|3|17|28212.69|0.09|0.03|R|F|1994-06-24|1994-06-02|1994-07-17|DELIVER IN PERSON|RAIL|gle slyly regular 3653|185220|257|4|9|11746.98|0.10|0.07|R|F|1994-04-03|1994-05-19|1994-04-10|COLLECT COD|FOB|slyly silent account 3653|187794|313|5|41|77153.39|0.08|0.01|A|F|1994-06-18|1994-05-18|1994-06-20|COLLECT COD|RAIL|onic packages affix sly 3653|42462|2463|6|9|12640.14|0.05|0.03|A|F|1994-07-21|1994-05-31|1994-08-17|NONE|MAIL|tes: blithely bo 3653|48442|947|7|2|2780.88|0.06|0.03|R|F|1994-06-02|1994-05-31|1994-06-29|NONE|FOB|n accounts. fina 3654|164675|9708|1|46|80024.82|0.08|0.05|A|F|1992-06-05|1992-08-19|1992-06-06|DELIVER IN PERSON|FOB|usly regular foxes. furio 3654|92688|2689|2|29|48739.72|0.07|0.06|A|F|1992-09-11|1992-07-20|1992-10-04|DELIVER IN PERSON|FOB|odolites detect. 
quickly r 3654|1676|6677|3|37|58373.79|0.07|0.05|A|F|1992-09-22|1992-07-20|1992-10-19|TAKE BACK RETURN|RAIL|unts doze bravely ab 3654|167246|7247|4|11|14445.64|0.08|0.00|A|F|1992-07-20|1992-07-30|1992-07-23|TAKE BACK RETURN|SHIP|quickly along the express, ironic req 3654|93515|3516|5|34|51289.34|0.04|0.00|R|F|1992-07-26|1992-08-26|1992-08-12|TAKE BACK RETURN|REG AIR| the quick 3654|106597|4128|6|20|32071.80|0.03|0.02|A|F|1992-07-30|1992-07-05|1992-08-05|COLLECT COD|SHIP|s sleep about the slyly 3654|172225|9777|7|45|58374.90|0.01|0.07|A|F|1992-09-15|1992-07-04|1992-09-20|DELIVER IN PERSON|FOB|sly ironic notornis nag slyly 3655|183811|1366|1|5|9474.05|0.03|0.04|R|F|1993-01-17|1992-12-31|1993-01-23|DELIVER IN PERSON|TRUCK|riously bold pinto be 3655|96652|1671|2|1|1648.65|0.10|0.06|R|F|1992-10-24|1992-12-18|1992-11-07|DELIVER IN PERSON|AIR|arefully slow pinto beans are 3655|29968|2471|3|35|66428.60|0.01|0.04|R|F|1992-12-20|1992-11-16|1993-01-15|TAKE BACK RETURN|MAIL|blithely even accounts! furiously regular 3655|71347|1348|4|35|46141.90|0.04|0.07|R|F|1992-10-17|1992-12-23|1992-10-28|COLLECT COD|MAIL|ng foxes cajole fluffily slyly final fo 3680|176451|1486|1|48|73317.60|0.00|0.06|R|F|1993-01-16|1993-01-23|1993-01-19|COLLECT COD|FOB|packages. quickly fluff 3680|4228|6729|2|41|46421.02|0.00|0.04|A|F|1993-01-06|1993-03-02|1993-01-08|NONE|FOB|iously ironic platelets in 3680|55001|7507|3|33|31548.00|0.09|0.08|R|F|1993-03-16|1993-02-19|1993-04-05|NONE|FOB|ts. ironic, fina 3681|105232|7743|1|35|43303.05|0.03|0.08|R|F|1992-07-31|1992-05-18|1992-08-07|COLLECT COD|FOB|lyly special pinto 3682|60637|3144|1|6|9585.78|0.07|0.02|N|O|1997-05-06|1997-04-04|1997-05-11|NONE|AIR|ronic deposits wake slyly. ca 3682|115598|5599|2|18|29044.62|0.06|0.06|N|O|1997-04-30|1997-03-21|1997-05-10|NONE|FOB|regular dependencies 3682|46639|1648|3|17|26955.71|0.03|0.05|N|O|1997-02-12|1997-04-04|1997-02-22|COLLECT COD|FOB|, ironic packages wake a 3682|56495|9001|4|30|43544.70|0.09|0.05|N|O|1997-04-16|1997-04-16|1997-04-29|NONE|MAIL|he requests cajole quickly pending package 3683|100842|3353|1|35|64499.40|0.05|0.03|A|F|1993-05-31|1993-04-17|1993-06-14|NONE|SHIP| the furiously expr 3683|48689|6202|2|41|67144.88|0.01|0.06|A|F|1993-03-26|1993-05-06|1993-04-09|NONE|TRUCK|ress instructions. slyly express a 3683|99314|4333|3|23|30206.13|0.00|0.08|R|F|1993-07-02|1993-05-16|1993-07-30|NONE|TRUCK|xpress accounts sleep slyly re 3684|125106|5107|1|48|54292.80|0.04|0.06|A|F|1993-08-20|1993-09-02|1993-09-10|DELIVER IN PERSON|REG AIR|its boost alongside 3684|45384|5385|2|6|7976.28|0.06|0.08|R|F|1993-08-09|1993-10-05|1993-09-06|DELIVER IN PERSON|FOB|he silent requests. packages sleep fu 3684|162500|49|3|19|29687.50|0.04|0.02|A|F|1993-10-19|1993-08-25|1993-11-02|COLLECT COD|FOB|e slyly carefully pending foxes. d 3684|134811|2351|4|13|23995.53|0.02|0.05|A|F|1993-07-23|1993-09-16|1993-08-06|NONE|TRUCK|ing, unusual pinto beans! thinly p 3685|46199|8704|1|37|42372.03|0.02|0.03|R|F|1992-03-11|1992-04-09|1992-04-05|DELIVER IN PERSON|TRUCK|ress attai 3685|57413|9919|2|7|9592.87|0.05|0.00|R|F|1992-05-16|1992-02-23|1992-05-17|DELIVER IN PERSON|FOB|sits. special asymptotes about the r 3685|133706|3707|3|38|66108.60|0.08|0.03|A|F|1992-05-17|1992-03-16|1992-06-06|TAKE BACK RETURN|TRUCK|thely unusual pack 3685|191626|6665|4|39|66987.18|0.10|0.05|R|F|1992-02-19|1992-04-06|1992-03-02|COLLECT COD|FOB|ic courts nag carefully after the 3685|55782|5783|5|37|64297.86|0.00|0.01|A|F|1992-03-02|1992-04-10|1992-03-04|NONE|FOB|. 
carefully sly requests are regular, regu 3686|121557|9094|1|7|11049.85|0.02|0.04|N|O|1998-07-15|1998-08-22|1998-07-30|DELIVER IN PERSON|TRUCK| furiously unusual accou 3686|199146|1666|2|38|47315.32|0.06|0.03|N|O|1998-09-04|1998-08-11|1998-09-19|DELIVER IN PERSON|AIR|y silent foxes! carefully ruthless cour 3686|44727|4728|3|31|51823.32|0.10|0.06|N|O|1998-09-09|1998-08-28|1998-10-09|COLLECT COD|MAIL|gle across the courts. furiously regu 3686|116078|8590|4|7|7658.49|0.10|0.01|N|O|1998-07-16|1998-09-02|1998-07-22|NONE|FOB|ake carefully carefully q 3687|144005|1548|1|32|33568.00|0.03|0.06|R|F|1993-05-07|1993-04-05|1993-05-25|DELIVER IN PERSON|AIR|deas cajole fo 3687|80501|3010|2|2|2963.00|0.00|0.08|R|F|1993-02-23|1993-03-25|1993-03-11|NONE|TRUCK| express requests. slyly regular depend 3687|173664|6182|3|10|17376.60|0.01|0.02|A|F|1993-02-11|1993-03-22|1993-03-09|NONE|FOB|ing pinto beans 3687|161643|6676|4|19|32388.16|0.02|0.05|A|F|1993-05-14|1993-04-24|1993-06-01|DELIVER IN PERSON|MAIL|ly final asymptotes according to t 3687|118039|5573|5|31|32767.93|0.07|0.08|A|F|1993-05-28|1993-03-20|1993-06-05|DELIVER IN PERSON|FOB|foxes cajole quickly about the furiously f 3712|140013|5042|1|27|28431.27|0.01|0.05|R|F|1992-02-01|1992-02-26|1992-03-02|TAKE BACK RETURN|SHIP|ctions. even accounts haggle alongside 3712|184915|2470|2|13|25998.83|0.03|0.03|R|F|1992-04-30|1992-02-11|1992-05-30|DELIVER IN PERSON|FOB|s around the furiously ironic account 3712|63923|8936|3|44|83024.48|0.01|0.01|A|F|1992-03-26|1992-02-19|1992-04-18|TAKE BACK RETURN|FOB|ously permanently regular req 3712|147022|4565|4|38|40622.76|0.01|0.06|A|F|1992-01-15|1992-03-24|1992-01-27|COLLECT COD|RAIL|s nag carefully-- even, reg 3713|111310|3822|1|41|54173.71|0.07|0.08|N|O|1998-05-11|1998-07-17|1998-05-22|COLLECT COD|RAIL|eposits wake blithely fina 3713|176914|9432|2|19|37827.29|0.04|0.04|N|O|1998-06-25|1998-07-24|1998-07-08|DELIVER IN PERSON|AIR|tructions serve blithely around the furi 3713|179662|9663|3|19|33091.54|0.03|0.02|N|O|1998-05-19|1998-07-06|1998-06-09|DELIVER IN PERSON|REG AIR|quests cajole careful 3713|168912|8913|4|45|89140.95|0.06|0.04|N|O|1998-06-15|1998-07-30|1998-07-14|DELIVER IN PERSON|MAIL|al pinto beans affix after the slyly 3713|89165|4182|5|46|53091.36|0.10|0.04|N|O|1998-08-22|1998-06-27|1998-08-31|NONE|MAIL|totes. carefully special theodolites s 3713|181492|4011|6|29|45631.21|0.09|0.03|N|O|1998-08-04|1998-06-13|1998-08-21|NONE|RAIL|the regular dugouts wake furiously sil 3713|129891|9892|7|14|26892.46|0.04|0.00|N|O|1998-07-19|1998-07-02|1998-07-28|DELIVER IN PERSON|SHIP|eposits impress according 3714|68433|3446|1|13|18218.59|0.07|0.03|N|O|1998-06-26|1998-06-17|1998-07-07|TAKE BACK RETURN|REG AIR| the furiously final 3714|145675|8190|2|14|24089.38|0.02|0.05|N|O|1998-05-30|1998-06-30|1998-05-31|DELIVER IN PERSON|RAIL|ending ideas. thinly unusual theodo 3714|158877|8878|3|16|30973.92|0.00|0.02|N|O|1998-05-25|1998-07-07|1998-06-17|TAKE BACK RETURN|AIR|ccounts cajole fu 3714|29287|4292|4|44|53516.32|0.04|0.02|N|O|1998-07-18|1998-07-10|1998-07-22|DELIVER IN PERSON|AIR|s. 
quickly ironic dugouts sublat 3715|96625|4153|1|13|21081.06|0.00|0.03|N|O|1996-05-11|1996-04-25|1996-06-09|TAKE BACK RETURN|SHIP|e quickly ironic 3715|168955|3988|2|16|32383.20|0.01|0.06|N|O|1996-06-28|1996-04-22|1996-06-30|TAKE BACK RETURN|AIR|usly regular pearls haggle final packages 3715|11629|1630|3|37|57002.94|0.05|0.02|N|O|1996-05-03|1996-04-30|1996-05-17|NONE|SHIP|ut the carefully expr 3716|31701|4205|1|10|16327.00|0.09|0.04|N|O|1997-12-02|1997-11-09|1997-12-14|TAKE BACK RETURN|SHIP|ts. quickly sly ideas slee 3716|193081|3082|2|39|45789.12|0.02|0.08|N|O|1997-11-27|1997-10-23|1997-12-24|COLLECT COD|REG AIR|even deposits. 3716|106882|6883|3|42|79332.96|0.02|0.08|N|O|1997-12-03|1997-10-12|1997-12-15|NONE|TRUCK| of the pend 3716|164383|1932|4|19|27500.22|0.05|0.08|N|O|1997-09-25|1997-10-18|1997-10-12|NONE|TRUCK|arefully unusual accounts. flu 3716|181638|4157|5|25|42990.75|0.06|0.05|N|O|1997-11-23|1997-10-24|1997-11-24|COLLECT COD|REG AIR|fully unusual accounts. carefu 3717|152273|7304|1|45|59637.15|0.07|0.04|N|O|1998-08-09|1998-08-18|1998-08-14|TAKE BACK RETURN|TRUCK|ests wake whithout the blithely final pl 3717|52014|9530|2|3|2898.03|0.01|0.07|N|O|1998-06-09|1998-07-31|1998-06-14|NONE|REG AIR|nside the regular packages sleep 3717|195221|5222|3|45|59229.90|0.05|0.08|N|O|1998-09-19|1998-07-22|1998-09-28|DELIVER IN PERSON|MAIL|s the blithely unu 3717|68318|3331|4|5|6431.55|0.06|0.03|N|O|1998-09-02|1998-08-20|1998-09-26|TAKE BACK RETURN|AIR|quickly among 3717|15272|5273|5|7|8310.89|0.09|0.02|N|O|1998-09-08|1998-07-18|1998-09-10|DELIVER IN PERSON|RAIL| after the packa 3717|63587|8600|6|38|58922.04|0.01|0.07|N|O|1998-07-10|1998-07-08|1998-07-29|COLLECT COD|RAIL|ly about the car 3717|105267|5268|7|28|35623.28|0.03|0.01|N|O|1998-07-25|1998-08-12|1998-08-16|COLLECT COD|RAIL|ts sleep q 3718|20775|5780|1|40|67830.80|0.01|0.04|N|O|1996-11-20|1996-12-17|1996-12-03|DELIVER IN PERSON|MAIL|out the express deposits 3718|162663|212|2|16|27610.56|0.02|0.06|N|O|1996-11-11|1996-12-25|1996-11-12|COLLECT COD|TRUCK|slyly even accounts. blithely special acco 3718|69553|7072|3|8|12180.40|0.05|0.03|N|O|1996-12-06|1996-12-06|1996-12-15|TAKE BACK RETURN|AIR| the even deposits sleep carefully b 3719|21430|8937|1|35|47300.05|0.06|0.08|N|O|1997-06-11|1997-04-03|1997-06-15|TAKE BACK RETURN|TRUCK|ly foxes. pending braids haggle furio 3719|173998|6516|2|2|4143.98|0.02|0.08|N|O|1997-02-17|1997-04-25|1997-03-03|NONE|REG AIR|ccounts boost carefu 3719|181502|6539|3|12|19002.00|0.05|0.06|N|O|1997-06-10|1997-05-04|1997-07-09|TAKE BACK RETURN|REG AIR|grate according to the 3719|89266|6791|4|13|16318.38|0.02|0.00|N|O|1997-05-03|1997-04-16|1997-05-27|TAKE BACK RETURN|SHIP|iously. regular dep 3719|77643|151|5|19|30792.16|0.06|0.08|N|O|1997-05-22|1997-03-20|1997-06-12|COLLECT COD|TRUCK|he regular ideas integrate acros 3719|141336|6365|6|43|59225.19|0.03|0.08|N|O|1997-05-08|1997-04-15|1997-06-06|COLLECT COD|RAIL|the furiously special pinto bean 3719|18433|8434|7|16|21622.88|0.10|0.01|N|O|1997-03-02|1997-03-18|1997-03-28|TAKE BACK RETURN|RAIL| express asymptotes. 
ir 3744|194756|9795|1|30|55522.50|0.05|0.06|A|F|1992-05-07|1992-02-12|1992-05-17|TAKE BACK RETURN|FOB|nts among 3745|136294|1321|1|18|23945.22|0.01|0.05|A|F|1993-10-17|1993-11-16|1993-11-13|DELIVER IN PERSON|SHIP| slyly bold pinto beans according to 3746|164012|4013|1|37|39812.37|0.07|0.00|A|F|1994-12-29|1994-10-25|1995-01-03|COLLECT COD|FOB|e of the careful 3746|143595|8624|2|28|45880.52|0.06|0.08|R|F|1994-09-20|1994-10-21|1994-09-27|DELIVER IN PERSON|FOB|s after the even, special requests 3746|187888|7889|3|3|5927.64|0.10|0.01|R|F|1994-11-03|1994-12-10|1994-11-12|NONE|MAIL| the silent ideas cajole carefully 3746|27486|2491|4|11|15548.28|0.00|0.05|R|F|1994-10-02|1994-11-19|1994-10-10|COLLECT COD|SHIP| ironic theodolites are among th 3747|140559|8102|1|42|67181.10|0.05|0.05|N|O|1996-11-10|1996-10-19|1996-11-19|TAKE BACK RETURN|REG AIR|y. blithely fina 3747|169914|9915|2|33|65469.03|0.01|0.03|N|O|1996-10-14|1996-11-12|1996-11-11|NONE|REG AIR| regular p 3747|138552|8553|3|30|47716.50|0.00|0.07|N|O|1996-12-16|1996-11-15|1996-12-17|NONE|RAIL|! furiously f 3747|32970|480|4|21|39962.37|0.00|0.06|N|O|1996-11-18|1996-09-23|1996-11-26|TAKE BACK RETURN|AIR|ithely bold orbits mold furiously blit 3747|125802|827|5|32|58489.60|0.08|0.05|N|O|1996-09-10|1996-11-04|1996-10-10|DELIVER IN PERSON|MAIL|quests shall h 3747|153991|3992|6|14|28629.86|0.08|0.07|N|O|1996-11-03|1996-10-29|1996-11-06|TAKE BACK RETURN|AIR|packages cajole carefu 3747|117993|505|7|23|46252.77|0.00|0.04|N|O|1996-11-08|1996-11-10|1996-12-03|NONE|REG AIR|kages are ironic 3748|103539|6050|1|12|18510.36|0.06|0.01|N|O|1998-04-17|1998-04-15|1998-05-12|NONE|AIR|old reques 3748|164717|7234|2|24|42761.04|0.08|0.04|N|O|1998-06-07|1998-05-02|1998-06-21|DELIVER IN PERSON|TRUCK|al deposits. blithely 3748|196085|3643|3|19|22440.52|0.05|0.01|N|O|1998-04-23|1998-05-17|1998-05-23|COLLECT COD|RAIL|pinto beans run carefully quic 3748|186693|1730|4|5|8898.45|0.00|0.07|N|O|1998-06-29|1998-05-06|1998-07-12|DELIVER IN PERSON|MAIL| regular accounts sleep quickly-- furious 3748|146699|9214|5|21|36659.49|0.07|0.08|N|O|1998-03-30|1998-04-07|1998-04-05|TAKE BACK RETURN|MAIL|fix carefully furiously express ideas. furi 3749|172888|5406|1|11|21569.68|0.07|0.05|N|O|1995-06-25|1995-05-23|1995-07-10|TAKE BACK RETURN|RAIL|egular requests along the 3749|128102|3127|2|9|10170.90|0.08|0.05|A|F|1995-04-23|1995-04-18|1995-04-26|NONE|REG AIR|uses cajole blithely pla 3749|198854|3893|3|31|60538.35|0.00|0.05|N|F|1995-06-11|1995-05-20|1995-06-27|COLLECT COD|REG AIR|s. foxes sleep slyly unusual grouc 3749|130302|5329|4|7|9326.10|0.07|0.06|A|F|1995-03-31|1995-04-05|1995-04-11|NONE|TRUCK|he slyly ironic packages 3749|182309|4828|5|14|19478.20|0.02|0.00|N|F|1995-06-11|1995-05-19|1995-07-11|DELIVER IN PERSON|SHIP|press instruc 3749|53716|1232|6|10|16697.10|0.10|0.03|N|O|1995-06-24|1995-05-24|1995-07-18|COLLECT COD|SHIP|essly. regular pi 3750|133299|5813|1|37|49294.73|0.04|0.03|N|O|1995-07-08|1995-07-28|1995-07-28|DELIVER IN PERSON|REG AIR|usly busy account 3750|151799|1800|2|33|61076.07|0.05|0.03|N|O|1995-06-27|1995-06-20|1995-07-03|TAKE BACK RETURN|REG AIR|theodolites haggle. slyly pendin 3750|79370|1878|3|20|26987.40|0.09|0.05|N|F|1995-06-17|1995-06-06|1995-06-28|TAKE BACK RETURN|REG AIR|ss, ironic requests! 
fur 3750|165817|3366|4|33|62132.73|0.04|0.03|N|F|1995-06-15|1995-06-04|1995-06-29|COLLECT COD|RAIL|ep blithely according to the flu 3750|82651|5160|5|1|1633.65|0.05|0.01|N|O|1995-07-24|1995-06-25|1995-08-21|DELIVER IN PERSON|REG AIR|l dolphins against the slyly 3750|112383|4895|6|47|65582.86|0.01|0.08|R|F|1995-05-11|1995-06-13|1995-06-02|TAKE BACK RETURN|FOB|slowly regular accounts. blithely ev 3751|171747|4265|1|37|67293.38|0.00|0.04|R|F|1994-04-30|1994-05-30|1994-05-30|NONE|REG AIR|ly express courts 3751|140684|3199|2|32|55189.76|0.03|0.05|R|F|1994-05-05|1994-07-02|1994-06-02|COLLECT COD|MAIL|rthogs could have to slee 3751|64236|9249|3|45|54010.35|0.08|0.06|R|F|1994-05-27|1994-06-19|1994-06-14|NONE|RAIL|according to 3751|13891|1395|4|39|70390.71|0.07|0.01|A|F|1994-08-16|1994-07-11|1994-09-12|COLLECT COD|TRUCK|refully according to the iro 3751|57100|2111|5|12|12685.20|0.02|0.03|A|F|1994-08-09|1994-06-30|1994-08-12|TAKE BACK RETURN|TRUCK|accounts wake furious 3751|75535|550|6|39|58910.67|0.02|0.08|R|F|1994-08-01|1994-06-01|1994-08-26|COLLECT COD|SHIP|to beans. pending, express packages c 3776|2128|9629|1|39|40174.68|0.05|0.01|R|F|1993-01-03|1993-02-05|1993-01-08|COLLECT COD|FOB|yly blithely pending packages 3776|158157|3188|2|14|17012.10|0.06|0.08|R|F|1992-12-30|1993-02-12|1993-01-27|DELIVER IN PERSON|RAIL|y special ideas. express packages pr 3776|140995|3510|3|49|99763.51|0.01|0.08|R|F|1992-12-03|1993-02-16|1992-12-28|TAKE BACK RETURN|RAIL|equests. final, thin grouches 3776|91553|9081|4|49|75682.95|0.08|0.05|A|F|1993-02-11|1993-01-06|1993-02-27|COLLECT COD|MAIL|es: careful warthogs haggle fluffi 3777|99495|7023|1|11|16439.39|0.02|0.03|A|F|1994-04-09|1994-06-05|1994-04-14|NONE|FOB|ld ideas. even theodolites 3777|7436|4937|2|10|13434.30|0.03|0.01|R|F|1994-05-22|1994-05-29|1994-06-13|COLLECT COD|RAIL|le. ironic depths a 3777|165409|5410|3|18|26539.20|0.10|0.06|R|F|1994-05-04|1994-05-23|1994-05-22|COLLECT COD|REG AIR|eful packages use slyly: even deposits 3777|17130|7131|4|35|36649.55|0.10|0.04|A|F|1994-05-25|1994-05-26|1994-06-13|COLLECT COD|AIR|s. carefully express asymptotes accordi 3777|97412|9922|5|14|19731.74|0.04|0.05|R|F|1994-05-06|1994-06-24|1994-05-31|NONE|TRUCK|ording to the iro 3778|56669|1680|1|21|34138.86|0.01|0.06|R|F|1993-05-27|1993-07-10|1993-06-03|COLLECT COD|REG AIR|ts. blithely special theodoli 3778|28589|8590|2|32|48562.56|0.09|0.00|A|F|1993-06-22|1993-08-18|1993-07-03|TAKE BACK RETURN|MAIL|tes affix carefully above the 3778|93436|5946|3|41|58606.63|0.05|0.00|R|F|1993-06-21|1993-07-27|1993-07-15|COLLECT COD|FOB|e the furiously ironi 3778|168841|6390|4|28|53475.52|0.03|0.05|R|F|1993-08-18|1993-07-10|1993-09-06|TAKE BACK RETURN|REG AIR|y silent orbits print carefully against 3778|97021|4549|5|28|28504.56|0.01|0.06|R|F|1993-09-02|1993-08-08|1993-10-02|DELIVER IN PERSON|FOB|r deposits. theodol 3778|19451|4454|6|26|35631.70|0.00|0.01|A|F|1993-09-24|1993-07-06|1993-10-22|NONE|TRUCK| against the fluffily 3778|104793|4794|7|49|88091.71|0.02|0.04|A|F|1993-06-13|1993-08-08|1993-07-04|DELIVER IN PERSON|MAIL|ans. furiously 3779|45738|3251|1|28|47144.44|0.04|0.05|N|O|1997-05-06|1997-04-01|1997-05-18|TAKE BACK RETURN|AIR|s. close requests sleep 3779|109004|1515|2|5|5065.00|0.07|0.03|N|O|1997-01-07|1997-03-26|1997-02-05|DELIVER IN PERSON|AIR|heodolites. 
slyly regular a 3780|126173|6174|1|25|29979.25|0.08|0.04|N|O|1996-06-27|1996-07-02|1996-07-22|NONE|AIR|l, unusual 3780|189496|7051|2|40|63419.60|0.10|0.04|N|O|1996-06-06|1996-05-29|1996-07-01|COLLECT COD|SHIP|gular deposits-- furiously regular 3781|13185|3186|1|48|52712.64|0.02|0.06|N|O|1996-08-22|1996-08-13|1996-09-15|NONE|REG AIR|equests may cajole careful 3781|187496|7497|2|39|61756.11|0.10|0.00|N|O|1996-08-20|1996-08-16|1996-09-01|DELIVER IN PERSON|REG AIR|unts are carefully. ir 3781|29895|9896|3|17|31023.13|0.01|0.03|N|O|1996-06-23|1996-09-04|1996-07-19|TAKE BACK RETURN|REG AIR|. theodolite 3781|30810|5817|4|15|26112.15|0.05|0.00|N|O|1996-08-23|1996-08-08|1996-09-06|TAKE BACK RETURN|AIR| carefully blithe 3781|15001|2505|5|23|21068.00|0.09|0.08|N|O|1996-09-05|1996-08-18|1996-09-27|DELIVER IN PERSON|SHIP|pendencies are b 3782|26844|4351|1|29|51354.36|0.01|0.07|N|O|1996-09-17|1996-10-03|1996-10-07|DELIVER IN PERSON|REG AIR|quickly unusual pinto beans. carefully fina 3782|152897|5413|2|10|19498.90|0.03|0.05|N|O|1996-09-07|1996-11-19|1996-10-04|COLLECT COD|FOB|ven pinto b 3782|135201|5202|3|30|37086.00|0.06|0.06|N|O|1996-12-19|1996-10-31|1997-01-14|TAKE BACK RETURN|MAIL|slyly even pinto beans hag 3782|116305|3839|4|34|44924.20|0.02|0.06|N|O|1996-11-07|1996-10-22|1996-11-19|DELIVER IN PERSON|MAIL|gage after the even 3782|129239|6776|5|40|50729.20|0.09|0.04|N|O|1996-12-16|1996-11-22|1997-01-01|COLLECT COD|AIR|s instructions. regular accou 3783|166578|1611|1|36|59204.52|0.04|0.08|R|F|1993-12-17|1994-02-26|1994-01-03|DELIVER IN PERSON|SHIP|ites haggle among the carefully unusu 3783|72856|5364|2|36|65838.60|0.02|0.02|R|F|1994-03-02|1994-02-09|1994-03-15|COLLECT COD|TRUCK|egular accounts 3783|84967|7476|3|50|97598.00|0.04|0.01|R|F|1994-03-14|1994-01-09|1994-04-10|DELIVER IN PERSON|FOB|he furiously regular deposits. 3783|26279|1284|4|37|44594.99|0.10|0.05|R|F|1993-12-09|1994-02-17|1993-12-30|COLLECT COD|REG AIR|ing to the ideas. regular accounts de 3808|42716|5221|1|28|46443.88|0.02|0.01|R|F|1994-05-27|1994-06-18|1994-06-22|TAKE BACK RETURN|FOB|lly final accounts alo 3808|126319|1344|2|47|63229.57|0.04|0.08|R|F|1994-06-12|1994-06-03|1994-07-02|COLLECT COD|TRUCK|fully for the quickly final deposits: flu 3808|30291|292|3|45|54958.05|0.00|0.03|R|F|1994-07-03|1994-05-29|1994-07-14|TAKE BACK RETURN|REG AIR| carefully special 3808|99468|9469|4|34|49893.64|0.07|0.04|R|F|1994-08-13|1994-07-22|1994-08-31|DELIVER IN PERSON|FOB| pearls will have to 3808|154755|2301|5|29|52482.75|0.08|0.03|A|F|1994-06-22|1994-05-26|1994-07-06|TAKE BACK RETURN|TRUCK| deposits across the pac 3808|167127|2160|6|44|52541.28|0.06|0.06|A|F|1994-06-07|1994-06-04|1994-06-25|NONE|REG AIR|the blithely regular foxes. even, final 3809|190322|2842|1|17|24009.44|0.10|0.04|N|O|1996-08-14|1996-07-05|1996-09-04|DELIVER IN PERSON|FOB|es detect furiously sil 3809|132963|2964|2|32|63870.72|0.01|0.02|N|O|1996-07-03|1996-06-01|1996-07-25|COLLECT COD|SHIP|xcuses would boost against the fluffily eve 3809|104789|4790|3|46|82513.88|0.10|0.06|N|O|1996-08-20|1996-06-01|1996-08-24|TAKE BACK RETURN|TRUCK|l asymptotes. special 3809|177249|7250|4|43|57028.32|0.00|0.04|N|O|1996-05-06|1996-06-22|1996-06-05|TAKE BACK RETURN|TRUCK|yly ironic decoys; regular, iron 3810|183458|1013|1|49|75531.05|0.05|0.01|R|F|1992-11-27|1992-10-30|1992-12-16|COLLECT COD|AIR|cajole. fur 3810|168724|1241|2|18|32268.96|0.01|0.04|A|F|1992-11-28|1992-11-15|1992-12-27|DELIVER IN PERSON|SHIP|s. 
furiously careful deposi 3810|136788|9302|3|41|74815.98|0.08|0.08|A|F|1992-10-26|1992-10-27|1992-11-05|COLLECT COD|SHIP|l requests boost slyly along the slyl 3810|181036|3555|4|11|12287.33|0.06|0.04|A|F|1992-12-18|1992-12-11|1993-01-15|DELIVER IN PERSON|MAIL| the pending pinto beans. expr 3811|163383|5900|1|24|34713.12|0.04|0.02|N|O|1998-07-13|1998-05-16|1998-08-12|TAKE BACK RETURN|TRUCK|deposits. slyly regular accounts cajo 3811|165958|8475|2|2|4047.90|0.01|0.08|N|O|1998-06-16|1998-06-16|1998-06-23|NONE|MAIL|slyly fluff 3811|42469|7478|3|19|26817.74|0.02|0.06|N|O|1998-07-20|1998-06-14|1998-07-29|NONE|MAIL|s boost blithely furiou 3811|170416|2934|4|50|74320.50|0.08|0.03|N|O|1998-07-28|1998-07-06|1998-08-16|COLLECT COD|FOB|ts are slyly fluffy ideas. furiou 3811|181104|3623|5|23|27257.30|0.00|0.04|N|O|1998-08-13|1998-07-09|1998-08-29|COLLECT COD|AIR|nstructions sleep quickly. slyly final 3811|1635|6636|6|35|53782.05|0.04|0.07|N|O|1998-04-17|1998-06-30|1998-04-25|NONE|REG AIR|yly final dolphins? quickly ironic frets 3812|144641|2184|1|33|55626.12|0.00|0.05|N|O|1996-10-10|1996-10-05|1996-10-15|TAKE BACK RETURN|MAIL|posits engage. ironic, regular p 3812|172434|7469|2|33|49712.19|0.06|0.03|N|O|1996-10-05|1996-10-13|1996-10-22|TAKE BACK RETURN|MAIL|inal excuses d 3813|175506|5507|1|37|58515.50|0.05|0.04|N|O|1998-10-13|1998-09-19|1998-10-28|NONE|REG AIR|ravely special packages haggle p 3813|122720|7745|2|39|67966.08|0.05|0.00|N|O|1998-08-30|1998-08-12|1998-09-29|COLLECT COD|FOB|y ideas. final ideas about the sp 3814|130445|7985|1|7|10328.08|0.02|0.02|R|F|1995-05-01|1995-05-09|1995-05-28|DELIVER IN PERSON|REG AIR|es sleep furiou 3814|172629|5147|2|14|23822.68|0.01|0.00|R|F|1995-03-17|1995-05-10|1995-04-16|DELIVER IN PERSON|AIR|sits along the final, ironic deposit 3814|167125|9642|3|36|42916.32|0.06|0.02|N|O|1995-06-19|1995-04-18|1995-06-28|COLLECT COD|SHIP|beans cajole quickly sl 3814|65004|5005|4|20|19380.00|0.04|0.07|R|F|1995-02-23|1995-03-26|1995-03-04|DELIVER IN PERSON|SHIP|. doggedly ironic deposits will have to wa 3814|106211|1232|5|15|18258.15|0.03|0.04|N|O|1995-06-23|1995-03-25|1995-07-09|COLLECT COD|SHIP| carefully final deposits haggle slyly 3814|82129|7146|6|47|52222.64|0.09|0.05|A|F|1995-04-16|1995-04-03|1995-05-14|DELIVER IN PERSON|AIR|nusual requests. bli 3814|131307|3821|7|12|16059.60|0.10|0.01|R|F|1995-03-18|1995-04-16|1995-03-20|TAKE BACK RETURN|REG AIR|ages cajole. packages haggle. final 3815|76048|8556|1|3|3072.12|0.07|0.00|N|O|1997-11-16|1997-11-15|1997-11-30|NONE|FOB|egular, express ideas. ironic, final dep 3815|129840|2353|2|11|20568.24|0.02|0.04|N|O|1997-11-01|1997-11-05|1997-11-27|COLLECT COD|TRUCK|sleep blithe 3840|186402|8921|1|45|66978.00|0.02|0.08|N|O|1998-10-31|1998-09-19|1998-11-30|DELIVER IN PERSON|TRUCK|o beans are. carefully final courts x 3840|45522|531|2|12|17610.24|0.04|0.07|N|O|1998-10-02|1998-08-19|1998-10-20|TAKE BACK RETURN|RAIL|xpress pinto beans. accounts a 3840|72943|2944|3|45|86217.30|0.02|0.05|N|O|1998-10-12|1998-10-12|1998-10-28|TAKE BACK RETURN|FOB|onic, even packages are. pe 3840|147247|7248|4|41|53063.84|0.07|0.02|N|O|1998-07-21|1998-10-08|1998-08-01|TAKE BACK RETURN|MAIL| nag slyly? slyly pending accounts 3840|172923|5441|5|7|13971.44|0.09|0.08|N|O|1998-09-17|1998-09-20|1998-10-14|DELIVER IN PERSON|MAIL|. 
furiously final gifts sleep carefully pin 3840|106112|6113|6|33|36897.63|0.10|0.02|N|O|1998-07-29|1998-10-06|1998-08-04|DELIVER IN PERSON|SHIP|hely silent deposits w 3841|156889|9405|1|1|1945.88|0.06|0.03|A|F|1994-10-10|1994-11-12|1994-10-21|DELIVER IN PERSON|AIR| boost even re 3841|20882|5887|2|31|55889.28|0.09|0.03|A|F|1995-01-24|1994-11-25|1995-02-20|TAKE BACK RETURN|SHIP|n theodolites shall promise carefully. qui 3841|151821|4337|3|40|74912.80|0.06|0.02|A|F|1995-02-02|1994-11-30|1995-02-14|TAKE BACK RETURN|MAIL|its. quickly regular ideas nag carefully 3841|49764|9765|4|9|15423.84|0.10|0.07|A|F|1994-11-21|1994-12-26|1994-11-26|NONE|FOB|s according to the courts shall nag s 3841|175794|5795|5|3|5609.37|0.04|0.02|R|F|1994-10-24|1994-12-07|1994-11-09|COLLECT COD|FOB|foxes integrate 3841|162372|9921|6|48|68849.76|0.03|0.00|R|F|1994-11-23|1994-11-22|1994-12-01|DELIVER IN PERSON|FOB| according to the regular, 3842|161893|9442|1|28|54736.92|0.05|0.07|A|F|1992-06-17|1992-06-03|1992-06-24|DELIVER IN PERSON|TRUCK|s excuses thrash carefully. 3842|121783|6808|2|21|37900.38|0.07|0.05|R|F|1992-07-15|1992-06-02|1992-07-21|NONE|RAIL|r pinto be 3842|193009|8048|3|28|30856.00|0.00|0.00|A|F|1992-06-20|1992-05-22|1992-07-13|DELIVER IN PERSON|MAIL|lly alongside of the 3842|87373|4898|4|15|20405.55|0.07|0.01|A|F|1992-06-26|1992-06-23|1992-07-09|COLLECT COD|MAIL|ave packages are slyl 3842|67312|4831|5|13|16631.03|0.09|0.02|R|F|1992-04-13|1992-06-22|1992-05-11|COLLECT COD|RAIL|t blithely. busily regular accounts alon 3842|106971|4502|6|24|47471.28|0.08|0.08|R|F|1992-08-05|1992-06-29|1992-08-16|TAKE BACK RETURN|MAIL|phins are quickly 3843|14732|4733|1|7|11527.11|0.10|0.03|N|O|1997-02-13|1997-02-21|1997-02-20|TAKE BACK RETURN|SHIP|slyly even instructions. furiously eve 3843|484|2985|2|30|41534.40|0.01|0.05|N|O|1997-02-14|1997-03-25|1997-03-13|DELIVER IN PERSON|AIR| wake. slyly even packages boost 3844|134522|7036|1|2|3113.04|0.03|0.07|R|F|1995-02-24|1995-02-03|1995-03-18|TAKE BACK RETURN|AIR|es haggle final acco 3844|101965|6986|2|5|9834.80|0.10|0.03|R|F|1995-04-29|1995-02-24|1995-05-05|TAKE BACK RETURN|RAIL| unwind quickly about the pending, i 3845|33809|3810|1|44|76683.20|0.01|0.08|A|F|1992-07-20|1992-07-15|1992-07-24|DELIVER IN PERSON|REG AIR|s haggle among the fluffily regula 3845|23875|1382|2|16|28781.92|0.09|0.05|A|F|1992-08-08|1992-06-08|1992-08-26|DELIVER IN PERSON|SHIP|ely bold ideas use. ex 3845|58967|6483|3|17|32741.32|0.08|0.01|A|F|1992-06-12|1992-07-05|1992-06-26|TAKE BACK RETURN|RAIL|counts haggle. reg 3845|45398|407|4|1|1343.39|0.04|0.05|R|F|1992-05-21|1992-06-07|1992-06-17|COLLECT COD|REG AIR| blithely ironic t 3845|195390|5391|5|27|40105.53|0.00|0.05|R|F|1992-08-20|1992-07-17|1992-09-02|COLLECT COD|REG AIR|kages. care 3845|104773|7284|6|30|53333.10|0.09|0.06|R|F|1992-08-21|1992-07-07|1992-08-25|COLLECT COD|FOB|counts do wake blithely. ironic requests 3846|60152|2659|1|15|16682.25|0.06|0.03|N|O|1998-02-17|1998-04-27|1998-02-21|NONE|REG AIR|uternes. carefully even 3846|170299|300|2|30|41078.70|0.08|0.07|N|O|1998-05-01|1998-03-12|1998-05-20|TAKE BACK RETURN|FOB|deposits according to the fur 3846|14707|2211|3|49|79463.30|0.08|0.07|N|O|1998-02-14|1998-03-22|1998-02-17|DELIVER IN PERSON|RAIL|efully even packages against the blithe 3846|164058|1607|4|33|37027.65|0.05|0.00|N|O|1998-05-12|1998-03-14|1998-05-14|DELIVER IN PERSON|TRUCK|s instructions are. fu 3847|188290|8291|1|7|9648.03|0.08|0.00|A|F|1993-05-06|1993-06-06|1993-05-22|COLLECT COD|MAIL| about the blithely daring Tiresias. 
fl 3872|180331|5368|1|28|39517.24|0.10|0.04|N|O|1996-11-05|1996-11-10|1996-11-24|DELIVER IN PERSON|REG AIR|t after the carefully ironic excuses. f 3872|16635|1638|2|38|58961.94|0.04|0.05|N|O|1996-10-18|1996-12-03|1996-11-15|TAKE BACK RETURN|AIR|iously against the ironic, unusual a 3872|168467|6016|3|18|27638.28|0.07|0.07|N|O|1996-12-25|1996-10-24|1997-01-08|TAKE BACK RETURN|SHIP|s. regular, brave accounts sleep blith 3872|10643|644|4|41|63699.24|0.07|0.03|N|O|1996-11-23|1996-11-12|1996-12-03|COLLECT COD|REG AIR|ly regular epitaphs boost 3872|69266|4279|5|42|51880.92|0.03|0.00|N|O|1997-01-03|1996-10-12|1997-01-16|COLLECT COD|MAIL|s the furio 3872|139604|7144|6|40|65744.00|0.07|0.05|N|O|1997-01-02|1996-10-29|1997-01-14|NONE|REG AIR|nts? regularly ironic ex 3873|67790|5309|1|19|33398.01|0.04|0.04|N|O|1998-05-15|1998-05-10|1998-05-17|NONE|FOB|y final ac 3873|144960|9989|2|44|88218.24|0.05|0.05|N|O|1998-07-23|1998-05-22|1998-08-14|COLLECT COD|AIR|yly even platelets wake. 3873|139267|1781|3|29|37881.54|0.01|0.04|N|O|1998-06-22|1998-05-20|1998-07-05|COLLECT COD|REG AIR|olphins af 3874|169193|4226|1|21|26505.99|0.09|0.08|R|F|1993-06-19|1993-07-20|1993-07-08|DELIVER IN PERSON|SHIP| requests cajole fluff 3874|18904|3907|2|48|87499.20|0.06|0.07|R|F|1993-06-13|1993-07-20|1993-06-20|NONE|RAIL| ideas throughout 3875|80973|8498|1|24|46895.28|0.02|0.08|N|O|1997-10-15|1997-11-27|1997-11-09|COLLECT COD|AIR|ecial packages. 3875|112716|5228|2|49|84706.79|0.04|0.04|N|O|1997-10-18|1997-10-13|1997-10-19|NONE|MAIL|sleep furiously about the deposits. quickl 3876|140105|2620|1|12|13741.20|0.06|0.07|N|O|1996-09-16|1996-10-23|1996-10-05|TAKE BACK RETURN|REG AIR|y above the pending tithes. blithely ironi 3876|139212|1726|2|37|46294.77|0.00|0.03|N|O|1996-11-30|1996-10-18|1996-12-18|DELIVER IN PERSON|AIR|t dependencies. blithely final packages u 3876|126650|6651|3|41|68742.65|0.02|0.04|N|O|1996-10-15|1996-10-17|1996-10-19|NONE|AIR| quickly blit 3877|49774|2279|1|12|20685.24|0.06|0.01|R|F|1993-05-30|1993-08-09|1993-06-24|TAKE BACK RETURN|FOB|nal requests. even requests are. pac 3877|144635|2178|2|47|78942.61|0.05|0.00|A|F|1993-08-01|1993-08-16|1993-08-04|NONE|FOB|furiously quick requests nag along the theo 3877|79690|7212|3|44|73466.36|0.09|0.00|A|F|1993-06-07|1993-07-15|1993-07-06|DELIVER IN PERSON|REG AIR|elets. quickly regular accounts caj 3877|147844|7845|4|36|68106.24|0.06|0.01|A|F|1993-07-27|1993-07-13|1993-08-11|DELIVER IN PERSON|AIR|lithely about the dogged ideas. ac 3877|4649|4650|5|41|63699.24|0.03|0.07|A|F|1993-06-30|1993-07-20|1993-07-01|DELIVER IN PERSON|FOB|integrate against the expres 3877|122502|2503|6|7|10671.50|0.04|0.08|R|F|1993-06-14|1993-07-09|1993-06-28|NONE|TRUCK|lar dolphins cajole silently 3878|199696|9697|1|6|10774.14|0.07|0.04|N|O|1997-06-21|1997-05-22|1997-07-01|COLLECT COD|FOB|s. regular instru 3878|87395|7396|2|13|17971.07|0.01|0.06|N|O|1997-06-08|1997-06-03|1997-06-25|TAKE BACK RETURN|TRUCK|leep ruthlessly about the carefu 3878|40052|2557|3|20|19841.00|0.08|0.03|N|O|1997-06-20|1997-05-24|1997-07-20|TAKE BACK RETURN|MAIL|the furiously careful ideas cajole slyly sl 3878|151976|1977|4|20|40559.40|0.01|0.07|N|O|1997-07-13|1997-05-22|1997-07-20|NONE|FOB|about the carefully ironic pa 3879|125689|714|1|45|77160.60|0.10|0.08|N|O|1996-03-18|1996-01-03|1996-04-03|COLLECT COD|RAIL|ly according to the expr 3879|44020|1533|2|35|33740.70|0.00|0.07|N|O|1995-12-08|1996-01-23|1995-12-28|TAKE BACK RETURN|MAIL|o beans. accounts cajole furiously. 
re 3904|37424|4934|1|22|29951.24|0.04|0.03|N|O|1998-02-02|1998-02-09|1998-02-10|TAKE BACK RETURN|REG AIR|structions cajole carefully. carefully f 3904|183753|1308|2|19|34898.25|0.09|0.01|N|O|1998-02-10|1998-02-13|1998-02-20|TAKE BACK RETURN|AIR| excuses sleep slyly according to th 3905|100739|8270|1|43|74808.39|0.07|0.08|A|F|1994-03-30|1994-02-18|1994-04-09|DELIVER IN PERSON|REG AIR|uses are care 3905|115915|8427|2|7|13516.37|0.03|0.00|R|F|1994-03-01|1994-02-19|1994-03-11|DELIVER IN PERSON|AIR|ully furiously furious packag 3905|169943|4976|3|6|12077.64|0.07|0.02|R|F|1994-04-07|1994-03-07|1994-04-21|DELIVER IN PERSON|RAIL|ow furiously. deposits wake ironic 3906|152987|5503|1|42|85679.16|0.00|0.04|R|F|1992-09-03|1992-07-22|1992-09-04|COLLECT COD|RAIL|jole blithely after the furiously regular 3906|39340|4347|2|50|63967.00|0.01|0.07|R|F|1992-09-24|1992-08-24|1992-09-29|NONE|MAIL|ke slyly. stealt 3906|179655|4690|3|15|26019.75|0.06|0.02|R|F|1992-07-30|1992-08-26|1992-08-02|TAKE BACK RETURN|FOB|dependencies at the 3906|58264|8265|4|36|44001.36|0.08|0.08|A|F|1992-08-07|1992-08-08|1992-08-24|NONE|SHIP|y. ironic deposits haggle sl 3907|111784|4296|1|41|73626.98|0.06|0.02|A|F|1992-09-13|1992-10-23|1992-09-29|COLLECT COD|MAIL|ackages wake along the carefully regul 3907|144491|2034|2|41|62955.09|0.03|0.00|A|F|1992-10-25|1992-10-17|1992-11-01|TAKE BACK RETURN|RAIL|s above the unusual ideas sleep furiousl 3907|51501|9017|3|45|65362.50|0.02|0.07|R|F|1992-09-21|1992-09-19|1992-10-18|COLLECT COD|REG AIR| about the regular pac 3907|175693|728|4|48|84897.12|0.05|0.07|A|F|1992-09-24|1992-10-16|1992-10-06|DELIVER IN PERSON|TRUCK|nt asymptotes lose across th 3907|61958|1959|5|22|42238.90|0.09|0.01|R|F|1992-09-20|1992-10-30|1992-09-29|TAKE BACK RETURN|TRUCK|ly. furiously unusual deposits use afte 3907|125062|2599|6|34|36960.04|0.02|0.02|R|F|1992-09-06|1992-10-08|1992-09-12|COLLECT COD|FOB| requests according to the slyly pending 3907|109234|4255|7|8|9945.84|0.10|0.01|A|F|1992-09-18|1992-10-29|1992-09-27|NONE|REG AIR|furiously final packages. 3908|91840|4350|1|50|91592.00|0.05|0.04|R|F|1993-06-19|1993-04-27|1993-07-05|DELIVER IN PERSON|MAIL| even accounts wake 3908|147084|7085|2|8|9048.64|0.06|0.03|A|F|1993-03-12|1993-04-13|1993-03-22|DELIVER IN PERSON|SHIP|r instructions was requests. ironically 3909|177914|5466|1|30|59757.30|0.03|0.07|N|O|1998-10-17|1998-10-14|1998-10-28|COLLECT COD|TRUCK|ly even deposits across the ironic notorni 3909|190896|5935|2|46|91396.94|0.03|0.01|N|O|1998-10-08|1998-10-15|1998-10-24|NONE|FOB|the blithely unusual ideas 3910|138602|3629|1|10|16406.00|0.00|0.08|N|O|1996-10-18|1996-10-31|1996-11-14|DELIVER IN PERSON|FOB|tions boost furiously unusual e 3910|70782|5797|2|31|54336.18|0.05|0.03|N|O|1996-12-22|1996-11-14|1997-01-01|TAKE BACK RETURN|SHIP|ess instructions. 3910|19716|4719|3|6|9814.26|0.04|0.04|N|O|1996-12-08|1996-10-30|1996-12-31|DELIVER IN PERSON|MAIL|ly sly platelets are fluffily slyly si 3910|152611|5127|4|1|1663.61|0.03|0.06|N|O|1996-09-12|1996-10-21|1996-09-19|DELIVER IN PERSON|FOB|s sleep neve 3911|112842|5354|1|10|18548.40|0.07|0.06|N|O|1995-06-22|1995-05-30|1995-06-28|COLLECT COD|FOB|ss theodolites are blithely along t 3911|118861|6395|2|14|26318.04|0.08|0.05|R|F|1995-04-28|1995-05-03|1995-05-22|NONE|RAIL|e blithely brave depo 3911|91085|6104|3|12|12912.96|0.10|0.05|R|F|1995-04-04|1995-04-16|1995-04-10|COLLECT COD|FOB|uctions. 
blithely regula 3936|136372|6373|1|25|35209.25|0.06|0.03|N|O|1996-12-03|1996-12-27|1997-01-01|DELIVER IN PERSON|RAIL|gular requests nag quic 3936|187641|160|2|24|41487.36|0.10|0.07|N|O|1996-11-22|1997-01-01|1996-12-08|NONE|AIR|ns. accounts mold fl 3936|82065|7082|3|42|43976.52|0.00|0.07|N|O|1997-01-03|1997-01-29|1997-01-14|COLLECT COD|AIR|elets wake amo 3936|61051|8570|4|12|12144.60|0.06|0.05|N|O|1996-11-25|1997-01-09|1996-12-06|DELIVER IN PERSON|SHIP|ithely across the carefully brave req 3936|83519|3520|5|35|52587.85|0.02|0.08|N|O|1996-12-04|1997-01-06|1996-12-22|NONE|SHIP|lly ironic requ 3936|102029|4540|6|26|26806.52|0.01|0.02|N|O|1997-02-27|1997-01-16|1997-03-22|NONE|RAIL|quickly pen 3937|69439|4452|1|48|67604.64|0.10|0.02|N|O|1998-03-15|1998-02-22|1998-03-30|DELIVER IN PERSON|FOB|gainst the thinl 3937|47321|2330|2|30|38049.60|0.01|0.07|N|O|1998-01-17|1998-01-03|1998-02-08|COLLECT COD|TRUCK|al packages slee 3937|114474|2008|3|27|40188.69|0.03|0.00|N|O|1998-02-06|1998-01-12|1998-02-20|NONE|MAIL|ven ideas. slyly expr 3937|153056|5572|4|50|55452.50|0.01|0.02|N|O|1998-01-15|1998-01-09|1998-02-04|DELIVER IN PERSON|AIR|ong the carefully exp 3937|2904|405|5|29|52400.10|0.03|0.07|N|O|1998-03-06|1998-02-22|1998-03-14|NONE|TRUCK|nt pinto beans above the pending instr 3937|192002|7041|6|6|6564.00|0.00|0.00|N|O|1998-01-24|1998-02-13|1998-01-27|DELIVER IN PERSON|FOB|into beans. slyly silent orbits alongside o 3937|163846|1395|7|1|1909.84|0.02|0.05|N|O|1998-03-29|1998-01-08|1998-04-27|TAKE BACK RETURN|TRUCK|refully agains 3938|158367|3398|1|46|65566.56|0.10|0.07|R|F|1993-05-20|1993-05-04|1993-06-12|DELIVER IN PERSON|FOB|ly even foxes are slyly fu 3939|159523|2039|1|8|12660.16|0.03|0.06|N|O|1996-01-29|1996-04-05|1996-02-26|COLLECT COD|REG AIR|e packages. express, pen 3940|177038|2073|1|33|36795.99|0.10|0.07|N|O|1996-05-19|1996-04-19|1996-05-23|TAKE BACK RETURN|RAIL|ly ironic packages about the pending accou 3940|68501|6020|2|40|58780.00|0.08|0.02|N|O|1996-02-29|1996-03-22|1996-03-04|NONE|MAIL|ts. regular fox 3940|88876|8877|3|8|14918.96|0.07|0.08|N|O|1996-04-04|1996-04-12|1996-04-18|DELIVER IN PERSON|RAIL|ions cajole furiously regular pinto beans. 3940|136206|8720|4|11|13664.20|0.09|0.05|N|O|1996-03-09|1996-05-13|1996-03-17|COLLECT COD|REG AIR|e of the special packages. furiously 3940|942|5943|5|41|75560.54|0.00|0.07|N|O|1996-05-08|1996-05-03|1996-06-03|COLLECT COD|MAIL|thily. deposits cajole. 3941|40836|837|1|47|83511.01|0.05|0.07|N|O|1996-11-24|1996-10-09|1996-12-22|DELIVER IN PERSON|RAIL| carefully pending 3941|122533|70|2|19|29555.07|0.05|0.00|N|O|1996-11-10|1996-10-26|1996-12-05|COLLECT COD|RAIL|eposits haggle furiously even 3941|9693|2194|3|2|3205.38|0.01|0.03|N|O|1996-12-04|1996-10-01|1996-12-25|NONE|REG AIR|es wake after the 3941|109348|6879|4|29|39362.86|0.00|0.03|N|O|1996-09-14|1996-10-04|1996-09-19|NONE|MAIL|g the blithely 3942|182281|7318|1|6|8179.68|0.05|0.05|A|F|1993-07-01|1993-09-14|1993-07-23|DELIVER IN PERSON|SHIP|ep ruthlessly carefully final accounts: s 3942|193715|8754|2|5|9043.55|0.06|0.02|R|F|1993-09-27|1993-09-24|1993-10-07|DELIVER IN PERSON|MAIL|. 
fluffily pending deposits above the flu 3942|155497|8013|3|25|38812.25|0.04|0.06|R|F|1993-09-13|1993-08-01|1993-09-29|COLLECT COD|RAIL|d the quick packages 3943|197698|5256|1|15|26935.35|0.03|0.01|N|O|1997-01-13|1996-12-17|1997-02-02|COLLECT COD|REG AIR| grow fluffily according to the 3943|95557|5558|2|9|13972.95|0.00|0.06|N|O|1996-11-27|1997-01-03|1996-12-17|COLLECT COD|RAIL|refully ironic 3943|16818|1821|3|32|55513.92|0.00|0.02|N|O|1996-10-22|1996-12-17|1996-11-04|TAKE BACK RETURN|TRUCK| unusual ideas into the furiously even pack 3943|49034|9035|4|5|4915.15|0.04|0.04|N|O|1997-01-09|1996-11-10|1997-02-06|COLLECT COD|RAIL|arefully regular deposits accord 3968|53646|6152|1|27|43190.28|0.04|0.05|N|O|1997-04-25|1997-04-17|1997-05-11|TAKE BACK RETURN|MAIL|t silently. 3968|25166|2673|2|45|49102.20|0.00|0.07|N|O|1997-06-18|1997-04-24|1997-06-25|DELIVER IN PERSON|FOB|ully slyly fi 3968|155118|5119|3|43|50443.73|0.07|0.06|N|O|1997-04-30|1997-05-14|1997-05-18|TAKE BACK RETURN|SHIP|ly regular accounts 3968|60159|5172|4|7|7834.05|0.07|0.02|N|O|1997-03-30|1997-05-01|1997-04-12|DELIVER IN PERSON|SHIP|efully bold instructions. express 3969|51062|8578|1|39|39509.34|0.04|0.04|N|O|1997-06-12|1997-06-13|1997-07-05|NONE|MAIL|ly bold ideas s 3969|196745|4303|2|26|47885.24|0.05|0.03|N|O|1997-07-08|1997-07-30|1997-07-10|TAKE BACK RETURN|AIR|fluffily; braids detect. 3969|78703|3718|3|46|77358.20|0.04|0.02|N|O|1997-05-29|1997-06-15|1997-06-10|TAKE BACK RETURN|SHIP|fully final requests sleep stealthily. care 3969|150168|2684|4|21|25581.36|0.07|0.04|N|O|1997-08-31|1997-07-16|1997-09-02|TAKE BACK RETURN|MAIL|unts doze quickly final reque 3969|71513|1514|5|40|59380.40|0.09|0.00|N|O|1997-05-19|1997-08-02|1997-06-05|COLLECT COD|TRUCK|lar requests cajole furiously blithely regu 3969|104963|7474|6|4|7871.84|0.02|0.01|N|O|1997-06-04|1997-07-31|1997-06-13|COLLECT COD|REG AIR|dencies wake blithely? quickly even theodo 3970|87268|7269|1|2|2510.52|0.01|0.07|R|F|1992-04-24|1992-06-03|1992-05-16|TAKE BACK RETURN|RAIL|carefully pending foxes wake blithely 3970|108084|5615|2|18|19657.44|0.03|0.08|A|F|1992-06-06|1992-06-18|1992-07-05|DELIVER IN PERSON|TRUCK| maintain slyly. ir 3970|153569|1115|3|10|16225.60|0.10|0.04|A|F|1992-07-01|1992-05-31|1992-07-02|NONE|AIR| special packages wake after the final br 3970|21600|9107|4|34|51734.40|0.05|0.00|A|F|1992-06-25|1992-05-23|1992-07-12|COLLECT COD|SHIP|y final gifts are. carefully pe 3970|29048|6555|5|23|22471.92|0.05|0.04|A|F|1992-06-04|1992-06-14|1992-06-13|COLLECT COD|TRUCK| above the final braids. regular 3970|8771|6272|6|46|77269.42|0.07|0.04|R|F|1992-04-29|1992-05-14|1992-05-24|NONE|FOB|yly ironic 3970|4076|6577|7|46|45083.22|0.08|0.08|R|F|1992-05-02|1992-05-12|1992-05-10|COLLECT COD|MAIL|ix slyly. quickly silen 3971|95389|7899|1|47|65065.86|0.06|0.04|N|O|1996-07-07|1996-08-08|1996-08-01|TAKE BACK RETURN|RAIL|e slyly final dependencies x-ray 3971|190223|7781|2|2|2626.44|0.04|0.03|N|O|1996-07-15|1996-08-12|1996-07-26|NONE|SHIP|haggle abou 3972|50440|7956|1|2|2780.88|0.05|0.03|A|F|1994-07-24|1994-06-30|1994-08-13|TAKE BACK RETURN|SHIP|y final theodolite 3973|29590|4595|1|21|31911.39|0.02|0.06|R|F|1992-06-18|1992-06-03|1992-07-02|COLLECT COD|REG AIR|equests. furiously 3973|114205|9228|2|37|45110.40|0.07|0.00|A|F|1992-05-29|1992-05-04|1992-06-23|TAKE BACK RETURN|SHIP|inos wake fluffily. 
pending requests nag 3973|39243|1747|3|40|47289.60|0.08|0.05|R|F|1992-05-03|1992-06-09|1992-05-21|COLLECT COD|RAIL|g the carefully blithe f 3974|21091|6096|1|47|47568.23|0.10|0.03|N|O|1996-06-03|1996-05-08|1996-06-28|NONE|TRUCK|dencies above the re 3974|60454|5467|2|17|24045.65|0.05|0.07|N|O|1996-04-05|1996-05-21|1996-04-28|COLLECT COD|TRUCK|ions eat slyly after the blithely 3975|56042|3558|1|38|37925.52|0.01|0.05|N|O|1995-08-02|1995-06-18|1995-08-19|COLLECT COD|TRUCK|es are furiously: furi 4000|195697|5698|1|41|73500.29|0.06|0.01|A|F|1992-03-02|1992-03-14|1992-03-27|COLLECT COD|FOB|ve the even, fi 4000|74526|7034|2|44|66022.88|0.09|0.06|A|F|1992-03-27|1992-02-18|1992-03-31|COLLECT COD|AIR|equests use blithely blithely bold d 4001|105185|206|1|26|30944.68|0.00|0.01|N|O|1997-07-26|1997-06-18|1997-08-08|DELIVER IN PERSON|RAIL|tegrate blithely 4001|40918|8431|2|19|35319.29|0.03|0.02|N|O|1997-08-23|1997-06-15|1997-09-18|COLLECT COD|SHIP|ackages. carefully ironi 4001|93081|3082|3|18|19333.44|0.07|0.00|N|O|1997-06-04|1997-06-22|1997-06-13|DELIVER IN PERSON|MAIL|lithely ironic d 4001|1923|9424|4|39|71171.88|0.00|0.00|N|O|1997-06-13|1997-06-17|1997-06-25|NONE|SHIP| dogged excuses. blithe 4002|110728|3240|1|35|60855.20|0.01|0.08|N|O|1997-05-16|1997-06-15|1997-06-02|DELIVER IN PERSON|TRUCK|eep. quickly 4002|197837|7838|2|20|38696.60|0.00|0.03|N|O|1997-06-15|1997-05-20|1997-07-11|NONE|MAIL|lly even ins 4002|39841|4848|3|6|10685.04|0.08|0.07|N|O|1997-05-02|1997-07-07|1997-05-16|TAKE BACK RETURN|RAIL| furiously furiously special theodoli 4002|198650|6208|4|6|10491.90|0.06|0.06|N|O|1997-07-01|1997-05-15|1997-07-31|NONE|MAIL|he slyly iro 4002|98186|696|5|4|4736.72|0.08|0.07|N|O|1997-05-06|1997-06-15|1997-05-24|NONE|REG AIR|ccording to the careful 4003|51112|8628|1|18|19135.98|0.04|0.07|R|F|1993-02-02|1993-04-15|1993-02-28|TAKE BACK RETURN|AIR|ar grouches s 4004|120061|62|1|23|24864.38|0.07|0.02|A|F|1993-08-12|1993-07-13|1993-08-16|TAKE BACK RETURN|TRUCK| bold theodolites? special packages accordi 4004|63505|3506|2|47|69019.50|0.07|0.04|R|F|1993-06-25|1993-08-03|1993-07-12|NONE|SHIP|thely instead of the even, unu 4004|113136|3137|3|39|44816.07|0.10|0.05|R|F|1993-07-12|1993-07-27|1993-07-18|NONE|MAIL|ccounts sleep furious 4004|73634|6142|4|46|73950.98|0.10|0.04|R|F|1993-09-04|1993-07-13|1993-09-28|COLLECT COD|FOB|ncies. slyly pending dolphins sleep furio 4004|154767|7283|5|9|16395.84|0.04|0.06|A|F|1993-08-25|1993-06-10|1993-09-24|COLLECT COD|MAIL|ly ironic requests. quickly pending ide 4004|160037|2554|6|44|48269.32|0.07|0.05|R|F|1993-07-25|1993-07-23|1993-08-16|TAKE BACK RETURN|REG AIR|ut the sauternes. bold, ironi 4004|125377|2914|7|20|28047.40|0.07|0.05|A|F|1993-06-19|1993-06-14|1993-07-04|COLLECT COD|REG AIR|. ironic deposits cajole blithely? 4005|3667|1168|1|26|40837.16|0.09|0.05|N|O|1996-12-01|1997-02-03|1996-12-15|NONE|REG AIR| to the quic 4005|16605|6606|2|28|42604.80|0.02|0.06|N|O|1996-12-11|1997-01-24|1996-12-17|DELIVER IN PERSON|REG AIR|ly carefully ironic deposits. slyly 4005|71317|8839|3|28|36072.68|0.03|0.01|N|O|1996-12-08|1997-01-14|1996-12-30|TAKE BACK RETURN|MAIL|y pending dependenc 4005|14650|7152|4|49|76667.85|0.09|0.00|N|O|1997-01-31|1996-12-24|1997-03-02|NONE|RAIL|tions sleep across the silent d 4005|5747|5748|5|14|23138.36|0.09|0.08|N|O|1996-11-27|1997-01-09|1996-12-25|NONE|TRUCK|ld requests. 
slyly final instructi 4006|54773|2289|1|11|19005.47|0.05|0.08|A|F|1995-04-29|1995-02-21|1995-05-20|TAKE BACK RETURN|RAIL|ress foxes cajole quick 4006|158031|3062|2|18|19602.54|0.05|0.03|A|F|1995-01-29|1995-03-08|1995-02-02|TAKE BACK RETURN|MAIL|gouts! slyly iron 4006|23176|3177|3|15|16487.55|0.01|0.02|R|F|1995-02-23|1995-04-02|1995-02-25|TAKE BACK RETURN|RAIL|n deposits cajole slyl 4006|113466|3467|4|25|36986.50|0.00|0.07|A|F|1995-02-23|1995-02-09|1995-02-24|DELIVER IN PERSON|SHIP| requests use depos 4007|56091|1102|1|32|33506.88|0.00|0.03|R|F|1993-09-30|1993-08-16|1993-10-03|DELIVER IN PERSON|RAIL|nal accounts across t 4007|115470|7982|2|41|60904.27|0.04|0.06|A|F|1993-10-11|1993-08-30|1993-11-04|DELIVER IN PERSON|TRUCK|eposits. regular epitaphs boost blithely. 4007|101478|9009|3|5|7397.35|0.09|0.06|A|F|1993-09-17|1993-08-29|1993-10-12|TAKE BACK RETURN|FOB|y unusual packa 4007|137992|5532|4|15|30449.85|0.05|0.02|A|F|1993-09-01|1993-07-19|1993-09-03|DELIVER IN PERSON|FOB|le furiously quickly 4007|25894|5895|5|23|41857.47|0.02|0.07|A|F|1993-10-08|1993-09-09|1993-10-23|COLLECT COD|MAIL|ter the accounts. expr 4032|101625|1626|1|8|13012.96|0.06|0.00|N|O|1998-06-04|1998-05-17|1998-07-03|TAKE BACK RETURN|RAIL|ometimes even cou 4032|1652|9153|2|27|41948.55|0.09|0.00|N|O|1998-05-31|1998-04-19|1998-06-24|COLLECT COD|REG AIR|le furiously according to 4032|153608|6124|3|23|38216.80|0.09|0.06|N|O|1998-06-12|1998-05-11|1998-06-24|COLLECT COD|MAIL|ording to the 4032|84644|7153|4|10|16286.40|0.09|0.05|N|O|1998-03-31|1998-04-22|1998-04-07|NONE|REG AIR| carefully bol 4033|109529|9530|1|27|41540.04|0.01|0.04|R|F|1993-08-08|1993-08-14|1993-08-09|NONE|AIR|pinto beans 4033|37321|9825|2|34|42782.88|0.07|0.00|R|F|1993-07-19|1993-08-05|1993-07-26|NONE|RAIL|t the blithely dogg 4034|189949|4986|1|48|97869.12|0.03|0.03|A|F|1994-03-01|1994-01-16|1994-03-16|NONE|RAIL| blithely regular requests play carefull 4034|56730|9236|2|47|79276.31|0.07|0.05|A|F|1994-01-27|1993-12-26|1994-02-04|NONE|TRUCK|eodolites was slyly ironic ideas. de 4034|53884|3885|3|43|79028.84|0.10|0.03|A|F|1993-11-29|1994-01-08|1993-12-10|DELIVER IN PERSON|FOB|posits wake carefully af 4034|27679|7680|4|46|73906.82|0.06|0.00|A|F|1994-02-22|1994-01-09|1994-03-04|DELIVER IN PERSON|AIR|uests. furiously unusual instructions wake 4034|195196|2754|5|7|9038.33|0.07|0.06|R|F|1994-03-04|1994-01-22|1994-04-01|NONE|AIR|y even theodolites. slyly regular instru 4034|49283|6796|6|5|6161.40|0.01|0.06|A|F|1994-02-12|1994-01-24|1994-02-13|COLLECT COD|AIR|fully around the furiously ironic re 4035|96790|6791|1|4|7147.16|0.08|0.03|R|F|1992-04-21|1992-04-23|1992-04-25|COLLECT COD|AIR|ilent, even pear 4035|135094|5095|2|4|4516.36|0.07|0.00|A|F|1992-05-21|1992-04-24|1992-05-24|DELIVER IN PERSON|FOB|en instructions sleep blith 4035|117010|4544|3|1|1027.01|0.03|0.01|R|F|1992-06-18|1992-05-19|1992-07-02|COLLECT COD|FOB| requests. quickly 4035|181284|8839|4|13|17748.64|0.00|0.01|R|F|1992-06-10|1992-05-16|1992-07-10|NONE|SHIP|s. furiously even courts wake slyly 4036|5585|586|1|46|68566.68|0.09|0.00|N|O|1997-06-21|1997-05-29|1997-07-18|NONE|REG AIR|usly across the even th 4036|52428|4934|2|21|28988.82|0.09|0.07|N|O|1997-08-08|1997-06-28|1997-08-09|COLLECT COD|MAIL|e carefully. 
qui 4036|141687|1688|3|6|10372.08|0.07|0.01|N|O|1997-06-19|1997-06-16|1997-07-01|DELIVER IN PERSON|SHIP|equests wake about the bold id 4036|126016|3553|4|20|20840.20|0.08|0.02|N|O|1997-08-11|1997-07-11|1997-09-03|NONE|TRUCK|slyly bold deposits cajole pending, blithe 4037|63703|1222|1|32|53334.40|0.00|0.06|A|F|1993-05-06|1993-06-08|1993-05-31|DELIVER IN PERSON|AIR|e of the pending, iron 4037|46766|6767|2|4|6851.04|0.09|0.07|A|F|1993-07-05|1993-06-12|1993-08-03|DELIVER IN PERSON|RAIL|s around the blithely ironic ac 4038|195610|3168|1|40|68224.40|0.05|0.01|N|O|1996-01-15|1996-03-13|1996-01-25|COLLECT COD|TRUCK|t. slyly silent pinto beans amo 4038|11283|6286|2|37|44188.36|0.04|0.03|N|O|1996-03-17|1996-03-19|1996-04-07|DELIVER IN PERSON|REG AIR| packages 4038|31841|1842|3|24|42548.16|0.10|0.04|N|O|1996-04-06|1996-02-15|1996-04-18|TAKE BACK RETURN|RAIL|the furiously regu 4038|149264|9265|4|29|38084.54|0.07|0.06|N|O|1996-01-07|1996-03-08|1996-01-13|NONE|FOB|ffix. quietly ironic packages a 4038|78767|6289|5|24|41898.24|0.07|0.06|N|O|1996-04-01|1996-04-05|1996-04-28|DELIVER IN PERSON|TRUCK|ake quickly after the final, ironic ac 4038|35867|8371|6|6|10817.16|0.07|0.05|N|O|1996-02-09|1996-03-05|1996-03-10|COLLECT COD|SHIP| special instructions. packa 4039|93195|3196|1|38|45151.22|0.03|0.06|N|O|1998-03-09|1997-12-31|1998-03-21|DELIVER IN PERSON|REG AIR|sual asymptotes. ironic deposits nag aft 4039|121085|8622|2|17|18803.36|0.10|0.04|N|O|1998-01-15|1998-01-20|1998-01-28|TAKE BACK RETURN|MAIL| regular foxes haggle carefully bo 4039|63297|8310|3|9|11342.61|0.10|0.01|N|O|1998-03-08|1998-02-05|1998-04-05|TAKE BACK RETURN|FOB|t? pinto beans cajole across the thinly r 4039|27080|9583|4|43|43304.44|0.01|0.02|N|O|1998-01-02|1997-12-22|1998-01-15|NONE|FOB|beans believe bene 4039|133273|3274|5|43|56169.61|0.09|0.00|N|O|1998-01-20|1998-01-11|1998-02-05|COLLECT COD|SHIP|sts along the regular in 4064|198313|833|1|3|4233.93|0.10|0.04|N|O|1997-01-04|1997-01-01|1997-01-23|NONE|SHIP|its! quickly sp 4064|39472|1976|2|15|21172.05|0.02|0.02|N|O|1996-11-09|1996-12-04|1996-11-18|DELIVER IN PERSON|MAIL|braids affix across the regular sheave 4064|196654|1693|3|32|56020.80|0.04|0.07|N|O|1997-01-14|1997-01-01|1997-01-21|COLLECT COD|REG AIR|es boost. careful 4064|162125|9674|4|24|28490.88|0.02|0.02|N|O|1997-01-01|1996-12-31|1997-01-23|DELIVER IN PERSON|SHIP|ly regular ideas. 4064|20658|659|5|12|18943.80|0.08|0.08|N|O|1997-02-08|1996-12-18|1997-03-06|TAKE BACK RETURN|RAIL|ding to the requests 4064|183388|943|6|46|67683.48|0.03|0.00|N|O|1996-10-13|1997-01-05|1996-11-06|DELIVER IN PERSON|REG AIR|alongside of the f 4064|199699|2219|7|9|16188.21|0.01|0.06|N|O|1996-12-17|1996-12-13|1997-01-12|NONE|AIR|furiously f 4065|137373|7374|1|14|19745.18|0.04|0.02|A|F|1994-08-22|1994-07-29|1994-09-19|DELIVER IN PERSON|TRUCK|e furiously outside 4065|14060|4061|2|46|44806.76|0.03|0.05|A|F|1994-06-29|1994-08-01|1994-07-03|TAKE BACK RETURN|SHIP|, regular requests may mold above the 4065|96443|1462|3|33|47501.52|0.00|0.03|A|F|1994-09-03|1994-08-16|1994-09-13|DELIVER IN PERSON|AIR|ain blithely 4065|106463|1484|4|8|11755.68|0.00|0.01|R|F|1994-10-04|1994-08-05|1994-10-25|TAKE BACK RETURN|SHIP|ages haggle carefully 4065|122670|2671|5|29|49087.43|0.02|0.07|A|F|1994-06-29|1994-08-19|1994-07-17|NONE|RAIL|equests. packages sleep slyl 4065|109059|4080|6|16|17088.80|0.05|0.00|R|F|1994-08-25|1994-08-06|1994-09-09|COLLECT COD|TRUCK|ncies use furiously. 
quickly un 4065|143199|8228|7|11|13664.09|0.10|0.04|A|F|1994-07-25|1994-08-02|1994-07-30|NONE|RAIL|hang silently about 4066|138329|5869|1|9|12305.88|0.01|0.05|N|O|1997-05-06|1997-03-25|1997-05-27|COLLECT COD|FOB|nal, ironic accounts. blithel 4066|92550|5060|2|19|29308.45|0.05|0.00|N|O|1997-05-13|1997-04-17|1997-06-08|NONE|TRUCK|quests. slyly regu 4066|75789|804|3|8|14118.24|0.03|0.03|N|O|1997-04-24|1997-03-11|1997-05-20|NONE|REG AIR|accounts. special pinto beans 4066|178139|657|4|49|59639.37|0.01|0.01|N|O|1997-02-17|1997-03-24|1997-02-19|NONE|TRUCK|ial braids. furiously final deposits sl 4066|170234|235|5|43|56081.89|0.05|0.02|N|O|1997-02-16|1997-04-14|1997-02-18|DELIVER IN PERSON|MAIL|r instructions. slyly special 4066|108491|1002|6|44|65977.56|0.01|0.00|N|O|1997-03-01|1997-04-27|1997-03-29|DELIVER IN PERSON|MAIL|express accounts nag bli 4067|179063|9064|1|18|20557.08|0.03|0.08|A|F|1993-01-24|1992-12-23|1993-02-20|TAKE BACK RETURN|FOB|e the slyly final packages d 4067|95338|2866|2|14|18666.62|0.00|0.00|R|F|1993-02-03|1992-12-02|1993-02-07|TAKE BACK RETURN|TRUCK|ructions. quickly ironic accounts detect 4067|140221|7764|3|17|21440.74|0.03|0.05|A|F|1993-01-26|1992-11-23|1993-01-27|NONE|REG AIR|ts haggle slyly unusual, final 4067|89682|2191|4|40|66867.20|0.07|0.08|R|F|1993-01-09|1992-11-21|1993-01-16|DELIVER IN PERSON|TRUCK|lar theodolites nag blithely above the 4067|84828|9845|5|17|30817.94|0.08|0.03|A|F|1993-01-20|1992-12-29|1993-02-03|DELIVER IN PERSON|REG AIR|r accounts. slyly special pa 4067|95501|8011|6|12|17958.00|0.04|0.03|A|F|1992-12-12|1992-11-28|1992-12-15|DELIVER IN PERSON|AIR|lly slyly even theodol 4067|82675|5184|7|17|28180.39|0.01|0.01|R|F|1992-12-12|1992-12-23|1992-12-30|NONE|AIR|ts affix. regular, regular requests s 4068|109101|9102|1|43|47734.30|0.05|0.06|N|O|1996-11-28|1996-11-16|1996-12-22|NONE|AIR|ructions. regular, special packag 4068|56989|9495|2|31|60325.38|0.08|0.03|N|O|1996-12-11|1996-12-07|1996-12-30|NONE|SHIP|ds wake carefully amon 4069|128033|5570|1|39|41380.17|0.09|0.02|R|F|1992-09-06|1992-07-22|1992-09-25|COLLECT COD|SHIP|ven theodolites nag quickly. fluffi 4069|42553|2554|2|32|47857.60|0.10|0.08|A|F|1992-06-18|1992-07-20|1992-07-07|TAKE BACK RETURN|TRUCK|unts. deposit 4069|185023|7542|3|3|3324.06|0.06|0.01|R|F|1992-07-26|1992-07-07|1992-08-04|COLLECT COD|FOB|l packages. even, 4069|78465|3480|4|22|31756.12|0.10|0.05|A|F|1992-08-05|1992-08-04|1992-08-25|COLLECT COD|SHIP|ts. slyly special instruction 4069|156895|9411|5|50|97594.50|0.09|0.06|A|F|1992-07-26|1992-06-30|1992-08-01|TAKE BACK RETURN|REG AIR|even foxes among the express wate 4069|124947|2484|6|3|5915.82|0.02|0.01|A|F|1992-05-24|1992-06-18|1992-06-12|COLLECT COD|MAIL|y final deposits wake furiously! slyl 4069|183483|6002|7|50|78324.00|0.00|0.01|R|F|1992-09-03|1992-06-14|1992-10-01|NONE|REG AIR|ages. carefully regular 4070|182612|7649|1|2|3389.22|0.09|0.08|N|O|1995-08-03|1995-09-10|1995-08-17|TAKE BACK RETURN|REG AIR|ptotes affix 4070|154868|7384|2|40|76914.40|0.07|0.07|N|O|1995-07-13|1995-07-23|1995-08-06|COLLECT COD|MAIL|about the sentiments. quick 4070|61774|1775|3|11|19093.47|0.00|0.08|N|O|1995-08-23|1995-08-15|1995-08-31|TAKE BACK RETURN|MAIL| carefully final pack 4070|28495|998|4|46|65480.54|0.02|0.02|N|O|1995-06-22|1995-07-14|1995-07-11|DELIVER IN PERSON|REG AIR|nticing ideas. 
boldly 4071|111872|9406|1|22|41445.14|0.02|0.07|N|O|1996-10-31|1996-12-14|1996-11-05|NONE|REG AIR|sits cajole carefully final instructio 4071|17736|5240|2|47|77725.31|0.00|0.03|N|O|1996-11-04|1996-12-09|1996-11-16|NONE|TRUCK|ts cajole furiously along the 4096|26850|4357|1|31|55082.35|0.10|0.02|A|F|1992-07-14|1992-09-03|1992-07-31|COLLECT COD|TRUCK|y final, even platelets. boldly 4096|56025|3541|2|17|16677.34|0.07|0.03|R|F|1992-09-30|1992-08-11|1992-10-11|TAKE BACK RETURN|REG AIR|platelets alongside of the 4096|8635|8636|3|21|32416.23|0.08|0.00|A|F|1992-08-24|1992-09-04|1992-09-11|DELIVER IN PERSON|MAIL|tes mold flu 4096|127636|149|4|20|33272.60|0.02|0.07|R|F|1992-08-24|1992-09-13|1992-08-28|DELIVER IN PERSON|TRUCK|sual requests. furiously bold packages wake 4097|73486|3487|1|50|72974.00|0.04|0.04|N|O|1996-08-31|1996-08-14|1996-09-27|DELIVER IN PERSON|MAIL|egular deposits. blithely pending 4097|73230|5738|2|46|55348.58|0.10|0.01|N|O|1996-07-29|1996-08-19|1996-08-25|COLLECT COD|AIR| even depend 4097|173706|1258|3|42|74747.40|0.06|0.06|N|O|1996-08-11|1996-07-30|1996-08-15|NONE|FOB|carefully silent foxes are against the 4098|199417|9418|1|46|69754.86|0.07|0.03|N|O|1997-01-26|1997-01-27|1997-02-13|TAKE BACK RETURN|SHIP|e slyly blithely silent deposits. fluff 4099|3007|5508|1|29|26390.00|0.09|0.07|R|F|1992-11-21|1992-11-04|1992-11-30|NONE|FOB| slowly final warthogs sleep blithely. q 4099|136994|4534|2|3|6092.97|0.04|0.06|A|F|1992-09-12|1992-10-18|1992-10-01|NONE|RAIL|. special packages sleep 4099|50012|7528|3|36|34632.36|0.06|0.06|R|F|1992-11-06|1992-09-28|1992-12-02|NONE|FOB|beans cajole slyly quickly ironic 4099|138940|6480|4|7|13852.58|0.05|0.02|A|F|1992-09-12|1992-11-13|1992-09-14|TAKE BACK RETURN|AIR|onic foxes. quickly final fox 4099|162936|7969|5|48|95948.64|0.00|0.02|R|F|1992-10-18|1992-10-14|1992-11-01|NONE|REG AIR|ts haggle according to the slyly f 4099|58106|8107|6|39|41499.90|0.07|0.02|R|F|1992-12-13|1992-11-13|1992-12-26|DELIVER IN PERSON|REG AIR|fluffy accounts impress pending, iro 4099|179228|6780|7|46|60132.12|0.06|0.07|R|F|1992-10-29|1992-11-03|1992-11-10|DELIVER IN PERSON|REG AIR|ages nag requests. 4100|73624|3625|1|4|6390.48|0.03|0.03|N|O|1996-06-20|1996-04-29|1996-06-21|TAKE BACK RETURN|FOB|lyly regular, bold requ 4101|114376|9399|1|22|30588.14|0.05|0.02|R|F|1994-02-02|1994-02-19|1994-02-12|COLLECT COD|AIR|ly express instructions. careful 4102|9762|2263|1|17|28419.92|0.02|0.02|N|O|1996-06-03|1996-05-06|1996-07-02|COLLECT COD|AIR|ly silent theodolites sleep unusual exc 4102|68535|1042|2|5|7517.65|0.08|0.02|N|O|1996-05-11|1996-05-11|1996-05-16|COLLECT COD|AIR| the furiously even 4102|66779|1792|3|39|68085.03|0.08|0.01|N|O|1996-04-14|1996-05-18|1996-04-20|DELIVER IN PERSON|AIR|ffix blithely slyly special 4102|139097|1611|4|39|44307.51|0.02|0.00|N|O|1996-06-15|1996-06-06|1996-06-30|DELIVER IN PERSON|SHIP|y among the furiously special 4102|175|5176|5|32|34405.44|0.08|0.01|N|O|1996-05-14|1996-04-29|1996-05-29|NONE|RAIL| the even requests; regular pinto 4102|136116|1143|6|7|8064.77|0.02|0.01|N|O|1996-06-19|1996-05-21|1996-07-15|NONE|REG AIR|bove the carefully pending the 4103|74738|9753|1|40|68509.20|0.05|0.03|R|F|1992-09-19|1992-08-14|1992-09-21|COLLECT COD|RAIL|usly across the slyly busy accounts! 
fin 4128|195824|8344|1|5|9599.10|0.04|0.04|N|O|1995-10-18|1995-11-28|1995-10-28|TAKE BACK RETURN|FOB|ake permanently 4129|55253|2769|1|32|38664.00|0.03|0.04|A|F|1993-09-16|1993-08-25|1993-09-25|TAKE BACK RETURN|MAIL|ckages haggl 4129|26905|1910|2|39|71444.10|0.06|0.07|R|F|1993-10-21|1993-08-04|1993-10-29|COLLECT COD|MAIL|y regular foxes. slyly ironic deposits 4130|177966|5518|1|44|89934.24|0.07|0.04|N|O|1996-05-14|1996-04-15|1996-05-15|COLLECT COD|TRUCK|eaves haggle qui 4130|62910|7923|2|2|3745.82|0.05|0.06|N|O|1996-05-19|1996-04-24|1996-06-17|TAKE BACK RETURN|RAIL|uriously regular instructions around th 4131|49740|2245|1|6|10138.44|0.05|0.01|N|O|1998-04-27|1998-04-18|1998-04-29|TAKE BACK RETURN|MAIL|ns cajole slyly. even, iro 4131|177535|53|2|32|51600.96|0.08|0.01|N|O|1998-03-02|1998-03-21|1998-03-07|TAKE BACK RETURN|TRUCK| furiously regular asymptotes nod sly 4131|25254|2761|3|25|29481.25|0.02|0.07|N|O|1998-02-24|1998-03-01|1998-02-27|TAKE BACK RETURN|FOB|uickly exp 4131|35077|84|4|8|8096.56|0.04|0.01|N|O|1998-03-03|1998-03-15|1998-03-26|COLLECT COD|FOB| after the furiously ironic d 4131|124360|4361|5|30|41530.80|0.01|0.01|N|O|1998-04-01|1998-04-13|1998-04-08|TAKE BACK RETURN|FOB|he fluffily express depen 4131|101926|6947|6|47|90612.24|0.02|0.00|N|O|1998-03-09|1998-04-05|1998-03-13|TAKE BACK RETURN|RAIL|ges. ironic pinto be 4132|137396|9910|1|28|40134.92|0.07|0.03|N|O|1995-08-16|1995-08-01|1995-08-29|TAKE BACK RETURN|SHIP|pths wake against the stealthily special pi 4132|14238|1742|2|23|26501.29|0.07|0.07|N|O|1995-06-27|1995-07-27|1995-07-13|TAKE BACK RETURN|FOB|d deposits. fluffily even requests haggle b 4132|86626|1643|3|18|29027.16|0.09|0.04|A|F|1995-06-01|1995-08-01|1995-06-02|TAKE BACK RETURN|RAIL|y final de 4133|23646|3647|1|35|54937.40|0.02|0.00|A|F|1992-11-25|1992-09-15|1992-12-25|NONE|AIR|g above the quickly bold packages. ev 4134|120218|7755|1|34|42099.14|0.02|0.05|R|F|1995-04-29|1995-03-13|1995-05-11|DELIVER IN PERSON|FOB|e furiously regular sheaves sleep 4134|95661|3189|2|34|56326.44|0.01|0.03|A|F|1995-05-06|1995-03-28|1995-05-13|DELIVER IN PERSON|SHIP|ual asymptotes wake carefully alo 4134|170507|8059|3|12|18930.00|0.05|0.04|A|F|1995-03-19|1995-03-27|1995-04-14|COLLECT COD|TRUCK|kly above the quickly regular 4134|99989|7517|4|45|89504.10|0.08|0.02|A|F|1995-04-11|1995-03-27|1995-04-17|TAKE BACK RETURN|MAIL|ironic pin 4135|1947|1948|1|23|42525.62|0.06|0.01|N|O|1997-04-09|1997-05-12|1997-04-16|TAKE BACK RETURN|FOB|posits cajole furiously carefully 4135|119317|9318|2|32|42761.92|0.07|0.00|N|O|1997-03-14|1997-04-23|1997-04-12|TAKE BACK RETURN|TRUCK| ideas. requests use. furiously 4135|159231|4262|3|33|42577.59|0.05|0.05|N|O|1997-05-01|1997-05-23|1997-05-23|DELIVER IN PERSON|AIR|he fluffil 4135|194719|4720|4|13|23578.23|0.04|0.07|N|O|1997-03-16|1997-05-19|1997-04-03|COLLECT COD|RAIL|efully special account 4160|112413|7436|1|25|35635.25|0.10|0.04|N|O|1996-09-22|1996-10-17|1996-09-24|NONE|SHIP|ar accounts sleep blithe 4160|121668|4181|2|12|20275.92|0.00|0.03|N|O|1996-11-22|1996-09-25|1996-12-10|DELIVER IN PERSON|REG AIR|y bold package 4160|62498|2499|3|48|70103.52|0.04|0.04|N|O|1996-09-19|1996-11-02|1996-09-24|COLLECT COD|FOB| unusual dolphins 4161|121535|4048|1|12|18678.36|0.08|0.02|R|F|1993-08-25|1993-10-04|1993-09-22|COLLECT COD|RAIL|onic dolphins. 
in 4161|27319|9822|2|47|58576.57|0.05|0.00|A|F|1993-12-20|1993-10-29|1994-01-19|TAKE BACK RETURN|RAIL|r requests about the final, even foxes hag 4161|137027|9541|3|42|44688.84|0.03|0.04|R|F|1993-11-12|1993-10-04|1993-11-27|COLLECT COD|MAIL|thely across the even attainments. express 4161|9463|4464|4|45|61760.70|0.02|0.06|A|F|1993-10-22|1993-10-17|1993-10-30|COLLECT COD|REG AIR|about the ironic packages cajole blithe 4161|28343|8344|5|46|58481.64|0.05|0.01|A|F|1993-11-09|1993-11-17|1993-11-17|TAKE BACK RETURN|TRUCK|he stealthily ironic foxes. ideas haggl 4161|147061|7062|6|19|21053.14|0.07|0.00|R|F|1993-08-22|1993-11-11|1993-09-01|TAKE BACK RETURN|REG AIR|beans breach s 4162|73106|8121|1|45|48559.50|0.10|0.07|A|F|1992-03-21|1992-05-02|1992-03-29|DELIVER IN PERSON|AIR|elets. slyly regular i 4162|89349|9350|2|29|38811.86|0.00|0.05|R|F|1992-02-25|1992-04-25|1992-03-17|NONE|REG AIR|nding pinto beans haggle blithe 4163|32870|2871|1|13|23437.31|0.08|0.03|A|F|1993-02-17|1993-03-13|1993-03-15|DELIVER IN PERSON|REG AIR|phins wake. pending requests inte 4164|119658|4681|1|9|15098.85|0.07|0.02|N|O|1998-08-25|1998-08-13|1998-09-19|DELIVER IN PERSON|SHIP|re fluffily slyly bold requests. 4165|40102|103|1|12|12505.20|0.00|0.01|N|O|1997-09-20|1997-10-20|1997-10-12|TAKE BACK RETURN|REG AIR|nwind slow theodolites. carefully pending 4166|140721|8264|1|8|14093.76|0.00|0.08|A|F|1993-06-05|1993-04-10|1993-07-05|COLLECT COD|MAIL|uickly. blithely pending de 4166|92165|4675|2|8|9257.28|0.06|0.04|A|F|1993-06-07|1993-04-17|1993-06-16|DELIVER IN PERSON|REG AIR|es along the furiously regular acc 4166|6324|8825|3|17|20915.44|0.02|0.06|R|F|1993-06-29|1993-05-15|1993-07-24|DELIVER IN PERSON|SHIP|ackages. re 4166|85553|3078|4|36|55387.80|0.06|0.05|R|F|1993-03-01|1993-05-25|1993-03-05|COLLECT COD|MAIL|unts. furiously express accounts w 4166|76379|1394|5|5|6776.85|0.08|0.01|A|F|1993-06-19|1993-04-24|1993-06-27|NONE|REG AIR|hely unusual packages are above the f 4166|101840|4351|6|6|11051.04|0.04|0.08|R|F|1993-04-30|1993-04-17|1993-05-08|DELIVER IN PERSON|MAIL|ily ironic deposits print furiously. iron 4166|23149|3150|7|26|27875.64|0.09|0.01|R|F|1993-03-17|1993-05-09|1993-03-25|NONE|MAIL|lar dependencies. s 4167|60623|5636|1|47|74430.14|0.04|0.02|N|O|1998-08-02|1998-08-24|1998-08-28|DELIVER IN PERSON|REG AIR| carefully final asymptotes. slyly bo 4167|86504|6505|2|17|25338.50|0.06|0.07|N|O|1998-09-18|1998-09-06|1998-10-07|COLLECT COD|REG AIR|ly around the even instr 4167|72505|5013|3|1|1477.50|0.03|0.06|N|O|1998-10-11|1998-08-14|1998-10-13|COLLECT COD|TRUCK|xpress platelets. blithely 4192|10837|8341|1|36|62921.88|0.06|0.08|N|O|1998-04-25|1998-05-26|1998-05-03|COLLECT COD|TRUCK|eodolites sleep 4192|120696|3209|2|15|25750.35|0.04|0.08|N|O|1998-06-26|1998-05-26|1998-07-16|COLLECT COD|AIR|e slyly special grouches. express pinto b 4192|134922|9949|3|7|13698.44|0.06|0.03|N|O|1998-05-19|1998-07-08|1998-05-31|COLLECT COD|FOB|y; excuses use. ironic, close instru 4192|23256|8261|4|32|37736.00|0.09|0.04|N|O|1998-06-23|1998-06-25|1998-07-17|NONE|FOB|ounts are fluffily slyly bold req 4192|47037|4550|5|48|47233.44|0.08|0.01|N|O|1998-08-17|1998-07-11|1998-09-03|NONE|AIR|ests. quickly bol 4192|149567|2082|6|44|71128.64|0.10|0.02|N|O|1998-08-06|1998-07-09|1998-08-20|NONE|FOB|structions mai 4192|169807|7356|7|27|50673.60|0.02|0.00|N|O|1998-07-03|1998-06-26|1998-07-13|TAKE BACK RETURN|AIR| carefully even escapades. 
care 4193|130286|7826|1|37|48702.36|0.09|0.06|A|F|1994-04-25|1994-02-24|1994-05-08|NONE|AIR|er the quickly regular dependencies wake 4193|116403|3937|2|3|4258.20|0.09|0.05|R|F|1994-04-29|1994-03-20|1994-05-29|TAKE BACK RETURN|REG AIR|osits above the depo 4193|178441|8442|3|10|15194.40|0.06|0.03|A|F|1994-02-10|1994-03-22|1994-03-09|COLLECT COD|RAIL|uffily spe 4193|50040|2546|4|29|28711.16|0.09|0.05|A|F|1994-02-11|1994-03-11|1994-03-13|TAKE BACK RETURN|RAIL|ly. final packages use blit 4193|19877|4880|5|50|89843.50|0.01|0.01|R|F|1994-04-28|1994-03-23|1994-05-09|NONE|FOB| beans. regular accounts cajole. de 4193|65248|2767|6|21|25478.04|0.02|0.04|R|F|1994-04-26|1994-03-22|1994-05-23|DELIVER IN PERSON|TRUCK|accounts cajole b 4194|196755|4313|1|43|79625.25|0.08|0.06|A|F|1994-11-06|1994-12-09|1994-11-16|NONE|TRUCK|olites are after the exp 4194|46577|1586|2|18|27424.26|0.07|0.07|A|F|1995-02-14|1994-12-04|1995-03-11|TAKE BACK RETURN|TRUCK|ld packages. quickly eve 4195|5693|8194|1|14|22381.66|0.09|0.04|R|F|1993-09-06|1993-07-21|1993-09-18|DELIVER IN PERSON|REG AIR|ironic packages. carefully express 4195|65973|3492|2|22|42657.34|0.10|0.08|R|F|1993-07-01|1993-07-23|1993-07-28|COLLECT COD|RAIL|lly express pinto bea 4195|193646|1204|3|19|33053.16|0.01|0.06|R|F|1993-09-06|1993-08-13|1993-09-15|TAKE BACK RETURN|REG AIR|telets sleep even requests. final, even i 4196|155079|7595|1|30|34022.10|0.02|0.06|N|O|1998-08-09|1998-06-30|1998-09-05|COLLECT COD|SHIP|egular foxes us 4196|8234|5735|2|31|35409.13|0.09|0.08|N|O|1998-06-12|1998-07-28|1998-07-11|NONE|MAIL|ut the blithely ironic inst 4196|177505|7506|3|46|72795.00|0.05|0.00|N|O|1998-09-05|1998-06-28|1998-09-10|TAKE BACK RETURN|MAIL|according to t 4196|113898|6410|4|42|80299.38|0.04|0.06|N|O|1998-08-13|1998-07-18|1998-09-07|TAKE BACK RETURN|AIR| instructions. courts cajole slyly ev 4196|71690|4198|5|3|4985.07|0.01|0.03|N|O|1998-05-17|1998-07-21|1998-05-18|DELIVER IN PERSON|TRUCK| accounts. fu 4196|86060|8569|6|43|44980.58|0.01|0.06|N|O|1998-08-12|1998-07-12|1998-08-22|DELIVER IN PERSON|FOB|es. slyly even 4196|3066|567|7|3|2907.18|0.00|0.06|N|O|1998-08-05|1998-07-28|1998-08-15|DELIVER IN PERSON|REG AIR|y regular packages haggle furiously alongs 4197|128035|3060|1|50|53151.50|0.06|0.03|N|O|1996-11-15|1996-11-01|1996-11-20|NONE|FOB|. carefully bold asymptotes nag blithe 4197|69664|2171|2|39|63712.74|0.02|0.08|N|O|1996-10-07|1996-10-11|1996-10-18|DELIVER IN PERSON|RAIL|ronic requests. quickly bold packages in 4197|31921|4425|3|28|51881.76|0.06|0.02|N|O|1996-10-05|1996-10-24|1996-10-22|TAKE BACK RETURN|AIR|regular pin 4197|95301|5302|4|23|29814.90|0.00|0.03|N|O|1996-09-10|1996-10-10|1996-09-25|NONE|AIR|l instructions print slyly past the reg 4197|120952|3465|5|37|72999.15|0.03|0.04|N|O|1996-10-20|1996-10-10|1996-11-10|COLLECT COD|TRUCK|carefully enticing decoys boo 4197|30658|3162|6|48|76255.20|0.08|0.00|N|O|1996-10-07|1996-10-25|1996-10-23|COLLECT COD|REG AIR| final instructions. blithe, spe 4198|145163|192|1|48|57991.68|0.09|0.05|N|O|1997-09-03|1997-07-18|1997-09-11|NONE|REG AIR|cajole carefully final, ironic ide 4198|142247|7276|2|46|59305.04|0.09|0.01|N|O|1997-08-17|1997-09-08|1997-09-11|COLLECT COD|TRUCK|posits among th 4198|144524|2067|3|13|20390.76|0.03|0.04|N|O|1997-07-18|1997-07-24|1997-08-10|NONE|REG AIR| furious excuses. bli 4199|69662|7181|1|16|26106.56|0.10|0.00|A|F|1992-06-11|1992-04-10|1992-07-10|COLLECT COD|TRUCK|ncies. 
furiously special accounts 4199|8840|8841|2|18|31479.12|0.00|0.01|A|F|1992-06-01|1992-03-30|1992-06-28|DELIVER IN PERSON|RAIL|pending, regular accounts. carefully 4224|198265|8266|1|27|36808.02|0.05|0.03|N|O|1997-09-05|1997-08-19|1997-09-30|NONE|SHIP|ly special deposits sleep qui 4224|36701|4211|2|20|32754.00|0.07|0.05|N|O|1997-11-09|1997-08-23|1997-11-14|NONE|FOB|unts promise across the requests. blith 4224|23726|1233|3|4|6598.88|0.08|0.05|N|O|1997-09-07|1997-09-05|1997-09-25|TAKE BACK RETURN|FOB| even dinos. carefull 4224|159653|7199|4|50|85632.50|0.10|0.06|N|O|1997-07-30|1997-09-10|1997-08-19|COLLECT COD|RAIL|side of the carefully silent dep 4224|84136|6645|5|48|53766.24|0.00|0.04|N|O|1997-10-03|1997-08-31|1997-10-10|NONE|RAIL| final, regular asymptotes use alway 4225|48772|6285|1|25|43019.25|0.08|0.04|N|O|1997-07-10|1997-08-08|1997-07-31|TAKE BACK RETURN|TRUCK|se fluffily. busily ironic requests are; 4225|95002|7512|2|23|22931.00|0.02|0.04|N|O|1997-09-18|1997-08-31|1997-10-11|TAKE BACK RETURN|RAIL|. quickly b 4225|97926|436|3|28|53869.76|0.08|0.03|N|O|1997-07-11|1997-09-01|1997-08-03|DELIVER IN PERSON|FOB|ts are requests. even, bold depos 4226|187065|9584|1|27|31105.62|0.06|0.08|A|F|1993-05-03|1993-04-12|1993-05-16|COLLECT COD|AIR|sly alongside of the slyly ironic pac 4227|157199|9715|1|19|23867.61|0.01|0.08|A|F|1995-05-05|1995-05-03|1995-05-22|COLLECT COD|REG AIR|ns sleep along the blithely even theodolit 4227|32622|2623|2|8|12436.96|0.09|0.00|N|F|1995-06-11|1995-04-30|1995-06-28|COLLECT COD|REG AIR| packages since the bold, u 4227|74624|4625|3|11|17584.82|0.10|0.04|A|F|1995-03-30|1995-05-02|1995-04-26|DELIVER IN PERSON|SHIP|l requests-- bold requests cajole dogg 4227|199283|6841|4|2|2764.56|0.02|0.05|R|F|1995-04-24|1995-05-09|1995-05-21|DELIVER IN PERSON|AIR|ep. specia 4227|146835|4378|5|49|92209.67|0.05|0.06|R|F|1995-05-19|1995-04-12|1995-06-12|TAKE BACK RETURN|REG AIR|ts sleep blithely carefully unusual ideas. 4228|140949|8492|1|20|39798.80|0.00|0.06|N|O|1997-04-24|1997-05-29|1997-05-17|NONE|RAIL|f the slyly fluffy pinto beans are 4229|95318|337|1|44|57785.64|0.02|0.05|N|O|1998-05-29|1998-05-12|1998-06-16|DELIVER IN PERSON|AIR|s. carefully e 4229|4570|7071|2|34|50135.38|0.07|0.05|N|O|1998-05-26|1998-04-13|1998-06-08|DELIVER IN PERSON|MAIL|thely final accounts use even packa 4230|45124|2637|1|38|40626.56|0.10|0.03|A|F|1992-04-28|1992-04-21|1992-05-28|TAKE BACK RETURN|FOB|ly regular packages. regular ideas boost 4230|198644|6202|2|43|74933.52|0.02|0.08|R|F|1992-03-14|1992-05-13|1992-03-28|NONE|FOB|ses lose blithely slyly final e 4230|195055|94|3|10|11500.50|0.06|0.02|A|F|1992-06-11|1992-04-11|1992-07-02|TAKE BACK RETURN|MAIL|ar packages are 4230|74334|4335|4|28|36633.24|0.01|0.03|R|F|1992-05-12|1992-05-10|1992-06-01|TAKE BACK RETURN|MAIL|nt instruct 4230|124748|7261|5|50|88637.00|0.00|0.01|A|F|1992-03-29|1992-05-19|1992-04-20|TAKE BACK RETURN|SHIP|ts. final instructions in 4230|34864|9871|6|30|53965.80|0.05|0.07|A|F|1992-03-11|1992-04-29|1992-03-30|NONE|AIR|s. final excuses across the 4230|151041|1042|7|18|19656.72|0.10|0.04|R|F|1992-06-23|1992-05-10|1992-07-04|COLLECT COD|SHIP| the final acco 4231|141535|1536|1|47|74096.91|0.09|0.03|N|O|1997-11-27|1998-01-26|1997-12-17|NONE|REG AIR|hely along the silent at 4231|165189|222|2|4|5016.72|0.06|0.02|N|O|1997-11-28|1998-01-26|1997-12-12|TAKE BACK RETURN|MAIL|lithely even packages. 4231|120334|335|3|31|41984.23|0.07|0.08|N|O|1998-02-14|1997-12-27|1998-03-01|DELIVER IN PERSON|FOB|ublate. 
theodoli 4231|39030|4037|4|35|33916.05|0.10|0.00|N|O|1998-02-21|1998-01-24|1998-03-18|DELIVER IN PERSON|FOB|le quickly regular, unus 4256|150384|2900|1|22|31556.36|0.05|0.05|R|F|1992-07-30|1992-05-14|1992-08-14|NONE|TRUCK|, final platelets are slyly final pint 4257|64416|1935|1|3|4141.23|0.10|0.03|N|O|1995-06-18|1995-05-01|1995-07-12|DELIVER IN PERSON|MAIL|thin the theodolites use after the bl 4257|34970|9977|2|5|9524.85|0.01|0.04|R|F|1995-04-29|1995-06-05|1995-05-13|TAKE BACK RETURN|TRUCK|n deposits. furiously e 4257|127007|7008|3|33|34122.00|0.03|0.04|A|F|1995-05-23|1995-05-03|1995-05-31|COLLECT COD|AIR|uffily regular accounts ar 4258|165025|5026|1|36|39240.72|0.02|0.06|N|O|1997-02-23|1997-01-25|1997-02-27|TAKE BACK RETURN|SHIP|ns use alongs 4258|161733|4250|2|19|34099.87|0.03|0.02|N|O|1997-01-14|1996-12-12|1997-01-20|TAKE BACK RETURN|AIR|ly busily ironic foxes. f 4258|30847|8357|3|46|81780.64|0.04|0.07|N|O|1997-01-02|1996-12-26|1997-01-12|DELIVER IN PERSON|AIR| furiously pend 4258|34234|4235|4|22|25701.06|0.04|0.04|N|O|1996-12-12|1996-12-06|1996-12-20|TAKE BACK RETURN|AIR|e regular, even asym 4258|162806|7839|5|9|16819.20|0.04|0.03|N|O|1996-12-04|1996-12-08|1996-12-20|DELIVER IN PERSON|TRUCK|counts wake permanently after the bravely 4259|42595|7604|1|14|21526.26|0.05|0.03|N|O|1998-01-09|1997-11-21|1998-01-29|TAKE BACK RETURN|RAIL| furiously pending excuses. ideas hagg 4260|23397|904|1|21|27728.19|0.08|0.04|R|F|1992-08-06|1992-06-18|1992-08-22|NONE|AIR|al, pending accounts must 4261|109388|9389|1|12|16768.56|0.05|0.01|A|F|1992-11-01|1993-01-01|1992-11-12|NONE|FOB|into beans 4261|81703|9228|2|4|6738.80|0.02|0.07|R|F|1992-12-11|1992-12-18|1992-12-24|DELIVER IN PERSON|FOB|ackages unwind furiously fluff 4261|174248|6766|3|3|3966.72|0.07|0.02|R|F|1992-11-10|1992-12-14|1992-11-17|COLLECT COD|RAIL|ly even deposits eat blithely alo 4261|173239|8274|4|36|47240.28|0.04|0.06|R|F|1992-12-02|1992-12-18|1992-12-25|NONE|REG AIR| slyly pendi 4261|23678|1185|5|28|44846.76|0.07|0.06|A|F|1992-10-08|1992-12-23|1992-10-11|TAKE BACK RETURN|MAIL|packages. fluffily i 4262|75546|5547|1|30|45646.20|0.01|0.03|N|O|1996-08-11|1996-10-11|1996-09-09|TAKE BACK RETURN|RAIL|tes after the carefully 4262|95696|5697|2|5|8458.45|0.02|0.05|N|O|1996-09-27|1996-09-05|1996-10-25|COLLECT COD|SHIP|blithely final asymptotes integrate 4262|161033|3550|3|5|5470.15|0.08|0.00|N|O|1996-10-02|1996-10-16|1996-10-05|NONE|REG AIR|ironic accounts are unusu 4262|73333|855|4|45|58784.85|0.02|0.01|N|O|1996-11-09|1996-09-09|1996-11-12|DELIVER IN PERSON|SHIP|ackages boost. pending, even instruction 4262|99103|4122|5|28|30858.80|0.06|0.02|N|O|1996-10-22|1996-09-06|1996-11-13|DELIVER IN PERSON|FOB|ironic, regular depend 4262|16420|3924|6|26|34746.92|0.03|0.02|N|O|1996-08-29|1996-09-25|1996-08-31|NONE|RAIL|s boost slyly along the bold, iro 4262|159435|4466|7|41|61271.63|0.03|0.01|N|O|1996-08-28|1996-09-14|1996-09-20|COLLECT COD|RAIL|cuses unwind ac 4263|17017|7018|1|9|8406.09|0.08|0.07|N|O|1998-04-04|1998-04-29|1998-05-04|COLLECT COD|AIR|structions cajole quic 4263|195562|3120|2|28|46411.68|0.05|0.03|N|O|1998-06-24|1998-06-08|1998-07-14|NONE|MAIL|ideas for the carefully re 4263|10259|7763|3|38|44431.50|0.01|0.01|N|O|1998-07-10|1998-05-08|1998-07-17|NONE|TRUCK|rding to the dep 4263|18148|650|4|20|21322.80|0.02|0.07|N|O|1998-04-09|1998-04-30|1998-05-04|NONE|RAIL|uietly regular deposits. sly deposits w 4263|197901|5459|5|14|27984.60|0.09|0.06|N|O|1998-05-06|1998-04-17|1998-05-11|DELIVER IN PERSON|TRUCK|d accounts. 
daringly regular accounts hagg 4263|112955|7978|6|47|92493.65|0.08|0.06|N|O|1998-06-28|1998-05-09|1998-07-02|DELIVER IN PERSON|TRUCK|y. theodolites wake idly ironic do 4263|28779|1282|7|6|10246.62|0.04|0.04|N|O|1998-05-01|1998-06-02|1998-05-14|TAKE BACK RETURN|REG AIR|g the final, regular instructions: 4288|73060|3061|1|32|33057.92|0.10|0.07|R|F|1993-03-19|1993-01-26|1993-04-18|TAKE BACK RETURN|AIR|e blithely even instructions. speci 4288|104952|4953|2|39|76321.05|0.05|0.02|R|F|1993-03-25|1993-02-06|1993-03-28|DELIVER IN PERSON|AIR|uffy theodolites run 4288|124632|2169|3|7|11596.41|0.03|0.01|A|F|1993-01-15|1993-02-05|1993-01-26|NONE|TRUCK|ngside of the special platelet 4289|195619|5620|1|19|32577.59|0.06|0.06|R|F|1993-12-31|1993-11-06|1994-01-23|DELIVER IN PERSON|TRUCK|e carefully regular ideas. sl 4290|136795|9309|1|23|42131.17|0.06|0.04|R|F|1995-04-04|1995-02-16|1995-04-07|TAKE BACK RETURN|REG AIR|uests cajole carefully. 4290|98378|3397|2|3|4129.11|0.09|0.03|A|F|1995-03-25|1995-03-07|1995-04-11|NONE|RAIL|lar platelets cajole 4291|191809|9367|1|3|5702.40|0.08|0.08|A|F|1994-03-17|1994-02-21|1994-03-27|COLLECT COD|SHIP|tes sleep slyly above the quickly sl 4291|124889|2426|2|43|82296.84|0.01|0.06|A|F|1994-02-01|1994-02-27|1994-02-06|DELIVER IN PERSON|REG AIR|s. quietly regular 4291|7976|477|3|25|47099.25|0.09|0.08|R|F|1994-02-14|1994-02-08|1994-03-15|COLLECT COD|AIR|uctions. furiously regular ins 4292|43509|1022|1|22|31955.00|0.08|0.03|R|F|1992-02-14|1992-02-16|1992-03-01|NONE|FOB|refully expres 4292|39824|7334|2|1|1763.82|0.03|0.01|A|F|1992-02-07|1992-03-16|1992-02-10|DELIVER IN PERSON|FOB| the furiously ev 4292|119234|6768|3|35|43863.05|0.03|0.06|A|F|1992-03-23|1992-04-04|1992-04-02|COLLECT COD|TRUCK|dugouts use. furiously bold packag 4292|162381|7414|4|40|57735.20|0.05|0.04|A|F|1992-04-27|1992-03-07|1992-05-04|COLLECT COD|REG AIR|ounts according to the furiously 4292|130292|7832|5|6|7933.74|0.07|0.08|R|F|1992-03-03|1992-02-24|1992-03-25|COLLECT COD|FOB|bove the silently regula 4292|3041|542|6|47|44369.88|0.05|0.00|R|F|1992-05-02|1992-03-21|1992-05-27|TAKE BACK RETURN|FOB|y packages; even ideas boost 4293|957|5958|1|34|63170.30|0.03|0.08|N|O|1996-11-05|1996-10-12|1996-12-04|NONE|FOB|ions sleep blithely on 4293|76837|4359|2|50|90691.50|0.01|0.05|N|O|1996-11-27|1996-10-30|1996-12-22|COLLECT COD|MAIL| special deposits. furiousl 4293|198566|1086|3|47|78234.32|0.08|0.02|N|O|1996-09-07|1996-10-24|1996-09-15|NONE|RAIL|ithely pending deposits af 4293|87832|5357|4|25|45495.75|0.04|0.04|N|O|1996-09-11|1996-11-14|1996-09-22|DELIVER IN PERSON|FOB|inal asympt 4293|180656|3175|5|1|1736.65|0.06|0.05|N|O|1996-11-15|1996-10-09|1996-11-26|COLLECT COD|AIR|eposits should boost along the 4293|78993|6515|6|45|88739.55|0.10|0.04|N|O|1996-11-04|1996-11-06|1996-11-23|NONE|MAIL|lar ideas use carefully 4294|104911|7422|1|19|36402.29|0.03|0.04|A|F|1992-10-16|1992-11-13|1992-10-26|DELIVER IN PERSON|AIR|nt dependencies. furiously regular ideas d 4294|26561|9064|2|16|23800.96|0.01|0.02|R|F|1992-08-17|1992-09-24|1992-09-04|TAKE BACK RETURN|REG AIR|lithely pint 4294|197032|2071|3|30|33870.90|0.01|0.00|A|F|1992-09-12|1992-11-06|1992-09-25|NONE|MAIL|olites. bold foxes affix ironic theodolite 4294|104405|1936|4|34|47919.60|0.02|0.01|R|F|1992-09-09|1992-11-06|1992-10-04|TAKE BACK RETURN|REG AIR|pendencies! 
4294|118956|1468|5|37|73073.15|0.05|0.01|R|F|1992-09-07|1992-10-13|1992-09-08|NONE|REG AIR|cial packages nag f 4294|86273|8782|6|42|52889.34|0.02|0.03|A|F|1992-09-30|1992-11-13|1992-10-15|DELIVER IN PERSON|FOB| carefully; furiously ex 4294|174787|2339|7|47|87503.66|0.02|0.08|R|F|1992-11-09|1992-11-03|1992-12-05|TAKE BACK RETURN|SHIP|es. blithely r 4295|28410|5917|1|49|65582.09|0.09|0.01|N|O|1996-05-25|1996-03-17|1996-06-19|TAKE BACK RETURN|REG AIR|refully silent requests. f 4295|70554|8076|2|4|6098.20|0.09|0.07|N|O|1996-06-05|1996-04-26|1996-06-13|DELIVER IN PERSON|TRUCK|arefully according to the pending ac 4295|192627|2628|3|3|5158.86|0.04|0.00|N|O|1996-06-04|1996-04-24|1996-06-24|DELIVER IN PERSON|AIR|telets cajole bravely 4295|79439|4454|4|30|42552.90|0.07|0.06|N|O|1996-03-22|1996-04-23|1996-04-20|NONE|SHIP|yly ironic frets. pending foxes after 4320|45894|3407|1|28|51516.92|0.02|0.06|N|O|1997-01-28|1997-02-07|1997-02-07|COLLECT COD|FOB|nts. even, ironic excuses hagg 4320|139168|1682|2|6|7242.96|0.08|0.08|N|O|1997-01-11|1997-01-26|1997-01-22|DELIVER IN PERSON|SHIP|against the carefully careful asym 4320|187628|2665|3|33|56615.46|0.09|0.02|N|O|1996-12-11|1997-02-27|1997-01-08|TAKE BACK RETURN|SHIP|ess asymptotes so 4321|146895|4438|1|33|64082.37|0.09|0.02|A|F|1994-09-01|1994-08-17|1994-09-05|DELIVER IN PERSON|TRUCK|yly special excuses. fluffily 4321|53251|5757|2|45|54191.25|0.00|0.08|R|F|1994-11-13|1994-09-15|1994-11-18|DELIVER IN PERSON|SHIP| haggle ironically bold theodolites. quick 4321|185298|5299|3|23|31815.67|0.01|0.05|A|F|1994-11-03|1994-10-08|1994-11-06|DELIVER IN PERSON|SHIP|ly even orbits slee 4321|90271|272|4|4|5045.08|0.02|0.00|R|F|1994-09-10|1994-10-06|1994-09-11|NONE|FOB|ironic deposi 4321|171625|4143|5|10|16966.20|0.04|0.03|A|F|1994-09-07|1994-08-23|1994-09-17|TAKE BACK RETURN|SHIP|wake carefully alongside of 4322|68766|6285|1|39|67655.64|0.04|0.02|N|O|1998-04-27|1998-06-03|1998-05-04|TAKE BACK RETURN|MAIL|its integrate fluffily 4322|139800|9801|2|9|16558.20|0.05|0.08|N|O|1998-05-18|1998-04-27|1998-05-28|COLLECT COD|AIR|ual instructio 4322|7548|7549|3|12|17466.48|0.09|0.05|N|O|1998-03-29|1998-06-05|1998-04-16|DELIVER IN PERSON|TRUCK|e blithely against the slyly unusu 4322|45880|5881|4|17|31039.96|0.09|0.08|N|O|1998-05-31|1998-05-31|1998-06-10|TAKE BACK RETURN|FOB|ructions boost 4322|101331|6352|5|10|13323.30|0.00|0.05|N|O|1998-05-31|1998-04-27|1998-06-25|TAKE BACK RETURN|REG AIR| regular ideas engage carefully quick 4322|59477|1983|6|39|56022.33|0.09|0.08|N|O|1998-03-16|1998-05-21|1998-04-11|COLLECT COD|AIR|ccounts. dogged pin 4322|13150|654|7|34|36147.10|0.05|0.00|N|O|1998-05-27|1998-04-12|1998-06-16|NONE|REG AIR|ounts haggle fluffily ideas. pend 4323|358|359|1|33|41525.55|0.09|0.02|A|F|1994-05-04|1994-03-06|1994-05-23|COLLECT COD|TRUCK|the slyly bold deposits slee 4324|50292|293|1|44|54660.76|0.05|0.04|N|O|1995-10-15|1995-09-07|1995-11-07|DELIVER IN PERSON|AIR|ainst the u 4324|47117|4630|2|12|12769.32|0.04|0.02|N|O|1995-10-05|1995-09-07|1995-10-18|NONE|REG AIR|c packages. furiously express sauternes 4324|81374|8899|3|14|18975.18|0.07|0.06|N|O|1995-11-12|1995-08-26|1995-11-21|COLLECT COD|AIR| packages nag express excuses. qui 4324|49985|2490|4|14|27089.72|0.02|0.04|N|O|1995-09-20|1995-10-08|1995-10-06|COLLECT COD|RAIL| express ideas. blithely blit 4324|83480|5989|5|22|32196.56|0.07|0.03|N|O|1995-09-13|1995-10-04|1995-09-23|DELIVER IN PERSON|SHIP|ke express, special ideas. 
4324|42432|9945|6|31|42607.33|0.08|0.04|N|O|1995-10-23|1995-09-14|1995-11-09|COLLECT COD|RAIL|efully flu 4324|153018|564|7|46|49266.46|0.00|0.03|N|O|1995-11-03|1995-09-28|1995-11-22|NONE|SHIP|ular, final theodo 4325|159610|7156|1|18|30052.98|0.01|0.07|N|O|1996-10-07|1996-09-28|1996-10-31|DELIVER IN PERSON|RAIL|. blithely 4326|162728|2729|1|11|19697.92|0.01|0.01|N|O|1997-02-02|1996-12-10|1997-02-20|DELIVER IN PERSON|TRUCK|press reque 4326|166954|9471|2|27|54565.65|0.06|0.01|N|O|1996-11-29|1997-01-20|1996-12-23|COLLECT COD|AIR|inal packages. final asymptotes about t 4327|94813|9832|1|18|32540.58|0.08|0.00|N|F|1995-06-16|1995-04-20|1995-07-12|COLLECT COD|RAIL|y final excuses. ironic, special requests a 4327|105917|8428|2|40|76916.40|0.07|0.01|N|F|1995-05-26|1995-04-17|1995-06-18|NONE|AIR|quests. packages are after th 4327|144418|6933|3|11|16086.51|0.10|0.07|R|F|1995-04-24|1995-05-27|1995-05-24|TAKE BACK RETURN|FOB| ironic dolphins 4327|20003|5008|4|8|7384.00|0.04|0.08|N|F|1995-05-26|1995-05-28|1995-06-19|DELIVER IN PERSON|AIR|eodolites cajole; unusual Tiresias 4327|189038|9039|5|39|43954.17|0.01|0.00|N|O|1995-06-23|1995-04-18|1995-07-13|TAKE BACK RETURN|FOB|kages against the blit 4327|151058|8604|6|10|11090.50|0.00|0.06|A|F|1995-04-28|1995-06-11|1995-05-07|TAKE BACK RETURN|TRUCK|arefully sile 4352|105786|8297|1|18|32252.04|0.00|0.03|N|O|1998-02-27|1998-02-02|1998-03-01|DELIVER IN PERSON|RAIL|ding to th 4353|93627|1155|1|22|35653.64|0.05|0.05|N|O|1998-01-19|1998-01-23|1998-02-10|COLLECT COD|FOB|ent packages. accounts are slyly. 4354|14218|6720|1|30|33966.30|0.08|0.07|R|F|1995-01-27|1994-11-24|1995-02-25|TAKE BACK RETURN|REG AIR|around the ir 4354|152107|7138|2|23|26659.30|0.01|0.08|R|F|1994-11-20|1994-12-23|1994-11-27|TAKE BACK RETURN|AIR|kly along the ironic, ent 4354|50734|5745|3|2|3369.46|0.10|0.04|A|F|1995-01-09|1994-12-15|1995-01-24|TAKE BACK RETURN|REG AIR|s nag quickly 4354|85171|2696|4|36|41622.12|0.05|0.05|A|F|1994-11-20|1994-12-06|1994-12-06|DELIVER IN PERSON|AIR| wake slyly eve 4354|64313|1832|5|37|47260.47|0.06|0.02|R|F|1995-01-13|1994-12-29|1995-01-31|DELIVER IN PERSON|FOB|deas use blithely! special foxes print af 4354|107393|2414|6|36|50414.04|0.03|0.04|R|F|1994-12-03|1994-12-05|1995-01-02|TAKE BACK RETURN|TRUCK|efully special packages use fluffily 4354|138728|1242|7|18|31800.96|0.03|0.04|A|F|1994-12-07|1994-12-11|1994-12-11|TAKE BACK RETURN|SHIP|ross the furiously 4355|194040|6560|1|32|36289.28|0.10|0.02|N|O|1996-12-29|1997-02-08|1997-01-24|DELIVER IN PERSON|REG AIR|y silent deposits. b 4355|16565|9067|2|4|5926.24|0.05|0.02|N|O|1997-02-25|1997-01-29|1997-03-17|TAKE BACK RETURN|TRUCK|slyly blithely regular packag 4355|738|739|3|13|21303.49|0.07|0.05|N|O|1997-01-21|1996-12-22|1997-02-14|COLLECT COD|TRUCK| ought to mold. blithely pending ideas 4355|193886|6406|4|14|27718.32|0.04|0.02|N|O|1997-03-08|1997-01-22|1997-03-26|NONE|RAIL|he furiously ironic accounts. quickly iro 4355|30531|8041|5|50|73076.50|0.10|0.00|N|O|1996-11-25|1997-01-01|1996-12-06|DELIVER IN PERSON|REG AIR| regular accounts boost along the 4355|121860|4373|6|35|65865.10|0.00|0.08|N|O|1997-01-28|1997-01-28|1997-02-20|NONE|FOB|ess accounts affix ironic 4355|100815|3326|7|47|85343.07|0.09|0.02|N|O|1996-12-28|1996-12-29|1997-01-09|NONE|RAIL|e. realms integrate 4356|193770|3771|1|35|65231.95|0.00|0.04|R|F|1994-05-30|1994-06-14|1994-06-08|COLLECT COD|MAIL|arefully ironic 4357|83306|831|1|50|64465.00|0.04|0.07|N|O|1997-11-25|1997-12-03|1997-12-17|DELIVER IN PERSON|RAIL|s. 
final, e 4357|107896|7897|2|17|32366.13|0.10|0.07|N|O|1998-02-01|1997-12-08|1998-02-09|DELIVER IN PERSON|MAIL|e carefully furiou 4358|125030|55|1|47|49586.41|0.04|0.00|N|O|1997-10-15|1997-10-14|1997-11-04|DELIVER IN PERSON|SHIP|refully busy dep 4359|173759|8794|1|41|75142.75|0.03|0.07|A|F|1993-04-06|1993-05-06|1993-04-14|COLLECT COD|RAIL|s affix sly 4359|152794|7825|2|8|14774.32|0.03|0.08|R|F|1993-06-27|1993-05-16|1993-07-04|DELIVER IN PERSON|MAIL|packages affix. fluffily regular f 4359|192211|7250|3|32|41702.72|0.10|0.03|R|F|1993-06-18|1993-04-04|1993-07-18|COLLECT COD|MAIL|olites nag quietly caref 4359|77506|14|4|1|1483.50|0.05|0.03|R|F|1993-04-27|1993-05-09|1993-05-08|NONE|MAIL| fluffily ironic, bold pac 4359|32507|7514|5|22|31669.00|0.04|0.01|A|F|1993-03-28|1993-06-01|1993-04-13|NONE|REG AIR|accounts wake ironic deposits. ironic 4384|135485|5486|1|5|7602.40|0.09|0.01|A|F|1992-08-22|1992-08-24|1992-09-20|DELIVER IN PERSON|MAIL|instructions sleep. blithely express pa 4384|88695|6220|2|38|63980.22|0.07|0.06|A|F|1992-10-18|1992-09-24|1992-11-04|NONE|FOB|ly final requests. regu 4384|88921|6446|3|11|21009.12|0.05|0.04|R|F|1992-08-31|1992-10-04|1992-09-28|TAKE BACK RETURN|FOB|deposits promise carefully even, regular e 4385|110994|6017|1|38|76189.62|0.00|0.02|N|O|1996-11-22|1996-10-30|1996-12-21|DELIVER IN PERSON|TRUCK|inal frays. final, bold exc 4386|129394|6931|1|10|14233.90|0.05|0.07|N|O|1998-06-03|1998-04-16|1998-06-28|TAKE BACK RETURN|MAIL|gainst the quickly expre 4386|117055|9567|2|28|30017.40|0.03|0.06|N|O|1998-03-19|1998-05-01|1998-03-27|NONE|FOB|. quick packages play slyly 4386|139628|4655|3|4|6670.48|0.07|0.05|N|O|1998-04-07|1998-03-25|1998-04-19|COLLECT COD|FOB|ns wake carefully carefully iron 4386|120458|459|4|21|31047.45|0.09|0.00|N|O|1998-05-05|1998-03-19|1998-05-13|NONE|RAIL|e pending, sp 4386|129214|6751|5|39|48485.19|0.09|0.06|N|O|1998-03-05|1998-03-15|1998-03-16|NONE|RAIL|structions cajole quickly express 4386|89453|9454|6|18|25964.10|0.02|0.05|N|O|1998-04-12|1998-04-09|1998-05-12|TAKE BACK RETURN|SHIP| deposits use according to the pending, 4386|19821|2323|7|16|27853.12|0.07|0.02|N|O|1998-05-05|1998-03-17|1998-06-03|COLLECT COD|AIR|e furiously final pint 4387|121746|9283|1|3|5303.22|0.02|0.01|N|O|1996-01-17|1996-01-14|1996-01-28|COLLECT COD|AIR| boost slyly ironic instructions. furiou 4387|176820|4372|2|48|91047.36|0.06|0.05|N|O|1995-10-29|1995-12-11|1995-11-01|NONE|REG AIR|sleep slyly. blithely sl 4387|1622|4123|3|15|22854.30|0.00|0.03|N|O|1996-01-11|1996-01-14|1996-01-30|TAKE BACK RETURN|REG AIR|s hinder quietly across the pla 4387|46843|6844|4|9|16108.56|0.00|0.03|N|O|1996-01-04|1995-12-26|1996-01-12|DELIVER IN PERSON|REG AIR|c ideas. 
slyly regular packages sol 4387|81324|6341|5|3|3915.96|0.05|0.08|N|O|1995-11-17|1995-12-28|1995-11-25|COLLECT COD|SHIP| pinto beans 4387|5012|2513|6|40|36680.40|0.02|0.04|N|O|1995-11-29|1995-12-10|1995-12-20|NONE|REG AIR|deas according to the blithely regular fox 4388|64943|2462|1|30|57238.20|0.02|0.07|N|O|1996-06-07|1996-05-07|1996-06-22|DELIVER IN PERSON|FOB|s cajole fluffil 4388|83255|8272|2|28|34671.00|0.05|0.04|N|O|1996-05-08|1996-06-20|1996-05-12|TAKE BACK RETURN|RAIL|ove the ide 4388|51377|8893|3|13|17268.81|0.07|0.05|N|O|1996-06-28|1996-05-23|1996-07-04|DELIVER IN PERSON|REG AIR|ly even, expre 4389|156730|1761|1|20|35734.60|0.08|0.00|A|F|1994-06-06|1994-06-17|1994-06-17|DELIVER IN PERSON|SHIP|ng the carefully express d 4389|152822|368|2|13|24372.66|0.00|0.00|A|F|1994-08-18|1994-06-06|1994-08-20|NONE|RAIL|nal, regula 4389|78001|509|3|39|38181.00|0.04|0.07|A|F|1994-06-08|1994-06-04|1994-06-10|TAKE BACK RETURN|TRUCK| unusual, final excuses cajole carefully 4389|159554|7100|4|5|8067.75|0.09|0.00|A|F|1994-09-03|1994-06-23|1994-09-16|NONE|FOB| ironic request 4389|10461|2963|5|22|30172.12|0.08|0.00|R|F|1994-07-05|1994-06-12|1994-07-12|NONE|TRUCK|lly silent de 4389|1428|1429|6|22|29247.24|0.01|0.04|R|F|1994-06-07|1994-06-29|1994-06-19|COLLECT COD|TRUCK|at the final excuses hinder carefully a 4389|184028|4029|7|4|4448.08|0.09|0.08|R|F|1994-06-14|1994-06-30|1994-07-06|NONE|REG AIR| blithely even d 4390|151140|3656|1|35|41689.90|0.07|0.04|R|F|1995-05-30|1995-07-02|1995-06-15|DELIVER IN PERSON|TRUCK|ongside of the slyly regular ideas 4390|195068|7588|2|28|32565.68|0.03|0.00|N|O|1995-09-07|1995-06-22|1995-10-05|COLLECT COD|SHIP|ld braids haggle atop the for 4390|100762|8293|3|42|74035.92|0.05|0.08|A|F|1995-06-12|1995-07-16|1995-06-17|NONE|AIR|arefully even accoun 4390|97112|4640|4|32|35491.52|0.07|0.08|N|O|1995-09-15|1995-08-12|1995-10-05|TAKE BACK RETURN|TRUCK|ctions across 4391|160992|3509|1|1|2052.99|0.09|0.00|R|F|1992-06-18|1992-04-27|1992-06-20|COLLECT COD|TRUCK|ong the silent deposits 4391|186943|4498|2|45|91347.30|0.07|0.04|R|F|1992-04-01|1992-05-01|1992-04-13|TAKE BACK RETURN|TRUCK|ep quickly after 4416|93919|8938|1|37|70777.67|0.08|0.03|A|F|1992-10-23|1992-08-23|1992-11-16|COLLECT COD|RAIL|fluffily ironic 4416|88976|8977|2|3|5894.91|0.06|0.03|R|F|1992-10-22|1992-08-06|1992-11-13|DELIVER IN PERSON|SHIP| requests sleep along the 4416|8657|6158|3|45|70454.25|0.09|0.03|A|F|1992-10-16|1992-09-09|1992-10-28|COLLECT COD|AIR|the final pinto beans. special frets 4417|74480|6988|1|28|40725.44|0.08|0.02|N|O|1998-09-04|1998-10-04|1998-09-19|TAKE BACK RETURN|REG AIR|ies across the furious 4417|180729|5766|2|1|1809.72|0.06|0.08|N|O|1998-10-23|1998-08-22|1998-10-24|NONE|REG AIR|press deposits promise stealthily amo 4417|97701|5229|3|35|59454.50|0.06|0.04|N|O|1998-08-08|1998-09-23|1998-09-02|DELIVER IN PERSON|FOB|slyly regular, silent courts. even packag 4418|34773|7277|1|32|54648.64|0.02|0.06|A|F|1993-05-28|1993-06-02|1993-05-30|TAKE BACK RETURN|RAIL|ly. bold pinto b 4418|21913|9420|2|14|25688.74|0.03|0.04|A|F|1993-05-20|1993-06-18|1993-06-05|TAKE BACK RETURN|SHIP| blithely regular requests. blith 4418|78027|5549|3|3|3015.06|0.00|0.02|R|F|1993-04-08|1993-06-04|1993-05-02|NONE|SHIP|luffily across the unusual ideas. reque 4419|107398|7399|1|45|63242.55|0.01|0.05|N|O|1996-07-20|1996-09-07|1996-08-18|DELIVER IN PERSON|TRUCK|s doze sometimes fluffily regular a 4419|31638|4142|2|42|65924.46|0.00|0.03|N|O|1996-09-18|1996-07-25|1996-09-21|COLLECT COD|RAIL|sts. 
furious 4419|131738|6765|3|6|10618.38|0.02|0.08|N|O|1996-06-25|1996-09-04|1996-07-20|DELIVER IN PERSON|AIR|ts wake slyly final dugou 4420|7317|4818|1|7|8570.17|0.07|0.03|R|F|1994-08-30|1994-09-03|1994-09-25|NONE|FOB| regular instructions sleep around 4421|97523|5051|1|37|56259.24|0.09|0.08|N|O|1997-07-22|1997-06-27|1997-07-25|DELIVER IN PERSON|SHIP|l accounts. ironic request 4421|55380|391|2|46|61427.48|0.04|0.04|N|O|1997-04-21|1997-05-13|1997-05-15|DELIVER IN PERSON|FOB|reful packages. bold, 4421|166181|8698|3|46|57370.28|0.00|0.06|N|O|1997-05-25|1997-05-21|1997-06-23|COLLECT COD|TRUCK|g dependenci 4421|190694|5733|4|32|57110.08|0.06|0.04|N|O|1997-07-09|1997-06-03|1997-07-25|NONE|SHIP|ar ideas eat among the furiousl 4421|189912|9913|5|32|64061.12|0.06|0.04|N|O|1997-07-28|1997-06-14|1997-08-13|NONE|REG AIR|uickly final pinto beans impress. bold 4421|46583|4096|6|44|67301.52|0.09|0.06|N|O|1997-06-17|1997-06-20|1997-06-29|NONE|TRUCK|le carefully. bl 4421|115306|329|7|18|23783.40|0.01|0.00|N|O|1997-06-07|1997-05-13|1997-06-10|DELIVER IN PERSON|FOB|. regular, s 4422|134519|2059|1|5|7767.55|0.09|0.07|N|O|1995-07-17|1995-08-13|1995-07-25|NONE|SHIP|e furiously about t 4422|47451|9956|2|41|57336.45|0.08|0.05|N|F|1995-06-12|1995-07-09|1995-06-20|COLLECT COD|TRUCK| theodolites shal 4422|102348|9879|3|39|52663.26|0.00|0.05|N|O|1995-09-02|1995-06-24|1995-09-14|NONE|TRUCK|en hockey players engage 4422|152192|2193|4|4|4976.76|0.02|0.05|N|O|1995-09-18|1995-08-12|1995-10-18|COLLECT COD|FOB|cies along the bo 4422|79692|4707|5|20|33433.80|0.07|0.05|N|O|1995-08-17|1995-07-16|1995-09-13|DELIVER IN PERSON|RAIL|ructions wake slyly al 4423|149239|6782|1|3|3864.69|0.03|0.00|A|F|1995-03-22|1995-04-06|1995-04-19|NONE|TRUCK| final theodolites nag after the bli 4423|59576|4587|2|2|3071.14|0.07|0.04|A|F|1995-03-04|1995-04-04|1995-03-08|TAKE BACK RETURN|REG AIR|old sheaves sleep 4448|51574|6585|1|24|36613.68|0.10|0.07|N|O|1998-09-09|1998-07-06|1998-09-27|DELIVER IN PERSON|SHIP|nal packages along the ironic instructi 4448|188627|1146|2|13|22303.06|0.00|0.01|N|O|1998-07-26|1998-07-03|1998-08-14|COLLECT COD|MAIL|fluffily express accounts integrate furiou 4448|40980|5989|3|35|67234.30|0.10|0.06|N|O|1998-09-18|1998-07-27|1998-10-08|NONE|REG AIR|aggle carefully alongside of the q 4448|140138|139|4|3|3534.39|0.01|0.01|N|O|1998-07-20|1998-07-10|1998-08-07|DELIVER IN PERSON|TRUCK|ronic theod 4448|90140|141|5|41|46335.74|0.00|0.08|N|O|1998-07-30|1998-08-09|1998-08-03|NONE|AIR|pon the permanently even excuses nag 4448|171401|1402|6|12|17668.80|0.06|0.03|N|O|1998-08-21|1998-06-30|1998-09-09|COLLECT COD|RAIL|sits about the ironic, bu 4449|31484|6491|1|42|59450.16|0.10|0.07|N|O|1998-03-22|1998-05-09|1998-04-03|NONE|FOB| packages. blithely final 4449|140638|3153|2|10|16786.30|0.02|0.03|N|O|1998-05-09|1998-05-04|1998-05-15|NONE|SHIP|ccounts alongside of the platelets integr 4450|173191|3192|1|44|55624.36|0.10|0.00|N|O|1997-10-12|1997-10-13|1997-10-29|DELIVER IN PERSON|RAIL| the slyly eve 4450|14714|4715|2|9|14658.39|0.03|0.03|N|O|1997-08-13|1997-08-16|1997-08-15|NONE|FOB|gular requests cajole carefully. regular c 4450|95500|8010|3|45|67297.50|0.08|0.01|N|O|1997-09-01|1997-10-06|1997-09-19|NONE|TRUCK|express ideas are furiously regular 4450|61930|6943|4|13|24595.09|0.00|0.00|N|O|1997-08-26|1997-09-18|1997-09-20|COLLECT COD|MAIL| brave foxes. slyly unusual 4450|55040|5041|5|6|5970.24|0.09|0.01|N|O|1997-09-02|1997-09-30|1997-09-09|NONE|FOB|eposits. 
foxes cajole unusual fox 4451|163032|3033|1|40|43801.20|0.03|0.03|A|F|1994-11-18|1994-12-25|1994-11-26|DELIVER IN PERSON|RAIL|y. slyly special deposits are sly 4451|62282|2283|2|34|42305.52|0.10|0.02|A|F|1994-11-30|1994-12-04|1994-12-13|COLLECT COD|SHIP| regular ideas. 4451|158586|8587|3|19|31247.02|0.05|0.06|R|F|1994-10-09|1994-11-26|1994-10-23|COLLECT COD|FOB|ly after the fluffi 4452|113429|5941|1|21|30290.82|0.07|0.03|R|F|1994-10-06|1994-08-23|1994-10-15|COLLECT COD|TRUCK|multipliers x-ray carefully in place of 4452|149|7650|2|47|49309.58|0.01|0.06|A|F|1994-10-08|1994-08-09|1994-10-09|TAKE BACK RETURN|TRUCK|ts. slyly regular cour 4453|146971|2000|1|41|82736.77|0.00|0.08|N|O|1997-07-17|1997-05-15|1997-07-31|NONE|REG AIR|anent theodolites are slyly except t 4453|132711|2712|2|16|27899.36|0.03|0.00|N|O|1997-07-22|1997-05-05|1997-08-03|COLLECT COD|FOB|ar excuses nag quickly even accounts. b 4453|61826|9345|3|48|85815.36|0.02|0.07|N|O|1997-05-29|1997-06-24|1997-06-03|NONE|SHIP|eep. fluffily express accounts at the furi 4453|101706|4217|4|26|44400.20|0.06|0.07|N|O|1997-05-07|1997-06-07|1997-05-22|NONE|TRUCK|express packages are 4454|150851|3367|1|20|38037.00|0.10|0.08|R|F|1994-05-06|1994-03-17|1994-05-20|COLLECT COD|SHIP|lar theodolites. even instructio 4454|151032|3548|2|22|23826.66|0.06|0.02|A|F|1994-02-06|1994-04-11|1994-03-06|DELIVER IN PERSON|RAIL|ully. carefully final accounts accordi 4454|191552|1553|3|45|73959.75|0.07|0.04|A|F|1994-03-29|1994-03-26|1994-04-04|TAKE BACK RETURN|RAIL|ests promise. packages print fur 4454|1324|1325|4|1|1225.32|0.09|0.05|A|F|1994-02-05|1994-04-19|1994-02-12|COLLECT COD|RAIL|equests run. 4454|51060|8576|5|48|48530.88|0.00|0.07|R|F|1994-04-23|1994-04-03|1994-04-26|COLLECT COD|FOB|to beans wake across th 4454|159632|2148|6|20|33832.60|0.10|0.03|A|F|1994-04-08|1994-03-06|1994-04-26|DELIVER IN PERSON|TRUCK|quickly regular requests. furiously 4455|69731|7250|1|20|34014.60|0.01|0.05|A|F|1994-01-31|1993-11-21|1994-03-02|DELIVER IN PERSON|MAIL| express packages. packages boost quickly 4455|152587|2588|2|47|77060.26|0.09|0.01|R|F|1994-01-01|1993-12-25|1994-01-05|COLLECT COD|FOB| requests. even, even accou 4455|122723|7748|3|34|59354.48|0.00|0.06|A|F|1993-10-24|1993-11-27|1993-11-04|TAKE BACK RETURN|AIR| slyly ironic requests. quickly even d 4480|107888|5419|1|30|56876.40|0.08|0.03|R|F|1994-07-29|1994-06-22|1994-08-01|NONE|FOB|ven braids us 4481|23918|6421|1|50|92095.50|0.02|0.06|N|O|1996-07-22|1996-05-13|1996-08-14|DELIVER IN PERSON|RAIL|ar packages. regula 4481|189040|1559|2|27|30484.08|0.02|0.03|N|O|1996-04-06|1996-05-17|1996-04-12|TAKE BACK RETURN|AIR|ackages haggle even, 4482|70869|870|1|32|58875.52|0.06|0.03|A|F|1995-05-16|1995-07-22|1995-06-07|NONE|RAIL| quickly pendin 4482|95182|201|2|32|37669.76|0.01|0.06|N|O|1995-08-16|1995-06-26|1995-09-10|DELIVER IN PERSON|AIR|eans wake according 4483|5133|5134|1|32|33220.16|0.07|0.07|R|F|1992-04-05|1992-05-25|1992-04-08|DELIVER IN PERSON|MAIL|ests haggle. slyl 4483|61832|4339|2|50|89691.50|0.01|0.06|A|F|1992-06-19|1992-05-12|1992-07-08|DELIVER IN PERSON|TRUCK|ag blithely even 4483|8334|3335|3|50|62116.50|0.00|0.04|R|F|1992-06-10|1992-04-18|1992-06-17|DELIVER IN PERSON|MAIL|ackages. furiously ironi 4484|94779|2307|1|4|7095.08|0.06|0.03|N|O|1997-04-09|1997-02-11|1997-04-12|TAKE BACK RETURN|TRUCK|packages de 4484|136812|6813|2|39|72103.59|0.05|0.02|N|O|1997-04-01|1997-01-26|1997-04-21|NONE|RAIL|onic accounts wake blithel 4484|190000|1|3|38|41420.00|0.06|0.07|N|O|1997-03-07|1997-01-31|1997-04-01|COLLECT COD|REG AIR|. 
even requests un 4484|121153|8690|4|41|48140.15|0.06|0.03|N|O|1997-01-25|1997-02-15|1997-01-29|TAKE BACK RETURN|REG AIR|ress accounts. ironic deposits unwind fur 4484|2585|2586|5|42|62478.36|0.03|0.07|N|O|1997-03-25|1997-02-21|1997-04-05|DELIVER IN PERSON|REG AIR|ding, pending requests wake. fluffily 4484|35516|5517|6|29|42093.79|0.09|0.06|N|O|1996-12-27|1997-03-10|1997-01-13|NONE|FOB| wake blithely ironic 4484|102267|7288|7|50|63463.00|0.07|0.01|N|O|1997-03-17|1997-03-16|1997-03-21|COLLECT COD|FOB|the ironic, final theodo 4485|190656|8214|1|1|1746.65|0.03|0.05|R|F|1994-12-04|1995-02-07|1994-12-09|NONE|AIR|play according to the ironic, ironic 4485|140072|7615|2|46|51155.22|0.04|0.06|R|F|1995-03-09|1994-12-14|1995-03-23|DELIVER IN PERSON|AIR|. ironic foxes haggle. regular war 4485|174651|4652|3|43|74202.95|0.01|0.05|R|F|1995-01-17|1995-02-11|1995-02-07|DELIVER IN PERSON|TRUCK|al accounts according to the slyly r 4485|143072|3073|4|43|47948.01|0.08|0.06|R|F|1995-01-28|1995-01-26|1995-02-07|DELIVER IN PERSON|AIR|. blithely 4485|5035|5036|5|47|44181.41|0.08|0.04|R|F|1995-03-11|1995-01-11|1995-03-21|TAKE BACK RETURN|RAIL|luffily pending acc 4486|134209|6723|1|46|57187.20|0.08|0.00|N|O|1998-05-02|1998-04-05|1998-05-08|COLLECT COD|MAIL|ackages. specia 4486|48748|3757|2|19|32238.06|0.10|0.01|N|O|1998-06-07|1998-05-28|1998-07-02|NONE|MAIL|pending foxes after 4486|95192|5193|3|47|55797.93|0.02|0.07|N|O|1998-04-09|1998-05-24|1998-05-07|DELIVER IN PERSON|MAIL|ts around the quiet packages ar 4486|90018|5037|4|28|28224.28|0.07|0.02|N|O|1998-04-21|1998-04-19|1998-04-26|TAKE BACK RETURN|AIR|to the furious, regular foxes play abov 4487|137105|9619|1|37|42257.70|0.03|0.07|R|F|1993-02-28|1993-04-18|1993-03-17|TAKE BACK RETURN|MAIL|bove the fu 4487|112846|7869|2|49|91083.16|0.10|0.00|R|F|1993-06-13|1993-05-08|1993-07-10|COLLECT COD|FOB|sual packages should ha 4487|189582|2101|3|1|1671.58|0.02|0.07|A|F|1993-05-11|1993-05-23|1993-05-17|TAKE BACK RETURN|FOB|ithely final asym 4487|92846|2847|4|25|45971.00|0.07|0.03|A|F|1993-03-09|1993-04-27|1993-03-30|COLLECT COD|RAIL|g the final instructions. slyly c 4512|161210|3727|1|30|38136.30|0.07|0.07|N|O|1996-01-28|1995-12-22|1996-02-22|TAKE BACK RETURN|TRUCK|ly unusual package 4512|40933|5942|2|24|44974.32|0.04|0.06|N|O|1995-12-16|1996-01-16|1995-12-25|NONE|SHIP|ly regular pinto beans. carefully bold depo 4512|144996|25|3|21|42860.79|0.00|0.00|N|O|1995-10-31|1995-12-30|1995-11-15|NONE|REG AIR|lly unusual pinto b 4512|140248|249|4|32|41223.68|0.10|0.01|N|O|1995-11-25|1995-12-28|1995-12-06|NONE|FOB|counts are against the quickly regular 4512|132998|8025|5|43|87332.57|0.06|0.00|N|O|1995-12-20|1995-11-28|1996-01-14|NONE|AIR|are carefully. theodolites wake 4513|169018|9019|1|29|31523.29|0.03|0.01|N|O|1996-05-18|1996-05-23|1996-06-08|NONE|REG AIR|cajole. regular packages boost. s 4513|69944|2451|2|39|74643.66|0.01|0.04|N|O|1996-06-25|1996-05-14|1996-07-24|NONE|MAIL|slyly furiously unusual deposits. blit 4513|137348|4888|3|34|47101.56|0.00|0.03|N|O|1996-03-27|1996-06-12|1996-04-06|DELIVER IN PERSON|SHIP|sits. quickly even instructions 4513|191361|8919|4|13|18880.68|0.08|0.08|N|O|1996-04-12|1996-05-19|1996-04-25|DELIVER IN PERSON|AIR|l, final excuses detect furi 4514|163673|1222|1|27|46890.09|0.06|0.06|R|F|1994-07-01|1994-07-13|1994-07-26|COLLECT COD|AIR| even, silent foxes be 4514|45129|7634|2|15|16111.80|0.10|0.04|R|F|1994-08-24|1994-07-11|1994-09-14|DELIVER IN PERSON|RAIL|! 
unusual, special deposits afte 4514|77425|9933|3|10|14024.20|0.09|0.05|A|F|1994-06-19|1994-06-25|1994-07-01|COLLECT COD|SHIP|ake furiously. carefully regular requests 4514|80738|3247|4|9|15468.57|0.10|0.03|A|F|1994-08-04|1994-07-01|1994-09-01|DELIVER IN PERSON|REG AIR|wly. quick 4514|148628|6171|5|12|20119.44|0.02|0.03|R|F|1994-08-20|1994-06-09|1994-09-15|TAKE BACK RETURN|FOB| carefully ironic foxes nag caref 4514|188706|8707|6|38|68198.60|0.03|0.05|A|F|1994-07-28|1994-07-06|1994-08-25|NONE|AIR|ending excuses. sl 4514|176429|6430|7|27|40646.34|0.04|0.06|A|F|1994-06-24|1994-07-14|1994-06-30|TAKE BACK RETURN|TRUCK|. slyly sile 4515|38003|8004|1|15|14115.00|0.06|0.01|R|F|1992-05-26|1992-05-25|1992-06-03|NONE|SHIP|posits wake 4515|102050|9581|2|50|52602.50|0.06|0.03|A|F|1992-03-28|1992-05-16|1992-04-20|NONE|AIR|ding instructions again 4515|153202|748|3|27|33890.40|0.09|0.01|A|F|1992-06-06|1992-06-08|1992-06-07|DELIVER IN PERSON|REG AIR| against the even re 4515|53814|3815|4|32|56569.92|0.06|0.03|R|F|1992-04-07|1992-05-11|1992-04-09|COLLECT COD|MAIL|carefully express depo 4515|44659|9668|5|22|35280.30|0.09|0.07|A|F|1992-07-16|1992-05-07|1992-07-23|NONE|SHIP|le quickly above the even, bold ideas. 4515|179908|7460|6|23|45721.70|0.04|0.00|R|F|1992-05-23|1992-06-15|1992-06-20|TAKE BACK RETURN|FOB|ns. bold r 4516|169570|2087|1|34|55745.38|0.05|0.04|A|F|1994-05-16|1994-06-23|1994-06-12|NONE|SHIP|even pinto beans wake qui 4517|42540|2541|1|50|74127.00|0.01|0.02|N|O|1998-06-08|1998-04-18|1998-06-20|DELIVER IN PERSON|MAIL|refully pending acco 4518|143741|8770|1|9|16062.66|0.09|0.04|N|O|1997-06-26|1997-07-07|1997-07-10|NONE|RAIL| pending deposits. slyly re 4518|44083|4084|2|19|19514.52|0.10|0.05|N|O|1997-08-09|1997-06-06|1997-08-27|COLLECT COD|RAIL|ter the slyly bo 4519|54680|7186|1|30|49040.40|0.09|0.07|R|F|1993-04-11|1993-06-05|1993-04-22|DELIVER IN PERSON|REG AIR|totes. slyly bold somas after the 4519|190062|2582|2|37|42626.22|0.06|0.08|R|F|1993-07-22|1993-06-16|1993-08-19|COLLECT COD|AIR|ly slyly furious depth 4544|130682|3196|1|40|68507.20|0.07|0.01|N|O|1997-08-15|1997-10-16|1997-08-20|DELIVER IN PERSON|RAIL| detect slyly. evenly pending instru 4544|171088|3606|2|19|22022.52|0.08|0.01|N|O|1997-08-14|1997-09-08|1997-08-25|NONE|SHIP|regular ideas are furiously about 4544|70629|8151|3|20|31992.40|0.02|0.07|N|O|1997-10-12|1997-10-11|1997-10-13|COLLECT COD|REG AIR| waters about the 4544|50571|5582|4|39|59341.23|0.07|0.05|N|O|1997-08-20|1997-09-07|1997-08-27|COLLECT COD|REG AIR|ular packages. s 4544|132280|7307|5|31|40680.68|0.09|0.03|N|O|1997-08-09|1997-09-29|1997-08-17|COLLECT COD|TRUCK|dolites detect quickly reg 4544|26100|6101|6|8|8208.80|0.10|0.03|N|O|1997-10-13|1997-10-06|1997-10-25|COLLECT COD|AIR|olites. fi 4545|172885|437|1|38|74399.44|0.06|0.06|R|F|1993-01-27|1993-03-01|1993-02-04|NONE|TRUCK|nts serve according to th 4545|62456|2457|2|27|38298.15|0.01|0.06|R|F|1993-02-07|1993-02-18|1993-02-18|NONE|FOB|ously bold asymptotes! blithely pen 4545|86195|3720|3|9|10630.71|0.10|0.06|R|F|1993-03-20|1993-02-23|1993-04-11|TAKE BACK RETURN|AIR|xpress accounts 4545|63981|1500|4|2|3889.96|0.10|0.00|R|F|1993-04-16|1993-04-17|1993-05-03|NONE|REG AIR|ages use. slyly even i 4545|116732|9244|5|27|47215.71|0.08|0.05|A|F|1993-03-18|1993-02-22|1993-03-23|NONE|RAIL|ccounts haggle carefully. deposits 4545|108682|1193|6|8|13525.44|0.03|0.02|A|F|1993-05-01|1993-03-12|1993-05-15|NONE|FOB| boost slyly. slyly 4545|8362|863|7|36|45732.96|0.10|0.04|R|F|1993-01-28|1993-03-30|1993-02-04|DELIVER IN PERSON|SHIP|sublate slyly. 
furiously ironic accounts b 4546|132467|7494|1|10|14994.60|0.09|0.02|N|O|1995-09-23|1995-10-10|1995-10-23|COLLECT COD|TRUCK|osits alongside of the 4546|170327|5362|2|15|20959.80|0.04|0.07|N|O|1995-07-31|1995-10-17|1995-08-06|NONE|REG AIR|ught to cajole furiously. qu 4546|76977|6978|3|4|7815.88|0.06|0.08|N|O|1995-08-14|1995-10-07|1995-08-16|COLLECT COD|MAIL|kly pending dependencies along the furio 4546|148745|1260|4|10|17937.40|0.08|0.02|N|O|1995-09-02|1995-09-16|1995-09-10|DELIVER IN PERSON|FOB|above the enticingly ironic dependencies 4547|187798|317|1|15|28286.85|0.10|0.04|A|F|1993-12-08|1993-11-15|1993-12-22|NONE|REG AIR|ets haggle. regular dinos affix fu 4547|115181|7693|2|7|8373.26|0.10|0.02|A|F|1993-09-04|1993-09-29|1993-09-20|COLLECT COD|RAIL|slyly express a 4547|44955|7460|3|15|28499.25|0.00|0.00|R|F|1993-11-18|1993-10-06|1993-12-13|NONE|TRUCK|e carefully across the unus 4547|147087|4630|4|15|17011.20|0.05|0.08|R|F|1993-11-29|1993-10-12|1993-12-29|COLLECT COD|REG AIR|ironic gifts integrate 4548|13272|5774|1|21|24890.67|0.10|0.05|N|O|1996-07-11|1996-09-04|1996-07-30|COLLECT COD|REG AIR|pecial theodoli 4548|46001|1010|2|17|16099.00|0.00|0.08|N|O|1996-07-23|1996-09-21|1996-07-26|DELIVER IN PERSON|REG AIR|y ironic requests above the fluffily d 4548|122710|7735|3|47|81437.37|0.05|0.04|N|O|1996-07-24|1996-09-12|1996-08-08|NONE|MAIL|ts. excuses use slyly spec 4548|176323|1358|4|22|30785.04|0.07|0.01|N|O|1996-07-06|1996-08-23|1996-07-15|DELIVER IN PERSON|RAIL|s. furiously ironic theodolites c 4548|44556|2069|5|36|54019.80|0.04|0.06|N|O|1996-08-19|1996-09-12|1996-09-08|COLLECT COD|FOB|tions integrat 4549|158023|5569|1|44|47564.88|0.08|0.00|N|O|1998-03-13|1998-04-15|1998-03-27|TAKE BACK RETURN|TRUCK|ding to the regular, silent requests 4549|88030|539|2|1|1018.03|0.05|0.08|N|O|1998-05-04|1998-04-11|1998-05-14|TAKE BACK RETURN|AIR| requests wake. furiously even 4550|149038|1553|1|9|9783.27|0.05|0.06|R|F|1995-04-19|1995-02-07|1995-04-24|COLLECT COD|SHIP|l dependencies boost slyly after th 4550|65547|8054|2|19|28738.26|0.06|0.04|A|F|1995-01-01|1995-02-13|1995-01-20|NONE|AIR|quests. express 4551|10447|7951|1|6|8144.64|0.08|0.08|N|O|1996-05-18|1996-04-23|1996-06-13|DELIVER IN PERSON|TRUCK|fily silent fo 4551|178094|3129|2|26|30474.34|0.02|0.04|N|O|1996-04-14|1996-04-26|1996-04-17|TAKE BACK RETURN|RAIL|le. carefully dogged accounts use furiousl 4551|21706|6711|3|22|35809.40|0.08|0.01|N|O|1996-05-12|1996-03-17|1996-05-29|TAKE BACK RETURN|REG AIR|ly ironic reques 4551|197665|185|4|27|47591.82|0.00|0.01|N|O|1996-04-28|1996-03-22|1996-05-22|TAKE BACK RETURN|RAIL|y along the slyly even 4576|89514|9515|1|5|7517.55|0.09|0.03|N|O|1996-08-23|1996-11-08|1996-09-20|TAKE BACK RETURN|AIR|ly express, special asymptote 4576|57907|7908|2|43|80190.70|0.08|0.06|N|O|1996-10-24|1996-09-23|1996-11-10|NONE|SHIP|ly final deposits. never 4576|41350|8863|3|14|18078.90|0.09|0.01|N|O|1996-09-12|1996-09-30|1996-09-24|COLLECT COD|MAIL|detect slyly. 4577|184089|6608|1|43|50442.44|0.01|0.03|N|O|1998-06-16|1998-07-09|1998-06-17|TAKE BACK RETURN|AIR|packages. 4577|176990|2025|2|43|88880.57|0.05|0.03|N|O|1998-08-24|1998-06-02|1998-09-14|TAKE BACK RETURN|RAIL|ly accounts. carefully 4577|68494|3507|3|12|17549.88|0.07|0.05|N|O|1998-07-29|1998-06-17|1998-08-04|DELIVER IN PERSON|TRUCK|equests alongsi 4578|73111|633|1|10|10841.10|0.09|0.06|R|F|1993-01-01|1992-11-19|1993-01-28|TAKE BACK RETURN|REG AIR|uests. 
blithely unus 4578|168393|8394|2|42|61378.38|0.06|0.00|R|F|1993-01-05|1992-11-06|1993-01-13|DELIVER IN PERSON|FOB|s are caref 4578|178494|3529|3|15|23587.35|0.01|0.01|R|F|1992-10-23|1992-11-22|1992-11-09|DELIVER IN PERSON|REG AIR|gular theodo 4578|138667|3694|4|7|11939.62|0.09|0.08|A|F|1992-12-07|1992-11-27|1993-01-05|TAKE BACK RETURN|SHIP|odolites. carefully unusual ideas accor 4578|162341|4858|5|20|28066.80|0.04|0.02|A|F|1993-01-11|1992-11-09|1993-01-23|TAKE BACK RETURN|RAIL|iously pending theodolites-- 4579|174085|9120|1|14|16227.12|0.02|0.02|N|O|1996-02-01|1996-01-08|1996-02-08|TAKE BACK RETURN|MAIL|nding theodolites. fluffil 4579|41886|1887|2|28|51180.64|0.02|0.05|N|O|1996-01-22|1996-02-13|1996-02-03|DELIVER IN PERSON|RAIL|slyly across the 4579|177457|7458|3|34|52171.30|0.05|0.02|N|O|1996-02-26|1996-02-22|1996-03-16|COLLECT COD|MAIL|hely. carefully blithe dependen 4579|119554|9555|4|8|12588.40|0.05|0.06|N|O|1995-12-16|1996-01-15|1995-12-18|TAKE BACK RETURN|AIR|posits. carefully perman 4580|91060|6079|1|22|23123.32|0.01|0.05|A|F|1994-01-16|1994-01-26|1994-02-05|COLLECT COD|AIR|nticingly final packag 4580|31880|6887|2|10|18118.80|0.05|0.04|R|F|1993-12-20|1993-12-30|1994-01-17|COLLECT COD|RAIL|gular, pending deposits. fina 4580|963|8464|3|41|76422.36|0.00|0.07|R|F|1993-12-13|1994-01-31|1994-01-06|NONE|SHIP|requests. quickly silent asymptotes sle 4580|177771|289|4|5|9243.85|0.07|0.00|A|F|1994-01-28|1993-12-17|1994-02-22|NONE|TRUCK|o beans. f 4580|188804|1323|5|39|73819.20|0.03|0.02|R|F|1993-12-28|1993-12-26|1994-01-23|NONE|RAIL|. fluffily final dolphins use furiously al 4581|164947|7464|1|37|74441.78|0.01|0.04|A|F|1992-10-17|1992-11-05|1992-11-04|DELIVER IN PERSON|MAIL|e the blithely bold pearls ha 4581|49672|4681|2|7|11351.69|0.01|0.02|A|F|1992-10-09|1992-10-20|1992-10-21|TAKE BACK RETURN|MAIL|express accounts d 4581|20508|5513|3|46|65711.00|0.04|0.04|A|F|1992-09-09|1992-11-27|1992-09-26|NONE|REG AIR|nag toward the carefully final accounts. 4582|191457|6496|1|17|26323.65|0.09|0.08|N|O|1996-08-17|1996-08-26|1996-08-20|COLLECT COD|REG AIR|ng packages. depo 4583|140441|442|1|17|25184.48|0.01|0.05|A|F|1994-11-08|1994-11-03|1994-11-29|COLLECT COD|MAIL|romise. reques 4583|186542|6543|2|43|70027.22|0.04|0.04|A|F|1994-10-30|1994-12-17|1994-11-16|COLLECT COD|RAIL|fully after the speci 4583|195513|3071|3|28|45038.28|0.00|0.07|A|F|1994-10-29|1994-11-21|1994-11-28|NONE|SHIP|to beans haggle sly 4583|172130|2131|4|27|32457.51|0.08|0.03|R|F|1995-01-11|1994-12-24|1995-02-10|DELIVER IN PERSON|TRUCK| detect silent requests. furiously speci 4583|183800|8837|5|36|67816.80|0.09|0.06|A|F|1995-01-06|1994-11-25|1995-01-29|DELIVER IN PERSON|RAIL|ar requests haggle after the furiously 4583|121705|4218|6|14|24173.80|0.09|0.01|R|F|1994-11-17|1994-11-08|1994-11-21|DELIVER IN PERSON|AIR|detect. doggedly regular pi 4583|86867|1884|7|32|59323.52|0.04|0.00|A|F|1995-01-13|1994-10-29|1995-02-08|TAKE BACK RETURN|RAIL|across the pinto beans-- quickly 4608|172217|9769|1|30|38676.30|0.08|0.05|R|F|1994-10-08|1994-07-18|1994-10-25|DELIVER IN PERSON|SHIP|s cajole. slyly 4608|46212|6213|2|50|57910.50|0.06|0.01|A|F|1994-07-25|1994-09-01|1994-08-10|NONE|FOB| theodolites 4608|78184|692|3|50|58109.00|0.03|0.01|A|F|1994-08-04|1994-09-10|1994-08-13|COLLECT COD|TRUCK| wake closely. even decoys haggle above 4608|30557|558|4|36|53551.80|0.05|0.06|R|F|1994-10-04|1994-08-02|1994-10-21|COLLECT COD|FOB|ages wake quickly slyly iron 4609|46409|3922|1|28|37951.20|0.10|0.05|N|O|1997-02-02|1997-02-17|1997-03-02|DELIVER IN PERSON|REG AIR|ously. 
quickly final requests cajole fl 4609|184318|6837|2|3|4206.93|0.09|0.03|N|O|1996-12-28|1997-02-06|1997-01-20|NONE|FOB|nstructions. furious instructions 4609|22399|2400|3|46|60783.94|0.05|0.05|N|O|1997-02-11|1997-01-16|1997-03-07|NONE|FOB|r foxes. fluffily ironic ideas ha 4610|86177|1194|1|21|24426.57|0.07|0.07|R|F|1993-08-10|1993-08-05|1993-08-27|NONE|REG AIR|ly special theodolites. even, 4610|174842|7360|2|14|26835.76|0.00|0.07|R|F|1993-07-28|1993-07-25|1993-07-31|TAKE BACK RETURN|SHIP| ironic frays. dependencies detect blithel 4610|158379|5925|3|44|63244.28|0.05|0.03|A|F|1993-08-05|1993-07-20|1993-08-19|COLLECT COD|TRUCK| final theodolites 4610|74538|2060|4|26|39325.78|0.06|0.03|R|F|1993-07-01|1993-07-19|1993-07-19|NONE|MAIL| to the fluffily ironic requests h 4610|146158|6159|5|29|34920.35|0.08|0.04|R|F|1993-08-09|1993-07-27|1993-08-16|DELIVER IN PERSON|AIR| foxes. special, express package 4611|51267|6278|1|47|57258.22|0.09|0.06|A|F|1993-03-05|1993-03-01|1993-03-17|COLLECT COD|TRUCK|iously. furiously regular 4611|34872|4873|2|31|56012.97|0.04|0.02|A|F|1993-01-28|1993-02-14|1993-01-29|TAKE BACK RETURN|AIR| final pinto beans. permanent, sp 4611|81649|1650|3|50|81532.00|0.08|0.01|R|F|1993-01-22|1993-03-30|1993-02-16|TAKE BACK RETURN|AIR|l platelets. 4611|70451|7973|4|48|68229.60|0.02|0.08|R|F|1993-02-28|1993-02-12|1993-03-01|COLLECT COD|AIR|ular accounts 4612|5989|8490|1|20|37899.60|0.02|0.03|R|F|1993-09-24|1993-12-18|1993-10-22|NONE|AIR|beans sleep blithely iro 4612|49750|2255|2|17|28895.75|0.10|0.06|A|F|1994-01-09|1993-11-08|1994-02-06|TAKE BACK RETURN|REG AIR|equests haggle carefully silent excus 4612|136371|1398|3|40|56294.80|0.08|0.01|R|F|1993-10-08|1993-11-23|1993-10-24|DELIVER IN PERSON|RAIL|special platelets. 4612|184664|7183|4|10|17486.60|0.10|0.06|A|F|1993-11-11|1993-11-19|1993-11-13|TAKE BACK RETURN|SHIP|unusual theodol 4613|37372|7373|1|17|22259.29|0.09|0.07|N|O|1998-06-07|1998-05-11|1998-06-29|DELIVER IN PERSON|SHIP|liers cajole a 4613|107883|394|2|25|47272.00|0.05|0.04|N|O|1998-05-22|1998-04-11|1998-05-27|TAKE BACK RETURN|SHIP|y pending platelets x-ray ironically! pend 4613|173667|8702|3|15|26109.90|0.10|0.02|N|O|1998-05-31|1998-04-16|1998-06-25|DELIVER IN PERSON|MAIL|against the quickly r 4613|7183|9684|4|36|39246.48|0.04|0.01|N|O|1998-04-22|1998-05-05|1998-05-04|DELIVER IN PERSON|AIR|gainst the furiously ironic 4613|110121|5144|5|35|39589.20|0.04|0.06|N|O|1998-06-04|1998-04-17|1998-06-20|COLLECT COD|MAIL|e blithely against the even, bold pi 4613|195878|8398|6|47|92771.89|0.04|0.04|N|O|1998-07-03|1998-05-26|1998-07-09|NONE|FOB|uriously special requests wak 4613|118713|1225|7|39|67536.69|0.09|0.05|N|O|1998-06-12|1998-06-01|1998-07-06|DELIVER IN PERSON|REG AIR|ously express 4614|6014|1015|1|19|17480.19|0.09|0.08|N|O|1996-05-17|1996-06-21|1996-06-08|TAKE BACK RETURN|AIR|ix. 
carefully regular 4614|64629|4630|2|3|4780.86|0.08|0.01|N|O|1996-07-22|1996-07-21|1996-08-07|NONE|MAIL|ions engage final, ironic 4614|7136|9637|3|36|37552.68|0.10|0.04|N|O|1996-07-05|1996-06-26|1996-07-07|NONE|REG AIR|onic foxes affix furi 4614|125036|2573|4|6|6366.18|0.09|0.01|N|O|1996-06-11|1996-05-30|1996-07-03|COLLECT COD|REG AIR|ake quickly quickly regular epitap 4614|72044|4552|5|24|24384.96|0.07|0.06|N|O|1996-07-01|1996-06-24|1996-07-08|COLLECT COD|REG AIR|regular, even 4614|33149|3150|6|32|34628.48|0.10|0.05|N|O|1996-08-21|1996-05-28|1996-09-16|NONE|REG AIR|ickly furio 4614|127060|4597|7|41|44569.46|0.01|0.07|N|O|1996-07-31|1996-07-12|1996-08-16|COLLECT COD|REG AIR|ackages haggle carefully about the even, b 4615|91296|3806|1|10|12872.90|0.02|0.08|A|F|1993-11-20|1993-10-05|1993-12-08|DELIVER IN PERSON|AIR|sits. slyly express deposits are 4640|87422|4947|1|5|7047.10|0.03|0.08|N|O|1996-02-05|1996-02-14|1996-02-15|TAKE BACK RETURN|RAIL| warthogs against the regular 4640|87287|4812|2|9|11468.52|0.03|0.05|N|O|1996-02-12|1996-02-14|1996-02-29|DELIVER IN PERSON|AIR| accounts. unu 4640|26265|3772|3|18|21442.68|0.02|0.07|N|O|1996-02-28|1996-03-06|1996-03-28|DELIVER IN PERSON|RAIL|boost furiously accord 4640|22346|7351|4|36|45660.24|0.06|0.08|N|O|1996-01-03|1996-03-09|1996-01-11|DELIVER IN PERSON|RAIL|iously furious accounts boost. carefully 4640|155974|1005|5|15|30449.55|0.03|0.02|N|O|1996-03-19|1996-02-09|1996-04-11|TAKE BACK RETURN|FOB|y regular instructions doze furiously. reg 4641|189796|7351|1|45|84860.55|0.07|0.03|R|F|1993-05-11|1993-04-19|1993-05-21|DELIVER IN PERSON|MAIL| about the close 4641|94261|6771|2|39|48955.14|0.06|0.00|R|F|1993-02-10|1993-03-06|1993-02-15|TAKE BACK RETURN|REG AIR| the bold reque 4641|35287|5288|3|15|18334.20|0.01|0.08|R|F|1993-01-25|1993-04-09|1993-02-05|TAKE BACK RETURN|AIR|s. carefully even exc 4642|193181|8220|1|11|14015.98|0.04|0.07|A|F|1995-05-23|1995-04-26|1995-06-04|COLLECT COD|TRUCK|lithely express asympt 4642|179594|2112|2|34|56902.06|0.04|0.07|R|F|1995-04-01|1995-05-11|1995-04-23|COLLECT COD|SHIP|theodolites detect among the ironically sp 4642|20152|153|3|10|10721.50|0.04|0.02|R|F|1995-04-16|1995-04-28|1995-04-24|COLLECT COD|RAIL|urts. even deposits nag beneath 4642|93698|8717|4|18|30450.42|0.00|0.04|N|F|1995-06-16|1995-04-16|1995-06-21|NONE|TRUCK|ily pending accounts hag 4642|178937|8938|5|41|82653.13|0.10|0.00|R|F|1995-04-08|1995-04-13|1995-05-01|DELIVER IN PERSON|MAIL|s are blithely. requests wake above the fur 4643|184931|2486|1|50|100796.50|0.08|0.05|N|O|1995-09-11|1995-08-13|1995-09-30|DELIVER IN PERSON|SHIP|. ironic deposits cajo 4644|176873|9391|1|4|7799.48|0.06|0.03|N|O|1998-05-06|1998-03-19|1998-05-28|NONE|MAIL|gular requests? pendi 4644|96503|6504|2|16|23992.00|0.03|0.04|N|O|1998-03-13|1998-02-21|1998-04-03|COLLECT COD|SHIP|lar excuses across the 4644|114307|6819|3|10|13213.00|0.02|0.02|N|O|1998-02-21|1998-02-28|1998-03-19|COLLECT COD|REG AIR|osits according to the 4644|153953|6469|4|45|90312.75|0.10|0.07|N|O|1998-02-02|1998-04-08|1998-02-15|COLLECT COD|SHIP| carefully a 4644|86667|4192|5|10|16536.60|0.08|0.08|N|O|1998-03-12|1998-03-11|1998-03-19|TAKE BACK RETURN|REG AIR| the slow, final fo 4645|49353|1858|1|45|58605.75|0.09|0.05|A|F|1994-12-27|1994-11-02|1994-12-31|DELIVER IN PERSON|AIR|ular ideas. slyly 4645|65060|5061|2|32|32801.92|0.10|0.08|A|F|1994-11-17|1994-10-30|1994-11-18|COLLECT COD|REG AIR| final accounts alongside 4645|53599|3600|3|25|38814.75|0.03|0.00|R|F|1994-10-25|1994-12-11|1994-11-14|NONE|REG AIR|braids. 
ironic dependencies main 4645|36614|1621|4|42|65125.62|0.10|0.02|R|F|1994-12-02|1994-12-18|1994-12-16|COLLECT COD|TRUCK|regular pinto beans amon 4645|160043|2560|5|35|38606.40|0.03|0.07|A|F|1994-12-08|1994-11-25|1994-12-09|TAKE BACK RETURN|FOB|sias believe bl 4645|41437|3942|6|27|37217.61|0.09|0.08|R|F|1994-11-26|1994-10-25|1994-12-04|NONE|SHIP|ously express pinto beans. ironic depos 4645|30142|143|7|42|45029.88|0.10|0.06|A|F|1994-12-31|1994-10-22|1995-01-28|DELIVER IN PERSON|AIR|e slyly regular pinto beans. thin 4646|190170|2690|1|24|30244.08|0.02|0.05|N|O|1996-09-18|1996-08-09|1996-09-21|TAKE BACK RETURN|RAIL|ic platelets lose carefully. blithely unu 4646|177583|5135|2|26|43175.08|0.07|0.00|N|O|1996-10-02|1996-08-25|1996-10-27|DELIVER IN PERSON|MAIL|ix according to the slyly spe 4646|33120|630|3|18|18956.16|0.01|0.00|N|O|1996-06-30|1996-08-10|1996-07-12|TAKE BACK RETURN|TRUCK|beans sleep car 4646|39105|9106|4|38|39675.80|0.08|0.01|N|O|1996-09-01|1996-08-23|1996-09-27|COLLECT COD|SHIP|al platelets cajole. slyly final dol 4646|25896|8399|5|22|40081.58|0.01|0.08|N|O|1996-07-14|1996-08-06|1996-07-29|DELIVER IN PERSON|MAIL|cies are blithely after the slyly reg 4647|92862|7881|1|16|29677.76|0.09|0.07|R|F|1994-09-07|1994-07-15|1994-10-06|COLLECT COD|RAIL|o beans about the fluffily special the 4647|128884|6421|2|34|65037.92|0.01|0.02|R|F|1994-05-20|1994-06-20|1994-05-29|COLLECT COD|TRUCK|ly sly accounts 4647|146614|6615|3|27|44836.47|0.03|0.08|R|F|1994-05-20|1994-06-26|1994-05-30|NONE|FOB|ully even ti 4647|138275|8276|4|2|2626.54|0.04|0.07|R|F|1994-07-03|1994-07-22|1994-07-22|TAKE BACK RETURN|RAIL|dolites wake furiously special pinto be 4647|186829|6830|5|2|3831.64|0.07|0.06|A|F|1994-05-27|1994-08-05|1994-06-10|TAKE BACK RETURN|FOB| pinto beans believe furiously slyly silent 4647|28488|991|6|28|39661.44|0.02|0.03|A|F|1994-08-25|1994-08-06|1994-09-18|DELIVER IN PERSON|FOB| are above the fluffily fin 4672|58147|653|1|22|24313.08|0.01|0.07|N|O|1995-12-03|1995-12-08|1995-12-17|COLLECT COD|AIR|l instructions. blithely ironic packages 4672|60961|3468|2|41|78800.36|0.00|0.00|N|O|1995-12-01|1995-12-15|1995-12-12|COLLECT COD|RAIL| slyly quie 4672|162043|7076|3|24|26520.96|0.04|0.03|N|O|1995-11-11|1995-12-28|1995-12-04|NONE|REG AIR|y fluffily stealt 4672|56171|1182|4|13|14653.21|0.10|0.03|N|O|1996-02-02|1995-12-13|1996-03-02|DELIVER IN PERSON|RAIL|ar requests? pending accounts against 4672|54022|9033|5|45|43920.90|0.08|0.07|N|O|1996-02-07|1996-01-16|1996-02-14|DELIVER IN PERSON|MAIL| platelets use amon 4672|140922|3437|6|20|39258.40|0.02|0.07|N|O|1995-12-08|1996-01-25|1995-12-19|COLLECT COD|REG AIR|s boost at the ca 4672|71853|9375|7|38|69344.30|0.01|0.01|N|O|1995-11-28|1995-12-08|1995-12-13|COLLECT COD|SHIP|ests. 
idle, regular ex 4673|16607|6608|1|8|12188.80|0.08|0.01|N|O|1996-10-12|1996-10-05|1996-11-04|TAKE BACK RETURN|FOB|lithely final re 4673|100894|895|2|44|83375.16|0.06|0.01|N|O|1996-12-11|1996-10-31|1997-01-08|DELIVER IN PERSON|RAIL| gifts cajole dari 4673|122258|7283|3|9|11522.25|0.04|0.07|N|O|1996-10-15|1996-09-30|1996-10-30|DELIVER IN PERSON|MAIL|ages nag across 4674|149045|1560|1|50|54702.00|0.07|0.08|A|F|1994-05-13|1994-06-15|1994-06-05|COLLECT COD|RAIL|haggle about the blithel 4674|188714|1233|2|35|63094.85|0.02|0.05|A|F|1994-08-02|1994-06-04|1994-08-21|COLLECT COD|FOB|le quickly after the express sent 4674|110443|2955|3|3|4360.32|0.01|0.05|A|F|1994-07-19|1994-05-28|1994-07-23|TAKE BACK RETURN|RAIL| regular requests na 4674|12164|4666|4|21|22599.36|0.02|0.08|R|F|1994-05-08|1994-07-02|1994-06-04|COLLECT COD|RAIL|ent accounts sublate deposits. instruc 4675|170390|391|1|6|8762.34|0.00|0.05|R|F|1994-01-22|1994-01-06|1994-02-12|TAKE BACK RETURN|TRUCK| unusual ideas thrash bl 4675|143689|8718|2|12|20792.16|0.00|0.04|A|F|1993-12-22|1994-01-12|1993-12-23|TAKE BACK RETURN|AIR|posits affix carefully 4675|180963|964|3|5|10219.80|0.05|0.05|A|F|1994-01-16|1994-01-05|1994-01-18|DELIVER IN PERSON|RAIL|lent pinto beans 4675|33646|1156|4|26|41070.64|0.03|0.01|A|F|1993-12-16|1993-12-29|1993-12-23|DELIVER IN PERSON|SHIP|nts. express requests are quickly 4675|80498|499|5|18|26612.82|0.01|0.08|R|F|1994-02-23|1994-01-18|1994-03-05|TAKE BACK RETURN|FOB|cajole unusual dep 4675|118089|8090|6|1|1107.08|0.10|0.06|R|F|1994-03-18|1994-02-14|1994-04-17|NONE|SHIP|unts. caref 4676|164646|9679|1|47|80400.08|0.03|0.06|N|O|1995-12-20|1995-10-04|1996-01-09|NONE|AIR|lithely about the carefully special requ 4676|5043|44|2|33|31285.32|0.08|0.05|N|O|1995-12-29|1995-10-01|1996-01-18|TAKE BACK RETURN|FOB|yly express 4676|145173|7688|3|4|4872.68|0.10|0.06|N|O|1995-12-12|1995-10-22|1995-12-13|TAKE BACK RETURN|TRUCK|detect above the ironic platelets. fluffily 4676|110831|832|4|50|92091.50|0.07|0.01|N|O|1995-09-20|1995-11-20|1995-10-18|TAKE BACK RETURN|AIR|r deposits boost boldly quickly quick asymp 4676|121096|3609|5|29|32395.61|0.01|0.02|N|O|1995-12-29|1995-11-12|1996-01-06|TAKE BACK RETURN|RAIL|ly regular theodolites sleep. 4676|45569|5570|6|8|12116.48|0.08|0.08|N|O|1995-12-05|1995-10-18|1996-01-02|COLLECT COD|AIR|cuses boost above 4676|63244|8257|7|13|15694.12|0.05|0.07|N|O|1995-11-18|1995-11-07|1995-12-10|TAKE BACK RETURN|TRUCK| at the slyly bold attainments. silently e 4677|127864|377|1|25|47296.50|0.04|0.04|N|O|1998-04-11|1998-05-11|1998-04-18|TAKE BACK RETURN|REG AIR|unts doubt furiousl 4678|57388|9894|1|35|47088.30|0.04|0.08|N|O|1998-11-27|1998-10-02|1998-12-17|TAKE BACK RETURN|AIR|he accounts. fluffily bold sheaves b 4678|116672|9184|2|18|30396.06|0.03|0.06|N|O|1998-10-30|1998-09-22|1998-11-25|TAKE BACK RETURN|SHIP|usly ironic 4678|95355|374|3|13|17554.55|0.10|0.07|N|O|1998-11-03|1998-10-17|1998-11-06|TAKE BACK RETURN|SHIP|its. carefully final fr 4678|21704|6709|4|23|37391.10|0.06|0.05|N|O|1998-09-03|1998-09-20|1998-09-04|DELIVER IN PERSON|SHIP|ily sly deposi 4678|177771|7772|5|40|73950.80|0.03|0.07|N|O|1998-11-11|1998-10-27|1998-11-24|TAKE BACK RETURN|AIR|. final, unusual requests sleep thinl 4679|189136|4173|1|7|8575.91|0.10|0.05|R|F|1993-05-11|1993-04-11|1993-05-16|NONE|TRUCK|kages. bold, regular packa 4704|77934|5456|1|14|26767.02|0.04|0.04|N|O|1996-10-27|1996-11-02|1996-11-07|DELIVER IN PERSON|TRUCK| above the slyly final requests. 
quickly 4704|27671|174|2|7|11190.69|0.03|0.04|N|O|1996-12-04|1996-10-30|1996-12-23|DELIVER IN PERSON|SHIP|ers wake car 4704|63081|3082|3|44|45939.52|0.02|0.05|N|O|1996-09-02|1996-10-07|1996-09-17|DELIVER IN PERSON|REG AIR|out the care 4705|110778|5801|1|22|39352.94|0.04|0.04|R|F|1992-07-05|1992-05-11|1992-07-29|DELIVER IN PERSON|SHIP| fluffily pending accounts ca 4705|30527|3031|2|14|20405.28|0.00|0.08|R|F|1992-07-14|1992-05-23|1992-07-25|DELIVER IN PERSON|TRUCK|ain carefully amon 4705|55426|437|3|16|22102.72|0.07|0.08|R|F|1992-07-02|1992-06-06|1992-07-06|DELIVER IN PERSON|RAIL|special ideas nag sl 4705|129147|6684|4|31|36460.34|0.03|0.03|R|F|1992-04-03|1992-05-30|1992-04-05|COLLECT COD|TRUCK|furiously final accou 4705|162432|7465|5|28|41844.04|0.10|0.01|A|F|1992-06-03|1992-06-07|1992-06-22|DELIVER IN PERSON|MAIL|tes wake according to the unusual plate 4705|183715|6234|6|23|41370.33|0.06|0.03|R|F|1992-06-22|1992-06-11|1992-07-18|DELIVER IN PERSON|MAIL| above the furiously ev 4705|88534|3551|7|40|60901.20|0.08|0.06|A|F|1992-04-19|1992-04-28|1992-05-07|COLLECT COD|TRUCK|blithely. sly 4706|181872|4391|1|37|72293.19|0.02|0.06|A|F|1993-02-20|1993-03-05|1993-03-03|DELIVER IN PERSON|TRUCK|kly final deposits c 4706|121133|1134|2|23|26544.99|0.03|0.01|A|F|1993-04-01|1993-03-13|1993-05-01|COLLECT COD|FOB|deas across t 4706|67134|2147|3|6|6606.78|0.01|0.04|R|F|1993-01-20|1993-03-18|1993-01-26|NONE|MAIL|efully eve 4706|115455|7967|4|5|7352.25|0.06|0.06|R|F|1993-02-14|1993-01-31|1993-02-26|NONE|REG AIR|ptotes haggle ca 4706|49189|1694|5|27|30730.86|0.06|0.08|A|F|1993-04-04|1993-03-11|1993-04-09|COLLECT COD|REG AIR|into beans. finally special instruct 4707|33590|8597|1|7|10665.13|0.02|0.05|R|F|1995-05-14|1995-04-06|1995-06-06|COLLECT COD|SHIP|ecial sheaves boost blithely accor 4707|135550|577|2|49|77691.95|0.00|0.07|N|F|1995-06-17|1995-05-16|1995-06-25|COLLECT COD|FOB| alongside of the slyly ironic instructio 4708|190913|5952|1|18|36070.38|0.02|0.04|A|F|1994-11-11|1994-11-15|1994-11-26|NONE|REG AIR|special, eve 4708|74046|1568|2|5|5100.20|0.05|0.05|A|F|1994-10-15|1994-12-02|1994-11-12|COLLECT COD|MAIL|ely. carefully sp 4708|76824|9332|3|32|57626.24|0.04|0.07|A|F|1994-11-12|1994-11-14|1994-11-23|TAKE BACK RETURN|MAIL|the accounts. e 4709|24334|4335|1|25|31458.25|0.03|0.05|N|O|1996-02-21|1996-02-11|1996-03-17|DELIVER IN PERSON|AIR|deposits grow. fluffily unusual accounts 4709|176003|3555|2|25|26975.00|0.05|0.03|N|O|1996-01-22|1996-03-03|1996-02-21|DELIVER IN PERSON|REG AIR|inst the ironic, regul 4710|182634|189|1|40|68665.20|0.10|0.08|A|F|1995-03-09|1995-02-25|1995-03-29|TAKE BACK RETURN|AIR|cross the blithely bold packages. silen 4710|127253|9766|2|47|60171.75|0.04|0.01|R|F|1995-02-22|1995-01-12|1995-02-28|NONE|RAIL|blithely express packages. even, ironic re 4711|132347|2348|1|7|9655.38|0.03|0.01|N|O|1998-05-12|1998-06-24|1998-05-24|COLLECT COD|MAIL|ly. bold accounts use fluff 4711|144056|4057|2|15|16500.75|0.08|0.07|N|O|1998-06-09|1998-07-30|1998-06-18|COLLECT COD|SHIP| beans wake. deposits could bo 4711|149014|9015|3|22|23386.22|0.02|0.03|N|O|1998-06-21|1998-06-18|1998-07-19|TAKE BACK RETURN|REG AIR|along the quickly careful packages. bli 4711|64012|1531|4|8|7808.08|0.07|0.00|N|O|1998-06-17|1998-06-13|1998-06-27|TAKE BACK RETURN|SHIP|g to the carefully ironic deposits. 
specia 4711|48820|3829|5|15|26532.30|0.05|0.01|N|O|1998-09-03|1998-07-15|1998-09-13|TAKE BACK RETURN|SHIP|ld requests: furiously final inst 4711|115669|5670|6|45|75809.70|0.05|0.06|N|O|1998-05-19|1998-07-14|1998-05-21|COLLECT COD|SHIP| ironic theodolites 4711|45160|2673|7|18|19892.88|0.05|0.04|N|O|1998-07-03|1998-07-31|1998-07-23|DELIVER IN PERSON|RAIL| blithely. bold asymptote 4736|195526|3084|1|26|42159.52|0.03|0.03|N|O|1996-02-02|1996-01-18|1996-02-09|DELIVER IN PERSON|AIR|efully speci 4736|3390|891|2|43|55615.77|0.06|0.07|N|O|1996-02-05|1995-12-21|1996-02-06|COLLECT COD|MAIL|quests. carefully 4737|190140|7698|1|37|45515.18|0.03|0.04|R|F|1993-05-17|1993-04-10|1993-05-30|DELIVER IN PERSON|TRUCK|s. fluffily regular 4737|68303|810|2|22|27968.60|0.04|0.04|A|F|1993-03-29|1993-05-22|1993-04-16|TAKE BACK RETURN|RAIL| hang fluffily around t 4738|186778|4333|1|9|16782.93|0.04|0.04|A|F|1992-06-01|1992-06-26|1992-06-02|COLLECT COD|TRUCK|posits serve slyly. unusual pint 4738|172482|5000|2|16|24871.68|0.07|0.08|A|F|1992-06-17|1992-06-20|1992-06-21|NONE|MAIL|nic deposits are slyly! carefu 4738|99175|1685|3|50|58708.50|0.04|0.02|A|F|1992-06-18|1992-07-04|1992-07-07|TAKE BACK RETURN|TRUCK|the blithely ironic braids sleep slyly 4738|28274|777|4|22|26449.94|0.02|0.08|A|F|1992-05-25|1992-05-19|1992-06-12|COLLECT COD|SHIP|ld, even packages. furio 4738|186242|6243|5|13|17267.12|0.04|0.05|R|F|1992-05-30|1992-06-11|1992-06-26|COLLECT COD|AIR| wake. unusual platelets for the 4738|158558|6104|6|10|16165.50|0.10|0.01|R|F|1992-07-10|1992-06-16|1992-07-25|TAKE BACK RETURN|SHIP|hins above the 4738|82750|275|7|28|48517.00|0.05|0.07|A|F|1992-06-09|1992-07-05|1992-06-25|NONE|AIR|e furiously ironic excuses. care 4739|167941|7942|1|8|16071.52|0.07|0.07|R|F|1993-06-22|1993-05-10|1993-07-11|TAKE BACK RETURN|SHIP|cording to the 4739|184061|6580|2|31|35496.86|0.09|0.06|R|F|1993-06-20|1993-05-18|1993-06-26|COLLECT COD|SHIP|blithely special pin 4739|99183|6711|3|30|35465.40|0.09|0.00|A|F|1993-05-29|1993-04-12|1993-06-18|NONE|TRUCK|ly even packages use across th 4740|2282|2283|1|22|26054.16|0.06|0.01|N|O|1996-10-04|1996-08-17|1996-10-05|TAKE BACK RETURN|RAIL|final dependencies nag 4740|152247|9793|2|24|31181.76|0.08|0.02|N|O|1996-09-10|1996-09-27|1996-10-07|TAKE BACK RETURN|TRUCK|hely regular deposits 4741|72763|7778|1|24|41658.24|0.00|0.01|A|F|1992-09-16|1992-09-19|1992-09-20|DELIVER IN PERSON|RAIL|deas boost furiously slyly regular id 4741|112165|2166|2|16|18834.56|0.01|0.07|R|F|1992-08-25|1992-08-10|1992-08-29|TAKE BACK RETURN|FOB|final foxes haggle r 4741|155605|3151|3|24|39854.40|0.05|0.08|A|F|1992-11-04|1992-08-14|1992-11-06|TAKE BACK RETURN|MAIL|even requests. 4741|50954|8470|4|39|74293.05|0.09|0.06|R|F|1992-10-28|1992-10-03|1992-11-11|COLLECT COD|SHIP|t, regular requests 4741|178507|8508|5|40|63420.00|0.09|0.03|R|F|1992-09-20|1992-09-23|1992-10-09|TAKE BACK RETURN|REG AIR| fluffily slow deposits. fluffily regu 4741|156686|9202|6|34|59251.12|0.02|0.07|R|F|1992-08-25|1992-08-18|1992-09-20|DELIVER IN PERSON|RAIL|sly special packages after the furiously 4742|155564|8080|1|32|51825.92|0.10|0.08|R|F|1995-04-04|1995-06-12|1995-04-19|COLLECT COD|RAIL|eposits boost blithely. 
carefully regular a 4742|154833|2379|2|29|54747.07|0.02|0.03|N|F|1995-06-15|1995-05-05|1995-06-24|COLLECT COD|REG AIR|integrate closely among t 4742|71018|8540|3|15|14835.15|0.06|0.04|N|O|1995-07-20|1995-05-26|1995-08-11|NONE|SHIP|terns are sl 4742|187940|5495|4|31|62866.14|0.05|0.08|N|F|1995-06-13|1995-05-08|1995-06-24|COLLECT COD|REG AIR|ke slyly among the furiousl 4742|99331|9332|5|45|59864.85|0.05|0.00|R|F|1995-05-12|1995-05-14|1995-06-07|TAKE BACK RETURN|RAIL|ke carefully. do 4743|59402|4413|1|19|25866.60|0.04|0.07|A|F|1993-06-23|1993-05-03|1993-07-20|COLLECT COD|AIR|hely even accounts 4743|158379|3410|2|3|4312.11|0.01|0.03|R|F|1993-04-14|1993-06-08|1993-05-09|NONE|TRUCK|al requests. express idea 4743|72936|7951|3|21|40087.53|0.08|0.03|A|F|1993-07-02|1993-06-15|1993-07-26|DELIVER IN PERSON|RAIL|ake blithely against the packages. reg 4743|33527|3528|4|27|39434.04|0.08|0.05|R|F|1993-07-26|1993-05-27|1993-08-24|DELIVER IN PERSON|AIR|aids use. express deposits 4768|35822|829|1|5|8789.10|0.00|0.03|R|F|1993-12-27|1994-02-09|1994-01-11|NONE|MAIL|egular accounts. bravely final fra 4769|34240|1750|1|16|18787.84|0.08|0.05|N|O|1995-07-16|1995-07-05|1995-07-22|TAKE BACK RETURN|FOB| deposits. slyly even asymptote 4769|62918|437|2|34|63950.94|0.06|0.07|N|O|1995-07-26|1995-05-18|1995-08-03|COLLECT COD|REG AIR|ven instructions. ca 4769|46414|1423|3|36|48974.76|0.10|0.03|N|O|1995-07-22|1995-06-16|1995-08-11|NONE|RAIL|. slyly even deposit 4769|68431|8432|4|45|62974.35|0.08|0.06|R|F|1995-06-01|1995-07-13|1995-06-04|TAKE BACK RETURN|RAIL|accounts are. even accounts sleep 4769|111623|4135|5|15|24519.30|0.07|0.08|N|F|1995-06-12|1995-07-07|1995-07-04|NONE|SHIP|egular platelets can cajole across the 4770|31961|9471|1|41|77611.36|0.00|0.08|N|O|1995-09-04|1995-08-08|1995-09-10|COLLECT COD|FOB|ithely even packages sleep caref 4770|156036|8552|2|30|32760.90|0.09|0.07|N|O|1995-08-25|1995-08-27|1995-09-07|COLLECT COD|SHIP|ffily carefully ironic ideas. ironic d 4771|48612|8613|1|9|14045.49|0.01|0.00|R|F|1993-02-28|1993-02-19|1993-03-25|NONE|FOB|riously after the packages. fina 4771|15316|5317|2|21|25857.51|0.09|0.01|R|F|1993-01-19|1993-02-10|1993-02-01|NONE|FOB|fluffily pendi 4771|11568|1569|3|5|7397.80|0.06|0.08|R|F|1993-01-07|1993-01-19|1993-01-26|NONE|RAIL|ar, quiet accounts nag furiously express id 4771|8448|3449|4|21|28485.24|0.05|0.04|A|F|1992-12-20|1993-01-22|1992-12-26|TAKE BACK RETURN|SHIP| carefully re 4772|86830|6831|1|1|1816.83|0.10|0.00|R|F|1994-11-13|1994-10-25|1994-11-15|DELIVER IN PERSON|AIR|ans. slyly even acc 4772|145505|534|2|16|24808.00|0.07|0.06|R|F|1994-10-27|1994-12-07|1994-10-29|TAKE BACK RETURN|MAIL|egular accounts wake s 4772|94751|4752|3|31|54118.25|0.02|0.04|A|F|1994-10-02|1994-10-21|1994-10-13|TAKE BACK RETURN|FOB|ests are thinly. furiously unusua 4772|70799|5814|4|15|26546.85|0.02|0.07|R|F|1994-09-19|1994-10-22|1994-09-26|COLLECT COD|TRUCK| requests. express, regular th 4773|143131|3132|1|23|27004.99|0.00|0.08|N|O|1996-01-01|1996-03-19|1996-01-04|NONE|FOB|ly express grouches wak 4773|196476|8996|2|36|56608.92|0.09|0.04|N|O|1996-04-08|1996-03-03|1996-05-01|COLLECT COD|REG AIR| dependencies. 
quickly 4773|166028|6029|3|49|53606.98|0.05|0.02|N|O|1996-01-26|1996-02-29|1996-01-27|TAKE BACK RETURN|FOB|y final reque 4773|19370|6874|4|49|63179.13|0.09|0.04|N|O|1996-01-12|1996-02-17|1996-02-05|TAKE BACK RETURN|TRUCK|ly pending theodolites cajole caref 4773|149461|4490|5|20|30209.20|0.02|0.07|N|O|1995-12-28|1996-02-17|1996-01-15|COLLECT COD|TRUCK| blithely final deposits nag after t 4773|189276|1795|6|11|15017.97|0.10|0.06|N|O|1996-01-02|1996-01-29|1996-01-24|DELIVER IN PERSON|REG AIR|en accounts. slyly b 4773|157333|2364|7|6|8341.98|0.07|0.01|N|O|1996-03-09|1996-03-18|1996-03-27|NONE|AIR|latelets haggle s 4774|83182|5691|1|45|52433.10|0.10|0.00|R|F|1993-07-07|1993-06-08|1993-07-31|COLLECT COD|TRUCK| haggle busily afte 4774|38828|1332|2|4|7067.28|0.02|0.03|A|F|1993-08-03|1993-05-30|1993-08-19|COLLECT COD|FOB|xes according to the foxes wake above the f 4774|172460|2461|3|47|72025.62|0.10|0.08|R|F|1993-06-13|1993-07-04|1993-07-09|TAKE BACK RETURN|FOB|regular dolphins above the furi 4774|129151|6688|4|30|35404.50|0.05|0.08|A|F|1993-08-18|1993-06-08|1993-08-21|DELIVER IN PERSON|REG AIR|tions against the blithely final theodolit 4775|73979|6487|1|1|1952.97|0.10|0.02|N|O|1995-09-06|1995-09-28|1995-09-29|DELIVER IN PERSON|MAIL|furiously ironic theodolite 4775|152779|5295|2|37|67775.49|0.02|0.01|N|O|1995-09-06|1995-09-28|1995-09-28|COLLECT COD|TRUCK|ts. pinto beans use according to th 4775|152242|9788|3|34|44004.16|0.09|0.06|N|O|1995-09-14|1995-10-15|1995-09-21|DELIVER IN PERSON|MAIL|onic epitaphs. f 4775|118461|5995|4|39|57698.94|0.07|0.04|N|O|1995-08-30|1995-10-12|1995-09-20|NONE|AIR|eep never with the slyly regular acc 4800|96498|1517|1|11|16439.39|0.03|0.03|R|F|1992-01-27|1992-03-16|1992-02-19|TAKE BACK RETURN|RAIL|ic dependenc 4800|25491|496|2|1|1416.49|0.06|0.06|A|F|1992-02-23|1992-03-16|1992-03-20|TAKE BACK RETURN|MAIL|nal accounts are blithely deposits. bol 4800|10518|5521|3|21|29998.71|0.09|0.05|A|F|1992-02-14|1992-03-15|1992-02-26|NONE|SHIP|ithely according to 4800|175228|5229|4|38|49522.36|0.10|0.08|R|F|1992-02-01|1992-02-28|1992-02-21|NONE|TRUCK|s sleep fluffily. furiou 4800|52240|2241|5|24|28613.76|0.08|0.04|R|F|1992-01-14|1992-02-23|1992-01-25|NONE|TRUCK|ully carefully r 4801|183937|1492|1|37|74774.41|0.10|0.02|N|O|1996-03-09|1996-02-29|1996-03-25|TAKE BACK RETURN|FOB|uests hinder blithely against the instr 4801|25246|7749|2|34|39822.16|0.03|0.02|N|O|1996-02-05|1996-04-16|1996-02-23|NONE|SHIP|y final requests 4801|109282|9283|3|4|5165.12|0.04|0.04|N|O|1996-03-23|1996-04-04|1996-03-25|COLLECT COD|RAIL|pitaphs. regular, reg 4801|91183|1184|4|39|45793.02|0.07|0.01|N|O|1996-03-19|1996-03-21|1996-04-17|TAKE BACK RETURN|REG AIR|warhorses wake never for the care 4802|39444|9445|1|6|8300.64|0.00|0.06|N|O|1997-04-16|1997-03-25|1997-04-21|TAKE BACK RETURN|SHIP|unusual accounts wake blithely. b 4803|131052|6079|1|2|2166.10|0.08|0.03|N|O|1996-04-16|1996-03-20|1996-05-15|NONE|REG AIR|gular reque 4803|175165|2717|2|47|58287.52|0.10|0.00|N|O|1996-03-14|1996-03-30|1996-03-15|DELIVER IN PERSON|FOB|ly final excuses. slyly express requ 4803|195742|8262|3|42|77185.08|0.04|0.08|N|O|1996-04-27|1996-05-05|1996-05-17|NONE|TRUCK| accounts affix quickly ar 4803|21312|6317|4|24|29599.44|0.10|0.04|N|O|1996-02-24|1996-04-02|1996-02-28|NONE|MAIL|t blithely slyly special decoys. 4803|188148|5703|5|21|25958.94|0.03|0.06|N|O|1996-05-25|1996-03-15|1996-06-09|COLLECT COD|FOB| silent packages use. b 4803|193599|3600|6|19|32159.21|0.07|0.00|N|O|1996-04-20|1996-03-25|1996-04-27|TAKE BACK RETURN|RAIL|sts. 
enticing, even 4804|127009|4546|1|44|45584.00|0.06|0.08|A|F|1992-05-02|1992-03-24|1992-05-28|TAKE BACK RETURN|AIR|aggle quickly among the slyly fi 4804|34942|9949|2|41|76954.54|0.10|0.02|R|F|1992-04-06|1992-04-12|1992-05-03|COLLECT COD|MAIL|. deposits haggle express tithes? 4804|64829|9842|3|33|59196.06|0.09|0.05|A|F|1992-03-02|1992-04-14|1992-03-13|DELIVER IN PERSON|AIR|, thin excuses. 4805|149235|9236|1|7|8989.61|0.09|0.03|A|F|1992-05-01|1992-07-09|1992-05-09|NONE|FOB| requests. regular deposit 4805|188967|1486|2|45|92518.20|0.02|0.03|R|F|1992-06-16|1992-06-08|1992-07-03|NONE|TRUCK|the furiously sly t 4805|153857|1403|3|44|84077.40|0.01|0.02|R|F|1992-05-14|1992-06-23|1992-05-25|DELIVER IN PERSON|SHIP|eposits sleep furiously qui 4805|64284|9297|4|13|16227.64|0.04|0.04|R|F|1992-07-16|1992-06-07|1992-08-10|COLLECT COD|AIR|its serve about the accounts. slyly regu 4805|8529|8530|5|42|60375.84|0.03|0.03|R|F|1992-08-17|1992-07-03|1992-09-14|NONE|REG AIR|the regular, fina 4805|135959|986|6|18|35909.10|0.06|0.04|A|F|1992-06-07|1992-07-10|1992-06-12|COLLECT COD|TRUCK|o use pending, unusu 4806|15046|5047|1|26|24987.04|0.10|0.05|R|F|1993-05-28|1993-06-07|1993-05-29|DELIVER IN PERSON|SHIP| bold pearls sublate blithely. quickly pe 4806|71151|8673|2|6|6732.90|0.01|0.06|A|F|1993-05-17|1993-07-19|1993-05-29|TAKE BACK RETURN|SHIP|even theodolites. packages sl 4806|28806|1309|3|8|13878.40|0.09|0.00|A|F|1993-05-08|1993-07-16|1993-05-28|NONE|TRUCK|requests boost blithely. qui 4807|121903|6928|1|9|17324.10|0.04|0.08|N|O|1997-04-23|1997-03-01|1997-05-15|TAKE BACK RETURN|TRUCK|may are blithely. carefully even pinto b 4807|9767|9768|2|41|68747.16|0.07|0.08|N|O|1997-05-02|1997-03-31|1997-05-15|TAKE BACK RETURN|AIR| fluffily re 4807|144309|4310|3|34|46012.20|0.06|0.02|N|O|1997-01-31|1997-03-13|1997-02-01|NONE|SHIP|ecial ideas. deposits according to the fin 4807|189336|4373|4|32|45610.56|0.05|0.00|N|O|1997-04-04|1997-03-21|1997-04-16|NONE|RAIL|efully even dolphins slee 4807|158706|6252|5|2|3529.40|0.02|0.05|N|O|1997-05-09|1997-04-03|1997-06-05|TAKE BACK RETURN|RAIL|deas wake bli 4807|159022|9023|6|22|23782.44|0.09|0.06|N|O|1997-03-13|1997-02-23|1997-04-01|NONE|FOB|es use final excuses. furiously final 4832|14730|4731|1|23|37828.79|0.03|0.01|N|O|1997-12-05|1998-01-05|1997-12-10|NONE|RAIL|y express depo 4832|151303|8849|2|10|13543.00|0.00|0.06|N|O|1998-01-08|1998-02-01|1998-01-11|DELIVER IN PERSON|MAIL|ly. blithely bold pinto beans should have 4832|148228|743|3|4|5104.88|0.04|0.01|N|O|1998-01-16|1998-02-12|1998-02-08|TAKE BACK RETURN|AIR|ages. slyly express deposits cajole car 4832|63875|3876|4|6|11033.22|0.02|0.01|N|O|1997-12-08|1998-02-03|1997-12-10|COLLECT COD|TRUCK|ages cajole after the bold requests. furi 4832|137354|4894|5|43|59828.05|0.10|0.08|N|O|1997-12-31|1998-02-20|1998-01-26|COLLECT COD|RAIL|oze according to the accou 4833|106154|8665|1|31|35964.65|0.08|0.04|N|O|1996-06-24|1996-07-15|1996-07-02|NONE|SHIP|ven instructions cajole against the caref 4833|116084|3618|2|11|12100.88|0.03|0.01|N|O|1996-08-24|1996-07-26|1996-09-19|NONE|REG AIR|s nag above the busily sile 4833|17043|7044|3|26|24961.04|0.08|0.04|N|O|1996-05-13|1996-07-12|1996-05-31|NONE|SHIP|s packages. 
even gif 4833|35345|352|4|19|24326.46|0.07|0.07|N|O|1996-08-21|1996-07-09|1996-09-10|TAKE BACK RETURN|AIR|y quick theodolit 4833|34266|1776|5|4|4801.04|0.10|0.02|N|O|1996-08-16|1996-06-29|1996-08-22|NONE|AIR|y pending packages sleep blithely regular r 4834|182667|222|1|27|47240.82|0.06|0.02|N|O|1997-01-09|1996-10-27|1997-01-27|DELIVER IN PERSON|RAIL|es nag blithe 4834|70960|3468|2|26|50204.96|0.01|0.00|N|O|1996-10-04|1996-10-21|1996-10-10|DELIVER IN PERSON|TRUCK|ages dazzle carefully. slyly daring foxes 4834|22952|7957|3|34|63748.30|0.03|0.01|N|O|1996-12-09|1996-11-26|1996-12-10|NONE|MAIL|ounts haggle bo 4834|142668|5183|4|38|65005.08|0.03|0.06|N|O|1997-01-10|1996-12-06|1997-01-22|COLLECT COD|FOB|alongside of the carefully even plate 4835|178289|8290|1|18|24611.04|0.00|0.03|R|F|1995-02-17|1994-12-14|1995-03-17|DELIVER IN PERSON|MAIL|eat furiously against the slyly 4835|90575|3085|2|3|4696.71|0.09|0.06|R|F|1995-01-24|1995-01-12|1995-02-16|COLLECT COD|AIR|etimes final pac 4835|85483|5484|3|27|39648.96|0.05|0.00|A|F|1994-12-10|1994-12-13|1995-01-02|DELIVER IN PERSON|REG AIR| accounts after the car 4835|101125|6146|4|23|25900.76|0.08|0.07|A|F|1995-02-05|1995-01-04|1995-02-28|NONE|SHIP|e carefully regular foxes. deposits are sly 4836|161531|4048|1|22|35035.66|0.01|0.03|N|O|1997-03-03|1997-02-23|1997-03-04|NONE|SHIP|al pinto beans. care 4836|47331|9836|2|16|20453.28|0.07|0.08|N|O|1997-01-14|1997-03-05|1997-01-30|COLLECT COD|MAIL|gular packages against the express reque 4836|75666|3188|3|14|22983.24|0.03|0.08|N|O|1997-02-21|1997-02-06|1997-03-08|COLLECT COD|MAIL|lites. unusual, bold dolphins ar 4836|105341|362|4|15|20195.10|0.10|0.00|N|O|1997-03-08|1997-03-14|1997-03-30|TAKE BACK RETURN|TRUCK|eep slyly. even requests cajole 4836|50120|5131|5|12|12841.44|0.01|0.04|N|O|1997-02-02|1997-02-10|1997-02-03|COLLECT COD|TRUCK|sly ironic accoun 4837|41768|9281|1|16|27356.16|0.09|0.04|N|O|1998-08-12|1998-06-06|1998-08-26|COLLECT COD|FOB|ing requests are blithely regular instructi 4837|192758|5278|2|16|29612.00|0.01|0.02|N|O|1998-08-19|1998-06-18|1998-08-26|NONE|RAIL|counts cajole slyly furiou 4837|67706|2719|3|42|70295.40|0.10|0.00|N|O|1998-06-19|1998-07-06|1998-06-23|COLLECT COD|MAIL|o the furiously final theodolites boost 4838|121792|1793|1|35|63482.65|0.01|0.00|R|F|1992-10-30|1992-10-23|1992-11-21|TAKE BACK RETURN|RAIL|ly blithely unusual foxes. even package 4838|147002|9517|2|2|2098.00|0.03|0.08|R|F|1992-08-11|1992-09-16|1992-08-26|COLLECT COD|MAIL|hely final notornis are furiously blithe 4838|51200|1201|3|26|29931.20|0.06|0.04|R|F|1992-09-03|1992-10-25|1992-09-11|TAKE BACK RETURN|FOB|ular requests boost about the packages. r 4839|59762|7278|1|5|8608.80|0.10|0.07|A|F|1994-09-07|1994-07-15|1994-10-05|DELIVER IN PERSON|FOB|ses integrate. 
regular deposits are about 4839|9631|9632|2|25|38515.75|0.02|0.02|R|F|1994-05-20|1994-07-08|1994-05-30|NONE|REG AIR|regular packages ab 4839|59660|9661|3|18|29153.88|0.06|0.01|R|F|1994-05-18|1994-06-13|1994-06-09|TAKE BACK RETURN|FOB|blithely ironic theodolites use along 4839|99142|9143|4|19|21681.66|0.07|0.08|R|F|1994-05-20|1994-07-14|1994-05-30|NONE|REG AIR| deposits sublate furiously ir 4839|70988|6003|5|9|17630.82|0.05|0.01|R|F|1994-06-17|1994-06-18|1994-07-10|NONE|SHIP|ounts haggle carefully above 4864|149726|7269|1|28|49720.16|0.06|0.08|A|F|1993-02-06|1992-12-15|1993-02-10|COLLECT COD|REG AIR|thely around the bli 4864|37481|9985|2|38|53902.24|0.10|0.02|R|F|1992-12-20|1993-01-07|1993-01-06|TAKE BACK RETURN|SHIP|ording to the ironic, ir 4864|132447|7474|3|45|66574.80|0.02|0.01|A|F|1992-11-17|1993-01-02|1992-11-26|COLLECT COD|SHIP|round the furiously careful pa 4864|30832|5839|4|46|81090.18|0.07|0.03|A|F|1993-02-24|1993-01-02|1993-03-17|TAKE BACK RETURN|RAIL|sts use carefully across the carefull 4865|161406|8955|1|16|23478.40|0.07|0.05|N|O|1997-10-02|1997-08-20|1997-10-04|COLLECT COD|TRUCK|osits haggle. fur 4865|136210|6211|2|4|4984.84|0.07|0.01|N|O|1997-07-24|1997-07-25|1997-08-07|TAKE BACK RETURN|FOB|sts. blithely special instruction 4865|67618|5137|3|44|69766.84|0.10|0.08|N|O|1997-07-25|1997-08-20|1997-08-22|COLLECT COD|FOB|even deposits sleep against the quickly r 4865|49947|4956|4|21|39835.74|0.04|0.02|N|O|1997-07-17|1997-08-10|1997-07-21|NONE|RAIL|eposits detect sly 4865|53744|8755|5|33|56025.42|0.00|0.05|N|O|1997-07-17|1997-08-16|1997-07-30|TAKE BACK RETURN|FOB|y pending notornis ab 4865|64770|9783|6|47|81534.19|0.00|0.05|N|O|1997-08-26|1997-08-07|1997-08-31|NONE|RAIL|y unusual packages. packages 4866|10585|5588|1|9|13460.22|0.01|0.05|N|O|1997-08-30|1997-09-18|1997-09-24|TAKE BACK RETURN|MAIL|ven dependencies x-ray. quic 4866|101252|1253|2|1|1253.25|0.06|0.00|N|O|1997-10-15|1997-10-01|1997-11-14|TAKE BACK RETURN|AIR|latelets nag. q 4866|130432|2946|3|17|24861.31|0.07|0.00|N|O|1997-11-26|1997-10-11|1997-12-12|COLLECT COD|TRUCK|ess packages doubt. even somas wake f 4867|81666|1667|1|7|11533.62|0.09|0.03|A|F|1992-07-17|1992-08-17|1992-07-22|COLLECT COD|FOB|e carefully even packages. slyly ironic i 4867|159641|2157|2|3|5101.92|0.04|0.08|R|F|1992-07-04|1992-07-15|1992-07-21|NONE|AIR|yly silent deposits 4868|72576|5084|1|47|72782.79|0.03|0.03|N|O|1997-04-29|1997-04-27|1997-05-11|DELIVER IN PERSON|SHIP|gle unusual, fluffy packages. foxes cajol 4868|179044|9045|2|8|8984.32|0.10|0.08|N|O|1997-03-26|1997-05-09|1997-04-16|NONE|RAIL|ly special th 4868|190723|724|3|49|88872.28|0.09|0.03|N|O|1997-04-23|1997-05-07|1997-04-26|NONE|SHIP|ys engage. th 4868|79046|9047|4|34|34851.36|0.04|0.02|N|O|1997-05-19|1997-04-27|1997-06-15|NONE|RAIL|en instructions about th 4868|121574|1575|5|22|35102.54|0.07|0.06|N|O|1997-04-26|1997-05-16|1997-05-01|DELIVER IN PERSON|FOB|osits. final foxes boost regular, 4869|40808|3313|1|31|54212.80|0.10|0.01|A|F|1995-01-17|1994-11-30|1995-02-02|NONE|SHIP|ins. always unusual ideas across the ir 4869|57367|2378|2|24|31784.64|0.09|0.06|A|F|1994-11-17|1994-11-07|1994-11-27|COLLECT COD|MAIL|olites cajole after the ideas. 
special t 4869|156025|6026|3|25|27025.50|0.00|0.05|R|F|1994-11-25|1994-11-14|1994-12-19|DELIVER IN PERSON|AIR|e according t 4869|102539|7560|4|24|36996.72|0.10|0.07|R|F|1994-11-23|1994-11-18|1994-12-11|DELIVER IN PERSON|MAIL|se deposits above the sly, q 4869|172852|7887|5|42|80843.70|0.07|0.04|R|F|1994-10-16|1994-12-10|1994-11-07|TAKE BACK RETURN|REG AIR| slyly even instructions. 4869|121042|8579|6|30|31891.20|0.00|0.05|A|F|1995-01-09|1994-11-20|1995-02-02|COLLECT COD|RAIL|gedly even requests. s 4870|47881|386|1|49|89615.12|0.05|0.05|R|F|1994-11-14|1994-10-24|1994-12-12|TAKE BACK RETURN|SHIP| regular packages 4870|126176|3713|2|6|7213.02|0.06|0.08|A|F|1994-09-09|1994-10-16|1994-09-21|DELIVER IN PERSON|TRUCK|ress requests. bold, silent pinto bea 4870|30179|5186|3|5|5545.85|0.05|0.00|R|F|1994-10-11|1994-10-07|1994-10-24|NONE|AIR|s haggle furiously. slyly ironic dinos 4870|5978|8479|4|4|7535.88|0.03|0.08|A|F|1994-10-23|1994-09-16|1994-11-04|COLLECT COD|RAIL|its wake quickly. slyly quick 4870|70598|3106|5|36|56469.24|0.09|0.06|A|F|1994-09-06|1994-09-17|1994-10-01|COLLECT COD|REG AIR| instructions. carefully pending pac 4871|176651|4203|1|14|24187.10|0.07|0.03|N|O|1995-09-30|1995-07-29|1995-10-18|TAKE BACK RETURN|REG AIR|inst the never ironic 4871|160509|8058|2|17|26681.50|0.07|0.03|N|O|1995-09-09|1995-09-01|1995-10-02|DELIVER IN PERSON|AIR|es. carefully ev 4871|62067|2068|3|3|3087.18|0.03|0.06|N|O|1995-10-03|1995-08-10|1995-10-06|DELIVER IN PERSON|TRUCK|y special packages wak 4871|148471|6014|4|35|53181.45|0.08|0.07|N|O|1995-08-11|1995-07-18|1995-08-29|DELIVER IN PERSON|TRUCK|ackages sle 4871|151422|1423|5|10|14734.20|0.09|0.02|N|O|1995-09-12|1995-09-02|1995-10-05|TAKE BACK RETURN|AIR|s integrate after the a 4871|135500|3040|6|36|55278.00|0.02|0.08|N|O|1995-09-18|1995-08-29|1995-10-05|TAKE BACK RETURN|AIR|ely according 4871|139040|6580|7|10|10790.40|0.10|0.02|N|O|1995-07-13|1995-08-19|1995-07-29|NONE|REG AIR|p ironic theodolites. slyly even platel 4896|40766|767|1|19|32428.44|0.09|0.05|A|F|1992-12-13|1992-11-13|1993-01-09|NONE|AIR|nusual requ 4896|139589|4616|2|44|71657.52|0.04|0.03|A|F|1992-11-24|1992-11-15|1992-12-18|COLLECT COD|MAIL|e after the slowly f 4896|57169|4685|3|6|6756.96|0.04|0.04|A|F|1992-10-30|1992-11-12|1992-11-28|DELIVER IN PERSON|TRUCK|usly regular deposits 4896|22746|2747|4|5|8343.70|0.08|0.02|R|F|1992-12-02|1992-11-11|1992-12-19|COLLECT COD|SHIP|eposits hang carefully. sly 4896|85033|2558|5|21|21378.63|0.07|0.08|R|F|1992-11-18|1992-11-18|1992-11-29|DELIVER IN PERSON|TRUCK|ly express deposits. carefully pending depo 4897|54403|4404|1|26|35292.40|0.01|0.01|R|F|1992-12-22|1992-10-25|1992-12-27|DELIVER IN PERSON|TRUCK|. carefully ironic dep 4897|142171|7200|2|34|41247.78|0.02|0.00|R|F|1992-12-31|1992-11-11|1993-01-30|COLLECT COD|AIR|ts. special dependencies use fluffily 4897|54770|2286|3|42|72440.34|0.09|0.03|A|F|1992-09-23|1992-10-28|1992-10-02|DELIVER IN PERSON|FOB|sts. blithely regular deposits will have 4897|103352|3353|4|19|25751.65|0.03|0.00|A|F|1992-11-08|1992-12-14|1992-12-03|DELIVER IN PERSON|FOB|! ironic, pending dependencies doze furiou 4898|71592|6607|1|44|68797.96|0.07|0.02|A|F|1994-09-13|1994-08-18|1994-09-16|NONE|FOB|y regular grouches about 4899|33087|597|1|14|14281.12|0.06|0.00|R|F|1993-11-10|1994-01-10|1993-11-20|NONE|REG AIR| foxes eat 4900|115397|420|1|40|56495.60|0.10|0.03|A|F|1992-09-02|1992-09-25|1992-09-21|COLLECT COD|TRUCK|heodolites. 
request 4900|76356|6357|2|33|43967.55|0.06|0.06|R|F|1992-08-18|1992-09-20|1992-08-19|COLLECT COD|MAIL|nto beans nag slyly reg 4900|102019|7040|3|48|49008.48|0.02|0.00|R|F|1992-09-18|1992-08-14|1992-09-28|TAKE BACK RETURN|MAIL|uickly ironic ideas kindle s 4900|31089|1090|4|20|20401.60|0.05|0.00|R|F|1992-09-22|1992-09-23|1992-09-27|TAKE BACK RETURN|MAIL|yers. accounts affix somet 4900|104891|7402|5|40|75835.60|0.03|0.02|R|F|1992-07-14|1992-09-05|1992-07-20|NONE|REG AIR|luffily final dol 4900|102405|4916|6|46|64740.40|0.06|0.08|R|F|1992-07-11|1992-09-19|1992-07-16|TAKE BACK RETURN|SHIP|ly final acco 4901|140928|8471|1|37|72850.04|0.00|0.04|N|O|1998-01-26|1998-02-20|1998-01-31|DELIVER IN PERSON|TRUCK| furiously ev 4901|164332|6849|2|12|16755.96|0.00|0.04|N|O|1998-01-12|1998-02-06|1998-02-03|COLLECT COD|REG AIR|y unusual deposits prom 4901|119727|2239|3|16|27947.52|0.05|0.08|N|O|1998-04-19|1998-03-18|1998-04-21|NONE|AIR|deposits. blithely fin 4901|35623|5624|4|41|63903.42|0.03|0.00|N|O|1998-03-18|1998-02-18|1998-04-14|TAKE BACK RETURN|AIR|efully bold packages affix carefully eve 4901|115677|5678|5|40|67706.80|0.06|0.02|N|O|1998-01-08|1998-01-30|1998-01-15|DELIVER IN PERSON|MAIL|ect across the furiou 4902|195198|2756|1|22|28450.18|0.00|0.04|N|O|1998-10-17|1998-08-10|1998-10-21|COLLECT COD|RAIL|r the furiously final fox 4902|82370|7387|2|1|1352.37|0.09|0.04|N|O|1998-10-12|1998-08-20|1998-11-08|NONE|RAIL|daring foxes? even, bold requests wake f 4903|120758|759|1|1|1778.75|0.06|0.03|R|F|1992-04-23|1992-06-13|1992-05-03|NONE|SHIP|nusual requests 4903|164392|4393|2|6|8738.34|0.09|0.07|R|F|1992-04-01|1992-05-16|1992-04-11|DELIVER IN PERSON|SHIP|azzle quickly along the blithely final pla 4903|119755|7289|3|27|47918.25|0.07|0.06|A|F|1992-06-29|1992-06-09|1992-07-08|COLLECT COD|RAIL|pinto beans are; 4928|99357|9358|1|4|5425.40|0.04|0.02|R|F|1993-10-25|1993-12-24|1993-11-16|TAKE BACK RETURN|REG AIR|bout the slyly final accounts. carefull 4928|92887|2888|2|20|37597.60|0.03|0.08|A|F|1994-01-19|1993-11-29|1994-02-13|DELIVER IN PERSON|SHIP|quiet theodolites ca 4928|148249|5792|3|34|44106.16|0.06|0.05|A|F|1993-10-12|1993-12-31|1993-10-14|DELIVER IN PERSON|AIR|, regular depos 4929|13595|8598|1|20|30171.80|0.00|0.04|N|O|1996-03-12|1996-05-23|1996-03-20|COLLECT COD|REG AIR| final pinto beans detect. final, 4929|78044|5566|2|40|40881.60|0.08|0.03|N|O|1996-05-30|1996-04-13|1996-06-22|TAKE BACK RETURN|AIR|unts against 4929|76404|8912|3|32|44172.80|0.08|0.02|N|O|1996-04-28|1996-05-23|1996-04-30|COLLECT COD|TRUCK|usly at the blithely pending pl 4929|108935|3956|4|26|50542.18|0.00|0.05|N|O|1996-06-10|1996-05-29|1996-06-26|DELIVER IN PERSON|RAIL| slyly. fl 4929|66430|6431|5|24|33514.32|0.09|0.05|N|O|1996-04-15|1996-04-30|1996-05-09|NONE|MAIL| accounts boost 4930|186572|6573|1|35|58049.95|0.03|0.01|A|F|1994-07-09|1994-07-30|1994-07-15|NONE|RAIL|lose slyly regular dependencies. fur 4930|114748|2282|2|20|35254.80|0.02|0.04|A|F|1994-08-21|1994-06-17|1994-08-24|COLLECT COD|FOB|he carefully 4930|167762|279|3|28|51233.28|0.00|0.08|R|F|1994-08-27|1994-06-27|1994-09-18|COLLECT COD|TRUCK|e ironic, unusual courts. regula 4930|165538|5539|4|42|67348.26|0.00|0.00|A|F|1994-06-18|1994-06-22|1994-07-10|COLLECT COD|AIR|ions haggle. 
furiously regular ideas use 4930|189517|7072|5|38|61047.38|0.02|0.03|A|F|1994-06-06|1994-06-18|1994-07-03|TAKE BACK RETURN|AIR|bold requests sleep never 4931|193048|8087|1|1|1141.04|0.08|0.06|A|F|1995-01-24|1994-12-19|1995-02-07|DELIVER IN PERSON|SHIP| furiously 4931|150506|8052|2|8|12452.00|0.06|0.02|R|F|1994-12-15|1995-01-14|1995-01-06|NONE|SHIP|ts boost. packages wake sly 4931|143024|3025|3|20|21340.40|0.09|0.00|A|F|1995-01-25|1994-12-21|1995-02-06|DELIVER IN PERSON|MAIL|the furious 4931|199283|6841|4|50|69114.00|0.04|0.01|A|F|1994-12-15|1994-12-18|1994-12-23|COLLECT COD|REG AIR|s haggle al 4931|149123|1638|5|25|29303.00|0.05|0.05|R|F|1994-12-19|1995-01-05|1994-12-21|COLLECT COD|FOB|aggle bravely according to the quic 4931|102419|4930|6|8|11371.28|0.02|0.03|A|F|1995-02-16|1994-12-30|1995-03-15|DELIVER IN PERSON|SHIP|dependencies are slyly 4932|50248|7764|1|13|15577.12|0.04|0.03|A|F|1993-09-13|1993-10-16|1993-09-20|DELIVER IN PERSON|SHIP|slyly according to the furiously fin 4932|102499|30|2|15|22522.35|0.01|0.02|R|F|1993-11-15|1993-10-25|1993-11-29|NONE|REG AIR|yly. unusu 4932|86827|9336|3|5|9069.10|0.06|0.06|A|F|1993-10-01|1993-09-13|1993-10-04|NONE|MAIL| haggle furiously. slyly ironic packages sl 4932|97299|2318|4|11|14259.19|0.09|0.06|A|F|1993-09-21|1993-09-30|1993-09-23|COLLECT COD|SHIP|as. special depende 4933|31326|8836|1|48|60351.36|0.08|0.00|N|O|1995-10-10|1995-10-03|1995-11-04|COLLECT COD|SHIP|ideas. sly 4933|81917|6934|2|2|3797.82|0.09|0.00|N|O|1995-10-01|1995-09-29|1995-10-19|DELIVER IN PERSON|MAIL|ctions nag final instructions. accou 4934|96074|1093|1|48|51363.36|0.00|0.01|N|O|1997-05-20|1997-04-22|1997-06-02|TAKE BACK RETURN|SHIP| ideas cajol 4934|109380|9381|2|41|56964.58|0.06|0.06|N|O|1997-06-04|1997-04-11|1997-06-25|TAKE BACK RETURN|FOB|wake final, ironic f 4934|139725|9726|3|8|14117.76|0.03|0.06|N|O|1997-05-20|1997-04-30|1997-05-27|TAKE BACK RETURN|MAIL|arefully express pains cajo 4934|147233|9748|4|9|11522.07|0.06|0.08|N|O|1997-06-10|1997-04-09|1997-06-12|TAKE BACK RETURN|REG AIR| haggle alongside of the 4934|137089|7090|5|29|32656.32|0.09|0.03|N|O|1997-04-10|1997-05-05|1997-05-04|DELIVER IN PERSON|AIR|aggle furiously among the busily final re 4934|51634|1635|6|42|66596.46|0.00|0.07|N|O|1997-03-19|1997-05-05|1997-03-25|NONE|MAIL|ven, ironic ideas 4934|10672|3174|7|2|3165.34|0.10|0.06|N|O|1997-06-05|1997-03-26|1997-06-09|COLLECT COD|MAIL|ongside of the brave, regula 4935|160733|734|1|13|23318.49|0.09|0.01|A|F|1993-06-20|1993-08-13|1993-06-27|COLLECT COD|REG AIR|ly requests. final deposits might 4935|39163|6673|2|37|40779.92|0.01|0.05|R|F|1993-08-30|1993-07-23|1993-09-07|TAKE BACK RETURN|RAIL|y even dependencies nag a 4935|10868|5871|3|24|42692.64|0.06|0.04|A|F|1993-05-29|1993-08-17|1993-06-22|NONE|RAIL|ly quickly s 4935|44935|4936|4|49|92116.57|0.06|0.01|A|F|1993-09-16|1993-08-21|1993-10-12|COLLECT COD|TRUCK|ffily after the furiou 4935|9508|9509|5|14|19845.00|0.08|0.08|A|F|1993-05-30|1993-07-25|1993-05-31|COLLECT COD|FOB|slowly. 
blith 4935|187915|2952|6|36|72104.76|0.10|0.00|R|F|1993-07-11|1993-07-04|1993-08-01|DELIVER IN PERSON|RAIL|requests across the quick 4960|17091|2094|1|36|36291.24|0.01|0.05|R|F|1995-03-06|1995-05-04|1995-04-05|TAKE BACK RETURN|RAIL|c, unusual accou 4960|44360|9369|2|6|7826.16|0.03|0.08|R|F|1995-03-21|1995-05-13|1995-04-14|TAKE BACK RETURN|SHIP|ual package 4960|148041|5584|3|9|9801.36|0.01|0.03|A|F|1995-03-20|1995-05-05|1995-04-17|COLLECT COD|RAIL|e blithely carefully fina 4960|119052|4075|4|14|14994.70|0.00|0.06|A|F|1995-04-03|1995-04-17|1995-04-07|NONE|RAIL|accounts. warhorses are. grouches 4960|97171|2190|5|8|9345.36|0.07|0.04|R|F|1995-03-14|1995-04-18|1995-04-09|NONE|FOB|as. busily regular packages nag. 4960|145105|5106|6|37|42553.70|0.10|0.04|R|F|1995-05-23|1995-04-12|1995-06-01|DELIVER IN PERSON|MAIL|ending theodolites w 4960|169635|9636|7|42|71594.46|0.08|0.07|A|F|1995-04-19|1995-04-11|1995-05-08|NONE|SHIP|s requests cajole. 4961|43748|8757|1|38|64286.12|0.10|0.07|N|O|1998-07-09|1998-06-03|1998-07-11|TAKE BACK RETURN|FOB|e on the blithely bold accounts. unu 4961|59997|5008|2|1|1956.99|0.08|0.08|N|O|1998-07-08|1998-05-25|1998-07-12|DELIVER IN PERSON|MAIL|s affix carefully silent dependen 4961|161177|1178|3|41|50764.97|0.02|0.02|N|O|1998-07-15|1998-06-15|1998-08-05|TAKE BACK RETURN|REG AIR|ily against the n 4961|99354|4373|4|10|13533.50|0.02|0.04|N|O|1998-04-15|1998-07-03|1998-04-18|DELIVER IN PERSON|MAIL|quests. regular, ironic ideas at the ironi 4962|18377|3380|1|46|59587.02|0.01|0.07|R|F|1993-08-23|1993-09-04|1993-08-27|COLLECT COD|REG AIR| pinto beans grow about the sl 4963|167840|2873|1|38|72497.92|0.08|0.02|N|O|1996-12-25|1996-12-12|1997-01-02|COLLECT COD|AIR|tegrate daringly accou 4963|75326|2848|2|16|20821.12|0.00|0.03|N|O|1996-11-20|1997-01-13|1996-12-06|COLLECT COD|MAIL| carefully slyly u 4964|132406|4920|1|29|41713.60|0.04|0.01|N|O|1997-10-18|1997-08-30|1997-11-01|NONE|AIR|k accounts nag carefully-- ironic, fin 4964|147067|9582|2|46|51246.76|0.06|0.06|N|O|1997-10-05|1997-09-12|1997-10-11|NONE|TRUCK|althy deposits 4964|142559|2560|3|18|28827.90|0.00|0.06|N|O|1997-10-13|1997-09-01|1997-11-10|DELIVER IN PERSON|AIR| platelets. furio 4964|179773|2291|4|12|22233.24|0.08|0.01|N|O|1997-09-03|1997-10-25|1997-09-15|NONE|TRUCK|ully silent instructions ca 4964|40697|8210|5|42|68782.98|0.06|0.04|N|O|1997-09-04|1997-08-28|1997-10-02|TAKE BACK RETURN|AIR| hinder. idly even 4964|192728|286|6|22|40055.84|0.04|0.08|N|O|1997-09-11|1997-10-06|1997-09-29|NONE|AIR|equests doubt quickly. caref 4964|172770|2771|7|28|51597.56|0.00|0.05|N|O|1997-08-30|1997-09-15|1997-09-18|COLLECT COD|RAIL|among the carefully regula 4965|130042|5069|1|28|30017.12|0.05|0.03|A|F|1994-01-02|1993-11-20|1994-01-04|TAKE BACK RETURN|REG AIR| deposits. requests sublate quickly 4965|12001|7004|2|25|22825.00|0.10|0.02|R|F|1994-02-05|1993-12-15|1994-02-24|TAKE BACK RETURN|MAIL|wake at the carefully speci 4965|100514|8045|3|27|40891.77|0.05|0.06|R|F|1993-11-06|1993-12-24|1993-11-30|TAKE BACK RETURN|SHIP|efully final foxes 4965|137469|2496|4|33|49713.18|0.04|0.04|A|F|1993-12-31|1993-11-29|1994-01-27|DELIVER IN PERSON|REG AIR|iously slyly 4966|75766|8274|1|10|17417.60|0.06|0.03|N|O|1996-09-23|1996-11-02|1996-10-07|TAKE BACK RETURN|SHIP| requests. carefully pending requests 4966|193834|6354|2|6|11566.98|0.02|0.01|N|O|1996-12-09|1996-11-29|1996-12-30|NONE|AIR|d deposits are sly excuses. 
slyly iro 4966|164992|4993|3|7|14398.93|0.00|0.01|N|O|1996-12-08|1996-10-09|1997-01-06|COLLECT COD|MAIL|ckly ironic tithe 4966|15871|3375|4|26|46458.62|0.08|0.03|N|O|1996-11-14|1996-11-29|1996-12-05|COLLECT COD|REG AIR|nt pearls haggle carefully slyly even 4966|143660|6175|5|12|20443.92|0.02|0.07|N|O|1996-12-07|1996-11-23|1996-12-20|DELIVER IN PERSON|RAIL|eodolites. ironic requests across the exp 4967|70796|3304|1|50|88339.50|0.07|0.01|N|O|1997-05-27|1997-05-13|1997-06-12|NONE|REG AIR|kages. final, unusual accounts c 4967|52398|9914|2|43|58066.77|0.00|0.07|N|O|1997-05-28|1997-04-10|1997-06-09|NONE|TRUCK|ons. slyly ironic requests 4967|49975|9976|3|15|28874.55|0.08|0.02|N|O|1997-04-16|1997-04-12|1997-05-08|TAKE BACK RETURN|MAIL|y. blithel 4967|122478|7503|4|1|1500.47|0.10|0.07|N|O|1997-06-04|1997-03-29|1997-06-23|NONE|FOB|osits. unusual frets thrash furiously 4992|183528|3529|1|42|67683.84|0.07|0.01|R|F|1992-07-19|1992-06-16|1992-08-17|TAKE BACK RETURN|RAIL|foxes about the quickly final platele 4992|146436|8951|2|47|69674.21|0.10|0.08|A|F|1992-09-04|1992-08-05|1992-09-21|COLLECT COD|MAIL|atterns use fluffily. 4992|143147|8176|3|17|20232.38|0.03|0.03|A|F|1992-07-05|1992-07-19|1992-07-30|TAKE BACK RETURN|FOB|s along the perma 4992|69920|4933|4|25|47248.00|0.04|0.06|R|F|1992-08-06|1992-07-11|1992-08-20|NONE|SHIP|ly about the never ironic requests. pe 4992|138500|6040|5|23|35385.50|0.01|0.08|R|F|1992-06-28|1992-07-15|1992-07-12|DELIVER IN PERSON|MAIL|uickly regul 4992|162601|150|6|44|73198.40|0.05|0.02|A|F|1992-06-01|1992-07-22|1992-06-03|NONE|RAIL|rmanent, sly packages print slyly. regula 4993|37231|4741|1|34|39719.82|0.05|0.00|R|F|1994-09-21|1994-10-31|1994-09-24|TAKE BACK RETURN|REG AIR|ular, pending packages at the even packa 4993|128552|1065|2|39|61641.45|0.03|0.08|R|F|1994-09-10|1994-09-04|1994-09-26|COLLECT COD|SHIP|pending, regular requests solve caref 4993|165619|3168|3|42|70753.62|0.06|0.00|A|F|1994-08-27|1994-09-24|1994-09-05|NONE|MAIL| final packages at the q 4993|157398|9914|4|31|45117.09|0.10|0.06|A|F|1994-10-02|1994-10-29|1994-10-15|NONE|AIR|nwind thinly platelets. a 4994|155833|3379|1|36|67997.88|0.00|0.06|N|O|1996-09-29|1996-07-30|1996-10-03|TAKE BACK RETURN|TRUCK|ess ideas. blithely silent brai 4994|79276|4291|2|47|58997.69|0.04|0.05|N|O|1996-09-20|1996-08-04|1996-10-15|COLLECT COD|TRUCK|sts. blithely close ideas sleep quic 4994|182034|4553|3|29|32364.87|0.08|0.01|N|O|1996-08-26|1996-09-27|1996-09-25|DELIVER IN PERSON|RAIL|ptotes boost carefully 4994|38154|8155|4|40|43686.00|0.01|0.06|N|O|1996-08-25|1996-08-16|1996-09-07|TAKE BACK RETURN|REG AIR|eposits. regula 4994|41357|3862|5|24|31160.40|0.01|0.07|N|O|1996-08-19|1996-09-24|1996-08-25|TAKE BACK RETURN|FOB|s. slyly ironic deposits cajole f 4994|72561|2562|6|6|9201.36|0.01|0.02|N|O|1996-09-05|1996-08-04|1996-09-30|TAKE BACK RETURN|FOB|grate carefully around th 4994|129748|9749|7|31|55109.94|0.07|0.04|N|O|1996-10-14|1996-09-23|1996-11-08|TAKE BACK RETURN|RAIL|lar decoys cajole fluffil 4995|64943|7450|1|16|30527.04|0.02|0.05|N|O|1996-02-27|1996-04-03|1996-02-29|DELIVER IN PERSON|MAIL|egular, bold packages. accou 4995|80476|8001|2|43|62628.21|0.00|0.06|N|O|1996-02-24|1996-02-20|1996-03-07|NONE|AIR|ts. blithely silent ideas after t 4995|155462|5463|3|22|33384.12|0.03|0.06|N|O|1996-03-17|1996-03-12|1996-04-01|DELIVER IN PERSON|MAIL|s wake furious, express dependencies. 
4995|39933|4940|4|9|16856.37|0.07|0.07|N|O|1996-03-07|1996-03-17|1996-03-11|DELIVER IN PERSON|FOB| ironic packages cajole across t 4995|147648|5191|5|48|81390.72|0.08|0.07|N|O|1996-03-22|1996-04-01|1996-04-07|NONE|SHIP|t blithely. requests affix blithely. 4995|109830|4851|6|48|88311.84|0.09|0.07|N|O|1996-04-14|1996-04-04|1996-05-07|DELIVER IN PERSON|RAIL|nstructions. carefully final depos 4996|55891|902|1|35|64641.15|0.07|0.01|A|F|1992-10-30|1992-10-27|1992-11-05|TAKE BACK RETURN|SHIP|s. unusual, regular dolphins integrate care 4996|155848|5849|2|39|74249.76|0.02|0.07|A|F|1992-09-19|1992-10-19|1992-10-06|COLLECT COD|FOB|equests are carefully final 4996|127777|2802|3|12|21657.24|0.04|0.06|R|F|1993-01-09|1992-11-22|1993-02-04|DELIVER IN PERSON|SHIP|usly bold requests sleep dogge 4996|143576|1119|4|13|21054.41|0.00|0.00|A|F|1992-09-17|1992-12-02|1992-10-07|DELIVER IN PERSON|TRUCK|o beans use about the furious 4997|78040|5562|1|44|44793.76|0.02|0.05|N|O|1998-06-09|1998-06-12|1998-07-07|NONE|RAIL|r escapades ca 4997|16188|3692|2|5|5520.90|0.02|0.04|N|O|1998-05-16|1998-06-05|1998-06-07|COLLECT COD|REG AIR|cuses are furiously unusual asymptotes 4997|57193|7194|3|24|27604.56|0.04|0.06|N|O|1998-04-20|1998-04-23|1998-05-16|NONE|AIR|xpress, bo 4997|39081|1585|4|5|5100.40|0.10|0.03|N|O|1998-06-12|1998-04-24|1998-06-13|DELIVER IN PERSON|TRUCK|aggle slyly alongside of the slyly i 4997|21826|4329|5|46|80399.72|0.00|0.04|N|O|1998-04-28|1998-06-04|1998-05-08|TAKE BACK RETURN|SHIP|ecial courts are carefully 4997|28912|6419|6|2|3681.82|0.07|0.01|N|O|1998-07-09|1998-06-10|1998-07-21|TAKE BACK RETURN|REG AIR|counts. slyl 4998|153471|5987|1|12|18293.64|0.04|0.03|A|F|1992-02-20|1992-03-06|1992-03-01|TAKE BACK RETURN|RAIL| sleep slyly furiously final accounts. ins 4998|182630|7667|2|15|25689.45|0.06|0.00|R|F|1992-04-24|1992-03-21|1992-05-02|NONE|REG AIR|heodolites sleep quickly. 4998|58992|8993|3|27|52676.73|0.06|0.02|R|F|1992-03-17|1992-02-26|1992-04-05|DELIVER IN PERSON|MAIL|the blithely ironic 4998|62376|7389|4|47|62903.39|0.10|0.04|A|F|1992-02-07|1992-03-07|1992-02-19|DELIVER IN PERSON|TRUCK|mong the careful 4998|144589|2132|5|24|39205.92|0.01|0.04|R|F|1992-01-25|1992-03-16|1992-01-27|COLLECT COD|REG AIR| unwind about 4998|98492|1002|6|8|11923.92|0.03|0.07|A|F|1992-05-01|1992-03-03|1992-05-24|TAKE BACK RETURN|AIR|ions nag quickly according to the theodolit 4999|152687|7718|1|30|52190.40|0.00|0.02|A|F|1993-08-20|1993-08-15|1993-08-30|NONE|AIR|ades cajole carefully unusual ide 4999|9204|9205|2|44|48980.80|0.03|0.01|A|F|1993-08-01|1993-08-04|1993-08-17|COLLECT COD|REG AIR|ependencies. slowly regu 4999|85996|5997|3|30|59459.70|0.09|0.01|R|F|1993-07-21|1993-08-11|1993-08-20|DELIVER IN PERSON|RAIL|s cajole among the blithel 5024|165411|444|1|17|25098.97|0.10|0.02|N|O|1996-11-24|1997-01-10|1996-12-04|NONE|AIR| to the expre 5024|57578|84|2|41|62958.37|0.06|0.01|N|O|1996-11-09|1996-12-03|1996-12-01|COLLECT COD|REG AIR|osits hinder carefully 5024|111009|3521|3|18|18360.00|0.04|0.03|N|O|1996-12-02|1997-01-16|1996-12-05|NONE|MAIL|zle carefully sauternes. quickly 5024|122508|5021|4|42|64281.00|0.03|0.06|N|O|1996-12-02|1996-12-08|1996-12-04|DELIVER IN PERSON|RAIL|tegrate. 
busily spec 5025|29421|4426|1|11|14854.62|0.00|0.04|N|O|1997-02-21|1997-04-16|1997-03-14|COLLECT COD|SHIP|the carefully final esc 5025|77470|2485|2|10|14474.70|0.07|0.04|N|O|1997-06-04|1997-04-29|1997-06-28|COLLECT COD|RAIL|lly silent deposits boost busily again 5026|95360|7870|1|13|17619.68|0.02|0.04|N|O|1997-12-23|1997-11-02|1998-01-03|TAKE BACK RETURN|SHIP|endencies sleep carefully alongs 5027|97261|4789|1|6|7549.56|0.04|0.05|N|O|1997-09-28|1997-11-24|1997-10-25|NONE|FOB|ar, ironic deposi 5027|61744|1745|2|39|66523.86|0.06|0.01|N|O|1997-09-09|1997-11-13|1997-09-21|TAKE BACK RETURN|FOB|ess requests! quickly regular pac 5027|125140|165|3|32|37284.48|0.00|0.01|N|O|1997-11-13|1997-10-29|1997-11-18|TAKE BACK RETURN|RAIL|cording to 5027|25594|5595|4|37|56224.83|0.02|0.00|N|O|1997-10-05|1997-10-30|1997-10-26|NONE|REG AIR|ost slyly fluffily 5027|142732|2733|5|3|5324.19|0.03|0.06|N|O|1997-09-30|1997-11-26|1997-10-05|DELIVER IN PERSON|AIR|t the even mu 5027|86493|9002|6|25|36987.25|0.06|0.00|N|O|1997-09-16|1997-11-25|1997-10-08|TAKE BACK RETURN|RAIL|ic ideas. requests sleep fluffily am 5027|80444|2953|7|50|71222.00|0.07|0.02|N|O|1997-09-18|1997-11-07|1997-10-05|DELIVER IN PERSON|MAIL| beans dazzle according to the fluffi 5028|13876|8879|1|15|26848.05|0.07|0.07|R|F|1992-07-17|1992-07-16|1992-08-05|COLLECT COD|REG AIR|es are quickly final pains. furiously pend 5028|198686|8687|2|15|26770.20|0.03|0.07|R|F|1992-08-02|1992-07-09|1992-08-30|NONE|REG AIR|gular, bold pinto bea 5029|153597|3598|1|17|28060.03|0.02|0.01|A|F|1993-03-12|1992-12-18|1993-04-02|DELIVER IN PERSON|FOB|! packages boost blithely. furious 5029|96432|8942|2|2|2856.86|0.00|0.04|A|F|1992-11-25|1993-01-04|1992-12-20|DELIVER IN PERSON|MAIL|packages. furiously ironi 5030|101356|1357|1|22|29861.70|0.04|0.06|N|O|1998-09-01|1998-08-15|1998-09-30|TAKE BACK RETURN|TRUCK|. quickly regular foxes believe 5030|79762|4777|2|50|87088.00|0.05|0.06|N|O|1998-08-22|1998-07-25|1998-09-18|TAKE BACK RETURN|FOB|ss excuses serve bli 5031|49942|9943|1|15|28379.10|0.02|0.05|R|F|1995-04-01|1995-02-24|1995-04-12|DELIVER IN PERSON|AIR|yly pending theodolites. 5031|160895|8444|2|40|78235.60|0.10|0.04|A|F|1994-12-04|1995-01-27|1995-01-01|NONE|TRUCK|ns hang blithely across th 5031|153067|613|3|4|4480.24|0.01|0.07|R|F|1994-12-26|1995-02-24|1995-01-11|NONE|RAIL|after the even frays: ironic, unusual th 5031|180435|2954|4|31|46978.33|0.10|0.08|R|F|1995-01-15|1995-01-08|1995-02-09|COLLECT COD|MAIL|ts across the even requests doze furiously 5056|47302|4815|1|7|8745.10|0.09|0.01|N|O|1997-04-28|1997-04-07|1997-05-15|DELIVER IN PERSON|TRUCK|rouches after the pending instruc 5056|196388|3946|2|19|28203.22|0.04|0.00|N|O|1997-03-24|1997-05-05|1997-04-23|DELIVER IN PERSON|AIR|c theodolites. ironic a 5056|89040|4057|3|23|23667.92|0.02|0.05|N|O|1997-05-12|1997-04-28|1997-05-25|NONE|SHIP|ickly regular requests cajole. depos 5056|86915|1932|4|14|26626.74|0.08|0.00|N|O|1997-06-09|1997-04-13|1997-07-06|COLLECT COD|SHIP|sts haggle carefully along the slyl 5057|36914|4424|1|38|70334.58|0.02|0.03|N|O|1997-10-24|1997-09-07|1997-10-30|TAKE BACK RETURN|MAIL|packages. 
stealthily bold wa 5057|7083|9584|2|45|44553.60|0.08|0.07|N|O|1997-09-20|1997-10-02|1997-10-20|NONE|FOB| asymptotes wake slyl 5058|192440|4960|1|16|24519.04|0.09|0.07|N|O|1998-07-12|1998-06-09|1998-07-15|DELIVER IN PERSON|SHIP| the special foxes 5059|69538|7057|1|5|7537.65|0.03|0.08|R|F|1993-12-23|1994-01-12|1993-12-24|TAKE BACK RETURN|FOB|ts affix slyly accordi 5059|122882|7907|2|19|36192.72|0.06|0.04|R|F|1994-03-02|1993-12-26|1994-03-14|TAKE BACK RETURN|MAIL| special ideas poach blithely qu 5059|76305|8813|3|45|57658.50|0.02|0.00|A|F|1994-01-28|1994-01-08|1994-02-18|DELIVER IN PERSON|MAIL|enly. requests doze. express, close pa 5060|24932|2439|1|27|50137.11|0.10|0.07|R|F|1992-07-23|1992-09-05|1992-08-07|COLLECT COD|SHIP|s. ironic 5060|31033|3537|2|28|26992.84|0.04|0.04|R|F|1992-09-25|1992-08-11|1992-10-09|NONE|REG AIR|c requests 5060|160257|258|3|15|19758.75|0.06|0.01|A|F|1992-08-28|1992-08-20|1992-09-01|DELIVER IN PERSON|AIR|ular deposits sl 5061|164896|9929|1|18|35296.02|0.03|0.00|A|F|1993-10-20|1993-10-05|1993-10-28|TAKE BACK RETURN|SHIP|atelets among the ca 5061|197491|2530|2|8|12707.92|0.01|0.02|R|F|1993-09-07|1993-10-31|1993-10-04|DELIVER IN PERSON|REG AIR|regular foxes. ir 5061|23216|3217|3|26|29619.46|0.02|0.05|A|F|1993-11-07|1993-09-13|1993-11-13|NONE|REG AIR| cajole slyly. carefully spe 5062|100784|3295|1|9|16063.02|0.08|0.00|R|F|1993-01-02|1992-12-01|1993-01-20|TAKE BACK RETURN|MAIL| silent theodolites wake. c 5062|74681|4682|2|4|6622.72|0.02|0.02|R|F|1993-02-06|1992-12-14|1993-03-03|DELIVER IN PERSON|AIR|ke furiously express theodolites. 5062|158504|8505|3|50|78125.00|0.09|0.07|A|F|1992-12-25|1992-12-13|1992-12-29|TAKE BACK RETURN|MAIL| the regular, unusual pains. specia 5062|160148|2665|4|18|21746.52|0.03|0.07|R|F|1992-11-04|1992-12-25|1992-11-05|NONE|SHIP|furiously pending requests are ruthles 5062|193632|1190|5|25|43140.75|0.08|0.02|R|F|1992-12-15|1992-11-17|1993-01-01|NONE|TRUCK|uthless excuses ag 5063|128529|8530|1|31|48283.12|0.08|0.01|N|O|1997-06-02|1997-06-20|1997-06-27|NONE|RAIL|kages. ironic, ironic courts wake. carefu 5063|173640|1192|2|43|73686.52|0.04|0.08|N|O|1997-09-14|1997-07-05|1997-10-05|TAKE BACK RETURN|TRUCK|latelets might nod blithely regular requ 5063|166201|1234|3|2|2534.40|0.02|0.03|N|O|1997-06-17|1997-07-27|1997-06-24|COLLECT COD|SHIP|kly regular i 5063|134525|9552|4|18|28071.36|0.08|0.05|N|O|1997-06-02|1997-06-18|1997-06-06|TAKE BACK RETURN|RAIL|refully quiet reques 5063|160636|5669|5|1|1696.63|0.06|0.07|N|O|1997-09-03|1997-06-26|1997-10-03|NONE|FOB|ously special 5088|77900|5422|1|23|43191.70|0.06|0.06|R|F|1993-03-03|1993-03-07|1993-03-08|NONE|FOB|cording to the fluffily expr 5088|50009|7525|2|41|39319.00|0.09|0.00|R|F|1993-01-22|1993-03-07|1993-02-09|TAKE BACK RETURN|TRUCK|ing requests. 5088|85245|2770|3|36|44288.64|0.10|0.05|A|F|1993-04-16|1993-04-03|1993-05-14|NONE|TRUCK|the furiously final deposits. furiously re 5088|108479|6010|4|10|14874.70|0.04|0.05|R|F|1993-04-07|1993-02-06|1993-04-26|NONE|FOB|beans. special requests af 5089|157064|9580|1|4|4484.24|0.05|0.06|R|F|1992-09-18|1992-09-28|1992-10-13|DELIVER IN PERSON|TRUCK|nts sleep blithely 5089|161548|1549|2|20|32190.80|0.00|0.07|R|F|1992-10-10|1992-10-07|1992-11-06|COLLECT COD|RAIL| ironic accounts 5089|123570|1107|3|46|73304.22|0.03|0.04|A|F|1992-11-09|1992-10-13|1992-11-10|TAKE BACK RETURN|RAIL|above the express accounts. 
exc 5089|33436|5940|4|38|52038.34|0.05|0.03|R|F|1992-11-23|1992-09-11|1992-12-22|TAKE BACK RETURN|SHIP|regular instructions are 5090|21309|1310|1|22|27066.60|0.07|0.00|N|O|1997-05-10|1997-05-25|1997-05-24|TAKE BACK RETURN|TRUCK|ets integrate ironic, regul 5090|128542|8543|2|46|72244.84|0.05|0.00|N|O|1997-04-05|1997-04-14|1997-05-01|COLLECT COD|REG AIR|lose theodolites sleep blit 5090|1804|9305|3|22|37527.60|0.09|0.05|N|O|1997-07-03|1997-04-12|1997-07-26|NONE|REG AIR|ular requests su 5090|113646|6158|4|2|3319.28|0.03|0.06|N|O|1997-04-07|1997-04-23|1997-05-01|TAKE BACK RETURN|AIR|tes. slowly iro 5090|47604|7605|5|21|32583.60|0.10|0.02|N|O|1997-03-29|1997-04-24|1997-04-25|TAKE BACK RETURN|FOB|ly express accounts. slyly even r 5090|79662|4677|6|30|49249.80|0.02|0.03|N|O|1997-05-04|1997-04-14|1997-05-30|COLLECT COD|MAIL|osits nag slyly. fluffily ex 5091|77312|4834|1|50|64465.50|0.05|0.03|N|O|1998-07-21|1998-06-22|1998-07-26|COLLECT COD|REG AIR|al dependencies. r 5092|163090|8123|1|30|34592.70|0.06|0.00|N|O|1995-12-27|1995-12-08|1996-01-09|DELIVER IN PERSON|MAIL|ss, ironic deposits. furiously stea 5092|44716|2229|2|34|56464.14|0.04|0.02|N|O|1995-12-09|1995-12-26|1995-12-21|TAKE BACK RETURN|AIR|ckages nag 5092|139286|6826|3|13|17228.64|0.06|0.01|N|O|1995-11-21|1996-01-05|1995-12-19|TAKE BACK RETURN|SHIP|es detect sly 5092|179246|9247|4|14|18553.36|0.04|0.00|N|O|1996-02-20|1995-11-30|1996-03-20|DELIVER IN PERSON|REG AIR| deposits cajole furiously against the sly 5092|185660|8179|5|42|73317.72|0.01|0.02|N|O|1995-11-06|1996-01-01|1995-12-06|DELIVER IN PERSON|AIR|s use along t 5092|177555|5107|6|11|17958.05|0.03|0.03|N|O|1995-12-02|1995-12-27|1995-12-11|COLLECT COD|MAIL|ly against the slyly silen 5092|158325|8326|7|50|69166.00|0.10|0.03|N|O|1995-11-30|1996-01-14|1995-12-19|NONE|REG AIR|r platelets maintain car 5093|167649|7650|1|40|68665.60|0.05|0.01|R|F|1993-09-16|1993-11-04|1993-10-05|TAKE BACK RETURN|REG AIR|ing pinto beans. quickly bold dependenci 5093|73419|941|2|15|20886.15|0.01|0.04|A|F|1993-12-02|1993-11-18|1994-01-01|DELIVER IN PERSON|FOB|ly among the unusual foxe 5093|150780|3296|3|31|56754.18|0.00|0.02|R|F|1993-09-22|1993-11-14|1993-09-26|TAKE BACK RETURN|REG AIR| against the 5093|155669|700|4|37|63812.42|0.04|0.00|A|F|1993-10-26|1993-12-02|1993-10-27|NONE|TRUCK|courts. qui 5093|114865|9888|5|30|56395.80|0.06|0.05|A|F|1993-11-22|1993-11-27|1993-12-14|DELIVER IN PERSON|TRUCK|ithely ironic sheaves use fluff 5093|120197|2710|6|31|37732.89|0.01|0.08|A|F|1993-12-17|1993-11-14|1994-01-02|NONE|SHIP|he final foxes. fluffily ironic 5094|142519|5034|1|19|29668.69|0.03|0.03|R|F|1993-03-31|1993-06-12|1993-04-04|NONE|AIR|ronic foxes. furi 5094|107604|5135|2|23|37066.80|0.05|0.07|R|F|1993-06-13|1993-05-19|1993-07-06|NONE|MAIL|st furiously above the fluffily care 5094|91455|8983|3|11|15910.95|0.04|0.08|A|F|1993-06-25|1993-06-24|1993-07-18|TAKE BACK RETURN|MAIL|s cajole quickly against the furiously ex 5094|78510|8511|4|21|31258.71|0.09|0.08|R|F|1993-07-26|1993-05-03|1993-08-16|NONE|MAIL| blithely furiously final re 5095|64242|1761|1|46|55487.04|0.07|0.01|A|F|1992-06-26|1992-06-25|1992-07-05|COLLECT COD|RAIL|egular instruction 5095|105488|3019|2|2|2986.96|0.07|0.08|A|F|1992-07-09|1992-05-25|1992-07-21|DELIVER IN PERSON|REG AIR|detect car 5095|122758|5271|3|28|49861.00|0.01|0.04|A|F|1992-06-20|1992-06-27|1992-06-22|DELIVER IN PERSON|AIR| into the final courts. ca 5095|177984|3019|4|42|86603.16|0.08|0.08|R|F|1992-05-23|1992-06-01|1992-06-18|COLLECT COD|TRUCK|ccounts. 
packages could have t 5095|165425|5426|5|9|13413.78|0.10|0.07|R|F|1992-08-14|1992-06-23|1992-08-16|TAKE BACK RETURN|REG AIR|bold theodolites wake about the expr 5095|96768|6769|6|15|26471.40|0.01|0.06|A|F|1992-07-11|1992-07-12|1992-08-09|COLLECT COD|AIR| to the packages wake sly 5095|168890|8891|7|40|78355.60|0.05|0.02|A|F|1992-07-11|1992-06-07|1992-07-26|DELIVER IN PERSON|MAIL|carefully unusual plat 5120|132365|7392|1|28|39126.08|0.06|0.03|N|O|1996-07-20|1996-08-31|1996-08-06|NONE|RAIL| across the silent requests. caref 5121|183562|1117|1|23|37847.88|0.06|0.01|A|F|1992-05-18|1992-06-20|1992-06-02|TAKE BACK RETURN|REG AIR|even courts are blithely ironically 5121|110722|8256|2|45|77972.40|0.08|0.04|A|F|1992-08-13|1992-07-27|1992-09-12|NONE|TRUCK|pecial accounts cajole ca 5121|96595|1614|3|27|42972.93|0.08|0.07|R|F|1992-06-17|1992-06-11|1992-06-19|NONE|MAIL|ly silent theodolit 5121|67014|9521|4|10|9810.10|0.04|0.05|R|F|1992-06-08|1992-07-10|1992-07-02|TAKE BACK RETURN|FOB|e quickly according 5121|88202|8203|5|46|54749.20|0.03|0.02|R|F|1992-05-27|1992-07-19|1992-05-28|TAKE BACK RETURN|FOB|use express foxes. slyly 5121|79|7580|6|2|1958.14|0.04|0.07|R|F|1992-08-10|1992-06-28|1992-08-11|NONE|FOB| final, regular account 5122|182287|9842|1|28|38339.84|0.03|0.00|N|O|1996-04-20|1996-03-29|1996-04-29|DELIVER IN PERSON|FOB|g the busily ironic accounts boos 5122|81957|9482|2|43|83374.85|0.09|0.03|N|O|1996-05-31|1996-04-12|1996-06-13|NONE|MAIL|ut the carefully special foxes. idle, 5122|44058|4059|3|12|12024.60|0.07|0.03|N|O|1996-04-02|1996-04-27|1996-04-10|DELIVER IN PERSON|AIR|lar instructions 5123|25745|5746|1|13|21719.62|0.08|0.07|N|O|1998-05-17|1998-03-23|1998-06-02|COLLECT COD|MAIL|regular pearls 5124|54193|1709|1|43|49329.17|0.00|0.02|N|O|1997-07-10|1997-05-13|1997-07-31|COLLECT COD|AIR|onic package 5124|5101|2602|2|41|41250.10|0.02|0.06|N|O|1997-07-05|1997-06-29|1997-07-23|DELIVER IN PERSON|RAIL|wake across the 5124|124798|4799|3|44|80202.76|0.03|0.03|N|O|1997-07-13|1997-06-26|1997-08-01|DELIVER IN PERSON|RAIL|equests. carefully unusual d 5124|69465|1972|4|36|51640.56|0.10|0.07|N|O|1997-04-20|1997-07-03|1997-05-04|TAKE BACK RETURN|AIR|r deposits ab 5125|5424|7925|1|38|50517.96|0.09|0.05|N|O|1998-03-20|1998-04-14|1998-03-22|COLLECT COD|MAIL|ily even deposits w 5125|159077|9078|2|5|5680.35|0.08|0.06|N|O|1998-04-07|1998-04-14|1998-04-29|COLLECT COD|RAIL| thinly even pack 5126|23568|8573|1|33|49221.48|0.02|0.02|R|F|1993-02-04|1992-12-23|1993-02-14|NONE|RAIL|ipliers promise furiously whithout the 5126|100832|5853|2|43|78811.69|0.09|0.04|R|F|1993-01-07|1992-12-19|1993-01-16|COLLECT COD|MAIL|e silently. ironic, unusual accounts 5126|77521|29|3|23|34465.96|0.08|0.01|R|F|1993-01-02|1993-01-02|1993-01-05|COLLECT COD|TRUCK|egular, blithe packages. 5127|18637|1139|1|33|51335.79|0.08|0.04|N|O|1997-03-25|1997-03-02|1997-04-04|NONE|SHIP| bold deposits use carefully a 5127|31122|3626|2|20|21062.40|0.01|0.03|N|O|1997-05-11|1997-02-26|1997-05-12|TAKE BACK RETURN|SHIP|dolites about the final platelets w 5152|104917|2448|1|9|17297.19|0.04|0.03|N|O|1997-04-11|1997-02-11|1997-04-18|COLLECT COD|AIR| cajole furiously alongside of the bo 5152|133398|5912|2|50|71569.50|0.04|0.04|N|O|1997-03-10|1997-02-04|1997-03-15|COLLECT COD|FOB| the final deposits. slyly ironic warth 5153|34465|6969|1|42|58777.32|0.03|0.01|N|O|1995-10-03|1995-11-09|1995-10-11|COLLECT COD|RAIL|re thinly. 
ironic 5153|52804|320|2|14|24595.20|0.05|0.05|N|O|1995-11-29|1995-10-21|1995-12-08|TAKE BACK RETURN|TRUCK| slyly daring pinto beans lose blithely fi 5153|67863|370|3|30|54925.80|0.09|0.01|N|O|1995-11-10|1995-11-14|1995-11-16|DELIVER IN PERSON|AIR|beans sleep bl 5153|172162|7197|4|32|39493.12|0.10|0.08|N|O|1995-12-05|1995-09-25|1996-01-03|DELIVER IN PERSON|MAIL|egular deposits. ironi 5153|111676|9210|5|36|60756.12|0.01|0.03|N|O|1995-12-15|1995-11-08|1995-12-30|COLLECT COD|TRUCK| ironic instru 5153|135055|2595|6|42|45782.10|0.00|0.03|N|O|1995-10-19|1995-11-23|1995-11-06|TAKE BACK RETURN|RAIL|ickly even deposi 5154|189775|2294|1|11|20512.47|0.02|0.05|N|O|1997-08-06|1997-06-30|1997-09-04|NONE|RAIL|luffily bold foxes. final 5154|143948|3949|2|15|29879.10|0.07|0.08|N|O|1997-06-23|1997-07-11|1997-07-11|NONE|AIR|even packages. packages use 5155|47932|7933|1|1|1879.93|0.00|0.00|A|F|1994-07-03|1994-08-11|1994-07-29|COLLECT COD|FOB|oze slyly after the silent, regular idea 5155|187548|2585|2|5|8177.70|0.08|0.02|A|F|1994-06-30|1994-08-13|1994-07-15|TAKE BACK RETURN|AIR|ole blithely slyly ironic 5155|105449|2980|3|28|40724.32|0.05|0.02|R|F|1994-07-01|1994-07-19|1994-07-18|COLLECT COD|REG AIR|s cajole. accounts wake. thinly quiet pla 5155|78860|6382|4|39|71715.54|0.09|0.06|A|F|1994-08-25|1994-09-01|1994-09-18|COLLECT COD|TRUCK|l dolphins nag caref 5156|116520|1543|1|21|32266.92|0.06|0.03|N|O|1997-01-01|1997-01-30|1997-01-11|TAKE BACK RETURN|TRUCK|ts detect against the furiously reg 5156|147020|2049|2|36|38412.72|0.04|0.07|N|O|1997-02-12|1996-12-10|1997-03-13|TAKE BACK RETURN|REG AIR| slyly even orbi 5157|54165|1681|1|35|39170.60|0.06|0.08|N|O|1997-07-28|1997-09-30|1997-08-15|TAKE BACK RETURN|REG AIR|to the furiously sil 5157|137834|7835|2|18|33692.94|0.10|0.04|N|O|1997-09-06|1997-10-03|1997-09-19|COLLECT COD|MAIL|y bold deposits nag blithely. final reque 5157|166387|6388|3|15|21800.70|0.09|0.00|N|O|1997-07-27|1997-08-30|1997-08-08|DELIVER IN PERSON|RAIL|cajole. spec 5157|58188|694|4|25|28654.50|0.00|0.03|N|O|1997-08-24|1997-09-23|1997-08-28|COLLECT COD|REG AIR| packages detect. even requests against th 5157|148947|6490|5|40|79837.60|0.09|0.06|N|O|1997-08-11|1997-08-28|1997-09-01|TAKE BACK RETURN|FOB|ial packages according to 5157|149156|6699|6|26|31333.90|0.10|0.01|N|O|1997-07-28|1997-08-22|1997-08-22|NONE|FOB|nto beans cajole car 5157|48745|6258|7|12|20324.88|0.10|0.08|N|O|1997-10-19|1997-08-07|1997-10-26|NONE|FOB|es. busily 5158|44249|1762|1|43|51309.32|0.10|0.04|N|O|1997-04-10|1997-03-06|1997-04-15|DELIVER IN PERSON|AIR|nusual platelets. slyly even foxes cajole 5158|84248|4249|2|18|22180.32|0.04|0.04|N|O|1997-04-30|1997-03-28|1997-05-12|COLLECT COD|REG AIR|hely regular pa 5158|141878|4393|3|41|78714.67|0.05|0.05|N|O|1997-02-25|1997-03-19|1997-03-03|COLLECT COD|AIR|deposits. 
quickly special 5158|130049|7589|4|49|52872.96|0.05|0.01|N|O|1997-04-10|1997-03-21|1997-04-30|NONE|REG AIR|r requests sleep q 5158|118468|6002|5|20|29729.20|0.01|0.04|N|O|1997-02-03|1997-02-20|1997-02-08|TAKE BACK RETURN|AIR|latelets use accordin 5158|87969|478|6|39|76321.44|0.08|0.04|N|O|1997-05-15|1997-04-04|1997-06-02|DELIVER IN PERSON|FOB|lithely fina 5158|90030|7558|7|38|38761.14|0.10|0.05|N|O|1997-05-09|1997-03-03|1997-06-04|NONE|SHIP|uffily regular ac 5159|123367|904|1|39|54224.04|0.06|0.07|N|O|1996-12-17|1996-12-08|1997-01-10|COLLECT COD|MAIL|re furiously after the pending dolphin 5159|16681|9183|2|46|73493.28|0.01|0.01|N|O|1996-12-15|1996-12-07|1996-12-30|DELIVER IN PERSON|SHIP|s kindle slyly carefully regular 5159|151238|8784|3|22|28363.06|0.01|0.02|N|O|1996-11-06|1996-11-04|1996-11-15|TAKE BACK RETURN|SHIP|he furiously sile 5159|51907|1908|4|5|9294.50|0.10|0.00|N|O|1996-11-25|1996-12-19|1996-12-25|TAKE BACK RETURN|FOB|nal deposits. pending, ironic ideas grow 5159|197897|417|5|36|71816.04|0.06|0.01|N|O|1997-01-24|1996-11-07|1997-02-08|NONE|REG AIR|packages wake. 5184|152521|7552|1|33|51926.16|0.07|0.04|N|O|1998-08-17|1998-10-16|1998-08-24|TAKE BACK RETURN|AIR|posits. carefully express asympto 5184|15749|3253|2|47|78242.78|0.05|0.01|N|O|1998-11-02|1998-08-19|1998-11-07|COLLECT COD|TRUCK|se. carefully express pinto beans x 5184|87427|9936|3|39|55162.38|0.03|0.06|N|O|1998-10-27|1998-10-17|1998-11-19|DELIVER IN PERSON|FOB|es above the care 5184|175958|5959|4|26|52882.70|0.05|0.08|N|O|1998-11-11|1998-08-26|1998-12-01|TAKE BACK RETURN|TRUCK| packages are 5184|123097|5610|5|19|21281.71|0.06|0.03|N|O|1998-11-15|1998-10-12|1998-11-21|COLLECT COD|REG AIR|refully express platelets sleep carefull 5184|79525|4540|6|49|73721.48|0.02|0.00|N|O|1998-09-18|1998-08-28|1998-10-14|COLLECT COD|FOB|thlessly closely even reque 5185|196408|3966|1|37|55662.80|0.00|0.04|N|O|1997-08-08|1997-09-08|1997-08-14|COLLECT COD|SHIP|gainst the courts dazzle care 5185|24597|2104|2|32|48690.88|0.06|0.00|N|O|1997-08-17|1997-09-30|1997-08-24|TAKE BACK RETURN|REG AIR|ackages. slyly even requests 5185|195889|928|3|41|81380.08|0.00|0.05|N|O|1997-10-15|1997-10-11|1997-11-02|COLLECT COD|REG AIR|ly blithe deposits. furi 5185|95730|5731|4|30|51771.90|0.09|0.04|N|O|1997-10-17|1997-09-16|1997-10-23|TAKE BACK RETURN|SHIP|ress packages are furiously 5185|127706|7707|5|8|13869.60|0.04|0.00|N|O|1997-08-30|1997-09-02|1997-09-22|COLLECT COD|REG AIR|sts around the slyly perma 5185|145806|835|6|50|92590.00|0.03|0.04|N|O|1997-10-15|1997-10-19|1997-11-06|TAKE BACK RETURN|FOB|final platelets. ideas sleep careful 5186|54412|9423|1|38|51923.58|0.06|0.02|N|O|1996-11-23|1996-09-21|1996-12-11|DELIVER IN PERSON|MAIL|y ruthless foxes. fluffily 5186|90920|921|2|31|59238.52|0.09|0.03|N|O|1996-10-19|1996-09-26|1996-10-25|TAKE BACK RETURN|REG AIR| accounts use furiously slyly spe 5186|88279|3296|3|26|32949.02|0.03|0.02|N|O|1996-08-08|1996-10-05|1996-08-21|DELIVER IN PERSON|FOB|capades. accounts sublate. pinto 5186|89226|9227|4|8|9721.76|0.10|0.05|N|O|1996-09-23|1996-09-29|1996-09-30|COLLECT COD|RAIL|y regular notornis k 5186|17403|9905|5|28|36971.20|0.09|0.03|N|O|1996-10-05|1996-10-27|1996-10-19|TAKE BACK RETURN|RAIL|al decoys. 
blit 5186|81990|4499|6|35|69019.65|0.00|0.05|N|O|1996-10-20|1996-10-12|1996-11-12|TAKE BACK RETURN|RAIL|sly silent pack 5186|197847|367|7|44|85572.96|0.00|0.08|N|O|1996-09-23|1996-10-14|1996-10-01|NONE|TRUCK|old, final accounts cajole sl 5187|10977|8481|1|49|92510.53|0.04|0.06|N|O|1997-10-20|1997-10-12|1997-10-26|DELIVER IN PERSON|AIR|l, regular platelets instead of the foxes w 5187|82848|2849|2|1|1830.84|0.10|0.08|N|O|1997-08-08|1997-08-24|1997-08-22|DELIVER IN PERSON|REG AIR|aggle never bold 5188|117534|46|1|18|27927.54|0.04|0.03|N|O|1995-06-19|1995-05-19|1995-06-25|DELIVER IN PERSON|AIR|p according to the sometimes regu 5188|193702|1260|2|36|64645.20|0.04|0.02|A|F|1995-03-09|1995-05-16|1995-03-19|NONE|TRUCK|packages? blithely s 5188|147932|2961|3|9|17819.37|0.06|0.08|A|F|1995-05-09|1995-05-22|1995-05-19|TAKE BACK RETURN|REG AIR|r attainments are across the 5189|137325|2352|1|44|59942.08|0.02|0.06|A|F|1994-01-13|1994-02-07|1994-01-21|DELIVER IN PERSON|MAIL|y finally pendin 5189|15881|884|2|38|68281.44|0.06|0.00|A|F|1994-03-26|1994-01-28|1994-04-20|DELIVER IN PERSON|REG AIR|ideas. idle, final deposits de 5189|109889|4910|3|4|7595.52|0.09|0.02|A|F|1993-12-21|1994-02-23|1994-01-09|DELIVER IN PERSON|REG AIR|. blithely exp 5189|93195|8214|4|49|58221.31|0.05|0.01|R|F|1994-01-22|1994-01-19|1994-02-04|TAKE BACK RETURN|SHIP| requests 5189|122046|7071|5|14|14952.56|0.02|0.03|A|F|1994-01-23|1994-01-05|1994-02-12|DELIVER IN PERSON|REG AIR|unusual packag 5189|16138|6139|6|41|43219.33|0.02|0.06|R|F|1993-12-12|1994-02-05|1994-01-09|DELIVER IN PERSON|RAIL|ial theodolites cajole slyly. slyly unus 5190|55509|520|1|43|62973.50|0.09|0.06|A|F|1992-08-19|1992-06-10|1992-09-01|DELIVER IN PERSON|FOB|encies use fluffily unusual requests? hoc 5190|131308|6335|2|6|8035.80|0.10|0.08|A|F|1992-08-08|1992-07-14|1992-08-22|COLLECT COD|RAIL|furiously regular pinto beans. furiously i 5190|88403|3420|3|45|62613.00|0.04|0.03|A|F|1992-07-23|1992-06-16|1992-08-04|NONE|FOB|y carefully final ideas. f 5191|114713|4714|1|41|70836.11|0.00|0.08|A|F|1995-02-05|1995-02-27|1995-02-15|DELIVER IN PERSON|AIR|uests! ironic theodolites cajole care 5191|167719|236|2|40|71468.40|0.02|0.01|A|F|1995-03-31|1995-02-21|1995-04-02|NONE|AIR|nes haggle sometimes. requests eng 5191|42244|2245|3|27|32028.48|0.07|0.05|A|F|1994-12-26|1995-01-24|1995-01-14|DELIVER IN PERSON|RAIL|tructions nag bravely within the re 5191|182828|383|4|7|13375.74|0.01|0.04|A|F|1995-03-24|1995-01-30|1995-03-30|NONE|RAIL|eposits. express 5216|68007|8008|1|17|16575.00|0.04|0.06|N|O|1997-08-20|1997-11-07|1997-09-14|COLLECT COD|FOB|s according to the accounts bo 5217|79528|9529|1|50|75376.00|0.05|0.02|N|O|1995-12-26|1995-11-21|1996-01-24|DELIVER IN PERSON|MAIL|s. express, express accounts c 5217|15345|5346|2|23|28987.82|0.06|0.07|N|O|1996-01-18|1995-12-24|1996-02-10|COLLECT COD|RAIL|ven ideas. requests amo 5217|101296|6317|3|23|29837.67|0.03|0.02|N|O|1995-11-15|1995-12-17|1995-11-27|DELIVER IN PERSON|FOB|pending packages cajole ne 5217|80311|2820|4|47|60691.57|0.04|0.00|N|O|1995-11-24|1995-12-25|1995-11-25|COLLECT COD|AIR|ronic packages i 5218|82747|272|1|43|74378.82|0.05|0.04|A|F|1992-08-04|1992-09-12|1992-08-17|DELIVER IN PERSON|SHIP|k theodolites. 
express, even id 5218|124729|9754|2|33|57872.76|0.06|0.01|R|F|1992-09-16|1992-09-30|1992-09-27|NONE|TRUCK|ronic instructi 5219|134892|4893|1|2|3853.78|0.08|0.00|N|O|1997-06-26|1997-04-29|1997-07-08|TAKE BACK RETURN|FOB| blithely according to the stea 5219|118635|6169|2|20|33072.60|0.05|0.00|N|O|1997-04-20|1997-05-26|1997-05-13|COLLECT COD|FOB|e along the ironic, 5220|82721|7738|1|27|46000.44|0.10|0.04|R|F|1992-09-21|1992-08-29|1992-10-16|DELIVER IN PERSON|RAIL|s cajole blithely furiously iron 5221|103446|8467|1|24|34786.56|0.07|0.03|N|O|1995-10-04|1995-08-11|1995-10-30|COLLECT COD|REG AIR|s pinto beans sleep. sly 5221|8710|8711|2|34|55036.14|0.01|0.05|N|O|1995-09-11|1995-07-17|1995-10-10|COLLECT COD|SHIP|eans. furio 5221|179197|1715|3|16|20419.04|0.04|0.01|N|O|1995-08-29|1995-09-06|1995-09-12|TAKE BACK RETURN|TRUCK|ending request 5222|150471|8017|1|1|1521.47|0.00|0.00|A|F|1994-08-19|1994-07-16|1994-09-08|TAKE BACK RETURN|FOB|idle requests. carefully pending pinto bean 5223|44573|2086|1|24|36421.68|0.00|0.00|A|F|1994-10-03|1994-09-20|1994-10-11|TAKE BACK RETURN|TRUCK|refully bold courts besides the regular, 5223|123366|5879|2|25|34734.00|0.09|0.02|R|F|1994-07-12|1994-08-13|1994-08-01|NONE|FOB|y express ideas impress 5223|5732|3233|3|19|31116.87|0.04|0.01|R|F|1994-10-28|1994-08-26|1994-10-31|COLLECT COD|REG AIR|ntly. furiously even excuses a 5223|129873|4898|4|40|76114.80|0.01|0.04|R|F|1994-10-01|1994-09-18|1994-10-28|COLLECT COD|SHIP|kly pending 5248|80249|7774|1|39|47940.36|0.05|0.03|N|O|1995-08-10|1995-07-04|1995-09-09|TAKE BACK RETURN|MAIL|yly even accounts. spe 5248|137103|2130|2|45|51304.50|0.00|0.06|A|F|1995-05-09|1995-07-12|1995-05-27|DELIVER IN PERSON|FOB|. bold, pending foxes h 5249|49720|7233|1|31|51761.32|0.07|0.03|A|F|1994-11-21|1994-11-19|1994-12-08|NONE|REG AIR|f the excuses. furiously fin 5249|30026|2530|2|44|42064.88|0.05|0.00|A|F|1994-12-28|1994-11-29|1994-12-29|TAKE BACK RETURN|MAIL|ole furiousl 5249|31770|4274|3|13|22123.01|0.09|0.00|R|F|1994-09-27|1994-10-20|1994-10-05|DELIVER IN PERSON|SHIP|ites. finally exp 5249|145318|7833|4|29|39535.99|0.00|0.05|A|F|1994-09-16|1994-11-03|1994-10-06|NONE|TRUCK| players. f 5249|157742|258|5|12|21596.88|0.01|0.08|R|F|1994-12-28|1994-11-07|1995-01-15|COLLECT COD|MAIL|press depths could have to sleep carefu 5250|43529|1042|1|2|2945.04|0.08|0.04|N|O|1995-08-09|1995-10-10|1995-08-13|COLLECT COD|AIR|its. final pinto 5250|191534|9092|2|27|43889.31|0.10|0.05|N|O|1995-10-24|1995-09-03|1995-11-18|COLLECT COD|TRUCK|l forges are. furiously unusual pin 5251|138965|3992|1|36|72142.56|0.10|0.01|N|O|1995-07-16|1995-07-05|1995-07-28|DELIVER IN PERSON|FOB|slowly! bli 5252|140986|8529|1|13|26350.74|0.02|0.01|N|O|1996-03-02|1996-05-10|1996-03-11|NONE|FOB|boost fluffily across 5252|138978|1492|2|39|78661.83|0.06|0.05|N|O|1996-05-17|1996-04-23|1996-05-23|COLLECT COD|AIR|gular requests. 5252|194020|1578|3|9|10026.18|0.09|0.03|N|O|1996-05-30|1996-05-03|1996-06-26|TAKE BACK RETURN|RAIL|x. slyly special depos 5252|86576|1593|4|48|75003.36|0.01|0.06|N|O|1996-04-17|1996-03-19|1996-05-03|COLLECT COD|AIR|bold requests. furious 5252|67746|2759|5|24|41129.76|0.04|0.05|N|O|1996-05-11|1996-04-17|1996-05-12|COLLECT COD|REG AIR|posits after the fluffi 5252|2202|9703|6|41|45272.20|0.02|0.03|N|O|1996-03-16|1996-04-18|1996-03-17|NONE|TRUCK|ording to the blithely express somas sho 5253|30616|5623|1|35|54131.35|0.02|0.00|N|O|1995-07-23|1995-06-12|1995-08-03|DELIVER IN PERSON|AIR|ven deposits. 
careful 5253|149475|1990|2|38|57929.86|0.02|0.06|N|O|1995-08-03|1995-06-14|1995-08-27|DELIVER IN PERSON|REG AIR|onic dependencies are furiou 5253|13981|3982|3|9|17054.82|0.03|0.08|N|F|1995-06-08|1995-05-12|1995-06-23|DELIVER IN PERSON|REG AIR|lyly express deposits use furiou 5253|165076|2625|4|25|28526.75|0.04|0.03|A|F|1995-05-21|1995-06-13|1995-06-09|COLLECT COD|TRUCK|urts. even theodoli 5254|110141|142|1|35|40289.90|0.01|0.07|A|F|1992-07-28|1992-09-05|1992-08-07|COLLECT COD|REG AIR|ntegrate carefully among the pending 5254|134265|9292|2|10|12992.60|0.05|0.04|A|F|1992-11-19|1992-10-20|1992-12-15|COLLECT COD|SHIP| accounts. silent deposit 5254|191752|6791|3|32|59000.00|0.00|0.08|A|F|1992-08-10|1992-09-21|1992-08-16|NONE|RAIL|ts impress closely furi 5254|162208|4725|4|45|57159.00|0.05|0.06|A|F|1992-11-11|1992-09-01|1992-12-07|COLLECT COD|REG AIR| wake. blithely silent excuse 5254|28653|3658|5|23|36377.95|0.02|0.06|A|F|1992-08-16|1992-09-05|1992-09-15|COLLECT COD|RAIL|lyly regular accounts. furiously pendin 5254|157626|2657|6|34|57243.08|0.09|0.02|R|F|1992-08-29|1992-10-16|1992-09-20|TAKE BACK RETURN|RAIL| furiously above the furiously 5254|19583|4586|7|9|13523.22|0.09|0.03|R|F|1992-07-29|1992-10-15|1992-08-20|TAKE BACK RETURN|REG AIR| wake blithely fluff 5255|130706|8246|1|2|3473.40|0.04|0.08|N|O|1996-09-27|1996-10-04|1996-10-04|DELIVER IN PERSON|RAIL|ajole blithely fluf 5255|171139|8691|2|30|36303.90|0.04|0.08|N|O|1996-09-20|1996-08-18|1996-10-09|TAKE BACK RETURN|AIR| to the silent requests cajole b 5255|129900|7437|3|41|79125.90|0.09|0.03|N|O|1996-08-21|1996-09-24|1996-09-05|COLLECT COD|FOB|tect blithely against t 5280|96078|8588|1|16|17185.12|0.02|0.03|N|O|1998-03-29|1998-01-28|1998-04-03|TAKE BACK RETURN|SHIP| foxes are furiously. theodoli 5280|175550|585|2|46|74775.30|0.01|0.06|N|O|1998-01-04|1998-01-21|1998-02-03|TAKE BACK RETURN|FOB|efully carefully pen 5281|113555|8578|1|37|58036.35|0.05|0.02|N|O|1995-11-10|1996-01-31|1995-11-22|DELIVER IN PERSON|MAIL|ronic dependencies. fluffily final p 5281|104572|2103|2|38|59909.66|0.00|0.05|N|O|1996-02-17|1995-12-19|1996-02-29|NONE|RAIL|n asymptotes could wake about th 5281|126120|8633|3|23|26360.76|0.08|0.00|N|O|1995-12-30|1996-01-26|1996-01-23|COLLECT COD|REG AIR|. final theodolites cajole. ironic p 5281|86431|1448|4|48|68036.64|0.03|0.05|N|O|1996-01-31|1995-12-23|1996-02-08|TAKE BACK RETURN|REG AIR|ss the furiously 5281|42872|5377|5|33|59890.71|0.01|0.07|N|O|1996-03-01|1995-12-28|1996-03-05|COLLECT COD|RAIL|ly brave foxes. bold deposits above the 5282|117371|9883|1|36|49981.32|0.05|0.02|N|O|1998-05-20|1998-04-10|1998-06-14|DELIVER IN PERSON|TRUCK|re slyly accor 5282|51792|4298|2|32|55801.28|0.02|0.05|N|O|1998-03-01|1998-03-31|1998-03-03|NONE|FOB|onic deposits; furiou 5282|57236|4752|3|28|33410.44|0.03|0.06|N|O|1998-05-06|1998-04-24|1998-05-30|COLLECT COD|SHIP|fily final instruc 5283|4165|1666|1|20|21383.20|0.05|0.02|A|F|1994-09-16|1994-08-03|1994-10-15|TAKE BACK RETURN|TRUCK|al deposits? blithely even pinto beans 5283|185925|8444|2|1|2010.92|0.10|0.08|R|F|1994-06-20|1994-08-03|1994-07-01|COLLECT COD|FOB|deposits within the furio 5284|172419|9971|1|16|23862.56|0.04|0.02|N|O|1995-08-17|1995-08-23|1995-08-26|DELIVER IN PERSON|TRUCK|unts detect furiously even d 5284|43086|8095|2|24|24697.92|0.03|0.08|N|O|1995-10-21|1995-08-23|1995-10-27|COLLECT COD|AIR| haggle according 5285|192958|5478|1|31|63579.45|0.08|0.00|A|F|1994-04-17|1994-04-05|1994-05-09|NONE|RAIL|ubt. 
quickly blithe 5285|30061|5068|2|37|36669.22|0.09|0.02|R|F|1994-02-26|1994-02-18|1994-03-27|NONE|SHIP|uffily regu 5285|33846|1356|3|24|42716.16|0.02|0.04|A|F|1994-04-19|1994-04-03|1994-04-25|DELIVER IN PERSON|FOB|ess packages. quick, even deposits snooze b 5285|42130|9643|4|12|12865.56|0.05|0.06|A|F|1994-04-22|1994-04-07|1994-05-19|NONE|AIR| deposits-- quickly bold requests hag 5285|70694|695|5|1|1664.69|0.03|0.05|R|F|1994-03-14|1994-02-20|1994-04-10|COLLECT COD|TRUCK|e fluffily about the slyly special pa 5285|145672|5673|6|1|1717.67|0.06|0.01|R|F|1994-02-08|1994-04-02|1994-02-17|COLLECT COD|SHIP|ing deposits integra 5286|198566|1086|1|1|1664.56|0.01|0.07|N|O|1997-11-25|1997-11-07|1997-12-17|COLLECT COD|REG AIR|ly! furiously final pack 5286|96223|3751|2|7|8534.54|0.06|0.05|N|O|1997-10-23|1997-12-10|1997-11-20|TAKE BACK RETURN|RAIL|y express instructions sleep carefull 5286|15473|7975|3|3|4165.41|0.06|0.08|N|O|1997-12-04|1997-11-06|1997-12-09|COLLECT COD|MAIL|re fluffily 5286|39019|1523|4|6|5748.06|0.04|0.03|N|O|1997-10-15|1997-12-05|1997-11-12|COLLECT COD|RAIL|y special a 5286|185301|5302|5|38|52679.40|0.07|0.05|N|O|1997-11-29|1997-11-26|1997-12-15|TAKE BACK RETURN|SHIP|fluffily. special, ironic deposit 5286|137943|7944|6|24|47542.56|0.08|0.00|N|O|1997-09-27|1997-12-21|1997-09-30|COLLECT COD|TRUCK|s. express foxes of the 5287|38269|3276|1|32|38632.32|0.01|0.01|A|F|1994-01-29|1994-01-27|1994-02-08|NONE|RAIL|heodolites haggle caref 5312|60844|8363|1|27|48730.68|0.04|0.08|A|F|1995-04-20|1995-04-09|1995-04-25|COLLECT COD|SHIP|tructions cajol 5312|1004|3505|2|43|38915.00|0.05|0.08|A|F|1995-03-24|1995-05-07|1995-03-28|NONE|TRUCK|ly unusual 5313|16090|8592|1|34|34207.06|0.10|0.02|N|O|1997-08-07|1997-08-12|1997-08-24|DELIVER IN PERSON|FOB|ccording to the blithely final account 5313|12109|7112|2|17|17358.70|0.00|0.02|N|O|1997-09-02|1997-08-20|1997-09-07|NONE|SHIP|uests wake 5313|111704|6727|3|47|80637.90|0.06|0.08|N|O|1997-08-12|1997-08-18|1997-08-13|TAKE BACK RETURN|RAIL|pinto beans across the 5313|196554|4112|4|16|26408.80|0.08|0.00|N|O|1997-10-04|1997-08-02|1997-10-25|COLLECT COD|REG AIR|ckages wake carefully aga 5313|71942|6957|5|30|57418.20|0.06|0.08|N|O|1997-06-27|1997-07-18|1997-06-30|NONE|SHIP|nding packages use 5313|119930|4953|6|21|40948.53|0.05|0.05|N|O|1997-09-26|1997-09-02|1997-10-18|COLLECT COD|FOB|he blithely regular packages. quickly 5314|117209|7210|1|10|12262.00|0.07|0.05|N|O|1995-09-26|1995-07-24|1995-10-19|DELIVER IN PERSON|RAIL|latelets haggle final 5314|124781|4782|2|16|28892.48|0.00|0.04|N|O|1995-09-25|1995-07-08|1995-10-17|COLLECT COD|SHIP|hely unusual packages acc 5315|34544|2054|1|12|17742.48|0.08|0.06|R|F|1992-12-18|1993-01-16|1993-01-10|NONE|AIR|ccounts. furiously ironi 5315|178618|8619|2|39|66167.79|0.00|0.06|R|F|1992-11-09|1992-12-29|1992-12-07|NONE|SHIP|ly alongside of the ca 5316|107714|225|1|29|49929.59|0.10|0.05|R|F|1994-03-28|1994-04-29|1994-04-09|DELIVER IN PERSON|REG AIR|ckly unusual foxes bo 5316|135125|5126|2|31|35963.72|0.00|0.08|A|F|1994-04-01|1994-04-21|1994-04-12|DELIVER IN PERSON|MAIL|s. deposits cajole around t 5317|81001|8526|1|29|28478.00|0.02|0.06|A|F|1994-11-28|1994-11-27|1994-12-16|COLLECT COD|FOB|oss the carefull 5317|170294|295|2|18|24557.22|0.06|0.06|A|F|1995-01-02|1994-10-29|1995-01-16|NONE|RAIL|g to the blithely p 5317|119181|1693|3|37|44406.66|0.09|0.00|R|F|1994-09-15|1994-10-24|1994-09-23|NONE|TRUCK|totes nag theodolites. pend 5317|66431|8938|4|50|69871.50|0.09|0.01|A|F|1994-10-17|1994-10-25|1994-11-03|NONE|REG AIR|cajole furiously. 
accounts use quick 5317|94765|9784|5|19|33435.44|0.07|0.07|R|F|1994-12-15|1994-10-18|1994-12-27|NONE|MAIL|onic requests boost bli 5317|114725|7237|6|48|83506.56|0.01|0.03|A|F|1994-09-19|1994-11-25|1994-10-03|COLLECT COD|MAIL|ts about the packages cajole furio 5317|168493|6042|7|30|46844.70|0.07|0.07|A|F|1994-10-13|1994-10-31|1994-10-28|NONE|AIR|cross the attainments. slyly 5318|60046|7565|1|13|13078.52|0.10|0.04|R|F|1993-07-15|1993-06-25|1993-08-13|COLLECT COD|REG AIR|ly silent ideas. ideas haggle among the 5318|179521|9522|2|26|41613.52|0.00|0.04|R|F|1993-07-07|1993-05-23|1993-07-28|COLLECT COD|SHIP|al, express foxes. bold requests sleep alwa 5318|6173|8674|3|37|39929.29|0.07|0.05|A|F|1993-07-09|1993-06-22|1993-07-21|COLLECT COD|SHIP|ickly final deposi 5318|141228|6257|4|31|39345.82|0.01|0.04|R|F|1993-07-28|1993-05-06|1993-08-06|DELIVER IN PERSON|REG AIR|requests must sleep slyly quickly 5319|149703|7246|1|31|54333.70|0.04|0.07|N|O|1996-03-26|1996-03-07|1996-04-24|COLLECT COD|TRUCK|d carefully about the courts. fluffily spe 5319|43282|795|2|39|47785.92|0.09|0.05|N|O|1996-05-17|1996-03-14|1996-06-11|NONE|TRUCK|unts. furiously silent 5344|18176|678|1|6|6565.02|0.07|0.01|N|O|1998-08-04|1998-09-03|1998-08-11|TAKE BACK RETURN|REG AIR|ithely about the pending plate 5344|78150|658|2|37|41741.55|0.03|0.07|N|O|1998-10-09|1998-07-26|1998-11-08|NONE|TRUCK|thely express packages 5344|66249|6250|3|26|31596.24|0.02|0.06|N|O|1998-08-27|1998-08-22|1998-09-24|NONE|AIR|furiously pending, silent multipliers. 5344|38164|3171|4|21|23145.36|0.03|0.01|N|O|1998-08-31|1998-09-06|1998-09-02|NONE|MAIL|xes. furiously even pinto beans sleep f 5345|82522|5031|1|3|4513.56|0.05|0.01|N|O|1997-12-10|1997-10-03|1998-01-05|COLLECT COD|SHIP|ites wake carefully unusual 5345|145618|3161|2|2|3327.22|0.10|0.02|N|O|1997-11-18|1997-10-12|1997-12-08|NONE|MAIL|ut the slyly specia 5345|191094|6133|3|46|54514.14|0.06|0.04|N|O|1997-10-06|1997-09-27|1997-10-18|COLLECT COD|REG AIR|slyly special deposits. fin 5345|113634|1168|4|37|60962.31|0.01|0.01|N|O|1997-11-01|1997-10-09|1997-11-26|DELIVER IN PERSON|AIR| along the ironically fina 5345|33600|6104|5|22|33739.20|0.02|0.02|N|O|1997-08-27|1997-11-22|1997-09-10|TAKE BACK RETURN|MAIL|leep slyly regular fox 5346|148105|5648|1|21|24215.10|0.07|0.08|R|F|1994-03-11|1994-03-07|1994-04-04|DELIVER IN PERSON|RAIL|integrate blithely a 5346|191665|6704|2|13|22836.58|0.04|0.04|A|F|1994-02-03|1994-02-05|1994-02-09|COLLECT COD|TRUCK|y. fluffily bold accounts grow. furio 5346|108881|1392|3|7|13229.16|0.08|0.05|A|F|1994-01-30|1994-03-26|1994-01-31|DELIVER IN PERSON|SHIP|equests use carefully care 5346|161238|1239|4|35|45473.05|0.06|0.02|A|F|1994-02-09|1994-03-01|1994-02-14|TAKE BACK RETURN|FOB|nic excuses cajole entic 5346|120595|596|5|25|40389.75|0.05|0.06|R|F|1993-12-28|1994-03-19|1994-01-09|TAKE BACK RETURN|REG AIR|he ironic ideas are boldly slyly ironi 5346|32956|466|6|6|11333.70|0.08|0.04|R|F|1994-03-01|1994-02-04|1994-03-09|NONE|REG AIR|escapades sleep furiously beside the 5346|79234|4249|7|41|49742.43|0.05|0.04|R|F|1994-01-10|1994-02-15|1994-01-26|TAKE BACK RETURN|REG AIR|fully close instructi 5347|82283|7300|1|48|60733.44|0.04|0.08|A|F|1995-02-25|1995-04-26|1995-03-26|NONE|SHIP|equests are slyly. blithely regu 5347|123439|8464|2|47|68734.21|0.02|0.01|N|F|1995-06-05|1995-03-29|1995-06-28|COLLECT COD|AIR|across the slyly bol 5347|22862|7867|3|34|60685.24|0.06|0.00|A|F|1995-05-18|1995-04-04|1995-06-02|DELIVER IN PERSON|SHIP| pending deposits. 
fluffily regular senti 5347|39456|9457|4|4|5581.80|0.06|0.03|A|F|1995-03-24|1995-04-03|1995-04-01|NONE|SHIP|ldly pending asymptotes ki 5347|130711|5738|5|21|36575.91|0.08|0.04|R|F|1995-04-01|1995-04-16|1995-04-23|NONE|SHIP|sly slyly final requests. careful 5347|55468|479|6|6|8540.76|0.06|0.02|A|F|1995-04-11|1995-04-14|1995-05-02|NONE|TRUCK|lly unusual ideas. sl 5347|49567|2072|7|18|27298.08|0.01|0.01|N|F|1995-05-24|1995-05-07|1995-06-19|NONE|FOB|he ideas among the requests 5348|68446|5965|1|21|29703.24|0.10|0.04|N|O|1997-12-11|1997-12-24|1997-12-28|NONE|REG AIR| regular theodolites haggle car 5348|155865|896|2|31|59546.66|0.07|0.02|N|O|1998-01-04|1997-12-09|1998-01-17|COLLECT COD|RAIL|are finally 5348|16638|6639|3|16|24874.08|0.06|0.08|N|O|1998-02-28|1997-12-25|1998-03-12|DELIVER IN PERSON|AIR|uriously thin pinto beans 5348|19183|1685|4|7|7715.26|0.04|0.00|N|O|1998-01-29|1997-12-20|1998-02-10|DELIVER IN PERSON|RAIL|even foxes. epitap 5348|1818|4319|5|37|63632.97|0.06|0.07|N|O|1997-12-01|1998-02-02|1997-12-07|NONE|FOB|y according to the carefully pending acco 5348|142875|5390|6|14|26850.18|0.06|0.05|N|O|1997-12-16|1998-01-12|1997-12-24|COLLECT COD|FOB|en pinto beans. somas cajo 5349|155065|5066|1|19|21281.14|0.06|0.01|N|O|1996-09-11|1996-11-18|1996-09-22|TAKE BACK RETURN|FOB|endencies use whithout the special 5349|167110|4659|2|14|16479.54|0.06|0.00|N|O|1996-11-07|1996-11-17|1996-11-20|TAKE BACK RETURN|TRUCK|fully regular 5349|3482|3483|3|6|8312.88|0.10|0.01|N|O|1996-12-30|1996-10-08|1997-01-01|DELIVER IN PERSON|MAIL|inal deposits affix carefully 5350|121045|1046|1|19|20254.76|0.02|0.06|R|F|1993-10-20|1993-11-15|1993-11-17|DELIVER IN PERSON|RAIL|romise slyly alongsi 5350|190829|5868|2|44|84472.08|0.04|0.06|R|F|1993-10-30|1993-11-23|1993-11-25|DELIVER IN PERSON|AIR|p above the ironic, pending dep 5350|53095|8106|3|12|12577.08|0.10|0.04|A|F|1994-01-30|1993-11-21|1994-02-15|COLLECT COD|REG AIR| cajole. even instructions haggle. blithe 5350|154932|9963|4|7|13908.51|0.08|0.00|R|F|1993-10-19|1993-12-28|1993-11-04|NONE|SHIP|alongside of th 5350|128746|8747|5|27|47917.98|0.07|0.04|A|F|1993-11-25|1993-12-27|1993-12-08|COLLECT COD|TRUCK|es. blithe theodolites haggl 5351|6095|1096|1|36|36039.24|0.06|0.05|N|O|1998-07-27|1998-07-06|1998-08-25|NONE|MAIL|ss the ironic, regular asymptotes cajole 5351|32041|4545|2|47|45732.88|0.04|0.01|N|O|1998-05-30|1998-08-08|1998-06-23|DELIVER IN PERSON|REG AIR|s. grouches cajole. sile 5351|105327|2858|3|2|2664.64|0.00|0.02|N|O|1998-05-12|1998-07-15|1998-05-24|NONE|TRUCK|g accounts wake furiously slyly even dolph 5376|60778|8297|1|42|73028.34|0.10|0.04|A|F|1994-09-20|1994-08-30|1994-09-29|TAKE BACK RETURN|REG AIR|y even asymptotes. courts are unusual pa 5376|90990|6009|2|44|87163.56|0.05|0.02|R|F|1994-08-30|1994-08-05|1994-09-07|COLLECT COD|AIR|ithe packages detect final theodolites. f 5376|64871|4872|3|18|33045.66|0.02|0.08|A|F|1994-10-29|1994-09-13|1994-11-01|COLLECT COD|MAIL| accounts boo 5377|78172|3187|1|40|46006.80|0.00|0.04|N|O|1997-05-21|1997-06-15|1997-05-26|DELIVER IN PERSON|AIR|lithely ironic theodolites are care 5377|29316|6823|2|17|21170.27|0.09|0.00|N|O|1997-07-05|1997-05-25|1997-07-22|COLLECT COD|RAIL|dencies. 
carefully regular re 5377|102464|7485|3|23|33728.58|0.07|0.08|N|O|1997-06-26|1997-07-13|1997-07-08|COLLECT COD|RAIL| silent wa 5377|103123|5634|4|12|13513.44|0.05|0.07|N|O|1997-05-08|1997-06-15|1997-05-15|DELIVER IN PERSON|MAIL| ironic, final 5377|172292|4810|5|27|36835.83|0.08|0.02|N|O|1997-07-11|1997-06-12|1997-08-08|TAKE BACK RETURN|MAIL|press theodolites. e 5378|154773|7289|1|39|71283.03|0.07|0.04|R|F|1992-11-25|1992-12-22|1992-12-02|COLLECT COD|AIR|ts are quickly around the 5378|61083|6096|2|46|48027.68|0.01|0.04|A|F|1993-02-17|1993-01-20|1993-02-26|COLLECT COD|REG AIR|into beans sleep. fu 5378|9760|7261|3|18|30055.68|0.02|0.03|R|F|1992-11-25|1992-12-21|1992-12-10|COLLECT COD|FOB|onic accounts was bold, 5379|198787|1307|1|40|75431.20|0.01|0.08|N|O|1995-10-01|1995-10-19|1995-10-30|COLLECT COD|MAIL|carefully final accounts haggle blithely. 5380|181291|1292|1|14|19212.06|0.10|0.01|N|O|1997-12-18|1997-12-03|1998-01-06|NONE|RAIL|final platelets. 5380|146343|3886|2|10|13893.40|0.09|0.05|N|O|1997-11-24|1998-01-10|1997-12-21|COLLECT COD|AIR|refully pending deposits. special, even t 5380|183714|6233|3|40|71908.40|0.02|0.08|N|O|1997-12-30|1997-11-27|1998-01-09|DELIVER IN PERSON|SHIP|ar asymptotes. blithely r 5380|65789|802|4|6|10528.68|0.09|0.05|N|O|1997-11-15|1998-01-08|1997-12-11|COLLECT COD|MAIL|es. fluffily brave accounts across t 5380|106753|6754|5|48|84468.00|0.04|0.03|N|O|1997-12-01|1997-12-28|1997-12-05|DELIVER IN PERSON|FOB|encies haggle car 5381|187310|9829|1|37|51700.47|0.04|0.01|A|F|1993-04-08|1993-04-07|1993-04-12|DELIVER IN PERSON|SHIP|ly final deposits print carefully. unusua 5381|110002|5025|2|48|48576.00|0.04|0.03|R|F|1993-04-22|1993-04-17|1993-05-14|TAKE BACK RETURN|FOB|luffily spec 5381|191097|1098|3|13|15445.17|0.08|0.03|R|F|1993-05-09|1993-04-26|1993-05-25|NONE|FOB|s after the f 5381|167208|4757|4|17|21678.40|0.05|0.05|R|F|1993-05-25|1993-04-14|1993-06-17|NONE|MAIL|ckly final requests haggle qui 5381|62375|9894|5|49|65531.13|0.06|0.02|R|F|1993-05-08|1993-04-07|1993-06-03|NONE|FOB| accounts. regular, regula 5381|131056|6083|6|33|35872.65|0.10|0.00|A|F|1993-04-09|1993-04-03|1993-04-22|DELIVER IN PERSON|SHIP|ly special deposits 5381|43349|862|7|31|40062.54|0.04|0.05|A|F|1993-04-10|1993-03-22|1993-04-13|TAKE BACK RETURN|MAIL|the carefully expre 5382|152952|7983|1|34|68168.30|0.03|0.03|R|F|1992-02-22|1992-02-18|1992-03-02|DELIVER IN PERSON|FOB|gular accounts. even accounts integrate 5382|54879|7385|2|13|23840.31|0.09|0.06|A|F|1992-01-16|1992-03-12|1992-02-06|NONE|MAIL|eodolites. final foxes 5382|148097|8098|3|3|3435.27|0.10|0.06|A|F|1992-03-22|1992-03-06|1992-04-19|TAKE BACK RETURN|AIR|efully unusua 5382|61771|6784|4|20|34655.40|0.08|0.02|A|F|1992-03-26|1992-02-17|1992-04-15|DELIVER IN PERSON|FOB|carefully regular accounts. slyly ev 5382|176212|6213|5|14|18034.94|0.02|0.02|A|F|1992-04-05|1992-04-05|1992-05-04|TAKE BACK RETURN|FOB| brave platelets. 
ev 5382|179706|4741|6|6|10714.20|0.02|0.01|A|F|1992-03-07|1992-04-02|1992-03-18|TAKE BACK RETURN|FOB|y final foxes by the sl 5382|104356|1887|7|48|65296.80|0.05|0.05|A|F|1992-02-14|1992-03-19|1992-02-25|DELIVER IN PERSON|REG AIR|nts integrate quickly ca 5383|95116|5117|1|12|13333.32|0.04|0.00|N|O|1995-07-02|1995-08-16|1995-08-01|TAKE BACK RETURN|AIR|y regular instructi 5408|101941|6962|1|2|3885.88|0.07|0.04|R|F|1992-08-21|1992-10-03|1992-08-28|DELIVER IN PERSON|MAIL|cross the dolphins h 5408|117718|230|2|35|60749.85|0.04|0.05|R|F|1992-10-02|1992-10-17|1992-10-13|TAKE BACK RETURN|AIR|thely ironic requests alongside of the sl 5408|75127|7635|3|34|37472.08|0.10|0.02|A|F|1992-10-22|1992-08-25|1992-11-16|DELIVER IN PERSON|TRUCK|requests detect blithely a 5408|53108|5614|4|48|50932.80|0.04|0.05|R|F|1992-09-30|1992-08-27|1992-10-27|NONE|TRUCK|. furiously regular 5408|182529|7566|5|8|12892.16|0.03|0.07|A|F|1992-10-24|1992-09-06|1992-11-03|NONE|AIR|thely regular hocke 5409|193319|877|1|27|38132.37|0.01|0.02|A|F|1992-02-14|1992-03-18|1992-02-23|DELIVER IN PERSON|AIR|eodolites 5409|103573|3574|2|38|59909.66|0.01|0.02|A|F|1992-03-17|1992-03-29|1992-04-13|NONE|REG AIR|onic, regular accounts! blithely even 5409|140347|7890|3|17|23584.78|0.07|0.00|A|F|1992-01-13|1992-04-05|1992-01-20|DELIVER IN PERSON|AIR|cross the sil 5409|403|7904|4|9|11730.60|0.07|0.03|A|F|1992-02-15|1992-04-02|1992-02-28|DELIVER IN PERSON|AIR| unusual, unusual reques 5409|158141|8142|5|37|44368.18|0.06|0.04|R|F|1992-05-07|1992-02-10|1992-05-20|DELIVER IN PERSON|FOB|ously regular packages. packages 5409|63902|6409|6|14|26122.60|0.03|0.08|R|F|1992-02-14|1992-03-26|1992-02-29|DELIVER IN PERSON|AIR|osits cajole furiously 5410|116768|6769|1|48|85668.48|0.04|0.08|N|O|1998-09-27|1998-09-11|1998-10-01|TAKE BACK RETURN|AIR| about the slyly even courts. quickly regul 5410|104411|6922|2|41|58031.81|0.01|0.07|N|O|1998-08-25|1998-10-20|1998-09-01|DELIVER IN PERSON|REG AIR|sly. slyly ironic theodolites 5410|28027|530|3|40|38200.80|0.07|0.08|N|O|1998-11-17|1998-10-02|1998-11-27|COLLECT COD|TRUCK|iously special accounts are along th 5410|49503|2008|4|8|11620.00|0.05|0.04|N|O|1998-09-12|1998-10-22|1998-09-22|DELIVER IN PERSON|TRUCK|ly. fluffily ironic platelets alon 5411|95432|451|1|17|24266.31|0.05|0.01|N|O|1997-07-22|1997-07-14|1997-07-30|TAKE BACK RETURN|REG AIR| slyly slyly even deposits. carefully b 5411|112029|4541|2|10|10410.20|0.08|0.01|N|O|1997-07-19|1997-08-04|1997-07-26|TAKE BACK RETURN|MAIL|nding, special foxes unw 5411|55057|5058|3|5|5060.25|0.10|0.01|N|O|1997-09-12|1997-08-03|1997-09-23|DELIVER IN PERSON|FOB| bold, ironic theodo 5411|128590|3615|4|15|24278.85|0.08|0.05|N|O|1997-07-01|1997-07-15|1997-07-07|COLLECT COD|RAIL|attainments sleep slyly ironic 5411|3016|3017|5|19|17461.19|0.05|0.08|N|O|1997-05-25|1997-07-30|1997-06-19|COLLECT COD|RAIL|ial accounts according to the f 5412|53078|8089|1|2|2062.14|0.03|0.07|N|O|1998-04-14|1998-04-02|1998-04-19|TAKE BACK RETURN|REG AIR| sleep above the furiou 5412|65168|2687|2|48|54391.68|0.01|0.08|N|O|1998-02-22|1998-03-28|1998-03-18|TAKE BACK RETURN|TRUCK|s. slyly final packages cajole blithe 5412|73678|1200|3|31|51201.77|0.05|0.08|N|O|1998-03-23|1998-04-17|1998-04-10|NONE|SHIP|t the accounts detect slyly about the c 5412|96319|1338|4|26|34198.06|0.02|0.08|N|O|1998-01-22|1998-04-19|1998-02-17|NONE|AIR| the blithel 5413|125457|5458|1|48|71157.60|0.02|0.08|N|O|1998-01-25|1997-11-20|1998-02-22|COLLECT COD|SHIP| theodolites. 
furiously ironic instr 5413|141805|4320|2|37|68331.60|0.02|0.07|N|O|1997-12-08|1998-01-01|1997-12-13|COLLECT COD|TRUCK|usly bold instructions affix idly unusual, 5413|110854|5877|3|36|67134.60|0.02|0.07|N|O|1997-12-12|1997-11-28|1997-12-25|NONE|TRUCK|ular, regular ideas mold! final requests 5413|109216|1727|4|22|26954.62|0.02|0.08|N|O|1997-11-10|1997-11-24|1997-11-22|DELIVER IN PERSON|FOB|posits. quick 5413|188636|6191|5|5|8623.15|0.10|0.01|N|O|1997-11-28|1997-11-24|1997-12-05|NONE|RAIL|tes are al 5413|189019|9020|6|32|35456.32|0.02|0.03|N|O|1997-10-28|1998-01-03|1997-11-10|NONE|TRUCK|refully special package 5413|30016|2520|7|32|30272.32|0.06|0.07|N|O|1997-10-23|1997-12-09|1997-11-17|NONE|TRUCK|he quickly ironic ideas. slyly ironic ide 5414|67051|7052|1|40|40722.00|0.07|0.06|R|F|1993-04-07|1993-05-18|1993-04-23|COLLECT COD|AIR|ts are evenly across 5414|122106|4619|2|48|54148.80|0.06|0.07|R|F|1993-06-08|1993-05-14|1993-07-06|DELIVER IN PERSON|FOB| silent dolphins; fluffily regular tithe 5414|34768|2278|3|23|39163.48|0.10|0.00|A|F|1993-07-22|1993-05-26|1993-08-08|COLLECT COD|MAIL|e bold, express dolphins. spec 5414|132077|2078|4|15|16636.05|0.06|0.08|R|F|1993-05-18|1993-06-09|1993-05-27|DELIVER IN PERSON|REG AIR|e slyly about the carefully regula 5414|8372|873|5|19|24327.03|0.01|0.05|R|F|1993-04-06|1993-05-12|1993-05-02|DELIVER IN PERSON|RAIL|ffily silent theodolites na 5414|97119|2138|6|28|31251.08|0.10|0.05|A|F|1993-03-27|1993-06-04|1993-04-07|TAKE BACK RETURN|SHIP|ts sleep sl 5415|101746|4257|1|44|76900.56|0.00|0.06|A|F|1992-08-19|1992-10-26|1992-09-17|TAKE BACK RETURN|TRUCK| requests. unusual theodolites sleep agains 5415|30220|7730|2|16|18403.52|0.08|0.00|A|F|1992-09-29|1992-09-12|1992-10-10|NONE|AIR|pinto beans haggle furiously 5415|101066|6087|3|6|6402.36|0.10|0.03|A|F|1992-10-28|1992-09-09|1992-11-20|COLLECT COD|RAIL|ges around the fur 5415|15099|5100|4|43|43605.87|0.01|0.02|R|F|1992-11-17|1992-09-14|1992-12-14|DELIVER IN PERSON|SHIP|yly blithely stealthy deposits. carefu 5415|160014|7563|5|11|11814.11|0.00|0.01|R|F|1992-11-22|1992-10-19|1992-12-10|DELIVER IN PERSON|SHIP|gle among t 5415|143712|6227|6|46|80762.66|0.03|0.03|R|F|1992-08-25|1992-09-10|1992-09-22|DELIVER IN PERSON|REG AIR|ve the fluffily 5415|152967|2968|7|11|22219.56|0.08|0.06|A|F|1992-08-21|1992-09-04|1992-08-23|NONE|TRUCK|unts maintain carefully unusual 5440|114100|9123|1|3|3342.30|0.02|0.08|N|O|1997-02-18|1997-02-28|1997-03-15|NONE|SHIP|y. accounts haggle along the blit 5441|163452|8485|1|3|4546.35|0.00|0.02|R|F|1994-08-12|1994-10-14|1994-09-01|TAKE BACK RETURN|REG AIR|are. unusual, 5441|130764|765|2|49|87943.24|0.02|0.03|A|F|1994-09-23|1994-09-22|1994-10-22|NONE|FOB|ording to the furio 5441|143016|559|3|33|34947.33|0.09|0.02|R|F|1994-10-09|1994-10-06|1994-10-30|DELIVER IN PERSON|TRUCK|ges. final instruction 5441|66124|1137|4|47|51235.64|0.07|0.08|R|F|1994-11-19|1994-10-16|1994-12-16|TAKE BACK RETURN|FOB|ounts wake slyly about the express instr 5442|41917|6926|1|16|29742.56|0.00|0.00|N|O|1998-04-12|1998-03-03|1998-05-04|TAKE BACK RETURN|RAIL|r packages. accounts haggle dependencies. f 5442|87111|4636|2|45|49414.95|0.08|0.01|N|O|1998-03-30|1998-02-24|1998-04-18|TAKE BACK RETURN|AIR|old slyly after 5442|60318|5331|3|12|15339.72|0.01|0.08|N|O|1998-04-15|1998-03-18|1998-05-05|DELIVER IN PERSON|TRUCK|fully final 5442|157234|7235|4|21|27115.83|0.07|0.06|N|O|1998-03-13|1998-02-19|1998-04-06|COLLECT COD|MAIL|ffily furiously ironic theodolites. 
furio 5442|15990|5991|5|25|47649.75|0.04|0.00|N|O|1998-03-29|1998-02-13|1998-04-13|TAKE BACK RETURN|REG AIR|ake furiously. slyly express th 5442|143507|1050|6|26|40313.00|0.08|0.07|N|O|1998-03-21|1998-03-21|1998-03-25|TAKE BACK RETURN|AIR|have to sleep furiously bold ideas. blith 5443|177288|7289|1|14|19113.92|0.02|0.00|N|O|1996-10-27|1996-11-11|1996-11-21|DELIVER IN PERSON|RAIL|s after the regular, regular deposits hag 5443|71326|1327|2|39|50595.48|0.03|0.07|N|O|1996-11-01|1996-11-30|1996-11-19|NONE|RAIL|gage carefully across the furiously 5443|159719|4750|3|25|44467.75|0.05|0.00|N|O|1996-12-07|1997-01-08|1997-01-05|NONE|FOB|use carefully above the pinto bea 5443|190188|5227|4|6|7669.08|0.05|0.02|N|O|1996-11-17|1996-12-03|1996-11-30|TAKE BACK RETURN|AIR|p fluffily foxe 5443|82716|5225|5|40|67948.40|0.03|0.03|N|O|1997-01-28|1996-12-10|1997-02-13|NONE|FOB|n courts. special re 5444|185509|3064|1|21|33484.50|0.01|0.07|A|F|1995-04-11|1995-04-25|1995-04-21|DELIVER IN PERSON|RAIL|ar packages haggle above th 5444|42182|7191|2|40|44967.20|0.05|0.08|N|O|1995-07-09|1995-04-25|1995-07-19|COLLECT COD|TRUCK|ously bold ideas. instructions wake slyl 5444|149858|7401|3|40|76314.00|0.08|0.01|A|F|1995-04-06|1995-05-08|1995-05-06|DELIVER IN PERSON|AIR| even packages. 5444|58950|3961|4|33|62995.35|0.05|0.04|N|O|1995-06-24|1995-04-24|1995-07-13|DELIVER IN PERSON|SHIP|ut the courts cajole blithely excuses 5444|170692|8244|5|21|37016.49|0.04|0.00|R|F|1995-05-05|1995-05-25|1995-05-29|TAKE BACK RETURN|REG AIR|aves serve sly 5444|19026|4029|6|21|19845.42|0.07|0.01|A|F|1995-03-30|1995-05-01|1995-03-31|COLLECT COD|AIR|furiously even theodolites. 5445|89126|4143|1|33|36798.96|0.08|0.07|A|F|1993-10-21|1993-10-14|1993-10-28|DELIVER IN PERSON|REG AIR|ests. final instructions 5445|130119|120|2|12|13789.32|0.09|0.08|R|F|1993-11-02|1993-09-05|1993-11-26|COLLECT COD|FOB| slyly pending pinto beans was slyly al 5445|102859|7880|3|46|85645.10|0.04|0.07|A|F|1993-10-06|1993-09-15|1993-10-28|DELIVER IN PERSON|RAIL|old depend 5445|148677|8678|4|10|17256.70|0.08|0.06|A|F|1993-09-16|1993-10-05|1993-10-01|NONE|TRUCK|ncies abou 5445|12496|7499|5|14|19718.86|0.00|0.02|R|F|1993-11-19|1993-10-18|1993-12-07|NONE|RAIL| requests. bravely i 5446|189366|4403|1|27|39294.72|0.05|0.07|R|F|1994-07-21|1994-08-25|1994-08-17|TAKE BACK RETURN|RAIL|ously across the quic 5447|98536|1046|1|31|47570.43|0.09|0.03|N|O|1996-07-14|1996-05-07|1996-07-17|COLLECT COD|SHIP| foxes sleep. blithely unusual accounts det 5472|58956|8957|1|27|51703.65|0.09|0.06|A|F|1993-08-04|1993-07-07|1993-09-03|COLLECT COD|TRUCK|fily pending attainments. unus 5472|67697|5216|2|28|46611.32|0.00|0.03|A|F|1993-07-28|1993-05-28|1993-08-11|TAKE BACK RETURN|FOB|ffily pendin 5472|177295|2330|3|45|61753.05|0.06|0.02|R|F|1993-06-05|1993-05-14|1993-06-10|NONE|TRUCK| idle packages. furi 5472|183690|8727|4|37|65626.53|0.07|0.05|R|F|1993-06-15|1993-07-03|1993-07-09|DELIVER IN PERSON|RAIL|egrate carefully dependencies. 5472|74051|4052|5|40|41002.00|0.02|0.05|A|F|1993-04-13|1993-07-04|1993-05-04|NONE|REG AIR|e requests detect furiously. ruthlessly un 5472|166920|4469|6|39|77489.88|0.02|0.03|R|F|1993-04-18|1993-07-10|1993-05-12|TAKE BACK RETURN|MAIL|uriously carefully 5472|14390|1894|7|1|1304.39|0.03|0.02|A|F|1993-04-14|1993-06-28|1993-04-16|NONE|RAIL|s use furiou 5473|47878|383|1|9|16432.83|0.03|0.07|R|F|1992-06-03|1992-05-30|1992-06-09|TAKE BACK RETURN|AIR| excuses sleep blithely! 
regular dep 5473|69115|9116|2|27|29270.97|0.01|0.03|A|F|1992-04-06|1992-04-26|1992-04-29|TAKE BACK RETURN|MAIL|the deposits. warthogs wake fur 5473|14473|1977|3|33|45786.51|0.09|0.00|R|F|1992-05-18|1992-06-10|1992-06-13|TAKE BACK RETURN|MAIL|efully above the even, 5474|183763|3764|1|38|70176.88|0.01|0.08|A|F|1992-07-15|1992-07-16|1992-07-20|NONE|REG AIR| slyly beneath 5474|93631|1159|2|10|16246.30|0.06|0.00|R|F|1992-08-08|1992-08-10|1992-08-24|TAKE BACK RETURN|TRUCK|pinto bean 5474|47831|2840|3|31|55143.73|0.00|0.08|R|F|1992-08-02|1992-07-12|1992-08-04|NONE|TRUCK|the furiously express ideas. speci 5474|89506|4523|4|46|68793.00|0.03|0.04|A|F|1992-06-07|1992-07-11|1992-06-22|NONE|TRUCK|nstructions. furio 5475|182422|9977|1|10|15044.20|0.09|0.08|N|O|1996-07-19|1996-08-22|1996-07-23|COLLECT COD|AIR|ding to the deposits wake fina 5476|47660|2669|1|13|20899.58|0.01|0.04|N|O|1997-12-27|1997-12-08|1997-12-29|COLLECT COD|TRUCK|iously special ac 5476|19679|2181|2|17|27177.39|0.10|0.01|N|O|1998-02-02|1998-01-28|1998-02-14|COLLECT COD|FOB|ng dependencies until the f 5477|79222|6744|1|20|24024.40|0.03|0.01|N|O|1998-03-21|1998-02-09|1998-04-07|TAKE BACK RETURN|SHIP|platelets about the ironic 5477|76155|8663|2|21|23754.15|0.03|0.00|N|O|1998-01-28|1998-02-15|1998-02-24|TAKE BACK RETURN|SHIP|blate slyly. silent 5477|133790|3791|3|31|56537.49|0.04|0.01|N|O|1998-01-11|1998-01-30|1998-02-04|DELIVER IN PERSON|MAIL| special Tiresias cajole furiously. pending 5477|192129|7168|4|16|19537.92|0.00|0.01|N|O|1998-03-07|1998-03-12|1998-04-06|COLLECT COD|RAIL|regular, s 5477|95742|761|5|23|39968.02|0.00|0.06|N|O|1998-01-04|1998-02-23|1998-01-24|NONE|REG AIR|telets wake blithely ab 5477|120319|2832|6|19|25446.89|0.10|0.03|N|O|1998-02-03|1998-01-30|1998-03-04|TAKE BACK RETURN|MAIL|ost carefully packages. 5478|7283|7284|1|39|46420.92|0.09|0.06|N|O|1996-08-19|1996-06-25|1996-09-08|DELIVER IN PERSON|SHIP|s. furiously 5478|1079|3580|2|47|46063.29|0.10|0.01|N|O|1996-08-15|1996-07-12|1996-08-31|NONE|RAIL| instructions; slyly even accounts hagg 5478|118847|1359|3|25|46646.00|0.09|0.07|N|O|1996-06-08|1996-07-12|1996-07-07|NONE|TRUCK|unusual, pending requests haggle accoun 5479|137771|285|1|50|90438.50|0.02|0.02|A|F|1993-12-24|1994-02-14|1994-01-18|DELIVER IN PERSON|MAIL|ironic gifts. even dependencies sno 5479|103906|3907|2|19|36288.10|0.05|0.03|A|F|1994-01-22|1994-03-07|1994-02-11|DELIVER IN PERSON|SHIP|arefully bo 5504|67102|2115|1|4|4276.40|0.10|0.07|A|F|1993-04-30|1993-03-01|1993-05-22|DELIVER IN PERSON|AIR|into beans boost. 5504|176056|6057|2|7|7924.35|0.03|0.05|R|F|1993-04-25|1993-03-15|1993-05-06|NONE|TRUCK|packages detect furiously express reques 5504|159600|7146|3|29|48128.40|0.05|0.03|A|F|1993-01-28|1993-02-13|1993-02-27|NONE|SHIP|ajole carefully. care 5505|24501|2008|1|43|61296.50|0.07|0.01|N|O|1997-12-30|1997-11-28|1998-01-09|TAKE BACK RETURN|TRUCK|y alongside of the special requests. 
5505|181434|8989|2|33|50009.19|0.05|0.08|N|O|1998-01-11|1997-11-11|1998-01-30|TAKE BACK RETURN|AIR|ithely unusual excuses integrat 5505|154641|9672|3|10|16956.40|0.06|0.01|N|O|1997-10-28|1997-11-27|1997-10-29|DELIVER IN PERSON|AIR| furiously special asym 5505|39422|4429|4|18|24505.56|0.04|0.04|N|O|1997-10-25|1997-12-12|1997-10-30|TAKE BACK RETURN|RAIL| to the quickly express pac 5505|161992|7025|5|46|94483.54|0.05|0.00|N|O|1998-01-06|1997-11-04|1998-02-04|TAKE BACK RETURN|SHIP|usly ironic dependencies haggle across 5506|139309|9310|1|2|2696.60|0.00|0.03|R|F|1994-02-04|1994-01-13|1994-02-17|COLLECT COD|MAIL|onic theodolites are fluffil 5506|159024|9025|2|6|6498.12|0.07|0.06|R|F|1994-02-21|1994-01-30|1994-02-27|DELIVER IN PERSON|MAIL|hely according to the furiously unusua 5507|9600|4601|1|23|34720.80|0.05|0.04|N|O|1998-09-04|1998-07-04|1998-09-18|TAKE BACK RETURN|AIR|ously slow packages poach whithout the 5507|137601|7602|2|48|78652.80|0.03|0.01|N|O|1998-08-03|1998-08-10|1998-08-24|DELIVER IN PERSON|AIR|yly idle deposits. final, final fox 5507|44676|7181|3|4|6482.68|0.04|0.06|N|O|1998-06-06|1998-07-02|1998-06-27|TAKE BACK RETURN|RAIL|into beans are 5507|66850|9357|4|22|39970.70|0.07|0.01|N|O|1998-07-08|1998-08-10|1998-07-22|DELIVER IN PERSON|TRUCK|gular ideas. carefully unu 5507|131412|1413|5|48|69283.68|0.06|0.01|N|O|1998-07-21|1998-07-15|1998-07-31|DELIVER IN PERSON|SHIP|uriously regular acc 5508|116947|4481|1|4|7855.76|0.10|0.04|N|O|1996-09-01|1996-08-02|1996-09-17|COLLECT COD|AIR|fluffily about the even 5509|196663|1702|1|3|5278.98|0.03|0.02|A|F|1994-06-14|1994-05-11|1994-06-17|NONE|SHIP| quickly fin 5509|98560|6088|2|17|26495.52|0.03|0.07|R|F|1994-07-01|1994-06-30|1994-07-31|COLLECT COD|AIR|ccounts wake ar 5509|92611|139|3|30|48108.30|0.04|0.04|A|F|1994-07-23|1994-06-01|1994-08-08|NONE|AIR|counts haggle pinto beans. furiously 5509|99849|4868|4|45|83197.80|0.00|0.07|A|F|1994-07-24|1994-05-28|1994-08-20|COLLECT COD|AIR|counts sleep. f 5509|155719|3265|5|35|62114.85|0.04|0.03|A|F|1994-04-17|1994-06-29|1994-04-24|COLLECT COD|RAIL|c accounts. ca 5510|15828|3332|1|8|13950.56|0.01|0.01|A|F|1993-03-16|1993-03-29|1993-03-24|DELIVER IN PERSON|FOB|n packages boost sly 5510|19300|6804|2|46|56087.80|0.02|0.07|A|F|1993-03-12|1993-02-09|1993-03-19|NONE|TRUCK|silent packages cajole doggedly regular 5510|161054|1055|3|47|52407.35|0.03|0.01|A|F|1993-01-20|1993-03-25|1993-02-15|DELIVER IN PERSON|SHIP|riously even requests. slyly bold accou 5510|23375|882|4|29|37652.73|0.09|0.08|A|F|1993-02-28|1993-03-28|1993-03-12|COLLECT COD|AIR|lithely fluffily ironic req 5511|164477|6994|1|16|24663.52|0.10|0.05|A|F|1995-02-02|1995-01-06|1995-02-19|TAKE BACK RETURN|RAIL|thely bold theodolites 5511|164130|1679|2|31|37018.03|0.09|0.01|A|F|1995-02-23|1995-01-21|1995-03-02|COLLECT COD|REG AIR|gular excuses. fluffily even pinto beans c 5511|127764|277|3|49|87796.24|0.05|0.05|R|F|1994-12-21|1995-01-27|1994-12-26|NONE|REG AIR|bout the requests. theodolites 5511|121474|3987|4|4|5981.88|0.08|0.02|R|F|1994-12-28|1995-01-16|1995-01-24|TAKE BACK RETURN|RAIL|lphins. carefully blithe de 5511|8948|1449|5|23|42709.62|0.10|0.07|A|F|1995-03-11|1995-01-21|1995-03-27|TAKE BACK RETURN|TRUCK|ing dugouts 5511|187419|2456|6|5|7532.05|0.08|0.05|R|F|1994-12-29|1995-01-16|1995-01-24|DELIVER IN PERSON|MAIL|al theodolites. blithely final de 5511|142696|239|7|23|39989.87|0.02|0.07|R|F|1995-02-03|1995-01-05|1995-02-18|COLLECT COD|REG AIR|ully deposits. 
warthogs hagg 5536|89717|7242|1|14|23893.94|0.08|0.06|N|O|1998-05-18|1998-05-08|1998-06-05|COLLECT COD|MAIL|instructions sleep 5536|61023|3530|2|20|19680.40|0.08|0.04|N|O|1998-05-08|1998-05-10|1998-05-31|DELIVER IN PERSON|REG AIR|equests mo 5536|196848|9368|3|35|68069.40|0.07|0.02|N|O|1998-05-19|1998-06-08|1998-06-05|NONE|MAIL|c, final theo 5536|8827|8828|4|30|52074.60|0.05|0.07|N|O|1998-04-15|1998-05-23|1998-05-03|NONE|FOB|arefully regular theodolites according 5536|140844|845|5|11|20733.24|0.02|0.08|N|O|1998-03-18|1998-05-12|1998-03-28|TAKE BACK RETURN|FOB| snooze furio 5537|44015|9024|1|10|9590.10|0.05|0.08|N|O|1997-01-13|1996-12-25|1997-01-28|TAKE BACK RETURN|AIR| sleep carefully slyly bold depos 5537|149459|7002|2|15|22626.75|0.07|0.04|N|O|1997-01-13|1996-12-25|1997-01-27|COLLECT COD|AIR|eposits. permanently pending packag 5537|150896|5927|3|39|75928.71|0.03|0.00|N|O|1996-12-17|1996-11-08|1997-01-15|COLLECT COD|REG AIR| slyly bold packages are. qu 5537|96020|3548|4|38|38608.76|0.01|0.00|N|O|1996-11-06|1996-11-23|1996-11-12|TAKE BACK RETURN|MAIL|s above the carefully ironic deposits 5538|153301|8332|1|42|56880.60|0.05|0.00|A|F|1994-04-08|1994-03-17|1994-05-05|DELIVER IN PERSON|REG AIR|vely ironic accounts. furiously unusual acc 5538|120083|84|2|4|4412.32|0.02|0.03|R|F|1994-03-21|1994-02-17|1994-04-11|TAKE BACK RETURN|REG AIR|ithely along the c 5538|18521|1023|3|38|54701.76|0.03|0.06|R|F|1994-03-17|1994-02-11|1994-04-10|TAKE BACK RETURN|FOB|ular pinto beans. silent ideas above 5538|77667|5189|4|9|14801.94|0.00|0.01|R|F|1993-12-26|1994-01-31|1994-01-03|TAKE BACK RETURN|REG AIR|encies across the blithely fina 5539|64404|1923|1|42|57472.80|0.10|0.08|A|F|1994-09-29|1994-09-17|1994-10-20|DELIVER IN PERSON|RAIL|ons across the carefully si 5540|180895|896|1|42|82987.38|0.02|0.08|N|O|1996-11-12|1996-12-18|1996-12-05|TAKE BACK RETURN|RAIL|ss dolphins haggle 5540|101022|1023|2|2|2046.04|0.06|0.02|N|O|1996-12-12|1997-01-09|1996-12-25|DELIVER IN PERSON|MAIL|nic asymptotes could hav 5540|63336|5843|3|19|24687.27|0.01|0.03|N|O|1997-02-06|1996-11-18|1997-02-20|DELIVER IN PERSON|SHIP| slyly slyl 5540|71466|8988|4|24|34499.04|0.10|0.05|N|O|1997-01-09|1996-12-02|1997-01-23|COLLECT COD|FOB|deposits! ironic depths may engage-- b 5541|95359|7869|1|39|52819.65|0.08|0.05|N|O|1997-11-17|1997-12-27|1997-12-11|TAKE BACK RETURN|RAIL|ding theodolites haggle against the slyly 5542|188250|8251|1|6|8029.50|0.03|0.01|N|O|1996-06-14|1996-05-28|1996-07-11|DELIVER IN PERSON|TRUCK| foxes doubt. theodolites ca 5543|142617|5132|1|14|23234.54|0.02|0.03|R|F|1993-10-09|1993-12-09|1993-10-21|NONE|SHIP|ecial reque 5543|161544|9093|2|22|35321.88|0.04|0.00|A|F|1993-11-06|1993-11-02|1993-12-02|DELIVER IN PERSON|SHIP|instructions. deposits use quickly. ir 5543|66993|9500|3|3|5879.97|0.08|0.05|R|F|1993-11-18|1993-11-05|1993-12-17|NONE|FOB|ress, even 5543|146766|1795|4|8|14502.08|0.05|0.01|R|F|1993-10-28|1993-11-18|1993-11-07|NONE|SHIP|totes? iron 5543|79919|9920|5|32|60765.12|0.03|0.03|R|F|1993-10-04|1993-11-14|1993-11-03|DELIVER IN PERSON|AIR|ully around the 5543|183372|927|6|1|1455.37|0.03|0.07|A|F|1993-10-29|1993-11-11|1993-11-23|TAKE BACK RETURN|FOB|uriously. slyly 5543|128171|3196|7|39|46767.63|0.06|0.00|R|F|1993-10-07|1993-11-15|1993-10-28|TAKE BACK RETURN|MAIL|l excuses are furiously. slyly unusual requ 5568|165351|7868|1|50|70817.50|0.05|0.05|N|O|1995-07-14|1995-09-04|1995-08-03|COLLECT COD|TRUCK|furious ide 5568|43012|3013|2|18|17190.18|0.01|0.08|N|O|1995-08-19|1995-08-18|1995-08-24|DELIVER IN PERSON|SHIP|structions haggle. 
carefully regular 5568|88766|8767|3|35|61416.60|0.08|0.07|N|O|1995-09-17|1995-09-04|1995-10-14|NONE|SHIP|lyly. blit 5569|28825|1328|1|25|43845.50|0.10|0.03|R|F|1993-06-29|1993-07-18|1993-07-05|TAKE BACK RETURN|TRUCK| deposits cajole above 5569|57218|4734|2|26|30555.46|0.09|0.06|A|F|1993-08-21|1993-07-22|1993-09-09|DELIVER IN PERSON|MAIL|pitaphs. ironic req 5569|54713|7219|3|48|80050.08|0.02|0.03|R|F|1993-06-16|1993-06-15|1993-07-09|COLLECT COD|SHIP|the fluffily 5569|146526|1555|4|19|29877.88|0.10|0.08|R|F|1993-07-30|1993-06-21|1993-08-13|TAKE BACK RETURN|FOB| detect ca 5569|58516|6032|5|15|22117.65|0.02|0.06|A|F|1993-06-29|1993-07-06|1993-07-05|DELIVER IN PERSON|MAIL|lithely bold requests boost fur 5570|160555|8104|1|37|59775.35|0.08|0.02|N|O|1996-08-29|1996-10-23|1996-09-11|NONE|RAIL|y ironic pin 5570|38607|3614|2|15|23184.00|0.09|0.02|N|O|1996-10-04|1996-10-05|1996-10-28|TAKE BACK RETURN|REG AIR|beans nag slyly special, regular pack 5570|59349|9350|3|29|37941.86|0.02|0.05|N|O|1996-10-12|1996-10-20|1996-11-08|TAKE BACK RETURN|SHIP|he silent, enticing requests. 5571|153454|5970|1|32|48238.40|0.05|0.01|R|F|1992-12-25|1993-03-01|1993-01-23|NONE|FOB| the blithely even packages nag q 5571|93241|769|2|31|38261.44|0.09|0.07|R|F|1993-01-05|1993-01-18|1993-02-04|DELIVER IN PERSON|SHIP|uffily even accounts. quickly re 5571|91430|8958|3|18|25585.74|0.10|0.05|R|F|1993-03-11|1993-02-28|1993-04-03|COLLECT COD|REG AIR|uests haggle furiously pending d 5572|21549|6554|1|24|35292.96|0.08|0.08|R|F|1994-10-30|1994-10-02|1994-11-27|TAKE BACK RETURN|MAIL|ests cajole. evenly ironic exc 5572|171660|9212|2|27|46754.82|0.03|0.04|A|F|1994-08-29|1994-09-10|1994-08-30|TAKE BACK RETURN|SHIP| accounts. carefully final accoun 5572|86234|6235|3|19|23184.37|0.10|0.00|A|F|1994-08-12|1994-10-07|1994-09-01|DELIVER IN PERSON|RAIL|es. final, final requests wake blithely ag 5572|134303|1843|4|46|61515.80|0.02|0.01|R|F|1994-09-08|1994-10-14|1994-10-01|NONE|REG AIR|ully regular platelet 5572|23056|8061|5|34|33287.70|0.10|0.08|R|F|1994-10-22|1994-08-16|1994-11-08|NONE|TRUCK|asymptotes integrate. s 5572|100044|45|6|14|14616.56|0.04|0.05|A|F|1994-11-02|1994-09-20|1994-11-03|COLLECT COD|RAIL|he fluffily express packages. fluffily fina 5572|25241|7744|7|24|27989.76|0.01|0.05|R|F|1994-09-26|1994-09-04|1994-10-22|DELIVER IN PERSON|FOB| beans. foxes sleep fluffily across th 5573|20138|2641|1|32|33860.16|0.05|0.07|N|O|1996-09-30|1996-10-25|1996-10-15|DELIVER IN PERSON|RAIL|egular depths haggl 5573|49469|4478|2|2|2836.92|0.01|0.07|N|O|1996-08-26|1996-09-29|1996-09-04|COLLECT COD|TRUCK| even foxes. specia 5573|10720|5723|3|46|75013.12|0.06|0.01|N|O|1996-11-04|1996-10-02|1996-11-15|DELIVER IN PERSON|MAIL|s haggle qu 5573|168653|6202|4|43|74030.95|0.10|0.03|N|O|1996-10-22|1996-11-03|1996-11-02|TAKE BACK RETURN|FOB| furiously pending packages against 5573|137649|7650|5|43|72525.52|0.05|0.04|N|O|1996-09-09|1996-09-24|1996-09-28|COLLECT COD|AIR| bold package 5574|184533|7052|1|46|74406.38|0.02|0.07|A|F|1992-06-20|1992-04-19|1992-07-11|NONE|FOB|arefully express requests wake furiousl 5574|32304|7311|2|21|25962.30|0.05|0.08|A|F|1992-03-22|1992-04-26|1992-04-16|TAKE BACK RETURN|TRUCK|fully final dugouts. express foxes nag 5574|118564|3587|3|27|42729.12|0.10|0.06|R|F|1992-05-08|1992-05-19|1992-06-05|TAKE BACK RETURN|REG AIR|ecial realms. furiously entici 5574|93359|5869|4|14|18932.90|0.09|0.01|R|F|1992-05-20|1992-04-09|1992-05-23|COLLECT COD|REG AIR| use slyly carefully special requests? 
slyl 5574|84053|9070|5|19|19703.95|0.05|0.03|A|F|1992-05-28|1992-04-24|1992-06-11|TAKE BACK RETURN|REG AIR|old deposits int 5575|57026|4542|1|7|6881.14|0.01|0.07|N|O|1995-10-01|1995-09-30|1995-10-06|NONE|FOB|s. slyly pending theodolites prin 5575|30170|7680|2|23|25303.91|0.04|0.02|N|O|1995-10-26|1995-10-09|1995-11-13|TAKE BACK RETURN|AIR|enticingly final requests. ironically 5575|62433|9952|3|16|22326.88|0.00|0.07|N|O|1995-08-17|1995-10-14|1995-08-30|NONE|RAIL|jole boldly beyond the final as 5575|109718|9719|4|7|12093.97|0.01|0.04|N|O|1995-10-15|1995-09-14|1995-10-18|DELIVER IN PERSON|RAIL|special requests. final, final 5600|186280|8799|1|34|46453.52|0.02|0.00|N|O|1997-03-22|1997-04-05|1997-04-09|TAKE BACK RETURN|MAIL|ly above the stealthy ideas. permane 5600|7352|4853|2|19|23927.65|0.00|0.01|N|O|1997-04-10|1997-03-24|1997-04-16|TAKE BACK RETURN|TRUCK|dencies. carefully p 5601|37982|5492|1|29|55679.42|0.09|0.04|A|F|1992-04-06|1992-02-24|1992-04-29|DELIVER IN PERSON|TRUCK| ironic ideas. final 5601|163175|8208|2|45|55717.65|0.10|0.07|A|F|1992-03-25|1992-04-03|1992-04-04|TAKE BACK RETURN|MAIL|ts-- blithely final accounts cajole. carefu 5601|72205|2206|3|38|44733.60|0.07|0.00|A|F|1992-01-08|1992-03-01|1992-01-09|TAKE BACK RETURN|REG AIR|ter the evenly final deposit 5601|147847|362|4|12|22738.08|0.03|0.01|A|F|1992-02-27|1992-03-16|1992-03-27|COLLECT COD|TRUCK|ep carefully a 5602|175324|2876|1|9|12593.88|0.08|0.03|N|O|1997-10-14|1997-09-14|1997-11-11|COLLECT COD|FOB|lar foxes; quickly ironic ac 5602|61015|8534|2|31|30256.31|0.04|0.08|N|O|1997-09-04|1997-10-24|1997-09-07|NONE|TRUCK|rate fluffily regular platelets. blithel 5602|67680|2693|3|30|49430.40|0.04|0.00|N|O|1997-09-20|1997-10-25|1997-10-12|DELIVER IN PERSON|FOB|e slyly even packages. careful 5603|97812|5340|1|50|90490.50|0.03|0.02|A|F|1992-10-06|1992-08-20|1992-10-08|COLLECT COD|SHIP|final theodolites accor 5603|115704|3238|2|49|84265.30|0.06|0.05|A|F|1992-06-24|1992-07-28|1992-07-01|DELIVER IN PERSON|FOB|fully silent requests. carefully fin 5603|31836|9346|3|49|86623.67|0.00|0.02|R|F|1992-10-07|1992-07-21|1992-10-10|DELIVER IN PERSON|TRUCK|nic, pending dependencies print 5604|135036|5037|1|44|47125.32|0.05|0.01|N|O|1998-08-06|1998-07-08|1998-09-04|NONE|RAIL|efully ironi 5604|135056|7570|2|49|53461.45|0.10|0.00|N|O|1998-05-02|1998-07-07|1998-05-20|NONE|FOB|ove the regula 5604|77004|9512|3|10|9810.00|0.07|0.05|N|O|1998-08-03|1998-06-23|1998-08-04|COLLECT COD|SHIP|ly final realms wake blit 5605|86747|1764|1|50|86687.00|0.08|0.05|N|O|1996-08-26|1996-10-15|1996-09-04|TAKE BACK RETURN|RAIL|instructions sleep carefully ironic req 5605|150710|711|2|7|12324.97|0.06|0.01|N|O|1996-12-13|1996-10-13|1996-12-15|TAKE BACK RETURN|FOB|lowly special courts nag among the furi 5605|172146|7181|3|3|3654.42|0.01|0.02|N|O|1996-09-01|1996-10-02|1996-09-20|TAKE BACK RETURN|AIR|posits. accounts boost. t 5605|54851|7357|4|45|81263.25|0.00|0.01|N|O|1996-09-05|1996-10-04|1996-09-13|COLLECT COD|FOB|ly unusual instructions. carefully ironic p 5605|69922|4935|5|39|73784.88|0.00|0.08|N|O|1996-12-13|1996-11-03|1996-12-24|DELIVER IN PERSON|REG AIR|cial deposits. theodolites w 5605|165991|5992|6|29|59652.71|0.08|0.08|N|O|1996-09-19|1996-10-22|1996-10-06|DELIVER IN PERSON|SHIP| quickly. quickly pending sen 5606|173945|3946|1|47|94890.18|0.10|0.04|N|O|1996-12-23|1997-01-31|1997-01-20|DELIVER IN PERSON|REG AIR|carefully final foxes. pending, final 5606|91684|1685|2|34|56973.12|0.09|0.06|N|O|1997-02-23|1997-02-08|1997-03-09|TAKE BACK RETURN|REG AIR|uses. 
slyly final 5606|126750|6751|3|46|81730.50|0.04|0.00|N|O|1997-03-11|1997-01-13|1997-03-23|DELIVER IN PERSON|REG AIR|ter the ironic accounts. even, ironic depos 5606|81657|1658|4|30|49159.50|0.08|0.04|N|O|1997-02-06|1997-01-26|1997-02-16|DELIVER IN PERSON|REG AIR| nag always. blithely express packages 5606|6317|1318|5|25|30582.75|0.06|0.00|N|O|1996-12-25|1997-01-12|1997-01-11|TAKE BACK RETURN|AIR|breach about the furiously bold 5606|153929|3930|6|3|5948.76|0.04|0.06|N|O|1997-01-11|1997-01-04|1997-02-08|COLLECT COD|AIR| sauternes. asympto 5606|73859|3860|7|46|84311.10|0.07|0.01|N|O|1997-02-01|1997-01-31|1997-02-15|DELIVER IN PERSON|TRUCK|ow requests wake around the regular accoun 5607|131532|4046|1|23|35961.19|0.02|0.06|R|F|1992-04-17|1992-02-12|1992-04-30|DELIVER IN PERSON|MAIL|the special, final patterns 5632|9074|1575|1|48|47187.36|0.06|0.06|N|O|1996-05-08|1996-03-24|1996-06-04|TAKE BACK RETURN|FOB|unts. decoys u 5632|105591|5592|2|21|33528.39|0.02|0.08|N|O|1996-03-22|1996-03-10|1996-04-10|NONE|AIR|refully regular pinto beans. ironic reques 5632|66291|3810|3|24|30174.96|0.04|0.06|N|O|1996-03-23|1996-04-02|1996-03-30|TAKE BACK RETURN|MAIL|beans detect. quickly final i 5633|159226|6772|1|28|35986.16|0.02|0.00|N|O|1998-08-14|1998-07-24|1998-08-26|TAKE BACK RETURN|SHIP|as boost quickly. unusual pinto 5633|101711|1712|2|10|17127.10|0.09|0.04|N|O|1998-07-15|1998-08-03|1998-08-03|COLLECT COD|AIR|its cajole fluffily fluffily special pinto 5633|45872|5873|3|27|49082.49|0.03|0.02|N|O|1998-09-28|1998-07-28|1998-10-12|DELIVER IN PERSON|AIR|ructions. even ideas haggle carefully r 5633|163653|3654|4|50|85832.50|0.02|0.05|N|O|1998-07-23|1998-07-09|1998-08-21|DELIVER IN PERSON|TRUCK|ts. slyly regular 5633|99323|1833|5|48|63471.36|0.01|0.05|N|O|1998-06-24|1998-07-22|1998-07-18|DELIVER IN PERSON|TRUCK|even courts haggle slyly at the requ 5633|106564|1585|6|1|1570.56|0.02|0.03|N|O|1998-09-29|1998-08-28|1998-10-19|NONE|RAIL|thely notornis: 5633|10697|3199|7|39|62699.91|0.02|0.08|N|O|1998-07-12|1998-07-03|1998-07-13|COLLECT COD|TRUCK|ding ideas cajole furiously after 5634|184279|1834|1|26|35445.02|0.10|0.08|N|O|1996-10-29|1996-09-15|1996-11-24|COLLECT COD|REG AIR|ptotes mold qu 5634|174875|2427|2|22|42897.14|0.02|0.05|N|O|1996-09-01|1996-08-31|1996-09-05|DELIVER IN PERSON|MAIL|silently unusual foxes above the blithely 5634|108573|6104|3|16|25305.12|0.08|0.02|N|O|1996-11-15|1996-09-14|1996-12-04|NONE|AIR|ess ideas are carefully pending, even re 5634|181044|6081|4|29|32626.16|0.00|0.01|N|O|1996-08-10|1996-10-29|1996-08-11|TAKE BACK RETURN|MAIL|ely final ideas. deposits sleep. reg 5634|309|310|5|1|1209.30|0.04|0.02|N|O|1996-10-02|1996-10-21|1996-10-27|COLLECT COD|MAIL|ctions haggle carefully. carefully clo 5635|82066|7083|1|43|45066.58|0.03|0.00|R|F|1992-10-12|1992-09-29|1992-11-01|TAKE BACK RETURN|TRUCK|cross the d 5635|71392|1393|2|5|6816.95|0.05|0.08|R|F|1992-10-02|1992-11-05|1992-10-26|TAKE BACK RETURN|REG AIR|yly along the ironic, fi 5635|71245|6260|3|12|14594.88|0.09|0.02|A|F|1992-10-18|1992-09-24|1992-11-17|NONE|REG AIR|ke slyly against the carefully final req 5635|7781|5282|4|40|67551.20|0.03|0.01|A|F|1992-09-25|1992-11-05|1992-10-11|NONE|FOB|pending foxes. regular packages 5635|168511|8512|5|38|60021.38|0.05|0.06|A|F|1992-10-09|1992-09-25|1992-10-18|NONE|MAIL|ckly pendin 5635|161513|6546|6|23|36213.73|0.05|0.04|A|F|1992-08-24|1992-11-10|1992-09-21|NONE|AIR|ily pending packages. 
bold, 5635|136198|8712|7|32|39494.08|0.03|0.08|R|F|1992-11-24|1992-09-20|1992-12-17|TAKE BACK RETURN|TRUCK|slyly even 5636|69821|2328|1|18|32234.76|0.05|0.03|R|F|1995-05-14|1995-05-17|1995-06-12|DELIVER IN PERSON|REG AIR|slyly express requests. furiously pen 5636|69120|6639|2|26|28317.12|0.03|0.06|A|F|1995-03-05|1995-05-16|1995-03-23|TAKE BACK RETURN|AIR| furiously final pinto beans o 5636|89315|9316|3|21|27390.51|0.03|0.03|A|F|1995-03-13|1995-05-11|1995-03-24|COLLECT COD|AIR| are furiously unusual 5636|108256|5787|4|15|18963.75|0.03|0.04|R|F|1995-04-21|1995-04-30|1995-05-05|DELIVER IN PERSON|REG AIR|efully special 5636|46791|9296|5|13|22591.27|0.10|0.03|A|F|1995-05-11|1995-04-27|1995-05-26|COLLECT COD|AIR|en, fluffy accounts amon 5636|11278|1279|6|33|39245.91|0.06|0.04|A|F|1995-03-09|1995-04-05|1995-03-23|DELIVER IN PERSON|MAIL|ding to the 5636|133257|5771|7|24|30966.00|0.10|0.05|R|F|1995-04-12|1995-03-27|1995-04-16|DELIVER IN PERSON|RAIL|counts sleep furiously b 5637|46670|9175|1|14|22633.38|0.03|0.05|N|O|1996-07-20|1996-07-26|1996-08-14|COLLECT COD|MAIL|y bold deposits wak 5637|171887|1888|2|35|68560.80|0.09|0.08|N|O|1996-08-01|1996-08-04|1996-08-20|NONE|AIR|s sleep blithely alongside of the ironic 5637|95346|2874|3|22|29509.48|0.01|0.07|N|O|1996-08-28|1996-07-30|1996-09-17|COLLECT COD|REG AIR|nding requests are ca 5637|65175|2694|4|16|18242.72|0.03|0.03|N|O|1996-09-08|1996-08-31|1996-09-29|TAKE BACK RETURN|TRUCK|d packages. express requests 5637|195700|5701|5|10|17957.00|0.01|0.00|N|O|1996-08-25|1996-08-11|1996-09-23|TAKE BACK RETURN|MAIL|ickly ironic gifts. blithely even cour 5637|128281|794|6|27|35350.56|0.01|0.05|N|O|1996-06-27|1996-08-09|1996-07-27|DELIVER IN PERSON|REG AIR|oss the carefully express warhorses 5638|137834|7835|1|45|84232.35|0.09|0.07|A|F|1994-05-17|1994-03-09|1994-06-15|NONE|TRUCK|ar foxes. fluffily pending accounts 5638|167678|5227|2|12|20948.04|0.02|0.05|A|F|1994-02-05|1994-04-01|1994-02-25|COLLECT COD|TRUCK|n, even requests. furiously ironic not 5638|161923|6956|3|21|41683.32|0.08|0.00|A|F|1994-03-13|1994-03-27|1994-03-17|DELIVER IN PERSON|TRUCK|press courts use f 5639|46609|1618|1|11|17111.60|0.09|0.02|R|F|1994-09-18|1994-07-10|1994-10-12|TAKE BACK RETURN|SHIP|g the unusual pinto beans caj 5664|121079|6104|1|25|27501.75|0.00|0.06|N|O|1998-10-29|1998-09-23|1998-11-25|COLLECT COD|FOB|eposits: furiously ironic grouch 5664|172408|7443|2|9|13323.60|0.07|0.05|N|O|1998-07-31|1998-08-26|1998-08-12|COLLECT COD|RAIL| ironic deposits haggle furiously. re 5664|52439|2440|3|31|43134.33|0.01|0.03|N|O|1998-11-10|1998-09-12|1998-12-07|TAKE BACK RETURN|FOB|ainst the never silent request 5664|137545|2572|4|33|52223.82|0.08|0.03|N|O|1998-08-29|1998-09-17|1998-09-25|DELIVER IN PERSON|RAIL|d the final 5664|111779|9313|5|44|78793.88|0.01|0.06|N|O|1998-09-24|1998-09-26|1998-10-23|NONE|TRUCK|ang thinly bold pa 5664|67733|2746|6|34|57824.82|0.09|0.01|N|O|1998-09-10|1998-10-05|1998-09-15|COLLECT COD|RAIL|st. fluffily pending foxes na 5664|181490|9045|7|9|14143.41|0.01|0.05|N|O|1998-11-04|1998-10-15|1998-11-20|TAKE BACK RETURN|REG AIR|yly. express ideas agai 5665|100935|936|1|32|61949.76|0.00|0.02|A|F|1993-08-11|1993-08-01|1993-09-07|NONE|AIR|f the slyly even requests! 
regular request 5665|4924|7425|2|14|25604.88|0.02|0.00|R|F|1993-06-29|1993-09-16|1993-07-16|DELIVER IN PERSON|AIR|- special pinto beans sleep quickly blithel 5665|157104|7105|3|41|47605.10|0.09|0.02|A|F|1993-08-23|1993-09-22|1993-09-11|COLLECT COD|REG AIR| idle ideas across 5665|45118|127|4|47|49966.17|0.01|0.01|A|F|1993-10-06|1993-09-19|1993-11-01|NONE|RAIL|s mold fluffily. final deposits along the 5666|121226|8763|1|7|8730.54|0.09|0.08|R|F|1994-05-10|1994-04-06|1994-05-21|NONE|FOB| ideas. regular packag 5666|35783|790|2|14|24062.92|0.08|0.01|A|F|1994-02-27|1994-04-11|1994-03-06|DELIVER IN PERSON|TRUCK|lar deposits nag against the slyly final d 5666|192178|7217|3|39|49536.63|0.00|0.01|A|F|1994-05-13|1994-04-02|1994-06-12|DELIVER IN PERSON|TRUCK|the even, final foxes. quickly iron 5666|130643|5670|4|24|40167.36|0.07|0.01|R|F|1994-02-14|1994-03-09|1994-03-06|DELIVER IN PERSON|FOB|on the carefully pending asympto 5666|108483|8484|5|36|53693.28|0.07|0.07|R|F|1994-03-15|1994-03-16|1994-03-18|COLLECT COD|TRUCK|accounts. furiousl 5667|144431|1974|1|37|54590.91|0.09|0.06|N|O|1995-09-24|1995-09-17|1995-10-03|NONE|REG AIR|s cajole blit 5668|3645|8646|1|15|23229.60|0.03|0.04|A|F|1995-04-06|1995-05-12|1995-04-17|COLLECT COD|FOB| the express, pending requests. bo 5669|190847|848|1|7|13564.88|0.06|0.06|N|O|1996-06-19|1996-07-07|1996-07-11|COLLECT COD|SHIP|yly regular requests lose blithely. careful 5669|155814|3360|2|2|3739.62|0.06|0.07|N|O|1996-08-04|1996-06-15|1996-08-20|NONE|SHIP| blithely excuses. slyly 5669|157654|7655|3|40|68466.00|0.00|0.02|N|O|1996-08-30|1996-06-15|1996-09-07|TAKE BACK RETURN|FOB|ar accounts alongside of the final, p 5669|89245|9246|4|31|38261.44|0.04|0.05|N|O|1996-08-05|1996-06-10|1996-08-29|COLLECT COD|AIR|to beans against the regular depo 5669|139680|7220|5|30|51590.40|0.07|0.01|N|O|1996-07-14|1996-07-28|1996-08-10|TAKE BACK RETURN|TRUCK|l accounts. 
care 5670|89569|9570|1|27|42081.12|0.10|0.06|R|F|1993-05-09|1993-05-30|1993-06-06|TAKE BACK RETURN|REG AIR| ideas promise bli 5670|185257|7776|2|43|57716.75|0.06|0.00|A|F|1993-07-09|1993-06-03|1993-07-14|DELIVER IN PERSON|FOB|ests in place of the carefully sly depos 5670|6482|6483|3|24|33323.52|0.09|0.04|A|F|1993-07-17|1993-07-01|1993-08-03|NONE|AIR|press, express requests haggle 5670|141383|3898|4|11|15668.18|0.06|0.06|R|F|1993-07-11|1993-06-26|1993-07-24|DELIVER IN PERSON|MAIL|etect furiously among the even pin 5671|119482|4505|1|25|37537.00|0.00|0.08|N|O|1998-04-17|1998-03-28|1998-05-06|DELIVER IN PERSON|AIR|cording to the quickly final requests-- 5671|128838|3863|2|46|85874.18|0.05|0.08|N|O|1998-03-28|1998-04-22|1998-04-19|TAKE BACK RETURN|MAIL|lar pinto beans detect care 5671|171340|8892|3|13|18347.42|0.10|0.06|N|O|1998-03-02|1998-04-03|1998-03-08|TAKE BACK RETURN|TRUCK|bold theodolites about 5671|110212|7746|4|42|51332.82|0.00|0.07|N|O|1998-02-17|1998-04-24|1998-03-17|TAKE BACK RETURN|SHIP|carefully slyly special deposit 5671|128097|610|5|13|14626.17|0.09|0.00|N|O|1998-04-24|1998-03-26|1998-04-27|NONE|REG AIR|ers according to the ironic, unusual excu 5671|113340|8363|6|30|40600.20|0.09|0.07|N|O|1998-06-06|1998-04-15|1998-07-01|DELIVER IN PERSON|TRUCK|fily ironi 5696|136316|3856|1|28|37864.68|0.03|0.06|N|O|1995-07-03|1995-06-14|1995-07-27|COLLECT COD|REG AIR| the fluffily brave pearls 5696|58914|6430|2|46|86153.86|0.01|0.00|N|O|1995-08-10|1995-07-08|1995-08-25|COLLECT COD|AIR|ter the instruct 5696|166282|3831|3|42|56627.76|0.04|0.01|N|F|1995-06-06|1995-06-11|1995-06-19|TAKE BACK RETURN|SHIP|te furious 5696|97646|156|4|20|32872.80|0.08|0.00|N|O|1995-06-25|1995-07-18|1995-07-16|NONE|TRUCK|silent, pending ideas sleep fluffil 5696|123505|6018|5|19|29041.50|0.07|0.05|N|O|1995-08-31|1995-06-13|1995-09-10|COLLECT COD|SHIP|unusual requests sleep furiously ru 5696|131928|9468|6|37|72517.04|0.04|0.05|N|O|1995-07-21|1995-06-23|1995-08-19|NONE|RAIL| carefully expres 5696|101569|9100|7|6|9423.36|0.07|0.05|N|O|1995-08-03|1995-07-15|1995-09-01|DELIVER IN PERSON|REG AIR|n patterns lose slyly fina 5697|54193|1709|1|24|27532.56|0.10|0.07|R|F|1992-10-27|1992-11-28|1992-11-20|NONE|RAIL|uffily iro 5697|15283|7785|2|43|51526.04|0.06|0.02|R|F|1992-12-08|1992-12-03|1992-12-17|TAKE BACK RETURN|FOB|blithely reg 5697|55460|2976|3|42|59449.32|0.03|0.01|A|F|1992-12-19|1992-12-08|1993-01-03|COLLECT COD|TRUCK|inal theodolites cajole after the bli 5698|10470|5473|1|30|41414.10|0.01|0.05|A|F|1994-05-26|1994-08-16|1994-06-19|COLLECT COD|AIR|its. quickly regular foxes aro 5698|162073|2074|2|25|28376.75|0.08|0.07|R|F|1994-08-06|1994-06-21|1994-08-25|NONE|SHIP| asymptotes sleep slyly above the 5698|154975|7491|3|45|91348.65|0.03|0.01|A|F|1994-06-23|1994-08-13|1994-07-02|NONE|FOB|ng excuses. slyly express asymptotes 5698|57104|9610|4|15|15916.50|0.07|0.08|R|F|1994-06-29|1994-07-03|1994-07-02|COLLECT COD|REG AIR|ly ironic frets haggle carefully 5698|139406|9407|5|37|53479.80|0.06|0.06|A|F|1994-06-30|1994-06-23|1994-07-22|TAKE BACK RETURN|SHIP|ts. even, ironic 5698|187630|149|6|1|1717.63|0.06|0.04|R|F|1994-05-31|1994-07-10|1994-06-03|DELIVER IN PERSON|MAIL|nts. slyly quiet pinto beans nag carefu 5699|1884|6885|1|24|42861.12|0.01|0.07|A|F|1992-10-21|1992-09-04|1992-11-04|COLLECT COD|AIR|kages. fin 5699|54419|9430|2|26|35708.66|0.06|0.06|R|F|1992-08-11|1992-09-21|1992-08-14|COLLECT COD|MAIL|y final deposits wake fluffily u 5699|17473|9975|3|48|66742.56|0.10|0.05|R|F|1992-11-23|1992-10-20|1992-11-29|DELIVER IN PERSON|TRUCK|s. 
carefully regul 5699|54313|6819|4|46|58296.26|0.08|0.02|A|F|1992-11-28|1992-09-23|1992-12-27|TAKE BACK RETURN|FOB|o the slyly 5699|27286|2291|5|21|25478.88|0.02|0.02|A|F|1992-10-13|1992-09-30|1992-10-19|NONE|MAIL|lyly final pla 5699|190036|7594|6|30|33780.90|0.08|0.05|R|F|1992-11-13|1992-10-01|1992-12-11|DELIVER IN PERSON|AIR| the carefully final 5699|128219|3244|7|45|56124.45|0.09|0.06|A|F|1992-09-23|1992-10-22|1992-10-04|DELIVER IN PERSON|SHIP|rmanent packages sleep across the f 5700|167890|2923|1|24|46989.36|0.09|0.00|N|O|1997-12-26|1998-01-28|1998-01-18|DELIVER IN PERSON|REG AIR|ix carefully 5700|122421|4934|2|30|43302.60|0.00|0.06|N|O|1998-04-19|1998-03-13|1998-04-27|COLLECT COD|MAIL|ly blithely final instructions. fl 5700|125320|345|3|23|30942.36|0.03|0.05|N|O|1998-01-30|1998-01-31|1998-01-31|NONE|REG AIR| wake quickly carefully fluffy hockey 5701|53487|5993|1|17|24488.16|0.02|0.05|N|O|1997-03-27|1997-04-08|1997-04-21|DELIVER IN PERSON|RAIL|tes. quickly final a 5702|76997|9505|1|44|86855.56|0.06|0.02|R|F|1994-01-04|1993-11-25|1994-01-22|NONE|RAIL|lites. carefully final requests doze b 5702|85532|5533|2|37|56148.61|0.10|0.05|R|F|1993-12-14|1993-10-21|1994-01-08|NONE|FOB|ix slyly. regular instructions slee 5702|130640|8180|3|44|73508.16|0.00|0.02|R|F|1993-11-28|1993-12-02|1993-12-22|NONE|TRUCK|ake according to th 5702|62204|9723|4|31|36152.20|0.00|0.04|A|F|1994-01-04|1993-10-22|1994-01-26|DELIVER IN PERSON|TRUCK|pinto beans. blithely 5703|87698|207|1|2|3371.38|0.09|0.01|R|F|1993-05-29|1993-07-26|1993-06-05|TAKE BACK RETURN|REG AIR|nts against the blithely sile 5728|43599|6104|1|47|72501.73|0.10|0.05|A|F|1994-12-13|1995-01-25|1994-12-25|TAKE BACK RETURN|MAIL|nd the bravely final deposits. final ideas 5728|158786|6332|2|40|73791.20|0.05|0.08|A|F|1995-03-28|1995-01-17|1995-04-14|TAKE BACK RETURN|SHIP|final deposits. theodolite 5729|142388|2389|1|5|7151.90|0.07|0.00|R|F|1994-11-27|1994-11-11|1994-12-23|TAKE BACK RETURN|MAIL|s. even sheaves nag courts. 5729|106136|8647|2|39|44543.07|0.10|0.00|A|F|1995-01-22|1994-11-21|1995-02-13|TAKE BACK RETURN|MAIL|. special pl 5729|11181|1182|3|50|54609.00|0.00|0.05|R|F|1994-12-09|1994-12-31|1994-12-24|TAKE BACK RETURN|AIR|ly special sentiments. car 5730|150009|10|1|2|2118.00|0.08|0.00|N|O|1998-02-24|1998-03-15|1998-03-11|COLLECT COD|SHIP|ely ironic foxes. carefu 5730|199690|9691|2|9|16107.21|0.10|0.01|N|O|1998-03-05|1998-02-02|1998-03-28|DELIVER IN PERSON|MAIL|s lose blithely. specia 5731|191796|9354|1|13|24541.27|0.02|0.04|N|O|1997-07-30|1997-06-23|1997-08-13|COLLECT COD|RAIL|ngside of the quickly regular depos 5731|104178|4179|2|11|13003.87|0.00|0.08|N|O|1997-06-06|1997-07-08|1997-06-25|NONE|MAIL| furiously final accounts wake. d 5731|110812|813|3|6|10936.86|0.01|0.04|N|O|1997-07-02|1997-07-01|1997-07-08|COLLECT COD|SHIP|sits integrate slyly close platelets. quick 5731|13471|8474|4|6|8306.82|0.03|0.06|N|O|1997-09-07|1997-06-20|1997-09-20|TAKE BACK RETURN|RAIL|rs. quickly regular theo 5731|194960|4961|5|19|39044.24|0.08|0.02|N|O|1997-06-29|1997-06-27|1997-07-15|NONE|REG AIR|ly unusual ideas above the 5732|138287|5827|1|26|34457.28|0.02|0.07|N|O|1997-08-18|1997-10-25|1997-09-12|TAKE BACK RETURN|TRUCK|totes cajole according to the theodolites. 
5733|32868|2869|1|39|70233.54|0.01|0.07|A|F|1993-03-22|1993-05-24|1993-04-04|DELIVER IN PERSON|FOB|side of the 5734|182945|5464|1|29|58810.26|0.05|0.01|N|O|1997-12-01|1997-12-08|1997-12-23|NONE|RAIL|structions cajole final, express 5734|149299|4328|2|6|8089.74|0.07|0.00|N|O|1997-10-27|1997-12-19|1997-11-02|COLLECT COD|RAIL|s. regular platelets cajole furiously. regu 5734|66727|6728|3|10|16937.20|0.01|0.03|N|O|1997-12-28|1997-12-24|1998-01-24|DELIVER IN PERSON|TRUCK|equests; accounts above 5735|59754|9755|1|41|70263.75|0.01|0.01|R|F|1994-12-23|1995-02-10|1995-01-22|COLLECT COD|MAIL|lthily ruthless i 5760|667|8168|1|6|9405.96|0.09|0.03|R|F|1994-07-30|1994-07-31|1994-08-16|COLLECT COD|REG AIR|ng the acco 5760|5757|758|2|24|39906.00|0.04|0.05|A|F|1994-07-15|1994-07-04|1994-08-08|NONE|MAIL|s. bravely ironic accounts among 5760|147375|9890|3|8|11378.96|0.07|0.04|A|F|1994-09-06|1994-08-03|1994-10-06|NONE|AIR|l accounts among the carefully even de 5760|122293|2294|4|19|24990.51|0.10|0.01|R|F|1994-08-02|1994-08-02|1994-08-15|COLLECT COD|SHIP|sits nag. even, regular ideas cajole b 5760|165638|3187|5|6|10221.78|0.03|0.07|R|F|1994-06-09|1994-07-06|1994-06-16|DELIVER IN PERSON|MAIL| shall have to cajole along the 5761|46273|3786|1|41|49990.07|0.08|0.00|N|O|1998-07-31|1998-08-09|1998-08-08|TAKE BACK RETURN|TRUCK|pecial deposits. qu 5761|107289|7290|2|36|46666.08|0.00|0.07|N|O|1998-09-07|1998-09-21|1998-09-11|TAKE BACK RETURN|TRUCK| pinto beans thrash alongside of the pendi 5761|197395|4953|3|49|73127.11|0.04|0.08|N|O|1998-07-14|1998-08-20|1998-07-25|NONE|SHIP|ly bold accounts wake above the 5762|174993|4994|1|6|12407.94|0.05|0.02|N|O|1997-04-07|1997-03-25|1997-05-02|NONE|AIR|ironic dependencies doze carefu 5762|101749|9280|2|27|47269.98|0.02|0.08|N|O|1997-02-21|1997-05-08|1997-03-23|NONE|REG AIR|across the bold ideas. carefully sp 5762|88317|8318|3|40|52212.40|0.00|0.08|N|O|1997-04-30|1997-05-09|1997-05-08|COLLECT COD|SHIP|al instructions. furiousl 5762|132375|2376|4|47|66146.39|0.05|0.06|N|O|1997-03-02|1997-03-23|1997-03-19|NONE|RAIL|equests sleep after the furiously ironic pa 5762|24527|4528|5|28|40642.56|0.02|0.06|N|O|1997-02-22|1997-03-25|1997-02-24|TAKE BACK RETURN|SHIP|ic foxes among the blithely qui 5762|11673|4175|6|12|19016.04|0.00|0.06|N|O|1997-04-18|1997-04-27|1997-05-11|DELIVER IN PERSON|REG AIR|ages are abo 5763|130799|5826|1|32|58553.28|0.02|0.06|N|O|1998-07-16|1998-09-13|1998-08-02|DELIVER IN PERSON|FOB|ding instruct 5763|135695|8209|2|23|39805.87|0.09|0.04|N|O|1998-07-25|1998-09-21|1998-08-15|DELIVER IN PERSON|SHIP|re after the blithel 5763|12360|9864|3|25|31809.00|0.01|0.02|N|O|1998-10-04|1998-08-16|1998-10-09|DELIVER IN PERSON|REG AIR|inal theodolites. even re 5763|120969|3482|4|47|93528.12|0.09|0.00|N|O|1998-08-22|1998-09-22|1998-09-04|NONE|REG AIR|gle slyly. slyly final re 5763|122996|2997|5|8|16151.92|0.06|0.05|N|O|1998-09-23|1998-09-15|1998-09-27|DELIVER IN PERSON|TRUCK|foxes wake slyly. car 5763|189104|9105|6|9|10737.90|0.08|0.02|N|O|1998-09-24|1998-09-01|1998-10-02|NONE|AIR| deposits. instru 5764|100625|626|1|28|45517.36|0.04|0.04|A|F|1993-12-07|1993-12-20|1993-12-26|TAKE BACK RETURN|RAIL|sleep furi 5764|199143|4182|2|20|24842.80|0.10|0.05|A|F|1993-10-17|1993-12-24|1993-10-18|TAKE BACK RETURN|FOB|ng to the fluffily qu 5764|187173|4728|3|4|5040.68|0.03|0.05|A|F|1993-10-25|1993-12-23|1993-11-06|DELIVER IN PERSON|AIR|ily regular courts haggle 5765|161940|9489|1|31|62060.14|0.00|0.06|A|F|1995-01-11|1995-02-13|1995-01-23|TAKE BACK RETURN|AIR|r foxes. 
ev 5765|123802|6315|2|29|52948.20|0.07|0.08|A|F|1994-12-29|1995-02-01|1995-01-26|NONE|RAIL|nic requests. deposits wake quickly among 5765|138309|3336|3|31|41766.30|0.05|0.01|R|F|1995-03-01|1995-01-23|1995-03-31|TAKE BACK RETURN|REG AIR|the furiou 5765|151733|9279|4|46|82097.58|0.07|0.07|R|F|1995-03-13|1995-02-12|1995-03-20|DELIVER IN PERSON|MAIL|ccounts sleep about th 5765|173421|8456|5|48|71732.16|0.09|0.02|A|F|1995-03-30|1995-01-14|1995-04-09|DELIVER IN PERSON|SHIP|theodolites integrate furiously 5765|82052|7069|6|41|42396.05|0.04|0.00|A|F|1994-12-31|1995-02-11|1995-01-17|TAKE BACK RETURN|SHIP| furiously. slyly sile 5765|41209|6218|7|21|24154.20|0.05|0.04|R|F|1995-04-05|1995-02-12|1995-05-05|COLLECT COD|TRUCK|ole furiously. quick, special dependencies 5766|187050|9569|1|1|1137.05|0.10|0.01|R|F|1994-01-16|1993-11-16|1994-01-23|NONE|MAIL|blithely regular the 5766|148529|6072|2|39|61523.28|0.02|0.07|A|F|1993-10-24|1993-12-07|1993-11-08|DELIVER IN PERSON|SHIP| furiously unusual courts. slyly final pear 5766|117751|5285|3|4|7075.00|0.08|0.08|R|F|1993-11-10|1993-10-30|1993-12-01|COLLECT COD|TRUCK|ly even requests. furiou 5767|166864|6865|1|11|21239.46|0.08|0.01|A|F|1992-06-02|1992-05-30|1992-06-08|NONE|TRUCK|instructions. carefully final accou 5767|68555|1062|2|15|22853.25|0.07|0.05|R|F|1992-06-05|1992-07-28|1992-06-08|DELIVER IN PERSON|MAIL|warthogs. carefully unusual g 5767|190490|3010|3|42|66380.58|0.06|0.01|R|F|1992-07-31|1992-06-09|1992-08-09|COLLECT COD|TRUCK| blithe deposi 5767|152475|2476|4|34|51933.98|0.06|0.01|R|F|1992-06-02|1992-06-23|1992-06-17|NONE|FOB|sits among the 5767|45270|5271|5|36|43749.72|0.03|0.00|A|F|1992-07-17|1992-06-10|1992-07-19|COLLECT COD|AIR|ake carefully. packages 5792|177019|9537|1|34|37264.34|0.08|0.07|R|F|1993-05-23|1993-06-25|1993-06-12|NONE|RAIL|requests are against t 5792|156410|8926|2|47|68921.27|0.10|0.00|A|F|1993-06-08|1993-05-10|1993-06-26|COLLECT COD|AIR|regular, ironic excuses n 5792|182806|2807|3|32|60441.60|0.05|0.08|R|F|1993-06-26|1993-05-23|1993-07-07|COLLECT COD|RAIL|s are slyly against the ev 5792|13894|6396|4|14|25310.46|0.09|0.02|A|F|1993-07-28|1993-06-17|1993-08-27|DELIVER IN PERSON|RAIL|olites print carefully 5792|101362|8893|5|31|42264.16|0.02|0.01|A|F|1993-06-17|1993-05-05|1993-07-01|COLLECT COD|TRUCK|s? furiously even instructions 5793|52084|9600|1|20|20721.60|0.05|0.03|N|O|1997-10-05|1997-09-04|1997-10-30|COLLECT COD|AIR|e carefully ex 5793|169992|7541|2|41|84541.59|0.06|0.06|N|O|1997-08-04|1997-10-10|1997-08-12|DELIVER IN PERSON|TRUCK|snooze quick 5793|42777|2778|3|8|13758.16|0.07|0.03|N|O|1997-08-16|1997-09-08|1997-08-28|COLLECT COD|AIR|al foxes l 5793|147676|5219|4|48|82736.16|0.02|0.02|N|O|1997-09-27|1997-08-23|1997-10-27|DELIVER IN PERSON|REG AIR|quickly enticing excuses use slyly abov 5794|157144|7145|1|42|50447.88|0.06|0.05|R|F|1993-06-29|1993-05-30|1993-07-28|COLLECT COD|REG AIR|he careful 5794|114786|9809|2|14|25210.92|0.09|0.02|R|F|1993-04-19|1993-07-02|1993-05-18|COLLECT COD|SHIP|uriously carefully ironic reque 5794|6827|6828|3|15|26007.30|0.09|0.06|R|F|1993-06-25|1993-06-27|1993-07-09|NONE|MAIL|blithely regular ideas. final foxes haggle 5794|136244|8758|4|47|60171.28|0.00|0.08|A|F|1993-07-16|1993-06-21|1993-08-05|TAKE BACK RETURN|REG AIR|quests. 
blithely final excu 5795|192991|8030|1|34|70855.66|0.09|0.05|A|F|1992-08-21|1992-07-30|1992-08-27|COLLECT COD|REG AIR|al instructions must affix along the ironic 5796|57971|2982|1|27|52082.19|0.10|0.00|N|O|1996-04-06|1996-02-29|1996-04-20|DELIVER IN PERSON|RAIL|s wake quickly aro 5797|60967|8486|1|17|32775.32|0.09|0.03|N|O|1997-12-13|1998-01-12|1997-12-23|NONE|REG AIR|the ironic, even theodoli 5798|126015|6016|1|2|2082.02|0.09|0.00|N|O|1998-05-25|1998-06-22|1998-06-09|COLLECT COD|FOB|e furiously across 5798|123814|6327|2|14|25729.34|0.06|0.05|N|O|1998-04-01|1998-06-14|1998-04-27|NONE|RAIL|he special, bold packages. carefully iron 5798|133608|8635|3|22|36115.20|0.02|0.01|N|O|1998-06-24|1998-06-06|1998-07-20|COLLECT COD|TRUCK|sits poach carefully 5798|145699|8214|4|40|69787.60|0.08|0.06|N|O|1998-07-09|1998-06-24|1998-07-16|NONE|TRUCK| integrate carefu 5798|148631|6174|5|7|11757.41|0.06|0.07|N|O|1998-06-06|1998-05-10|1998-06-07|NONE|SHIP|ts against the blithely final p 5798|37444|4954|6|9|12432.96|0.06|0.02|N|O|1998-05-05|1998-05-25|1998-05-09|DELIVER IN PERSON|REG AIR|e blithely 5798|114851|7363|7|32|59707.20|0.08|0.01|N|O|1998-04-27|1998-05-03|1998-05-08|TAKE BACK RETURN|REG AIR|ubt blithely above the 5799|94586|4587|1|41|64803.78|0.04|0.02|N|O|1995-11-13|1995-10-31|1995-11-16|COLLECT COD|TRUCK|al accounts sleep ruthlessl 5799|99882|4901|2|30|56456.40|0.03|0.08|N|O|1995-09-12|1995-09-13|1995-09-19|NONE|RAIL| furiously s 5824|76111|8619|1|40|43484.40|0.06|0.06|N|O|1997-01-14|1997-01-17|1997-02-02|NONE|REG AIR|he final packag 5824|181001|6038|2|42|45444.00|0.09|0.00|N|O|1997-02-01|1997-02-20|1997-02-07|COLLECT COD|SHIP|ts sleep. carefully regular accounts h 5824|72131|9653|3|16|17650.08|0.03|0.02|N|O|1997-02-13|1997-01-07|1997-02-17|TAKE BACK RETURN|TRUCK|sly express Ti 5824|91100|6119|4|32|34915.20|0.03|0.02|N|O|1997-02-16|1997-01-24|1997-02-20|DELIVER IN PERSON|RAIL|ven requests. 5824|107000|7001|5|44|44308.00|0.08|0.03|N|O|1997-01-24|1997-01-31|1997-02-11|COLLECT COD|TRUCK|fily fluffily bold 5825|158214|730|1|23|29260.83|0.10|0.05|R|F|1995-05-10|1995-04-28|1995-05-13|DELIVER IN PERSON|TRUCK| special pinto beans. dependencies haggl 5826|143968|6483|1|4|8047.84|0.03|0.06|N|O|1998-07-31|1998-09-10|1998-08-27|NONE|AIR| packages across the fluffily spec 5826|63777|3778|2|18|31333.86|0.04|0.01|N|O|1998-07-17|1998-09-03|1998-07-22|NONE|SHIP|atelets use above t 5827|186619|6620|1|30|51168.30|0.03|0.05|N|O|1998-11-11|1998-09-27|1998-11-30|DELIVER IN PERSON|RAIL|ounts may c 5827|102134|4645|2|23|26130.99|0.09|0.05|N|O|1998-11-16|1998-09-14|1998-11-17|COLLECT COD|RAIL|ans. furiously special instruct 5827|163706|8739|3|3|5309.10|0.03|0.06|N|O|1998-10-17|1998-09-29|1998-10-28|DELIVER IN PERSON|MAIL|uses eat along the furiously 5827|199067|9068|4|26|30317.56|0.06|0.00|N|O|1998-07-29|1998-09-24|1998-07-30|DELIVER IN PERSON|SHIP|arefully special packages wake thin 5827|111610|6633|5|38|61621.18|0.03|0.06|N|O|1998-10-18|1998-08-27|1998-10-23|TAKE BACK RETURN|TRUCK|ly ruthless accounts 5827|16474|1477|6|14|19466.58|0.05|0.01|N|O|1998-08-31|1998-09-06|1998-09-13|TAKE BACK RETURN|RAIL|rges. fluffily pending 5828|1343|8844|1|28|34841.52|0.10|0.03|A|F|1994-05-15|1994-05-20|1994-06-08|DELIVER IN PERSON|MAIL| special ideas haggle slyly ac 5828|157303|2334|2|37|50331.10|0.01|0.00|R|F|1994-06-07|1994-05-30|1994-06-17|NONE|RAIL|e carefully spec 5829|39657|4664|1|4|6386.60|0.01|0.02|N|O|1997-03-01|1997-02-17|1997-03-22|NONE|TRUCK|ithely; accounts cajole ideas. 
regular foxe 5829|106523|9034|2|40|61180.80|0.04|0.01|N|O|1997-04-21|1997-02-12|1997-05-04|COLLECT COD|TRUCK| the carefully ironic accounts. a 5829|128238|3263|3|6|7597.38|0.05|0.06|N|O|1997-01-22|1997-03-12|1997-02-02|TAKE BACK RETURN|AIR|sts. slyly special fo 5829|89153|4170|4|42|47970.30|0.02|0.07|N|O|1997-03-26|1997-04-01|1997-03-30|COLLECT COD|REG AIR|pearls. slyly bold deposits solve final 5829|190779|8337|5|49|91618.73|0.05|0.01|N|O|1997-01-31|1997-03-13|1997-02-18|NONE|MAIL| ironic excuses use fluf 5829|17069|2072|6|17|16763.02|0.09|0.02|N|O|1997-04-10|1997-03-29|1997-04-22|COLLECT COD|AIR|after the furiously ironic ideas no 5829|77942|7943|7|27|51838.38|0.08|0.04|N|O|1997-02-25|1997-03-31|1997-03-03|DELIVER IN PERSON|AIR|ns about the excuses are c 5830|159261|6807|1|29|38287.54|0.10|0.02|R|F|1993-06-19|1993-05-10|1993-07-13|DELIVER IN PERSON|REG AIR|y bold excuses 5831|190330|331|1|2|2840.66|0.10|0.01|N|O|1997-02-09|1997-01-20|1997-03-07|TAKE BACK RETURN|TRUCK|quickly silent req 5831|73475|8490|2|33|47799.51|0.04|0.03|N|O|1996-11-20|1997-01-18|1996-12-18|TAKE BACK RETURN|MAIL| instructions wake. slyly sil 5831|81622|4131|3|6|9621.72|0.05|0.07|N|O|1997-01-29|1997-01-14|1997-02-09|NONE|MAIL|ly ironic accounts nag pendin 5831|12962|7965|4|46|86248.16|0.06|0.02|N|O|1997-02-24|1997-01-18|1997-03-02|COLLECT COD|MAIL|ly final pa 5831|42828|2829|5|37|65520.34|0.05|0.01|N|O|1997-01-17|1997-02-08|1997-02-01|NONE|FOB|uriously even requests 5856|3680|1181|1|1|1583.68|0.03|0.02|A|F|1994-12-29|1995-01-07|1995-01-10|TAKE BACK RETURN|MAIL|tly. special deposits wake blithely even 5856|34305|9312|2|35|43375.50|0.09|0.02|R|F|1994-11-24|1994-12-23|1994-11-30|COLLECT COD|AIR|excuses. finally ir 5856|152739|2740|3|39|69877.47|0.05|0.03|A|F|1995-01-18|1995-01-11|1995-01-19|DELIVER IN PERSON|TRUCK|uickly quickly fluffy in 5857|57226|7227|1|25|29580.50|0.03|0.02|N|O|1997-12-02|1997-12-17|1997-12-08|DELIVER IN PERSON|REG AIR|ding platelets. pending excu 5857|194769|2327|2|50|93188.00|0.06|0.07|N|O|1997-12-04|1997-12-16|1997-12-20|NONE|TRUCK|y regular d 5857|67860|5379|3|1|1827.86|0.03|0.01|N|O|1998-02-01|1997-12-09|1998-02-20|TAKE BACK RETURN|SHIP|instructions detect final reques 5857|117998|510|4|12|24191.88|0.03|0.08|N|O|1998-01-24|1997-12-27|1998-02-10|TAKE BACK RETURN|AIR|counts. express, final 5857|191260|3780|5|14|18917.64|0.07|0.07|N|O|1997-12-10|1998-01-06|1998-01-04|TAKE BACK RETURN|TRUCK|ffily pendin 5857|92862|5372|6|49|90888.14|0.00|0.04|N|O|1998-01-23|1997-12-12|1998-01-28|DELIVER IN PERSON|REG AIR|egular pinto beans 5858|120832|8369|1|20|37056.60|0.02|0.06|A|F|1992-07-23|1992-08-26|1992-07-24|COLLECT COD|SHIP|uffily unusual pinto beans sleep 5858|15003|5004|2|36|33048.00|0.00|0.05|A|F|1992-09-25|1992-08-16|1992-10-11|NONE|SHIP|osits wake quickly quickly sile 5858|147996|511|3|7|14307.93|0.08|0.02|A|F|1992-10-07|1992-08-16|1992-10-15|TAKE BACK RETURN|REG AIR|. doggedly regular packages use pendin 5858|163490|1039|4|46|71460.54|0.07|0.06|R|F|1992-09-07|1992-10-06|1992-10-06|DELIVER IN PERSON|MAIL|posits withi 5858|160181|5214|5|18|22341.24|0.00|0.07|A|F|1992-11-05|1992-10-08|1992-12-03|NONE|TRUCK|al excuses. bold 5858|153937|8968|6|7|13936.51|0.04|0.00|A|F|1992-09-14|1992-10-01|1992-10-01|TAKE BACK RETURN|RAIL|dly pending ac 5858|10709|3211|7|50|80985.00|0.06|0.00|R|F|1992-07-20|1992-10-07|1992-07-25|NONE|TRUCK|r the ironic ex 5859|174988|23|1|50|103149.00|0.07|0.01|N|O|1997-07-08|1997-06-20|1997-07-27|COLLECT COD|MAIL|ly regular deposits use. 
ironic 5859|8773|6274|2|17|28590.09|0.03|0.03|N|O|1997-05-15|1997-06-30|1997-05-26|DELIVER IN PERSON|AIR|ly ironic requests. quickly unusual pin 5859|45701|8206|3|33|54341.10|0.10|0.04|N|O|1997-07-08|1997-06-22|1997-07-18|TAKE BACK RETURN|TRUCK|eposits unwind furiously final pinto bea 5859|92269|2270|4|40|50450.40|0.09|0.02|N|O|1997-08-05|1997-06-17|1997-08-20|NONE|REG AIR|l dependenci 5859|152689|7720|5|35|60958.80|0.00|0.08|N|O|1997-05-28|1997-07-14|1997-06-15|COLLECT COD|TRUCK|egular acco 5859|43446|3447|6|9|12504.96|0.01|0.02|N|O|1997-06-15|1997-06-06|1997-06-20|NONE|RAIL|ges boost quickly. blithely r 5859|190127|7685|7|27|32862.24|0.05|0.08|N|O|1997-07-30|1997-07-08|1997-08-08|NONE|MAIL| across th 5860|50168|7684|1|10|11181.60|0.04|0.04|A|F|1992-03-11|1992-03-30|1992-03-31|NONE|MAIL|ual patterns try to eat carefully above 5861|190867|8425|1|32|62651.52|0.00|0.03|N|O|1997-05-27|1997-05-29|1997-05-28|TAKE BACK RETURN|MAIL|nt asymptotes. carefully express request 5861|85611|628|2|6|9579.66|0.10|0.03|N|O|1997-07-28|1997-05-18|1997-08-24|TAKE BACK RETURN|TRUCK|olites. slyly 5862|112061|4573|1|4|4292.24|0.09|0.06|N|O|1997-06-04|1997-04-26|1997-06-19|NONE|TRUCK|yly silent deposit 5862|1334|6335|2|29|35824.57|0.03|0.05|N|O|1997-04-02|1997-04-16|1997-04-04|NONE|FOB|e fluffily. furiously 5863|160562|3079|1|45|73015.20|0.07|0.06|A|F|1993-12-19|1994-01-25|1994-01-05|NONE|REG AIR| deposits are ab 5863|159369|1885|2|21|29995.56|0.09|0.03|R|F|1994-01-13|1994-01-09|1994-01-28|DELIVER IN PERSON|FOB|atelets nag blithely furi 5888|61291|8810|1|46|57605.34|0.02|0.00|N|O|1996-11-18|1996-11-05|1996-12-08|TAKE BACK RETURN|FOB|yly final accounts hag 5888|111595|1596|2|24|38558.16|0.03|0.01|N|O|1996-11-07|1996-11-30|1996-11-20|COLLECT COD|SHIP|ing to the spe 5889|76129|8637|1|17|18787.04|0.09|0.02|N|O|1995-07-01|1995-08-12|1995-07-25|NONE|AIR|blithely pending packages. flu 5890|112221|2222|1|38|46862.36|0.01|0.08|A|F|1993-02-14|1992-12-09|1993-02-27|COLLECT COD|FOB| accounts. carefully final asymptotes 5891|84905|4906|1|22|41577.80|0.00|0.06|R|F|1993-01-01|1993-02-18|1993-01-14|DELIVER IN PERSON|TRUCK|iresias cajole deposits. special, ir 5891|185580|8099|2|9|14990.22|0.03|0.07|R|F|1993-01-20|1993-02-27|1993-02-10|COLLECT COD|REG AIR|cajole carefully 5891|29674|4679|3|10|16036.70|0.08|0.01|A|F|1993-04-14|1993-02-07|1993-04-15|DELIVER IN PERSON|RAIL|nding requests. b 5892|147722|7723|1|7|12388.04|0.02|0.03|N|O|1995-06-26|1995-07-18|1995-07-25|COLLECT COD|AIR|e furiously. quickly even deposits da 5892|149926|7469|2|37|73109.04|0.09|0.06|N|O|1995-08-12|1995-06-11|1995-09-05|NONE|REG AIR|maintain. bold, expre 5892|2064|2065|3|28|27049.68|0.03|0.06|N|O|1995-08-16|1995-07-06|1995-08-22|DELIVER IN PERSON|MAIL|ithely unusual accounts will have to integ 5892|74798|4799|4|23|40774.17|0.08|0.04|R|F|1995-05-18|1995-07-06|1995-05-29|COLLECT COD|MAIL| foxes nag slyly about the qui 5893|133707|1247|1|43|74850.10|0.05|0.02|R|F|1992-11-02|1992-09-27|1992-11-21|TAKE BACK RETURN|RAIL|s. regular courts above the carefully silen 5893|1868|9369|2|2|3539.72|0.10|0.04|R|F|1992-07-18|1992-09-10|1992-08-12|NONE|RAIL|ckages wake sly 5894|7312|4813|1|23|28044.13|0.04|0.08|A|F|1994-09-05|1994-10-27|1994-09-13|NONE|TRUCK| furiously even deposits haggle alw 5894|78446|3461|2|48|68373.12|0.04|0.08|A|F|1994-09-04|1994-11-03|1994-09-17|NONE|TRUCK| asymptotes among the blithely silent 5895|14728|7230|1|38|62423.36|0.05|0.08|N|O|1997-04-05|1997-03-06|1997-05-03|DELIVER IN PERSON|RAIL|ts are furiously. 
regular, final excuses 5895|121538|1539|2|47|73297.91|0.04|0.06|N|O|1997-04-27|1997-03-17|1997-05-07|DELIVER IN PERSON|AIR|r packages wake carefull 5895|83712|1237|3|49|83089.79|0.03|0.07|N|O|1997-03-15|1997-02-17|1997-04-04|NONE|TRUCK|permanent foxes. packages 5895|145714|5715|4|31|54551.01|0.03|0.01|N|O|1997-03-03|1997-03-30|1997-03-08|TAKE BACK RETURN|TRUCK| final deposits nod slyly careful 5895|199172|9173|5|20|25423.40|0.07|0.00|N|O|1997-04-30|1997-02-07|1997-05-08|DELIVER IN PERSON|AIR|gular deposits wake blithely carefully fin 5895|77733|2748|6|15|25660.95|0.08|0.08|N|O|1997-04-19|1997-03-09|1997-05-13|TAKE BACK RETURN|RAIL|silent package 5920|186069|6070|1|50|57753.00|0.06|0.00|A|F|1995-03-13|1995-01-03|1995-03-31|TAKE BACK RETURN|RAIL|across the carefully pending platelets 5920|57793|7794|2|24|42018.96|0.01|0.05|A|F|1994-12-28|1995-01-21|1994-12-31|DELIVER IN PERSON|FOB|fully regular dolphins. furiousl 5920|116952|9464|3|2|3937.90|0.08|0.07|A|F|1995-02-18|1995-01-13|1995-03-04|NONE|SHIP| evenly spe 5920|11323|8827|4|28|34560.96|0.06|0.02|R|F|1994-12-17|1995-02-13|1994-12-31|NONE|SHIP|le slyly slyly even deposits. f 5920|99165|6693|5|42|48894.72|0.09|0.08|A|F|1994-12-18|1995-01-07|1995-01-14|COLLECT COD|AIR|lar, ironic dependencies sno 5921|98016|5544|1|44|44616.44|0.07|0.01|R|F|1994-07-14|1994-06-30|1994-07-15|NONE|TRUCK|ain about the special 5921|145670|699|2|25|42891.75|0.06|0.01|A|F|1994-05-19|1994-06-15|1994-06-17|COLLECT COD|TRUCK|nd the slyly regular deposits. quick 5921|67177|2190|3|17|19450.89|0.06|0.01|R|F|1994-05-20|1994-05-26|1994-05-23|NONE|FOB|final asymptotes. even packages boost 5921|27331|2336|4|26|32716.58|0.03|0.04|A|F|1994-05-03|1994-07-06|1994-05-06|NONE|AIR|hy dependenc 5921|142246|4761|5|41|52817.84|0.04|0.02|R|F|1994-04-13|1994-05-31|1994-04-26|DELIVER IN PERSON|AIR|nusual, regular theodol 5921|114421|4422|6|5|7177.10|0.02|0.00|R|F|1994-06-01|1994-05-07|1994-06-10|COLLECT COD|TRUCK|eas cajole across the final, fi 5922|195631|3189|1|9|15539.67|0.07|0.00|N|O|1996-12-04|1997-01-20|1996-12-08|DELIVER IN PERSON|RAIL|haggle slyly even packages. packages 5922|156595|1626|2|37|61108.83|0.01|0.04|N|O|1996-12-19|1996-12-16|1997-01-15|COLLECT COD|RAIL|s wake slyly. requests cajole furiously asy 5922|89515|4532|3|35|52657.85|0.08|0.00|N|O|1996-12-12|1997-01-21|1997-01-01|DELIVER IN PERSON|SHIP|accounts. regu 5922|65048|5049|4|13|13169.52|0.08|0.07|N|O|1997-03-08|1996-12-26|1997-04-03|DELIVER IN PERSON|FOB|sly special accounts wake ironically. 5922|56428|8934|5|39|53992.38|0.04|0.07|N|O|1997-03-04|1997-01-17|1997-03-25|TAKE BACK RETURN|SHIP|e of the instructions. quick 5922|178810|1328|6|10|18888.10|0.04|0.01|N|O|1997-02-23|1996-12-26|1997-03-04|NONE|REG AIR|sly regular deposits haggle quickly ins 5923|176934|6935|1|27|54295.11|0.08|0.03|N|O|1997-08-16|1997-06-27|1997-08-29|DELIVER IN PERSON|RAIL|arefully i 5923|118577|1089|2|42|67013.94|0.01|0.08|N|O|1997-09-16|1997-07-23|1997-09-27|COLLECT COD|REG AIR|y regular theodolites w 5923|107597|5128|3|2|3209.18|0.06|0.05|N|O|1997-06-19|1997-07-31|1997-06-28|TAKE BACK RETURN|TRUCK|express patterns. even deposits 5923|173158|5676|4|46|56632.90|0.05|0.04|N|O|1997-07-29|1997-07-23|1997-08-23|COLLECT COD|SHIP|nto beans cajole blithe 5923|58535|3546|5|35|52273.55|0.04|0.05|N|O|1997-07-21|1997-07-11|1997-08-01|DELIVER IN PERSON|AIR|sts affix unusual, final requests. 
request 5924|175145|180|1|38|46365.32|0.06|0.05|N|O|1995-12-17|1995-12-11|1996-01-06|TAKE BACK RETURN|AIR|ions cajole carefully along the 5924|52349|4855|2|49|63765.66|0.04|0.00|N|O|1995-10-25|1995-12-11|1995-11-08|NONE|MAIL|inly final excuses. blithely regular requ 5924|16705|6706|3|24|38920.80|0.09|0.08|N|O|1996-01-12|1995-12-13|1996-01-25|COLLECT COD|REG AIR| use carefully. special, e 5925|86543|9052|1|42|64240.68|0.05|0.02|N|O|1996-03-05|1996-01-13|1996-03-10|COLLECT COD|SHIP|to the furiously 5925|124966|9991|2|31|61719.76|0.03|0.03|N|O|1996-01-02|1995-12-14|1996-01-07|TAKE BACK RETURN|FOB|e slyly. furiously regular deposi 5925|88288|3305|3|50|63814.00|0.03|0.04|N|O|1996-02-14|1996-01-10|1996-02-15|NONE|TRUCK|es. stealthily express pains print bli 5925|53309|8320|4|30|37869.00|0.02|0.07|N|O|1996-02-21|1996-02-11|1996-03-10|NONE|TRUCK| the packa 5925|159746|9747|5|41|74035.34|0.00|0.06|N|O|1996-02-03|1995-12-24|1996-02-20|NONE|SHIP| across the pending deposits nag caref 5925|49812|7325|6|48|84566.88|0.02|0.00|N|O|1996-02-03|1996-01-19|1996-03-04|DELIVER IN PERSON|REG AIR| haggle after the fo 5926|89216|4233|1|8|9641.68|0.02|0.00|R|F|1994-07-17|1994-07-20|1994-08-11|COLLECT COD|MAIL|gle furiously express foxes. bo 5926|49773|7286|2|27|46514.79|0.09|0.05|A|F|1994-07-05|1994-08-11|1994-08-02|DELIVER IN PERSON|MAIL|ironic requests 5926|126721|6722|3|46|80395.12|0.01|0.03|R|F|1994-09-05|1994-08-12|1994-09-11|COLLECT COD|RAIL|ts integrate. courts haggl 5926|189693|7248|4|23|41001.87|0.01|0.02|A|F|1994-07-23|1994-08-10|1994-07-27|DELIVER IN PERSON|FOB|ickly special packages among 5927|89541|2050|1|44|67343.76|0.04|0.05|N|O|1997-11-29|1997-11-21|1997-12-13|DELIVER IN PERSON|TRUCK|rding to the special, final decoy 5927|114570|9593|2|8|12676.56|0.04|0.05|N|O|1997-09-24|1997-11-15|1997-10-22|TAKE BACK RETURN|SHIP|ilent dependencies nod c 5927|166058|8575|3|32|35969.60|0.10|0.07|N|O|1997-12-26|1997-10-27|1997-12-31|COLLECT COD|AIR|telets. carefully bold accounts was 5952|199810|2330|1|49|93580.69|0.10|0.02|N|O|1997-06-30|1997-07-10|1997-07-02|COLLECT COD|AIR|e furiously regular 5952|190128|7686|2|11|13399.32|0.10|0.05|N|O|1997-05-13|1997-06-04|1997-05-27|DELIVER IN PERSON|FOB|y nag blithely aga 5952|70338|339|3|43|56258.19|0.01|0.01|N|O|1997-06-29|1997-06-06|1997-07-15|COLLECT COD|MAIL|posits sleep furiously quickly final p 5952|157819|2850|4|23|43166.63|0.00|0.07|N|O|1997-05-13|1997-06-27|1997-05-20|NONE|TRUCK|e blithely packages. eve 5953|128103|8104|1|36|40719.60|0.03|0.00|R|F|1992-05-28|1992-06-24|1992-05-29|DELIVER IN PERSON|FOB| cajole furio 5953|12029|4531|2|34|31994.68|0.03|0.04|A|F|1992-05-04|1992-06-12|1992-06-02|NONE|RAIL|hockey players use furiously against th 5953|161015|6048|3|5|5380.05|0.07|0.06|A|F|1992-04-10|1992-04-27|1992-04-14|NONE|SHIP|s. blithely 5953|168308|825|4|23|31654.90|0.09|0.02|R|F|1992-06-05|1992-06-03|1992-06-29|TAKE BACK RETURN|FOB|he silent ideas. silent foxes po 5954|146706|4249|1|8|14021.60|0.03|0.00|A|F|1993-03-27|1993-01-22|1993-04-04|TAKE BACK RETURN|AIR|unusual th 5954|80235|2744|2|40|48609.20|0.02|0.01|A|F|1992-12-30|1993-01-16|1993-01-09|COLLECT COD|RAIL|iously ironic deposits after 5954|93387|915|3|20|27607.60|0.09|0.07|A|F|1992-12-25|1993-02-05|1992-12-31|COLLECT COD|REG AIR| accounts wake carefu 5954|144672|2215|4|20|34333.40|0.00|0.01|R|F|1993-02-27|1993-01-04|1993-03-08|NONE|TRUCK|ke furiously blithely special packa 5954|99780|7308|5|35|62292.30|0.04|0.06|A|F|1993-03-17|1993-02-06|1993-04-10|NONE|SHIP|tions maintain slyly. 
furious 5954|192029|4549|6|39|43719.78|0.04|0.08|A|F|1993-02-27|1993-02-25|1993-03-29|DELIVER IN PERSON|REG AIR| always regular dolphins. furiously p 5955|139375|4402|1|14|19801.18|0.08|0.08|N|O|1995-06-22|1995-05-23|1995-06-24|DELIVER IN PERSON|TRUCK| unusual, bold theodolit 5955|61553|9072|2|15|22718.25|0.08|0.07|R|F|1995-04-22|1995-05-28|1995-04-27|NONE|FOB|y final accounts above the regu 5955|111196|6219|3|40|48287.60|0.03|0.00|R|F|1995-04-01|1995-06-11|1995-04-27|NONE|FOB|oss the fluffily regular 5956|154047|6563|1|10|11010.40|0.04|0.05|N|O|1998-07-27|1998-07-04|1998-08-21|NONE|MAIL|ic packages am 5956|54179|1695|2|23|26062.91|0.08|0.03|N|O|1998-06-06|1998-07-10|1998-06-15|DELIVER IN PERSON|RAIL|ly slyly special 5956|174834|7352|3|47|89715.01|0.04|0.06|N|O|1998-09-06|1998-06-29|1998-09-18|TAKE BACK RETURN|MAIL|lyly express theodol 5956|19995|7499|4|40|76599.60|0.09|0.05|N|O|1998-06-11|1998-07-19|1998-06-21|NONE|MAIL|final theodolites sleep carefully ironic c 5957|14617|7119|1|37|56669.57|0.07|0.00|A|F|1994-04-18|1994-02-19|1994-05-11|NONE|AIR| ideas use ruthlessly. 5957|58726|3737|2|46|77497.12|0.04|0.08|A|F|1994-01-23|1994-01-30|1994-02-07|NONE|SHIP|platelets. furiously unusual requests 5957|1377|6378|3|17|21732.29|0.01|0.01|A|F|1994-01-24|1994-02-16|1994-02-08|TAKE BACK RETURN|SHIP|. final, pending packages 5957|131499|6526|4|29|44384.21|0.01|0.03|R|F|1994-02-24|1994-03-04|1994-03-08|COLLECT COD|REG AIR|sits. final, even asymptotes cajole quickly 5957|87262|4787|5|40|49970.40|0.04|0.04|R|F|1994-01-07|1994-02-05|1994-01-26|DELIVER IN PERSON|SHIP|ironic asymptotes sleep blithely again 5957|5079|80|6|41|40346.87|0.10|0.07|R|F|1994-03-25|1994-02-20|1994-03-31|DELIVER IN PERSON|MAIL|es across the regular requests maint 5957|158431|5977|7|32|47661.76|0.10|0.07|A|F|1994-03-05|1994-02-20|1994-03-09|NONE|TRUCK| boost carefully across the 5958|148834|6377|1|33|62133.39|0.02|0.04|N|O|1995-09-24|1995-12-12|1995-10-05|COLLECT COD|MAIL|lar, regular accounts wake furi 5958|42932|7941|2|23|43123.39|0.03|0.04|N|O|1995-09-26|1995-10-19|1995-09-27|COLLECT COD|SHIP|regular requests. bold, bold deposits unwin 5958|152606|7637|3|42|69661.20|0.10|0.00|N|O|1995-12-12|1995-10-19|1996-01-09|NONE|AIR|n accounts. final, ironic packages 5958|38433|8434|4|18|24685.74|0.04|0.05|N|O|1995-12-02|1995-10-17|1995-12-22|COLLECT COD|FOB|regular requests haggle 5958|131786|4300|5|32|58168.96|0.06|0.00|N|O|1995-09-20|1995-12-10|1995-10-14|COLLECT COD|REG AIR|e carefully special theodolites. carefully 5959|134156|6670|1|49|58317.35|0.07|0.03|R|F|1992-07-16|1992-08-09|1992-08-14|DELIVER IN PERSON|SHIP|usual packages haggle slyly pi 5959|146297|6298|2|17|22835.93|0.09|0.07|R|F|1992-06-10|1992-07-06|1992-06-23|COLLECT COD|MAIL|ackages. blithely ex 5959|4379|4380|3|4|5133.48|0.04|0.03|R|F|1992-06-14|1992-07-05|1992-07-01|NONE|MAIL|gular requests ar 5959|195921|5922|4|13|26219.96|0.03|0.00|A|F|1992-07-29|1992-07-13|1992-08-20|COLLECT COD|SHIP|ar forges. deposits det 5959|39979|7489|5|37|71001.89|0.04|0.01|R|F|1992-06-05|1992-07-18|1992-06-29|NONE|TRUCK|endencies. brai 5959|118109|621|6|35|39448.50|0.03|0.00|A|F|1992-05-27|1992-06-19|1992-06-23|NONE|TRUCK|ely silent deposits. 5959|42154|4659|7|47|51519.05|0.02|0.01|R|F|1992-08-28|1992-07-24|1992-09-09|TAKE BACK RETURN|RAIL|deposits. slyly special cou 5984|69454|6973|1|13|18504.85|0.06|0.07|R|F|1994-10-16|1994-09-06|1994-11-11|NONE|MAIL|lar platelets. f 5984|101208|1209|2|25|30230.00|0.05|0.08|R|F|1994-10-06|1994-07-21|1994-10-28|COLLECT COD|RAIL|gular accounts. 
even packages nag slyly 5984|321|2822|3|8|9770.56|0.10|0.00|R|F|1994-09-17|1994-08-28|1994-09-25|COLLECT COD|RAIL|its. express, 5984|189708|9709|4|35|62919.50|0.00|0.01|A|F|1994-08-25|1994-08-05|1994-08-31|DELIVER IN PERSON|SHIP|le fluffily regula 5985|85717|8226|1|4|6810.84|0.02|0.02|A|F|1995-05-04|1995-04-01|1995-05-17|DELIVER IN PERSON|MAIL|ole along the quickly slow d 5986|78789|6311|1|26|45962.28|0.00|0.00|R|F|1992-08-10|1992-05-23|1992-08-24|TAKE BACK RETURN|SHIP|e fluffily ironic ideas. silent 5986|195254|7774|2|25|33731.25|0.03|0.06|A|F|1992-06-16|1992-07-17|1992-06-29|TAKE BACK RETURN|MAIL| instructions. slyly regular de 5986|29636|2139|3|1|1565.63|0.07|0.06|A|F|1992-05-21|1992-06-21|1992-05-24|DELIVER IN PERSON|REG AIR|fix quickly quickly final deposits. fluffil 5986|89590|7115|4|31|48967.29|0.00|0.03|A|F|1992-08-21|1992-06-29|1992-09-14|NONE|AIR|structions! furiously pending instructi 5986|135143|5144|5|6|7068.84|0.05|0.02|A|F|1992-07-16|1992-06-10|1992-07-29|DELIVER IN PERSON|RAIL|al foxes within the slyly speci citus-7.0.3/src/test/regress/data/lineitem.2.data000066400000000000000000026505541317107136600216300ustar00rootroot000000000000008997|19353|9354|2|25|31808.75|0.05|0.03|R|F|1994-08-05|1994-09-29|1994-08-15|TAKE BACK RETURN|AIR| blithely asymptotes. 8997|57771|7772|3|11|19016.47|0.04|0.00|R|F|1994-09-12|1994-09-10|1994-09-19|NONE|RAIL|nic sheaves are 8997|149605|4634|4|1|1654.60|0.07|0.01|A|F|1994-10-13|1994-09-05|1994-10-21|NONE|REG AIR|theodolite 8997|195853|5854|5|33|64312.05|0.05|0.04|A|F|1994-09-02|1994-09-25|1994-09-19|NONE|MAIL|requests wake furiously. foxes d 8997|101036|1037|6|23|23851.69|0.00|0.01|R|F|1994-08-20|1994-09-23|1994-09-18|DELIVER IN PERSON|TRUCK|quickly regular 8998|119794|2306|1|47|85248.13|0.01|0.07|A|F|1993-01-22|1993-02-21|1993-02-09|DELIVER IN PERSON|RAIL|about the sometimes ironic excuse 8998|166895|1928|2|37|72589.93|0.02|0.05|A|F|1993-02-21|1993-03-24|1993-03-23|COLLECT COD|AIR|ay fluffily final pinto beans! bli 8998|3110|5611|3|12|12157.32|0.04|0.02|R|F|1993-03-04|1993-02-08|1993-03-27|NONE|AIR| engage furiously blithely ironic 8998|58188|3199|4|15|17192.70|0.03|0.01|A|F|1993-01-19|1993-02-27|1993-01-23|DELIVER IN PERSON|RAIL|gular Tiresias kindle 8999|107599|7600|1|21|33738.39|0.10|0.06|A|F|1994-08-23|1994-08-12|1994-08-27|TAKE BACK RETURN|FOB|among the slyly 8999|8987|6488|2|48|91007.04|0.02|0.06|A|F|1994-10-04|1994-08-16|1994-10-24|TAKE BACK RETURN|FOB|nts sleep quickl 8999|38529|1033|3|26|38155.52|0.09|0.03|R|F|1994-09-03|1994-08-03|1994-10-03|DELIVER IN PERSON|FOB|r instructions. deposits use furiously aga 8999|150058|5089|4|9|9972.45|0.00|0.06|A|F|1994-08-24|1994-08-18|1994-08-31|TAKE BACK RETURN|AIR|ost blithely final, regu 9024|186958|6959|1|21|42943.95|0.05|0.06|A|F|1992-09-11|1992-08-30|1992-09-17|DELIVER IN PERSON|MAIL|the blithel 9024|189463|1982|2|12|18629.52|0.04|0.08|A|F|1992-09-21|1992-07-19|1992-10-08|DELIVER IN PERSON|AIR|ven deposits 9024|95896|5897|3|19|35945.91|0.10|0.08|A|F|1992-07-12|1992-08-14|1992-07-20|NONE|SHIP|deposits use ironic requests. final re 9024|119442|6976|4|46|67226.24|0.10|0.05|A|F|1992-06-27|1992-08-22|1992-07-15|TAKE BACK RETURN|RAIL|fluffy requests sleep 9024|58677|8678|5|31|50705.77|0.05|0.03|A|F|1992-09-28|1992-08-20|1992-10-19|NONE|TRUCK|xpress forges? final platelets 9024|174695|7213|6|37|65478.53|0.03|0.08|A|F|1992-08-25|1992-08-03|1992-09-17|COLLECT COD|TRUCK|posits cajole blithely alon 9024|84320|4321|7|27|35216.64|0.06|0.07|A|F|1992-09-27|1992-07-16|1992-10-14|COLLECT COD|SHIP|g asymptotes. 
9025|159410|9411|1|18|26449.38|0.04|0.01|A|F|1994-06-14|1994-06-28|1994-06-19|TAKE BACK RETURN|TRUCK|ffily final excuses wake sl 9025|674|675|2|15|23620.05|0.00|0.08|R|F|1994-08-26|1994-07-06|1994-09-21|TAKE BACK RETURN|RAIL|deposits. slyly unusual d 9025|127232|7233|3|49|61702.27|0.08|0.07|A|F|1994-09-15|1994-06-20|1994-10-04|TAKE BACK RETURN|FOB|print across the fluffily silent account 9026|177512|2547|1|21|33379.71|0.04|0.03|N|O|1996-10-02|1996-09-06|1996-10-25|NONE|MAIL|ly unusual as 9026|35171|7675|2|1|1106.17|0.05|0.08|N|O|1996-10-22|1996-10-13|1996-11-07|NONE|AIR| pinto beans. ironi 9026|102332|4843|3|28|37361.24|0.03|0.08|N|O|1996-09-14|1996-09-21|1996-10-12|NONE|RAIL|te fluffily regul 9027|7068|4569|1|39|38027.34|0.00|0.03|N|O|1995-09-26|1995-12-02|1995-10-01|DELIVER IN PERSON|MAIL|sts. furiously r 9027|175635|3187|2|44|75267.72|0.02|0.03|N|O|1995-10-01|1995-11-29|1995-10-23|TAKE BACK RETURN|FOB|ve furiously. furiously 9027|90479|5498|3|24|35267.28|0.05|0.04|N|O|1995-11-26|1995-11-09|1995-12-15|NONE|RAIL|. slyly unus 9028|53205|721|1|45|52119.00|0.06|0.04|R|F|1994-04-01|1994-01-28|1994-04-09|COLLECT COD|SHIP|furiously furiously 9029|76225|8733|1|40|48048.80|0.03|0.05|R|F|1992-12-15|1993-01-13|1993-01-02|NONE|FOB|sly ironic pinto beans sl 9029|107623|7624|2|11|17936.82|0.06|0.08|A|F|1993-03-01|1992-12-28|1993-03-15|TAKE BACK RETURN|REG AIR|heodolites. fluffily e 9030|159615|9616|1|28|46889.08|0.09|0.08|N|O|1998-09-02|1998-08-16|1998-09-10|DELIVER IN PERSON|TRUCK|ace of the sl 9030|36087|6088|2|2|2046.16|0.09|0.03|N|O|1998-08-19|1998-09-14|1998-09-18|DELIVER IN PERSON|TRUCK|es wake among the slyly regular ideas 9030|112873|2874|3|43|81092.41|0.00|0.05|N|O|1998-08-27|1998-09-02|1998-08-31|COLLECT COD|MAIL|posits. qu 9030|48049|3058|4|5|4985.20|0.10|0.08|N|O|1998-07-20|1998-09-17|1998-07-28|COLLECT COD|RAIL|ove the pinto bean 9030|150168|169|5|47|57253.52|0.00|0.04|N|O|1998-09-29|1998-09-24|1998-10-13|COLLECT COD|AIR|fily unusual foxes! flu 9030|62890|409|6|38|70409.82|0.04|0.07|N|O|1998-09-03|1998-10-04|1998-10-02|TAKE BACK RETURN|MAIL|ct furiousl 9031|181160|1161|1|5|6205.80|0.02|0.07|R|F|1994-01-24|1994-02-11|1994-02-10|TAKE BACK RETURN|SHIP|furiously. furio 9031|146553|6554|2|48|76778.40|0.06|0.07|R|F|1994-04-05|1994-03-14|1994-04-18|DELIVER IN PERSON|REG AIR|accounts before the even pinto be 9031|105141|7652|3|19|21776.66|0.10|0.03|R|F|1994-03-18|1994-02-17|1994-04-10|TAKE BACK RETURN|RAIL|ong the doggedly special deposits. 9031|104337|1868|4|21|28167.93|0.09|0.08|R|F|1994-03-07|1994-02-24|1994-03-22|COLLECT COD|FOB|ackages cajole slyly 9031|187842|361|5|33|63684.72|0.01|0.03|A|F|1994-04-15|1994-03-12|1994-05-06|COLLECT COD|MAIL|ic deposits ca 9031|55903|3419|6|49|91086.10|0.10|0.00|A|F|1994-02-21|1994-03-20|1994-03-08|COLLECT COD|RAIL|lar packages. b 9031|159991|2507|7|24|49223.76|0.03|0.00|A|F|1993-12-31|1994-03-11|1994-01-29|DELIVER IN PERSON|SHIP|y final accounts detect. q 9056|159443|1959|1|34|51082.96|0.07|0.07|N|O|1996-09-20|1996-10-18|1996-09-30|DELIVER IN PERSON|REG AIR|nal accounts nag furiously 9056|152800|5316|2|43|79670.40|0.01|0.05|N|O|1996-10-24|1996-10-06|1996-11-19|TAKE BACK RETURN|AIR|anent theodolites! 
furiously sil 9056|97264|2283|3|19|23963.94|0.06|0.05|N|O|1996-09-09|1996-10-19|1996-09-20|NONE|MAIL| doubt abo 9056|109236|9237|4|5|6226.15|0.02|0.06|N|O|1996-10-23|1996-09-19|1996-11-08|NONE|REG AIR|uickly express packages haggle bl 9056|122746|7771|5|13|22993.62|0.03|0.07|N|O|1996-08-28|1996-09-24|1996-09-12|DELIVER IN PERSON|FOB|nic packages boost furiously unusua 9057|24643|4644|1|35|54867.40|0.07|0.05|R|F|1994-11-06|1995-01-15|1994-11-18|NONE|SHIP|attainments. slyly final requests cajo 9057|168578|3611|2|41|67509.37|0.09|0.03|A|F|1995-01-26|1995-01-24|1995-02-09|DELIVER IN PERSON|REG AIR|fter the slyly dogged excuses. ironic, b 9057|90016|7544|3|45|45270.45|0.02|0.01|A|F|1994-12-08|1995-01-10|1994-12-27|NONE|TRUCK|y even decoys sleep. slyly 9057|85561|3086|4|17|26291.52|0.06|0.00|A|F|1995-03-03|1994-12-29|1995-03-29|NONE|SHIP|ndencies use carefully along 9057|14681|2185|5|17|27126.56|0.05|0.06|R|F|1995-01-22|1995-01-16|1995-02-12|COLLECT COD|FOB|carefully regular frets. furiously even a 9057|132402|2403|6|9|12909.60|0.07|0.06|R|F|1994-12-30|1994-12-27|1995-01-07|DELIVER IN PERSON|REG AIR| after the slyly bold accounts a 9057|75919|8427|7|48|90955.68|0.01|0.05|R|F|1995-02-04|1994-12-31|1995-02-19|DELIVER IN PERSON|FOB| maintain slyly. ca 9058|158834|3865|1|41|77606.03|0.07|0.08|A|F|1993-09-11|1993-09-25|1993-09-17|COLLECT COD|SHIP|uffily even instruction 9058|122910|7935|2|2|3865.82|0.08|0.00|A|F|1993-09-02|1993-09-26|1993-10-01|TAKE BACK RETURN|AIR|ely. courts are? furiou 9059|152748|5264|1|50|90037.00|0.06|0.03|N|O|1996-08-11|1996-09-17|1996-09-02|NONE|FOB|e furiously close ideas. s 9059|31290|6297|2|20|24425.80|0.02|0.01|N|O|1996-11-22|1996-10-06|1996-11-23|TAKE BACK RETURN|MAIL|usly pending packages. platelets a 9059|89746|2255|3|29|50336.46|0.10|0.03|N|O|1996-09-06|1996-10-14|1996-09-10|NONE|TRUCK| the even requests. slyly 9059|153935|1481|4|24|47734.32|0.05|0.08|N|O|1996-09-23|1996-10-18|1996-10-03|TAKE BACK RETURN|AIR|thy packages. blithely final packages haggl 9059|178283|5835|5|6|8167.68|0.00|0.04|N|O|1996-11-25|1996-10-15|1996-12-06|TAKE BACK RETURN|SHIP|ironic requests haggle. furiously pending 9060|166475|8992|1|11|16956.17|0.10|0.02|N|O|1996-06-21|1996-07-23|1996-07-02|COLLECT COD|REG AIR|luffily regular packages wake furiously 9060|131051|1052|2|12|12984.60|0.06|0.02|N|O|1996-09-02|1996-07-11|1996-09-07|NONE|MAIL| special courts. furiously even packages sl 9060|160989|8538|3|10|20499.80|0.04|0.07|N|O|1996-08-21|1996-08-01|1996-09-01|NONE|FOB|es cajole slyly unusual requests. slyly bol 9061|181110|6147|1|23|27395.53|0.00|0.08|N|O|1996-03-31|1996-02-27|1996-04-25|NONE|TRUCK|y express requests. furiously si 9061|110391|7925|2|24|33633.36|0.04|0.02|N|O|1996-04-08|1996-03-15|1996-05-02|NONE|RAIL|ic theodolites. 
regular, ir 9062|159691|7237|1|34|59523.46|0.09|0.03|R|F|1995-03-28|1995-05-07|1995-04-25|DELIVER IN PERSON|SHIP| regular theod 9062|125314|5315|2|13|17411.03|0.06|0.07|N|F|1995-06-08|1995-05-26|1995-06-22|NONE|SHIP|ckages are quickly quickly specia 9062|148561|6104|3|9|14486.04|0.04|0.07|N|O|1995-07-06|1995-05-24|1995-07-21|COLLECT COD|FOB|counts haggle someti 9062|54927|4928|4|46|86568.32|0.04|0.04|A|F|1995-03-22|1995-04-22|1995-04-03|TAKE BACK RETURN|MAIL|heodolites d 9062|144289|1832|5|50|66664.00|0.08|0.01|R|F|1995-03-28|1995-05-18|1995-04-23|TAKE BACK RETURN|AIR|ly unusual deposits cajole 9062|64034|6541|6|5|4990.15|0.00|0.03|R|F|1995-04-02|1995-06-02|1995-04-20|NONE|RAIL|ave to wake bli 9063|174926|2478|1|31|62028.52|0.06|0.06|N|O|1997-06-05|1997-04-29|1997-06-27|COLLECT COD|FOB|furiously requests. fina 9088|174809|7327|1|49|92306.20|0.09|0.00|A|F|1994-09-14|1994-08-30|1994-10-03|TAKE BACK RETURN|RAIL|ess deposits sleep always. carefu 9088|13094|8097|2|42|42297.78|0.02|0.02|A|F|1994-10-14|1994-09-06|1994-11-01|COLLECT COD|SHIP|ts integrate carefully according 9088|68532|3545|3|31|46516.43|0.09|0.08|A|F|1994-10-18|1994-09-10|1994-10-25|TAKE BACK RETURN|FOB|o the theodolites. pending foxes 9088|7666|167|4|13|20457.58|0.02|0.08|R|F|1994-09-20|1994-08-17|1994-10-06|NONE|REG AIR|yers. furiously ironic instru 9088|34183|1693|5|39|43570.02|0.08|0.02|R|F|1994-08-10|1994-08-31|1994-08-17|COLLECT COD|TRUCK|after the final packag 9089|104735|9756|1|28|48712.44|0.06|0.02|R|F|1993-06-23|1993-08-05|1993-07-17|COLLECT COD|SHIP|s integrate 9089|76004|8512|2|8|7840.00|0.05|0.08|A|F|1993-06-14|1993-07-31|1993-07-05|DELIVER IN PERSON|TRUCK| blithely even theodoli 9090|114007|9030|1|10|10210.00|0.10|0.05|N|O|1997-03-15|1997-02-14|1997-03-25|NONE|REG AIR|grate final, regul 9091|91081|3591|1|4|4288.32|0.04|0.06|R|F|1993-12-06|1994-01-08|1993-12-24|TAKE BACK RETURN|FOB|are across the p 9091|195833|3391|2|48|92583.84|0.09|0.01|R|F|1993-12-22|1994-01-29|1994-01-14|COLLECT COD|REG AIR|nts. furiously regular accounts 9091|155340|5341|3|36|50232.24|0.01|0.03|A|F|1993-12-12|1993-12-24|1994-01-08|DELIVER IN PERSON|TRUCK|ajole slyly 9091|194311|4312|4|18|25295.58|0.10|0.02|R|F|1994-02-14|1994-01-26|1994-02-19|COLLECT COD|SHIP|t the quickly even 9091|179349|6901|5|25|35708.50|0.01|0.00|A|F|1994-03-01|1993-12-25|1994-03-23|DELIVER IN PERSON|TRUCK|ess ideas boost 9091|115378|401|6|26|36227.62|0.06|0.04|R|F|1993-12-05|1994-02-02|1993-12-10|NONE|TRUCK|c requests above 9092|99552|7080|1|19|29479.45|0.03|0.07|A|F|1994-08-10|1994-09-15|1994-08-15|COLLECT COD|RAIL| regular packages. 9092|167146|2179|2|19|23049.66|0.04|0.01|R|F|1994-11-10|1994-10-14|1994-11-24|NONE|RAIL|fily above the final acco 9092|12987|5489|3|33|62699.34|0.03|0.05|R|F|1994-10-22|1994-10-08|1994-11-10|DELIVER IN PERSON|MAIL|ly final pinto beans sleep fluffily 9092|70885|5900|4|14|25982.32|0.03|0.01|A|F|1994-09-29|1994-10-03|1994-10-29|TAKE BACK RETURN|FOB| even packages wake a 9092|161570|9119|5|11|17947.27|0.05|0.04|R|F|1994-10-10|1994-09-14|1994-10-30|TAKE BACK RETURN|AIR|g to the quickly ironic asympt 9092|92123|7142|6|15|16726.80|0.00|0.08|A|F|1994-09-19|1994-09-17|1994-09-30|TAKE BACK RETURN|RAIL|lar accounts wake above the carefully exp 9092|105725|746|7|37|64036.64|0.05|0.00|A|F|1994-08-01|1994-09-25|1994-08-05|COLLECT COD|AIR|quests engage blithely across the bold t 9093|112141|9675|1|19|21909.66|0.02|0.02|N|O|1996-03-25|1996-02-11|1996-04-20|TAKE BACK RETURN|SHIP|riously bold theodolites? 
dep 9094|57706|5222|1|32|53238.40|0.04|0.03|N|O|1998-07-09|1998-08-19|1998-07-30|TAKE BACK RETURN|MAIL|y even theodolites sleep 9095|27289|9792|1|43|52300.04|0.01|0.00|N|O|1995-09-05|1995-08-30|1995-09-16|COLLECT COD|MAIL| sleep furiously express de 9120|156720|1751|1|5|8883.60|0.00|0.07|A|F|1992-08-03|1992-07-26|1992-08-21|COLLECT COD|MAIL|onic dinos. final packages cajo 9120|191829|1830|2|4|7683.28|0.06|0.04|A|F|1992-09-09|1992-08-04|1992-10-02|TAKE BACK RETURN|TRUCK|ld foxes eat above the regular, eve 9120|66777|1790|3|24|41850.48|0.03|0.04|R|F|1992-08-05|1992-08-18|1992-08-08|COLLECT COD|AIR|according to the quickly bold orbits 9120|104713|2244|4|21|36071.91|0.07|0.02|A|F|1992-07-03|1992-08-14|1992-07-16|COLLECT COD|TRUCK|riously thin packa 9120|102057|7078|5|15|15885.75|0.02|0.07|A|F|1992-10-04|1992-09-12|1992-10-27|NONE|SHIP| sauternes integrate. unusual, ironic ac 9120|59694|4705|6|22|36381.18|0.02|0.04|A|F|1992-07-19|1992-08-29|1992-07-27|DELIVER IN PERSON|RAIL|iously regular foxes must have to de 9120|62190|7203|7|6|6913.14|0.04|0.00|A|F|1992-08-05|1992-09-04|1992-08-14|COLLECT COD|FOB|aringly even instructions 9121|15157|160|1|49|52535.35|0.03|0.07|N|O|1996-07-24|1996-08-04|1996-08-01|NONE|AIR|ly above the qu 9121|140523|5552|2|30|46905.60|0.02|0.07|N|O|1996-10-31|1996-08-15|1996-11-20|DELIVER IN PERSON|RAIL|ld foxes. blithely ironic wa 9122|191935|9493|1|40|81077.20|0.06|0.00|N|O|1997-01-30|1997-02-10|1997-02-09|DELIVER IN PERSON|SHIP|ven requests are furio 9122|49033|9034|2|42|41245.26|0.03|0.07|N|O|1997-04-01|1997-01-19|1997-04-28|DELIVER IN PERSON|SHIP|its. carefu 9122|94903|7413|3|16|30366.40|0.06|0.04|N|O|1997-03-06|1997-01-24|1997-03-25|TAKE BACK RETURN|SHIP|uctions integrate busily along 9122|21812|1813|4|29|50280.49|0.04|0.07|N|O|1997-02-23|1997-02-26|1997-03-07|NONE|SHIP|instructions. regular accou 9122|177526|2561|5|28|44898.56|0.09|0.01|N|O|1997-03-22|1997-02-09|1997-04-20|NONE|TRUCK|y silent accounts against the dependenc 9123|156933|4479|1|5|9949.65|0.05|0.01|R|F|1993-07-07|1993-08-16|1993-07-29|NONE|AIR|lar, bold accounts use. 9123|162219|4736|2|32|40998.72|0.02|0.00|A|F|1993-08-04|1993-09-01|1993-08-27|NONE|FOB|ess deposits cajole a 9123|5727|728|3|7|11429.04|0.00|0.06|R|F|1993-07-20|1993-09-05|1993-08-01|DELIVER IN PERSON|AIR|ronic, bold deposits. 9123|52698|7709|4|2|3301.38|0.10|0.01|A|F|1993-08-10|1993-09-17|1993-08-31|COLLECT COD|AIR|sits. slyly express theodolites sno 9124|169074|1591|1|16|18289.12|0.04|0.07|N|O|1995-12-24|1996-01-10|1996-01-07|COLLECT COD|AIR|side of the fluffily even asym 9124|24359|9364|2|3|3850.05|0.04|0.02|N|O|1995-10-28|1995-12-13|1995-10-31|COLLECT COD|MAIL|to haggle blithely. blithely careful pint 9124|186459|6460|3|46|71090.70|0.03|0.08|N|O|1995-12-28|1995-12-08|1996-01-15|TAKE BACK RETURN|MAIL|ully bold packages. 9124|2555|5056|4|2|2915.10|0.07|0.02|N|O|1995-12-10|1995-12-31|1995-12-12|TAKE BACK RETURN|FOB| blithe packag 9124|134598|9625|5|19|31019.21|0.09|0.03|N|O|1995-11-13|1995-11-28|1995-12-05|TAKE BACK RETURN|SHIP|totes. blithely even accoun 9124|183982|9019|6|21|43385.58|0.07|0.03|N|O|1995-12-27|1995-12-16|1996-01-03|DELIVER IN PERSON|TRUCK|s thrash. fluffily busy acco 9125|188758|1277|1|5|9233.75|0.07|0.05|N|O|1998-07-11|1998-07-31|1998-07-21|TAKE BACK RETURN|MAIL|unts. bold, regular accounts wake. package 9125|46941|1950|2|8|15103.52|0.06|0.06|N|O|1998-08-01|1998-07-28|1998-08-25|TAKE BACK RETURN|TRUCK|lithely regular packa 9125|123102|5615|3|14|15751.40|0.08|0.07|N|O|1998-06-05|1998-08-05|1998-06-11|TAKE BACK RETURN|AIR|s. 
furiously regular platelets after the fi 9125|170828|5863|4|42|79750.44|0.05|0.04|N|O|1998-09-07|1998-08-20|1998-09-11|TAKE BACK RETURN|SHIP|lent theod 9125|90075|2585|5|11|11715.77|0.03|0.02|N|O|1998-06-06|1998-08-27|1998-06-08|COLLECT COD|REG AIR|efully express ex 9125|88097|5622|6|25|27127.25|0.09|0.03|N|O|1998-09-11|1998-07-30|1998-09-29|TAKE BACK RETURN|MAIL|nto beans. blithely spe 9125|37036|9540|7|26|25298.78|0.05|0.02|N|O|1998-07-16|1998-08-10|1998-07-18|DELIVER IN PERSON|SHIP| packages doubt carefully. regular 9126|58481|3492|1|28|40305.44|0.05|0.03|R|F|1995-01-05|1995-02-03|1995-01-26|TAKE BACK RETURN|SHIP|accounts sleep carefully. carefully final r 9126|73598|6106|2|40|62863.60|0.02|0.02|A|F|1995-01-03|1995-02-14|1995-01-17|TAKE BACK RETURN|REG AIR|are blithel 9127|199675|9676|1|23|40817.41|0.05|0.00|N|O|1995-12-05|1995-11-17|1995-12-19|TAKE BACK RETURN|SHIP| cajole final t 9127|130954|5981|2|33|65503.35|0.08|0.02|N|O|1995-11-09|1995-12-08|1995-11-10|COLLECT COD|SHIP|ly alongside of the 9152|144506|7021|1|20|31010.00|0.03|0.00|A|F|1993-08-29|1993-10-18|1993-09-03|DELIVER IN PERSON|RAIL|uickly. blithely even de 9152|100901|3412|2|8|15215.20|0.00|0.06|R|F|1993-11-14|1993-11-02|1993-12-13|TAKE BACK RETURN|MAIL|nto beans according to the care 9152|37773|5283|3|32|54744.64|0.02|0.04|R|F|1993-09-03|1993-11-04|1993-09-18|TAKE BACK RETURN|REG AIR|old packages engage blithely. fur 9152|34519|9526|4|46|66861.46|0.07|0.01|A|F|1993-09-06|1993-10-02|1993-09-28|NONE|REG AIR| alongside of the express accounts a 9153|99051|4070|1|41|43052.05|0.02|0.04|N|O|1997-09-08|1997-10-05|1997-09-24|DELIVER IN PERSON|RAIL|furiously ironic asymptotes. idly 9153|148432|5975|2|29|42932.47|0.00|0.08|N|O|1997-07-27|1997-08-22|1997-08-03|DELIVER IN PERSON|TRUCK| requests: bold dep 9153|82118|7135|3|5|5500.55|0.07|0.00|N|O|1997-08-10|1997-09-17|1997-09-02|TAKE BACK RETURN|MAIL|t ideas after th 9153|155396|7912|4|40|58055.60|0.08|0.00|N|O|1997-10-13|1997-08-21|1997-10-15|NONE|MAIL| blithely re 9153|127491|7492|5|18|27332.82|0.08|0.01|N|O|1997-09-13|1997-10-11|1997-09-14|COLLECT COD|FOB|y above the carefully regu 9153|140134|135|6|5|5870.65|0.03|0.03|N|O|1997-08-14|1997-10-14|1997-09-02|COLLECT COD|SHIP|beans haggl 9154|86513|9022|1|45|67477.95|0.06|0.06|N|O|1997-09-24|1997-08-11|1997-10-14|NONE|FOB|nal, careful instructions wake carefully. b 9154|14212|1716|2|7|7883.47|0.09|0.05|N|O|1997-09-28|1997-09-21|1997-10-27|TAKE BACK RETURN|SHIP|nts cajole near 9154|53311|5817|3|46|58158.26|0.04|0.01|N|O|1997-07-18|1997-08-22|1997-07-31|NONE|MAIL|inal depths. blithely quick deposits n 9154|196620|4178|4|31|53215.22|0.00|0.00|N|O|1997-08-28|1997-07-29|1997-09-07|NONE|AIR|final warthogs. slyly pending request 9154|176947|9465|5|12|24287.28|0.00|0.00|N|O|1997-08-20|1997-07-26|1997-09-17|NONE|RAIL|es. requests print furiously instead of th 9154|140238|2753|6|40|51129.20|0.03|0.02|N|O|1997-06-24|1997-09-03|1997-07-24|COLLECT COD|TRUCK|t haggle bli 9154|173448|3449|7|47|71507.68|0.04|0.05|N|O|1997-07-07|1997-09-07|1997-07-25|DELIVER IN PERSON|SHIP|wake boldly above the furiousl 9155|129335|1848|1|10|13643.30|0.04|0.03|R|F|1992-11-01|1992-10-23|1992-11-23|DELIVER IN PERSON|AIR| carefully. 
final packages 9155|131031|1032|2|44|46729.32|0.02|0.02|A|F|1992-11-09|1992-11-20|1992-11-20|COLLECT COD|TRUCK|, fluffy platelets wake above the 9155|35698|8202|3|14|22871.66|0.01|0.04|A|F|1992-10-02|1992-10-21|1992-10-23|TAKE BACK RETURN|REG AIR|sits above the furiously even pack 9156|92865|7884|1|5|9289.30|0.06|0.01|A|F|1994-05-31|1994-03-20|1994-06-03|NONE|REG AIR|endencies. sl 9156|176190|1225|2|22|27856.18|0.03|0.00|R|F|1994-02-08|1994-04-01|1994-02-24|DELIVER IN PERSON|AIR|equests dete 9156|13621|6123|3|15|23019.30|0.05|0.00|A|F|1994-03-29|1994-05-01|1994-04-06|NONE|SHIP|nts. foxes cajole. ironic packages c 9156|119085|1597|4|35|38642.80|0.07|0.05|A|F|1994-05-31|1994-03-24|1994-06-20|TAKE BACK RETURN|FOB|e quickly. express, ev 9156|51687|1688|5|28|45883.04|0.03|0.01|A|F|1994-05-08|1994-04-04|1994-05-25|TAKE BACK RETURN|FOB|he carefully final deposits use slyly ca 9157|14625|4626|1|3|4618.86|0.10|0.00|A|F|1992-08-27|1992-07-26|1992-09-11|TAKE BACK RETURN|MAIL|lyly bold ideas cajole even packages. bol 9157|190856|3376|2|28|54511.80|0.09|0.08|A|F|1992-06-27|1992-08-24|1992-07-12|NONE|RAIL|ests haggle above the carefully s 9158|184272|4273|1|32|43400.64|0.06|0.05|N|F|1995-06-13|1995-06-19|1995-07-02|DELIVER IN PERSON|MAIL|g deposits. b 9158|44401|9410|2|1|1345.40|0.07|0.00|A|F|1995-05-10|1995-06-27|1995-05-24|DELIVER IN PERSON|AIR| fluffily bold deposits. blithely re 9158|42107|4612|3|33|34620.30|0.10|0.03|N|F|1995-06-02|1995-07-02|1995-06-20|TAKE BACK RETURN|SHIP|ular foxes nag even excuses. slyly regular 9159|1269|3770|1|34|39788.84|0.03|0.05|N|O|1995-08-22|1995-10-18|1995-09-04|DELIVER IN PERSON|MAIL| wake blithely 9159|117697|2720|2|8|13717.52|0.06|0.08|N|O|1995-08-16|1995-09-20|1995-09-09|DELIVER IN PERSON|AIR|accounts wake. regular accou 9159|120863|3376|3|49|92309.14|0.07|0.05|N|O|1995-09-06|1995-09-06|1995-09-28|TAKE BACK RETURN|MAIL| after the dependencies. special fray 9159|142513|2514|4|5|7777.55|0.09|0.01|N|O|1995-08-04|1995-09-19|1995-08-17|COLLECT COD|REG AIR|iously ironic theodolites; regular, re 9184|118166|3189|1|46|54471.36|0.01|0.04|N|O|1997-09-07|1997-09-30|1997-10-03|NONE|SHIP|ntegrate furiously final asy 9184|10202|5205|2|1|1112.20|0.05|0.02|N|O|1997-11-15|1997-09-08|1997-12-12|COLLECT COD|SHIP|nag carefully. sl 9184|57392|7393|3|3|4048.17|0.10|0.02|N|O|1997-08-26|1997-10-07|1997-09-08|TAKE BACK RETURN|REG AIR|ic accounts. excuses a 9185|137990|7991|1|32|64895.68|0.08|0.08|R|F|1994-07-12|1994-07-24|1994-07-20|DELIVER IN PERSON|AIR|accounts snooze 9185|183999|4000|2|14|29161.86|0.06|0.02|R|F|1994-08-19|1994-07-30|1994-09-15|TAKE BACK RETURN|REG AIR|ackages sleep blithely regular deposits 9186|54814|9825|1|48|84902.88|0.05|0.06|A|F|1992-05-10|1992-05-03|1992-05-19|TAKE BACK RETURN|SHIP|s haggle furiously b 9186|42400|4905|2|45|60408.00|0.07|0.06|R|F|1992-04-30|1992-05-10|1992-05-13|TAKE BACK RETURN|REG AIR|y regular requests. even dependencies 9186|13523|1027|3|5|7182.60|0.07|0.03|R|F|1992-07-01|1992-06-27|1992-07-03|TAKE BACK RETURN|REG AIR|yly final deposits use caref 9186|65260|5261|4|22|26955.72|0.03|0.03|R|F|1992-05-30|1992-06-01|1992-06-18|COLLECT COD|TRUCK|ts. 
dependencies along the even epitaphs 9187|25511|8014|1|45|64642.95|0.00|0.06|R|F|1994-11-21|1994-11-06|1994-12-17|TAKE BACK RETURN|FOB|ar excuses boost fur 9187|161772|9321|2|49|89854.73|0.05|0.02|R|F|1994-09-21|1994-11-04|1994-10-12|COLLECT COD|AIR|fully final 9187|96390|6391|3|24|33273.36|0.03|0.01|A|F|1994-10-30|1994-11-11|1994-11-24|TAKE BACK RETURN|MAIL|kly alongside of 9187|23585|1092|4|36|54308.88|0.00|0.01|R|F|1994-11-12|1994-09-27|1994-11-21|TAKE BACK RETURN|TRUCK|bold ideas cajole fur 9188|25258|263|1|38|44963.50|0.05|0.08|N|O|1998-01-28|1998-04-16|1998-02-12|TAKE BACK RETURN|TRUCK|ely final instructions about the 9188|100867|3378|2|13|24282.18|0.06|0.00|N|O|1998-05-24|1998-03-02|1998-06-12|NONE|AIR|r deposits haggle carefu 9188|57314|2325|3|23|29240.13|0.07|0.06|N|O|1998-04-01|1998-03-18|1998-04-21|COLLECT COD|SHIP|y express packages poach f 9188|71385|6400|4|38|51542.44|0.09|0.03|N|O|1998-03-27|1998-04-16|1998-03-31|NONE|REG AIR|tructions boost car 9188|165425|2974|5|50|74521.00|0.09|0.05|N|O|1998-02-23|1998-04-13|1998-03-23|NONE|MAIL|mptotes boost carefully q 9188|85348|365|6|2|2666.68|0.07|0.06|N|O|1998-03-10|1998-04-01|1998-04-04|TAKE BACK RETURN|MAIL|ual deposits affix around t 9189|153079|625|1|44|49811.08|0.09|0.00|N|O|1996-12-30|1997-01-12|1997-01-26|TAKE BACK RETURN|SHIP|ecial theodolit 9189|190381|382|2|27|39727.26|0.00|0.07|N|O|1997-03-16|1997-01-25|1997-03-27|DELIVER IN PERSON|SHIP|kages along the furiously pending e 9190|79688|7210|1|30|50030.40|0.02|0.07|N|O|1998-05-01|1998-05-14|1998-05-12|NONE|RAIL|fily expres 9190|104491|9512|2|27|40378.23|0.08|0.02|N|O|1998-04-12|1998-05-25|1998-04-28|DELIVER IN PERSON|MAIL|e across the car 9190|164225|6742|3|33|42544.26|0.09|0.04|N|O|1998-05-08|1998-05-20|1998-06-01|COLLECT COD|MAIL|egular foxes impress quickly slyly ir 9190|122380|4893|4|44|61704.72|0.01|0.05|N|O|1998-04-29|1998-05-19|1998-05-29|TAKE BACK RETURN|FOB| along the final, ironic ac 9190|40578|5587|5|40|60742.80|0.01|0.06|N|O|1998-03-11|1998-04-06|1998-04-10|COLLECT COD|RAIL|riously ironic package 9190|188861|3898|6|24|46796.64|0.05|0.00|N|O|1998-04-11|1998-04-16|1998-04-19|TAKE BACK RETURN|FOB|t blithely upon the carefu 9191|41105|6114|1|17|17783.70|0.06|0.01|N|O|1996-06-17|1996-06-15|1996-07-02|DELIVER IN PERSON|MAIL|ng accounts do wake quickly 9216|132812|2813|1|43|79326.83|0.00|0.04|N|O|1995-07-27|1995-05-17|1995-08-13|TAKE BACK RETURN|MAIL|oss the quickly bold foxes wa 9216|168513|1030|2|22|34793.22|0.06|0.03|N|O|1995-07-13|1995-05-23|1995-08-11|COLLECT COD|REG AIR|y bold accounts use slyly. bold courts abov 9216|189321|6876|3|31|43719.92|0.00|0.05|N|O|1995-07-12|1995-06-13|1995-07-19|COLLECT COD|SHIP|re slyly slyly unusual accounts. 9217|114037|6549|1|26|27326.78|0.09|0.08|N|O|1997-07-30|1997-08-16|1997-08-20|DELIVER IN PERSON|RAIL|s. ironic accounts cajole 9217|5722|5723|2|5|8138.60|0.06|0.02|N|O|1997-08-22|1997-08-24|1997-08-30|NONE|RAIL|sly upon the fluffily regul 9217|153390|3391|3|34|49075.26|0.04|0.05|N|O|1997-07-06|1997-08-14|1997-07-19|TAKE BACK RETURN|AIR|ackages. b 9217|9246|1747|4|37|42743.88|0.01|0.01|N|O|1997-06-21|1997-07-14|1997-07-21|DELIVER IN PERSON|TRUCK|ts. furiously even deposits hag 9218|51243|8759|1|14|16719.36|0.10|0.00|N|O|1997-12-22|1998-02-10|1997-12-26|COLLECT COD|MAIL|deas cajole care 9218|37324|2331|2|11|13874.52|0.03|0.00|N|O|1998-03-31|1998-02-20|1998-04-02|DELIVER IN PERSON|RAIL|y ironic asymptotes. 
slyly regular pack 9219|127664|7665|1|29|49058.14|0.00|0.06|R|F|1995-01-21|1994-11-10|1995-02-02|COLLECT COD|MAIL|ular excuses above 9219|37592|2599|2|26|39769.34|0.00|0.01|R|F|1995-01-25|1994-11-23|1995-02-10|DELIVER IN PERSON|AIR|y even acc 9219|196140|6141|3|37|45737.18|0.02|0.02|R|F|1994-10-08|1994-12-10|1994-10-12|DELIVER IN PERSON|MAIL|yly final orbits. regular theodo 9219|124322|9347|4|19|25580.08|0.01|0.07|A|F|1994-11-13|1994-12-07|1994-12-13|NONE|RAIL|ies. furiously regular de 9219|73562|8577|5|28|42995.68|0.10|0.06|R|F|1994-12-26|1994-12-18|1995-01-09|COLLECT COD|AIR|arefully final pinto beans cajole furio 9220|112446|4958|1|1|1458.44|0.07|0.04|N|O|1998-07-06|1998-07-06|1998-08-03|DELIVER IN PERSON|AIR|s haggle among the furio 9221|188455|974|1|43|66368.35|0.00|0.02|R|F|1995-03-30|1995-03-26|1995-04-15|NONE|REG AIR|carefully. furiously pending courts 9221|157932|5478|2|41|81587.13|0.09|0.06|R|F|1995-02-19|1995-02-27|1995-02-25|TAKE BACK RETURN|RAIL|cross the furiously regular sentiments. unu 9221|149585|2100|3|49|80094.42|0.08|0.04|R|F|1995-03-20|1995-02-11|1995-04-03|NONE|MAIL|riously alongside of 9221|72850|5358|4|43|78382.55|0.04|0.06|A|F|1995-04-16|1995-03-22|1995-04-29|TAKE BACK RETURN|MAIL| ironic braids wake furiously unusua 9222|23687|3688|1|38|61205.84|0.03|0.08|A|F|1994-04-24|1994-03-30|1994-05-03|TAKE BACK RETURN|AIR|nusual requests wake behind th 9222|94739|9758|2|49|84952.77|0.03|0.01|A|F|1994-03-07|1994-05-02|1994-03-25|NONE|TRUCK| ironic theodolites. 9222|70423|424|3|50|69671.00|0.08|0.01|A|F|1994-02-22|1994-03-24|1994-03-23|NONE|FOB|the carefully reg 9222|194568|2126|4|1|1662.56|0.05|0.02|A|F|1994-02-23|1994-03-17|1994-03-12|NONE|SHIP|ily regular ideas. fluffily ironic depo 9222|97026|9536|5|26|26598.52|0.06|0.05|A|F|1994-04-30|1994-03-31|1994-05-11|TAKE BACK RETURN|REG AIR|ar, final de 9223|72012|9534|1|5|4920.05|0.08|0.08|N|O|1997-10-24|1997-09-22|1997-11-06|NONE|TRUCK|al packages across 9223|101537|6558|2|37|56925.61|0.08|0.08|N|O|1997-11-10|1997-10-31|1997-11-28|DELIVER IN PERSON|TRUCK|stealthy ideas. blithely unusual 9223|117570|5104|3|43|68265.51|0.02|0.01|N|O|1997-11-02|1997-10-02|1997-11-26|TAKE BACK RETURN|SHIP|, unusual ideas haggle theodolites. sl 9223|195301|2859|4|36|50266.80|0.03|0.04|N|O|1997-09-27|1997-10-19|1997-10-16|DELIVER IN PERSON|AIR| haggle about the 9223|20939|8446|5|3|5579.79|0.03|0.02|N|O|1997-09-28|1997-09-22|1997-10-13|NONE|MAIL|ly dependencies 9223|107044|7045|6|16|16816.64|0.02|0.00|N|O|1997-09-27|1997-10-12|1997-10-04|TAKE BACK RETURN|TRUCK|as are quic 9223|73575|3576|7|26|40262.82|0.03|0.02|N|O|1997-10-08|1997-09-12|1997-10-24|NONE|RAIL|ly ironic packages wake after the s 9248|165458|7975|1|22|33515.90|0.07|0.00|R|F|1995-04-24|1995-02-08|1995-05-19|DELIVER IN PERSON|REG AIR|ly even theodolites? furiously pend 9248|36543|1550|2|8|11836.32|0.02|0.08|R|F|1995-01-08|1995-03-21|1995-01-14|NONE|AIR|telets use along the slyly re 9248|172293|2294|3|26|35497.54|0.08|0.08|A|F|1995-04-14|1995-01-29|1995-05-10|COLLECT COD|MAIL|iously. silent, express instructions dete 9248|124711|7224|4|24|41657.04|0.07|0.03|A|F|1995-01-27|1995-03-11|1995-02-12|COLLECT COD|RAIL|. furiously f 9249|17709|211|1|36|58561.20|0.02|0.04|A|F|1993-06-03|1993-06-28|1993-06-29|DELIVER IN PERSON|FOB|efully final accounts cajole p 9249|25832|3339|2|36|63281.88|0.09|0.08|R|F|1993-06-30|1993-07-29|1993-07-07|COLLECT COD|FOB|onic dolphins. dependencies us 9249|151415|6446|3|15|21996.15|0.02|0.01|R|F|1993-05-29|1993-07-16|1993-06-26|COLLECT COD|SHIP|ly ironic foxes are slyly. 
ca 9249|30963|3467|4|28|53030.88|0.00|0.01|A|F|1993-05-23|1993-06-22|1993-05-26|NONE|MAIL|ts wake requests. 9249|188804|1323|5|15|28392.00|0.07|0.04|R|F|1993-06-01|1993-06-29|1993-06-25|NONE|FOB|le unusual deposits. blithely regular pla 9250|85178|195|1|11|12794.87|0.02|0.08|R|F|1992-08-17|1992-09-14|1992-08-28|TAKE BACK RETURN|TRUCK|ically. slyly expr 9250|184894|7413|2|20|39577.80|0.09|0.05|A|F|1992-12-08|1992-09-18|1992-12-26|COLLECT COD|REG AIR|es cajole across 9250|110095|5118|3|15|16576.35|0.02|0.08|R|F|1992-09-17|1992-09-30|1992-10-05|NONE|TRUCK|ut the deposits. final, regular packages 9250|149927|7470|4|12|23723.04|0.07|0.01|A|F|1992-11-30|1992-10-31|1992-12-08|TAKE BACK RETURN|SHIP|bout the f 9251|10288|2790|1|29|34750.12|0.03|0.06|R|F|1993-12-19|1993-10-12|1994-01-04|TAKE BACK RETURN|RAIL|sts. final warhorses about the blithely s 9252|91328|8856|1|24|31663.68|0.08|0.03|N|O|1998-02-18|1998-02-07|1998-03-06|TAKE BACK RETURN|SHIP|side of the care 9252|147473|9988|2|9|13684.23|0.08|0.02|N|O|1998-03-17|1998-02-05|1998-04-16|DELIVER IN PERSON|FOB|usly. accounts are sl 9252|169048|4081|3|23|25691.92|0.01|0.02|N|O|1998-03-25|1998-02-14|1998-04-17|DELIVER IN PERSON|FOB|furiously 9253|172435|2436|1|34|51252.62|0.10|0.07|A|F|1992-03-03|1992-04-25|1992-03-09|COLLECT COD|RAIL|hely across the carefull 9253|111547|1548|2|44|68575.76|0.03|0.06|R|F|1992-04-22|1992-04-09|1992-05-09|COLLECT COD|AIR| regular packages sleep slyly; express de 9253|154658|7174|3|26|44528.90|0.09|0.06|A|F|1992-02-11|1992-03-28|1992-03-09|COLLECT COD|TRUCK|regular notornis. iro 9253|128948|6485|4|30|59308.20|0.03|0.01|R|F|1992-05-10|1992-03-09|1992-06-07|TAKE BACK RETURN|AIR|rate thinly quickly ironic 9253|18312|814|5|23|28297.13|0.00|0.02|A|F|1992-02-08|1992-03-27|1992-02-19|TAKE BACK RETURN|RAIL|deposits print 9253|160065|66|6|42|47252.52|0.08|0.03|A|F|1992-02-21|1992-04-14|1992-03-21|TAKE BACK RETURN|REG AIR| patterns play sometimes 9254|117383|7384|1|6|8402.28|0.05|0.03|R|F|1993-06-29|1993-07-11|1993-07-13|TAKE BACK RETURN|TRUCK|e carefully acc 9254|47579|2588|2|16|24425.12|0.05|0.01|A|F|1993-08-20|1993-07-11|1993-09-10|COLLECT COD|MAIL| the silently final instructi 9254|59159|6675|3|16|17890.40|0.03|0.03|R|F|1993-06-14|1993-07-02|1993-06-25|TAKE BACK RETURN|RAIL|ly above the instructions. 9254|77847|355|4|3|5474.52|0.07|0.08|R|F|1993-06-07|1993-07-03|1993-06-16|NONE|REG AIR|y fluffily spec 9255|91102|6121|1|8|8744.80|0.06|0.03|N|O|1996-02-29|1996-01-26|1996-03-19|DELIVER IN PERSON|FOB|tes. pending, ir 9255|137229|2256|2|49|62044.78|0.08|0.06|N|O|1996-03-26|1996-02-06|1996-04-10|DELIVER IN PERSON|MAIL|sts. packages integrate slyly. furiou 9255|191504|4024|3|36|57438.00|0.10|0.08|N|O|1995-12-15|1996-03-03|1995-12-27|COLLECT COD|SHIP|onic requests. slyly regul 9255|5192|5193|4|16|17555.04|0.08|0.04|N|O|1996-02-16|1996-02-28|1996-02-17|TAKE BACK RETURN|TRUCK| ironic accounts wake caref 9280|42165|4670|1|30|33214.80|0.03|0.05|N|O|1998-08-21|1998-07-31|1998-08-27|COLLECT COD|TRUCK|arefully carefully spe 9280|60733|734|2|21|35568.33|0.09|0.08|N|O|1998-09-19|1998-08-24|1998-09-22|NONE|MAIL|o beans. fluffily ironic packa 9280|137366|7367|3|7|9823.52|0.05|0.02|N|O|1998-08-17|1998-07-20|1998-09-12|DELIVER IN PERSON|FOB|nic ideas. unusual, regular pinto beans 9280|96638|9148|4|49|80096.87|0.10|0.01|N|O|1998-09-02|1998-09-10|1998-09-10|NONE|TRUCK|he dinos. 
regular ac 9280|182392|2393|5|36|53078.04|0.10|0.07|N|O|1998-09-07|1998-07-18|1998-09-14|COLLECT COD|REG AIR|ress ideas are theodolit 9280|57572|5088|6|14|21413.98|0.07|0.04|N|O|1998-09-21|1998-08-16|1998-10-15|DELIVER IN PERSON|AIR|l packages. carefully silent hockey player 9280|11281|8785|7|31|36960.68|0.03|0.05|N|O|1998-07-11|1998-08-02|1998-07-17|TAKE BACK RETURN|MAIL|ending, busy courts p 9281|189089|9090|1|4|4712.32|0.05|0.05|R|F|1992-04-24|1992-05-20|1992-05-16|TAKE BACK RETURN|RAIL|somas. quickly expr 9281|10943|5946|2|22|40786.68|0.01|0.07|A|F|1992-03-27|1992-04-22|1992-04-23|TAKE BACK RETURN|REG AIR|fix alongside 9281|109134|6665|3|26|29721.38|0.00|0.06|A|F|1992-04-08|1992-04-17|1992-04-15|COLLECT COD|MAIL| use above the sl 9281|13657|8660|4|48|75391.20|0.03|0.05|A|F|1992-06-18|1992-03-30|1992-06-21|TAKE BACK RETURN|FOB|ully unusual accounts detect 9281|120094|5119|5|26|28966.34|0.04|0.01|A|F|1992-05-09|1992-05-21|1992-06-01|NONE|TRUCK| slyly along the blithely express accou 9281|6744|6745|6|27|44569.98|0.04|0.00|R|F|1992-03-11|1992-04-13|1992-03-20|COLLECT COD|TRUCK|deposits nag abov 9281|135164|7678|7|10|11991.60|0.09|0.02|R|F|1992-02-28|1992-05-05|1992-03-21|NONE|FOB|c pinto beans. bold instruc 9282|96895|1914|1|19|35945.91|0.05|0.03|N|O|1996-02-12|1996-01-19|1996-02-23|COLLECT COD|REG AIR|telets. slyly e 9283|157127|2158|1|5|5920.60|0.02|0.08|R|F|1994-07-15|1994-07-10|1994-07-29|DELIVER IN PERSON|RAIL|s theodolites cajole. regular ideas wak 9284|131824|9364|1|49|90935.18|0.06|0.00|R|F|1994-08-20|1994-09-08|1994-09-07|TAKE BACK RETURN|AIR|ecial packages ag 9285|37702|7703|1|13|21316.10|0.04|0.02|A|F|1994-02-14|1994-03-19|1994-03-07|COLLECT COD|MAIL|carefully final pinto beans ca 9285|54398|4399|2|50|67619.50|0.06|0.07|A|F|1994-01-29|1994-02-15|1994-02-17|TAKE BACK RETURN|RAIL| quickly after the final theodolit 9285|51665|4171|3|21|33949.86|0.01|0.01|R|F|1994-03-01|1994-04-03|1994-03-10|TAKE BACK RETURN|TRUCK|heodolites. blithely regular requ 9286|127938|451|1|10|19659.30|0.01|0.07|R|F|1995-05-27|1995-06-22|1995-06-06|TAKE BACK RETURN|FOB|y against the brave ideas. carefully bold i 9286|38242|5752|2|8|9441.92|0.07|0.05|N|O|1995-07-11|1995-05-11|1995-07-15|COLLECT COD|FOB|e blithely ironic foxes 9286|116907|1930|3|11|21162.90|0.07|0.05|N|O|1995-06-27|1995-06-27|1995-07-17|NONE|AIR|ets. fluffily final accounts 9286|54679|4680|4|21|34307.07|0.07|0.03|N|F|1995-05-25|1995-05-27|1995-06-20|TAKE BACK RETURN|MAIL|ckages doubt above the 9286|183652|3653|5|11|19092.15|0.06|0.08|N|O|1995-07-22|1995-05-24|1995-08-07|COLLECT COD|RAIL|slyly slyly special foxes. fluffily bold a 9286|34485|4486|6|12|17033.76|0.07|0.05|N|O|1995-06-21|1995-06-22|1995-07-02|TAKE BACK RETURN|REG AIR| deposits. sl 9286|177796|5348|7|34|63708.86|0.04|0.07|R|F|1995-04-09|1995-06-01|1995-04-26|DELIVER IN PERSON|TRUCK|express requests wake slyly ruth 9287|824|825|1|16|27597.12|0.07|0.00|N|O|1998-03-25|1998-02-10|1998-04-16|DELIVER IN PERSON|FOB|ironic accounts sl 9312|44079|9088|1|9|9207.63|0.01|0.04|A|F|1992-07-21|1992-07-28|1992-08-04|NONE|SHIP|t instructions. blithely final do 9312|24092|6595|2|19|19305.71|0.05|0.01|A|F|1992-07-02|1992-07-09|1992-07-10|DELIVER IN PERSON|TRUCK|es. regular, 9312|99355|4374|3|49|66363.15|0.02|0.08|R|F|1992-07-06|1992-08-16|1992-07-17|TAKE BACK RETURN|AIR|efully even deco 9312|58326|3337|4|35|44951.20|0.07|0.03|R|F|1992-05-21|1992-07-24|1992-05-30|NONE|AIR|deposits. 
never spec 9312|77377|4899|5|28|37922.36|0.08|0.05|A|F|1992-06-08|1992-07-20|1992-06-16|TAKE BACK RETURN|SHIP|fily final realms are. fluf 9313|11918|9422|1|37|67706.67|0.09|0.03|N|O|1996-07-26|1996-06-10|1996-08-18|DELIVER IN PERSON|SHIP|ously regular 9313|91264|6283|2|49|61507.74|0.01|0.08|N|O|1996-06-14|1996-06-26|1996-06-20|TAKE BACK RETURN|REG AIR| ideas doze furiously. bravely ironic pa 9313|64570|4571|3|16|24553.12|0.10|0.04|N|O|1996-04-29|1996-06-12|1996-05-25|DELIVER IN PERSON|AIR|are bold deposits. special th 9313|11454|6457|4|22|30039.90|0.05|0.06|N|O|1996-04-25|1996-06-11|1996-04-26|NONE|REG AIR| even instructions 9313|103476|8497|5|29|42904.63|0.01|0.07|N|O|1996-06-24|1996-06-16|1996-07-13|COLLECT COD|FOB|cross the furiously regula 9313|105107|2638|6|18|20017.80|0.01|0.06|N|O|1996-06-28|1996-06-08|1996-07-14|NONE|RAIL|ep ironic platelets. dogge 9314|133242|782|1|12|15302.88|0.07|0.03|R|F|1995-03-26|1995-04-15|1995-04-10|TAKE BACK RETURN|SHIP|riously final accounts wake furiously ca 9315|46059|3572|1|11|11055.55|0.01|0.08|N|O|1998-04-21|1998-05-11|1998-05-19|NONE|REG AIR|ly according to the special, regular 9316|184937|4938|1|27|54592.11|0.06|0.06|N|O|1995-10-17|1995-12-03|1995-10-27|NONE|SHIP|y express pinto beans ar 9316|26760|9263|2|32|53976.32|0.05|0.06|N|O|1995-09-26|1995-12-04|1995-09-29|NONE|MAIL|heodolites wake exp 9316|144110|9139|3|25|28852.75|0.03|0.01|N|O|1995-12-12|1995-11-18|1995-12-24|DELIVER IN PERSON|TRUCK|to the blithely special deposits. blithely 9316|49339|6852|4|46|59263.18|0.05|0.05|N|O|1995-11-23|1995-12-12|1995-12-22|TAKE BACK RETURN|RAIL|cording to the furious 9316|29162|4167|5|36|39281.76|0.06|0.08|N|O|1995-12-09|1995-12-04|1996-01-08|DELIVER IN PERSON|RAIL|ss, special d 9317|2957|5458|1|8|14879.60|0.10|0.01|A|F|1994-05-08|1994-05-05|1994-05-12|TAKE BACK RETURN|REG AIR|y final deposits nag daringly ab 9317|91028|8556|2|39|39741.78|0.06|0.00|R|F|1994-04-04|1994-05-11|1994-04-11|NONE|MAIL|cial requests haggle blithely. i 9317|93723|3724|3|28|48068.16|0.00|0.07|R|F|1994-03-17|1994-06-08|1994-04-02|COLLECT COD|AIR|lar accounts are furi 9317|199575|4614|4|15|25118.55|0.00|0.00|A|F|1994-05-24|1994-05-30|1994-05-25|COLLECT COD|FOB|e thin waters. furiously regular dep 9318|136377|8891|1|10|14133.70|0.02|0.07|R|F|1992-11-18|1992-09-15|1992-11-20|DELIVER IN PERSON|RAIL|according to the 9318|83046|8063|2|44|45277.76|0.06|0.05|A|F|1992-08-18|1992-10-23|1992-08-22|NONE|FOB|s. stealthy theodolit 9318|154010|4011|3|15|15960.15|0.04|0.00|R|F|1992-07-29|1992-10-26|1992-08-10|NONE|TRUCK|ckly carefully special packages. carefull 9319|152719|5235|1|6|10630.26|0.05|0.02|A|F|1992-10-23|1992-12-05|1992-11-10|NONE|FOB| ironic asymptotes wake slyly carefully eve 9319|143992|3993|2|31|63115.69|0.01|0.02|A|F|1993-01-03|1992-11-13|1993-02-01|TAKE BACK RETURN|SHIP|the quickl 9319|73520|8535|3|20|29870.40|0.09|0.05|R|F|1992-10-05|1992-12-03|1992-10-23|DELIVER IN PERSON|REG AIR|nt excuses. expr 9319|46396|3909|4|17|22820.63|0.06|0.03|A|F|1992-12-24|1992-10-14|1993-01-12|DELIVER IN PERSON|REG AIR|lar platelets haggle furiously? express 9344|199309|4348|1|29|40840.70|0.00|0.04|N|O|1996-03-13|1996-02-27|1996-03-22|NONE|REG AIR|ng realms nag. fi 9344|169866|7415|2|2|3871.72|0.10|0.03|N|O|1996-02-08|1996-03-03|1996-02-15|TAKE BACK RETURN|MAIL|deposits. ironic packages abo 9344|121294|6319|3|40|52611.60|0.04|0.05|N|O|1996-02-25|1996-02-21|1996-03-16|NONE|RAIL|counts sleep slyly. 
care 9344|183596|3597|4|39|65504.01|0.06|0.07|N|O|1995-12-20|1996-02-23|1995-12-21|DELIVER IN PERSON|REG AIR|nusual, pending dolphins wa 9344|97880|2899|5|44|82626.72|0.04|0.04|N|O|1995-12-18|1996-02-23|1995-12-25|TAKE BACK RETURN|FOB|ermanent requests sleep along the furiou 9344|56075|3591|6|39|40211.73|0.03|0.03|N|O|1996-03-02|1996-03-06|1996-03-27|TAKE BACK RETURN|MAIL|rts. slyly regular theo 9344|45101|2614|7|27|28244.70|0.09|0.07|N|O|1996-01-16|1996-01-20|1996-02-11|NONE|RAIL|unusual asymp 9345|197049|2088|1|20|22920.80|0.06|0.01|N|O|1996-03-07|1996-03-13|1996-03-20|COLLECT COD|RAIL|ound the express deposits ha 9345|196532|6533|2|16|26056.48|0.03|0.06|N|O|1996-03-31|1996-03-30|1996-04-26|NONE|SHIP|ickly blithely even th 9345|98436|5964|3|17|24385.31|0.02|0.05|N|O|1996-03-06|1996-04-01|1996-03-07|TAKE BACK RETURN|AIR|ely final ideas across the quickly 9345|10529|530|4|43|61899.36|0.08|0.06|N|O|1996-04-22|1996-03-21|1996-05-08|NONE|SHIP| carefully id 9345|149611|9612|5|16|26569.76|0.01|0.00|N|O|1996-03-31|1996-04-03|1996-04-13|NONE|AIR|es. special, expr 9346|179972|7524|1|44|90286.68|0.10|0.04|N|O|1996-12-22|1996-11-08|1997-01-06|NONE|MAIL|lar requests. 9346|183861|1416|2|7|13614.02|0.08|0.04|N|O|1996-12-15|1996-11-20|1996-12-20|COLLECT COD|RAIL| theodolites affix slyly about the expr 9346|142461|4|3|24|36083.04|0.03|0.04|N|O|1997-01-01|1997-01-06|1997-01-05|DELIVER IN PERSON|RAIL| deposits must have to cajol 9346|129194|1707|4|14|17124.66|0.00|0.07|N|O|1997-01-06|1996-12-06|1997-01-25|COLLECT COD|REG AIR|ld, silent dolphins cajole bli 9346|147486|5029|5|33|50604.84|0.08|0.01|N|O|1996-10-12|1996-11-16|1996-11-06|DELIVER IN PERSON|REG AIR| unusual foxes wake furiously regular de 9347|26225|8728|1|40|46048.80|0.09|0.06|R|F|1992-11-10|1992-11-03|1992-12-09|DELIVER IN PERSON|MAIL|bold instr 9347|163397|3398|2|43|62796.77|0.02|0.08|A|F|1992-11-07|1992-10-28|1992-11-15|TAKE BACK RETURN|RAIL| carefully at the furiou 9347|364|2865|3|4|5057.44|0.08|0.05|R|F|1992-10-31|1992-09-19|1992-11-17|TAKE BACK RETURN|REG AIR|ly final d 9347|104454|4455|4|46|67088.70|0.05|0.08|R|F|1992-08-31|1992-11-03|1992-09-21|NONE|MAIL|fully special ideas caj 9348|66780|4299|1|8|13974.24|0.09|0.05|N|O|1996-08-09|1996-08-23|1996-09-06|DELIVER IN PERSON|AIR|the special pack 9348|168532|8533|2|1|1600.53|0.08|0.03|N|O|1996-06-16|1996-08-28|1996-06-24|COLLECT COD|MAIL|y regular pa 9348|113796|6308|3|22|39815.38|0.04|0.03|N|O|1996-07-24|1996-08-11|1996-08-11|NONE|TRUCK|unusual pinto beans haggle slyly slyly 9348|167733|250|4|34|61224.82|0.07|0.04|N|O|1996-09-27|1996-08-24|1996-10-07|COLLECT COD|MAIL|regular packages nag slyly silent 9348|78826|6348|5|6|10828.92|0.04|0.08|N|O|1996-07-22|1996-07-14|1996-08-12|TAKE BACK RETURN|RAIL|ronic epitaphs about the p 9348|12318|2319|6|22|27066.82|0.03|0.00|N|O|1996-09-01|1996-09-01|1996-09-06|TAKE BACK RETURN|TRUCK|nd the fin 9349|186734|9253|1|13|23669.49|0.09|0.07|N|O|1997-09-02|1997-10-08|1997-09-11|NONE|RAIL|into beans are 9349|37874|378|2|12|21742.44|0.01|0.06|N|O|1997-12-23|1997-11-09|1998-01-09|DELIVER IN PERSON|SHIP|sits sleep careful 9349|141459|9002|3|3|4501.35|0.06|0.04|N|O|1997-10-28|1997-10-27|1997-11-22|TAKE BACK RETURN|RAIL|uses. regular instructions wake. 9349|94795|2323|4|35|62642.65|0.06|0.04|N|O|1997-09-25|1997-10-28|1997-10-08|NONE|TRUCK|: carefully pending ideas are 9349|17179|2182|5|46|50423.82|0.07|0.02|N|O|1997-10-28|1997-09-29|1997-11-03|DELIVER IN PERSON|RAIL|ites affix quickly deposits. 
fu 9349|120800|5825|6|29|52803.20|0.03|0.05|N|O|1997-09-28|1997-10-01|1997-10-25|COLLECT COD|AIR|he slyly regular pinto beans. express cou 9349|72852|374|7|20|36497.00|0.04|0.02|N|O|1997-10-18|1997-10-16|1997-10-22|DELIVER IN PERSON|AIR|impress blithely slyly unusual d 9350|9721|4722|1|31|50552.32|0.03|0.07|R|F|1993-02-24|1993-01-19|1993-02-25|COLLECT COD|FOB|ns integrate slow 9350|169368|9369|2|8|11498.88|0.00|0.04|A|F|1992-11-15|1993-01-16|1992-11-27|DELIVER IN PERSON|TRUCK|the even requests. blithely bold depths ha 9350|164782|2331|3|39|72024.42|0.06|0.01|A|F|1993-01-03|1993-01-21|1993-01-07|COLLECT COD|SHIP|ully regular foxes. fina 9350|88760|6285|4|9|15738.84|0.04|0.00|A|F|1992-11-17|1992-12-30|1992-12-05|COLLECT COD|FOB|y regular packages sublate. final, ir 9350|182370|9925|5|50|72618.50|0.03|0.02|R|F|1993-01-10|1993-01-19|1993-01-14|TAKE BACK RETURN|MAIL|ely pending deposits. blithely even p 9350|81515|1516|6|30|44895.30|0.04|0.05|A|F|1992-11-09|1992-12-19|1992-12-04|COLLECT COD|TRUCK|requests mold slyly pe 9351|153358|5874|1|12|16936.20|0.08|0.01|N|O|1996-08-08|1996-07-22|1996-08-25|TAKE BACK RETURN|REG AIR|accounts wake since the bol 9351|72104|2105|2|39|41967.90|0.01|0.04|N|O|1996-07-02|1996-06-28|1996-07-21|DELIVER IN PERSON|TRUCK|ole blithely pinto beans. carefully i 9376|103245|776|1|33|41191.92|0.04|0.01|N|O|1997-11-07|1997-09-11|1997-11-24|NONE|FOB|ffily even instructions 9376|27711|7712|2|13|21303.23|0.08|0.01|N|O|1997-10-28|1997-09-12|1997-10-29|DELIVER IN PERSON|AIR|ges sleep slyly caref 9376|123182|8207|3|18|21693.24|0.00|0.01|N|O|1997-10-15|1997-09-15|1997-11-07|COLLECT COD|AIR|requests hang 9376|104162|9183|4|38|44314.08|0.04|0.06|N|O|1997-08-11|1997-08-27|1997-08-24|TAKE BACK RETURN|SHIP|lyly according to 9376|46430|3943|5|13|17893.59|0.02|0.06|N|O|1997-11-17|1997-09-21|1997-12-02|COLLECT COD|TRUCK| regular instr 9376|135450|2990|6|20|29709.00|0.04|0.06|N|O|1997-08-22|1997-08-24|1997-08-25|DELIVER IN PERSON|FOB|ide of the blithely regular fo 9377|29222|1725|1|8|9209.76|0.10|0.05|A|F|1993-07-17|1993-06-23|1993-08-08|COLLECT COD|MAIL|ole fluffily across the pending instru 9378|58253|5769|1|14|16957.50|0.05|0.01|N|O|1997-11-24|1997-12-22|1997-12-03|COLLECT COD|AIR|al deposits. special, ironic ideas boos 9378|43565|1078|2|17|25645.52|0.09|0.05|N|O|1997-12-26|1997-11-16|1998-01-01|TAKE BACK RETURN|TRUCK|carefully bold foxes cajole slyly slyly fin 9378|192511|7550|3|43|68950.93|0.10|0.05|N|O|1997-12-27|1997-12-01|1998-01-05|TAKE BACK RETURN|REG AIR|gle fluffily-- ir 9379|121797|1798|1|31|56382.49|0.01|0.06|A|F|1992-04-03|1992-02-19|1992-04-25|COLLECT COD|REG AIR| instructions boost quietly around t 9379|38591|6101|2|22|33650.98|0.09|0.08|A|F|1992-02-12|1992-03-27|1992-02-22|DELIVER IN PERSON|SHIP|re fluffily final sheaves. carefully 9379|185319|2874|3|38|53363.78|0.02|0.05|R|F|1992-03-22|1992-03-27|1992-03-27|DELIVER IN PERSON|REG AIR|fully regula 9379|55596|5597|4|13|20170.67|0.00|0.01|A|F|1992-01-09|1992-03-20|1992-01-12|COLLECT COD|TRUCK|. blithely unusual deposits boost reque 9380|4088|1589|1|35|34722.80|0.00|0.08|A|F|1994-12-18|1994-10-01|1994-12-28|NONE|REG AIR|eposits after the furiously even foxes 9380|91867|6886|2|38|70636.68|0.08|0.05|R|F|1994-11-12|1994-10-05|1994-11-24|COLLECT COD|SHIP|lithely final packages alongside 9380|193779|8818|3|36|67419.72|0.03|0.02|A|F|1994-11-07|1994-11-15|1994-11-08|TAKE BACK RETURN|AIR|osits are blithely. slyly 9381|133307|3308|1|17|22785.10|0.10|0.06|N|O|1996-01-02|1996-01-31|1996-01-29|TAKE BACK RETURN|RAIL|c requests. 
final, regular deposits 9381|77611|119|2|43|68310.23|0.02|0.05|N|O|1996-01-01|1996-02-11|1996-01-28|TAKE BACK RETURN|REG AIR| slyly regular instructions use. 9382|32770|7777|1|23|39163.71|0.09|0.06|N|O|1996-03-27|1996-04-16|1996-04-04|TAKE BACK RETURN|AIR|yly furiously bold deposits? furio 9382|162553|7586|2|45|72699.75|0.07|0.04|N|O|1996-05-31|1996-05-03|1996-06-10|DELIVER IN PERSON|TRUCK|rve across the pending 9382|25679|684|3|4|6418.68|0.07|0.00|N|O|1996-03-08|1996-04-27|1996-04-02|TAKE BACK RETURN|REG AIR|instructions. excuses haggle. fu 9382|176070|1105|4|30|34382.10|0.02|0.08|N|O|1996-03-27|1996-04-28|1996-04-21|DELIVER IN PERSON|SHIP|boost carefully. theodolites along the ir 9383|162634|2635|1|12|20359.56|0.03|0.02|R|F|1994-03-22|1994-05-16|1994-04-17|NONE|TRUCK| accounts impress fin 9383|175325|2877|2|27|37808.64|0.01|0.04|R|F|1994-07-09|1994-05-16|1994-07-25|TAKE BACK RETURN|AIR|al courts. express frays sh 9383|83483|8500|3|1|1466.48|0.04|0.03|A|F|1994-06-10|1994-04-16|1994-06-28|DELIVER IN PERSON|TRUCK|al asympto 9383|86362|8871|4|31|41799.16|0.09|0.01|A|F|1994-06-05|1994-04-21|1994-06-10|TAKE BACK RETURN|TRUCK|out the fluffily bol 9383|74623|2145|5|43|68697.66|0.10|0.01|A|F|1994-04-28|1994-06-10|1994-05-10|COLLECT COD|AIR|ly express foxes 9408|153529|8560|1|9|14242.68|0.09|0.01|A|F|1992-11-19|1992-11-29|1992-11-25|NONE|MAIL|he fluffily regular packages are 9409|65556|5557|1|14|21301.70|0.09|0.08|R|F|1992-08-27|1992-07-24|1992-09-09|TAKE BACK RETURN|AIR|furiously notornis. regular ideas s 9409|184651|2206|2|6|10413.90|0.03|0.05|A|F|1992-06-21|1992-06-30|1992-07-19|TAKE BACK RETURN|FOB|heodolites. sp 9409|12718|7721|3|22|35875.62|0.08|0.05|R|F|1992-06-15|1992-06-05|1992-06-30|NONE|FOB|thely express deposits 9409|140319|320|4|47|63887.57|0.04|0.03|A|F|1992-06-27|1992-06-19|1992-07-18|DELIVER IN PERSON|FOB| packages cajole slyly 9409|135388|5389|5|31|44124.78|0.00|0.05|R|F|1992-05-09|1992-07-25|1992-05-31|COLLECT COD|SHIP|lithely bold inst 9410|56465|8971|1|48|68230.08|0.03|0.06|N|O|1997-11-02|1997-09-19|1997-11-03|NONE|AIR|otes sleep. f 9410|5483|7984|2|6|8330.88|0.02|0.07|N|O|1997-07-21|1997-10-13|1997-08-06|DELIVER IN PERSON|SHIP|ly ironic dependencies serve s 9410|137813|7814|3|8|14806.48|0.07|0.04|N|O|1997-10-10|1997-10-04|1997-11-01|COLLECT COD|TRUCK|eposits. deposi 9411|15044|2548|1|25|23976.00|0.09|0.02|R|F|1995-04-04|1995-03-10|1995-04-17|TAKE BACK RETURN|AIR|c ideas boost quickly ironic acc 9411|44502|9511|2|23|33269.50|0.05|0.06|A|F|1995-03-15|1995-03-26|1995-03-16|DELIVER IN PERSON|MAIL|ctions haggle blithely a 9412|134984|7498|1|11|22208.78|0.09|0.03|N|O|1998-10-31|1998-09-04|1998-11-22|COLLECT COD|SHIP|ackages. pending, unusual deposits integra 9412|75406|2928|2|47|64925.80|0.08|0.06|N|O|1998-09-24|1998-09-30|1998-10-09|COLLECT COD|TRUCK|hely ironic packages. always final theodol 9412|140047|2562|3|50|54352.00|0.00|0.08|N|O|1998-10-08|1998-09-13|1998-10-25|TAKE BACK RETURN|AIR|y bold ideas. bold, bold 9412|160025|7574|4|40|43400.80|0.10|0.07|N|O|1998-10-20|1998-09-04|1998-11-01|COLLECT COD|SHIP|s nag quickly. quickly express depende 9412|134132|4133|5|24|27987.12|0.09|0.06|N|O|1998-08-11|1998-08-28|1998-08-22|TAKE BACK RETURN|RAIL|ithely final pinto beans. brave, final hoc 9413|96693|4221|1|33|55759.77|0.04|0.08|N|O|1995-09-19|1995-09-10|1995-10-02|COLLECT COD|AIR|ld packages. requests cajole s 9413|114436|4437|2|41|59467.63|0.01|0.03|N|O|1995-07-25|1995-09-06|1995-07-29|TAKE BACK RETURN|TRUCK|bold foxes nag carefully. 
ironic pin 9413|119070|6604|3|24|26137.68|0.08|0.06|N|O|1995-08-19|1995-09-06|1995-09-13|COLLECT COD|RAIL| quickly bold requests. blithely ironic fox 9413|222|7723|4|18|20199.96|0.03|0.08|N|O|1995-10-22|1995-08-23|1995-11-10|NONE|TRUCK|ven deposits haggle blithely alo 9414|166411|1444|1|3|4432.23|0.09|0.07|A|F|1993-11-23|1993-11-11|1993-11-25|COLLECT COD|SHIP|ng to the closely regu 9414|54999|5000|2|26|50803.74|0.07|0.03|R|F|1993-12-09|1993-10-15|1993-12-17|TAKE BACK RETURN|FOB|eep furiously from the pending depo 9414|151574|4090|3|39|63397.23|0.01|0.05|R|F|1993-11-20|1993-12-04|1993-12-05|COLLECT COD|MAIL|uriously ironi 9415|188600|8601|1|36|60789.60|0.04|0.01|A|F|1992-08-26|1992-10-27|1992-08-29|NONE|AIR|al deposit 9415|193150|8189|2|32|39780.80|0.06|0.07|R|F|1992-08-25|1992-10-04|1992-09-15|DELIVER IN PERSON|RAIL|long the special, silent re 9415|68978|3991|3|8|15575.76|0.10|0.05|R|F|1992-10-22|1992-11-01|1992-11-14|COLLECT COD|FOB|le according to t 9415|100241|5262|4|25|31031.00|0.10|0.04|R|F|1992-12-11|1992-10-15|1992-12-25|TAKE BACK RETURN|RAIL|rding to the furiously u 9415|135662|689|5|13|22069.58|0.07|0.03|R|F|1992-11-28|1992-11-12|1992-12-22|NONE|AIR|ly about the ironic packages. ironic p 9415|68380|5899|6|38|51238.44|0.00|0.06|R|F|1992-10-13|1992-10-29|1992-10-17|COLLECT COD|REG AIR| theodolites alongside of 9415|55467|7973|7|5|7112.30|0.00|0.01|A|F|1992-09-20|1992-11-01|1992-10-20|COLLECT COD|RAIL|leep alongside of 9440|19472|1974|1|4|5565.88|0.04|0.03|A|F|1994-07-29|1994-07-25|1994-07-30|TAKE BACK RETURN|TRUCK|iously idle 9441|177807|5359|1|2|3769.60|0.08|0.04|N|O|1997-08-25|1997-09-16|1997-09-03|TAKE BACK RETURN|TRUCK|, daring instruction 9442|64332|9345|1|49|63520.17|0.07|0.02|A|F|1993-04-29|1993-05-14|1993-05-03|TAKE BACK RETURN|MAIL|ts snooze carefully ironi 9442|11921|6924|2|21|38491.32|0.08|0.02|A|F|1993-06-17|1993-06-06|1993-07-13|DELIVER IN PERSON|REG AIR| boost furiously at th 9442|152987|8018|3|8|16319.84|0.03|0.01|A|F|1993-03-31|1993-04-11|1993-04-16|DELIVER IN PERSON|RAIL|ckages haggle against the quickly regul 9443|164774|9807|1|20|36775.40|0.10|0.00|N|O|1997-08-15|1997-06-25|1997-08-19|NONE|FOB|nts are blithely unu 9443|155313|344|2|11|15051.41|0.06|0.08|N|O|1997-05-18|1997-06-27|1997-06-15|DELIVER IN PERSON|AIR|ites? quickly unusua 9443|131544|4058|3|28|44115.12|0.02|0.08|N|O|1997-08-08|1997-06-16|1997-08-31|COLLECT COD|SHIP|ronic foxes. carefully f 9443|55167|2683|4|50|56108.00|0.04|0.01|N|O|1997-08-14|1997-06-07|1997-09-09|TAKE BACK RETURN|AIR|ct. fluffily pending foxes alongside of 9443|177161|4713|5|10|12381.60|0.09|0.07|N|O|1997-07-30|1997-07-14|1997-08-12|COLLECT COD|AIR| bold deposits cajole 9444|85626|5627|1|10|16116.20|0.09|0.06|N|O|1996-10-08|1996-10-14|1996-10-09|TAKE BACK RETURN|SHIP|ly furiously ironic excuses. slyly even p 9444|102466|9997|2|12|17621.52|0.01|0.06|N|O|1996-07-23|1996-08-23|1996-08-14|TAKE BACK RETURN|MAIL|ickly ironic ideas s 9444|143269|3270|3|6|7873.56|0.10|0.06|N|O|1996-11-12|1996-10-13|1996-11-25|NONE|MAIL|s sleep slyl 9445|34125|9132|1|22|23300.64|0.06|0.05|N|O|1998-03-06|1997-12-30|1998-03-12|DELIVER IN PERSON|TRUCK|ackages. regularly ironic deposits cajol 9445|129823|2336|2|40|74112.80|0.04|0.08|N|O|1998-01-30|1998-02-02|1998-02-12|NONE|SHIP|inder fluffily. quickly final deposits 9446|124663|2200|1|23|38816.18|0.03|0.00|N|O|1998-04-14|1998-02-06|1998-04-27|COLLECT COD|FOB|deas cajole special accounts. blithely eve 9446|245|246|2|36|41228.64|0.05|0.02|N|O|1998-02-03|1998-02-20|1998-02-28|TAKE BACK RETURN|FOB|. 
silent deposits sleep. express, 9446|111555|4067|3|49|76760.95|0.09|0.07|N|O|1998-03-05|1998-02-19|1998-03-21|DELIVER IN PERSON|FOB|tions use. carefully even platelets a 9446|103465|5976|4|16|23495.36|0.03|0.01|N|O|1998-04-28|1998-02-25|1998-05-02|NONE|RAIL|nic instructions slee 9446|135837|5838|5|44|82404.52|0.01|0.07|N|O|1998-04-06|1998-03-21|1998-05-04|TAKE BACK RETURN|REG AIR|ake busily regular requests. 9446|195666|8186|6|21|36994.86|0.09|0.05|N|O|1998-02-07|1998-02-11|1998-02-08|DELIVER IN PERSON|REG AIR|le fluffily blithely ironic id 9447|20285|286|1|32|38568.96|0.09|0.08|N|O|1995-07-10|1995-09-01|1995-08-08|DELIVER IN PERSON|FOB|latelets na 9447|119768|2280|2|43|76873.68|0.08|0.03|N|O|1995-10-14|1995-07-23|1995-11-04|DELIVER IN PERSON|FOB| express pinto bean 9472|53105|8116|1|12|12697.20|0.09|0.06|N|O|1995-08-20|1995-08-25|1995-08-23|COLLECT COD|MAIL|ly ironic accounts boost. 9472|133807|3808|2|24|44179.20|0.10|0.03|N|O|1995-07-10|1995-09-05|1995-07-23|TAKE BACK RETURN|REG AIR|s haggle across the slyly even instru 9473|128849|3874|1|36|67602.24|0.01|0.03|R|F|1993-02-02|1992-12-20|1993-02-22|DELIVER IN PERSON|FOB|eposits detect slyl 9473|153732|8763|2|4|7142.92|0.10|0.04|A|F|1992-10-16|1992-12-02|1992-10-29|COLLECT COD|AIR|hely unusual requ 9473|172057|2058|3|23|25968.15|0.07|0.00|R|F|1992-11-15|1992-11-11|1992-12-04|DELIVER IN PERSON|FOB|iously. quickly quiet 9473|159009|4040|4|33|35244.00|0.05|0.06|A|F|1992-11-29|1992-12-14|1992-12-21|COLLECT COD|SHIP|r the always ironi 9473|62934|2935|5|14|26557.02|0.04|0.00|A|F|1993-02-06|1992-12-09|1993-02-25|COLLECT COD|FOB|uses. final excuses haggle. fur 9473|129728|2241|6|50|87886.00|0.04|0.02|A|F|1992-11-03|1993-01-02|1992-11-27|DELIVER IN PERSON|MAIL|riously along the accou 9474|24286|1793|1|9|10892.52|0.09|0.00|N|F|1995-06-10|1995-06-21|1995-06-29|TAKE BACK RETURN|REG AIR| was after the fur 9474|172736|2737|2|44|79584.12|0.10|0.05|N|O|1995-08-28|1995-06-11|1995-09-10|NONE|TRUCK| the quickly pending platelets cajol 9474|164524|4525|3|44|69894.88|0.00|0.06|N|O|1995-08-23|1995-06-13|1995-08-28|NONE|AIR|. slyly express deposits a 9474|154506|9537|4|48|74904.00|0.07|0.00|N|O|1995-07-28|1995-07-17|1995-08-11|COLLECT COD|RAIL|en instructi 9474|171143|1144|5|21|25496.94|0.08|0.05|N|O|1995-08-02|1995-08-02|1995-08-10|COLLECT COD|RAIL|ckly final deposits. slyly final pac 9475|111405|1406|1|3|4249.20|0.00|0.07|N|O|1997-01-12|1997-01-08|1997-01-16|TAKE BACK RETURN|TRUCK|nts are carefully. quickly close 9475|168173|3206|2|50|62058.50|0.07|0.03|N|O|1997-01-02|1996-12-27|1997-01-14|DELIVER IN PERSON|REG AIR| final requests 9475|69055|4068|3|24|24577.20|0.00|0.03|N|O|1997-02-25|1997-02-17|1997-03-02|DELIVER IN PERSON|RAIL|e slyly furiously 9475|194141|9180|4|47|58051.58|0.06|0.08|N|O|1997-03-14|1997-02-17|1997-04-04|COLLECT COD|MAIL|al accounts. b 9476|189211|6766|1|24|31205.04|0.08|0.07|A|F|1994-06-16|1994-08-22|1994-06-22|COLLECT COD|SHIP|ructions mus 9476|55682|3198|2|1|1637.68|0.09|0.06|R|F|1994-08-31|1994-08-02|1994-09-05|NONE|TRUCK|counts. even dolph 9476|39971|4978|3|6|11465.82|0.09|0.07|A|F|1994-07-02|1994-08-08|1994-07-24|COLLECT COD|TRUCK|ly above the regular 9476|175828|3380|4|25|47595.50|0.03|0.00|R|F|1994-07-15|1994-08-09|1994-08-08|TAKE BACK RETURN|MAIL|ring requests. sile 9476|137780|5320|5|2|3635.56|0.02|0.06|R|F|1994-07-04|1994-07-25|1994-07-07|NONE|MAIL|: ironic accounts sleep across the f 9476|66938|6939|6|26|49528.18|0.00|0.00|R|F|1994-08-09|1994-08-22|1994-08-14|TAKE BACK RETURN|TRUCK|y. 
bold pinto 9476|25270|7773|7|36|43029.72|0.00|0.02|R|F|1994-06-27|1994-07-25|1994-07-26|TAKE BACK RETURN|REG AIR|ts sleep bravely even foxes! fluffil 9477|105052|73|1|3|3171.15|0.06|0.00|N|O|1998-04-07|1998-05-13|1998-04-30|NONE|SHIP|nstructions nag slyly after the flu 9477|199284|4323|2|17|23515.76|0.07|0.06|N|O|1998-05-13|1998-05-27|1998-06-08|TAKE BACK RETURN|AIR|eep at the care 9477|160878|3395|3|45|87249.15|0.07|0.02|N|O|1998-04-18|1998-05-29|1998-04-30|TAKE BACK RETURN|RAIL|ar pinto bea 9477|72177|9699|4|44|50563.48|0.08|0.00|N|O|1998-04-28|1998-05-15|1998-05-04|TAKE BACK RETURN|MAIL|ic foxes. slyly special dependencies tr 9477|62911|7924|5|28|52469.48|0.09|0.03|N|O|1998-04-01|1998-04-19|1998-04-16|COLLECT COD|REG AIR| theodolite 9477|142746|289|6|20|35774.80|0.08|0.02|N|O|1998-04-09|1998-05-10|1998-04-20|TAKE BACK RETURN|FOB|? quickly even instructions are 9477|118889|8890|7|34|64867.92|0.00|0.07|N|O|1998-04-10|1998-06-06|1998-05-03|NONE|SHIP|ully final ideas acco 9478|163525|3526|1|19|30181.88|0.09|0.05|A|F|1994-07-05|1994-06-21|1994-07-06|DELIVER IN PERSON|REG AIR|ously. carefully final reque 9478|158313|8314|2|44|60337.64|0.05|0.00|R|F|1994-05-01|1994-05-21|1994-05-09|COLLECT COD|TRUCK|iously bold packages ar 9478|155703|3249|3|15|26380.50|0.05|0.02|R|F|1994-07-12|1994-05-13|1994-07-16|COLLECT COD|TRUCK|y even requests haggle quickly i 9478|98579|6107|4|35|55214.95|0.10|0.04|R|F|1994-05-10|1994-05-24|1994-06-08|NONE|SHIP|le carefully 9479|86472|1489|1|42|61255.74|0.02|0.05|A|F|1994-10-23|1994-12-16|1994-11-05|DELIVER IN PERSON|RAIL|sits. slyly even excuses nag quic 9504|136766|9280|1|28|50477.28|0.08|0.03|A|F|1992-12-07|1993-01-08|1992-12-25|DELIVER IN PERSON|MAIL|e the furiously pending 9504|11167|6170|2|44|47439.04|0.05|0.03|R|F|1993-03-08|1993-02-11|1993-03-28|NONE|MAIL|e of the final, unusual accounts. f 9504|69703|2210|3|48|80289.60|0.08|0.05|A|F|1992-12-14|1993-02-21|1992-12-23|TAKE BACK RETURN|MAIL|s; closely ironic asymptotes accor 9504|94552|2080|4|11|17012.05|0.09|0.07|R|F|1993-01-24|1993-02-07|1993-02-09|COLLECT COD|MAIL|print blithely fl 9505|142134|2135|1|21|24698.73|0.06|0.02|A|F|1992-10-07|1992-08-23|1992-11-05|TAKE BACK RETURN|MAIL|ld instructions. blithely bold ideas use sl 9505|31216|3720|2|14|16060.94|0.07|0.04|R|F|1992-06-16|1992-08-23|1992-07-14|DELIVER IN PERSON|RAIL|gedly pending pinto beans. ironi 9505|40542|5551|3|32|47441.28|0.08|0.02|A|F|1992-06-13|1992-08-05|1992-06-19|TAKE BACK RETURN|MAIL|ckages. always final accounts play 9505|176188|8706|4|49|61944.82|0.00|0.06|R|F|1992-08-20|1992-07-13|1992-08-25|DELIVER IN PERSON|MAIL|quests. carefully ironic pla 9505|185902|3457|5|25|49697.50|0.05|0.04|R|F|1992-09-14|1992-07-12|1992-10-02|COLLECT COD|TRUCK|arefully final accounts are across the 9505|38547|6057|6|5|7427.70|0.04|0.08|R|F|1992-09-16|1992-07-25|1992-09-28|DELIVER IN PERSON|TRUCK|odolites. 
carefully express 9505|48756|3765|7|46|78418.50|0.05|0.07|A|F|1992-07-09|1992-08-28|1992-08-03|DELIVER IN PERSON|MAIL|ep slyly bli 9506|39741|2245|1|38|63868.12|0.07|0.04|A|F|1994-06-02|1994-04-10|1994-06-27|DELIVER IN PERSON|TRUCK|hlessly around 9507|13584|3585|1|33|49420.14|0.03|0.03|A|F|1995-01-28|1995-02-02|1995-02-19|DELIVER IN PERSON|FOB|hely carefully express fox 9508|149318|6861|1|4|5469.24|0.04|0.02|N|O|1996-08-18|1996-06-19|1996-09-08|NONE|TRUCK|aggle furiously among 9508|184998|7517|2|46|95817.54|0.06|0.01|N|O|1996-06-20|1996-06-10|1996-06-28|DELIVER IN PERSON|TRUCK| cajole carefull 9508|59432|9433|3|41|57048.63|0.04|0.02|N|O|1996-07-01|1996-07-16|1996-07-20|DELIVER IN PERSON|REG AIR|egular ideas solve permanently 9508|65872|3391|4|24|44108.88|0.01|0.01|N|O|1996-08-10|1996-05-29|1996-08-23|COLLECT COD|REG AIR|bold foxes haggle slyly. ironic, 9508|109271|6802|5|42|53771.34|0.02|0.00|N|O|1996-06-10|1996-07-20|1996-07-09|COLLECT COD|RAIL|ages. furi 9508|105039|2570|6|17|17748.51|0.09|0.01|N|O|1996-06-25|1996-06-03|1996-07-22|COLLECT COD|TRUCK| deposits nag. carefully silent theodolites 9509|17281|7282|1|45|53922.60|0.01|0.01|A|F|1993-01-31|1992-12-15|1993-02-08|NONE|TRUCK|nding pinto beans cajole blithely carefull 9509|36568|1575|2|41|61686.96|0.08|0.07|A|F|1992-12-16|1992-11-15|1992-12-23|DELIVER IN PERSON|MAIL|press packages 9509|196916|6917|3|22|44284.02|0.07|0.06|R|F|1993-01-02|1992-11-20|1993-01-24|DELIVER IN PERSON|REG AIR|lar deposits to the sl 9509|160021|5054|4|49|52969.98|0.10|0.05|R|F|1993-01-30|1992-12-27|1993-02-05|NONE|REG AIR|d patterns. quickly pending packag 9509|70870|5885|5|9|16567.83|0.03|0.05|R|F|1992-10-24|1992-11-22|1992-11-06|COLLECT COD|REG AIR| final courts sle 9510|135947|8461|1|3|5948.82|0.10|0.08|N|O|1997-02-08|1996-12-23|1997-02-21|DELIVER IN PERSON|FOB|e. quickly regular deposits in 9510|50303|5314|2|24|30079.20|0.10|0.01|N|O|1997-02-04|1997-02-12|1997-02-05|COLLECT COD|TRUCK|fully about the accounts. even excuses gr 9510|28175|5682|3|49|54055.33|0.02|0.05|N|O|1996-12-29|1997-01-06|1997-01-19|COLLECT COD|AIR|s against the pending pinto beans so 9511|42418|4923|1|45|61218.45|0.05|0.01|N|O|1996-10-29|1996-10-17|1996-11-20|DELIVER IN PERSON|REG AIR|ealthily regula 9511|108580|1091|2|39|61954.62|0.02|0.03|N|O|1996-11-24|1996-10-04|1996-12-15|DELIVER IN PERSON|TRUCK|ending theodolite 9536|162621|5138|1|23|38723.26|0.00|0.03|N|O|1997-09-14|1997-11-09|1997-09-20|TAKE BACK RETURN|TRUCK|ts. requests sleep carefully about the care 9536|167762|5311|2|29|53063.04|0.01|0.00|N|O|1997-11-20|1997-10-13|1997-11-22|NONE|REG AIR|final requests. slyly ironic foxes 9536|169803|9804|3|16|29964.80|0.06|0.00|N|O|1997-09-18|1997-10-03|1997-10-18|NONE|RAIL|efully pending deposits according 9537|83262|5771|1|39|48565.14|0.07|0.04|A|F|1995-02-25|1995-04-25|1995-03-10|TAKE BACK RETURN|REG AIR|hely regular pinto beans on th 9537|182340|4859|2|45|64005.30|0.08|0.00|R|F|1995-03-18|1995-04-16|1995-03-28|NONE|REG AIR|pitaphs cajole fu 9537|180554|8109|3|45|73554.75|0.05|0.02|A|F|1995-03-09|1995-05-11|1995-03-25|DELIVER IN PERSON|REG AIR|uthless packages haggle enticingly. re 9537|165664|3213|4|24|41511.84|0.04|0.06|R|F|1995-05-17|1995-05-12|1995-05-29|DELIVER IN PERSON|RAIL|its. slyly furious 9538|178612|1130|1|19|32121.59|0.02|0.03|A|F|1994-03-21|1994-05-15|1994-03-31|DELIVER IN PERSON|SHIP|h. quickly special requests wake. 
slyly reg 9539|110937|938|1|29|56489.97|0.09|0.00|N|O|1996-01-20|1995-12-19|1996-02-04|COLLECT COD|FOB| packages haggle furious 9540|30245|7755|1|36|42308.64|0.02|0.01|R|F|1994-09-07|1994-07-23|1994-09-15|TAKE BACK RETURN|MAIL| accounts bo 9540|125559|8072|2|47|74473.85|0.03|0.01|A|F|1994-06-28|1994-09-03|1994-06-29|DELIVER IN PERSON|AIR|out the regular sauternes. carefully 9541|107323|9834|1|43|57203.76|0.10|0.05|R|F|1992-07-14|1992-04-30|1992-08-08|COLLECT COD|FOB|nding depos 9541|175495|5496|2|40|62819.60|0.01|0.01|A|F|1992-06-25|1992-05-24|1992-06-27|DELIVER IN PERSON|SHIP| are fluffily final sauternes. express 9542|80553|8078|1|43|65942.65|0.04|0.06|N|O|1996-02-23|1996-02-12|1996-03-15|NONE|SHIP|eposits sleep slyly. thinly 9542|37947|451|2|37|69742.78|0.01|0.00|N|O|1996-04-15|1996-02-05|1996-05-09|COLLECT COD|RAIL|sits sleep finally ideas; final, final 9543|25382|7885|1|3|3922.14|0.10|0.06|A|F|1992-08-03|1992-08-03|1992-08-21|NONE|AIR|counts cajole careful 9543|188109|628|2|35|41898.50|0.08|0.00|A|F|1992-06-22|1992-07-31|1992-07-06|NONE|FOB|ously pending instructions after the 9543|143570|6085|3|37|59702.09|0.01|0.01|R|F|1992-05-25|1992-06-25|1992-06-10|TAKE BACK RETURN|SHIP|st after the slyly final request 9543|174941|2493|4|23|46366.62|0.10|0.03|A|F|1992-09-01|1992-06-27|1992-09-17|NONE|SHIP|nto beans. regular, ironic 9568|51138|3644|1|37|40297.81|0.08|0.07|A|F|1993-05-15|1993-05-23|1993-05-25|NONE|FOB|after the carefully 9568|65635|8142|2|19|30411.97|0.06|0.05|R|F|1993-04-25|1993-04-19|1993-05-03|TAKE BACK RETURN|FOB|e carefully 9569|40304|5313|1|44|54749.20|0.08|0.04|N|O|1997-11-30|1998-01-03|1997-12-19|TAKE BACK RETURN|AIR| silent packages doubt blithel 9569|180402|2921|2|25|37060.00|0.04|0.05|N|O|1998-01-19|1997-12-22|1998-02-08|NONE|TRUCK|heaves! carefully expr 9569|15706|8208|3|30|48651.00|0.07|0.00|N|O|1998-02-23|1997-12-08|1998-03-02|COLLECT COD|TRUCK|ckages are 9569|167758|275|4|40|73030.00|0.00|0.02|N|O|1997-11-15|1997-12-15|1997-11-26|NONE|RAIL|ial accounts cajole. furiously even depen 9569|97572|5100|5|10|15695.70|0.07|0.06|N|O|1998-01-09|1997-12-12|1998-01-13|COLLECT COD|REG AIR|ly unusual pinto beans. bold ideas a 9570|78583|8584|1|6|9369.48|0.02|0.06|N|O|1996-06-19|1996-07-15|1996-06-27|NONE|AIR|ounts detect 9570|135805|832|2|7|12885.60|0.02|0.05|N|O|1996-06-08|1996-07-16|1996-06-17|DELIVER IN PERSON|TRUCK|the unusual packages. pending 9570|65140|5141|3|20|22102.80|0.09|0.07|N|O|1996-09-06|1996-08-06|1996-09-17|COLLECT COD|RAIL|ges about the pe 9570|198168|688|4|6|7596.96|0.01|0.04|N|O|1996-07-12|1996-07-10|1996-07-19|DELIVER IN PERSON|REG AIR|c pinto beans caj 9570|2309|9810|5|19|23014.70|0.06|0.07|N|O|1996-06-18|1996-08-07|1996-06-22|DELIVER IN PERSON|AIR|excuses dazzle carefully accor 9571|134976|3|1|4|8043.88|0.07|0.04|N|O|1996-08-10|1996-06-01|1996-08-25|TAKE BACK RETURN|FOB|posits sleep. 
quickly 9571|134589|7103|2|45|73061.10|0.06|0.01|N|O|1996-06-21|1996-07-14|1996-07-20|TAKE BACK RETURN|SHIP|nal requests b 9572|41732|1733|1|33|55233.09|0.08|0.02|N|O|1998-06-13|1998-06-02|1998-06-30|COLLECT COD|TRUCK|lent packages sl 9572|173131|683|2|17|20470.21|0.10|0.00|N|O|1998-05-13|1998-05-30|1998-05-27|TAKE BACK RETURN|REG AIR|nic pinto beans haggl 9572|10558|5561|3|21|30839.55|0.07|0.01|N|O|1998-04-29|1998-04-27|1998-05-23|COLLECT COD|AIR| deposits cajole qui 9573|124255|4256|1|5|6396.25|0.01|0.00|R|F|1992-09-23|1992-08-02|1992-10-20|NONE|AIR|l, final packages detect blithely 9573|141184|6213|2|45|55133.10|0.04|0.00|R|F|1992-09-05|1992-07-19|1992-09-14|NONE|TRUCK|regular requests caj 9573|67876|2889|3|50|92193.50|0.08|0.07|A|F|1992-07-12|1992-07-16|1992-07-16|NONE|AIR|g the fluffily quiet asymptotes. unus 9574|130456|457|1|48|71349.60|0.04|0.03|N|O|1995-09-13|1995-09-02|1995-10-12|COLLECT COD|RAIL| final dependen 9574|166446|6447|2|24|36298.56|0.04|0.06|N|O|1995-08-01|1995-09-20|1995-08-18|COLLECT COD|FOB|refully express decoys boost furious 9574|47833|5346|3|38|67671.54|0.04|0.00|N|O|1995-11-16|1995-10-22|1995-11-23|NONE|MAIL| packages affix. 9575|117151|4685|1|2|2336.30|0.10|0.06|R|F|1992-07-01|1992-06-05|1992-07-19|TAKE BACK RETURN|REG AIR|nding packages wake quickly. furio 9575|143007|8036|2|19|19950.00|0.07|0.08|A|F|1992-04-15|1992-06-09|1992-05-09|DELIVER IN PERSON|RAIL|y express w 9600|150890|891|1|40|77635.60|0.03|0.00|A|F|1994-02-01|1994-01-13|1994-02-24|DELIVER IN PERSON|REG AIR|he express requests. bold pack 9600|1529|4030|2|3|4291.56|0.01|0.03|R|F|1993-12-19|1993-12-28|1994-01-15|DELIVER IN PERSON|MAIL| realms. even, regular frets sl 9600|86351|3876|3|24|32096.40|0.08|0.08|R|F|1993-12-11|1993-12-15|1993-12-20|TAKE BACK RETURN|TRUCK|ly blithely final ins 9601|30052|53|1|25|24551.25|0.00|0.06|N|O|1996-04-07|1996-04-24|1996-05-04|NONE|SHIP|uffily regular packages engage slyly st 9601|72394|2395|2|28|38258.92|0.10|0.05|N|O|1996-03-31|1996-04-21|1996-04-30|DELIVER IN PERSON|MAIL|g foxes wake among 9601|122791|2792|3|17|30834.43|0.04|0.05|N|O|1996-06-29|1996-05-18|1996-07-12|TAKE BACK RETURN|FOB|nto beans 9601|137381|2408|4|14|19857.32|0.02|0.08|N|O|1996-06-10|1996-06-10|1996-06-24|TAKE BACK RETURN|SHIP|ously special a 9601|160371|372|5|21|30058.77|0.10|0.07|N|O|1996-05-04|1996-05-05|1996-06-03|TAKE BACK RETURN|MAIL|-- furiously final accou 9602|3426|5927|1|24|31906.08|0.04|0.05|A|F|1993-01-21|1992-12-28|1993-02-19|NONE|AIR|ithes are after the sly 9602|179455|1973|2|44|67515.80|0.05|0.03|R|F|1993-01-09|1993-01-01|1993-01-25|DELIVER IN PERSON|RAIL|xpress excuses wak 9602|97604|2623|3|20|32032.00|0.09|0.02|A|F|1993-02-16|1993-01-29|1993-02-26|COLLECT COD|AIR|efully accounts. slyly 9602|18612|1114|4|28|42857.08|0.04|0.00|R|F|1992-11-15|1993-01-12|1992-11-21|COLLECT COD|SHIP|fluffily special accounts. c 9602|196886|1925|5|19|37674.72|0.06|0.02|A|F|1993-01-17|1993-02-07|1993-01-31|COLLECT COD|RAIL|the quickly final accounts. reg 9603|188171|3208|1|46|57921.82|0.08|0.02|N|O|1998-04-08|1998-02-13|1998-04-19|DELIVER IN PERSON|FOB|ifts nod. slyly regular accounts nag 9603|189801|4838|2|36|68068.80|0.05|0.03|N|O|1998-03-10|1998-03-15|1998-03-22|NONE|RAIL|sts. 
slyly silent hockey players s 9603|172767|319|3|11|20237.36|0.02|0.04|N|O|1998-04-22|1998-02-12|1998-05-11|NONE|TRUCK|ending depth 9603|90840|8368|4|26|47601.84|0.05|0.06|N|O|1998-04-25|1998-03-12|1998-04-27|TAKE BACK RETURN|MAIL|ake furiously around th 9603|139221|6761|5|45|56709.90|0.01|0.02|N|O|1998-01-14|1998-04-02|1998-01-20|DELIVER IN PERSON|TRUCK|raids sleep carefully. deposits 9603|73456|3457|6|33|47171.85|0.10|0.05|N|O|1998-04-25|1998-03-20|1998-05-15|NONE|MAIL|nly bold theodolites. unusual 9604|199340|6898|1|26|37422.84|0.04|0.03|N|O|1996-07-15|1996-08-13|1996-08-11|DELIVER IN PERSON|FOB|ly unusual requests. carefully express d 9605|187052|4607|1|22|25059.10|0.04|0.02|R|F|1993-03-24|1993-02-08|1993-04-01|TAKE BACK RETURN|MAIL| haggle fluffily. blithely final 9605|177771|289|2|48|88740.96|0.08|0.01|R|F|1993-02-10|1993-03-23|1993-02-25|DELIVER IN PERSON|FOB|. theodoli 9606|144368|1911|1|45|63556.20|0.00|0.04|A|F|1994-08-24|1994-09-17|1994-09-07|TAKE BACK RETURN|SHIP|heodolites. slyly bold theodolite 9606|178683|1201|2|18|31710.24|0.05|0.04|R|F|1994-08-11|1994-10-11|1994-08-13|DELIVER IN PERSON|RAIL| use slyly across the pending, 9606|113580|6092|3|7|11155.06|0.00|0.01|A|F|1994-08-07|1994-10-25|1994-08-24|TAKE BACK RETURN|AIR|ly pending packages 9606|139012|4039|4|23|24173.23|0.08|0.00|R|F|1994-10-25|1994-09-10|1994-11-21|TAKE BACK RETURN|AIR|ies boost slyly furiously express ide 9606|162432|9981|5|14|20922.02|0.04|0.08|R|F|1994-11-05|1994-09-23|1994-11-23|DELIVER IN PERSON|TRUCK|ss the carefully pendin 9607|152253|2254|1|33|43073.25|0.08|0.04|N|O|1996-01-07|1996-03-11|1996-01-24|DELIVER IN PERSON|SHIP|packages wake quickly. 9632|146679|6680|1|31|53495.77|0.08|0.01|R|F|1992-04-30|1992-04-07|1992-05-22|NONE|AIR|essly. carefully ironi 9632|27554|7555|2|9|13333.95|0.07|0.04|R|F|1992-02-28|1992-04-12|1992-03-02|TAKE BACK RETURN|TRUCK|he furiously 9632|95106|7616|3|46|50650.60|0.05|0.07|A|F|1992-02-26|1992-03-17|1992-03-03|DELIVER IN PERSON|SHIP|orges cajole furiously slyly un 9632|30594|8104|4|8|12196.72|0.01|0.02|A|F|1992-05-05|1992-04-12|1992-06-03|NONE|RAIL| the carefully ex 9633|59955|9956|1|36|68938.20|0.07|0.06|N|O|1995-12-12|1995-11-15|1996-01-05|COLLECT COD|SHIP|nto beans. slyly speci 9633|54783|7289|2|43|74724.54|0.09|0.04|N|O|1995-12-26|1995-11-09|1996-01-10|DELIVER IN PERSON|MAIL|s after the slyly final requests are fluffi 9633|135364|2904|3|42|58773.12|0.05|0.00|N|O|1995-09-15|1995-11-09|1995-10-15|NONE|FOB| unusual ideas are above the quickl 9633|58995|1501|4|12|23447.88|0.09|0.04|N|O|1995-11-12|1995-11-13|1995-11-18|COLLECT COD|AIR|t foxes. ironic ideas haggle c 9633|121099|8636|5|11|12320.99|0.09|0.06|N|O|1995-12-04|1995-11-20|1995-12-14|COLLECT COD|FOB|believe carefully furiously ev 9634|89345|6870|1|43|57376.62|0.06|0.04|N|O|1998-02-23|1998-03-01|1998-03-05|COLLECT COD|SHIP|platelets. expre 9634|11823|4325|2|25|43370.50|0.00|0.07|N|O|1998-04-21|1998-03-30|1998-04-27|NONE|TRUCK|fully regularly regular courts. depos 9634|103771|8792|3|19|33720.63|0.07|0.02|N|O|1998-04-17|1998-02-17|1998-05-05|DELIVER IN PERSON|MAIL|se slyly after the sly 9634|71160|3668|4|47|53164.52|0.09|0.05|N|O|1998-05-03|1998-02-12|1998-05-24|NONE|TRUCK|foxes thrash quickly bold 9634|183506|8543|5|27|42916.50|0.10|0.06|N|O|1998-03-26|1998-03-06|1998-04-14|DELIVER IN PERSON|MAIL|usual accounts. 
even requests nag bli 9634|169366|4399|6|48|68897.28|0.01|0.03|N|O|1998-04-03|1998-03-20|1998-04-12|TAKE BACK RETURN|FOB| furiously among the furiously express inst 9635|58860|3871|1|11|20007.46|0.07|0.08|A|F|1994-05-02|1994-06-08|1994-05-18|DELIVER IN PERSON|SHIP|gular packages nag 9635|50334|335|2|30|38529.90|0.05|0.07|R|F|1994-07-14|1994-06-25|1994-07-19|NONE|REG AIR|dolites sleep. busy, unusual requests 9635|156763|1794|3|19|34575.44|0.09|0.03|A|F|1994-08-08|1994-06-09|1994-08-26|NONE|FOB|final packages cajole f 9635|112469|3|4|12|17777.52|0.05|0.06|R|F|1994-07-09|1994-06-05|1994-07-31|TAKE BACK RETURN|MAIL|eposits haggl 9635|55848|5849|5|26|46899.84|0.03|0.02|A|F|1994-07-24|1994-05-29|1994-08-21|NONE|FOB|s detect furiously even accounts. quickly 9635|33812|8819|6|28|48882.68|0.06|0.01|R|F|1994-08-11|1994-06-14|1994-08-24|DELIVER IN PERSON|SHIP| quickly about the theodolites. un 9636|14487|9490|1|47|65869.56|0.02|0.02|N|O|1997-02-05|1997-03-31|1997-02-26|TAKE BACK RETURN|AIR| even deposits wake slyly a 9636|73732|8747|2|36|61406.28|0.08|0.02|N|O|1997-02-10|1997-04-14|1997-03-05|NONE|SHIP|ickly darin 9636|8147|648|3|5|5275.70|0.00|0.04|N|O|1997-05-25|1997-03-07|1997-06-16|TAKE BACK RETURN|SHIP|uctions are carefu 9637|63519|6026|1|45|66712.95|0.10|0.02|R|F|1994-03-24|1994-02-28|1994-03-30|COLLECT COD|SHIP|even foxes doubt slyly. quickly stealthy 9637|147364|4907|2|6|8468.16|0.05|0.02|A|F|1994-03-18|1994-04-04|1994-03-28|DELIVER IN PERSON|MAIL|s. instructions nag quickly. carefully 9637|136557|9071|3|33|52587.15|0.05|0.01|R|F|1994-02-27|1994-04-02|1994-03-26|NONE|FOB|y ironic deposits was blithely accord 9637|110850|5873|4|1|1860.85|0.02|0.02|A|F|1994-03-21|1994-03-19|1994-04-15|TAKE BACK RETURN|FOB|ccounts solve furiously blithely 9638|167879|396|1|48|93449.76|0.06|0.07|N|O|1996-09-26|1996-09-06|1996-10-15|NONE|FOB| around the pe 9638|134563|7077|2|4|6390.24|0.06|0.02|N|O|1996-07-03|1996-08-07|1996-07-28|COLLECT COD|FOB|fily special, regular deposits? sly 9638|197483|2522|3|6|9482.88|0.03|0.04|N|O|1996-07-24|1996-08-24|1996-08-02|TAKE BACK RETURN|FOB|ding accounts! furiously ironic deposits 9638|94737|4738|4|10|17317.30|0.00|0.06|N|O|1996-09-04|1996-09-04|1996-09-21|COLLECT COD|SHIP|e special, regular pinto beans int 9639|32155|7162|1|34|36963.10|0.06|0.05|R|F|1993-10-23|1993-11-24|1993-11-01|TAKE BACK RETURN|RAIL|fter the furiously final 9639|22990|497|2|42|80345.58|0.09|0.08|A|F|1993-09-24|1993-11-16|1993-09-27|NONE|MAIL|. carefully fi 9639|180852|3371|3|33|63784.05|0.01|0.03|A|F|1993-10-28|1993-11-22|1993-11-01|COLLECT COD|TRUCK|boost boldly across the furious 9639|179538|7090|4|2|3235.06|0.10|0.04|A|F|1993-11-22|1993-11-01|1993-12-21|NONE|TRUCK|each along the bold foxes. 9639|94490|9509|5|44|65317.56|0.02|0.07|A|F|1993-12-28|1993-10-18|1994-01-16|DELIVER IN PERSON|MAIL|uickly ironi 9639|88113|5638|6|3|3303.33|0.10|0.07|A|F|1993-11-13|1993-10-14|1993-12-13|TAKE BACK RETURN|RAIL|the regular, unusual requests. 9639|110576|3088|7|6|9519.42|0.01|0.03|A|F|1993-12-21|1993-11-26|1993-12-30|TAKE BACK RETURN|AIR|ar accounts integrate slyly alongside 9664|35714|8218|1|50|82485.50|0.09|0.07|A|F|1993-04-18|1993-05-28|1993-04-22|DELIVER IN PERSON|MAIL|packages nag furiously 9664|35214|221|2|1|1149.21|0.00|0.04|A|F|1993-06-01|1993-06-06|1993-06-19|TAKE BACK RETURN|AIR|es poach blithely. 
caref 9664|30960|5967|3|37|69965.52|0.08|0.03|R|F|1993-05-30|1993-05-27|1993-06-04|COLLECT COD|TRUCK|es sleep about the furiousl 9665|88355|5880|1|11|14776.85|0.06|0.00|R|F|1994-05-11|1994-06-28|1994-06-04|TAKE BACK RETURN|MAIL|y across the quickly even frays? fluffi 9665|182360|4879|2|5|7211.80|0.02|0.04|A|F|1994-06-27|1994-06-12|1994-07-21|NONE|RAIL|ly ironic tithes 9665|116755|4289|3|50|88587.50|0.02|0.04|R|F|1994-07-31|1994-07-12|1994-08-11|TAKE BACK RETURN|REG AIR| ironic deposits. final warhorses h 9666|95963|5964|1|32|62686.72|0.00|0.02|N|O|1996-06-27|1996-07-09|1996-07-01|DELIVER IN PERSON|RAIL|into beans. quickly unusual asymptotes h 9666|43024|5529|2|49|47383.98|0.07|0.08|N|O|1996-06-03|1996-07-08|1996-06-07|DELIVER IN PERSON|SHIP|fully regular requests 9666|97079|9589|3|2|2152.14|0.04|0.01|N|O|1996-05-19|1996-06-25|1996-05-20|DELIVER IN PERSON|SHIP|could have to sublate i 9666|194928|9967|4|46|93054.32|0.03|0.02|N|O|1996-06-12|1996-08-05|1996-06-13|COLLECT COD|AIR|regular accounts haggle f 9666|148399|3428|5|8|11579.12|0.09|0.07|N|O|1996-07-06|1996-07-24|1996-07-30|TAKE BACK RETURN|SHIP|slyly. account 9666|141975|7004|6|12|24203.64|0.03|0.02|N|O|1996-06-05|1996-07-02|1996-07-04|DELIVER IN PERSON|MAIL|uickly special accounts use abo 9667|21696|4199|1|16|25883.04|0.03|0.08|N|O|1996-04-11|1996-05-11|1996-05-11|DELIVER IN PERSON|RAIL|he regular request 9667|34359|4360|2|43|55614.05|0.04|0.04|N|O|1996-04-29|1996-04-04|1996-05-15|COLLECT COD|SHIP| silent instructions. quickly regular th 9667|61109|8628|3|38|40663.80|0.03|0.04|N|O|1996-06-19|1996-05-17|1996-07-07|NONE|FOB| regular accounts 9667|43101|614|4|8|8352.80|0.07|0.02|N|O|1996-04-01|1996-05-14|1996-04-16|DELIVER IN PERSON|RAIL| after the final requests sle 9667|192388|4908|5|25|37009.50|0.09|0.04|N|O|1996-03-28|1996-04-16|1996-04-06|DELIVER IN PERSON|TRUCK|heodolites detect 9667|61785|9304|6|7|12227.46|0.06|0.06|N|O|1996-03-01|1996-03-27|1996-03-16|DELIVER IN PERSON|REG AIR|gular realms cajole sl 9667|11734|4236|7|16|26331.68|0.09|0.00|N|O|1996-04-26|1996-04-30|1996-05-20|COLLECT COD|REG AIR|ccounts wake f 9668|198979|1499|1|41|85196.77|0.04|0.04|R|F|1994-10-06|1994-10-07|1994-11-02|NONE|AIR|es boost furiously. courts sleep blit 9669|79922|2430|1|49|93194.08|0.08|0.04|R|F|1994-06-19|1994-07-14|1994-06-25|TAKE BACK RETURN|TRUCK|mpress blithely pending 9670|32654|5158|1|44|69812.60|0.10|0.07|N|O|1998-02-14|1997-12-15|1998-03-09|DELIVER IN PERSON|AIR|boost against the 9670|1896|9397|2|34|61128.26|0.01|0.04|N|O|1998-01-20|1997-12-25|1998-02-14|TAKE BACK RETURN|TRUCK|thely sentiments. 9670|57916|422|3|27|50595.57|0.06|0.04|N|O|1997-11-27|1998-01-25|1997-12-27|COLLECT COD|FOB|g packages. quickly pending ideas c 9670|184117|1672|4|46|55251.06|0.06|0.03|N|O|1998-01-07|1998-02-05|1998-01-23|COLLECT COD|SHIP|bold, pending requests ki 9671|173606|8641|1|50|83980.00|0.07|0.07|N|O|1995-10-28|1995-11-15|1995-11-24|COLLECT COD|TRUCK|equests sleep blithely 9671|193774|8813|2|18|33619.86|0.10|0.07|N|O|1995-12-10|1995-12-01|1995-12-12|NONE|TRUCK|uffy asymptotes a 9671|155470|3016|3|32|48815.04|0.06|0.00|N|O|1995-10-07|1995-11-02|1995-10-14|NONE|TRUCK|o beans. carefully pending requ 9696|169143|6692|1|43|52122.02|0.07|0.07|A|F|1995-05-24|1995-03-31|1995-05-31|TAKE BACK RETURN|TRUCK|efully accounts. blith 9696|176762|6763|2|28|51485.28|0.03|0.03|A|F|1995-03-28|1995-05-06|1995-04-04|COLLECT COD|REG AIR|ing accounts are. 
ironic packages nag af 9696|25919|5920|3|49|90400.59|0.01|0.04|A|F|1995-04-08|1995-04-21|1995-04-20|NONE|AIR|s poach furiously pending, ironic instru 9696|14948|2452|4|37|68928.78|0.07|0.05|N|F|1995-06-08|1995-04-01|1995-06-20|DELIVER IN PERSON|TRUCK|usual pains! ironic, even acco 9697|129726|2239|1|8|14045.76|0.10|0.05|A|F|1995-02-26|1995-04-16|1995-03-25|COLLECT COD|RAIL|ns. even requests nod alon 9697|96814|6815|2|21|38027.01|0.08|0.03|R|F|1995-04-06|1995-03-10|1995-04-23|NONE|FOB|egular theodolites boost carefully 9697|137535|5075|3|6|9435.18|0.09|0.08|R|F|1995-02-05|1995-03-30|1995-02-07|NONE|RAIL|ily regular accounts doze furiously. i 9697|147922|2951|4|19|37428.48|0.04|0.04|A|F|1995-02-08|1995-04-05|1995-03-02|NONE|RAIL|le carefully foxes. enti 9697|113678|8701|5|34|57516.78|0.05|0.02|A|F|1995-04-16|1995-04-16|1995-05-10|NONE|AIR|requests de 9697|175102|5103|6|39|45906.90|0.07|0.01|A|F|1995-03-25|1995-03-31|1995-04-23|DELIVER IN PERSON|RAIL|ix fluffily accounts. qui 9698|70382|2890|1|5|6761.90|0.08|0.04|N|O|1995-09-15|1995-10-18|1995-10-11|COLLECT COD|AIR|e furiously 9699|22967|2968|1|10|18899.60|0.10|0.06|A|F|1995-03-29|1995-04-18|1995-04-21|DELIVER IN PERSON|TRUCK|o beans use slyly a 9700|26812|4319|1|47|81724.07|0.10|0.02|N|O|1995-11-12|1995-10-16|1995-12-03|NONE|REG AIR|are furiously pend 9700|25400|7903|2|42|55666.80|0.06|0.06|N|O|1995-11-15|1995-10-19|1995-12-10|TAKE BACK RETURN|AIR|ess foxes. slyly unusual deposits wake ca 9700|88280|5805|3|42|53267.76|0.04|0.04|N|O|1995-11-26|1995-09-20|1995-11-27|NONE|FOB|ts nag. furiousl 9700|117589|7590|4|44|70689.52|0.03|0.00|N|O|1995-08-30|1995-09-20|1995-09-28|NONE|FOB|s haggle. special accounts about 9700|110347|348|5|31|42077.54|0.08|0.03|N|O|1995-08-31|1995-10-25|1995-09-12|TAKE BACK RETURN|AIR|lly unusual requests. furiousl 9700|195678|3236|6|9|15963.03|0.01|0.06|N|O|1995-10-25|1995-09-30|1995-11-06|DELIVER IN PERSON|REG AIR|ly regular theodolit 9701|159445|6991|1|33|49646.52|0.03|0.07|A|F|1993-04-29|1993-05-09|1993-05-07|NONE|RAIL|g, even deposit 9701|190939|5978|2|32|64957.76|0.02|0.01|R|F|1993-03-20|1993-05-01|1993-03-26|TAKE BACK RETURN|MAIL| ironic accounts wake c 9701|107213|7214|3|17|20743.57|0.06|0.03|R|F|1993-05-28|1993-05-17|1993-06-24|NONE|SHIP|ironic pinto beans nag furiously expres 9701|125092|5093|4|36|40215.24|0.10|0.00|R|F|1993-04-20|1993-04-03|1993-05-12|DELIVER IN PERSON|MAIL|l asymptotes use 9701|16569|4073|5|38|56451.28|0.04|0.08|A|F|1993-05-07|1993-04-23|1993-05-12|DELIVER IN PERSON|SHIP|daring pinto beans cajole quickly accord 9701|71111|6126|6|43|46530.73|0.03|0.05|A|F|1993-06-26|1993-05-04|1993-07-01|TAKE BACK RETURN|TRUCK|pinto beans. ex 9701|155369|5370|7|2|2848.72|0.06|0.00|R|F|1993-06-19|1993-04-02|1993-07-11|TAKE BACK RETURN|RAIL|ld courts. bold, bold 9702|82467|7484|1|18|26090.28|0.09|0.05|N|O|1996-11-07|1996-10-15|1996-11-15|TAKE BACK RETURN|AIR|ed theodolites. ironic accounts hang bl 9702|12117|9621|2|14|14407.54|0.05|0.06|N|O|1996-10-10|1996-10-06|1996-10-14|DELIVER IN PERSON|AIR|. 
final packages ato 9702|37261|2268|3|5|5991.30|0.08|0.00|N|O|1996-09-02|1996-10-16|1996-09-24|COLLECT COD|RAIL|lithely special accounts shall kind 9702|30981|3485|4|37|70743.26|0.06|0.06|N|O|1996-10-08|1996-09-27|1996-10-25|DELIVER IN PERSON|FOB|ly unusual deposits haggle carefull 9702|97005|9515|5|28|28056.00|0.07|0.07|N|O|1996-11-25|1996-11-14|1996-12-03|TAKE BACK RETURN|TRUCK|heodolites sleep furiously special exc 9702|152267|7298|6|17|22427.42|0.04|0.06|N|O|1996-09-16|1996-11-04|1996-10-02|DELIVER IN PERSON|SHIP| special platelets. carefully bold account 9702|136243|6244|7|9|11513.16|0.09|0.06|N|O|1996-10-29|1996-11-02|1996-11-12|DELIVER IN PERSON|TRUCK| blithely even requests about th 9703|35644|3154|1|24|37911.36|0.05|0.01|R|F|1994-10-22|1994-12-24|1994-11-01|COLLECT COD|REG AIR|e slyly. regular forge 9728|45399|5400|1|32|43020.48|0.10|0.01|A|F|1993-07-04|1993-09-25|1993-07-08|COLLECT COD|SHIP|y final instructions cajole furiously regu 9729|44362|9371|1|39|50948.04|0.04|0.03|N|O|1996-07-30|1996-08-22|1996-08-17|COLLECT COD|MAIL|lar accounts a 9729|179117|6669|2|44|52628.84|0.08|0.08|N|O|1996-08-04|1996-08-23|1996-08-26|COLLECT COD|MAIL|regular deposits detec 9729|2994|7995|3|25|47424.75|0.05|0.05|N|O|1996-10-21|1996-09-12|1996-11-14|DELIVER IN PERSON|RAIL| blithely even deposits nod ironi 9729|103663|8684|4|15|24999.90|0.09|0.05|N|O|1996-08-22|1996-08-25|1996-09-15|DELIVER IN PERSON|RAIL|n packages cajole slyly slyly final requ 9730|55350|7856|1|24|31328.40|0.05|0.05|N|O|1997-04-22|1997-05-31|1997-05-22|NONE|FOB|ess asymptotes boo 9730|61422|3929|2|37|51186.54|0.06|0.06|N|O|1997-05-01|1997-05-12|1997-05-22|COLLECT COD|MAIL|e furiously even, express foxe 9730|113663|8686|3|29|48623.14|0.03|0.05|N|O|1997-03-24|1997-05-24|1997-03-26|COLLECT COD|AIR|f the furiously regula 9730|126899|4436|4|50|96294.50|0.10|0.02|N|O|1997-04-06|1997-06-02|1997-04-17|NONE|TRUCK|express dependencies are fluffily 9730|189785|9786|5|33|61867.74|0.01|0.08|N|O|1997-06-15|1997-05-04|1997-07-09|COLLECT COD|MAIL|packages are carefully dolphins. reques 9730|105436|457|6|41|59098.63|0.02|0.03|N|O|1997-06-16|1997-05-23|1997-07-04|NONE|FOB|sual theodolites dazzle slyl 9731|123746|1283|1|8|14157.92|0.08|0.06|A|F|1993-11-26|1994-01-09|1993-12-05|COLLECT COD|RAIL|always regular deposits. ironic 9731|193979|6499|2|7|14510.79|0.06|0.05|A|F|1994-01-03|1993-12-09|1994-01-12|COLLECT COD|REG AIR| theodolites dazzle ironic accounts. furio 9732|145137|7652|1|4|4728.52|0.09|0.06|N|O|1995-06-25|1995-05-26|1995-07-12|DELIVER IN PERSON|AIR|s. regular pac 9732|101014|8545|2|47|47705.47|0.10|0.08|A|F|1995-04-02|1995-04-20|1995-04-06|DELIVER IN PERSON|REG AIR|ole carefully furiou 9732|133412|5926|3|20|28908.20|0.08|0.00|A|F|1995-04-11|1995-05-09|1995-04-12|DELIVER IN PERSON|REG AIR|lly. regularly even excuses cajole furi 9733|197698|2737|1|23|41300.87|0.01|0.06|A|F|1994-04-28|1994-05-02|1994-05-19|TAKE BACK RETURN|FOB|theodolites about the 9733|109605|9606|2|10|16146.00|0.10|0.01|A|F|1994-06-04|1994-05-31|1994-06-18|COLLECT COD|SHIP|ajole according to the c 9733|127148|9661|3|17|19977.38|0.10|0.00|R|F|1994-06-24|1994-06-03|1994-07-02|TAKE BACK RETURN|AIR|refully above the flu 9734|98732|3751|1|48|83075.04|0.07|0.05|N|O|1997-02-24|1997-05-04|1997-02-27|TAKE BACK RETURN|SHIP|ptotes doubt about the 9734|187333|2370|2|21|29826.93|0.06|0.02|N|O|1997-05-22|1997-04-29|1997-06-02|DELIVER IN PERSON|FOB|usly speci 9735|108001|5532|1|1|1009.00|0.10|0.08|N|O|1997-10-24|1997-08-05|1997-11-19|NONE|AIR|. 
close, final reque 9735|160380|7929|2|13|18724.94|0.03|0.07|N|O|1997-08-15|1997-09-10|1997-09-14|COLLECT COD|FOB|ess pinto beans. regula 9735|90550|5569|3|10|15405.50|0.09|0.06|N|O|1997-07-09|1997-08-12|1997-07-30|COLLECT COD|MAIL|c instructio 9735|63313|5820|4|22|28078.82|0.06|0.03|N|O|1997-07-23|1997-08-29|1997-08-17|COLLECT COD|AIR|re thinly bold 9735|89757|4774|5|26|45415.50|0.08|0.02|N|O|1997-08-23|1997-08-27|1997-09-05|NONE|AIR|egular grouches cajole fluffil 9760|74592|2114|1|17|26632.03|0.02|0.05|N|O|1998-04-28|1998-06-20|1998-04-30|TAKE BACK RETURN|FOB| permanent attainments kindle carefully 9760|65102|7609|2|30|32013.00|0.03|0.07|N|O|1998-05-31|1998-05-24|1998-06-21|NONE|SHIP|y fluffily careful packages. fluffily bol 9760|166643|1676|3|14|23934.96|0.03|0.05|N|O|1998-05-25|1998-06-04|1998-06-22|TAKE BACK RETURN|REG AIR| instructions haggle. furio 9760|155108|139|4|40|46524.00|0.02|0.04|N|O|1998-07-05|1998-06-29|1998-08-01|COLLECT COD|FOB|bove the blithely pending requests 9761|79926|2434|1|27|51459.84|0.07|0.04|A|F|1992-02-12|1992-03-22|1992-02-16|NONE|FOB|uctions integrate unusual, 9761|109068|1579|2|26|28003.56|0.05|0.08|A|F|1992-04-08|1992-03-09|1992-04-23|COLLECT COD|MAIL|furiously regular packages. furiously 9762|117423|7424|1|17|24487.14|0.02|0.06|R|F|1994-04-24|1994-03-13|1994-05-17|TAKE BACK RETURN|AIR|instructions nag a 9762|117816|7817|2|30|55014.30|0.10|0.03|R|F|1994-02-02|1994-04-01|1994-02-25|DELIVER IN PERSON|TRUCK|ly special packages. blithely regular depos 9762|118568|6102|3|30|47596.80|0.06|0.08|A|F|1994-04-21|1994-03-16|1994-04-22|COLLECT COD|REG AIR|ions are above the blithel 9762|157550|66|4|33|53049.15|0.00|0.02|R|F|1994-04-18|1994-03-20|1994-05-15|DELIVER IN PERSON|RAIL|ackages. quickly ev 9762|130684|5711|5|12|20576.16|0.04|0.00|A|F|1994-04-21|1994-02-16|1994-05-17|COLLECT COD|RAIL|xpress requests; c 9762|193298|856|6|50|69564.50|0.06|0.08|A|F|1994-03-26|1994-03-14|1994-04-08|NONE|FOB|ly after the slyly regular foxes: ironic s 9763|107354|7355|1|50|68067.50|0.10|0.04|N|O|1995-11-21|1995-10-27|1995-11-28|COLLECT COD|REG AIR| the slyly iron 9763|89625|9626|2|2|3229.24|0.07|0.00|N|O|1995-10-09|1995-09-22|1995-11-05|DELIVER IN PERSON|RAIL|sts sleep fluffily accounts. slyly unusual 9763|112967|7990|3|1|1979.96|0.02|0.02|N|O|1995-11-11|1995-10-18|1995-12-01|NONE|RAIL| final, ironic deposits. hockey play 9763|22353|2354|4|14|17854.90|0.02|0.07|N|O|1995-09-27|1995-09-28|1995-10-22|COLLECT COD|SHIP|lar pinto beans are blit 9763|76096|3618|5|12|12865.08|0.09|0.02|N|O|1995-09-17|1995-10-06|1995-10-14|NONE|MAIL|gle blithely alongside 9763|144749|2292|6|14|25112.36|0.01|0.04|N|O|1995-09-14|1995-10-22|1995-09-20|COLLECT COD|MAIL|furiously special instructions caj 9763|193535|6055|7|15|24427.95|0.06|0.07|N|O|1995-10-23|1995-10-12|1995-10-28|COLLECT COD|REG AIR| furiously bold accounts. furiously speci 9764|135481|5482|1|23|34879.04|0.09|0.03|A|F|1993-02-25|1993-03-04|1993-03-15|DELIVER IN PERSON|AIR|regular, ironic requests wake across 9764|18928|1430|2|5|9234.60|0.00|0.06|R|F|1993-01-01|1993-03-27|1993-01-23|TAKE BACK RETURN|RAIL|cial packages. special requ 9764|108463|3484|3|26|38257.96|0.08|0.01|A|F|1993-02-17|1993-02-04|1993-02-28|NONE|RAIL|h-- quickly express foxes bo 9765|136467|6468|1|34|51117.64|0.01|0.06|A|F|1992-11-04|1992-12-16|1992-11-19|COLLECT COD|FOB|sits. slyly careful theodolites integrat 9766|139772|2286|1|46|83341.42|0.08|0.01|A|F|1992-11-18|1992-10-02|1992-12-04|TAKE BACK RETURN|FOB|beans. pinto beans use. 
final requests 9766|138656|8657|2|3|5083.95|0.04|0.04|R|F|1992-08-28|1992-10-22|1992-09-17|TAKE BACK RETURN|FOB|ending deposits cajole among the quickly 9766|180225|5262|3|25|32630.50|0.03|0.06|A|F|1992-11-21|1992-10-14|1992-12-11|COLLECT COD|TRUCK| the excuses. fluffily eve 9766|26389|6390|4|50|65769.00|0.00|0.07|R|F|1992-12-11|1992-10-12|1992-12-17|COLLECT COD|RAIL|s; even, final requests wake sl 9767|84775|9792|1|28|49273.56|0.08|0.04|A|F|1994-08-29|1994-08-17|1994-09-07|NONE|AIR| deposits alongside of the 9792|169745|9746|1|16|29035.84|0.07|0.03|N|O|1995-10-24|1995-10-03|1995-10-29|TAKE BACK RETURN|AIR| was. slyly even accoun 9792|98588|6116|2|25|39664.50|0.03|0.06|N|O|1995-09-25|1995-10-12|1995-10-25|COLLECT COD|TRUCK| regular packages ought to detect. furi 9792|157942|2973|3|41|81997.54|0.01|0.00|N|O|1995-08-05|1995-10-20|1995-08-25|COLLECT COD|FOB|ly bold acc 9792|115811|834|4|4|7307.24|0.06|0.02|N|O|1995-08-27|1995-08-26|1995-09-02|NONE|AIR|ar dolphins. carefully 9792|30240|5247|5|29|33936.96|0.06|0.05|N|O|1995-11-05|1995-08-25|1995-11-24|DELIVER IN PERSON|MAIL|uctions haggle furiously f 9793|28378|5885|1|7|9144.59|0.04|0.06|A|F|1992-06-22|1992-06-10|1992-07-07|TAKE BACK RETURN|MAIL|nst the closely s 9793|141637|4152|2|2|3357.26|0.01|0.06|R|F|1992-05-01|1992-05-05|1992-05-03|DELIVER IN PERSON|AIR|ding theodolites abov 9793|84752|7261|3|4|6947.00|0.09|0.04|R|F|1992-06-22|1992-06-14|1992-07-02|NONE|TRUCK|grate quickly. fluffily regular pack 9793|13918|1422|4|33|60453.03|0.01|0.05|A|F|1992-04-14|1992-05-06|1992-04-22|COLLECT COD|MAIL|the regular pinto beans. ex 9793|106791|9302|5|29|52135.91|0.06|0.03|R|F|1992-07-20|1992-06-25|1992-07-21|COLLECT COD|FOB|l packages. even pinto bean 9794|104868|2399|1|32|59931.52|0.09|0.01|R|F|1992-04-28|1992-05-15|1992-05-03|NONE|FOB| slyly along the dependenc 9794|111191|6214|2|3|3606.57|0.09|0.08|A|F|1992-07-12|1992-05-17|1992-07-28|NONE|REG AIR|ost according to the busily regular realm 9794|185507|8026|3|17|27072.50|0.06|0.06|A|F|1992-05-14|1992-04-24|1992-06-11|NONE|MAIL|egular asymptotes. carefully 9794|199929|9930|4|44|89272.48|0.08|0.07|R|F|1992-05-14|1992-05-14|1992-05-31|DELIVER IN PERSON|AIR|the furiou 9794|3648|3649|5|32|49652.48|0.06|0.08|R|F|1992-04-30|1992-05-13|1992-05-22|NONE|FOB|ending accounts. carefully regula 9794|96937|6938|6|40|77357.20|0.01|0.05|R|F|1992-05-03|1992-05-27|1992-05-24|TAKE BACK RETURN|AIR|e the fluffily regular pa 9794|10360|5363|7|8|10162.88|0.00|0.05|A|F|1992-07-01|1992-05-15|1992-07-25|DELIVER IN PERSON|AIR|ular asympto 9795|115264|2798|1|13|16630.38|0.08|0.01|N|O|1998-05-26|1998-03-16|1998-06-07|NONE|SHIP|es are slyly. unusual co 9795|41109|1110|2|26|27302.60|0.06|0.02|N|O|1998-04-22|1998-05-01|1998-05-06|TAKE BACK RETURN|FOB|ironic, silent theodolites wake accordi 9795|104652|9673|3|23|38102.95|0.06|0.06|N|O|1998-05-13|1998-04-05|1998-06-09|NONE|RAIL| engage iro 9795|75286|5287|4|47|59280.16|0.01|0.04|N|O|1998-03-10|1998-05-04|1998-03-21|TAKE BACK RETURN|AIR|ntegrate fluffily blithely 9795|117424|9936|5|27|38918.34|0.09|0.02|N|O|1998-02-09|1998-04-22|1998-02-26|DELIVER IN PERSON|RAIL|ing to the ironi 9795|128671|8672|6|34|57788.78|0.05|0.07|N|O|1998-02-25|1998-03-07|1998-03-07|TAKE BACK RETURN|SHIP|egrate slyly blithel 9796|182132|9687|1|36|43708.68|0.00|0.06|A|F|1993-10-15|1993-10-15|1993-11-01|COLLECT COD|RAIL|ng pinto beans integrate. requests 9796|78417|3432|2|25|34885.25|0.03|0.05|A|F|1993-10-10|1993-11-13|1993-10-31|TAKE BACK RETURN|AIR|gle slyly ironic pearls. 
requ 9796|141456|3971|3|18|26954.10|0.03|0.06|A|F|1993-09-28|1993-10-30|1993-10-02|TAKE BACK RETURN|SHIP|accounts. final ideas sle 9796|44955|2468|4|31|58898.45|0.03|0.06|R|F|1993-09-19|1993-12-01|1993-09-29|NONE|RAIL|haggle carefully furiously even asym 9796|158237|5783|5|39|50513.97|0.07|0.06|A|F|1993-09-28|1993-10-11|1993-10-23|COLLECT COD|REG AIR|ake. slyly final cou 9796|74987|2509|6|8|15695.84|0.03|0.07|R|F|1993-11-27|1993-11-01|1993-12-19|DELIVER IN PERSON|FOB|ironic pinto beans. even 9797|41400|1401|1|4|5365.60|0.10|0.05|R|F|1993-12-20|1993-11-21|1993-12-26|NONE|REG AIR| beans above the carefully bold hoc 9797|193478|5998|2|28|44001.16|0.10|0.01|R|F|1993-10-03|1993-11-03|1993-10-04|NONE|FOB|en instructions. quickly regular theodoli 9797|175257|5258|3|22|29309.50|0.00|0.04|A|F|1993-10-01|1993-11-07|1993-10-14|NONE|SHIP| final deposits at the even 9797|142470|7499|4|33|49911.51|0.02|0.01|A|F|1993-12-16|1993-11-03|1994-01-06|NONE|FOB|ackages boost fluffily. spec 9797|86998|2015|5|31|61534.69|0.09|0.02|R|F|1993-10-24|1993-12-12|1993-11-21|DELIVER IN PERSON|AIR| permanently final theodolites 9797|170215|7767|6|2|2570.42|0.00|0.06|A|F|1993-12-25|1993-11-04|1993-12-30|DELIVER IN PERSON|REG AIR|counts after the 9797|2631|2632|7|24|36807.12|0.04|0.03|R|F|1993-12-08|1993-12-01|1994-01-02|COLLECT COD|MAIL|usly thin packages among the theod 9798|6017|8518|1|2|1846.02|0.05|0.03|N|O|1996-06-11|1996-05-07|1996-07-08|NONE|MAIL|ending asymptot 9798|118587|6121|2|20|32111.60|0.03|0.03|N|O|1996-05-20|1996-05-27|1996-05-29|DELIVER IN PERSON|FOB|fully regular braids. 9799|35698|705|1|4|6534.76|0.06|0.03|N|O|1996-04-25|1996-06-29|1996-05-24|COLLECT COD|SHIP|eposits nag after the reg 9799|162974|2975|2|12|24443.64|0.02|0.05|N|O|1996-05-28|1996-05-26|1996-06-05|DELIVER IN PERSON|RAIL|fluffily final accounts. final ideas sleep. 9799|88999|6524|3|16|31807.84|0.04|0.05|N|O|1996-04-12|1996-06-15|1996-05-06|TAKE BACK RETURN|TRUCK|ffily regular d 9799|198175|5733|4|9|11458.53|0.06|0.07|N|O|1996-08-06|1996-06-19|1996-08-19|DELIVER IN PERSON|MAIL|s. final, ironic instructions affix 9799|125373|7886|5|49|68520.13|0.08|0.04|N|O|1996-07-18|1996-06-11|1996-08-15|COLLECT COD|REG AIR|ly special deposits. regular foxe 9799|129764|7301|6|23|41256.48|0.02|0.05|N|O|1996-07-10|1996-05-25|1996-08-08|TAKE BACK RETURN|REG AIR|deposits nag alongside of the even instru 9824|41380|8893|1|8|10571.04|0.05|0.07|N|F|1995-05-24|1995-04-21|1995-06-21|TAKE BACK RETURN|FOB| are. packages haggl 9824|73184|5692|2|1|1157.18|0.02|0.07|A|F|1995-01-26|1995-04-16|1995-02-11|COLLECT COD|TRUCK|dencies. final, 9824|191278|1279|3|38|52032.26|0.03|0.05|A|F|1995-03-11|1995-03-27|1995-03-15|COLLECT COD|REG AIR|gular, ironic package 9824|194659|7179|4|11|19290.15|0.07|0.01|R|F|1995-05-08|1995-04-17|1995-05-14|NONE|REG AIR| furious t 9824|30349|350|5|11|14072.74|0.09|0.00|A|F|1995-04-15|1995-04-14|1995-05-04|TAKE BACK RETURN|FOB|s use doggedly. pending, regular pac 9824|71024|1025|6|49|48755.98|0.04|0.01|R|F|1995-03-10|1995-04-07|1995-03-11|NONE|MAIL|ly ironic packages. 9824|97930|440|7|25|48198.25|0.00|0.07|A|F|1995-02-17|1995-03-12|1995-02-22|COLLECT COD|RAIL|inst the blithely final excuses. 
final 9825|105098|7609|1|22|24267.98|0.04|0.06|N|O|1996-04-21|1996-07-01|1996-05-19|NONE|MAIL|hely final sentimen 9825|180049|50|2|16|18064.64|0.07|0.07|N|O|1996-06-15|1996-07-08|1996-07-11|DELIVER IN PERSON|TRUCK|en asymptotes according to the r 9825|75854|869|3|4|7319.40|0.09|0.00|N|O|1996-06-29|1996-06-28|1996-07-15|NONE|TRUCK|l platelets boost car 9825|63603|1122|4|25|39165.00|0.02|0.04|N|O|1996-05-03|1996-07-06|1996-05-18|DELIVER IN PERSON|RAIL|d, ironic instructions. carefully spec 9826|172074|4592|1|34|38966.38|0.10|0.05|R|F|1992-07-18|1992-06-12|1992-07-30|DELIVER IN PERSON|AIR|old, ironic courts alongside of 9826|45559|8064|2|1|1504.55|0.02|0.04|A|F|1992-04-22|1992-05-06|1992-05-15|COLLECT COD|MAIL|uickly. blithely pending depths lose slyly 9826|108587|3608|3|50|79779.00|0.03|0.08|R|F|1992-05-07|1992-06-13|1992-05-21|COLLECT COD|FOB|ound the blithely spec 9827|27876|379|1|16|28861.92|0.09|0.08|N|O|1998-08-15|1998-09-13|1998-09-08|NONE|SHIP| blithely silent pa 9827|99930|9931|2|20|38598.60|0.08|0.08|N|O|1998-10-23|1998-09-07|1998-11-15|DELIVER IN PERSON|TRUCK|, bold accounts af 9828|71566|9088|1|39|59964.84|0.00|0.05|R|F|1992-04-29|1992-06-16|1992-05-23|COLLECT COD|RAIL|he furiousl 9828|6370|1371|2|17|21698.29|0.00|0.03|A|F|1992-05-31|1992-06-28|1992-06-23|DELIVER IN PERSON|FOB|final accounts according to 9828|50776|3282|3|21|36262.17|0.00|0.06|R|F|1992-06-05|1992-06-29|1992-07-02|COLLECT COD|AIR|nts. special 9828|183615|8652|4|32|54355.52|0.06|0.00|A|F|1992-08-16|1992-06-17|1992-08-17|DELIVER IN PERSON|SHIP|uickly even foxes across the special accou 9828|12218|9722|5|25|28255.25|0.01|0.01|R|F|1992-08-19|1992-06-12|1992-08-21|NONE|TRUCK| blithely fluffil 9828|139798|9799|6|41|75349.39|0.02|0.06|A|F|1992-07-25|1992-07-10|1992-08-03|TAKE BACK RETURN|RAIL|e furiously across the even requests. ir 9828|177261|9779|7|13|17397.38|0.00|0.01|R|F|1992-06-27|1992-07-15|1992-07-18|COLLECT COD|RAIL|ding, ironic packages promise blithel 9829|20948|8455|1|2|3737.88|0.08|0.08|N|O|1996-03-31|1996-01-25|1996-04-06|DELIVER IN PERSON|MAIL|ss the quickly bold de 9829|59855|4866|2|3|5444.55|0.03|0.01|N|O|1996-02-25|1996-03-01|1996-03-20|NONE|AIR|. unusual accounts affix carefully about 9830|157412|2443|1|26|38204.66|0.00|0.06|N|O|1997-12-01|1997-10-09|1997-12-22|DELIVER IN PERSON|TRUCK|ular dependencies. request 9830|23029|5532|2|34|32368.68|0.04|0.03|N|O|1997-10-06|1997-11-28|1997-11-03|NONE|SHIP| boldly pen 9830|34622|2132|3|17|26462.54|0.02|0.01|N|O|1997-12-04|1997-10-16|1997-12-20|NONE|MAIL|y pending ideas. furiously e 9830|83441|8458|4|19|27064.36|0.03|0.04|N|O|1997-11-16|1997-10-10|1997-11-21|COLLECT COD|AIR|y across the regular requests. quickly r 9830|180595|596|5|42|70374.78|0.10|0.08|N|O|1997-12-25|1997-11-07|1998-01-15|DELIVER IN PERSON|AIR|platelets: silent instructions c 9831|16176|8678|1|23|25119.91|0.06|0.04|N|O|1996-10-05|1996-08-13|1996-10-20|DELIVER IN PERSON|RAIL|pending theodolites. iro 9831|69594|2101|2|37|57852.83|0.02|0.08|N|O|1996-07-28|1996-08-05|1996-08-24|DELIVER IN PERSON|MAIL|l accounts. care 9856|113771|1305|1|31|55327.87|0.09|0.03|R|F|1993-06-07|1993-08-10|1993-06-30|COLLECT COD|AIR|posits. 
quick, final pinto b 9856|135955|3495|2|49|97556.55|0.08|0.06|R|F|1993-08-25|1993-07-24|1993-09-12|COLLECT COD|SHIP|ests engage furio 9856|191669|4189|3|48|84511.68|0.03|0.00|A|F|1993-06-24|1993-08-11|1993-07-18|DELIVER IN PERSON|MAIL|arefully quickly unusu 9856|168935|1452|4|37|74145.41|0.00|0.04|A|F|1993-06-14|1993-08-23|1993-06-27|NONE|TRUCK|refully furiously regular de 9857|108800|8801|1|6|10852.80|0.06|0.02|A|F|1993-05-15|1993-06-26|1993-05-19|TAKE BACK RETURN|FOB|ickly regular deposit 9857|199912|7470|2|42|84500.22|0.05|0.07|A|F|1993-07-22|1993-08-03|1993-07-27|TAKE BACK RETURN|MAIL|scapades are blithely abou 9857|106188|6189|3|6|7165.08|0.03|0.03|A|F|1993-06-01|1993-07-27|1993-06-18|TAKE BACK RETURN|MAIL|lites. ironic foxes wa 9857|87608|2625|4|40|63824.00|0.00|0.07|R|F|1993-07-18|1993-06-14|1993-08-11|TAKE BACK RETURN|RAIL|ording to the regular theodolites are ac 9857|16637|6638|5|10|15536.30|0.00|0.03|R|F|1993-08-08|1993-07-25|1993-08-20|COLLECT COD|REG AIR|cuses would af 9857|16240|1243|6|5|5781.20|0.09|0.00|A|F|1993-08-04|1993-06-27|1993-09-02|DELIVER IN PERSON|AIR|ongside of the fin 9857|143609|3610|7|13|21483.80|0.08|0.03|A|F|1993-08-31|1993-06-17|1993-09-02|DELIVER IN PERSON|SHIP|ions. furiously ironic 9858|35273|2783|1|18|21748.86|0.10|0.01|N|O|1997-12-15|1998-02-04|1998-01-09|NONE|RAIL| deposits was quickly. accounts i 9858|31739|9249|2|37|61817.01|0.04|0.03|N|O|1998-01-13|1998-02-19|1998-02-06|DELIVER IN PERSON|TRUCK|y carefully furiousl 9858|34584|2094|3|30|45557.40|0.02|0.06|N|O|1998-01-04|1998-01-10|1998-01-11|NONE|RAIL|ffily unusual requests ab 9859|109160|9161|1|20|23383.20|0.04|0.06|N|O|1996-09-03|1996-09-06|1996-10-02|DELIVER IN PERSON|AIR|es according to the quickly r 9859|48922|3931|2|50|93546.00|0.02|0.05|N|O|1996-11-09|1996-10-13|1996-11-18|TAKE BACK RETURN|AIR|its are slyly final accounts. pe 9859|20489|7996|3|30|42284.40|0.03|0.08|N|O|1996-10-26|1996-09-25|1996-11-25|TAKE BACK RETURN|TRUCK| fluffily regular deposits; slyly regular f 9859|147859|5402|4|37|70553.45|0.09|0.00|N|O|1996-08-20|1996-09-25|1996-09-15|DELIVER IN PERSON|TRUCK|he pending pinto beans 9859|53981|6487|5|49|94814.02|0.00|0.06|N|O|1996-08-27|1996-09-16|1996-09-17|COLLECT COD|REG AIR|ve theodolites will nag carefully aga 9860|75599|614|1|4|6298.36|0.06|0.07|A|F|1993-12-03|1993-12-27|1993-12-13|COLLECT COD|MAIL|posits thrash. en 9860|186045|1082|2|30|33931.20|0.03|0.00|A|F|1993-11-02|1993-11-10|1993-11-30|COLLECT COD|SHIP| the slyly bold foxes 9860|41810|4315|3|37|64816.97|0.03|0.05|A|F|1994-02-07|1994-01-03|1994-03-04|DELIVER IN PERSON|FOB|foxes cajole furiously slyly express 9860|99360|4379|4|4|5437.44|0.04|0.03|A|F|1993-10-18|1993-12-23|1993-11-03|TAKE BACK RETURN|RAIL|carefully special accoun 9860|130601|3115|5|15|24474.00|0.03|0.08|R|F|1994-01-12|1993-12-08|1994-01-30|TAKE BACK RETURN|REG AIR|. regular pearls u 9860|2543|44|6|19|27465.26|0.04|0.04|R|F|1993-10-28|1993-12-10|1993-11-15|TAKE BACK RETURN|REG AIR|ges. 
even packages cajole pending, ironi 9861|134874|7388|1|1|1908.87|0.08|0.03|R|F|1992-11-16|1992-11-30|1992-12-04|COLLECT COD|AIR|ular pinto beans wake fluffily aft 9861|10802|5805|2|7|11989.60|0.09|0.02|A|F|1992-11-28|1992-12-12|1992-12-23|DELIVER IN PERSON|RAIL|es sleep carefully speci 9861|135558|5559|3|21|33464.55|0.03|0.08|R|F|1993-01-28|1993-01-10|1993-01-31|DELIVER IN PERSON|REG AIR|ly express pinto b 9861|112191|9725|4|6|7219.14|0.10|0.05|A|F|1992-12-26|1992-12-04|1993-01-21|COLLECT COD|SHIP|e furiously express req 9861|191342|1343|5|13|18633.42|0.02|0.08|R|F|1992-12-31|1993-01-15|1993-01-30|COLLECT COD|MAIL| carefully 9862|86241|8750|1|26|31908.24|0.05|0.06|N|O|1998-01-15|1997-11-19|1998-01-25|NONE|FOB|p blithely 9862|39555|9556|2|11|16440.05|0.06|0.06|N|O|1997-12-18|1998-01-03|1998-01-12|TAKE BACK RETURN|AIR|s haggle quietl 9862|195846|3404|3|37|71848.08|0.05|0.07|N|O|1997-11-20|1997-12-27|1997-12-15|DELIVER IN PERSON|SHIP| according t 9862|182659|214|4|4|6966.60|0.10|0.01|N|O|1997-12-12|1997-12-06|1998-01-04|TAKE BACK RETURN|SHIP|each slyly after the furiously unusual req 9862|135675|5676|5|45|76980.15|0.07|0.07|N|O|1997-12-15|1997-11-21|1997-12-30|NONE|RAIL|nal theodolites sleep quickly th 9863|85537|8046|1|48|73081.44|0.06|0.08|N|O|1995-12-11|1995-10-11|1996-01-04|NONE|SHIP|arefully at the blithely unusual excuses. 9863|12911|2912|2|47|85723.77|0.06|0.04|N|O|1995-11-17|1995-10-14|1995-12-14|TAKE BACK RETURN|AIR|ts. asymptotes 9863|187433|2470|3|27|41051.61|0.07|0.05|N|O|1995-11-13|1995-11-22|1995-11-29|NONE|RAIL|boost bold requests. pending, pe 9863|174462|4463|4|36|55312.56|0.04|0.04|N|O|1995-11-17|1995-10-12|1995-11-27|COLLECT COD|AIR|ts haggle. regular accounts haggle q 9863|170852|8404|5|18|34611.30|0.01|0.06|N|O|1995-12-12|1995-11-22|1995-12-14|TAKE BACK RETURN|AIR|press packa 9863|103671|1202|6|43|72010.81|0.03|0.06|N|O|1995-10-21|1995-10-06|1995-10-22|COLLECT COD|FOB|ly unusual foxes a 9863|186664|4219|7|29|50769.14|0.07|0.08|N|O|1995-11-06|1995-11-19|1995-11-12|COLLECT COD|TRUCK|uests cajole slyly aro 9888|58135|641|1|34|37166.42|0.04|0.06|A|F|1994-11-25|1994-12-09|1994-12-13|TAKE BACK RETURN|FOB| slyly daring accounts. regular dependen 9888|66812|4331|2|12|21345.72|0.10|0.03|A|F|1994-11-11|1994-11-25|1994-11-12|TAKE BACK RETURN|REG AIR|ach blithely along the evenly f 9888|104133|4134|3|6|6822.78|0.07|0.04|A|F|1994-11-25|1994-12-06|1994-12-12|NONE|TRUCK|se requests sleep slyly above t 9889|180812|5849|1|11|20820.91|0.02|0.00|A|F|1994-12-26|1994-11-30|1995-01-06|DELIVER IN PERSON|MAIL|deposits detect care 9889|103863|3864|2|20|37337.20|0.10|0.00|A|F|1994-11-10|1994-12-01|1994-11-14|NONE|RAIL|are quickly furiou 9890|118699|3722|1|26|44659.94|0.00|0.01|R|F|1995-03-15|1995-03-27|1995-03-23|COLLECT COD|MAIL|lly ironic deposits. 9890|165934|967|2|41|81997.13|0.09|0.01|N|O|1995-06-22|1995-04-14|1995-06-25|DELIVER IN PERSON|AIR|ully final packages. quickl 9891|195822|861|1|5|9589.10|0.02|0.05|N|O|1995-09-16|1995-07-23|1995-09-18|TAKE BACK RETURN|RAIL| dolphins. boldly final ac 9891|173014|5532|2|32|34784.32|0.03|0.02|N|F|1995-06-13|1995-07-09|1995-06-23|COLLECT COD|AIR|s sleep. carefully special theodolites was 9892|196949|1988|1|5|10229.70|0.09|0.08|R|F|1992-09-11|1992-10-06|1992-09-16|NONE|MAIL| bold instruction 9892|108997|8998|2|4|8023.96|0.05|0.02|A|F|1992-09-22|1992-08-24|1992-10-04|DELIVER IN PERSON|RAIL|y unusual asymptotes: bli 9892|110096|97|3|3|3318.27|0.02|0.04|R|F|1992-09-21|1992-10-06|1992-10-17|NONE|TRUCK|even warthogs. 
accounts affix 9893|144553|7068|1|31|49524.05|0.03|0.05|R|F|1994-11-22|1994-10-21|1994-11-29|NONE|REG AIR|deas sleep carefully carefully st 9893|87426|2443|2|13|18374.46|0.02|0.07|R|F|1994-08-31|1994-09-17|1994-09-12|DELIVER IN PERSON|TRUCK|quests. sometimes unusual i 9893|164560|2109|3|25|40614.00|0.04|0.04|A|F|1994-09-08|1994-11-02|1994-10-01|DELIVER IN PERSON|MAIL|. blithely pendi 9894|132438|9978|1|23|33819.89|0.01|0.00|A|F|1994-05-06|1994-04-26|1994-06-03|DELIVER IN PERSON|SHIP|ages sleep according to th 9894|80110|111|2|6|6540.66|0.00|0.07|R|F|1994-05-15|1994-06-09|1994-06-05|NONE|AIR|ckages engage 9894|197156|2195|3|33|41353.95|0.06|0.06|R|F|1994-05-18|1994-04-26|1994-06-07|DELIVER IN PERSON|MAIL| carefully even accounts mol 9895|72990|5498|1|50|98149.50|0.00|0.01|R|F|1993-05-07|1993-07-17|1993-05-14|DELIVER IN PERSON|RAIL|he enticingly final co 9895|137457|9971|2|24|35866.80|0.05|0.05|A|F|1993-04-23|1993-06-30|1993-04-29|COLLECT COD|REG AIR|ding ideas detec 9895|27701|7702|3|46|74920.20|0.02|0.04|A|F|1993-05-23|1993-07-18|1993-06-04|DELIVER IN PERSON|TRUCK| carefully silent packages. 9895|162041|9590|4|48|52945.92|0.02|0.08|A|F|1993-05-24|1993-07-06|1993-06-18|DELIVER IN PERSON|REG AIR|ecial dugouts. silent excuses detect furiou 9920|133813|8840|1|1|1846.81|0.06|0.02|N|O|1996-08-05|1996-07-12|1996-08-31|TAKE BACK RETURN|FOB|ently special warhorses. b 9920|190951|3471|2|28|57174.60|0.08|0.03|N|O|1996-09-25|1996-08-30|1996-10-01|DELIVER IN PERSON|FOB|es detect. fluffily 9920|103580|6091|3|29|45923.82|0.08|0.03|N|O|1996-08-31|1996-07-13|1996-09-19|TAKE BACK RETURN|TRUCK|uctions haggle among the express deposits. 9921|56036|8542|1|9|8928.27|0.07|0.05|N|O|1995-07-10|1995-08-07|1995-07-14|COLLECT COD|FOB|ackages. furiously bold req 9921|180355|7910|2|48|68896.80|0.00|0.04|N|F|1995-05-31|1995-07-02|1995-06-20|TAKE BACK RETURN|TRUCK|thely. slyly unusual asymptot 9921|24553|7056|3|7|10342.85|0.03|0.04|N|O|1995-08-24|1995-06-29|1995-08-30|NONE|RAIL|usly regular in 9921|49522|9523|4|28|41202.56|0.05|0.01|N|O|1995-06-19|1995-06-22|1995-06-25|COLLECT COD|MAIL|lar, regular pinto bean 9922|88564|6089|1|20|31051.20|0.01|0.02|R|F|1995-05-09|1995-07-03|1995-05-17|NONE|FOB|bove the ironic depths haggle blithely spec 9922|80232|7757|2|1|1212.23|0.09|0.00|N|F|1995-06-16|1995-06-17|1995-06-27|DELIVER IN PERSON|SHIP| final accou 9922|52868|384|3|21|38238.06|0.03|0.04|R|F|1995-04-21|1995-06-09|1995-04-29|DELIVER IN PERSON|MAIL|indle slyly ideas. requests nag quietly ab 9922|147236|4779|4|31|39780.13|0.09|0.04|R|F|1995-05-27|1995-06-04|1995-06-10|DELIVER IN PERSON|MAIL|s integrate. packages hinder silent, re 9922|179906|2424|5|16|31774.40|0.06|0.05|A|F|1995-05-24|1995-06-09|1995-06-05|DELIVER IN PERSON|MAIL|iously special depos 9922|68566|3579|6|32|49105.92|0.09|0.04|N|F|1995-06-13|1995-05-17|1995-06-21|NONE|TRUCK|usly regular pac 9922|176269|6270|7|2|2690.52|0.09|0.02|N|O|1995-08-01|1995-06-15|1995-08-10|NONE|REG AIR|ironic pint 9923|143875|1418|1|11|21107.57|0.09|0.05|R|F|1995-01-26|1994-11-15|1995-02-22|TAKE BACK RETURN|SHIP|y final deposits sleep. furi 9923|102166|7187|2|45|52567.20|0.02|0.00|R|F|1994-12-05|1994-11-11|1994-12-12|COLLECT COD|RAIL|urts. 
bold courts above the furiou 9923|92784|312|3|33|58633.74|0.04|0.03|A|F|1994-12-30|1994-11-29|1995-01-21|TAKE BACK RETURN|MAIL|fully fluffily even d 9923|16621|1624|4|6|9225.72|0.07|0.02|R|F|1994-10-23|1994-12-09|1994-11-02|DELIVER IN PERSON|FOB|unts wake sl 9923|142601|5116|5|26|42733.60|0.05|0.01|R|F|1995-01-21|1994-11-29|1995-01-22|COLLECT COD|RAIL|usly. ironic platel 9923|82692|217|6|41|68662.29|0.02|0.01|A|F|1995-01-02|1995-01-01|1995-02-01|COLLECT COD|REG AIR|ronic accounts cajole. unusual account 9924|80257|7782|1|2|2474.50|0.04|0.01|N|O|1997-09-05|1997-06-15|1997-09-18|COLLECT COD|TRUCK|ilent theodolites. bli 9924|67448|4967|2|31|43878.64|0.09|0.06|N|O|1997-08-25|1997-06-24|1997-09-06|NONE|MAIL|, ironic packages. deposits se 9924|193257|815|3|27|36456.75|0.01|0.05|N|O|1997-05-13|1997-07-21|1997-05-20|TAKE BACK RETURN|RAIL|regular pinto beans. slyly f 9924|56154|8660|4|27|29974.05|0.00|0.00|N|O|1997-06-17|1997-06-19|1997-07-16|COLLECT COD|AIR|e after the even, express dependencies. re 9924|175084|2636|5|34|39408.72|0.09|0.00|N|O|1997-08-23|1997-08-06|1997-08-29|COLLECT COD|MAIL|eas boost careful 9925|158985|1501|1|40|81759.20|0.08|0.01|R|F|1992-03-18|1992-04-09|1992-03-27|COLLECT COD|AIR|s use quickly. regular pinto beans use 9925|50339|2845|2|46|59309.18|0.09|0.04|R|F|1992-02-02|1992-03-26|1992-02-06|DELIVER IN PERSON|FOB|ns. pending, special deposits 9925|41690|9203|3|48|78321.12|0.07|0.00|A|F|1992-05-25|1992-04-11|1992-06-10|DELIVER IN PERSON|RAIL|kages nag above 9925|81623|6640|4|38|60975.56|0.07|0.08|A|F|1992-05-16|1992-04-30|1992-06-03|TAKE BACK RETURN|SHIP|t the courts boost carefully 9925|31442|8952|5|6|8240.64|0.10|0.03|A|F|1992-05-18|1992-04-14|1992-06-07|DELIVER IN PERSON|RAIL|s boost. carefully final requests wake qui 9925|29854|9855|6|40|71354.00|0.03|0.04|A|F|1992-02-29|1992-04-22|1992-03-10|NONE|TRUCK|le regularly final asymptotes 9926|28201|3206|1|42|47426.40|0.04|0.02|A|F|1995-01-21|1994-11-21|1995-02-06|COLLECT COD|AIR|lowly? carefully final accounts wake. 9926|173681|8716|2|4|7018.72|0.08|0.07|A|F|1994-11-26|1994-11-23|1994-12-09|TAKE BACK RETURN|TRUCK|he pending tithes wake acco 9926|40736|8249|3|33|55332.09|0.10|0.05|A|F|1994-11-28|1995-01-10|1994-12-25|COLLECT COD|AIR|r the blithely ironic platelets. 9926|34487|6991|4|34|48330.32|0.09|0.00|R|F|1995-01-02|1994-12-09|1995-01-22|NONE|TRUCK|s. fluffily 9926|18782|6286|5|32|54424.96|0.10|0.05|A|F|1995-01-30|1994-11-19|1995-02-22|DELIVER IN PERSON|TRUCK|lar, even t 9926|64220|1739|6|39|46184.58|0.07|0.03|R|F|1995-01-24|1994-12-12|1995-02-05|NONE|AIR|ackages about 9927|875|3376|1|43|76362.41|0.01|0.05|N|O|1995-12-16|1995-09-16|1995-12-28|COLLECT COD|TRUCK|regular in 9927|103164|3165|2|9|10504.44|0.07|0.04|N|O|1995-10-12|1995-09-25|1995-10-28|COLLECT COD|MAIL|efully fin 9927|70144|7666|3|46|51250.44|0.09|0.06|N|O|1995-10-25|1995-10-24|1995-11-09|COLLECT COD|AIR|arefully according to the express request 9927|99528|2038|4|32|48880.64|0.07|0.00|N|O|1995-11-03|1995-09-18|1995-11-15|DELIVER IN PERSON|SHIP|all have to are above the furiously even r 9927|151611|1612|5|37|61516.57|0.01|0.03|N|O|1995-11-15|1995-09-23|1995-12-15|TAKE BACK RETURN|RAIL|he quickly regular packages. 
ca 9927|119356|4379|6|8|11002.80|0.07|0.08|N|O|1995-10-21|1995-10-31|1995-11-11|DELIVER IN PERSON|MAIL|ously above the quickly regular depo 9952|99735|2245|1|23|39898.79|0.05|0.04|R|F|1992-08-24|1992-07-22|1992-08-27|NONE|REG AIR|cies about the furiously express 9952|41867|9380|2|20|36177.20|0.03|0.00|A|F|1992-05-21|1992-06-07|1992-06-01|NONE|AIR|requests. regularly 9952|42894|7903|3|15|27553.35|0.10|0.04|R|F|1992-05-07|1992-06-24|1992-05-19|COLLECT COD|SHIP|e ironic, special accounts. car 9953|179971|5006|1|2|4101.94|0.09|0.06|A|F|1995-04-05|1995-04-28|1995-05-03|DELIVER IN PERSON|MAIL|usly regular accou 9953|159217|4248|2|10|12762.10|0.06|0.05|R|F|1995-03-09|1995-05-23|1995-04-06|COLLECT COD|MAIL|ckly regular 9954|92138|2139|1|30|33903.90|0.00|0.07|R|F|1994-07-26|1994-09-01|1994-08-24|DELIVER IN PERSON|RAIL|requests are blithely after the ironic d 9954|154176|1722|2|3|3690.51|0.01|0.06|A|F|1994-09-10|1994-09-02|1994-10-04|DELIVER IN PERSON|REG AIR|s haggle evenly. dogged 9954|31841|4345|3|28|49639.52|0.04|0.02|A|F|1994-09-08|1994-07-24|1994-09-28|TAKE BACK RETURN|TRUCK|nding, bol 9954|143464|3465|4|37|55776.02|0.05|0.02|R|F|1994-08-08|1994-07-21|1994-09-01|DELIVER IN PERSON|FOB| express requests sleep 9954|182959|5478|5|20|40839.00|0.05|0.06|R|F|1994-07-14|1994-09-08|1994-07-20|NONE|REG AIR|ntly pending dolphins. quickly ironic pa 9954|181203|8758|6|18|23115.60|0.09|0.02|A|F|1994-06-21|1994-08-26|1994-06-29|TAKE BACK RETURN|TRUCK|refully fin 9955|6119|1120|1|36|36903.96|0.09|0.00|N|O|1995-12-17|1995-10-24|1995-12-23|NONE|AIR|es. carefully p 9955|28536|3541|2|15|21967.95|0.00|0.03|N|O|1995-11-12|1995-11-19|1995-11-26|NONE|RAIL|deas. regular, express 9955|193828|1386|3|42|80716.44|0.01|0.08|N|O|1995-10-11|1995-10-16|1995-10-14|TAKE BACK RETURN|AIR|ecial, special deposit 9955|52552|5058|4|9|13540.95|0.01|0.07|N|O|1995-10-24|1995-12-02|1995-11-14|COLLECT COD|SHIP|de of the pending 9955|38777|6287|5|23|39462.71|0.05|0.00|N|O|1995-12-23|1995-11-06|1996-01-13|NONE|REG AIR|ages. express inst 9955|113117|3118|6|14|15821.54|0.05|0.04|N|O|1995-10-30|1995-10-25|1995-11-25|NONE|TRUCK|ending packages sleep fluffily 9956|6898|9399|1|40|72195.60|0.02|0.02|R|F|1993-05-13|1993-05-30|1993-06-01|COLLECT COD|MAIL|n ideas detect blithely package 9957|149692|7235|1|23|40058.87|0.03|0.03|A|F|1994-03-30|1994-02-05|1994-04-13|DELIVER IN PERSON|SHIP| according to the special accounts doubt at 9957|70593|8115|2|21|32835.39|0.09|0.06|R|F|1994-03-08|1994-01-21|1994-04-04|TAKE BACK RETURN|AIR|ages. blithely 9958|178719|6271|1|37|66515.27|0.09|0.04|A|F|1994-01-25|1994-01-27|1994-02-03|DELIVER IN PERSON|AIR|ter the carefully final plate 9958|128173|3198|2|38|45644.46|0.03|0.05|R|F|1994-03-24|1993-12-27|1994-04-12|DELIVER IN PERSON|FOB|instructions. regular pinto beans boost 9958|191054|3574|3|23|26336.15|0.08|0.00|A|F|1993-12-21|1994-01-21|1993-12-23|DELIVER IN PERSON|REG AIR|cuses detect. flu 9958|39415|9416|4|38|51467.58|0.05|0.06|R|F|1994-01-11|1994-01-22|1994-01-30|NONE|AIR|refully reg 9959|48904|1409|1|6|11117.40|0.00|0.03|N|O|1995-11-13|1995-12-13|1995-11-16|COLLECT COD|RAIL| unusual pin 9959|114641|9664|2|10|16556.40|0.07|0.07|N|O|1995-11-07|1995-11-17|1995-11-28|TAKE BACK RETURN|SHIP|bove the pendi 9984|120079|80|1|18|19783.26|0.01|0.04|N|O|1998-09-26|1998-08-27|1998-10-13|NONE|REG AIR|nusual excuses; fluffily ironic pack 9984|151767|1768|2|49|89119.24|0.01|0.03|N|O|1998-08-02|1998-09-13|1998-08-08|TAKE BACK RETURN|FOB|er the special sheaves. 
unusual 9984|139257|9258|3|33|42776.25|0.03|0.00|N|O|1998-09-03|1998-09-15|1998-09-16|COLLECT COD|REG AIR| theodolites. carefully re 9984|79982|7504|4|27|52973.46|0.04|0.05|N|O|1998-07-14|1998-08-21|1998-07-15|TAKE BACK RETURN|AIR|lly. furiously express 9984|48156|8157|5|41|45270.15|0.07|0.02|N|O|1998-10-18|1998-08-20|1998-10-31|TAKE BACK RETURN|TRUCK|ructions. doggedly special requests de 9985|160380|5413|1|38|54734.44|0.06|0.08|A|F|1993-08-21|1993-10-23|1993-09-06|NONE|MAIL|fully ironic hockey players use q 9985|97630|7631|2|1|1627.63|0.03|0.00|A|F|1993-10-08|1993-11-06|1993-10-24|COLLECT COD|RAIL|bove the ac 9985|52396|4902|3|17|22922.63|0.08|0.01|R|F|1993-10-17|1993-10-03|1993-11-10|NONE|TRUCK|rding to the regular 9986|21662|1663|1|40|63346.40|0.04|0.07|N|O|1998-01-20|1997-11-03|1998-01-31|DELIVER IN PERSON|RAIL| the regular asymptotes. fl 9986|67666|2679|2|47|76782.02|0.02|0.02|N|O|1997-10-02|1997-12-19|1997-10-13|TAKE BACK RETURN|REG AIR|ular platelets. quickly final asymptotes a 9986|46637|1646|3|22|34839.86|0.00|0.01|N|O|1997-12-10|1997-12-17|1997-12-16|TAKE BACK RETURN|REG AIR|ing to the slyly ironic r 9987|183000|3001|1|11|11913.00|0.06|0.07|N|O|1995-12-28|1996-02-16|1996-01-10|TAKE BACK RETURN|SHIP|accounts are furio 9987|65367|5368|2|41|54626.76|0.10|0.08|N|O|1996-02-23|1996-01-22|1996-03-22|DELIVER IN PERSON|TRUCK|unts toward the blithely unusual ide 9987|6983|1984|3|20|37799.60|0.09|0.06|N|O|1996-01-15|1996-02-10|1996-01-22|NONE|AIR|slyly final requ 9987|161053|1054|4|41|45676.05|0.06|0.07|N|O|1996-03-03|1996-01-30|1996-03-25|TAKE BACK RETURN|MAIL|ctions haggle furiously furiousl 9987|14038|6540|5|38|36177.14|0.02|0.01|N|O|1996-01-10|1996-02-13|1996-02-06|COLLECT COD|SHIP|s. express, dogged instr 9987|133634|3635|6|33|55031.79|0.06|0.05|N|O|1996-01-22|1996-02-02|1996-01-25|DELIVER IN PERSON|RAIL|ent, silent pinto beans about th 9988|175628|8146|1|37|63033.94|0.07|0.05|N|O|1998-02-01|1998-02-05|1998-02-22|TAKE BACK RETURN|TRUCK|es wake fluffily bold multipliers. slyly 9988|173163|5681|2|50|61808.00|0.04|0.01|N|O|1997-12-19|1998-02-02|1997-12-31|COLLECT COD|MAIL| unusual accounts. carefully 9989|24718|9723|1|8|13141.68|0.06|0.08|A|F|1993-08-17|1993-09-16|1993-09-14|COLLECT COD|TRUCK|ly ironic packages. f 9989|25985|5986|2|4|7643.92|0.00|0.02|R|F|1993-09-02|1993-08-16|1993-09-21|TAKE BACK RETURN|MAIL|inal instructions impress according 9989|119365|4388|3|28|38762.08|0.08|0.02|A|F|1993-08-21|1993-07-22|1993-09-13|NONE|SHIP|packages. i 9989|152320|2321|4|50|68616.00|0.05|0.04|A|F|1993-09-11|1993-09-12|1993-09-13|NONE|TRUCK|lithely fi 9990|78600|8601|1|48|75772.80|0.06|0.06|R|F|1992-03-29|1992-06-08|1992-04-14|COLLECT COD|AIR|ly ironic theodolites-- bold deposit 9991|4827|7328|1|37|64077.34|0.01|0.00|N|O|1996-07-07|1996-07-22|1996-07-16|COLLECT COD|AIR|ng asymptotes across the blithely r 9991|127269|4806|2|47|60924.22|0.02|0.00|N|O|1996-06-21|1996-08-16|1996-07-11|COLLECT COD|AIR|ular accounts? furiousl 10016|32092|9602|1|23|23554.07|0.02|0.06|R|F|1993-05-10|1993-04-02|1993-06-03|TAKE BACK RETURN|FOB|ons. requests haggle furiously aft 10017|32566|5070|1|50|74928.00|0.01|0.01|N|O|1998-06-07|1998-05-25|1998-06-10|NONE|SHIP|have to wake. 
packages inte 10017|131861|6888|2|31|58678.66|0.04|0.07|N|O|1998-07-27|1998-05-12|1998-08-23|TAKE BACK RETURN|SHIP|about the ironically ironic 10018|136298|1325|1|1|1334.29|0.08|0.03|A|F|1993-07-16|1993-08-04|1993-08-02|COLLECT COD|SHIP|ter the fluffily pending asymp 10018|155140|7656|2|35|41829.90|0.10|0.02|R|F|1993-07-13|1993-08-30|1993-07-28|DELIVER IN PERSON|MAIL|ls. carefully pending pinto beans dazzle 10018|32266|4770|3|31|37146.06|0.06|0.05|A|F|1993-07-12|1993-08-02|1993-08-07|COLLECT COD|FOB|after the furiously regular theodolites: 10019|55050|7556|1|50|50252.50|0.02|0.04|R|F|1994-10-08|1994-10-09|1994-10-20|COLLECT COD|RAIL|ts are even, special grouches. slyly regula 10019|24390|6893|2|32|42060.48|0.01|0.07|R|F|1994-08-10|1994-08-20|1994-08-30|COLLECT COD|FOB|uickly. blithely final dependencies 10019|108905|8906|3|42|80383.80|0.05|0.03|A|F|1994-08-31|1994-09-07|1994-09-28|TAKE BACK RETURN|REG AIR| serve carefully carefully even ideas. 10019|154987|18|4|11|22461.78|0.03|0.04|R|F|1994-10-06|1994-09-22|1994-10-18|TAKE BACK RETURN|REG AIR|n furiously regular deposits. sly 10020|121658|6683|1|23|38631.95|0.02|0.00|N|O|1998-05-24|1998-06-21|1998-06-01|NONE|REG AIR|ackages use accounts. unusual, final packa 10020|91478|6497|2|40|58778.80|0.03|0.05|N|O|1998-07-31|1998-06-27|1998-08-13|NONE|REG AIR|ously express requests integrate fu 10021|17774|2777|1|18|30451.86|0.02|0.00|R|F|1992-09-09|1992-10-16|1992-09-28|COLLECT COD|SHIP|ounts use. silent foxe 10021|121007|8544|2|28|28784.00|0.04|0.06|A|F|1992-10-02|1992-10-16|1992-10-21|COLLECT COD|RAIL|s. regular, ironic deposits sleep 10022|26504|9007|1|40|57220.00|0.05|0.08|R|F|1994-04-17|1994-05-04|1994-04-27|COLLECT COD|REG AIR|leep furiously. furiously 10022|40185|7698|2|18|20253.24|0.03|0.02|A|F|1994-07-04|1994-04-26|1994-07-30|DELIVER IN PERSON|FOB|. carefully final asymptotes above th 10023|24002|6505|1|40|37040.00|0.03|0.03|N|O|1996-12-17|1997-02-18|1996-12-30|DELIVER IN PERSON|MAIL|special excuses cajole fluffily blit 10023|199662|9663|2|7|12331.62|0.02|0.02|N|O|1997-01-04|1997-01-17|1997-01-12|NONE|SHIP|ages solve blithely regular, fina 10023|108439|8440|3|31|44870.33|0.05|0.04|N|O|1997-03-31|1997-01-01|1997-04-16|NONE|REG AIR|efully. fluffily bold dolphins haggle 10023|132174|7201|4|15|18092.55|0.03|0.06|N|O|1997-02-27|1997-01-18|1997-03-06|COLLECT COD|MAIL| integrate furiously even package 10048|173678|1230|1|19|33281.73|0.01|0.04|A|F|1994-08-31|1994-06-29|1994-09-22|TAKE BACK RETURN|AIR|sits. unusual pinto b 10048|204|7705|2|34|37542.80|0.03|0.05|A|F|1994-06-07|1994-08-10|1994-06-11|DELIVER IN PERSON|RAIL|usual requests shoul 10049|68506|8507|1|39|57505.50|0.00|0.06|N|O|1997-10-09|1997-09-25|1997-10-12|NONE|RAIL|final courts. furiousl 10049|165548|581|2|34|54860.36|0.04|0.07|N|O|1997-08-27|1997-10-05|1997-09-07|DELIVER IN PERSON|AIR|eodolites. final, even deposits 10049|159301|6847|3|4|5441.20|0.00|0.00|N|O|1997-11-13|1997-09-26|1997-12-11|NONE|FOB|e. 
bold packages nod qu 10049|164042|6559|4|30|33181.20|0.09|0.04|N|O|1997-08-26|1997-09-18|1997-09-07|DELIVER IN PERSON|MAIL|ses sleep along the furiously unusual shea 10050|199821|7379|1|33|63387.06|0.08|0.05|N|O|1996-11-12|1996-11-09|1996-11-25|NONE|TRUCK|ests haggl 10050|175779|814|2|35|64916.95|0.06|0.00|N|O|1997-01-02|1996-11-14|1997-01-19|COLLECT COD|FOB| bold, regular foxe 10050|104518|7029|3|5|7612.55|0.08|0.08|N|O|1996-09-15|1996-10-24|1996-09-28|COLLECT COD|RAIL|he ideas detect slyly dur 10050|22788|295|4|29|49612.62|0.06|0.04|N|O|1996-11-15|1996-11-14|1996-11-27|COLLECT COD|MAIL|st furiously final requ 10051|82853|378|1|31|56911.35|0.08|0.04|N|O|1996-06-20|1996-07-17|1996-07-18|DELIVER IN PERSON|SHIP|nal packages sleep along the r 10051|97751|2770|2|28|48965.00|0.06|0.08|N|O|1996-06-04|1996-06-24|1996-06-08|TAKE BACK RETURN|TRUCK| final ideas sle 10051|86917|6918|3|14|26654.74|0.08|0.00|N|O|1996-08-23|1996-08-18|1996-09-10|TAKE BACK RETURN|REG AIR|ully even requests are fi 10051|5431|5432|4|46|61475.78|0.08|0.02|N|O|1996-07-11|1996-08-21|1996-07-29|COLLECT COD|TRUCK|eaves are after the blithely ev 10052|38654|8655|1|30|47779.50|0.07|0.06|A|F|1994-12-18|1994-12-02|1994-12-29|NONE|SHIP|ove the blithely final a 10052|54372|1888|2|39|51728.43|0.09|0.07|A|F|1994-12-22|1994-10-10|1995-01-06|TAKE BACK RETURN|AIR| express packages. even packages are care 10052|17613|7614|3|29|44387.69|0.01|0.04|A|F|1994-12-09|1994-11-26|1995-01-04|NONE|TRUCK| the unusual accounts. furiousl 10052|13442|3443|4|25|33886.00|0.06|0.05|R|F|1995-01-02|1994-11-11|1995-01-07|DELIVER IN PERSON|MAIL|lar deposits! f 10052|109995|2506|5|29|58144.71|0.05|0.04|A|F|1994-09-10|1994-12-06|1994-09-16|NONE|MAIL|refully bold ins 10052|71363|3871|6|10|13343.60|0.01|0.01|R|F|1994-10-19|1994-11-06|1994-11-02|DELIVER IN PERSON|MAIL|ross the regular platel 10053|7672|5173|1|15|23695.05|0.08|0.00|R|F|1992-01-12|1992-03-08|1992-02-08|TAKE BACK RETURN|TRUCK|ously unusual requests wake against t 10053|134417|4418|2|24|34833.84|0.01|0.01|R|F|1992-04-16|1992-02-18|1992-05-13|NONE|TRUCK|ar, ironic 10053|143937|3938|3|21|41599.53|0.02|0.07|R|F|1992-03-20|1992-02-10|1992-03-25|COLLECT COD|TRUCK|ts boost furiously ironic fo 10053|140152|7695|4|24|28611.60|0.07|0.06|R|F|1992-01-14|1992-03-05|1992-01-18|TAKE BACK RETURN|TRUCK|es. even warthogs are sly 10053|128705|3730|5|43|74549.10|0.09|0.05|A|F|1992-04-08|1992-02-22|1992-04-11|TAKE BACK RETURN|REG AIR|the express asympto 10054|96535|1554|1|34|52072.02|0.00|0.01|N|O|1995-08-11|1995-07-21|1995-09-10|TAKE BACK RETURN|MAIL|press accounts detect slyly. quickly speci 10054|58123|629|2|31|33514.72|0.02|0.06|N|O|1995-07-17|1995-07-08|1995-07-25|DELIVER IN PERSON|AIR|nto beans are. even foxes use idle 10055|160218|219|1|43|54963.03|0.00|0.04|N|O|1996-04-23|1996-05-26|1996-04-29|COLLECT COD|REG AIR|slyly final package 10055|168451|6000|2|2|3038.90|0.04|0.01|N|O|1996-05-14|1996-04-02|1996-05-18|DELIVER IN PERSON|REG AIR|ove the slyly even accounts. ir 10055|147373|7374|3|11|15624.07|0.00|0.00|N|O|1996-03-10|1996-05-07|1996-03-31|DELIVER IN PERSON|RAIL|old pinto bea 10055|95918|8428|4|13|24880.83|0.04|0.01|N|O|1996-04-18|1996-05-17|1996-05-13|DELIVER IN PERSON|RAIL|s sleep among th 10080|20683|684|1|44|70561.92|0.03|0.05|R|F|1993-05-21|1993-04-23|1993-05-26|COLLECT COD|FOB|fluffily unusual sentiments run. 10080|14831|9834|2|24|41899.92|0.05|0.01|R|F|1993-04-20|1993-03-26|1993-04-30|TAKE BACK RETURN|SHIP| slyly quick dolphins. 
10080|57039|7040|3|49|48805.47|0.04|0.03|R|F|1993-06-06|1993-05-12|1993-06-09|NONE|SHIP|beans against the 10080|40848|8361|4|32|57242.88|0.04|0.04|R|F|1993-04-11|1993-05-01|1993-05-04|TAKE BACK RETURN|TRUCK|final ideas integra 10080|128152|665|5|10|11801.50|0.05|0.01|A|F|1993-03-18|1993-04-13|1993-04-06|NONE|REG AIR|riously about 10080|49386|6899|6|14|18695.32|0.07|0.06|R|F|1993-05-17|1993-03-17|1993-05-27|DELIVER IN PERSON|FOB|ely unusual requests boost 10080|25736|741|7|2|3323.46|0.07|0.07|A|F|1993-05-11|1993-03-19|1993-05-12|TAKE BACK RETURN|REG AIR|ress theodolites sleep bravely. final, eve 10081|141819|6848|1|28|52102.68|0.02|0.07|A|F|1993-10-18|1993-09-28|1993-11-15|NONE|TRUCK|nic requests are slyly. packages ca 10081|148533|1048|2|41|64842.73|0.04|0.02|A|F|1993-10-22|1993-10-08|1993-10-28|DELIVER IN PERSON|FOB|e packages. bol 10081|180304|5341|3|37|51219.10|0.07|0.06|A|F|1993-10-31|1993-09-30|1993-11-28|COLLECT COD|TRUCK|iously thin accounts? regular, unu 10081|166665|1698|4|38|65803.08|0.02|0.06|R|F|1993-11-23|1993-10-03|1993-11-25|TAKE BACK RETURN|REG AIR|e final courts. quickly express accou 10081|134875|2415|5|1|1909.87|0.04|0.08|R|F|1993-09-19|1993-10-17|1993-10-19|COLLECT COD|SHIP|unusual deposi 10081|38249|8250|6|39|46302.36|0.06|0.02|A|F|1993-12-06|1993-09-28|1993-12-20|NONE|REG AIR|nic ideas haggle quickly ar 10082|20056|7563|1|48|46850.40|0.05|0.00|R|F|1994-12-13|1994-11-10|1994-12-25|TAKE BACK RETURN|FOB|telets. ironic requests hagg 10082|77354|4876|2|10|13313.50|0.06|0.08|R|F|1994-12-17|1994-10-19|1994-12-20|NONE|AIR| alongside of the deposits 10082|84626|2151|3|25|40265.50|0.10|0.04|R|F|1994-09-04|1994-11-18|1994-09-07|TAKE BACK RETURN|TRUCK|sits. blithely regul 10082|96929|9439|4|23|44296.16|0.10|0.03|A|F|1994-09-15|1994-10-10|1994-09-27|TAKE BACK RETURN|AIR|esias nag ca 10082|71710|6725|5|20|33634.20|0.07|0.04|R|F|1994-10-11|1994-11-12|1994-10-29|DELIVER IN PERSON|SHIP|slyly final accoun 10082|18117|8118|6|35|36228.85|0.05|0.02|A|F|1994-10-12|1994-09-30|1994-11-06|TAKE BACK RETURN|SHIP|equests are quickly ironic requests. b 10083|150645|8191|1|40|67825.60|0.04|0.02|N|O|1995-10-12|1995-12-05|1995-10-22|NONE|SHIP|unusual instructions a 10084|95352|2880|1|3|4042.05|0.09|0.05|N|O|1997-09-07|1997-09-18|1997-09-22|NONE|AIR|gainst the slyly 10085|195730|5731|1|35|63900.55|0.08|0.06|N|O|1996-07-12|1996-06-15|1996-07-19|NONE|MAIL| about the blithely daring deposi 10085|27484|7485|2|34|47990.32|0.05|0.04|N|O|1996-04-06|1996-06-22|1996-04-20|TAKE BACK RETURN|MAIL|rate quickly stealthily clos 10085|123287|3288|3|14|18343.92|0.01|0.00|N|O|1996-05-19|1996-06-04|1996-05-20|DELIVER IN PERSON|TRUCK|ans. doggedly pending excuses 10085|72189|4697|4|33|38318.94|0.07|0.06|N|O|1996-06-04|1996-05-08|1996-06-26|NONE|MAIL| foxes alongside of the careful 10085|74613|4614|5|33|52391.13|0.06|0.04|N|O|1996-06-13|1996-05-03|1996-06-16|COLLECT COD|REG AIR|arefully after the requests. iron 10086|157864|2895|1|35|67265.10|0.08|0.07|N|O|1995-06-29|1995-08-01|1995-07-21|TAKE BACK RETURN|MAIL| deposits haggle blithely. 
even packages a 10087|140968|8511|1|48|96430.08|0.09|0.06|N|O|1997-05-08|1997-06-05|1997-05-13|NONE|MAIL|c patterns alongs 10087|33430|5934|2|14|19088.02|0.00|0.00|N|O|1997-05-11|1997-06-07|1997-05-13|DELIVER IN PERSON|RAIL|usly above 10112|171163|8715|1|21|25917.36|0.00|0.04|A|F|1994-03-03|1994-03-04|1994-03-30|DELIVER IN PERSON|TRUCK|ully special theodolites about the stealthi 10113|187288|9807|1|35|48134.80|0.03|0.02|R|F|1994-07-20|1994-07-18|1994-07-25|COLLECT COD|REG AIR|grate carefull 10113|149264|6807|2|20|26265.20|0.08|0.02|A|F|1994-08-12|1994-09-04|1994-08-22|NONE|SHIP|uickly express packages wake fluffily. s 10113|72444|4952|3|28|39660.32|0.05|0.05|R|F|1994-10-04|1994-07-13|1994-10-24|COLLECT COD|FOB|ccounts sleep. blithely dog 10113|153335|8366|4|40|55533.20|0.01|0.07|R|F|1994-07-02|1994-07-23|1994-07-04|COLLECT COD|REG AIR|haggle quickly platelets. furiously 10114|144275|9304|1|34|44855.18|0.05|0.08|R|F|1993-04-11|1993-03-13|1993-04-30|TAKE BACK RETURN|MAIL|ly final packages. specia 10115|176655|6656|1|16|27706.40|0.00|0.00|N|O|1998-10-01|1998-08-10|1998-10-26|NONE|FOB|n foxes haggle carefully according 10115|27859|7860|2|40|71474.00|0.07|0.06|N|O|1998-09-05|1998-08-31|1998-09-19|COLLECT COD|RAIL|oss the expres 10115|23656|3657|3|28|44230.20|0.06|0.01|N|O|1998-09-15|1998-09-11|1998-09-22|TAKE BACK RETURN|MAIL|blithely after t 10115|185632|5633|4|23|39505.49|0.04|0.07|N|O|1998-10-25|1998-08-06|1998-11-05|TAKE BACK RETURN|RAIL|ourts among the blithely spe 10116|87915|7916|1|36|68504.76|0.00|0.03|N|O|1997-04-26|1997-04-26|1997-05-17|DELIVER IN PERSON|FOB|nts. blithely bo 10116|177796|7797|2|9|16864.11|0.06|0.07|N|O|1997-07-20|1997-06-12|1997-08-17|TAKE BACK RETURN|FOB|s. fluffily unusual instructions run abov 10116|10319|2821|3|48|59006.88|0.09|0.05|N|O|1997-04-21|1997-06-21|1997-05-20|COLLECT COD|FOB|e blithely special theodolites. s 10116|15931|8433|4|31|57254.83|0.02|0.07|N|O|1997-06-09|1997-04-29|1997-06-17|TAKE BACK RETURN|TRUCK|pending instructions use slyly. unusual 10116|150511|512|5|43|67144.93|0.04|0.05|N|O|1997-04-17|1997-05-08|1997-05-11|DELIVER IN PERSON|REG AIR|equests. slyly 10117|168157|5706|1|47|57582.05|0.08|0.00|R|F|1993-12-22|1993-12-18|1994-01-08|DELIVER IN PERSON|FOB|use according to t 10117|11617|9121|2|12|18343.32|0.03|0.08|R|F|1994-02-15|1993-12-28|1994-03-03|DELIVER IN PERSON|FOB|ular excuses. 10117|42753|266|3|38|64438.50|0.03|0.00|A|F|1994-02-26|1994-02-13|1994-02-27|NONE|TRUCK|-- requests about 10118|188894|1413|1|14|27760.46|0.03|0.01|R|F|1994-06-12|1994-05-12|1994-06-18|COLLECT COD|MAIL|ely quick pinto beans. slyly express the 10119|198109|629|1|27|32591.70|0.04|0.06|N|O|1996-07-12|1996-09-21|1996-08-05|NONE|TRUCK|en excuses grow evenly furiously 10144|141663|1664|1|17|28979.22|0.06|0.08|A|F|1995-04-05|1995-03-21|1995-04-26|TAKE BACK RETURN|AIR|s. furiously even deposits acros 10144|128812|8813|2|19|34975.39|0.07|0.05|A|F|1995-03-30|1995-02-21|1995-04-12|DELIVER IN PERSON|FOB|s serve blithely permanent package 10144|136479|4019|3|41|62134.27|0.07|0.06|A|F|1995-02-19|1995-03-27|1995-03-01|DELIVER IN PERSON|MAIL|osits nag carefully express 10144|197167|4725|4|40|50566.40|0.07|0.00|R|F|1995-02-07|1995-03-15|1995-03-02|NONE|RAIL|uses. even requests play 10144|176216|3768|5|42|54272.82|0.06|0.06|R|F|1995-02-19|1995-03-30|1995-03-02|NONE|REG AIR|eas sleep furiously. 
pending 10145|175671|3223|1|30|52400.10|0.01|0.02|N|O|1995-07-31|1995-07-15|1995-08-02|NONE|FOB|ironic warthogs bo 10145|90677|3187|2|16|26682.72|0.01|0.03|N|O|1995-07-11|1995-08-10|1995-07-28|NONE|AIR|refully special 10145|198700|8701|3|19|34175.30|0.00|0.06|N|O|1995-08-11|1995-07-24|1995-08-17|TAKE BACK RETURN|TRUCK|y behind the furiously regular reque 10145|143870|6385|4|47|89951.89|0.00|0.07|R|F|1995-05-27|1995-07-02|1995-06-14|TAKE BACK RETURN|FOB|regular pinto bea 10145|21312|8819|5|37|45632.47|0.10|0.03|N|O|1995-06-19|1995-06-29|1995-07-12|DELIVER IN PERSON|REG AIR|iously bold req 10145|175895|930|6|46|90660.94|0.02|0.05|N|F|1995-05-31|1995-07-21|1995-06-30|COLLECT COD|TRUCK|ial platelets above the furiously i 10146|28088|3093|1|32|32514.56|0.01|0.04|A|F|1994-05-29|1994-06-18|1994-06-09|COLLECT COD|FOB| ironic ideas. depo 10146|64363|4364|2|33|43802.88|0.07|0.05|A|F|1994-06-19|1994-07-21|1994-07-01|NONE|REG AIR|mptotes sleep furiously. slyl 10146|134555|7069|3|13|20664.15|0.03|0.06|R|F|1994-05-10|1994-06-15|1994-06-04|COLLECT COD|REG AIR|ts. packages wake slyly against 10146|62327|4834|4|11|14182.52|0.05|0.07|R|F|1994-05-25|1994-07-31|1994-06-08|COLLECT COD|AIR|ously regular packages affix along the unus 10146|116058|3592|5|31|33295.55|0.04|0.07|R|F|1994-07-27|1994-07-29|1994-08-03|DELIVER IN PERSON|MAIL|nic accounts. ideas sleep slyly 10146|182088|7125|6|30|35102.40|0.03|0.08|A|F|1994-06-26|1994-07-18|1994-07-25|COLLECT COD|FOB|old ideas. slyly regular 10146|143846|3847|7|20|37796.80|0.00|0.01|R|F|1994-05-16|1994-07-20|1994-06-05|NONE|FOB| blithely ironic notornis. furiously bo 10147|15121|124|1|40|41444.80|0.08|0.07|A|F|1993-12-23|1993-11-14|1994-01-05|NONE|MAIL|fluffily bold pinto beans. ironic, 10147|138383|5923|2|1|1421.38|0.06|0.08|A|F|1993-12-30|1993-11-14|1994-01-23|NONE|AIR| furiously even orbits. furious 10147|190695|5734|3|10|17856.90|0.00|0.07|A|F|1993-11-13|1994-01-01|1993-12-13|DELIVER IN PERSON|SHIP|lar deposits. furiously 10147|194511|4512|4|44|70642.44|0.10|0.07|A|F|1993-12-25|1993-12-18|1994-01-03|TAKE BACK RETURN|AIR|furiously regular packages. 10147|138188|3215|5|47|57630.46|0.02|0.07|A|F|1993-10-15|1993-12-18|1993-10-22|TAKE BACK RETURN|AIR|lar packages. ironic depo 10148|101480|9011|1|10|14814.80|0.00|0.01|R|F|1994-04-25|1994-05-15|1994-05-10|NONE|MAIL|g, regular hockey players cajole si 10148|162766|7799|2|16|29260.16|0.02|0.02|R|F|1994-05-19|1994-06-02|1994-06-15|COLLECT COD|REG AIR| accounts. carefully final foxes among the 10148|195608|8128|3|28|47700.80|0.07|0.08|A|F|1994-05-21|1994-06-24|1994-05-28|DELIVER IN PERSON|FOB|al accounts sleep finally carefu 10148|180599|600|4|33|55426.47|0.05|0.02|R|F|1994-05-20|1994-04-29|1994-06-01|TAKE BACK RETURN|FOB|equests slee 10149|68636|3649|1|32|51348.16|0.08|0.02|A|F|1994-10-03|1994-11-07|1994-11-02|COLLECT COD|RAIL|lithely? slyly final theo 10149|163019|8052|2|46|49772.46|0.10|0.02|R|F|1994-11-04|1994-11-05|1994-12-03|TAKE BACK RETURN|TRUCK|l, regular instructions boost 10149|50073|74|3|13|13299.91|0.10|0.06|R|F|1994-12-27|1994-11-19|1995-01-05|TAKE BACK RETURN|SHIP|tly stealthy requests are fluffily alongsid 10149|164076|9109|4|8|9120.56|0.02|0.03|A|F|1994-12-19|1994-11-25|1995-01-06|DELIVER IN PERSON|MAIL| the regul 10149|13808|3809|5|23|39601.40|0.00|0.06|A|F|1995-01-15|1994-11-13|1995-01-21|COLLECT COD|RAIL|sly packages. 
slyly unusual package 10149|70113|2621|6|1|1083.11|0.07|0.04|A|F|1994-11-08|1994-10-28|1994-11-19|NONE|RAIL|nusual, express packages wak 10150|83663|8680|1|49|80686.34|0.02|0.03|A|F|1992-07-18|1992-07-21|1992-08-09|TAKE BACK RETURN|REG AIR|ess dolphins. blithely regular f 10150|117479|2502|2|27|40404.69|0.00|0.00|R|F|1992-06-16|1992-06-03|1992-06-25|NONE|FOB|ackages. silent foxe 10150|54897|7403|3|41|75927.49|0.08|0.04|R|F|1992-05-12|1992-06-19|1992-06-05|NONE|MAIL|counts. excuses agains 10150|37786|5296|4|13|22409.14|0.10|0.01|A|F|1992-06-14|1992-06-01|1992-07-03|NONE|SHIP|ickly special accounts integrate quickly f 10150|54076|1592|5|28|28841.96|0.04|0.00|A|F|1992-06-10|1992-06-28|1992-06-17|DELIVER IN PERSON|TRUCK|efully even i 10150|53849|8860|6|44|79324.96|0.03|0.07|A|F|1992-08-29|1992-06-30|1992-09-19|NONE|REG AIR|e ironic ide 10151|163988|9021|1|7|14363.86|0.10|0.06|R|F|1992-04-13|1992-03-07|1992-04-15|NONE|MAIL|nusual packages. furiously final instru 10176|32093|2094|1|46|47154.14|0.08|0.03|N|O|1996-09-01|1996-11-18|1996-09-20|TAKE BACK RETURN|TRUCK| never final, pending ac 10176|185648|685|2|29|50275.56|0.06|0.03|N|O|1996-11-14|1996-10-24|1996-12-13|TAKE BACK RETURN|TRUCK| slyly among the 10177|135285|312|1|43|56772.04|0.05|0.03|N|O|1996-09-20|1996-08-05|1996-10-13|COLLECT COD|FOB|realms. spe 10177|118635|6169|2|32|52916.16|0.07|0.03|N|O|1996-07-14|1996-08-19|1996-08-02|DELIVER IN PERSON|TRUCK|inal packages sleep furiously entici 10177|96426|1445|3|41|58319.22|0.02|0.02|N|O|1996-07-12|1996-09-02|1996-07-23|NONE|SHIP|about the blithely regular packages. excus 10177|20231|7738|4|41|47200.43|0.03|0.04|N|O|1996-10-22|1996-08-21|1996-11-17|COLLECT COD|TRUCK|e ideas. slyly regular packages a 10177|17452|4956|5|1|1369.45|0.10|0.01|N|O|1996-08-19|1996-08-11|1996-08-22|TAKE BACK RETURN|TRUCK|er the carefully even 10178|148709|3738|1|33|58004.10|0.00|0.01|A|F|1994-01-29|1994-02-27|1994-02-06|COLLECT COD|RAIL|oss the carefully s 10179|67970|2983|1|34|65890.98|0.09|0.03|N|O|1996-08-23|1996-06-27|1996-08-29|COLLECT COD|SHIP|ithely regu 10179|165928|8445|2|31|61811.52|0.10|0.06|N|O|1996-07-26|1996-07-09|1996-08-10|COLLECT COD|AIR|courts. furiously ironi 10179|141337|3852|3|44|60646.52|0.08|0.07|N|O|1996-06-02|1996-06-15|1996-06-16|DELIVER IN PERSON|REG AIR|the final instructions wake about the 10180|125189|7702|1|9|10927.62|0.00|0.05|N|O|1997-04-23|1997-05-20|1997-04-28|COLLECT COD|SHIP|ly alongside of the 10180|49205|9206|2|29|33471.80|0.10|0.05|N|O|1997-04-28|1997-04-24|1997-05-02|NONE|FOB|cuses within the ironic, final pinto be 10180|174482|4483|3|17|26460.16|0.04|0.01|N|O|1997-04-20|1997-05-23|1997-05-02|NONE|SHIP|counts are carefully bli 10181|67236|4755|1|12|14438.76|0.00|0.02|N|O|1996-03-25|1996-04-30|1996-03-29|DELIVER IN PERSON|FOB|nts sleep ca 10181|28997|1500|2|23|44297.77|0.07|0.06|N|O|1996-02-22|1996-04-09|1996-03-21|NONE|SHIP|ular, final excuses. furiou 10182|141766|4281|1|44|79541.44|0.05|0.04|R|F|1994-06-24|1994-07-04|1994-07-20|DELIVER IN PERSON|SHIP|hins. furiou 10182|44701|7206|2|24|39496.80|0.04|0.08|A|F|1994-09-23|1994-08-16|1994-10-22|DELIVER IN PERSON|TRUCK|r dependencies wake. quickly special 10182|86226|6227|3|3|3636.66|0.03|0.07|A|F|1994-08-08|1994-07-25|1994-08-19|NONE|MAIL|ake furiously packages. pen 10182|183789|1344|4|46|86147.88|0.05|0.08|A|F|1994-06-15|1994-07-28|1994-07-10|TAKE BACK RETURN|RAIL|ely across the deposits. 
slyly s 10182|140930|8473|5|16|31534.88|0.05|0.05|R|F|1994-09-10|1994-08-21|1994-10-10|DELIVER IN PERSON|RAIL|e slyly regular 10183|53735|1251|1|33|55728.09|0.02|0.03|A|F|1995-03-02|1995-01-26|1995-03-17|NONE|SHIP|ckages hinder carefully. furiously 10183|113981|9004|2|2|3989.96|0.03|0.00|R|F|1995-02-24|1995-01-03|1995-03-19|DELIVER IN PERSON|FOB|lent packages against the furiously final t 10183|67479|2492|3|1|1446.47|0.00|0.06|A|F|1994-12-23|1995-01-29|1995-01-09|DELIVER IN PERSON|FOB|. carefully final requests could wake among 10183|113964|1498|4|19|37581.24|0.05|0.03|R|F|1995-02-09|1995-01-19|1995-02-25|TAKE BACK RETURN|MAIL|y unusual sheaves 10183|180890|5927|5|15|29563.35|0.02|0.07|A|F|1995-03-25|1995-01-23|1995-04-02|DELIVER IN PERSON|MAIL|sual packages. quiet, special excuses at 10183|10622|3124|6|13|19924.06|0.08|0.05|A|F|1994-11-26|1995-02-11|1994-12-08|TAKE BACK RETURN|FOB|ests alongside of the furiously ironic 10208|176632|1667|1|31|52967.53|0.08|0.05|N|O|1996-11-16|1996-11-18|1996-12-05|NONE|RAIL|al requests; ironic, ironi 10208|39153|9154|2|44|48054.60|0.00|0.03|N|O|1996-09-29|1996-09-28|1996-10-08|NONE|REG AIR| above the final packages. fluffily regular 10208|41563|4068|3|38|57173.28|0.01|0.04|N|O|1996-10-20|1996-10-06|1996-10-25|TAKE BACK RETURN|FOB|ent instructions haggle around the slyly sp 10208|140579|580|4|22|35630.54|0.06|0.02|N|O|1996-09-24|1996-09-26|1996-10-24|TAKE BACK RETURN|FOB|nusual accounts. r 10208|101815|6836|5|2|3633.62|0.08|0.03|N|O|1996-10-24|1996-11-13|1996-11-06|DELIVER IN PERSON|MAIL|rding to the brave, bold asympto 10208|193071|5591|6|26|30265.82|0.07|0.08|N|O|1996-11-19|1996-10-03|1996-11-28|TAKE BACK RETURN|TRUCK|the idle, unusual instructions. fluffily fi 10208|143474|1017|7|41|62216.27|0.10|0.00|N|O|1996-10-30|1996-10-27|1996-11-20|TAKE BACK RETURN|TRUCK|ly bold packag 10209|160597|3114|1|44|72933.96|0.04|0.07|A|F|1994-02-20|1994-01-17|1994-03-04|COLLECT COD|REG AIR|iously along the bold foxes: furiously fina 10209|146995|2024|2|18|36755.82|0.05|0.06|A|F|1994-02-07|1993-12-31|1994-02-08|TAKE BACK RETURN|FOB|of the blit 10209|139167|1681|3|46|55483.36|0.02|0.07|R|F|1994-03-07|1994-01-11|1994-03-31|DELIVER IN PERSON|MAIL| alongside of the blithely regular pa 10209|171623|9175|4|27|45754.74|0.10|0.08|R|F|1993-12-14|1994-02-21|1994-01-02|COLLECT COD|MAIL|egular pinto bean 10209|59630|7146|5|47|74712.61|0.00|0.05|A|F|1994-01-22|1994-01-05|1994-02-07|DELIVER IN PERSON|REG AIR| even ideas 10209|198063|3102|6|31|35992.86|0.03|0.01|A|F|1993-12-03|1994-02-28|1993-12-12|COLLECT COD|TRUCK|e. quickly even th 10209|43024|3025|7|50|48351.00|0.00|0.07|A|F|1994-01-20|1994-02-17|1994-02-16|NONE|TRUCK|tes are according to the regular asymptote 10210|102801|5312|1|8|14430.40|0.02|0.02|N|O|1995-10-11|1995-09-22|1995-11-08|NONE|AIR|uick orbits. slyl 10210|175359|5360|2|11|15777.85|0.10|0.04|N|O|1995-08-08|1995-08-31|1995-08-10|DELIVER IN PERSON|FOB|y express dependencies wake furiously 10210|47011|9516|3|36|34488.36|0.02|0.03|N|O|1995-10-20|1995-09-19|1995-11-10|TAKE BACK RETURN|RAIL|across the w 10210|106952|1973|4|11|21548.45|0.03|0.05|N|O|1995-09-06|1995-09-14|1995-09-11|COLLECT COD|REG AIR|es wake blit 10210|129121|4146|5|42|48305.04|0.06|0.01|N|O|1995-10-13|1995-08-26|1995-10-18|NONE|AIR|tegrate bl 10210|124653|4654|6|4|6710.60|0.04|0.03|N|O|1995-07-11|1995-08-21|1995-07-12|NONE|MAIL|nding patterns sleep excuses. 
regular 10210|33382|5886|7|10|13153.80|0.09|0.00|N|O|1995-07-02|1995-07-27|1995-08-01|COLLECT COD|TRUCK| even accounts ar 10211|46990|9495|1|44|85227.56|0.09|0.06|N|O|1997-10-07|1997-09-11|1997-10-22|DELIVER IN PERSON|TRUCK|ng the carefully unusual deposits 10211|144633|7148|2|21|35230.23|0.04|0.07|N|O|1997-10-24|1997-09-24|1997-11-07|NONE|MAIL| are throughout the carefully 10211|39834|7344|3|15|26607.45|0.03|0.03|N|O|1997-11-26|1997-08-30|1997-12-20|NONE|RAIL|furiously fi 10211|78504|3519|4|28|41510.00|0.08|0.07|N|O|1997-10-12|1997-10-19|1997-11-09|TAKE BACK RETURN|SHIP|cuses. carefully brave req 10211|8985|6486|5|42|79547.16|0.08|0.02|N|O|1997-10-30|1997-10-02|1997-11-08|DELIVER IN PERSON|AIR|ithely express theodolites. bli 10212|128144|657|1|23|26959.22|0.09|0.03|A|F|1993-08-09|1993-08-10|1993-08-19|TAKE BACK RETURN|MAIL| haggle blithely. final, bold 10212|107983|3004|2|40|79639.20|0.01|0.00|A|F|1993-08-17|1993-08-18|1993-08-18|COLLECT COD|AIR|slowly final pack 10212|53595|8606|3|46|71235.14|0.06|0.00|A|F|1993-07-06|1993-07-06|1993-07-12|NONE|MAIL|counts sleep. final asymptotes are sl 10213|151567|4083|1|4|6474.24|0.00|0.02|A|F|1995-03-15|1995-03-24|1995-04-12|DELIVER IN PERSON|MAIL|tes. unusual requests cajole care 10213|182090|9645|2|9|10548.81|0.10|0.08|N|F|1995-06-14|1995-04-24|1995-07-01|NONE|REG AIR|uctions boost slyly about the flu 10213|182839|394|3|13|24983.79|0.03|0.01|N|F|1995-05-28|1995-03-22|1995-06-22|TAKE BACK RETURN|MAIL|into beans. blithe deposits wake carefully 10214|36646|9150|1|22|34818.08|0.02|0.02|N|O|1996-03-08|1996-04-09|1996-04-03|DELIVER IN PERSON|REG AIR|ove the frets cajo 10214|94680|9699|2|5|8373.40|0.01|0.03|N|O|1996-03-20|1996-04-15|1996-04-09|TAKE BACK RETURN|RAIL| regular, express sheaves cajole fluf 10214|77129|2144|3|40|44244.80|0.01|0.06|N|O|1996-04-16|1996-04-17|1996-04-22|NONE|REG AIR|xpress accounts cajo 10214|92033|2034|4|20|20500.60|0.04|0.08|N|O|1996-03-15|1996-03-04|1996-04-10|TAKE BACK RETURN|RAIL|ructions. carefully close 10215|160337|338|1|1|1397.33|0.00|0.05|N|O|1996-11-18|1996-11-07|1996-11-29|COLLECT COD|SHIP|c accounts boost carefu 10215|168207|5756|2|30|38256.00|0.05|0.02|N|O|1996-10-13|1996-11-15|1996-10-27|TAKE BACK RETURN|FOB|are. stealthily r 10215|51618|1619|3|17|26683.37|0.10|0.00|N|O|1996-08-31|1996-10-10|1996-09-26|DELIVER IN PERSON|MAIL|ly special foxes among the 10215|140940|3455|4|9|17828.46|0.02|0.02|N|O|1996-11-27|1996-09-19|1996-12-05|COLLECT COD|AIR|furiously among the slyl 10215|163480|8513|5|47|72543.56|0.04|0.08|N|O|1996-11-21|1996-10-07|1996-12-11|NONE|SHIP|sly unusual requests boost 10240|35204|211|1|33|37593.60|0.02|0.02|A|F|1994-08-01|1994-06-19|1994-08-06|COLLECT COD|FOB|express asymptote 10241|41555|9068|1|27|40406.85|0.06|0.02|N|O|1997-12-01|1997-12-06|1997-12-22|DELIVER IN PERSON|TRUCK|ckly unusual foxes ha 10241|8903|6404|2|20|36238.00|0.02|0.05|N|O|1997-10-21|1997-10-30|1997-11-11|TAKE BACK RETURN|TRUCK| the ironic 10242|32317|7324|1|44|54969.64|0.00|0.03|R|F|1994-02-01|1994-02-02|1994-02-02|COLLECT COD|RAIL| dependencies boost furiously among 10242|144308|4309|2|8|10818.40|0.10|0.00|R|F|1994-03-02|1994-03-11|1994-03-14|TAKE BACK RETURN|RAIL|ronic instructions. instructions on the fin 10242|169321|9322|3|3|4170.96|0.05|0.05|A|F|1994-04-03|1994-03-10|1994-04-27|TAKE BACK RETURN|SHIP|e stealthily i 10242|163966|8999|4|30|60898.80|0.01|0.01|R|F|1993-12-16|1994-02-18|1994-01-08|TAKE BACK RETURN|FOB|ve slyly. 
blithely even 10242|173971|6489|5|9|18404.73|0.03|0.07|A|F|1994-03-23|1994-03-04|1994-04-05|TAKE BACK RETURN|AIR|tructions h 10242|130108|7648|6|23|26176.30|0.07|0.05|A|F|1994-01-26|1994-02-02|1994-02-19|DELIVER IN PERSON|REG AIR|lites nag blithely. iron 10242|115797|3331|7|32|58009.28|0.02|0.06|A|F|1994-03-29|1994-01-25|1994-04-03|COLLECT COD|SHIP|ithely. blithel 10243|185080|117|1|40|46603.20|0.01|0.04|N|O|1996-04-21|1996-02-01|1996-04-25|DELIVER IN PERSON|RAIL| accounts integrate carefully ca 10244|5308|309|1|19|23052.70|0.10|0.02|N|O|1996-04-22|1996-06-11|1996-05-11|COLLECT COD|AIR|e carefully special dependencies. depos 10244|2328|9829|2|48|59055.36|0.07|0.05|N|O|1996-07-22|1996-05-09|1996-07-24|TAKE BACK RETURN|SHIP| regular deposits 10244|112559|7582|3|21|33002.55|0.05|0.01|N|O|1996-07-29|1996-06-03|1996-08-16|DELIVER IN PERSON|FOB|across the slyly special acc 10244|178991|6543|4|28|57959.72|0.01|0.04|N|O|1996-08-02|1996-05-26|1996-08-09|TAKE BACK RETURN|REG AIR|s alongside of the ir 10245|128622|8623|1|49|80880.38|0.08|0.02|R|F|1995-03-18|1995-03-21|1995-04-17|TAKE BACK RETURN|SHIP|ag furiously. fur 10245|144321|9350|2|5|6826.60|0.06|0.01|A|F|1995-05-06|1995-05-07|1995-05-14|TAKE BACK RETURN|FOB|ins. final packages sublate never 10245|190134|135|3|2|2448.26|0.02|0.05|R|F|1995-04-17|1995-04-09|1995-05-10|DELIVER IN PERSON|REG AIR|riously dar 10245|171121|3639|4|50|59606.00|0.06|0.03|R|F|1995-04-08|1995-04-25|1995-04-21|TAKE BACK RETURN|AIR|ding to the blithely regula 10245|22532|39|5|39|56726.67|0.04|0.01|R|F|1995-03-08|1995-04-12|1995-03-19|NONE|SHIP|s final accounts sleep blithely regular ins 10245|84930|4931|6|48|91916.64|0.06|0.00|A|F|1995-03-22|1995-04-06|1995-04-10|DELIVER IN PERSON|TRUCK|t the stea 10245|10633|8137|7|4|6174.52|0.01|0.01|R|F|1995-03-23|1995-03-19|1995-04-02|NONE|SHIP|posits. idly unu 10246|99563|7091|1|50|78128.00|0.07|0.02|N|O|1997-10-04|1997-09-07|1997-10-15|COLLECT COD|SHIP|eodolites. blithel 10246|153129|3130|2|13|15367.56|0.10|0.06|N|O|1997-08-24|1997-07-21|1997-09-03|NONE|AIR| haggle blithely pending pinto b 10246|24370|1877|3|11|14238.07|0.09|0.04|N|O|1997-09-11|1997-09-01|1997-10-03|NONE|REG AIR|y stealthy tithes. special, regular in 10246|173520|6038|4|42|66927.84|0.06|0.06|N|O|1997-08-08|1997-08-10|1997-08-21|COLLECT COD|REG AIR|iously special dependencies kindle? fur 10247|61583|6596|1|13|20079.54|0.01|0.01|A|F|1993-01-25|1993-03-28|1993-02-23|COLLECT COD|REG AIR|instructions. pinto beans hagg 10247|19854|9855|2|42|74501.70|0.09|0.08|A|F|1993-02-12|1993-03-13|1993-03-02|COLLECT COD|SHIP|t deposits grow slyly along the fi 10247|79131|1639|3|45|49955.85|0.08|0.04|R|F|1993-02-06|1993-03-12|1993-02-12|DELIVER IN PERSON|MAIL|ges maintain furiously even pinto bea 10247|140246|247|4|2|2572.48|0.06|0.07|A|F|1993-03-07|1993-04-03|1993-03-27|DELIVER IN PERSON|SHIP|press pinto beans print furiously 10247|105942|8453|5|42|81813.48|0.04|0.04|A|F|1993-03-31|1993-04-11|1993-04-21|TAKE BACK RETURN|TRUCK|y pending pinto beans: 10247|158858|6404|6|5|9584.25|0.06|0.01|A|F|1993-02-22|1993-02-25|1993-02-27|DELIVER IN PERSON|FOB|ilent foxe 10272|15512|5513|1|34|48535.34|0.03|0.04|R|F|1994-03-12|1994-04-07|1994-03-27|NONE|TRUCK|lent requests. packages boost agai 10272|93604|3605|2|46|73489.60|0.03|0.06|R|F|1994-02-02|1994-04-16|1994-02-22|DELIVER IN PERSON|TRUCK|ly bold excus 10272|195440|5441|3|14|21496.16|0.06|0.04|A|F|1994-03-01|1994-02-20|1994-03-27|COLLECT COD|SHIP|s boost slyly even theodolites. 
carefully f 10272|158123|639|4|16|18897.92|0.00|0.07|R|F|1994-02-08|1994-04-10|1994-03-09|NONE|REG AIR|ic asymptotes cajole among the 10272|157794|2825|5|35|64812.65|0.05|0.07|R|F|1994-02-13|1994-02-22|1994-02-14|NONE|RAIL|luffily final inst 10272|37579|2586|6|18|27298.26|0.04|0.07|A|F|1994-04-27|1994-03-08|1994-05-01|TAKE BACK RETURN|FOB|l foxes nag 10272|179164|6716|7|32|39781.12|0.02|0.01|A|F|1994-05-11|1994-04-12|1994-05-23|DELIVER IN PERSON|RAIL| across th 10273|196991|9511|1|21|43847.79|0.06|0.06|N|O|1998-06-29|1998-07-15|1998-07-07|COLLECT COD|REG AIR|y unusual platelets are quickly final pint 10273|22731|2732|2|50|82686.50|0.05|0.03|N|O|1998-07-08|1998-07-03|1998-07-27|COLLECT COD|SHIP|. carefully stealthy ideas a 10273|30224|7734|3|47|54248.34|0.03|0.00|N|O|1998-07-09|1998-07-11|1998-07-21|COLLECT COD|FOB|pecial packages among th 10273|164952|2501|4|23|46389.85|0.02|0.01|N|O|1998-05-30|1998-06-09|1998-06-06|TAKE BACK RETURN|FOB|along the regular, unusual requests. fl 10273|131756|6783|5|33|58995.75|0.02|0.06|N|O|1998-07-09|1998-06-10|1998-07-29|DELIVER IN PERSON|FOB|ole bold, bold th 10273|15138|141|6|49|51603.37|0.10|0.00|N|O|1998-06-18|1998-06-11|1998-07-17|DELIVER IN PERSON|TRUCK|es. furiously final dolphins wake furiousl 10274|25373|7876|1|26|33757.62|0.04|0.03|R|F|1993-12-17|1994-01-09|1994-01-01|TAKE BACK RETURN|MAIL|dencies haggle. furiously even packa 10274|34146|9153|2|17|18362.38|0.03|0.03|R|F|1994-03-27|1994-02-03|1994-04-15|TAKE BACK RETURN|SHIP| furiously p 10275|170630|631|1|12|20407.56|0.00|0.01|N|O|1997-08-31|1997-08-14|1997-09-17|DELIVER IN PERSON|AIR|ts use alongside of the g 10275|167379|2412|2|36|52069.32|0.02|0.06|N|O|1997-07-12|1997-08-08|1997-07-24|TAKE BACK RETURN|TRUCK|he slyly regular depo 10276|123917|1454|1|39|75695.49|0.09|0.03|N|O|1996-06-24|1996-04-15|1996-07-14|DELIVER IN PERSON|SHIP|s cajole. regula 10276|61733|4240|2|45|76262.85|0.03|0.08|N|O|1996-06-21|1996-04-16|1996-07-17|COLLECT COD|SHIP| even requests haggle. even, 10276|141623|6652|3|29|48273.98|0.06|0.03|N|O|1996-05-25|1996-05-07|1996-05-27|NONE|TRUCK|ses breach. final ideas are carefully 10276|48615|3624|4|43|67235.23|0.09|0.06|N|O|1996-05-31|1996-04-22|1996-06-18|COLLECT COD|RAIL|y express depths mold slyly regular 10276|55774|785|5|40|69190.80|0.09|0.07|N|O|1996-04-13|1996-04-01|1996-05-10|DELIVER IN PERSON|SHIP| bold platelets. stealthy r 10277|127499|2524|1|12|18317.88|0.07|0.07|A|F|1994-07-27|1994-05-22|1994-08-11|NONE|RAIL| according to the furiously silent th 10277|68298|8299|2|8|10130.32|0.04|0.07|R|F|1994-04-25|1994-06-14|1994-05-11|COLLECT COD|SHIP|ing asymptotes unwind s 10277|81031|1032|3|45|45541.35|0.10|0.05|A|F|1994-06-25|1994-06-26|1994-07-01|NONE|AIR|ual deposits. deposits nag to the blith 10277|79269|6791|4|21|26213.46|0.00|0.06|R|F|1994-08-07|1994-06-20|1994-08-18|NONE|FOB|cajole. blithely careful ideas dete 10277|68772|3785|5|1|1740.77|0.05|0.08|R|F|1994-07-21|1994-06-04|1994-08-05|COLLECT COD|RAIL| ironic instructions. q 10278|179582|9583|1|34|56493.72|0.07|0.01|N|O|1995-11-02|1995-12-10|1995-11-06|DELIVER IN PERSON|AIR|kly even packages above the furiou 10278|28842|8843|2|28|49583.52|0.04|0.06|N|O|1995-12-01|1995-12-06|1995-12-15|COLLECT COD|AIR|e the special instruction 10278|131390|6417|3|21|29849.19|0.05|0.02|N|O|1995-10-19|1995-10-14|1995-11-12|TAKE BACK RETURN|TRUCK|requests. 
regular 10278|60199|2706|4|36|41730.84|0.10|0.01|N|O|1995-09-24|1995-11-21|1995-10-06|NONE|FOB|nts against t 10279|144465|4466|1|29|43774.34|0.07|0.00|N|O|1996-10-22|1996-12-13|1996-11-17|COLLECT COD|FOB|r the blith 10279|18141|643|2|29|30715.06|0.01|0.00|N|O|1996-11-18|1996-10-22|1996-11-22|TAKE BACK RETURN|MAIL|ronic accounts. carefully regular 10279|91595|4105|3|47|74569.73|0.00|0.01|N|O|1996-11-16|1996-11-19|1996-12-10|COLLECT COD|FOB|dolites cajole carefu 10279|35179|2689|4|15|16712.55|0.01|0.05|N|O|1997-01-02|1996-11-05|1997-01-11|COLLECT COD|MAIL|ully carefully special reques 10279|41687|1688|5|42|68404.56|0.06|0.05|N|O|1997-01-18|1996-11-14|1997-02-07|DELIVER IN PERSON|FOB|ithely close 10304|2004|7005|1|20|18120.00|0.07|0.05|N|O|1996-02-13|1996-01-28|1996-03-07|COLLECT COD|SHIP|ges across th 10304|76716|1731|2|35|59244.85|0.05|0.02|N|O|1996-01-18|1996-01-19|1996-02-02|COLLECT COD|SHIP|ep furiously above the pending de 10304|150547|3063|3|40|63901.60|0.09|0.04|N|O|1995-11-17|1995-12-23|1995-12-06|NONE|SHIP|ding to the pending pack 10304|42662|5167|4|3|4813.98|0.04|0.08|N|O|1996-01-14|1996-01-23|1996-02-10|NONE|REG AIR|express sauternes 10304|19855|2357|5|36|63894.60|0.05|0.08|N|O|1995-11-30|1996-01-14|1995-12-16|NONE|AIR|fter the quickly 10304|86901|6902|6|43|81179.70|0.01|0.03|N|O|1996-01-29|1995-12-27|1996-02-26|COLLECT COD|REG AIR| quickly blithely regul 10304|130962|963|7|43|85697.28|0.06|0.07|N|O|1996-02-20|1996-02-06|1996-03-13|DELIVER IN PERSON|SHIP| to the caref 10305|70906|907|1|36|67568.40|0.01|0.08|R|F|1994-08-13|1994-09-09|1994-08-29|TAKE BACK RETURN|TRUCK|s detect blithely final packages 10305|106709|1730|2|1|1715.70|0.08|0.06|A|F|1994-10-14|1994-09-18|1994-10-30|TAKE BACK RETURN|RAIL|g to the slyly unusual deposits. furio 10305|55965|8471|3|40|76838.40|0.09|0.07|A|F|1994-09-18|1994-09-26|1994-09-23|DELIVER IN PERSON|AIR|never special 10305|198192|8193|4|17|21933.23|0.02|0.02|A|F|1994-08-11|1994-09-17|1994-09-07|NONE|FOB|en deposits boost. pinto beans pr 10305|199499|9500|5|1|1598.49|0.08|0.05|A|F|1994-08-05|1994-09-23|1994-09-02|DELIVER IN PERSON|RAIL|to the slyly final de 10306|120901|902|1|8|15375.20|0.04|0.02|A|F|1993-07-26|1993-09-21|1993-08-12|TAKE BACK RETURN|TRUCK|es use slyly slyly ironic accounts. qui 10306|178441|3476|2|18|27349.92|0.03|0.05|R|F|1993-10-22|1993-08-27|1993-10-29|TAKE BACK RETURN|AIR|uests cajole fluffily-- re 10306|55738|3254|3|36|60974.28|0.05|0.04|A|F|1993-10-21|1993-08-24|1993-10-27|COLLECT COD|FOB|final theodolites wake blithel 10307|48865|3874|1|40|72554.40|0.02|0.00|N|O|1998-05-26|1998-04-28|1998-06-10|DELIVER IN PERSON|TRUCK|ully express 10307|57897|7898|2|50|92744.50|0.03|0.05|N|O|1998-02-18|1998-03-21|1998-02-27|DELIVER IN PERSON|MAIL|e furiously p 10307|68067|3080|3|30|31051.80|0.00|0.05|N|O|1998-04-22|1998-04-01|1998-04-24|DELIVER IN PERSON|REG AIR| regular deposits are a 10307|186346|8865|4|29|41537.86|0.08|0.06|N|O|1998-03-10|1998-04-28|1998-04-02|COLLECT COD|SHIP|oze. furiously express the 10307|114221|9244|5|1|1235.22|0.02|0.08|N|O|1998-04-26|1998-03-23|1998-05-10|NONE|TRUCK| beans haggl 10308|8745|6246|1|49|81033.26|0.06|0.07|A|F|1995-05-09|1995-06-27|1995-05-30|TAKE BACK RETURN|REG AIR|arly regular requests. 
even, pendi 10308|162143|2144|2|27|32538.78|0.06|0.02|N|O|1995-06-27|1995-06-30|1995-07-05|DELIVER IN PERSON|RAIL|ronic instructions above the blithe 10308|124798|9823|3|4|7291.16|0.04|0.08|R|F|1995-05-08|1995-06-04|1995-05-29|COLLECT COD|SHIP|iously regular pinto beans detect quickly 10308|73373|3374|4|41|55201.17|0.04|0.05|N|O|1995-07-25|1995-07-15|1995-08-03|DELIVER IN PERSON|FOB|ing to the final, even packages. e 10308|178125|5677|5|42|50531.04|0.08|0.05|N|O|1995-08-04|1995-06-30|1995-08-13|NONE|AIR|ests haggle carefully pen 10308|140149|5178|6|39|46376.46|0.04|0.02|R|F|1995-05-07|1995-07-20|1995-05-16|NONE|MAIL|ents. forges nag slyly. furiousl 10309|69383|6902|1|20|27047.60|0.10|0.00|R|F|1995-01-18|1995-02-08|1995-01-27|COLLECT COD|MAIL|ges cajole alongside of the furiously 10309|107294|2315|2|30|39038.70|0.01|0.01|A|F|1995-02-22|1995-01-19|1995-03-19|COLLECT COD|AIR|regular foxes. p 10309|84432|1957|3|19|26912.17|0.01|0.03|A|F|1995-01-23|1994-12-22|1995-02-03|NONE|FOB|the slyly final excuse 10309|46686|6687|4|12|19592.16|0.06|0.04|R|F|1995-01-11|1995-01-20|1995-01-19|NONE|SHIP|uffily special requests. 10309|43828|1341|5|47|83275.54|0.04|0.05|R|F|1994-12-30|1995-01-19|1995-01-15|NONE|TRUCK|nal pinto beans nag among the quick 10309|12563|7566|6|44|64924.64|0.04|0.03|A|F|1994-12-06|1994-12-26|1994-12-14|DELIVER IN PERSON|REG AIR|lly regular ideas against the blithely p 10310|20104|105|1|36|36867.60|0.08|0.07|A|F|1994-03-09|1994-04-08|1994-03-21|DELIVER IN PERSON|AIR|telets. sly 10310|84926|9943|2|33|63060.36|0.09|0.07|R|F|1994-03-24|1994-03-14|1994-03-29|COLLECT COD|TRUCK|fully special deposits run 10310|142833|376|3|7|13130.81|0.08|0.02|R|F|1994-04-03|1994-03-31|1994-04-06|DELIVER IN PERSON|MAIL|lyly final ideas. 10310|187705|2742|4|50|89635.00|0.01|0.08|R|F|1994-02-19|1994-05-07|1994-03-21|TAKE BACK RETURN|REG AIR|e furiously regular deposits. regular, 10310|144222|9251|5|12|15194.64|0.10|0.01|R|F|1994-06-04|1994-03-29|1994-07-04|NONE|AIR|thely. even packag 10311|96413|8923|1|3|4228.23|0.09|0.02|R|F|1992-11-12|1992-09-18|1992-11-27|TAKE BACK RETURN|TRUCK|even requests haggle 10336|53149|5655|1|16|17634.24|0.10|0.05|N|O|1997-07-09|1997-08-11|1997-07-18|NONE|RAIL|sleep blithely. deposits detect 10336|60665|666|2|24|39015.84|0.08|0.08|N|O|1997-06-16|1997-09-04|1997-07-08|DELIVER IN PERSON|REG AIR|ly furiously unusual deposi 10337|103491|3492|1|7|10461.43|0.06|0.08|N|O|1996-04-22|1996-05-13|1996-05-18|DELIVER IN PERSON|FOB|carefully specia 10337|156582|1613|2|9|14747.22|0.04|0.02|N|O|1996-07-02|1996-06-03|1996-07-20|NONE|REG AIR|ully bold packages! carefully slow 10337|67157|9664|3|4|4496.60|0.01|0.07|N|O|1996-05-24|1996-06-05|1996-05-30|NONE|MAIL|ly unusual depe 10337|92334|4844|4|20|26526.60|0.03|0.04|N|O|1996-05-09|1996-06-06|1996-05-28|DELIVER IN PERSON|AIR|s are across the fluffily regular deposi 10338|39907|9908|1|2|3693.80|0.05|0.06|N|O|1995-08-01|1995-09-02|1995-08-07|DELIVER IN PERSON|AIR| deposits integrate slyly slyly unusual i 10338|115167|2701|2|6|7092.96|0.08|0.08|N|O|1995-08-12|1995-09-08|1995-08-13|TAKE BACK RETURN|REG AIR|dle pinto beans. slyly even pinto beans 10338|10580|8084|3|38|56642.04|0.01|0.07|N|O|1995-09-09|1995-09-01|1995-09-12|COLLECT COD|AIR|. enticing ideas lose about t 10338|11253|1254|4|4|4657.00|0.07|0.06|N|O|1995-09-01|1995-08-20|1995-09-27|NONE|MAIL|its. dependencies hinder. 
10338|124791|2328|5|2|3631.58|0.09|0.07|N|O|1995-09-09|1995-08-27|1995-09-21|NONE|MAIL| blithely regu 10338|157969|3000|6|30|60808.80|0.06|0.07|N|O|1995-11-01|1995-08-14|1995-11-13|NONE|RAIL| requests 10339|86310|8819|1|20|25926.20|0.06|0.00|N|O|1996-02-05|1995-12-19|1996-03-01|TAKE BACK RETURN|TRUCK|ages are blithely around the asym 10339|10288|5291|2|48|57517.44|0.07|0.05|N|O|1996-01-24|1995-12-02|1996-02-04|NONE|FOB|ular theodolites haggle. furiousl 10339|96807|9317|3|19|34272.20|0.00|0.01|N|O|1995-12-25|1996-01-10|1996-01-24|DELIVER IN PERSON|MAIL|odolites. slyly ir 10340|4996|7497|1|47|89346.53|0.06|0.05|N|O|1995-09-08|1995-08-11|1995-09-24|COLLECT COD|TRUCK|riously even platelets hagg 10340|163951|6468|2|37|74553.15|0.00|0.05|N|O|1995-07-15|1995-08-07|1995-08-10|DELIVER IN PERSON|SHIP|aggle carefully regular ideas. slyly 10340|114048|6560|3|12|12744.48|0.09|0.04|N|O|1995-08-05|1995-09-16|1995-08-20|DELIVER IN PERSON|SHIP|accounts boost against the caref 10340|2318|2319|4|19|23185.89|0.01|0.01|N|O|1995-08-08|1995-08-11|1995-08-18|NONE|AIR|blithely ironic 10340|156320|1351|5|27|37160.64|0.01|0.01|N|O|1995-07-04|1995-07-30|1995-07-14|DELIVER IN PERSON|MAIL|cajole against the 10340|63819|8832|6|43|76660.83|0.01|0.08|N|O|1995-09-16|1995-08-30|1995-09-23|NONE|RAIL|! carefully special pint 10341|37919|2926|1|8|14855.28|0.09|0.07|R|F|1993-03-22|1993-01-30|1993-04-20|TAKE BACK RETURN|AIR|l dependencie 10341|96568|6569|2|41|64146.96|0.09|0.00|R|F|1993-03-02|1993-02-17|1993-03-11|COLLECT COD|FOB|posits. carefully regular deposits cajole f 10341|2594|5095|3|47|70339.73|0.06|0.04|A|F|1993-01-31|1993-01-31|1993-02-16|NONE|FOB|ix boldly instructions. daringl 10341|183238|5757|4|43|56812.89|0.00|0.04|R|F|1993-01-13|1993-02-17|1993-02-09|COLLECT COD|SHIP|press packages believe above the 10341|78018|8019|5|21|20916.21|0.00|0.05|R|F|1993-01-19|1993-01-21|1993-01-27|COLLECT COD|FOB|fully quickly unusual accounts. furiously 10341|41112|6121|6|11|11584.21|0.08|0.06|A|F|1993-02-02|1993-03-16|1993-02-16|DELIVER IN PERSON|RAIL| dependencies are stealthily ab 10342|156257|8773|1|19|24951.75|0.02|0.04|N|O|1996-01-05|1996-01-10|1996-01-14|NONE|TRUCK|ag slyly. packages across th 10342|197714|5272|2|50|90585.50|0.06|0.05|N|O|1995-12-12|1996-02-10|1995-12-27|TAKE BACK RETURN|TRUCK| pending ideas alo 10342|172166|9718|3|8|9905.28|0.10|0.00|N|O|1996-01-14|1995-12-25|1996-01-27|COLLECT COD|AIR|gular foxes past the 10342|183172|8209|4|41|51461.97|0.04|0.06|N|O|1996-02-03|1996-02-09|1996-02-10|TAKE BACK RETURN|TRUCK|en deposit 10342|159057|9058|5|42|46874.10|0.00|0.02|N|O|1996-01-15|1996-01-04|1996-02-05|TAKE BACK RETURN|AIR|close packages slee 10342|62620|5127|6|3|4747.86|0.08|0.00|N|O|1996-02-20|1996-01-16|1996-03-19|COLLECT COD|MAIL|n instructions are furiously caref 10343|79215|1723|1|27|32243.67|0.08|0.05|A|F|1992-03-31|1992-05-20|1992-04-06|NONE|AIR|es. carefully stealthy requests 10368|159990|5021|1|43|88149.57|0.04|0.04|N|O|1996-07-24|1996-08-11|1996-08-02|COLLECT COD|SHIP| special grouches wake furiously along th 10369|125673|8186|1|43|73042.81|0.01|0.04|N|O|1996-03-06|1996-01-15|1996-03-26|DELIVER IN PERSON|TRUCK|sts nag carefully. 
carefully even pear 10369|177561|2596|2|45|73735.20|0.08|0.00|N|O|1995-11-17|1995-12-16|1995-11-24|DELIVER IN PERSON|REG AIR|about the special, unusual multipli 10370|137603|2630|1|35|57421.00|0.07|0.01|A|F|1994-07-26|1994-06-16|1994-08-06|DELIVER IN PERSON|REG AIR|eans haggle slyly slyly regular i 10370|84057|9074|2|41|42683.05|0.05|0.04|A|F|1994-08-17|1994-06-02|1994-09-14|NONE|REG AIR|ual accounts. quickly special pinto beans b 10371|44835|4836|1|24|42715.92|0.09|0.00|A|F|1994-11-21|1994-12-06|1994-12-08|NONE|REG AIR| quickly bold pinto beans. regular platelet 10371|139703|2217|2|43|74936.10|0.08|0.05|A|F|1994-09-21|1994-10-17|1994-10-01|DELIVER IN PERSON|AIR|eposits pr 10371|185281|7800|3|50|68314.00|0.00|0.02|A|F|1994-12-05|1994-12-04|1994-12-11|DELIVER IN PERSON|MAIL|gular deposits. 10371|3771|3772|4|35|58616.95|0.09|0.06|A|F|1994-10-19|1994-11-15|1994-10-30|DELIVER IN PERSON|SHIP|ly bold ideas haggle. blithe 10372|58506|1012|1|50|73225.00|0.03|0.04|N|O|1995-07-11|1995-09-24|1995-07-21|DELIVER IN PERSON|REG AIR|gular foxes. express 10372|100866|8397|2|30|56005.80|0.03|0.02|N|O|1995-10-23|1995-08-04|1995-10-24|NONE|SHIP|sits. blithely regular asymptotes boost bl 10373|125226|251|1|45|56304.90|0.01|0.06|A|F|1993-08-10|1993-08-10|1993-09-01|NONE|FOB|ironic packages sleep 10373|63988|3989|2|45|87839.10|0.04|0.06|R|F|1993-09-04|1993-09-19|1993-09-12|TAKE BACK RETURN|AIR| slyly near the pending packages-- ironic 10373|36047|6048|3|12|11796.48|0.09|0.05|A|F|1993-10-16|1993-07-31|1993-11-03|NONE|RAIL|d hockey p 10373|28570|8571|4|3|4495.71|0.00|0.08|A|F|1993-10-18|1993-08-19|1993-11-02|TAKE BACK RETURN|MAIL|final requests. regular, regular theo 10373|162620|7653|5|48|80765.76|0.01|0.05|A|F|1993-10-16|1993-09-22|1993-10-22|NONE|SHIP|s requests. furiously final reque 10373|145459|3002|6|25|37611.25|0.02|0.06|R|F|1993-08-14|1993-09-02|1993-08-20|NONE|SHIP|uriously about t 10374|153565|8596|1|19|30752.64|0.00|0.04|A|F|1993-11-09|1993-09-24|1993-11-16|DELIVER IN PERSON|RAIL|. even requ 10375|77849|357|1|7|12787.88|0.00|0.03|N|O|1997-04-10|1997-03-10|1997-04-18|TAKE BACK RETURN|TRUCK|nding pinto beans wake. deposits above 10375|5827|3328|2|23|39854.86|0.06|0.05|N|O|1997-02-08|1997-03-20|1997-02-20|TAKE BACK RETURN|TRUCK|ly regular deposits sleep care 10375|119573|7107|3|21|33443.97|0.05|0.01|N|O|1997-03-16|1997-03-10|1997-04-09|TAKE BACK RETURN|AIR| requests. dep 10400|145989|8504|1|50|101749.00|0.07|0.04|N|O|1996-11-07|1996-11-27|1996-11-19|NONE|REG AIR|al platelets play carefully even 10400|33500|8507|2|27|38704.50|0.00|0.03|N|O|1996-12-23|1997-01-17|1997-01-13|NONE|REG AIR|ial packages boost furiou 10400|31910|9420|3|3|5525.73|0.03|0.04|N|O|1997-01-08|1997-01-16|1997-02-02|NONE|MAIL|oxes. slyly bold accounts use. carefull 10400|105847|868|4|48|88936.32|0.03|0.08|N|O|1997-02-01|1996-12-12|1997-02-15|DELIVER IN PERSON|RAIL|ow fluffily quickly ironic i 10400|159407|6953|5|20|29328.00|0.07|0.04|N|O|1996-11-08|1996-12-19|1996-11-18|DELIVER IN PERSON|TRUCK|ironic deposits along the blithely fina 10400|141502|1503|6|27|41674.50|0.08|0.03|N|O|1996-11-26|1997-01-22|1996-12-09|TAKE BACK RETURN|AIR|ounts. blithely bold packages detect bli 10401|142093|2094|1|1|1135.09|0.08|0.04|N|O|1998-08-30|1998-09-14|1998-09-23|NONE|SHIP|ar deposits. 
bold, express excuses are alo 10401|153432|5948|2|10|14854.30|0.07|0.04|N|O|1998-09-04|1998-10-08|1998-09-28|TAKE BACK RETURN|REG AIR|ve the final, 10402|53661|1177|1|38|61357.08|0.07|0.07|N|O|1998-04-04|1998-02-09|1998-04-19|COLLECT COD|TRUCK|l, regular requests integrate. dog 10402|64514|7021|2|10|14785.10|0.01|0.02|N|O|1997-12-21|1998-01-20|1997-12-27|COLLECT COD|MAIL|efully final packages. slyly e 10403|178924|6476|1|29|58084.68|0.10|0.02|N|O|1996-08-10|1996-07-30|1996-09-03|NONE|FOB|ake fluffily. requ 10403|173289|3290|2|47|64027.16|0.02|0.08|N|O|1996-06-06|1996-06-29|1996-06-08|COLLECT COD|AIR|ideas thrash aga 10403|57245|7246|3|26|31258.24|0.10|0.07|N|O|1996-09-11|1996-07-22|1996-09-15|TAKE BACK RETURN|FOB|fully among the carefully even depe 10403|115251|7763|4|24|30390.00|0.07|0.02|N|O|1996-09-03|1996-08-19|1996-09-22|TAKE BACK RETURN|TRUCK|y unusual f 10403|8747|6248|5|29|48016.46|0.07|0.05|N|O|1996-06-27|1996-06-23|1996-07-12|COLLECT COD|RAIL|g slyly pending deposits. fu 10403|337|5338|6|21|25983.93|0.07|0.02|N|O|1996-07-12|1996-07-18|1996-07-16|COLLECT COD|FOB|usly across t 10403|55512|523|7|29|42557.79|0.09|0.04|N|O|1996-06-28|1996-07-31|1996-07-20|COLLECT COD|REG AIR|onic deposits cajole carefu 10404|178467|985|1|35|54091.10|0.08|0.01|N|O|1998-06-12|1998-05-14|1998-06-15|NONE|MAIL|efully express deposits sl 10404|82499|2500|2|9|13333.41|0.03|0.08|N|O|1998-04-13|1998-05-05|1998-04-21|NONE|TRUCK|ial deposits. asymptotes detect. quick 10404|150821|822|3|49|91719.18|0.02|0.02|N|O|1998-04-20|1998-05-13|1998-04-22|DELIVER IN PERSON|SHIP|thely unusual acc 10404|46294|8799|4|6|7441.74|0.07|0.04|N|O|1998-06-27|1998-06-13|1998-07-02|COLLECT COD|AIR|he carefully regular theodolit 10405|134957|9984|1|25|49798.75|0.08|0.07|N|O|1998-01-31|1998-03-02|1998-02-25|DELIVER IN PERSON|MAIL|deposits. platel 10405|10667|668|2|14|22087.24|0.01|0.02|N|O|1998-05-01|1998-03-17|1998-05-10|NONE|REG AIR|thely ironic d 10405|68342|5861|3|37|48482.58|0.06|0.00|N|O|1998-02-28|1998-02-25|1998-03-27|TAKE BACK RETURN|REG AIR|ges according to the regular depos 10405|9267|4268|4|22|25877.72|0.08|0.05|N|O|1998-04-01|1998-03-13|1998-04-28|TAKE BACK RETURN|TRUCK|thely even 10405|138918|6458|5|27|52836.57|0.07|0.08|N|O|1998-04-19|1998-03-08|1998-05-07|NONE|MAIL|taphs haggle quick 10406|130929|8469|1|23|45078.16|0.01|0.06|N|O|1996-06-12|1996-05-23|1996-07-11|NONE|RAIL|uests cajole fluffily 10406|112911|2912|2|41|78880.31|0.09|0.05|N|O|1996-04-25|1996-05-20|1996-04-26|NONE|AIR|ual account 10406|83336|3337|3|41|54092.53|0.10|0.00|N|O|1996-04-27|1996-04-27|1996-05-20|COLLECT COD|MAIL| beans. ev 10406|14430|4431|4|10|13444.30|0.06|0.00|N|O|1996-05-04|1996-04-16|1996-05-15|DELIVER IN PERSON|SHIP| packages nag enticingly 10407|33001|5505|1|9|8406.00|0.10|0.04|N|O|1996-05-30|1996-04-25|1996-06-05|DELIVER IN PERSON|TRUCK| ideas kindle slyly abo 10407|38956|6466|2|10|18949.50|0.01|0.03|N|O|1996-07-18|1996-05-25|1996-07-27|COLLECT COD|AIR|ess deposits 10407|118454|5988|3|20|29449.00|0.00|0.00|N|O|1996-05-13|1996-05-05|1996-06-05|DELIVER IN PERSON|TRUCK|ously ironic foxes. carefully 10407|43275|5780|4|12|14619.24|0.06|0.00|N|O|1996-04-06|1996-05-09|1996-05-04|NONE|REG AIR|g the final, even accounts. regular acc 10407|108468|3489|5|10|14764.60|0.06|0.08|N|O|1996-06-08|1996-05-21|1996-06-10|NONE|RAIL|arefully even 10407|20726|3229|6|2|3293.44|0.04|0.02|N|O|1996-04-19|1996-05-18|1996-05-19|TAKE BACK RETURN|REG AIR|nding requests. 
carefully ironic 10432|170454|5489|1|40|60978.00|0.06|0.05|R|F|1992-03-04|1992-02-10|1992-03-18|COLLECT COD|REG AIR|ly even requ 10432|5363|7864|2|46|58344.56|0.00|0.02|A|F|1992-04-29|1992-03-19|1992-05-03|COLLECT COD|SHIP|efully express requests wake blithe 10432|129153|9154|3|26|30735.90|0.05|0.07|A|F|1992-04-23|1992-03-08|1992-04-26|NONE|REG AIR|after the ironic, unusual ideas cajo 10432|64660|2179|4|18|29243.88|0.06|0.07|A|F|1992-01-27|1992-03-19|1992-02-06|NONE|FOB| ironic ideas. 10433|126788|1813|1|16|29036.48|0.05|0.08|A|F|1993-05-09|1993-04-10|1993-05-29|COLLECT COD|REG AIR|he unusual theodolites. furiously iro 10434|152202|2203|1|6|7525.20|0.02|0.07|R|F|1995-02-11|1995-03-17|1995-03-12|DELIVER IN PERSON|SHIP|uests. packages integrate 10434|46437|6438|2|42|58104.06|0.09|0.04|R|F|1995-03-27|1995-02-24|1995-04-25|DELIVER IN PERSON|TRUCK|es the unusual platelets are furiou 10435|49276|9277|1|39|47785.53|0.08|0.03|R|F|1993-08-23|1993-11-04|1993-08-27|DELIVER IN PERSON|AIR|usly pending requests 10435|170131|132|2|9|10810.17|0.10|0.06|R|F|1993-12-05|1993-09-19|1993-12-10|TAKE BACK RETURN|SHIP|ily regular pack 10435|179479|4514|3|4|6233.88|0.05|0.05|R|F|1993-08-25|1993-09-14|1993-08-26|TAKE BACK RETURN|TRUCK|osits affix furiously 10435|95698|717|4|25|42342.25|0.04|0.01|A|F|1993-10-29|1993-10-23|1993-10-31|NONE|MAIL|, unusual deposits. ruthless 10435|193436|994|5|19|29059.17|0.00|0.05|A|F|1993-08-27|1993-10-30|1993-09-25|DELIVER IN PERSON|MAIL|regular theodolites mold along the asymp 10435|66083|6084|6|20|20981.60|0.03|0.06|A|F|1993-09-23|1993-10-07|1993-10-15|TAKE BACK RETURN|AIR|s. carefully regular 10436|119696|2208|1|31|53186.39|0.09|0.05|A|F|1995-04-26|1995-03-11|1995-05-07|TAKE BACK RETURN|MAIL|thely ironic packages. silent account 10437|88256|5781|1|13|16175.25|0.09|0.01|A|F|1994-08-24|1994-09-20|1994-09-17|NONE|TRUCK|. 
busy requests sleep carefully en 10437|116442|6443|2|27|39377.88|0.02|0.03|A|F|1994-11-04|1994-11-07|1994-11-25|DELIVER IN PERSON|TRUCK|telets thrash carefull 10437|35786|8290|3|11|18939.58|0.03|0.05|A|F|1994-10-08|1994-11-16|1994-10-27|DELIVER IN PERSON|REG AIR|e ideas believe slyly after the fluffil 10438|197774|5332|1|46|86101.42|0.09|0.03|A|F|1993-02-05|1993-03-01|1993-02-06|DELIVER IN PERSON|RAIL|n dependencies grow along the furiously e 10438|170937|938|2|34|68269.62|0.03|0.04|A|F|1992-12-12|1993-02-17|1992-12-26|TAKE BACK RETURN|MAIL|nal, even deposits wake 10438|135851|3391|3|15|28302.75|0.02|0.00|A|F|1993-02-12|1993-03-02|1993-02-27|TAKE BACK RETURN|AIR|ular requests boos 10438|38975|8976|4|27|51677.19|0.06|0.07|R|F|1993-01-27|1993-01-15|1993-02-15|DELIVER IN PERSON|AIR|packages affix blithely along 10439|16078|3582|1|32|31810.24|0.08|0.04|R|F|1992-05-20|1992-06-23|1992-06-13|DELIVER IN PERSON|FOB|cajole furiously after the slyly un 10439|12108|9612|2|27|27542.70|0.05|0.03|R|F|1992-06-29|1992-05-06|1992-07-10|NONE|MAIL|rding to the 10439|89835|7360|3|49|89416.67|0.00|0.00|A|F|1992-04-13|1992-06-03|1992-04-16|COLLECT COD|FOB|nal accounts 10439|53666|3667|4|15|24294.90|0.09|0.03|A|F|1992-04-06|1992-06-13|1992-05-04|COLLECT COD|FOB|ly across the reg 10439|159551|9552|5|30|48316.50|0.05|0.04|R|F|1992-06-05|1992-06-01|1992-06-13|NONE|REG AIR|ly final acco 10439|159732|7278|6|33|59127.09|0.00|0.03|R|F|1992-06-06|1992-05-08|1992-06-20|DELIVER IN PERSON|FOB|n dependenci 10439|199792|4831|7|20|37835.80|0.04|0.01|R|F|1992-06-02|1992-04-29|1992-06-27|NONE|AIR|e express, ironic accou 10464|101451|6472|1|47|68265.15|0.06|0.03|R|F|1994-08-21|1994-10-19|1994-09-01|TAKE BACK RETURN|AIR|luffily unusual requests along the caref 10465|151174|1175|1|16|19602.72|0.00|0.05|N|O|1997-04-20|1997-05-20|1997-05-15|COLLECT COD|REG AIR|ourts. pending 10465|177226|2261|2|21|27367.62|0.06|0.02|N|O|1997-05-06|1997-05-23|1997-05-23|TAKE BACK RETURN|FOB|regular foxes mold sl 10465|33903|8910|3|1|1836.90|0.00|0.03|N|O|1997-04-23|1997-05-13|1997-05-08|TAKE BACK RETURN|RAIL|eep furiously after the fur 10465|194519|9558|4|47|75834.97|0.07|0.03|N|O|1997-08-02|1997-06-16|1997-08-14|NONE|SHIP|ly unusual accounts. iro 10465|77826|5348|5|49|88387.18|0.10|0.03|N|O|1997-07-07|1997-05-21|1997-07-09|COLLECT COD|MAIL|e blithely about the furiously unusual 10465|6209|6210|6|49|54644.80|0.06|0.08|N|O|1997-07-22|1997-05-29|1997-08-03|TAKE BACK RETURN|MAIL|c pinto beans. slyly unusual 10465|147495|2524|7|17|26222.33|0.04|0.04|N|O|1997-05-30|1997-07-09|1997-06-13|COLLECT COD|TRUCK|out the flu 10466|137134|7135|1|44|51529.72|0.08|0.02|R|F|1993-03-13|1993-02-25|1993-04-07|COLLECT COD|SHIP|es. brave accounts wake furiou 10466|195816|8336|2|15|28677.15|0.00|0.08|A|F|1993-01-18|1993-02-17|1993-02-02|NONE|REG AIR|rate furiously steal 10466|65411|2930|3|42|57809.22|0.06|0.06|A|F|1993-02-25|1993-03-01|1993-03-21|TAKE BACK RETURN|AIR|even foxes nag slyly according to the 10467|112703|2704|1|36|61765.20|0.00|0.05|N|O|1997-06-30|1997-05-17|1997-07-07|COLLECT COD|MAIL|ly regular ideas. carefull 10467|189929|2448|2|16|32302.72|0.03|0.01|N|O|1997-07-04|1997-05-07|1997-07-31|TAKE BACK RETURN|RAIL| carefully. blithely even excuses haggle. 10467|133778|1318|3|26|47106.02|0.08|0.05|N|O|1997-05-13|1997-05-15|1997-05-19|NONE|AIR|s use. ideas above 10467|199672|4711|4|28|49606.76|0.01|0.02|N|O|1997-03-28|1997-05-08|1997-04-22|COLLECT COD|RAIL|al accounts 10467|76301|3823|5|3|3831.90|0.07|0.03|N|O|1997-06-04|1997-04-22|1997-06-08|NONE|REG AIR|t foxes. 
package 10467|180962|5999|6|20|40859.20|0.10|0.02|N|O|1997-05-07|1997-05-09|1997-05-18|DELIVER IN PERSON|RAIL|usly slyly re 10467|22338|9845|7|11|13863.63|0.07|0.05|N|O|1997-04-26|1997-05-20|1997-05-16|NONE|RAIL|jole blithely ironic instructions. blith 10468|34701|7205|1|6|9814.20|0.02|0.00|A|F|1995-02-28|1995-03-05|1995-03-02|DELIVER IN PERSON|FOB|, ironic pack 10468|19519|9520|2|39|56101.89|0.00|0.02|R|F|1994-12-26|1995-03-06|1994-12-31|COLLECT COD|REG AIR|against the fluffily pending foxe 10469|73648|8663|1|26|42162.64|0.08|0.00|N|O|1996-12-20|1996-10-21|1996-12-27|DELIVER IN PERSON|FOB|ronic ideas arou 10469|1273|6274|2|1|1174.27|0.02|0.06|N|O|1996-09-18|1996-10-05|1996-09-27|TAKE BACK RETURN|SHIP|ronic, final accounts use car 10469|179442|9443|3|28|42600.32|0.01|0.06|N|O|1996-10-10|1996-11-03|1996-10-21|TAKE BACK RETURN|RAIL|accounts. furiously 10470|169517|7066|1|48|76152.48|0.07|0.04|R|F|1992-08-01|1992-07-05|1992-08-02|DELIVER IN PERSON|SHIP|ven accounts. regular, reg 10470|71961|4469|2|46|88916.16|0.08|0.03|R|F|1992-08-16|1992-06-11|1992-08-23|DELIVER IN PERSON|MAIL| deposits. pending, final inst 10470|45499|5500|3|50|72224.50|0.05|0.08|R|F|1992-07-18|1992-06-06|1992-07-28|NONE|RAIL|heodolites. blithely regular accounts a 10470|98456|8457|4|41|59632.45|0.00|0.01|R|F|1992-06-19|1992-06-16|1992-07-12|TAKE BACK RETURN|SHIP| integrate carefully slyly 10470|143007|5522|5|21|22050.00|0.05|0.07|R|F|1992-06-11|1992-07-12|1992-06-23|DELIVER IN PERSON|REG AIR|regular theodolites sleep? furiously re 10470|111619|1620|6|1|1630.61|0.06|0.05|R|F|1992-07-18|1992-06-28|1992-08-01|TAKE BACK RETURN|MAIL|e above the stealthy, final instru 10471|24317|4318|1|46|57100.26|0.08|0.01|R|F|1993-01-21|1992-12-24|1993-02-17|TAKE BACK RETURN|MAIL|ages. slyly bold platelet 10471|91198|3708|2|35|41621.65|0.02|0.05|A|F|1992-10-11|1992-12-09|1992-11-08|DELIVER IN PERSON|TRUCK|deas sleep furio 10471|164223|4224|3|8|10297.76|0.06|0.01|A|F|1992-12-10|1992-12-23|1992-12-26|DELIVER IN PERSON|REG AIR| packages impress slyly about the carefully 10471|82438|7455|4|35|49715.05|0.09|0.00|R|F|1992-10-17|1993-01-03|1992-11-07|COLLECT COD|SHIP|lent deposits. carefully final pa 10471|124323|1860|5|39|52545.48|0.04|0.05|R|F|1992-12-05|1992-11-11|1992-12-25|COLLECT COD|FOB|packages. quickly s 10471|81539|4048|6|48|72985.44|0.03|0.05|A|F|1992-11-28|1992-12-13|1992-12-16|DELIVER IN PERSON|SHIP|hely special deposits. package 10496|10232|5235|1|8|9137.84|0.08|0.01|N|O|1997-03-26|1997-04-11|1997-04-22|NONE|REG AIR|the express, final requests. furiously ex 10496|168660|1177|2|37|63960.42|0.00|0.01|N|O|1997-05-06|1997-05-01|1997-05-30|TAKE BACK RETURN|RAIL|fter the final courts. final, pend 10497|39416|6926|1|10|13554.10|0.08|0.06|N|O|1996-09-11|1996-09-14|1996-09-13|NONE|AIR|y regular packages along the fluf 10497|53773|6279|2|29|50076.33|0.05|0.03|N|O|1996-08-01|1996-09-19|1996-08-25|TAKE BACK RETURN|FOB|even deposits boost slyly about the bold, 10497|147098|9613|3|36|41223.24|0.07|0.08|N|O|1996-09-13|1996-08-12|1996-09-27|COLLECT COD|MAIL|ges. 
furiousl 10498|113921|3922|1|3|5804.76|0.00|0.07|A|F|1993-11-01|1993-12-23|1993-11-10|DELIVER IN PERSON|REG AIR|ronic accounts wake quickly 10498|69678|7197|2|34|56020.78|0.03|0.00|A|F|1994-02-16|1993-12-09|1994-02-19|COLLECT COD|REG AIR|ously bold 10498|71970|6985|3|31|60201.07|0.08|0.05|A|F|1994-01-21|1993-11-30|1994-02-07|NONE|REG AIR|tegrate never fin 10498|196032|8552|4|25|28200.75|0.09|0.00|A|F|1994-01-07|1993-11-27|1994-01-08|DELIVER IN PERSON|FOB|leep blithely after the regul 10499|17534|36|1|12|17418.36|0.00|0.08|R|F|1994-03-08|1994-02-02|1994-04-04|TAKE BACK RETURN|SHIP|t pinto beans. quic 10499|85307|7816|2|32|41353.60|0.05|0.07|R|F|1993-12-20|1994-01-22|1994-01-17|NONE|RAIL|r packages alongside of the regular 10499|147347|7348|3|30|41830.20|0.09|0.04|R|F|1994-01-27|1994-02-14|1994-02-11|NONE|RAIL|le fluffily among the blithely 10499|48187|8188|4|39|44272.02|0.09|0.06|R|F|1994-02-28|1994-02-11|1994-03-26|NONE|REG AIR|eans. quickly ironic 10499|69508|9509|5|2|2955.00|0.03|0.00|R|F|1994-03-14|1994-01-04|1994-04-09|DELIVER IN PERSON|FOB|ke blithely regular ideas. accoun 10499|7483|9984|6|21|29200.08|0.01|0.04|R|F|1994-02-19|1993-12-28|1994-03-07|COLLECT COD|SHIP|brave requests. blithely unusual 10500|72565|87|1|16|24600.96|0.00|0.03|R|F|1994-04-14|1994-03-10|1994-04-19|COLLECT COD|REG AIR|y. frets according to the 10501|170006|7558|1|12|12912.00|0.02|0.04|N|O|1998-09-28|1998-09-24|1998-10-08|NONE|RAIL|ckages. slyly even theodolites are bold i 10501|83012|8029|2|40|39800.40|0.04|0.03|N|O|1998-07-24|1998-08-06|1998-08-01|NONE|FOB|ular requests cajole. ironic ideas 10501|179005|6557|3|43|46612.00|0.01|0.03|N|O|1998-07-27|1998-09-26|1998-08-23|NONE|REG AIR|lyly bold theodolites sleep carefully f 10501|133448|988|4|22|32591.68|0.00|0.04|N|O|1998-09-25|1998-08-20|1998-10-10|COLLECT COD|FOB|gged package 10501|46215|8720|5|11|12773.31|0.06|0.02|N|O|1998-10-28|1998-09-06|1998-11-21|TAKE BACK RETURN|AIR|lly even dependencies. busy deposits hagg 10501|124284|4285|6|21|27473.88|0.09|0.03|N|O|1998-09-18|1998-08-22|1998-10-01|COLLECT COD|REG AIR|refully ironic platelets hag 10502|177508|5060|1|50|79275.00|0.01|0.00|A|F|1993-05-20|1993-06-25|1993-05-22|COLLECT COD|FOB|ents could have to 10502|166630|9147|2|48|81438.24|0.09|0.01|A|F|1993-06-11|1993-08-01|1993-06-12|TAKE BACK RETURN|MAIL|egular requests. bold 10502|146999|7000|3|8|16367.92|0.10|0.04|R|F|1993-08-30|1993-06-23|1993-09-19|TAKE BACK RETURN|REG AIR|dolphins. b 10502|3600|6101|4|17|25561.20|0.09|0.04|R|F|1993-09-02|1993-06-16|1993-09-26|DELIVER IN PERSON|MAIL|ecial orbits boost 10503|9137|9138|1|28|29291.64|0.07|0.06|N|O|1996-09-15|1996-08-07|1996-09-26|TAKE BACK RETURN|AIR|packages wake never packages. quickly ir 10503|149991|5020|2|12|24491.88|0.04|0.01|N|O|1996-09-07|1996-07-15|1996-09-18|DELIVER IN PERSON|RAIL|fully slow pinto beans. slyly slow theodol 10503|84275|1800|3|45|56667.15|0.03|0.04|N|O|1996-08-08|1996-07-15|1996-08-29|NONE|TRUCK|y final packa 10503|4452|1953|4|28|37980.60|0.01|0.07|N|O|1996-06-23|1996-07-25|1996-07-17|DELIVER IN PERSON|RAIL|haggle final dependencies? fluffily regular 10528|84490|6999|1|45|66352.05|0.09|0.02|A|F|1995-01-07|1994-10-20|1995-01-22|NONE|FOB|y. quickly sly accounts wake after the bo 10528|18491|8492|2|4|5637.96|0.02|0.08|A|F|1994-10-20|1994-12-07|1994-11-12|NONE|MAIL|ly final accou 10528|169905|9906|3|18|35548.20|0.06|0.03|R|F|1994-10-02|1994-10-28|1994-10-06|TAKE BACK RETURN|SHIP|ily. 
pending, unusual requests 10528|178783|6335|4|13|24203.14|0.08|0.07|R|F|1994-10-11|1994-11-18|1994-10-28|NONE|AIR|onic accounts. blithely even accounts are f 10529|101055|8586|1|37|39073.85|0.01|0.08|N|O|1997-10-28|1998-01-16|1997-11-12|COLLECT COD|SHIP|e furiously express accounts. slowl 10529|26917|4424|2|41|75600.31|0.01|0.04|N|O|1998-01-10|1997-12-29|1998-01-21|TAKE BACK RETURN|MAIL|onic forges 10529|186513|6514|3|31|49584.81|0.05|0.08|N|O|1998-01-15|1997-12-13|1998-02-05|DELIVER IN PERSON|AIR|uriously even depo 10530|198910|6468|1|20|40178.20|0.06|0.00|A|F|1994-03-03|1994-03-20|1994-03-06|NONE|TRUCK|lly regular deposits wake al 10530|80120|2629|2|28|30803.36|0.05|0.04|R|F|1994-04-30|1994-03-16|1994-05-14|COLLECT COD|RAIL|press requests. un 10530|166123|6124|3|25|29728.00|0.07|0.04|R|F|1994-04-10|1994-03-24|1994-04-29|DELIVER IN PERSON|REG AIR|blithely a 10531|70447|2955|1|24|34018.56|0.08|0.00|R|F|1992-03-21|1992-05-29|1992-04-10|COLLECT COD|RAIL| lose fluffily slyly express asymptotes 10531|2494|2495|2|14|19550.86|0.09|0.07|A|F|1992-03-27|1992-04-19|1992-04-10|DELIVER IN PERSON|RAIL|al instructions 10531|109619|9620|3|40|65144.40|0.08|0.07|R|F|1992-04-20|1992-04-29|1992-04-26|COLLECT COD|REG AIR| quickly ironic requests. regular pac 10531|163865|8898|4|25|48221.50|0.09|0.05|A|F|1992-07-16|1992-05-18|1992-08-10|COLLECT COD|REG AIR|nently. sheaves 10531|78835|3850|5|2|3627.66|0.00|0.02|A|F|1992-04-14|1992-04-26|1992-04-23|NONE|RAIL|eodolites above the carefully 10531|165961|3510|6|2|4053.92|0.00|0.00|A|F|1992-05-25|1992-05-16|1992-06-06|DELIVER IN PERSON|SHIP|. carefully sly ideas use furiously pe 10531|92246|9774|7|24|29717.76|0.00|0.04|R|F|1992-04-02|1992-05-30|1992-04-09|NONE|TRUCK|regular asymptotes. blithely 10532|177057|9575|1|25|28351.25|0.00|0.01|N|O|1997-12-30|1998-01-26|1998-01-13|DELIVER IN PERSON|SHIP|g deposits are furiously: carefully 10532|182074|7111|2|15|17341.05|0.00|0.07|N|O|1998-03-26|1997-12-30|1998-04-25|NONE|SHIP| special deposits 10532|108204|3225|3|26|31517.20|0.02|0.02|N|O|1998-01-05|1998-02-02|1998-01-23|DELIVER IN PERSON|AIR|xpress deposits. forges are regularly a 10532|170166|2684|4|44|54391.04|0.03|0.07|N|O|1998-03-14|1998-01-08|1998-03-29|NONE|REG AIR|blithely idle account 10532|168858|1375|5|23|44317.55|0.10|0.05|N|O|1998-01-25|1998-01-13|1998-01-30|DELIVER IN PERSON|REG AIR|cajole. always final de 10532|185602|8121|6|50|84380.00|0.05|0.00|N|O|1998-01-08|1998-01-04|1998-01-26|COLLECT COD|SHIP|endencies cajole. slow asymptot 10532|12313|2314|7|30|36759.30|0.05|0.07|N|O|1997-12-24|1998-02-19|1998-01-14|COLLECT COD|REG AIR|e slyly bold packages. furiously u 10533|139838|2352|1|16|30045.28|0.04|0.06|N|O|1998-08-02|1998-07-16|1998-08-21|TAKE BACK RETURN|MAIL|quests haggle quickly. 
carefully express 10533|52052|2053|2|16|16064.80|0.00|0.06|N|O|1998-07-07|1998-06-29|1998-07-09|NONE|FOB| across the carefully silent a 10533|35349|7853|3|46|59079.64|0.02|0.07|N|O|1998-08-10|1998-06-15|1998-08-31|COLLECT COD|FOB|thely regular 10533|101856|1857|4|42|78029.70|0.05|0.04|N|O|1998-07-13|1998-07-14|1998-07-31|COLLECT COD|FOB|encies maintain careful 10533|81300|8825|5|27|34595.10|0.09|0.00|N|O|1998-07-18|1998-06-29|1998-08-11|DELIVER IN PERSON|MAIL|tly regular 10534|155376|407|1|3|4294.11|0.05|0.07|R|F|1993-07-13|1993-07-16|1993-08-06|COLLECT COD|MAIL|counts are regular 10535|74247|4248|1|2|2442.48|0.00|0.04|N|O|1995-06-29|1995-08-08|1995-07-21|NONE|REG AIR|unusual in 10535|68405|8406|2|30|41202.00|0.01|0.07|N|O|1995-07-29|1995-07-15|1995-08-03|DELIVER IN PERSON|REG AIR|usly final excuses nod. blithely 10535|72786|308|3|33|58039.74|0.09|0.01|N|O|1995-07-29|1995-07-30|1995-08-18|TAKE BACK RETURN|REG AIR|carefully even excuses against t 10535|103719|3720|4|22|37899.62|0.02|0.03|N|O|1995-09-22|1995-07-01|1995-09-27|NONE|FOB|e carefully final ideas kindle 10535|191146|8704|5|40|49485.60|0.00|0.03|N|O|1995-09-04|1995-08-14|1995-09-11|TAKE BACK RETURN|RAIL|y ironic asymptotes should sleep after 10535|185224|5225|6|20|26184.40|0.01|0.04|N|O|1995-09-02|1995-08-15|1995-09-15|DELIVER IN PERSON|MAIL|d packages haggle f 10535|44607|9616|7|11|17067.60|0.01|0.01|N|O|1995-07-09|1995-07-27|1995-07-20|COLLECT COD|RAIL|even asymptotes bo 10560|101574|9105|1|8|12604.56|0.08|0.00|N|O|1997-10-08|1997-10-19|1997-10-31|DELIVER IN PERSON|MAIL| slyly final ideas sleep 10560|138597|3624|2|49|80143.91|0.07|0.06|N|O|1997-11-27|1997-09-07|1997-12-25|DELIVER IN PERSON|AIR|ously bold sauternes boost among the fin 10560|197713|7714|3|8|14485.68|0.04|0.07|N|O|1997-08-08|1997-10-24|1997-09-04|NONE|MAIL|ss the blithely unusual foxes snooze 10560|156618|9134|4|18|30142.98|0.10|0.06|N|O|1997-08-07|1997-10-17|1997-08-15|TAKE BACK RETURN|RAIL|nding asymptotes boost fluffily alo 10560|70202|7724|5|6|7033.20|0.01|0.07|N|O|1997-10-08|1997-10-02|1997-10-09|TAKE BACK RETURN|REG AIR|. carefully even 10560|73524|8539|6|38|56905.76|0.03|0.01|N|O|1997-10-05|1997-08-29|1997-11-02|DELIVER IN PERSON|SHIP|hinly final instruct 10560|108268|3289|7|5|6381.30|0.10|0.04|N|O|1997-10-03|1997-09-07|1997-10-12|NONE|MAIL|s requests are carefully final re 10561|55067|5068|1|30|30661.80|0.02|0.08|N|O|1997-06-18|1997-07-08|1997-06-22|COLLECT COD|SHIP|es. blithely pe 10561|94896|7406|2|25|47272.25|0.09|0.00|N|O|1997-07-24|1997-06-18|1997-07-29|TAKE BACK RETURN|FOB|riously. express, final theodolites 10561|102092|9623|3|44|48139.96|0.08|0.05|N|O|1997-05-16|1997-06-26|1997-06-12|COLLECT COD|RAIL| above the ironic asymptotes. 10561|43981|1494|4|30|57749.40|0.01|0.04|N|O|1997-07-28|1997-07-26|1997-08-18|NONE|AIR|y against the deposits. carefully fin 10561|173609|1161|5|31|52160.60|0.08|0.08|N|O|1997-06-06|1997-06-13|1997-06-30|NONE|TRUCK|se slyly. s 10562|143546|3547|1|37|58812.98|0.06|0.07|R|F|1994-12-21|1995-01-08|1995-01-12|COLLECT COD|FOB|ickly final packages cajole 10562|164233|9266|2|37|47997.51|0.05|0.04|R|F|1994-12-12|1995-01-07|1994-12-24|NONE|RAIL|regular pinto beans 10562|59124|9125|3|25|27078.00|0.09|0.02|R|F|1995-03-05|1995-01-18|1995-03-07|COLLECT COD|REG AIR| ironic instruction 10562|95120|139|4|13|14496.56|0.06|0.04|R|F|1995-02-05|1995-01-23|1995-02-26|TAKE BACK RETURN|SHIP|es boost across th 10562|76172|8680|5|46|52815.82|0.03|0.02|R|F|1995-03-06|1994-12-22|1995-03-17|TAKE BACK RETURN|TRUCK|es haggle ruthlessly. 
ca 10562|11713|6716|6|8|12997.68|0.10|0.00|A|F|1995-01-17|1995-01-10|1995-02-09|DELIVER IN PERSON|TRUCK|bold deposits. 10562|56831|1842|7|34|60786.22|0.02|0.02|A|F|1995-02-19|1995-01-30|1995-02-20|NONE|SHIP|carefully careful packages wake fluffily a 10563|147459|5002|1|12|18077.40|0.06|0.01|R|F|1994-01-04|1993-11-14|1994-02-01|DELIVER IN PERSON|SHIP|ithely unusual packages cajole blithe 10563|136884|6885|2|35|67230.80|0.10|0.06|R|F|1993-11-15|1993-12-21|1993-12-09|COLLECT COD|RAIL|equests inte 10563|199924|7482|3|46|93100.32|0.05|0.00|A|F|1993-11-11|1993-11-05|1993-11-18|COLLECT COD|MAIL|quests. bli 10563|110741|742|4|46|80580.04|0.07|0.07|R|F|1994-01-10|1993-11-02|1994-01-17|DELIVER IN PERSON|RAIL|quickly ironic asymptotes wake according 10564|161233|3750|1|13|16824.99|0.06|0.06|N|O|1996-03-31|1996-04-20|1996-04-19|TAKE BACK RETURN|AIR| regular deposits 10564|40151|2656|2|23|25096.45|0.04|0.03|N|O|1996-05-03|1996-05-15|1996-05-09|DELIVER IN PERSON|TRUCK|egular accounts use 10564|8647|3648|3|25|38891.00|0.00|0.06|N|O|1996-06-19|1996-05-26|1996-07-02|NONE|MAIL|raids-- carefully f 10564|114370|9393|4|2|2768.74|0.02|0.05|N|O|1996-03-19|1996-04-27|1996-03-31|COLLECT COD|AIR|ents wake furi 10565|86664|6665|1|11|18157.26|0.03|0.03|N|O|1997-08-26|1997-10-21|1997-09-21|NONE|MAIL|uests lose about the quickly bold 10565|151832|9378|2|15|28257.45|0.07|0.04|N|O|1997-10-07|1997-11-04|1997-10-14|COLLECT COD|AIR|y pending accounts. packag 10565|39431|4438|3|31|42483.33|0.01|0.05|N|O|1997-10-30|1997-10-17|1997-11-13|DELIVER IN PERSON|AIR|to the caref 10565|115167|5168|4|27|31918.32|0.01|0.06|N|O|1997-11-21|1997-11-07|1997-12-18|COLLECT COD|MAIL|s run idly carefully 10565|3992|3993|5|27|51191.73|0.01|0.06|N|O|1997-09-23|1997-10-05|1997-10-08|DELIVER IN PERSON|REG AIR|ites are car 10566|66288|8795|1|44|55188.32|0.01|0.04|N|O|1995-10-25|1995-11-18|1995-11-14|TAKE BACK RETURN|MAIL|en instructions doubt past t 10566|91056|3566|2|1|1047.05|0.01|0.03|N|O|1995-10-03|1995-11-04|1995-10-31|NONE|RAIL|sual theodolites. slyly regula 10566|166467|4016|3|18|27602.28|0.10|0.00|N|O|1995-10-13|1995-09-30|1995-11-11|COLLECT COD|RAIL|ic dependencies. furiousl 10566|8130|631|4|36|37372.68|0.01|0.02|N|O|1995-10-18|1995-10-13|1995-11-11|NONE|SHIP| slyly pending depths impress slyly ab 10566|28488|3493|5|9|12748.32|0.00|0.04|N|O|1995-09-22|1995-10-21|1995-10-16|NONE|RAIL|deposits wake 10566|191002|3522|6|41|44813.00|0.06|0.08|N|O|1995-11-10|1995-09-29|1995-11-23|COLLECT COD|RAIL| carefully even deposits s 10567|147068|2097|1|37|41257.22|0.05|0.01|A|F|1992-05-05|1992-06-07|1992-05-06|NONE|REG AIR|e furiously regular warthogs: special 10567|166044|6045|2|9|9990.36|0.02|0.07|A|F|1992-05-01|1992-06-07|1992-05-29|COLLECT COD|TRUCK|out the qui 10567|98368|878|3|22|30059.92|0.05|0.05|A|F|1992-04-10|1992-05-16|1992-04-26|TAKE BACK RETURN|MAIL|e fluffily e 10592|183486|8523|1|41|64348.68|0.10|0.07|N|O|1996-01-08|1995-12-14|1996-02-06|TAKE BACK RETURN|AIR|ronic, final accounts. fl 10592|37757|261|2|12|20337.00|0.03|0.08|N|O|1995-12-02|1996-02-02|1995-12-19|NONE|TRUCK|e fluffily bold accounts. 
blithely eve 10593|137824|5364|1|10|18618.20|0.03|0.02|R|F|1992-05-24|1992-03-23|1992-06-12|DELIVER IN PERSON|SHIP|are bravely quickly bold depos 10593|192667|2668|2|7|12317.62|0.08|0.05|R|F|1992-05-10|1992-04-24|1992-05-22|COLLECT COD|AIR|p the slyl 10593|140212|2727|3|36|45079.56|0.08|0.08|A|F|1992-03-03|1992-03-22|1992-03-15|COLLECT COD|SHIP|endencies can 10593|128732|3757|4|36|63386.28|0.01|0.01|R|F|1992-05-13|1992-03-20|1992-06-01|DELIVER IN PERSON|AIR|refully. carefully final accounts run car 10593|183907|8944|5|23|45790.70|0.05|0.08|A|F|1992-04-09|1992-04-18|1992-04-17|DELIVER IN PERSON|SHIP|sts are bold, i 10593|120929|5954|6|5|9749.60|0.09|0.00|A|F|1992-05-14|1992-04-20|1992-05-20|TAKE BACK RETURN|MAIL|er the furiously ironic a 10594|44310|1823|1|5|6271.55|0.03|0.02|N|O|1996-05-03|1996-05-18|1996-05-10|NONE|TRUCK|al foxes. final ideas among the slyly s 10595|26458|3965|1|19|26304.55|0.01|0.06|N|O|1998-03-17|1998-01-30|1998-03-19|TAKE BACK RETURN|SHIP|wake carefully at t 10596|163071|5588|1|42|47630.94|0.10|0.03|N|O|1997-05-31|1997-06-04|1997-06-06|TAKE BACK RETURN|TRUCK| the packages. ev 10596|130885|8425|2|19|36401.72|0.09|0.00|N|O|1997-06-22|1997-05-04|1997-06-25|DELIVER IN PERSON|MAIL|ins boost above the fina 10596|179212|1730|3|15|19368.15|0.03|0.04|N|O|1997-05-02|1997-06-25|1997-05-20|TAKE BACK RETURN|SHIP|gle accord 10597|116175|6176|1|23|27396.91|0.07|0.08|N|O|1996-12-24|1996-10-28|1997-01-19|NONE|FOB|ly regular ideas ought to dazzle 10597|141365|1366|2|27|37971.72|0.01|0.07|N|O|1996-12-01|1996-10-22|1996-12-19|COLLECT COD|TRUCK|y carefully u 10597|108343|8344|3|6|8108.04|0.06|0.03|N|O|1996-09-24|1996-10-30|1996-10-03|DELIVER IN PERSON|AIR|iously final depths boost bold requests. d 10598|28544|3549|1|16|23560.64|0.03|0.05|R|F|1992-06-02|1992-04-07|1992-07-02|NONE|AIR|ructions. fluffily bold requests abo 10599|10655|8159|1|42|65757.30|0.02|0.07|N|O|1997-08-11|1997-08-26|1997-08-31|TAKE BACK RETURN|TRUCK|l packages near the pending, even re 10599|69140|9141|2|16|17746.24|0.00|0.04|N|O|1997-08-27|1997-09-19|1997-08-31|DELIVER IN PERSON|RAIL|blithely ironic theodolites use against the 10599|30560|5567|3|24|35773.44|0.00|0.06|N|O|1997-10-24|1997-09-22|1997-10-25|NONE|RAIL|s. quickly special dep 10599|18894|3897|4|40|72515.60|0.09|0.04|N|O|1997-07-30|1997-09-21|1997-08-13|COLLECT COD|SHIP| slyly regular packages. regular fra 10624|105112|133|1|23|25693.53|0.06|0.04|R|F|1995-02-09|1995-02-23|1995-03-09|COLLECT COD|AIR|thely regular requests nag furi 10624|11243|8747|2|46|53095.04|0.03|0.05|A|F|1995-03-13|1995-02-07|1995-03-20|COLLECT COD|AIR|inal packages after 10624|154730|7246|3|22|39264.06|0.02|0.05|A|F|1995-01-19|1995-03-08|1995-01-23|COLLECT COD|REG AIR|yly regular foxes boost d 10624|147063|7064|4|24|26641.44|0.10|0.07|A|F|1995-04-16|1995-02-19|1995-05-01|NONE|RAIL|e the ideas. carefully ironic p 10624|75349|2871|5|30|39730.20|0.09|0.05|A|F|1995-02-18|1995-03-07|1995-03-18|COLLECT COD|REG AIR|slyly alongside of the slyly 10624|185864|5865|6|7|13649.02|0.10|0.02|A|F|1994-12-24|1995-03-04|1995-01-07|TAKE BACK RETURN|TRUCK|beans. pending, regular 10625|88855|8856|1|20|36877.00|0.09|0.01|N|O|1996-02-14|1996-03-20|1996-03-06|TAKE BACK RETURN|AIR|o beans above the even, even ideas c 10625|180842|843|2|13|24996.92|0.03|0.00|N|O|1996-02-07|1996-03-24|1996-03-08|DELIVER IN PERSON|REG AIR| fluffily bold packages. 
fluf 10625|196134|8654|3|46|56585.98|0.03|0.01|N|O|1996-03-01|1996-03-27|1996-03-14|COLLECT COD|MAIL|s integrate furiousl 10625|11655|1656|4|7|10966.55|0.07|0.02|N|O|1996-02-15|1996-04-20|1996-03-12|COLLECT COD|MAIL|ly bold ideas wake fi 10625|81872|4381|5|12|22246.44|0.03|0.08|N|O|1996-02-02|1996-03-06|1996-02-19|COLLECT COD|MAIL|luffy packages. furiously bold 10625|11654|4156|6|12|18787.80|0.10|0.04|N|O|1996-02-15|1996-03-22|1996-02-28|TAKE BACK RETURN|SHIP|p along the 10625|61719|9238|7|35|58824.85|0.04|0.08|N|O|1996-03-20|1996-03-22|1996-04-11|NONE|AIR| above the furiously final deposits. de 10626|101850|1851|1|21|38888.85|0.04|0.03|N|O|1995-12-26|1996-01-28|1996-01-11|DELIVER IN PERSON|TRUCK|o the furious 10627|174246|1798|1|24|31685.76|0.01|0.03|R|F|1993-02-26|1993-04-30|1993-03-20|DELIVER IN PERSON|TRUCK|quickly bold forges. e 10627|147002|4545|2|27|28323.00|0.08|0.02|A|F|1993-03-16|1993-05-19|1993-03-29|DELIVER IN PERSON|MAIL|at the cou 10627|56397|1408|3|41|55488.99|0.06|0.02|R|F|1993-03-11|1993-05-18|1993-04-07|COLLECT COD|RAIL|sits unwind 10627|49844|4853|4|28|50227.52|0.01|0.08|A|F|1993-04-06|1993-04-10|1993-04-20|TAKE BACK RETURN|SHIP|e even pinto b 10627|120630|8167|5|35|57772.05|0.09|0.02|R|F|1993-04-08|1993-03-30|1993-05-06|DELIVER IN PERSON|RAIL|ily final theodolites sle 10628|48662|6175|1|4|6442.64|0.01|0.04|N|O|1998-08-16|1998-08-08|1998-09-08|COLLECT COD|REG AIR|regularly b 10629|110716|5739|1|24|41441.04|0.04|0.08|R|F|1994-11-03|1994-10-23|1994-11-16|TAKE BACK RETURN|SHIP|dogged requests. ir 10629|193362|3363|2|16|23285.76|0.00|0.02|A|F|1994-12-03|1994-11-23|1994-12-24|COLLECT COD|MAIL|ounts haggle bl 10630|144651|9680|1|29|49173.85|0.10|0.01|R|F|1993-04-23|1993-04-10|1993-05-11|NONE|AIR|deposits boo 10630|146897|6898|2|15|29158.35|0.10|0.00|R|F|1993-04-03|1993-04-23|1993-04-20|NONE|RAIL|y. slyly furious foxes ought to are bli 10630|84699|7208|3|37|62296.53|0.06|0.08|A|F|1993-03-04|1993-04-09|1993-03-26|NONE|TRUCK|l, special instruc 10630|130438|5465|4|20|29368.60|0.08|0.05|R|F|1993-05-16|1993-04-01|1993-06-02|DELIVER IN PERSON|FOB|ld platelets wake blithely after the 10631|75273|5274|1|45|56172.15|0.08|0.03|A|F|1993-07-24|1993-07-15|1993-08-08|NONE|MAIL| regular ideas s 10631|103443|8464|2|10|14464.40|0.05|0.02|A|F|1993-07-07|1993-07-29|1993-07-09|COLLECT COD|REG AIR|pliers boos 10631|154005|4006|3|36|38124.00|0.08|0.00|R|F|1993-08-16|1993-07-19|1993-09-14|NONE|AIR|ve the Tiresias integrate against the 10631|156366|8882|4|23|32714.28|0.09|0.01|R|F|1993-08-11|1993-07-25|1993-09-10|COLLECT COD|REG AIR|kly? fluffily final foxes wake b 10631|28875|8876|5|24|43292.88|0.08|0.01|A|F|1993-06-20|1993-08-09|1993-07-17|TAKE BACK RETURN|SHIP|ns use sly 10631|89379|4396|6|41|56103.17|0.02|0.02|A|F|1993-08-02|1993-07-07|1993-08-13|TAKE BACK RETURN|MAIL|ding, special theod 10656|198104|5662|1|47|56498.70|0.05|0.00|N|O|1995-11-06|1996-01-23|1995-11-16|NONE|TRUCK|o beans. furiously regular foxes 10656|118013|5547|2|11|11341.11|0.08|0.03|N|O|1995-12-02|1995-12-30|1995-12-10|TAKE BACK RETURN|SHIP|arefully regular deposits. even co 10656|31955|6962|3|1|1886.95|0.00|0.05|N|O|1995-12-27|1995-12-03|1996-01-07|DELIVER IN PERSON|RAIL|ck, unusual accounts. iron 10657|106396|3927|1|20|28047.80|0.03|0.04|A|F|1993-11-16|1993-12-09|1993-11-30|TAKE BACK RETURN|TRUCK|s. carefully silent courts arou 10657|192434|9992|2|29|44266.47|0.01|0.06|A|F|1993-11-12|1993-11-14|1993-12-06|NONE|AIR|gular accounts. 
ironic package 10657|37229|4739|3|13|15160.86|0.06|0.03|R|F|1993-12-01|1993-12-05|1993-12-07|TAKE BACK RETURN|TRUCK|lithely quickly brave request 10658|86342|1359|1|19|25238.46|0.09|0.00|R|F|1994-10-15|1994-10-05|1994-10-24|NONE|SHIP|osits cajole slyly around the bold dec 10658|136785|4325|2|2|3643.56|0.00|0.04|R|F|1994-12-01|1994-11-04|1994-12-30|NONE|FOB|totes sleep quickly over the carefu 10658|133198|3199|3|6|7387.14|0.01|0.08|R|F|1994-09-28|1994-10-19|1994-10-12|COLLECT COD|SHIP| sleep according to the fluffily ironic a 10658|55072|2588|4|2|2054.14|0.06|0.00|A|F|1994-12-02|1994-10-17|1994-12-14|COLLECT COD|TRUCK|de of the deposits. blithely busy asymptote 10658|186744|1781|5|3|5492.22|0.05|0.01|A|F|1994-11-17|1994-11-02|1994-11-22|TAKE BACK RETURN|TRUCK| requests use pending, final fr 10658|3266|3267|6|37|43262.62|0.06|0.08|A|F|1994-09-21|1994-09-30|1994-09-27|NONE|FOB| deposits. quickly reg 10658|76921|1936|7|21|39856.32|0.07|0.04|R|F|1994-12-06|1994-11-08|1994-12-15|COLLECT COD|AIR|theodolites us 10659|199827|7385|1|6|11560.92|0.08|0.06|R|F|1994-08-28|1994-07-12|1994-09-24|DELIVER IN PERSON|SHIP|slyly even re 10659|86957|1974|2|41|79701.95|0.10|0.00|R|F|1994-07-01|1994-06-16|1994-07-15|TAKE BACK RETURN|TRUCK| slyly final asymptotes 10659|77721|2736|3|26|44166.72|0.03|0.05|R|F|1994-05-19|1994-06-17|1994-06-01|TAKE BACK RETURN|MAIL| slyly. final, special accounts det 10659|49753|7266|4|15|25541.25|0.09|0.01|R|F|1994-08-14|1994-07-17|1994-08-23|DELIVER IN PERSON|SHIP|kages solve c 10659|184777|9814|5|41|76332.57|0.00|0.07|A|F|1994-08-21|1994-07-16|1994-09-19|NONE|AIR|rays across t 10659|145884|8399|6|49|94564.12|0.04|0.00|R|F|1994-08-10|1994-08-02|1994-09-08|COLLECT COD|MAIL|es believe alongside of the furi 10659|64008|4009|7|6|5832.00|0.00|0.06|R|F|1994-08-07|1994-07-09|1994-08-11|DELIVER IN PERSON|SHIP|s cajole. fluffily bold theodolites along 10660|179853|9854|1|49|94709.65|0.09|0.04|N|O|1998-08-22|1998-06-27|1998-09-07|TAKE BACK RETURN|SHIP|g furiously. carefully pending depo 10660|178373|5925|2|10|14513.70|0.05|0.03|N|O|1998-07-02|1998-07-14|1998-07-18|COLLECT COD|REG AIR|efully ruthles 10660|99940|7468|3|48|93117.12|0.10|0.02|N|O|1998-07-24|1998-06-28|1998-08-13|TAKE BACK RETURN|AIR|ilent requests. fu 10661|156228|6229|1|33|42379.26|0.09|0.01|R|F|1994-05-03|1994-03-08|1994-05-06|TAKE BACK RETURN|FOB|ly above the ne 10661|108975|1486|2|16|31743.52|0.00|0.08|R|F|1994-02-16|1994-03-17|1994-02-20|DELIVER IN PERSON|RAIL| regular packages. even, quiet 10661|126679|9192|3|27|46053.09|0.09|0.00|A|F|1994-01-27|1994-03-23|1994-02-08|COLLECT COD|REG AIR|ns. ironically fin 10662|46431|8936|1|4|5509.72|0.10|0.01|N|O|1995-07-15|1995-07-14|1995-07-24|TAKE BACK RETURN|AIR| accounts boost above the depos 10662|152860|5376|2|41|78427.26|0.03|0.04|N|O|1995-07-15|1995-06-26|1995-08-13|NONE|RAIL| regular pinto beans nag express the 10662|110535|3047|3|39|60275.67|0.01|0.02|A|F|1995-05-26|1995-07-07|1995-06-09|NONE|TRUCK|ackages. final packages do 10662|148822|8823|4|39|72961.98|0.07|0.06|R|F|1995-05-26|1995-06-26|1995-05-28|DELIVER IN PERSON|AIR|ss the blithely s 10663|7746|5247|1|27|44650.98|0.09|0.03|R|F|1994-06-09|1994-08-16|1994-07-08|DELIVER IN PERSON|AIR|se besides the regular pinto beans. slyly 10663|178586|6138|2|35|58260.30|0.07|0.01|R|F|1994-07-20|1994-07-21|1994-08-01|TAKE BACK RETURN|MAIL|ounts cajole stealthily agains 10663|90271|272|3|25|31531.75|0.10|0.00|A|F|1994-05-29|1994-07-05|1994-06-27|NONE|AIR|ajole furiously express asymptotes. 
care 10663|176069|1104|4|19|21756.14|0.00|0.04|A|F|1994-07-13|1994-07-29|1994-08-06|DELIVER IN PERSON|MAIL|c requests around the carefully 10688|45037|7542|1|16|15712.48|0.05|0.01|A|F|1992-06-19|1992-07-31|1992-07-06|TAKE BACK RETURN|AIR|ing instructions nag according t 10688|132574|2575|2|19|30524.83|0.09|0.07|R|F|1992-06-16|1992-06-27|1992-07-05|TAKE BACK RETURN|MAIL|to beans solve blithely quickly permane 10689|137029|2056|1|26|27716.52|0.08|0.08|N|O|1996-09-09|1996-08-25|1996-09-21|COLLECT COD|FOB|y. regular, busy requests us 10689|39420|4427|2|11|14953.62|0.09|0.07|N|O|1996-07-25|1996-10-17|1996-08-20|TAKE BACK RETURN|RAIL| furiously. pending depen 10689|128688|8689|3|47|80683.96|0.06|0.00|N|O|1996-08-15|1996-09-21|1996-09-02|TAKE BACK RETURN|REG AIR|uctions are. blithely speci 10690|150523|3039|1|14|22029.28|0.05|0.07|N|O|1997-12-19|1998-01-22|1997-12-31|NONE|REG AIR| fluffily regular deposits are 10690|34397|9404|2|27|35947.53|0.08|0.06|N|O|1998-01-06|1998-01-26|1998-01-23|DELIVER IN PERSON|MAIL|lent foxes wake busily. blith 10690|98521|1031|3|1|1519.52|0.00|0.05|N|O|1998-03-22|1998-02-21|1998-04-04|TAKE BACK RETURN|REG AIR|al accounts! fl 10691|154742|9773|1|10|17967.40|0.04|0.00|R|F|1995-04-11|1995-05-16|1995-05-10|DELIVER IN PERSON|MAIL|pecial deposits cajole 10691|148690|8691|2|18|31296.42|0.05|0.08|A|F|1995-05-14|1995-05-05|1995-06-06|TAKE BACK RETURN|MAIL|nal deposits sleep ir 10691|30770|5777|3|43|73133.11|0.10|0.02|R|F|1995-06-02|1995-05-16|1995-06-17|DELIVER IN PERSON|SHIP|c deposits. furiously expre 10692|187763|5318|1|1|1850.76|0.09|0.03|A|F|1994-08-27|1994-09-13|1994-09-13|NONE|REG AIR|kages. quickly ironic packages n 10692|131810|4324|2|3|5525.43|0.07|0.06|R|F|1994-10-04|1994-09-19|1994-10-09|DELIVER IN PERSON|AIR|he fluffy, unusual accounts. blithel 10692|123457|994|3|49|72542.05|0.03|0.06|A|F|1994-08-14|1994-09-08|1994-08-19|TAKE BACK RETURN|REG AIR|thely brave deposits sleep b 10692|130855|856|4|7|13200.95|0.07|0.01|A|F|1994-08-27|1994-10-13|1994-09-22|COLLECT COD|REG AIR|sits. carefully unusual requests coul 10692|72340|4848|5|28|36745.52|0.02|0.00|A|F|1994-08-06|1994-09-25|1994-08-30|DELIVER IN PERSON|TRUCK|ng ideas use 10692|18077|579|6|40|39802.80|0.05|0.05|A|F|1994-07-29|1994-08-30|1994-08-11|DELIVER IN PERSON|SHIP|yly careful pinto beans 10693|145215|244|1|35|44107.35|0.10|0.02|N|F|1995-06-15|1995-07-14|1995-07-10|COLLECT COD|FOB|ng the special accounts nag f 10694|180562|8117|1|19|31208.64|0.05|0.03|N|O|1998-01-22|1998-02-10|1998-01-28|TAKE BACK RETURN|TRUCK|e regular, express instruction 10694|100524|525|2|11|16769.72|0.08|0.03|N|O|1998-03-07|1998-02-26|1998-03-23|TAKE BACK RETURN|AIR|ully ironic notornis are ironic, special f 10694|90290|2800|3|32|40969.28|0.08|0.04|N|O|1997-12-24|1998-01-17|1998-01-19|COLLECT COD|AIR| blithely i 10694|89899|2408|4|15|28333.35|0.07|0.03|N|O|1997-12-25|1998-02-24|1998-01-06|TAKE BACK RETURN|REG AIR| detect bl 10694|116054|3588|5|38|40661.90|0.00|0.05|N|O|1998-01-03|1998-01-19|1998-01-30|COLLECT COD|REG AIR|ely regular accounts. care 10694|103987|9008|6|8|15927.84|0.03|0.08|N|O|1998-03-08|1998-01-17|1998-03-25|NONE|FOB|cording to the blithel 10694|101951|4462|7|12|23435.40|0.07|0.03|N|O|1998-01-05|1998-02-06|1998-01-12|DELIVER IN PERSON|FOB|furiously even deposits sleep among t 10695|78490|998|1|5|7342.45|0.09|0.02|N|O|1995-07-05|1995-07-07|1995-07-28|COLLECT COD|AIR|lar pinto beans. final 10695|176986|2021|2|39|80456.22|0.06|0.01|N|F|1995-06-02|1995-06-20|1995-06-23|TAKE BACK RETURN|MAIL|ect. 
slyly ironic foxes according to the f 10695|199879|2399|3|9|17809.83|0.05|0.01|R|F|1995-05-28|1995-07-14|1995-06-16|NONE|REG AIR|sly. ironic sauternes across the 10695|156999|9515|4|5|10279.95|0.02|0.03|N|O|1995-07-31|1995-07-29|1995-08-02|TAKE BACK RETURN|MAIL|lent asymptotes cajole carefully accordin 10720|14133|9136|1|9|9424.17|0.03|0.06|N|O|1998-04-19|1998-06-07|1998-04-21|TAKE BACK RETURN|MAIL|haggle furiously around t 10720|118834|8835|2|32|59290.56|0.05|0.03|N|O|1998-07-29|1998-06-24|1998-08-05|TAKE BACK RETURN|SHIP|ependencies. carefully final theodo 10720|35748|8252|3|7|11786.18|0.01|0.03|N|O|1998-04-25|1998-06-10|1998-04-29|DELIVER IN PERSON|SHIP|iously bold dolphins. regular, regular 10720|11217|8721|4|20|22564.20|0.09|0.07|N|O|1998-06-28|1998-05-25|1998-07-23|DELIVER IN PERSON|RAIL|omise furiously express instr 10720|122784|5297|5|15|27101.70|0.06|0.06|N|O|1998-05-05|1998-06-11|1998-05-22|NONE|AIR|quests sleep. ironic theodo 10721|134703|9730|1|25|43442.50|0.02|0.08|N|O|1996-04-18|1996-05-09|1996-04-28|DELIVER IN PERSON|AIR|nto beans. careful 10721|141279|3794|2|11|14522.97|0.05|0.00|N|O|1996-05-22|1996-04-18|1996-06-01|NONE|RAIL|riously regular theodolites. slyl 10721|68611|6130|3|39|61604.79|0.05|0.03|N|O|1996-06-22|1996-04-26|1996-07-09|DELIVER IN PERSON|RAIL|lets. slyly regular 10722|71795|9317|1|27|47703.33|0.00|0.08|R|F|1995-04-26|1995-02-23|1995-05-13|NONE|REG AIR|s are. carefully special excuses 10722|48845|3854|2|30|53815.20|0.08|0.05|A|F|1995-02-26|1995-03-29|1995-03-24|COLLECT COD|TRUCK|atelets: ironic, pe 10722|127239|7240|3|48|60779.04|0.04|0.00|R|F|1995-03-04|1995-03-23|1995-03-17|NONE|TRUCK| quickly. platelets promise furiously at 10722|101748|6769|4|7|12248.18|0.06|0.03|R|F|1995-05-10|1995-04-13|1995-05-16|COLLECT COD|FOB|urts wake. blith 10722|65968|5969|5|43|83160.28|0.04|0.01|R|F|1995-02-25|1995-03-01|1995-03-04|DELIVER IN PERSON|AIR|he dependencies. furiously eve 10722|113068|5580|6|6|6486.36|0.09|0.03|R|F|1995-02-12|1995-04-02|1995-02-21|NONE|FOB|eas. carefully special deposits after the 10723|90356|5375|1|23|30966.05|0.06|0.05|N|O|1998-07-25|1998-06-30|1998-08-11|TAKE BACK RETURN|REG AIR|counts sleep blithely silent re 10723|11345|6348|2|27|33921.18|0.03|0.06|N|O|1998-07-07|1998-08-08|1998-07-13|TAKE BACK RETURN|TRUCK|jole carefully. deposits wake slyly. unusua 10724|60572|573|1|38|58237.66|0.02|0.08|A|F|1994-10-30|1994-11-06|1994-11-12|NONE|FOB|l, bold packages are sl 10724|25371|376|2|14|18149.18|0.00|0.08|A|F|1994-09-21|1994-11-24|1994-10-05|COLLECT COD|SHIP|he depths detect s 10724|195983|1022|3|47|97712.06|0.10|0.08|A|F|1995-01-04|1994-11-02|1995-01-14|COLLECT COD|MAIL|. furiously final asymptotes afte 10724|75024|5025|4|32|31968.64|0.01|0.01|R|F|1994-11-28|1994-10-17|1994-12-28|DELIVER IN PERSON|MAIL|al dolphins cajole busily. regular asympto 10725|101123|8654|1|28|31475.36|0.01|0.07|N|O|1998-05-14|1998-06-10|1998-06-05|NONE|MAIL|nstructions. always regular requ 10725|171672|4190|2|28|48822.76|0.09|0.00|N|O|1998-05-19|1998-06-29|1998-06-09|COLLECT COD|SHIP| furiously final notornis wake carefully 10725|87247|7248|3|5|6171.20|0.01|0.01|N|O|1998-07-07|1998-05-22|1998-07-09|NONE|FOB|accounts affix 10726|167880|7881|1|17|33113.96|0.01|0.04|A|F|1993-06-25|1993-07-02|1993-07-14|DELIVER IN PERSON|REG AIR|unts integrate fluffily alongs 10726|143391|3392|2|10|14343.90|0.01|0.01|A|F|1993-05-14|1993-06-11|1993-05-16|COLLECT COD|SHIP|. 
ironic, ironic a 10726|172108|4626|3|26|30682.60|0.08|0.06|A|F|1993-05-17|1993-06-10|1993-06-05|DELIVER IN PERSON|RAIL|ely final i 10726|17540|5044|4|48|69961.92|0.09|0.02|R|F|1993-06-24|1993-06-12|1993-07-21|DELIVER IN PERSON|AIR| across the quickly 10726|189988|7543|5|13|27013.74|0.02|0.05|A|F|1993-06-20|1993-07-15|1993-07-07|NONE|REG AIR|ze slyly against the fu 10726|117878|390|6|48|91001.76|0.05|0.08|A|F|1993-06-13|1993-06-12|1993-07-10|DELIVER IN PERSON|SHIP|ironic accounts. si 10726|171613|6648|7|5|8423.05|0.05|0.03|R|F|1993-05-31|1993-07-07|1993-06-10|COLLECT COD|FOB|jole above the regular packages. ent 10727|98482|3501|1|34|50336.32|0.08|0.07|A|F|1992-10-10|1992-10-23|1992-11-07|TAKE BACK RETURN|MAIL|ake requests. 10727|182390|9945|2|38|55950.82|0.07|0.02|R|F|1992-11-22|1992-10-03|1992-11-29|DELIVER IN PERSON|RAIL|nts haggle slyly theodolites. quickly fina 10727|27198|7199|3|17|19128.23|0.02|0.07|R|F|1992-08-13|1992-10-28|1992-08-14|COLLECT COD|REG AIR|y furiously at the blithely pending as 10727|113125|659|4|22|25038.64|0.10|0.07|R|F|1992-08-29|1992-09-21|1992-09-01|TAKE BACK RETURN|REG AIR|unusual platelets. fluffi 10727|84834|4835|5|48|87303.84|0.09|0.00|A|F|1992-08-06|1992-09-26|1992-09-03|TAKE BACK RETURN|RAIL|ct carefully. silent 10727|189781|2300|6|20|37415.60|0.07|0.00|R|F|1992-10-13|1992-09-04|1992-11-07|COLLECT COD|MAIL| quickly. carefully final accounts wake fur 10727|39486|6996|7|27|38487.96|0.07|0.03|A|F|1992-11-08|1992-09-08|1992-11-16|DELIVER IN PERSON|MAIL|usual packages. furiously 10752|33055|3056|1|39|38533.95|0.03|0.02|N|O|1995-09-27|1995-12-09|1995-10-15|COLLECT COD|MAIL|y alongside of the even fo 10752|139938|4965|2|49|96918.57|0.08|0.02|N|O|1995-10-20|1995-11-02|1995-11-18|COLLECT COD|SHIP|sly special deposits af 10752|132778|318|3|27|48890.79|0.09|0.01|N|O|1995-09-25|1995-11-28|1995-09-26|NONE|AIR|ular requests. 10752|185813|850|4|50|94940.50|0.06|0.04|N|O|1995-11-08|1995-11-30|1995-12-01|DELIVER IN PERSON|SHIP|aids at the quickly final foxes maintai 10753|12535|7538|1|22|31845.66|0.08|0.00|A|F|1994-08-14|1994-07-29|1994-08-19|COLLECT COD|TRUCK|ccounts. instructi 10754|116553|1576|1|46|72199.30|0.10|0.03|R|F|1993-06-16|1993-04-25|1993-06-29|DELIVER IN PERSON|AIR|kly across the 10754|90299|5318|2|34|43835.86|0.03|0.05|R|F|1993-07-11|1993-06-09|1993-08-09|DELIVER IN PERSON|REG AIR|e carefully against 10754|40069|7582|3|14|14126.84|0.03|0.03|A|F|1993-04-18|1993-05-13|1993-05-01|COLLECT COD|MAIL|y ironic ideas. bold packages n 10754|36825|1832|4|34|59901.88|0.03|0.06|R|F|1993-03-31|1993-05-01|1993-04-22|NONE|FOB|gular accounts. regular courts above the c 10754|107788|5319|5|27|48486.06|0.08|0.01|A|F|1993-06-09|1993-05-18|1993-06-11|DELIVER IN PERSON|FOB| the blithely regular requests 10754|188952|3989|6|21|42859.95|0.10|0.03|A|F|1993-06-30|1993-04-22|1993-07-30|COLLECT COD|TRUCK|furiously ironic 10755|122062|7087|1|48|52034.88|0.04|0.04|R|F|1993-08-22|1993-09-22|1993-09-13|DELIVER IN PERSON|RAIL|ress dependencies engage slyly ab 10755|82566|91|2|6|9291.36|0.08|0.08|A|F|1993-08-18|1993-09-05|1993-09-09|COLLECT COD|SHIP|ng foxes cajole carefu 10755|77413|4935|3|9|12513.69|0.06|0.05|A|F|1993-10-06|1993-09-25|1993-10-18|NONE|SHIP|sits. 
silent packages wake furiously 10755|74129|1651|4|34|37506.08|0.07|0.04|A|F|1993-07-17|1993-09-12|1993-08-09|NONE|SHIP|iously spe 10756|115702|8214|1|39|66990.30|0.06|0.05|A|F|1992-06-23|1992-07-14|1992-06-27|TAKE BACK RETURN|SHIP|slowly regular or 10757|96764|4292|1|39|68669.64|0.04|0.07|N|O|1998-09-04|1998-07-18|1998-09-16|NONE|REG AIR|sits cajole sile 10757|105991|1012|2|19|37942.81|0.07|0.05|N|O|1998-07-21|1998-08-26|1998-08-18|TAKE BACK RETURN|RAIL|packages use instructions. pi 10758|52776|2777|1|34|58778.18|0.05|0.08|N|O|1997-01-26|1997-01-28|1997-02-24|COLLECT COD|FOB|counts again 10758|193504|1062|2|17|27157.50|0.01|0.00|N|O|1997-02-19|1996-12-17|1997-03-02|DELIVER IN PERSON|RAIL|ly asymptotes. furiously regular pearls 10758|14207|4208|3|2|2242.40|0.06|0.07|N|O|1997-01-29|1997-01-18|1997-01-30|DELIVER IN PERSON|FOB|s. pinto beans u 10758|170856|3374|4|33|63586.05|0.03|0.04|N|O|1997-01-24|1996-12-28|1997-01-29|NONE|AIR|y against the furiously ironic ac 10759|150941|3457|1|10|19919.40|0.05|0.07|N|O|1996-10-18|1996-11-16|1996-11-02|DELIVER IN PERSON|TRUCK|ts thrash blithely among th 10759|62932|7945|2|26|49268.18|0.01|0.02|N|O|1996-10-06|1996-12-16|1996-10-25|TAKE BACK RETURN|RAIL|requests. s 10784|158100|8101|1|31|35901.10|0.03|0.01|A|F|1993-10-30|1993-09-13|1993-11-28|DELIVER IN PERSON|AIR| deposits nag slyl 10784|174365|6883|2|9|12954.24|0.00|0.06|R|F|1993-09-08|1993-09-11|1993-09-30|COLLECT COD|RAIL|nto beans cajole blithely ironic, si 10784|138262|5802|3|20|26005.20|0.03|0.04|A|F|1993-10-28|1993-09-27|1993-11-05|NONE|SHIP|azzle across the blithely spe 10784|163219|3220|4|42|53852.82|0.09|0.00|A|F|1993-10-15|1993-08-13|1993-11-14|COLLECT COD|FOB|lyly even requests boost quick 10784|9728|4729|5|8|13101.76|0.08|0.03|A|F|1993-09-04|1993-09-29|1993-09-30|COLLECT COD|FOB|sly special deposits wake fu 10784|90236|237|6|18|22072.14|0.05|0.03|A|F|1993-07-20|1993-09-21|1993-07-21|NONE|REG AIR|y. fluffily sp 10785|155114|5115|1|1|1169.11|0.03|0.07|R|F|1992-09-08|1992-08-14|1992-09-16|TAKE BACK RETURN|MAIL| instructions. regular, sile 10786|47961|2970|1|36|68722.56|0.08|0.03|N|O|1995-10-30|1995-09-26|1995-11-14|COLLECT COD|MAIL|y even, final requests. regular, ironi 10786|78969|1477|2|14|27271.44|0.08|0.05|N|O|1995-10-07|1995-09-16|1995-10-19|TAKE BACK RETURN|SHIP|xcuses boost 10786|15782|8284|3|1|1697.78|0.08|0.03|N|O|1995-10-08|1995-11-02|1995-10-10|NONE|SHIP|t. foxes along the furiously spec 10787|66790|6791|1|29|50946.91|0.00|0.04|N|O|1997-05-01|1997-04-01|1997-05-08|NONE|TRUCK|e carefully according 10787|134138|6652|2|42|49229.46|0.06|0.02|N|O|1997-02-15|1997-05-05|1997-03-14|COLLECT COD|AIR|ts are quickly. eve 10787|53394|910|3|47|63327.33|0.01|0.07|N|O|1997-04-22|1997-03-19|1997-05-01|TAKE BACK RETURN|REG AIR|ffily bold waters wake express deposits; sp 10787|176351|8869|4|36|51384.60|0.10|0.07|N|O|1997-04-01|1997-04-11|1997-04-23|COLLECT COD|RAIL|st are. blithely even gifts along t 10787|115617|8129|5|47|76732.67|0.04|0.00|N|O|1997-06-06|1997-04-25|1997-06-17|COLLECT COD|FOB|eas. instructions wake blith 10787|167062|9579|6|30|33871.80|0.07|0.05|N|O|1997-05-16|1997-04-03|1997-06-07|DELIVER IN PERSON|MAIL|usual requests wake slyl 10787|197695|2734|7|28|50195.32|0.02|0.02|N|O|1997-04-30|1997-04-15|1997-05-06|NONE|AIR|ular pinto beans. carefu 10788|50814|8330|1|22|38825.82|0.00|0.06|N|O|1997-04-18|1997-03-18|1997-05-01|NONE|AIR|y final instructio 10788|15283|2787|2|27|32353.56|0.01|0.04|N|O|1997-03-06|1997-03-17|1997-03-28|NONE|REG AIR|pinto beans. 
ironic theo 10788|53407|923|3|26|35370.40|0.03|0.01|N|O|1997-03-06|1997-04-13|1997-03-28|NONE|AIR|dolphins. carefully enticing requests 10788|196473|6474|4|28|43945.16|0.08|0.02|N|O|1997-05-13|1997-04-10|1997-06-05|TAKE BACK RETURN|SHIP|ake carefully silently bold pa 10789|155580|3126|1|7|11449.06|0.08|0.00|R|F|1993-09-23|1993-10-24|1993-10-11|NONE|SHIP|y unusual theodolit 10789|17566|5070|2|36|53408.16|0.08|0.00|R|F|1993-08-25|1993-10-23|1993-09-11|TAKE BACK RETURN|MAIL|alongside of the furiously 10789|20873|8380|3|25|44846.75|0.09|0.06|A|F|1993-09-06|1993-10-31|1993-09-12|COLLECT COD|SHIP|tructions nag quickly. slyly silent forge 10789|9727|9728|4|9|14730.48|0.00|0.08|R|F|1993-12-11|1993-10-11|1993-12-16|NONE|TRUCK|endencies-- blithely f 10789|75990|5991|5|7|13761.93|0.00|0.07|A|F|1993-08-26|1993-11-11|1993-09-02|COLLECT COD|TRUCK|ncies above the final courts hag 10790|156577|6578|1|27|44106.39|0.09|0.00|A|F|1994-12-17|1995-01-23|1995-01-15|TAKE BACK RETURN|TRUCK|as. requests 10790|182698|2699|2|24|42736.56|0.03|0.03|R|F|1995-01-30|1995-01-29|1995-02-15|TAKE BACK RETURN|FOB|thely. bus 10791|28487|8488|1|35|49541.80|0.03|0.08|N|O|1995-09-21|1995-10-26|1995-10-04|DELIVER IN PERSON|SHIP|slyly unusual foxes wake entic 10791|174102|1654|2|24|28226.40|0.07|0.03|N|O|1995-10-08|1995-10-09|1995-10-24|COLLECT COD|AIR|furiously pending dependencies. pinto beans 10791|116160|3694|3|40|47046.40|0.06|0.03|N|O|1995-08-22|1995-11-02|1995-09-21|NONE|FOB|sly ironic packages. thin deposits ab 10791|92935|2936|4|23|44342.39|0.09|0.06|N|O|1995-10-29|1995-11-10|1995-11-02|NONE|TRUCK|efully above the ironic, re 10791|156949|6950|5|8|16047.52|0.10|0.02|N|O|1995-10-06|1995-11-12|1995-10-21|TAKE BACK RETURN|FOB|es about the quickly even 10816|121900|4413|1|22|42281.80|0.01|0.06|N|O|1996-02-17|1996-02-27|1996-03-10|NONE|MAIL|osits are slyly even packag 10816|160226|7775|2|14|18007.08|0.08|0.02|N|O|1996-04-25|1996-02-22|1996-05-17|NONE|FOB|ach furiously above the regular sentiments. 10817|22309|9816|1|4|4925.20|0.06|0.05|N|O|1997-02-26|1997-02-13|1997-03-06|TAKE BACK RETURN|FOB|lithely pending p 10817|137646|5186|2|39|65661.96|0.00|0.07|N|O|1997-02-23|1997-01-17|1997-02-28|TAKE BACK RETURN|FOB|re fluffily according to the 10817|1979|9480|3|43|80881.71|0.05|0.05|N|O|1997-04-06|1997-03-04|1997-04-25|DELIVER IN PERSON|REG AIR|ar accounts. express frets affix 10817|57844|5360|4|33|59460.72|0.03|0.04|N|O|1997-03-04|1997-02-19|1997-03-24|COLLECT COD|FOB|gular requests wake 10817|169203|4236|5|44|55976.80|0.07|0.03|N|O|1996-12-27|1997-01-08|1997-01-21|TAKE BACK RETURN|REG AIR|de of the fluffily final reque 10818|64694|2213|1|9|14928.21|0.03|0.08|N|O|1998-06-06|1998-07-22|1998-06-26|DELIVER IN PERSON|SHIP|es-- bravely regular platelets haggle care 10818|103934|1465|2|45|87206.85|0.08|0.08|N|O|1998-07-26|1998-07-28|1998-08-09|TAKE BACK RETURN|MAIL|heodolites sleep quic 10818|14819|2323|3|15|26007.15|0.02|0.08|N|O|1998-06-07|1998-06-26|1998-06-19|NONE|MAIL| furiously express a 10818|59744|9745|4|33|56223.42|0.06|0.03|N|O|1998-08-11|1998-07-30|1998-08-25|TAKE BACK RETURN|AIR|s across the regular pinto beans 10818|104747|7258|5|38|66566.12|0.06|0.05|N|O|1998-07-03|1998-06-20|1998-07-26|TAKE BACK RETURN|FOB|ven requests. bold, dogged senti 10818|73585|1107|6|50|77929.00|0.07|0.04|N|O|1998-06-05|1998-07-11|1998-07-03|NONE|RAIL|deas are slyly. even deposits 10818|138211|5751|7|8|9993.68|0.10|0.02|N|O|1998-08-23|1998-07-29|1998-09-13|COLLECT COD|MAIL|st furiously final instructions! 
10819|96448|8958|1|12|17333.28|0.03|0.08|R|F|1993-11-02|1993-11-09|1993-11-04|TAKE BACK RETURN|RAIL|tithes. ironic, special ideas about the 10819|199691|4730|2|32|57302.08|0.07|0.00|A|F|1993-08-31|1993-10-30|1993-09-26|DELIVER IN PERSON|FOB|ecial theodolites. i 10820|71257|6272|1|28|34391.00|0.10|0.03|N|O|1996-04-14|1996-01-27|1996-05-01|TAKE BACK RETURN|TRUCK| carefully ironic instructions 10820|14646|4647|2|37|57743.68|0.09|0.03|N|O|1995-12-29|1996-02-08|1996-01-11|COLLECT COD|REG AIR|s. blithe dep 10820|149020|4049|3|24|25656.48|0.02|0.07|N|O|1996-03-17|1996-03-09|1996-04-09|NONE|TRUCK|rets. requests affix f 10820|170268|5303|4|26|34794.76|0.01|0.02|N|O|1996-03-15|1996-02-17|1996-03-26|DELIVER IN PERSON|FOB|l requests wake-- 10820|84755|4756|5|21|36534.75|0.01|0.04|N|O|1996-01-28|1996-02-28|1996-02-08|DELIVER IN PERSON|REG AIR|ly final theodol 10820|66887|4406|6|16|29662.08|0.05|0.01|N|O|1996-04-04|1996-03-16|1996-04-25|NONE|RAIL|st the fluffily even accoun 10820|154541|4542|7|2|3191.08|0.06|0.08|N|O|1996-03-20|1996-03-17|1996-04-15|COLLECT COD|RAIL|t to use furious 10821|163202|8235|1|46|58199.20|0.09|0.07|N|O|1996-07-18|1996-06-19|1996-07-23|COLLECT COD|MAIL|hely according to the sl 10821|124679|7192|2|47|80072.49|0.01|0.07|N|O|1996-07-31|1996-07-28|1996-08-28|DELIVER IN PERSON|MAIL| bold ideas. fluffily 10821|73531|6039|3|33|49649.49|0.05|0.05|N|O|1996-08-02|1996-07-11|1996-08-26|TAKE BACK RETURN|REG AIR|posits. final instructions about th 10822|98457|8458|1|10|14554.50|0.03|0.05|A|F|1994-02-23|1994-02-08|1994-03-09|TAKE BACK RETURN|RAIL|ully regular requests. theodolites amon 10823|67702|7703|1|32|53430.40|0.06|0.04|A|F|1992-07-03|1992-08-24|1992-07-17|TAKE BACK RETURN|AIR|s grow about the ideas. s 10823|62804|2805|2|40|70672.00|0.01|0.00|R|F|1992-09-01|1992-09-07|1992-09-21|NONE|SHIP|gular, regular 10848|69183|4196|1|41|47239.38|0.01|0.04|N|O|1997-01-31|1996-12-28|1997-02-12|TAKE BACK RETURN|TRUCK|usual excuses. 10849|21596|6601|1|33|50080.47|0.01|0.06|N|O|1997-05-08|1997-03-03|1997-05-20|COLLECT COD|TRUCK|refully expr 10849|158963|1479|2|38|76834.48|0.10|0.03|N|O|1997-05-26|1997-03-26|1997-06-03|COLLECT COD|MAIL|foxes. express fo 10849|87878|2895|3|2|3731.74|0.03|0.00|N|O|1997-04-16|1997-04-08|1997-05-16|COLLECT COD|RAIL|osits affix idly pending instructions 10849|158416|3447|4|50|73720.50|0.06|0.06|N|O|1997-02-01|1997-03-20|1997-02-24|NONE|AIR| unusual, thin sentiments. fu 10849|83457|982|5|50|72022.50|0.04|0.00|N|O|1997-05-13|1997-04-15|1997-05-17|TAKE BACK RETURN|MAIL| final pinto beans u 10849|59612|7128|6|39|61292.79|0.02|0.00|N|O|1997-04-09|1997-04-30|1997-04-16|DELIVER IN PERSON|REG AIR|requests are about t 10850|129760|7297|1|9|16107.84|0.06|0.02|N|O|1996-11-25|1996-12-19|1996-12-04|COLLECT COD|FOB|ular deposits. packages am 10850|60300|7819|2|1|1260.30|0.07|0.05|N|O|1996-12-11|1996-11-20|1997-01-09|DELIVER IN PERSON|REG AIR|ironic packages sleep 10850|47821|7822|3|15|26532.30|0.06|0.04|N|O|1996-11-23|1996-10-25|1996-12-02|TAKE BACK RETURN|SHIP|equests boost platelets. i 10850|122151|2152|4|41|48099.15|0.03|0.00|N|O|1996-11-29|1996-10-29|1996-12-12|NONE|MAIL| slyly silent excuses affix slyly accordin 10850|189336|9337|5|1|1425.33|0.04|0.02|N|O|1996-09-27|1996-11-24|1996-10-02|DELIVER IN PERSON|FOB|unts. quickly regular warthogs thras 10851|7478|9979|1|4|5541.88|0.06|0.04|R|F|1994-06-27|1994-06-12|1994-07-19|NONE|FOB|gular tithes. 
unusual deposits h 10851|69635|4648|2|33|52952.79|0.07|0.05|R|F|1994-06-16|1994-07-01|1994-07-08|DELIVER IN PERSON|AIR|tions should have to sleep fluffil 10851|145153|7668|3|18|21566.70|0.00|0.04|R|F|1994-05-01|1994-06-15|1994-05-09|TAKE BACK RETURN|TRUCK|uffily unusual platelets slee 10851|171030|3548|4|29|31929.87|0.01|0.02|A|F|1994-05-18|1994-06-26|1994-05-21|NONE|MAIL|le. ironic dependencie 10851|92333|4843|5|27|35783.91|0.05|0.04|R|F|1994-04-12|1994-06-14|1994-04-18|DELIVER IN PERSON|MAIL|furiously instructions. deposits a 10851|27053|7054|6|18|17640.90|0.04|0.05|R|F|1994-07-06|1994-07-01|1994-07-31|NONE|AIR|pendencies. slyly regular 10851|67006|4525|7|35|34055.00|0.00|0.07|R|F|1994-06-27|1994-06-04|1994-07-22|COLLECT COD|REG AIR|sly unusual dep 10852|159610|2126|1|31|51757.91|0.03|0.07|A|F|1994-01-15|1994-01-14|1994-02-09|TAKE BACK RETURN|MAIL| accounts haggle quickly. carefully br 10852|155297|2843|2|11|14875.19|0.10|0.00|R|F|1994-01-24|1993-12-01|1994-02-07|DELIVER IN PERSON|MAIL|packages wake 10852|74594|2116|3|39|61175.01|0.02|0.06|A|F|1993-12-03|1994-01-12|1993-12-07|DELIVER IN PERSON|MAIL|ly final request 10852|85429|2954|4|35|49504.70|0.07|0.00|A|F|1993-12-17|1993-11-26|1994-01-03|TAKE BACK RETURN|MAIL| according to the requests. careful 10852|196586|9106|5|43|72350.94|0.01|0.02|A|F|1993-12-10|1993-12-27|1993-12-26|TAKE BACK RETURN|RAIL|r courts? p 10852|13251|755|6|23|26777.75|0.03|0.01|A|F|1993-12-11|1993-12-29|1994-01-02|COLLECT COD|TRUCK|nts are. ev 10853|148996|1511|1|14|28629.86|0.02|0.04|R|F|1994-01-27|1994-03-28|1994-02-07|NONE|SHIP|regular packages. slyly bold asympt 10853|161854|4371|2|21|40232.85|0.03|0.02|A|F|1994-01-31|1994-04-01|1994-02-21|DELIVER IN PERSON|SHIP|ithely fluffi 10853|64203|4204|3|12|14006.40|0.10|0.04|A|F|1994-02-10|1994-03-19|1994-03-11|NONE|RAIL|eful platelets. even, final deposits cajo 10854|78461|5983|1|3|4318.38|0.08|0.05|N|O|1998-08-11|1998-07-30|1998-08-17|DELIVER IN PERSON|AIR|ing to the ironic, final foxes. special ac 10855|426|2927|1|38|50403.96|0.10|0.06|N|O|1997-07-23|1997-09-17|1997-08-22|COLLECT COD|RAIL|ross the slyly regular packages. even 10855|151223|1224|2|36|45871.92|0.08|0.00|N|O|1997-10-07|1997-08-17|1997-10-27|DELIVER IN PERSON|RAIL|old packages. idle depende 10855|1927|9428|3|46|84130.32|0.02|0.03|N|O|1997-10-15|1997-09-27|1997-10-18|COLLECT COD|RAIL|y silent packages cajole at the sly 10855|175713|748|4|11|19675.81|0.00|0.00|N|O|1997-09-24|1997-09-10|1997-10-01|COLLECT COD|REG AIR|ias. furiously bold instru 10855|3103|5604|5|27|27164.70|0.09|0.05|N|O|1997-09-17|1997-09-22|1997-09-23|TAKE BACK RETURN|REG AIR|ctions. quick 10855|152017|2018|6|8|8552.08|0.07|0.05|N|O|1997-08-09|1997-09-17|1997-08-26|TAKE BACK RETURN|TRUCK| hinder blithely. furiously special a 10855|140744|3259|7|41|73174.34|0.09|0.05|N|O|1997-09-21|1997-09-19|1997-10-04|COLLECT COD|AIR|lly regular request 10880|84170|6679|1|25|28854.25|0.09|0.01|R|F|1992-06-22|1992-07-31|1992-07-02|COLLECT COD|REG AIR|arefully alongside of the caref 10881|126670|1695|1|15|25450.05|0.00|0.08|R|F|1994-06-01|1994-03-29|1994-06-14|TAKE BACK RETURN|SHIP|en platelets haggle after th 10881|135021|2561|2|15|15840.30|0.09|0.00|A|F|1994-04-08|1994-04-27|1994-04-22|DELIVER IN PERSON|AIR|dugouts boost against the 10881|88337|8338|3|11|14578.63|0.06|0.07|A|F|1994-03-01|1994-04-11|1994-03-02|DELIVER IN PERSON|FOB|ending pinto 10881|79771|2279|4|19|33264.63|0.01|0.01|A|F|1994-02-24|1994-04-26|1994-03-08|TAKE BACK RETURN|REG AIR|es haggle slyly. 
carefully final exc 10881|97559|7560|5|32|49809.60|0.01|0.08|A|F|1994-05-19|1994-04-01|1994-05-29|COLLECT COD|FOB|beans print sl 10881|69594|4607|6|46|71925.14|0.02|0.01|R|F|1994-03-20|1994-04-02|1994-03-26|NONE|SHIP|sits wake quick 10881|51794|9310|7|7|12220.53|0.00|0.05|A|F|1994-06-09|1994-03-29|1994-06-25|COLLECT COD|AIR|ctions haggle slyly ironic platelets. bli 10882|96380|6381|1|37|50926.06|0.05|0.02|N|O|1998-07-18|1998-07-23|1998-08-11|COLLECT COD|AIR| unusual requests affi 10882|52266|7277|2|1|1218.26|0.08|0.00|N|O|1998-07-06|1998-07-14|1998-07-10|TAKE BACK RETURN|REG AIR|ole carefully permanently eve 10882|19375|1877|3|1|1294.37|0.06|0.02|N|O|1998-06-21|1998-08-10|1998-07-07|TAKE BACK RETURN|FOB|te alongside of the even accounts. excuses 10882|32249|2250|4|3|3543.72|0.02|0.05|N|O|1998-08-09|1998-08-05|1998-08-28|NONE|FOB|inal, unusual depos 10882|195999|1038|5|16|33519.84|0.00|0.04|N|O|1998-08-25|1998-08-18|1998-09-22|TAKE BACK RETURN|REG AIR|, special somas s 10883|50665|666|1|47|75936.02|0.06|0.04|N|O|1998-05-17|1998-04-04|1998-06-01|DELIVER IN PERSON|SHIP|ven requests. flu 10883|122733|270|2|19|33358.87|0.06|0.05|N|O|1998-04-22|1998-04-07|1998-05-01|DELIVER IN PERSON|TRUCK| accounts sleep carefully-- furiously un 10883|167101|4650|3|9|10512.90|0.10|0.05|N|O|1998-03-04|1998-03-08|1998-04-02|DELIVER IN PERSON|MAIL|ickly. pending, final th 10883|57855|7856|4|14|25379.90|0.07|0.01|N|O|1998-03-02|1998-04-18|1998-03-08|DELIVER IN PERSON|RAIL|nic packages maintain. r 10884|139163|4190|1|17|20436.72|0.01|0.07|A|F|1995-02-02|1995-02-02|1995-02-27|COLLECT COD|FOB|counts cajole q 10884|103673|8694|2|32|53653.44|0.06|0.07|A|F|1994-12-30|1995-01-03|1995-01-09|DELIVER IN PERSON|REG AIR|ost quickly 10884|22246|9753|3|21|24533.04|0.03|0.06|R|F|1995-01-13|1995-01-16|1995-02-03|DELIVER IN PERSON|MAIL|deposits against 10884|81535|9060|4|1|1516.53|0.05|0.00|R|F|1995-01-08|1995-01-29|1995-01-24|COLLECT COD|FOB|y final requests. fi 10884|150849|8395|5|8|15198.72|0.01|0.00|R|F|1995-01-19|1995-01-12|1995-02-13|DELIVER IN PERSON|FOB|erns along the blithely express instru 10884|111093|8627|6|50|55204.50|0.00|0.03|R|F|1995-01-15|1994-12-27|1995-01-18|NONE|MAIL|uses integrate slyly fur 10884|127911|424|7|46|89189.86|0.09|0.03|A|F|1994-12-14|1995-01-16|1994-12-23|COLLECT COD|FOB|ssly furiously ironic dep 10885|196244|8764|1|22|29485.28|0.01|0.05|N|O|1998-05-29|1998-07-13|1998-06-04|NONE|AIR|tegrate fluffily ironic accounts. quickl 10885|67707|2720|2|36|60289.20|0.01|0.01|N|O|1998-05-09|1998-06-21|1998-06-02|NONE|MAIL|thely even asymptotes nag furio 10885|141866|9409|3|9|17170.74|0.04|0.00|N|O|1998-05-27|1998-05-22|1998-06-04|DELIVER IN PERSON|REG AIR|nding accounts; slyl 10885|133992|1532|4|45|91169.55|0.09|0.05|N|O|1998-04-22|1998-05-25|1998-05-07|NONE|MAIL|cial, even theodolit 10885|191645|6684|5|47|81622.08|0.00|0.04|N|O|1998-05-01|1998-07-16|1998-05-28|TAKE BACK RETURN|REG AIR| the pending packages haggle asymptot 10886|77856|7857|1|50|91692.50|0.03|0.02|A|F|1994-12-16|1994-11-18|1995-01-10|COLLECT COD|FOB|elets serve slyly about the accounts 10886|113896|1430|2|42|80215.38|0.05|0.00|R|F|1994-11-18|1994-11-02|1994-12-03|COLLECT COD|AIR|dependencies haggle quickly along t 10886|45966|3479|3|36|68830.56|0.02|0.06|R|F|1994-10-21|1994-10-16|1994-10-24|DELIVER IN PERSON|AIR|nts sleep blithely. id 10886|147658|5201|4|12|20467.80|0.07|0.08|A|F|1994-10-27|1994-10-29|1994-10-30|NONE|TRUCK|ackages. 
fi 10886|79493|2001|5|21|30922.29|0.03|0.03|R|F|1994-10-27|1994-11-13|1994-11-14|DELIVER IN PERSON|FOB| across th 10886|36781|4291|6|21|36073.38|0.06|0.08|R|F|1994-09-03|1994-09-26|1994-09-11|COLLECT COD|SHIP|ternes along the ironic r 10886|170308|5343|7|13|17917.90|0.06|0.05|R|F|1994-10-11|1994-11-04|1994-11-08|COLLECT COD|TRUCK|l sauternes. ideas wake 10887|156868|4414|1|36|69294.96|0.05|0.00|N|O|1995-08-17|1995-07-09|1995-09-07|DELIVER IN PERSON|SHIP|blithely ironic packages cajole caref 10912|127624|5161|1|26|42942.12|0.02|0.08|N|O|1996-12-30|1997-03-06|1997-01-24|NONE|MAIL|nts. regular packages promise care 10912|173074|5592|2|12|13764.84|0.07|0.00|N|O|1997-03-15|1997-02-06|1997-03-20|DELIVER IN PERSON|RAIL|lyly about the furiously ev 10912|38240|8241|3|37|43594.88|0.02|0.08|N|O|1997-03-05|1997-03-23|1997-03-30|COLLECT COD|AIR|e after the express accounts. 10912|89867|7392|4|48|89129.28|0.03|0.02|N|O|1997-04-06|1997-02-03|1997-05-01|TAKE BACK RETURN|REG AIR|r accounts detect slyly 10912|191621|4141|5|31|53091.22|0.10|0.00|N|O|1997-03-30|1997-02-25|1997-04-05|NONE|FOB|ar, regular requests w 10912|167264|7265|6|4|5325.04|0.07|0.06|N|O|1997-03-03|1997-02-15|1997-03-18|COLLECT COD|SHIP|. regular deposits engage over 10913|66990|4509|1|27|52838.73|0.10|0.02|A|F|1993-03-23|1993-04-26|1993-04-10|TAKE BACK RETURN|AIR|blithely special, ironic 10913|96835|9345|2|18|32972.94|0.08|0.03|R|F|1993-02-26|1993-04-30|1993-03-24|TAKE BACK RETURN|TRUCK|fully carefully pending acco 10913|95138|157|3|30|33993.90|0.09|0.08|R|F|1993-02-19|1993-04-25|1993-02-27|DELIVER IN PERSON|RAIL|requests nag carefully. ruthle 10914|153523|6039|1|19|29953.88|0.09|0.03|R|F|1995-02-26|1995-04-01|1995-03-07|COLLECT COD|REG AIR|l packages. pinto beans eat. f 10914|58808|1314|2|28|49470.40|0.07|0.04|R|F|1995-03-24|1995-03-06|1995-04-08|TAKE BACK RETURN|AIR|cajole quickly. bli 10914|177450|5002|3|13|19856.85|0.05|0.02|R|F|1995-04-24|1995-03-28|1995-05-20|NONE|TRUCK|ever. ironic, bold ideas boost 10914|189406|4443|4|32|47852.80|0.03|0.07|R|F|1995-02-28|1995-02-27|1995-03-07|DELIVER IN PERSON|TRUCK| blithely exp 10914|144158|9187|5|24|28851.60|0.05|0.06|A|F|1995-04-28|1995-03-27|1995-05-11|DELIVER IN PERSON|TRUCK|riously express pack 10914|56255|1266|6|24|29070.00|0.04|0.01|A|F|1995-04-21|1995-04-03|1995-04-26|NONE|REG AIR|ans sleep slyly along 10915|91562|9090|1|2|3107.12|0.00|0.05|R|F|1993-12-30|1994-01-28|1994-01-02|COLLECT COD|AIR| ruthless p 10915|71987|4495|2|6|11753.88|0.09|0.05|R|F|1993-11-30|1994-01-14|1993-12-05|TAKE BACK RETURN|SHIP| print carefully unusual 10915|77222|9730|3|30|35976.60|0.04|0.02|A|F|1994-01-07|1994-01-19|1994-01-23|NONE|AIR|refully express pinto beans-- ironi 10915|64848|4849|4|22|39882.48|0.07|0.01|A|F|1994-02-20|1993-12-28|1994-02-25|DELIVER IN PERSON|SHIP|ites use ab 10915|97153|4681|5|44|50606.60|0.00|0.02|A|F|1994-02-22|1993-12-21|1994-03-10|COLLECT COD|MAIL|the carefully regular mu 10916|182329|9884|1|39|55041.48|0.00|0.08|N|O|1995-07-02|1995-05-17|1995-07-13|COLLECT COD|MAIL|quickly bold packages 10916|194545|9584|2|33|54104.82|0.09|0.02|R|F|1995-04-17|1995-05-04|1995-04-25|DELIVER IN PERSON|AIR|t the even frets. final instructions caj 10916|107053|4584|3|32|33921.60|0.08|0.07|N|F|1995-06-13|1995-05-28|1995-07-02|NONE|TRUCK|ometimes blithe 10916|38354|5864|4|30|38770.50|0.01|0.02|R|F|1995-05-15|1995-04-26|1995-06-06|COLLECT COD|RAIL| blithely regular requests! 
closely blith 10916|14664|9667|5|34|53674.44|0.05|0.05|N|F|1995-06-10|1995-05-27|1995-07-07|NONE|SHIP|platelets along the special 10916|53239|3240|6|2|2384.46|0.01|0.00|A|F|1995-05-21|1995-04-14|1995-05-25|DELIVER IN PERSON|SHIP|wake according 10916|175631|5632|7|10|17066.30|0.09|0.05|N|O|1995-06-21|1995-05-28|1995-07-06|NONE|MAIL|cally final ac 10917|19234|9235|1|15|17298.45|0.01|0.01|R|F|1992-03-23|1992-03-31|1992-04-12|DELIVER IN PERSON|AIR|pite the slyly final p 10917|56165|3681|2|1|1121.16|0.05|0.02|A|F|1992-06-09|1992-04-30|1992-06-10|COLLECT COD|MAIL|usly regular dolphins. packages 10917|149827|9828|3|18|33782.76|0.09|0.08|A|F|1992-03-12|1992-04-18|1992-04-08|DELIVER IN PERSON|AIR| doubt furiously carefully regula 10917|125734|3271|4|45|79187.85|0.07|0.02|A|F|1992-04-10|1992-04-07|1992-04-14|TAKE BACK RETURN|REG AIR|deposits solve furiously. blithely final 10917|195956|3514|5|19|38987.05|0.05|0.04|A|F|1992-04-17|1992-04-14|1992-04-29|DELIVER IN PERSON|SHIP|iously final deposits are after the qu 10918|31027|3531|1|8|7664.16|0.09|0.06|N|O|1996-02-13|1995-11-29|1996-03-03|NONE|AIR|the furious 10918|65587|8094|2|46|71418.68|0.05|0.04|N|O|1995-11-30|1995-12-27|1995-12-19|COLLECT COD|SHIP|s. regular accounts throughout the p 10919|136404|8918|1|45|64818.00|0.05|0.04|A|F|1993-07-04|1993-05-29|1993-07-15|COLLECT COD|SHIP|telets across the 10944|83518|8535|1|11|16516.61|0.05|0.05|N|O|1996-01-14|1995-12-28|1996-02-05|NONE|SHIP|ourts. deposits nag blithely at the accou 10944|75874|889|2|41|75844.67|0.07|0.07|N|O|1995-12-03|1996-02-03|1995-12-14|DELIVER IN PERSON|FOB|egularly bold deposits 10944|669|3170|3|34|53368.44|0.01|0.06|N|O|1995-12-02|1995-12-20|1995-12-03|COLLECT COD|RAIL|ording to the blithely silent accounts. sly 10944|36452|1459|4|9|12496.05|0.05|0.00|N|O|1996-02-21|1996-01-29|1996-02-27|COLLECT COD|RAIL|ly regular packages! carefu 10945|179796|9797|1|28|52522.12|0.06|0.07|A|F|1992-02-13|1992-03-30|1992-02-25|TAKE BACK RETURN|REG AIR|egular packages haggle. slyly i 10945|26118|6119|2|35|36543.85|0.06|0.06|A|F|1992-02-19|1992-03-13|1992-03-14|COLLECT COD|AIR| carefully even theodolites. 10945|155258|5259|3|17|22325.25|0.09|0.01|A|F|1992-01-28|1992-02-13|1992-02-22|NONE|FOB|telets use slyly according to the furi 10945|14709|7211|4|49|79561.30|0.08|0.01|R|F|1992-02-18|1992-03-06|1992-03-07|DELIVER IN PERSON|RAIL|fully furiously even theodolites. quickly 10945|197899|419|5|5|9984.45|0.04|0.06|R|F|1992-03-19|1992-02-24|1992-04-12|NONE|TRUCK|iously unusual ideas haggle slyly. ironi 10946|115457|480|1|43|63315.35|0.08|0.05|N|O|1997-05-11|1997-06-09|1997-05-19|NONE|SHIP|ole furiously. bold packages slee 10946|73825|3826|2|30|53964.60|0.04|0.02|N|O|1997-06-26|1997-05-10|1997-07-18|COLLECT COD|RAIL|es. carefully pending instru 10946|166811|4360|3|20|37556.20|0.10|0.06|N|O|1997-03-30|1997-05-23|1997-04-01|NONE|RAIL|uthless ideas wak 10946|178282|5834|4|39|53050.92|0.01|0.07|N|O|1997-05-29|1997-05-25|1997-06-03|DELIVER IN PERSON|AIR|ns. slyly special pint 10946|172475|7510|5|48|74278.56|0.06|0.03|N|O|1997-05-10|1997-05-20|1997-06-04|TAKE BACK RETURN|TRUCK|riously fi 10946|180260|5297|6|39|52270.14|0.04|0.02|N|O|1997-05-30|1997-06-01|1997-06-09|COLLECT COD|FOB|nusual inst 10947|167855|372|1|21|40379.85|0.07|0.04|N|O|1996-01-14|1996-03-28|1996-01-25|TAKE BACK RETURN|MAIL|dogged requests. 
slyly even plate 10947|136724|1751|2|1|1760.72|0.05|0.03|N|O|1996-02-20|1996-03-24|1996-02-23|DELIVER IN PERSON|TRUCK|the carefully final requests 10947|110791|5814|3|42|75675.18|0.05|0.02|N|O|1996-05-06|1996-02-07|1996-05-16|TAKE BACK RETURN|SHIP|pinto beans sleep fluffily against the p 10947|58273|5789|4|43|52944.61|0.09|0.05|N|O|1996-04-07|1996-04-02|1996-04-23|TAKE BACK RETURN|REG AIR|y. carefully final accounts sleep fluff 10948|3770|3771|1|44|73645.88|0.09|0.01|A|F|1992-10-24|1992-10-16|1992-11-01|TAKE BACK RETURN|SHIP|leep blithely deposits. foxes n 10948|157064|7065|2|49|54931.94|0.05|0.06|A|F|1992-11-25|1992-09-16|1992-12-14|DELIVER IN PERSON|FOB|y along the quickly bold a 10948|170795|8347|3|43|80228.97|0.01|0.08|R|F|1992-10-27|1992-11-09|1992-11-19|DELIVER IN PERSON|TRUCK| special requests 10948|148621|3650|4|27|45079.74|0.01|0.06|A|F|1992-10-09|1992-10-31|1992-10-24|NONE|AIR|ver the slyly regular dependencies nag b 10949|24705|9710|1|24|39112.80|0.10|0.07|A|F|1994-05-13|1994-06-06|1994-06-12|TAKE BACK RETURN|SHIP|yly unusual theodolites? deposits sleep fl 10949|9227|9228|2|19|21588.18|0.01|0.07|R|F|1994-07-13|1994-06-10|1994-07-16|NONE|AIR|deposits. blithely special requests sle 10949|75092|7600|3|39|41616.51|0.02|0.05|R|F|1994-05-26|1994-06-01|1994-06-23|DELIVER IN PERSON|MAIL|owly about the regular, regu 10950|140319|2834|1|29|39419.99|0.00|0.00|N|O|1997-08-01|1997-08-25|1997-08-14|NONE|REG AIR|ts haggle ruthlessly depos 10951|102569|7590|1|26|40860.56|0.02|0.08|R|F|1993-02-10|1993-01-31|1993-02-12|TAKE BACK RETURN|SHIP|ctions promise blithely ironic packages. 10951|64517|4518|2|22|32593.22|0.10|0.05|R|F|1992-12-09|1993-01-14|1992-12-24|DELIVER IN PERSON|MAIL|ages. silent accounts cajole ruthlessly. 10951|135353|380|3|26|36097.10|0.08|0.01|R|F|1992-12-27|1993-01-19|1993-01-13|DELIVER IN PERSON|MAIL|ns. requests wake fluffily. 10951|74088|9103|4|31|32924.48|0.03|0.05|R|F|1992-12-04|1993-01-27|1992-12-29|COLLECT COD|FOB|final pinto 10951|183009|3010|5|21|22932.00|0.10|0.08|R|F|1992-12-10|1992-12-18|1993-01-02|NONE|MAIL|silent packages? blit 10976|24040|1547|1|11|10604.44|0.02|0.04|A|F|1992-12-21|1992-11-13|1993-01-13|TAKE BACK RETURN|SHIP|lyly pending pinto beans 10976|38295|3302|2|26|32065.54|0.04|0.03|A|F|1993-01-05|1992-11-14|1993-01-20|TAKE BACK RETURN|FOB|ssly against the cour 10976|18259|3262|3|19|22367.75|0.05|0.03|R|F|1992-11-28|1992-12-25|1992-12-16|TAKE BACK RETURN|REG AIR|even, regular dependencies haggle 10976|77587|5109|4|13|20339.54|0.08|0.06|R|F|1992-10-26|1992-11-15|1992-11-09|DELIVER IN PERSON|REG AIR|y ironic ideas unwind in 10976|190762|5801|5|28|51877.28|0.03|0.08|A|F|1992-12-08|1992-12-07|1992-12-16|TAKE BACK RETURN|TRUCK|its use sly 10976|128068|8069|6|27|29593.62|0.10|0.06|R|F|1992-10-21|1992-12-25|1992-10-27|COLLECT COD|SHIP|al ideas above the quickly 10976|34654|2164|7|13|20652.45|0.10|0.08|A|F|1992-11-13|1992-12-05|1992-11-21|NONE|RAIL|ake slyly. attainme 10977|23770|1277|1|41|69444.57|0.01|0.08|N|O|1998-06-19|1998-07-24|1998-07-14|DELIVER IN PERSON|SHIP|even packages. stealt 10977|96874|9384|2|6|11225.22|0.10|0.01|N|O|1998-09-01|1998-08-14|1998-09-19|TAKE BACK RETURN|RAIL|nal asymptotes 10977|149166|4195|3|3|3645.48|0.03|0.04|N|O|1998-06-27|1998-08-08|1998-06-30|DELIVER IN PERSON|MAIL|y slow accounts according to the som 10977|17187|9689|4|31|34229.58|0.00|0.05|N|O|1998-09-21|1998-08-17|1998-10-11|NONE|TRUCK|osits play! 
fluffily unusual instructions i 10978|159639|2155|1|40|67945.20|0.00|0.04|A|F|1994-05-31|1994-05-06|1994-06-13|DELIVER IN PERSON|TRUCK|ole slyly even, pendi 10978|45660|669|2|13|20873.58|0.07|0.03|R|F|1994-06-11|1994-06-13|1994-07-07|DELIVER IN PERSON|MAIL|y express hockey players except the care 10978|1017|6018|3|32|29376.32|0.10|0.05|R|F|1994-05-03|1994-05-13|1994-05-15|COLLECT COD|REG AIR|e slyly about the daringly pending p 10979|138834|8835|1|28|52439.24|0.09|0.02|N|O|1995-11-09|1995-11-06|1995-12-05|DELIVER IN PERSON|TRUCK|ng the slyly bold theodolites. fi 10979|166462|8979|2|5|7642.30|0.04|0.03|N|O|1996-01-30|1995-11-18|1996-01-31|NONE|FOB| will have to are pack 10979|116370|3904|3|25|34659.25|0.10|0.06|N|O|1995-12-18|1995-12-31|1995-12-29|NONE|TRUCK|blithely even foxes wake 10979|26845|1850|4|11|19490.24|0.00|0.07|N|O|1995-10-29|1995-12-04|1995-11-17|TAKE BACK RETURN|TRUCK|nal decoys. carefully even p 10979|171962|9514|5|23|46781.08|0.02|0.03|N|O|1995-10-28|1995-11-07|1995-11-09|NONE|RAIL|quests wake slyly about the blithely silent 10979|161253|1254|6|27|35484.75|0.06|0.05|N|O|1995-11-05|1995-11-13|1995-11-20|COLLECT COD|AIR|e slyly sp 10979|170870|5905|7|5|9704.35|0.09|0.05|N|O|1995-10-25|1995-12-15|1995-11-10|TAKE BACK RETURN|SHIP|pitaphs cajole 10980|960|961|1|28|52106.88|0.09|0.07|N|O|1996-11-13|1996-10-12|1996-12-02|TAKE BACK RETURN|SHIP|es. carefully even platelets na 10980|128470|6007|2|43|64434.21|0.04|0.01|N|O|1996-10-08|1996-09-09|1996-11-02|TAKE BACK RETURN|MAIL|ackages wake according to the asymp 10980|179662|2180|3|1|1741.66|0.03|0.02|N|O|1996-08-10|1996-09-24|1996-09-06|TAKE BACK RETURN|AIR|e carefully 10981|67653|160|1|26|42136.90|0.04|0.07|A|F|1993-08-16|1993-07-10|1993-09-13|NONE|FOB|ingly regular theodolites hinder blithely 10981|32871|2872|2|20|36077.40|0.01|0.06|R|F|1993-08-04|1993-07-04|1993-08-25|COLLECT COD|TRUCK| slyly regular 10981|51280|3786|3|25|30782.00|0.10|0.00|R|F|1993-07-04|1993-08-11|1993-07-06|TAKE BACK RETURN|MAIL|ecial asymptotes. final fox 10981|60378|2885|4|16|21413.92|0.07|0.01|R|F|1993-06-03|1993-07-25|1993-06-20|DELIVER IN PERSON|SHIP|y regular foxes are quickly care 10981|158862|1378|5|19|36496.34|0.08|0.02|R|F|1993-08-25|1993-07-01|1993-08-28|TAKE BACK RETURN|SHIP|nag slyly-- c 10981|179962|9963|6|30|61258.80|0.01|0.08|R|F|1993-06-09|1993-07-15|1993-06-12|DELIVER IN PERSON|AIR|heodolites above the 10981|19368|6872|7|30|38620.80|0.02|0.00|A|F|1993-07-10|1993-08-12|1993-07-26|DELIVER IN PERSON|TRUCK|egular packages wake above the 10982|107537|5068|1|6|9267.18|0.07|0.01|A|F|1993-02-25|1993-01-22|1993-03-01|NONE|REG AIR| fluffily speci 10982|167653|2686|2|18|30971.70|0.07|0.06|A|F|1993-01-31|1993-01-14|1993-02-09|TAKE BACK RETURN|AIR|instructions are foxes. de 10982|130743|3257|3|39|69175.86|0.05|0.06|A|F|1993-01-31|1993-01-26|1993-02-08|DELIVER IN PERSON|SHIP|pinto beans after the blithely 10982|186156|1193|4|35|43475.25|0.09|0.05|R|F|1993-01-06|1993-01-13|1993-01-22|DELIVER IN PERSON|SHIP|lithely pen 10982|111291|1292|5|4|5209.16|0.06|0.01|A|F|1993-02-16|1993-02-09|1993-03-03|NONE|REG AIR|refully special ideas: quick 10982|51891|1892|6|25|46072.25|0.08|0.07|R|F|1993-01-09|1993-01-13|1993-01-26|COLLECT COD|AIR|refully accordin 10982|120551|5576|7|18|28287.90|0.01|0.04|R|F|1992-12-21|1993-03-11|1993-01-17|TAKE BACK RETURN|SHIP| pending, final requests nag regular 10983|84365|6874|1|36|48576.96|0.07|0.04|N|O|1997-03-15|1997-03-12|1997-04-01|COLLECT COD|AIR| to the regular ideas. 
slyly even re 10983|116106|6107|2|43|48250.30|0.06|0.07|N|O|1997-04-15|1997-03-18|1997-05-11|DELIVER IN PERSON|MAIL|y regular accounts. gifts sleep ab 11008|181073|3592|1|30|34622.10|0.10|0.06|A|F|1994-03-05|1994-04-04|1994-03-22|TAKE BACK RETURN|RAIL| blithely slow pinto beans. fluff 11008|84208|4209|2|39|46495.80|0.00|0.03|R|F|1994-06-10|1994-04-18|1994-06-11|TAKE BACK RETURN|SHIP|atelets. fluffily 11008|79887|2395|3|16|29870.08|0.08|0.01|A|F|1994-05-07|1994-04-02|1994-05-31|NONE|MAIL|ely final foxes. bold, final depe 11008|58854|3865|4|15|27192.75|0.05|0.03|R|F|1994-05-04|1994-03-31|1994-06-02|DELIVER IN PERSON|TRUCK|lyly along the blithely bold wa 11008|144533|9562|5|42|66256.26|0.01|0.00|R|F|1994-03-23|1994-05-24|1994-04-18|DELIVER IN PERSON|RAIL|rash furiously blithely silent 11009|148175|5718|1|41|50149.97|0.07|0.05|N|O|1997-03-09|1997-05-17|1997-04-03|TAKE BACK RETURN|MAIL| accounts. furiously regular cou 11009|166636|6637|2|6|10215.78|0.00|0.08|N|O|1997-04-06|1997-04-21|1997-04-24|TAKE BACK RETURN|TRUCK|lly bold a 11010|81106|8631|1|23|25003.30|0.08|0.07|N|O|1997-04-03|1997-06-20|1997-04-18|COLLECT COD|AIR|uests. ironic, pend 11010|147609|124|2|23|38101.80|0.01|0.03|N|O|1997-06-07|1997-05-04|1997-06-26|DELIVER IN PERSON|FOB|ter the furiou 11010|56411|3927|3|8|10939.28|0.03|0.08|N|O|1997-04-13|1997-04-27|1997-05-10|DELIVER IN PERSON|FOB|er the express, express sheaves w 11010|96985|2004|4|22|43603.56|0.09|0.00|N|O|1997-04-17|1997-05-20|1997-04-21|DELIVER IN PERSON|SHIP|s about the enticingly final ideas wa 11010|20904|5909|5|39|71171.10|0.07|0.06|N|O|1997-04-28|1997-05-30|1997-05-03|TAKE BACK RETURN|MAIL|regular requests. slyly special inst 11011|172485|5003|1|16|24919.68|0.05|0.08|R|F|1992-05-27|1992-08-01|1992-05-29|NONE|FOB|xpress, quiet deposit 11011|140442|7985|2|50|74122.00|0.07|0.02|R|F|1992-08-18|1992-08-11|1992-09-02|COLLECT COD|MAIL|nd the furiously ironic 11011|723|5724|3|36|58453.92|0.06|0.05|R|F|1992-08-02|1992-06-28|1992-08-15|TAKE BACK RETURN|TRUCK|unusual, pending foxes cajole unusual, un 11011|141985|4500|4|29|58782.42|0.00|0.02|R|F|1992-05-27|1992-07-06|1992-06-26|COLLECT COD|RAIL|ts above th 11011|128974|3999|5|14|28041.58|0.03|0.08|A|F|1992-08-30|1992-08-03|1992-09-08|NONE|SHIP|dolites. express, regular orbits agai 11011|57839|7840|6|12|21561.96|0.08|0.06|A|F|1992-05-22|1992-08-01|1992-06-02|DELIVER IN PERSON|FOB|ully ironic theodolites about th 11011|95939|3467|7|31|59982.83|0.08|0.01|R|F|1992-06-18|1992-06-19|1992-06-23|TAKE BACK RETURN|FOB| instructions. final r 11012|40913|8426|1|47|87133.77|0.06|0.06|N|O|1998-07-25|1998-05-23|1998-08-01|NONE|AIR| about the ironi 11012|17062|7063|2|49|47973.94|0.07|0.08|N|O|1998-07-14|1998-06-14|1998-08-08|COLLECT COD|REG AIR| final dependencies. special pack 11013|44071|6576|1|2|2030.14|0.08|0.05|R|F|1994-07-24|1994-06-09|1994-08-02|DELIVER IN PERSON|AIR|ges nag slyly even 11014|32212|2213|1|43|49201.03|0.04|0.02|A|F|1993-12-16|1993-11-15|1993-12-27|COLLECT COD|RAIL|kages haggle 11014|170763|8315|2|9|16503.84|0.10|0.05|A|F|1993-12-12|1993-10-09|1994-01-06|NONE|TRUCK|l asymptotes. 
slyly ev 11014|6997|9498|3|20|38079.80|0.02|0.00|A|F|1993-09-24|1993-12-03|1993-09-28|DELIVER IN PERSON|RAIL|y final warhorses after the special 11014|184056|6575|4|36|41041.80|0.04|0.01|R|F|1993-11-10|1993-11-05|1993-12-09|TAKE BACK RETURN|REG AIR| carefully final d 11014|25466|5467|5|5|6957.30|0.08|0.07|A|F|1993-09-18|1993-10-13|1993-09-19|NONE|REG AIR|y around the blit 11015|138737|8738|1|16|28411.68|0.04|0.05|N|O|1997-12-03|1997-11-30|1997-12-26|DELIVER IN PERSON|SHIP|lyly final accounts are blithel 11015|152427|9973|2|16|23670.72|0.05|0.07|N|O|1997-10-06|1997-10-19|1997-10-28|NONE|SHIP|carefully 11015|25706|8209|3|15|24475.50|0.10|0.02|N|O|1998-01-06|1997-11-15|1998-01-18|COLLECT COD|SHIP|s. depths na 11015|39006|6516|4|43|40635.00|0.08|0.03|N|O|1997-09-22|1997-12-11|1997-09-24|COLLECT COD|REG AIR|thely. slyly bold theodolite 11015|171796|1797|5|13|24281.27|0.08|0.04|N|O|1997-10-02|1997-11-25|1997-10-31|TAKE BACK RETURN|MAIL|he always final foxes? silent deposits 11040|170940|8492|1|48|96525.12|0.03|0.02|N|O|1995-11-13|1996-01-14|1995-12-09|TAKE BACK RETURN|SHIP|sly ironic requ 11040|186364|1401|2|19|27556.84|0.07|0.08|N|O|1996-02-11|1995-12-03|1996-02-27|NONE|RAIL|inal deposits. carefully bold asymptot 11040|10168|5171|3|21|22641.36|0.03|0.06|N|O|1996-01-15|1995-12-12|1996-02-06|TAKE BACK RETURN|MAIL|ly pending accounts. furiously slow reques 11040|77456|7457|4|49|70239.05|0.04|0.01|N|O|1996-02-14|1996-01-18|1996-02-27|COLLECT COD|MAIL|deposits detect fluffily e 11041|47470|7471|1|40|56698.80|0.03|0.01|N|O|1997-06-29|1997-05-29|1997-07-27|NONE|SHIP| blithely fluffily 11041|18491|8492|2|10|14094.90|0.10|0.06|N|O|1997-05-30|1997-05-29|1997-06-27|COLLECT COD|TRUCK| ironic accounts alongsid 11041|168529|8530|3|9|14377.68|0.04|0.02|N|O|1997-07-05|1997-06-02|1997-07-19|DELIVER IN PERSON|FOB|egrate furio 11041|54440|9451|4|24|33466.56|0.06|0.03|N|O|1997-04-17|1997-05-13|1997-04-18|COLLECT COD|MAIL|its wake. blithely fin 11041|158840|3871|5|12|22786.08|0.08|0.03|N|O|1997-05-18|1997-05-06|1997-06-10|TAKE BACK RETURN|SHIP|was fluffily pending 11042|8030|531|1|6|5628.18|0.04|0.08|N|O|1998-06-03|1998-07-22|1998-06-05|TAKE BACK RETURN|SHIP|ep furiously re 11042|156474|1505|2|49|74993.03|0.03|0.04|N|O|1998-07-04|1998-07-24|1998-07-14|NONE|SHIP| orbits. fluffily 11042|42096|7105|3|49|50866.41|0.04|0.07|N|O|1998-05-30|1998-06-30|1998-06-19|DELIVER IN PERSON|SHIP|ng, final asymptotes. regular, ex 11042|95673|692|4|41|68415.47|0.02|0.05|N|O|1998-07-29|1998-07-14|1998-08-01|COLLECT COD|RAIL|o beans are s 11043|115203|7715|1|23|28018.60|0.03|0.06|R|F|1995-03-17|1995-04-30|1995-03-29|DELIVER IN PERSON|SHIP|the bold ideas sleep against the fur 11043|2343|9844|2|24|29888.16|0.07|0.04|R|F|1995-02-24|1995-05-11|1995-02-25|NONE|FOB|he regular, unusual packages a 11043|167568|5117|3|23|37617.88|0.04|0.03|R|F|1995-05-30|1995-05-19|1995-06-08|DELIVER IN PERSON|TRUCK|nooze furiously pen 11043|101050|8581|4|21|22072.05|0.01|0.05|R|F|1995-04-26|1995-04-29|1995-05-20|DELIVER IN PERSON|RAIL|ments. 
final requests sleep into the even 11044|119582|4605|1|37|59258.46|0.07|0.01|R|F|1992-08-31|1992-08-08|1992-09-03|DELIVER IN PERSON|FOB|s along the fur 11044|8460|8461|2|6|8210.76|0.03|0.00|R|F|1992-08-22|1992-07-30|1992-09-21|TAKE BACK RETURN|MAIL|ntly ironic attainments 11044|174977|7495|3|15|30779.55|0.01|0.02|A|F|1992-09-07|1992-08-12|1992-10-02|NONE|FOB|y final platelets are blithely caref 11045|9629|7130|1|36|55390.32|0.03|0.00|A|F|1992-05-09|1992-05-14|1992-05-29|COLLECT COD|TRUCK|after the pack 11045|23120|5623|2|16|16689.92|0.04|0.02|A|F|1992-07-23|1992-05-04|1992-08-06|DELIVER IN PERSON|RAIL|eposits detect quick 11045|175517|5518|3|22|35035.22|0.03|0.00|A|F|1992-05-16|1992-05-04|1992-06-10|NONE|MAIL|s play carefully. theodolites 11045|100859|5880|4|5|9299.25|0.00|0.03|R|F|1992-06-11|1992-05-12|1992-07-05|NONE|MAIL|refully final deposits boost ar 11045|134456|9483|5|46|68560.70|0.06|0.06|R|F|1992-04-15|1992-05-06|1992-04-30|COLLECT COD|RAIL|requests among the furiously final excuse 11045|191187|1188|6|18|23007.24|0.03|0.07|R|F|1992-04-09|1992-05-17|1992-04-12|NONE|SHIP|olites integrate slyly express, speci 11045|73077|5585|7|27|28351.89|0.06|0.04|R|F|1992-06-26|1992-05-24|1992-07-18|NONE|RAIL|asymptotes. blithely even excuses are qui 11046|195577|5578|1|46|76938.22|0.06|0.03|N|O|1997-08-01|1997-08-24|1997-08-11|NONE|RAIL|gular, bold accounts grow evenl 11046|48857|1362|2|33|59593.05|0.07|0.05|N|O|1997-10-06|1997-08-23|1997-10-22|TAKE BACK RETURN|TRUCK|ronic foxes. regul 11046|131772|4286|3|45|81169.65|0.02|0.02|N|O|1997-08-03|1997-07-21|1997-08-21|TAKE BACK RETURN|TRUCK|haggle. asymptotes use fluffily according 11046|83996|1521|4|24|47519.76|0.02|0.04|N|O|1997-09-21|1997-08-02|1997-10-14|TAKE BACK RETURN|MAIL|ckly regular asymptotes breach furi 11046|104337|1868|5|4|5365.32|0.04|0.02|N|O|1997-09-08|1997-09-09|1997-10-06|NONE|AIR|furious deposits. evenly f 11046|183885|6404|6|48|94506.24|0.02|0.04|N|O|1997-08-14|1997-08-22|1997-08-21|TAKE BACK RETURN|RAIL|ever. quickly regular pinto beans 11046|150398|399|7|18|26071.02|0.08|0.02|N|O|1997-10-08|1997-09-09|1997-10-12|COLLECT COD|AIR|ly quickly 11047|125940|5941|1|34|66841.96|0.05|0.08|A|F|1994-03-29|1994-02-04|1994-04-27|NONE|FOB|boost against the regula 11047|65936|949|2|15|28528.95|0.06|0.06|R|F|1994-02-08|1994-01-20|1994-03-09|TAKE BACK RETURN|SHIP|dencies. regular accou 11047|185685|8204|3|4|7082.72|0.09|0.02|R|F|1994-01-14|1994-02-01|1994-01-30|COLLECT COD|AIR|never special asymptotes. p 11072|142734|7763|1|37|65739.01|0.04|0.02|N|O|1996-09-26|1996-09-29|1996-10-13|NONE|RAIL|ely after the quickly final instructions. c 11072|125993|8506|2|34|68645.66|0.00|0.04|N|O|1996-07-21|1996-08-31|1996-08-03|DELIVER IN PERSON|AIR| use carefully unusual p 11072|151661|6692|3|29|49667.14|0.01|0.04|N|O|1996-08-10|1996-08-13|1996-08-26|TAKE BACK RETURN|SHIP|ss the carefully special instructions 11072|153937|8968|4|21|41809.53|0.06|0.06|N|O|1996-11-08|1996-09-14|1996-11-09|TAKE BACK RETURN|REG AIR|ending platelets nag furiously enticing 11072|5902|3403|5|48|86779.20|0.03|0.02|N|O|1996-08-12|1996-10-03|1996-08-21|NONE|SHIP|l packages wake ideas. 
furiou 11073|142790|7819|1|40|73311.60|0.04|0.00|R|F|1995-01-09|1995-01-08|1995-01-16|NONE|TRUCK|ages against the even ideas sleep quiet 11073|66632|9139|2|5|7993.15|0.07|0.04|A|F|1995-03-24|1995-01-08|1995-04-17|NONE|TRUCK|egular pinto beans are carefully against t 11073|67957|464|3|43|82772.85|0.05|0.04|A|F|1995-03-15|1995-02-26|1995-03-17|NONE|MAIL|ges are furiousl 11073|148643|1158|4|37|62590.68|0.03|0.04|R|F|1994-12-16|1995-01-03|1995-01-14|DELIVER IN PERSON|SHIP|ounts. blithely thin pa 11074|159942|9943|1|46|92089.24|0.06|0.01|N|O|1995-06-23|1995-07-20|1995-07-19|COLLECT COD|MAIL|y above the caref 11074|130768|3282|2|18|32377.68|0.04|0.02|N|O|1995-08-15|1995-08-11|1995-08-24|TAKE BACK RETURN|MAIL|ave to integrat 11074|16100|1103|3|44|44708.40|0.08|0.01|N|O|1995-08-11|1995-07-11|1995-08-24|NONE|RAIL|leep carefully acro 11074|192297|2298|4|44|61128.76|0.10|0.06|N|O|1995-07-13|1995-08-28|1995-07-30|NONE|REG AIR|haggle stealthily slyly express requests. 11074|132508|2509|5|24|36972.00|0.08|0.07|N|O|1995-08-10|1995-08-03|1995-08-20|NONE|MAIL|cross the even reque 11074|49008|6521|6|48|45936.00|0.02|0.02|N|O|1995-10-04|1995-08-25|1995-10-24|DELIVER IN PERSON|RAIL|ckages. evenly daring platelets according 11075|126591|4128|1|40|64703.60|0.01|0.04|N|O|1996-04-25|1996-05-15|1996-05-23|COLLECT COD|RAIL|ding requests nod 11075|199146|1666|2|29|36109.06|0.03|0.05|N|O|1996-04-12|1996-05-05|1996-04-23|TAKE BACK RETURN|MAIL|ts are carefully among the furiously ironi 11075|34817|7321|3|37|64816.97|0.10|0.07|N|O|1996-05-19|1996-05-04|1996-05-29|COLLECT COD|SHIP|le furiously around the even d 11075|162931|2932|4|45|89726.85|0.09|0.06|N|O|1996-03-20|1996-04-28|1996-04-03|COLLECT COD|RAIL|le furiousl 11076|97883|393|1|26|48902.88|0.10|0.05|R|F|1994-09-28|1994-11-02|1994-10-22|TAKE BACK RETURN|SHIP|eep among the hockey p 11076|112608|7631|2|8|12964.80|0.00|0.01|A|F|1994-08-22|1994-11-01|1994-09-05|DELIVER IN PERSON|MAIL|s haggle slyly ironic deposits. slyl 11077|171691|9243|1|45|79321.05|0.00|0.05|R|F|1994-07-03|1994-04-22|1994-07-10|NONE|FOB| ironic de 11078|75669|5670|1|28|46050.48|0.06|0.04|A|F|1992-08-06|1992-08-12|1992-08-07|TAKE BACK RETURN|RAIL|xcuses. ironic Tire 11078|156173|1204|2|40|49166.80|0.02|0.02|A|F|1992-08-22|1992-08-20|1992-09-11|COLLECT COD|REG AIR|arefully. quickly ex 11079|22488|2489|1|3|4231.44|0.01|0.03|N|O|1997-11-26|1997-10-22|1997-12-08|DELIVER IN PERSON|SHIP|never even pinto beans. quickly express a 11079|153485|3486|2|35|53846.80|0.06|0.05|N|O|1997-12-06|1997-10-08|1997-12-18|NONE|FOB|the foxes eat carefully 11079|20491|492|3|42|59282.58|0.05|0.02|N|O|1997-11-30|1997-10-26|1997-12-21|COLLECT COD|FOB| furiously. pending ideas cajole slyl 11104|197058|9578|1|26|30031.30|0.00|0.05|N|O|1998-09-08|1998-09-06|1998-09-16|TAKE BACK RETURN|FOB|kages. blithely cl 11105|31064|6071|1|17|16916.02|0.03|0.04|N|O|1996-03-26|1996-02-07|1996-04-25|COLLECT COD|MAIL|ngside of the stealthily final 11106|136107|8621|1|10|11431.00|0.05|0.00|N|O|1995-06-27|1995-06-07|1995-07-21|COLLECT COD|SHIP|kages. slyly regular platelets ar 11106|146654|4197|2|35|59522.75|0.03|0.02|R|F|1995-04-16|1995-06-19|1995-05-08|TAKE BACK RETURN|SHIP|ole slyly. slyly specia 11107|119459|1971|1|27|39918.15|0.01|0.06|N|O|1995-11-19|1996-01-20|1995-12-16|DELIVER IN PERSON|REG AIR|posits. 
reque 11107|131246|3760|2|2|2554.48|0.05|0.05|N|O|1995-11-04|1995-12-26|1995-11-14|NONE|REG AIR|s shall have to sleep slyly before the 11107|41242|8755|3|23|27214.52|0.05|0.07|N|O|1995-11-27|1995-12-03|1995-12-25|NONE|AIR|arefully ironic foxes gro 11108|194541|9580|1|37|60514.98|0.06|0.04|R|F|1992-08-26|1992-10-08|1992-09-07|COLLECT COD|MAIL|he instructions haggle about the carefull 11108|45114|123|2|39|41305.29|0.03|0.06|A|F|1992-10-26|1992-11-05|1992-11-12|DELIVER IN PERSON|TRUCK|ckages. quickly ironic asymptotes wak 11108|21992|9499|3|15|28709.85|0.08|0.07|R|F|1992-12-06|1992-11-12|1992-12-10|NONE|SHIP|thely blithely express packages. slyly f 11108|3023|8024|4|4|3704.08|0.08|0.03|R|F|1992-11-27|1992-10-28|1992-12-08|TAKE BACK RETURN|AIR|accounts: 11108|4365|4366|5|41|52043.76|0.06|0.08|A|F|1992-10-25|1992-09-23|1992-11-21|NONE|TRUCK|e. furiously ironic accounts are. furi 11108|149161|6704|6|19|22993.04|0.00|0.00|R|F|1992-10-05|1992-10-05|1992-10-19|TAKE BACK RETURN|TRUCK|al theodolites nag fluffily bold depo 11108|132441|9981|7|24|35362.56|0.00|0.06|R|F|1992-11-24|1992-10-03|1992-12-06|NONE|RAIL|. bold excuses are ruthlessly express, pend 11109|135663|5664|1|3|5095.98|0.09|0.07|R|F|1995-03-24|1995-05-09|1995-04-03|NONE|AIR|eas-- evenly even excuses mold caref 11109|78684|8685|2|1|1662.68|0.06|0.05|R|F|1995-03-21|1995-05-31|1995-04-13|NONE|SHIP|nts wake slyly pending instructions. foxes 11110|54990|4991|1|2|3889.98|0.10|0.03|N|O|1996-09-25|1996-08-08|1996-10-18|NONE|AIR|ns! bold courts besid 11110|9449|1950|2|33|44828.52|0.09|0.07|N|O|1996-09-01|1996-07-26|1996-09-21|COLLECT COD|RAIL|s about the quickly express deposits gro 11110|199155|6713|3|21|26337.15|0.03|0.06|N|O|1996-10-07|1996-08-02|1996-10-30|NONE|TRUCK|beans are among the thin de 11110|194473|4474|4|6|9404.82|0.03|0.03|N|O|1996-10-07|1996-07-27|1996-10-14|COLLECT COD|REG AIR|lithely regular ideas. dependencies sub 11110|107501|5032|5|41|61848.50|0.05|0.04|N|O|1996-07-15|1996-09-13|1996-07-22|TAKE BACK RETURN|REG AIR|ay accordin 11110|162643|192|6|18|30701.52|0.01|0.01|N|O|1996-07-21|1996-08-18|1996-07-30|COLLECT COD|MAIL| among the blithely 11111|72329|7344|1|39|50751.48|0.06|0.07|A|F|1995-02-03|1995-04-12|1995-02-24|NONE|FOB|ress instructions are according to the car 11111|21492|6497|2|25|35337.25|0.00|0.03|A|F|1995-03-24|1995-04-08|1995-03-31|DELIVER IN PERSON|MAIL|ring the pinto beans are careful 11111|13370|8373|3|1|1283.37|0.01|0.06|A|F|1995-02-19|1995-03-13|1995-03-03|TAKE BACK RETURN|MAIL|uses among the deposits ca 11111|99611|9612|4|16|25769.76|0.10|0.04|A|F|1995-05-10|1995-04-17|1995-05-15|NONE|MAIL|arefully even excuses wake. blithel 11136|30422|2926|1|50|67621.00|0.07|0.08|R|F|1994-01-23|1994-02-02|1994-02-20|COLLECT COD|FOB|y express packages ab 11137|126867|6868|1|3|5681.58|0.08|0.08|R|F|1992-12-07|1993-02-17|1993-01-04|COLLECT COD|RAIL|s haggle furiously. f 11137|43946|1459|2|2|3779.88|0.00|0.06|A|F|1993-01-06|1993-01-01|1993-01-22|COLLECT COD|AIR|heodolites cajole about the pendi 11137|186555|9074|3|19|31189.45|0.07|0.04|R|F|1992-12-16|1993-01-13|1993-01-06|COLLECT COD|SHIP|tructions wake carefully among the reg 11137|14031|6533|4|28|26460.84|0.02|0.04|R|F|1993-02-25|1993-01-20|1993-03-21|COLLECT COD|AIR|fter the regular accounts. car 11137|12405|7408|5|47|61917.80|0.04|0.04|A|F|1993-02-03|1993-01-07|1993-03-01|NONE|SHIP|unts wake furiously. 
slyly pending pin 11137|88880|1389|6|32|59804.16|0.04|0.08|A|F|1993-03-05|1993-01-20|1993-03-23|TAKE BACK RETURN|TRUCK|carefully about the instructio 11137|308|309|7|31|37457.30|0.04|0.05|A|F|1993-01-06|1993-01-04|1993-01-13|DELIVER IN PERSON|RAIL|es promise blithely 11138|174350|1902|1|40|56974.00|0.06|0.06|R|F|1995-05-12|1995-04-13|1995-05-21|NONE|TRUCK|ut the carefully even orbits wake caref 11138|87867|7868|2|48|89033.28|0.07|0.05|N|F|1995-06-07|1995-05-05|1995-07-05|NONE|FOB|ions detect. unusual foxes nag daringly 11138|169532|4565|3|9|14413.77|0.05|0.00|R|F|1995-05-04|1995-05-19|1995-05-10|DELIVER IN PERSON|FOB|ickly carefully 11139|55505|5506|1|15|21907.50|0.05|0.08|N|O|1998-08-03|1998-07-07|1998-08-16|NONE|TRUCK| unusual p 11139|63645|1164|2|39|62736.96|0.08|0.03|N|O|1998-09-09|1998-08-15|1998-10-02|TAKE BACK RETURN|SHIP|gular ideas engage flu 11139|65302|2821|3|13|16474.90|0.10|0.06|N|O|1998-07-01|1998-07-06|1998-07-19|TAKE BACK RETURN|SHIP|s packages. carefully specia 11139|114018|1552|4|17|17544.17|0.10|0.05|N|O|1998-09-15|1998-08-07|1998-10-03|TAKE BACK RETURN|TRUCK|unusual pinto beans integrate 11140|185779|5780|1|25|46619.25|0.06|0.02|N|O|1997-10-25|1997-09-30|1997-10-28|TAKE BACK RETURN|TRUCK|atelets. blithely regular exc 11140|174923|4924|2|16|31966.72|0.07|0.06|N|O|1997-08-23|1997-10-04|1997-09-11|TAKE BACK RETURN|SHIP|fully even sentiments. slyly even foxes 11140|117237|4771|3|9|11288.07|0.08|0.03|N|O|1997-10-07|1997-08-28|1997-10-14|DELIVER IN PERSON|TRUCK|ix slyly pending deposits. fluffily u 11140|1927|4428|4|9|16460.28|0.07|0.01|N|O|1997-08-19|1997-09-06|1997-09-17|NONE|AIR|ic decoys alongside of the ironic, 11140|173588|8623|5|33|54832.14|0.02|0.00|N|O|1997-08-15|1997-09-28|1997-09-10|TAKE BACK RETURN|AIR|d foxes wake blithely 11140|77440|2455|6|13|18426.72|0.10|0.01|N|O|1997-10-30|1997-10-09|1997-11-13|DELIVER IN PERSON|TRUCK|packages. furiously 11140|129611|4636|7|31|50858.91|0.10|0.08|N|O|1997-08-27|1997-09-23|1997-09-18|TAKE BACK RETURN|SHIP|oss the blithe, regular hockey player 11141|171369|1370|1|33|47531.88|0.07|0.02|N|O|1998-04-27|1998-03-03|1998-05-06|TAKE BACK RETURN|FOB|requests a 11141|184589|4590|2|11|18409.38|0.06|0.05|N|O|1998-04-20|1998-02-19|1998-05-14|COLLECT COD|RAIL|riously ironic courts? c 11141|175434|2986|3|2|3018.86|0.03|0.01|N|O|1998-05-16|1998-04-08|1998-05-27|NONE|FOB|ully ruthless accounts sleep slyl 11142|62787|5294|1|48|83989.44|0.05|0.07|N|O|1997-12-06|1997-12-05|1997-12-14|TAKE BACK RETURN|SHIP|jole furiously against the express a 11142|53183|3184|2|49|55672.82|0.08|0.07|N|O|1997-11-11|1997-12-24|1997-12-01|NONE|RAIL| sauternes. carefully 11142|44833|2346|3|9|16000.47|0.02|0.01|N|O|1997-11-29|1997-12-23|1997-12-20|TAKE BACK RETURN|MAIL| sleep along 11142|98552|3571|4|29|44965.95|0.01|0.06|N|O|1997-10-16|1997-11-04|1997-10-30|TAKE BACK RETURN|AIR|ons haggle attainments. 11142|159591|9592|5|44|72625.96|0.06|0.02|N|O|1997-11-17|1997-11-28|1997-11-21|TAKE BACK RETURN|SHIP|ter the final r 11142|30987|988|6|37|70965.26|0.06|0.04|N|O|1997-10-19|1997-11-19|1997-11-11|TAKE BACK RETURN|SHIP|xpress courts along the re 11142|185421|7940|7|44|66282.48|0.04|0.01|N|O|1997-11-17|1997-12-06|1997-11-29|TAKE BACK RETURN|TRUCK|ithely even packages alongsid 11143|160967|6000|1|40|81118.40|0.09|0.00|R|F|1993-03-26|1993-01-26|1993-04-24|COLLECT COD|FOB|re. final packages haggle furiously. pinto 11143|160689|8238|2|38|66487.84|0.10|0.04|A|F|1992-12-22|1993-02-10|1993-01-16|DELIVER IN PERSON|SHIP|ronic deposits haggle. 
packages boost sl 11143|97914|424|3|34|65004.94|0.04|0.02|A|F|1993-02-05|1993-02-17|1993-02-14|TAKE BACK RETURN|MAIL|e silent dependencies. quietly special th 11143|30185|186|4|1|1115.18|0.05|0.08|R|F|1992-12-16|1993-01-07|1993-01-08|NONE|AIR|uriously regular 11168|65350|5351|1|8|10522.80|0.04|0.05|R|F|1993-01-21|1992-12-13|1993-01-22|NONE|AIR|ely furious instructions. furiously expr 11168|136809|9323|2|47|86752.60|0.01|0.05|R|F|1993-01-20|1992-12-07|1993-02-05|DELIVER IN PERSON|REG AIR|al courts cajole af 11169|145967|3510|1|35|70453.60|0.05|0.06|R|F|1993-08-14|1993-07-23|1993-08-19|NONE|REG AIR| blithely. carefully expres 11169|56930|1941|2|1|1886.93|0.00|0.06|R|F|1993-09-11|1993-08-20|1993-09-20|DELIVER IN PERSON|SHIP| deposits haggle. fluffily even p 11169|12311|7314|3|39|47709.09|0.10|0.04|A|F|1993-08-13|1993-06-22|1993-09-09|NONE|FOB| instructions. 11169|102406|4917|4|41|57744.40|0.10|0.02|R|F|1993-06-05|1993-07-05|1993-06-13|TAKE BACK RETURN|TRUCK|cuses. regular excuse 11170|162157|2158|1|40|48766.00|0.03|0.07|N|O|1997-09-07|1997-09-15|1997-09-08|NONE|FOB|ests. slyly exp 11170|170261|262|2|25|33281.50|0.08|0.01|N|O|1997-11-03|1997-10-01|1997-11-27|DELIVER IN PERSON|REG AIR|refully daring pearls wake quickly acco 11170|171946|4464|3|4|8071.76|0.00|0.03|N|O|1997-09-29|1997-09-17|1997-10-24|COLLECT COD|AIR|the regular packages. fluffily 11170|81208|3717|4|28|33297.60|0.10|0.06|N|O|1997-08-10|1997-09-12|1997-08-21|TAKE BACK RETURN|MAIL|leep instead of the furiously ir 11171|195819|3377|1|36|68933.16|0.08|0.08|N|O|1995-09-11|1995-09-28|1995-09-16|NONE|MAIL|ccording to the 11171|125866|5867|2|35|66215.10|0.08|0.02|N|O|1995-09-21|1995-09-15|1995-10-15|NONE|AIR| notornis could nag about the ironic dep 11172|190772|8330|1|22|40980.94|0.09|0.01|N|O|1998-03-09|1998-02-09|1998-04-06|NONE|REG AIR|carefully 11172|92170|7189|2|23|26729.91|0.10|0.07|N|O|1998-02-06|1998-02-02|1998-02-15|NONE|FOB|d foxes will ha 11172|92688|2689|3|35|58823.80|0.08|0.02|N|O|1998-02-02|1998-03-28|1998-03-02|TAKE BACK RETURN|FOB|ackages among the blithely 11172|5045|7546|4|32|30401.28|0.03|0.01|N|O|1998-03-06|1998-02-13|1998-04-01|COLLECT COD|MAIL|always enticing requests after 11173|75244|5245|1|6|7315.44|0.10|0.06|R|F|1992-04-07|1992-06-29|1992-04-09|NONE|MAIL|cies. blithely ironic sentiments 11173|23844|6347|2|26|45963.84|0.08|0.05|A|F|1992-05-28|1992-05-07|1992-06-25|TAKE BACK RETURN|AIR|ng pinto bean 11173|144655|7170|3|32|54388.80|0.04|0.04|A|F|1992-07-02|1992-05-12|1992-07-31|TAKE BACK RETURN|RAIL|unusual packages haggle furious 11173|13343|8346|4|16|20101.44|0.05|0.03|R|F|1992-05-30|1992-06-10|1992-06-09|COLLECT COD|AIR| instructions. regular packages sleep 11173|40520|8033|5|9|13144.68|0.05|0.06|R|F|1992-04-01|1992-05-11|1992-04-17|DELIVER IN PERSON|SHIP|iously unusu 11173|19623|7127|6|39|60162.18|0.06|0.04|R|F|1992-05-24|1992-05-05|1992-06-22|TAKE BACK RETURN|MAIL|ons. pending deposits wake a 11173|3196|3197|7|29|31876.51|0.04|0.08|R|F|1992-07-23|1992-05-25|1992-08-07|TAKE BACK RETURN|MAIL|fully express foxes. regular 11174|113431|965|1|22|31777.46|0.01|0.06|A|F|1994-04-24|1994-05-19|1994-05-10|TAKE BACK RETURN|REG AIR|fter the waters. regular 11174|23936|8941|2|2|3719.86|0.04|0.05|R|F|1994-05-30|1994-05-19|1994-06-27|COLLECT COD|MAIL| the deposit 11174|84065|1590|3|18|18883.08|0.02|0.04|A|F|1994-05-31|1994-05-12|1994-06-14|NONE|RAIL|carefully bold dependencies use packages. r 11174|64889|9902|4|8|14831.04|0.01|0.00|R|F|1994-04-22|1994-07-04|1994-05-07|NONE|TRUCK|sits. 
accounts boost quickly ab 11174|180491|5528|5|31|48716.19|0.01|0.00|R|F|1994-06-23|1994-05-16|1994-07-16|TAKE BACK RETURN|AIR|t the ironic deposits cajole slyly across 11174|99052|6580|6|14|14714.70|0.07|0.08|A|F|1994-08-08|1994-05-11|1994-08-12|NONE|RAIL|. slyly daring requests 11175|134377|4378|1|6|8468.22|0.04|0.02|N|O|1998-03-08|1998-05-13|1998-03-31|COLLECT COD|TRUCK|mong the slyl 11175|74131|6639|2|6|6630.78|0.05|0.00|N|O|1998-04-23|1998-05-25|1998-05-19|TAKE BACK RETURN|REG AIR|ms wake quickly. furiously 11175|115322|2856|3|26|34770.32|0.00|0.00|N|O|1998-05-27|1998-05-05|1998-05-31|TAKE BACK RETURN|TRUCK|tegrate along the ironi 11200|29324|1827|1|36|45119.52|0.10|0.01|N|O|1997-04-14|1997-04-04|1997-04-18|COLLECT COD|TRUCK| theodolites wake. carefull 11200|42326|4831|2|3|3804.96|0.02|0.00|N|O|1997-03-14|1997-04-28|1997-04-03|COLLECT COD|SHIP|ies. fluffily regular dependencies are fina 11200|162319|4836|3|47|64921.57|0.05|0.04|N|O|1997-04-28|1997-05-01|1997-05-26|NONE|SHIP|posits. pending deposits sleep ironic 11201|176601|6602|1|2|3355.20|0.05|0.04|N|O|1996-12-21|1997-02-21|1996-12-27|DELIVER IN PERSON|AIR|ns. regular i 11202|29182|6689|1|39|43336.02|0.04|0.06|R|F|1992-04-29|1992-06-11|1992-05-13|NONE|MAIL| ideas wake fluffily around the 11202|50642|5653|2|44|70076.16|0.01|0.05|R|F|1992-06-22|1992-05-09|1992-07-16|DELIVER IN PERSON|SHIP|equests wake blithely 11202|135027|7541|3|38|40356.76|0.04|0.01|R|F|1992-06-23|1992-04-27|1992-07-12|COLLECT COD|SHIP|y ironic theodolites detect car 11203|71198|8720|1|39|45598.41|0.05|0.06|N|O|1996-09-06|1996-07-31|1996-09-29|NONE|SHIP|egular ideas sleep according to the fur 11203|24147|4148|2|19|20351.66|0.04|0.03|N|O|1996-07-26|1996-09-11|1996-08-09|COLLECT COD|REG AIR|g accounts cajole furiously 11204|34209|1719|1|22|25150.40|0.07|0.06|N|O|1998-11-19|1998-08-25|1998-12-18|TAKE BACK RETURN|REG AIR| deposits. carefully pending acc 11204|149204|6747|2|2|2506.40|0.09|0.04|N|O|1998-09-12|1998-09-30|1998-10-01|DELIVER IN PERSON|TRUCK| haggle furiously pending 11205|79806|2314|1|9|16072.20|0.03|0.08|N|O|1996-11-01|1996-11-05|1996-11-05|TAKE BACK RETURN|RAIL|he foxes at the even asymptotes haggle 11205|90659|660|2|9|14846.85|0.07|0.05|N|O|1996-10-20|1996-10-16|1996-11-10|COLLECT COD|AIR| ideas. fluffil 11205|92562|5072|3|34|52855.04|0.04|0.03|N|O|1996-11-20|1996-10-28|1996-12-16|COLLECT COD|TRUCK|ular forges affix boldly after the silent a 11206|176764|1799|1|33|60745.08|0.04|0.01|R|F|1995-04-20|1995-03-28|1995-05-11|TAKE BACK RETURN|REG AIR| accounts use slyly final, even pin 11206|57746|2757|2|22|37482.28|0.02|0.02|A|F|1995-02-16|1995-02-28|1995-03-06|DELIVER IN PERSON|RAIL|eposits. carefully special pinto b 11206|126517|4054|3|23|35500.73|0.03|0.05|A|F|1995-04-02|1995-01-29|1995-04-17|NONE|TRUCK|dolites-- carefully un 11206|168257|8258|4|27|35781.75|0.04|0.03|A|F|1995-01-07|1995-02-06|1995-01-14|TAKE BACK RETURN|MAIL| ironic deposits? dolph 11206|81802|4311|5|33|58865.40|0.08|0.03|A|F|1995-02-18|1995-03-16|1995-03-08|DELIVER IN PERSON|SHIP|beans. blithely iron 11207|8450|5951|1|40|54338.00|0.02|0.00|N|O|1996-07-23|1996-06-04|1996-08-13|DELIVER IN PERSON|RAIL|usy theodolites cajole against the regula 11207|8793|1294|2|15|25526.85|0.02|0.02|N|O|1996-04-19|1996-05-07|1996-05-14|NONE|FOB|uests cajole blithely express s 11207|174686|4687|3|19|33452.92|0.02|0.00|N|O|1996-07-21|1996-06-20|1996-08-15|NONE|REG AIR|yly regular accounts. excuses use. 
11232|87149|2166|1|10|11361.40|0.00|0.04|N|O|1996-04-14|1996-03-30|1996-05-01|DELIVER IN PERSON|RAIL|sly about the furiously silent ide 11232|97892|5420|2|4|7559.56|0.02|0.05|N|O|1996-05-15|1996-03-22|1996-06-05|DELIVER IN PERSON|RAIL|ng. slyly regular theo 11232|56837|6838|3|29|52021.07|0.07|0.04|N|O|1996-05-21|1996-04-05|1996-06-03|TAKE BACK RETURN|REG AIR|. blithely regula 11232|52442|2443|4|7|9761.08|0.04|0.06|N|O|1996-05-15|1996-04-18|1996-05-29|TAKE BACK RETURN|RAIL|onic requests according to the sheaves wake 11233|151577|1578|1|17|27685.69|0.02|0.00|N|O|1995-09-12|1995-10-23|1995-09-19|DELIVER IN PERSON|FOB|usual reques 11233|138106|620|2|25|28602.50|0.05|0.03|N|O|1995-11-28|1995-10-30|1995-12-12|COLLECT COD|RAIL|, regular decoys. ruthlessly 11233|136030|1057|3|36|38377.08|0.10|0.02|N|O|1995-10-14|1995-10-26|1995-10-24|COLLECT COD|AIR|ar theodolites. 11234|52252|7263|1|36|43353.00|0.07|0.07|A|F|1993-04-12|1993-04-03|1993-05-01|DELIVER IN PERSON|REG AIR|nstructions. car 11234|191265|1266|2|9|12206.34|0.03|0.07|A|F|1993-05-17|1993-05-01|1993-05-26|NONE|AIR|y about the quickly regular attai 11234|179260|1778|3|29|38838.54|0.01|0.01|R|F|1993-05-16|1993-04-18|1993-05-26|TAKE BACK RETURN|AIR|usly pending theodolites 11235|141531|9074|1|15|23587.95|0.08|0.04|N|O|1996-05-27|1996-06-10|1996-06-03|NONE|REG AIR| packages 11235|104517|7028|2|28|42602.28|0.01|0.07|N|O|1996-05-23|1996-06-27|1996-06-13|NONE|SHIP|d dependencies. caref 11235|13092|8095|3|50|50254.50|0.10|0.01|N|O|1996-05-23|1996-06-01|1996-06-14|NONE|TRUCK|pinto beans hinder quickly even, iro 11235|35451|5452|4|9|12478.05|0.08|0.03|N|O|1996-05-29|1996-06-17|1996-06-06|DELIVER IN PERSON|SHIP|inal theodolites sleep furi 11235|23743|3744|5|48|80003.52|0.01|0.01|N|O|1996-05-16|1996-06-30|1996-06-01|TAKE BACK RETURN|MAIL|ly regular packages. slyl 11235|41608|6617|6|43|66632.80|0.00|0.06|N|O|1996-04-15|1996-06-01|1996-04-27|COLLECT COD|REG AIR|tegrate furiously accounts. c 11235|90799|8327|7|34|60852.86|0.05|0.01|N|O|1996-07-26|1996-06-23|1996-08-15|DELIVER IN PERSON|FOB|quests sleep furiously carefully fu 11236|51111|1112|1|17|18055.87|0.02|0.08|A|F|1993-11-23|1994-01-21|1993-11-29|NONE|MAIL|even instructions against the f 11236|8046|5547|2|19|18126.76|0.02|0.00|A|F|1993-12-29|1994-01-28|1994-01-20|COLLECT COD|MAIL|refully even dinos nod care 11236|27820|7821|3|46|80399.72|0.00|0.04|R|F|1993-11-05|1993-12-14|1993-11-30|TAKE BACK RETURN|SHIP|s according 11236|162441|7474|4|23|34579.12|0.07|0.08|R|F|1993-12-02|1994-01-08|1993-12-08|COLLECT COD|FOB|ding to the furiousl 11236|136678|9192|5|26|44581.42|0.06|0.00|R|F|1994-02-10|1993-12-17|1994-02-18|TAKE BACK RETURN|SHIP|st carefully regular deposits. qu 11236|141988|9531|6|34|69019.32|0.07|0.06|R|F|1993-11-13|1993-12-01|1993-11-15|TAKE BACK RETURN|MAIL| carefully regular foxes 11237|54501|9512|1|43|62586.50|0.05|0.06|N|O|1996-10-06|1996-08-01|1996-11-01|NONE|REG AIR|sly pending accounts use. furiously 11237|189682|9683|2|32|56693.76|0.05|0.03|N|O|1996-09-20|1996-07-21|1996-10-18|TAKE BACK RETURN|REG AIR| the fluffily p 11237|185168|5169|3|15|18797.40|0.01|0.06|N|O|1996-07-09|1996-08-14|1996-07-16|COLLECT COD|FOB|s. 
furiously ir 11237|117395|9907|4|8|11299.12|0.04|0.04|N|O|1996-06-29|1996-08-11|1996-06-30|TAKE BACK RETURN|TRUCK|according to the fluffily final 11238|130254|255|1|35|44948.75|0.01|0.05|N|O|1996-10-09|1996-10-16|1996-10-30|DELIVER IN PERSON|TRUCK|tect carefully again 11238|162312|2313|2|7|9620.17|0.04|0.01|N|O|1996-07-27|1996-09-19|1996-07-31|COLLECT COD|AIR|ly blithely unusual asymptot 11238|166116|8633|3|5|5910.55|0.09|0.05|N|O|1996-09-10|1996-08-28|1996-09-16|COLLECT COD|AIR|unts thrash. blithely final requ 11238|104440|4441|4|50|72222.00|0.09|0.07|N|O|1996-09-12|1996-09-05|1996-09-25|DELIVER IN PERSON|FOB|t instructions sublate fluffily fl 11238|107837|7838|5|2|3689.66|0.00|0.04|N|O|1996-10-17|1996-09-14|1996-10-18|TAKE BACK RETURN|SHIP|ely even id 11238|10373|374|6|17|21817.29|0.05|0.05|N|O|1996-11-01|1996-09-14|1996-11-06|COLLECT COD|REG AIR|ic sauternes haggle across t 11239|132016|4530|1|41|42968.41|0.04|0.05|R|F|1992-05-15|1992-04-14|1992-05-27|COLLECT COD|RAIL|y ironic pa 11239|182352|7389|2|39|55939.65|0.06|0.05|A|F|1992-04-08|1992-04-26|1992-05-04|DELIVER IN PERSON|RAIL| of the carefully final reque 11239|182476|4995|3|10|15584.70|0.08|0.04|A|F|1992-02-26|1992-04-03|1992-02-28|DELIVER IN PERSON|SHIP|cial somas. pinto beans may wak 11239|66906|6907|4|34|63678.60|0.00|0.05|R|F|1992-05-25|1992-04-11|1992-05-28|COLLECT COD|REG AIR|ly final deposits; f 11239|78481|8482|5|39|56919.72|0.08|0.03|R|F|1992-06-10|1992-04-02|1992-06-27|DELIVER IN PERSON|FOB|ithely. slyly regul 11239|17483|7484|6|5|7002.40|0.10|0.04|R|F|1992-05-13|1992-03-15|1992-05-24|NONE|REG AIR|he carefully regular requests. f 11239|33449|3450|7|40|55297.60|0.00|0.08|A|F|1992-05-05|1992-03-21|1992-05-29|NONE|AIR| foxes wake. slyly 11264|169411|9412|1|22|32569.02|0.08|0.07|N|O|1996-12-13|1996-10-03|1996-12-17|TAKE BACK RETURN|FOB|inst the quickly final reque 11264|89135|6660|2|11|12365.43|0.01|0.05|N|O|1996-10-04|1996-11-25|1996-10-25|TAKE BACK RETURN|TRUCK|nal, ironic requests wake a 11264|79359|6881|3|20|26767.00|0.00|0.02|N|O|1996-12-26|1996-11-18|1997-01-10|TAKE BACK RETURN|REG AIR|luffy patterns use careful 11264|170634|3152|4|26|44320.38|0.03|0.03|N|O|1996-11-11|1996-11-13|1996-11-30|TAKE BACK RETURN|REG AIR|nts haggle carefully across the u 11264|35445|7949|5|16|22087.04|0.03|0.08|N|O|1996-11-12|1996-11-13|1996-12-03|DELIVER IN PERSON|AIR| express theodolites. even accounts hag 11265|183060|3061|1|24|27433.44|0.07|0.02|N|O|1997-07-09|1997-08-03|1997-07-20|TAKE BACK RETURN|AIR|to the final 11265|73744|1266|2|29|49814.46|0.04|0.00|N|O|1997-07-18|1997-07-25|1997-07-19|NONE|MAIL|ke dependencies? slyly quick foxe 11265|9144|9145|3|4|4212.56|0.03|0.02|N|O|1997-07-21|1997-06-26|1997-08-15|DELIVER IN PERSON|FOB|ly ironic dolphi 11266|173342|3343|1|8|11322.72|0.00|0.04|N|O|1997-10-16|1997-10-24|1997-11-07|COLLECT COD|FOB|onic asymptotes sleep slyly furiousl 11267|19898|2400|1|3|5453.67|0.00|0.01|R|F|1992-05-28|1992-04-11|1992-06-23|NONE|REG AIR|ly express requ 11267|15506|3010|2|40|56860.00|0.01|0.01|A|F|1992-03-01|1992-03-28|1992-03-29|NONE|TRUCK|symptotes haggle furiously. careful 11268|74499|9514|1|12|17681.88|0.00|0.03|N|O|1998-10-13|1998-09-05|1998-10-15|TAKE BACK RETURN|TRUCK|carefully. deposits boost across th 11268|97786|5314|2|7|12486.46|0.01|0.01|N|O|1998-09-15|1998-08-10|1998-10-11|NONE|FOB|ully according to the blit 11268|77651|7652|3|48|78175.20|0.09|0.07|N|O|1998-10-06|1998-07-30|1998-11-02|COLLECT COD|REG AIR|ut the even excuses. 
final reque 11268|77680|7681|4|49|81226.32|0.10|0.05|N|O|1998-09-25|1998-09-03|1998-10-21|TAKE BACK RETURN|RAIL|ns are fluffily according to th 11269|78038|3053|1|31|31496.93|0.02|0.00|A|F|1992-08-06|1992-08-23|1992-08-12|TAKE BACK RETURN|TRUCK|ual theodolites are q 11269|182004|2005|2|33|35838.00|0.01|0.04|R|F|1992-07-26|1992-08-11|1992-07-30|NONE|FOB|lly. silent theodolite 11269|126442|6443|3|6|8810.64|0.01|0.06|R|F|1992-08-16|1992-08-23|1992-08-30|DELIVER IN PERSON|SHIP|r ideas wake furio 11269|106154|8665|4|41|47566.15|0.09|0.08|A|F|1992-09-18|1992-07-05|1992-09-19|DELIVER IN PERSON|RAIL|theodolites. care 11269|129780|2293|5|49|88679.22|0.07|0.03|A|F|1992-08-03|1992-08-20|1992-08-15|DELIVER IN PERSON|REG AIR|d, dogged pains are blithely. car 11269|8172|8173|6|34|36725.78|0.07|0.02|R|F|1992-08-30|1992-07-27|1992-09-12|TAKE BACK RETURN|FOB|lly regular deposits haggle carefully 11269|144659|7174|7|9|15332.85|0.01|0.03|R|F|1992-09-25|1992-06-26|1992-10-14|DELIVER IN PERSON|SHIP|ar foxes boost 11270|71997|9519|1|6|11813.94|0.03|0.01|N|O|1995-09-05|1995-09-23|1995-09-09|TAKE BACK RETURN|SHIP|deas. sly accounts integrate bl 11270|37320|9824|2|29|36462.28|0.09|0.06|N|O|1995-08-10|1995-09-06|1995-08-19|TAKE BACK RETURN|REG AIR|requests sleep quic 11270|28150|8151|3|39|42047.85|0.02|0.06|N|O|1995-10-04|1995-08-25|1995-10-07|TAKE BACK RETURN|SHIP| even pint 11270|110745|746|4|49|86031.26|0.10|0.02|N|O|1995-09-20|1995-08-21|1995-09-21|TAKE BACK RETURN|SHIP|he blithely even packages 11270|84215|6724|5|28|33577.88|0.02|0.03|N|O|1995-10-16|1995-09-25|1995-11-03|COLLECT COD|AIR|pending foxes use fluffily fluffily 11270|191662|1663|6|6|10521.96|0.09|0.01|N|O|1995-08-01|1995-08-19|1995-08-25|NONE|MAIL| haggle under the furiously express pack 11271|148623|6166|1|23|38447.26|0.00|0.04|N|O|1995-12-06|1995-12-19|1995-12-10|NONE|SHIP|refully above th 11271|99475|9476|2|17|25065.99|0.08|0.06|N|O|1995-12-15|1995-11-18|1995-12-30|DELIVER IN PERSON|REG AIR|c pinto beans. quickly regular depend 11271|61492|9011|3|45|65407.05|0.04|0.04|N|O|1996-01-22|1995-11-19|1996-02-09|DELIVER IN PERSON|TRUCK|y regular dependencies cajole furio 11271|79482|1990|4|41|59920.68|0.09|0.06|N|O|1996-01-15|1996-01-15|1996-01-18|COLLECT COD|TRUCK|ely even platelets. fluffily permanent idea 11271|48436|941|5|21|29073.03|0.04|0.02|N|O|1996-01-19|1995-12-13|1996-02-12|COLLECT COD|MAIL|xpress deposi 11271|60821|5834|6|2|3563.64|0.00|0.07|N|O|1996-01-21|1995-12-12|1996-02-15|TAKE BACK RETURN|MAIL|pendencies. careful 11271|12547|2548|7|25|36488.50|0.09|0.02|N|O|1995-12-11|1996-01-16|1995-12-12|NONE|REG AIR|dazzle ironically fu 11296|70172|5187|1|29|33122.93|0.03|0.02|R|F|1992-03-08|1992-02-11|1992-04-05|DELIVER IN PERSON|TRUCK|riously silent deposits han 11296|171725|1726|2|36|64681.92|0.02|0.06|A|F|1992-03-06|1992-03-29|1992-03-09|COLLECT COD|MAIL|, stealthy p 11296|178828|6380|3|35|66738.70|0.08|0.00|R|F|1992-02-17|1992-03-08|1992-02-26|NONE|FOB|counts. waters doze blithely instruct 11296|161987|1988|4|50|102449.00|0.00|0.07|A|F|1992-02-14|1992-03-21|1992-02-25|NONE|SHIP| packages wake quickly. 
furiou 11296|99577|2087|5|35|55179.95|0.07|0.03|A|F|1992-03-24|1992-03-19|1992-04-13|NONE|MAIL|yly above the ruthl 11296|184791|2346|6|41|76907.39|0.06|0.07|A|F|1992-03-09|1992-02-12|1992-04-07|NONE|FOB|egular asymptotes are carefully special req 11297|42178|4683|1|32|35845.44|0.08|0.04|R|F|1993-04-07|1993-03-20|1993-04-30|COLLECT COD|RAIL|ding, expres 11297|75480|7988|2|45|65496.60|0.08|0.01|R|F|1993-03-15|1993-03-11|1993-04-01|NONE|AIR|ndencies haggle caref 11298|36904|4414|1|34|62590.60|0.04|0.00|N|O|1998-03-09|1998-03-07|1998-03-26|DELIVER IN PERSON|AIR|ously even i 11298|44136|4137|2|17|18362.21|0.00|0.08|N|O|1998-02-08|1998-03-21|1998-02-26|COLLECT COD|AIR|. regular, 11298|187355|9874|3|32|46155.20|0.09|0.01|N|O|1998-01-29|1998-03-26|1998-02-02|NONE|RAIL|quests cajole quickly furiously careful 11298|65090|7597|4|43|45368.87|0.01|0.06|N|O|1998-01-18|1998-04-03|1998-02-08|COLLECT COD|MAIL|ffily even packages. quick 11298|156898|4444|5|15|29323.35|0.07|0.03|N|O|1998-03-05|1998-03-03|1998-03-30|DELIVER IN PERSON|SHIP|r dolphins play furi 11298|168694|1211|6|12|21152.28|0.06|0.07|N|O|1998-01-20|1998-03-21|1998-02-15|DELIVER IN PERSON|AIR|refully regular ideas 11299|114121|4122|1|42|47675.04|0.06|0.03|R|F|1993-12-02|1994-01-07|1993-12-04|COLLECT COD|FOB|l requests 11299|25646|3153|2|15|23574.60|0.07|0.07|R|F|1993-12-21|1993-12-13|1994-01-14|DELIVER IN PERSON|AIR|yly at the carefully ironic theodo 11299|17524|5028|3|45|64868.40|0.10|0.00|A|F|1993-12-03|1993-12-14|1993-12-13|COLLECT COD|REG AIR|y regular accounts. 11299|122763|5276|4|17|30357.92|0.05|0.05|A|F|1993-12-29|1994-02-03|1994-01-03|DELIVER IN PERSON|REG AIR| packages wake 11299|65332|345|5|38|49298.54|0.09|0.02|R|F|1994-03-09|1993-12-13|1994-03-13|TAKE BACK RETURN|MAIL|riously pending ex 11299|32371|7378|6|47|61258.39|0.01|0.07|A|F|1994-01-02|1994-02-01|1994-02-01|TAKE BACK RETURN|FOB|y final packages sleep express foxes. ironi 11299|56648|1659|7|10|16046.40|0.06|0.08|A|F|1994-01-12|1994-01-19|1994-02-10|DELIVER IN PERSON|RAIL|thely even theodolites! accounts h 11300|71283|8805|1|47|58951.16|0.03|0.05|N|O|1996-09-07|1996-08-31|1996-09-11|NONE|AIR|ic instructions! even, final 11301|83447|5956|1|24|34330.56|0.04|0.07|R|F|1992-07-05|1992-06-16|1992-07-06|NONE|MAIL|uctions are carefully about the fu 11301|85807|824|2|20|35856.00|0.02|0.00|R|F|1992-06-20|1992-06-15|1992-07-14|NONE|SHIP|egular excuses n 11301|148757|3786|3|22|39726.50|0.06|0.01|R|F|1992-03-28|1992-06-07|1992-04-13|NONE|TRUCK|s boost fluffily quickly bold dugouts. 11301|149006|9007|4|2|2110.00|0.05|0.08|A|F|1992-06-19|1992-05-10|1992-06-22|TAKE BACK RETURN|MAIL|deas use slyly accoun 11301|141630|6659|5|23|38447.49|0.01|0.00|A|F|1992-04-24|1992-05-13|1992-04-26|COLLECT COD|RAIL|lar foxes poach blithel 11301|166120|6121|6|33|39141.96|0.02|0.03|A|F|1992-06-23|1992-04-26|1992-06-28|DELIVER IN PERSON|FOB|ges. furiously pending theodolites wake 11302|41317|1318|1|39|49074.09|0.10|0.07|R|F|1994-04-23|1994-04-04|1994-05-04|DELIVER IN PERSON|AIR| express packages wake careful 11302|7682|183|2|16|25434.88|0.00|0.08|A|F|1994-04-11|1994-04-29|1994-05-07|NONE|AIR|lyly regular cou 11302|94802|9821|3|50|89840.00|0.08|0.01|A|F|1994-05-10|1994-04-01|1994-05-18|NONE|MAIL|posits wake according to th 11302|121522|4035|4|32|49392.64|0.10|0.06|A|F|1994-03-27|1994-05-24|1994-03-31|TAKE BACK RETURN|REG AIR|fily special sauternes. 
furiously fina 11302|8899|6400|5|27|48813.03|0.01|0.07|R|F|1994-06-30|1994-04-21|1994-07-06|DELIVER IN PERSON|MAIL|r the carefully p 11302|154128|1674|6|21|24824.52|0.03|0.00|A|F|1994-06-19|1994-05-13|1994-06-21|TAKE BACK RETURN|RAIL|lar asymptotes integrate carefully bli 11303|128731|8732|1|25|43993.25|0.06|0.05|R|F|1992-06-28|1992-06-24|1992-07-19|DELIVER IN PERSON|SHIP|packages: even requests doubt quietly aft 11303|172446|7481|2|23|34924.12|0.10|0.04|A|F|1992-07-14|1992-06-16|1992-07-19|TAKE BACK RETURN|SHIP|ts integrate slyly enticingly unusual as 11303|96992|2011|3|16|31823.84|0.09|0.05|R|F|1992-04-11|1992-05-08|1992-04-12|COLLECT COD|SHIP|l foxes cajole carefully atop th 11328|35060|2570|1|15|14925.90|0.06|0.06|R|F|1992-07-08|1992-05-04|1992-08-02|TAKE BACK RETURN|MAIL|silent deposits about the carefully bold 11329|1850|6851|1|27|47299.95|0.02|0.03|N|O|1995-10-27|1995-10-17|1995-11-07|TAKE BACK RETURN|FOB|special, regular dolphins 11329|77261|7262|2|38|47053.88|0.06|0.01|N|O|1995-11-04|1995-11-02|1995-11-07|NONE|RAIL|sly unusual accounts cajole slyly i 11329|110671|8205|3|42|70630.14|0.03|0.02|N|O|1995-10-01|1995-10-03|1995-10-11|DELIVER IN PERSON|AIR| quickly carefully regular deposits! fi 11329|37585|2592|4|32|48722.56|0.02|0.06|N|O|1995-08-19|1995-10-10|1995-09-04|COLLECT COD|RAIL|ress pinto be 11329|89847|7372|5|20|36736.80|0.04|0.02|N|O|1995-09-08|1995-09-17|1995-09-26|COLLECT COD|RAIL|c patterns. furi 11329|57006|9512|6|44|42372.00|0.01|0.05|N|O|1995-10-10|1995-10-08|1995-11-05|NONE|RAIL|tes boost against the quickly pending re 11330|43850|3851|1|3|5381.55|0.05|0.00|R|F|1992-07-15|1992-05-28|1992-07-28|DELIVER IN PERSON|REG AIR|ly final instructions. pe 11331|119367|1879|1|18|24954.48|0.09|0.01|N|O|1996-03-11|1996-03-15|1996-03-12|COLLECT COD|AIR|ly. carefully even multipliers after the 11332|6817|6818|1|42|72400.02|0.00|0.03|R|F|1994-11-18|1995-01-26|1994-12-02|DELIVER IN PERSON|AIR|nic asymptotes b 11332|157150|2181|2|8|9657.20|0.08|0.00|A|F|1994-12-26|1995-01-28|1995-01-12|TAKE BACK RETURN|REG AIR|y quick requests boost slyly. 11332|12522|5024|3|15|21517.80|0.05|0.06|R|F|1995-03-08|1994-12-28|1995-03-24|TAKE BACK RETURN|MAIL|eans haggle furiously 11332|50726|3232|4|5|8383.60|0.08|0.00|A|F|1995-03-05|1994-12-24|1995-03-11|DELIVER IN PERSON|SHIP|regular pinto bean 11332|135508|8022|5|3|4630.50|0.01|0.01|R|F|1995-01-22|1994-12-25|1995-02-21|COLLECT COD|FOB|ackages according to the finally f 11332|67191|7192|6|45|52118.55|0.00|0.07|R|F|1994-12-02|1994-12-22|1994-12-28|NONE|MAIL|iously brave accounts boost slyly. furiousl 11332|66816|6817|7|38|67746.78|0.04|0.04|R|F|1994-11-24|1995-01-11|1994-12-03|DELIVER IN PERSON|SHIP|oost blithely acco 11333|172255|7290|1|10|13272.50|0.06|0.03|A|F|1994-10-05|1994-08-14|1994-10-10|COLLECT COD|AIR|ily regular instruction 11334|190587|3107|1|43|72135.94|0.01|0.01|N|O|1997-09-23|1997-10-22|1997-10-03|COLLECT COD|FOB|ular ideas wake furiously above 11334|194487|2045|2|35|55351.80|0.09|0.05|N|O|1997-10-08|1997-10-10|1997-10-14|NONE|AIR|er the caref 11334|130911|8451|3|4|7767.64|0.08|0.04|N|O|1997-11-03|1997-09-07|1997-11-11|COLLECT COD|SHIP|eodolites 11334|14036|1540|4|38|36101.14|0.03|0.06|N|O|1997-10-07|1997-09-10|1997-11-04|NONE|TRUCK|dazzle furiously above th 11334|91975|6994|5|33|64910.01|0.10|0.08|N|O|1997-09-18|1997-09-20|1997-10-18|NONE|AIR|as are acro 11334|7265|9766|6|25|29306.50|0.08|0.00|N|O|1997-11-25|1997-09-28|1997-11-30|TAKE BACK RETURN|FOB|shall sleep. 
blithely careful 11334|88498|3515|7|43|63919.07|0.06|0.08|N|O|1997-11-02|1997-10-17|1997-11-17|TAKE BACK RETURN|RAIL| packages. express, unusual accoun 11335|178511|1029|1|4|6358.04|0.01|0.08|R|F|1994-12-16|1994-12-10|1994-12-19|NONE|FOB|ven pinto beans sleep car 11335|25816|8319|2|46|80123.26|0.00|0.02|R|F|1995-01-05|1994-12-26|1995-01-23|COLLECT COD|REG AIR|nal theodoli 11335|83788|1313|3|7|12402.46|0.08|0.05|A|F|1995-01-25|1994-12-19|1995-02-22|NONE|MAIL|ly ironic, regular accou 11335|17375|2378|4|17|21970.29|0.03|0.05|R|F|1995-02-08|1994-12-11|1995-02-23|COLLECT COD|SHIP|ong the special, express fox 11335|11448|1449|5|41|55737.04|0.03|0.03|R|F|1994-12-24|1995-01-15|1995-01-06|COLLECT COD|FOB|pending request 11360|44426|9435|1|3|4111.26|0.01|0.05|N|O|1997-11-09|1997-10-30|1997-12-08|NONE|FOB|the quickly unusu 11360|66016|6017|2|41|40262.41|0.07|0.06|N|O|1997-11-25|1997-11-16|1997-12-13|COLLECT COD|FOB| pending asymptotes. car 11361|51698|1699|1|49|80834.81|0.04|0.01|R|F|1994-10-02|1994-11-23|1994-11-01|TAKE BACK RETURN|RAIL|y ironic deposi 11361|85161|2686|2|33|37823.28|0.02|0.03|R|F|1994-09-29|1994-11-01|1994-10-22|DELIVER IN PERSON|FOB|ts. ironic 11361|88898|1407|3|1|1886.89|0.04|0.03|R|F|1994-11-07|1994-12-17|1994-12-07|COLLECT COD|REG AIR|tes. carefully express platelets pl 11361|98332|8333|4|48|63855.84|0.08|0.03|A|F|1995-01-16|1994-12-06|1995-01-22|TAKE BACK RETURN|MAIL|tions. pending, regular request 11361|54400|4401|5|22|29796.80|0.02|0.00|R|F|1995-01-09|1994-11-07|1995-02-08|NONE|FOB|counts are blithely accordin 11361|85173|190|6|4|4632.68|0.02|0.05|R|F|1994-11-29|1994-11-10|1994-12-15|NONE|MAIL|s sleep asymptotes. caref 11362|180911|912|1|32|63741.12|0.09|0.07|R|F|1992-10-24|1992-11-12|1992-11-04|NONE|REG AIR|eposits cajole carefully according to the 11362|164085|6602|2|10|11490.80|0.02|0.05|R|F|1992-12-19|1992-12-09|1993-01-10|NONE|SHIP|affix. quietly daring packages s 11362|164086|4087|3|45|51753.60|0.09|0.07|R|F|1993-01-03|1992-10-22|1993-01-30|COLLECT COD|MAIL|blithely after the fi 11362|179798|2316|4|50|93889.50|0.00|0.03|A|F|1992-10-23|1992-11-22|1992-11-14|DELIVER IN PERSON|TRUCK|ons after the carefully regular a 11362|55061|72|5|18|18289.08|0.08|0.07|A|F|1992-11-05|1992-12-05|1992-11-16|NONE|RAIL|uickly express 11363|177817|5369|1|44|83371.64|0.03|0.03|N|O|1998-06-07|1998-05-07|1998-06-08|COLLECT COD|FOB|od careful 11363|160279|280|2|9|12053.43|0.03|0.04|N|O|1998-03-12|1998-04-15|1998-03-20|TAKE BACK RETURN|REG AIR|foxes affix slyly special, bol 11363|7051|4552|3|20|19161.00|0.03|0.03|N|O|1998-03-15|1998-04-23|1998-03-21|TAKE BACK RETURN|AIR|ents alongside of th 11363|95856|875|4|29|53703.65|0.02|0.07|N|O|1998-03-22|1998-03-20|1998-04-12|COLLECT COD|FOB|nal, even theodolite 11364|39858|4865|1|24|43148.40|0.08|0.08|N|O|1997-04-22|1997-05-25|1997-05-20|NONE|FOB|s above the furiously 11364|85887|3412|2|44|82406.72|0.04|0.03|N|O|1997-06-12|1997-05-16|1997-06-27|NONE|SHIP|kly ironic deposits a 11365|48185|690|1|31|35128.58|0.00|0.04|N|O|1997-12-28|1997-11-03|1998-01-13|COLLECT COD|MAIL|ven depende 11365|40661|8174|2|31|49651.46|0.00|0.00|N|O|1997-10-21|1997-10-27|1997-11-08|TAKE BACK RETURN|REG AIR|ss packages? sp 11365|185504|8023|3|27|42916.50|0.06|0.07|N|O|1997-11-24|1997-10-22|1997-12-14|NONE|MAIL| are. dolp 11365|150401|7947|4|17|24673.80|0.09|0.06|N|O|1997-10-30|1997-11-13|1997-11-20|COLLECT COD|AIR| have to cajole blithely? 
blithely unu 11365|179909|4944|5|32|63644.80|0.02|0.05|N|O|1997-12-28|1997-10-09|1998-01-22|DELIVER IN PERSON|AIR| accounts mold blithely about th 11366|151079|3595|1|9|10170.63|0.01|0.01|R|F|1992-08-08|1992-07-22|1992-08-30|DELIVER IN PERSON|FOB|counts across the 11367|107563|7564|1|19|29840.64|0.06|0.04|A|F|1995-04-13|1995-01-24|1995-04-19|TAKE BACK RETURN|RAIL|ges wake around the ev 11392|33795|8802|1|46|79524.34|0.00|0.03|R|F|1994-12-02|1994-11-09|1994-12-24|TAKE BACK RETURN|SHIP|y ironic re 11392|189062|6617|2|46|52948.76|0.09|0.02|A|F|1994-12-11|1994-10-15|1994-12-25|TAKE BACK RETURN|SHIP|ggle slyly after the ironic packages. final 11393|189941|7496|1|49|99516.06|0.08|0.01|A|F|1992-05-08|1992-05-26|1992-05-26|NONE|TRUCK|tions wake courts. carefully even 11393|127972|485|2|48|95998.56|0.06|0.06|A|F|1992-04-12|1992-06-13|1992-05-02|NONE|AIR|cording to the blithely bold 11394|128173|3198|1|35|42040.95|0.04|0.06|N|O|1998-04-13|1998-04-28|1998-04-28|DELIVER IN PERSON|FOB|ly regular fo 11394|188545|1064|2|8|13068.32|0.09|0.01|N|O|1998-04-16|1998-04-17|1998-05-12|NONE|TRUCK|e regular, special foxes. 11394|68401|5920|3|32|43820.80|0.02|0.01|N|O|1998-06-06|1998-04-20|1998-06-25|TAKE BACK RETURN|RAIL|heodolites. blithely 11394|699|8200|4|50|79984.50|0.06|0.06|N|O|1998-04-05|1998-04-06|1998-05-04|COLLECT COD|FOB|encies. express accounts sleep blithely; sl 11394|135460|7974|5|25|37386.50|0.01|0.00|N|O|1998-06-14|1998-04-20|1998-06-27|COLLECT COD|REG AIR|ld foxes nag. quickly ironic 11395|32137|2138|1|24|25659.12|0.02|0.03|N|O|1997-10-28|1997-10-16|1997-10-31|DELIVER IN PERSON|AIR| regular, enticing accounts breach ab 11395|112300|9834|2|7|9186.10|0.04|0.08|N|O|1997-09-25|1997-10-26|1997-10-23|TAKE BACK RETURN|AIR|lar deposit 11395|20046|2549|3|9|8694.36|0.06|0.05|N|O|1997-09-13|1997-10-01|1997-10-13|COLLECT COD|TRUCK|egular packages. packages accordi 11396|67948|5467|1|15|28739.10|0.09|0.03|R|F|1992-09-16|1992-10-05|1992-09-19|COLLECT COD|REG AIR|lly even instructions sleep daringl 11396|199457|9458|2|15|23346.75|0.03|0.02|A|F|1992-09-14|1992-08-25|1992-09-17|DELIVER IN PERSON|TRUCK|according to the slyly even pa 11396|58100|8101|3|38|40207.80|0.05|0.07|A|F|1992-10-21|1992-09-26|1992-10-23|DELIVER IN PERSON|REG AIR|odolites believe quickly. bli 11397|51626|9142|1|41|64682.42|0.09|0.05|N|O|1996-11-19|1996-12-24|1996-12-06|NONE|AIR| ironic pa 11398|21246|1247|1|18|21010.32|0.06|0.08|A|F|1992-09-21|1992-10-04|1992-10-05|DELIVER IN PERSON|RAIL|en dolphins. slyly fina 11398|77243|4765|2|17|20744.08|0.09|0.07|R|F|1992-09-30|1992-11-05|1992-10-08|COLLECT COD|FOB| accounts haggle f 11398|20106|7613|3|46|47200.60|0.10|0.05|R|F|1992-08-14|1992-10-12|1992-09-08|COLLECT COD|AIR|thily regular plat 11398|113048|3049|4|35|37136.40|0.07|0.01|R|F|1992-08-30|1992-10-30|1992-09-13|TAKE BACK RETURN|RAIL|aggle quickly alongside of the 11398|184413|6932|5|39|58398.99|0.01|0.07|A|F|1992-09-06|1992-10-24|1992-09-12|TAKE BACK RETURN|RAIL|ts doubt furiously special pa 11398|28748|8749|6|49|82160.26|0.10|0.07|A|F|1992-09-12|1992-11-03|1992-10-05|NONE|RAIL|packages use furiously clos 11399|77514|5036|1|23|34304.73|0.03|0.06|R|F|1994-07-11|1994-06-02|1994-07-16|TAKE BACK RETURN|SHIP|ses kindle quickly. slyly pe 11424|167541|2574|1|37|59515.98|0.03|0.04|A|F|1993-06-30|1993-05-03|1993-07-12|NONE|FOB|ar instructions boost slyly abo 11425|195170|5171|1|15|18977.55|0.08|0.05|N|O|1995-09-14|1995-09-09|1995-10-11|DELIVER IN PERSON|SHIP|slyly ironic instructions. 
careful 11425|5868|5869|2|4|7095.44|0.05|0.06|N|O|1995-09-19|1995-07-31|1995-10-10|COLLECT COD|REG AIR|e carefully silent foxes. quickly final 11425|14094|4095|3|44|44355.96|0.02|0.07|N|O|1995-08-04|1995-07-31|1995-08-31|NONE|SHIP|gle. slyly pe 11425|68292|8293|4|8|10082.32|0.02|0.07|N|O|1995-09-18|1995-08-08|1995-09-21|TAKE BACK RETURN|REG AIR|ily even foxes about the 11425|178030|5582|5|5|5540.15|0.04|0.01|N|O|1995-09-01|1995-08-30|1995-09-27|NONE|TRUCK|y. theodolites 11425|136747|1774|6|44|78484.56|0.00|0.00|N|O|1995-07-01|1995-08-26|1995-07-29|DELIVER IN PERSON|AIR| courts cajole blithely according 11426|46935|9440|1|8|15055.44|0.03|0.05|A|F|1994-06-18|1994-07-24|1994-06-28|DELIVER IN PERSON|FOB|y after the special packages. furious 11426|29197|1700|2|32|36038.08|0.03|0.00|R|F|1994-05-22|1994-07-08|1994-06-19|DELIVER IN PERSON|REG AIR|ly final packages. quickly final asymptote 11426|101523|1524|3|20|30490.40|0.06|0.03|A|F|1994-07-19|1994-07-27|1994-08-02|NONE|TRUCK|ents wake accordin 11426|84056|6565|4|13|13520.65|0.10|0.08|A|F|1994-05-24|1994-07-02|1994-06-10|COLLECT COD|TRUCK|usly. blithely re 11426|128624|1137|5|6|9915.72|0.07|0.00|R|F|1994-08-04|1994-07-22|1994-08-29|NONE|RAIL|slyly requests. pendi 11427|4066|9067|1|24|23281.44|0.05|0.03|R|F|1994-03-03|1994-01-27|1994-03-24|DELIVER IN PERSON|AIR|e furiously. final, regular sauternes 11427|124007|6520|2|14|14434.00|0.08|0.04|R|F|1994-03-08|1994-02-13|1994-03-22|TAKE BACK RETURN|MAIL|gside of th 11427|175738|3290|3|34|61666.82|0.01|0.07|A|F|1994-03-05|1994-02-08|1994-03-17|TAKE BACK RETURN|REG AIR| the blithely silent ideas. c 11427|21509|4012|4|50|71525.00|0.09|0.04|A|F|1994-01-18|1993-12-25|1994-02-02|NONE|TRUCK|nts through the slyly unusual pinto 11427|93709|8728|5|20|34054.00|0.05|0.07|R|F|1994-03-06|1994-02-19|1994-03-29|NONE|MAIL| ironic packages. carefull 11427|156874|4420|6|42|81096.54|0.01|0.03|R|F|1994-03-13|1994-02-07|1994-03-25|COLLECT COD|RAIL|y. carefully unusual 11427|178873|3908|7|35|68315.45|0.09|0.05|A|F|1993-12-14|1994-01-23|1994-01-11|NONE|REG AIR|usual packages 11428|114029|9052|1|7|7301.14|0.08|0.05|R|F|1992-04-06|1992-04-19|1992-04-28|COLLECT COD|SHIP|e quickly even packages. furious 11429|53688|6194|1|30|49250.40|0.10|0.05|A|F|1993-04-10|1993-04-20|1993-04-14|COLLECT COD|AIR|. blithely speci 11429|126706|9219|2|28|48515.60|0.06|0.01|R|F|1993-04-06|1993-05-11|1993-04-26|COLLECT COD|REG AIR|gular dependencies above 11429|139372|6912|3|32|45163.84|0.02|0.05|R|F|1993-05-04|1993-06-02|1993-05-07|DELIVER IN PERSON|AIR|even instructions. regular deposits ca 11429|10030|2532|4|33|31020.99|0.06|0.00|R|F|1993-03-21|1993-05-06|1993-03-29|NONE|MAIL|packages boost furious 11430|36371|8875|1|3|3922.11|0.04|0.00|N|O|1998-02-04|1998-03-21|1998-02-05|TAKE BACK RETURN|AIR|y slyly bold asymp 11431|62112|2113|1|11|11815.21|0.04|0.03|R|F|1992-08-08|1992-10-18|1992-08-30|COLLECT COD|RAIL|ins cajole furiously pending ide 11431|102126|2127|2|28|31587.36|0.06|0.08|R|F|1992-09-24|1992-10-07|1992-10-20|TAKE BACK RETURN|REG AIR|furiously slyly final p 11431|19499|9500|3|44|62413.56|0.01|0.08|R|F|1992-09-16|1992-10-14|1992-10-05|TAKE BACK RETURN|SHIP|le furiously above the 11431|110090|2602|4|16|17601.44|0.05|0.04|R|F|1992-08-19|1992-10-14|1992-08-21|NONE|FOB|riously. 
bold asympt 11431|79064|6586|5|22|22947.32|0.06|0.02|R|F|1992-08-13|1992-10-14|1992-08-26|COLLECT COD|TRUCK|s sleep bravely ca 11431|20873|5878|6|50|89693.50|0.05|0.07|R|F|1992-08-12|1992-09-16|1992-08-15|TAKE BACK RETURN|TRUCK|accounts are fl 11431|8563|3564|7|28|41203.68|0.05|0.07|R|F|1992-09-28|1992-09-02|1992-09-30|NONE|SHIP|e carefully above the bo 11456|103167|3168|1|24|28083.84|0.04|0.07|A|F|1993-06-13|1993-06-28|1993-06-28|DELIVER IN PERSON|MAIL| haggle blithely around the bravely regular 11456|122613|2614|2|6|9813.66|0.04|0.03|A|F|1993-06-20|1993-07-12|1993-06-21|DELIVER IN PERSON|AIR|ly even packages grow. furiou 11456|155167|198|3|5|6110.80|0.03|0.08|A|F|1993-06-20|1993-07-12|1993-06-26|NONE|AIR|oss the regular deposits cajole slyly o 11456|80812|5829|4|10|17928.10|0.10|0.05|R|F|1993-06-13|1993-05-23|1993-06-18|DELIVER IN PERSON|REG AIR| after the carefu 11457|38383|3390|1|22|29070.36|0.07|0.04|N|O|1996-02-17|1996-02-11|1996-03-14|TAKE BACK RETURN|FOB|ole quickly. bold, ironic pinto beans wake 11457|87729|238|2|12|20600.64|0.01|0.05|N|O|1996-03-21|1996-03-12|1996-04-20|COLLECT COD|TRUCK|ckly even i 11457|54843|2359|3|34|61126.56|0.08|0.04|N|O|1996-02-07|1996-02-23|1996-02-25|COLLECT COD|AIR|ly. even deposits are final acc 11457|139331|9332|4|14|19184.62|0.10|0.08|N|O|1996-02-26|1996-01-14|1996-03-25|NONE|TRUCK|unts boost blithely sly accou 11457|126458|8971|5|37|54924.65|0.06|0.03|N|O|1995-12-17|1996-01-24|1996-01-03|COLLECT COD|MAIL|ongside of the 11457|171830|9382|6|50|95091.50|0.10|0.05|N|O|1996-01-13|1996-02-04|1996-02-04|DELIVER IN PERSON|SHIP|thely regular courts sleep furiou 11458|137561|5101|1|8|12788.48|0.08|0.07|N|O|1998-05-04|1998-06-10|1998-05-25|NONE|MAIL|ake after the carefully silent pinto b 11458|173767|6285|2|38|69948.88|0.08|0.04|N|O|1998-05-29|1998-05-05|1998-06-20|TAKE BACK RETURN|RAIL| instructions. platelets de 11458|95191|7701|3|22|26096.18|0.09|0.03|N|O|1998-07-11|1998-05-18|1998-07-13|TAKE BACK RETURN|FOB| ideas. care 11458|69651|4664|4|36|58343.40|0.02|0.05|N|O|1998-04-11|1998-06-26|1998-04-21|NONE|AIR|ic deposits. blithely thin platelets 11458|14101|9104|5|30|30453.00|0.08|0.06|N|O|1998-05-31|1998-06-02|1998-06-11|COLLECT COD|SHIP| accounts integrate slyly about 11458|188197|5752|6|2|2570.38|0.05|0.04|N|O|1998-07-13|1998-06-05|1998-08-05|TAKE BACK RETURN|AIR|kages! sil 11459|63646|3647|1|9|14486.76|0.10|0.00|N|O|1996-09-10|1996-10-05|1996-10-10|COLLECT COD|AIR|dependencies affix furiously 11459|93459|5969|2|46|66812.70|0.01|0.04|N|O|1996-10-17|1996-08-29|1996-11-06|TAKE BACK RETURN|REG AIR| carefully above the blithely regular pin 11459|160673|5706|3|16|27738.72|0.01|0.00|N|O|1996-10-08|1996-09-12|1996-10-26|TAKE BACK RETURN|SHIP|ly final pinto beans 11459|15307|2811|4|34|41558.20|0.06|0.04|N|O|1996-07-25|1996-08-18|1996-07-29|TAKE BACK RETURN|MAIL|bout the flu 11459|53270|8281|5|39|47707.53|0.05|0.07|N|O|1996-09-07|1996-09-25|1996-09-18|COLLECT COD|FOB|s. slyly f 11459|98838|6366|6|5|9184.15|0.09|0.02|N|O|1996-11-04|1996-10-09|1996-11-23|DELIVER IN PERSON|REG AIR|tithes. silent deposits boost along 11459|157139|7140|7|5|5980.65|0.08|0.03|N|O|1996-10-12|1996-09-29|1996-11-02|COLLECT COD|REG AIR|egular pinto beans. regular theodolites sn 11460|32043|7050|1|6|5850.24|0.07|0.06|A|F|1993-11-14|1993-10-31|1993-11-25|DELIVER IN PERSON|AIR|packages. blithely pending ideas i 11460|136716|9230|2|32|56086.72|0.10|0.08|A|F|1993-09-21|1993-10-23|1993-09-28|COLLECT COD|SHIP|s the even, regular accounts. 
11460|174536|9571|3|3|4831.59|0.04|0.01|A|F|1993-12-24|1993-11-03|1993-12-25|NONE|AIR|ing foxes are. bold foxes eat carefull 11461|168128|645|1|16|19137.92|0.05|0.08|N|O|1996-06-29|1996-07-13|1996-07-16|TAKE BACK RETURN|TRUCK|nding packages according to the f 11461|119705|9706|2|23|39668.10|0.10|0.03|N|O|1996-05-16|1996-07-19|1996-05-28|COLLECT COD|RAIL|he carefully silent 11461|71848|9370|3|19|34576.96|0.01|0.01|N|O|1996-08-08|1996-06-09|1996-08-25|TAKE BACK RETURN|FOB|integrate blithely final, eve 11462|84222|6731|1|8|9649.76|0.03|0.02|N|O|1996-09-07|1996-10-28|1996-09-14|NONE|REG AIR|structions was stealthily. furiously quick 11462|115697|3231|2|14|23977.66|0.05|0.07|N|O|1996-09-23|1996-11-16|1996-10-22|NONE|AIR|es sleep furiously acros 11462|118718|8719|3|44|76415.24|0.04|0.08|N|O|1996-11-17|1996-11-23|1996-12-09|TAKE BACK RETURN|MAIL| requests acco 11462|84011|6520|4|5|4975.05|0.04|0.05|N|O|1996-11-26|1996-09-28|1996-12-24|NONE|TRUCK|oxes. furious 11462|170225|2743|5|9|11656.98|0.00|0.02|N|O|1996-12-01|1996-11-01|1996-12-28|COLLECT COD|RAIL|ounts-- blithely ironic depos 11462|169777|9778|6|39|72024.03|0.05|0.05|N|O|1996-09-03|1996-11-10|1996-09-27|DELIVER IN PERSON|REG AIR|riously. regular ideas after the slyly 11463|30332|333|1|17|21459.61|0.03|0.03|N|O|1998-04-02|1998-03-13|1998-04-29|COLLECT COD|MAIL|nts. blithely 11463|180400|2919|2|42|62176.80|0.03|0.00|N|O|1998-04-19|1998-03-08|1998-05-13|NONE|RAIL|l deposits! carefully ironic foxes 11463|173705|3706|3|48|85377.60|0.07|0.03|N|O|1998-03-15|1998-02-09|1998-04-13|TAKE BACK RETURN|AIR|ages sleep against the blith 11463|57064|7065|4|8|8168.48|0.06|0.03|N|O|1998-03-20|1998-03-12|1998-04-03|NONE|TRUCK|s. finally special ideas are blithely. 11463|66050|6051|5|5|5080.25|0.04|0.06|N|O|1998-03-21|1998-02-11|1998-04-13|COLLECT COD|MAIL|inal, unusual ideas cajole furiously regu 11463|97260|4788|6|25|31431.50|0.02|0.08|N|O|1998-04-16|1998-01-30|1998-04-23|DELIVER IN PERSON|AIR|t requests. final instructions hang acc 11463|33178|5682|7|19|21112.23|0.00|0.04|N|O|1998-04-14|1998-02-14|1998-05-08|NONE|REG AIR|- even pinto beans cajole after the blithel 11488|12214|2215|1|28|31533.88|0.09|0.06|R|F|1993-11-10|1993-10-16|1993-11-24|TAKE BACK RETURN|AIR|ccounts. ruthlessly even g 11488|66168|6169|2|45|51037.20|0.09|0.02|R|F|1993-09-27|1993-11-21|1993-10-22|TAKE BACK RETURN|REG AIR|ffily bold deposits. fluffily thin p 11489|76350|8858|1|49|64991.15|0.03|0.07|N|O|1996-12-05|1996-12-10|1996-12-26|NONE|REG AIR|gular dependencies detect dolphins. slyly 11489|199901|7459|2|19|38017.10|0.06|0.01|N|O|1996-12-24|1996-12-09|1997-01-15|DELIVER IN PERSON|RAIL|usual theodolites. unu 11490|126180|1205|1|42|50659.56|0.05|0.01|N|O|1996-11-02|1996-11-28|1996-11-23|NONE|SHIP|roughout the dependencies. furiously 11490|92901|429|2|10|18939.00|0.02|0.08|N|O|1996-12-10|1996-12-01|1996-12-25|DELIVER IN PERSON|REG AIR|s above the grouc 11491|185472|5473|1|10|15574.70|0.04|0.01|A|F|1993-10-30|1993-11-07|1993-10-31|DELIVER IN PERSON|FOB| accounts n 11491|19681|7185|2|45|72030.60|0.10|0.05|R|F|1993-12-03|1993-09-29|1993-12-19|COLLECT COD|TRUCK| somas-- pending, regular instruct 11491|165727|760|3|29|51988.88|0.09|0.05|R|F|1993-10-28|1993-09-30|1993-11-19|DELIVER IN PERSON|RAIL|regular excuses. furiousl 11491|167228|4777|4|48|62170.56|0.05|0.03|R|F|1993-09-26|1993-10-31|1993-10-26|TAKE BACK RETURN|RAIL|y pending warthogs! 
blit 11491|197849|369|5|16|31149.44|0.00|0.04|A|F|1993-11-25|1993-10-08|1993-12-25|COLLECT COD|SHIP|ely even dependencies use 11491|45729|5730|6|18|30144.96|0.02|0.08|R|F|1993-12-10|1993-10-12|1994-01-03|TAKE BACK RETURN|FOB|the pinto bea 11491|163242|5759|7|3|3915.72|0.02|0.03|R|F|1993-09-13|1993-10-31|1993-09-23|NONE|REG AIR|uriously regular packages. accoun 11492|7397|9898|1|20|26087.80|0.02|0.06|N|O|1997-05-30|1997-05-22|1997-05-31|DELIVER IN PERSON|REG AIR|accounts haggle. c 11492|109404|6935|2|46|65016.40|0.00|0.00|N|O|1997-06-13|1997-05-05|1997-07-05|NONE|AIR|ackages eat blithely 11492|146036|1065|3|48|51937.44|0.08|0.02|N|O|1997-04-15|1997-04-15|1997-05-08|DELIVER IN PERSON|TRUCK| the ironic sauternes wake requests. acco 11492|10427|428|4|14|18723.88|0.05|0.03|N|O|1997-04-05|1997-05-02|1997-04-27|NONE|RAIL|y blithe realms 11493|158267|783|1|9|11927.34|0.09|0.02|N|O|1995-12-02|1996-01-04|1995-12-08|COLLECT COD|AIR|lly special theodolite 11493|188641|8642|2|28|48429.92|0.04|0.07|N|O|1995-10-26|1995-12-29|1995-11-14|DELIVER IN PERSON|RAIL|final deposits. regular, pending inst 11494|81614|1615|1|13|20742.93|0.05|0.00|N|O|1997-10-03|1997-10-15|1997-10-17|COLLECT COD|RAIL|the bold requests-- ironic deposit 11494|76221|3743|2|2|2394.44|0.07|0.02|N|O|1997-08-07|1997-10-17|1997-08-25|TAKE BACK RETURN|TRUCK|ake furiously sp 11494|40901|8414|3|15|27628.50|0.00|0.03|N|O|1997-08-08|1997-09-05|1997-08-15|TAKE BACK RETURN|RAIL|ng platelets. even pac 11494|22409|7414|4|40|53256.00|0.01|0.03|N|O|1997-10-09|1997-09-11|1997-10-23|TAKE BACK RETURN|MAIL| carefully regular deposits haggle blithel 11495|198614|8615|1|15|25689.15|0.05|0.08|N|O|1995-09-26|1995-09-23|1995-10-26|TAKE BACK RETURN|TRUCK|ronic accounts integrate. furiously regular 11495|94853|9872|2|8|14782.80|0.06|0.07|N|O|1995-08-19|1995-09-21|1995-09-02|TAKE BACK RETURN|RAIL|ously regular deposits. furiously bold in 11520|30859|3363|1|45|80543.25|0.10|0.00|A|F|1994-12-11|1994-11-03|1995-01-08|NONE|MAIL|aggle. ruthlessly ironic pac 11520|47003|4516|2|47|44650.00|0.07|0.05|A|F|1995-01-01|1994-10-30|1995-01-23|COLLECT COD|AIR|express ideas are quickly. bold, bold p 11521|116548|6549|1|8|12516.32|0.03|0.05|N|O|1996-09-13|1996-10-26|1996-09-26|COLLECT COD|SHIP|gular foxes 11521|8014|3015|2|12|11064.12|0.09|0.03|N|O|1997-01-03|1996-11-04|1997-01-11|TAKE BACK RETURN|FOB| among the slyly regula 11522|104376|4377|1|16|22085.92|0.03|0.05|A|F|1993-10-02|1993-11-02|1993-10-14|COLLECT COD|RAIL|onic courts. bold dolphins sle 11522|34706|7210|2|5|8203.50|0.03|0.00|A|F|1993-10-10|1993-11-13|1993-10-19|DELIVER IN PERSON|AIR| foxes. express packages 11523|31447|1448|1|31|42731.64|0.08|0.02|N|O|1998-11-22|1998-09-13|1998-12-11|TAKE BACK RETURN|AIR|deposits haggle about t 11523|113552|6064|2|21|32876.55|0.03|0.06|N|O|1998-08-06|1998-09-09|1998-08-13|DELIVER IN PERSON|AIR| pinto beans 11523|60486|8005|3|15|21697.20|0.03|0.03|N|O|1998-09-20|1998-08-30|1998-10-14|TAKE BACK RETURN|MAIL| regular platelets. furiously ironic fox 11523|173759|1311|4|48|87972.00|0.08|0.05|N|O|1998-08-18|1998-10-11|1998-09-09|NONE|SHIP|ecial instructions poach permanently across 11523|161651|4168|5|19|32540.35|0.07|0.00|N|O|1998-11-17|1998-09-01|1998-11-27|COLLECT COD|SHIP|the enticingly regular 11523|66679|4198|6|17|27976.39|0.04|0.05|N|O|1998-07-28|1998-10-08|1998-08-08|DELIVER IN PERSON|MAIL|affix slyly among the reg 11524|132726|2727|1|6|10552.32|0.02|0.03|N|O|1996-02-23|1996-03-06|1996-03-16|TAKE BACK RETURN|SHIP|kly after the final ideas. 
fluffily reg 11525|79590|7112|1|33|51796.47|0.08|0.07|N|O|1997-03-21|1997-05-15|1997-03-24|TAKE BACK RETURN|MAIL|. finally express excuses around the q 11525|59466|6982|2|32|45614.72|0.00|0.02|N|O|1997-06-16|1997-05-19|1997-07-03|DELIVER IN PERSON|TRUCK|are furiously ironic accounts. fluffily exp 11525|36535|9039|3|5|7357.65|0.03|0.07|N|O|1997-07-12|1997-05-23|1997-08-07|NONE|TRUCK|tions. quickly final p 11525|39708|4715|4|46|75794.20|0.03|0.01|N|O|1997-05-10|1997-06-18|1997-05-23|TAKE BACK RETURN|SHIP|es haggle car 11526|133223|8250|1|12|15074.64|0.08|0.08|N|O|1998-05-26|1998-04-30|1998-06-12|DELIVER IN PERSON|REG AIR|le final, final 11526|73925|3926|2|30|56967.60|0.01|0.08|N|O|1998-06-12|1998-05-03|1998-06-26|DELIVER IN PERSON|SHIP|uriously pending pearls are slyl 11526|52398|2399|3|39|52665.21|0.00|0.02|N|O|1998-05-29|1998-05-05|1998-06-11|DELIVER IN PERSON|RAIL| above the ironically spec 11527|20901|8408|1|47|85629.30|0.10|0.00|A|F|1994-10-24|1994-09-03|1994-11-16|NONE|TRUCK|sual packages haggle. b 11552|137065|4605|1|28|30857.68|0.08|0.06|R|F|1993-03-08|1993-03-03|1993-03-31|COLLECT COD|MAIL| are furiously according to the quickly iro 11553|24558|7061|1|49|72644.95|0.00|0.01|A|F|1994-08-03|1994-06-16|1994-08-23|COLLECT COD|TRUCK|lphins integrate across the ironic 11553|132507|5021|2|21|32329.50|0.02|0.07|R|F|1994-07-09|1994-06-14|1994-07-16|COLLECT COD|FOB|inal theodolites. furiously ironic 11553|37116|9620|3|49|51602.39|0.10|0.02|R|F|1994-04-24|1994-06-21|1994-05-18|NONE|FOB|lly according to the fur 11553|7144|4645|4|41|43096.74|0.02|0.03|R|F|1994-07-09|1994-05-28|1994-08-06|NONE|REG AIR|bold theodoli 11553|59864|4875|5|18|32829.48|0.05|0.03|R|F|1994-06-18|1994-06-09|1994-06-26|NONE|SHIP|ing packages. slyly regular dinos wake quic 11554|76343|6344|1|49|64647.66|0.03|0.03|N|O|1998-07-24|1998-07-14|1998-08-11|COLLECT COD|TRUCK| platelets use blithely 11554|85601|3126|2|27|42838.20|0.06|0.01|N|O|1998-07-13|1998-07-09|1998-07-20|TAKE BACK RETURN|RAIL|gainst the furiously regular accounts. si 11554|95556|8066|3|1|1551.55|0.01|0.04|N|O|1998-07-26|1998-08-06|1998-08-13|DELIVER IN PERSON|TRUCK|. ideas sublate slyly f 11554|4280|6781|4|50|59214.00|0.06|0.07|N|O|1998-07-08|1998-07-20|1998-07-10|DELIVER IN PERSON|SHIP|nic, quiet requests may boost! 11554|125497|522|5|27|41107.23|0.02|0.07|N|O|1998-09-24|1998-07-11|1998-10-23|NONE|REG AIR|alongside of the slyly 11554|19552|7056|6|40|58862.00|0.09|0.08|N|O|1998-08-20|1998-08-01|1998-09-13|NONE|TRUCK|efully. ironic 11554|193924|8963|7|10|20179.20|0.02|0.00|N|O|1998-09-15|1998-07-12|1998-10-08|TAKE BACK RETURN|AIR|quests doubt along the permane 11555|67787|7788|1|42|73700.76|0.01|0.05|N|O|1996-07-06|1996-07-20|1996-07-12|COLLECT COD|MAIL|kages use carefully; bold i 11555|81030|3539|2|34|34375.02|0.04|0.03|N|O|1996-07-10|1996-07-10|1996-07-13|DELIVER IN PERSON|REG AIR|ing to the slyly final pains. blith 11555|59691|7207|3|16|26411.04|0.04|0.01|N|O|1996-06-10|1996-08-22|1996-06-24|TAKE BACK RETURN|SHIP|l packages according to t 11555|13842|3843|4|47|82524.48|0.09|0.03|N|O|1996-09-05|1996-08-13|1996-10-01|NONE|SHIP|aggle. unusual requests about the 11555|27317|4824|5|40|49772.40|0.03|0.03|N|O|1996-09-08|1996-06-27|1996-09-15|COLLECT COD|TRUCK|ual excuses. quickly regula 11556|80541|5558|1|34|51732.36|0.09|0.06|N|O|1996-03-06|1996-04-07|1996-03-23|DELIVER IN PERSON|FOB| even accounts. regular accounts hagg 11556|44578|2091|2|32|48722.24|0.05|0.04|N|O|1996-04-16|1996-05-16|1996-04-17|NONE|SHIP|nos. 
slow waters wake blithely 11556|12224|7227|3|21|23860.62|0.05|0.03|N|O|1996-04-07|1996-04-21|1996-04-27|TAKE BACK RETURN|MAIL|y blithely special dependencies. f 11556|33163|3164|4|13|14250.08|0.09|0.02|N|O|1996-05-22|1996-05-26|1996-06-13|DELIVER IN PERSON|AIR| pinto beans wake furiousl 11556|170459|5494|5|15|22941.75|0.04|0.06|N|O|1996-07-02|1996-05-08|1996-07-25|COLLECT COD|TRUCK|uses sleep blithely. furiously ir 11556|90199|200|6|39|46378.41|0.04|0.00|N|O|1996-06-30|1996-04-05|1996-07-22|TAKE BACK RETURN|AIR| ironic, bold accounts snooze carefully. 11556|172469|4987|7|7|10790.22|0.03|0.03|N|O|1996-04-26|1996-05-08|1996-05-14|TAKE BACK RETURN|MAIL| bold account 11557|75231|5232|1|2|2412.46|0.09|0.02|N|O|1997-06-15|1997-05-24|1997-07-04|NONE|MAIL|otes across the carefully regular instruct 11557|6004|3505|2|35|31850.00|0.04|0.07|N|O|1997-07-28|1997-05-22|1997-08-04|DELIVER IN PERSON|REG AIR|. furiously special 11557|95553|5554|3|1|1548.55|0.08|0.05|N|O|1997-08-05|1997-05-22|1997-08-19|NONE|RAIL|. asymptotes 11558|64174|1693|1|9|10243.53|0.04|0.05|A|F|1994-05-02|1994-06-09|1994-05-31|DELIVER IN PERSON|TRUCK|arefully even foxes cajole furiously ac 11558|167634|2667|2|21|35734.23|0.03|0.04|A|F|1994-05-05|1994-05-04|1994-05-24|DELIVER IN PERSON|REG AIR|ts. regular re 11559|47339|4852|1|42|54025.86|0.08|0.02|N|O|1996-01-12|1995-12-03|1996-01-14|DELIVER IN PERSON|FOB|y daring asymptotes. regular 11584|41214|1215|1|39|45053.19|0.01|0.01|A|F|1994-12-22|1994-12-24|1994-12-29|COLLECT COD|FOB|g the slyly pendin 11584|73295|3296|2|46|58341.34|0.01|0.08|R|F|1994-12-07|1995-01-04|1995-01-01|COLLECT COD|AIR|ully above the 11584|68647|1154|3|25|40391.00|0.01|0.06|A|F|1995-01-19|1995-01-09|1995-01-20|TAKE BACK RETURN|SHIP|y express packages haggle. final, iron 11584|137281|9795|4|32|42184.96|0.09|0.02|A|F|1995-02-18|1994-12-25|1995-03-07|COLLECT COD|SHIP|hins cajole unusual asymptotes. blithely 11584|70464|7986|5|22|31558.12|0.03|0.07|R|F|1995-01-13|1995-01-15|1995-02-08|NONE|TRUCK| even deposits? final requests belie 11584|72180|7195|6|45|51848.10|0.10|0.03|A|F|1994-12-23|1994-12-24|1995-01-05|DELIVER IN PERSON|REG AIR|e carefully regular deposits. fluffily 11584|117646|158|7|32|53236.48|0.00|0.01|R|F|1994-12-21|1995-01-14|1994-12-31|NONE|SHIP|unts. carefully bold pains impress. s 11585|65421|7928|1|28|38819.76|0.02|0.02|R|F|1994-05-20|1994-04-17|1994-06-16|DELIVER IN PERSON|MAIL|thely at the blithely ironic packages. 11585|120271|7808|2|1|1291.27|0.08|0.05|R|F|1994-04-04|1994-05-30|1994-04-18|DELIVER IN PERSON|REG AIR|e. carefully regular 11585|179088|1606|3|9|10503.72|0.04|0.03|R|F|1994-06-13|1994-05-29|1994-06-17|DELIVER IN PERSON|FOB|lly regular requests. carefully expre 11586|178790|3825|1|12|22425.48|0.06|0.07|R|F|1993-12-29|1994-01-23|1994-01-24|NONE|FOB|are regular, final deposits. slyly bold re 11586|113545|1079|2|4|6234.16|0.04|0.00|A|F|1993-11-27|1994-01-30|1993-12-24|COLLECT COD|REG AIR|sits cajole slyly. quickly 11587|170022|7574|1|22|24024.44|0.10|0.06|N|O|1998-06-16|1998-05-31|1998-07-09|NONE|SHIP|lyly unusual foxes above the slyly 11587|150020|5051|2|33|35310.66|0.09|0.00|N|O|1998-06-03|1998-04-12|1998-06-09|COLLECT COD|SHIP|ly ironic accounts. 
slyly regular instr 11587|174801|2353|3|44|82535.20|0.03|0.05|N|O|1998-05-05|1998-05-27|1998-05-06|COLLECT COD|RAIL|the carefully express pinto bea 11588|170453|8005|1|33|50273.85|0.00|0.05|N|O|1998-01-14|1997-12-06|1998-01-28|COLLECT COD|SHIP|r packages are above the blithely s 11588|186857|6858|2|18|34989.30|0.06|0.05|N|O|1998-01-16|1997-12-10|1998-01-24|NONE|AIR|regular foxes cajole 11588|94899|9918|3|9|17045.01|0.06|0.04|N|O|1997-11-15|1997-12-22|1997-12-04|COLLECT COD|FOB|r deposits snooze blithely. sl 11588|160315|5348|4|35|48135.85|0.08|0.08|N|O|1998-02-14|1997-12-16|1998-03-14|NONE|MAIL|sly final dol 11588|6202|8703|5|35|38787.00|0.09|0.04|N|O|1998-01-26|1998-01-16|1998-02-18|TAKE BACK RETURN|TRUCK|posits are; carefully final ideas nag even 11589|85088|7597|1|21|22534.68|0.07|0.04|A|F|1992-06-03|1992-07-02|1992-07-01|TAKE BACK RETURN|TRUCK|nusual instructions. reg 11589|68516|1023|2|45|66802.95|0.02|0.02|A|F|1992-06-09|1992-07-08|1992-06-21|COLLECT COD|AIR|es. final, regul 11590|2989|2990|1|43|81355.14|0.01|0.00|N|O|1996-06-23|1996-06-18|1996-07-07|COLLECT COD|FOB|s cajole packages. 11590|31055|3559|2|47|46344.35|0.03|0.06|N|O|1996-07-26|1996-06-12|1996-08-25|NONE|TRUCK| ideas wake regular, regular theodoli 11590|132643|7670|3|41|68701.24|0.01|0.06|N|O|1996-07-08|1996-05-31|1996-07-15|NONE|SHIP|mptotes run. 11590|122412|9949|4|49|70286.09|0.01|0.08|N|O|1996-04-27|1996-05-29|1996-05-15|DELIVER IN PERSON|AIR|es. blithely pending foxes alongside of th 11590|162359|7392|5|27|38376.45|0.06|0.07|N|O|1996-05-01|1996-05-26|1996-05-04|COLLECT COD|REG AIR|odolites boost blithel 11591|61941|6954|1|42|79923.48|0.07|0.01|R|F|1993-02-01|1993-04-02|1993-02-24|COLLECT COD|SHIP| carefully unusual sentiments. b 11591|27375|7376|2|42|54699.54|0.05|0.04|A|F|1993-02-06|1993-02-22|1993-02-12|COLLECT COD|TRUCK|its nag according to the furiously exp 11616|62918|7931|1|6|11285.46|0.06|0.00|N|O|1996-10-28|1996-10-10|1996-11-11|TAKE BACK RETURN|RAIL|ideas haggle blithely even theo 11616|50272|5283|2|42|51335.34|0.00|0.06|N|O|1996-11-08|1996-09-07|1996-11-12|COLLECT COD|REG AIR|nic ideas nag 11616|116561|4095|3|27|42594.12|0.02|0.07|N|O|1996-08-10|1996-09-24|1996-09-09|TAKE BACK RETURN|TRUCK|efully final sauternes sleep carefully 11616|25047|52|4|42|40825.68|0.08|0.01|N|O|1996-09-13|1996-09-09|1996-09-21|TAKE BACK RETURN|REG AIR|nal packages wake slyly amon 11616|152475|2476|5|37|56516.39|0.10|0.02|N|O|1996-08-03|1996-10-29|1996-08-07|COLLECT COD|AIR|counts sleep bl 11616|44778|4779|6|6|10336.62|0.10|0.07|N|O|1996-09-18|1996-09-28|1996-10-13|COLLECT COD|FOB|nticing excuses. 11616|178840|6392|7|34|65240.56|0.03|0.05|N|O|1996-09-13|1996-09-12|1996-09-30|TAKE BACK RETURN|REG AIR|usly even packages. slyly brave s 11617|149032|9033|1|8|8648.24|0.05|0.02|R|F|1994-12-29|1995-01-29|1995-01-25|TAKE BACK RETURN|FOB|usly final ideas affix 11617|173791|3792|2|46|85780.34|0.02|0.04|A|F|1995-04-03|1995-01-14|1995-04-23|DELIVER IN PERSON|FOB|s sleep blithely ironic pl 11617|98628|1138|3|33|53678.46|0.08|0.02|A|F|1995-03-20|1995-02-27|1995-03-30|DELIVER IN PERSON|SHIP|slyly express packa 11617|32169|7176|4|2|2202.32|0.00|0.03|A|F|1995-03-02|1995-02-14|1995-03-10|NONE|AIR|s. slyly bold f 11617|59492|9493|5|16|23223.84|0.04|0.04|R|F|1995-03-07|1995-02-17|1995-04-04|DELIVER IN PERSON|SHIP|leep carefully. quickly pending 11617|68657|6176|6|9|14630.85|0.03|0.01|A|F|1995-01-06|1995-03-05|1995-01-13|TAKE BACK RETURN|MAIL|c theodolites. 
bold excuses boost slyly ne 11618|59367|4378|1|27|35811.72|0.01|0.02|N|O|1998-05-17|1998-04-01|1998-05-25|COLLECT COD|SHIP|e of the asymptotes are quickly pinto 11618|168154|671|2|4|4888.60|0.00|0.03|N|O|1998-02-23|1998-04-23|1998-03-18|DELIVER IN PERSON|MAIL|t have to sleep carefully in place of the i 11618|150644|8190|3|22|37282.08|0.08|0.06|N|O|1998-03-02|1998-03-19|1998-03-28|NONE|AIR|. express platelets breach quickly along 11618|58153|3164|4|6|6666.90|0.07|0.00|N|O|1998-03-20|1998-04-04|1998-03-23|COLLECT COD|SHIP|ages: busily r 11618|176547|1582|5|25|40588.50|0.07|0.06|N|O|1998-06-04|1998-04-26|1998-07-02|DELIVER IN PERSON|RAIL|d packages? furiousl 11619|166090|8607|1|8|9248.72|0.02|0.00|N|O|1998-10-15|1998-08-12|1998-11-05|DELIVER IN PERSON|MAIL|bold accounts. daring, unu 11619|73698|6206|2|10|16716.90|0.08|0.04|N|O|1998-08-05|1998-10-09|1998-09-02|NONE|SHIP|ptotes use quickly furiously regular p 11619|36284|8788|3|13|15863.64|0.09|0.00|N|O|1998-10-27|1998-08-26|1998-11-07|DELIVER IN PERSON|MAIL|ding dependencies boost fluffily even, 11620|178525|1043|1|7|11224.64|0.09|0.04|N|O|1997-07-28|1997-10-07|1997-08-22|COLLECT COD|RAIL| quickly. fluffily ironic 11620|53711|6217|2|5|8323.55|0.00|0.08|N|O|1997-09-23|1997-10-16|1997-09-26|NONE|REG AIR|uriously express orbits. furiou 11620|106333|8844|3|23|30804.59|0.09|0.05|N|O|1997-08-18|1997-09-15|1997-09-11|COLLECT COD|RAIL|into beans. final accounts above the regul 11620|137631|5171|4|26|43384.38|0.08|0.06|N|O|1997-07-29|1997-09-13|1997-08-19|NONE|FOB|riously regular warhorses use slyly am 11620|152910|5426|5|31|60850.21|0.07|0.03|N|O|1997-09-26|1997-10-18|1997-10-14|COLLECT COD|RAIL|quests. bold requests alongside o 11621|127197|4734|1|8|9793.52|0.03|0.08|N|O|1996-01-20|1996-01-26|1996-02-12|COLLECT COD|AIR|usual requests 11621|191934|9492|2|47|95218.71|0.03|0.03|N|O|1996-02-12|1995-12-25|1996-03-10|NONE|AIR|ironic ideas impress quickly enti 11621|19965|7469|3|12|22619.52|0.01|0.05|N|O|1995-12-01|1996-02-07|1995-12-30|COLLECT COD|RAIL|regular excuses; deposits affix fu 11621|137250|9764|4|10|12872.50|0.02|0.03|N|O|1996-01-14|1995-12-13|1996-01-28|COLLECT COD|RAIL|pinto beans. carefully final 11621|152524|70|5|35|55178.20|0.01|0.05|N|O|1996-01-08|1996-01-01|1996-01-14|TAKE BACK RETURN|SHIP|o the blithely regular accounts. idly 11621|59206|6722|6|42|48938.40|0.09|0.05|N|O|1995-12-07|1996-01-10|1995-12-17|TAKE BACK RETURN|MAIL|instructions are according to the care 11622|150070|2586|1|22|24641.54|0.02|0.07|N|O|1996-06-01|1996-07-25|1996-06-04|DELIVER IN PERSON|MAIL|cial foxes. slyly thin platelets around 11622|23819|3820|2|20|34856.20|0.07|0.00|N|O|1996-05-22|1996-06-18|1996-06-19|NONE|REG AIR|old dependencies. slyly regular dolphi 11622|122888|7913|3|40|76435.20|0.03|0.03|N|O|1996-08-02|1996-06-20|1996-08-20|TAKE BACK RETURN|SHIP| ironic instructions n 11622|163017|566|4|20|21600.20|0.06|0.07|N|O|1996-07-31|1996-07-22|1996-08-25|COLLECT COD|SHIP|nding realms integrate slyly careful 11622|16344|8846|5|40|50413.60|0.09|0.03|N|O|1996-06-19|1996-05-27|1996-06-23|NONE|SHIP|lly bold ideas cajole slyly across the 11623|71504|4012|1|45|66397.50|0.03|0.01|N|O|1996-01-15|1996-03-06|1996-01-19|COLLECT COD|AIR|c deposits. pending f 11623|161341|3858|2|48|67312.32|0.05|0.01|N|O|1996-03-27|1996-03-03|1996-04-13|NONE|AIR|ely regular requests. ideas beyond the bli 11623|57154|7155|3|35|38890.25|0.07|0.04|N|O|1996-02-16|1996-01-21|1996-03-07|DELIVER IN PERSON|TRUCK|ons are. 
fluffi 11623|174001|1553|4|19|20425.00|0.04|0.03|N|O|1996-03-25|1996-01-18|1996-04-13|DELIVER IN PERSON|MAIL|cording to the pending 11623|19513|9514|5|15|21487.65|0.10|0.05|N|O|1995-12-18|1996-02-24|1995-12-31|TAKE BACK RETURN|SHIP|. dolphins cajole blithely. quickly expres 11623|141995|1996|6|46|93701.54|0.06|0.00|N|O|1996-04-05|1996-03-05|1996-04-30|DELIVER IN PERSON|SHIP|tructions. fluffily final warhorses engag 11623|101572|4083|7|46|72384.22|0.09|0.08|N|O|1995-12-28|1996-03-14|1996-01-24|NONE|SHIP|ts. furiou 11648|113296|830|1|38|49753.02|0.02|0.00|R|F|1995-05-18|1995-06-21|1995-05-21|NONE|REG AIR|eodolites 11648|162352|2353|2|39|55159.65|0.01|0.00|N|O|1995-08-07|1995-06-17|1995-08-15|COLLECT COD|FOB|egular braids run furiousl 11648|35732|5733|3|5|8338.65|0.04|0.08|A|F|1995-04-26|1995-06-20|1995-05-01|NONE|SHIP|ter the carefully iron 11648|146055|1084|4|2|2202.10|0.05|0.08|R|F|1995-05-08|1995-07-01|1995-05-26|NONE|REG AIR|riously carefully 11648|143599|8628|5|38|62418.42|0.10|0.06|N|O|1995-06-24|1995-07-04|1995-07-24|COLLECT COD|RAIL|ly express requests. regular as 11648|57011|9517|6|22|21296.22|0.03|0.00|A|F|1995-05-01|1995-06-20|1995-05-12|DELIVER IN PERSON|REG AIR| haggle bravely t 11648|169231|9232|7|24|31205.52|0.07|0.05|N|O|1995-08-06|1995-06-09|1995-09-03|COLLECT COD|MAIL|. sometimes even deposits 11649|61039|3546|1|3|3000.09|0.08|0.00|N|O|1996-08-07|1996-09-08|1996-08-22|COLLECT COD|REG AIR|sits. carefully regular pac 11649|72671|2672|2|35|57528.45|0.06|0.01|N|O|1996-10-16|1996-09-08|1996-11-11|COLLECT COD|TRUCK|y final th 11649|168787|1304|3|45|83510.10|0.03|0.03|N|O|1996-07-20|1996-07-30|1996-07-30|COLLECT COD|REG AIR|nic packages. quickly express dolphins aff 11649|188093|3130|4|44|51967.96|0.10|0.04|N|O|1996-09-21|1996-09-19|1996-10-16|COLLECT COD|TRUCK|osits wake blithely. special dependencies w 11649|67965|7966|5|40|77318.40|0.04|0.04|N|O|1996-08-01|1996-08-24|1996-08-22|COLLECT COD|FOB|ely ironic requests cajole thinly slyly exp 11649|71731|1732|6|16|27243.68|0.06|0.03|N|O|1996-07-21|1996-08-13|1996-08-04|COLLECT COD|FOB|le. slyly regular ideas sleep ironically qu 11649|178827|3862|7|26|49551.32|0.01|0.04|N|O|1996-08-19|1996-08-28|1996-08-22|COLLECT COD|SHIP|ilently regular accounts cajole 11650|133459|5973|1|48|71637.60|0.06|0.01|A|F|1992-11-23|1992-12-02|1992-12-23|DELIVER IN PERSON|MAIL|fily final ideas. 11650|44339|1852|2|50|64166.50|0.03|0.07|R|F|1993-01-06|1992-12-12|1993-01-16|DELIVER IN PERSON|TRUCK|lly special theodo 11650|3391|8392|3|30|38831.70|0.04|0.00|R|F|1992-11-09|1992-12-18|1992-11-25|DELIVER IN PERSON|FOB|urts. somet 11651|138059|5599|1|41|44979.05|0.00|0.06|A|F|1995-05-26|1995-04-19|1995-06-16|DELIVER IN PERSON|RAIL|ould have to are. slyly expre 11651|156233|1264|2|8|10313.84|0.08|0.03|N|F|1995-06-03|1995-04-08|1995-06-18|TAKE BACK RETURN|RAIL|ic accounts detect 11652|197430|2469|1|39|59569.77|0.07|0.04|N|O|1995-12-22|1996-01-28|1996-01-04|TAKE BACK RETURN|TRUCK|nic sentiments bo 11652|199693|9694|2|11|19719.59|0.06|0.06|N|O|1996-02-13|1996-01-16|1996-03-01|COLLECT COD|AIR|ges. 
silent, silent waters cajole 11652|29189|9190|3|28|31309.04|0.08|0.08|N|O|1996-02-11|1996-01-04|1996-02-28|DELIVER IN PERSON|FOB| final requests doubt furiously 11652|167214|4763|4|24|30749.04|0.02|0.02|N|O|1996-02-28|1996-01-30|1996-03-07|COLLECT COD|AIR|y about the s 11652|164074|1623|5|10|11380.70|0.07|0.08|N|O|1996-01-26|1996-01-13|1996-02-20|TAKE BACK RETURN|REG AIR|blithely express as 11652|185690|8209|6|8|14205.52|0.10|0.00|N|O|1996-03-01|1996-01-30|1996-03-13|COLLECT COD|MAIL|an sleep furiously silent Tiresias; bold, s 11652|81147|8672|7|21|23690.94|0.03|0.07|N|O|1996-02-26|1996-01-22|1996-03-18|DELIVER IN PERSON|SHIP|quickly regular packages haggle. frays h 11653|36226|8730|1|18|20919.96|0.01|0.01|N|O|1997-09-30|1997-08-24|1997-10-05|DELIVER IN PERSON|FOB|. carefully pending 11653|45469|2982|2|25|35361.50|0.07|0.01|N|O|1997-09-27|1997-09-25|1997-10-26|TAKE BACK RETURN|TRUCK| even packages. reg 11653|191348|3868|3|27|38862.18|0.01|0.04|N|O|1997-10-26|1997-08-18|1997-11-17|TAKE BACK RETURN|AIR|regular packages. pending pa 11653|178332|850|4|42|59233.86|0.10|0.01|N|O|1997-07-30|1997-09-18|1997-08-20|DELIVER IN PERSON|MAIL|ar, regular ideas integrate quickly 11653|15888|5889|5|22|39685.36|0.07|0.00|N|O|1997-09-01|1997-08-31|1997-09-18|COLLECT COD|AIR|pendencies. blithely even pa 11654|74512|2034|1|47|69865.97|0.06|0.02|R|F|1993-11-29|1993-12-23|1993-12-15|COLLECT COD|MAIL|sleep blithely against the furiously ironi 11654|117724|5258|2|39|67927.08|0.03|0.00|A|F|1993-11-29|1993-12-01|1993-12-22|NONE|FOB|ts integrate blithely. even, enticing inst 11655|135252|7766|1|32|41192.00|0.04|0.04|N|O|1998-08-11|1998-09-25|1998-09-02|TAKE BACK RETURN|SHIP|g to the slyly ironic requests. 11655|174050|1602|2|49|55078.45|0.06|0.07|N|O|1998-07-04|1998-09-20|1998-07-06|COLLECT COD|SHIP|nal courts. s 11655|49898|7411|3|46|85002.94|0.01|0.00|N|O|1998-10-10|1998-09-01|1998-10-21|DELIVER IN PERSON|MAIL|ges. blithely even accounts sleep furiou 11680|24361|1868|1|27|34704.72|0.00|0.01|A|F|1994-11-10|1994-11-05|1994-11-12|TAKE BACK RETURN|SHIP|ounts haggle blithely 11680|26880|1885|2|35|63240.80|0.09|0.01|R|F|1994-09-17|1994-11-29|1994-09-25|NONE|MAIL|ly pending gifts sleep carefully 11680|168923|6472|3|43|85652.56|0.06|0.01|R|F|1994-11-05|1994-11-16|1994-11-25|COLLECT COD|MAIL|integrate furiously. special instruct 11680|84966|7475|4|49|95597.04|0.02|0.08|R|F|1994-11-05|1994-11-05|1994-11-20|TAKE BACK RETURN|REG AIR| carefully express, spec 11680|20183|2686|5|24|26476.32|0.01|0.00|R|F|1994-09-08|1994-11-11|1994-09-09|NONE|TRUCK|silent depo 11680|14963|7465|6|26|48826.96|0.10|0.01|R|F|1994-10-22|1994-11-02|1994-11-02|NONE|MAIL|uickly above the slyly pending 11680|69164|1671|7|9|10198.44|0.09|0.02|R|F|1994-12-20|1994-11-29|1995-01-19|DELIVER IN PERSON|AIR|ently ironic grouches after the 11681|22974|7979|1|28|53115.16|0.07|0.02|A|F|1993-02-15|1992-12-11|1993-03-04|TAKE BACK RETURN|REG AIR| carefully carefully ruth 11681|123592|8617|2|37|59776.83|0.06|0.06|A|F|1992-12-29|1992-12-11|1993-01-27|TAKE BACK RETURN|RAIL|y ironic ideas. slyly ironic re 11681|904|905|3|31|55951.90|0.09|0.01|A|F|1993-01-26|1992-12-18|1993-02-07|NONE|AIR|uffily. ideas kin 11681|191944|1945|4|30|61078.20|0.02|0.06|A|F|1993-01-12|1992-11-24|1993-02-09|NONE|RAIL|. theodolites across the even packages are 11681|199627|7185|5|47|81151.14|0.02|0.05|A|F|1993-01-27|1992-12-13|1993-01-29|COLLECT COD|TRUCK|ly even depths. 
furious 11682|31634|4138|1|13|20353.19|0.06|0.06|R|F|1993-10-04|1993-08-06|1993-10-11|NONE|REG AIR|lyly among the pending platelets. 11682|68851|1358|2|35|63694.75|0.05|0.08|R|F|1993-11-01|1993-09-07|1993-11-04|TAKE BACK RETURN|RAIL|ly even foxes 11682|177152|9670|3|46|56540.90|0.02|0.07|R|F|1993-08-06|1993-10-03|1993-08-28|COLLECT COD|AIR|arefully unusual or 11683|162515|64|1|31|48902.81|0.00|0.01|R|F|1992-05-04|1992-04-01|1992-06-03|NONE|MAIL|y regular requests. foxes eat fu 11683|179349|4384|2|18|25710.12|0.08|0.04|A|F|1992-03-15|1992-03-28|1992-03-19|NONE|TRUCK|nstructions. blithely even accounts cajole 11683|111989|1990|3|38|76037.24|0.04|0.07|A|F|1992-05-29|1992-03-17|1992-06-19|NONE|MAIL|totes use slyly carefully bold pa 11683|198711|8712|4|1|1809.71|0.08|0.05|R|F|1992-03-17|1992-03-31|1992-03-29|DELIVER IN PERSON|FOB|beans are furiously blithe platele 11683|122448|7473|5|7|10293.08|0.09|0.02|A|F|1992-06-01|1992-03-26|1992-06-25|NONE|AIR| requests. slyly u 11683|171639|9191|6|8|13685.04|0.03|0.03|R|F|1992-05-27|1992-03-20|1992-06-12|NONE|SHIP|l dolphins: quickly even packa 11683|124774|4775|7|40|71950.80|0.08|0.05|A|F|1992-03-02|1992-03-11|1992-04-01|COLLECT COD|MAIL|ooze alongsi 11684|61558|4065|1|48|72938.40|0.03|0.02|A|F|1992-11-15|1992-10-05|1992-11-25|COLLECT COD|TRUCK|ld pinto beans. carefully d 11684|191251|3771|2|30|40267.50|0.07|0.03|R|F|1992-10-10|1992-09-09|1992-10-19|TAKE BACK RETURN|TRUCK|ckly. blithe 11684|143701|8730|3|27|47106.90|0.02|0.04|R|F|1992-09-03|1992-09-15|1992-10-02|DELIVER IN PERSON|MAIL|ironic pinto beans-- sly depos 11684|170381|5416|4|49|71117.62|0.08|0.05|R|F|1992-08-24|1992-09-08|1992-09-02|NONE|FOB|yly across the furiously pending 11684|190117|2637|5|25|30177.75|0.01|0.01|A|F|1992-10-20|1992-10-19|1992-11-09|DELIVER IN PERSON|AIR|e theodolites. foxes solve carefu 11685|64149|4150|1|19|21149.66|0.08|0.04|N|O|1997-04-02|1997-04-22|1997-05-01|NONE|AIR|ly bold platele 11685|146250|3793|2|16|20740.00|0.04|0.02|N|O|1997-06-24|1997-05-28|1997-06-28|TAKE BACK RETURN|RAIL|sits haggle 11685|7071|4572|3|7|6846.49|0.06|0.07|N|O|1997-04-01|1997-04-30|1997-04-05|NONE|FOB| to the flu 11685|36369|3879|4|19|24801.84|0.01|0.07|N|O|1997-06-13|1997-04-23|1997-07-04|COLLECT COD|FOB|ests. carefully regular requests among 11685|50022|7538|5|36|34992.72|0.01|0.00|N|O|1997-07-10|1997-05-26|1997-08-07|DELIVER IN PERSON|FOB|regular platelets. asymp 11685|119379|9380|6|30|41951.10|0.03|0.06|N|O|1997-04-05|1997-06-13|1997-04-12|DELIVER IN PERSON|AIR|ily ironic theod 11685|192949|2950|7|50|102097.00|0.06|0.04|N|O|1997-04-23|1997-06-05|1997-05-20|DELIVER IN PERSON|REG AIR|nts. furiously final d 11686|89930|7455|1|36|69117.48|0.10|0.01|A|F|1994-03-28|1994-04-04|1994-04-02|COLLECT COD|FOB|ts cajole. carefully 11686|49749|2254|2|5|8493.70|0.01|0.04|R|F|1994-04-18|1994-04-05|1994-05-12|TAKE BACK RETURN|SHIP|nding pinto beans h 11686|128836|8837|3|50|93241.50|0.04|0.01|A|F|1994-02-06|1994-03-12|1994-02-14|DELIVER IN PERSON|RAIL|he final theodolite 11686|140082|83|4|37|41516.96|0.07|0.01|R|F|1994-03-09|1994-04-18|1994-04-07|DELIVER IN PERSON|MAIL|ully along 11687|23468|975|1|23|32003.58|0.07|0.00|N|O|1996-02-22|1996-02-04|1996-02-28|NONE|FOB| are slyly bold ideas. 11687|13326|3327|2|18|22307.76|0.10|0.07|N|O|1996-04-04|1996-02-29|1996-05-03|NONE|REG AIR|he slyly regular accounts. 
final requests 11687|162717|266|3|23|40933.33|0.02|0.08|N|O|1996-02-28|1996-02-06|1996-03-22|DELIVER IN PERSON|RAIL|al instructions 11687|180603|604|4|32|53875.20|0.08|0.01|N|O|1996-01-09|1996-03-08|1996-01-20|NONE|AIR|uses detect slyly. slyly even accou 11687|187801|5356|5|28|52886.40|0.00|0.05|N|O|1996-01-05|1996-02-21|1996-01-10|TAKE BACK RETURN|TRUCK|nts cajole 11712|152579|2580|1|19|30999.83|0.10|0.04|A|F|1994-06-23|1994-07-24|1994-07-01|NONE|MAIL|y final reque 11712|178601|1119|2|36|60465.60|0.07|0.05|R|F|1994-05-14|1994-07-09|1994-05-15|COLLECT COD|TRUCK|dolites. furiously 11712|134274|6788|3|26|34015.02|0.03|0.04|R|F|1994-08-23|1994-06-26|1994-09-22|NONE|AIR|ounts are. slyly quick theodolites ough 11713|135589|8103|1|17|27617.86|0.10|0.08|R|F|1994-03-11|1994-02-07|1994-03-28|COLLECT COD|MAIL|kages. carefully ironic reques 11713|42573|2574|2|33|50013.81|0.04|0.03|A|F|1994-03-11|1994-02-09|1994-03-21|NONE|TRUCK|gle fluffily quickly c 11713|123091|3092|3|46|51248.14|0.09|0.06|A|F|1994-04-07|1994-03-08|1994-04-08|DELIVER IN PERSON|TRUCK|ly final foxes sleep fl 11713|184762|7281|4|6|11080.56|0.10|0.07|A|F|1994-03-19|1994-02-20|1994-04-05|COLLECT COD|REG AIR|mong the even instructions wake f 11714|143625|6140|1|24|40046.88|0.09|0.02|A|F|1994-10-20|1994-10-27|1994-11-14|DELIVER IN PERSON|FOB|ily blithely unusual deposits. quietly reg 11714|94110|6620|2|20|22082.20|0.01|0.05|A|F|1994-11-20|1994-10-29|1994-11-24|DELIVER IN PERSON|FOB|g after the pending reques 11714|102084|4595|3|46|49959.68|0.09|0.06|A|F|1994-08-29|1994-09-13|1994-09-04|TAKE BACK RETURN|TRUCK|ites wake sometim 11714|116041|6042|4|23|24311.92|0.07|0.08|A|F|1994-11-11|1994-10-29|1994-12-08|DELIVER IN PERSON|AIR|r ideas. furious pinto beans wake slyl 11714|99079|6607|5|17|18327.19|0.08|0.04|R|F|1994-11-05|1994-09-05|1994-11-11|TAKE BACK RETURN|RAIL| ideas should haggle furiously 11714|5889|890|6|6|10769.28|0.00|0.01|A|F|1994-11-01|1994-10-27|1994-11-14|DELIVER IN PERSON|SHIP|requests. ironic, even requests 11715|90962|963|1|7|13670.72|0.08|0.02|R|F|1994-10-02|1994-10-30|1994-10-22|COLLECT COD|RAIL|s. regular, exp 11715|194622|9661|2|25|42915.50|0.03|0.05|A|F|1994-10-18|1994-10-30|1994-10-26|TAKE BACK RETURN|TRUCK|etly slyly regular requests 11715|11374|8878|3|2|2570.74|0.05|0.01|A|F|1994-12-03|1994-11-13|1994-12-19|TAKE BACK RETURN|FOB|. slyly regular depths n 11715|157821|5367|4|4|7515.28|0.01|0.05|A|F|1995-01-04|1994-12-05|1995-01-18|DELIVER IN PERSON|REG AIR|ording to the dogged packages. ironic bra 11716|144142|6657|1|38|45073.32|0.02|0.03|A|F|1994-02-09|1993-11-28|1994-03-05|TAKE BACK RETURN|MAIL|osits promise closely. slyly b 11716|180178|2697|2|41|51584.97|0.09|0.03|R|F|1993-11-11|1993-12-31|1993-11-29|NONE|AIR|t theodolites among the unusu 11716|42719|5224|3|48|79762.08|0.08|0.05|A|F|1993-11-27|1993-12-09|1993-12-07|NONE|RAIL|ual accounts affix s 11717|188907|8908|1|12|23950.80|0.00|0.06|N|O|1998-06-21|1998-05-11|1998-06-25|NONE|AIR| packages play regular, ironic req 11717|156726|4272|2|41|73091.52|0.08|0.08|N|O|1998-04-19|1998-05-13|1998-04-30|NONE|AIR|haggle behind the pending, bol 11717|81925|4434|3|11|20976.12|0.04|0.00|N|O|1998-05-02|1998-06-16|1998-05-07|TAKE BACK RETURN|MAIL|gular ideas use furiously against the 11717|103486|1017|4|38|56600.24|0.04|0.04|N|O|1998-03-23|1998-05-11|1998-04-06|DELIVER IN PERSON|SHIP|deposits. quickly 11717|82061|7078|5|10|10430.60|0.07|0.06|N|O|1998-06-15|1998-05-21|1998-07-07|COLLECT COD|SHIP|egular accounts. 
furiously bu 11718|76551|1566|1|20|30551.00|0.00|0.06|A|F|1995-04-12|1995-03-11|1995-05-03|COLLECT COD|MAIL|across the regular in 11719|94710|4711|1|21|35798.91|0.03|0.01|A|F|1995-04-16|1995-05-07|1995-05-02|NONE|AIR|carefully against the 11719|179157|6709|2|44|54390.60|0.06|0.00|R|F|1995-03-27|1995-03-09|1995-04-06|DELIVER IN PERSON|TRUCK|sits are bli 11719|88684|8685|3|5|8363.40|0.10|0.06|R|F|1995-05-30|1995-03-19|1995-06-06|DELIVER IN PERSON|SHIP|deas detect past t 11719|97588|5116|4|17|26954.86|0.01|0.03|A|F|1995-02-20|1995-03-29|1995-03-19|COLLECT COD|SHIP| excuses integrate 11719|159291|4322|5|40|54011.60|0.02|0.00|A|F|1995-03-29|1995-03-09|1995-04-09|DELIVER IN PERSON|AIR|hs. carefully fluffy 11719|197506|5064|6|18|28863.00|0.05|0.02|R|F|1995-03-09|1995-04-11|1995-03-13|TAKE BACK RETURN|SHIP|nt accounts. fluffily sly 11744|82750|2751|1|28|48517.00|0.06|0.03|N|O|1996-09-12|1996-08-16|1996-09-23|NONE|FOB|eep ironically. slyl 11744|171974|1975|2|42|85930.74|0.01|0.02|N|O|1996-08-25|1996-09-01|1996-08-29|TAKE BACK RETURN|SHIP|requests use quickly; blithely express 11744|152329|7360|3|50|69066.00|0.07|0.03|N|O|1996-09-21|1996-08-25|1996-10-15|NONE|FOB| regular packages believe above the re 11745|124422|6935|1|34|49178.28|0.03|0.01|R|F|1992-08-03|1992-09-29|1992-08-28|NONE|AIR|s wake. regular platelets haggle ag 11745|153385|931|2|35|50343.30|0.02|0.03|R|F|1992-09-13|1992-09-23|1992-09-19|NONE|RAIL| even sheaves! slyly sile 11745|125755|5756|3|29|51641.75|0.02|0.00|A|F|1992-10-02|1992-10-14|1992-10-17|COLLECT COD|REG AIR|ns around the carefully final theo 11745|152344|7375|4|12|16756.08|0.07|0.08|A|F|1992-09-17|1992-08-28|1992-09-20|COLLECT COD|TRUCK| the ironic, reg 11745|33891|3892|5|7|12774.23|0.07|0.03|A|F|1992-11-16|1992-08-29|1992-11-26|COLLECT COD|TRUCK|mptotes. acc 11746|180724|725|1|22|39703.84|0.09|0.05|N|O|1998-05-09|1998-07-05|1998-05-12|DELIVER IN PERSON|RAIL|rbits according 11746|16197|1200|2|6|6679.14|0.04|0.06|N|O|1998-05-27|1998-06-12|1998-06-09|NONE|RAIL|deas. slowly regul 11746|13705|3706|3|25|40467.50|0.00|0.02|N|O|1998-08-27|1998-06-08|1998-09-01|COLLECT COD|REG AIR|en deposits de 11746|131768|9308|4|26|46793.76|0.00|0.02|N|O|1998-08-09|1998-06-30|1998-08-14|COLLECT COD|REG AIR|st across the ir 11746|17060|7061|5|13|12701.78|0.01|0.02|N|O|1998-06-16|1998-07-14|1998-07-01|DELIVER IN PERSON|RAIL|nusual excu 11746|114250|6762|6|36|45513.00|0.00|0.06|N|O|1998-08-16|1998-06-15|1998-08-26|COLLECT COD|REG AIR| the slyly ironic accounts. i 11746|193807|3808|7|12|22809.60|0.06|0.07|N|O|1998-07-22|1998-06-20|1998-07-23|DELIVER IN PERSON|RAIL|heodolites haggle slowly 11747|99279|6807|1|27|34513.29|0.10|0.01|A|F|1993-06-25|1993-04-13|1993-07-16|COLLECT COD|TRUCK|y express requests. regular de 11748|197679|7680|1|23|40863.41|0.00|0.00|A|F|1994-07-04|1994-07-30|1994-07-15|COLLECT COD|AIR|its against the 11749|76477|3999|1|7|10174.29|0.01|0.07|N|O|1998-04-11|1998-05-02|1998-04-15|COLLECT COD|SHIP|ully according to t 11749|142096|4611|2|48|54628.32|0.02|0.01|N|O|1998-05-24|1998-05-11|1998-06-20|NONE|TRUCK|packages. deposits slee 11750|143307|5822|1|23|31056.90|0.07|0.08|N|O|1997-04-21|1997-04-21|1997-04-25|COLLECT COD|TRUCK|ending excuses. packages hagg 11750|148878|6421|2|21|40464.27|0.06|0.06|N|O|1997-06-09|1997-05-18|1997-06-19|DELIVER IN PERSON|TRUCK|ronic dinos cajole 11750|191844|9402|3|31|60011.04|0.02|0.02|N|O|1997-06-27|1997-04-08|1997-07-09|TAKE BACK RETURN|AIR|ckages. 
caref 11750|108784|1295|4|1|1792.78|0.05|0.08|N|O|1997-04-02|1997-04-14|1997-04-13|DELIVER IN PERSON|SHIP| even dugouts; slyly special i 11751|90705|706|1|23|39001.10|0.05|0.00|N|O|1996-07-06|1996-08-19|1996-07-19|COLLECT COD|RAIL| the regular packages thrash ironic, ir 11751|71117|6132|2|45|48964.95|0.08|0.07|N|O|1996-09-05|1996-07-12|1996-09-07|DELIVER IN PERSON|SHIP|theodolites. carefully even r 11751|54146|4147|3|5|5500.70|0.06|0.08|N|O|1996-07-09|1996-08-10|1996-08-01|COLLECT COD|AIR|efully express foxes hagg 11751|20891|892|4|29|52544.81|0.01|0.01|N|O|1996-06-09|1996-08-19|1996-07-05|TAKE BACK RETURN|REG AIR|gular excuses wak 11776|180896|8451|1|50|98844.50|0.07|0.08|A|F|1992-05-26|1992-04-21|1992-05-31|NONE|AIR|kly regular accounts believe account 11776|102368|2369|2|5|6851.80|0.10|0.02|A|F|1992-06-05|1992-03-28|1992-06-12|COLLECT COD|MAIL|fully carefully pending 11777|187033|9552|1|31|34720.93|0.05|0.08|R|F|1995-03-13|1995-03-16|1995-03-30|NONE|SHIP|grate carefully according to t 11778|134653|4654|1|43|72568.95|0.04|0.05|N|O|1997-06-21|1997-05-07|1997-07-02|COLLECT COD|MAIL|tions according to the furiously 11778|107426|7427|2|5|7167.10|0.08|0.05|N|O|1997-07-01|1997-06-20|1997-07-15|TAKE BACK RETURN|SHIP|wake carefully regular sauternes. sl 11778|106511|9022|3|47|71322.97|0.05|0.02|N|O|1997-05-20|1997-06-05|1997-05-31|DELIVER IN PERSON|TRUCK|accounts. slyly even requests are be 11779|150205|7751|1|15|18828.00|0.05|0.08|R|F|1993-10-05|1993-08-06|1993-10-10|NONE|SHIP|about the quickly pend 11779|86451|3976|2|9|12937.05|0.00|0.03|A|F|1993-10-15|1993-09-27|1993-11-13|COLLECT COD|RAIL|ins are carefull 11779|170976|3494|3|27|55268.19|0.03|0.01|R|F|1993-10-25|1993-08-04|1993-11-13|COLLECT COD|RAIL|. fluffily eve 11779|181843|1844|4|7|13473.88|0.06|0.03|A|F|1993-10-24|1993-09-21|1993-11-08|NONE|MAIL|y bold excuses sleep sl 11779|20624|8131|5|50|77231.00|0.06|0.08|R|F|1993-09-30|1993-09-10|1993-10-20|TAKE BACK RETURN|TRUCK|thely bold packages nag carefully q 11779|15122|125|6|32|33187.84|0.02|0.06|R|F|1993-08-12|1993-09-23|1993-09-11|TAKE BACK RETURN|REG AIR|r deposits cajole sl 11780|39412|4419|1|41|55407.81|0.09|0.07|A|F|1993-10-21|1993-12-12|1993-10-30|TAKE BACK RETURN|SHIP| asymptotes. ironic, even ideas engage s 11780|194901|9940|2|36|71852.40|0.04|0.02|A|F|1993-12-24|1993-12-11|1994-01-09|DELIVER IN PERSON|RAIL|above the fluffily even a 11780|93548|3549|3|7|10790.78|0.09|0.05|R|F|1993-10-18|1993-11-28|1993-10-20|DELIVER IN PERSON|TRUCK|longside of the blithely 11781|34909|7413|1|37|68224.30|0.07|0.01|N|O|1996-06-14|1996-06-27|1996-06-20|TAKE BACK RETURN|MAIL|packages use alongside of 11781|113505|1039|2|42|63777.00|0.07|0.05|N|O|1996-04-24|1996-06-02|1996-04-28|NONE|SHIP| deposits wake. slyly regular dependenc 11781|68546|6065|3|42|63610.68|0.10|0.03|N|O|1996-07-08|1996-07-07|1996-07-17|NONE|TRUCK| express, 11782|159204|1720|1|27|34106.40|0.01|0.00|A|F|1992-08-10|1992-09-05|1992-08-20|DELIVER IN PERSON|AIR|above the quickly even pains. bold 11782|136196|3736|2|35|43126.65|0.09|0.08|A|F|1992-09-14|1992-09-06|1992-09-21|TAKE BACK RETURN|FOB|s eat slyly according to th 11782|36552|6553|3|50|74427.50|0.10|0.07|R|F|1992-10-25|1992-10-02|1992-11-07|COLLECT COD|MAIL|even accounts boost slyly de 11782|128290|8291|4|17|22410.93|0.07|0.04|R|F|1992-11-20|1992-09-20|1992-12-20|NONE|AIR|ess deposits cajole carefull 11783|59568|4579|1|3|4582.68|0.03|0.04|N|O|1998-06-12|1998-05-04|1998-06-21|DELIVER IN PERSON|MAIL|es nag. 
furiously ca 11783|156003|8519|2|13|13767.00|0.05|0.02|N|O|1998-07-20|1998-05-23|1998-07-26|DELIVER IN PERSON|SHIP|ly even theodolites are furiously. reg 11808|60788|789|1|12|20985.36|0.03|0.08|A|F|1992-11-15|1992-09-17|1992-11-27|TAKE BACK RETURN|RAIL|instructions affix closel 11808|52650|7661|2|30|48079.50|0.06|0.04|R|F|1992-09-03|1992-10-22|1992-09-20|TAKE BACK RETURN|SHIP|usual requests ha 11809|22356|7361|1|22|28123.70|0.07|0.07|N|O|1996-07-18|1996-07-18|1996-07-19|DELIVER IN PERSON|FOB|er the special platelets. request 11809|123135|5648|2|49|56748.37|0.03|0.03|N|O|1996-05-03|1996-06-15|1996-05-12|COLLECT COD|FOB|lar sauternes. slyly ironic packages ha 11809|113164|3165|3|18|21188.88|0.08|0.03|N|O|1996-07-21|1996-07-05|1996-08-10|TAKE BACK RETURN|AIR|ackages use thinly express 11809|132643|5157|4|29|48593.56|0.04|0.02|N|O|1996-06-04|1996-06-14|1996-06-18|NONE|MAIL|ely regular asymptotes lose caref 11809|114504|4505|5|3|4555.50|0.08|0.03|N|O|1996-08-04|1996-06-23|1996-08-22|TAKE BACK RETURN|SHIP|urts. deposits wake across the ideas. regul 11809|56578|4094|6|18|27622.26|0.02|0.01|N|O|1996-07-16|1996-06-10|1996-07-23|NONE|SHIP|ly even ideas integr 11810|69374|9375|1|47|63138.39|0.00|0.03|A|F|1992-08-02|1992-06-20|1992-08-22|NONE|RAIL|final foxes wake blithely regular idea 11811|105578|599|1|8|12668.56|0.09|0.06|A|F|1993-02-07|1992-11-28|1993-02-09|COLLECT COD|RAIL| quickly along the pendin 11812|592|5593|1|20|29851.80|0.05|0.00|A|F|1993-09-16|1993-08-23|1993-10-03|COLLECT COD|REG AIR|ounts doze regular 11813|147502|2531|1|49|75925.50|0.03|0.05|A|F|1994-05-24|1994-03-21|1994-05-26|COLLECT COD|MAIL|regular dependencies cajole carefu 11813|50530|8046|2|32|47376.96|0.02|0.04|A|F|1994-03-17|1994-04-27|1994-03-21|DELIVER IN PERSON|FOB|e alongside of the fu 11813|46828|1837|3|47|83416.54|0.02|0.07|R|F|1994-02-24|1994-04-13|1994-03-21|NONE|TRUCK|es impress quickly sly 11813|30924|3428|4|25|46373.00|0.02|0.00|R|F|1994-03-30|1994-05-07|1994-04-21|DELIVER IN PERSON|MAIL|side of the ironic, express pin 11814|115154|7666|1|9|10522.35|0.08|0.04|R|F|1993-04-25|1993-03-22|1993-04-29|TAKE BACK RETURN|RAIL|al accounts boost slyly alongsid 11814|183586|1141|2|46|76800.68|0.09|0.03|A|F|1993-04-09|1993-03-11|1993-04-19|COLLECT COD|RAIL|eposits wake. slyly regular pack 11814|115553|3087|3|39|61173.45|0.06|0.03|R|F|1993-05-12|1993-04-02|1993-06-01|NONE|TRUCK|ss pinto beans nod fl 11814|99147|6675|4|41|46991.74|0.04|0.00|A|F|1993-04-27|1993-03-06|1993-04-30|TAKE BACK RETURN|AIR|d the pinto b 11815|137648|162|1|38|64054.32|0.04|0.03|N|O|1995-10-31|1995-11-25|1995-11-06|COLLECT COD|REG AIR|e above the fluffily pending 11815|40332|333|2|28|35625.24|0.02|0.04|N|O|1995-10-22|1995-11-11|1995-10-29|TAKE BACK RETURN|AIR|ns doze ironically pendin 11815|45621|3134|3|37|57964.94|0.03|0.06|N|O|1995-10-29|1995-10-19|1995-11-05|DELIVER IN PERSON|SHIP|coys are. carefully ironic platelets hag 11815|137524|38|4|33|51530.16|0.02|0.06|N|O|1995-12-01|1995-10-22|1995-12-07|DELIVER IN PERSON|SHIP|uests are fluffily carefully sp 11815|161562|4079|5|12|19482.72|0.02|0.04|N|O|1995-11-21|1995-11-19|1995-12-18|DELIVER IN PERSON|REG AIR|silent deposit 11840|41372|1373|1|27|35460.99|0.01|0.08|A|F|1994-12-02|1994-10-03|1994-12-16|DELIVER IN PERSON|TRUCK|ymptotes. ironic accounts integra 11840|123478|1015|2|13|19519.11|0.10|0.05|R|F|1994-12-10|1994-10-27|1994-12-19|COLLECT COD|MAIL|lly regular packages. 
ruthlessly regu 11840|83851|6360|3|25|45871.25|0.10|0.01|A|F|1994-08-31|1994-10-08|1994-09-20|NONE|FOB|fluffily even accounts; quickly expr 11840|73814|6322|4|37|66148.97|0.00|0.06|R|F|1994-09-17|1994-10-15|1994-09-30|DELIVER IN PERSON|FOB|uests. pinto beans sleep regular pinto bea 11840|187166|9685|5|41|51379.56|0.06|0.01|R|F|1994-09-14|1994-10-16|1994-10-14|NONE|MAIL|even packages det 11840|86200|6201|6|33|39144.60|0.04|0.05|A|F|1994-10-23|1994-11-12|1994-11-19|COLLECT COD|RAIL| stealthy accounts wak 11841|16862|6863|1|22|39134.92|0.01|0.06|R|F|1993-07-28|1993-05-25|1993-08-21|COLLECT COD|MAIL|c pinto bea 11841|184199|4200|2|26|33362.94|0.06|0.05|R|F|1993-05-22|1993-06-21|1993-06-02|DELIVER IN PERSON|SHIP|ockey players. furiously even requ 11841|22484|4987|3|26|36568.48|0.09|0.05|A|F|1993-05-21|1993-06-21|1993-06-12|COLLECT COD|TRUCK|against the fi 11841|84012|4013|4|22|21912.22|0.06|0.07|A|F|1993-06-30|1993-06-06|1993-07-25|DELIVER IN PERSON|FOB|counts. furiously sp 11841|185853|8372|5|4|7755.40|0.03|0.01|A|F|1993-08-01|1993-07-08|1993-08-09|DELIVER IN PERSON|AIR|iously silent the 11841|24432|9437|6|14|18990.02|0.08|0.03|A|F|1993-05-10|1993-06-18|1993-06-06|COLLECT COD|MAIL|. special theodolites wake blithely 11842|62915|434|1|17|31924.47|0.08|0.00|N|O|1996-07-24|1996-06-12|1996-07-27|DELIVER IN PERSON|MAIL|ns sleep permanently blithely 11842|10020|5023|2|26|24180.52|0.02|0.06|N|O|1996-04-13|1996-05-25|1996-04-22|TAKE BACK RETURN|AIR|y silent excuses affix s 11842|191409|3929|3|21|31508.40|0.01|0.04|N|O|1996-04-14|1996-06-05|1996-04-27|NONE|AIR| according to the stealthily special ho 11843|189162|4199|1|37|46292.92|0.02|0.02|R|F|1994-11-10|1994-08-21|1994-11-22|NONE|AIR|e slyly final requests: fluffily pending 11843|36779|9283|2|32|54904.64|0.06|0.02|R|F|1994-07-26|1994-09-09|1994-08-12|NONE|FOB|es must hav 11843|141353|3868|3|50|69717.50|0.01|0.01|A|F|1994-07-25|1994-09-15|1994-08-02|NONE|SHIP| cajole above the ironic deposits. bl 11843|119667|9668|4|37|62406.42|0.04|0.05|R|F|1994-11-01|1994-10-04|1994-11-04|TAKE BACK RETURN|AIR|nal excuses. slyly special instruct 11844|48088|5601|1|5|5180.40|0.06|0.07|N|O|1997-04-06|1997-03-27|1997-04-24|NONE|AIR|odolites sl 11844|4272|9273|2|38|44698.26|0.09|0.08|N|O|1997-04-03|1997-02-27|1997-04-05|TAKE BACK RETURN|AIR|gainst the ide 11844|196609|9129|3|23|39228.80|0.06|0.08|N|O|1997-02-04|1997-03-19|1997-02-28|TAKE BACK RETURN|RAIL|y regular dependenc 11844|2641|2642|4|1|1543.64|0.03|0.08|N|O|1997-01-21|1997-03-13|1997-02-17|NONE|AIR|ending deposits wake theodolites. express 11845|152227|7258|1|5|6396.10|0.10|0.05|N|O|1997-05-29|1997-06-18|1997-06-03|NONE|MAIL|ding courts from the careful, 11845|64284|9297|2|30|37448.40|0.04|0.01|N|O|1997-07-18|1997-05-17|1997-08-07|COLLECT COD|REG AIR|ecial deposits. slyly final platele 11845|11614|1615|3|48|73229.28|0.05|0.08|N|O|1997-05-21|1997-06-25|1997-06-13|DELIVER IN PERSON|REG AIR|d packages sleep 11845|188155|674|4|13|16160.95|0.03|0.05|N|O|1997-06-14|1997-05-26|1997-07-05|TAKE BACK RETURN|TRUCK|efully across the quickly even theodolite 11846|142518|61|1|15|23407.65|0.04|0.03|A|F|1993-07-01|1993-07-06|1993-07-10|COLLECT COD|REG AIR| final instructions. sl 11846|85818|3343|2|17|30664.77|0.10|0.02|A|F|1993-07-22|1993-06-29|1993-08-08|TAKE BACK RETURN|SHIP|inal foxes run furiously. pi 11846|142868|7897|3|2|3821.72|0.04|0.00|R|F|1993-05-22|1993-07-16|1993-06-01|NONE|TRUCK|ecial accounts. 
11846|175535|3087|4|1|1610.53|0.10|0.08|A|F|1993-07-01|1993-06-23|1993-07-06|COLLECT COD|RAIL|oach above the slyly regula 11847|110848|8382|1|19|35317.96|0.05|0.07|N|O|1997-11-10|1997-11-02|1997-12-06|COLLECT COD|FOB|lyly careful pinto beans are a 11847|17662|164|2|14|22115.24|0.02|0.00|N|O|1997-12-20|1997-12-02|1998-01-08|TAKE BACK RETURN|MAIL| ironic deposits. blithe 11847|33282|3283|3|10|12152.80|0.08|0.00|N|O|1997-10-31|1997-11-23|1997-11-30|TAKE BACK RETURN|FOB|final, ironic 11847|166839|9356|4|5|9529.15|0.04|0.05|N|O|1997-12-31|1997-12-25|1998-01-14|NONE|MAIL|fully among the express dependencies. 11847|49043|9044|5|4|3968.16|0.10|0.05|N|O|1997-10-14|1997-11-23|1997-11-04|NONE|MAIL|egular foxes. carefu 11847|49355|9356|6|26|33913.10|0.07|0.06|N|O|1997-12-15|1997-11-07|1997-12-23|TAKE BACK RETURN|AIR|efully regular deposits. carefully iron 11872|97438|2457|1|43|61723.49|0.07|0.01|A|F|1994-10-29|1994-10-07|1994-11-17|COLLECT COD|MAIL|pecial depo 11873|69603|4616|1|14|22016.40|0.07|0.01|N|O|1996-06-23|1996-07-05|1996-07-20|COLLECT COD|FOB|er. regular foxes above the platele 11874|133463|5977|1|50|74823.00|0.09|0.00|R|F|1992-12-02|1992-10-10|1992-12-15|COLLECT COD|REG AIR|mptotes. pending requests 11874|106147|8658|2|20|23062.80|0.03|0.05|A|F|1992-12-17|1992-10-29|1992-12-19|NONE|FOB|esias. slyly pending requests are accord 11874|9919|2420|3|29|53038.39|0.02|0.07|R|F|1992-11-11|1992-11-05|1992-11-17|COLLECT COD|TRUCK|eep blithely quickly special 11874|106947|9458|4|15|29309.10|0.05|0.08|R|F|1992-10-24|1992-10-22|1992-11-22|COLLECT COD|RAIL|ole ironic instructions 11874|49149|4158|5|44|48318.16|0.02|0.00|A|F|1992-12-19|1992-10-28|1993-01-01|DELIVER IN PERSON|TRUCK| slyly regular accounts. unusual pinto 11874|45226|5227|6|9|10540.98|0.10|0.03|A|F|1992-10-12|1992-12-02|1992-10-31|COLLECT COD|REG AIR|the pending, 11874|107079|4610|7|32|34754.24|0.08|0.01|R|F|1992-12-25|1992-12-07|1993-01-15|DELIVER IN PERSON|RAIL|he carefull 11875|68212|5731|1|40|47208.40|0.04|0.05|R|F|1992-11-11|1992-10-27|1992-12-03|COLLECT COD|MAIL|olphins sleep slyl 11875|22448|7453|2|30|41113.20|0.05|0.03|R|F|1992-09-08|1992-10-23|1992-10-08|TAKE BACK RETURN|RAIL|egular packages; 11875|71076|1077|3|1|1047.07|0.01|0.07|A|F|1992-10-06|1992-10-05|1992-10-09|COLLECT COD|RAIL|ve the idly express deposits. 11876|27216|4723|1|14|16004.94|0.05|0.06|N|O|1995-09-25|1995-08-10|1995-10-04|NONE|FOB|its sleep blithely regular requests. i 11876|7609|110|2|3|4549.80|0.00|0.06|N|O|1995-09-07|1995-08-06|1995-09-23|DELIVER IN PERSON|REG AIR|ainst the blithely final 11876|83496|1021|3|8|11835.92|0.01|0.04|N|O|1995-10-20|1995-09-23|1995-10-27|COLLECT COD|MAIL|ly ruthless accounts; blithely final mul 11876|102360|2361|4|34|46320.24|0.09|0.07|N|O|1995-10-29|1995-09-21|1995-11-17|NONE|FOB|r requests detect furiously 11876|159730|7276|5|2|3579.46|0.00|0.07|N|O|1995-10-12|1995-09-19|1995-11-04|DELIVER IN PERSON|AIR|heaves-- slyly fluffy deposits sleep 11876|172450|2|6|43|65465.35|0.10|0.00|N|O|1995-10-09|1995-08-04|1995-10-12|DELIVER IN PERSON|AIR|y ironic packages use slyly across t 11877|89961|2470|1|4|7803.84|0.10|0.01|R|F|1993-06-19|1993-07-08|1993-07-18|COLLECT COD|TRUCK|. special 11877|164288|9321|2|4|5409.12|0.00|0.03|R|F|1993-06-12|1993-08-11|1993-06-17|DELIVER IN PERSON|SHIP|ly unusual excuses. 
special packages boost 11877|175487|522|3|50|78124.00|0.07|0.07|R|F|1993-08-31|1993-07-01|1993-09-23|TAKE BACK RETURN|MAIL|ly final requests 11877|7505|7506|4|22|31075.00|0.03|0.00|A|F|1993-05-29|1993-07-24|1993-06-09|TAKE BACK RETURN|FOB|ress deposits use eve 11877|134290|4291|5|5|6621.45|0.04|0.04|R|F|1993-09-03|1993-08-01|1993-09-19|COLLECT COD|MAIL|to the ironic accounts. slyly pending pack 11878|134125|9152|1|50|57956.00|0.06|0.00|A|F|1993-05-26|1993-05-22|1993-06-14|TAKE BACK RETURN|AIR|ns among the ironi 11878|31045|8555|2|29|28305.16|0.08|0.04|R|F|1993-03-29|1993-03-31|1993-04-04|COLLECT COD|REG AIR|inal accounts boost furiously dep 11879|68327|5846|1|41|53108.12|0.04|0.07|A|F|1993-07-26|1993-09-10|1993-08-20|TAKE BACK RETURN|TRUCK|t the silent dolphins. carefully pendin 11879|171027|8579|2|14|15372.28|0.03|0.06|A|F|1993-10-05|1993-08-24|1993-10-29|COLLECT COD|AIR|quickly final requests. ironic packages 11879|49724|4733|3|31|51885.32|0.06|0.05|R|F|1993-07-13|1993-09-13|1993-08-03|COLLECT COD|MAIL| carefully slyly even pinto beans. even 11879|109027|4048|4|32|33152.64|0.02|0.03|R|F|1993-09-09|1993-09-02|1993-09-14|TAKE BACK RETURN|FOB|engage furiou 11879|511|3012|5|22|31053.22|0.02|0.04|R|F|1993-10-13|1993-09-15|1993-10-24|DELIVER IN PERSON|TRUCK|usly along the final, even acc 11904|16991|6992|1|45|85859.55|0.08|0.06|N|O|1998-01-29|1998-02-07|1998-02-06|DELIVER IN PERSON|MAIL|theodolites haggle busy accounts. 11904|165129|2678|2|27|32241.24|0.00|0.02|N|O|1997-12-09|1998-01-28|1998-01-08|DELIVER IN PERSON|FOB| special, even attainments cajole slyly fl 11904|121294|3807|3|3|3945.87|0.08|0.07|N|O|1998-03-21|1998-01-08|1998-04-09|COLLECT COD|SHIP|ly fluffily final excuses. always daring 11904|187759|2796|4|27|49862.25|0.06|0.03|N|O|1998-03-18|1998-02-17|1998-03-21|COLLECT COD|MAIL|uctions. furiou 11904|49675|4684|5|28|45490.76|0.02|0.07|N|O|1998-02-09|1998-01-02|1998-02-26|TAKE BACK RETURN|RAIL|t pains are above the ironic packages 11904|86985|9494|6|21|41411.58|0.07|0.02|N|O|1998-03-31|1998-02-01|1998-04-28|COLLECT COD|REG AIR| integrate carefully ac 11904|155241|7757|7|44|57034.56|0.08|0.08|N|O|1998-01-15|1998-02-11|1998-01-19|NONE|AIR|s use unusual instructions. furious 11905|122666|5179|1|27|45593.82|0.05|0.01|N|O|1997-08-19|1997-08-11|1997-09-17|NONE|TRUCK|uctions breach quickly acc 11905|39027|6537|2|18|17388.36|0.01|0.00|N|O|1997-08-30|1997-09-13|1997-09-08|TAKE BACK RETURN|MAIL|pending theodolites nag 11905|116763|9275|3|43|76529.68|0.05|0.08|N|O|1997-10-06|1997-08-04|1997-10-22|COLLECT COD|MAIL|sleep carefully. fluffily regular pla 11905|30785|8295|4|2|3431.56|0.04|0.03|N|O|1997-08-28|1997-09-05|1997-09-25|DELIVER IN PERSON|SHIP|lithely ironi 11906|5599|3100|1|41|61688.19|0.07|0.07|N|O|1997-01-08|1997-01-21|1997-01-17|DELIVER IN PERSON|SHIP| sentiments detect along the careful 11906|91232|6251|2|39|47705.97|0.03|0.03|N|O|1996-12-21|1997-02-28|1997-01-11|TAKE BACK RETURN|REG AIR|ly ironic pinto beans nag q 11906|127909|2934|3|35|67791.50|0.09|0.00|N|O|1997-01-14|1997-02-08|1997-01-24|TAKE BACK RETURN|TRUCK| carefully final asympt 11906|160091|5124|4|45|51799.05|0.10|0.00|N|O|1997-02-14|1997-02-21|1997-02-21|DELIVER IN PERSON|MAIL|. furiously unusual acc 11906|42048|2049|5|33|32671.32|0.01|0.02|N|O|1997-03-10|1997-02-12|1997-04-09|COLLECT COD|MAIL|efully according t 11906|98641|3660|6|39|63945.96|0.04|0.03|N|O|1997-01-07|1997-02-09|1997-01-12|NONE|RAIL|y across the 11907|126669|9182|1|26|44087.16|0.02|0.07|A|F|1993-12-11|1993-11-09|1993-12-28|DELIVER IN PERSON|TRUCK|s. 
furious dolphins could 11907|110099|5122|2|35|38818.15|0.10|0.01|R|F|1993-10-23|1993-10-01|1993-11-01|COLLECT COD|MAIL|he carefully iron 11907|19405|4408|3|32|42380.80|0.03|0.08|R|F|1993-12-18|1993-10-16|1993-12-19|COLLECT COD|REG AIR|sleep quickly around the quickly final ac 11907|130183|5210|4|24|29116.32|0.08|0.03|A|F|1993-09-03|1993-11-02|1993-09-05|DELIVER IN PERSON|REG AIR|oost. furiously ironic gifts prin 11908|87504|13|1|29|43253.50|0.03|0.05|R|F|1993-07-02|1993-08-12|1993-07-27|NONE|REG AIR|arefully blithely special asymptot 11908|75461|2983|2|13|18673.98|0.08|0.02|A|F|1993-08-31|1993-08-19|1993-09-19|NONE|FOB|ajole along the carefully ironic requests. 11908|67901|2914|3|30|56067.00|0.02|0.00|A|F|1993-06-19|1993-07-18|1993-06-26|TAKE BACK RETURN|REG AIR|ructions cajole furiously blith 11908|196191|3749|4|3|3861.57|0.06|0.07|R|F|1993-06-10|1993-08-13|1993-06-18|NONE|REG AIR|e thinly at the ideas. slyly unusual th 11908|171984|1985|5|12|24671.76|0.10|0.08|R|F|1993-07-22|1993-07-01|1993-08-21|NONE|MAIL| requests ar 11908|120936|8473|6|35|68492.55|0.08|0.02|A|F|1993-08-22|1993-07-15|1993-09-17|DELIVER IN PERSON|TRUCK| quickly unusual 11909|159237|6783|1|8|10369.84|0.07|0.00|R|F|1995-03-01|1995-01-15|1995-03-16|DELIVER IN PERSON|SHIP|bout the slyly final platelets. blithel 11909|79502|2010|2|20|29630.00|0.07|0.05|A|F|1995-02-10|1995-01-17|1995-02-21|COLLECT COD|FOB|s the furiously ironic re 11909|189805|2324|3|4|7579.20|0.01|0.03|R|F|1994-12-08|1995-02-12|1994-12-12|NONE|AIR|nstructions. closely iro 11910|96826|1845|1|16|29165.12|0.10|0.06|N|O|1995-08-07|1995-09-05|1995-08-27|NONE|AIR|le according to the ironic hockey play 11910|144648|9677|2|11|18619.04|0.05|0.05|N|O|1995-07-20|1995-08-09|1995-07-28|NONE|TRUCK|. slyly express platelets det 11910|85025|7534|3|28|28280.56|0.06|0.08|N|O|1995-09-19|1995-08-25|1995-10-16|NONE|FOB| special instruction 11910|199561|4600|4|5|8302.80|0.05|0.02|N|O|1995-08-14|1995-08-23|1995-08-24|DELIVER IN PERSON|TRUCK|lites along the accounts wake blithely 11911|56701|9207|1|33|54704.10|0.02|0.02|R|F|1994-03-11|1994-02-04|1994-04-01|DELIVER IN PERSON|TRUCK|rets-- slyly final packages boost fluffil 11911|159355|1871|2|47|66474.45|0.05|0.05|R|F|1994-02-19|1994-01-04|1994-03-12|NONE|SHIP|asymptotes sle 11911|70537|538|3|16|24120.48|0.05|0.08|A|F|1994-01-03|1994-01-27|1994-01-07|NONE|REG AIR|ts hang bli 11911|108339|3360|4|43|57935.19|0.06|0.06|A|F|1994-01-03|1994-01-25|1994-01-18|DELIVER IN PERSON|AIR|about the express, bold pinto be 11911|48207|3216|5|25|28880.00|0.07|0.00|R|F|1993-11-21|1993-12-13|1993-12-04|NONE|TRUCK|deas cajole ironic, final acco 11936|98703|3722|1|13|22122.10|0.10|0.03|N|O|1995-11-10|1995-11-04|1995-12-06|COLLECT COD|TRUCK|uriously even 11936|84827|4828|2|5|9059.10|0.08|0.06|N|O|1996-01-05|1995-10-25|1996-01-22|DELIVER IN PERSON|AIR|the ironic instructions. quickly i 11936|71711|6726|3|21|35336.91|0.03|0.07|N|O|1995-10-16|1995-10-31|1995-11-08|COLLECT COD|REG AIR|pending, even asymptot 11936|34107|9114|4|5|5205.50|0.05|0.04|N|O|1995-10-22|1995-11-22|1995-11-17|DELIVER IN PERSON|MAIL|nusual dependencies. 11936|20043|5048|5|33|31780.32|0.10|0.01|N|O|1995-10-05|1995-12-01|1995-10-16|COLLECT COD|REG AIR|iously quickly express 11936|52318|2319|6|42|53353.02|0.02|0.06|N|O|1995-11-28|1995-11-19|1995-12-16|COLLECT COD|RAIL|e. careful 11937|123736|3737|1|6|10558.38|0.07|0.00|N|O|1998-10-24|1998-09-19|1998-11-08|COLLECT COD|TRUCK|neath the ironic excuses. 
slyly 11937|29277|9278|2|21|25331.67|0.03|0.04|N|O|1998-08-26|1998-09-17|1998-09-21|NONE|FOB|lyly. fluffily final foxes play to the care 11937|42080|4585|3|10|10220.80|0.05|0.04|N|O|1998-10-07|1998-08-06|1998-11-06|COLLECT COD|AIR|re fluffily sp 11937|23482|5985|4|32|44975.36|0.09|0.07|N|O|1998-08-18|1998-09-06|1998-08-30|DELIVER IN PERSON|AIR|quietly bold depe 11937|48568|8569|5|19|28814.64|0.10|0.03|N|O|1998-08-04|1998-09-22|1998-08-08|DELIVER IN PERSON|RAIL|lites wake. fina 11937|72938|2939|6|18|34396.74|0.00|0.00|N|O|1998-07-26|1998-09-06|1998-07-30|NONE|FOB|al deposits. final attainments affi 11937|63136|655|7|18|19784.34|0.08|0.01|N|O|1998-09-22|1998-08-30|1998-10-15|NONE|REG AIR|e slyly carefully fi 11938|110006|5029|1|40|40640.00|0.01|0.05|N|O|1998-08-22|1998-10-18|1998-08-28|COLLECT COD|TRUCK|lay after the silent, ironic ideas. unus 11938|64593|7100|2|48|74764.32|0.09|0.07|N|O|1998-08-25|1998-10-23|1998-09-02|TAKE BACK RETURN|AIR|hely until the sl 11938|12269|2270|3|19|22443.94|0.06|0.03|N|O|1998-10-29|1998-08-29|1998-11-14|NONE|MAIL| accounts. 11938|835|836|4|33|57282.39|0.08|0.03|N|O|1998-11-14|1998-09-22|1998-11-20|COLLECT COD|REG AIR|quickly even accounts affix fur 11939|41461|6470|1|46|64513.16|0.03|0.01|N|O|1996-12-12|1996-11-14|1996-12-14|TAKE BACK RETURN|FOB| boost alongside of the carefu 11939|30880|8390|2|22|39839.36|0.07|0.03|N|O|1996-12-11|1996-12-23|1996-12-30|DELIVER IN PERSON|REG AIR|dazzle. furiously 11940|73394|916|1|9|12306.51|0.01|0.08|N|O|1998-07-16|1998-08-04|1998-08-12|COLLECT COD|TRUCK| cajole furio 11940|130310|5337|2|11|14743.41|0.09|0.00|N|O|1998-08-29|1998-08-14|1998-09-27|NONE|SHIP|uickly carefully even 11940|114308|9331|3|42|55536.60|0.00|0.00|N|O|1998-07-14|1998-07-04|1998-07-26|TAKE BACK RETURN|FOB|uriously against the regular, unusual ideas 11940|170051|7603|4|7|7847.35|0.02|0.03|N|O|1998-09-15|1998-08-19|1998-10-10|NONE|TRUCK| accounts cajole blithel 11940|33589|8596|5|20|30451.60|0.04|0.04|N|O|1998-06-10|1998-08-25|1998-06-21|NONE|TRUCK|s sleep qu 11940|126424|1449|6|36|52215.12|0.00|0.00|N|O|1998-08-30|1998-07-06|1998-09-09|TAKE BACK RETURN|REG AIR|lar, final 11941|45007|7512|1|11|10472.00|0.00|0.05|A|F|1993-07-16|1993-08-01|1993-07-19|NONE|SHIP|al deposits poach. quickly express 11941|20314|2817|2|33|40732.23|0.07|0.07|A|F|1993-10-21|1993-08-09|1993-10-26|DELIVER IN PERSON|SHIP|y even realms engage carefully. 11941|14966|4967|3|34|63952.64|0.08|0.02|A|F|1993-09-23|1993-08-06|1993-09-26|TAKE BACK RETURN|RAIL|unusual dependencies sleep carefully a 11941|125563|588|4|36|57188.16|0.02|0.03|A|F|1993-07-03|1993-07-31|1993-07-21|TAKE BACK RETURN|TRUCK|thely final 11941|136891|6892|5|20|38557.80|0.05|0.00|A|F|1993-08-15|1993-08-04|1993-08-18|NONE|AIR|venly regular frets. bold pinto bean 11941|117786|7787|6|5|9018.90|0.00|0.08|A|F|1993-08-29|1993-09-08|1993-09-06|NONE|SHIP|. blithely 11942|125764|5765|1|36|64431.36|0.01|0.06|A|F|1994-07-04|1994-07-23|1994-07-25|COLLECT COD|REG AIR|s integrate carefully bold, bold ideas. 11943|102173|7194|1|46|54057.82|0.07|0.00|R|F|1993-06-08|1993-06-01|1993-06-15|COLLECT COD|SHIP|ly even pinto beans. slyly regul 11943|9998|9999|2|40|76319.60|0.10|0.04|A|F|1993-04-15|1993-06-18|1993-04-30|NONE|FOB| deposits are pending excuses. 
furious 11943|31767|1768|3|50|84938.00|0.00|0.02|R|F|1993-07-23|1993-06-27|1993-07-24|NONE|RAIL|sual asymptotes wake slyly according 11943|92390|9918|4|33|45618.87|0.09|0.04|A|F|1993-06-24|1993-05-21|1993-07-01|TAKE BACK RETURN|AIR|into beans sleep blit 11943|193988|3989|5|9|18737.82|0.04|0.02|R|F|1993-08-06|1993-06-25|1993-08-15|TAKE BACK RETURN|REG AIR|atelets across the careful 11943|150271|5302|6|17|22461.59|0.03|0.01|R|F|1993-07-23|1993-06-22|1993-08-13|TAKE BACK RETURN|AIR|ar courts. carefull 11968|45256|265|1|15|18018.75|0.06|0.02|A|F|1995-05-19|1995-05-02|1995-06-16|TAKE BACK RETURN|RAIL|ag against the even account 11968|14348|4349|2|25|31558.50|0.10|0.02|N|F|1995-05-26|1995-06-13|1995-06-22|COLLECT COD|SHIP|sts wake carefully about the close 11969|29924|7431|1|10|18539.20|0.10|0.04|R|F|1992-01-30|1992-03-01|1992-02-01|COLLECT COD|TRUCK|eans above 11969|125906|8419|2|38|73412.20|0.09|0.05|A|F|1992-01-29|1992-03-26|1992-02-10|DELIVER IN PERSON|AIR|yly final hockey players al 11969|95479|7989|3|37|54555.39|0.04|0.05|R|F|1992-04-24|1992-03-24|1992-04-29|COLLECT COD|FOB|ts. furiously final req 11969|101537|9068|4|4|6154.12|0.04|0.05|A|F|1992-02-15|1992-03-27|1992-02-16|COLLECT COD|REG AIR|t the permanently even theodolites sle 11969|124049|4050|5|43|46140.72|0.02|0.04|R|F|1992-02-12|1992-04-16|1992-02-21|TAKE BACK RETURN|AIR|he slyly ironic foxe 11969|82282|7299|6|30|37928.40|0.06|0.07|R|F|1992-04-23|1992-02-19|1992-05-14|COLLECT COD|REG AIR|accounts are after the blithe 11969|111024|6047|7|44|45540.88|0.08|0.04|A|F|1992-04-21|1992-03-14|1992-04-24|DELIVER IN PERSON|REG AIR| beans above the carefully express requests 11970|177623|7624|1|18|30611.16|0.06|0.00|N|O|1998-06-03|1998-08-20|1998-06-15|DELIVER IN PERSON|SHIP|t the busily regular accounts thrash p 11970|98294|3313|2|28|36184.12|0.09|0.04|N|O|1998-08-20|1998-07-30|1998-09-08|TAKE BACK RETURN|MAIL|eposits. blithely pending a 11970|19872|7376|3|17|30461.79|0.02|0.07|N|O|1998-09-05|1998-07-11|1998-09-22|COLLECT COD|MAIL|uickly. carefully unusual asymptotes sle 11970|180076|77|4|21|24277.47|0.01|0.00|N|O|1998-06-12|1998-07-06|1998-06-15|TAKE BACK RETURN|AIR|ake furiously. slyly regula 11970|136506|4046|5|48|74040.00|0.00|0.05|N|O|1998-07-28|1998-07-21|1998-08-18|COLLECT COD|REG AIR|e accounts are busily above the 11970|46275|1284|6|41|50072.07|0.03|0.08|N|O|1998-06-04|1998-07-13|1998-06-08|TAKE BACK RETURN|FOB|y along th 11971|116679|6680|1|5|8478.35|0.03|0.08|N|O|1997-04-12|1997-05-15|1997-05-05|NONE|MAIL|ic requests. slyly unusual i 11971|9312|1813|2|5|6106.55|0.08|0.05|N|O|1997-04-01|1997-06-04|1997-04-23|COLLECT COD|FOB|y furiously ironi 11971|90822|3332|3|36|65261.52|0.05|0.00|N|O|1997-05-05|1997-06-10|1997-05-27|DELIVER IN PERSON|FOB|yly across the slyly daring foxes-- slyly 11971|44625|4626|4|34|53367.08|0.00|0.03|N|O|1997-04-03|1997-05-16|1997-05-01|TAKE BACK RETURN|MAIL|out the quickly special accounts ca 11972|193036|3037|1|10|11290.30|0.02|0.03|R|F|1994-03-13|1994-02-22|1994-04-08|DELIVER IN PERSON|REG AIR|refully bold instru 11972|141252|1253|2|38|49143.50|0.02|0.02|R|F|1994-03-24|1994-02-23|1994-04-19|DELIVER IN PERSON|RAIL|gular sentiments boost. 
quic 11972|17183|9685|3|38|41806.84|0.02|0.00|A|F|1994-03-26|1994-02-07|1994-04-05|COLLECT COD|REG AIR| accounts det 11972|179784|7336|4|20|37275.60|0.02|0.01|A|F|1994-02-10|1994-02-03|1994-03-04|DELIVER IN PERSON|FOB|sts cajole idly regular reques 11972|20546|547|5|5|7332.70|0.01|0.03|A|F|1994-03-15|1994-03-19|1994-04-11|DELIVER IN PERSON|REG AIR|ash across the ca 11973|18344|8345|1|2|2524.68|0.10|0.07|R|F|1994-04-05|1994-04-21|1994-05-04|NONE|FOB|y requests. regular, express theo 11973|96564|4092|2|45|70225.20|0.05|0.02|R|F|1994-06-16|1994-05-28|1994-06-26|TAKE BACK RETURN|REG AIR|endencies try 11973|138824|6364|3|15|27942.30|0.08|0.07|A|F|1994-03-05|1994-04-06|1994-03-07|NONE|TRUCK|arefully after the 11973|13341|8344|4|11|13797.74|0.09|0.06|A|F|1994-04-23|1994-04-04|1994-05-20|DELIVER IN PERSON|SHIP| silent deposit 11973|5853|3354|5|1|1758.85|0.05|0.01|R|F|1994-03-31|1994-04-08|1994-04-11|NONE|RAIL|unts. carefully regular acco 11973|159837|2353|6|30|56904.90|0.08|0.07|R|F|1994-04-27|1994-04-04|1994-05-16|COLLECT COD|SHIP|ess, final dept 11974|76806|4328|1|42|74877.60|0.07|0.06|R|F|1992-10-24|1992-12-28|1992-11-08|DELIVER IN PERSON|RAIL|eas cajole; silent 11974|78181|8182|2|12|13910.16|0.10|0.05|R|F|1992-10-18|1993-01-02|1992-10-28|DELIVER IN PERSON|MAIL|s beyond th 11975|173575|1127|1|23|37917.11|0.07|0.05|N|O|1995-06-18|1995-07-03|1995-06-30|COLLECT COD|TRUCK|s detect above the b 11975|67370|7371|2|19|25410.03|0.10|0.01|N|O|1995-07-01|1995-06-17|1995-07-17|NONE|FOB|regular requests nag qui 11975|71225|1226|3|40|47848.80|0.02|0.00|N|O|1995-07-23|1995-06-15|1995-08-02|NONE|SHIP|uffily furiously bo 11975|66673|9180|4|34|55748.78|0.06|0.03|N|O|1995-06-22|1995-05-24|1995-06-26|NONE|MAIL|ate furiously quickly ironic re 11975|172938|5456|5|32|64349.76|0.04|0.03|N|O|1995-08-09|1995-05-25|1995-08-11|NONE|FOB|eposits cajole across the fur 12000|129438|4463|1|33|48425.19|0.03|0.03|R|F|1994-09-10|1994-08-01|1994-10-04|DELIVER IN PERSON|MAIL|lyly even gifts. f 12000|64359|6866|2|6|7940.10|0.09|0.01|A|F|1994-06-15|1994-06-17|1994-07-12|COLLECT COD|MAIL|counts. carefu 12000|77944|452|3|9|17297.46|0.03|0.05|R|F|1994-09-05|1994-07-22|1994-09-26|TAKE BACK RETURN|AIR|quests play blithel 12000|196890|4448|4|13|25829.57|0.00|0.06|R|F|1994-06-09|1994-08-08|1994-06-13|COLLECT COD|RAIL|l ideas haggle across the e 12001|195743|8263|1|8|14709.92|0.01|0.01|R|F|1994-07-18|1994-09-30|1994-08-03|TAKE BACK RETURN|SHIP|ickly final packages cajole bli 12001|45856|865|2|23|41442.55|0.04|0.05|R|F|1994-08-11|1994-10-05|1994-08-30|NONE|FOB| fluffily final ideas haggle above t 12001|90652|8180|3|11|18069.15|0.05|0.04|R|F|1994-07-17|1994-09-25|1994-08-12|TAKE BACK RETURN|TRUCK|totes sleep qu 12001|171889|1890|4|46|90200.48|0.09|0.07|R|F|1994-09-19|1994-08-10|1994-10-12|NONE|SHIP|eep quietly. regula 12002|196745|1784|1|44|81036.56|0.07|0.04|R|F|1994-03-28|1994-02-10|1994-03-30|COLLECT COD|REG AIR|lly pending ideas promise theodolites! r 12003|171996|1997|1|10|20679.90|0.06|0.07|N|O|1998-07-06|1998-08-10|1998-07-27|COLLECT COD|REG AIR|nstruction 12004|64822|7329|1|47|83980.54|0.04|0.07|R|F|1995-02-10|1995-01-30|1995-02-11|COLLECT COD|SHIP|s. dugouts use. 
slyly regula 12005|34175|4176|1|13|14419.21|0.03|0.02|R|F|1992-07-09|1992-07-31|1992-08-05|TAKE BACK RETURN|AIR|thely around the quickly express de 12005|163319|8352|2|26|35940.06|0.10|0.01|R|F|1992-08-03|1992-08-10|1992-08-29|TAKE BACK RETURN|MAIL| the blithely reg 12005|92410|7429|3|25|35060.25|0.06|0.07|R|F|1992-09-21|1992-08-17|1992-10-01|NONE|REG AIR|blithely final accounts. even requests ab 12005|125147|5148|4|2|2344.28|0.09|0.00|A|F|1992-08-18|1992-08-05|1992-09-10|TAKE BACK RETURN|AIR|refully ironic accounts. fu 12005|135008|35|5|41|42763.00|0.06|0.01|R|F|1992-07-19|1992-08-20|1992-08-14|NONE|FOB|deposits. requests haggle carefu 12005|67318|9825|6|40|51412.40|0.09|0.07|R|F|1992-07-29|1992-09-22|1992-08-20|COLLECT COD|AIR|s. slyly ironic requests haggle blith 12005|18|7519|7|46|42228.46|0.05|0.03|R|F|1992-07-01|1992-08-15|1992-07-18|TAKE BACK RETURN|AIR| furiously express grouches sleep reg 12006|101018|8549|1|37|37703.37|0.01|0.04|N|O|1997-06-20|1997-08-31|1997-07-05|TAKE BACK RETURN|AIR|ly thin exc 12006|6408|6409|2|32|42060.80|0.00|0.05|N|O|1997-09-21|1997-07-05|1997-10-17|NONE|FOB|bout the slyly regular foxes. fur 12006|91903|9431|3|18|34108.20|0.04|0.02|N|O|1997-09-02|1997-07-25|1997-09-03|COLLECT COD|REG AIR| asymptotes haggle blithely. pe 12006|1375|1376|4|19|24251.03|0.02|0.05|N|O|1997-08-08|1997-07-24|1997-09-06|COLLECT COD|REG AIR|ial deposits sleep blithely furiously pend 12006|93486|1014|5|7|10356.36|0.08|0.01|N|O|1997-08-04|1997-08-06|1997-08-10|TAKE BACK RETURN|REG AIR|requests according t 12007|94741|2269|1|45|78108.30|0.06|0.05|R|F|1994-12-18|1994-12-22|1994-12-30|TAKE BACK RETURN|FOB|s integrate slyly. ideas use. blith 12007|98362|3381|2|11|14963.96|0.10|0.03|R|F|1994-10-10|1994-10-30|1994-11-04|TAKE BACK RETURN|REG AIR|es sleep blithely a 12007|158164|680|3|41|50108.56|0.08|0.03|R|F|1995-01-27|1994-11-19|1995-02-17|DELIVER IN PERSON|RAIL| about the s 12007|56905|1916|4|8|14895.20|0.02|0.08|R|F|1994-11-05|1994-12-18|1994-11-21|NONE|SHIP|t the pinto beans. bold, u 12007|192503|7542|5|40|63820.00|0.07|0.00|A|F|1994-10-20|1994-12-07|1994-11-05|COLLECT COD|AIR|. dogged w 12032|41408|1409|1|28|37783.20|0.05|0.03|N|O|1997-10-19|1997-10-13|1997-11-16|TAKE BACK RETURN|AIR|equests are 12032|128837|6374|2|39|72767.37|0.01|0.06|N|O|1997-11-27|1997-10-22|1997-11-29|DELIVER IN PERSON|MAIL|sits are carefull 12032|147611|126|3|1|1658.61|0.04|0.07|N|O|1997-11-18|1997-10-18|1997-11-27|DELIVER IN PERSON|TRUCK|ickly care 12032|87639|148|4|10|16266.30|0.09|0.00|N|O|1997-08-25|1997-11-08|1997-09-10|TAKE BACK RETURN|TRUCK|ainments wake blithely accor 12033|52063|9579|1|33|33496.98|0.06|0.03|A|F|1992-11-03|1992-11-27|1992-11-23|DELIVER IN PERSON|SHIP|yly express excuses boost slyly fu 12033|19638|7142|2|30|46728.90|0.07|0.00|R|F|1992-10-19|1992-10-22|1992-11-09|COLLECT COD|AIR|ily express 12033|48461|966|3|22|31008.12|0.02|0.07|R|F|1992-12-22|1992-11-08|1993-01-18|NONE|MAIL|egular courts. ironic, bold deposits a 12033|70943|5958|4|35|66987.90|0.10|0.08|R|F|1992-11-07|1992-11-03|1992-12-05|TAKE BACK RETURN|RAIL| fluffily even theodolites. pac 12033|134644|7158|5|39|65466.96|0.01|0.07|R|F|1993-01-07|1992-10-21|1993-01-13|NONE|AIR|efully ideas. platelets 12034|167264|2297|1|33|43931.58|0.03|0.00|N|O|1997-01-22|1997-01-17|1997-02-12|DELIVER IN PERSON|AIR|ffily final foxes. 
furiously 12034|192644|202|2|21|36469.44|0.06|0.06|N|O|1997-01-29|1997-02-11|1997-02-26|DELIVER IN PERSON|SHIP|le slyly across the blithely final courts 12035|144806|7321|1|35|64778.00|0.08|0.04|N|O|1996-12-11|1996-12-30|1996-12-26|DELIVER IN PERSON|RAIL|ly deposits wake. fluffily regul 12035|144599|9628|2|41|67387.19|0.00|0.06|N|O|1996-11-15|1997-02-06|1996-11-26|DELIVER IN PERSON|RAIL|accounts. final requests 12035|60111|112|3|7|7497.77|0.04|0.05|N|O|1996-12-31|1997-01-17|1997-01-03|NONE|TRUCK| unusual foxes. even, spe 12035|79923|2431|4|39|74213.88|0.04|0.01|N|O|1997-02-16|1997-02-05|1997-03-05|COLLECT COD|REG AIR|gular dependencies with 12035|55954|5955|5|1|1909.95|0.08|0.00|N|O|1997-01-20|1997-01-28|1997-01-26|TAKE BACK RETURN|FOB|permanently. slyl 12035|9618|4619|6|34|51938.74|0.06|0.00|N|O|1996-12-01|1996-12-19|1996-12-27|TAKE BACK RETURN|RAIL|e slyly regula 12035|49109|9110|7|25|26452.50|0.04|0.02|N|O|1997-03-01|1996-12-15|1997-03-22|TAKE BACK RETURN|SHIP|ions shall have to ca 12036|164335|9368|1|38|53174.54|0.04|0.02|N|O|1998-03-23|1998-03-30|1998-04-05|TAKE BACK RETURN|FOB|ounts. pac 12036|172126|2127|2|33|39537.96|0.01|0.08|N|O|1998-03-28|1998-02-15|1998-04-19|TAKE BACK RETURN|MAIL| final ideas. slyl 12036|107423|7424|3|29|41482.18|0.09|0.00|N|O|1998-03-02|1998-04-07|1998-03-05|TAKE BACK RETURN|SHIP|sts. carefully final r 12036|44430|4431|4|19|26114.17|0.02|0.07|N|O|1998-04-02|1998-03-12|1998-04-20|DELIVER IN PERSON|TRUCK| are furiously. un 12036|134528|2068|5|7|10937.64|0.03|0.05|N|O|1998-01-23|1998-03-03|1998-02-21|COLLECT COD|TRUCK|ntegrate slyly alongside of the c 12036|5580|581|6|35|51995.30|0.00|0.04|N|O|1998-02-08|1998-04-16|1998-02-28|TAKE BACK RETURN|REG AIR| carefully always pend 12036|170742|5777|7|20|36254.80|0.09|0.03|N|O|1998-05-09|1998-03-02|1998-06-06|TAKE BACK RETURN|FOB| theodolite 12037|55485|5486|1|3|4321.44|0.02|0.05|N|O|1995-08-20|1995-08-23|1995-09-01|DELIVER IN PERSON|MAIL| platelets above the unusual deposit 12037|122147|2148|2|20|23382.80|0.03|0.05|N|O|1995-07-20|1995-08-26|1995-08-07|NONE|FOB|ly express theodolites. special ideas a 12037|119146|6680|3|12|13981.68|0.05|0.03|N|O|1995-08-19|1995-09-29|1995-08-26|DELIVER IN PERSON|RAIL| packages u 12037|80269|5286|4|10|12492.60|0.00|0.01|N|O|1995-09-23|1995-09-19|1995-10-11|NONE|RAIL|aggle slyly. final pint 12037|66892|1905|5|5|9294.45|0.09|0.06|N|O|1995-10-16|1995-08-14|1995-10-26|NONE|SHIP|ar dugouts kindle quickly regular id 12037|49635|4644|6|46|72892.98|0.09|0.07|N|O|1995-08-29|1995-08-16|1995-09-04|NONE|FOB|bold instructions. quickly regular r 12037|199943|7501|7|13|26558.22|0.04|0.08|N|O|1995-07-03|1995-08-21|1995-07-26|NONE|AIR|ly quickly ironic theodolites. ironic i 12038|31586|9096|1|15|22763.70|0.04|0.06|N|O|1995-12-14|1996-01-07|1996-01-04|TAKE BACK RETURN|RAIL|dencies. blithely special plate 12038|56094|3610|2|28|29402.52|0.04|0.04|N|O|1996-02-29|1996-01-26|1996-03-16|COLLECT COD|REG AIR|silent dolphins sleep carefully pend 12039|44983|7488|1|49|94471.02|0.03|0.06|A|F|1993-08-09|1993-07-29|1993-08-22|TAKE BACK RETURN|RAIL|ly furious 12039|28913|8914|2|43|79202.13|0.06|0.07|R|F|1993-07-24|1993-08-14|1993-08-21|DELIVER IN PERSON|TRUCK|side of the ironic, final platelets 12039|131258|3772|3|44|56727.00|0.10|0.03|R|F|1993-09-25|1993-08-14|1993-10-14|TAKE BACK RETURN|AIR|. quickly re 12039|50037|7553|4|7|6909.21|0.01|0.04|A|F|1993-08-05|1993-08-10|1993-09-01|NONE|RAIL|le furiously. asymptot 12039|168603|8604|5|46|76893.60|0.09|0.01|A|F|1993-07-17|1993-08-08|1993-08-02|DELIVER IN PERSON|TRUCK|earls. 
slyly 12039|108582|3603|6|46|73166.68|0.06|0.05|R|F|1993-08-12|1993-07-17|1993-08-16|COLLECT COD|FOB|hy accounts: final theod 12039|136931|6932|7|10|19679.30|0.10|0.07|R|F|1993-08-05|1993-08-31|1993-08-07|DELIVER IN PERSON|AIR|instructions 12064|196280|8800|1|9|12386.52|0.04|0.06|R|F|1993-01-01|1992-11-14|1993-01-26|NONE|MAIL|d, bold deposits wake quickly regular 12064|75298|7806|2|13|16552.77|0.04|0.04|A|F|1992-11-08|1992-11-26|1992-11-25|TAKE BACK RETURN|AIR|ts. ironic deposits alongside of the ironic 12064|120203|7740|3|3|3669.60|0.09|0.08|R|F|1992-12-27|1992-12-06|1992-12-31|NONE|FOB|boost slyly pending deposits. care 12064|46454|8959|4|24|33610.80|0.03|0.04|R|F|1992-12-08|1992-12-03|1993-01-06|DELIVER IN PERSON|AIR|ly. carefully silent a 12064|144257|4258|5|24|31230.00|0.08|0.04|A|F|1992-11-17|1992-11-23|1992-12-17|NONE|MAIL|e blithely unusual theodo 12064|115320|5321|6|27|36053.64|0.05|0.01|A|F|1992-09-26|1992-10-24|1992-10-05|NONE|RAIL|into beans. fl 12065|130182|7722|1|48|58184.64|0.00|0.04|N|O|1997-02-04|1997-03-12|1997-02-17|COLLECT COD|FOB| since the slyl 12066|121182|1183|1|12|14438.16|0.09|0.03|A|F|1995-05-15|1995-04-09|1995-06-07|COLLECT COD|TRUCK|ts above the slowly regular pinto beans w 12066|98939|6467|2|21|40696.53|0.03|0.01|A|F|1995-04-05|1995-03-22|1995-04-12|TAKE BACK RETURN|SHIP|sleep after the quic 12066|67270|2283|3|23|28457.21|0.03|0.01|R|F|1995-04-22|1995-04-07|1995-05-08|TAKE BACK RETURN|AIR|uests. speci 12066|196285|8805|4|35|48344.80|0.08|0.05|R|F|1995-05-27|1995-03-31|1995-06-10|TAKE BACK RETURN|SHIP|ss accounts. even accounts sleep darin 12066|148942|8943|5|27|53755.38|0.07|0.07|N|F|1995-06-03|1995-04-18|1995-06-22|COLLECT COD|REG AIR|nusual pinto beans. careful theodo 12067|150268|269|1|23|30319.98|0.06|0.04|R|F|1993-03-16|1993-04-24|1993-04-10|COLLECT COD|FOB|pinto beans cajole 12067|148470|8471|2|32|48591.04|0.10|0.00|A|F|1993-04-14|1993-04-04|1993-04-25|NONE|RAIL|eas. blithely ironic d 12067|133136|8163|3|22|25720.86|0.04|0.05|A|F|1993-05-09|1993-04-08|1993-06-03|COLLECT COD|TRUCK|the even realms. 12067|10612|8116|4|30|45678.30|0.08|0.00|R|F|1993-04-06|1993-05-06|1993-04-12|NONE|AIR|es sleep fluffily furi 12067|142765|5280|5|1|1807.76|0.02|0.04|A|F|1993-03-09|1993-04-09|1993-03-21|COLLECT COD|TRUCK|nto beans; regular courts are 12067|50597|3103|6|50|77379.50|0.07|0.07|A|F|1993-02-10|1993-05-06|1993-02-24|NONE|MAIL|es. bold instructions use after the re 12067|182215|9770|7|18|23349.78|0.06|0.01|A|F|1993-05-09|1993-05-01|1993-05-11|TAKE BACK RETURN|TRUCK| blithely bold excuses. requests beneath 12068|80926|3435|1|8|15255.36|0.09|0.03|N|O|1996-08-19|1996-08-03|1996-08-24|NONE|RAIL|ly. unusual, bold theodolites use carefully 12068|107705|5236|2|8|13701.60|0.04|0.00|N|O|1996-06-07|1996-08-06|1996-06-15|TAKE BACK RETURN|FOB|der carefully. furiously unusual deposits 12069|187335|4890|1|35|49781.55|0.03|0.01|N|O|1995-12-12|1995-12-12|1995-12-22|COLLECT COD|MAIL|ely final acco 12069|129073|4098|2|28|30857.96|0.01|0.05|N|O|1995-12-14|1995-12-10|1995-12-22|NONE|REG AIR| carefully regular accounts are slyly: ex 12069|113501|6013|3|8|12116.00|0.08|0.08|N|O|1995-11-20|1995-12-29|1995-12-03|DELIVER IN PERSON|MAIL|ans. bold, silent dolphins snooze blithel 12069|9791|4792|4|35|59527.65|0.05|0.03|N|O|1995-12-28|1996-01-25|1995-12-29|COLLECT COD|REG AIR|uses are! 
dol 12069|142339|7368|5|21|29007.93|0.08|0.07|N|O|1995-12-15|1996-01-29|1996-01-09|DELIVER IN PERSON|REG AIR| bold ideas are furiously accord 12069|85550|3075|6|41|62957.55|0.08|0.06|N|O|1995-12-04|1996-01-13|1995-12-16|NONE|SHIP|y across the carefull 12070|3972|8973|1|4|7503.88|0.10|0.03|N|O|1998-08-08|1998-06-23|1998-09-06|TAKE BACK RETURN|TRUCK|slyly ironi 12070|145009|2552|2|50|52700.00|0.00|0.04|N|O|1998-06-18|1998-08-15|1998-07-06|TAKE BACK RETURN|TRUCK|sias cajole slyly. furiously dogged ins 12070|153444|5960|3|19|28451.36|0.02|0.01|N|O|1998-06-16|1998-07-29|1998-06-19|DELIVER IN PERSON|REG AIR|luffy instructions integrate furious 12070|127577|7578|4|48|77019.36|0.02|0.07|N|O|1998-05-28|1998-08-19|1998-06-07|NONE|RAIL|lent theodolites. pi 12070|178453|6005|5|14|21440.30|0.09|0.03|N|O|1998-07-22|1998-08-10|1998-07-27|NONE|TRUCK| accounts. quickly silent 12070|48764|6277|6|40|68510.40|0.00|0.07|N|O|1998-06-19|1998-08-13|1998-06-22|DELIVER IN PERSON|REG AIR|kly final instr 12070|174778|4779|7|9|16674.93|0.01|0.03|N|O|1998-05-26|1998-08-16|1998-05-31|DELIVER IN PERSON|FOB|ular packages must wak 12071|93347|875|1|27|36189.18|0.02|0.00|N|O|1998-08-17|1998-08-24|1998-09-10|NONE|SHIP|refully carefully final deposits. f 12071|70329|2837|2|23|29884.36|0.05|0.06|N|O|1998-10-07|1998-08-15|1998-10-28|DELIVER IN PERSON|SHIP|. slyly special attainments poach blit 12071|9340|1841|3|15|18740.10|0.08|0.06|N|O|1998-08-18|1998-08-13|1998-08-27|DELIVER IN PERSON|RAIL|; careful, unusual requests haggle c 12096|5930|3431|1|21|38554.53|0.10|0.03|A|F|1992-12-10|1992-11-08|1992-12-13|DELIVER IN PERSON|SHIP|kly after the slyly bold tithes? deposi 12096|49374|6887|2|48|63521.76|0.00|0.05|A|F|1992-10-08|1992-10-23|1992-10-27|TAKE BACK RETURN|TRUCK|quickly across the slyly 12096|18446|948|3|13|17737.72|0.10|0.00|A|F|1992-11-07|1992-11-29|1992-11-25|COLLECT COD|TRUCK|ss deposits hinder caref 12096|5655|8156|4|29|45258.85|0.04|0.07|A|F|1993-01-15|1992-11-03|1993-01-28|DELIVER IN PERSON|REG AIR|final foxes accord 12096|34848|2358|5|24|42788.16|0.00|0.04|R|F|1992-12-03|1992-11-20|1992-12-18|TAKE BACK RETURN|TRUCK|aringly ironic ideas h 12096|81180|1181|6|40|46447.20|0.05|0.03|A|F|1992-11-09|1992-11-12|1992-11-29|DELIVER IN PERSON|SHIP| furiously 12096|187062|7063|7|17|19534.02|0.08|0.04|R|F|1992-11-14|1992-11-11|1992-12-02|NONE|TRUCK|dolites affix? slyly even realms integ 12097|138074|3101|1|50|55603.50|0.03|0.02|R|F|1994-01-19|1994-01-23|1994-02-14|DELIVER IN PERSON|REG AIR| after the carefull 12097|5135|5136|2|17|17682.21|0.08|0.06|A|F|1993-12-30|1994-01-17|1994-01-12|NONE|AIR|osits about the depths integ 12097|91917|6936|3|33|62994.03|0.06|0.01|R|F|1994-03-01|1993-12-16|1994-03-12|TAKE BACK RETURN|RAIL|hins alongside of the blithely i 12097|39592|4599|4|15|22973.85|0.03|0.08|A|F|1994-02-20|1994-01-10|1994-03-19|TAKE BACK RETURN|TRUCK|lyly silent platele 12098|157360|2391|1|25|35434.00|0.08|0.03|A|F|1993-06-24|1993-07-19|1993-07-02|NONE|TRUCK|lithely ironic deposits solve slyly. 
regula 12099|133057|8084|1|22|23981.10|0.10|0.07|A|F|1994-09-23|1994-10-29|1994-10-14|TAKE BACK RETURN|TRUCK|ld deposits boost until the regular plat 12099|85848|5849|2|19|34842.96|0.01|0.04|R|F|1994-10-10|1994-10-27|1994-10-22|COLLECT COD|AIR|n, special packages sleep across the bl 12099|46597|4110|3|41|63287.19|0.05|0.08|R|F|1994-12-18|1994-12-11|1994-12-26|NONE|FOB|eath the carefully 12099|142194|9737|4|37|45739.03|0.01|0.03|A|F|1994-11-08|1994-12-09|1994-11-15|NONE|SHIP|the carefully careful ideas cajole aga 12099|41305|8818|5|17|21187.10|0.10|0.03|A|F|1994-12-28|1994-11-25|1995-01-25|NONE|MAIL|nal pinto beans abov 12099|196863|9383|6|22|43116.92|0.02|0.03|R|F|1994-09-14|1994-10-25|1994-10-13|TAKE BACK RETURN|FOB|ng courts boost furiously 12100|180958|8513|1|36|73402.20|0.00|0.02|N|O|1996-06-10|1996-06-11|1996-06-20|TAKE BACK RETURN|SHIP|regular deposits run carefully final p 12101|120010|7547|1|28|28840.28|0.08|0.01|A|F|1995-02-13|1995-02-27|1995-03-14|DELIVER IN PERSON|TRUCK|es haggle express excuses. 12101|1596|1597|2|25|37439.75|0.04|0.04|A|F|1995-02-13|1995-03-02|1995-02-14|COLLECT COD|REG AIR|al, final foxes about the ir 12101|14373|1877|3|48|61793.76|0.03|0.06|R|F|1995-03-07|1995-02-06|1995-04-02|NONE|TRUCK|gular platelets. care 12102|36536|4046|1|22|32395.66|0.07|0.04|N|O|1996-02-09|1995-12-20|1996-03-02|COLLECT COD|MAIL|pending escapade 12102|59974|4985|2|20|38679.40|0.01|0.05|N|O|1996-01-21|1996-01-18|1996-02-15|COLLECT COD|RAIL|- furiously express warhorses ca 12102|173298|8333|3|32|43881.28|0.07|0.06|N|O|1996-01-24|1995-12-28|1996-02-23|TAKE BACK RETURN|RAIL|ccounts cajole around the even ideas. quick 12102|7651|7652|4|34|52994.10|0.05|0.05|N|O|1996-01-04|1996-01-20|1996-01-25|TAKE BACK RETURN|MAIL|ts nag bold, special deposits. even req 12102|182156|7193|5|38|47049.70|0.06|0.07|N|O|1995-11-20|1996-01-10|1995-12-04|TAKE BACK RETURN|REG AIR| platelets; furiously 12103|69727|9728|1|24|40721.28|0.09|0.01|N|O|1997-02-17|1997-02-23|1997-03-08|COLLECT COD|REG AIR|he finally regular r 12103|92318|2319|2|33|43240.23|0.02|0.03|N|O|1996-12-23|1996-12-27|1997-01-21|COLLECT COD|RAIL|gular deposits. slyly regular request 12103|176524|6525|3|5|8002.60|0.05|0.08|N|O|1997-01-23|1996-12-29|1997-02-05|TAKE BACK RETURN|RAIL|lyly above the quickly unusu 12103|37177|2184|4|35|38995.95|0.06|0.01|N|O|1996-12-26|1997-01-16|1997-01-22|TAKE BACK RETURN|RAIL| according to the slyly pendin 12103|28762|1265|5|19|32124.44|0.01|0.00|N|O|1997-01-22|1997-02-17|1997-01-27|DELIVER IN PERSON|SHIP|es. unusual foxes wake foxes. 
quick 12128|26707|4214|1|22|35941.40|0.10|0.04|N|O|1997-08-30|1997-09-04|1997-09-01|DELIVER IN PERSON|AIR| accounts are slyl 12128|60785|8304|2|16|27932.48|0.07|0.03|N|O|1997-07-13|1997-08-19|1997-08-10|COLLECT COD|AIR|he fluffily regular asymptotes thrash aga 12128|10572|8076|3|31|45959.67|0.01|0.08|N|O|1997-07-06|1997-08-14|1997-07-22|NONE|SHIP| theodolites cajole fluf 12128|83352|877|4|47|62761.45|0.06|0.08|N|O|1997-10-20|1997-08-14|1997-11-07|DELIVER IN PERSON|REG AIR|, regular pinto 12129|191010|8568|1|8|8808.08|0.00|0.00|A|F|1993-02-06|1993-02-21|1993-03-05|NONE|RAIL|regular requests haggle furious 12129|91681|4191|2|11|18399.48|0.02|0.06|R|F|1993-01-09|1993-03-15|1993-01-16|TAKE BACK RETURN|SHIP|carefully final theodol 12129|74871|9886|3|45|83064.15|0.00|0.00|R|F|1993-03-19|1993-02-24|1993-03-21|COLLECT COD|AIR|t the quic 12129|143967|3968|4|2|4021.92|0.07|0.01|A|F|1993-01-31|1993-02-16|1993-03-01|NONE|MAIL| above the c 12129|49757|2262|5|4|6827.00|0.05|0.04|A|F|1993-01-15|1993-02-25|1993-01-30|NONE|RAIL|fix silent asym 12130|94877|4878|1|35|65515.45|0.07|0.03|N|O|1996-02-04|1996-03-06|1996-03-05|COLLECT COD|SHIP|. regular, express requests befor 12130|195879|5880|2|11|21723.57|0.05|0.05|N|O|1995-12-18|1996-02-19|1996-01-03|NONE|MAIL|l tithes about the blithel 12130|122789|2790|3|47|85153.66|0.01|0.02|N|O|1996-01-17|1996-02-05|1996-02-08|NONE|TRUCK|yly ironic packages. bold pa 12130|110894|895|4|10|19048.90|0.09|0.04|N|O|1995-12-13|1996-01-20|1995-12-22|COLLECT COD|RAIL|iresias. special packages wak 12130|183881|1436|5|7|13754.16|0.05|0.00|N|O|1996-03-01|1996-02-24|1996-03-21|COLLECT COD|FOB|deposits. express, 12131|105988|1009|1|26|51843.48|0.03|0.01|N|O|1998-09-20|1998-08-06|1998-10-02|COLLECT COD|SHIP|courts. blithely r 12131|133985|9012|2|9|18170.82|0.06|0.06|N|O|1998-06-30|1998-08-12|1998-07-21|DELIVER IN PERSON|FOB|ular requests. unusual pinto 12131|79805|7327|3|21|37480.80|0.07|0.07|N|O|1998-10-06|1998-07-31|1998-10-13|COLLECT COD|FOB|posits. ideas across the requests kindl 12131|68864|6383|4|40|73314.40|0.08|0.00|N|O|1998-07-05|1998-08-27|1998-07-07|NONE|AIR|ests haggle 12131|186130|8649|5|21|25538.73|0.06|0.04|N|O|1998-07-22|1998-08-07|1998-07-26|TAKE BACK RETURN|SHIP|nstructions are bli 12131|75741|5742|6|43|73819.82|0.03|0.08|N|O|1998-09-12|1998-07-31|1998-09-25|COLLECT COD|MAIL|pendencies nag slyly. fluffil 12131|163520|3521|7|28|44338.56|0.01|0.06|N|O|1998-07-08|1998-09-14|1998-07-23|COLLECT COD|MAIL|usual deposits. furiously regular packa 12132|75402|7910|1|32|44076.80|0.04|0.05|N|O|1997-01-03|1996-11-05|1997-01-20|TAKE BACK RETURN|SHIP| according to the quickly even theod 12132|70683|5698|2|48|79376.64|0.01|0.08|N|O|1996-09-09|1996-11-27|1996-09-11|DELIVER IN PERSON|AIR|al ideas! carefully unusual deposits n 12132|166916|9433|3|7|13880.37|0.07|0.05|N|O|1996-09-23|1996-10-08|1996-10-19|NONE|REG AIR|nic, express ideas. furiously regul 12132|155282|5283|4|31|41455.68|0.04|0.00|N|O|1996-10-02|1996-10-11|1996-10-20|NONE|RAIL|heodolites use along the even packages: 12132|21886|1887|5|11|19886.68|0.03|0.00|N|O|1996-09-28|1996-11-01|1996-10-15|DELIVER IN PERSON|REG AIR|. busily unusual pinto bean 12132|35480|2990|6|44|62281.12|0.03|0.08|N|O|1996-10-05|1996-11-17|1996-10-24|COLLECT COD|MAIL|onic, pending deposits 12132|105217|2748|7|13|15888.73|0.08|0.07|N|O|1996-10-14|1996-11-03|1996-10-21|TAKE BACK RETURN|RAIL|y. 
furiously regular theodolites cajole fur 12133|167412|7413|1|35|51779.35|0.09|0.04|R|F|1992-08-09|1992-05-16|1992-09-08|COLLECT COD|FOB|en accounts are fluffily 12134|49297|6810|1|23|28664.67|0.08|0.00|N|O|1996-06-05|1996-07-21|1996-06-26|DELIVER IN PERSON|FOB|sleep fluffil 12134|131795|9335|2|22|40189.38|0.03|0.04|N|O|1996-07-22|1996-08-09|1996-08-13|NONE|REG AIR|of the furiously 12135|98456|8457|1|26|37815.70|0.02|0.08|N|O|1996-01-16|1995-12-18|1996-02-08|DELIVER IN PERSON|SHIP|e pending foxes. unusual packa 12135|139783|7323|2|49|89316.22|0.04|0.02|N|O|1995-10-21|1995-11-20|1995-11-08|TAKE BACK RETURN|REG AIR|nto beans sleep closely about the slowl 12135|118050|8051|3|36|38449.80|0.10|0.08|N|O|1995-12-23|1995-12-31|1996-01-21|DELIVER IN PERSON|TRUCK|lar theodolites haggle 12135|63097|5604|4|5|5300.45|0.07|0.06|N|O|1995-10-14|1995-12-13|1995-11-06|DELIVER IN PERSON|MAIL|kages. express gifts affix 12135|60463|5476|5|12|17081.52|0.04|0.06|N|O|1995-12-02|1995-11-15|1995-12-26|NONE|RAIL|slyly unusual packages. slyly pending 12135|191097|6136|6|46|54652.14|0.08|0.08|N|O|1995-11-27|1995-11-09|1995-11-30|NONE|RAIL|was above the 12135|153038|3039|7|5|5455.15|0.05|0.06|N|O|1996-01-21|1995-11-28|1996-01-22|TAKE BACK RETURN|FOB|ts. bravely ironic packages wake blithel 12160|100056|5077|1|3|3168.15|0.10|0.07|R|F|1994-02-15|1994-02-27|1994-02-27|DELIVER IN PERSON|SHIP|leep quickly along the theodolites. 12160|32059|9569|2|9|8919.45|0.01|0.01|R|F|1994-01-29|1994-02-18|1994-02-23|TAKE BACK RETURN|TRUCK|ites among the 12160|11420|6423|3|44|58582.48|0.00|0.01|R|F|1993-12-29|1994-03-09|1994-01-15|NONE|FOB|ets sleep furiously ironic req 12160|124592|9617|4|8|12932.72|0.03|0.04|A|F|1994-01-06|1994-01-28|1994-01-12|NONE|FOB|sits wake furious 12161|42139|2140|1|31|33515.03|0.06|0.05|N|O|1997-04-03|1997-06-10|1997-04-22|COLLECT COD|REG AIR|leep above 12161|24748|4749|2|5|8363.70|0.02|0.01|N|O|1997-06-28|1997-05-08|1997-07-01|TAKE BACK RETURN|MAIL|y pending ideas. slyly special package 12161|135494|521|3|42|64238.58|0.09|0.02|N|O|1997-03-26|1997-04-28|1997-04-09|COLLECT COD|AIR|uctions about the ev 12161|39833|9834|4|9|15955.47|0.00|0.07|N|O|1997-05-10|1997-05-10|1997-06-02|DELIVER IN PERSON|FOB|eep carefully 12161|67763|5282|5|42|72691.92|0.10|0.01|N|O|1997-03-24|1997-05-27|1997-04-11|DELIVER IN PERSON|TRUCK|ly bold ins 12161|86524|1541|6|14|21147.28|0.05|0.08|N|O|1997-05-08|1997-05-14|1997-05-16|NONE|FOB|grate. slyly regular deposits mold accor 12161|8096|8097|7|22|22089.98|0.02|0.03|N|O|1997-04-01|1997-05-08|1997-04-29|NONE|FOB|xpress requests sleep fluffi 12162|65245|2764|1|31|37517.44|0.06|0.06|N|O|1997-03-21|1997-05-09|1997-04-07|DELIVER IN PERSON|FOB|efully express platelets. carefully r 12163|181419|3938|1|38|57015.58|0.09|0.07|N|O|1997-08-21|1997-09-28|1997-09-07|TAKE BACK RETURN|FOB|cial packages use acc 12163|104538|9559|2|39|60158.67|0.08|0.00|N|O|1997-09-28|1997-09-23|1997-10-04|TAKE BACK RETURN|TRUCK|es. 
express, si 12163|165249|5250|3|32|42055.68|0.01|0.07|N|O|1997-08-10|1997-08-26|1997-08-18|TAKE BACK RETURN|AIR| pending dolphins use furiously ir 12163|136074|6075|4|48|53283.36|0.10|0.02|N|O|1997-07-23|1997-09-29|1997-08-09|COLLECT COD|FOB|eans across the furiously final theodolit 12164|182450|5|1|27|41376.15|0.06|0.07|R|F|1994-01-31|1993-11-30|1994-02-26|NONE|SHIP|tead of the blithely r 12164|37007|7008|2|19|17936.00|0.09|0.04|A|F|1993-12-22|1993-12-16|1994-01-02|COLLECT COD|RAIL|riously ab 12164|160393|394|3|22|31974.58|0.01|0.06|R|F|1993-10-28|1993-12-07|1993-11-18|DELIVER IN PERSON|REG AIR|lithely regular accounts sleep slyly alo 12164|119377|6911|4|28|39098.36|0.02|0.02|R|F|1994-01-28|1993-12-30|1994-02-06|COLLECT COD|SHIP|ckly unusual foxes. final deposits a 12164|165163|7680|5|5|6140.80|0.04|0.08|R|F|1993-11-06|1993-11-24|1993-12-01|TAKE BACK RETURN|AIR|s affix about 12164|78167|8168|6|22|25193.52|0.04|0.06|R|F|1993-12-16|1993-11-29|1994-01-07|DELIVER IN PERSON|REG AIR|wake. pendi 12165|137408|9922|1|12|17344.80|0.02|0.07|R|F|1994-07-14|1994-06-28|1994-07-25|DELIVER IN PERSON|FOB|tructions. ironic, even foxes 12165|3282|8283|2|42|49781.76|0.01|0.00|A|F|1994-08-05|1994-07-09|1994-08-13|NONE|SHIP|lly even foxes along the f 12165|155591|3137|3|30|49397.70|0.00|0.05|A|F|1994-06-15|1994-05-17|1994-06-30|NONE|MAIL|foxes affix above the 12165|13234|5736|4|47|53919.81|0.07|0.07|R|F|1994-06-09|1994-05-17|1994-06-30|DELIVER IN PERSON|RAIL|use quickly bold ideas. slyly express braid 12165|23422|5925|5|4|5381.68|0.01|0.06|A|F|1994-05-13|1994-05-29|1994-05-29|NONE|RAIL|ounts ought to da 12166|176941|1976|1|11|22197.34|0.04|0.02|A|F|1995-05-08|1995-05-08|1995-05-21|DELIVER IN PERSON|RAIL| accounts are after the carefully special 12167|2633|5134|1|22|33783.86|0.00|0.05|N|O|1998-08-10|1998-07-01|1998-09-08|NONE|REG AIR|above the f 12167|50763|3269|2|26|44557.76|0.02|0.08|N|O|1998-06-25|1998-07-09|1998-07-13|COLLECT COD|SHIP| of the quickly s 12192|92163|7182|1|15|17327.40|0.07|0.07|R|F|1995-02-10|1995-01-14|1995-03-04|DELIVER IN PERSON|RAIL|dinos. final, silent accounts p 12193|194164|4165|1|35|44035.60|0.04|0.06|N|O|1996-10-16|1996-09-18|1996-11-02|DELIVER IN PERSON|TRUCK| quickly slyly regular requests. furiously 12193|102933|7954|2|9|17423.37|0.02|0.02|N|O|1996-09-25|1996-09-19|1996-10-08|NONE|SHIP|ly special d 12193|181069|6106|3|38|43702.28|0.08|0.03|N|O|1996-10-06|1996-10-09|1996-10-19|TAKE BACK RETURN|AIR|even asymptotes sleep ironic accounts 12193|20766|5771|4|21|35421.96|0.05|0.07|N|O|1996-10-28|1996-10-09|1996-11-02|DELIVER IN PERSON|RAIL|e blithely. pin 12194|32241|2242|1|32|37543.68|0.02|0.01|A|F|1995-04-11|1995-05-14|1995-04-18|TAKE BACK RETURN|FOB| ironic instructi 12194|59404|1910|2|14|19087.60|0.06|0.07|A|F|1995-04-10|1995-05-24|1995-04-17|DELIVER IN PERSON|AIR|deas impress at the quickly unusual the 12195|178760|3795|1|40|73550.40|0.04|0.00|N|O|1997-07-16|1997-06-09|1997-07-28|NONE|AIR|eas wake never. fluffily final foxe 12195|6797|6798|2|46|78374.34|0.03|0.02|N|O|1997-07-17|1997-05-30|1997-08-10|COLLECT COD|REG AIR|ular requests are quickly 12196|87576|5101|1|1|1563.57|0.09|0.07|A|F|1993-06-15|1993-08-12|1993-06-30|COLLECT COD|SHIP|ounts. 
blithel 12196|74240|4241|2|4|4856.96|0.02|0.07|R|F|1993-09-20|1993-08-09|1993-10-02|DELIVER IN PERSON|MAIL|ly regular requests 12196|147678|193|3|6|10354.02|0.04|0.01|A|F|1993-06-18|1993-08-05|1993-06-25|NONE|MAIL|even packages above the instructions are 12196|23481|988|4|30|42134.40|0.01|0.00|A|F|1993-07-26|1993-09-01|1993-08-01|DELIVER IN PERSON|FOB|es wake slyly. regular 12196|38261|5771|5|19|22785.94|0.07|0.08|R|F|1993-09-19|1993-08-01|1993-09-21|COLLECT COD|TRUCK| final Tir 12196|37671|7672|6|7|11260.69|0.03|0.05|A|F|1993-08-10|1993-07-16|1993-08-16|DELIVER IN PERSON|RAIL|. slyly even 12197|67782|289|1|48|83989.44|0.08|0.05|N|O|1997-08-23|1997-09-23|1997-08-29|TAKE BACK RETURN|AIR|usly special instructions according t 12197|10142|143|2|44|46294.16|0.01|0.02|N|O|1997-07-26|1997-09-25|1997-08-13|DELIVER IN PERSON|MAIL| to the evenly final ideas. bra 12197|43021|3022|3|20|19280.40|0.03|0.06|N|O|1997-09-27|1997-08-20|1997-10-01|TAKE BACK RETURN|SHIP|ent theodolites sleep f 12197|43075|5580|4|41|41740.87|0.01|0.03|N|O|1997-07-29|1997-08-27|1997-08-01|TAKE BACK RETURN|AIR| after the dep 12197|157882|2913|5|4|7759.52|0.09|0.03|N|O|1997-10-01|1997-09-08|1997-10-06|COLLECT COD|RAIL|ual instructions affix furiousl 12198|169225|4258|1|37|47886.14|0.05|0.07|A|F|1993-06-04|1993-05-15|1993-06-05|TAKE BACK RETURN|REG AIR|ses. pending, final instructions cajo 12199|134883|2423|1|3|5753.64|0.03|0.03|N|O|1996-03-04|1996-03-06|1996-03-25|DELIVER IN PERSON|TRUCK|ly silent pinto beans 12199|171898|6933|2|2|3939.78|0.08|0.02|N|O|1996-02-23|1996-01-16|1996-03-04|COLLECT COD|FOB|ully about the fur 12199|83081|3082|3|38|40435.04|0.05|0.08|N|O|1996-02-10|1996-03-08|1996-02-14|COLLECT COD|TRUCK|haggle busily among the blithely spec 12199|141022|3537|4|22|23386.44|0.06|0.07|N|O|1995-12-17|1996-02-04|1996-01-09|DELIVER IN PERSON|MAIL|eposits affix blithely even 12224|43212|725|1|11|12707.31|0.03|0.00|R|F|1995-04-12|1995-02-06|1995-04-13|NONE|SHIP| integrate bl 12224|55114|5115|2|46|49179.06|0.00|0.03|A|F|1995-03-12|1995-02-15|1995-04-10|COLLECT COD|MAIL|asymptotes. special requests eat 12224|97205|2224|3|47|56503.40|0.09|0.05|R|F|1995-03-14|1995-02-09|1995-04-11|DELIVER IN PERSON|AIR|the slyly i 12224|114290|1824|4|42|54780.18|0.04|0.08|A|F|1995-01-03|1995-01-23|1995-01-21|DELIVER IN PERSON|RAIL|otes are above the final 12225|110946|8480|1|2|3913.88|0.02|0.04|N|O|1997-03-27|1997-03-30|1997-03-31|TAKE BACK RETURN|MAIL|nt requests w 12225|56514|9020|2|29|42644.79|0.08|0.05|N|O|1997-03-14|1997-03-06|1997-03-19|NONE|TRUCK|cajole fluffily furiously regular a 12225|30272|2776|3|48|57708.96|0.04|0.04|N|O|1997-04-14|1997-03-11|1997-05-05|DELIVER IN PERSON|MAIL|e, regular excuses boo 12225|179154|1672|4|13|16030.95|0.05|0.00|N|O|1997-02-14|1997-04-12|1997-03-04|COLLECT COD|FOB|ide of the carefully silent f 12225|28777|8778|5|36|61407.72|0.10|0.04|N|O|1997-03-06|1997-03-02|1997-03-19|TAKE BACK RETURN|TRUCK|e blithely. 12225|54743|7249|6|19|32257.06|0.09|0.05|N|O|1997-05-25|1997-03-21|1997-06-08|COLLECT COD|RAIL|lthily among the quietly ironic asymptotes. 12226|105936|3467|1|33|64083.69|0.06|0.07|N|O|1998-04-06|1998-05-12|1998-05-02|TAKE BACK RETURN|REG AIR|ans nag. bold, expr 12226|58969|3980|2|39|75190.44|0.02|0.07|N|O|1998-04-02|1998-06-12|1998-04-05|COLLECT COD|REG AIR|s. 
fluffily regular ins 12226|81554|4063|3|16|24568.80|0.04|0.06|N|O|1998-07-11|1998-06-17|1998-07-20|DELIVER IN PERSON|MAIL|s nag along the ca 12227|117948|2971|1|3|5897.82|0.08|0.03|N|O|1998-08-11|1998-08-13|1998-08-29|DELIVER IN PERSON|RAIL|, ironic tithes. furiously even 12227|151425|6456|2|44|64962.48|0.00|0.02|N|O|1998-10-09|1998-09-09|1998-11-05|DELIVER IN PERSON|TRUCK|ular accounts. furiously regula 12227|183277|832|3|11|14962.97|0.01|0.07|N|O|1998-08-13|1998-07-28|1998-08-29|COLLECT COD|MAIL|odolites haggle. pint 12227|124622|9647|4|34|55985.08|0.10|0.01|N|O|1998-10-06|1998-08-04|1998-10-27|TAKE BACK RETURN|REG AIR|ronic excuses nag fur 12227|109016|4037|5|40|41000.40|0.00|0.01|N|O|1998-07-25|1998-07-31|1998-08-22|NONE|TRUCK|as nag around the carefully specia 12227|15184|187|6|23|25281.14|0.00|0.03|N|O|1998-09-08|1998-09-12|1998-09-19|COLLECT COD|REG AIR| the furiously pending escapades. 12227|188046|565|7|13|14742.52|0.09|0.08|N|O|1998-08-15|1998-09-22|1998-08-31|DELIVER IN PERSON|AIR| platelets. final, ironic pin 12228|80995|8520|1|17|33591.83|0.06|0.00|A|F|1994-08-20|1994-08-22|1994-09-18|DELIVER IN PERSON|SHIP|blithely ironic depths 12228|73597|3598|2|50|78529.50|0.06|0.04|A|F|1994-10-14|1994-08-25|1994-11-01|TAKE BACK RETURN|MAIL| theodolites hagg 12228|143501|8530|3|18|27801.00|0.08|0.00|A|F|1994-08-21|1994-08-13|1994-08-27|COLLECT COD|FOB|equests. quickly pendi 12228|11554|6557|4|9|13189.95|0.10|0.01|R|F|1994-08-07|1994-09-09|1994-09-04|COLLECT COD|TRUCK|jole silently. even deposits hagg 12228|180656|5693|5|30|52099.50|0.01|0.00|A|F|1994-08-10|1994-09-04|1994-08-14|COLLECT COD|MAIL|ites cajole. regu 12228|68953|6472|6|47|90331.65|0.02|0.05|R|F|1994-08-10|1994-09-02|1994-08-21|NONE|AIR|accounts? slyly unusua 12228|83954|8971|7|6|11627.70|0.02|0.04|A|F|1994-10-01|1994-08-13|1994-10-19|DELIVER IN PERSON|TRUCK|g to the regular warhorses. slyly blithe co 12229|14403|1907|1|12|15808.80|0.09|0.06|N|O|1996-08-28|1996-09-09|1996-09-11|TAKE BACK RETURN|AIR|ag across the unusual theodolites. a 12229|70171|172|2|21|23964.57|0.06|0.06|N|O|1996-09-27|1996-10-14|1996-10-19|TAKE BACK RETURN|AIR|about the bold deposits 12229|163220|769|3|38|48762.36|0.03|0.07|N|O|1996-07-27|1996-08-26|1996-08-03|NONE|SHIP|en deposits: special foxes nag above 12230|4363|6864|1|19|24079.84|0.03|0.06|N|O|1998-05-03|1998-05-12|1998-05-05|COLLECT COD|TRUCK|e slyly against the 12230|114089|4090|2|30|33092.40|0.03|0.03|N|O|1998-06-07|1998-05-10|1998-07-06|TAKE BACK RETURN|TRUCK|ackages. furiously final exc 12230|85305|2830|3|12|15483.60|0.07|0.00|N|O|1998-06-03|1998-06-20|1998-07-03|COLLECT COD|AIR|le furiousl 12230|117093|9605|4|25|27752.25|0.07|0.08|N|O|1998-07-09|1998-06-20|1998-08-07|DELIVER IN PERSON|RAIL|es cajole. quickly regu 12230|175713|8231|5|40|71548.40|0.07|0.06|N|O|1998-07-05|1998-06-10|1998-07-10|TAKE BACK RETURN|MAIL|ct unusual requests? carefully ironic foxe 12231|95465|2993|1|9|13144.14|0.09|0.06|N|O|1997-10-17|1997-09-10|1997-11-16|COLLECT COD|SHIP|ound the fur 12231|63935|6442|2|17|32281.81|0.07|0.01|N|O|1997-11-22|1997-10-27|1997-12-22|DELIVER IN PERSON|TRUCK|ts cajole. permanently bold attainments ag 12231|142031|4546|3|16|17168.48|0.09|0.04|N|O|1997-09-25|1997-09-07|1997-10-02|TAKE BACK RETURN|TRUCK|rts. special pac 12231|109185|1696|4|35|41796.30|0.05|0.07|N|O|1997-11-16|1997-08-29|1997-12-12|TAKE BACK RETURN|TRUCK| the pending accoun 12231|59939|9940|5|44|83552.92|0.02|0.08|N|O|1997-11-17|1997-10-04|1997-12-16|DELIVER IN PERSON|SHIP|ourts. 
silent, bold T 12231|190509|510|6|43|68778.50|0.01|0.01|N|O|1997-08-29|1997-09-03|1997-09-04|COLLECT COD|SHIP|, bold deposits are never express platelet 12256|182959|514|1|17|34713.15|0.03|0.04|A|F|1993-02-12|1993-03-09|1993-02-24|TAKE BACK RETURN|FOB|ges. unusual multiplier 12256|190364|7922|2|11|15997.96|0.02|0.04|R|F|1993-03-21|1993-02-11|1993-04-20|COLLECT COD|TRUCK|xpress deposits thrash blithely about the 12256|41062|3567|3|20|20061.20|0.09|0.04|R|F|1993-04-12|1993-02-15|1993-05-10|NONE|RAIL|r ideas wake 12256|117709|7710|4|19|32807.30|0.08|0.06|R|F|1993-01-16|1993-03-09|1993-02-03|NONE|TRUCK|regular deposits. regular, regular pinto 12256|124380|9405|5|44|61792.72|0.01|0.04|R|F|1992-12-24|1993-02-11|1992-12-28|COLLECT COD|TRUCK|ess packages use fluffily inside the 12257|109205|6736|1|21|25498.20|0.04|0.05|N|O|1996-07-28|1996-05-29|1996-08-17|TAKE BACK RETURN|REG AIR|nusual asymptotes. quickly even a 12257|192155|7194|2|27|33673.05|0.00|0.03|N|O|1996-08-14|1996-07-10|1996-08-20|DELIVER IN PERSON|FOB|gular tithes haggle slyly 12257|132847|7874|3|46|86472.64|0.04|0.04|N|O|1996-05-27|1996-05-28|1996-06-07|DELIVER IN PERSON|REG AIR| fluffily final foxes. ca 12258|76987|4509|1|31|60883.38|0.07|0.03|A|F|1995-02-11|1995-01-22|1995-03-09|TAKE BACK RETURN|FOB|s use. sil 12258|109655|2166|2|27|44945.55|0.03|0.05|A|F|1994-12-05|1995-02-25|1995-01-01|DELIVER IN PERSON|MAIL|e slyly stealthy packages. slyly busy id 12258|160850|3367|3|25|47771.25|0.05|0.02|R|F|1995-02-28|1995-01-11|1995-03-02|NONE|AIR|ounts are acros 12258|197161|2200|4|36|45293.76|0.07|0.05|A|F|1994-12-23|1995-02-07|1995-01-07|TAKE BACK RETURN|TRUCK|iously. care 12258|82090|2091|5|20|21441.80|0.01|0.00|A|F|1994-12-19|1995-02-06|1994-12-24|DELIVER IN PERSON|REG AIR|elets. bli 12258|82199|7216|6|39|46066.41|0.07|0.06|R|F|1995-01-20|1995-01-26|1995-02-08|NONE|RAIL|refully. blithely bold deposits ab 12258|116052|6053|7|20|21361.00|0.05|0.00|R|F|1994-12-15|1995-01-30|1995-01-03|TAKE BACK RETURN|SHIP|s. quickly express excuses eat: never r 12259|82264|4773|1|1|1246.26|0.02|0.03|R|F|1993-05-15|1993-05-14|1993-05-31|TAKE BACK RETURN|RAIL| fluffily blithe foxes are ca 12259|5666|3167|2|34|53436.44|0.06|0.02|R|F|1993-05-15|1993-05-11|1993-05-24|COLLECT COD|SHIP|y even dugouts. final instructions along th 12259|20358|5363|3|13|16618.55|0.09|0.05|R|F|1993-05-17|1993-04-22|1993-06-14|TAKE BACK RETURN|TRUCK|ets boost. quickly unu 12259|147680|2709|4|32|55285.76|0.08|0.05|R|F|1993-04-18|1993-04-16|1993-04-29|DELIVER IN PERSON|TRUCK| ironic attainments. carefully ironic du 12259|51135|3641|5|34|36928.42|0.10|0.00|A|F|1993-04-07|1993-03-21|1993-04-14|COLLECT COD|SHIP|t have to are 12259|13881|8884|6|16|28718.08|0.01|0.06|R|F|1993-05-17|1993-03-20|1993-05-28|DELIVER IN PERSON|RAIL|gular accounts haggl 12259|93517|1045|7|14|21147.14|0.07|0.00|R|F|1993-03-11|1993-04-12|1993-03-29|DELIVER IN PERSON|AIR|ecial sheaves. final accounts cajole 12260|125302|7815|1|26|34509.80|0.03|0.02|R|F|1993-01-29|1993-02-15|1993-02-21|COLLECT COD|MAIL|quests. regular pinto beans against the 12260|147539|5082|2|11|17451.83|0.09|0.06|A|F|1993-03-26|1993-01-17|1993-04-17|NONE|REG AIR|y. regular packag 12260|80531|532|3|36|54415.08|0.07|0.03|A|F|1993-03-05|1993-02-20|1993-03-16|DELIVER IN PERSON|RAIL|ffily even theo 12260|70468|5483|4|13|18699.98|0.06|0.00|A|F|1993-03-11|1992-12-31|1993-03-19|DELIVER IN PERSON|FOB|ironic courts. fu 12260|143666|3667|5|38|64967.08|0.09|0.07|R|F|1993-01-31|1993-01-11|1993-02-24|COLLECT COD|SHIP|sly even theodolites cajole. 
final requ 12261|30576|3080|1|28|42183.96|0.09|0.00|A|F|1994-02-13|1994-01-22|1994-03-14|COLLECT COD|SHIP|s cajole carefully. pending Tiresias 12261|75034|7542|2|30|30270.90|0.08|0.00|R|F|1993-12-30|1993-12-18|1994-01-28|COLLECT COD|AIR|ual, special 12261|92472|4982|3|41|60043.27|0.05|0.01|R|F|1994-02-03|1993-12-03|1994-02-26|DELIVER IN PERSON|MAIL|o beans sleep across the doggedly reg 12261|174193|1745|4|37|46886.03|0.10|0.06|R|F|1993-11-24|1993-12-18|1993-12-21|NONE|REG AIR|after the quickly ironic id 12261|60130|7649|5|39|42515.07|0.05|0.08|A|F|1994-02-03|1993-12-08|1994-02-26|DELIVER IN PERSON|MAIL|ly quietly ir 12261|4705|4706|6|49|78875.30|0.05|0.06|R|F|1993-10-29|1994-01-07|1993-11-28|TAKE BACK RETURN|TRUCK|ublate slyly beside th 12261|63701|3702|7|32|53270.40|0.04|0.04|R|F|1994-01-21|1994-01-15|1994-01-26|NONE|MAIL|as nag carefully a 12262|21521|1522|1|17|24522.84|0.02|0.07|N|O|1996-01-29|1996-01-25|1996-02-11|DELIVER IN PERSON|AIR|onic packages. ex 12262|95743|762|2|33|57378.42|0.05|0.04|N|O|1996-01-12|1996-01-21|1996-02-07|DELIVER IN PERSON|MAIL|ven packages sleep slyly 12262|152260|9806|3|9|11810.34|0.08|0.08|N|O|1996-03-04|1996-03-09|1996-03-18|COLLECT COD|FOB|ly silent accounts ha 12262|28024|8025|4|43|40936.86|0.09|0.00|N|O|1995-12-24|1996-02-18|1996-01-02|DELIVER IN PERSON|RAIL|bold deposit 12263|65253|5254|1|35|42638.75|0.08|0.02|N|O|1995-08-24|1995-08-29|1995-09-11|TAKE BACK RETURN|SHIP|ccounts sleep slyly blithely express de 12263|93355|3356|2|33|44495.55|0.05|0.00|N|O|1995-09-14|1995-09-02|1995-10-07|TAKE BACK RETURN|REG AIR|ans use blithely 12263|3859|6360|3|13|22917.05|0.03|0.00|N|O|1995-08-21|1995-09-16|1995-08-23|COLLECT COD|TRUCK|y. special accounts a 12263|54466|6972|4|29|41193.34|0.08|0.06|N|O|1995-09-19|1995-09-02|1995-10-19|NONE|RAIL|usly. carefully ironic 12288|75235|5236|1|21|25414.83|0.05|0.00|N|O|1996-12-29|1996-12-15|1997-01-15|TAKE BACK RETURN|REG AIR|blithely even foxes h 12288|62354|7367|2|13|17112.55|0.04|0.04|N|O|1996-12-26|1996-12-06|1997-01-09|TAKE BACK RETURN|TRUCK|ackages are blithely. 12288|167627|144|3|17|28808.54|0.05|0.03|N|O|1997-01-19|1997-01-13|1997-01-24|NONE|SHIP|uests detect furiously. qu 12288|102003|2004|4|48|48240.00|0.02|0.07|N|O|1997-02-15|1996-12-13|1997-03-01|DELIVER IN PERSON|MAIL|ies about the carefully ir 12289|185411|448|1|39|58359.99|0.02|0.07|N|O|1996-04-16|1996-02-12|1996-05-13|NONE|RAIL|ely. final, even foxes according to 12289|193595|6115|2|29|48969.11|0.01|0.08|N|O|1996-03-11|1996-03-21|1996-04-06|COLLECT COD|AIR|ess requests eat thinly blithely unus 12290|166534|9051|1|49|78425.97|0.07|0.04|N|O|1995-09-30|1995-08-11|1995-10-14|TAKE BACK RETURN|MAIL|, pending deposits sleep iron 12290|136061|3601|2|23|25232.38|0.03|0.02|N|O|1995-10-09|1995-09-01|1995-10-10|NONE|FOB|usual packa 12291|140230|7773|1|14|17783.22|0.02|0.08|N|O|1998-05-03|1998-06-12|1998-05-17|TAKE BACK RETURN|RAIL|jole quickly final foxes. unusu 12292|81949|6966|1|45|86892.30|0.07|0.08|A|F|1992-08-07|1992-07-14|1992-08-09|COLLECT COD|REG AIR|tes? carefully bold packages above th 12292|115640|663|2|25|41391.00|0.10|0.01|A|F|1992-07-20|1992-07-06|1992-07-24|TAKE BACK RETURN|AIR| bold theodolites 12292|151368|8914|3|42|59613.12|0.05|0.00|R|F|1992-06-12|1992-07-05|1992-07-07|DELIVER IN PERSON|SHIP| even deposits. qu 12292|29244|6751|4|15|17598.60|0.02|0.03|R|F|1992-08-26|1992-07-20|1992-09-11|COLLECT COD|RAIL|ely ironic asymptotes. 
final, 12293|168359|8360|1|28|39965.80|0.04|0.07|N|O|1995-10-02|1995-08-26|1995-10-14|COLLECT COD|AIR| slyly slyly 12293|199973|7531|2|37|76699.89|0.10|0.02|N|O|1995-09-11|1995-09-07|1995-09-25|DELIVER IN PERSON|REG AIR|lites. carefully final 12293|144290|4291|3|20|26685.80|0.03|0.00|N|O|1995-10-16|1995-07-26|1995-10-31|DELIVER IN PERSON|SHIP|warthogs. carefully quiet acc 12294|132083|2084|1|15|16726.20|0.06|0.04|N|O|1995-08-31|1995-08-25|1995-09-20|COLLECT COD|AIR|thely special accounts wake slyly 12294|104905|4906|2|39|74486.10|0.10|0.00|N|O|1995-11-02|1995-10-03|1995-11-26|DELIVER IN PERSON|AIR|t the slyly bold theodoli 12294|136813|4353|3|16|29596.96|0.00|0.08|N|O|1995-07-25|1995-09-13|1995-08-01|COLLECT COD|RAIL|ructions. packages across the ironi 12294|600|8101|4|39|58523.40|0.06|0.01|N|O|1995-09-09|1995-08-07|1995-09-22|COLLECT COD|TRUCK|special, regular deposi 12294|75933|3455|5|48|91628.64|0.05|0.00|N|O|1995-08-02|1995-09-11|1995-08-31|TAKE BACK RETURN|AIR|e of the blithely bold asympto 12295|82213|9738|1|45|53784.45|0.06|0.00|A|F|1993-12-02|1994-01-17|1993-12-13|COLLECT COD|FOB|ccounts affix f 12295|148978|1493|2|8|16215.76|0.06|0.08|A|F|1994-03-01|1994-01-01|1994-03-12|NONE|TRUCK|iously pending ide 12320|136931|4471|1|43|84620.99|0.02|0.07|N|O|1996-02-29|1996-02-29|1996-03-28|NONE|REG AIR|ts. special packages above the packages caj 12320|193674|3675|2|33|58333.11|0.05|0.06|N|O|1996-02-18|1996-03-09|1996-02-29|COLLECT COD|TRUCK| excuses are carefully abov 12321|105127|2658|1|13|14717.56|0.01|0.02|A|F|1994-05-16|1994-05-14|1994-05-31|NONE|AIR|nag. express deposits are blithely fu 12322|55282|7788|1|11|13610.08|0.00|0.06|N|F|1995-06-10|1995-06-12|1995-06-21|COLLECT COD|MAIL|ily furiou 12322|118458|970|2|20|29529.00|0.00|0.04|N|O|1995-07-20|1995-06-24|1995-08-04|TAKE BACK RETURN|REG AIR| carefully iron 12322|108775|1286|3|3|5351.31|0.07|0.06|R|F|1995-05-24|1995-08-05|1995-06-16|NONE|AIR|o the fluffily ironic deposits 12323|118585|1097|1|1|1603.58|0.04|0.00|N|O|1995-06-26|1995-06-13|1995-07-13|NONE|MAIL|es sleep slyly. fu 12323|54955|4956|2|4|7639.80|0.02|0.05|N|O|1995-06-20|1995-06-04|1995-07-06|TAKE BACK RETURN|MAIL|c packages 12323|19248|4251|3|34|39686.16|0.00|0.08|N|O|1995-07-28|1995-05-26|1995-08-05|COLLECT COD|RAIL|ctions. carefully regular req 12323|97783|5311|4|35|62327.30|0.09|0.08|N|O|1995-06-28|1995-06-19|1995-07-05|COLLECT COD|TRUCK|efully after the 12323|37037|9541|5|6|5844.18|0.03|0.05|R|F|1995-04-19|1995-06-16|1995-05-02|NONE|AIR| ideas. fluffily bold dep 12323|75483|5484|6|12|17501.76|0.00|0.02|N|O|1995-07-20|1995-06-01|1995-08-19|TAKE BACK RETURN|FOB|nts. blithely final platelets are c 12324|27201|2206|1|11|12410.20|0.07|0.06|N|O|1998-10-08|1998-10-01|1998-11-04|DELIVER IN PERSON|REG AIR| final foxes. slyly sp 12324|153140|8171|2|42|50111.88|0.00|0.03|N|O|1998-11-09|1998-09-27|1998-12-08|DELIVER IN PERSON|TRUCK| the furiously pending deposits. i 12324|11237|3739|3|35|40188.05|0.02|0.08|N|O|1998-09-25|1998-10-22|1998-09-27|DELIVER IN PERSON|MAIL|r, ironic deposits sleep. unus 12324|48672|1177|4|13|21068.71|0.04|0.03|N|O|1998-11-07|1998-10-14|1998-11-10|NONE|AIR|g to the fluffily even packages cajole 12324|25881|8384|5|16|28910.08|0.07|0.00|N|O|1998-09-21|1998-10-21|1998-10-02|COLLECT COD|REG AIR|old accounts cajole carefully requests 12324|165386|2935|6|34|49346.92|0.03|0.07|N|O|1998-09-12|1998-10-16|1998-09-20|NONE|TRUCK|eans. 
requests are furiou 12325|62491|4998|1|49|71221.01|0.08|0.02|R|F|1995-03-07|1995-04-04|1995-03-17|DELIVER IN PERSON|RAIL| furiously final deposits. orbits are qui 12326|42694|7703|1|12|19640.28|0.06|0.03|R|F|1995-04-17|1995-04-12|1995-05-02|TAKE BACK RETURN|MAIL|ironic, final accounts above the furious 12326|158058|3089|2|29|32365.45|0.05|0.03|R|F|1995-03-11|1995-05-04|1995-04-06|NONE|REG AIR|pecial instructions. carefully fin 12326|77516|2531|3|43|64220.93|0.02|0.00|A|F|1995-05-07|1995-04-09|1995-05-10|DELIVER IN PERSON|TRUCK|eep slyly pending req 12326|197759|7760|4|30|55702.50|0.09|0.04|R|F|1995-05-02|1995-03-19|1995-05-08|COLLECT COD|SHIP| bold asymptotes detect regular 12327|162360|9909|1|9|12801.24|0.07|0.04|N|O|1998-05-31|1998-05-30|1998-06-11|NONE|SHIP| foxes. express frets at the regular 12327|168349|8350|2|10|14173.40|0.05|0.02|N|O|1998-06-27|1998-06-24|1998-07-04|TAKE BACK RETURN|FOB|foxes are regular acco 12327|27568|71|3|9|13460.04|0.04|0.03|N|O|1998-07-12|1998-06-19|1998-08-02|COLLECT COD|SHIP|ithely spe 12352|34284|6788|1|31|37766.68|0.08|0.03|R|F|1992-06-15|1992-05-26|1992-07-15|NONE|FOB|ring, silent d 12352|195641|8161|2|44|76412.16|0.09|0.01|A|F|1992-07-23|1992-05-04|1992-08-19|NONE|TRUCK|riously blithely reg 12352|129830|7367|3|39|72533.37|0.05|0.08|A|F|1992-04-01|1992-05-27|1992-04-19|TAKE BACK RETURN|AIR|he furiously ironic requests. slyly re 12352|52148|9664|4|30|33004.20|0.10|0.07|A|F|1992-05-15|1992-05-15|1992-05-23|COLLECT COD|TRUCK|ts wake quietly alon 12352|37734|5244|5|20|33434.60|0.03|0.03|R|F|1992-06-13|1992-05-04|1992-06-30|COLLECT COD|FOB|t blithely. regular requests 12352|185368|405|6|18|26160.48|0.04|0.00|R|F|1992-04-12|1992-05-27|1992-04-30|DELIVER IN PERSON|SHIP|p ruthlessly despite the slyl 12352|75056|7564|7|48|49490.40|0.09|0.04|A|F|1992-06-12|1992-05-22|1992-06-29|DELIVER IN PERSON|REG AIR|ularly above the ironic requests. 12353|61838|9357|1|28|50395.24|0.02|0.04|N|O|1996-05-24|1996-06-10|1996-06-22|TAKE BACK RETURN|MAIL|uests use. fluffily regular foxes 12353|100105|2616|2|43|47519.30|0.04|0.03|N|O|1996-08-05|1996-06-20|1996-08-28|NONE|TRUCK|y special accounts are: expres 12353|53311|3312|3|33|41722.23|0.10|0.01|N|O|1996-06-27|1996-05-25|1996-07-02|TAKE BACK RETURN|SHIP|slyly ironic epitaphs. 12353|197681|5239|4|26|46245.68|0.00|0.06|N|O|1996-07-25|1996-06-06|1996-08-02|TAKE BACK RETURN|REG AIR| of the unusual, special packages. eve 12353|13471|5973|5|43|59532.21|0.05|0.02|N|O|1996-05-07|1996-06-11|1996-05-25|DELIVER IN PERSON|REG AIR|al ideas sleep. 12354|172573|5091|1|18|29620.26|0.04|0.07|N|O|1997-02-07|1997-03-16|1997-03-05|TAKE BACK RETURN|TRUCK|ual foxes detect f 12354|21897|6902|2|15|27283.35|0.09|0.08|N|O|1997-04-27|1997-03-19|1997-05-14|TAKE BACK RETURN|SHIP|d about the regular, 12354|14948|7450|3|8|14903.52|0.05|0.05|N|O|1997-05-02|1997-03-10|1997-05-08|DELIVER IN PERSON|REG AIR| accounts wake acro 12355|125725|5726|1|31|54272.32|0.01|0.02|N|O|1995-09-08|1995-08-19|1995-09-19|NONE|SHIP|ns. blithely reg 12355|55091|7597|2|36|37659.24|0.09|0.06|N|O|1995-08-11|1995-07-14|1995-08-17|COLLECT COD|TRUCK|egular accounts. 
fluff 12355|101599|1600|3|14|22408.26|0.08|0.02|N|F|1995-06-13|1995-06-28|1995-07-11|NONE|MAIL| express de 12355|168483|8484|4|47|72919.56|0.10|0.08|N|O|1995-06-30|1995-08-25|1995-07-03|NONE|TRUCK|thely even deposits haggle fluffily regu 12355|79033|1541|5|18|18216.54|0.09|0.04|N|F|1995-06-17|1995-08-20|1995-07-13|DELIVER IN PERSON|MAIL|slyly above the ac 12355|94634|9653|6|35|57002.05|0.05|0.06|N|O|1995-09-21|1995-07-25|1995-10-20|DELIVER IN PERSON|FOB|ackages use by the fur 12355|112918|2919|7|20|38618.20|0.01|0.02|N|O|1995-09-08|1995-07-10|1995-10-01|TAKE BACK RETURN|MAIL|ven excuses u 12356|5787|8288|1|50|84639.00|0.05|0.03|A|F|1993-02-13|1992-12-03|1993-02-17|TAKE BACK RETURN|MAIL|t the blithely slow packages. care 12356|54663|4664|2|26|42059.16|0.06|0.05|R|F|1992-11-25|1992-12-06|1992-12-24|DELIVER IN PERSON|TRUCK|ep quickly. slyly bo 12356|112337|2338|3|38|51274.54|0.07|0.04|A|F|1992-11-19|1992-12-26|1992-12-09|TAKE BACK RETURN|TRUCK|l, pending dependencies. blithely iron 12357|35814|8318|1|4|6999.24|0.09|0.04|N|O|1995-11-07|1995-11-16|1995-11-25|NONE|REG AIR| pinto beans. carefull 12357|114840|2374|2|18|33387.12|0.06|0.04|N|O|1995-09-16|1995-10-06|1995-09-18|TAKE BACK RETURN|TRUCK|detect furiously 12357|113518|3519|3|42|64323.42|0.03|0.08|N|O|1995-11-27|1995-10-06|1995-12-10|NONE|REG AIR|final reques 12357|94231|9250|4|12|14702.76|0.03|0.03|N|O|1995-11-29|1995-11-20|1995-11-30|TAKE BACK RETURN|TRUCK|ages against the unusual d 12358|189895|9896|1|19|37712.91|0.04|0.00|N|O|1997-01-15|1996-12-03|1997-02-10|DELIVER IN PERSON|AIR|. carefully even deposi 12358|153885|3886|2|27|52349.76|0.02|0.02|N|O|1996-11-17|1996-11-29|1996-12-14|TAKE BACK RETURN|RAIL|yly regular packages about the ir 12358|133567|3568|3|3|4801.68|0.04|0.00|N|O|1996-12-26|1996-12-09|1997-01-02|TAKE BACK RETURN|SHIP| regular requests above the de 12358|51074|6085|4|31|31777.17|0.05|0.05|N|O|1997-01-29|1996-11-28|1997-02-27|TAKE BACK RETURN|REG AIR|nts. fluffily permanent ins 12358|191167|8725|5|32|40261.12|0.08|0.01|N|O|1997-01-30|1996-11-10|1997-02-03|NONE|RAIL|ests sleep furiously ironic pa 12358|140436|7979|6|1|1476.43|0.06|0.04|N|O|1996-10-16|1996-11-19|1996-11-13|COLLECT COD|RAIL|r the carefully express theodolites. stealt 12358|97582|5110|7|30|47387.40|0.01|0.02|N|O|1997-01-24|1997-01-06|1997-01-28|COLLECT COD|FOB|the final, final deposits sleep exp 12359|58090|5606|1|33|34586.97|0.00|0.08|N|O|1997-07-23|1997-07-21|1997-08-08|DELIVER IN PERSON|FOB|nts haggle quickly aft 12359|108675|8676|2|42|70714.14|0.03|0.08|N|O|1997-06-16|1997-07-18|1997-06-17|TAKE BACK RETURN|RAIL|ely final theodolit 12359|91211|8739|3|27|32459.67|0.10|0.02|N|O|1997-08-05|1997-08-19|1997-08-25|NONE|REG AIR|final pinto beans are fluffily amo 12359|169624|9625|4|48|81293.76|0.03|0.00|N|O|1997-08-18|1997-08-17|1997-09-12|COLLECT COD|REG AIR|n dependencies above t 12384|187657|176|1|17|29659.05|0.03|0.00|N|O|1998-11-14|1998-09-05|1998-12-01|COLLECT COD|AIR|ove the fluffily regular asympt 12384|150286|287|2|47|62805.16|0.05|0.07|N|O|1998-09-01|1998-09-28|1998-10-01|NONE|MAIL|kly pending foxes cajole regular 12384|45204|2717|3|20|22984.00|0.09|0.06|N|O|1998-08-11|1998-10-23|1998-09-05|DELIVER IN PERSON|MAIL|arefully among th 12384|135758|3298|4|8|14350.00|0.09|0.01|N|O|1998-10-15|1998-10-09|1998-11-05|COLLECT COD|MAIL|eodolites-- always ev 12384|84161|1686|5|6|6870.96|0.04|0.00|N|O|1998-11-26|1998-10-04|1998-12-08|COLLECT COD|RAIL|ep blithely. 
blithely ironic r 12384|79057|4072|6|39|40405.95|0.04|0.01|N|O|1998-08-06|1998-10-11|1998-09-01|NONE|REG AIR|nstructions affix blithely according to 12384|107297|4828|7|8|10434.32|0.01|0.01|N|O|1998-09-22|1998-09-22|1998-10-18|TAKE BACK RETURN|RAIL|al requests sleep carefully 12385|132181|7208|1|6|7279.08|0.01|0.03|R|F|1993-03-20|1993-02-10|1993-03-26|COLLECT COD|RAIL|quickly unusual 12385|158731|6277|2|22|39374.06|0.01|0.04|R|F|1993-01-01|1993-02-12|1993-01-19|DELIVER IN PERSON|TRUCK|fily. ironic, ironic 12385|191899|1900|3|1|1990.89|0.00|0.08|R|F|1993-01-28|1993-01-05|1993-02-22|NONE|FOB|ns-- furiously unusual requests snooze a 12385|37018|9522|4|32|30560.32|0.01|0.02|A|F|1993-01-10|1993-01-01|1993-01-22|NONE|RAIL|quests wake blithely. regular 12385|145100|2643|5|6|6870.60|0.02|0.08|A|F|1993-02-19|1993-01-29|1993-03-09|NONE|RAIL|l accounts thrash 12386|96471|8981|1|49|71906.03|0.10|0.06|R|F|1992-06-29|1992-06-01|1992-07-24|COLLECT COD|REG AIR| the blithely 12386|175077|112|2|34|39170.38|0.07|0.02|R|F|1992-05-26|1992-05-18|1992-06-15|TAKE BACK RETURN|MAIL|ronic theodolites. regular, 12386|105581|602|3|32|50770.56|0.08|0.06|A|F|1992-05-29|1992-05-08|1992-06-08|TAKE BACK RETURN|TRUCK|ecial deposits. furiously steal 12386|84452|9469|4|43|61767.35|0.03|0.08|A|F|1992-06-19|1992-05-25|1992-07-10|TAKE BACK RETURN|MAIL|odolites. evenly even requ 12386|170228|2746|5|43|55823.46|0.07|0.02|A|F|1992-04-15|1992-05-17|1992-05-01|NONE|AIR|ons. furiously regular requ 12387|128638|6175|1|46|76664.98|0.10|0.07|N|O|1997-09-18|1997-08-30|1997-10-03|TAKE BACK RETURN|AIR|ic accounts sleep. foxes sleep along the 12388|186826|9345|1|47|89902.54|0.01|0.04|N|O|1997-12-07|1997-12-01|1997-12-08|COLLECT COD|SHIP|le after the slyly expres 12388|169235|9236|2|17|22171.91|0.09|0.07|N|O|1998-01-13|1997-12-17|1998-01-25|NONE|MAIL|tructions 12388|119544|7078|3|15|23453.10|0.04|0.08|N|O|1997-11-17|1998-01-12|1997-12-15|COLLECT COD|TRUCK|le carefully quickly regular acco 12388|170252|2770|4|31|40989.75|0.02|0.04|N|O|1997-12-28|1997-11-26|1998-01-07|TAKE BACK RETURN|REG AIR|nic escapades. furiousl 12388|20812|5817|5|40|69312.40|0.03|0.01|N|O|1997-12-19|1997-11-15|1998-01-12|DELIVER IN PERSON|RAIL|y final packag 12388|181289|6326|6|50|68514.00|0.10|0.00|N|O|1997-11-29|1997-12-10|1997-12-09|NONE|REG AIR|ding to the blit 12388|116171|8683|7|29|34427.93|0.08|0.08|N|O|1997-11-01|1998-01-07|1997-11-11|NONE|AIR|foxes. quickly i 12389|79233|4248|1|31|37579.13|0.06|0.06|R|F|1994-09-07|1994-09-30|1994-09-29|DELIVER IN PERSON|TRUCK|fily carefully even foxes. even de 12389|56151|1162|2|44|48714.60|0.02|0.03|A|F|1994-10-29|1994-10-22|1994-11-13|DELIVER IN PERSON|REG AIR|s? unusual, even instruction 12389|118764|8765|3|12|21393.12|0.07|0.04|R|F|1994-09-19|1994-10-18|1994-10-15|DELIVER IN PERSON|AIR|e special accounts. bl 12389|56692|6693|4|43|70893.67|0.10|0.03|A|F|1994-09-22|1994-09-20|1994-10-05|TAKE BACK RETURN|AIR|ts about the car 12389|6666|1667|5|20|31453.20|0.06|0.08|A|F|1994-10-04|1994-10-21|1994-10-06|NONE|TRUCK|s. special accounts wake blithely 12389|20906|5911|6|8|14615.20|0.01|0.02|R|F|1994-08-12|1994-10-06|1994-09-11|DELIVER IN PERSON|FOB|ic foxes. qui 12389|150745|746|7|19|34119.06|0.10|0.06|A|F|1994-10-14|1994-10-04|1994-10-15|TAKE BACK RETURN|MAIL|each carefully about the 12390|167594|2627|1|16|26585.44|0.03|0.02|N|O|1996-07-21|1996-08-04|1996-08-20|TAKE BACK RETURN|TRUCK|egular packages boost slyly slyly s 12390|139156|4183|2|8|9561.20|0.10|0.03|N|O|1996-07-20|1996-08-17|1996-08-17|DELIVER IN PERSON|REG AIR|sits. 
slyly ironic dependencies cajole furi 12390|185166|5167|3|9|11260.44|0.09|0.06|N|O|1996-06-30|1996-08-18|1996-07-17|TAKE BACK RETURN|FOB|ymptotes cajole slyly after the depo 12391|47912|5425|1|26|48357.66|0.05|0.05|N|O|1998-10-26|1998-09-23|1998-11-24|DELIVER IN PERSON|TRUCK|s doubt fluffily furiously ironic t 12391|46665|6666|2|45|72524.70|0.07|0.03|N|O|1998-08-15|1998-08-29|1998-08-22|NONE|SHIP|the final platelets 12416|60034|35|1|7|6958.21|0.10|0.05|R|F|1993-10-18|1993-09-01|1993-10-27|DELIVER IN PERSON|AIR|packages. slyly un 12417|45147|5148|1|25|27303.50|0.03|0.08|R|F|1993-11-23|1993-12-29|1993-12-11|NONE|AIR|unts haggl 12417|184243|9280|2|21|27872.04|0.02|0.01|A|F|1993-11-26|1994-01-11|1993-12-12|NONE|AIR|nal instructions use quick 12417|156646|1677|3|45|76618.80|0.08|0.00|R|F|1994-01-03|1994-01-07|1994-01-11|COLLECT COD|AIR|ts use carefully acc 12417|144074|4075|4|45|50313.15|0.01|0.06|A|F|1994-02-04|1993-11-25|1994-02-28|NONE|TRUCK|ly even reque 12417|49828|9829|5|24|42667.68|0.01|0.05|A|F|1993-11-03|1993-12-25|1993-11-18|NONE|REG AIR|ages. furiously iron 12418|192140|2141|1|7|8624.98|0.05|0.03|A|F|1992-05-20|1992-04-09|1992-06-09|COLLECT COD|RAIL|yly express idea 12418|86697|4222|2|5|8418.45|0.09|0.03|R|F|1992-04-14|1992-03-27|1992-05-07|COLLECT COD|RAIL|s affix quickly regular theodolite 12418|110812|3324|3|38|69266.78|0.05|0.04|A|F|1992-05-11|1992-03-28|1992-06-07|NONE|MAIL|lites above the daringl 12418|10259|260|4|34|39754.50|0.04|0.06|A|F|1992-06-05|1992-05-03|1992-06-19|DELIVER IN PERSON|MAIL|ecial requests run fluffily special de 12418|11852|4354|5|10|17638.50|0.10|0.05|A|F|1992-04-24|1992-05-03|1992-05-16|DELIVER IN PERSON|MAIL|dolites haggle carefully 12418|188904|8905|6|12|23914.80|0.08|0.08|A|F|1992-06-15|1992-05-12|1992-06-17|COLLECT COD|SHIP|ly bold pl 12419|90612|3122|1|26|41667.86|0.09|0.06|N|O|1995-11-28|1995-12-26|1995-12-01|COLLECT COD|REG AIR|quests. unusual request 12419|115094|5095|2|18|19963.62|0.02|0.07|N|O|1996-01-29|1995-12-20|1996-02-06|COLLECT COD|FOB|yly according 12419|174183|4184|3|44|55315.92|0.08|0.00|N|O|1995-10-31|1996-01-08|1995-11-02|NONE|TRUCK|regular accounts cajole 12419|81943|6960|4|42|80847.48|0.04|0.04|N|O|1996-02-09|1995-11-15|1996-02-19|TAKE BACK RETURN|REG AIR|oost evenly. slyly final accounts after 12419|4959|9960|5|24|44734.80|0.05|0.06|N|O|1996-01-25|1996-01-14|1996-02-05|COLLECT COD|RAIL| beans. final, regular accounts nag slyly 12419|143968|1511|6|19|38227.24|0.09|0.01|N|O|1995-11-30|1995-12-06|1995-12-20|COLLECT COD|FOB| instructions. care 12419|58777|1283|7|12|20829.24|0.00|0.04|N|O|1995-11-21|1995-11-18|1995-12-07|DELIVER IN PERSON|RAIL|gular courts. blithe 12420|64389|9402|1|39|52781.82|0.04|0.06|R|F|1994-04-06|1994-03-20|1994-04-25|NONE|AIR|pinto beans are sl 12420|177090|7091|2|22|25675.98|0.09|0.03|A|F|1994-03-31|1994-02-03|1994-04-05|NONE|SHIP|e furiously regular theodolites. fl 12420|135019|7533|3|5|5270.05|0.07|0.08|A|F|1994-01-16|1994-03-02|1994-02-01|NONE|REG AIR|excuses believe 12420|63396|8409|4|7|9515.73|0.10|0.08|A|F|1994-02-18|1994-02-09|1994-02-22|COLLECT COD|TRUCK|y. blithely even deposits haggl 12420|95147|7657|5|30|34264.20|0.02|0.03|R|F|1994-01-12|1994-02-26|1994-02-06|DELIVER IN PERSON|RAIL|ular deposits boost carefully. 
quickly 12420|134518|9545|6|9|13972.59|0.08|0.03|A|F|1994-02-12|1994-02-12|1994-02-18|DELIVER IN PERSON|FOB| express packages unwind quickly slyly unus 12421|29162|9163|1|37|40372.92|0.05|0.01|N|O|1996-02-26|1995-12-15|1996-03-22|COLLECT COD|REG AIR|ithely final dolphins sleep afte 12421|117245|4779|2|24|30293.76|0.00|0.03|N|O|1995-12-25|1995-12-20|1996-01-06|DELIVER IN PERSON|SHIP|ithely regular acc 12421|130483|8023|3|43|65079.64|0.04|0.08|N|O|1995-11-09|1996-01-26|1995-11-20|TAKE BACK RETURN|REG AIR| carefully expr 12421|13296|5798|4|6|7255.74|0.09|0.08|N|O|1995-12-04|1996-01-05|1995-12-20|DELIVER IN PERSON|MAIL|pending excuses. express ideas detect 12421|50463|2969|5|43|60778.78|0.07|0.00|N|O|1996-02-20|1996-01-08|1996-03-06|TAKE BACK RETURN|FOB|ages. blit 12421|116678|6679|6|27|45756.09|0.10|0.05|N|O|1996-01-13|1996-01-10|1996-01-18|NONE|TRUCK|s. quickly pend 12422|8183|3184|1|33|36008.94|0.08|0.05|N|O|1995-08-01|1995-08-11|1995-08-24|COLLECT COD|FOB|ts detect against the furiously brave sh 12422|130300|301|2|21|27936.30|0.05|0.08|N|O|1995-07-08|1995-07-24|1995-07-19|COLLECT COD|MAIL|ole. slyly regu 12422|148331|846|3|14|19310.62|0.02|0.00|N|F|1995-06-06|1995-08-09|1995-06-28|NONE|REG AIR|egular foxes. carefully regular 12422|195562|5563|4|46|76247.76|0.09|0.08|N|O|1995-09-20|1995-06-26|1995-10-07|TAKE BACK RETURN|REG AIR|ag blithely 12422|9279|9280|5|16|19012.32|0.02|0.03|N|O|1995-06-19|1995-07-01|1995-06-27|DELIVER IN PERSON|TRUCK|ding to the ideas could use blithely fluf 12422|149617|4646|6|29|48331.69|0.02|0.03|N|F|1995-06-16|1995-07-10|1995-06-24|DELIVER IN PERSON|FOB| bold sentimen 12423|89959|2468|1|2|3897.90|0.08|0.02|R|F|1992-09-05|1992-09-09|1992-09-14|COLLECT COD|RAIL|the fluffily daring pinto beans 12423|126320|3857|2|26|35004.32|0.04|0.05|R|F|1992-08-07|1992-10-01|1992-08-11|COLLECT COD|MAIL|uickly ironic theodolites affix. i 12423|59370|6886|3|38|50516.06|0.05|0.05|R|F|1992-10-09|1992-08-20|1992-10-29|COLLECT COD|SHIP|ggedly unusual theodolites. dogged 12448|111283|8817|1|16|20708.48|0.06|0.02|N|O|1996-03-01|1996-01-19|1996-03-06|DELIVER IN PERSON|TRUCK|ic requests; foxes detect s 12448|122530|2531|2|17|26393.01|0.05|0.05|N|O|1996-03-05|1995-12-31|1996-04-02|COLLECT COD|TRUCK|e carefully deposits. furiously idle 12448|158758|6304|3|42|76303.50|0.03|0.06|N|O|1996-01-24|1995-12-15|1996-02-10|NONE|FOB|ependencies? reg 12448|35116|123|4|24|25226.64|0.01|0.05|N|O|1996-02-12|1996-01-05|1996-02-26|COLLECT COD|SHIP|uests haggle s 12448|179805|9806|5|17|32041.60|0.07|0.05|N|O|1995-11-24|1996-01-30|1995-12-09|DELIVER IN PERSON|MAIL|foxes above the blithely sl 12449|152105|7136|1|45|52069.50|0.02|0.03|A|F|1993-04-08|1993-03-28|1993-04-10|TAKE BACK RETURN|REG AIR|furiously pending accounts 12449|17944|446|2|45|83787.30|0.09|0.02|A|F|1993-02-06|1993-04-26|1993-02-24|COLLECT COD|TRUCK|packages. 
carefully final deposits accor 12449|168146|3179|3|16|19426.24|0.06|0.00|A|F|1993-02-04|1993-04-27|1993-03-06|COLLECT COD|RAIL|carefully always final foxe 12450|109032|9033|1|40|41641.20|0.01|0.06|R|F|1994-03-31|1994-06-09|1994-04-07|TAKE BACK RETURN|SHIP|efully ironic p 12451|30996|8506|1|49|94422.51|0.07|0.06|R|F|1993-06-27|1993-07-05|1993-06-28|NONE|FOB|arefully even excuses wake fi 12451|142947|7976|2|29|57708.26|0.04|0.06|A|F|1993-06-24|1993-07-11|1993-07-17|NONE|MAIL|pending platelets wake furiously accordin 12451|100951|5972|3|19|37087.05|0.02|0.02|R|F|1993-06-04|1993-05-23|1993-06-15|NONE|MAIL|quests about 12451|82372|2373|4|49|66364.13|0.02|0.08|A|F|1993-05-08|1993-07-14|1993-05-26|COLLECT COD|REG AIR|wake carefull 12451|24309|1816|5|20|24666.00|0.09|0.05|R|F|1993-06-27|1993-05-29|1993-07-22|DELIVER IN PERSON|TRUCK|the furiously ironic packages? 12451|156032|3578|6|28|30464.84|0.00|0.00|A|F|1993-06-13|1993-07-13|1993-07-04|DELIVER IN PERSON|MAIL|fully even foxes wake. special, final acco 12451|27699|7700|7|26|42293.94|0.00|0.03|R|F|1993-07-01|1993-06-30|1993-07-27|DELIVER IN PERSON|RAIL|ions: ironic packages eat 12452|125160|185|1|14|16592.24|0.08|0.01|R|F|1994-03-29|1994-01-17|1994-04-10|NONE|MAIL|es. carefully regular r 12452|161354|1355|2|31|43875.85|0.10|0.07|R|F|1994-01-04|1994-01-15|1994-01-16|NONE|RAIL|g instructions. furious, bra 12452|71689|9211|3|31|51481.08|0.09|0.07|A|F|1994-01-14|1994-02-09|1994-01-31|NONE|MAIL|jole. special, 12452|61542|9061|4|44|66155.76|0.10|0.07|A|F|1994-03-29|1994-01-10|1994-04-14|DELIVER IN PERSON|TRUCK|osits. deposits sleep fluffi 12453|178720|8721|1|33|59357.76|0.03|0.06|R|F|1994-03-26|1994-03-08|1994-04-06|TAKE BACK RETURN|RAIL|y ironic deposits sleep furiously pen 12453|139962|2476|2|20|40039.20|0.00|0.03|R|F|1994-04-04|1994-03-08|1994-04-05|COLLECT COD|MAIL|r the ruthlessly regular 12453|139578|7118|3|26|42056.82|0.08|0.05|R|F|1994-01-20|1994-04-07|1994-01-22|DELIVER IN PERSON|TRUCK|ges kindle after the final, unusual pinto 12453|83273|3274|4|1|1256.27|0.10|0.05|R|F|1994-04-20|1994-04-07|1994-04-26|NONE|REG AIR|y above the fluffily special foxes. f 12453|158324|840|5|28|38704.96|0.07|0.01|R|F|1994-05-09|1994-02-19|1994-06-06|NONE|SHIP|oxes grow past the carefully regular pint 12453|139258|4285|6|17|22053.25|0.01|0.08|A|F|1994-02-12|1994-03-04|1994-03-07|DELIVER IN PERSON|MAIL|the carefully final pinto 12454|116913|9425|1|39|75266.49|0.05|0.07|N|O|1997-04-01|1997-05-23|1997-04-25|DELIVER IN PERSON|AIR|ts integrate carefully about 12455|199506|7064|1|37|59403.50|0.10|0.07|A|F|1992-12-14|1992-10-29|1992-12-28|TAKE BACK RETURN|MAIL|refully alo 12455|86374|1391|2|10|13603.70|0.09|0.01|A|F|1992-10-05|1992-11-26|1992-10-11|NONE|RAIL|s. ironic foxes are 12455|79628|7150|3|45|72342.90|0.05|0.06|R|F|1992-12-21|1992-11-06|1992-12-22|NONE|AIR|deposits. quickly even foxes wake quic 12455|99414|9415|4|29|40988.89|0.09|0.06|A|F|1992-11-23|1992-11-21|1992-11-24|DELIVER IN PERSON|AIR|l escapades. quickly ex 12455|44704|2217|5|2|3297.40|0.01|0.03|A|F|1992-09-30|1992-10-28|1992-10-05|DELIVER IN PERSON|REG AIR|xpress packages cajole. quickly 12480|61252|8771|1|30|36397.50|0.05|0.02|A|F|1994-09-11|1994-09-10|1994-09-17|COLLECT COD|RAIL| the quickly regular ideas. 12480|52044|9560|2|22|21912.88|0.06|0.08|R|F|1994-07-31|1994-08-26|1994-08-12|NONE|RAIL|posits. 
slyly fluffy dolphins are regular, 12480|86811|6812|3|26|46743.06|0.02|0.03|R|F|1994-11-05|1994-09-16|1994-11-06|COLLECT COD|TRUCK|ide the slyly 12481|85187|2712|1|14|16410.52|0.02|0.00|N|O|1995-11-05|1995-08-18|1995-11-20|TAKE BACK RETURN|AIR|ss packages cajole. ironic, regul 12481|129097|9098|2|23|25900.07|0.04|0.03|N|O|1995-09-22|1995-09-15|1995-09-27|NONE|TRUCK|ely against the foxes. 12481|120149|150|3|23|26890.22|0.09|0.04|N|O|1995-08-10|1995-08-15|1995-09-07|COLLECT COD|RAIL|s snooze. carefully express accounts ha 12481|129110|4135|4|9|10251.99|0.08|0.08|N|O|1995-07-31|1995-10-05|1995-08-17|DELIVER IN PERSON|TRUCK|jole furiously. slowly unusual ac 12482|16751|1754|1|10|16677.50|0.03|0.05|N|O|1996-02-19|1996-03-05|1996-03-09|DELIVER IN PERSON|RAIL|ckly silent notornis sleep sl 12482|117405|9917|2|40|56896.00|0.05|0.06|N|O|1996-04-10|1996-03-09|1996-04-22|NONE|TRUCK|ns. fluffily regular somas against the blit 12482|13722|3723|3|50|81786.00|0.10|0.02|N|O|1996-05-04|1996-03-04|1996-05-28|COLLECT COD|TRUCK|ayers wake quickly quickly 12482|17809|5313|4|8|13814.40|0.07|0.02|N|O|1996-03-18|1996-04-06|1996-04-16|COLLECT COD|MAIL|r pinto beans 12482|112756|2757|5|45|79593.75|0.10|0.04|N|O|1996-01-11|1996-02-10|1996-02-10|NONE|TRUCK|: special packages above the furiously 12483|89379|1888|1|22|30104.14|0.07|0.08|R|F|1993-10-31|1993-10-27|1993-11-02|NONE|TRUCK|blithely. epitaphs boost c 12483|188791|1310|2|28|52634.12|0.10|0.06|R|F|1993-11-19|1993-10-07|1993-11-23|DELIVER IN PERSON|REG AIR|. slyly even theodolites boost. carefull 12483|1051|8552|3|29|27609.45|0.07|0.00|R|F|1993-08-22|1993-09-22|1993-09-04|NONE|REG AIR|ng packages affix deposi 12483|51118|6129|4|28|29935.08|0.01|0.04|R|F|1993-10-28|1993-09-23|1993-11-10|COLLECT COD|RAIL|y bold requests ca 12483|129881|2394|5|1|1910.88|0.05|0.01|R|F|1993-08-12|1993-09-11|1993-08-27|TAKE BACK RETURN|MAIL|lyly regular packages. slyly r 12484|35012|7516|1|34|32198.34|0.06|0.02|N|O|1995-09-28|1995-11-06|1995-10-19|COLLECT COD|TRUCK|quickly even dependenci 12484|36126|3636|2|30|31863.60|0.10|0.00|N|O|1995-12-12|1995-10-13|1995-12-25|COLLECT COD|SHIP|ly unusual accounts boost furiously ac 12484|167098|2131|3|26|30292.34|0.01|0.01|N|O|1995-09-29|1995-10-09|1995-10-28|NONE|REG AIR|have to wake blithely: ironically ironic 12484|7451|2452|4|1|1358.45|0.04|0.07|N|O|1995-11-15|1995-11-16|1995-12-02|DELIVER IN PERSON|REG AIR|s mold blithely boldly express theodolit 12484|12503|7506|5|50|70775.00|0.01|0.00|N|O|1995-09-19|1995-10-16|1995-09-30|TAKE BACK RETURN|RAIL|after the pending deposits. ex 12484|128177|3202|6|14|16872.38|0.07|0.08|N|O|1995-12-07|1995-10-27|1995-12-21|TAKE BACK RETURN|AIR|r theodolites about the unusual foxes belie 12485|105047|5048|1|41|43133.64|0.03|0.07|R|F|1993-11-14|1993-10-15|1993-11-30|COLLECT COD|RAIL|egular packages. car 12485|118929|1441|2|32|62333.44|0.01|0.07|R|F|1993-11-26|1993-09-10|1993-12-05|DELIVER IN PERSON|FOB|furiously special theodolites sl 12485|36100|1107|3|8|8288.80|0.08|0.01|R|F|1993-10-05|1993-10-23|1993-10-27|TAKE BACK RETURN|RAIL|hely regular instructions. 12485|121213|1214|4|28|34557.88|0.07|0.04|A|F|1993-11-17|1993-11-03|1993-12-07|TAKE BACK RETURN|MAIL|ges. deposits against the regu 12485|10277|2779|5|29|34430.83|0.05|0.03|A|F|1993-08-31|1993-09-09|1993-09-22|TAKE BACK RETURN|FOB|eposits haggle furiously. regula 12485|114966|7478|6|19|37638.24|0.10|0.08|A|F|1993-08-17|1993-09-07|1993-09-07|COLLECT COD|MAIL| packages. 
s 12485|21131|3634|7|43|45241.59|0.08|0.01|R|F|1993-09-12|1993-10-16|1993-09-30|COLLECT COD|FOB|lphins. instructions among the slyly s 12486|147042|7043|1|26|28315.04|0.09|0.04|A|F|1995-04-30|1995-04-11|1995-05-14|NONE|SHIP| carefully re 12486|118492|3515|2|19|28699.31|0.08|0.04|A|F|1995-06-01|1995-04-10|1995-06-07|DELIVER IN PERSON|MAIL|xpress asymptotes u 12486|83645|8662|3|15|24429.60|0.07|0.04|N|F|1995-06-07|1995-04-09|1995-06-22|TAKE BACK RETURN|SHIP|y idle deposits. furiously 12486|19677|7181|4|19|30336.73|0.06|0.04|N|O|1995-06-20|1995-05-13|1995-07-19|NONE|RAIL|ts nag. slyly reg 12486|110520|521|5|20|30610.40|0.10|0.05|N|F|1995-06-16|1995-05-08|1995-06-28|TAKE BACK RETURN|MAIL|riously special foxes sleep quickly. f 12486|99671|2181|6|4|6682.68|0.08|0.05|R|F|1995-03-05|1995-04-03|1995-03-07|DELIVER IN PERSON|TRUCK| slyly ironic pinto beans wake 12486|157907|7908|7|38|74666.20|0.01|0.05|N|F|1995-06-15|1995-04-17|1995-06-29|NONE|SHIP|s. carefully regular braids haggle sly 12487|127699|7700|1|34|58707.46|0.02|0.05|R|F|1995-02-07|1995-02-28|1995-02-27|TAKE BACK RETURN|FOB|endencies. unusual ideas nod carefully 12512|124769|4770|1|25|44844.00|0.03|0.08|N|O|1996-05-02|1996-05-17|1996-05-18|NONE|MAIL|the final, ruthless requests affix 12512|178374|3409|2|28|40666.36|0.10|0.01|N|O|1996-07-04|1996-05-06|1996-07-22|DELIVER IN PERSON|AIR| packages. regular accounts haggle fur 12512|128716|3741|3|27|47107.17|0.03|0.06|N|O|1996-07-22|1996-05-11|1996-07-25|NONE|RAIL|even, express requests. furiously 12513|25364|7867|1|31|39970.16|0.07|0.02|N|O|1997-11-18|1997-10-14|1997-12-18|NONE|TRUCK|gside of the re 12513|82619|144|2|2|3203.22|0.01|0.04|N|O|1997-09-13|1997-11-28|1997-09-24|TAKE BACK RETURN|REG AIR|ing notornis 12513|176470|8988|3|16|24743.52|0.09|0.01|N|O|1997-10-10|1997-10-10|1997-10-15|TAKE BACK RETURN|FOB|foxes. ironic, ironic accounts will 12514|139815|9816|1|22|40805.82|0.04|0.06|A|F|1994-08-23|1994-05-28|1994-09-02|COLLECT COD|TRUCK|cial foxes are blithel 12514|67596|2609|2|6|9381.54|0.01|0.00|R|F|1994-05-27|1994-07-07|1994-06-23|TAKE BACK RETURN|AIR|ily regular accounts poach according to th 12515|173285|5803|1|37|50256.36|0.00|0.05|A|F|1993-06-07|1993-07-28|1993-07-01|TAKE BACK RETURN|FOB|ages hang above the final requests. 12515|191017|3537|2|29|32132.29|0.07|0.07|A|F|1993-07-03|1993-07-17|1993-07-28|COLLECT COD|MAIL|fily pending package 12515|16180|1183|3|11|12057.98|0.02|0.07|A|F|1993-06-11|1993-07-23|1993-06-25|COLLECT COD|REG AIR|beans. quickly regular 12516|181548|1549|1|35|57033.90|0.09|0.03|R|F|1994-08-04|1994-08-13|1994-08-23|TAKE BACK RETURN|MAIL|refully final depo 12516|61814|9333|2|28|49722.68|0.03|0.04|R|F|1994-05-26|1994-07-10|1994-06-24|COLLECT COD|SHIP| about the quickly expres 12516|104249|4250|3|14|17545.36|0.05|0.08|R|F|1994-09-23|1994-07-18|1994-10-22|DELIVER IN PERSON|SHIP|e the slyly f 12517|54553|2069|1|21|31658.55|0.07|0.07|N|O|1997-03-27|1997-03-24|1997-04-01|COLLECT COD|RAIL|l dependencies hagg 12517|65294|307|2|17|21407.93|0.08|0.08|N|O|1997-02-22|1997-03-24|1997-03-21|COLLECT COD|REG AIR|ully even reque 12517|180291|292|3|17|23311.93|0.05|0.01|N|O|1997-04-18|1997-03-24|1997-04-26|NONE|SHIP|mong the deposits. 
12518|63098|617|1|11|11671.99|0.08|0.03|A|F|1993-10-25|1993-09-09|1993-10-31|TAKE BACK RETURN|TRUCK|l frays along the bold, bol 12518|75289|7797|2|19|24021.32|0.08|0.00|A|F|1993-11-03|1993-09-25|1993-11-16|COLLECT COD|MAIL| asymptotes cajole unusual req 12518|176283|3835|3|21|28544.88|0.09|0.07|A|F|1993-09-13|1993-10-12|1993-10-13|NONE|RAIL|t according to th 12518|171760|6795|4|7|12822.32|0.05|0.02|A|F|1993-09-16|1993-10-03|1993-09-17|DELIVER IN PERSON|TRUCK|st the even foxes. requests haggle blithe 12518|166866|9383|5|6|11597.16|0.05|0.00|A|F|1993-07-27|1993-10-21|1993-08-21|DELIVER IN PERSON|TRUCK|ular depos 12518|106314|1335|6|3|3960.93|0.03|0.00|R|F|1993-08-12|1993-10-19|1993-08-13|TAKE BACK RETURN|TRUCK|quiet, silent packages sleep quickly 12518|7702|7703|7|50|80485.00|0.00|0.07|A|F|1993-10-10|1993-08-22|1993-10-11|NONE|MAIL|ets wake slyly. theodolites cajole fluffi 12519|45913|8418|1|23|42754.93|0.08|0.02|R|F|1994-04-30|1994-03-15|1994-05-04|DELIVER IN PERSON|SHIP|ly. pending instr 12519|48696|6209|2|47|77300.43|0.05|0.02|A|F|1994-03-08|1994-03-01|1994-04-04|DELIVER IN PERSON|SHIP|e across the fluffily regular packages. 12519|28226|729|3|26|30009.72|0.08|0.00|R|F|1994-01-11|1994-02-28|1994-02-04|DELIVER IN PERSON|AIR|g the blithely special accou 12519|191537|4057|4|4|6514.12|0.07|0.04|A|F|1994-04-19|1994-03-21|1994-05-04|NONE|RAIL|wake slyly express requests. fur 12519|148475|6018|5|20|30469.40|0.03|0.02|A|F|1994-02-17|1994-02-02|1994-02-24|DELIVER IN PERSON|MAIL|sh regular, ironic 12519|7123|2124|6|36|37084.32|0.09|0.01|A|F|1994-02-25|1994-02-20|1994-03-20|NONE|MAIL|, quiet accounts are al 12544|133239|3240|1|25|31805.75|0.04|0.05|A|F|1994-05-22|1994-04-24|1994-06-01|NONE|FOB| ironic accounts 12544|145485|514|2|24|36731.52|0.04|0.02|R|F|1994-04-07|1994-05-09|1994-04-14|COLLECT COD|TRUCK|furiously according to the theodolites. 12545|174994|4995|1|4|8275.96|0.03|0.06|N|O|1996-07-19|1996-09-13|1996-08-01|DELIVER IN PERSON|RAIL|unusual ide 12545|115389|412|2|42|58983.96|0.07|0.04|N|O|1996-08-31|1996-08-09|1996-09-09|COLLECT COD|SHIP|as are carefully ironic pinto beans. qui 12545|132374|2375|3|20|28127.40|0.10|0.07|N|O|1996-07-08|1996-09-02|1996-07-21|COLLECT COD|SHIP| packages engage carefully according 12545|1331|6332|4|1|1232.33|0.00|0.08|N|O|1996-07-18|1996-09-16|1996-07-21|NONE|SHIP|carefully bol 12546|120156|2669|1|4|4704.60|0.00|0.03|N|O|1996-03-30|1996-04-19|1996-04-17|DELIVER IN PERSON|FOB|y unusual foxes h 12546|61123|6136|2|5|5420.60|0.06|0.07|N|O|1996-05-05|1996-03-30|1996-05-21|TAKE BACK RETURN|FOB|. furiously express Tiresias sleep 12546|90136|5155|3|28|31531.64|0.01|0.03|N|O|1996-03-13|1996-04-27|1996-03-29|COLLECT COD|TRUCK|eposits; carefully ironic requ 12546|103748|3749|4|25|43793.50|0.06|0.01|N|O|1996-02-23|1996-03-28|1996-02-24|NONE|TRUCK|ously ironic deposi 12546|90417|418|5|2|2814.82|0.06|0.01|N|O|1996-03-15|1996-03-18|1996-03-18|DELIVER IN PERSON|TRUCK|refully final ideas caj 12547|152899|2900|1|50|97594.50|0.07|0.05|N|O|1998-08-04|1998-06-23|1998-08-07|DELIVER IN PERSON|FOB|hely unusual deposits cajo 12547|172081|2082|2|46|53041.68|0.00|0.02|N|O|1998-06-04|1998-05-19|1998-06-20|NONE|AIR|ly regular requests 12547|148670|1185|3|1|1718.67|0.09|0.06|N|O|1998-04-16|1998-07-11|1998-04-22|TAKE BACK RETURN|RAIL|about the quickly regular fret 12547|92638|7657|4|32|52180.16|0.03|0.02|N|O|1998-07-08|1998-06-24|1998-08-03|DELIVER IN PERSON|SHIP|yly. 
furiously reg 12548|156497|4043|1|27|41944.23|0.03|0.08|N|O|1998-01-19|1998-01-29|1998-01-26|TAKE BACK RETURN|FOB|he furious 12548|31285|1286|2|31|37704.68|0.10|0.07|N|O|1997-12-13|1998-02-08|1997-12-25|COLLECT COD|TRUCK|ys dogged deposits. carefully ironic depen 12548|6802|1803|3|23|39302.40|0.03|0.01|N|O|1997-12-15|1998-02-16|1997-12-18|DELIVER IN PERSON|MAIL|nstructions are furiousl 12548|116456|6457|4|41|60370.45|0.03|0.05|N|O|1998-03-19|1997-12-31|1998-04-12|DELIVER IN PERSON|AIR|riously final instructions use qui 12548|73078|5586|5|40|42042.80|0.05|0.06|N|O|1997-12-29|1998-01-29|1998-01-01|TAKE BACK RETURN|TRUCK|nic platelets solv 12549|37018|9522|1|13|12415.13|0.05|0.03|N|O|1997-08-07|1997-07-12|1997-08-08|NONE|RAIL|. carefully ironic theodo 12549|152208|9754|2|33|41586.60|0.05|0.01|N|O|1997-07-05|1997-08-20|1997-07-22|NONE|AIR|requests. furiously even accounts n 12549|152719|7750|3|28|49607.88|0.04|0.02|N|O|1997-07-09|1997-07-06|1997-07-21|NONE|RAIL|oxes haggle against the qui 12550|10372|5375|1|3|3847.11|0.01|0.06|R|F|1993-03-17|1993-05-22|1993-04-03|DELIVER IN PERSON|RAIL|accounts. furiously bold ac 12550|115164|187|2|35|41270.60|0.08|0.07|R|F|1993-05-17|1993-05-23|1993-06-09|TAKE BACK RETURN|MAIL| accounts. furiously regular p 12551|165658|8175|1|30|51709.50|0.01|0.02|A|F|1992-07-31|1992-09-10|1992-08-20|DELIVER IN PERSON|RAIL|equests. carefu 12551|8957|6458|2|26|48514.70|0.04|0.08|R|F|1992-10-11|1992-07-15|1992-10-13|TAKE BACK RETURN|TRUCK|fully special accou 12551|131355|8895|3|25|34658.75|0.07|0.00|A|F|1992-08-26|1992-08-21|1992-09-05|DELIVER IN PERSON|MAIL|he blithely final acc 12551|80466|7991|4|47|67983.62|0.08|0.01|R|F|1992-07-13|1992-07-31|1992-07-30|NONE|TRUCK|kly unusual instructio 12576|131014|8554|1|28|29260.28|0.03|0.03|R|F|1993-04-02|1993-04-10|1993-04-17|DELIVER IN PERSON|RAIL|ll have to are sly 12577|145893|3436|1|8|15511.12|0.06|0.04|N|O|1998-06-09|1998-07-29|1998-06-28|TAKE BACK RETURN|SHIP|egular depo 12577|17282|2285|2|17|20387.76|0.05|0.07|N|O|1998-06-04|1998-08-06|1998-06-06|DELIVER IN PERSON|AIR|refully ironic requests. fin 12577|42203|4708|3|49|56114.80|0.00|0.06|N|O|1998-06-14|1998-06-30|1998-06-21|NONE|FOB|ickly silent waters cajole furiously a 12577|104124|9145|4|43|48509.16|0.10|0.01|N|O|1998-07-15|1998-07-09|1998-08-09|TAKE BACK RETURN|AIR|ily carefully unusual request 12578|186626|6627|1|32|54803.84|0.01|0.04|N|O|1996-12-16|1997-01-28|1996-12-27|TAKE BACK RETURN|AIR|lyly express pinto 12578|8071|3072|2|21|20560.47|0.05|0.03|N|O|1997-01-18|1997-02-03|1997-01-21|COLLECT COD|SHIP|ncies lose. requests after the furiously b 12578|32899|7906|3|49|89762.61|0.09|0.07|N|O|1997-02-03|1997-01-18|1997-02-28|DELIVER IN PERSON|MAIL|y express foxes 12578|121587|1588|4|11|17694.38|0.08|0.04|N|O|1996-12-07|1997-01-01|1997-01-04|TAKE BACK RETURN|FOB|le after th 12578|108065|8066|5|17|18242.02|0.01|0.06|N|O|1996-12-20|1997-01-18|1996-12-24|DELIVER IN PERSON|FOB|ronic accounts sublate. final requests wak 12578|47246|9751|6|17|20285.08|0.07|0.06|N|O|1997-03-16|1996-12-28|1997-04-01|DELIVER IN PERSON|FOB|ts. furiously permanent excuses 12578|58880|8881|7|47|86427.36|0.06|0.00|N|O|1997-03-05|1996-12-30|1997-03-25|TAKE BACK RETURN|FOB|ccounts boost quickly final, pending req 12579|187859|2896|1|26|50618.10|0.02|0.04|N|O|1995-07-08|1995-08-31|1995-07-26|NONE|TRUCK|at the carefully silent ideas. ironic, 12580|831|3332|1|32|55418.56|0.10|0.07|R|F|1993-04-05|1993-02-28|1993-05-05|COLLECT COD|REG AIR|s affix. 
carefully 12580|152523|7554|2|10|15755.20|0.03|0.00|A|F|1993-04-22|1993-03-29|1993-05-08|DELIVER IN PERSON|MAIL| haggle carefully quickly ironic d 12581|165914|947|1|37|73256.67|0.06|0.03|A|F|1994-11-11|1994-12-03|1994-12-06|NONE|SHIP|y regular foxes. special 12581|104915|2446|2|25|47997.75|0.04|0.07|A|F|1994-12-08|1994-12-06|1995-01-03|DELIVER IN PERSON|FOB|uriously express dep 12581|101877|6898|3|28|52608.36|0.01|0.02|R|F|1994-11-13|1994-12-23|1994-11-22|COLLECT COD|TRUCK|oss the quickly 12582|106620|1641|1|23|37412.26|0.06|0.08|A|F|1993-03-21|1993-04-08|1993-04-08|TAKE BACK RETURN|AIR|refully regular reque 12583|60853|8372|1|30|54415.50|0.09|0.04|N|O|1996-02-10|1996-03-27|1996-02-23|DELIVER IN PERSON|AIR|ial, regular plate 12608|61390|1391|1|4|5405.56|0.10|0.04|R|F|1993-06-16|1993-04-09|1993-07-08|NONE|TRUCK|ironic accounts sleep along the quickly 12608|181421|1422|2|7|10516.94|0.08|0.01|R|F|1993-06-22|1993-06-04|1993-07-02|TAKE BACK RETURN|AIR|side of the u 12608|141771|6800|3|36|65259.72|0.06|0.01|A|F|1993-05-04|1993-05-26|1993-05-30|COLLECT COD|MAIL|nic realms. sly 12609|68400|3413|1|31|42420.40|0.03|0.06|N|O|1996-02-08|1996-04-01|1996-02-24|DELIVER IN PERSON|MAIL|en pinto beans haggle furiously according 12609|192508|5028|2|15|24007.50|0.08|0.02|N|O|1996-04-29|1996-03-25|1996-05-24|COLLECT COD|TRUCK|ously unusual platelets. carefully 12609|68011|8012|3|24|23496.24|0.04|0.01|N|O|1996-05-15|1996-03-25|1996-06-13|COLLECT COD|MAIL|ructions. carefully bold asy 12609|45114|123|4|11|11650.21|0.01|0.02|N|O|1996-05-15|1996-03-21|1996-06-06|COLLECT COD|TRUCK|nal ideas along th 12610|183151|3152|1|6|7404.90|0.07|0.04|A|F|1993-09-26|1993-09-13|1993-10-16|NONE|TRUCK| final, bold platele 12611|74929|9944|1|26|49501.92|0.04|0.07|N|O|1995-11-08|1995-12-06|1995-11-17|TAKE BACK RETURN|RAIL|ieve carefully along t 12611|124716|9741|2|35|60924.85|0.05|0.04|N|O|1995-12-06|1995-12-24|1995-12-28|COLLECT COD|TRUCK|uses across the express, express accoun 12611|97217|7218|3|3|3642.63|0.00|0.02|N|O|1996-01-20|1995-12-14|1996-01-26|NONE|RAIL|lyly ironic accounts wake slyly special ins 12611|125917|8430|4|48|93259.68|0.06|0.03|N|O|1996-01-07|1995-12-26|1996-01-12|NONE|MAIL|al requests should 12611|73837|6345|5|25|45270.75|0.01|0.04|N|O|1995-11-22|1995-12-25|1995-11-27|COLLECT COD|TRUCK|l requests cajole stealthy asymptotes-- re 12612|43599|6104|1|25|38564.75|0.04|0.08|R|F|1994-02-17|1994-02-07|1994-03-11|COLLECT COD|FOB|e blithely silent platelets are quickly 12612|156705|9221|2|36|63421.20|0.06|0.08|A|F|1994-02-27|1994-01-24|1994-03-18|DELIVER IN PERSON|MAIL|riously regular requ 12612|126104|3641|3|45|50854.50|0.01|0.03|R|F|1994-03-14|1994-03-01|1994-04-04|COLLECT COD|AIR|arefully bold instructions caj 12612|16871|6872|4|15|26818.05|0.01|0.08|A|F|1994-01-18|1994-02-05|1994-02-09|NONE|FOB|ly among the furiously pending theodo 12612|139451|4478|5|4|5961.80|0.06|0.01|R|F|1994-04-11|1994-01-21|1994-04-12|COLLECT COD|SHIP|nal excuses boost fur 12612|137588|102|6|17|27634.86|0.06|0.06|A|F|1994-01-27|1994-02-06|1994-01-31|DELIVER IN PERSON|RAIL|slyly regular requests. 
special 12613|28520|6027|1|17|24624.84|0.10|0.04|A|F|1993-04-18|1993-06-04|1993-05-06|DELIVER IN PERSON|REG AIR|ic theodolites around the slyly even pac 12613|128121|5658|2|9|10342.08|0.03|0.07|A|F|1993-04-20|1993-05-26|1993-05-15|COLLECT COD|TRUCK|ajole blithely even p 12613|118337|8338|3|9|12197.97|0.06|0.02|R|F|1993-04-16|1993-07-03|1993-05-12|COLLECT COD|RAIL|s sleep careful 12613|86967|1984|4|35|68388.60|0.04|0.06|R|F|1993-04-24|1993-05-18|1993-04-27|NONE|TRUCK|osits wake blithely slyly u 12613|49408|1913|5|11|14931.40|0.10|0.01|R|F|1993-08-01|1993-07-07|1993-08-06|NONE|AIR|iously above the blithely iro 12614|153828|6344|1|25|47045.50|0.02|0.05|N|O|1996-05-16|1996-05-25|1996-06-03|NONE|FOB|s. ironic, even packages should h 12614|178895|8896|2|18|35530.02|0.08|0.07|N|O|1996-04-27|1996-04-12|1996-05-08|TAKE BACK RETURN|FOB|s by the idly ironic instru 12614|87523|5048|3|39|58910.28|0.10|0.06|N|O|1996-03-07|1996-04-20|1996-03-28|NONE|RAIL|ccounts across the blithely bol 12614|196119|3677|4|35|42528.85|0.01|0.01|N|O|1996-05-30|1996-05-27|1996-06-14|DELIVER IN PERSON|REG AIR| players haggle. special dinos sleep 12614|155234|7750|5|46|59304.58|0.03|0.00|N|O|1996-04-19|1996-04-25|1996-05-19|NONE|RAIL|out the furiously special re 12614|4089|6590|6|11|10923.88|0.09|0.04|N|O|1996-06-09|1996-04-15|1996-06-20|DELIVER IN PERSON|FOB|instructions. regular packages sleep ac 12615|140527|528|1|16|25080.32|0.05|0.05|A|F|1995-04-19|1995-05-18|1995-05-04|COLLECT COD|TRUCK|ously final packages d 12640|195843|882|1|21|40715.64|0.01|0.03|N|O|1998-07-31|1998-09-05|1998-08-05|NONE|TRUCK|uriously ironic deposit 12640|120115|116|2|33|37458.63|0.02|0.03|N|O|1998-09-26|1998-10-14|1998-10-24|NONE|RAIL|carefully quick platelets. 12641|35087|94|1|33|33728.64|0.03|0.05|A|F|1995-05-01|1995-05-14|1995-05-29|TAKE BACK RETURN|AIR|d instructions cajole sl 12641|94963|9982|2|7|13705.72|0.04|0.03|A|F|1995-04-30|1995-04-18|1995-05-24|COLLECT COD|MAIL|deposits haggle bold, 12641|41555|9068|3|24|35917.20|0.03|0.05|R|F|1995-04-01|1995-04-17|1995-04-04|DELIVER IN PERSON|FOB|luffily reg 12641|198862|1382|4|3|5882.58|0.07|0.03|R|F|1995-02-26|1995-05-06|1995-03-19|COLLECT COD|REG AIR|o the fluffil 12641|68162|3175|5|39|44076.24|0.04|0.03|A|F|1995-04-14|1995-03-28|1995-05-11|COLLECT COD|TRUCK|encies. reg 12641|194139|1697|6|46|56723.98|0.06|0.08|N|F|1995-06-17|1995-04-10|1995-07-11|TAKE BACK RETURN|SHIP| platelets. courts thras 12642|24569|7072|1|40|59742.40|0.04|0.01|R|F|1994-07-08|1994-07-03|1994-07-15|COLLECT COD|AIR|y. furiously quick pack 12642|121633|4146|2|13|21510.19|0.08|0.08|A|F|1994-08-23|1994-05-28|1994-09-20|TAKE BACK RETURN|SHIP| pending packages cajole slyly i 12642|74074|1596|3|1|1048.07|0.07|0.01|R|F|1994-06-01|1994-07-01|1994-06-11|COLLECT COD|RAIL|ts. regular, expr 12642|160876|877|4|37|71664.19|0.00|0.05|R|F|1994-07-07|1994-07-25|1994-07-19|DELIVER IN PERSON|RAIL|o beans boost furiously against the pint 12642|32032|9542|5|20|19280.60|0.01|0.04|R|F|1994-06-12|1994-05-29|1994-07-04|DELIVER IN PERSON|MAIL|. dependenci 12642|152981|8012|6|26|52883.48|0.03|0.07|A|F|1994-08-08|1994-07-13|1994-08-27|COLLECT COD|AIR|e the final pinto beans. packages hag 12642|140806|3321|7|50|92340.00|0.04|0.03|A|F|1994-06-09|1994-06-15|1994-06-17|COLLECT COD|RAIL|ckages. 
sile 12643|118903|8904|1|25|48047.50|0.03|0.00|A|F|1993-05-25|1993-06-13|1993-06-10|DELIVER IN PERSON|REG AIR|e carefully 12643|187258|4813|2|40|53810.00|0.09|0.06|R|F|1993-07-13|1993-05-04|1993-07-27|COLLECT COD|TRUCK|kages cajole blithely sly 12643|29063|4068|3|25|24801.50|0.07|0.06|R|F|1993-04-15|1993-04-26|1993-05-09|TAKE BACK RETURN|MAIL| requests. express p 12644|109752|7283|1|19|33473.25|0.09|0.04|A|F|1992-02-07|1992-03-31|1992-02-24|DELIVER IN PERSON|AIR|r furiously final packa 12644|192133|4653|2|39|47780.07|0.01|0.05|R|F|1992-04-21|1992-03-27|1992-04-30|COLLECT COD|FOB|t quickly after the ironic, final asymptote 12644|170803|8355|3|32|59961.60|0.06|0.05|A|F|1992-02-25|1992-04-07|1992-03-08|DELIVER IN PERSON|RAIL|osits detect against the quickly ironic 12644|72938|2939|4|5|9554.65|0.03|0.00|A|F|1992-04-23|1992-04-23|1992-04-25|COLLECT COD|FOB|ts boost at the ironic br 12645|112611|5123|1|17|27601.37|0.01|0.00|A|F|1994-08-25|1994-08-13|1994-09-20|COLLECT COD|RAIL|iet, bold requests wake. care 12645|44527|7032|2|34|50031.68|0.03|0.06|R|F|1994-09-07|1994-08-27|1994-09-12|DELIVER IN PERSON|RAIL|es dazzle carefully. quick, unusual p 12645|150594|8140|3|32|52626.88|0.10|0.02|A|F|1994-09-21|1994-08-06|1994-09-24|NONE|FOB|sly along the regular deposits. even, 12645|17146|2149|4|11|11694.54|0.01|0.06|A|F|1994-07-12|1994-09-20|1994-07-23|NONE|AIR|. blithely special theodolites according t 12645|162601|7634|5|50|83180.00|0.09|0.00|A|F|1994-11-01|1994-09-14|1994-11-16|DELIVER IN PERSON|MAIL|inal requests impress fluffily slyly ir 12645|29495|7002|6|8|11395.92|0.03|0.01|R|F|1994-10-08|1994-09-25|1994-10-27|TAKE BACK RETURN|AIR|bout the furiously regular depos 12645|122447|9984|7|32|47022.08|0.07|0.03|R|F|1994-10-20|1994-09-04|1994-11-19|TAKE BACK RETURN|AIR|theodolites slee 12646|157739|255|1|35|62885.55|0.07|0.06|A|F|1993-05-20|1993-04-08|1993-05-22|DELIVER IN PERSON|FOB|l requests. bold, bold instructions a 12646|113662|8685|2|29|48594.14|0.06|0.03|A|F|1993-03-02|1993-05-08|1993-03-15|COLLECT COD|TRUCK|nstructions. quickly express requ 12646|106505|6506|3|40|60460.00|0.05|0.00|R|F|1993-05-16|1993-04-07|1993-05-19|DELIVER IN PERSON|TRUCK|pinto beans sol 12647|30132|5139|1|32|33988.16|0.07|0.07|N|O|1997-06-02|1997-05-15|1997-06-10|NONE|RAIL|thely bold requests. ironic, even cour 12672|171515|6550|1|6|9519.06|0.01|0.05|R|F|1994-09-14|1994-08-30|1994-09-17|DELIVER IN PERSON|FOB|y ironic ideas. evenly special orbits int 12672|45406|2919|2|20|27028.00|0.02|0.02|A|F|1994-08-09|1994-08-16|1994-09-03|NONE|AIR|beans cajole blithel 12673|132103|4617|1|18|20431.80|0.07|0.07|R|F|1994-08-22|1994-08-31|1994-09-20|COLLECT COD|SHIP|ckages haggle c 12674|145220|7735|1|29|36691.38|0.08|0.00|A|F|1992-09-21|1992-08-31|1992-10-01|DELIVER IN PERSON|AIR|furiously unusu 12674|110135|7669|2|21|24047.73|0.08|0.06|R|F|1992-08-19|1992-09-06|1992-08-23|NONE|RAIL|c accounts are carefully 12674|22784|5287|3|21|35842.38|0.01|0.03|R|F|1992-10-09|1992-08-14|1992-10-20|TAKE BACK RETURN|RAIL| fluffy deposits. quickly bold excus 12674|114882|7394|4|9|17071.92|0.01|0.01|A|F|1992-08-19|1992-09-28|1992-09-13|TAKE BACK RETURN|MAIL|al accounts. furiously express pinto beans 12674|17757|5261|5|6|10048.50|0.00|0.03|R|F|1992-08-14|1992-10-01|1992-08-27|TAKE BACK RETURN|FOB|quickly special f 12675|77350|9858|1|17|22564.95|0.06|0.08|R|F|1994-02-03|1994-03-19|1994-02-12|NONE|FOB|ites. 
instructions use always accordin 12675|8375|5876|2|35|44917.95|0.08|0.01|A|F|1994-02-11|1994-03-08|1994-02-28|DELIVER IN PERSON|MAIL|de of the slyly bold accounts sleep ag 12675|150566|567|3|41|66278.96|0.05|0.07|A|F|1994-05-11|1994-03-06|1994-05-15|DELIVER IN PERSON|FOB|ly regular platelets haggle carefully reg 12675|160177|7726|4|26|32166.42|0.08|0.00|R|F|1994-05-10|1994-03-24|1994-06-06|TAKE BACK RETURN|AIR|ns. furiousl 12676|131174|1175|1|5|6025.85|0.03|0.00|A|F|1994-04-17|1994-03-10|1994-04-23|NONE|AIR|ly final theodolites. even instructi 12677|139233|1747|1|30|38166.90|0.10|0.03|N|O|1996-08-12|1996-08-12|1996-08-15|DELIVER IN PERSON|SHIP|onic platelets. carefully pending ideas abo 12677|76340|1355|2|13|17112.42|0.03|0.00|N|O|1996-08-17|1996-08-14|1996-08-22|TAKE BACK RETURN|FOB| carefully special requests detect. re 12677|39348|6858|3|21|27034.14|0.01|0.00|N|O|1996-06-28|1996-07-10|1996-07-15|TAKE BACK RETURN|AIR|bold pinto beans. ca 12677|107228|9739|4|40|49408.80|0.06|0.05|N|O|1996-09-07|1996-07-07|1996-09-08|NONE|MAIL|even asymptotes cajole. sly 12677|176826|9344|5|17|32347.94|0.05|0.03|N|O|1996-07-12|1996-08-07|1996-07-19|COLLECT COD|TRUCK|slyly ironic forges 12677|103217|748|6|41|50028.61|0.07|0.04|N|O|1996-07-07|1996-08-12|1996-07-19|COLLECT COD|MAIL|refully. slyly final instructions wake. fu 12678|145133|5134|1|10|11781.30|0.10|0.05|N|O|1998-05-13|1998-04-03|1998-06-10|TAKE BACK RETURN|FOB|ly bold multipliers. fluffil 12678|34105|1615|2|5|5195.50|0.10|0.03|N|O|1998-05-26|1998-04-22|1998-06-03|NONE|MAIL|ckages are slyly above the slyly bold i 12678|190553|5592|3|16|26296.80|0.03|0.03|N|O|1998-03-25|1998-04-26|1998-03-26|NONE|RAIL|ial deposits nag slyly af 12678|17932|5436|4|25|46248.25|0.00|0.06|N|O|1998-05-26|1998-04-18|1998-05-27|DELIVER IN PERSON|TRUCK|ackages could have to integr 12678|178827|6379|5|1|1905.82|0.01|0.05|N|O|1998-04-16|1998-03-25|1998-05-09|TAKE BACK RETURN|RAIL|ccounts maintain slyly about the q 12679|3866|1367|1|3|5309.58|0.10|0.06|N|O|1997-10-05|1997-08-24|1997-10-30|TAKE BACK RETURN|SHIP|ular, unusual ideas wake alongside of t 12679|159368|4399|2|50|71368.00|0.05|0.03|N|O|1997-07-04|1997-09-15|1997-07-20|DELIVER IN PERSON|SHIP|ccounts across 12704|134845|4846|1|21|39476.64|0.09|0.02|A|F|1993-02-20|1993-05-05|1993-03-09|COLLECT COD|RAIL|posits. regular deposits cajol 12704|133122|5636|2|30|34653.60|0.10|0.06|R|F|1993-04-18|1993-04-24|1993-05-08|TAKE BACK RETURN|SHIP| pending theo 12704|179274|9275|3|28|37891.56|0.07|0.03|A|F|1993-06-07|1993-03-21|1993-06-27|COLLECT COD|RAIL|riously ironic excuses. carefully ex 12704|43593|8602|4|11|16902.49|0.05|0.03|A|F|1993-02-14|1993-04-25|1993-02-19|NONE|MAIL|lly even pack 12704|64000|9013|5|16|15424.00|0.10|0.06|A|F|1993-04-29|1993-04-03|1993-05-08|TAKE BACK RETURN|RAIL| are slyly. carefully daring account 12705|136908|9422|1|40|77796.00|0.01|0.08|R|F|1995-01-05|1994-12-08|1995-01-30|TAKE BACK RETURN|SHIP| instructions. furiously unusual th 12705|3107|3108|2|24|24242.40|0.09|0.01|R|F|1994-10-15|1994-11-20|1994-10-22|TAKE BACK RETURN|FOB|- stealthily even requests 12706|190600|8158|1|32|54099.20|0.02|0.04|R|F|1995-01-04|1994-12-24|1995-01-16|NONE|RAIL|leep carefully. 
even 12706|3401|5902|2|41|53480.40|0.09|0.08|A|F|1995-02-24|1995-01-07|1995-03-16|NONE|MAIL|ss the final, even asympt 12706|148013|528|3|16|16976.16|0.02|0.02|A|F|1995-03-18|1994-12-27|1995-03-19|TAKE BACK RETURN|TRUCK|eposits around the express, special 12706|40264|7777|4|30|36127.80|0.06|0.05|A|F|1995-01-12|1995-01-16|1995-01-15|COLLECT COD|TRUCK|lly pending asymptote 12707|168678|6227|1|17|29693.39|0.09|0.06|R|F|1993-08-15|1993-07-21|1993-09-07|TAKE BACK RETURN|AIR|g carefully final theodolites. blit 12707|169022|9023|2|47|51277.94|0.07|0.02|A|F|1993-07-07|1993-07-25|1993-07-08|NONE|MAIL|requests. slyly ironic accounts about th 12707|150327|7873|3|11|15150.52|0.06|0.01|A|F|1993-07-31|1993-08-01|1993-08-21|COLLECT COD|REG AIR|roughout the dogg 12707|135662|689|4|35|59418.10|0.04|0.02|R|F|1993-08-04|1993-07-03|1993-08-29|COLLECT COD|RAIL|gle above the silent, bold dependenci 12707|62504|23|5|47|68925.50|0.04|0.06|A|F|1993-09-05|1993-08-01|1993-09-26|COLLECT COD|RAIL|efully regular platelets. fl 12707|96895|9405|6|13|24594.57|0.02|0.03|R|F|1993-08-08|1993-07-01|1993-08-10|COLLECT COD|REG AIR|ges. regular packages cajole furiously. c 12707|110996|6019|7|1|2006.99|0.07|0.06|A|F|1993-08-11|1993-07-31|1993-08-13|TAKE BACK RETURN|MAIL|y ironic accounts. blithely pending ideas i 12708|98516|6044|1|14|21203.14|0.02|0.05|R|F|1993-03-08|1993-02-24|1993-03-22|DELIVER IN PERSON|MAIL|iously pending deposits 12708|149517|9518|2|20|31330.20|0.04|0.05|A|F|1993-03-06|1993-03-03|1993-03-24|TAKE BACK RETURN|MAIL|ly unusual requests. quickly regular re 12708|128863|1376|3|49|92701.14|0.10|0.08|A|F|1993-01-10|1993-03-07|1993-01-26|DELIVER IN PERSON|MAIL|among the quickly r 12708|194950|4951|4|44|89977.80|0.04|0.08|A|F|1993-03-03|1993-03-07|1993-03-26|TAKE BACK RETURN|FOB| carefully ironic pin 12708|124533|2070|5|10|15575.30|0.06|0.06|R|F|1993-01-25|1993-03-21|1993-02-11|TAKE BACK RETURN|TRUCK|ly hockey players. slyly unusual dep 12708|21766|1767|6|7|11814.32|0.02|0.03|A|F|1993-02-03|1993-03-17|1993-02-15|TAKE BACK RETURN|TRUCK|sts. slyly even packages cajole closely. ca 12708|10380|381|7|50|64519.00|0.09|0.03|R|F|1993-03-11|1993-03-15|1993-04-03|TAKE BACK RETURN|RAIL|ages nag car 12709|55306|2822|1|17|21442.10|0.09|0.01|N|O|1996-08-01|1996-06-17|1996-08-21|COLLECT COD|REG AIR|daring instructions use. slyly unusual i 12709|121235|3748|2|49|61555.27|0.08|0.03|N|O|1996-07-06|1996-07-07|1996-07-18|DELIVER IN PERSON|FOB|s. slyly ironic de 12710|32123|7130|1|13|13716.56|0.04|0.06|A|F|1993-11-25|1993-10-07|1993-12-07|COLLECT COD|FOB| sleep blithely across the requests. even 12710|84517|2042|2|28|42042.28|0.03|0.04|R|F|1993-12-10|1993-11-07|1993-12-17|COLLECT COD|AIR| dependencies alongside of the f 12710|65338|7845|3|49|63863.17|0.10|0.03|R|F|1993-11-18|1993-11-06|1993-11-26|NONE|RAIL|to beans kindle blithel 12710|119919|7453|4|21|40717.11|0.09|0.03|A|F|1993-12-01|1993-11-14|1993-12-30|COLLECT COD|SHIP|accounts. 
furiously sp 12710|64654|7161|5|49|79313.85|0.06|0.04|A|F|1993-09-27|1993-09-24|1993-10-27|DELIVER IN PERSON|AIR|n accounts according to the ironic deposi 12710|23538|3539|6|41|59922.73|0.03|0.06|R|F|1993-12-16|1993-10-30|1994-01-13|TAKE BACK RETURN|REG AIR|ans nag carefully express 12710|193054|5574|7|14|16058.70|0.05|0.04|A|F|1993-12-06|1993-10-10|1994-01-01|TAKE BACK RETURN|SHIP|xcuses nag carefu 12711|53211|8222|1|9|10477.89|0.06|0.01|A|F|1992-06-20|1992-05-15|1992-06-22|COLLECT COD|AIR|ag slyly against the ironic 12711|113418|5930|2|48|68707.68|0.09|0.04|A|F|1992-06-11|1992-04-16|1992-06-27|TAKE BACK RETURN|RAIL|symptotes use across the 12711|7927|7928|3|17|31193.64|0.02|0.06|A|F|1992-06-01|1992-04-19|1992-06-11|DELIVER IN PERSON|SHIP|ges are furiously ironic ideas. furiously 12711|14804|2308|4|4|6875.20|0.09|0.07|A|F|1992-04-09|1992-03-27|1992-04-27|TAKE BACK RETURN|SHIP|ly unusual foxes use slyly across th 12736|44178|4179|1|26|29176.42|0.09|0.02|R|F|1993-04-28|1993-05-31|1993-05-06|NONE|MAIL|ke furiously furi 12736|93561|6071|2|33|51300.48|0.09|0.07|R|F|1993-05-31|1993-05-21|1993-06-22|NONE|AIR|nts solve fluffily about the caref 12736|72173|9695|3|18|20613.06|0.03|0.05|R|F|1993-04-20|1993-04-28|1993-04-26|DELIVER IN PERSON|REG AIR| wake. quickly st 12736|198662|3701|4|48|84511.68|0.10|0.07|A|F|1993-06-13|1993-04-15|1993-07-11|TAKE BACK RETURN|MAIL|s-- quickly special packages shall 12736|96428|6429|5|48|68372.16|0.10|0.06|R|F|1993-04-14|1993-04-10|1993-05-11|NONE|TRUCK|out the idle ideas. regular, even ideas ca 12736|52557|73|6|37|55853.35|0.01|0.00|R|F|1993-06-02|1993-05-28|1993-06-30|TAKE BACK RETURN|TRUCK|ly. carefully regular r 12737|58954|6470|1|3|5738.85|0.06|0.02|R|F|1994-05-12|1994-06-25|1994-05-18|NONE|MAIL|es are ironically carefu 12737|32159|4663|2|9|9820.35|0.02|0.07|R|F|1994-05-14|1994-07-03|1994-05-27|NONE|AIR|e quietly along the furi 12737|90506|5525|3|50|74825.00|0.08|0.07|A|F|1994-08-16|1994-07-16|1994-08-22|DELIVER IN PERSON|FOB|rash according to the slyly final instruc 12737|131043|3557|4|42|45109.68|0.03|0.04|R|F|1994-08-11|1994-07-27|1994-09-07|TAKE BACK RETURN|REG AIR|odolites. blithely regul 12737|35532|5533|5|50|73376.50|0.09|0.04|R|F|1994-06-09|1994-06-16|1994-06-21|COLLECT COD|RAIL|ts detect slyly slowly even 12737|177448|7449|6|22|33559.68|0.01|0.05|A|F|1994-08-08|1994-07-23|1994-08-12|TAKE BACK RETURN|FOB|into beans about the furiously eve 12737|164517|4518|7|45|71167.95|0.10|0.06|R|F|1994-05-26|1994-06-28|1994-06-07|TAKE BACK RETURN|REG AIR|nal pinto beans use along 12738|27684|187|1|11|17728.48|0.01|0.07|N|O|1998-07-12|1998-08-25|1998-08-08|TAKE BACK RETURN|RAIL|sual reque 12738|80209|7734|2|16|19027.20|0.02|0.03|N|O|1998-07-03|1998-08-03|1998-07-04|COLLECT COD|FOB|r platelets. final dependencies among th 12738|65353|5354|3|24|31640.40|0.03|0.06|N|O|1998-08-21|1998-08-31|1998-09-09|NONE|SHIP|de of the dep 12738|185193|2748|4|37|47293.03|0.10|0.06|N|O|1998-09-25|1998-07-31|1998-10-18|TAKE BACK RETURN|AIR|c foxes use according to t 12738|137983|497|5|32|64671.36|0.01|0.01|N|O|1998-09-04|1998-08-16|1998-10-02|COLLECT COD|FOB|s are quickly ent 12739|198723|6281|1|30|54651.60|0.08|0.08|A|F|1992-05-15|1992-05-10|1992-05-26|COLLECT COD|REG AIR| final requests serve blithely blithely 12740|62109|7122|1|41|43915.10|0.04|0.03|N|O|1997-08-19|1997-07-12|1997-09-09|DELIVER IN PERSON|RAIL| pending requests. regular depo 12741|132436|7463|1|41|60205.63|0.00|0.02|A|F|1992-10-17|1992-09-06|1992-11-11|TAKE BACK RETURN|MAIL|of the quickly regular foxes. 
12741|49800|4809|2|27|47244.60|0.10|0.04|A|F|1992-09-10|1992-10-04|1992-10-06|DELIVER IN PERSON|AIR|usual accounts. e 12741|177611|5163|3|6|10131.66|0.06|0.06|A|F|1992-07-25|1992-09-16|1992-08-03|DELIVER IN PERSON|RAIL|wake even courts. f 12741|92129|9657|4|1|1121.12|0.05|0.07|A|F|1992-09-11|1992-08-24|1992-09-13|DELIVER IN PERSON|RAIL|ggle above the regular, ironic requests. as 12741|198842|3881|5|32|62106.88|0.01|0.07|R|F|1992-09-24|1992-08-20|1992-10-08|TAKE BACK RETURN|AIR| express theodolite 12742|78600|1108|1|22|34729.20|0.03|0.01|N|O|1997-08-15|1997-07-11|1997-08-20|TAKE BACK RETURN|AIR| integrate carefully pinto 12743|42944|5449|1|32|60382.08|0.05|0.00|A|F|1993-10-07|1993-09-06|1993-10-31|NONE|REG AIR|ies sleep carefully slyly special r 12743|144731|2274|2|11|19533.03|0.09|0.08|A|F|1993-07-24|1993-08-20|1993-08-04|COLLECT COD|AIR|aters are thinly pending 12743|80671|8196|3|22|36336.74|0.09|0.06|A|F|1993-08-23|1993-08-07|1993-09-22|TAKE BACK RETURN|SHIP|al theodolites. slyly even foxes atop 12743|33477|8484|4|9|12694.23|0.09|0.02|R|F|1993-08-07|1993-09-06|1993-08-11|COLLECT COD|AIR|kages boost slyly up the fin 12768|16354|6355|1|25|31758.75|0.06|0.06|R|F|1994-05-01|1994-06-01|1994-05-31|TAKE BACK RETURN|MAIL|ests cajole furiously pearls. carefully 12768|46995|2004|2|18|34955.82|0.03|0.08|R|F|1994-04-15|1994-06-09|1994-04-27|TAKE BACK RETURN|TRUCK|ccording to the unusual 12768|35810|817|3|17|29678.77|0.08|0.02|R|F|1994-05-26|1994-05-25|1994-06-05|NONE|RAIL|theodolites cajole b 12768|55066|77|4|21|21442.26|0.00|0.04|A|F|1994-07-15|1994-06-10|1994-08-02|COLLECT COD|FOB| of the regular pack 12768|80508|509|5|10|14885.00|0.06|0.07|R|F|1994-04-23|1994-05-10|1994-05-21|DELIVER IN PERSON|AIR|ly alongside o 12769|191206|1207|1|16|20755.20|0.07|0.08|R|F|1992-04-01|1992-03-24|1992-04-21|NONE|RAIL|ven Tiresias cajole qui 12770|45953|8458|1|3|5696.85|0.09|0.02|A|F|1993-07-22|1993-06-15|1993-08-04|NONE|RAIL|even pinto beans haggle regular, express a 12770|116655|4189|2|17|28418.05|0.02|0.08|R|F|1993-07-11|1993-07-11|1993-08-04|NONE|TRUCK|iously even instructions. furiously 12770|10139|5142|3|49|51407.37|0.05|0.04|A|F|1993-04-28|1993-05-20|1993-04-29|DELIVER IN PERSON|FOB|ly final pinto beans about 12770|190091|92|4|4|4724.36|0.01|0.02|A|F|1993-06-08|1993-05-18|1993-07-08|COLLECT COD|TRUCK| fluffily pend 12770|53014|8025|5|37|35779.37|0.10|0.06|A|F|1993-07-09|1993-07-04|1993-07-27|DELIVER IN PERSON|MAIL| even deposits wake bravely 12771|59508|2014|1|30|44025.00|0.08|0.06|R|F|1994-04-14|1994-03-03|1994-04-21|COLLECT COD|SHIP|s. furiously final requests use slyly blith 12771|148248|763|2|28|36294.72|0.09|0.06|A|F|1994-04-03|1994-03-09|1994-04-22|TAKE BACK RETURN|SHIP|ss pinto beans. slyly specia 12771|196779|9299|3|28|52521.56|0.01|0.07|R|F|1994-02-16|1994-03-31|1994-03-03|TAKE BACK RETURN|SHIP|ong the quickly furious instruct 12771|55018|2534|4|41|39893.41|0.06|0.03|A|F|1994-04-09|1994-02-21|1994-05-06|TAKE BACK RETURN|AIR|ole quickly. carefully final foxes use 12771|77197|2212|5|5|5870.95|0.02|0.08|A|F|1994-04-27|1994-04-06|1994-05-25|NONE|REG AIR|the blithely 12772|7826|2827|1|37|64151.34|0.09|0.02|R|F|1995-05-17|1995-07-03|1995-05-19|DELIVER IN PERSON|MAIL| furiously reg 12772|116247|6248|2|34|42950.16|0.07|0.00|N|O|1995-07-24|1995-08-07|1995-08-06|DELIVER IN PERSON|SHIP|ake slyly against the r 12772|160715|716|3|44|78131.24|0.02|0.03|N|O|1995-07-14|1995-08-06|1995-07-18|NONE|TRUCK|pendencies. 
even, enticing 12772|122044|9581|4|32|34113.28|0.01|0.04|R|F|1995-06-07|1995-06-16|1995-06-13|TAKE BACK RETURN|TRUCK|s. fluffily ironic pinto beans at 12772|178615|3650|5|30|50808.30|0.04|0.03|N|O|1995-07-15|1995-06-26|1995-07-21|NONE|SHIP|lly final packages engage carefully reg 12773|179116|9117|1|17|20316.87|0.06|0.08|R|F|1995-02-04|1995-04-02|1995-02-09|NONE|AIR| accounts nag sl 12773|189160|9161|2|39|48717.24|0.01|0.06|R|F|1995-02-25|1995-03-12|1995-03-27|COLLECT COD|TRUCK|. slyly bold requests 12773|197469|2508|3|38|59525.48|0.05|0.02|A|F|1995-03-18|1995-03-13|1995-03-24|NONE|TRUCK| requests. 12773|53351|867|4|16|20869.60|0.08|0.03|A|F|1995-03-09|1995-02-28|1995-03-13|TAKE BACK RETURN|RAIL|inly thin Tiresias. furiou 12773|65254|7761|5|41|49989.25|0.07|0.02|R|F|1995-03-31|1995-03-03|1995-04-18|NONE|SHIP|s; quickly ironic hockey players cajol 12773|123724|6237|6|23|40197.56|0.02|0.06|R|F|1995-04-01|1995-03-27|1995-04-11|TAKE BACK RETURN|REG AIR|kly furiously final theodolites. ironic d 12773|23092|3093|7|20|20301.80|0.03|0.07|R|F|1995-03-09|1995-03-26|1995-03-25|COLLECT COD|FOB|ckly regular packages. final 12774|105359|2890|1|16|21829.60|0.08|0.05|R|F|1993-01-09|1992-12-25|1993-01-19|DELIVER IN PERSON|FOB|refully boldly 12774|152390|2391|2|29|41829.31|0.04|0.04|A|F|1992-10-24|1992-11-27|1992-11-08|DELIVER IN PERSON|RAIL|ts are furiously: furiously even ac 12774|2572|73|3|5|7372.85|0.04|0.04|R|F|1993-02-07|1992-12-13|1993-02-28|DELIVER IN PERSON|TRUCK|ly bold deposits across t 12775|84317|4318|1|45|58558.95|0.02|0.00|N|O|1996-12-06|1996-12-07|1996-12-25|TAKE BACK RETURN|RAIL| integrate furiously along th 12775|103430|8451|2|9|12900.87|0.06|0.02|N|O|1996-10-24|1996-12-11|1996-10-31|DELIVER IN PERSON|AIR| express hockey players sleep fur 12775|137183|2210|3|33|40265.94|0.05|0.01|N|O|1996-11-09|1996-11-17|1996-11-25|COLLECT COD|TRUCK| unusual ideas. care 12775|33483|3484|4|2|2832.96|0.10|0.05|N|O|1996-10-16|1996-12-21|1996-10-28|NONE|TRUCK|lyly final excuses. carefully regular 12800|152806|7837|1|47|87363.60|0.01|0.06|R|F|1993-07-12|1993-07-23|1993-07-27|TAKE BACK RETURN|MAIL|equests. slyly regular accounts could 12800|193965|3966|2|43|88535.28|0.03|0.07|R|F|1993-09-19|1993-08-17|1993-10-01|DELIVER IN PERSON|TRUCK|s. blithely even foxes impress above 12800|163839|3840|3|27|51376.41|0.08|0.06|R|F|1993-06-12|1993-07-02|1993-06-19|DELIVER IN PERSON|REG AIR|icingly bold fo 12800|30986|987|4|24|46007.52|0.10|0.04|R|F|1993-07-20|1993-06-25|1993-08-07|COLLECT COD|FOB|itaphs. express, unusual instructions 12800|179110|9111|5|47|55888.17|0.02|0.05|A|F|1993-08-23|1993-08-02|1993-09-22|NONE|AIR|refully bold requests should 12801|42447|9960|1|42|58356.48|0.08|0.07|N|O|1997-08-30|1997-10-01|1997-09-03|COLLECT COD|AIR|old, final deposits 12801|6684|4185|2|1|1590.68|0.04|0.05|N|O|1997-09-10|1997-10-27|1997-10-01|COLLECT COD|SHIP|ve the express, final foxes. 12801|136092|1119|3|39|43995.51|0.03|0.03|N|O|1997-10-17|1997-11-15|1997-11-01|NONE|FOB|usly blithely final i 12801|93165|3166|4|45|52117.20|0.01|0.07|N|O|1997-09-13|1997-10-21|1997-09-18|DELIVER IN PERSON|RAIL|ong the final acc 12801|7371|4872|5|35|44742.95|0.02|0.01|N|O|1997-09-03|1997-10-03|1997-09-27|COLLECT COD|REG AIR|ously unus 12802|88993|4010|1|43|85225.57|0.02|0.02|N|O|1998-05-27|1998-05-17|1998-06-19|COLLECT COD|MAIL|s haggle furiously after th 12802|105348|369|2|28|37893.52|0.09|0.01|N|O|1998-03-29|1998-04-25|1998-04-03|TAKE BACK RETURN|REG AIR|he final ideas. 
blithely fi 12802|181975|9530|3|17|34968.49|0.04|0.04|N|O|1998-04-27|1998-04-06|1998-05-06|NONE|AIR|unts wake. even, final 12802|46158|6159|4|22|24291.30|0.02|0.04|N|O|1998-04-29|1998-05-26|1998-05-16|DELIVER IN PERSON|MAIL|. ironic deposits cajole fluffily 12802|174094|4095|5|44|51395.96|0.00|0.00|N|O|1998-06-29|1998-05-20|1998-07-16|DELIVER IN PERSON|SHIP|lyly bold deposits. 12803|4494|6995|1|16|22375.84|0.08|0.04|R|F|1993-03-30|1993-05-24|1993-04-18|TAKE BACK RETURN|AIR|leep across the furiously pending courts 12803|116025|1048|2|13|13533.26|0.10|0.04|A|F|1993-03-12|1993-05-24|1993-03-20|TAKE BACK RETURN|AIR|thogs cajole along the pe 12804|22965|472|1|50|94398.00|0.02|0.04|R|F|1992-09-17|1992-07-25|1992-10-13|TAKE BACK RETURN|MAIL|. express, special excuses doubt. carefully 12804|165842|875|2|24|45788.16|0.09|0.05|A|F|1992-07-08|1992-08-04|1992-07-15|NONE|AIR| ironic dolphins cajo 12804|98939|6467|3|40|77517.20|0.01|0.04|A|F|1992-08-19|1992-08-02|1992-09-04|TAKE BACK RETURN|SHIP|affix after the even, special account 12804|59949|4960|4|49|93538.06|0.04|0.06|A|F|1992-06-25|1992-08-12|1992-07-04|DELIVER IN PERSON|SHIP|thlessly acc 12804|155838|3384|5|19|35982.77|0.00|0.04|R|F|1992-08-13|1992-06-25|1992-09-01|NONE|MAIL|l asymptotes. final reques 12804|155724|755|6|46|81867.12|0.03|0.08|R|F|1992-07-22|1992-08-18|1992-08-01|DELIVER IN PERSON|RAIL|requests use around the even depo 12804|43149|8158|7|10|10921.40|0.06|0.02|R|F|1992-07-23|1992-07-09|1992-07-28|NONE|MAIL| foxes ought to ha 12805|190941|3461|1|9|18287.46|0.04|0.07|N|O|1997-02-13|1997-02-24|1997-02-15|COLLECT COD|FOB|nding, express courts 12805|135845|3385|2|24|45140.16|0.04|0.06|N|O|1997-02-16|1997-03-10|1997-02-22|NONE|REG AIR|e regular, regular packages. ex 12805|142936|5451|3|23|45515.39|0.09|0.03|N|O|1997-02-25|1997-02-13|1997-03-08|NONE|REG AIR|le quickly 12806|120141|7678|1|1|1161.14|0.00|0.06|N|O|1996-01-18|1996-02-21|1996-01-25|TAKE BACK RETURN|FOB|, special deposits nag after 12806|195941|8461|2|44|89625.36|0.07|0.07|N|O|1996-03-03|1996-01-25|1996-03-28|COLLECT COD|SHIP|kages. platelets dazzle c 12806|168953|6502|3|41|82899.95|0.04|0.03|N|O|1996-01-16|1996-03-19|1996-01-21|COLLECT COD|TRUCK| ironic platelets are 12806|177495|7496|4|14|22014.86|0.08|0.06|N|O|1995-12-30|1996-02-26|1996-01-06|TAKE BACK RETURN|TRUCK|ly regular requests. 12806|156267|6268|5|3|3969.78|0.00|0.03|N|O|1996-03-03|1996-02-02|1996-03-20|DELIVER IN PERSON|TRUCK|patterns cajole sly 12806|169099|6648|6|21|24529.89|0.01|0.03|N|O|1996-01-15|1996-02-19|1996-01-25|TAKE BACK RETURN|AIR|nusual ideas sleep furiously 12807|130759|8299|1|7|12528.25|0.10|0.01|N|O|1998-04-14|1998-03-20|1998-05-13|NONE|SHIP|s the deposits use of 12807|63106|3107|2|16|17105.60|0.00|0.08|N|O|1998-03-19|1998-04-07|1998-04-01|COLLECT COD|REG AIR|rate carefully regular theodolites. 12832|142761|2762|1|49|88384.24|0.02|0.04|A|F|1995-03-13|1995-03-04|1995-03-18|NONE|RAIL|ag. carefully final 12833|76248|6249|1|11|13466.64|0.02|0.01|N|O|1998-08-10|1998-08-01|1998-09-08|TAKE BACK RETURN|FOB|ges affix furiously final courts. 12833|95041|60|2|47|48693.88|0.07|0.04|N|O|1998-09-27|1998-08-12|1998-10-22|NONE|RAIL|about the 12834|143389|5904|1|28|40106.64|0.06|0.03|N|O|1996-12-26|1996-12-03|1997-01-03|TAKE BACK RETURN|SHIP|p carefully. care 12834|125756|781|2|41|73051.75|0.05|0.01|N|O|1997-01-30|1997-01-28|1997-02-27|NONE|FOB|s. 
foxes among 12834|171836|9388|3|37|70589.71|0.06|0.05|N|O|1996-11-15|1996-12-29|1996-12-11|TAKE BACK RETURN|RAIL|theodolites s 12834|41907|1908|4|18|33280.20|0.04|0.01|N|O|1996-12-12|1996-12-20|1996-12-19|TAKE BACK RETURN|MAIL|lar gifts. blithely even escapades acr 12834|116607|1630|5|16|25977.60|0.03|0.06|N|O|1997-02-21|1996-12-17|1997-03-17|DELIVER IN PERSON|SHIP|the blithely even reque 12834|4373|9374|6|34|43430.58|0.01|0.01|N|O|1996-12-06|1996-12-25|1996-12-17|TAKE BACK RETURN|REG AIR|sly unusual, regular packages. ide 12834|90805|3315|7|26|46690.80|0.01|0.07|N|O|1996-12-24|1996-12-26|1996-12-30|TAKE BACK RETURN|MAIL|. ironic deposits haggle quickly p 12835|155946|8462|1|1|2001.94|0.08|0.07|N|O|1997-01-27|1997-03-27|1997-01-30|DELIVER IN PERSON|SHIP|ole stealthily blit 12835|183026|3027|2|15|16635.30|0.04|0.00|N|O|1997-03-24|1997-03-05|1997-03-31|TAKE BACK RETURN|REG AIR| accounts use along the fluffy 12835|112232|7255|3|11|13686.53|0.03|0.03|N|O|1997-05-13|1997-04-20|1997-05-19|DELIVER IN PERSON|FOB|quickly ironic accounts. blithely si 12835|72688|7703|4|33|54802.44|0.08|0.06|N|O|1997-05-09|1997-03-25|1997-05-28|DELIVER IN PERSON|AIR|xes are furiously after the express acc 12835|60377|5390|5|5|6686.85|0.04|0.02|N|O|1997-02-24|1997-02-22|1997-03-25|TAKE BACK RETURN|MAIL|sits. carefully final pinto 12835|144081|1624|6|5|5625.40|0.00|0.03|N|O|1997-03-10|1997-04-06|1997-03-30|COLLECT COD|RAIL|ely regular asymptotes: regular, e 12835|157696|5242|7|28|49103.32|0.07|0.00|N|O|1997-05-20|1997-04-17|1997-06-01|DELIVER IN PERSON|TRUCK|s nag furiously. regular packag 12836|183156|711|1|1|1239.15|0.04|0.03|N|O|1996-05-14|1996-06-23|1996-05-20|TAKE BACK RETURN|AIR|hely pending asymptotes? 12836|100898|5919|2|48|91146.72|0.07|0.06|N|O|1996-06-13|1996-06-30|1996-06-28|COLLECT COD|SHIP| the ruthless requests. fina 12836|183448|1003|3|33|50537.52|0.03|0.00|N|O|1996-07-08|1996-05-21|1996-08-04|NONE|SHIP|e the foxes snooze carefu 12836|4424|9425|4|44|58450.48|0.06|0.01|N|O|1996-07-10|1996-05-18|1996-07-14|COLLECT COD|SHIP|dolphins haggle blithely sometimes 12836|137694|7695|5|19|32902.11|0.01|0.01|N|O|1996-04-21|1996-06-22|1996-05-08|COLLECT COD|RAIL| pinto beans alongsi 12836|7386|9887|6|21|27160.98|0.09|0.03|N|O|1996-07-08|1996-06-12|1996-07-11|NONE|REG AIR|ns. ironic accounts could h 12836|132699|2700|7|31|53682.39|0.04|0.07|N|O|1996-05-11|1996-05-27|1996-05-30|NONE|TRUCK|arefully even packages. bold, final pint 12837|59547|2053|1|14|21091.56|0.10|0.03|A|F|1993-01-09|1993-01-26|1993-01-15|DELIVER IN PERSON|RAIL|g the pending deposits are so 12837|2342|2343|2|38|47284.92|0.05|0.07|R|F|1993-01-11|1993-02-24|1993-01-16|TAKE BACK RETURN|MAIL| the never regular foxes haggle blithely 12837|146346|3889|3|8|11138.72|0.00|0.00|R|F|1993-01-22|1993-01-17|1993-02-17|NONE|REG AIR|ckly ironic deposits. 12837|23184|8189|4|44|48715.92|0.08|0.02|R|F|1993-02-08|1993-01-03|1993-02-13|TAKE BACK RETURN|FOB|gage. slyly bold pinto 12838|189852|7407|1|21|40778.85|0.10|0.07|N|O|1998-07-17|1998-08-28|1998-07-25|DELIVER IN PERSON|AIR|nes sleep furiously against the 12838|42278|9791|2|27|32947.29|0.10|0.05|N|O|1998-09-05|1998-09-18|1998-09-28|DELIVER IN PERSON|MAIL|ways even deposits. 
furio 12838|165410|443|3|24|35409.84|0.07|0.00|N|O|1998-10-21|1998-08-26|1998-11-10|TAKE BACK RETURN|RAIL|to the bold packages x-ray slyly near 12839|163996|6513|1|26|53559.74|0.05|0.01|N|O|1998-01-23|1998-03-12|1998-02-08|COLLECT COD|RAIL|gly bold accoun 12839|77385|4907|2|34|46320.92|0.02|0.02|N|O|1998-02-16|1998-03-11|1998-03-06|DELIVER IN PERSON|AIR|e furiously 12839|8947|1448|3|2|3711.88|0.01|0.08|N|O|1998-04-26|1998-03-31|1998-05-26|NONE|MAIL|ages are c 12839|165815|8332|4|21|39497.01|0.00|0.07|N|O|1998-02-12|1998-03-01|1998-02-22|DELIVER IN PERSON|TRUCK| fluffily above the final theodolit 12839|45786|5787|5|5|8658.90|0.10|0.04|N|O|1998-02-14|1998-02-19|1998-03-11|DELIVER IN PERSON|AIR|express accounts b 12839|116785|4319|6|37|66665.86|0.08|0.08|N|O|1998-03-14|1998-03-31|1998-04-09|NONE|SHIP|lar deposits cajole 12864|146165|6166|1|5|6055.80|0.03|0.02|R|F|1994-08-05|1994-06-23|1994-08-09|TAKE BACK RETURN|TRUCK|uctions. slyly special pearls wake 12864|193889|1447|2|43|85263.84|0.04|0.03|R|F|1994-07-12|1994-07-17|1994-08-04|COLLECT COD|MAIL|e carefully final pinto beans cajole 12864|78885|3900|3|34|63371.92|0.00|0.00|A|F|1994-06-07|1994-07-31|1994-06-28|TAKE BACK RETURN|REG AIR|asymptotes lose quickly 12865|153942|8973|1|33|65866.02|0.04|0.00|N|O|1997-06-28|1997-07-18|1997-07-27|COLLECT COD|FOB|ickly final pinto beans haggle furiously 12865|99278|4297|2|1|1277.27|0.01|0.01|N|O|1997-07-13|1997-08-18|1997-08-07|COLLECT COD|REG AIR|ly special instructions about the qu 12865|161828|4345|3|50|94491.00|0.04|0.00|N|O|1997-09-17|1997-07-02|1997-10-16|NONE|AIR|uriously express dolphins haggle bli 12865|43084|8093|4|21|21568.68|0.08|0.02|N|O|1997-06-11|1997-07-07|1997-07-06|NONE|SHIP|riously pending theodolites. quickly 12865|97514|5042|5|32|48368.32|0.00|0.05|N|O|1997-09-08|1997-08-04|1997-09-11|DELIVER IN PERSON|MAIL| regularly pending inst 12865|112541|7564|6|2|3107.08|0.10|0.02|N|O|1997-08-13|1997-07-05|1997-08-16|NONE|REG AIR|uests use car 12865|70009|2517|7|21|20559.00|0.08|0.06|N|O|1997-07-03|1997-08-05|1997-07-18|COLLECT COD|MAIL|hinder carefully special deposits. caref 12866|37959|463|1|6|11381.70|0.02|0.06|N|O|1997-02-06|1997-04-10|1997-02-07|COLLECT COD|MAIL|eposits. regular Tire 12866|166355|3904|2|32|45483.20|0.00|0.07|N|O|1997-05-11|1997-04-10|1997-06-05|COLLECT COD|MAIL|bove the furiousl 12866|111733|9267|3|35|61065.55|0.06|0.01|N|O|1997-04-27|1997-04-07|1997-05-26|DELIVER IN PERSON|FOB|y even accounts thr 12866|91196|1197|4|9|10684.71|0.01|0.02|N|O|1997-04-11|1997-02-20|1997-05-09|DELIVER IN PERSON|SHIP|olites. care 12866|123228|5741|5|17|21270.74|0.06|0.07|N|O|1997-02-05|1997-03-10|1997-02-25|DELIVER IN PERSON|FOB|y furiously ironic deposits. unusual 12866|87504|2521|6|8|11932.00|0.05|0.05|N|O|1997-04-04|1997-03-18|1997-05-04|TAKE BACK RETURN|AIR|t. blithely special ideas wake. slyly 12866|63645|6152|7|12|19303.68|0.00|0.08|N|O|1997-05-04|1997-03-02|1997-05-28|TAKE BACK RETURN|SHIP|sheaves wake fluffi 12867|72148|7163|1|14|15681.96|0.02|0.04|A|F|1995-04-20|1995-05-29|1995-05-09|DELIVER IN PERSON|AIR|ts. furiously regular instructio 12867|183275|830|2|45|61122.15|0.10|0.08|N|F|1995-06-10|1995-05-02|1995-07-04|TAKE BACK RETURN|SHIP|final tithes. 
s 12867|102014|4525|3|48|48768.48|0.00|0.06|A|F|1995-06-06|1995-06-08|1995-06-13|NONE|REG AIR|quickly brave 12867|148961|3990|4|20|40199.20|0.07|0.03|R|F|1995-04-15|1995-04-15|1995-04-29|DELIVER IN PERSON|SHIP|atelets wake fluffily after th 12867|173650|8685|5|15|25854.75|0.06|0.06|A|F|1995-05-17|1995-06-07|1995-06-10|TAKE BACK RETURN|RAIL|ccording to the furio 12868|188092|8093|1|10|11800.90|0.02|0.01|R|F|1995-03-12|1995-04-08|1995-03-29|NONE|MAIL|nal ideas. furiousl 12868|50423|5434|2|46|63177.32|0.00|0.03|A|F|1995-05-30|1995-05-17|1995-06-15|COLLECT COD|MAIL|cajole. carefully even packages integra 12868|42933|2934|3|32|60029.76|0.00|0.04|N|O|1995-06-21|1995-05-28|1995-07-04|COLLECT COD|RAIL| outside the furiously bold courts. packa 12868|165058|5059|4|45|50537.25|0.06|0.05|A|F|1995-04-08|1995-05-31|1995-04-12|DELIVER IN PERSON|MAIL|nal, ironic ideas use blith 12868|115054|7566|5|3|3207.15|0.07|0.04|N|F|1995-06-10|1995-04-30|1995-07-07|COLLECT COD|MAIL|efully about the car 12868|99853|2363|6|15|27792.75|0.02|0.04|R|F|1995-04-11|1995-04-26|1995-04-12|DELIVER IN PERSON|TRUCK|y final accounts lose regular 12869|143673|6188|1|35|60083.45|0.06|0.07|A|F|1994-05-23|1994-03-12|1994-06-12|TAKE BACK RETURN|FOB|e slyly. pending foxes along the carefull 12869|98221|3240|2|44|53645.68|0.05|0.07|R|F|1994-05-15|1994-04-08|1994-05-19|TAKE BACK RETURN|TRUCK|ckages according to the f 12870|33602|1112|1|39|59888.40|0.01|0.08|A|F|1993-10-22|1993-09-21|1993-11-02|NONE|AIR|ilent ideas. quickly special asympt 12871|14151|1655|1|33|35149.95|0.00|0.02|N|O|1995-11-29|1995-12-04|1995-12-28|COLLECT COD|FOB|posits. furiously daring foxes boost 12871|31717|6724|2|50|82435.50|0.09|0.02|N|O|1996-01-13|1995-12-22|1996-01-18|TAKE BACK RETURN|MAIL| snooze quic 12871|53443|3444|3|29|40496.76|0.08|0.04|N|O|1995-10-17|1995-12-12|1995-10-23|COLLECT COD|REG AIR|ajole alongside of the furiously regular 12871|788|789|4|15|25331.70|0.07|0.02|N|O|1995-11-29|1995-11-09|1995-12-18|TAKE BACK RETURN|MAIL|l packages. grouches use c 12871|37758|2765|5|25|42393.75|0.02|0.03|N|O|1995-11-29|1995-12-03|1995-12-07|COLLECT COD|RAIL|olites sleep carefully blithely 12871|187307|4862|6|10|13943.00|0.00|0.03|N|O|1996-01-26|1995-12-24|1996-02-02|COLLECT COD|MAIL|totes. slyly p 12896|84370|9387|1|47|63655.39|0.08|0.00|N|O|1997-04-18|1997-02-18|1997-04-30|DELIVER IN PERSON|REG AIR|eposits are quickly. eve 12896|194930|7450|2|16|32398.88|0.01|0.02|N|O|1997-01-01|1997-02-27|1997-01-16|NONE|RAIL|its sleep f 12896|180323|2842|3|42|58939.44|0.09|0.07|N|O|1996-12-26|1997-02-03|1997-01-04|COLLECT COD|TRUCK|r escapades alongs 12896|84613|2138|4|48|76685.28|0.01|0.00|N|O|1997-01-30|1997-03-13|1997-02-08|DELIVER IN PERSON|MAIL| final grouches boost 12896|83826|6335|5|10|18098.20|0.09|0.08|N|O|1997-04-14|1997-02-08|1997-04-27|COLLECT COD|AIR|xes nag carefully. theodolites are 12896|55584|3100|6|11|16935.38|0.03|0.03|N|O|1997-03-13|1997-02-13|1997-04-09|COLLECT COD|MAIL|against the deposits doze 12896|191920|4440|7|17|34202.64|0.06|0.04|N|O|1997-01-21|1997-02-03|1997-02-10|DELIVER IN PERSON|RAIL|th the doggedly regular foxes. iron 12897|195630|5631|1|32|55220.16|0.10|0.02|N|O|1995-08-08|1995-08-29|1995-08-22|NONE|RAIL|the furiously regular accoun 12897|138091|8092|2|27|30485.43|0.06|0.01|N|O|1995-11-04|1995-09-27|1995-11-27|TAKE BACK RETURN|FOB|g from the slyly silent accounts. packages 12897|81568|4077|3|19|29441.64|0.03|0.06|N|O|1995-08-02|1995-09-17|1995-08-15|TAKE BACK RETURN|FOB|nstructions wake. 
fu 12897|111824|1825|4|4|7343.28|0.02|0.00|N|O|1995-10-20|1995-08-29|1995-10-29|DELIVER IN PERSON|TRUCK|ully final packages h 12897|24056|4057|5|2|1960.10|0.00|0.03|N|O|1995-09-27|1995-09-07|1995-10-01|COLLECT COD|AIR|refully ironic theodolites. qui 12897|103953|8974|6|29|56751.55|0.08|0.03|N|O|1995-08-17|1995-10-13|1995-08-22|TAKE BACK RETURN|MAIL|lent instructions boost carefully sp 12898|144643|2186|1|32|54004.48|0.01|0.05|R|F|1994-11-05|1994-12-06|1994-11-27|COLLECT COD|MAIL|osits affix quickly fluffily regul 12899|32847|357|1|18|32037.12|0.10|0.01|N|O|1996-04-09|1996-06-22|1996-05-03|COLLECT COD|AIR|ffily even platelets sleep bl 12899|47659|5172|2|5|8033.25|0.04|0.07|N|O|1996-07-03|1996-05-28|1996-07-24|TAKE BACK RETURN|SHIP|ic platelets. requests integrate furi 12899|193568|3569|3|7|11630.92|0.03|0.07|N|O|1996-04-12|1996-06-30|1996-04-26|TAKE BACK RETURN|MAIL|press asymptotes sle 12900|182705|5224|1|11|19664.70|0.03|0.05|R|F|1993-05-20|1993-07-27|1993-05-28|NONE|REG AIR|iously regular accounts. express platel 12900|119564|9565|2|19|30087.64|0.00|0.05|R|F|1993-07-08|1993-06-19|1993-07-25|NONE|AIR| pinto beans cajole alongside of the quickl 12901|163222|771|1|33|42412.26|0.04|0.06|R|F|1994-07-09|1994-07-30|1994-07-30|DELIVER IN PERSON|SHIP|ect furiousl 12901|60894|8413|2|21|38952.69|0.09|0.08|A|F|1994-06-16|1994-07-27|1994-06-26|NONE|AIR|arefully bold accounts 12901|31679|6686|3|39|62816.13|0.07|0.05|A|F|1994-08-10|1994-07-25|1994-08-13|NONE|RAIL|ests. slyl 12901|27122|2127|4|46|48259.52|0.04|0.08|R|F|1994-06-30|1994-07-06|1994-07-06|NONE|FOB|st about the unusual 12901|26216|6217|5|42|47972.82|0.00|0.05|R|F|1994-06-27|1994-07-25|1994-07-21|NONE|RAIL|ructions caj 12901|10639|3141|6|43|66634.09|0.03|0.08|A|F|1994-09-03|1994-06-16|1994-09-19|TAKE BACK RETURN|TRUCK|according to the furiously ir 12901|193200|5720|7|12|15518.40|0.07|0.01|A|F|1994-08-06|1994-07-06|1994-08-08|COLLECT COD|RAIL|anent ideas use fu 12902|97534|7535|1|50|76576.50|0.09|0.04|R|F|1993-11-27|1993-12-26|1993-12-06|TAKE BACK RETURN|REG AIR|unts haggle furio 12902|37566|2573|2|15|22553.40|0.05|0.02|A|F|1994-01-28|1993-12-05|1994-02-17|COLLECT COD|AIR|s. even foxes boost carefull 12902|155428|5429|3|24|35602.08|0.05|0.04|A|F|1993-10-31|1993-12-14|1993-11-16|NONE|SHIP|after the quickl 12902|73626|3627|4|37|59185.94|0.04|0.00|R|F|1993-12-06|1993-12-05|1993-12-15|DELIVER IN PERSON|FOB|s. carefully regular requests b 12902|31143|1144|5|26|27927.64|0.10|0.05|A|F|1993-12-31|1993-12-18|1994-01-26|NONE|FOB|ly. never special 12902|15443|2947|6|40|54337.60|0.08|0.00|R|F|1993-11-15|1994-01-09|1993-11-26|COLLECT COD|AIR| across th 12902|134829|2369|7|44|82008.08|0.08|0.04|R|F|1993-12-14|1993-12-26|1994-01-02|COLLECT COD|MAIL|ites. special, regular ideas across the 12903|57913|419|1|20|37418.20|0.05|0.02|R|F|1994-11-04|1994-11-13|1994-11-19|DELIVER IN PERSON|REG AIR|ar, furious hockey play 12903|131876|9416|2|20|38157.40|0.05|0.02|R|F|1994-10-30|1994-11-24|1994-11-16|DELIVER IN PERSON|REG AIR|uffily. fluffily ironic packages nag. sly 12903|35801|3311|3|29|50367.20|0.03|0.01|R|F|1994-12-15|1994-11-30|1995-01-14|DELIVER IN PERSON|TRUCK| requests print furiously above th 12903|107301|2322|4|1|1308.30|0.09|0.08|R|F|1994-11-20|1994-11-06|1994-12-07|NONE|AIR| pending packages across the final, final 12928|132657|5171|1|28|47310.20|0.00|0.03|R|F|1994-04-05|1994-06-06|1994-04-21|NONE|SHIP|s the quickly furious ideas. 
ironic, ironic 12928|160901|3418|2|2|3923.80|0.04|0.08|A|F|1994-04-26|1994-05-01|1994-05-11|NONE|RAIL| ironic accoun 12928|60762|8281|3|48|82692.48|0.01|0.04|R|F|1994-06-06|1994-05-17|1994-06-10|NONE|MAIL|ctions sleep quickly along the as 12928|19323|9324|4|8|9938.56|0.00|0.04|R|F|1994-05-02|1994-06-09|1994-05-06|NONE|FOB|ns sleep regular accounts. fluffily regu 12928|151477|1478|5|33|50439.51|0.04|0.06|R|F|1994-07-20|1994-05-25|1994-08-14|COLLECT COD|TRUCK|leep fluffily slyly ironic requests. 12928|64859|7366|6|29|52891.65|0.02|0.02|A|F|1994-07-27|1994-06-04|1994-08-11|COLLECT COD|FOB|gular deposits. regular 12928|83076|8093|7|27|28594.89|0.10|0.03|A|F|1994-05-24|1994-05-09|1994-06-13|COLLECT COD|RAIL| around the final deposits. furi 12929|57081|7082|1|40|41523.20|0.02|0.02|A|F|1993-10-11|1993-11-22|1993-11-02|TAKE BACK RETURN|AIR|ages. regu 12929|158534|8535|2|7|11147.71|0.06|0.04|A|F|1993-10-04|1993-10-02|1993-10-11|COLLECT COD|AIR|gular deposits use deposits. b 12929|74863|9878|3|34|62487.24|0.04|0.04|A|F|1993-12-23|1993-10-23|1994-01-18|TAKE BACK RETURN|AIR|sly against the regular 12929|13037|5539|4|13|12350.39|0.03|0.04|A|F|1993-11-11|1993-10-09|1993-11-16|DELIVER IN PERSON|SHIP|ke regular, close ideas. 12930|18656|3659|1|10|15746.50|0.04|0.04|N|O|1995-12-27|1996-01-08|1996-01-18|NONE|AIR|ven packages are furiously orbits. ca 12931|181733|6770|1|4|7258.92|0.08|0.03|R|F|1992-06-11|1992-06-28|1992-06-23|NONE|TRUCK| epitaphs across the blithely even pin 12931|182324|9879|2|24|33751.68|0.04|0.06|A|F|1992-05-27|1992-07-17|1992-06-18|COLLECT COD|AIR| cajole blithely. pending, pendin 12931|109139|4160|3|24|27555.12|0.09|0.08|R|F|1992-05-22|1992-06-25|1992-06-19|DELIVER IN PERSON|MAIL|s eat express, ironic packages. blithely 12932|161614|6647|1|44|73726.84|0.05|0.07|N|O|1997-10-17|1997-11-19|1997-10-23|COLLECT COD|REG AIR|eodolites cajole pending, iro 12932|40155|2660|2|50|54757.50|0.07|0.06|N|O|1997-12-16|1997-11-20|1997-12-30|DELIVER IN PERSON|AIR| excuses. regular asymptotes 12932|77055|4577|3|5|5160.25|0.02|0.02|N|O|1997-09-07|1997-10-15|1997-09-26|NONE|TRUCK|olites. slyly special packages integrate 12932|59868|4879|4|7|12795.02|0.00|0.03|N|O|1997-09-08|1997-11-12|1997-10-01|COLLECT COD|FOB|ar ideas poach b 12933|165120|7637|1|3|3555.36|0.00|0.03|N|O|1998-06-07|1998-04-05|1998-06-11|DELIVER IN PERSON|TRUCK|s among the fur 12933|159414|6960|2|27|39782.07|0.07|0.06|N|O|1998-05-24|1998-03-26|1998-06-05|TAKE BACK RETURN|RAIL|furiously final foxes cajole 12933|60149|150|3|17|18855.38|0.04|0.03|N|O|1998-03-17|1998-05-09|1998-03-22|COLLECT COD|RAIL|lly unusual dependenc 12933|109191|9192|4|30|36005.70|0.00|0.05|N|O|1998-03-06|1998-04-13|1998-03-12|DELIVER IN PERSON|TRUCK| quickly ironic 12933|167657|2690|5|50|86232.50|0.07|0.01|N|O|1998-05-29|1998-05-16|1998-06-22|COLLECT COD|REG AIR|uffily. daringly pending packages c 12933|197702|7703|6|31|55790.70|0.09|0.06|N|O|1998-03-19|1998-05-07|1998-04-02|TAKE BACK RETURN|AIR|ronic dependencies. carefully regular hocke 12933|170856|5891|7|14|26975.90|0.04|0.00|N|O|1998-05-15|1998-05-07|1998-05-25|DELIVER IN PERSON|RAIL| accounts among the special deposits 12934|168185|8186|1|37|46367.66|0.10|0.02|A|F|1994-03-15|1994-05-31|1994-04-13|TAKE BACK RETURN|FOB|e stealthy, express epit 12934|78960|6482|2|34|65924.64|0.01|0.07|R|F|1994-07-04|1994-05-16|1994-07-26|DELIVER IN PERSON|REG AIR|uring the final theodolites. 
fur 12934|63927|3928|3|5|9454.60|0.03|0.01|R|F|1994-05-20|1994-05-28|1994-06-04|COLLECT COD|RAIL|ly unusual deposits alongside of the slyl 12934|11580|6583|4|7|10441.06|0.07|0.04|R|F|1994-06-20|1994-05-10|1994-07-13|TAKE BACK RETURN|SHIP|quickly unusu 12934|11024|1025|5|13|12155.26|0.02|0.07|A|F|1994-04-25|1994-05-06|1994-05-21|DELIVER IN PERSON|AIR| final requests cajole bold, silent reque 12934|79308|9309|6|50|64365.00|0.03|0.00|A|F|1994-06-16|1994-05-06|1994-06-23|TAKE BACK RETURN|MAIL|uthless deposits wake slyly. care 12934|118380|5914|7|47|65723.86|0.06|0.01|A|F|1994-04-22|1994-05-25|1994-05-16|NONE|SHIP|pecial instructions nag s 12935|1698|1699|1|9|14397.21|0.10|0.01|A|F|1994-08-10|1994-07-01|1994-08-20|TAKE BACK RETURN|MAIL|special asymptotes sleep furiously. c 12935|127450|2475|2|4|5909.80|0.01|0.00|A|F|1994-05-27|1994-08-05|1994-06-13|DELIVER IN PERSON|FOB|jole above the regular, regular gifts 12935|41292|6301|3|3|3699.87|0.00|0.02|R|F|1994-06-01|1994-07-15|1994-06-22|TAKE BACK RETURN|REG AIR|ons. blithely final instructions print furi 12935|188188|3225|4|31|39561.58|0.10|0.07|A|F|1994-08-23|1994-07-07|1994-08-29|NONE|SHIP|ts cajole blithely against 12960|30203|204|1|38|43061.60|0.02|0.04|A|F|1993-10-26|1994-01-08|1993-11-14|NONE|AIR|structions 12960|12622|2623|2|32|49107.84|0.06|0.07|A|F|1994-02-08|1993-12-19|1994-02-21|COLLECT COD|REG AIR|s detect blithely. slyly careful 12960|137302|2329|3|8|10714.40|0.01|0.02|R|F|1994-01-05|1993-11-27|1994-02-01|DELIVER IN PERSON|AIR|its cajole. special, bold requests 12960|23622|6125|4|38|58733.56|0.08|0.06|A|F|1993-12-04|1993-11-18|1993-12-21|TAKE BACK RETURN|SHIP| quiet requ 12961|15645|8147|1|43|67107.52|0.02|0.01|A|F|1995-04-08|1995-01-31|1995-05-01|COLLECT COD|REG AIR|ilent pinto beans cajole fluffily regul 12961|137203|7204|2|15|18603.00|0.04|0.01|A|F|1995-02-16|1995-01-30|1995-03-04|COLLECT COD|TRUCK| pending requests ab 12961|169935|9936|3|22|44108.46|0.04|0.02|R|F|1995-03-25|1995-02-06|1995-04-17|NONE|MAIL|blithely i 12961|95926|3454|4|20|38438.40|0.07|0.03|R|F|1995-01-10|1995-01-30|1995-01-29|NONE|AIR|ges alongside of the accounts 12962|161802|6835|1|25|46595.00|0.10|0.07|N|O|1995-10-30|1996-01-26|1995-11-03|NONE|RAIL|ut the blithely express braids. slyly fi 12962|193072|630|2|16|18641.12|0.07|0.04|N|O|1995-12-29|1995-11-30|1996-01-06|NONE|AIR|olites haggle. inst 12962|153619|1165|3|43|71922.23|0.00|0.06|N|O|1996-02-04|1996-01-16|1996-03-01|NONE|TRUCK|nts haggle a 12963|74528|4529|1|42|63105.84|0.00|0.05|N|O|1998-08-27|1998-08-23|1998-09-01|COLLECT COD|TRUCK|d packages affix quickly quickly u 12964|26402|1407|1|7|9298.80|0.05|0.04|N|O|1998-03-31|1998-03-30|1998-04-06|TAKE BACK RETURN|FOB|uriously even pinto beans 12964|137023|7024|2|11|11660.22|0.02|0.07|N|O|1998-05-08|1998-03-27|1998-06-07|NONE|TRUCK|y. regular deposits among the blithely cl 12964|130354|355|3|14|19380.90|0.03|0.00|N|O|1998-05-11|1998-02-26|1998-06-03|DELIVER IN PERSON|REG AIR|pinto beans after the blit 12964|44380|9389|4|33|43704.54|0.07|0.08|N|O|1998-01-30|1998-03-19|1998-02-12|TAKE BACK RETURN|MAIL|latelets. qui 12964|88465|974|5|32|46510.72|0.05|0.07|N|O|1998-03-14|1998-04-02|1998-03-24|DELIVER IN PERSON|AIR|r the furiousl 12964|18223|725|6|28|31954.16|0.02|0.07|N|O|1998-02-26|1998-02-22|1998-03-01|DELIVER IN PERSON|MAIL|gh the regular accounts. 
12964|61871|6884|7|38|69649.06|0.10|0.05|N|O|1998-02-02|1998-03-11|1998-02-09|TAKE BACK RETURN|RAIL|riously final deposits; theodoli 12965|49893|9894|1|15|27643.35|0.00|0.08|A|F|1993-08-17|1993-08-30|1993-09-12|TAKE BACK RETURN|FOB|counts are furiously 12965|146161|3704|2|34|41043.44|0.03|0.08|R|F|1993-09-10|1993-09-07|1993-09-21|COLLECT COD|TRUCK| regular requests. bold, 12965|111475|3987|3|35|52026.45|0.02|0.03|R|F|1993-06-29|1993-09-19|1993-07-05|COLLECT COD|FOB|even requests haggle quietly spe 12966|46301|3814|1|35|43655.50|0.02|0.07|N|O|1997-12-15|1997-12-11|1997-12-21|COLLECT COD|AIR|ly furiously regular depo 12966|164725|7242|2|1|1789.72|0.05|0.04|N|O|1997-12-28|1997-12-21|1998-01-01|TAKE BACK RETURN|AIR|uickly furious 12967|38722|8723|1|8|13285.76|0.07|0.02|R|F|1993-10-01|1993-12-03|1993-10-02|TAKE BACK RETURN|FOB|the carefully regular accounts 12992|1515|4016|1|32|45328.32|0.01|0.02|N|O|1997-04-09|1997-04-13|1997-04-18|COLLECT COD|RAIL|ic deposits. quick 12992|61340|6353|2|6|7808.04|0.06|0.08|N|O|1997-04-04|1997-04-29|1997-04-17|DELIVER IN PERSON|RAIL|atelets about the slyly pendin 12992|129260|4285|3|32|41256.32|0.03|0.01|N|O|1997-05-01|1997-04-03|1997-05-19|NONE|RAIL|pecial ideas. blithely final accounts are b 12992|60170|2677|4|26|29384.42|0.01|0.02|N|O|1997-06-15|1997-03-29|1997-06-17|DELIVER IN PERSON|MAIL|tes haggle b 12992|66584|6585|5|44|68225.52|0.03|0.00|N|O|1997-03-14|1997-04-21|1997-03-31|NONE|REG AIR|oxes. carefully bold grouches 12992|67060|4579|6|10|10270.60|0.00|0.05|N|O|1997-05-31|1997-05-04|1997-06-22|NONE|MAIL|lyly express packages. bold, final foxe 12993|120860|5885|1|7|13166.02|0.08|0.06|N|O|1998-07-09|1998-07-21|1998-08-04|COLLECT COD|FOB|er the deposi 12993|90635|636|2|22|35763.86|0.05|0.04|N|O|1998-08-03|1998-07-14|1998-08-29|NONE|AIR|never. carefully ironic platelets 12994|17449|7450|1|48|65589.12|0.00|0.05|A|F|1995-03-12|1995-02-07|1995-03-21|NONE|AIR|long the pending, bold excuses. pending 12995|101041|6062|1|46|47933.84|0.03|0.00|N|O|1996-08-02|1996-09-14|1996-09-01|DELIVER IN PERSON|SHIP|fily along the ironic platelets. exc 12995|182578|5097|2|14|23247.98|0.01|0.08|N|O|1996-08-20|1996-09-04|1996-09-01|NONE|MAIL|nts haggle bli 12995|176767|1802|3|31|57156.56|0.01|0.02|N|O|1996-10-19|1996-10-17|1996-10-30|DELIVER IN PERSON|MAIL|blithely ironic asymptotes nag quickly 12995|117823|2846|4|33|60747.06|0.09|0.00|N|O|1996-10-20|1996-09-27|1996-11-18|NONE|FOB|s cajole. furiously ironic theodolites 12995|36887|1894|5|6|10943.28|0.10|0.08|N|O|1996-10-13|1996-09-01|1996-10-19|TAKE BACK RETURN|MAIL|snooze alongside of t 12995|145831|5832|6|30|56304.90|0.01|0.04|N|O|1996-09-23|1996-10-10|1996-09-30|COLLECT COD|SHIP| cajole fluffily express requests. bold, r 12996|65992|1005|1|8|15663.92|0.09|0.03|N|O|1996-02-19|1996-02-26|1996-03-19|TAKE BACK RETURN|TRUCK|express, ironic 12996|123659|1196|2|6|10095.90|0.08|0.01|N|O|1996-02-16|1996-02-03|1996-02-28|NONE|RAIL|y above the always ironic instructions. re 12996|132333|2334|3|49|66901.17|0.04|0.03|N|O|1995-12-25|1996-02-12|1996-01-08|NONE|SHIP| nod above the carefully final 12996|110133|7667|4|30|34293.90|0.02|0.02|N|O|1996-04-04|1996-01-14|1996-04-28|COLLECT COD|FOB|sts haggle slowly e 12997|129157|6694|1|6|7116.90|0.05|0.05|N|O|1996-06-06|1996-08-07|1996-06-30|NONE|RAIL|eposits. sly 12997|130248|249|2|7|8947.68|0.04|0.06|N|O|1996-07-07|1996-06-27|1996-07-08|NONE|REG AIR|eas cajole enticingly? even requests after 12997|69583|2090|3|5|7762.90|0.05|0.00|N|O|1996-08-18|1996-08-10|1996-09-12|NONE|AIR| accounts wake deposits. 
request 12998|64934|4935|1|2|3797.86|0.04|0.08|N|O|1997-01-25|1996-11-29|1997-02-04|COLLECT COD|AIR|cial packages boost. regula 12998|137994|7995|2|18|36575.82|0.07|0.06|N|O|1996-10-23|1996-11-23|1996-10-27|COLLECT COD|REG AIR|efully. slyly even platelets cajole 12999|189835|2354|1|45|86617.35|0.10|0.05|A|F|1993-07-09|1993-06-23|1993-08-04|TAKE BACK RETURN|TRUCK|y ironic ideas use furiously carefully regu 12999|152131|2132|2|31|36677.03|0.01|0.06|A|F|1993-05-09|1993-05-21|1993-05-14|DELIVER IN PERSON|TRUCK|jole silently. quickly final foxes d 12999|64990|7497|3|36|70379.64|0.00|0.03|A|F|1993-04-24|1993-05-19|1993-05-11|TAKE BACK RETURN|RAIL|unts maintain blithely furiously regular 12999|85528|5529|4|20|30270.40|0.01|0.04|A|F|1993-07-05|1993-06-24|1993-07-10|DELIVER IN PERSON|TRUCK|e carefully pending Tiresias? pa 13024|78939|8940|1|24|46030.32|0.04|0.04|R|F|1992-06-16|1992-05-26|1992-07-04|DELIVER IN PERSON|RAIL|ckages along 13024|125737|762|2|1|1762.73|0.10|0.02|R|F|1992-05-15|1992-06-11|1992-06-09|COLLECT COD|AIR|sleep? blithely slow requests 13024|53294|3295|3|12|14967.48|0.09|0.05|R|F|1992-04-17|1992-04-30|1992-04-24|DELIVER IN PERSON|MAIL|en instructions x-ra 13025|54312|6818|1|26|32924.06|0.10|0.01|N|O|1998-02-07|1998-03-22|1998-02-08|NONE|FOB|fully bold theodolites wake. ironic 13025|10693|5696|2|24|38488.56|0.00|0.03|N|O|1998-05-19|1998-03-21|1998-05-31|TAKE BACK RETURN|RAIL|mold blithely final accounts. even acc 13025|103618|8639|3|21|34053.81|0.08|0.02|N|O|1998-04-15|1998-02-27|1998-04-21|TAKE BACK RETURN|FOB|nag blithely 13025|93815|3816|4|32|57881.92|0.00|0.03|N|O|1998-03-15|1998-03-13|1998-04-03|DELIVER IN PERSON|REG AIR| blithely final requests boost 13026|96411|8921|1|44|61926.04|0.05|0.07|N|O|1997-06-08|1997-05-23|1997-06-18|DELIVER IN PERSON|RAIL|he slyly silent dugouts 13026|174513|7031|2|18|28575.18|0.04|0.01|N|O|1997-05-06|1997-06-23|1997-05-29|COLLECT COD|RAIL|ously. carefully regular reques 13026|30748|3252|3|11|18466.14|0.05|0.00|N|O|1997-07-30|1997-05-17|1997-08-23|TAKE BACK RETURN|AIR|nal foxes do maintain am 13026|170915|5950|4|26|51633.66|0.10|0.06|N|O|1997-05-21|1997-06-10|1997-06-20|COLLECT COD|REG AIR|uctions. express 13026|152382|9928|5|21|30121.98|0.06|0.07|N|O|1997-06-01|1997-05-07|1997-06-29|DELIVER IN PERSON|FOB|es are never. blithely re 13027|100270|271|1|30|38108.10|0.05|0.02|A|F|1992-01-17|1992-02-24|1992-02-04|DELIVER IN PERSON|TRUCK| accounts. unusual deposits are carefull 13027|22198|9705|2|33|36966.27|0.01|0.07|R|F|1992-03-24|1992-03-22|1992-04-04|TAKE BACK RETURN|MAIL|, regular package 13027|72825|7840|3|15|26967.30|0.08|0.04|A|F|1992-04-06|1992-04-04|1992-04-28|NONE|SHIP| final accounts a 13027|167047|9564|4|6|6684.24|0.09|0.01|A|F|1992-03-19|1992-04-04|1992-04-10|TAKE BACK RETURN|REG AIR|olites engage slyly carefully regular 13027|155303|334|5|45|61123.50|0.10|0.06|A|F|1992-03-07|1992-03-05|1992-03-18|COLLECT COD|MAIL|along the 13028|108441|952|1|35|50730.40|0.07|0.07|A|F|1994-04-13|1994-06-22|1994-05-09|DELIVER IN PERSON|RAIL|slyly special accoun 13028|155810|841|2|3|5597.43|0.06|0.00|A|F|1994-05-21|1994-07-03|1994-05-30|NONE|RAIL|ake slyly about the regular 13028|92387|9915|3|40|55175.20|0.10|0.01|A|F|1994-05-30|1994-06-25|1994-06-05|DELIVER IN PERSON|SHIP| blithely. 
regular, silent foxes cajole c 13028|143015|558|4|49|51842.49|0.03|0.06|A|F|1994-05-18|1994-06-13|1994-06-06|COLLECT COD|TRUCK|ts affix b 13028|44656|9665|5|43|68827.95|0.01|0.03|R|F|1994-08-05|1994-06-30|1994-08-31|DELIVER IN PERSON|TRUCK|ss the stealthily q 13029|101331|3842|1|4|5329.32|0.04|0.08|N|O|1997-11-13|1998-01-13|1997-12-09|DELIVER IN PERSON|MAIL|the ironic, final requests. 13029|61385|8904|2|14|18849.32|0.04|0.06|N|O|1998-03-03|1998-01-16|1998-03-26|TAKE BACK RETURN|RAIL| carefully regular ex 13029|82063|9588|3|39|40757.34|0.10|0.01|N|O|1997-12-04|1998-01-14|1997-12-26|NONE|TRUCK| even accounts. accounts cajole. b 13029|16936|6937|4|34|62999.62|0.00|0.01|N|O|1998-02-04|1998-01-24|1998-03-02|TAKE BACK RETURN|AIR|apades are furiously ab 13029|31315|3819|5|44|54837.64|0.03|0.04|N|O|1998-01-23|1997-12-21|1998-02-06|NONE|AIR| dependencies. deposits ab 13030|167075|9592|1|42|47966.94|0.03|0.00|A|F|1994-10-17|1994-09-24|1994-10-30|TAKE BACK RETURN|RAIL|ts sleep after the express deposits. blithe 13030|18179|5683|2|46|50469.82|0.08|0.07|A|F|1994-08-07|1994-09-16|1994-09-02|TAKE BACK RETURN|MAIL|refully ironic 13030|133381|8408|3|27|38188.26|0.05|0.07|R|F|1994-11-19|1994-09-26|1994-11-27|COLLECT COD|REG AIR|ual, final accounts. final, pe 13031|112161|4673|1|19|22290.04|0.01|0.07|R|F|1993-01-17|1993-02-26|1993-01-24|COLLECT COD|TRUCK|round the accounts 13031|76379|8887|2|39|52859.43|0.01|0.07|R|F|1993-01-07|1993-02-15|1993-01-31|DELIVER IN PERSON|TRUCK|ss packages. final theodolites sleep af 13031|93925|3926|3|17|32621.64|0.07|0.07|A|F|1993-03-08|1993-02-26|1993-03-27|TAKE BACK RETURN|REG AIR|e slyly sl 13031|185830|3385|4|12|22989.96|0.00|0.05|R|F|1993-01-28|1993-02-16|1993-02-07|TAKE BACK RETURN|AIR|ecial asymptotes doze bli 13031|41400|1401|5|44|59021.60|0.05|0.08|R|F|1993-04-03|1993-02-08|1993-04-04|TAKE BACK RETURN|SHIP|unusual packages. blithely regular fra 13031|7761|2762|6|38|63412.88|0.08|0.04|R|F|1993-03-08|1993-02-08|1993-03-18|COLLECT COD|MAIL|ly regular dugouts boost carefully careful 13056|122183|2184|1|29|34950.22|0.09|0.08|A|F|1994-05-27|1994-05-29|1994-05-31|NONE|FOB|ce of the furiously even the 13056|123489|3490|2|36|54449.28|0.03|0.02|R|F|1994-04-24|1994-06-30|1994-05-01|DELIVER IN PERSON|REG AIR|, special deposits haggle thinly accordi 13056|70424|5439|3|28|39043.76|0.07|0.03|R|F|1994-06-20|1994-07-18|1994-07-13|NONE|MAIL|efully fluffily unusual acc 13056|3146|3147|4|11|11540.54|0.02|0.01|R|F|1994-07-21|1994-05-28|1994-08-20|TAKE BACK RETURN|REG AIR|lly regular frays boost about 13056|62442|4949|5|21|29493.24|0.07|0.04|R|F|1994-07-14|1994-06-09|1994-07-30|NONE|SHIP|arefully regular requests are above 13056|46695|4208|6|28|45967.32|0.09|0.06|R|F|1994-07-06|1994-06-21|1994-08-03|COLLECT COD|SHIP| express deposi 13057|184904|9941|1|44|87511.60|0.02|0.02|N|O|1998-02-22|1998-02-12|1998-03-19|COLLECT COD|SHIP|x-ray above the slyly regular instructi 13057|80805|5822|2|16|28572.80|0.08|0.03|N|O|1998-01-14|1998-01-28|1998-02-07|TAKE BACK RETURN|AIR|ests detect 13058|45801|8306|1|9|15721.20|0.03|0.05|N|O|1997-05-19|1997-06-14|1997-05-24|COLLECT COD|SHIP|gular deposits. 
slyly e 13059|41262|1263|1|22|26471.72|0.01|0.01|N|O|1997-07-20|1997-06-30|1997-08-04|DELIVER IN PERSON|TRUCK|ecial, regul 13059|84924|7433|2|3|5726.76|0.00|0.05|N|O|1997-07-14|1997-08-20|1997-07-17|TAKE BACK RETURN|AIR| slyly bold theodolites-- quickly fin 13059|116043|1066|3|40|42361.60|0.03|0.00|N|O|1997-09-20|1997-07-27|1997-09-22|COLLECT COD|RAIL|elets cajole slyly after the furiou 13059|133856|8883|4|36|68034.60|0.08|0.08|N|O|1997-06-29|1997-08-08|1997-07-13|NONE|FOB| ironic packages try to haggle caref 13059|107205|4736|5|21|25456.20|0.09|0.05|N|O|1997-06-15|1997-08-06|1997-07-06|TAKE BACK RETURN|RAIL|ely ironic pinto beans. deposits haggle- 13059|178875|1393|6|35|68385.45|0.05|0.03|N|O|1997-09-24|1997-07-21|1997-10-14|DELIVER IN PERSON|AIR| instructions 13060|199119|4158|1|17|20707.87|0.01|0.08|N|O|1997-06-03|1997-04-24|1997-06-05|NONE|REG AIR|ely final instructi 13060|178415|3450|2|30|44802.30|0.05|0.00|N|O|1997-04-30|1997-04-05|1997-05-04|COLLECT COD|REG AIR|es sleep bold re 13060|5277|5278|3|7|8275.89|0.10|0.03|N|O|1997-02-10|1997-04-24|1997-02-14|COLLECT COD|REG AIR| pending platelets sleep blithely 13060|85222|7731|4|40|48288.80|0.10|0.07|N|O|1997-03-26|1997-04-14|1997-04-10|TAKE BACK RETURN|AIR|he blithely re 13060|121514|4027|5|36|55278.36|0.05|0.01|N|O|1997-04-19|1997-03-10|1997-04-26|COLLECT COD|TRUCK|the doggedly ironic asympt 13060|73022|5530|6|19|18905.38|0.06|0.04|N|O|1997-05-09|1997-05-02|1997-05-26|NONE|RAIL|he pending courts thrash slyl 13060|73973|8988|7|32|62303.04|0.10|0.00|N|O|1997-04-14|1997-03-28|1997-05-01|TAKE BACK RETURN|MAIL|encies. furiously unu 13061|58769|1275|1|1|1727.76|0.04|0.04|N|O|1997-10-17|1997-10-19|1997-11-03|TAKE BACK RETURN|MAIL|ly special deposits wake 13061|120331|7868|2|9|12161.97|0.04|0.08|N|O|1997-11-27|1997-12-10|1997-12-05|TAKE BACK RETURN|SHIP|equests are ironic, regular dep 13061|187331|7332|3|37|52478.21|0.06|0.04|N|O|1997-11-14|1997-10-28|1997-11-28|TAKE BACK RETURN|AIR|furiously regular sheaves doze a 13062|102511|42|1|39|59026.89|0.05|0.07|N|O|1996-08-08|1996-06-03|1996-08-14|DELIVER IN PERSON|RAIL|enticingly? 13062|102092|4603|2|27|29540.43|0.09|0.06|N|O|1996-07-19|1996-07-29|1996-07-25|NONE|FOB|ending braids haggle carefully final pi 13062|44162|6667|3|2|2212.32|0.10|0.02|N|O|1996-07-22|1996-07-19|1996-08-02|DELIVER IN PERSON|MAIL|he unusual, regular ide 13063|44086|9095|1|29|29872.32|0.08|0.00|N|O|1997-03-03|1997-03-29|1997-03-22|NONE|MAIL|tes. pending, even warhors 13063|150559|3075|2|24|38629.20|0.07|0.06|N|O|1997-02-22|1997-03-13|1997-03-20|COLLECT COD|SHIP|de of the fluffil 13063|44782|7287|3|27|46623.06|0.03|0.01|N|O|1997-02-25|1997-03-05|1997-03-09|COLLECT COD|REG AIR|to are. furiously regular r 13063|35848|8352|4|45|80272.80|0.05|0.03|N|O|1997-05-13|1997-03-06|1997-05-17|DELIVER IN PERSON|REG AIR|yly express, express fo 13063|96588|6589|5|5|7922.90|0.06|0.04|N|O|1997-04-28|1997-04-02|1997-05-11|DELIVER IN PERSON|MAIL|efully furiously r 13063|154079|9110|6|45|50988.15|0.04|0.03|N|O|1997-04-24|1997-03-10|1997-05-20|TAKE BACK RETURN|TRUCK|ymptotes integrate? final foxe 13063|197919|5477|7|45|90760.95|0.01|0.01|N|O|1997-03-19|1997-03-05|1997-04-08|TAKE BACK RETURN|FOB|s. carefully care 13088|15280|283|1|18|21515.04|0.01|0.00|R|F|1993-08-15|1993-05-27|1993-08-21|NONE|RAIL|tes. 
carefully bold dependencies are 13088|1997|9498|2|42|79757.58|0.10|0.07|R|F|1993-08-02|1993-07-08|1993-08-14|NONE|REG AIR|s wake slyly above the furiously regular 13088|151051|3567|3|35|38571.75|0.00|0.06|A|F|1993-05-08|1993-06-15|1993-05-21|COLLECT COD|AIR|quickly final pinto be 13088|100898|3409|4|15|28483.35|0.06|0.01|R|F|1993-07-31|1993-07-06|1993-08-02|COLLECT COD|FOB|ts-- carefu 13088|190246|2766|5|3|4008.72|0.01|0.06|A|F|1993-08-04|1993-06-09|1993-08-14|COLLECT COD|TRUCK|s cajole regularly furiously ironic acco 13088|58146|5662|6|48|52998.72|0.10|0.05|A|F|1993-06-01|1993-06-04|1993-06-23|DELIVER IN PERSON|SHIP|theodolites wak 13088|56476|8982|7|46|65893.62|0.04|0.04|A|F|1993-08-11|1993-05-29|1993-09-07|NONE|RAIL|y about the slyly final accou 13089|190891|892|1|28|55492.92|0.05|0.08|N|O|1996-04-07|1996-01-25|1996-04-22|DELIVER IN PERSON|AIR|theodolites boost blithely 13089|94321|9340|2|50|65766.00|0.03|0.08|N|O|1996-03-01|1996-02-01|1996-03-18|COLLECT COD|SHIP|nts detect carefully above th 13089|47497|7498|3|40|57779.60|0.06|0.08|N|O|1996-02-24|1996-01-29|1996-03-11|NONE|AIR|sts. carefull 13089|65295|5296|4|49|61754.21|0.07|0.03|N|O|1996-03-19|1996-03-01|1996-04-10|DELIVER IN PERSON|FOB|packages haggle blithel 13090|85418|5419|1|32|44909.12|0.10|0.00|R|F|1993-10-08|1993-08-31|1993-11-01|COLLECT COD|RAIL|ress instruction 13090|10351|7855|2|34|42885.90|0.09|0.03|R|F|1993-09-24|1993-09-19|1993-09-28|COLLECT COD|TRUCK|ly regular dinos a 13090|166352|3901|3|18|25530.30|0.10|0.08|R|F|1993-11-24|1993-08-28|1993-12-02|COLLECT COD|RAIL|unusual packag 13090|71614|4122|4|10|15856.10|0.01|0.07|A|F|1993-08-20|1993-09-29|1993-08-25|TAKE BACK RETURN|MAIL|ideas. regular waters 13090|2186|2187|5|20|21763.60|0.00|0.05|R|F|1993-11-01|1993-10-24|1993-11-16|COLLECT COD|RAIL| furiously regular a 13091|192233|9791|1|11|14577.53|0.00|0.00|R|F|1995-02-08|1994-12-28|1995-02-18|DELIVER IN PERSON|SHIP|st boldly after the furiously regular court 13091|43567|1080|2|11|16616.16|0.10|0.04|A|F|1994-11-22|1995-01-17|1994-12-08|TAKE BACK RETURN|MAIL|egular, pending braids across the 13091|23164|8169|3|23|25004.68|0.02|0.02|R|F|1995-02-22|1994-12-27|1995-02-23|NONE|AIR|pinto beans sleep. bold instructions aroun 13091|105703|5704|4|31|52969.70|0.07|0.06|A|F|1995-01-26|1995-01-21|1995-01-30|DELIVER IN PERSON|TRUCK|even theodolites snooze quickly special 13091|139725|4752|5|38|67059.36|0.08|0.04|R|F|1995-01-26|1995-02-10|1995-02-21|NONE|REG AIR|ly final pinto beans wake ideas. bli 13091|150927|5958|6|48|94940.16|0.06|0.01|A|F|1995-03-02|1995-01-23|1995-03-29|DELIVER IN PERSON|SHIP|fully. carefully pending accounts 13092|97346|4874|1|31|41643.54|0.00|0.02|N|O|1998-10-20|1998-08-27|1998-11-17|DELIVER IN PERSON|TRUCK|atelets alongside of the caref 13092|93535|6045|2|48|73369.44|0.09|0.04|N|O|1998-07-24|1998-09-10|1998-07-27|DELIVER IN PERSON|AIR|dencies. fluffily express dep 13093|162962|2963|1|31|62773.76|0.06|0.06|N|O|1996-07-24|1996-06-30|1996-08-11|COLLECT COD|FOB|ckages. regular sentiments use slyly sl 13093|96995|4523|2|39|77687.61|0.08|0.07|N|O|1996-07-02|1996-07-18|1996-07-20|COLLECT COD|MAIL| pinto beans u 13093|181619|4138|3|15|25509.15|0.03|0.03|N|O|1996-05-15|1996-06-18|1996-05-31|COLLECT COD|TRUCK|ages haggle: unusua 13094|127614|5151|1|41|67306.01|0.00|0.02|R|F|1993-09-15|1993-11-04|1993-09-19|COLLECT COD|TRUCK| about the regular packages. warthogs 13094|3179|5680|2|16|17314.72|0.00|0.01|R|F|1993-10-05|1993-09-30|1993-10-14|NONE|FOB|e slyly unusual requests. 
13095|22461|9968|1|42|58105.32|0.10|0.02|N|O|1996-11-20|1996-09-24|1996-12-05|NONE|SHIP|e requests. bold 13095|101561|4072|2|49|76565.44|0.02|0.01|N|O|1996-10-18|1996-10-14|1996-11-10|COLLECT COD|FOB|eep packages. f 13095|128760|6297|3|46|82282.96|0.08|0.03|N|O|1996-08-29|1996-09-07|1996-09-21|COLLECT COD|SHIP| doggedly furiously ironic deposits. p 13095|59153|6669|4|34|37813.10|0.10|0.03|N|O|1996-08-25|1996-09-14|1996-09-02|TAKE BACK RETURN|TRUCK|ke against th 13095|168961|1478|5|29|58868.84|0.10|0.08|N|O|1996-07-28|1996-09-15|1996-08-24|TAKE BACK RETURN|RAIL|iously iron 13095|126325|6326|6|17|22972.44|0.07|0.06|N|O|1996-09-20|1996-09-18|1996-10-10|NONE|REG AIR|en ideas. deposits br 13095|131461|6488|7|27|40296.42|0.05|0.00|N|O|1996-10-07|1996-08-21|1996-10-24|TAKE BACK RETURN|RAIL|affix carefully after the carefully pendin 13120|62148|9667|1|44|48846.16|0.04|0.08|A|F|1994-12-21|1995-01-15|1995-01-03|COLLECT COD|MAIL|en pinto beans. fluffily special 13120|113634|1168|2|22|36247.86|0.06|0.00|R|F|1995-03-14|1995-02-11|1995-04-08|NONE|MAIL|. requests cajole slyly. 13121|106028|8539|1|50|51701.00|0.10|0.06|A|F|1993-03-26|1993-05-21|1993-03-31|DELIVER IN PERSON|TRUCK| blithely against the car 13121|9947|9948|2|17|31567.98|0.08|0.06|R|F|1993-03-14|1993-05-20|1993-03-17|TAKE BACK RETURN|REG AIR| the even pin 13121|172259|2260|3|1|1331.25|0.06|0.05|A|F|1993-06-17|1993-04-14|1993-06-28|NONE|AIR|ggedly. slyly ironic accou 13121|149421|9422|4|44|64698.48|0.00|0.04|A|F|1993-05-01|1993-06-02|1993-05-07|NONE|MAIL|jole. sometimes ironic account 13121|106294|6295|5|24|31206.96|0.01|0.07|R|F|1993-05-27|1993-04-12|1993-06-03|TAKE BACK RETURN|AIR|ts. express packages about the fur 13121|75482|5483|6|11|16032.28|0.05|0.03|A|F|1993-06-23|1993-04-25|1993-07-11|NONE|MAIL|pending idea 13121|24652|7155|7|18|28379.70|0.00|0.00|R|F|1993-07-01|1993-05-12|1993-07-10|COLLECT COD|MAIL|de of the unusual, regular packages ha 13122|109495|2006|1|42|63188.58|0.08|0.08|A|F|1994-10-15|1994-11-28|1994-11-06|DELIVER IN PERSON|AIR|s the final 13122|80506|8031|2|8|11892.00|0.09|0.02|A|F|1995-01-15|1994-11-23|1995-01-19|DELIVER IN PERSON|AIR|carefully 13122|61629|9148|3|34|54081.08|0.05|0.06|A|F|1994-12-16|1994-12-09|1995-01-08|COLLECT COD|FOB|usly silent accounts wake slyly. 13122|193766|1324|4|16|29756.16|0.00|0.08|A|F|1995-01-02|1994-12-15|1995-01-20|DELIVER IN PERSON|MAIL| ideas. blithely ironic 13122|19619|4622|5|23|35388.03|0.01|0.08|A|F|1994-10-31|1994-10-30|1994-11-08|TAKE BACK RETURN|AIR|lar courts 13123|153683|1229|1|11|19103.48|0.02|0.00|N|O|1998-10-05|1998-08-04|1998-10-27|DELIVER IN PERSON|REG AIR|ts sleep quickly blithe 13123|147584|2613|2|13|21210.54|0.06|0.01|N|O|1998-09-17|1998-08-17|1998-10-16|TAKE BACK RETURN|AIR|ously final forges use blithely a 13123|31795|1796|3|37|63891.23|0.09|0.03|N|O|1998-10-28|1998-09-04|1998-11-25|COLLECT COD|MAIL|al foxes affix furiously ironic accounts 13123|166905|9422|4|50|98595.00|0.04|0.07|N|O|1998-07-21|1998-08-30|1998-07-31|TAKE BACK RETURN|MAIL|ts nag according 13123|156391|1422|5|16|23158.24|0.03|0.08|N|O|1998-08-22|1998-08-13|1998-09-02|NONE|MAIL|to beans. quickly final accounts ar 13123|52076|2077|6|18|18505.26|0.09|0.03|N|O|1998-09-21|1998-08-15|1998-10-19|DELIVER IN PERSON|TRUCK|ns sleep furiously. furiously bold 13123|37427|7428|7|4|5457.68|0.08|0.01|N|O|1998-08-12|1998-09-09|1998-08-21|TAKE BACK RETURN|TRUCK|egular dependencies are carefully. 
slyly 13124|29414|6921|1|28|37615.48|0.03|0.07|A|F|1994-01-18|1994-01-27|1994-01-28|NONE|REG AIR|y express patterns 13124|153607|6123|2|12|19927.20|0.06|0.04|A|F|1994-02-23|1994-02-06|1994-03-24|TAKE BACK RETURN|SHIP|ld ideas. quickly regular 13124|67055|7056|3|4|4088.20|0.09|0.06|A|F|1994-04-04|1994-02-26|1994-04-09|NONE|AIR|into beans. daring theodolites cajole s 13125|148446|3475|1|16|23911.04|0.06|0.04|R|F|1994-03-07|1994-02-18|1994-03-27|DELIVER IN PERSON|RAIL|structions wake. exp 13125|121034|1035|2|2|2110.06|0.01|0.05|R|F|1994-04-10|1994-02-04|1994-04-18|NONE|REG AIR|ies wake blithely ironic d 13125|9454|1955|3|25|34086.25|0.05|0.05|A|F|1994-02-19|1994-03-03|1994-02-23|DELIVER IN PERSON|SHIP| the blithely bold pi 13125|91162|6181|4|41|47279.56|0.06|0.06|A|F|1994-01-18|1994-01-22|1994-02-03|DELIVER IN PERSON|FOB|ncies haggle alongsid 13125|152711|5227|5|27|47620.17|0.08|0.03|A|F|1994-02-03|1994-02-18|1994-02-06|NONE|TRUCK| dependencies. re 13125|3669|3670|6|27|42461.82|0.01|0.01|A|F|1994-04-05|1994-02-04|1994-04-17|TAKE BACK RETURN|SHIP|iously final courts. slyly special acc 13125|53594|8605|7|4|6190.36|0.08|0.00|A|F|1994-01-23|1994-02-16|1994-02-21|COLLECT COD|TRUCK|lithely carefull 13126|145211|7726|1|50|62810.50|0.10|0.08|N|O|1996-02-12|1995-12-22|1996-02-26|DELIVER IN PERSON|FOB|luffily furiously final a 13126|156523|1554|2|16|25272.32|0.08|0.02|N|O|1995-12-13|1996-01-02|1995-12-28|COLLECT COD|SHIP|r accounts about the 13126|78001|509|3|2|1958.00|0.02|0.00|N|O|1995-12-09|1995-12-09|1995-12-26|DELIVER IN PERSON|TRUCK|lar deposits cajole carefully carefu 13126|146981|6982|4|11|22307.78|0.10|0.04|N|O|1995-12-13|1996-01-12|1996-01-12|COLLECT COD|SHIP|es. slyly iron 13126|108158|8159|5|26|30319.90|0.02|0.04|N|O|1995-12-05|1996-01-16|1995-12-23|DELIVER IN PERSON|TRUCK|o beans haggle quickly 13127|113849|3850|1|40|74513.60|0.07|0.00|R|F|1993-09-06|1993-10-30|1993-09-25|COLLECT COD|FOB|tes. furiously special asympt 13152|170532|533|1|40|64101.20|0.00|0.01|R|F|1994-04-12|1994-04-27|1994-04-17|COLLECT COD|MAIL|ts. slyly special accounts sleep agai 13152|124211|4212|2|47|58054.87|0.08|0.06|R|F|1994-03-13|1994-05-16|1994-03-19|NONE|MAIL|ns wake carefull 13152|9061|4062|3|24|23281.44|0.05|0.06|A|F|1994-04-16|1994-04-01|1994-05-15|COLLECT COD|MAIL|ests along the regular accounts caj 13152|86561|4086|4|48|74282.88|0.01|0.06|A|F|1994-02-18|1994-04-25|1994-02-23|COLLECT COD|FOB|usual pinto beans 13152|23445|8450|5|47|64316.68|0.07|0.08|R|F|1994-05-07|1994-03-29|1994-05-25|NONE|RAIL| gifts cajole unusual 13153|175018|5019|1|23|25139.23|0.06|0.07|N|O|1997-01-26|1997-01-23|1997-02-06|DELIVER IN PERSON|RAIL|nts integrate carefully. 13153|113605|6117|2|7|11330.20|0.04|0.06|N|O|1996-12-03|1997-01-16|1997-01-01|NONE|FOB|eep quickly quickly daring foxes; slyly reg 13153|159188|9189|3|22|27437.96|0.04|0.00|N|O|1997-02-04|1997-01-10|1997-03-01|TAKE BACK RETURN|SHIP|er. furiously unusu 13153|57972|478|4|13|25089.61|0.01|0.05|N|O|1997-03-03|1997-01-08|1997-03-06|DELIVER IN PERSON|AIR|s thrash. regular acco 13153|120799|800|5|32|58233.28|0.03|0.04|N|O|1997-02-28|1997-01-14|1997-03-13|TAKE BACK RETURN|FOB|onic courts affix instruct 13153|61328|1329|6|21|27075.72|0.04|0.07|N|O|1997-03-07|1996-12-11|1997-04-01|COLLECT COD|SHIP|sely ironic requests sleep. furiously exp 13154|16092|3596|1|42|42339.78|0.09|0.05|N|O|1997-12-03|1998-01-02|1997-12-27|TAKE BACK RETURN|MAIL| careful pi 13154|48328|8329|2|31|39565.92|0.04|0.03|N|O|1997-11-02|1997-12-26|1997-11-25|TAKE BACK RETURN|SHIP|hely requests! 
quickly express asymptotes 13154|42280|7289|3|33|40335.24|0.04|0.05|N|O|1997-11-12|1997-11-21|1997-12-06|NONE|MAIL|t the carefully 13154|186864|4419|4|10|19508.60|0.03|0.05|N|O|1998-02-07|1997-12-09|1998-02-11|COLLECT COD|REG AIR|, ironic ideas haggle en 13154|13022|3023|5|45|42075.90|0.01|0.07|N|O|1998-02-09|1998-01-06|1998-02-26|COLLECT COD|RAIL|s among the furiously express 13154|60151|7670|6|34|37779.10|0.10|0.05|N|O|1997-12-12|1997-11-25|1997-12-30|NONE|AIR|ns sleep sometimes? regular, silent 13154|157330|2361|7|29|40232.57|0.07|0.05|N|O|1998-01-06|1997-11-26|1998-01-30|COLLECT COD|FOB|lets. caref 13155|30284|7794|1|5|6071.40|0.06|0.01|N|O|1996-07-15|1996-08-27|1996-07-22|DELIVER IN PERSON|AIR|haggle slyly final hockey pla 13155|174918|9953|2|16|31886.56|0.04|0.06|N|O|1996-07-18|1996-08-25|1996-07-27|DELIVER IN PERSON|FOB|ress instructions nag after the 13155|36087|1094|3|12|12276.96|0.10|0.02|N|O|1996-06-20|1996-08-05|1996-07-14|COLLECT COD|FOB|es cajole. regular foxes cajole care 13155|112909|2910|4|24|46125.60|0.05|0.01|N|O|1996-10-13|1996-08-15|1996-11-09|TAKE BACK RETURN|SHIP| packages affix carefully. 13155|53123|639|5|23|24750.76|0.08|0.08|N|O|1996-10-13|1996-09-04|1996-10-18|TAKE BACK RETURN|REG AIR|g deposits cajole 13155|25952|957|6|31|58216.45|0.08|0.07|N|O|1996-07-24|1996-07-30|1996-07-31|DELIVER IN PERSON|TRUCK|e the slyly unusual deposits. regular, 13155|172680|5198|7|35|61343.80|0.10|0.02|N|O|1996-07-18|1996-07-18|1996-07-27|COLLECT COD|RAIL|s. carefully even deposits afte 13156|27357|7358|1|4|5137.40|0.05|0.06|N|O|1996-01-02|1995-11-14|1996-01-31|NONE|SHIP|nag. furiously ironic depen 13156|188372|8373|2|22|32128.14|0.03|0.00|N|O|1995-12-03|1995-12-18|1995-12-07|TAKE BACK RETURN|AIR|the always regular platelets. stealth 13156|162958|507|3|48|97005.60|0.08|0.08|N|O|1996-01-16|1995-11-27|1996-02-15|NONE|REG AIR|lly quickl 13156|27919|7920|4|32|59101.12|0.10|0.06|N|O|1995-12-20|1995-11-12|1996-01-02|COLLECT COD|SHIP|ess, unusual req 13156|29101|1604|5|26|26782.60|0.09|0.02|N|O|1996-01-04|1995-12-15|1996-01-13|COLLECT COD|REG AIR|eposits along th 13157|149306|6849|1|29|39303.70|0.01|0.08|R|F|1992-03-20|1992-05-03|1992-04-19|COLLECT COD|FOB|ccording to the furiously thin packages. 13157|181884|1885|2|13|25556.44|0.09|0.07|R|F|1992-06-12|1992-03-19|1992-06-26|DELIVER IN PERSON|REG AIR|ages wake even accou 13157|111831|9365|3|34|62656.22|0.07|0.04|R|F|1992-03-17|1992-03-19|1992-03-28|COLLECT COD|REG AIR|ding pinto 13157|143188|731|4|47|57865.46|0.04|0.02|R|F|1992-03-09|1992-04-04|1992-03-14|NONE|FOB|n requests boost 13157|145857|886|5|33|62794.05|0.03|0.04|R|F|1992-04-05|1992-05-02|1992-04-11|COLLECT COD|RAIL|fluffily. fluffily ironic deposits are car 13157|10034|35|6|41|38705.23|0.04|0.07|A|F|1992-02-18|1992-04-13|1992-02-23|DELIVER IN PERSON|FOB|ackages. slyly even requests 13157|25221|7724|7|26|29801.72|0.08|0.08|A|F|1992-05-11|1992-04-14|1992-05-14|COLLECT COD|RAIL|kages nod furiously expr 13158|87174|9683|1|49|56897.33|0.10|0.07|A|F|1992-12-08|1993-01-10|1992-12-18|NONE|FOB|s. 
fluffily pending idea 13158|132117|9657|2|24|27578.64|0.06|0.04|R|F|1993-03-22|1993-01-28|1993-03-27|COLLECT COD|MAIL|furiously against the iron 13158|89014|6539|3|13|13039.13|0.08|0.08|R|F|1993-01-09|1993-02-08|1993-02-08|TAKE BACK RETURN|MAIL|equests affix 13158|49170|4179|4|2|2238.34|0.07|0.03|R|F|1992-12-14|1992-12-28|1992-12-26|DELIVER IN PERSON|REG AIR|uriously even account 13158|194854|2412|5|32|62363.20|0.06|0.08|R|F|1993-01-23|1993-01-21|1993-02-06|DELIVER IN PERSON|RAIL|y ironic asymptotes haggle pending, 13159|99791|4810|1|50|89539.50|0.02|0.05|N|O|1996-12-25|1997-02-14|1997-01-21|DELIVER IN PERSON|AIR|t, regular pinto beans nag 13159|181873|9428|2|26|50826.62|0.09|0.04|N|O|1996-12-20|1997-02-27|1997-01-13|TAKE BACK RETURN|FOB|ckly final somas. deposits af 13159|130937|8477|3|12|23615.16|0.10|0.01|N|O|1997-02-20|1997-01-29|1997-03-14|DELIVER IN PERSON|FOB| affix furiously. flu 13159|57675|7676|4|16|26122.72|0.07|0.02|N|O|1997-02-22|1997-01-14|1997-03-13|COLLECT COD|MAIL|etween the quickly sp 13184|195917|8437|1|12|24154.92|0.06|0.04|N|O|1998-05-01|1998-06-18|1998-05-14|TAKE BACK RETURN|MAIL|s wake ruthlessly. furiou 13185|65360|7867|1|26|34459.36|0.06|0.07|N|O|1997-11-12|1997-09-11|1997-11-18|DELIVER IN PERSON|RAIL|lites use furiously. furiously unusual es 13185|102854|385|2|30|55705.50|0.01|0.05|N|O|1997-09-14|1997-09-07|1997-10-02|NONE|AIR|old dugouts use fur 13185|50782|783|3|49|84906.22|0.08|0.00|N|O|1997-08-15|1997-09-17|1997-08-28|TAKE BACK RETURN|SHIP| deposits wake blithely 13185|85227|5228|4|44|53337.68|0.00|0.03|N|O|1997-09-24|1997-09-24|1997-09-27|NONE|REG AIR|gside of the reg 13185|193629|1187|5|6|10335.72|0.08|0.04|N|O|1997-11-11|1997-08-15|1997-11-13|NONE|FOB|ideas wake 13185|114817|4818|6|30|54954.30|0.01|0.06|N|O|1997-09-23|1997-10-08|1997-09-27|NONE|TRUCK|lithely pending instructions. even idea 13185|123654|8679|7|40|67106.00|0.08|0.05|N|O|1997-10-12|1997-08-21|1997-10-14|COLLECT COD|TRUCK|accounts haggle 13186|44544|7049|1|42|62518.68|0.01|0.05|N|O|1996-12-20|1996-11-20|1996-12-23|NONE|AIR|posits again 13186|50763|5774|2|21|35988.96|0.07|0.03|N|O|1996-10-22|1996-10-30|1996-11-19|NONE|MAIL|en requests cajole slyly ag 13186|164857|4858|3|18|34593.30|0.08|0.04|N|O|1996-11-11|1996-11-27|1996-11-28|DELIVER IN PERSON|REG AIR|dolphins could boost careful 13186|146887|1916|4|35|67685.80|0.04|0.06|N|O|1996-09-29|1996-11-30|1996-10-16|NONE|REG AIR|ecial requests. special, regular acc 13186|113195|8218|5|43|51952.17|0.06|0.04|N|O|1996-10-04|1996-11-22|1996-10-16|NONE|TRUCK|c packages wake blithely about the bold 13187|44054|9063|1|24|23953.20|0.08|0.07|R|F|1994-02-01|1994-02-18|1994-02-08|DELIVER IN PERSON|TRUCK|yly unusual 13187|12310|2311|2|38|46447.78|0.10|0.08|R|F|1994-01-01|1994-01-12|1994-01-12|COLLECT COD|TRUCK|sts. enticingly ironic requests about t 13187|91247|8775|3|5|6191.20|0.05|0.05|A|F|1994-01-28|1994-02-21|1994-01-29|TAKE BACK RETURN|AIR|efully iro 13188|178253|5805|1|26|34612.50|0.10|0.04|N|O|1998-08-27|1998-09-28|1998-09-18|COLLECT COD|MAIL|lly ironic instruc 13188|95732|3260|2|18|31099.14|0.08|0.03|N|O|1998-09-03|1998-09-10|1998-09-09|TAKE BACK RETURN|MAIL|ests. blithely final accounts ar 13188|155895|5896|3|16|31214.24|0.04|0.02|N|O|1998-10-03|1998-10-06|1998-10-17|TAKE BACK RETURN|RAIL|. carefully regular ac 13189|135476|5477|1|2|3022.94|0.06|0.00|A|F|1995-06-04|1995-07-02|1995-06-09|NONE|SHIP|heodolites affix evenly. 
final, iro 13190|188508|6063|1|14|22351.00|0.02|0.01|N|O|1998-07-21|1998-08-01|1998-07-31|DELIVER IN PERSON|RAIL|out the careful 13191|100303|304|1|21|27369.30|0.09|0.05|N|O|1998-03-09|1998-03-24|1998-03-27|DELIVER IN PERSON|REG AIR|ully ironic dugouts a 13191|186999|7000|2|41|85525.59|0.06|0.03|N|O|1998-01-01|1998-02-05|1998-01-29|DELIVER IN PERSON|SHIP|ly final theod 13191|81460|6477|3|33|47568.18|0.03|0.05|N|O|1998-04-09|1998-02-09|1998-04-14|NONE|SHIP|riously packa 13191|82971|5480|4|29|56665.13|0.07|0.03|N|O|1997-12-29|1998-03-21|1998-01-26|NONE|TRUCK| express deposits. ironic ideas solve a 13191|84183|6692|5|24|28012.32|0.03|0.02|N|O|1998-02-13|1998-02-02|1998-03-04|DELIVER IN PERSON|MAIL|ainst the packages haggle slyly quickly eve 13191|61231|8750|6|48|57227.04|0.09|0.02|N|O|1998-02-04|1998-03-21|1998-02-25|COLLECT COD|AIR|uffily silent exc 13191|107952|5483|7|28|54878.60|0.03|0.07|N|O|1998-01-12|1998-02-03|1998-01-30|TAKE BACK RETURN|MAIL|ix carefully along the bravely even packa 13216|149032|6575|1|27|29187.81|0.10|0.06|A|F|1992-05-31|1992-06-19|1992-06-05|COLLECT COD|FOB|ithely ironic 13216|178514|1032|2|37|58922.87|0.06|0.05|R|F|1992-05-29|1992-07-01|1992-06-01|DELIVER IN PERSON|RAIL|gle slyly pending, bold excuses. 13217|46088|8593|1|50|51704.00|0.07|0.06|N|O|1997-09-27|1997-11-11|1997-10-21|NONE|TRUCK|ular requests detect 13218|26902|9405|1|36|65840.40|0.07|0.01|A|F|1994-08-06|1994-08-14|1994-08-27|DELIVER IN PERSON|SHIP|slyly regular pi 13218|132449|7476|2|20|29628.80|0.00|0.08|A|F|1994-05-18|1994-06-20|1994-05-25|NONE|REG AIR|nic courts. blithely pend 13218|57380|2391|3|30|40121.40|0.07|0.04|R|F|1994-07-12|1994-07-07|1994-07-22|DELIVER IN PERSON|REG AIR|ular instructions 13218|180673|674|4|49|85929.83|0.06|0.00|A|F|1994-08-03|1994-07-05|1994-08-11|DELIVER IN PERSON|RAIL|fts sleep slyly 13218|163210|759|5|14|17824.94|0.09|0.01|R|F|1994-08-25|1994-07-26|1994-09-23|NONE|REG AIR|instructions. bold, fluffy pea 13218|68021|8022|6|2|1978.04|0.02|0.04|A|F|1994-08-03|1994-07-10|1994-09-01|DELIVER IN PERSON|RAIL|packages cajole carefully accordi 13219|89562|4579|1|21|32582.76|0.01|0.02|N|O|1998-01-30|1998-01-05|1998-02-05|TAKE BACK RETURN|FOB|lets boost fluffily. furiousl 13219|52365|7376|2|43|56646.48|0.04|0.04|N|O|1997-12-27|1997-12-09|1998-01-16|NONE|SHIP|ironic pinto beans are along the careful 13219|94840|4841|3|29|53210.36|0.00|0.06|N|O|1997-11-02|1997-12-08|1997-11-30|TAKE BACK RETURN|TRUCK|carefully blithely bold deposits 13219|163012|561|4|23|24725.23|0.10|0.07|N|O|1997-12-08|1997-11-29|1997-12-27|DELIVER IN PERSON|FOB|y. even, bol 13220|101899|9430|1|30|57026.70|0.01|0.06|N|O|1998-10-23|1998-10-23|1998-11-13|DELIVER IN PERSON|MAIL|odolites sleep. express instructions g 13220|5946|947|2|1|1851.94|0.01|0.03|N|O|1998-11-16|1998-08-30|1998-12-12|NONE|MAIL|ts. idle courts wake blithely. slyly u 13220|118369|5903|3|22|30521.92|0.02|0.06|N|O|1998-09-07|1998-08-30|1998-09-26|COLLECT COD|MAIL|y unusual packages. fur 13220|24839|4840|4|9|15874.47|0.05|0.01|N|O|1998-09-12|1998-10-10|1998-10-02|COLLECT COD|RAIL|ar deposits. even hockey pla 13220|62821|340|5|44|78488.08|0.01|0.05|N|O|1998-10-06|1998-09-14|1998-10-08|DELIVER IN PERSON|SHIP|lly atop the slyly special requests. caref 13221|101879|6900|1|20|37617.40|0.03|0.06|N|O|1995-11-09|1996-01-09|1995-11-13|TAKE BACK RETURN|AIR|excuses. car 13221|80351|7876|2|22|29289.70|0.10|0.07|N|O|1996-01-18|1995-12-18|1996-02-04|COLLECT COD|AIR| somas. 
quickly express decoys n 13221|56999|9505|3|34|66503.66|0.00|0.03|N|O|1995-12-24|1995-11-15|1996-01-22|DELIVER IN PERSON|TRUCK|cross the ironic 13222|142347|9890|1|20|27786.80|0.06|0.07|N|O|1997-07-29|1997-06-10|1997-08-24|COLLECT COD|TRUCK|c accounts. bold foxes use c 13222|14627|7129|2|10|15416.20|0.07|0.00|N|O|1997-06-17|1997-07-12|1997-07-14|TAKE BACK RETURN|TRUCK|fully bold packages. even, 13222|32065|4569|3|35|34897.10|0.03|0.07|N|O|1997-05-19|1997-07-08|1997-05-30|DELIVER IN PERSON|RAIL|nstructions boost furio 13222|100309|2820|4|14|18330.20|0.05|0.06|N|O|1997-06-02|1997-07-16|1997-06-07|NONE|FOB|along the packages was blithely e 13223|66490|4009|1|30|43694.70|0.06|0.04|N|O|1996-08-22|1996-10-14|1996-09-07|NONE|SHIP|slyly above the slyly bo 13248|131484|6511|1|10|15154.80|0.08|0.07|N|O|1997-05-12|1997-04-15|1997-05-13|TAKE BACK RETURN|AIR|et theodolites n 13248|37260|9764|2|18|21550.68|0.03|0.02|N|O|1997-05-09|1997-03-07|1997-05-24|TAKE BACK RETURN|FOB|elets. furiousl 13248|14590|9593|3|5|7522.95|0.08|0.07|N|O|1997-03-02|1997-04-03|1997-03-30|DELIVER IN PERSON|MAIL|slow deposits det 13249|64030|9043|1|38|37773.14|0.03|0.05|N|O|1997-09-06|1997-09-27|1997-09-25|TAKE BACK RETURN|RAIL|e sometimes 13249|148816|3845|2|24|44755.44|0.08|0.01|N|O|1997-08-14|1997-10-02|1997-08-18|COLLECT COD|TRUCK|inal packages above the final p 13249|143242|5757|3|14|17993.36|0.10|0.08|N|O|1997-09-19|1997-09-17|1997-10-07|DELIVER IN PERSON|REG AIR| lose slyly quickly eve 13249|55007|2523|4|26|25012.00|0.04|0.00|N|O|1997-10-20|1997-08-19|1997-10-21|COLLECT COD|TRUCK|lites. ironic, even d 13249|97193|2212|5|37|44037.03|0.10|0.08|N|O|1997-11-01|1997-09-24|1997-11-22|DELIVER IN PERSON|MAIL|sly pending warho 13249|117474|2497|6|6|8948.82|0.08|0.04|N|O|1997-08-02|1997-10-04|1997-08-13|TAKE BACK RETURN|FOB|hely unusua 13249|179749|2267|7|2|3657.48|0.04|0.05|N|O|1997-10-07|1997-09-26|1997-11-04|TAKE BACK RETURN|AIR|ously sile 13250|38251|755|1|33|39245.25|0.08|0.06|N|O|1997-09-26|1997-10-27|1997-10-19|TAKE BACK RETURN|RAIL|ts. unusual, silent accounts sleep. furiou 13250|82894|419|2|2|3753.78|0.09|0.05|N|O|1997-09-25|1997-11-16|1997-10-17|TAKE BACK RETURN|RAIL|longside of the fi 13250|187013|2050|3|37|40700.37|0.09|0.07|N|O|1997-09-12|1997-09-29|1997-09-24|NONE|TRUCK| above the furiou 13250|114689|2223|4|4|6814.72|0.03|0.05|N|O|1997-10-12|1997-10-22|1997-10-29|TAKE BACK RETURN|AIR|ckly theodol 13250|161673|1674|5|16|27754.72|0.05|0.06|N|O|1997-11-13|1997-09-30|1997-12-10|TAKE BACK RETURN|SHIP| cajole furiously after th 13250|25027|7530|6|4|3808.08|0.04|0.05|N|O|1997-10-07|1997-11-02|1997-10-09|TAKE BACK RETURN|RAIL|ainst the carefully pending pac 13251|184734|7253|1|33|60018.09|0.08|0.07|N|O|1997-07-18|1997-05-23|1997-08-02|TAKE BACK RETURN|SHIP|r packages use along 13251|20369|2872|2|14|18051.04|0.02|0.06|N|O|1997-05-20|1997-06-03|1997-05-21|DELIVER IN PERSON|TRUCK|old requests nag 13251|126895|9408|3|12|23062.68|0.03|0.04|N|O|1997-03-29|1997-05-01|1997-04-19|COLLECT COD|FOB|dolites across the p 13251|108638|6169|4|24|39519.12|0.01|0.03|N|O|1997-04-05|1997-04-29|1997-04-30|TAKE BACK RETURN|SHIP|nd furiously regular p 13251|69033|1540|5|8|8016.24|0.10|0.04|N|O|1997-07-18|1997-05-03|1997-08-16|COLLECT COD|MAIL|ke. blithely silent requests kindle car 13252|50155|5166|1|9|9946.35|0.10|0.04|R|F|1994-06-28|1994-05-18|1994-07-02|COLLECT COD|MAIL|! quickly regular the 13252|145788|5789|2|18|33008.04|0.04|0.03|R|F|1994-05-31|1994-05-06|1994-06-06|NONE|TRUCK| unusual ideas use. 
packag 13252|57846|2857|3|50|90192.00|0.10|0.05|A|F|1994-05-17|1994-06-21|1994-06-08|TAKE BACK RETURN|REG AIR|press pinto beans. platel 13252|16204|1207|4|32|35846.40|0.10|0.06|R|F|1994-05-15|1994-06-04|1994-06-06|DELIVER IN PERSON|FOB|ts wake. quickly special 13252|129488|9489|5|18|27314.64|0.03|0.08|A|F|1994-06-09|1994-04-23|1994-07-01|COLLECT COD|REG AIR|press foxes. sly 13252|101806|6827|6|3|5423.40|0.08|0.00|A|F|1994-06-28|1994-04-29|1994-07-24|NONE|RAIL| haggle furiously. even accounts affix slyl 13253|184302|9339|1|42|58224.60|0.07|0.06|N|O|1997-02-05|1996-12-13|1997-03-02|NONE|REG AIR|haggle. final, special epita 13253|152317|7348|2|28|38340.68|0.00|0.05|N|O|1997-01-28|1997-01-12|1997-02-02|COLLECT COD|SHIP|cies affix quickly deposits. bl 13253|181482|4001|3|16|25015.68|0.06|0.00|N|O|1996-11-05|1996-12-22|1996-11-17|NONE|AIR|cuses above the regular, 13253|158918|8919|4|18|35584.38|0.10|0.08|N|O|1996-11-17|1997-01-03|1996-12-01|NONE|FOB|requests sleep furiously. final 13253|120221|2734|5|50|62061.00|0.03|0.07|N|O|1996-12-17|1996-12-12|1996-12-19|COLLECT COD|SHIP|sleep around th 13253|142928|7957|6|4|7883.68|0.01|0.00|N|O|1996-12-28|1997-01-01|1997-01-04|NONE|REG AIR|ng the blithely regular id 13253|179825|7377|7|30|57144.60|0.10|0.06|N|O|1997-01-20|1996-12-05|1997-02-04|DELIVER IN PERSON|MAIL| furiously near the carefully iron 13254|102772|2773|1|49|86963.73|0.09|0.00|N|O|1998-04-08|1998-03-25|1998-05-03|COLLECT COD|SHIP|regular deposits are after the 13254|80092|2601|2|46|49316.14|0.10|0.03|N|O|1998-03-17|1998-04-25|1998-04-02|TAKE BACK RETURN|MAIL| bold plat 13254|99009|1519|3|37|37296.00|0.02|0.01|N|O|1998-03-11|1998-05-13|1998-04-10|TAKE BACK RETURN|SHIP|er the furiously ironic frays? 13254|42074|2075|4|12|12192.84|0.01|0.06|N|O|1998-05-19|1998-03-25|1998-06-12|NONE|AIR|ously. fluffily unusual de 13254|123202|3203|5|4|4900.80|0.08|0.04|N|O|1998-03-01|1998-04-08|1998-03-18|TAKE BACK RETURN|MAIL|usual, bold requests boost. furiou 13255|178995|4030|1|8|16591.92|0.09|0.08|A|F|1993-11-05|1993-09-06|1993-11-25|COLLECT COD|MAIL|hely ironic pinto beans haggle 13255|163393|8426|2|18|26215.02|0.10|0.03|R|F|1993-07-25|1993-09-10|1993-07-30|TAKE BACK RETURN|TRUCK|equests. blithely pending deposit 13255|140539|5568|3|8|12636.24|0.09|0.02|A|F|1993-09-30|1993-08-24|1993-10-22|COLLECT COD|TRUCK| slyly; furiously bold 13255|29602|4607|4|39|59732.40|0.07|0.00|A|F|1993-10-15|1993-09-09|1993-10-26|DELIVER IN PERSON|AIR|ly pending packages impress of the expr 13255|107218|7219|5|22|26954.62|0.02|0.04|A|F|1993-11-11|1993-10-03|1993-11-16|COLLECT COD|FOB|osits nag carefully after 13280|142760|2761|1|24|43266.24|0.08|0.02|A|F|1994-12-19|1994-11-18|1995-01-10|COLLECT COD|AIR|iously reg 13280|159486|9487|2|41|63364.68|0.05|0.08|R|F|1995-01-10|1994-12-04|1995-01-18|DELIVER IN PERSON|RAIL|es. fluffi 13280|71901|6916|3|45|84280.50|0.07|0.07|A|F|1994-10-05|1994-11-30|1994-10-13|TAKE BACK RETURN|RAIL|al deposits. asy 13280|26007|1012|4|1|933.00|0.04|0.02|R|F|1994-10-08|1994-10-22|1994-10-22|DELIVER IN PERSON|RAIL|s sleep slyly blith 13280|86329|8838|5|42|55243.44|0.07|0.02|R|F|1995-01-02|1994-11-09|1995-01-06|DELIVER IN PERSON|REG AIR|ously after the slyly regular 13281|87629|7630|1|27|43648.74|0.10|0.01|R|F|1992-08-01|1992-06-22|1992-08-16|DELIVER IN PERSON|TRUCK|lyly final deposits. 
slyly even ac 13281|168661|8662|2|50|86483.00|0.09|0.02|R|F|1992-06-04|1992-07-10|1992-06-30|DELIVER IN PERSON|REG AIR|usly against the carefully ir 13281|80721|722|3|27|45946.44|0.00|0.02|A|F|1992-05-09|1992-06-16|1992-05-13|DELIVER IN PERSON|RAIL|nic foxes wake fluffily regu 13282|67657|7658|1|17|27619.05|0.10|0.05|A|F|1992-09-01|1992-06-15|1992-09-15|TAKE BACK RETURN|TRUCK|ronic requests hag 13282|106580|9091|2|1|1586.58|0.08|0.03|R|F|1992-08-29|1992-08-03|1992-09-19|COLLECT COD|TRUCK|uriously silent deposits should integra 13282|74169|1691|3|34|38867.44|0.02|0.02|R|F|1992-07-04|1992-07-15|1992-07-30|TAKE BACK RETURN|RAIL|anent packages cajole packages. silentl 13282|151302|6333|4|7|9473.10|0.07|0.02|R|F|1992-09-06|1992-07-28|1992-09-07|NONE|TRUCK|fully bold asymptotes integrate 13282|125804|5805|5|8|14638.40|0.02|0.07|A|F|1992-05-14|1992-07-03|1992-06-04|COLLECT COD|FOB|ests. slyly ironic pinto beans after t 13282|181804|9359|6|28|52802.40|0.00|0.08|A|F|1992-09-04|1992-07-17|1992-09-22|DELIVER IN PERSON|REG AIR| regularly. blithely pending ideas affi 13283|173733|1285|1|37|66849.01|0.03|0.05|N|O|1998-04-07|1998-03-12|1998-04-26|COLLECT COD|FOB|en, regular excuses along the car 13283|115556|3090|2|12|18858.60|0.05|0.07|N|O|1998-02-18|1998-02-27|1998-02-25|TAKE BACK RETURN|SHIP| x-ray. final, e 13283|144293|6808|3|40|53491.60|0.03|0.06|N|O|1998-03-11|1998-03-02|1998-04-08|NONE|FOB|evenly. silen 13284|84502|2027|1|39|57973.50|0.00|0.03|R|F|1992-09-23|1992-10-10|1992-09-27|TAKE BACK RETURN|FOB|s courts wake regular accounts. carefully 13284|17785|5289|2|31|52786.18|0.01|0.05|A|F|1992-09-18|1992-10-29|1992-10-06|TAKE BACK RETURN|FOB| requests use fu 13284|100757|758|3|12|21093.00|0.01|0.00|R|F|1992-09-24|1992-11-04|1992-10-12|TAKE BACK RETURN|FOB|iously ironic ins 13284|14949|2453|4|20|37278.80|0.03|0.01|R|F|1992-11-21|1992-09-12|1992-12-18|DELIVER IN PERSON|TRUCK|lites wake. even requests 13285|63980|8993|1|5|9719.90|0.00|0.06|N|O|1995-07-26|1995-06-20|1995-08-12|DELIVER IN PERSON|TRUCK|lyly final reque 13286|156962|4508|1|1|2018.96|0.00|0.03|N|O|1995-10-28|1996-01-04|1995-11-25|NONE|FOB| the blithely bold deposits sleep speci 13286|48579|8580|2|5|7637.85|0.04|0.08|N|O|1996-01-11|1995-11-18|1996-01-28|DELIVER IN PERSON|REG AIR|efully express dependen 13286|63726|8739|3|46|77727.12|0.10|0.08|N|O|1996-01-18|1995-12-31|1996-01-21|DELIVER IN PERSON|REG AIR|c pinto beans. packages against the fur 13286|38720|3727|4|3|4976.16|0.09|0.06|N|O|1996-01-07|1995-12-22|1996-02-02|DELIVER IN PERSON|MAIL|eans. regular, exp 13287|100295|5316|1|3|3885.87|0.09|0.04|N|O|1997-07-06|1997-04-12|1997-07-15|DELIVER IN PERSON|FOB|gainst the regular asymptote 13287|57551|7552|2|34|51290.70|0.07|0.00|N|O|1997-05-26|1997-06-03|1997-06-08|NONE|REG AIR|pecial packages. packages sleep final ac 13287|46444|3957|3|13|18075.72|0.01|0.00|N|O|1997-03-23|1997-06-02|1997-04-20|NONE|RAIL|y ironic requests-- dependenc 13287|170942|943|4|16|32207.04|0.02|0.03|N|O|1997-03-17|1997-04-18|1997-03-19|TAKE BACK RETURN|REG AIR|s run quickly. qu 13287|11001|8505|5|41|37392.00|0.03|0.05|N|O|1997-06-16|1997-06-04|1997-07-11|DELIVER IN PERSON|FOB|ly fluffily enticing accounts. accounts 13287|91125|3635|6|8|8928.96|0.08|0.02|N|O|1997-06-15|1997-05-15|1997-07-12|DELIVER IN PERSON|SHIP| deposits sleep after the quic 13287|123011|5524|7|18|18612.18|0.05|0.01|N|O|1997-06-25|1997-05-14|1997-07-25|COLLECT COD|TRUCK|lly slyly regular escapades. 
pin 13312|31080|1081|1|12|12132.96|0.02|0.02|N|O|1997-01-27|1997-01-11|1997-02-23|TAKE BACK RETURN|FOB|ress requests wake. permanently final 13312|156097|1128|2|49|56501.41|0.10|0.03|N|O|1996-12-19|1997-01-15|1997-01-12|TAKE BACK RETURN|MAIL|ffix quickly p 13313|23380|887|1|36|46921.68|0.10|0.01|A|F|1994-10-19|1994-09-17|1994-11-13|NONE|REG AIR|uriously. slyly unusual 13313|139680|2194|2|19|32673.92|0.02|0.01|A|F|1994-10-04|1994-09-02|1994-11-01|NONE|MAIL|ily ironic ideas above the blithely unusua 13313|196897|4455|3|11|21932.79|0.00|0.08|R|F|1994-11-09|1994-09-13|1994-11-21|DELIVER IN PERSON|SHIP| carefully ironic deposits 13313|41034|6043|4|7|6825.21|0.05|0.01|A|F|1994-10-15|1994-08-17|1994-10-22|TAKE BACK RETURN|RAIL|unts. regular theodolit 13313|53701|6207|5|30|49641.00|0.00|0.02|R|F|1994-11-04|1994-09-03|1994-11-13|TAKE BACK RETURN|AIR|ular pinto beans 13313|138304|3331|6|47|63088.10|0.08|0.06|A|F|1994-10-06|1994-09-26|1994-10-09|DELIVER IN PERSON|RAIL|ously special theodolites nag quickly e 13313|73935|6443|7|37|70630.41|0.08|0.05|R|F|1994-11-05|1994-08-28|1994-11-27|DELIVER IN PERSON|MAIL|ent foxes thr 13314|8884|3885|1|41|73508.08|0.04|0.06|A|F|1994-06-05|1994-07-18|1994-06-07|TAKE BACK RETURN|RAIL|lithely regular accounts. even 13314|111786|6809|2|36|64720.08|0.05|0.08|A|F|1994-06-25|1994-08-02|1994-07-13|COLLECT COD|MAIL|after the fluffily express account 13314|183707|3708|3|12|21488.40|0.02|0.05|A|F|1994-06-24|1994-06-24|1994-07-18|TAKE BACK RETURN|AIR|tions sleep. unusual 13314|161050|8599|4|32|35553.60|0.03|0.07|A|F|1994-09-05|1994-07-23|1994-09-12|COLLECT COD|SHIP|al packages. blithely final packages 13314|162841|7874|5|1|1903.84|0.03|0.01|A|F|1994-07-25|1994-06-18|1994-08-20|NONE|RAIL| dolphins about 13314|66039|1052|6|46|46231.38|0.00|0.04|R|F|1994-08-20|1994-07-05|1994-09-12|TAKE BACK RETURN|RAIL|. dogged ideas x-ray slyly furiously 13315|69925|7444|1|22|41688.24|0.10|0.05|N|O|1996-12-27|1996-12-27|1997-01-14|DELIVER IN PERSON|SHIP|deposits use furiously across the 13315|23003|8008|2|27|25002.00|0.07|0.03|N|O|1997-01-03|1997-01-11|1997-01-20|TAKE BACK RETURN|REG AIR| the even foxes nag blit 13315|2408|7409|3|8|10483.20|0.00|0.05|N|O|1996-12-27|1997-01-01|1996-12-31|DELIVER IN PERSON|AIR|olites. careful 13315|56475|6476|4|43|61553.21|0.07|0.07|N|O|1996-12-10|1997-01-13|1997-01-02|COLLECT COD|RAIL|gle closely with the blithely bold req 13315|174435|4436|5|29|43773.47|0.03|0.06|N|O|1996-10-24|1996-11-15|1996-11-21|NONE|FOB|arefully final depo 13315|122968|5481|6|10|19909.60|0.07|0.03|N|O|1996-12-12|1996-12-20|1996-12-29|DELIVER IN PERSON|RAIL|equests. 
regular accoun 13316|107528|7529|1|1|1535.52|0.10|0.02|R|F|1993-01-26|1993-01-09|1993-02-22|NONE|AIR|gular foxes are fluffily around t 13316|11845|9349|2|43|75544.12|0.10|0.05|A|F|1993-01-25|1992-12-08|1993-02-08|DELIVER IN PERSON|TRUCK|ts detect furiously according to the exp 13317|120146|5171|1|22|25655.08|0.06|0.02|N|O|1996-04-25|1996-06-26|1996-05-16|COLLECT COD|MAIL|unts cajole bli 13317|77323|7324|2|13|16904.16|0.07|0.01|N|O|1996-07-08|1996-07-06|1996-07-17|TAKE BACK RETURN|SHIP|o the express, 13318|119497|2009|1|44|66725.56|0.06|0.02|N|O|1997-09-14|1997-08-03|1997-10-01|TAKE BACK RETURN|MAIL|ly among the 13318|169978|7527|2|50|102398.50|0.05|0.01|N|O|1997-06-16|1997-07-14|1997-07-03|TAKE BACK RETURN|RAIL| to the even, pending deposits 13318|8438|939|3|11|14810.73|0.02|0.05|N|O|1997-08-26|1997-08-19|1997-09-08|TAKE BACK RETURN|AIR| sometimes regular ex 13318|16915|1918|4|5|9159.55|0.03|0.07|N|O|1997-06-27|1997-07-21|1997-07-20|NONE|RAIL|ounts promise according to the blithe 13318|89041|4058|5|39|40171.56|0.01|0.02|N|O|1997-08-13|1997-07-16|1997-08-20|NONE|RAIL|epitaphs use sly 13318|111245|6268|6|16|20099.84|0.06|0.07|N|O|1997-07-15|1997-08-11|1997-07-19|DELIVER IN PERSON|TRUCK|y carefully express packages. r 13318|91096|8624|7|11|11957.99|0.00|0.06|N|O|1997-10-03|1997-08-11|1997-11-02|NONE|FOB|ess instruction 13319|37395|7396|1|8|10659.12|0.03|0.01|N|O|1996-04-25|1996-04-10|1996-05-16|COLLECT COD|SHIP|ld deposits nag. ironic i 13319|95479|7989|2|46|67825.62|0.06|0.00|N|O|1996-04-23|1996-04-17|1996-05-16|DELIVER IN PERSON|MAIL|lly excuses 13319|103702|1233|3|50|85285.00|0.04|0.07|N|O|1996-01-22|1996-04-12|1996-02-10|DELIVER IN PERSON|RAIL|losely final requests are sly 13344|62405|9924|1|17|23245.80|0.09|0.02|N|O|1997-04-07|1997-05-26|1997-04-19|NONE|REG AIR|the slyly regu 13344|32413|9923|2|5|6727.05|0.00|0.06|N|O|1997-04-05|1997-05-19|1997-04-17|DELIVER IN PERSON|TRUCK|ckages promise slyly fi 13344|154057|6573|3|30|33331.50|0.08|0.02|N|O|1997-07-17|1997-05-20|1997-08-15|DELIVER IN PERSON|AIR|sts. fluffily bold plate 13344|29892|4897|4|17|30972.13|0.04|0.01|N|O|1997-07-12|1997-05-21|1997-07-30|TAKE BACK RETURN|MAIL|counts wak 13345|54326|9337|1|16|20485.12|0.02|0.00|A|F|1992-10-25|1992-10-05|1992-10-26|NONE|AIR| final requests ca 13345|53468|8479|2|45|63965.70|0.04|0.06|R|F|1992-11-13|1992-11-29|1992-11-16|TAKE BACK RETURN|TRUCK|y final requests hag 13345|32113|9623|3|38|39714.18|0.04|0.08|R|F|1992-12-08|1992-10-11|1992-12-20|NONE|AIR|elets. carefully special requests sleep flu 13346|29815|4820|1|15|26172.15|0.05|0.04|A|F|1993-01-23|1993-01-17|1993-02-04|DELIVER IN PERSON|SHIP|ess accounts sleep s 13346|93803|8822|2|42|75465.60|0.00|0.01|R|F|1993-02-22|1993-01-29|1993-03-11|COLLECT COD|REG AIR|rbits about the e 13346|141116|1117|3|32|37027.52|0.05|0.08|A|F|1992-12-15|1993-01-28|1993-01-07|COLLECT COD|FOB|thely pending foxes haggle blithely. 
s 13346|131485|6512|4|26|39428.48|0.02|0.07|A|F|1993-01-05|1993-01-08|1993-02-02|COLLECT COD|RAIL|structions ab 13346|63527|3528|5|32|47696.64|0.08|0.06|R|F|1992-12-05|1993-01-17|1992-12-27|TAKE BACK RETURN|RAIL|ke regularly according to th 13346|81714|4223|6|8|13565.68|0.08|0.01|R|F|1993-01-13|1993-01-25|1993-01-19|NONE|RAIL|, special foxes nag carefu 13346|166844|4393|7|8|15286.72|0.04|0.00|A|F|1993-01-10|1992-12-20|1993-01-21|TAKE BACK RETURN|RAIL|ts; pending, p 13347|134053|1593|1|26|28263.30|0.07|0.05|A|F|1993-07-01|1993-07-13|1993-07-11|DELIVER IN PERSON|REG AIR|ronic deposits 13347|12809|7812|2|44|75759.20|0.09|0.06|R|F|1993-05-13|1993-06-07|1993-05-26|DELIVER IN PERSON|FOB| furiously 13347|78763|3778|3|30|52252.80|0.02|0.05|A|F|1993-06-13|1993-05-28|1993-07-07|DELIVER IN PERSON|RAIL|regular depo 13347|155171|7687|4|39|47820.63|0.10|0.07|A|F|1993-06-20|1993-06-06|1993-06-28|COLLECT COD|RAIL|ang slyly after the regular foxes. stealt 13347|154579|4580|5|34|55541.38|0.05|0.03|A|F|1993-08-10|1993-07-09|1993-08-15|DELIVER IN PERSON|RAIL| requests nag. sentiments sleep fluffily. 13347|197839|359|6|11|21305.13|0.10|0.07|R|F|1993-05-23|1993-06-24|1993-06-13|COLLECT COD|FOB|lar excuses. unusual requests use sly 13348|6866|4367|1|11|19501.46|0.10|0.06|R|F|1993-11-22|1994-01-24|1993-12-10|TAKE BACK RETURN|RAIL|lithely about the blithely even d 13348|182702|2703|2|19|33909.30|0.06|0.01|R|F|1993-11-26|1993-12-23|1993-12-03|NONE|REG AIR|l accounts. slyly regular ideas are b 13348|65644|657|3|1|1609.64|0.08|0.01|R|F|1993-12-02|1994-01-20|1993-12-12|NONE|TRUCK|instructions c 13348|69169|1676|4|15|17072.40|0.03|0.03|A|F|1993-12-19|1993-12-26|1994-01-06|COLLECT COD|RAIL|ffily. furiously bold instructions 13348|110833|8367|5|28|51627.24|0.08|0.06|R|F|1994-02-02|1994-01-15|1994-02-26|NONE|AIR|gle daring plate 13349|198325|5883|1|20|28466.40|0.06|0.08|A|F|1994-09-28|1994-11-02|1994-10-14|DELIVER IN PERSON|MAIL|usly. quickly pending d 13349|44634|7139|2|40|63145.20|0.04|0.02|A|F|1994-11-03|1994-11-30|1994-12-01|NONE|REG AIR|uests. even accounts p 13350|174351|9386|1|49|69842.15|0.05|0.08|N|O|1998-09-04|1998-07-09|1998-09-24|NONE|TRUCK| the fluffily final packages haggle bl 13350|109521|2032|2|31|47446.12|0.08|0.07|N|O|1998-06-29|1998-07-29|1998-07-24|NONE|FOB|al pinto beans haggle along the sile 13350|108285|3306|3|2|2586.56|0.09|0.02|N|O|1998-07-10|1998-06-08|1998-07-31|COLLECT COD|AIR|according 13350|132976|516|4|39|78349.83|0.08|0.07|N|O|1998-08-13|1998-08-01|1998-08-29|COLLECT COD|AIR| wake. ironic waters nag acr 13350|122830|5343|5|35|64849.05|0.08|0.01|N|O|1998-05-25|1998-06-29|1998-06-02|COLLECT COD|MAIL|ans instead of the slyly pending d 13350|193719|3720|6|24|43505.04|0.00|0.08|N|O|1998-08-19|1998-07-12|1998-09-12|TAKE BACK RETURN|MAIL|e requests. pending excus 13351|18549|6053|1|42|61636.68|0.02|0.07|R|F|1993-08-19|1993-07-19|1993-08-26|NONE|FOB|uickly even deposits doubt slyly 13351|146543|1572|2|31|49275.74|0.06|0.08|R|F|1993-05-16|1993-06-06|1993-06-12|DELIVER IN PERSON|AIR|. blithely p 13351|142597|5112|3|4|6558.36|0.10|0.01|A|F|1993-08-08|1993-07-14|1993-09-07|TAKE BACK RETURN|AIR|evenly ironic i 13351|22323|4826|4|29|36114.28|0.09|0.05|A|F|1993-08-30|1993-07-07|1993-09-02|DELIVER IN PERSON|REG AIR|fluffily after the quickly ironic reque 13351|172027|7062|5|42|46158.84|0.05|0.03|A|F|1993-07-02|1993-07-14|1993-07-27|DELIVER IN PERSON|RAIL|ly unusual packages are about the quick 13351|43044|5549|6|7|6909.28|0.02|0.08|R|F|1993-09-03|1993-06-16|1993-09-21|TAKE BACK RETURN|SHIP| accounts. 
furiously even dependencies ha 13376|39894|9895|1|43|78857.27|0.09|0.06|A|F|1992-10-20|1992-12-22|1992-10-22|NONE|SHIP|t the ironic deposits. blithe 13376|92575|5085|2|17|26648.69|0.02|0.08|A|F|1992-11-02|1992-11-19|1992-11-04|DELIVER IN PERSON|RAIL|egular theodoli 13376|91732|4242|3|27|46540.71|0.10|0.01|A|F|1992-12-28|1992-12-07|1993-01-23|NONE|MAIL|ns are carefully caref 13376|138043|5583|4|28|30269.12|0.08|0.07|A|F|1992-12-27|1993-01-03|1993-01-16|NONE|REG AIR|lithely furio 13376|72929|7944|5|25|47548.00|0.05|0.06|A|F|1993-01-06|1993-01-01|1993-01-21|DELIVER IN PERSON|TRUCK|al, unusual packages unw 13376|111863|6886|6|37|69369.82|0.00|0.00|A|F|1992-10-14|1992-11-17|1992-11-06|COLLECT COD|AIR|ckages. final requests wake 13377|71563|1564|1|35|53709.60|0.03|0.07|N|O|1995-11-15|1995-09-06|1995-11-28|COLLECT COD|TRUCK|press dolph 13377|147329|9844|2|1|1376.32|0.02|0.00|N|O|1995-11-17|1995-09-30|1995-12-07|DELIVER IN PERSON|AIR|lthy excuses. 13378|125772|797|1|37|66517.49|0.08|0.07|A|F|1994-09-30|1994-12-10|1994-10-25|TAKE BACK RETURN|AIR|haggle slyly. blithely bold instruct 13378|40204|5213|2|2|2288.40|0.04|0.01|A|F|1994-09-30|1994-12-09|1994-10-05|COLLECT COD|REG AIR|the thin packages. f 13378|98970|6498|3|50|98448.50|0.10|0.08|A|F|1994-10-18|1994-11-21|1994-11-09|TAKE BACK RETURN|FOB|kages sleep final, silent pack 13378|174226|9261|4|21|27304.62|0.03|0.01|R|F|1994-10-11|1994-11-07|1994-11-02|DELIVER IN PERSON|MAIL|blithely final foxes x-ray quick 13378|9127|1628|5|37|38336.44|0.06|0.07|A|F|1994-10-03|1994-11-11|1994-10-17|TAKE BACK RETURN|FOB| fluffy asymptotes sleep furiously 13379|152325|7356|1|40|55092.80|0.04|0.07|R|F|1994-02-01|1993-12-14|1994-03-03|DELIVER IN PERSON|RAIL|y regular packages. s 13379|119146|9147|2|8|9321.12|0.05|0.00|R|F|1993-12-05|1993-12-25|1993-12-26|DELIVER IN PERSON|RAIL|y bold deposits 13379|82304|4813|3|2|2572.60|0.04|0.02|R|F|1993-11-26|1994-01-26|1993-12-14|COLLECT COD|SHIP|l deposits. special realms sleep fluffil 13379|66717|1730|4|24|40409.04|0.04|0.03|A|F|1994-03-08|1994-01-11|1994-03-29|COLLECT COD|SHIP|ong the blithely silent packages. 13380|185482|8001|1|14|21944.72|0.09|0.00|R|F|1994-09-29|1994-09-11|1994-10-05|NONE|RAIL|ding to the requests. carefully ironi 13380|193606|6126|2|5|8498.00|0.02|0.06|A|F|1994-10-06|1994-09-30|1994-10-11|TAKE BACK RETURN|REG AIR|s after the carefu 13380|129993|7530|3|21|42482.79|0.09|0.05|A|F|1994-10-20|1994-08-24|1994-11-01|DELIVER IN PERSON|FOB| regular theodolites 13380|184303|6822|4|43|59653.90|0.08|0.07|R|F|1994-07-17|1994-08-29|1994-07-21|TAKE BACK RETURN|REG AIR|lets. even at 13381|79903|7425|1|32|60252.80|0.01|0.04|N|O|1995-10-14|1995-09-28|1995-11-13|TAKE BACK RETURN|MAIL|ly final requests dete 13381|120007|7544|2|29|29783.00|0.07|0.05|N|O|1995-08-28|1995-09-17|1995-09-16|COLLECT COD|MAIL|ilent orbits along the fluffily unusual 13381|150062|63|3|12|13344.72|0.10|0.03|N|O|1995-12-12|1995-10-14|1995-12-14|COLLECT COD|MAIL|y. carefully special requests ca 13382|54279|6785|1|27|33298.29|0.02|0.05|N|O|1996-02-16|1996-01-14|1996-02-24|TAKE BACK RETURN|RAIL|er blithely bl 13382|66557|9064|2|15|22853.25|0.03|0.02|N|O|1996-02-22|1995-12-02|1996-03-04|COLLECT COD|FOB|tions. even, pending foxes integrat 13382|7359|7360|3|1|1266.35|0.09|0.08|N|O|1996-01-05|1996-01-16|1996-01-18|DELIVER IN PERSON|AIR|s detect closely a 13383|72424|9946|1|36|50271.12|0.09|0.07|R|F|1992-10-03|1992-07-21|1992-10-15|NONE|TRUCK|hely idle foxes 13383|26208|1213|2|30|34026.00|0.00|0.06|R|F|1992-07-01|1992-08-12|1992-07-04|DELIVER IN PERSON|FOB|y. 
regular, even excuses aff 13383|68836|8837|3|44|79412.52|0.07|0.01|A|F|1992-08-16|1992-08-07|1992-08-18|DELIVER IN PERSON|MAIL|osits after the fluffily final packages 13383|112631|165|4|6|9861.78|0.03|0.03|A|F|1992-06-18|1992-07-31|1992-07-12|COLLECT COD|FOB|y slowly carefully i 13408|39713|9714|1|34|56192.14|0.05|0.04|R|F|1994-06-04|1994-04-24|1994-07-03|DELIVER IN PERSON|SHIP|. carefully pending accounts according 13408|48560|3569|2|32|48273.92|0.07|0.07|A|F|1994-04-27|1994-04-12|1994-05-18|TAKE BACK RETURN|MAIL|symptotes haggle around the ironic 13408|137184|9698|3|20|24423.60|0.06|0.01|R|F|1994-06-02|1994-05-10|1994-06-14|NONE|FOB|blithely un 13408|34970|4971|4|40|76198.80|0.01|0.07|A|F|1994-06-11|1994-05-13|1994-07-08|COLLECT COD|MAIL|l requests hang silently. sile 13408|131208|6235|5|40|49568.00|0.05|0.02|R|F|1994-05-04|1994-04-19|1994-05-22|COLLECT COD|REG AIR|ly special 13409|178581|3616|1|28|46468.24|0.06|0.05|R|F|1993-08-30|1993-09-27|1993-09-10|NONE|AIR|regular accounts-- final decoys pri 13410|131558|4072|1|4|6358.20|0.08|0.02|A|F|1993-09-10|1993-07-20|1993-09-17|COLLECT COD|TRUCK|st carefully across the unusual a 13410|90100|7628|2|12|13081.20|0.08|0.05|A|F|1993-09-18|1993-08-10|1993-09-25|COLLECT COD|RAIL|carefully ironic requests haggl 13410|104793|2324|3|1|1797.79|0.09|0.06|A|F|1993-08-24|1993-07-21|1993-09-15|DELIVER IN PERSON|REG AIR|deposits. ruthless, u 13410|50875|5886|4|32|58427.84|0.08|0.08|A|F|1993-09-13|1993-08-19|1993-09-22|COLLECT COD|MAIL|s nag furiously quickly final in 13411|137967|7968|1|21|42104.16|0.03|0.03|R|F|1994-07-22|1994-05-18|1994-07-26|COLLECT COD|AIR| packages hag 13411|5374|375|2|4|5117.48|0.06|0.01|R|F|1994-07-25|1994-06-16|1994-08-23|TAKE BACK RETURN|MAIL|ipliers. quietly ironic instructions 13411|104310|9331|3|22|28914.82|0.04|0.00|A|F|1994-06-29|1994-05-27|1994-07-07|COLLECT COD|TRUCK| special platelets. accounts ac 13411|132974|5488|4|26|52181.22|0.04|0.05|R|F|1994-07-17|1994-05-19|1994-08-13|NONE|MAIL|arefully special ideas. b 13411|79657|7179|5|29|47462.85|0.09|0.06|R|F|1994-08-05|1994-06-14|1994-08-12|TAKE BACK RETURN|TRUCK| ironic pl 13412|120961|3474|1|49|97116.04|0.09|0.03|N|O|1997-09-04|1997-09-12|1997-09-16|DELIVER IN PERSON|FOB|ges about the car 13412|189355|9356|2|39|56329.65|0.09|0.01|N|O|1997-09-26|1997-09-30|1997-10-24|TAKE BACK RETURN|SHIP|pending pinto beans cajole expr 13413|89034|9035|1|47|48082.41|0.05|0.01|A|F|1993-03-12|1993-03-28|1993-03-22|NONE|REG AIR|ccounts sleep carefully 13413|184100|6619|2|3|3552.30|0.06|0.02|R|F|1993-03-28|1993-03-29|1993-04-11|TAKE BACK RETURN|FOB|egular foxes belie 13413|154497|2043|3|44|68265.56|0.10|0.01|A|F|1993-02-21|1993-03-04|1993-03-11|DELIVER IN PERSON|FOB|g. slyly r 13413|24452|4453|4|46|63316.70|0.08|0.03|R|F|1993-05-08|1993-04-25|1993-05-09|TAKE BACK RETURN|REG AIR|lent theodolites haggle ca 13413|157166|2197|5|26|31802.16|0.06|0.06|R|F|1993-04-01|1993-03-13|1993-04-19|TAKE BACK RETURN|SHIP| boldly express dinos 13414|5288|289|1|10|11932.80|0.01|0.07|N|O|1995-07-28|1995-08-24|1995-08-10|TAKE BACK RETURN|FOB|otes use qu 13414|73234|8249|2|1|1207.23|0.04|0.02|N|O|1995-10-27|1995-08-21|1995-11-10|DELIVER IN PERSON|TRUCK|ets detect alo 13414|27491|4998|3|30|42554.70|0.07|0.07|N|O|1995-07-15|1995-08-25|1995-08-10|NONE|MAIL|cuses. blithely iro 13414|28556|8557|4|35|51959.25|0.02|0.01|N|O|1995-08-24|1995-08-07|1995-08-28|NONE|AIR|usual packages. even 13414|134362|6876|5|45|62836.20|0.05|0.04|N|O|1995-08-02|1995-08-08|1995-08-12|NONE|REG AIR|ions cajole blithely. 
car 13415|130737|8277|1|19|33586.87|0.02|0.01|N|O|1997-06-18|1997-08-05|1997-07-06|NONE|FOB| the packages. regular account 13415|90295|2805|2|16|20564.64|0.03|0.01|N|O|1997-07-30|1997-08-25|1997-08-14|DELIVER IN PERSON|REG AIR|osits sleep 13415|183846|8883|3|2|3859.68|0.01|0.00|N|O|1997-07-05|1997-08-07|1997-07-16|DELIVER IN PERSON|MAIL| final accounts 13415|95662|5663|4|31|51387.46|0.07|0.05|N|O|1997-09-26|1997-08-01|1997-10-06|DELIVER IN PERSON|MAIL|ccounts should have to w 13415|190275|5314|5|32|43688.64|0.04|0.04|N|O|1997-09-06|1997-08-28|1997-09-10|DELIVER IN PERSON|AIR|s. express ac 13415|138134|5674|6|44|51573.72|0.04|0.03|N|O|1997-06-27|1997-07-31|1997-07-16|TAKE BACK RETURN|MAIL|tructions. fluffily bold asy 13415|16408|3912|7|45|59598.00|0.01|0.00|N|O|1997-07-06|1997-08-09|1997-07-07|TAKE BACK RETURN|FOB|riously regular packages nag around the sl 13440|163059|5576|1|17|19074.85|0.08|0.02|R|F|1994-01-25|1993-11-15|1994-02-14|TAKE BACK RETURN|RAIL|e carefully. 13441|26849|1854|1|2|3551.68|0.06|0.07|N|O|1995-10-22|1995-10-03|1995-11-12|NONE|TRUCK|tions. requests at the car 13441|39363|6873|2|46|59908.56|0.05|0.02|N|O|1995-11-14|1995-11-29|1995-11-27|DELIVER IN PERSON|RAIL|cies. quickly regular asymptotes 13442|115498|8010|1|45|68107.05|0.10|0.04|N|O|1997-11-13|1997-09-25|1997-11-19|COLLECT COD|REG AIR| instruction 13442|170248|7800|2|3|3954.72|0.01|0.04|N|O|1997-11-24|1997-09-25|1997-12-18|NONE|REG AIR|ickly regular theodolites. fu 13442|151658|9204|3|46|78643.90|0.02|0.07|N|O|1997-11-20|1997-10-11|1997-11-24|NONE|FOB|ke carefully aft 13442|113393|5905|4|22|30940.58|0.09|0.01|N|O|1997-10-17|1997-09-28|1997-10-29|COLLECT COD|FOB|ironic dinos. stealthily even ideas ha 13443|195394|7914|1|14|20851.46|0.01|0.00|A|F|1994-01-23|1993-11-14|1994-01-27|COLLECT COD|AIR|ide of the final packages. slow 13443|170463|5498|2|36|55204.56|0.03|0.07|A|F|1993-11-19|1993-12-27|1993-11-28|TAKE BACK RETURN|MAIL|ublate along the furiously pending 13443|158027|3058|3|23|24955.46|0.07|0.06|R|F|1994-01-29|1993-11-15|1994-01-31|COLLECT COD|RAIL| across the eve 13443|157864|2895|4|21|40359.06|0.03|0.05|A|F|1994-01-24|1993-12-13|1994-02-18|NONE|SHIP|l ideas are sl 13443|4317|1818|5|20|24426.20|0.03|0.00|R|F|1993-10-29|1993-11-26|1993-11-19|DELIVER IN PERSON|SHIP|uests according to the slyly fi 13443|143237|780|6|13|16642.99|0.02|0.07|A|F|1993-10-22|1993-12-29|1993-10-25|NONE|REG AIR|unts. requests sleep f 13443|176092|6093|7|17|19857.53|0.04|0.06|R|F|1993-11-13|1993-12-18|1993-11-14|COLLECT COD|TRUCK|hlessly unusual asymptotes. quick 13444|175368|5369|1|22|31753.92|0.05|0.01|A|F|1992-04-09|1992-03-02|1992-04-18|COLLECT COD|TRUCK|ep slyly above the special 13444|197889|7890|2|48|95370.24|0.05|0.04|A|F|1992-04-29|1992-02-08|1992-05-20|COLLECT COD|AIR|ickly even accounts. 13444|118739|6273|3|12|21092.76|0.06|0.08|R|F|1992-03-21|1992-02-13|1992-04-03|DELIVER IN PERSON|MAIL|hely at the e 13444|39643|7153|4|5|7913.20|0.04|0.07|A|F|1992-01-15|1992-04-01|1992-01-17|NONE|REG AIR|quests. fluffily final packages 13444|73626|8641|5|38|60785.56|0.04|0.01|A|F|1992-01-24|1992-03-13|1992-02-18|DELIVER IN PERSON|REG AIR|foxes about the carefully pen 13444|26739|4246|6|36|59966.28|0.05|0.05|R|F|1992-04-25|1992-02-19|1992-05-13|COLLECT COD|TRUCK|ly, special packages. regular reque 13444|185975|5976|7|35|72133.95|0.09|0.03|A|F|1992-01-20|1992-04-04|1992-02-19|DELIVER IN PERSON|TRUCK|y. final asymptotes believe finally. 
13445|80153|154|1|14|15864.10|0.09|0.00|N|O|1998-01-15|1998-03-10|1998-02-11|COLLECT COD|REG AIR|lly above the blithely 13446|196044|1083|1|13|14820.52|0.02|0.00|N|O|1996-06-28|1996-04-17|1996-07-21|DELIVER IN PERSON|AIR|ular deposits. blithely regular acco 13446|97688|198|2|17|28656.56|0.02|0.02|N|O|1996-06-03|1996-05-13|1996-06-29|COLLECT COD|FOB|ly along the fluf 13447|160203|7752|1|45|56844.00|0.01|0.08|A|F|1993-02-06|1993-02-09|1993-03-05|NONE|FOB|ckly about the furiously ironi 13472|75219|7727|1|30|35826.30|0.08|0.04|N|O|1995-11-17|1995-12-17|1995-12-02|TAKE BACK RETURN|RAIL|pinto bean 13472|195930|8450|2|3|6077.79|0.06|0.02|N|O|1996-02-19|1996-01-09|1996-03-15|DELIVER IN PERSON|AIR|slyly after the carefully 13472|184551|2106|3|9|14719.95|0.01|0.06|N|O|1995-11-22|1995-12-08|1995-12-11|DELIVER IN PERSON|REG AIR|riously pending packages 13473|101995|1996|1|2|3993.98|0.06|0.05|N|O|1996-03-27|1996-05-09|1996-04-12|DELIVER IN PERSON|TRUCK|according to the carefully even idea 13473|16677|6678|2|13|20717.71|0.08|0.05|N|O|1996-06-26|1996-06-08|1996-06-30|DELIVER IN PERSON|MAIL|gular sheave 13473|73779|8794|3|27|47324.79|0.05|0.06|N|O|1996-07-11|1996-05-17|1996-07-18|DELIVER IN PERSON|SHIP|luffy tithes hagg 13473|196111|8631|4|18|21727.98|0.08|0.05|N|O|1996-05-27|1996-05-23|1996-05-28|COLLECT COD|AIR|into beans. b 13473|20902|903|5|19|34635.10|0.06|0.00|N|O|1996-04-26|1996-05-15|1996-05-22|COLLECT COD|AIR| promise abo 13473|89493|9494|6|46|68194.54|0.06|0.04|N|O|1996-06-03|1996-05-08|1996-06-16|NONE|AIR|wake fluffily 13474|69662|9663|1|20|32633.20|0.04|0.04|N|O|1997-06-13|1997-07-09|1997-06-18|DELIVER IN PERSON|TRUCK|heodolites detect 13474|5774|5775|2|37|62151.49|0.05|0.04|N|O|1997-06-27|1997-08-13|1997-07-22|NONE|SHIP|al waters. blithely final pinto beans nag c 13474|27551|7552|3|30|44356.50|0.09|0.07|N|O|1997-07-11|1997-07-10|1997-07-24|NONE|TRUCK|nal, ironic instructions are bravely acc 13475|166185|6186|1|45|56303.10|0.04|0.04|A|F|1994-07-19|1994-07-29|1994-07-28|NONE|FOB|y ironic accounts integrate qu 13475|57505|2516|2|45|65812.50|0.07|0.05|A|F|1994-08-09|1994-07-13|1994-08-20|DELIVER IN PERSON|FOB|ly final warthogs along the car 13475|75395|2917|3|36|49334.04|0.02|0.03|R|F|1994-09-16|1994-08-07|1994-09-18|NONE|SHIP|ong the carefully even theodol 13475|146977|2006|4|22|44527.34|0.07|0.04|A|F|1994-06-25|1994-08-28|1994-07-02|TAKE BACK RETURN|RAIL|lar accounts. eve 13475|28022|8023|5|12|11400.24|0.09|0.01|A|F|1994-07-05|1994-08-01|1994-07-11|DELIVER IN PERSON|REG AIR|y. bold foxes sleep. slyly permanent pla 13475|155318|349|6|24|32959.44|0.03|0.03|A|F|1994-06-19|1994-08-02|1994-07-01|DELIVER IN PERSON|TRUCK| the thinly reg 13475|14441|6943|7|22|29819.68|0.03|0.04|A|F|1994-10-04|1994-08-26|1994-10-31|NONE|SHIP|ular theodolites. bli 13476|103419|3420|1|10|14224.10|0.05|0.05|A|F|1994-01-25|1993-12-07|1994-01-26|DELIVER IN PERSON|MAIL|ly even ideas. 13476|148734|1249|2|30|53481.90|0.09|0.06|R|F|1994-01-30|1993-12-04|1994-02-04|DELIVER IN PERSON|TRUCK| regular requests. caref 13476|161390|3907|3|38|55152.82|0.09|0.00|R|F|1994-02-08|1993-12-27|1994-02-09|NONE|AIR|blithely special ideas haggle fluffi 13476|69597|7116|4|29|45431.11|0.06|0.04|R|F|1993-12-21|1993-12-18|1994-01-05|NONE|MAIL|osits. 
slyly ironic asymptotes w 13476|101327|6348|5|40|53132.80|0.05|0.06|A|F|1993-12-20|1993-11-25|1994-01-07|DELIVER IN PERSON|RAIL|nto beans are carefully atop the 13476|199041|6599|6|41|46741.64|0.02|0.06|A|F|1994-01-01|1993-12-16|1994-01-11|DELIVER IN PERSON|SHIP|ecial accou 13477|16060|3564|1|14|13664.84|0.01|0.01|N|O|1998-03-22|1998-02-13|1998-04-06|NONE|MAIL|ic foxes wake slyly. final accounts haggl 13477|116094|8606|2|2|2220.18|0.03|0.04|N|O|1997-12-27|1998-01-14|1998-01-17|NONE|AIR|s. quickly special accounts are f 13477|188689|3726|3|23|40886.64|0.03|0.05|N|O|1998-02-05|1998-02-14|1998-02-16|DELIVER IN PERSON|REG AIR| are blithely. regular requests sleep abo 13477|95807|826|4|19|34253.20|0.07|0.06|N|O|1998-02-23|1998-01-27|1998-03-16|NONE|RAIL|requests boost slyly flu 13477|19071|1573|5|46|45543.22|0.00|0.00|N|O|1997-12-20|1998-01-03|1998-01-02|NONE|SHIP|gle slyly silent warhorses. 13478|151749|6780|1|28|50420.72|0.06|0.06|N|O|1998-01-16|1998-03-08|1998-02-09|COLLECT COD|MAIL|e carefully. slyly express 13478|44074|9083|2|50|50903.50|0.09|0.00|N|O|1998-03-20|1998-02-10|1998-04-01|NONE|AIR|n requests across th 13478|148149|8150|3|18|21548.52|0.10|0.08|N|O|1998-04-23|1998-03-06|1998-05-10|COLLECT COD|MAIL|es. slyly final requests ag 13478|135582|3122|4|22|35586.76|0.06|0.04|N|O|1998-04-21|1998-03-23|1998-04-25|NONE|TRUCK|lphins use. fi 13478|117643|5177|5|15|24909.60|0.08|0.03|N|O|1998-02-08|1998-03-26|1998-02-27|COLLECT COD|SHIP|uests. ironically express dolphins 13479|144067|1610|1|28|31109.68|0.06|0.08|R|F|1995-02-16|1995-01-19|1995-03-07|TAKE BACK RETURN|SHIP|al ideas. unu 13479|119189|4212|2|18|21747.24|0.06|0.03|A|F|1995-04-04|1995-02-10|1995-04-18|COLLECT COD|FOB|lly ironic pack 13479|195370|2928|3|22|32238.14|0.07|0.06|A|F|1995-02-20|1995-01-24|1995-03-17|COLLECT COD|MAIL|fully slyly final courts. carefull 13479|179825|9826|4|34|64763.88|0.08|0.06|A|F|1995-03-11|1995-02-02|1995-03-21|NONE|RAIL|he furiously express packa 13479|112408|4920|5|16|22726.40|0.08|0.08|R|F|1995-02-06|1995-01-06|1995-02-28|TAKE BACK RETURN|SHIP|regular requests. silent, final accounts a 13504|132116|7143|1|26|29850.86|0.02|0.04|N|O|1997-10-02|1997-08-17|1997-10-06|NONE|RAIL|nent deposits. slyly final dependencies ca 13504|66225|6226|2|5|5956.10|0.00|0.06|N|O|1997-08-02|1997-09-07|1997-08-27|TAKE BACK RETURN|AIR|inal, regular de 13504|37439|4949|3|41|56433.63|0.07|0.00|N|O|1997-06-19|1997-07-19|1997-06-25|NONE|RAIL|ect about the qui 13504|23276|8281|4|31|37177.37|0.02|0.08|N|O|1997-06-24|1997-08-28|1997-06-26|DELIVER IN PERSON|AIR|alongside of t 13505|197359|2398|1|6|8738.10|0.02|0.02|N|O|1997-06-09|1997-08-06|1997-06-19|TAKE BACK RETURN|FOB|nding, regular ideas ab 13505|12655|7658|2|22|34488.30|0.03|0.08|N|O|1997-08-14|1997-07-05|1997-08-31|DELIVER IN PERSON|TRUCK|foxes doubt furiously. ideas serve quickly 13505|82401|9926|3|32|44268.80|0.10|0.05|N|O|1997-08-13|1997-07-27|1997-09-09|NONE|AIR|unusual ideas 13505|99029|4048|4|40|41120.80|0.05|0.05|N|O|1997-07-23|1997-07-17|1997-08-08|COLLECT COD|AIR|ven theodolites about the q 13505|164725|7242|5|39|69799.08|0.08|0.02|N|O|1997-09-11|1997-08-03|1997-10-02|COLLECT COD|MAIL|heodolites integrate furiou 13506|104014|6525|1|12|12216.12|0.02|0.00|N|O|1995-10-03|1995-10-03|1995-10-04|TAKE BACK RETURN|RAIL|es. 
even, ironic packa 13506|147678|193|2|18|31062.06|0.06|0.01|N|O|1995-08-24|1995-08-30|1995-09-16|DELIVER IN PERSON|MAIL|ove the slyly stealthy instructions 13506|18601|6105|3|48|72940.80|0.02|0.01|N|O|1995-09-24|1995-09-21|1995-09-26|NONE|TRUCK| pending theodolites cajole furi 13506|11296|1297|4|25|30182.25|0.06|0.04|N|O|1995-10-18|1995-10-15|1995-11-12|DELIVER IN PERSON|TRUCK|ly final deposits na 13507|67193|4712|1|11|12762.09|0.07|0.07|A|F|1993-02-16|1993-02-04|1993-03-03|TAKE BACK RETURN|TRUCK| silent packages wake quickly a 13507|105998|3529|2|47|94187.53|0.10|0.08|A|F|1993-01-16|1993-02-05|1993-02-09|DELIVER IN PERSON|MAIL|ts. quickly regular depos 13507|103038|569|3|30|31230.90|0.04|0.03|A|F|1993-02-13|1993-01-30|1993-03-09|DELIVER IN PERSON|RAIL|nts hang slyly against the 13507|148644|3673|4|22|37238.08|0.09|0.03|R|F|1993-01-11|1993-02-24|1993-02-03|NONE|FOB|. fluffily ironic gifts wake quickly boldl 13508|169529|4562|1|2|3197.04|0.10|0.03|N|O|1997-07-18|1997-07-15|1997-07-24|TAKE BACK RETURN|TRUCK|ounts. furiou 13509|89079|9080|1|39|41654.73|0.03|0.01|N|O|1997-01-04|1996-12-30|1997-01-16|COLLECT COD|REG AIR|fully quic 13510|105808|8319|1|22|39903.60|0.06|0.02|N|O|1998-01-16|1997-12-03|1998-01-17|TAKE BACK RETURN|FOB|osits along the even 13510|113340|8363|2|29|39246.86|0.08|0.05|N|O|1998-01-15|1997-11-14|1998-01-24|TAKE BACK RETURN|RAIL|theodolites. furiously iron 13510|97091|7092|3|16|17409.44|0.01|0.05|N|O|1998-01-25|1997-11-18|1998-01-30|COLLECT COD|REG AIR|es use blithely after the cl 13511|57644|150|1|42|67268.88|0.00|0.01|N|O|1998-03-24|1998-02-28|1998-04-19|COLLECT COD|RAIL|c accounts nag carefully. slyly fin 13511|9609|4610|2|29|44039.40|0.05|0.04|N|O|1998-02-16|1998-03-24|1998-03-10|NONE|TRUCK| ironic requests are quickly fur 13511|92290|2291|3|4|5129.16|0.00|0.07|N|O|1998-03-21|1998-02-23|1998-03-30|TAKE BACK RETURN|REG AIR|wake according to the pinto beans. fina 13511|115367|5368|4|13|17970.68|0.10|0.02|N|O|1998-02-08|1998-03-01|1998-02-22|DELIVER IN PERSON|FOB| packages. carefull 13511|157164|2195|5|16|19538.56|0.02|0.00|N|O|1998-04-20|1998-03-30|1998-05-01|NONE|RAIL|s requests haggle furiously final asymptote 13536|5520|8021|1|19|27084.88|0.10|0.06|R|F|1994-10-04|1994-09-24|1994-10-24|NONE|MAIL|l tithes. furiously ironic accou 13536|163196|3197|2|25|31479.75|0.01|0.00|A|F|1994-11-23|1994-10-09|1994-12-07|COLLECT COD|AIR|ly even frets nag regular, 13536|22637|2638|3|36|56146.68|0.10|0.07|A|F|1994-11-09|1994-09-19|1994-11-12|NONE|REG AIR|elets. final theodolites alongside of the 13536|57542|48|4|37|55482.98|0.03|0.04|R|F|1994-09-24|1994-09-27|1994-09-28|DELIVER IN PERSON|REG AIR|accounts. 
slyly 13536|93653|3654|5|20|32933.00|0.06|0.01|A|F|1994-10-25|1994-10-12|1994-11-19|TAKE BACK RETURN|RAIL|y deposits pro 13536|11319|6322|6|8|9842.48|0.02|0.08|R|F|1994-10-21|1994-10-06|1994-10-24|DELIVER IN PERSON|MAIL| slyly carefully express acco 13537|48059|3068|1|20|20141.00|0.08|0.04|R|F|1993-07-29|1993-07-25|1993-08-22|TAKE BACK RETURN|MAIL|grate blithely about the ironic de 13538|35956|3466|1|49|92705.55|0.02|0.06|N|O|1997-05-23|1997-03-08|1997-06-05|DELIVER IN PERSON|SHIP|against the foxes haggle bli 13538|109611|9612|2|1|1620.61|0.07|0.07|N|O|1997-02-12|1997-03-12|1997-02-27|DELIVER IN PERSON|MAIL|ng about the regular, ir 13539|49694|9695|1|15|24655.35|0.10|0.00|N|O|1997-05-08|1997-03-29|1997-05-30|NONE|RAIL|y among the sly 13539|3551|8552|2|47|68363.85|0.09|0.01|N|O|1997-04-13|1997-04-28|1997-04-30|TAKE BACK RETURN|AIR|heodolites integ 13539|74618|7126|3|11|17518.71|0.01|0.04|N|O|1997-03-19|1997-05-12|1997-03-25|TAKE BACK RETURN|MAIL|egular, regular theodoli 13539|100101|5122|4|50|55055.00|0.06|0.04|N|O|1997-02-15|1997-04-27|1997-03-10|TAKE BACK RETURN|FOB|as use quic 13539|194753|9792|5|35|64671.25|0.01|0.01|N|O|1997-03-11|1997-04-01|1997-04-07|COLLECT COD|MAIL|omas sleep daringly. blithely regular exc 13539|5642|8143|6|4|6190.56|0.04|0.04|N|O|1997-02-24|1997-04-18|1997-03-12|COLLECT COD|FOB|ar, pending excuses around the careful 13540|88303|812|1|25|32282.50|0.10|0.02|R|F|1993-06-02|1993-08-02|1993-06-29|TAKE BACK RETURN|MAIL|ily final deposits cajo 13540|95382|5383|2|44|60604.72|0.06|0.08|R|F|1993-07-01|1993-07-19|1993-07-16|TAKE BACK RETURN|MAIL|ckages x-ray 13540|76149|6150|3|6|6750.84|0.01|0.01|R|F|1993-08-09|1993-06-26|1993-09-08|NONE|SHIP|lites nag about the slyly even instruc 13540|24670|2177|4|48|76544.16|0.08|0.02|A|F|1993-08-04|1993-08-04|1993-08-30|TAKE BACK RETURN|TRUCK|lyly. fluffily ironic requests 13540|13364|3365|5|26|33211.36|0.01|0.02|A|F|1993-07-18|1993-07-22|1993-08-04|COLLECT COD|AIR| pending requests wake 13540|192976|2977|6|17|35172.49|0.10|0.04|R|F|1993-09-04|1993-08-12|1993-09-19|TAKE BACK RETURN|AIR|nts. slyly fi 13540|144990|19|7|28|56979.72|0.09|0.05|R|F|1993-09-08|1993-08-13|1993-10-08|DELIVER IN PERSON|MAIL|sly unusual 13541|35518|8022|1|21|30523.71|0.07|0.02|N|O|1996-09-03|1996-09-02|1996-09-06|NONE|RAIL|ccounts-- final 13541|146932|1961|2|1|1978.93|0.04|0.07|N|O|1996-10-02|1996-09-24|1996-10-27|TAKE BACK RETURN|AIR|y even requests. slyly 13541|76813|9321|3|17|30426.77|0.07|0.04|N|O|1996-09-20|1996-10-02|1996-09-25|DELIVER IN PERSON|MAIL|busily against the slyly even foxe 13542|95266|7776|1|42|52972.92|0.04|0.03|R|F|1994-07-01|1994-05-21|1994-07-22|TAKE BACK RETURN|RAIL|ep blithely along the slyly regular a 13542|120336|337|2|46|62391.18|0.00|0.06|R|F|1994-06-02|1994-06-05|1994-06-27|TAKE BACK RETURN|SHIP|hely. regular, silent req 13542|78797|6319|3|46|81686.34|0.10|0.01|A|F|1994-06-21|1994-04-20|1994-07-02|DELIVER IN PERSON|AIR|sts haggle: 13542|141105|8648|4|45|51574.50|0.03|0.02|R|F|1994-04-16|1994-05-25|1994-05-06|DELIVER IN PERSON|SHIP|kages cajole quickly about 13542|141549|1550|5|34|54078.36|0.05|0.00|R|F|1994-03-24|1994-04-26|1994-03-26|TAKE BACK RETURN|RAIL|press tithes sleep after the pending 13543|78522|3537|1|11|16505.72|0.10|0.00|R|F|1992-10-05|1992-09-19|1992-10-25|DELIVER IN PERSON|RAIL|g the pending, ironic deposits. 
packages 13543|112521|7544|2|7|10734.64|0.02|0.05|A|F|1992-08-28|1992-10-15|1992-09-08|DELIVER IN PERSON|AIR|ously bold multipliers 13568|136086|1113|1|17|19075.36|0.03|0.02|R|F|1993-07-23|1993-07-20|1993-07-26|COLLECT COD|MAIL|riously even depo 13568|194815|9854|2|4|7639.24|0.01|0.07|A|F|1993-05-31|1993-07-25|1993-06-12|DELIVER IN PERSON|MAIL|s haggle furiously ironic, even d 13568|50910|8426|3|38|70714.58|0.06|0.04|A|F|1993-07-18|1993-06-15|1993-07-31|DELIVER IN PERSON|FOB|ver special dolphins sleep blithely ab 13569|510|5511|1|26|36673.26|0.05|0.08|R|F|1995-05-21|1995-05-21|1995-06-17|TAKE BACK RETURN|MAIL|kly unusual, even packages. blithely f 13569|160580|8129|2|38|62342.04|0.04|0.08|R|F|1995-04-29|1995-05-12|1995-05-26|COLLECT COD|SHIP|es are daringly after the unusual, final a 13569|63740|1259|3|15|25556.10|0.08|0.01|A|F|1995-05-11|1995-07-04|1995-05-30|COLLECT COD|MAIL|, regular requests believe alongsi 13569|70923|924|4|9|17045.28|0.02|0.04|N|O|1995-06-21|1995-06-28|1995-06-24|DELIVER IN PERSON|TRUCK|x carefully against the regular pack 13569|1560|1561|5|14|20461.84|0.03|0.06|N|O|1995-07-11|1995-05-30|1995-08-04|TAKE BACK RETURN|RAIL|ly. carefully final h 13569|10596|8100|6|33|49717.47|0.05|0.03|R|F|1995-05-10|1995-06-11|1995-05-22|NONE|SHIP|uests use quickly regular platelets. care 13569|73100|622|7|14|15023.40|0.06|0.01|N|O|1995-07-08|1995-05-31|1995-08-05|COLLECT COD|TRUCK|oxes play along the bold ins 13570|184569|2124|1|19|31417.64|0.05|0.03|A|F|1993-02-02|1993-02-26|1993-02-04|NONE|SHIP|thely ironic foxes. quickly final re 13570|45234|7739|2|17|20046.91|0.03|0.05|A|F|1993-02-28|1993-02-17|1993-03-19|COLLECT COD|FOB| packages doubt fluffily. 13570|31980|1981|3|44|84127.12|0.06|0.08|R|F|1992-12-27|1993-02-19|1993-01-12|TAKE BACK RETURN|SHIP|ainments use furiously spe 13570|79772|2280|4|33|57808.41|0.08|0.08|A|F|1993-01-23|1993-01-30|1993-01-29|TAKE BACK RETURN|REG AIR|s cajole. carefully regular pla 13570|127785|2810|5|50|90639.00|0.03|0.06|R|F|1993-01-25|1993-03-03|1993-02-16|DELIVER IN PERSON|FOB|ly quickly bold pin 13571|3190|8191|1|25|27329.75|0.02|0.01|N|O|1998-06-08|1998-08-01|1998-07-01|DELIVER IN PERSON|SHIP| the bold, regular platelets. sa 13571|199336|9337|2|32|45930.56|0.03|0.02|N|O|1998-09-29|1998-07-15|1998-10-18|TAKE BACK RETURN|AIR|uffily final ideas 13571|39753|4760|3|20|33855.00|0.01|0.04|N|O|1998-07-02|1998-08-21|1998-07-15|NONE|FOB|lar requests. fluffil 13571|118557|1069|4|31|48842.05|0.06|0.02|N|O|1998-10-01|1998-07-10|1998-10-03|NONE|SHIP|usual packages cajole. carefully 13572|135689|5690|1|7|12072.76|0.05|0.03|R|F|1992-05-28|1992-08-11|1992-06-02|NONE|AIR|sly bold deposit 13572|108762|3783|2|1|1770.76|0.08|0.07|A|F|1992-07-15|1992-07-05|1992-08-05|DELIVER IN PERSON|REG AIR|sual accounts. quickly r 13572|155268|5269|3|21|27788.46|0.03|0.07|R|F|1992-07-11|1992-06-28|1992-08-01|TAKE BACK RETURN|RAIL|r asymptotes? slyly ev 13572|175346|7864|4|8|11370.72|0.10|0.07|A|F|1992-06-18|1992-07-14|1992-06-25|TAKE BACK RETURN|TRUCK|yly ironic accounts; blithely regular attai 13572|37308|2315|5|42|52302.60|0.05|0.08|R|F|1992-06-26|1992-08-02|1992-07-05|DELIVER IN PERSON|FOB|beans shall snooze thinly re 13572|30841|3345|6|5|8859.20|0.04|0.07|R|F|1992-07-27|1992-06-25|1992-08-23|COLLECT COD|MAIL|use final requests. carefully express ac 13573|69558|7077|1|9|13747.95|0.01|0.04|N|O|1997-07-30|1997-06-10|1997-08-15|DELIVER IN PERSON|SHIP|osits among the slyly bol 13573|54916|9927|2|30|56127.30|0.06|0.01|N|O|1997-05-05|1997-07-08|1997-06-02|DELIVER IN PERSON|SHIP|. 
blithely ev 13573|166967|9484|3|7|14237.72|0.09|0.06|N|O|1997-06-24|1997-06-21|1997-06-27|COLLECT COD|TRUCK|y. quickly final dependencies are carefull 13573|12368|4870|4|12|15364.32|0.06|0.01|N|O|1997-06-28|1997-06-04|1997-07-26|NONE|REG AIR|t the fluffily special depen 13574|92775|5285|1|41|72478.57|0.10|0.08|N|O|1997-11-27|1997-12-25|1997-12-27|DELIVER IN PERSON|AIR|usual tithes. slyl 13575|86685|9194|1|29|48478.72|0.01|0.01|R|F|1992-11-22|1992-11-25|1992-12-08|COLLECT COD|AIR|s sleep fluffily fluffily express sent 13575|107358|4889|2|42|57344.70|0.05|0.05|R|F|1992-12-06|1992-10-23|1992-12-27|DELIVER IN PERSON|REG AIR| the deposits. even instr 13600|143251|3252|1|24|31062.00|0.02|0.05|A|F|1993-08-07|1993-08-10|1993-08-30|NONE|TRUCK|fully bold asymptotes. platelets sleep. 13600|7003|9504|2|7|6370.00|0.06|0.05|A|F|1993-08-16|1993-09-01|1993-08-24|DELIVER IN PERSON|AIR|ests. ironic instructions 13600|92577|7596|3|48|75339.36|0.06|0.03|A|F|1993-11-03|1993-09-19|1993-11-12|DELIVER IN PERSON|FOB|arefully even cour 13601|40504|5513|1|24|34668.00|0.02|0.07|R|F|1992-11-05|1992-10-28|1992-11-28|NONE|TRUCK| bold deposits. 13601|146355|6356|2|48|67264.80|0.04|0.07|R|F|1992-12-06|1992-10-16|1992-12-07|TAKE BACK RETURN|REG AIR|ly final accounts. slyly 13601|177170|7171|3|43|53628.31|0.05|0.07|A|F|1992-10-11|1992-10-17|1992-11-09|NONE|MAIL| packages can haggle furiously furio 13601|95759|3287|4|27|47378.25|0.03|0.02|A|F|1992-08-23|1992-09-29|1992-09-15|NONE|AIR| across the special theodolites affi 13601|12470|4972|5|32|44239.04|0.04|0.07|R|F|1992-08-28|1992-11-04|1992-09-25|DELIVER IN PERSON|RAIL|yly regular dependencies! blithely daring 13602|151853|1854|1|34|64764.90|0.03|0.00|A|F|1994-03-03|1994-03-04|1994-03-27|NONE|SHIP|st the quickly regular instructions. bold 13602|187052|4607|2|49|55813.45|0.04|0.07|A|F|1994-03-19|1994-03-24|1994-04-01|COLLECT COD|SHIP|sts thrash fu 13602|176488|6489|3|44|68837.12|0.09|0.02|A|F|1994-04-30|1994-03-03|1994-05-27|NONE|SHIP|yly regular excuses. furiously specia 13602|23346|3347|4|4|5077.36|0.01|0.08|R|F|1994-04-08|1994-03-14|1994-04-26|COLLECT COD|SHIP|luffily special dinos 13602|162193|2194|5|49|61504.31|0.01|0.02|R|F|1994-02-08|1994-03-03|1994-02-28|TAKE BACK RETURN|TRUCK|beans detect f 13603|52956|472|1|26|49632.70|0.10|0.03|N|O|1997-07-29|1997-09-05|1997-08-27|DELIVER IN PERSON|MAIL|press, even packages 13603|29051|4056|2|3|2940.15|0.10|0.00|N|O|1997-08-04|1997-07-26|1997-08-16|DELIVER IN PERSON|FOB| the furiously pend 13603|44869|7374|3|18|32649.48|0.09|0.05|N|O|1997-09-08|1997-08-28|1997-09-15|TAKE BACK RETURN|RAIL|lithely. requests again 13603|108970|1481|4|22|43537.34|0.03|0.08|N|O|1997-06-18|1997-07-10|1997-07-07|DELIVER IN PERSON|FOB|ickly even 13603|49377|1882|5|4|5305.48|0.10|0.08|N|O|1997-06-29|1997-07-29|1997-07-09|COLLECT COD|MAIL|he quickly final foxes are blithely r 13603|68091|5610|6|44|46599.96|0.03|0.04|N|O|1997-08-06|1997-08-05|1997-08-12|DELIVER IN PERSON|MAIL|ecial courts? final accounts haggle a 13603|2460|2461|7|39|53135.94|0.00|0.08|N|O|1997-09-27|1997-07-13|1997-10-07|TAKE BACK RETURN|MAIL| blithely even instruc 13604|141649|6678|1|15|25359.60|0.02|0.00|R|F|1994-05-22|1994-04-28|1994-06-01|TAKE BACK RETURN|FOB|sleep. 
furiously special requests u 13604|82427|2428|2|20|28188.40|0.01|0.05|A|F|1994-03-31|1994-04-23|1994-04-10|NONE|FOB|st have to wake among the silen 13604|142756|2757|3|17|30578.75|0.05|0.03|R|F|1994-04-26|1994-04-27|1994-05-01|NONE|TRUCK|ding to the ironic pinto bean 13604|83777|1302|4|30|52823.10|0.10|0.02|R|F|1994-06-03|1994-03-25|1994-06-13|TAKE BACK RETURN|FOB|deas. regular dependencies are 13604|27953|7954|5|35|65833.25|0.02|0.08|R|F|1994-03-12|1994-03-07|1994-04-05|TAKE BACK RETURN|RAIL|egular deposits. slyly regular ideas cajole 13604|2362|9863|6|40|50574.40|0.06|0.03|R|F|1994-04-29|1994-03-10|1994-05-16|COLLECT COD|RAIL|ctions. slyly carefu 13604|88413|3430|7|42|58859.22|0.01|0.02|A|F|1994-03-06|1994-03-27|1994-03-29|TAKE BACK RETURN|TRUCK| quickly deposits. regular pinto beans 13605|145290|5291|1|30|40058.70|0.08|0.01|N|O|1996-08-24|1996-09-02|1996-08-29|NONE|AIR|ep carefully above 13605|78979|6501|2|27|52865.19|0.08|0.01|N|O|1996-08-01|1996-09-01|1996-08-09|NONE|MAIL|ual accounts. fur 13605|196829|1868|3|7|13480.74|0.03|0.05|N|O|1996-10-03|1996-09-08|1996-10-09|COLLECT COD|MAIL|pecial requests alongside of the d 13605|102936|467|4|4|7755.72|0.09|0.08|N|O|1996-09-03|1996-09-03|1996-09-21|DELIVER IN PERSON|MAIL|lar packages sublate blithely final request 13605|39009|4016|5|40|37920.00|0.04|0.07|N|O|1996-07-05|1996-09-04|1996-07-15|TAKE BACK RETURN|RAIL|s use furio 13605|21319|8826|6|34|42170.54|0.00|0.05|N|O|1996-07-26|1996-09-01|1996-08-14|COLLECT COD|RAIL|inal packages sleep after 13605|105590|5591|7|12|19147.08|0.02|0.04|N|O|1996-06-16|1996-07-30|1996-07-12|DELIVER IN PERSON|REG AIR|luffily regular platelets integrate-- bli 13606|49648|9649|1|27|43136.28|0.09|0.04|R|F|1994-04-29|1994-07-22|1994-05-01|DELIVER IN PERSON|TRUCK|ic pinto beans sleep furiously after t 13606|17245|2248|2|13|15109.12|0.09|0.06|R|F|1994-06-23|1994-06-25|1994-07-03|COLLECT COD|REG AIR|kages nag furiously after 13606|30889|3393|3|27|49136.76|0.10|0.08|R|F|1994-04-28|1994-06-10|1994-05-22|NONE|REG AIR|the excuses. even br 13606|140107|7650|4|34|39001.40|0.07|0.07|A|F|1994-05-05|1994-05-28|1994-05-17|COLLECT COD|REG AIR|e blithely. special fox 13606|166379|1412|5|17|24571.29|0.02|0.01|R|F|1994-06-15|1994-06-27|1994-07-03|COLLECT COD|SHIP|en accounts 13606|120620|8157|6|45|73827.90|0.01|0.04|A|F|1994-07-31|1994-06-19|1994-08-02|DELIVER IN PERSON|TRUCK| packages nag ab 13606|56630|9136|7|5|7933.15|0.06|0.00|A|F|1994-06-29|1994-07-13|1994-07-22|DELIVER IN PERSON|MAIL|ronic instructions. pen 13607|172165|9717|1|42|51960.72|0.03|0.01|R|F|1994-02-20|1994-01-27|1994-03-19|COLLECT COD|FOB| requests; fluffily u 13607|171916|1917|2|44|87468.04|0.10|0.08|A|F|1994-01-01|1994-02-09|1994-01-24|COLLECT COD|FOB|ins wake brav 13607|181410|8965|3|6|8948.46|0.07|0.02|R|F|1994-02-17|1993-12-19|1994-03-03|COLLECT COD|FOB|e requests. regul 13607|151046|1047|4|7|7679.28|0.01|0.03|R|F|1994-02-16|1993-12-26|1994-02-18|COLLECT COD|RAIL|ect blithely regular dependencie 13607|19841|7345|5|22|38738.48|0.00|0.01|A|F|1993-11-29|1993-12-19|1993-12-20|NONE|RAIL|ans. unusual instructions above 13607|53374|5880|6|14|18583.18|0.07|0.04|R|F|1994-03-07|1993-12-25|1994-04-02|DELIVER IN PERSON|REG AIR|unts against the 13607|53824|1340|7|7|12444.74|0.02|0.03|A|F|1994-03-12|1994-02-09|1994-03-19|DELIVER IN PERSON|RAIL|yly express packages. 
enticingly eve 13632|17060|2063|1|45|43967.70|0.08|0.07|A|F|1994-04-23|1994-04-29|1994-05-19|TAKE BACK RETURN|SHIP| regular packages are 13632|120613|8150|2|1|1633.61|0.04|0.08|R|F|1994-03-20|1994-05-21|1994-04-13|DELIVER IN PERSON|AIR|ons. instructions are ca 13632|134028|1568|3|49|52038.98|0.10|0.04|R|F|1994-03-15|1994-06-05|1994-03-24|TAKE BACK RETURN|AIR|jole. pendi 13632|6406|6407|4|50|65620.00|0.00|0.00|A|F|1994-06-08|1994-05-24|1994-07-03|COLLECT COD|MAIL|ged, final realms. furiously bold accoun 13632|63423|8436|5|28|38819.76|0.05|0.04|A|F|1994-06-20|1994-05-05|1994-07-07|TAKE BACK RETURN|RAIL|e fluffily accor 13632|135363|2903|6|8|11186.88|0.10|0.08|A|F|1994-04-11|1994-05-28|1994-04-12|TAKE BACK RETURN|RAIL|structions after the even account 13632|45718|727|7|42|69875.82|0.00|0.03|R|F|1994-06-24|1994-05-31|1994-07-03|TAKE BACK RETURN|RAIL|! quickly final braids a 13633|180395|7950|1|13|19180.07|0.02|0.08|A|F|1992-06-30|1992-05-16|1992-07-20|COLLECT COD|REG AIR|ording to the 13633|90052|7580|2|11|11462.55|0.04|0.06|R|F|1992-07-07|1992-06-21|1992-07-15|TAKE BACK RETURN|AIR|onic accounts are slyly car 13633|102262|4773|3|20|25285.20|0.10|0.08|R|F|1992-05-25|1992-06-22|1992-06-02|DELIVER IN PERSON|FOB| carefully regular deposits af 13633|103003|8024|4|23|23138.00|0.02|0.01|A|F|1992-06-28|1992-06-14|1992-07-06|DELIVER IN PERSON|RAIL|olites. fluffily regu 13633|169459|1976|5|5|7642.25|0.09|0.04|R|F|1992-06-24|1992-06-26|1992-06-28|NONE|AIR|ost carefully. blithely bold d 13633|97284|7285|6|44|56376.32|0.05|0.06|R|F|1992-05-09|1992-06-28|1992-05-14|DELIVER IN PERSON|RAIL|s integrate. deposits haggle slyly 13634|25083|2590|1|21|21169.68|0.03|0.07|A|F|1995-05-14|1995-06-01|1995-06-07|NONE|REG AIR|odolites are along the final foxes. 13634|156338|8854|2|13|18126.29|0.07|0.02|N|O|1995-07-03|1995-07-07|1995-08-02|TAKE BACK RETURN|FOB|s. slyly dogged patterns nag furiously 13635|72440|7455|1|22|31073.68|0.02|0.05|A|F|1994-08-19|1994-06-28|1994-09-17|TAKE BACK RETURN|FOB|ourts play c 13635|95997|8507|2|46|91677.54|0.08|0.07|R|F|1994-06-22|1994-06-27|1994-06-28|DELIVER IN PERSON|AIR| nag blithely idly special ac 13636|152314|2315|1|1|1366.31|0.00|0.06|R|F|1994-05-11|1994-03-14|1994-05-31|NONE|TRUCK|ar pinto beans? slyly pendin 13636|137307|2334|2|27|36296.10|0.03|0.02|A|F|1994-04-03|1994-03-20|1994-04-08|COLLECT COD|RAIL|are furious 13636|107026|2047|3|48|49584.96|0.00|0.06|A|F|1994-05-11|1994-04-19|1994-05-18|DELIVER IN PERSON|REG AIR|lent requests. ironic excuses 13636|181540|9095|4|49|79455.46|0.05|0.08|A|F|1994-01-25|1994-03-20|1994-02-10|TAKE BACK RETURN|FOB|dolites. silently pendin 13637|141460|3975|1|47|70568.62|0.00|0.08|N|O|1996-02-27|1996-03-02|1996-03-22|COLLECT COD|FOB|he quickly ironic platelets boost b 13638|24573|4574|1|24|35941.68|0.02|0.07|N|O|1995-11-27|1995-11-07|1995-12-21|DELIVER IN PERSON|TRUCK|fluffily bold accounts affix at th 13638|181686|4205|2|43|76010.24|0.01|0.07|N|O|1996-01-06|1995-12-26|1996-01-17|COLLECT COD|TRUCK|special pai 13638|189830|7385|3|22|42236.26|0.02|0.04|N|O|1995-11-19|1995-11-28|1995-12-14|TAKE BACK RETURN|AIR|ve the final, sly requests wake f 13638|71634|4142|4|42|67436.46|0.00|0.02|N|O|1995-10-30|1995-11-18|1995-11-04|TAKE BACK RETURN|REG AIR|ding to the bold theodolites boost fin 13638|146224|8739|5|28|35566.16|0.02|0.06|N|O|1995-11-16|1995-11-29|1995-11-30|TAKE BACK RETURN|AIR|ymptotes are slyly 13638|182547|102|6|41|66811.14|0.04|0.05|N|O|1995-12-30|1995-12-11|1996-01-07|COLLECT COD|SHIP|s use. 
deposits are carefully according t 13639|101652|4163|1|34|56224.10|0.05|0.01|N|O|1997-09-25|1997-08-18|1997-10-05|DELIVER IN PERSON|TRUCK|nts. fluffily even reque 13639|52003|9519|2|46|43930.00|0.03|0.03|N|O|1997-09-18|1997-07-20|1997-10-12|DELIVER IN PERSON|FOB|en accounts eat slyly 13639|6244|1245|3|31|35657.44|0.01|0.06|N|O|1997-09-05|1997-08-23|1997-10-03|DELIVER IN PERSON|SHIP| even instructi 13664|188470|8471|1|19|29610.93|0.04|0.07|N|O|1996-03-23|1996-02-15|1996-04-03|NONE|AIR| print among the regular, regular deposits. 13664|13951|8954|2|40|74598.00|0.05|0.02|N|O|1996-04-01|1996-03-04|1996-04-15|DELIVER IN PERSON|AIR| poach ent 13664|66326|8833|3|48|62031.36|0.01|0.08|N|O|1995-12-31|1996-02-16|1996-01-26|NONE|REG AIR|ainst the final cour 13664|136392|1419|4|47|67134.33|0.00|0.02|N|O|1996-02-05|1996-02-22|1996-02-07|DELIVER IN PERSON|RAIL|lar accoun 13664|125249|274|5|12|15290.88|0.06|0.06|N|O|1995-12-18|1996-02-25|1996-01-06|NONE|TRUCK|dolites according to the carefully bold pl 13664|53940|1456|6|45|85227.30|0.06|0.02|N|O|1996-03-22|1996-02-23|1996-04-07|COLLECT COD|REG AIR|posits haggle. cou 13665|165990|8507|1|34|69903.66|0.09|0.08|N|O|1996-06-19|1996-06-27|1996-07-01|DELIVER IN PERSON|RAIL|ding asymptotes sl 13665|188903|8904|2|16|31870.40|0.02|0.06|N|O|1996-07-03|1996-06-20|1996-08-02|NONE|AIR|uffily final requests engage carefully upo 13665|86170|6171|3|20|23123.40|0.04|0.07|N|O|1996-04-10|1996-05-10|1996-04-28|DELIVER IN PERSON|MAIL|, ironic pack 13665|42457|7466|4|30|41983.50|0.01|0.06|N|O|1996-06-07|1996-06-03|1996-06-10|COLLECT COD|RAIL|r instructions boo 13665|170385|5420|5|4|5821.52|0.04|0.03|N|O|1996-08-01|1996-06-28|1996-08-29|COLLECT COD|RAIL|etect carefully. carefully ironic deposit 13665|59392|9393|6|10|13513.90|0.06|0.06|N|O|1996-05-17|1996-06-24|1996-06-03|TAKE BACK RETURN|REG AIR|ackages above the regular asymptote 13666|8721|8722|1|28|45632.16|0.07|0.05|R|F|1992-05-12|1992-05-31|1992-05-14|NONE|RAIL|ven packages haggle. carefully bold 13666|159927|4958|2|21|41725.32|0.00|0.02|R|F|1992-03-31|1992-04-20|1992-04-13|COLLECT COD|FOB| regular requests hang blithely according t 13667|133648|6162|1|5|8408.20|0.06|0.04|N|O|1997-01-18|1997-02-03|1997-02-04|TAKE BACK RETURN|RAIL|ul escapades! caref 13667|20219|7726|2|2|2278.42|0.05|0.08|N|O|1997-03-24|1997-01-03|1997-04-03|NONE|AIR|old theodolites. carefully final pa 13667|11825|6828|3|16|27789.12|0.05|0.03|N|O|1997-01-05|1997-02-04|1997-01-25|DELIVER IN PERSON|RAIL|ly careful deposits. p 13667|135501|528|4|31|47631.50|0.08|0.08|N|O|1997-01-05|1997-02-13|1997-01-22|COLLECT COD|AIR|. even instructions above th 13667|151661|6692|5|35|59943.10|0.03|0.08|N|O|1997-02-13|1997-01-20|1997-03-03|COLLECT COD|MAIL|along the special notornis. quickly spec 13667|158603|8604|6|38|63140.80|0.00|0.00|N|O|1996-12-23|1997-01-24|1996-12-28|TAKE BACK RETURN|MAIL|ress theodol 13667|28731|3736|7|34|56430.82|0.10|0.02|N|O|1997-02-09|1997-01-10|1997-02-19|DELIVER IN PERSON|SHIP|e daringly against the carefu 13668|76371|3893|1|49|66021.13|0.02|0.02|A|F|1992-04-18|1992-04-04|1992-05-08|NONE|TRUCK|as sleep. per 13668|163193|8226|2|6|7537.14|0.09|0.04|A|F|1992-06-04|1992-05-22|1992-06-20|DELIVER IN PERSON|TRUCK|uses. 
bold p 13668|178617|1135|3|50|84780.50|0.05|0.02|R|F|1992-06-09|1992-04-26|1992-06-13|TAKE BACK RETURN|FOB|rding to the furiously bold platelet 13668|10098|2600|4|44|44355.96|0.01|0.07|A|F|1992-03-24|1992-04-19|1992-03-25|COLLECT COD|REG AIR|s theodolites boost regu 13669|90220|221|1|29|35096.38|0.05|0.03|N|O|1996-10-21|1996-10-20|1996-11-18|NONE|REG AIR| special deposits. sile 13669|114193|1727|2|40|48287.60|0.03|0.03|N|O|1996-08-30|1996-11-08|1996-09-18|NONE|SHIP|ly unusual asymptotes boost caref 13669|35983|990|3|16|30703.68|0.06|0.00|N|O|1996-10-18|1996-11-09|1996-11-15|NONE|MAIL|luffily pending requests. slowly ironic 13669|152308|2309|4|7|9522.10|0.04|0.03|N|O|1996-12-09|1996-09-26|1997-01-01|NONE|AIR|instructions. accounts 13669|27993|2998|5|22|42261.78|0.07|0.08|N|O|1996-08-16|1996-09-27|1996-09-07|TAKE BACK RETURN|AIR|ironic, enticing pac 13669|66124|3643|6|10|10901.20|0.08|0.02|N|O|1996-09-29|1996-10-16|1996-10-08|NONE|REG AIR|. slowly even excuses nag after t 13669|139763|9764|7|13|23435.88|0.06|0.04|N|O|1996-10-15|1996-10-15|1996-11-14|NONE|FOB|o beans nag quickly about the even t 13670|188271|8272|1|38|51652.26|0.04|0.08|A|F|1992-06-29|1992-07-17|1992-07-17|TAKE BACK RETURN|MAIL|xpress pack 13670|55318|5319|2|21|26739.51|0.05|0.01|R|F|1992-06-03|1992-07-06|1992-06-21|NONE|SHIP| theodolites affix ru 13670|119002|4025|3|50|51050.00|0.05|0.03|A|F|1992-06-20|1992-07-21|1992-07-13|COLLECT COD|AIR|kly past the requests. qui 13671|88845|8846|1|14|25673.76|0.06|0.03|N|O|1996-11-11|1996-09-24|1996-12-06|DELIVER IN PERSON|RAIL|uests doubt slyly. quickly final packages w 13671|11939|1940|2|40|74037.20|0.07|0.05|N|O|1996-10-06|1996-09-24|1996-11-03|COLLECT COD|FOB|le above the ironically final reques 13671|52820|5326|3|20|35456.40|0.04|0.02|N|O|1996-08-12|1996-10-10|1996-08-24|TAKE BACK RETURN|AIR|g the carefully final dep 13671|142076|9619|4|12|13416.84|0.07|0.01|N|O|1996-08-17|1996-09-13|1996-08-27|NONE|MAIL|ost fluffily even deposits. even d 13671|61645|4152|5|41|65872.24|0.10|0.05|N|O|1996-08-31|1996-09-20|1996-09-05|NONE|RAIL|y pending deposits wake slyl 13671|55350|361|6|30|39160.50|0.10|0.06|N|O|1996-10-04|1996-08-26|1996-10-05|COLLECT COD|AIR|gle slyly quic 13671|59727|7243|7|8|13493.76|0.06|0.01|N|O|1996-08-31|1996-08-25|1996-09-03|TAKE BACK RETURN|REG AIR|ironic foxes wake furiously ironic depos 13696|22578|2579|1|30|45017.10|0.00|0.00|N|O|1997-12-01|1997-11-01|1997-12-31|COLLECT COD|TRUCK|odolites sleep along the fi 13696|62787|7800|2|8|13998.24|0.06|0.02|N|O|1997-09-12|1997-11-11|1997-09-20|NONE|REG AIR|ickly final requests. quickl 13696|46166|8671|3|7|7785.12|0.09|0.04|N|O|1997-11-24|1997-10-31|1997-11-25|NONE|SHIP|ions eat furiously. ironically ironic p 13696|108052|563|4|46|48762.30|0.09|0.02|N|O|1997-09-01|1997-10-17|1997-09-08|DELIVER IN PERSON|MAIL|sual accounts alongside of the 13696|22820|5323|5|1|1742.82|0.00|0.04|N|O|1997-09-02|1997-10-31|1997-09-24|TAKE BACK RETURN|MAIL|the special, 13696|133882|1422|6|18|34485.84|0.07|0.01|N|O|1997-11-15|1997-10-16|1997-12-03|NONE|MAIL|ajole blithely pending pinto b 13697|66614|6615|1|2|3161.22|0.07|0.08|N|O|1998-07-11|1998-07-01|1998-08-04|DELIVER IN PERSON|TRUCK|the bold pinto beans: expre 13697|180764|8319|2|30|55342.80|0.04|0.08|N|O|1998-07-19|1998-05-22|1998-08-18|NONE|SHIP|foxes. furiously pending f 13697|90584|5603|3|28|44088.24|0.08|0.02|N|O|1998-04-26|1998-06-21|1998-05-10|TAKE BACK RETURN|TRUCK|nly final requests. 
bo 13698|350|351|1|50|62517.50|0.03|0.03|A|F|1994-09-23|1994-11-01|1994-09-26|TAKE BACK RETURN|MAIL|ffily express accounts use after th 13698|67996|503|2|46|90343.54|0.08|0.03|R|F|1994-10-27|1994-10-21|1994-11-06|TAKE BACK RETURN|REG AIR|uses wake careful 13698|3107|608|3|34|34343.40|0.00|0.07|R|F|1994-08-19|1994-10-25|1994-09-03|NONE|FOB|y. platelets are quickly. enticingly 13698|15875|8377|4|7|12536.09|0.10|0.06|R|F|1994-10-13|1994-09-29|1994-11-01|TAKE BACK RETURN|REG AIR|egular courts. ironic instruct 13699|154807|7323|1|15|27927.00|0.00|0.05|N|O|1997-05-31|1997-06-07|1997-06-15|DELIVER IN PERSON|FOB| the blithe, pending dependencie 13700|14284|9287|1|19|22767.32|0.01|0.03|R|F|1992-06-11|1992-03-19|1992-07-06|DELIVER IN PERSON|FOB|, final requests. slyly 13701|61036|3543|1|47|46860.41|0.02|0.01|N|O|1998-08-01|1998-06-17|1998-08-12|TAKE BACK RETURN|RAIL|s. even depe 13701|24802|7305|2|5|8634.00|0.04|0.01|N|O|1998-07-14|1998-07-11|1998-08-07|TAKE BACK RETURN|FOB|inal platelets. quickly f 13701|31059|3563|3|25|24751.25|0.04|0.05|N|O|1998-06-26|1998-08-13|1998-07-13|COLLECT COD|TRUCK|after the furiously even dependenc 13701|3700|6201|4|3|4811.10|0.01|0.00|N|O|1998-07-25|1998-06-30|1998-08-12|DELIVER IN PERSON|TRUCK|ding epitaphs are. regular 13701|180298|7853|5|30|41348.70|0.01|0.08|N|O|1998-07-17|1998-08-05|1998-08-14|TAKE BACK RETURN|FOB|lyly regular theodolites. 13701|54200|9211|6|41|47322.20|0.02|0.01|N|O|1998-06-11|1998-06-28|1998-06-15|COLLECT COD|TRUCK| sleep slyly. slyly final deposits nag 13702|189769|7324|1|20|37175.20|0.10|0.00|N|O|1995-12-23|1995-10-28|1996-01-16|COLLECT COD|FOB|s platelets boost according t 13702|36185|1192|2|16|17938.88|0.05|0.02|N|O|1995-12-04|1995-11-21|1995-12-22|TAKE BACK RETURN|TRUCK|thely after the carefully bold accounts. 13703|81203|6220|1|13|15394.60|0.04|0.08|N|O|1997-04-22|1997-03-17|1997-05-14|NONE|REG AIR|kly above the idle deposits. 13703|163488|3489|2|20|31029.60|0.02|0.02|N|O|1997-03-14|1997-03-15|1997-04-09|DELIVER IN PERSON|TRUCK|ly silent deposits about the regular pint 13703|137378|7379|3|42|59445.54|0.03|0.05|N|O|1997-04-11|1997-03-13|1997-04-17|NONE|MAIL|refully ironic requests. bold do 13703|105437|2968|4|38|54812.34|0.02|0.06|N|O|1997-02-27|1997-03-07|1997-03-12|NONE|FOB|kly bold deposits. dependencies thrash som 13703|79042|9043|5|40|40841.60|0.00|0.08|N|O|1997-03-04|1997-02-18|1997-03-16|NONE|FOB|ructions haggle slyly carefully unusual 13728|86216|3741|1|36|43279.56|0.04|0.00|N|O|1996-01-22|1996-01-26|1996-02-19|NONE|SHIP|excuses x-ray slyly special ideas. 13728|23075|5578|2|20|19961.40|0.09|0.02|N|O|1996-01-28|1996-03-09|1996-02-11|NONE|RAIL|sual packages hinder abov 13728|12450|2451|3|40|54498.00|0.03|0.05|N|O|1996-01-30|1996-01-15|1996-01-31|TAKE BACK RETURN|REG AIR|deas wake furiously. 13729|6285|8786|1|14|16677.92|0.01|0.06|A|F|1994-04-27|1994-03-19|1994-05-04|COLLECT COD|RAIL|ully above the idly e 13729|27003|2008|2|18|16740.00|0.02|0.06|A|F|1994-01-19|1994-03-22|1994-01-28|TAKE BACK RETURN|SHIP| sleep. carefully expr 13729|29196|4201|3|44|49508.36|0.04|0.04|A|F|1994-02-11|1994-03-24|1994-02-26|DELIVER IN PERSON|TRUCK|quests believe alon 13730|103273|3274|1|42|53603.34|0.09|0.03|N|F|1995-06-06|1995-07-22|1995-06-28|NONE|RAIL|slyly according to the fu 13731|64682|9695|1|41|67513.88|0.05|0.02|A|F|1993-07-09|1993-07-07|1993-07-17|TAKE BACK RETURN|RAIL|oldly against t 13731|93975|6485|2|31|61038.07|0.07|0.00|R|F|1993-05-23|1993-06-29|1993-06-03|TAKE BACK RETURN|SHIP|lithely. 
sl 13731|24269|4270|3|30|35797.80|0.00|0.06|R|F|1993-08-23|1993-06-12|1993-08-27|NONE|AIR|oost furiously bold 13731|84644|2169|4|33|53745.12|0.01|0.02|R|F|1993-08-08|1993-06-21|1993-08-19|COLLECT COD|MAIL|kages sleep quickly. reque 13732|21120|6125|1|5|5205.60|0.09|0.07|R|F|1993-01-21|1993-02-08|1993-02-07|TAKE BACK RETURN|RAIL|slyly after 13732|160159|5192|2|15|18287.25|0.06|0.07|R|F|1993-03-11|1993-02-17|1993-04-06|COLLECT COD|SHIP|o beans. slyly sly dolphins nag. sly 13732|180264|265|3|40|53770.40|0.02|0.07|A|F|1993-03-23|1993-02-19|1993-04-04|NONE|MAIL|y even instruc 13732|86399|8908|4|38|52644.82|0.04|0.04|A|F|1993-02-04|1993-02-02|1993-02-08|TAKE BACK RETURN|AIR|ronic dolphins use across the foxes. 13733|198334|3373|1|50|71616.50|0.02|0.05|N|O|1998-01-15|1998-03-01|1998-02-06|DELIVER IN PERSON|AIR|refully special packages. 13733|188670|6225|2|8|14069.36|0.00|0.05|N|O|1998-01-11|1998-03-20|1998-01-14|DELIVER IN PERSON|MAIL|sual dolphins are carefully. furiously sp 13733|175993|5994|3|39|80690.61|0.01|0.02|N|O|1998-03-17|1998-03-28|1998-03-26|DELIVER IN PERSON|RAIL|ges: ironic ideas integrate 13733|111511|6534|4|3|4567.53|0.10|0.07|N|O|1998-03-12|1998-02-16|1998-03-31|DELIVER IN PERSON|AIR|ly express excuse 13733|163974|3975|5|41|83556.77|0.01|0.03|N|O|1998-03-26|1998-03-18|1998-04-23|NONE|RAIL|rding to the quickly regular accounts ca 13733|98541|8542|6|2|3079.08|0.02|0.05|N|O|1998-02-02|1998-03-23|1998-02-17|COLLECT COD|SHIP|r deposits. blithe 13733|164467|6984|7|49|75041.54|0.01|0.06|N|O|1998-03-18|1998-02-14|1998-03-19|NONE|FOB| regular platelets detect. iro 13734|183468|3469|1|31|48095.26|0.03|0.01|N|O|1996-11-02|1996-11-07|1996-11-22|TAKE BACK RETURN|REG AIR|lar pinto beans about 13734|135770|5771|2|34|61396.18|0.05|0.08|N|O|1996-10-29|1996-10-28|1996-11-04|TAKE BACK RETURN|FOB|ccounts haggle. qui 13734|79338|1846|3|37|48741.21|0.10|0.06|N|O|1996-09-04|1996-10-08|1996-09-13|NONE|RAIL|y ironic deposits boost slyly ironic ins 13735|83671|6180|1|25|41366.75|0.03|0.00|A|F|1994-07-11|1994-08-17|1994-08-07|COLLECT COD|RAIL| fluffily final requests wake 13735|54475|9486|2|23|32877.81|0.05|0.01|R|F|1994-05-30|1994-08-10|1994-06-26|DELIVER IN PERSON|SHIP|osits wake after the final, e 13735|144955|2498|3|40|79998.00|0.00|0.05|R|F|1994-06-10|1994-07-20|1994-07-06|COLLECT COD|REG AIR|ickly bold pack 13760|51768|4274|1|29|49873.04|0.03|0.07|N|O|1996-07-15|1996-07-06|1996-07-16|COLLECT COD|RAIL|t even grouches? blithely 13760|57573|5089|2|11|16836.27|0.05|0.05|N|O|1996-07-24|1996-08-07|1996-08-11|COLLECT COD|TRUCK|heodolites sleep carefully about the ironi 13760|17868|7869|3|23|41074.78|0.05|0.03|N|O|1996-08-02|1996-07-15|1996-08-04|NONE|FOB|haggle fluffily final excu 13760|91292|3802|4|3|3849.87|0.03|0.05|N|O|1996-08-12|1996-08-23|1996-08-28|TAKE BACK RETURN|TRUCK|its alongside o 13760|128382|3407|5|14|19745.32|0.08|0.02|N|O|1996-08-09|1996-07-10|1996-09-05|NONE|MAIL|en accounts. slyly 13760|109085|1596|6|21|22975.68|0.10|0.08|N|O|1996-07-30|1996-07-10|1996-08-11|NONE|RAIL|ously idle packages haggle furiously even a 13760|190564|5603|7|3|4963.68|0.00|0.07|N|O|1996-07-13|1996-07-27|1996-07-22|TAKE BACK RETURN|MAIL|nts are according to the ironi 13761|196145|8665|1|22|27305.08|0.03|0.02|R|F|1994-11-12|1994-12-05|1994-11-16|NONE|SHIP|unts use regular asymptotes. 
pending, fu 13761|199493|2013|2|39|62107.11|0.07|0.04|A|F|1994-11-15|1994-12-09|1994-12-09|COLLECT COD|FOB|even, thin foxes ab 13761|67710|2723|3|19|31876.49|0.03|0.01|A|F|1994-12-19|1994-12-07|1995-01-04|DELIVER IN PERSON|SHIP|boldly final 13761|192750|5270|4|1|1842.75|0.10|0.08|R|F|1995-02-09|1994-12-19|1995-02-18|DELIVER IN PERSON|TRUCK| the unusual, final foxes cajole fluffily 13761|140075|5104|5|24|26761.68|0.02|0.05|R|F|1995-02-13|1994-12-15|1995-02-18|DELIVER IN PERSON|FOB| carefully bold requests along the even p 13761|35208|5209|6|2|2286.40|0.10|0.02|R|F|1995-01-23|1995-01-01|1995-01-29|TAKE BACK RETURN|AIR| carefully ironic pi 13761|129862|9863|7|17|32161.62|0.00|0.06|A|F|1994-12-28|1994-12-28|1995-01-12|TAKE BACK RETURN|FOB| regular ideas. regular asymptotes s 13762|176776|4328|1|27|50024.79|0.09|0.08|A|F|1993-10-02|1993-07-27|1993-10-09|DELIVER IN PERSON|FOB|ording to the furiously blithe dug 13762|62902|7915|2|22|41027.80|0.09|0.00|A|F|1993-10-14|1993-08-05|1993-10-24|NONE|TRUCK|sts nag blithely af 13762|179173|6725|3|3|3756.51|0.04|0.01|R|F|1993-09-24|1993-08-02|1993-10-24|DELIVER IN PERSON|SHIP|sly pending, special pac 13762|152810|356|4|27|50295.87|0.04|0.01|A|F|1993-08-17|1993-08-28|1993-08-23|TAKE BACK RETURN|AIR| quickly ironic foxes slee 13762|61170|8689|5|25|28279.25|0.08|0.08|R|F|1993-08-15|1993-09-12|1993-08-22|DELIVER IN PERSON|TRUCK|lar, express excuses. ironic, 13762|78826|6348|6|30|54144.60|0.01|0.05|R|F|1993-08-21|1993-08-15|1993-09-06|COLLECT COD|FOB|. slowly bold requests run furiously across 13762|13888|1392|7|3|5405.64|0.04|0.05|R|F|1993-07-09|1993-08-20|1993-07-20|DELIVER IN PERSON|TRUCK|ven packages boost carefully bl 13763|7793|5294|1|25|42519.75|0.03|0.02|N|O|1997-07-28|1997-08-07|1997-08-02|COLLECT COD|RAIL|blithely quiet pinto 13763|161162|8711|2|48|58711.68|0.03|0.08|N|O|1997-10-19|1997-08-15|1997-11-18|COLLECT COD|TRUCK|ve to nag carefully. slyly bold accounts 13763|43091|8100|3|41|42397.69|0.03|0.01|N|O|1997-10-06|1997-08-15|1997-11-04|NONE|REG AIR|y final instructions. even cou 13763|14567|4568|4|10|14815.60|0.02|0.02|N|O|1997-10-24|1997-09-24|1997-11-14|COLLECT COD|SHIP|g to the quickly 13764|139711|4738|1|20|35014.20|0.03|0.03|N|O|1995-09-11|1995-09-14|1995-09-29|COLLECT COD|SHIP|s. quickly regular theodolites 13764|109116|1627|2|11|12376.21|0.02|0.03|N|O|1995-11-08|1995-10-08|1995-12-05|TAKE BACK RETURN|AIR|even platelets. carefully even tithes 13764|99885|2395|3|44|82934.72|0.04|0.06|N|O|1995-08-24|1995-10-05|1995-09-09|DELIVER IN PERSON|RAIL|he silent instruct 13765|78419|927|1|27|37730.07|0.02|0.03|A|F|1994-06-01|1994-06-28|1994-06-18|COLLECT COD|REG AIR|r, pending requests about 13765|185924|961|2|9|18089.28|0.03|0.03|R|F|1994-04-22|1994-06-20|1994-05-18|TAKE BACK RETURN|RAIL| regularly fluffily express de 13765|16360|8862|3|48|61265.28|0.02|0.01|R|F|1994-06-19|1994-05-24|1994-07-03|NONE|SHIP|dependencies integrate ironic wartho 13765|156556|9072|4|32|51601.60|0.10|0.05|A|F|1994-04-23|1994-05-20|1994-04-28|NONE|RAIL|uffily above the final requests. bold, expr 13766|91578|6597|1|26|40808.82|0.06|0.02|R|F|1993-01-09|1992-12-13|1993-01-19|TAKE BACK RETURN|REG AIR|carefully regular warhorses use bl 13767|150404|2920|1|17|24724.80|0.01|0.00|R|F|1993-09-15|1993-11-09|1993-10-03|TAKE BACK RETURN|FOB|affix caref 13767|39580|2084|2|43|65341.94|0.03|0.08|A|F|1993-09-30|1993-11-20|1993-10-04|DELIVER IN PERSON|RAIL|tes. somas t 13767|20928|8435|3|19|35129.48|0.02|0.08|A|F|1993-11-06|1993-11-13|1993-11-24|NONE|MAIL|ial, daring packages. 
quickly expr 13767|126084|6085|4|9|9990.72|0.03|0.01|R|F|1993-09-21|1993-10-12|1993-10-19|DELIVER IN PERSON|SHIP|l asymptotes. bold ideas cajole ca 13767|83145|670|5|13|14665.82|0.09|0.03|A|F|1993-11-27|1993-11-01|1993-11-28|COLLECT COD|SHIP|entiments-- ide 13767|10319|7823|6|44|54089.64|0.06|0.07|R|F|1993-12-23|1993-11-10|1994-01-21|DELIVER IN PERSON|RAIL|egular packages. furiously iro 13767|19923|7427|7|28|51601.76|0.06|0.08|A|F|1993-11-26|1993-10-09|1993-12-19|COLLECT COD|SHIP|regular packages gro 13792|34783|9790|1|42|72146.76|0.10|0.03|N|O|1996-06-01|1996-03-30|1996-06-26|TAKE BACK RETURN|REG AIR|riously ironic requests poach along 13793|109944|2455|1|17|33216.98|0.00|0.06|N|O|1995-10-30|1995-11-03|1995-11-14|DELIVER IN PERSON|TRUCK| asymptotes. final, unusual account 13794|50101|102|1|50|52555.00|0.10|0.03|R|F|1994-02-14|1994-04-04|1994-02-21|NONE|TRUCK|regular theodolites. quickly sile 13794|37080|7081|2|6|6102.48|0.03|0.05|R|F|1994-04-16|1994-03-01|1994-04-23|TAKE BACK RETURN|REG AIR|out the regularly ironic p 13794|189418|1937|3|24|36177.84|0.03|0.02|R|F|1994-03-08|1994-03-10|1994-03-20|NONE|TRUCK|ding accounts are carefully final, 13794|140091|2606|4|49|55423.41|0.01|0.04|A|F|1994-03-23|1994-02-13|1994-04-20|NONE|TRUCK|ccounts. blithely unusu 13794|104781|4782|5|35|62502.30|0.03|0.04|A|F|1994-03-05|1994-04-02|1994-03-22|COLLECT COD|SHIP|slyly. blithely final instructio 13794|43744|8753|6|2|3375.48|0.04|0.03|A|F|1994-04-18|1994-02-15|1994-04-27|COLLECT COD|FOB|sits sleep. thin theodolites thrash furiou 13794|197615|2654|7|9|15413.49|0.04|0.05|A|F|1994-01-14|1994-03-26|1994-01-28|COLLECT COD|FOB|sleep after the fluffily pendin 13795|99776|4795|1|4|7103.08|0.05|0.08|N|O|1995-07-01|1995-06-19|1995-07-24|TAKE BACK RETURN|RAIL|e slyly slyly enticing requests. quick 13795|120452|2965|2|10|14724.50|0.01|0.05|R|F|1995-06-06|1995-06-25|1995-06-14|NONE|REG AIR|uriously even requests 13795|26815|1820|3|9|15676.29|0.06|0.05|A|F|1995-04-14|1995-06-01|1995-04-17|NONE|REG AIR|e the permanent, special 13795|75340|5341|4|43|56559.62|0.06|0.03|R|F|1995-05-17|1995-05-13|1995-06-06|DELIVER IN PERSON|FOB|uriously express pin 13795|30906|8416|5|49|90008.10|0.06|0.07|N|O|1995-07-13|1995-06-30|1995-07-14|TAKE BACK RETURN|REG AIR|to the requests use carefully quickly eve 13795|35150|2660|6|50|54257.50|0.02|0.07|R|F|1995-05-03|1995-06-03|1995-05-16|TAKE BACK RETURN|REG AIR| the carefully bold packages. furiou 13795|156839|4385|7|7|13270.81|0.08|0.01|N|O|1995-07-19|1995-06-02|1995-07-21|COLLECT COD|TRUCK|ic, unusual requests nag sly 13796|183420|5939|1|47|70660.74|0.00|0.03|N|O|1997-08-14|1997-09-17|1997-09-02|DELIVER IN PERSON|MAIL| deposits serve slyly special a 13796|124493|9518|2|40|60699.60|0.00|0.04|N|O|1997-10-12|1997-09-04|1997-10-18|NONE|RAIL| blithely ironic de 13796|151141|1142|3|49|58414.86|0.09|0.03|N|O|1997-09-07|1997-08-14|1997-09-16|COLLECT COD|MAIL|ial hockey players after the instructions s 13796|26698|4205|4|42|68236.98|0.07|0.07|N|O|1997-06-27|1997-09-13|1997-07-02|COLLECT COD|SHIP|gular escapades. bl 13797|66981|4500|1|34|66231.32|0.02|0.02|N|O|1996-12-14|1996-11-02|1997-01-08|NONE|TRUCK|osits. final deposits use b 13797|145243|272|2|18|23188.32|0.03|0.08|N|O|1996-11-23|1996-11-03|1996-12-22|COLLECT COD|RAIL|gular theodolites. realms boo 13797|87104|7105|3|22|24004.20|0.08|0.04|N|O|1996-10-07|1996-10-05|1996-10-28|COLLECT COD|MAIL|y silent dependencies. 
blithely final depo 13797|150810|8356|4|35|65128.35|0.06|0.02|N|O|1996-09-09|1996-10-23|1996-09-30|NONE|TRUCK| carefully unusua 13797|54194|6700|5|36|41334.84|0.10|0.07|N|O|1996-10-31|1996-10-27|1996-11-13|NONE|SHIP|ckages are furiously around 13797|35201|208|6|11|12498.20|0.06|0.04|N|O|1996-11-28|1996-11-12|1996-12-17|TAKE BACK RETURN|FOB|en request 13798|127534|2559|1|30|46845.90|0.01|0.01|N|O|1996-03-31|1996-03-19|1996-04-07|COLLECT COD|REG AIR| final, express dolphins sl 13798|56561|1572|2|26|39456.56|0.08|0.06|N|O|1996-05-24|1996-04-28|1996-05-29|NONE|FOB|entiments. requests are accounts. s 13798|13647|8650|3|11|17167.04|0.01|0.02|N|O|1996-06-02|1996-04-10|1996-06-20|DELIVER IN PERSON|AIR|. ironic ide 13798|148828|8829|4|29|54427.78|0.02|0.01|N|O|1996-05-05|1996-04-22|1996-05-17|DELIVER IN PERSON|SHIP| ideas are. slyly express pinto be 13799|50524|3030|1|42|61929.84|0.09|0.03|R|F|1994-01-30|1994-01-09|1994-02-21|TAKE BACK RETURN|SHIP|tithes. slyly silent requests are carefull 13799|152193|2194|2|39|48562.41|0.07|0.02|R|F|1994-03-04|1994-03-01|1994-03-15|DELIVER IN PERSON|RAIL| slyly fin 13799|102297|2298|3|38|49373.02|0.07|0.06|A|F|1994-03-27|1994-01-29|1994-04-15|NONE|RAIL|nding instruction 13824|129495|9496|1|33|50308.17|0.01|0.07|R|F|1995-04-26|1995-03-08|1995-05-04|COLLECT COD|TRUCK|sly accord 13824|19170|6674|2|8|8713.36|0.04|0.03|A|F|1995-01-15|1995-02-25|1995-02-06|TAKE BACK RETURN|FOB|e. special requests nag fina 13824|182867|422|3|19|37047.34|0.08|0.01|A|F|1995-03-01|1995-02-24|1995-03-29|TAKE BACK RETURN|REG AIR|riously pending, idle r 13824|41713|6722|4|27|44677.17|0.09|0.05|A|F|1995-03-19|1995-03-18|1995-04-03|DELIVER IN PERSON|TRUCK|aringly silent requests boost slyly. 13825|1286|6287|1|34|40367.52|0.06|0.03|A|F|1994-04-14|1994-05-03|1994-04-15|TAKE BACK RETURN|MAIL|g to the regular epitaphs! ironic depos 13825|198222|5780|2|5|6601.10|0.04|0.06|R|F|1994-05-01|1994-04-29|1994-05-02|TAKE BACK RETURN|SHIP|e boldly reg 13825|108726|3747|3|19|32959.68|0.10|0.08|R|F|1994-05-21|1994-04-26|1994-06-01|COLLECT COD|FOB|onic deposits beneath the 13825|157439|4985|4|29|43396.47|0.09|0.06|R|F|1994-07-08|1994-04-29|1994-08-07|TAKE BACK RETURN|REG AIR| silent, express requests. furiou 13825|77246|9754|5|2|2446.48|0.04|0.07|A|F|1994-06-17|1994-06-11|1994-06-18|DELIVER IN PERSON|MAIL| asymptotes 13825|66925|9432|6|11|20811.12|0.03|0.01|A|F|1994-05-14|1994-05-22|1994-05-15|COLLECT COD|TRUCK|ously expr 13825|198404|924|7|29|43569.60|0.04|0.04|A|F|1994-07-03|1994-05-27|1994-07-05|NONE|FOB| blithely ironic 13826|166572|4121|1|3|4915.71|0.00|0.03|N|O|1997-06-02|1997-05-05|1997-06-04|COLLECT COD|AIR| express pinto beans after the silently reg 13826|165568|3117|2|10|16335.60|0.06|0.01|N|O|1997-03-21|1997-04-25|1997-04-08|NONE|MAIL|t the blithely express plate 13826|86044|3569|3|26|26781.04|0.09|0.04|N|O|1997-04-25|1997-05-20|1997-04-27|COLLECT COD|AIR|eas snooze 13826|135159|186|4|43|51348.45|0.09|0.07|N|O|1997-02-28|1997-04-16|1997-03-21|COLLECT COD|FOB|regular excuses. regular accounts are 13826|5394|7895|5|48|62370.72|0.06|0.04|N|O|1997-05-01|1997-04-17|1997-05-13|NONE|SHIP|unusual packages h 13827|126891|9404|1|38|72879.82|0.01|0.01|R|F|1993-08-08|1993-09-17|1993-08-11|NONE|RAIL|p furiously arou 13828|55088|99|1|31|32335.48|0.02|0.08|A|F|1994-08-06|1994-08-25|1994-08-18|NONE|MAIL|lly even excuses. furiously 13828|48793|1298|2|28|48770.12|0.07|0.05|A|F|1994-09-18|1994-09-04|1994-10-01|TAKE BACK RETURN|MAIL|y. 
even, unusual deposits sleep 13829|35228|5229|1|26|30243.72|0.00|0.01|N|O|1996-12-06|1996-11-18|1997-01-05|COLLECT COD|MAIL|s detect slyly final asymptote 13829|76319|8827|2|23|29792.13|0.07|0.00|N|O|1997-01-17|1996-11-30|1997-01-19|NONE|SHIP|ckly ironic ins 13829|91581|1582|3|12|18870.96|0.05|0.04|N|O|1996-09-29|1996-11-21|1996-10-20|DELIVER IN PERSON|AIR|ld ideas-- regular instructions 13829|199305|9306|4|50|70215.00|0.00|0.04|N|O|1996-11-18|1996-12-14|1996-11-25|DELIVER IN PERSON|SHIP|. platelet 13830|49896|7409|1|11|20304.79|0.03|0.05|R|F|1995-04-15|1995-02-18|1995-05-06|COLLECT COD|REG AIR|c excuses. 13830|197174|2213|2|30|38135.10|0.04|0.08|R|F|1995-03-03|1995-02-18|1995-04-01|TAKE BACK RETURN|TRUCK|ly furiously silent accounts: 13830|101086|8617|3|30|32612.40|0.07|0.00|R|F|1995-03-28|1995-01-23|1995-04-15|COLLECT COD|TRUCK|usual pinto beans. blithely regu 13830|146280|8795|4|49|64987.72|0.00|0.05|R|F|1995-04-05|1995-02-18|1995-04-23|COLLECT COD|SHIP|iously special dependenc 13831|92518|7537|1|46|69483.46|0.03|0.05|N|O|1996-05-28|1996-06-07|1996-06-05|DELIVER IN PERSON|RAIL|nd the regularly bold req 13831|140722|5751|2|38|66983.36|0.08|0.02|N|O|1996-07-03|1996-06-23|1996-07-17|DELIVER IN PERSON|AIR|kages. instructions according to the s 13831|107869|7870|3|20|37537.20|0.03|0.04|N|O|1996-07-16|1996-05-25|1996-07-30|DELIVER IN PERSON|MAIL|aggle furiously even ideas. bl 13831|150050|5081|4|30|33001.50|0.07|0.02|N|O|1996-07-31|1996-07-11|1996-08-12|NONE|RAIL|even instruc 13831|83192|717|5|12|14102.28|0.01|0.01|N|O|1996-06-09|1996-06-15|1996-06-28|COLLECT COD|REG AIR|ackages mold above the furiously exp 13831|3734|6235|6|24|39305.52|0.08|0.07|N|O|1996-06-07|1996-06-06|1996-07-03|COLLECT COD|RAIL|thely pending ide 13856|186653|9172|1|4|6958.60|0.06|0.00|N|O|1998-03-06|1998-02-26|1998-03-12|DELIVER IN PERSON|SHIP|fully ironic deposits; slyly fl 13856|60350|7869|2|8|10482.80|0.01|0.07|N|O|1997-12-09|1998-01-14|1997-12-23|COLLECT COD|TRUCK|ly final accounts boost. 13856|96445|1464|3|23|33153.12|0.06|0.07|N|O|1998-01-26|1998-01-09|1998-02-02|DELIVER IN PERSON|TRUCK|symptotes do 13856|148673|1188|4|30|51650.10|0.10|0.00|N|O|1998-01-16|1998-01-23|1998-01-18|NONE|MAIL|usly above the carefully final 13856|132269|4783|5|15|19518.90|0.02|0.01|N|O|1998-03-12|1998-01-24|1998-04-07|NONE|MAIL|ng instructions. carefully final account 13856|39682|2186|6|8|12973.44|0.10|0.03|N|O|1998-03-15|1998-01-24|1998-04-01|TAKE BACK RETURN|FOB|ers among the carefully pendin 13856|135091|5092|7|21|23647.89|0.02|0.02|N|O|1998-03-14|1998-01-09|1998-04-11|NONE|FOB|fluffily final packages about the instruct 13857|58594|8595|1|6|9315.54|0.02|0.06|N|O|1995-08-26|1995-10-11|1995-09-15|TAKE BACK RETURN|TRUCK| furiously pending hock 13858|95440|7950|1|24|34450.56|0.09|0.02|A|F|1994-07-14|1994-09-27|1994-07-15|TAKE BACK RETURN|REG AIR|nts. ironic accounts sleep slyly 13858|57643|7644|2|2|3201.28|0.09|0.05|A|F|1994-07-29|1994-08-19|1994-08-05|TAKE BACK RETURN|TRUCK|structions. quickly ironic i 13859|146389|1418|1|25|35884.50|0.02|0.07|N|O|1997-02-21|1997-03-05|1997-03-18|NONE|FOB|integrate. quickly ex 13859|150544|545|2|5|7972.70|0.00|0.03|N|O|1997-04-29|1997-03-25|1997-05-23|NONE|FOB|lay. carefully pending excuses h 13859|190886|3406|3|38|75121.44|0.06|0.03|N|O|1997-02-20|1997-03-20|1997-03-14|COLLECT COD|SHIP|sts detect 13859|18463|3466|4|10|13814.60|0.05|0.03|N|O|1997-03-16|1997-03-30|1997-03-24|TAKE BACK RETURN|REG AIR|arefully dolphins. 
slyly 13859|104331|4332|5|5|6676.65|0.04|0.03|N|O|1997-01-20|1997-03-15|1997-02-14|TAKE BACK RETURN|FOB|. carefully speci 13859|85149|2674|6|32|36292.48|0.02|0.00|N|O|1997-02-14|1997-03-02|1997-02-19|NONE|SHIP|fluffily thin accounts. busily regular requ 13859|95970|5971|7|27|53081.19|0.08|0.05|N|O|1997-02-13|1997-03-15|1997-02-15|TAKE BACK RETURN|RAIL|ests are quickly according to the 13860|160316|7865|1|42|57805.02|0.05|0.03|A|F|1994-11-06|1994-11-29|1994-11-19|TAKE BACK RETURN|SHIP|even, special foxes 13861|44098|1611|1|18|18757.62|0.06|0.03|A|F|1993-10-06|1993-09-23|1993-10-07|COLLECT COD|TRUCK|nts will have to wake s 13861|140484|5513|2|17|25916.16|0.03|0.08|A|F|1993-10-04|1993-10-08|1993-10-30|DELIVER IN PERSON|TRUCK|. pending orbits play 13861|98594|8595|3|29|46185.11|0.03|0.08|R|F|1993-09-21|1993-09-22|1993-10-13|TAKE BACK RETURN|FOB|ial requests 13861|61323|8842|4|31|39813.92|0.01|0.05|A|F|1993-11-20|1993-09-24|1993-12-02|DELIVER IN PERSON|REG AIR|nic excuses. final, ironic platelets inte 13861|51070|3576|5|22|22463.54|0.01|0.08|R|F|1993-11-18|1993-10-12|1993-12-03|COLLECT COD|REG AIR|ts. furiously speci 13861|123183|3184|6|25|30154.50|0.09|0.07|A|F|1993-09-28|1993-09-29|1993-10-21|TAKE BACK RETURN|MAIL| ironic instructions. final, even pinto b 13861|124285|4286|7|9|11783.52|0.05|0.05|A|F|1993-11-10|1993-09-24|1993-11-21|DELIVER IN PERSON|AIR|ual theodolites haggle furiously until th 13862|56391|3907|1|39|52548.21|0.08|0.08|N|O|1997-11-22|1997-10-28|1997-12-03|TAKE BACK RETURN|TRUCK|its use permanent 13862|147854|2883|2|34|64662.90|0.01|0.06|N|O|1997-12-22|1997-10-03|1998-01-13|COLLECT COD|RAIL|unusual depos 13862|172790|5308|3|15|27941.85|0.02|0.00|N|O|1997-10-19|1997-10-05|1997-10-28|TAKE BACK RETURN|FOB|wake alongside of the 13862|80343|5360|4|3|3970.02|0.04|0.07|N|O|1997-09-06|1997-10-14|1997-09-30|COLLECT COD|RAIL|endencies. slyly ironic accounts e 13862|111861|4373|5|34|63677.24|0.04|0.08|N|O|1997-10-06|1997-10-09|1997-11-01|NONE|MAIL|ithely special escapades. carefully bo 13862|39131|1635|6|50|53506.50|0.03|0.00|N|O|1997-10-17|1997-10-21|1997-10-18|TAKE BACK RETURN|SHIP|ar pains c 13863|159211|1727|1|19|24133.99|0.01|0.03|R|F|1992-09-14|1992-07-10|1992-10-01|COLLECT COD|FOB| sleep blithely regular reque 13863|58511|6027|2|15|22042.65|0.02|0.00|A|F|1992-09-23|1992-07-06|1992-09-27|TAKE BACK RETURN|SHIP|ly even requests use. unusual, bold ide 13863|188028|3065|3|48|53568.96|0.10|0.06|A|F|1992-06-09|1992-08-15|1992-06-22|NONE|AIR|quickly about the quickly final depen 13888|101057|3568|1|8|8464.40|0.10|0.01|R|F|1993-08-04|1993-07-04|1993-08-22|TAKE BACK RETURN|REG AIR|sly against the careful 13889|174474|2026|1|26|40260.22|0.02|0.00|N|O|1995-11-27|1995-11-24|1995-12-27|DELIVER IN PERSON|RAIL|y after the f 13889|183160|5679|2|19|23620.04|0.09|0.06|N|O|1995-12-31|1995-11-04|1996-01-06|TAKE BACK RETURN|FOB|nts wake slyly pending accounts. 13890|143656|3657|1|12|20395.80|0.04|0.01|N|O|1996-09-18|1996-06-24|1996-09-23|TAKE BACK RETURN|FOB|ar packages nag blithely carefully bold 13890|19147|1649|2|4|4264.56|0.05|0.06|N|O|1996-06-05|1996-07-04|1996-06-24|TAKE BACK RETURN|MAIL|ounts. blithely silent ideas i 13890|193524|3525|3|3|4852.56|0.04|0.02|N|O|1996-08-23|1996-08-04|1996-09-12|DELIVER IN PERSON|SHIP|ic accounts. fluffily regular accounts wak 13890|152444|2445|4|18|26935.92|0.06|0.04|N|O|1996-08-05|1996-07-27|1996-08-23|COLLECT COD|TRUCK|y express requests 13891|107268|9779|1|25|31881.50|0.08|0.00|N|O|1996-07-15|1996-08-22|1996-07-27|NONE|AIR|e special, ironic deposits. 
bold pinto bea 13891|41472|1473|2|17|24028.99|0.05|0.01|N|O|1996-07-24|1996-08-09|1996-08-07|TAKE BACK RETURN|SHIP|efully pending platelets 13891|18338|8339|3|5|6281.65|0.03|0.04|N|O|1996-07-09|1996-08-23|1996-07-14|TAKE BACK RETURN|MAIL|cajole in place of the slyly iron 13891|191561|6600|4|7|11567.92|0.01|0.04|N|O|1996-07-23|1996-09-09|1996-08-01|DELIVER IN PERSON|SHIP| even requests boost blithely daring 13892|34920|4921|1|49|90891.08|0.06|0.05|A|F|1992-08-09|1992-06-26|1992-09-04|NONE|AIR|ix fluffily around the quickly 13892|86042|8551|2|44|45233.76|0.06|0.08|R|F|1992-06-09|1992-06-02|1992-06-17|TAKE BACK RETURN|SHIP|aggle carefully along 13892|98009|519|3|38|38266.00|0.00|0.08|A|F|1992-07-14|1992-06-19|1992-08-05|NONE|FOB|out the quickly sly in 13893|85995|5996|1|47|93106.53|0.01|0.07|N|O|1998-06-19|1998-06-10|1998-07-11|NONE|REG AIR|ly regular excus 13893|116198|3732|2|31|37639.89|0.09|0.03|N|O|1998-05-03|1998-05-17|1998-05-06|COLLECT COD|REG AIR|structions. 13893|133397|8424|3|12|17164.68|0.04|0.00|N|O|1998-05-20|1998-06-30|1998-05-21|TAKE BACK RETURN|TRUCK|ly express theodolites. final packa 13894|22488|2489|1|22|31030.56|0.01|0.08|R|F|1992-04-19|1992-05-19|1992-05-16|DELIVER IN PERSON|RAIL|furiously silent dolphins. 13894|149824|7367|2|39|73078.98|0.01|0.02|A|F|1992-07-21|1992-05-13|1992-08-18|TAKE BACK RETURN|REG AIR|nic dependencies sleep fluffily ca 13894|87114|7115|3|8|8808.88|0.04|0.06|R|F|1992-08-06|1992-06-15|1992-08-27|TAKE BACK RETURN|TRUCK|ly ironic theodolites 13894|109587|9588|4|30|47897.40|0.09|0.08|R|F|1992-08-06|1992-06-11|1992-08-16|COLLECT COD|AIR|even accounts wake slyly unusual pac 13895|189586|2105|1|47|78752.26|0.09|0.02|R|F|1994-03-12|1994-01-27|1994-03-19|TAKE BACK RETURN|AIR|blithely slyly steal 13895|169158|1675|2|45|55221.75|0.05|0.03|R|F|1994-03-18|1994-01-21|1994-04-07|NONE|RAIL|final sauternes a 13895|26594|4101|3|9|13685.31|0.08|0.01|A|F|1994-03-17|1993-12-21|1994-04-10|COLLECT COD|REG AIR|fily even 13895|148885|6428|4|50|96694.00|0.10|0.08|A|F|1993-12-15|1994-01-13|1993-12-27|DELIVER IN PERSON|MAIL|pecial foxes. ironic accoun 13920|58231|737|1|37|44001.51|0.01|0.04|R|F|1995-01-14|1994-12-29|1995-02-02|DELIVER IN PERSON|SHIP|yly fluffily express theodolit 13920|78556|8557|2|39|59847.45|0.00|0.02|A|F|1995-01-30|1994-12-14|1995-02-17|TAKE BACK RETURN|REG AIR| haggle bold, regular 13920|80533|8058|3|19|28757.07|0.09|0.07|R|F|1995-01-17|1994-12-06|1995-01-23|NONE|REG AIR|gainst the always quiet 13921|36168|3678|1|4|4416.64|0.03|0.07|N|O|1997-09-09|1997-09-27|1997-10-05|COLLECT COD|FOB|ounts. fluffily even packag 13921|74379|6887|2|40|54134.80|0.01|0.04|N|O|1997-09-07|1997-10-29|1997-09-09|NONE|AIR|nic requests cajole furious theodoli 13921|87825|5350|3|14|25379.48|0.03|0.08|N|O|1997-10-16|1997-11-05|1997-11-04|DELIVER IN PERSON|SHIP|lites. furiously pending 13921|74438|4439|4|33|46610.19|0.03|0.01|N|O|1997-12-07|1997-10-12|1998-01-03|NONE|MAIL|ss the slyly exp 13922|16168|6169|1|14|15178.24|0.08|0.03|N|O|1995-10-13|1995-09-28|1995-11-06|COLLECT COD|FOB|eposits x-ray blithely over 13922|126747|4284|2|50|88687.00|0.03|0.00|N|O|1995-09-18|1995-10-11|1995-10-04|DELIVER IN PERSON|TRUCK|metimes. reg 13922|24282|6785|3|37|44632.36|0.01|0.05|N|O|1995-09-30|1995-10-17|1995-10-29|NONE|SHIP| courts sleep among the p 13922|102792|7813|4|45|80765.55|0.02|0.07|N|O|1995-11-20|1995-10-23|1995-11-24|COLLECT COD|MAIL| unusual requests. carefully blit 13922|11312|8816|5|20|24466.20|0.00|0.07|N|O|1995-10-11|1995-09-14|1995-10-26|TAKE BACK RETURN|SHIP|nic requests. 
furiously unusual foxes c 13922|109741|7272|6|41|71780.34|0.09|0.08|N|O|1995-11-01|1995-10-25|1995-11-16|TAKE BACK RETURN|MAIL|ounts sleep bli 13922|14914|7416|7|32|58525.12|0.09|0.06|N|O|1995-08-31|1995-09-28|1995-09-22|COLLECT COD|FOB|cajole carefully about the reg 13923|55436|5437|1|42|58440.06|0.02|0.00|R|F|1992-09-02|1992-09-20|1992-09-27|DELIVER IN PERSON|RAIL|ages. bold Tiresi 13923|97143|2162|2|13|14821.82|0.07|0.03|A|F|1992-10-19|1992-09-09|1992-11-01|TAKE BACK RETURN|RAIL|tealthily spe 13923|73669|1191|3|1|1642.66|0.00|0.03|R|F|1992-07-23|1992-09-24|1992-07-26|COLLECT COD|AIR|wake furiously accordi 13923|154198|6714|4|26|32556.94|0.06|0.01|R|F|1992-10-01|1992-09-08|1992-10-08|NONE|REG AIR| unusual, unusual excuses affix. ca 13923|197076|7077|5|2|2346.14|0.08|0.05|A|F|1992-08-15|1992-08-14|1992-08-27|NONE|REG AIR| excuses print carefully bli 13923|43730|8739|6|24|40169.52|0.05|0.04|A|F|1992-09-17|1992-08-20|1992-10-06|COLLECT COD|AIR|o beans against the f 13924|98639|1149|1|2|3275.26|0.05|0.02|A|F|1995-04-03|1995-01-28|1995-04-28|DELIVER IN PERSON|SHIP|l courts about the slyly regular frets nag 13925|81268|3777|1|16|19988.16|0.01|0.07|N|O|1997-04-29|1997-03-16|1997-05-24|TAKE BACK RETURN|FOB|y about the regular dolphins. blithel 13925|56193|6194|2|16|18387.04|0.00|0.03|N|O|1997-02-14|1997-02-07|1997-02-24|DELIVER IN PERSON|FOB|s print slyly final pa 13925|32898|408|3|1|1830.89|0.06|0.08|N|O|1997-04-06|1997-02-11|1997-05-04|NONE|REG AIR|furiously ironic sheaves. requests acro 13925|111554|4066|4|38|59490.90|0.06|0.08|N|O|1997-03-08|1997-03-17|1997-03-25|COLLECT COD|MAIL|e alongside of the furiously un 13925|76621|1636|5|43|68697.66|0.09|0.05|N|O|1997-01-19|1997-03-24|1997-02-04|NONE|AIR|p against the final accounts. pendin 13926|180260|5297|1|28|37527.28|0.10|0.05|N|O|1997-08-29|1997-08-17|1997-09-26|NONE|AIR|ke furiously at the quickly ironi 13926|28661|3666|2|1|1589.66|0.01|0.01|N|O|1997-09-14|1997-08-12|1997-09-26|NONE|AIR|the furiously silent sheaves. ironic, 13926|89759|2268|3|3|5246.25|0.00|0.08|N|O|1997-10-30|1997-09-19|1997-11-05|COLLECT COD|SHIP|even deposits cajole quickly 13927|109042|4063|1|17|17867.68|0.04|0.00|R|F|1994-11-13|1994-10-10|1994-12-07|TAKE BACK RETURN|FOB|ss accounts engage slyly instructions; pen 13927|54949|9960|2|9|17135.46|0.05|0.02|A|F|1994-11-27|1994-11-16|1994-12-07|DELIVER IN PERSON|SHIP|usly unusual ac 13927|122669|7694|3|21|35524.86|0.05|0.01|R|F|1994-09-08|1994-11-11|1994-09-13|COLLECT COD|SHIP| foxes sleep. ironic ideas 13927|95170|189|4|24|27964.08|0.01|0.06|A|F|1994-11-27|1994-10-29|1994-12-02|TAKE BACK RETURN|FOB|to beans. furiously final theodolite 13927|116240|6241|5|6|7537.44|0.00|0.02|A|F|1994-12-05|1994-10-10|1994-12-07|TAKE BACK RETURN|FOB|posits sleep slyly caref 13927|41125|3630|6|13|13859.56|0.00|0.00|R|F|1994-10-19|1994-11-25|1994-10-25|COLLECT COD|MAIL|g according to the ironic package 13952|27050|7051|1|31|30288.55|0.05|0.06|N|O|1997-06-06|1997-06-03|1997-06-18|DELIVER IN PERSON|REG AIR|press depe 13952|13167|8170|2|41|44286.56|0.09|0.02|N|O|1997-08-12|1997-07-24|1997-08-28|COLLECT COD|AIR|y ironic, regular courts. qui 13952|84858|2383|3|22|40542.70|0.03|0.05|N|O|1997-05-28|1997-06-28|1997-06-08|TAKE BACK RETURN|REG AIR|express excuses. carefully ru 13953|183927|1482|1|29|58316.68|0.02|0.02|N|O|1995-09-09|1995-08-22|1995-09-24|COLLECT COD|AIR|lar packages cajole blithely. 
car 13953|58579|6095|2|49|75340.93|0.05|0.06|N|O|1995-09-09|1995-08-03|1995-09-10|NONE|TRUCK| slyly about the r 13953|133161|701|3|48|57319.68|0.03|0.07|N|O|1995-08-11|1995-08-14|1995-09-02|COLLECT COD|REG AIR|uests haggle slyly. theodol 13953|22484|7489|4|47|66104.56|0.09|0.01|N|O|1995-08-15|1995-07-09|1995-08-28|NONE|TRUCK|ecial deposits. carefully regular dependen 13954|196115|8635|1|3|3633.33|0.05|0.06|N|O|1996-06-17|1996-06-13|1996-07-03|COLLECT COD|REG AIR|the ironically regular deposits. expr 13954|97636|2655|2|45|73513.35|0.08|0.08|N|O|1996-08-30|1996-06-08|1996-09-28|TAKE BACK RETURN|RAIL| deposits. quickly ironic 13954|164891|7408|3|20|39117.80|0.06|0.07|N|O|1996-06-06|1996-06-17|1996-06-25|DELIVER IN PERSON|REG AIR|luffy packages are never above the carefu 13954|108191|3212|4|13|15589.47|0.06|0.01|N|O|1996-07-03|1996-06-13|1996-07-12|COLLECT COD|RAIL| according to the final accounts use final 13955|97215|2234|1|28|33941.88|0.00|0.00|N|O|1998-07-18|1998-07-02|1998-08-09|DELIVER IN PERSON|REG AIR|accounts after the blithely regular t 13955|77570|78|2|8|12380.56|0.10|0.00|N|O|1998-05-20|1998-08-05|1998-06-03|DELIVER IN PERSON|FOB|nusual packages. 13955|112304|2305|3|16|21060.80|0.06|0.00|N|O|1998-07-31|1998-07-24|1998-08-10|TAKE BACK RETURN|MAIL|y even packages. 13955|92489|17|4|42|62222.16|0.01|0.08|N|O|1998-08-10|1998-07-14|1998-08-22|COLLECT COD|RAIL|rmanent excuses. even, bo 13956|112692|226|1|20|34093.80|0.03|0.03|A|F|1995-04-02|1995-04-27|1995-04-03|TAKE BACK RETURN|TRUCK|s instructions. slyly bold 13956|197969|5527|2|13|26870.48|0.05|0.04|R|F|1995-05-09|1995-05-02|1995-05-13|TAKE BACK RETURN|REG AIR|onic, regular packages boost slyly. pend 13956|53833|8844|3|4|7147.32|0.08|0.00|N|F|1995-06-12|1995-05-03|1995-06-20|COLLECT COD|TRUCK|deposits instead of the packa 13956|165692|5693|4|38|66792.22|0.10|0.07|N|O|1995-07-04|1995-04-17|1995-07-13|TAKE BACK RETURN|TRUCK|ajole slyly. carefully ironic pa 13957|152582|128|1|46|75190.68|0.05|0.00|N|O|1995-08-13|1995-10-12|1995-08-30|TAKE BACK RETURN|TRUCK| nag carefully even packages. fluffily e 13957|33063|5567|2|32|31873.92|0.07|0.06|N|O|1995-09-09|1995-09-12|1995-10-05|COLLECT COD|MAIL| accounts wake gifts. fluffily d 13957|79434|1942|3|48|67844.64|0.09|0.02|N|O|1995-09-19|1995-09-21|1995-09-30|NONE|RAIL|y under the slyly e 13958|109665|2176|1|7|11722.62|0.10|0.00|R|F|1993-03-15|1993-03-08|1993-03-31|TAKE BACK RETURN|MAIL|s instructions. pending ideas sleep 13958|126365|1390|2|29|40349.44|0.10|0.02|R|F|1993-04-23|1993-03-13|1993-05-04|TAKE BACK RETURN|TRUCK|egular theodolites wake agai 13958|10920|3422|3|16|29294.72|0.10|0.01|A|F|1993-04-25|1993-03-31|1993-05-20|DELIVER IN PERSON|TRUCK|eas after the quickly bold fo 13959|124028|9053|1|44|46288.88|0.03|0.04|R|F|1994-05-09|1994-05-25|1994-05-25|DELIVER IN PERSON|SHIP|fily final requ 13959|122936|2937|2|47|92069.71|0.10|0.01|R|F|1994-07-22|1994-05-11|1994-07-30|NONE|FOB|are around the dolphin 13984|92361|2362|1|39|52781.04|0.07|0.00|A|F|1992-11-30|1992-11-08|1992-12-21|TAKE BACK RETURN|AIR|l pinto beans. fin 13984|108573|3594|2|45|71170.65|0.07|0.08|R|F|1992-12-03|1992-11-25|1992-12-16|NONE|MAIL|y slyly even 13984|182037|7074|3|1|1119.03|0.00|0.04|A|F|1993-01-22|1992-11-12|1993-02-17|COLLECT COD|REG AIR| nag slyly ironic ideas. f 13984|79300|1808|4|48|61406.40|0.01|0.06|A|F|1992-12-19|1992-12-17|1992-12-22|DELIVER IN PERSON|FOB|ses. furiously even gifts wake furi 13985|35416|7920|1|7|9459.87|0.02|0.08|N|O|1998-10-09|1998-08-24|1998-11-02|NONE|SHIP|s haggle beyond the slyly bold dinos. 
regu 13985|36502|1509|2|7|10069.50|0.05|0.08|N|O|1998-08-22|1998-08-26|1998-09-08|NONE|RAIL|. furiously regular accounts use furiousl 13985|87827|5352|3|38|68963.16|0.06|0.03|N|O|1998-07-24|1998-07-15|1998-08-03|COLLECT COD|SHIP|es are! silent, regular a 13985|103296|3297|4|36|46774.44|0.09|0.04|N|O|1998-09-09|1998-07-24|1998-09-18|DELIVER IN PERSON|MAIL|inal pinto beans. always pending package 13985|80572|573|5|8|12420.56|0.04|0.06|N|O|1998-08-17|1998-07-15|1998-08-20|NONE|REG AIR|g deposits: f 13985|164261|6778|6|29|38432.54|0.01|0.02|N|O|1998-07-02|1998-08-07|1998-07-29|DELIVER IN PERSON|MAIL|platelets sleep carefully? even dependenci 13986|35526|533|1|9|13153.68|0.00|0.08|N|O|1995-11-07|1995-11-13|1995-11-20|DELIVER IN PERSON|TRUCK|old packages use. 13986|73059|581|2|24|24769.20|0.03|0.04|N|O|1995-11-07|1996-01-04|1995-11-16|DELIVER IN PERSON|TRUCK|he final accounts; pinto beans wak 13986|87191|2208|3|23|27098.37|0.10|0.05|N|O|1996-02-09|1996-01-02|1996-03-01|TAKE BACK RETURN|AIR| theodolites. slyly silent deposits are 13986|12776|7779|4|13|21954.01|0.07|0.08|N|O|1995-11-16|1996-01-05|1995-11-29|DELIVER IN PERSON|REG AIR|le blithely a 13987|47483|2492|1|25|35762.00|0.07|0.03|N|O|1998-07-28|1998-05-17|1998-08-12|TAKE BACK RETURN|RAIL|refully fina 13987|32567|7574|2|20|29991.20|0.04|0.05|N|O|1998-07-25|1998-05-17|1998-08-23|DELIVER IN PERSON|AIR|carefully brave foxes. carefully final p 13987|147955|7956|3|5|10014.75|0.09|0.07|N|O|1998-07-28|1998-07-01|1998-08-22|DELIVER IN PERSON|TRUCK|y even patter 13988|28827|3832|1|40|70232.80|0.03|0.08|A|F|1992-08-22|1992-09-15|1992-08-27|NONE|SHIP|slyly final instructions. evenly sile 13988|78847|3862|2|10|18258.40|0.07|0.06|A|F|1992-08-29|1992-10-30|1992-09-24|DELIVER IN PERSON|FOB|s. even deposits unwind 13988|172544|2545|3|28|45263.12|0.01|0.06|A|F|1992-12-01|1992-09-21|1992-12-27|COLLECT COD|AIR|ncies. regular, reg 13989|173056|8091|1|14|15806.70|0.03|0.01|N|O|1997-07-26|1997-09-18|1997-07-30|NONE|FOB|nto the the 13989|149139|1654|2|49|58218.37|0.06|0.05|N|O|1997-09-26|1997-08-27|1997-10-23|DELIVER IN PERSON|RAIL| warthogs 13989|162896|2897|3|33|64643.37|0.04|0.08|N|O|1997-08-13|1997-08-25|1997-08-20|COLLECT COD|REG AIR|tes mold furiously. blithely pending i 13989|184382|6901|4|7|10264.66|0.02|0.06|N|O|1997-10-11|1997-08-11|1997-11-08|COLLECT COD|MAIL|eep fluffily amon 13989|106581|6582|5|37|58740.46|0.05|0.02|N|O|1997-11-01|1997-08-18|1997-11-12|DELIVER IN PERSON|SHIP|. regular requests hang slyly even f 13990|96939|9449|1|11|21295.23|0.04|0.01|N|O|1998-05-10|1998-03-10|1998-06-04|TAKE BACK RETURN|AIR|ep blithely. 13990|39816|7326|2|13|22825.53|0.01|0.01|N|O|1998-05-13|1998-03-16|1998-05-15|TAKE BACK RETURN|FOB|past the furiou 13990|170884|3402|3|12|23458.56|0.06|0.02|N|O|1998-03-10|1998-04-18|1998-03-18|TAKE BACK RETURN|REG AIR|inal requests-- carefully 13990|37299|4809|4|5|6181.45|0.02|0.05|N|O|1998-03-14|1998-04-09|1998-03-30|TAKE BACK RETURN|RAIL|uickly after the regular, unusual 13990|80288|2797|5|37|46926.36|0.05|0.03|N|O|1998-05-30|1998-03-29|1998-06-20|DELIVER IN PERSON|TRUCK| the slyly final theodolites. blithel 13990|198060|5618|6|11|12738.66|0.07|0.03|N|O|1998-05-04|1998-04-05|1998-05-10|NONE|TRUCK|ke carefully sl 13991|185788|8307|1|17|31854.26|0.10|0.01|R|F|1992-06-04|1992-04-12|1992-06-26|NONE|SHIP|ests sleep slyly. 
sheaves sleep above t 13991|136728|4268|2|43|75882.96|0.02|0.02|A|F|1992-04-17|1992-04-14|1992-05-12|DELIVER IN PERSON|FOB| beans nag blithely above th 13991|182085|2086|3|32|37346.56|0.03|0.05|A|F|1992-03-21|1992-05-08|1992-04-18|TAKE BACK RETURN|SHIP|ironic asymptotes? blithely unusua 13991|88600|3617|4|43|68309.80|0.00|0.00|R|F|1992-03-18|1992-06-02|1992-03-21|COLLECT COD|FOB|lar packages slee 13991|89875|9876|5|26|48486.62|0.02|0.01|A|F|1992-03-28|1992-04-25|1992-04-02|DELIVER IN PERSON|SHIP|the ideas. accounts are c 14016|190582|5621|1|14|23416.12|0.00|0.00|A|F|1992-09-29|1992-08-12|1992-10-11|TAKE BACK RETURN|AIR|integrate Tiresias. furiously e 14016|105291|312|2|44|57036.76|0.08|0.02|A|F|1992-10-31|1992-08-25|1992-11-16|COLLECT COD|RAIL|ges affix carefully accord 14016|21047|8554|3|41|39689.64|0.07|0.01|R|F|1992-07-18|1992-09-21|1992-08-05|COLLECT COD|FOB|slyly silent foxes. final accounts na 14016|131300|6327|4|48|63902.40|0.03|0.06|A|F|1992-08-27|1992-08-14|1992-09-12|NONE|RAIL|riously even a 14016|148151|3180|5|2|2398.30|0.02|0.02|R|F|1992-10-31|1992-09-07|1992-11-11|DELIVER IN PERSON|RAIL| ironic accounts ar 14016|165613|8130|6|18|30214.98|0.10|0.08|R|F|1992-09-09|1992-09-20|1992-09-10|DELIVER IN PERSON|TRUCK|deposits. slyly even requ 14016|54443|1959|7|3|4192.32|0.10|0.00|A|F|1992-08-17|1992-09-30|1992-09-16|NONE|AIR|es. expres 14017|20429|7936|1|10|13494.20|0.08|0.05|A|F|1994-07-02|1994-05-13|1994-07-12|NONE|TRUCK|uests. special deposits x-ray. rut 14017|140411|412|2|22|31931.02|0.10|0.03|R|F|1994-05-06|1994-04-17|1994-05-18|NONE|RAIL|press foxes wak 14017|78510|1018|3|31|46143.81|0.05|0.06|R|F|1994-04-21|1994-05-12|1994-05-17|DELIVER IN PERSON|TRUCK|. carefully final d 14017|109378|9379|4|37|51332.69|0.06|0.03|R|F|1994-05-12|1994-06-05|1994-05-25|NONE|FOB|tes play slyly after the ironic, 14017|114972|4973|5|1|1986.97|0.00|0.08|R|F|1994-05-17|1994-05-29|1994-06-14|DELIVER IN PERSON|REG AIR|ts above the fin 14017|119517|7051|6|14|21511.14|0.02|0.06|R|F|1994-06-16|1994-04-29|1994-06-20|TAKE BACK RETURN|FOB|efully carefully bold dependencies. care 14018|32997|8004|1|11|21229.89|0.00|0.03|R|F|1993-08-05|1993-08-02|1993-08-25|COLLECT COD|AIR| blithely according 14018|174454|2006|2|16|24455.20|0.06|0.03|A|F|1993-07-28|1993-08-17|1993-08-03|NONE|FOB|its. blithely pending re 14018|36619|9123|3|21|32667.81|0.03|0.01|A|F|1993-06-20|1993-07-26|1993-07-18|DELIVER IN PERSON|TRUCK|cross the regular packages hinder pendin 14018|46451|1460|4|27|37731.15|0.07|0.04|A|F|1993-06-08|1993-08-13|1993-06-25|COLLECT COD|AIR|brave requests. iron 14019|47545|5058|1|6|8955.24|0.05|0.00|N|O|1997-12-17|1997-10-21|1998-01-12|COLLECT COD|MAIL|hins. fluffily even i 14019|73913|8928|2|30|56607.30|0.01|0.07|N|O|1997-11-23|1997-11-07|1997-12-07|COLLECT COD|RAIL|nic deposits: special instructi 14019|39895|9896|3|47|86239.83|0.05|0.04|N|O|1997-11-27|1997-12-05|1997-11-29|NONE|FOB| cajole quickly alongside of the special d 14019|99689|2199|4|19|32084.92|0.01|0.03|N|O|1997-10-16|1997-12-01|1997-11-13|DELIVER IN PERSON|AIR|s among the fluffily special requests boo 14019|59582|9583|5|39|60121.62|0.05|0.07|N|O|1997-10-14|1997-11-12|1997-10-30|TAKE BACK RETURN|MAIL|sits wake furiously blithely ironic acc 14020|197282|4840|1|24|33102.72|0.00|0.03|N|O|1998-04-02|1998-03-23|1998-04-10|COLLECT COD|TRUCK|unts. packages are carefully about the f 14020|102615|146|2|33|53381.13|0.06|0.01|N|O|1998-04-16|1998-04-16|1998-04-23|NONE|REG AIR|re evenly even, final theodolites. 
bli 14021|199795|7353|1|4|7579.16|0.10|0.00|A|F|1995-04-17|1995-05-13|1995-04-21|DELIVER IN PERSON|FOB|cally slowly 14021|140451|7994|2|34|50709.30|0.02|0.00|A|F|1995-04-08|1995-05-11|1995-04-14|COLLECT COD|RAIL|sts haggle slyly. final ideas ca 14021|151820|4336|3|20|37436.40|0.10|0.02|R|F|1995-03-12|1995-05-03|1995-03-31|DELIVER IN PERSON|REG AIR|he regular, ironic instructions. furiously 14021|36830|1837|4|40|70673.20|0.02|0.00|N|O|1995-06-27|1995-06-05|1995-07-01|TAKE BACK RETURN|REG AIR|l instructions out 14021|196445|8965|5|27|41618.88|0.10|0.05|A|F|1995-04-04|1995-04-17|1995-04-22|TAKE BACK RETURN|TRUCK|ual foxes. furiously ir 14021|49016|9017|6|15|14475.15|0.00|0.00|R|F|1995-05-09|1995-04-26|1995-05-18|NONE|RAIL|ly final packages. deposi 14021|6778|9279|7|12|20217.24|0.01|0.02|A|F|1995-06-03|1995-05-09|1995-06-09|DELIVER IN PERSON|SHIP|ly bold requests are fluffily according to 14022|110756|3268|1|4|7067.00|0.05|0.00|R|F|1995-05-16|1995-05-03|1995-06-14|TAKE BACK RETURN|AIR|ully ironic excuses doze quickly. ironic 14022|69831|4844|2|41|73834.03|0.03|0.06|N|F|1995-06-08|1995-04-01|1995-06-28|TAKE BACK RETURN|REG AIR|es use against the 14023|27236|9739|1|36|41876.28|0.06|0.08|R|F|1993-05-15|1993-05-04|1993-05-30|TAKE BACK RETURN|RAIL|al accounts are blithely according to the s 14023|147153|9668|2|3|3600.45|0.02|0.02|R|F|1993-04-17|1993-05-05|1993-05-07|COLLECT COD|TRUCK|lly express pack 14023|197819|7820|3|31|59421.11|0.06|0.07|R|F|1993-04-20|1993-04-13|1993-04-30|TAKE BACK RETURN|RAIL|ress, regular ac 14023|137022|4562|4|17|18003.34|0.04|0.05|A|F|1993-02-28|1993-04-10|1993-03-22|DELIVER IN PERSON|SHIP|nic deposits. c 14048|101818|1819|1|25|45495.25|0.02|0.07|R|F|1993-08-26|1993-06-27|1993-09-13|NONE|SHIP|; regular instructions print furiously. 14048|38483|3490|2|7|9950.36|0.01|0.08|A|F|1993-08-25|1993-07-27|1993-09-01|COLLECT COD|TRUCK|telets. regular epitaphs cajole caref 14048|178336|8337|3|30|42429.90|0.00|0.00|A|F|1993-06-07|1993-07-29|1993-06-21|COLLECT COD|FOB|s nag quickly. sly dolphins 14048|84919|9936|4|37|70444.67|0.08|0.06|R|F|1993-07-02|1993-06-24|1993-07-19|COLLECT COD|AIR|ongside of 14048|18521|3524|5|35|50383.20|0.02|0.08|A|F|1993-09-09|1993-08-01|1993-10-01|TAKE BACK RETURN|AIR|at the pending accounts boost fluffil 14048|67785|5304|6|2|3505.56|0.10|0.01|A|F|1993-06-13|1993-07-29|1993-07-06|TAKE BACK RETURN|RAIL|he silent packa 14048|60406|5419|7|21|28694.40|0.06|0.03|A|F|1993-06-04|1993-07-20|1993-06-20|NONE|SHIP|sts among the ironic packages maintain q 14049|31311|1312|1|39|48450.09|0.00|0.02|N|O|1996-05-14|1996-04-27|1996-05-24|DELIVER IN PERSON|MAIL| haggle according to the slyly da 14049|42697|5202|2|3|4919.07|0.06|0.00|N|O|1996-02-22|1996-03-22|1996-03-15|COLLECT COD|RAIL| regular asymptotes breach quickly bl 14049|192490|48|3|10|15824.90|0.07|0.07|N|O|1996-05-07|1996-03-31|1996-05-31|TAKE BACK RETURN|AIR|gside of the carefully r 14050|130335|336|1|7|9557.31|0.01|0.02|N|O|1997-10-27|1997-10-17|1997-11-21|COLLECT COD|FOB|egular platelets. regular, ironic request 14050|130444|5471|2|14|20642.16|0.08|0.08|N|O|1997-10-19|1997-09-26|1997-10-25|DELIVER IN PERSON|RAIL|inal foxes haggle. even asymptotes haggle 14050|165784|5785|3|32|59192.96|0.08|0.04|N|O|1997-09-07|1997-10-19|1997-09-28|COLLECT COD|SHIP|he fluffy packages. pa 14050|124176|1713|4|8|9601.36|0.00|0.08|N|O|1997-12-05|1997-11-17|1997-12-14|COLLECT COD|MAIL| regular platelets 14050|12589|7592|5|47|70574.26|0.07|0.04|N|O|1997-11-09|1997-09-26|1997-11-23|NONE|FOB|even dugouts. 
fluffily even account 14050|170806|8358|6|21|39412.80|0.06|0.01|N|O|1997-09-07|1997-10-06|1997-09-29|TAKE BACK RETURN|TRUCK|ons detect slyly 14051|29603|4608|1|19|29119.40|0.06|0.04|A|F|1993-10-12|1993-11-25|1993-11-10|TAKE BACK RETURN|FOB|cording to the furiousl 14052|140796|797|1|17|31225.43|0.04|0.08|R|F|1995-04-15|1995-04-17|1995-04-18|NONE|AIR|o the blit 14053|156746|4292|1|4|7210.96|0.10|0.08|R|F|1993-03-12|1993-03-22|1993-04-08|NONE|REG AIR|s sleep. even, special th 14053|23655|1162|2|19|29994.35|0.10|0.02|A|F|1993-04-26|1993-04-29|1993-04-30|NONE|REG AIR|theodolites abov 14054|139493|4520|1|46|70494.54|0.06|0.01|N|O|1996-04-10|1996-04-10|1996-04-11|NONE|FOB|slyly pending deposits along 14054|44368|4369|2|40|52494.40|0.09|0.07|N|O|1996-03-12|1996-02-15|1996-03-31|COLLECT COD|MAIL| carefully even deposits nag stealthily 14054|88745|6270|3|13|22538.62|0.05|0.05|N|O|1996-04-24|1996-04-03|1996-05-22|NONE|REG AIR|t deposits use fur 14054|131715|6742|4|20|34934.20|0.06|0.02|N|O|1996-04-12|1996-04-07|1996-04-30|DELIVER IN PERSON|AIR|ven packages. carefully ironic somas 14054|120775|776|5|17|30528.09|0.08|0.02|N|O|1996-01-26|1996-03-07|1996-02-13|DELIVER IN PERSON|RAIL|ackages. requests are furio 14054|48907|8908|6|32|59388.80|0.02|0.03|N|O|1996-03-08|1996-02-18|1996-03-19|COLLECT COD|SHIP|ns breach across the special accou 14054|165582|8099|7|45|74141.10|0.10|0.07|N|O|1996-01-25|1996-04-03|1996-02-17|NONE|RAIL|regular instructions haggle car 14055|135791|8305|1|46|84032.34|0.05|0.06|A|F|1994-12-08|1994-11-22|1994-12-14|COLLECT COD|RAIL|y special deposits use atop 14055|37516|20|2|1|1453.51|0.02|0.04|A|F|1994-12-12|1994-11-17|1994-12-17|DELIVER IN PERSON|SHIP|the carefull 14055|126830|4367|3|26|48277.58|0.06|0.07|A|F|1994-09-12|1994-11-19|1994-09-20|COLLECT COD|REG AIR| sleep furiously 14080|79562|2070|1|38|58579.28|0.04|0.01|N|O|1998-08-19|1998-07-11|1998-09-12|DELIVER IN PERSON|MAIL|against the 14081|70954|3462|1|45|86622.75|0.10|0.06|R|F|1993-10-14|1994-01-04|1993-10-29|TAKE BACK RETURN|REG AIR|ts. pending platele 14081|121672|6697|2|41|69440.47|0.08|0.07|A|F|1994-02-02|1993-11-24|1994-02-23|TAKE BACK RETURN|RAIL| bold foxes doubt bold excuses. furious 14082|95605|624|1|28|44816.80|0.10|0.03|N|O|1998-06-17|1998-06-10|1998-07-09|NONE|FOB|bold dependencies sleep alongs 14083|56563|1574|1|8|12156.48|0.00|0.00|A|F|1994-11-12|1994-10-17|1994-12-04|TAKE BACK RETURN|AIR|. express accounts h 14084|190259|7817|1|43|58017.75|0.07|0.02|N|O|1995-10-27|1995-10-31|1995-11-07|COLLECT COD|MAIL|ular pinto beans sl 14084|79390|1898|2|46|62991.94|0.05|0.00|N|O|1995-09-01|1995-09-08|1995-09-14|COLLECT COD|RAIL| foxes cajole blithel 14084|98209|719|3|31|37423.20|0.04|0.05|N|O|1995-10-23|1995-10-18|1995-10-29|TAKE BACK RETURN|TRUCK|s. ironic packag 14084|197744|5302|4|43|79194.82|0.00|0.08|N|O|1995-08-12|1995-10-09|1995-08-18|COLLECT COD|REG AIR|ut the idly pending id 14084|74901|9916|5|6|11255.40|0.02|0.02|N|O|1995-08-25|1995-10-12|1995-09-18|TAKE BACK RETURN|SHIP|. theodolites sleep-- packages cajole a 14085|112495|29|1|34|51254.66|0.09|0.03|R|F|1994-03-04|1994-04-17|1994-03-05|DELIVER IN PERSON|TRUCK|ding, final multipliers boo 14085|86723|9232|2|7|11968.04|0.07|0.01|A|F|1994-03-01|1994-03-11|1994-03-15|COLLECT COD|RAIL|instructions acro 14085|141508|4023|3|34|52683.00|0.09|0.03|R|F|1994-05-29|1994-04-03|1994-06-04|DELIVER IN PERSON|SHIP|ke even, bold accounts. 
quickly 14085|118893|8894|4|50|95594.50|0.10|0.02|R|F|1994-05-25|1994-04-11|1994-06-22|TAKE BACK RETURN|MAIL|unts wake carefully pinto 14086|121179|1180|1|7|8401.19|0.10|0.01|R|F|1993-12-26|1993-12-11|1994-01-10|NONE|RAIL|use regularly. carefully fina 14086|46187|6188|2|21|23796.78|0.04|0.04|A|F|1993-12-03|1993-12-03|1993-12-20|COLLECT COD|SHIP|as. slyly bold foxes nag carefully after t 14086|49963|7476|3|7|13390.72|0.04|0.00|A|F|1993-11-27|1994-01-09|1993-12-01|COLLECT COD|MAIL|aggle. even accounts along the p 14086|114171|9194|4|9|10666.53|0.09|0.07|R|F|1993-11-18|1993-12-15|1993-11-30|NONE|REG AIR|gular deposits haggle ag 14087|50737|3243|1|23|38817.79|0.00|0.06|N|O|1997-04-18|1997-04-06|1997-04-23|TAKE BACK RETURN|REG AIR|ly ironic pin 14087|128733|3758|2|35|61660.55|0.02|0.07|N|O|1997-05-17|1997-03-28|1997-06-04|TAKE BACK RETURN|AIR|fix across the final packages. fl 14087|3642|1143|3|50|77282.00|0.08|0.00|N|O|1997-06-02|1997-05-11|1997-06-08|NONE|TRUCK| express package 14087|6616|1617|4|37|56336.57|0.06|0.02|N|O|1997-04-29|1997-03-25|1997-05-04|COLLECT COD|REG AIR|ress courts poach quickly against 14087|185362|7881|5|30|43420.80|0.01|0.02|N|O|1997-05-14|1997-03-16|1997-06-05|NONE|AIR|arefully about the silent dolphins 14112|51072|3578|1|15|15346.05|0.03|0.01|R|F|1994-09-13|1994-10-14|1994-09-20|NONE|REG AIR|he accounts. pinto beans boost furious 14113|164873|4874|1|4|7751.48|0.02|0.03|N|O|1995-09-09|1995-10-21|1995-09-17|TAKE BACK RETURN|RAIL|ntegrate after 14113|6282|1283|2|21|24953.88|0.00|0.03|N|O|1995-10-28|1995-10-19|1995-11-10|COLLECT COD|TRUCK|fully fluffy foxes. dependencies 14114|128598|8599|1|31|50424.29|0.09|0.05|A|F|1994-05-19|1994-05-22|1994-06-17|NONE|AIR|ross the blithely express 14114|142944|487|2|35|69542.90|0.01|0.06|R|F|1994-06-02|1994-06-04|1994-06-24|TAKE BACK RETURN|AIR|the always even 14114|48361|8362|3|17|22259.12|0.05|0.04|R|F|1994-05-27|1994-05-19|1994-06-14|DELIVER IN PERSON|RAIL| beans around the even accou 14114|1051|6052|4|15|14280.75|0.03|0.03|R|F|1994-05-13|1994-05-21|1994-05-24|DELIVER IN PERSON|MAIL|tes detect fluffil 14114|58786|1292|5|9|15703.02|0.08|0.02|A|F|1994-07-26|1994-05-21|1994-08-17|TAKE BACK RETURN|REG AIR|ggle. regular, even instructio 14114|10606|3108|6|40|60664.00|0.09|0.03|A|F|1994-05-12|1994-06-12|1994-06-02|TAKE BACK RETURN|MAIL|sly pending packages. even d 14115|7012|7013|1|31|28489.31|0.08|0.06|A|F|1994-09-05|1994-10-07|1994-09-24|TAKE BACK RETURN|RAIL|even deposits. instructio 14115|40351|352|2|30|38740.50|0.00|0.00|A|F|1994-11-05|1994-11-03|1994-11-27|NONE|FOB|aggle quickly at the f 14116|23980|3981|1|9|17135.82|0.04|0.01|N|O|1995-10-12|1995-12-15|1995-10-22|DELIVER IN PERSON|RAIL|ole besides 14116|29208|1711|2|7|7960.40|0.04|0.06|N|O|1995-11-09|1995-12-01|1995-12-01|TAKE BACK RETURN|AIR|s unwind carefully blithely spe 14116|98775|6303|3|30|53213.10|0.05|0.06|N|O|1995-12-17|1995-11-21|1995-12-28|DELIVER IN PERSON|MAIL|aggle. carefully pending Tiresias 14116|109854|7385|4|36|67098.60|0.00|0.07|N|O|1995-12-25|1995-12-21|1996-01-19|DELIVER IN PERSON|REG AIR| accounts. furiously even dolphins are 14116|102923|7944|5|39|75110.88|0.09|0.03|N|O|1995-12-27|1995-12-07|1996-01-10|TAKE BACK RETURN|RAIL| print quickly. sl 14116|10526|3028|6|41|58897.32|0.00|0.07|N|O|1995-12-06|1995-11-29|1995-12-16|DELIVER IN PERSON|RAIL| slyly unusual p 14117|163814|3815|1|23|43189.63|0.03|0.06|A|F|1993-10-14|1993-10-31|1993-10-25|DELIVER IN PERSON|AIR|sly regular courts. 
ca 14117|17696|2699|2|43|69388.67|0.09|0.04|R|F|1993-12-03|1993-10-29|1994-01-01|NONE|SHIP|ts. platelets nag clos 14117|10516|5519|3|47|67045.97|0.03|0.02|R|F|1993-11-04|1993-10-12|1993-11-05|COLLECT COD|FOB|furiously bold platelets use. furiousl 14117|136403|6404|4|48|69091.20|0.06|0.07|A|F|1993-10-09|1993-11-07|1993-10-26|TAKE BACK RETURN|MAIL|hely final d 14117|119581|9582|5|32|51218.56|0.04|0.04|A|F|1993-10-29|1993-09-27|1993-11-09|DELIVER IN PERSON|RAIL|wake quickly ironic foxes. slyly sil 14117|189835|9836|6|5|9624.15|0.05|0.05|A|F|1993-09-08|1993-10-08|1993-09-28|NONE|FOB|oxes detect furiously. excuses 14117|141348|1349|7|39|54184.26|0.05|0.02|R|F|1993-11-02|1993-10-22|1993-11-07|NONE|MAIL|r, stealthy accounts use after the 14118|32226|4730|1|37|42854.14|0.10|0.01|A|F|1993-07-02|1993-08-16|1993-07-25|DELIVER IN PERSON|SHIP|y careful accounts are 14119|118706|6240|1|42|72437.40|0.04|0.01|A|F|1993-07-15|1993-07-28|1993-08-11|NONE|RAIL|ial accounts. blithely even excuses x-ray q 14119|76212|8720|2|30|35646.30|0.06|0.00|A|F|1993-07-12|1993-07-26|1993-07-19|TAKE BACK RETURN|FOB|furiously pending theodolites. 14119|84105|4106|3|6|6534.60|0.01|0.04|R|F|1993-07-21|1993-07-27|1993-07-23|TAKE BACK RETURN|AIR|e carefull 14119|141746|6775|4|26|46481.24|0.03|0.02|A|F|1993-08-01|1993-07-16|1993-08-16|NONE|MAIL|never express dependencies; quickly bo 14119|120868|8405|5|10|18888.60|0.05|0.05|A|F|1993-08-12|1993-08-06|1993-09-08|TAKE BACK RETURN|FOB|sly even pinto beans. carefully pendi 14119|20858|8365|6|48|85384.80|0.02|0.08|A|F|1993-07-31|1993-07-13|1993-08-21|COLLECT COD|MAIL|y unusual, bo 14144|82691|2692|1|29|48537.01|0.03|0.05|N|O|1997-08-18|1997-06-28|1997-08-21|DELIVER IN PERSON|SHIP|ed asymptotes cajole fluffil 14144|184193|4194|2|48|61305.12|0.04|0.06|N|O|1997-07-13|1997-07-06|1997-07-19|DELIVER IN PERSON|SHIP| the final dependencies. carefully r 14144|131320|3834|3|31|41890.92|0.05|0.06|N|O|1997-07-25|1997-07-12|1997-08-16|TAKE BACK RETURN|AIR|carefully ironic dependencies. bl 14144|79241|4256|4|22|26845.28|0.07|0.05|N|O|1997-08-20|1997-07-16|1997-09-11|DELIVER IN PERSON|REG AIR|y across the even 14144|192818|376|5|8|15286.48|0.05|0.01|N|O|1997-05-29|1997-07-22|1997-06-22|NONE|MAIL|ges wake furiou 14144|84236|9253|6|46|56130.58|0.00|0.04|N|O|1997-05-24|1997-07-18|1997-05-27|NONE|SHIP|lent request 14145|138729|1243|1|45|79547.40|0.02|0.05|N|O|1997-06-13|1997-06-19|1997-06-20|DELIVER IN PERSON|TRUCK|ely final accounts haggle slyly. 14145|15512|5513|2|43|61382.93|0.02|0.06|N|O|1997-04-25|1997-06-09|1997-04-28|NONE|MAIL|ctions. final, regular requ 14145|103641|1172|3|6|9867.84|0.02|0.08|N|O|1997-06-27|1997-07-07|1997-07-11|COLLECT COD|FOB|to are carefully 14145|79500|2008|4|40|59180.00|0.08|0.05|N|O|1997-07-26|1997-07-09|1997-08-22|COLLECT COD|MAIL|l accounts wake fluffily s 14145|146388|8903|5|27|38728.26|0.10|0.00|N|O|1997-05-06|1997-06-30|1997-05-12|TAKE BACK RETURN|RAIL|ly carefully regular foxes 14145|130723|3237|6|48|84178.56|0.00|0.02|N|O|1997-05-10|1997-07-09|1997-06-01|NONE|RAIL|ole blithely? ruth 14146|172424|9976|1|25|37410.50|0.08|0.06|N|O|1996-01-25|1996-03-17|1996-02-05|TAKE BACK RETURN|FOB| across the final courts. 
fur 14146|127007|7008|2|14|14476.00|0.01|0.03|N|O|1996-04-20|1996-03-31|1996-05-13|NONE|SHIP|aggle blithely ar 14146|198844|8845|3|20|38856.80|0.08|0.01|N|O|1996-01-14|1996-02-15|1996-01-29|TAKE BACK RETURN|REG AIR| packages try to hang furiously unusual a 14146|159935|7481|4|8|15959.44|0.03|0.05|N|O|1996-03-04|1996-02-04|1996-03-05|TAKE BACK RETURN|REG AIR|lar hockey 14146|15121|2625|5|4|4144.48|0.02|0.08|N|O|1996-04-11|1996-03-04|1996-04-19|NONE|TRUCK|usy, pending pac 14147|42968|481|1|4|7643.84|0.08|0.01|A|F|1993-05-20|1993-05-15|1993-05-27|TAKE BACK RETURN|SHIP|s after the ironic 14147|160647|8196|2|12|20491.68|0.05|0.01|A|F|1993-07-01|1993-04-30|1993-07-29|COLLECT COD|SHIP| accounts. final ideas haggle care 14147|193347|905|3|38|54732.92|0.05|0.03|R|F|1993-06-30|1993-05-09|1993-07-30|COLLECT COD|AIR|uctions. ideas unwind! ironic 14147|167115|7116|4|47|55559.17|0.07|0.06|R|F|1993-07-02|1993-05-24|1993-07-21|DELIVER IN PERSON|FOB|st the blithel 14147|86771|1788|5|31|54490.87|0.07|0.08|R|F|1993-03-26|1993-05-27|1993-04-16|TAKE BACK RETURN|FOB|usly even requests. carefully 14147|143547|3548|6|17|27039.18|0.06|0.05|R|F|1993-07-07|1993-06-06|1993-07-28|DELIVER IN PERSON|MAIL|eep around the pending pack 14147|179884|2402|7|12|23566.56|0.00|0.03|A|F|1993-06-03|1993-05-15|1993-06-25|TAKE BACK RETURN|SHIP| asymptotes along the ironic, regular 14148|108393|904|1|12|16816.68|0.02|0.08|N|O|1998-07-29|1998-06-26|1998-08-09|COLLECT COD|FOB| use fluffily a 14148|154643|9674|2|50|84882.00|0.07|0.00|N|O|1998-05-25|1998-06-10|1998-06-07|COLLECT COD|FOB|mptotes cajole among the sl 14148|84706|2231|3|14|23669.80|0.02|0.07|N|O|1998-08-10|1998-05-30|1998-08-17|DELIVER IN PERSON|FOB|ix. quickly final notornis mol 14148|39635|2139|4|21|33067.23|0.10|0.03|N|O|1998-05-27|1998-06-24|1998-06-03|NONE|MAIL|uffy, even deposits. blithely ironic pl 14149|56876|4392|1|5|9164.35|0.04|0.08|N|O|1998-02-17|1998-03-23|1998-03-03|DELIVER IN PERSON|RAIL|luffily ironic dependencies boos 14149|109594|9595|2|4|6414.36|0.01|0.08|N|O|1998-05-09|1998-03-18|1998-05-21|NONE|AIR|edly regular asympto 14149|139308|4335|3|15|20209.50|0.00|0.04|N|O|1998-05-06|1998-03-05|1998-05-24|TAKE BACK RETURN|REG AIR|iously bold courts! blithely final inst 14149|148441|5984|4|10|14894.40|0.05|0.08|N|O|1998-05-04|1998-03-28|1998-05-22|DELIVER IN PERSON|SHIP|ress platelets; slyl 14149|44048|9057|5|12|11904.48|0.02|0.02|N|O|1998-02-03|1998-04-12|1998-02-22|COLLECT COD|SHIP|nic requests. ca 14149|98540|3559|6|24|36924.96|0.06|0.08|N|O|1998-03-25|1998-03-16|1998-04-12|NONE|RAIL|onic deposits aft 14149|152729|275|7|35|62360.20|0.08|0.06|N|O|1998-03-27|1998-03-19|1998-04-18|NONE|FOB|instructions 14150|103093|5604|1|3|3288.27|0.09|0.03|A|F|1994-03-07|1994-04-27|1994-03-29|TAKE BACK RETURN|AIR|l pinto be 14150|78908|6430|2|38|71702.20|0.02|0.07|R|F|1994-02-12|1994-05-09|1994-03-03|DELIVER IN PERSON|MAIL|ronic packages integrate enticing 14150|195870|5871|3|33|64873.71|0.01|0.04|A|F|1994-02-17|1994-04-16|1994-03-05|TAKE BACK RETURN|SHIP| deposits haggl 14150|176730|4282|4|37|66849.01|0.02|0.04|R|F|1994-03-20|1994-04-09|1994-04-01|COLLECT COD|FOB|s. slyly pending pinto be 14151|74922|2444|1|37|70186.04|0.10|0.04|N|O|1997-08-19|1997-08-07|1997-08-25|TAKE BACK RETURN|RAIL|gular packages are carefully. 
pendi 14151|2069|2070|2|36|34958.16|0.01|0.04|N|O|1997-09-30|1997-08-26|1997-10-03|TAKE BACK RETURN|TRUCK|he quickly 14151|168539|3572|3|39|62693.67|0.01|0.01|N|O|1997-09-30|1997-07-23|1997-10-29|TAKE BACK RETURN|FOB|about the silently sp 14151|34913|7417|4|9|16631.19|0.09|0.00|N|O|1997-09-12|1997-07-21|1997-09-20|NONE|FOB| quickly unusual asymptotes haggle 14151|167620|7621|5|32|54003.84|0.08|0.04|N|O|1997-08-03|1997-08-18|1997-08-30|COLLECT COD|MAIL| among the quickly spec 14151|173429|5947|6|40|60096.80|0.09|0.00|N|O|1997-07-03|1997-08-22|1997-07-12|COLLECT COD|SHIP|d ideas wake carefully ironic idea 14151|6570|6571|7|16|23625.12|0.00|0.01|N|O|1997-10-04|1997-09-01|1997-10-20|TAKE BACK RETURN|MAIL|uld use quickly bold 14176|7418|2419|1|22|29159.02|0.04|0.06|R|F|1994-06-14|1994-04-12|1994-06-20|DELIVER IN PERSON|TRUCK|carefully final accounts cajole even, 14176|67727|5246|2|33|55925.76|0.09|0.07|A|F|1994-06-28|1994-05-04|1994-07-20|COLLECT COD|SHIP|egular theodolites 14176|72128|7143|3|24|26402.88|0.00|0.02|A|F|1994-05-15|1994-05-10|1994-05-29|TAKE BACK RETURN|AIR|gular theodolites wake-- furio 14176|44499|9508|4|20|28869.80|0.01|0.08|A|F|1994-06-11|1994-05-05|1994-06-23|COLLECT COD|SHIP|cording to the 14177|48976|3985|1|23|44274.31|0.03|0.07|R|F|1992-04-09|1992-06-03|1992-04-17|NONE|SHIP|wake furiously blithely final acco 14177|130838|839|2|16|29901.28|0.04|0.07|A|F|1992-04-29|1992-07-01|1992-05-29|TAKE BACK RETURN|TRUCK|ully. ideas sleep a 14177|186608|6609|3|34|57616.40|0.02|0.01|A|F|1992-07-10|1992-06-24|1992-08-04|NONE|SHIP|sublate care 14178|131122|8662|1|9|10378.08|0.02|0.05|R|F|1995-04-10|1995-03-21|1995-04-21|NONE|RAIL|s past the furio 14179|176468|8986|1|41|63322.86|0.02|0.07|N|O|1997-11-29|1997-11-07|1997-11-30|DELIVER IN PERSON|SHIP|osits instead of the slyly regular pin 14179|96715|1734|2|30|51351.30|0.00|0.08|N|O|1997-11-19|1997-12-02|1997-11-20|NONE|TRUCK|y unusual excuses abou 14179|85220|2745|3|43|51824.46|0.00|0.05|N|O|1997-09-23|1997-10-10|1997-10-15|TAKE BACK RETURN|REG AIR|into beans are blithely regular requ 14179|79629|4644|4|40|64344.80|0.01|0.05|N|O|1997-11-15|1997-10-20|1997-12-15|NONE|RAIL|carefully final 14179|109088|6619|5|46|50465.68|0.00|0.03|N|O|1997-10-15|1997-10-13|1997-10-29|TAKE BACK RETURN|TRUCK|requests wake 14179|25410|5411|6|46|61428.86|0.00|0.04|N|O|1997-10-04|1997-10-08|1997-10-25|DELIVER IN PERSON|TRUCK|sts boost furiously. quic 14180|56629|9135|1|24|38054.88|0.00|0.05|N|O|1997-09-08|1997-07-19|1997-09-23|NONE|TRUCK|t the unusual deposits. bold, pen 14180|161557|6590|2|46|74453.30|0.09|0.01|N|O|1997-07-07|1997-06-28|1997-07-26|COLLECT COD|AIR|sts unwind fluffily 14180|85598|615|3|32|50674.88|0.08|0.02|N|O|1997-06-13|1997-07-28|1997-07-10|TAKE BACK RETURN|AIR|sits lose slyly 14180|33956|6460|4|21|39688.95|0.00|0.00|N|O|1997-08-03|1997-06-27|1997-09-01|NONE|MAIL|ts use carefully across the quickly iron 14180|96657|1676|5|4|6614.60|0.05|0.04|N|O|1997-07-09|1997-08-20|1997-08-02|NONE|FOB|ccording to the quietly final ideas. re 14181|89552|2061|1|18|27747.90|0.06|0.03|N|O|1998-06-13|1998-03-15|1998-06-21|DELIVER IN PERSON|RAIL|l tithes are. carefully 14181|83823|8840|2|26|46977.32|0.07|0.08|N|O|1998-03-14|1998-03-15|1998-03-29|NONE|MAIL|furiously final depths. unusual foxes wake 14181|2265|9766|3|34|39686.84|0.05|0.08|N|O|1998-03-20|1998-03-21|1998-04-19|DELIVER IN PERSON|RAIL|yly pending deposits. 
final theo 14181|51277|3783|4|18|22108.86|0.00|0.07|N|O|1998-03-06|1998-03-22|1998-03-07|DELIVER IN PERSON|REG AIR|le slyly final, special pla 14181|7691|2692|5|3|4796.07|0.00|0.01|N|O|1998-05-04|1998-04-08|1998-05-20|DELIVER IN PERSON|AIR|ly regular ideas cajole fluffi 14181|46472|8977|6|37|52483.39|0.06|0.08|N|O|1998-05-19|1998-03-16|1998-06-08|DELIVER IN PERSON|MAIL|requests sleep carefully against the furi 14181|44933|4934|7|29|54459.97|0.07|0.00|N|O|1998-04-01|1998-05-11|1998-04-12|NONE|MAIL|t courts. carefully 14182|186680|9199|1|37|65367.16|0.09|0.05|A|F|1994-09-05|1994-07-30|1994-09-23|COLLECT COD|SHIP|quests. ironic, express pe 14182|118495|6029|2|5|7567.45|0.02|0.08|R|F|1994-10-21|1994-09-12|1994-10-26|TAKE BACK RETURN|TRUCK|lets are. pending, final packages use. care 14183|184565|4566|1|4|6598.24|0.09|0.01|N|O|1997-05-02|1997-05-18|1997-05-10|COLLECT COD|TRUCK|ffily pending instructions haggle bl 14183|173348|5866|2|50|71067.00|0.04|0.01|N|O|1997-03-18|1997-06-03|1997-03-21|COLLECT COD|FOB|luffily silent depos 14208|98390|900|1|33|45816.87|0.04|0.08|N|O|1995-08-26|1995-07-23|1995-09-10|DELIVER IN PERSON|MAIL| blithely unusual accounts boost car 14208|26912|6913|2|27|49650.57|0.06|0.04|N|O|1995-07-14|1995-06-06|1995-07-28|COLLECT COD|FOB| alongside o 14208|102265|7286|3|28|35483.28|0.04|0.04|N|O|1995-06-29|1995-06-17|1995-07-17|DELIVER IN PERSON|FOB|ar ideas. furiously unusual forges use flu 14209|190853|3373|1|26|50540.10|0.06|0.03|N|O|1996-12-16|1997-01-30|1996-12-30|TAKE BACK RETURN|RAIL| ironic deposits. ironic 14209|63644|3645|2|7|11253.48|0.09|0.04|N|O|1996-11-23|1996-12-24|1996-12-21|COLLECT COD|REG AIR|rding to the bold, silent patterns. blith 14209|58992|8993|3|35|68284.65|0.07|0.04|N|O|1997-01-22|1997-01-08|1997-02-19|NONE|MAIL|hely above the bold accounts? sly 14209|68254|8255|4|7|8555.75|0.06|0.07|N|O|1997-01-29|1997-01-11|1997-02-16|DELIVER IN PERSON|RAIL|y regular theodolites aff 14209|129468|1981|5|17|25456.82|0.04|0.03|N|O|1996-12-23|1996-12-16|1996-12-29|DELIVER IN PERSON|RAIL|e slyly quick accounts; carefully unusual 14210|60883|5896|1|15|27658.20|0.00|0.04|A|F|1992-09-13|1992-08-17|1992-10-12|COLLECT COD|MAIL| even depo 14211|47107|4620|1|11|11595.10|0.00|0.08|R|F|1994-02-28|1994-04-21|1994-03-05|TAKE BACK RETURN|REG AIR|ges nag ruthlessly ironic foxes. pinto bean 14211|112553|2554|2|14|21917.70|0.00|0.01|R|F|1994-03-07|1994-04-17|1994-03-13|NONE|SHIP|cross the furiously regular accounts. fl 14211|74664|7172|3|29|47521.14|0.08|0.02|A|F|1994-03-14|1994-05-08|1994-04-01|DELIVER IN PERSON|REG AIR|ar deposits besides the 14211|72196|2197|4|35|40886.65|0.06|0.04|R|F|1994-05-01|1994-03-15|1994-05-09|COLLECT COD|MAIL|sits integrate furiously quickly silent exc 14211|192167|7206|5|3|3777.48|0.05|0.03|A|F|1994-03-26|1994-05-07|1994-04-03|DELIVER IN PERSON|AIR|lar excuses boost. slyly pen 14211|137493|7|6|41|62750.09|0.00|0.01|R|F|1994-05-28|1994-04-16|1994-05-31|NONE|REG AIR|ctions cajole furious 14212|174575|2127|1|44|72581.08|0.08|0.01|A|F|1992-04-24|1992-04-18|1992-05-19|COLLECT COD|MAIL|carefully express r 14212|75991|5992|2|45|88514.55|0.04|0.07|R|F|1992-03-07|1992-04-28|1992-04-06|COLLECT COD|RAIL|r, special foxes cajole furious 14212|168672|3705|3|20|34813.40|0.09|0.03|R|F|1992-02-28|1992-04-14|1992-03-13|COLLECT COD|TRUCK|. 
dependencies accord 14212|65583|596|4|15|23228.70|0.09|0.06|R|F|1992-04-11|1992-03-26|1992-04-20|DELIVER IN PERSON|AIR|ilent platelets abo 14213|18872|3875|1|45|80589.15|0.01|0.05|R|F|1994-01-05|1994-03-18|1994-01-24|TAKE BACK RETURN|TRUCK|ironic requests are excuses. 14214|199460|7018|1|26|40545.96|0.00|0.05|N|O|1997-09-17|1997-10-12|1997-10-15|TAKE BACK RETURN|MAIL|lyly final accounts cajole slyly abov 14214|107594|5125|2|3|4804.77|0.09|0.06|N|O|1997-12-08|1997-09-23|1998-01-03|TAKE BACK RETURN|SHIP|ent dependencies. instructions haggle b 14214|122636|5149|3|29|48100.27|0.07|0.06|N|O|1997-11-23|1997-10-10|1997-12-06|NONE|TRUCK|pecial sheaves haggle slyly about t 14214|129497|4522|4|36|54953.64|0.09|0.07|N|O|1997-09-24|1997-09-20|1997-10-23|TAKE BACK RETURN|TRUCK|cuses. pending, fin 14215|183217|5736|1|4|5200.84|0.07|0.05|A|F|1992-09-10|1992-09-26|1992-10-09|DELIVER IN PERSON|SHIP|unts breach quickly 14215|124889|9914|2|11|21052.68|0.04|0.08|R|F|1992-08-09|1992-10-02|1992-08-22|TAKE BACK RETURN|TRUCK|y daring pinto 14215|3496|997|3|35|48982.15|0.09|0.03|R|F|1992-07-18|1992-08-16|1992-08-08|NONE|RAIL|ously regular courts hang. c 14240|111270|6293|1|47|60219.69|0.06|0.00|R|F|1992-06-06|1992-04-08|1992-07-04|COLLECT COD|AIR|eep carefully along the ironic, eve 14240|111486|9020|2|26|38934.48|0.00|0.02|R|F|1992-05-08|1992-04-19|1992-06-05|TAKE BACK RETURN|REG AIR|ely slyly regular dependencies: even reque 14240|137070|9584|3|14|15498.98|0.06|0.02|A|F|1992-05-24|1992-04-04|1992-06-07|DELIVER IN PERSON|TRUCK|y. thinly special accounts around the dep 14240|133118|5632|4|35|40288.85|0.03|0.02|A|F|1992-05-08|1992-05-15|1992-06-07|COLLECT COD|FOB|lites dazzle by the quickly silent account 14240|112078|7101|5|9|9810.63|0.05|0.01|A|F|1992-06-04|1992-03-21|1992-06-11|TAKE BACK RETURN|MAIL|express pi 14240|86452|1469|6|14|20138.30|0.09|0.04|R|F|1992-04-29|1992-04-21|1992-05-03|COLLECT COD|RAIL|ording to the carefu 14240|187824|2861|7|26|49707.32|0.00|0.00|R|F|1992-05-10|1992-04-15|1992-05-23|DELIVER IN PERSON|FOB|gular, ironic pinto beans n 14241|156393|3939|1|44|63773.16|0.05|0.08|A|F|1992-10-03|1992-09-12|1992-10-04|COLLECT COD|SHIP|ly bold foxes wake. ir 14241|87761|2778|2|47|82191.72|0.00|0.04|A|F|1992-09-04|1992-09-02|1992-09-22|DELIVER IN PERSON|RAIL|ed excuses above the carefully ironic 14242|44414|9423|1|17|23092.97|0.03|0.04|N|F|1995-06-17|1995-06-09|1995-06-25|DELIVER IN PERSON|AIR|posits. quickly even instructions are 14242|57589|5105|2|7|10826.06|0.04|0.05|R|F|1995-05-13|1995-07-04|1995-05-18|DELIVER IN PERSON|FOB|eans according to the slyly 14242|88331|5856|3|10|13193.30|0.02|0.07|N|O|1995-08-21|1995-07-29|1995-09-15|DELIVER IN PERSON|RAIL|e blithely over the blithely pen 14242|174174|6692|4|34|42437.78|0.08|0.08|N|O|1995-08-22|1995-07-23|1995-09-12|DELIVER IN PERSON|MAIL|te at the sil 14242|137106|9620|5|6|6858.60|0.09|0.08|N|O|1995-07-13|1995-06-29|1995-07-17|NONE|SHIP|eposits. fluffily express deposits inte 14243|187706|2743|1|33|59192.10|0.04|0.05|N|O|1997-03-24|1997-05-04|1997-03-30|TAKE BACK RETURN|REG AIR| silently. warthogs about the package 14243|167340|4889|2|3|4222.02|0.09|0.06|N|O|1997-02-28|1997-05-12|1997-03-14|COLLECT COD|RAIL|counts wake quickly against the 14244|77207|2222|1|14|16578.80|0.00|0.02|N|O|1998-03-25|1998-04-23|1998-03-27|NONE|MAIL|onic packages integrate fluffily 14244|5897|8398|2|3|5408.67|0.08|0.08|N|O|1998-05-19|1998-05-29|1998-06-06|COLLECT COD|RAIL|wake slyly ironic accounts. 
express 14244|106357|3888|3|27|36810.45|0.01|0.02|N|O|1998-05-19|1998-04-05|1998-06-04|COLLECT COD|FOB| warthogs nag blithely ironic reque 14245|141837|9380|1|3|5636.49|0.00|0.02|A|F|1993-03-11|1993-04-14|1993-04-03|DELIVER IN PERSON|FOB|endencies sleep carefu 14245|41467|8980|2|35|49296.10|0.09|0.07|R|F|1993-03-06|1993-04-13|1993-04-03|NONE|RAIL|d requests eat. quickl 14245|134484|4485|3|21|31888.08|0.06|0.01|A|F|1993-04-15|1993-04-11|1993-04-16|COLLECT COD|RAIL|nal, unusual ideas wake carefully. spe 14246|62405|2406|1|34|46491.60|0.03|0.07|N|O|1997-12-30|1998-01-02|1998-01-25|DELIVER IN PERSON|AIR|counts above the e 14246|40615|8128|2|20|31112.20|0.00|0.04|N|O|1998-01-07|1997-12-23|1998-01-26|COLLECT COD|AIR| nag. blithely pendi 14246|5659|660|3|37|57892.05|0.09|0.00|N|O|1997-11-16|1997-11-15|1997-12-14|NONE|REG AIR|affix among the carefully regul 14246|104269|1800|4|19|24191.94|0.08|0.04|N|O|1997-12-01|1997-11-21|1997-12-31|COLLECT COD|MAIL|sual deposits among the final 14246|47669|174|5|2|3233.32|0.01|0.05|N|O|1997-10-21|1997-11-12|1997-10-23|COLLECT COD|FOB|cuses. special requests caj 14247|157084|7085|1|19|21680.52|0.00|0.03|N|O|1995-11-24|1995-11-13|1995-12-19|COLLECT COD|TRUCK|d excuses caj 14247|103692|6203|2|21|35609.49|0.06|0.00|N|O|1995-09-27|1995-10-13|1995-10-14|DELIVER IN PERSON|MAIL|t carefully. furiously ironic mu 14247|134747|4748|3|21|37416.54|0.04|0.01|N|O|1995-11-06|1995-09-17|1995-11-21|COLLECT COD|MAIL|ross the slyly regular accounts! furiously 14247|80185|2694|4|33|38450.94|0.02|0.00|N|O|1995-08-18|1995-09-28|1995-09-02|DELIVER IN PERSON|TRUCK|onic court 14247|66880|4399|5|38|70181.44|0.02|0.00|N|O|1995-12-12|1995-11-14|1995-12-19|TAKE BACK RETURN|SHIP|eas are quick 14247|43940|3941|6|6|11303.64|0.03|0.00|N|O|1995-11-24|1995-10-29|1995-12-05|DELIVER IN PERSON|RAIL|kages. carefully even pinto bean 14247|97487|7488|7|13|19298.24|0.02|0.06|N|O|1995-12-06|1995-11-11|1996-01-02|DELIVER IN PERSON|MAIL|s believe slyly! ironic i 14272|61387|6400|1|28|37754.64|0.08|0.08|A|F|1992-03-02|1992-04-21|1992-03-20|TAKE BACK RETURN|TRUCK|iously unusu 14272|121597|4110|2|11|17804.49|0.09|0.07|A|F|1992-05-22|1992-05-08|1992-06-06|COLLECT COD|FOB| carefully among the ironic 14272|66919|6920|3|30|56577.30|0.09|0.02|R|F|1992-02-17|1992-04-01|1992-03-04|TAKE BACK RETURN|AIR|are along the slyly bold as 14273|59354|4365|1|21|27580.35|0.00|0.03|N|O|1996-03-26|1996-05-28|1996-04-12|NONE|RAIL|al accounts. carefully regular pint 14273|150612|5643|2|39|64841.79|0.00|0.06|N|O|1996-06-01|1996-06-03|1996-06-16|DELIVER IN PERSON|RAIL|even, final accounts. furiously 14274|113938|3939|1|44|85884.92|0.03|0.00|A|F|1994-05-05|1994-03-12|1994-06-03|NONE|TRUCK| regular decoys wake fluffily. theodoli 14274|150028|2544|2|34|36652.68|0.09|0.04|R|F|1994-04-12|1994-03-22|1994-04-30|TAKE BACK RETURN|TRUCK|tes. ironic, final package 14274|113387|5899|3|13|18204.94|0.03|0.03|R|F|1994-02-06|1994-04-04|1994-02-15|COLLECT COD|RAIL|o beans are above the package 14274|85399|5400|4|49|67835.11|0.10|0.03|A|F|1994-03-08|1994-03-05|1994-03-09|NONE|MAIL|haggle carefully abo 14274|94379|9398|5|4|5493.48|0.07|0.07|A|F|1994-03-06|1994-03-25|1994-03-11|COLLECT COD|TRUCK|nding patterns shall engage furiously final 14274|17539|7540|6|48|69913.44|0.01|0.07|R|F|1994-04-22|1994-02-27|1994-04-24|DELIVER IN PERSON|REG AIR|packages. accounts haggle fluffily after 14275|12140|7143|1|19|19990.66|0.01|0.07|R|F|1993-10-12|1993-08-13|1993-10-30|TAKE BACK RETURN|RAIL| deposits. 
frets sleep fluffily after th 14275|34103|1613|2|31|32150.10|0.01|0.05|A|F|1993-07-26|1993-08-07|1993-08-09|TAKE BACK RETURN|REG AIR|the special packa 14275|159759|4790|3|1|1818.75|0.03|0.04|A|F|1993-08-31|1993-09-01|1993-09-19|COLLECT COD|REG AIR|thely silent accounts. realms hagg 14275|69351|6870|4|6|7922.10|0.04|0.08|R|F|1993-07-01|1993-07-30|1993-07-23|TAKE BACK RETURN|MAIL|ckly silent multipliers a 14275|174126|4127|5|23|27602.76|0.08|0.08|R|F|1993-07-09|1993-09-12|1993-08-06|DELIVER IN PERSON|REG AIR|across the blithely re 14275|125230|2767|6|41|51464.43|0.09|0.07|R|F|1993-09-15|1993-07-26|1993-10-01|DELIVER IN PERSON|REG AIR|ions. special, final accounts wake 14275|136354|6355|7|11|15293.85|0.05|0.00|A|F|1993-07-18|1993-08-24|1993-08-15|COLLECT COD|RAIL|ic, even packages wake blithely. fin 14276|87051|7052|1|49|50864.45|0.09|0.01|R|F|1994-08-20|1994-08-24|1994-08-30|DELIVER IN PERSON|AIR| special accounts. blithely sly 14276|57621|2632|2|25|39465.50|0.06|0.01|R|F|1994-09-01|1994-08-13|1994-09-24|DELIVER IN PERSON|REG AIR| cajole slyly furiously ironic 14276|25200|2707|3|32|36006.40|0.01|0.05|R|F|1994-10-11|1994-07-28|1994-10-25|NONE|TRUCK|ests. silentl 14276|124443|6956|4|15|22011.60|0.10|0.00|A|F|1994-09-21|1994-09-16|1994-10-18|NONE|MAIL|haggle quickly regular deposits. car 14276|97635|5163|5|40|65305.20|0.10|0.07|R|F|1994-09-30|1994-08-25|1994-10-14|COLLECT COD|FOB| final theodolites 14277|18594|3597|1|35|52940.65|0.03|0.07|N|O|1998-04-24|1998-05-07|1998-04-29|NONE|MAIL|ges. carefully regular courts sleep agai 14277|105696|5697|2|3|5105.07|0.05|0.05|N|O|1998-05-14|1998-04-30|1998-06-04|TAKE BACK RETURN|REG AIR|cajole slyly 14277|29124|1627|3|41|43177.92|0.05|0.06|N|O|1998-05-12|1998-05-04|1998-05-23|NONE|REG AIR| pending requ 14277|85213|5214|4|16|19171.36|0.10|0.00|N|O|1998-03-16|1998-04-11|1998-04-03|NONE|FOB| furiously bold accounts boost along t 14278|139440|1954|1|30|44383.20|0.03|0.03|N|O|1998-06-14|1998-08-01|1998-06-28|COLLECT COD|MAIL|ic, special deposits. carefully regula 14278|162792|7825|2|46|85320.34|0.04|0.06|N|O|1998-07-30|1998-07-04|1998-08-17|COLLECT COD|FOB|en theodolites sleep furiously after th 14278|35521|528|3|23|33499.96|0.05|0.03|N|O|1998-09-10|1998-08-24|1998-10-04|DELIVER IN PERSON|REG AIR|o the carefully regular foxes. 14278|188133|8134|4|14|17095.82|0.03|0.01|N|O|1998-06-17|1998-08-19|1998-06-19|DELIVER IN PERSON|SHIP|sual pinto 14278|144344|4345|5|48|66640.32|0.02|0.03|N|O|1998-07-30|1998-07-17|1998-07-31|TAKE BACK RETURN|SHIP|y ironic dolphins 14278|188304|5859|6|6|8353.80|0.03|0.00|N|O|1998-07-16|1998-08-23|1998-08-01|NONE|AIR|ely ironic instr 14278|8708|8709|7|29|46884.30|0.09|0.06|N|O|1998-08-23|1998-07-28|1998-09-05|NONE|FOB|ithe theodolites across the fluff 14279|145541|570|1|4|6346.16|0.00|0.08|N|O|1998-04-15|1998-04-29|1998-05-11|DELIVER IN PERSON|REG AIR|. 
special, e 14279|93852|1380|2|47|86754.95|0.03|0.01|N|O|1998-06-02|1998-05-02|1998-06-19|NONE|TRUCK|old deposits use sl 14279|156943|1974|3|8|15999.52|0.09|0.05|N|O|1998-05-23|1998-04-08|1998-06-19|DELIVER IN PERSON|MAIL|theodolites nag blit 14279|135751|3291|4|42|75043.50|0.00|0.03|N|O|1998-03-14|1998-05-10|1998-03-19|COLLECT COD|AIR|ole blithely pending asympto 14279|166796|9313|5|1|1862.79|0.08|0.06|N|O|1998-04-24|1998-03-23|1998-05-10|TAKE BACK RETURN|SHIP|press orbits serve 14304|14803|9806|1|33|56687.40|0.09|0.03|N|O|1997-12-31|1998-01-01|1998-01-07|NONE|MAIL| unusual instructions grow fluffil 14304|181239|1240|2|9|11882.07|0.01|0.07|N|O|1997-11-16|1998-02-07|1997-12-06|COLLECT COD|MAIL|venly regular requests ha 14304|79941|9942|3|30|57628.20|0.06|0.00|N|O|1998-01-24|1998-01-19|1998-02-10|COLLECT COD|SHIP|efully even packages sleep slyl 14304|155953|3499|4|48|96429.60|0.09|0.06|N|O|1998-01-27|1997-12-24|1998-02-10|TAKE BACK RETURN|TRUCK| slyly pending accounts could h 14304|89806|4823|5|5|8979.00|0.10|0.02|N|O|1998-03-01|1997-12-15|1998-03-25|DELIVER IN PERSON|MAIL|gular instructions sleep fluffily 14304|161198|8747|6|30|37775.70|0.08|0.03|N|O|1998-01-07|1997-12-15|1998-01-28|TAKE BACK RETURN|MAIL| blithe accounts a 14305|99317|1827|1|44|57917.64|0.05|0.02|N|O|1996-08-27|1996-09-12|1996-09-07|TAKE BACK RETURN|SHIP|nto the slyly unusual accounts. furio 14306|86638|9147|1|15|24369.45|0.04|0.07|A|F|1993-11-15|1993-12-27|1993-11-23|COLLECT COD|FOB|s. permanently furious theodo 14307|68136|8137|1|16|17666.08|0.01|0.03|N|O|1997-06-30|1997-07-25|1997-07-24|COLLECT COD|AIR|se theodolites are fur 14307|145286|7801|2|47|62570.16|0.05|0.01|N|O|1997-07-11|1997-09-10|1997-07-24|COLLECT COD|MAIL|mong the carefully ironic accounts 14307|14042|1546|3|27|25813.08|0.08|0.05|N|O|1997-10-06|1997-08-01|1997-10-13|NONE|MAIL|he slyly final packages detect 14307|1494|8995|4|41|57215.09|0.07|0.07|N|O|1997-08-03|1997-08-19|1997-08-05|DELIVER IN PERSON|MAIL|es snooze furiously slyly sil 14307|94457|4458|5|15|21771.75|0.08|0.08|N|O|1997-08-19|1997-08-02|1997-08-20|NONE|REG AIR|. slyly regular 14308|157430|2461|1|20|29748.60|0.04|0.03|N|O|1997-11-11|1997-11-05|1997-11-30|DELIVER IN PERSON|REG AIR|courts. carefully final accounts acco 14308|111058|6081|2|46|49176.30|0.08|0.07|N|O|1997-10-17|1997-09-21|1997-11-11|DELIVER IN PERSON|RAIL|equests. slyly unusual instr 14308|140916|5945|3|45|88060.95|0.04|0.02|N|O|1997-12-09|1997-10-09|1998-01-07|DELIVER IN PERSON|RAIL|ully bold pinto beans. final, 14309|170960|5995|1|37|75145.52|0.08|0.04|R|F|1994-05-18|1994-03-02|1994-06-04|DELIVER IN PERSON|MAIL|ncies x-ray a 14309|120450|451|2|28|41172.60|0.08|0.03|R|F|1994-03-19|1994-03-09|1994-03-22|DELIVER IN PERSON|SHIP| instructions. furiously ironic foxes u 14309|190038|5077|3|10|11280.30|0.07|0.01|R|F|1994-05-06|1994-04-16|1994-06-01|DELIVER IN PERSON|AIR|detect fluffily along the 14310|50411|5422|1|42|57179.22|0.01|0.02|N|O|1997-09-24|1997-12-01|1997-10-08|NONE|SHIP|ly final theodolite 14311|194627|4628|1|26|44762.12|0.10|0.00|R|F|1994-11-07|1994-10-26|1994-11-15|COLLECT COD|SHIP|ptotes. carefully ironic foxes according 14311|83981|6490|2|2|3929.96|0.01|0.06|R|F|1994-09-15|1994-09-22|1994-10-03|TAKE BACK RETURN|TRUCK| careful id 14336|22492|4995|1|29|41020.21|0.03|0.08|R|F|1994-11-26|1994-12-25|1994-12-21|NONE|FOB|rding to the n 14336|6470|8971|2|6|8258.82|0.07|0.04|R|F|1995-02-06|1995-01-25|1995-02-10|COLLECT COD|REG AIR|usly at the slyly pending forges! 
ir 14336|131317|3831|3|23|31011.13|0.02|0.07|A|F|1994-12-14|1995-02-08|1995-01-04|DELIVER IN PERSON|REG AIR|lithely carefully fina 14336|29788|2291|4|44|75582.32|0.04|0.01|A|F|1994-11-25|1995-02-07|1994-12-23|TAKE BACK RETURN|FOB|e blithely acco 14336|31842|9352|5|45|79822.80|0.08|0.04|A|F|1994-12-02|1995-01-05|1994-12-06|TAKE BACK RETURN|SHIP|uests wake ironic courts. accounts use c 14337|89110|1619|1|25|27477.75|0.10|0.07|N|O|1997-10-17|1997-12-03|1997-11-12|COLLECT COD|SHIP|ven deposits a 14337|56206|8712|2|6|6973.20|0.04|0.01|N|O|1997-11-09|1997-12-08|1997-11-12|COLLECT COD|AIR|nd the furiously 14337|38120|5630|3|37|39150.44|0.04|0.03|N|O|1998-01-27|1997-12-11|1998-02-06|TAKE BACK RETURN|MAIL|osits. even ideas sleep blithely r 14337|69127|6646|4|50|54806.00|0.06|0.02|N|O|1998-01-07|1997-11-21|1998-01-08|COLLECT COD|RAIL|kly. ironic packages x-ray slyly agains 14337|8259|8260|5|18|21010.50|0.03|0.02|N|O|1997-11-20|1997-12-15|1997-12-15|NONE|REG AIR|equests. permanent packag 14337|193252|8291|6|25|33631.25|0.03|0.01|N|O|1998-01-26|1997-12-06|1998-02-23|COLLECT COD|AIR|ent accounts cajole carefull 14337|185357|7876|7|23|33174.05|0.01|0.07|N|O|1997-10-27|1997-12-28|1997-11-20|COLLECT COD|REG AIR|are blithely bold reque 14338|84252|1777|1|33|40796.25|0.00|0.03|R|F|1993-02-17|1993-05-01|1993-02-25|NONE|AIR|ounts among the bold, even instructions use 14338|23178|685|2|35|38540.95|0.04|0.00|A|F|1993-05-16|1993-03-18|1993-06-06|COLLECT COD|RAIL|final dolphins. 14339|116154|6155|1|34|39785.10|0.09|0.06|N|O|1997-08-05|1997-10-21|1997-08-24|TAKE BACK RETURN|REG AIR|kages. slyly express p 14339|197917|5475|2|28|56417.48|0.01|0.04|N|O|1997-10-24|1997-10-20|1997-11-18|COLLECT COD|MAIL| foxes haggle. fluffily ironic p 14339|182971|2972|3|28|57511.16|0.00|0.00|N|O|1997-07-28|1997-08-29|1997-08-01|TAKE BACK RETURN|SHIP|ng to the enticing, unusual p 14339|187700|219|4|14|25027.80|0.01|0.00|N|O|1997-08-15|1997-10-21|1997-09-02|TAKE BACK RETURN|MAIL|uriously ironic theodol 14339|61160|3667|5|34|38119.44|0.10|0.01|N|O|1997-10-28|1997-09-06|1997-11-12|NONE|REG AIR|he foxes. 14340|150833|8379|1|17|32025.11|0.06|0.00|N|O|1996-01-30|1995-12-08|1996-02-27|TAKE BACK RETURN|TRUCK|ronic pinto beans wake closely requests. 14340|46892|4405|2|29|53327.81|0.10|0.03|N|O|1996-01-29|1995-11-15|1996-02-01|DELIVER IN PERSON|FOB|ag. ironic packages across the furiou 14340|101567|9098|3|50|78428.00|0.06|0.08|N|O|1996-01-05|1995-12-05|1996-01-27|COLLECT COD|TRUCK|o beans. sly 14340|22899|406|4|18|32794.02|0.03|0.06|N|O|1996-01-08|1995-12-22|1996-02-01|COLLECT COD|AIR|out the even pi 14340|48283|788|5|23|28319.44|0.01|0.07|N|O|1995-10-30|1996-01-02|1995-11-21|TAKE BACK RETURN|MAIL|arefully under the slyly special theodol 14340|92019|2020|6|35|35385.35|0.04|0.01|N|O|1995-11-05|1995-12-21|1995-11-10|TAKE BACK RETURN|AIR|l requests could are about the 14341|68900|3913|1|48|89707.20|0.01|0.05|R|F|1993-09-18|1993-10-13|1993-10-14|COLLECT COD|RAIL|t the carefully eve 14341|190825|3345|2|7|13410.74|0.09|0.05|R|F|1993-10-14|1993-10-28|1993-10-25|COLLECT COD|AIR| along the furiously 14341|185096|5097|3|37|43700.33|0.00|0.02|A|F|1993-10-13|1993-10-19|1993-10-31|COLLECT COD|FOB|. final, ironi 14341|102013|9544|4|8|8120.08|0.04|0.01|A|F|1993-12-25|1993-11-18|1993-12-27|NONE|SHIP|nic sentiments are sly 14341|127465|7466|5|28|41788.88|0.01|0.00|A|F|1993-09-19|1993-11-05|1993-09-28|COLLECT COD|MAIL|oost furiously. ironic, final fox 14342|1101|6102|1|45|45094.50|0.00|0.06|A|F|1994-08-01|1994-06-09|1994-08-31|DELIVER IN PERSON|FOB|s. 
furiously even pinto beans accord 14342|89598|9599|2|33|52390.47|0.01|0.01|A|F|1994-06-16|1994-06-09|1994-06-22|TAKE BACK RETURN|SHIP|ecial deposi 14342|57121|2132|3|10|10781.20|0.03|0.03|R|F|1994-07-19|1994-07-27|1994-07-30|TAKE BACK RETURN|TRUCK|regular instru 14342|30190|2694|4|4|4480.76|0.01|0.08|A|F|1994-05-16|1994-05-28|1994-05-29|TAKE BACK RETURN|FOB|ckly pending accounts are. accounts nag 14342|195226|5227|5|8|10569.76|0.00|0.03|A|F|1994-08-03|1994-06-19|1994-08-05|NONE|FOB|ickly after the ironic accou 14343|101465|3976|1|13|19063.98|0.10|0.02|N|O|1996-04-25|1996-04-24|1996-05-19|TAKE BACK RETURN|MAIL|ly ironic packages breach unus 14343|114665|7177|2|44|73905.04|0.01|0.04|N|O|1996-04-23|1996-05-21|1996-05-15|COLLECT COD|SHIP|r accounts sublate about the carefully ir 14368|126076|6077|1|42|46286.94|0.09|0.02|N|O|1995-09-19|1995-11-14|1995-10-05|COLLECT COD|TRUCK|nal, ironic packages. fi 14368|144175|6690|2|6|7315.02|0.07|0.07|N|O|1995-11-17|1995-10-22|1995-12-14|NONE|RAIL|luffily according to the slowly ironic 14368|146430|3973|3|7|10335.01|0.01|0.07|N|O|1995-09-23|1995-11-19|1995-10-20|NONE|AIR|l foxes mold 14368|38459|3466|4|42|58692.90|0.01|0.00|N|O|1995-12-16|1995-11-24|1996-01-02|NONE|FOB|eas! blithely regular accounts ca 14368|167851|7852|5|3|5756.55|0.04|0.03|N|O|1995-11-30|1995-10-20|1995-12-30|TAKE BACK RETURN|SHIP|beneath the slyl 14369|127016|4553|1|45|46935.45|0.02|0.01|N|O|1996-12-04|1997-02-20|1996-12-09|NONE|MAIL|sly bold platelets are fluffily 14369|151571|4087|2|46|74638.22|0.09|0.05|N|O|1997-03-17|1997-01-01|1997-04-06|NONE|SHIP|refully even accounts cajole furious 14369|40948|949|3|34|64223.96|0.00|0.01|N|O|1997-01-20|1997-01-25|1997-01-28|COLLECT COD|REG AIR|eas. permanent instructio 14369|199621|9622|4|35|60221.70|0.09|0.05|N|O|1997-01-20|1997-02-06|1997-01-30|DELIVER IN PERSON|AIR|en ideas. regul 14369|16955|1958|5|33|61774.35|0.05|0.02|N|O|1997-03-17|1997-01-25|1997-04-05|NONE|MAIL|egular requests after the reque 14369|147671|186|6|11|18905.37|0.07|0.07|N|O|1996-12-05|1997-01-03|1996-12-14|TAKE BACK RETURN|SHIP|egrate abou 14370|8106|8107|1|44|44620.40|0.00|0.06|N|O|1997-07-30|1997-07-28|1997-08-19|DELIVER IN PERSON|MAIL|osits boost b 14370|30977|3481|2|38|72502.86|0.01|0.08|N|O|1997-07-06|1997-06-26|1997-07-17|COLLECT COD|FOB|phs after the express foxes use slyly bo 14370|28897|1400|3|42|76687.38|0.07|0.08|N|O|1997-07-01|1997-06-27|1997-07-08|DELIVER IN PERSON|AIR|the pending packages. blithely ironic in 14370|57819|5335|4|12|21321.72|0.00|0.01|N|O|1997-07-22|1997-06-03|1997-07-26|COLLECT COD|TRUCK|aring requests. foxes ha 14371|8637|1138|1|33|51005.79|0.08|0.05|A|F|1993-08-11|1993-09-08|1993-08-14|COLLECT COD|TRUCK|ording to the carefully special packages 14371|156437|1468|2|49|73178.07|0.08|0.03|R|F|1993-10-20|1993-08-26|1993-10-28|COLLECT COD|TRUCK|deas. pend 14371|21087|1088|3|16|16129.28|0.06|0.01|R|F|1993-09-13|1993-08-12|1993-09-22|TAKE BACK RETURN|RAIL| wake furiously q 14372|97894|2913|1|28|52972.92|0.09|0.02|R|F|1993-01-22|1992-12-23|1993-02-05|COLLECT COD|AIR|uickly iron 14372|96727|6728|2|2|3447.44|0.09|0.08|A|F|1993-02-06|1993-01-12|1993-03-05|NONE|REG AIR|d platelets. special instructions cajole sl 14372|74241|1763|3|31|37672.44|0.01|0.08|R|F|1992-12-13|1992-12-23|1992-12-29|TAKE BACK RETURN|RAIL|gside of the regular accou 14372|153980|6496|4|40|81359.20|0.02|0.06|A|F|1992-12-07|1992-12-30|1992-12-14|TAKE BACK RETURN|TRUCK|above the depos 14373|115106|5107|1|49|54933.90|0.01|0.04|A|F|1993-10-08|1993-10-21|1993-10-20|DELIVER IN PERSON|REG AIR|. 
slyly regular accounts acc 14374|198398|3437|1|39|58359.21|0.00|0.07|N|O|1995-08-30|1995-08-14|1995-09-18|COLLECT COD|AIR|. blithely final pinto beans haggl 14374|62522|7535|2|18|26721.36|0.06|0.07|N|O|1995-07-30|1995-07-04|1995-08-18|TAKE BACK RETURN|REG AIR|c escapades 14374|196982|2021|3|9|18710.82|0.10|0.01|N|O|1995-08-17|1995-06-17|1995-08-23|COLLECT COD|REG AIR|y final ideas. carefully final asymptote 14375|147032|4575|1|30|32370.90|0.10|0.07|A|F|1993-02-05|1993-04-27|1993-03-07|NONE|TRUCK|arefully at the ironic, regular packa 14375|156253|8769|2|5|6546.25|0.07|0.07|A|F|1993-04-15|1993-03-10|1993-05-09|NONE|RAIL|ccounts. carefully ru 14375|131605|6632|3|25|40915.00|0.06|0.03|R|F|1993-04-28|1993-03-13|1993-05-19|TAKE BACK RETURN|MAIL|usly after the carefully regular ins 14375|72469|7484|4|4|5765.84|0.04|0.03|A|F|1993-02-11|1993-03-30|1993-02-25|TAKE BACK RETURN|SHIP|warhorses wake among the qui 14375|185015|52|5|14|15400.14|0.00|0.03|R|F|1993-05-12|1993-03-09|1993-05-28|DELIVER IN PERSON|RAIL|e furiously. pinto beans believe fur 14375|141393|1394|6|9|12909.51|0.01|0.05|A|F|1993-02-16|1993-03-31|1993-02-25|DELIVER IN PERSON|REG AIR|deas. blithely even requests a 14375|177766|7767|7|12|22125.12|0.05|0.07|R|F|1993-05-05|1993-04-14|1993-05-26|TAKE BACK RETURN|FOB|nic dugouts. pen 14400|102213|2214|1|5|6076.05|0.02|0.05|A|F|1994-02-28|1994-03-21|1994-03-28|NONE|RAIL| after the 14400|93342|870|2|3|4006.02|0.05|0.05|A|F|1994-03-25|1994-03-16|1994-04-14|DELIVER IN PERSON|TRUCK|ructions nag 14400|190663|664|3|26|45595.16|0.00|0.01|R|F|1994-03-30|1994-04-15|1994-04-08|DELIVER IN PERSON|AIR|ans. special r 14400|59887|7403|4|23|42478.24|0.01|0.08|R|F|1994-02-09|1994-04-10|1994-02-25|NONE|AIR|posits. ir 14400|112957|491|5|40|78798.00|0.07|0.08|A|F|1994-02-23|1994-03-08|1994-03-25|DELIVER IN PERSON|FOB|long the special deposits. slyly ironic 14400|56071|6072|6|35|35947.45|0.05|0.02|A|F|1994-03-31|1994-04-02|1994-04-24|DELIVER IN PERSON|RAIL|ng to the slyly regular r 14400|171634|6669|7|48|81870.24|0.06|0.05|A|F|1994-05-27|1994-04-23|1994-06-26|COLLECT COD|SHIP|. requests nag instructio 14401|51832|9348|1|33|58866.39|0.04|0.03|N|O|1995-11-17|1995-09-14|1995-12-09|TAKE BACK RETURN|SHIP|osits haggle slyl 14401|129372|9373|2|24|33632.88|0.10|0.01|N|O|1995-10-24|1995-10-13|1995-10-27|COLLECT COD|MAIL|about the special pa 14401|149648|7191|3|15|25464.60|0.06|0.05|N|O|1995-09-24|1995-10-28|1995-10-03|NONE|FOB|en requests 14401|124471|4472|4|29|43368.63|0.06|0.00|N|O|1995-08-22|1995-10-31|1995-09-03|COLLECT COD|TRUCK|es! unusual multiplier 14401|179729|2247|5|20|36174.40|0.07|0.03|N|O|1995-10-07|1995-09-05|1995-10-14|COLLECT COD|MAIL|ns are furiousl 14402|24611|4612|1|33|50675.13|0.02|0.00|A|F|1993-11-11|1993-12-25|1993-11-25|DELIVER IN PERSON|FOB|cording to t 14402|21647|9154|2|41|64314.24|0.00|0.08|R|F|1994-02-01|1993-12-02|1994-02-20|DELIVER IN PERSON|MAIL|odolites! regular, regular depos 14402|8459|5960|3|25|34186.25|0.08|0.00|A|F|1994-01-27|1993-12-08|1994-02-21|TAKE BACK RETURN|TRUCK|ckages. bo 14403|130511|5538|1|48|73992.48|0.10|0.05|N|O|1998-03-25|1998-05-21|1998-04-04|COLLECT COD|SHIP|al pearls. blithely 14403|84128|9145|2|33|36699.96|0.05|0.05|N|O|1998-07-01|1998-05-09|1998-07-20|NONE|TRUCK|eas sleep carefully. theodolites impress. i 14403|137490|7491|3|7|10692.43|0.04|0.02|N|O|1998-06-25|1998-05-12|1998-07-18|DELIVER IN PERSON|AIR|lyly ironic asymptotes. fluffily regular 14404|143034|5549|1|39|42004.17|0.00|0.00|N|O|1996-12-25|1996-12-31|1997-01-11|DELIVER IN PERSON|SHIP|r pending pinto beans. 
14404|161156|8705|2|32|38948.80|0.00|0.06|N|O|1997-02-14|1996-12-24|1997-03-11|DELIVER IN PERSON|RAIL|e to are f 14404|145195|7710|3|31|38445.89|0.03|0.05|N|O|1997-01-22|1996-12-15|1997-02-18|TAKE BACK RETURN|TRUCK|ronic, final deposit 14404|87298|2315|4|40|51411.60|0.02|0.03|N|O|1997-01-16|1997-01-18|1997-01-22|DELIVER IN PERSON|MAIL|odolites across the carefully regu 14404|4396|6897|5|41|53315.99|0.04|0.04|N|O|1996-11-18|1997-01-29|1996-11-22|TAKE BACK RETURN|REG AIR|ly blithely final packag 14404|75019|5020|6|33|32802.33|0.07|0.00|N|O|1997-02-16|1996-12-05|1997-02-23|COLLECT COD|MAIL|oss the requests boost slyly fu 14404|35349|7853|7|42|53942.28|0.10|0.01|N|O|1997-02-14|1997-01-20|1997-03-11|TAKE BACK RETURN|RAIL|s. slyly iro 14405|110624|625|1|36|58846.32|0.04|0.00|N|O|1996-06-26|1996-06-14|1996-06-29|COLLECT COD|SHIP|the silent requests. regular dep 14405|56238|3754|2|17|20301.91|0.04|0.04|N|O|1996-07-29|1996-06-30|1996-08-28|NONE|AIR|egular requests ar 14405|188461|3498|3|20|30989.20|0.10|0.08|N|O|1996-07-05|1996-06-26|1996-07-08|COLLECT COD|FOB|ag blithely. care 14406|31484|3988|1|12|16985.76|0.03|0.00|R|F|1993-07-05|1993-05-25|1993-07-25|DELIVER IN PERSON|MAIL|he blithely r 14406|144137|6652|2|9|10630.17|0.04|0.08|A|F|1993-06-04|1993-04-18|1993-06-27|NONE|FOB|grate slyly spe 14406|96844|1863|3|24|44180.16|0.04|0.01|A|F|1993-03-23|1993-04-18|1993-04-16|TAKE BACK RETURN|TRUCK|e accounts. pending, special instructi 14406|54658|4659|4|20|32253.00|0.02|0.02|R|F|1993-04-30|1993-05-07|1993-05-19|NONE|MAIL|lar ideas boost carefully instru 14406|28351|5858|5|12|15352.20|0.06|0.03|R|F|1993-05-21|1993-04-20|1993-06-03|DELIVER IN PERSON|TRUCK| deposits 14406|81979|6996|6|49|96087.53|0.09|0.07|R|F|1993-03-27|1993-05-28|1993-04-10|COLLECT COD|REG AIR|refully unusual requests haggle iron 14407|2135|7136|1|24|24891.12|0.04|0.03|A|F|1993-11-17|1994-01-22|1993-12-05|TAKE BACK RETURN|SHIP|bold pinto beans. ironic, re 14432|16765|9267|1|42|70633.92|0.07|0.06|N|O|1997-01-18|1997-01-20|1997-01-19|NONE|REG AIR|bout the slow 14433|129660|7197|1|46|77724.36|0.06|0.03|N|O|1996-05-20|1996-07-26|1996-06-08|DELIVER IN PERSON|SHIP| the blithe, regular foxes boost 14433|187551|7552|2|9|14746.95|0.02|0.08|N|O|1996-08-13|1996-08-06|1996-08-17|TAKE BACK RETURN|RAIL|carefully. packag 14433|160671|672|3|5|8658.35|0.03|0.02|N|O|1996-08-24|1996-07-31|1996-09-07|TAKE BACK RETURN|AIR|re. slyly re 14433|183186|3187|4|40|50767.20|0.09|0.04|N|O|1996-08-02|1996-07-10|1996-08-22|TAKE BACK RETURN|REG AIR|xes haggle 14433|195880|8400|5|5|9879.40|0.03|0.05|N|O|1996-07-28|1996-06-15|1996-08-02|COLLECT COD|FOB|c foxes slee 14433|143474|1017|6|40|60698.80|0.05|0.00|N|O|1996-07-10|1996-07-10|1996-08-02|COLLECT COD|SHIP|as. pending dolphins sleep after the 14434|48747|3756|1|7|11870.18|0.06|0.05|R|F|1995-05-28|1995-04-07|1995-06-17|NONE|REG AIR|ly even deposits. reg 14434|84423|1948|2|23|32370.66|0.05|0.02|R|F|1995-05-04|1995-04-02|1995-05-19|TAKE BACK RETURN|AIR|requests. ironic instruc 14434|94022|9041|3|35|35560.70|0.09|0.00|A|F|1995-04-06|1995-03-20|1995-04-15|NONE|TRUCK|cial requests. even, even courts x-ray 14434|15118|7620|4|2|2066.22|0.05|0.07|R|F|1995-03-25|1995-04-09|1995-04-18|TAKE BACK RETURN|TRUCK|accounts cajole quickly. 
pending som 14434|72906|7921|5|41|77034.90|0.08|0.01|A|F|1995-03-01|1995-04-14|1995-03-28|NONE|SHIP|ctions along the regula 14435|179454|7006|1|10|15334.50|0.10|0.08|A|F|1993-04-01|1993-02-11|1993-05-01|NONE|TRUCK|ress, ironic packages maintain 14435|71731|4239|2|25|42568.25|0.08|0.07|R|F|1992-12-23|1993-02-09|1992-12-25|DELIVER IN PERSON|MAIL| boost caref 14435|180992|6029|3|22|45605.78|0.01|0.06|A|F|1992-12-28|1993-02-15|1993-01-18|NONE|AIR|sleep slyly am 14435|116812|4346|4|32|58521.92|0.02|0.05|R|F|1993-01-22|1993-02-15|1993-01-24|DELIVER IN PERSON|AIR|ickly special packa 14435|44771|7276|5|8|13726.16|0.03|0.03|A|F|1993-02-22|1993-01-13|1993-03-12|TAKE BACK RETURN|REG AIR|ar foxes: slyly ironic Tiresias solve f 14435|66018|3537|6|17|16728.17|0.02|0.07|A|F|1993-01-23|1993-02-24|1993-01-29|TAKE BACK RETURN|FOB|thely about 14435|152893|439|7|14|27242.46|0.02|0.08|R|F|1992-12-31|1993-02-10|1993-01-27|TAKE BACK RETURN|RAIL|uses. furiously special notornis haggle flu 14436|69708|4721|1|33|55364.10|0.09|0.06|R|F|1994-05-05|1994-05-21|1994-05-25|NONE|SHIP| deposits. slyly unusual instr 14436|181786|6823|2|39|72843.42|0.03|0.03|A|F|1994-06-17|1994-05-19|1994-07-02|COLLECT COD|MAIL|counts haggle c 14436|72782|2783|3|9|15793.02|0.05|0.07|R|F|1994-06-07|1994-05-04|1994-07-02|TAKE BACK RETURN|AIR|o beans. regularly pending d 14436|31445|8955|4|11|15140.84|0.04|0.02|R|F|1994-03-27|1994-05-03|1994-04-14|COLLECT COD|FOB|rmanent deposits are 14436|92288|4798|5|50|64014.00|0.10|0.04|A|F|1994-05-12|1994-06-02|1994-05-15|TAKE BACK RETURN|AIR|. deposits are according to 14437|9243|1744|1|36|41480.64|0.09|0.03|R|F|1994-06-28|1994-07-13|1994-07-23|COLLECT COD|RAIL|ronically 14437|138095|5635|2|35|39658.15|0.01|0.05|R|F|1994-08-14|1994-07-06|1994-08-28|TAKE BACK RETURN|FOB| slyly pen 14437|159500|2016|3|19|29630.50|0.07|0.05|A|F|1994-05-22|1994-07-05|1994-06-20|TAKE BACK RETURN|FOB| instructions haggle carefully even, 14437|112504|38|4|27|40945.50|0.07|0.03|A|F|1994-07-25|1994-07-10|1994-08-15|DELIVER IN PERSON|SHIP|tes across the carefull 14437|39612|9613|5|30|46548.30|0.00|0.04|A|F|1994-06-07|1994-06-22|1994-06-23|NONE|SHIP|usly pending sentiments use blithely. 14437|158507|6053|6|42|65751.00|0.06|0.04|R|F|1994-06-24|1994-08-08|1994-07-13|TAKE BACK RETURN|AIR|ans according to the furiously fin 14437|19983|7487|7|45|85634.10|0.04|0.04|R|F|1994-07-21|1994-08-02|1994-08-19|DELIVER IN PERSON|FOB|ress foxes. furio 14438|52637|5143|1|37|58816.31|0.09|0.01|N|O|1995-10-27|1995-11-06|1995-11-08|COLLECT COD|TRUCK|fter the furiously regular sentiments sl 14438|62310|7323|2|11|13995.41|0.08|0.01|N|O|1995-08-29|1995-10-21|1995-09-16|COLLECT COD|FOB|ng the packages. furiou 14438|42480|2481|3|33|46941.84|0.06|0.05|N|O|1995-12-01|1995-10-16|1995-12-23|COLLECT COD|SHIP| carefully iron 14439|53377|5883|1|3|3991.11|0.09|0.03|R|F|1992-09-22|1992-10-22|1992-10-09|NONE|AIR| the carefully pending f 14464|170970|971|1|29|59188.13|0.07|0.01|N|O|1998-05-08|1998-04-19|1998-05-16|TAKE BACK RETURN|SHIP|as against the bold, 14465|112818|352|1|48|87878.88|0.00|0.07|R|F|1994-09-19|1994-09-05|1994-10-17|COLLECT COD|AIR|ding to the fluffily slow dependencie 14465|124353|4354|2|7|9641.45|0.01|0.03|R|F|1994-08-28|1994-07-28|1994-09-12|COLLECT COD|SHIP|its. furiously special gifts are regul 14465|186875|4430|3|17|33351.79|0.08|0.06|A|F|1994-09-25|1994-08-07|1994-10-02|NONE|TRUCK|r dependencies. quickly fi 14465|2585|86|4|12|17850.96|0.10|0.07|R|F|1994-06-26|1994-07-30|1994-07-25|COLLECT COD|REG AIR| are. 
ironic pinto 14465|36918|6919|5|6|11129.46|0.01|0.03|A|F|1994-08-24|1994-08-26|1994-09-13|DELIVER IN PERSON|FOB|ular deposits after the ruthless, bold 14466|130175|5202|1|17|20487.89|0.09|0.06|N|O|1996-08-21|1996-06-26|1996-09-15|COLLECT COD|RAIL|regular pin 14467|172320|2321|1|16|22277.12|0.00|0.08|N|O|1996-07-20|1996-07-10|1996-08-08|COLLECT COD|MAIL|g dependencies. reque 14467|174606|9641|2|44|73946.40|0.01|0.00|N|O|1996-08-17|1996-08-24|1996-08-20|COLLECT COD|SHIP|s. carefully regular packages affix acc 14467|199008|6566|3|24|26568.00|0.02|0.00|N|O|1996-09-13|1996-07-20|1996-10-07|COLLECT COD|REG AIR|ckages haggle slyly quickly iron 14467|123363|900|4|15|20795.40|0.05|0.01|N|O|1996-06-02|1996-08-13|1996-06-22|COLLECT COD|FOB| slyly careful 14468|92361|2362|1|10|13533.60|0.01|0.08|N|O|1998-07-27|1998-09-26|1998-08-18|DELIVER IN PERSON|TRUCK|inal theodolites. carefully express 14468|97666|176|2|23|38264.18|0.10|0.06|N|O|1998-07-22|1998-10-07|1998-08-14|DELIVER IN PERSON|RAIL|the blithely fi 14469|74039|6547|1|19|19247.57|0.08|0.07|N|O|1997-06-02|1997-04-23|1997-07-01|NONE|TRUCK|d pinto beans cajole fluffily fluffil 14469|170633|634|2|33|56219.79|0.02|0.00|N|O|1997-05-05|1997-04-09|1997-05-14|DELIVER IN PERSON|SHIP|deposits cajole doggedly sly, s 14470|55833|844|1|28|50087.24|0.10|0.06|R|F|1995-05-06|1995-03-26|1995-05-14|TAKE BACK RETURN|TRUCK|slyly express requests lose. bold pac 14470|168924|3957|2|38|75730.96|0.04|0.02|A|F|1995-02-22|1995-04-12|1995-03-22|COLLECT COD|RAIL| regular theodolites sleep slyly blit 14470|97002|7003|3|20|19980.00|0.00|0.07|R|F|1995-05-04|1995-03-11|1995-05-19|DELIVER IN PERSON|MAIL|arefully even ideas. fin 14470|98994|1504|4|1|1992.99|0.01|0.00|R|F|1995-04-20|1995-03-29|1995-05-05|DELIVER IN PERSON|RAIL|st the bli 14471|39029|6539|1|50|48401.00|0.06|0.05|N|O|1995-07-03|1995-07-25|1995-07-05|TAKE BACK RETURN|MAIL|kly regular theodolites 14471|146115|8630|2|13|15094.43|0.01|0.00|N|O|1995-08-11|1995-08-14|1995-08-30|TAKE BACK RETURN|RAIL|ructions hinder 14471|150106|2622|3|50|57805.00|0.01|0.07|N|O|1995-09-24|1995-07-20|1995-10-16|NONE|FOB|s. slyly even dolphins after the theodol 14471|21640|9147|4|19|29671.16|0.08|0.04|N|O|1995-06-30|1995-08-09|1995-07-30|TAKE BACK RETURN|SHIP|ing accounts use about the fluffily spe 14471|89450|4467|5|42|60456.90|0.05|0.04|N|F|1995-06-07|1995-08-23|1995-06-29|DELIVER IN PERSON|AIR| cajole quickly evenly special Tire 14471|177795|5347|6|35|65547.65|0.05|0.03|A|F|1995-06-07|1995-07-01|1995-06-08|TAKE BACK RETURN|SHIP|refully. 
furi 14471|190131|2651|7|9|10990.17|0.03|0.03|N|O|1995-09-13|1995-07-09|1995-09-15|NONE|AIR|pending theodolites integrat 14496|127361|9874|1|22|30543.92|0.03|0.07|N|O|1996-05-20|1996-06-25|1996-06-08|NONE|AIR|sits might 14496|142143|7172|2|27|31998.78|0.04|0.08|N|O|1996-07-14|1996-05-27|1996-08-03|DELIVER IN PERSON|AIR|g the blithely special t 14496|193332|8371|3|48|68415.84|0.07|0.03|N|O|1996-06-14|1996-05-22|1996-07-04|COLLECT COD|AIR|riously ironic pinto beans sleep ironica 14496|114285|9308|4|9|11693.52|0.07|0.05|N|O|1996-08-06|1996-06-30|1996-08-10|DELIVER IN PERSON|MAIL|lphins across the 14496|184832|4833|5|29|55588.07|0.06|0.02|N|O|1996-05-26|1996-06-27|1996-05-30|TAKE BACK RETURN|MAIL|es haggle quickly pending 14497|169342|9343|1|48|67744.32|0.02|0.07|R|F|1992-07-16|1992-07-26|1992-07-26|TAKE BACK RETURN|SHIP|losely ironic excuses across the carefully 14497|121921|9458|2|17|33029.64|0.07|0.03|A|F|1992-08-02|1992-07-20|1992-08-09|COLLECT COD|TRUCK|al deposits sleep among 14497|117788|7789|3|1|1805.78|0.08|0.07|A|F|1992-08-03|1992-08-14|1992-09-01|DELIVER IN PERSON|RAIL|s. regular, regular asympt 14497|50729|3235|4|22|36953.84|0.08|0.08|R|F|1992-08-13|1992-06-22|1992-08-26|TAKE BACK RETURN|REG AIR|ackages haggle permanently? qui 14498|14860|7362|1|5|8874.30|0.06|0.02|N|O|1996-11-17|1997-01-02|1996-12-04|DELIVER IN PERSON|SHIP|ges are. ideas haggle 14498|140281|7824|2|23|30389.44|0.08|0.08|N|O|1996-10-14|1996-11-21|1996-11-04|NONE|FOB|y even packages. bli 14498|94192|1720|3|10|11861.90|0.08|0.06|N|O|1996-10-28|1996-11-06|1996-11-12|DELIVER IN PERSON|FOB|c, final platelets. quickly special packag 14498|81013|6030|4|14|13916.14|0.10|0.08|N|O|1996-11-26|1996-12-24|1996-12-22|TAKE BACK RETURN|REG AIR|lthily express deposits. furious 14498|185242|279|5|7|9290.68|0.09|0.08|N|O|1996-10-31|1996-12-16|1996-11-03|COLLECT COD|AIR|to beans are slyly. c 14498|151917|4433|6|20|39378.20|0.08|0.03|N|O|1996-11-17|1996-12-19|1996-12-02|NONE|AIR|s. furiously fina 14498|64363|9376|7|40|53094.40|0.10|0.04|N|O|1996-12-24|1996-11-06|1997-01-01|NONE|FOB|iers. unusual 14499|137179|9693|1|46|55943.82|0.10|0.02|R|F|1993-12-24|1994-01-08|1994-01-02|NONE|FOB|eep blithely unusual de 14499|153793|1339|2|25|46169.75|0.02|0.01|R|F|1993-12-04|1994-02-07|1993-12-05|TAKE BACK RETURN|SHIP|e blithely final ideas! care 14499|194962|2520|3|41|84335.36|0.02|0.02|R|F|1994-01-01|1994-01-23|1994-01-29|TAKE BACK RETURN|TRUCK| foxes are across the 14499|121612|9149|4|41|66978.01|0.01|0.07|A|F|1994-02-05|1994-02-11|1994-02-18|TAKE BACK RETURN|RAIL| even excuses are silent foxes. 14499|127424|7425|5|42|60959.64|0.00|0.07|R|F|1994-02-10|1994-02-05|1994-03-12|NONE|AIR|tes detect along the carefull 14499|10241|2743|6|6|6907.44|0.04|0.04|A|F|1994-01-26|1994-01-07|1994-02-16|NONE|AIR| carefully ironic packages. even fox 14499|107738|5269|7|36|62846.28|0.04|0.01|A|F|1994-01-30|1994-01-12|1994-02-08|COLLECT COD|SHIP|ys special foxes. caref 14500|146148|8663|1|30|35824.20|0.07|0.03|N|O|1995-07-23|1995-07-29|1995-07-29|TAKE BACK RETURN|TRUCK|thely above the silent, pending pint 14500|94666|2194|2|33|54801.78|0.00|0.01|N|O|1995-09-21|1995-09-04|1995-10-09|NONE|TRUCK|g, stealthy packages about 14501|115948|8460|1|33|64810.02|0.01|0.08|N|O|1997-10-20|1997-10-10|1997-11-13|DELIVER IN PERSON|MAIL|lly after th 14501|136104|1131|2|36|41043.60|0.06|0.07|N|O|1997-12-04|1997-11-10|1997-12-17|NONE|AIR|fully slow asymptotes play furiously? 
pendi 14502|56286|1297|1|33|40995.24|0.05|0.00|N|O|1996-11-11|1996-10-05|1996-12-05|TAKE BACK RETURN|RAIL|xpress deposits 14502|189046|9047|2|26|29511.04|0.10|0.00|N|O|1996-09-09|1996-10-05|1996-09-14|NONE|MAIL| final foxes nag. furiously bold warh 14503|97881|391|1|11|20667.68|0.07|0.03|R|F|1995-03-26|1995-03-19|1995-04-04|NONE|RAIL|he quickly final depo 14503|76873|9381|2|30|55496.10|0.05|0.05|A|F|1995-03-22|1995-04-25|1995-04-04|DELIVER IN PERSON|REG AIR|lly according to the furiously express fo 14503|193619|1177|3|42|71929.62|0.06|0.02|A|F|1995-04-15|1995-03-24|1995-05-05|NONE|TRUCK|r the furiously pendin 14503|142632|2633|4|41|68659.83|0.06|0.06|A|F|1995-05-19|1995-04-21|1995-05-27|COLLECT COD|AIR|aggle furiously slyly ruthless excuses. 14528|165974|5975|1|48|97918.56|0.04|0.03|A|F|1994-08-18|1994-08-07|1994-09-02|TAKE BACK RETURN|MAIL|lites are fur 14528|113412|3413|2|36|51314.76|0.02|0.06|R|F|1994-07-16|1994-09-02|1994-07-29|DELIVER IN PERSON|AIR|ly even foxe 14528|103412|3413|3|21|29723.61|0.05|0.04|A|F|1994-08-27|1994-08-25|1994-08-30|COLLECT COD|REG AIR|ackages. even requests wake s 14528|32874|384|4|19|34330.53|0.01|0.00|R|F|1994-07-26|1994-09-01|1994-08-20|DELIVER IN PERSON|FOB|ix against the carefully spec 14529|19412|6916|1|43|57250.63|0.01|0.03|R|F|1993-11-15|1993-10-21|1993-12-08|NONE|FOB|re. quickly express instructions boo 14529|50347|348|2|26|33730.84|0.02|0.01|A|F|1994-01-09|1993-11-24|1994-02-06|TAKE BACK RETURN|TRUCK|ven requests boost carefully int 14529|176971|9489|3|22|45055.34|0.05|0.03|A|F|1994-01-08|1993-11-03|1994-01-10|COLLECT COD|RAIL|, silent a 14529|138599|6139|4|28|45852.52|0.06|0.04|R|F|1993-10-07|1993-12-05|1993-11-01|COLLECT COD|RAIL| blithely after the sl 14529|20557|558|5|22|32506.10|0.01|0.04|A|F|1993-11-16|1993-12-01|1993-12-04|NONE|REG AIR| special instruct 14529|89114|4131|6|7|7721.77|0.04|0.07|R|F|1994-01-15|1993-11-04|1994-02-12|NONE|SHIP|ts haggle across the ac 14530|181529|9084|1|48|77304.96|0.03|0.06|A|F|1994-06-09|1994-06-28|1994-06-18|COLLECT COD|AIR|inal asymptotes sleep alongsi 14530|149281|4310|2|35|46559.80|0.06|0.01|A|F|1994-07-28|1994-06-20|1994-08-22|DELIVER IN PERSON|FOB|t deposits! even platelets boost furiou 14530|153140|8171|3|16|19090.24|0.09|0.08|A|F|1994-07-04|1994-06-10|1994-07-18|DELIVER IN PERSON|MAIL|busy requests nag slyly doggedly regul 14530|8721|8722|4|45|73337.40|0.08|0.07|R|F|1994-08-06|1994-05-11|1994-09-04|NONE|FOB|totes sleep blithely ironic 14530|172427|4945|5|4|5997.68|0.02|0.07|R|F|1994-06-17|1994-06-05|1994-06-25|NONE|REG AIR|brave packages 14530|174330|9365|6|8|11234.64|0.10|0.06|R|F|1994-07-01|1994-06-18|1994-07-02|NONE|REG AIR|es. carefully express escapade 14530|155234|7750|7|16|20627.68|0.01|0.02|A|F|1994-06-17|1994-07-05|1994-07-10|COLLECT COD|REG AIR| foxes alongside of the quickly ironic depo 14531|3076|8077|1|47|46016.29|0.03|0.07|N|O|1997-07-17|1997-07-23|1997-07-18|COLLECT COD|MAIL|luffily regular package 14531|89009|6534|2|34|33932.00|0.04|0.00|N|O|1997-07-11|1997-07-11|1997-08-02|DELIVER IN PERSON|SHIP|oxes sleep quickly even, regular p 14531|165198|2747|3|45|56843.55|0.06|0.02|N|O|1997-08-22|1997-09-05|1997-09-13|TAKE BACK RETURN|MAIL|ng theodolites try to run 14531|73309|8324|4|34|43598.20|0.08|0.01|N|O|1997-10-05|1997-08-21|1997-10-27|TAKE BACK RETURN|RAIL|c foxes alo 14532|81179|3688|1|9|10441.53|0.06|0.04|N|O|1996-07-19|1996-06-21|1996-08-16|DELIVER IN PERSON|FOB|ely packages. carefully ironi 14533|11500|1501|1|19|26818.50|0.06|0.02|N|O|1996-08-27|1996-07-21|1996-08-29|NONE|SHIP|ic accounts. 
furiously b 14533|79264|1772|2|4|4973.04|0.01|0.01|N|O|1996-05-31|1996-06-20|1996-06-28|TAKE BACK RETURN|TRUCK|ggle across the ironic p 14533|188356|8357|3|36|51996.60|0.04|0.03|N|O|1996-06-21|1996-07-08|1996-07-04|NONE|FOB|into beans. even, 14533|98818|1328|4|8|14534.48|0.02|0.05|N|O|1996-06-06|1996-07-17|1996-06-19|COLLECT COD|RAIL|ly express somas under t 14533|27569|7570|5|17|25441.52|0.00|0.00|N|O|1996-06-30|1996-07-08|1996-07-05|NONE|FOB|kages across the fluffily 14533|149477|9478|6|27|41214.69|0.07|0.05|N|O|1996-07-21|1996-07-08|1996-08-13|NONE|REG AIR| quickly; unusual accounts 14534|125750|5751|1|46|81684.50|0.07|0.02|A|F|1993-09-02|1993-09-11|1993-09-24|COLLECT COD|FOB|. furiousl 14534|76768|1783|2|43|75024.68|0.05|0.08|R|F|1993-10-03|1993-08-28|1993-10-10|DELIVER IN PERSON|FOB|at furiously regular accounts. fluff 14534|26087|6088|3|14|14183.12|0.03|0.00|R|F|1993-08-15|1993-08-30|1993-08-16|TAKE BACK RETURN|RAIL|n instructions sleep carefully s 14534|27637|7638|4|19|29727.97|0.00|0.05|A|F|1993-08-04|1993-08-26|1993-08-07|DELIVER IN PERSON|MAIL|ts. unusual theodolites sleep against the f 14534|89491|7016|5|48|71063.52|0.03|0.02|R|F|1993-07-28|1993-09-08|1993-08-25|TAKE BACK RETURN|AIR|aggle slyly. furiously special foxes 14535|122664|7689|1|5|8433.30|0.00|0.01|A|F|1993-11-13|1993-12-12|1993-12-01|DELIVER IN PERSON|REG AIR|y special grouches. slyly regular packages 14535|6743|4244|2|27|44542.98|0.02|0.01|R|F|1994-01-19|1993-12-26|1994-02-16|TAKE BACK RETURN|AIR|yly express 14535|123387|924|3|27|38080.26|0.02|0.00|R|F|1993-12-03|1993-11-25|1993-12-08|TAKE BACK RETURN|TRUCK| beans cajole pa 14535|158831|3862|4|22|41576.26|0.02|0.06|R|F|1994-01-29|1993-11-03|1994-02-01|DELIVER IN PERSON|RAIL|hely expres 14560|146557|9072|1|24|38485.20|0.05|0.06|N|O|1998-01-29|1998-02-07|1998-02-16|COLLECT COD|AIR|refully fluffily express ideas. final p 14561|116713|4247|1|50|86485.50|0.03|0.07|N|O|1998-08-02|1998-08-15|1998-08-05|DELIVER IN PERSON|REG AIR|hins hinder ironi 14561|180854|3373|2|9|17413.65|0.02|0.02|N|O|1998-09-02|1998-09-09|1998-09-15|COLLECT COD|MAIL|thely after the express instr 14561|6107|1108|3|29|29379.90|0.04|0.08|N|O|1998-10-13|1998-08-09|1998-10-16|DELIVER IN PERSON|FOB|inal requests. furiously ironic packa 14561|38973|8974|4|48|91774.56|0.01|0.03|N|O|1998-08-11|1998-08-10|1998-08-13|NONE|FOB|ully. slyly unusual theodolites use; 14561|17061|2064|5|21|20539.26|0.00|0.02|N|O|1998-07-12|1998-08-29|1998-07-13|COLLECT COD|MAIL|uriously fluffily regular requests. asy 14561|136043|6044|6|36|38845.44|0.00|0.08|N|O|1998-07-29|1998-09-08|1998-08-04|TAKE BACK RETURN|SHIP|regular requests poach. always regular 14561|73931|6439|7|25|47623.25|0.09|0.06|N|O|1998-08-17|1998-08-24|1998-09-02|NONE|FOB|sleep blithely ironic depths. furiously f 14562|157860|7861|1|13|24932.18|0.01|0.03|N|O|1996-11-12|1996-10-11|1996-11-21|COLLECT COD|SHIP|out the quickly specia 14562|138023|8024|2|36|38196.72|0.07|0.03|N|O|1996-07-25|1996-09-27|1996-08-16|NONE|RAIL| slyly. blithely final 14562|139249|4276|3|17|21900.08|0.10|0.05|N|O|1996-08-19|1996-08-29|1996-09-17|NONE|RAIL|arefully regular requests. slyly special 14562|145650|679|4|50|84782.50|0.08|0.05|N|O|1996-11-06|1996-08-28|1996-12-03|DELIVER IN PERSON|REG AIR| after the furiously 14563|143517|1060|1|31|48375.81|0.08|0.05|N|O|1996-02-05|1996-01-04|1996-02-23|TAKE BACK RETURN|AIR|r packages haggle. ironic ideas ca 14563|82155|4664|2|44|50034.60|0.07|0.02|N|O|1996-02-06|1996-01-24|1996-03-05|NONE|SHIP|kly final instructions. 
furiously regular 14563|178603|1121|3|30|50448.00|0.01|0.00|N|O|1996-03-01|1996-01-26|1996-03-04|DELIVER IN PERSON|FOB| the final requests! regular 14563|176331|8849|4|32|45034.56|0.02|0.08|N|O|1996-02-05|1996-01-19|1996-02-21|NONE|TRUCK| slyly even warthogs haggle. 14563|187456|7457|5|10|15434.50|0.04|0.06|N|O|1996-03-11|1996-01-02|1996-04-05|TAKE BACK RETURN|FOB|fully special 14564|159052|4083|1|1|1111.05|0.09|0.00|A|F|1993-09-14|1993-07-15|1993-10-08|TAKE BACK RETURN|FOB|usual packages wake slyly bold, 14565|36770|4280|1|33|56323.41|0.01|0.00|N|O|1998-01-31|1998-02-05|1998-02-24|TAKE BACK RETURN|FOB|gular, regular dependencies doubt f 14565|185202|239|2|5|6436.00|0.05|0.03|N|O|1998-01-04|1998-02-05|1998-01-30|DELIVER IN PERSON|SHIP|ages nag regular requests 14566|170090|91|1|25|29002.25|0.02|0.01|N|O|1997-01-29|1997-01-18|1997-02-12|NONE|FOB|quickly pend 14566|87778|7779|2|22|38846.94|0.01|0.04|N|O|1996-12-17|1997-01-10|1996-12-29|DELIVER IN PERSON|TRUCK|dependencie 14566|80752|3261|3|14|24258.50|0.07|0.08|N|O|1996-12-25|1996-12-10|1997-01-15|DELIVER IN PERSON|REG AIR|yly carefull 14566|190576|5615|4|28|46663.96|0.07|0.04|N|O|1997-01-25|1997-01-06|1997-02-05|DELIVER IN PERSON|TRUCK|ters are. bravely ironi 14566|504|5505|5|40|56180.00|0.08|0.03|N|O|1997-02-01|1997-01-07|1997-02-10|DELIVER IN PERSON|FOB|egular theodol 14566|61602|6615|6|20|31272.00|0.00|0.01|N|O|1996-11-09|1997-01-06|1996-11-30|DELIVER IN PERSON|FOB|lites. fluffily express courts alongside o 14566|131641|1642|7|3|5017.92|0.07|0.00|N|O|1996-11-03|1996-12-01|1996-11-18|COLLECT COD|TRUCK|nder. furi 14567|81535|4044|1|19|28814.07|0.07|0.03|N|O|1996-11-03|1997-01-05|1996-11-30|NONE|AIR|e. even deposits haggle slyly quic 14567|145072|101|2|46|51385.22|0.03|0.04|N|O|1996-11-21|1997-01-05|1996-12-03|COLLECT COD|MAIL|? deposits cajole agai 14567|9898|7399|3|7|12655.23|0.09|0.02|N|O|1997-01-02|1996-11-18|1997-01-21|DELIVER IN PERSON|TRUCK|, even dugo 14567|140062|2577|4|6|6612.36|0.08|0.05|N|O|1996-12-31|1996-12-28|1997-01-25|DELIVER IN PERSON|SHIP|ly even accounts. evenly final platelet 14567|20717|718|5|31|50769.01|0.06|0.00|N|O|1996-10-29|1996-11-24|1996-11-20|NONE|MAIL|he slowly 14567|178932|3967|6|13|26142.09|0.08|0.02|N|O|1997-01-05|1997-01-13|1997-01-23|NONE|REG AIR|y regular platelets. 14567|106195|1216|7|16|19219.04|0.03|0.03|N|O|1996-11-17|1996-11-28|1996-11-22|COLLECT COD|TRUCK|platelets. blithel 14592|5656|5657|1|28|43726.20|0.00|0.04|N|O|1995-08-16|1995-09-28|1995-09-06|TAKE BACK RETURN|AIR|quickly across the slyly final accounts. 14592|53711|1227|2|21|34958.91|0.06|0.02|N|O|1995-07-22|1995-08-08|1995-08-16|NONE|SHIP| ideas haggle blit 14592|113557|1091|3|36|56539.80|0.08|0.07|N|O|1995-07-12|1995-09-15|1995-08-04|TAKE BACK RETURN|AIR|beans sleep according to the ironic, exp 14593|26526|4033|1|8|11620.16|0.08|0.00|N|O|1998-02-02|1998-04-05|1998-03-03|NONE|SHIP|ncies. 
pending packages ab 14593|14114|4115|2|6|6168.66|0.09|0.07|N|O|1998-03-20|1998-04-01|1998-04-12|COLLECT COD|MAIL|unusual pinto 14593|12098|9602|3|44|44443.96|0.01|0.06|N|O|1998-04-20|1998-04-01|1998-04-21|COLLECT COD|MAIL|s according to the regular, ironic 14594|131085|8625|1|15|16741.20|0.10|0.01|A|F|1993-10-13|1993-11-19|1993-11-10|COLLECT COD|SHIP|phs need to a 14595|166889|1922|1|20|39117.60|0.01|0.05|N|O|1998-01-11|1998-02-27|1998-02-10|TAKE BACK RETURN|FOB|osits nag sly 14596|196890|1929|1|28|55632.92|0.05|0.00|A|F|1994-01-30|1994-03-04|1994-02-15|TAKE BACK RETURN|TRUCK|ructions serve b 14596|129554|9555|2|37|58591.35|0.00|0.03|A|F|1994-03-09|1994-03-06|1994-03-23|NONE|SHIP|y across the r 14596|3139|5640|3|7|7294.91|0.06|0.03|R|F|1994-04-18|1994-03-29|1994-04-21|COLLECT COD|MAIL|al instructions. care 14596|144974|7489|4|31|62588.07|0.06|0.05|R|F|1994-02-06|1994-04-08|1994-02-21|DELIVER IN PERSON|FOB| pinto beans wake alo 14596|169725|4758|5|8|14357.76|0.01|0.02|A|F|1994-04-20|1994-03-23|1994-05-10|COLLECT COD|TRUCK|encies haggle after the final 14596|165801|3350|6|8|14934.40|0.09|0.04|R|F|1994-03-13|1994-04-02|1994-03-22|COLLECT COD|RAIL|uests doubt quickly. 14597|70785|3293|1|45|79010.10|0.09|0.06|A|F|1993-09-09|1993-10-21|1993-09-15|NONE|AIR|oost. always ironic platelets affix bli 14597|76250|3772|2|44|53955.00|0.00|0.05|R|F|1993-09-10|1993-10-29|1993-09-14|NONE|FOB|hely bold i 14597|128977|6514|3|10|20059.70|0.07|0.00|R|F|1993-10-10|1993-09-30|1993-10-13|NONE|FOB| accounts haggle carefully. even, bold p 14597|113491|1025|4|39|58675.11|0.04|0.01|R|F|1993-09-12|1993-11-26|1993-10-12|DELIVER IN PERSON|RAIL|jole after the even accoun 14597|34843|4844|5|48|85336.32|0.04|0.07|A|F|1993-11-24|1993-10-20|1993-12-16|COLLECT COD|AIR|yly packages. slyl 14597|199238|4277|6|18|24070.14|0.01|0.04|R|F|1993-11-11|1993-11-14|1993-12-06|DELIVER IN PERSON|SHIP|lithely regul 14598|150351|7897|1|43|60258.05|0.00|0.00|N|O|1998-05-30|1998-04-13|1998-06-19|NONE|TRUCK|en foxes sleep. furiousl 14598|138254|3281|2|35|45228.75|0.07|0.03|N|O|1998-05-23|1998-03-17|1998-06-19|TAKE BACK RETURN|RAIL|regular requests sleep slow 14598|18941|8942|3|40|74397.60|0.08|0.06|N|O|1998-03-05|1998-03-17|1998-03-17|TAKE BACK RETURN|MAIL| slyly bra 14598|136911|1938|4|13|25322.83|0.06|0.00|N|O|1998-03-30|1998-04-19|1998-04-08|COLLECT COD|REG AIR|ndencies cajole 14598|57242|4758|5|7|8394.68|0.06|0.02|N|O|1998-03-18|1998-04-28|1998-04-02|DELIVER IN PERSON|AIR|lithely even dependencie 14599|122739|2740|1|12|21140.76|0.02|0.04|A|F|1992-09-03|1992-07-14|1992-09-14|NONE|MAIL|sual deposits sleep furiously ex 14624|199006|6564|1|12|13260.00|0.03|0.08|N|O|1998-07-16|1998-06-28|1998-08-15|NONE|AIR| after the pending deposit 14624|85839|3364|2|35|63869.05|0.08|0.04|N|O|1998-06-18|1998-05-28|1998-06-25|DELIVER IN PERSON|RAIL|ly even deposits. fluffily unus 14624|84733|2258|3|21|36072.33|0.02|0.03|N|O|1998-06-23|1998-06-14|1998-06-25|DELIVER IN PERSON|FOB|ly special requests. fluffily reg 14624|103967|6478|4|36|70954.56|0.01|0.02|N|O|1998-08-12|1998-05-24|1998-09-01|TAKE BACK RETURN|TRUCK|ess packages. furiously special acco 14624|163152|701|5|24|29163.60|0.05|0.07|N|O|1998-07-12|1998-06-27|1998-07-29|DELIVER IN PERSON|REG AIR|phins. 
final, ironic multipliers 14624|86564|9073|6|5|7752.80|0.07|0.08|N|O|1998-07-05|1998-06-22|1998-07-12|TAKE BACK RETURN|REG AIR|ng dependencies against 14624|75952|8460|7|6|11567.70|0.04|0.04|N|O|1998-05-07|1998-07-06|1998-05-26|TAKE BACK RETURN|MAIL|onic requests maintain along 14625|110858|8392|1|2|3737.70|0.01|0.03|A|F|1992-03-10|1992-04-01|1992-04-01|DELIVER IN PERSON|REG AIR|ffily bold 14626|60915|8434|1|4|7503.64|0.01|0.03|N|O|1997-10-27|1997-09-26|1997-11-02|NONE|SHIP|requests. careful 14626|188120|8121|2|19|22954.28|0.00|0.00|N|O|1997-09-04|1997-09-14|1997-09-23|DELIVER IN PERSON|SHIP|blithely slyly ironic 14626|131530|1531|3|30|46845.90|0.02|0.07|N|O|1997-08-23|1997-10-06|1997-09-04|NONE|RAIL|gular excuses. slyly pendin 14626|12768|7771|4|41|68911.16|0.07|0.02|N|O|1997-08-14|1997-09-27|1997-08-18|COLLECT COD|TRUCK| packages wake slyly si 14627|18749|3752|1|40|66709.60|0.02|0.01|A|F|1994-04-23|1994-04-25|1994-04-24|NONE|REG AIR|nic instructions. ironic packages w 14627|98547|3566|2|1|1545.54|0.04|0.04|R|F|1994-03-29|1994-03-11|1994-04-04|NONE|SHIP|deas was slyly dogge 14627|77415|4937|3|33|45949.53|0.01|0.07|R|F|1994-02-15|1994-04-07|1994-03-04|DELIVER IN PERSON|MAIL|y silent foxes among the blithely r 14627|110519|3031|4|38|58121.38|0.08|0.01|A|F|1994-02-18|1994-03-27|1994-03-19|DELIVER IN PERSON|SHIP|ges. blithely brave dependencies 14627|80659|5676|5|27|44270.55|0.00|0.07|R|F|1994-04-04|1994-04-19|1994-04-16|TAKE BACK RETURN|FOB|fully unusua 14627|26312|6313|6|18|22289.58|0.00|0.04|A|F|1994-05-01|1994-03-19|1994-05-03|COLLECT COD|AIR|nag slyly quickly even the 14627|11250|3752|7|2|2322.50|0.01|0.06|A|F|1994-05-31|1994-04-29|1994-06-09|TAKE BACK RETURN|AIR|regular dinos. fluffily 14628|70249|250|1|50|60962.00|0.07|0.01|A|F|1993-08-08|1993-08-13|1993-08-18|TAKE BACK RETURN|MAIL|o integrate care 14629|175128|2680|1|50|60156.00|0.06|0.01|A|F|1993-02-13|1993-02-21|1993-02-19|DELIVER IN PERSON|MAIL|en asymptotes lose caref 14630|182185|2186|1|39|49420.02|0.02|0.04|A|F|1992-12-26|1992-11-20|1993-01-12|TAKE BACK RETURN|RAIL|ans sleep furiousl 14630|183172|727|2|40|50206.80|0.03|0.00|R|F|1992-11-27|1992-12-16|1992-12-17|DELIVER IN PERSON|TRUCK|ar pinto beans. regular platelets at 14630|156994|4540|3|14|28713.86|0.07|0.01|R|F|1993-01-29|1993-01-02|1993-02-15|DELIVER IN PERSON|TRUCK|osits. carefully f 14630|189259|1778|4|20|26965.00|0.08|0.03|R|F|1993-02-10|1993-01-10|1993-03-04|COLLECT COD|AIR|althy pinto beans wake. blithely 14631|158986|8987|1|14|28629.72|0.01|0.03|N|O|1997-06-01|1997-07-12|1997-06-22|COLLECT COD|FOB|arefully bold deposits are slyly acr 14631|62888|2889|2|9|16657.92|0.05|0.01|N|O|1997-05-01|1997-07-02|1997-05-19|TAKE BACK RETURN|FOB|ges around the furiously special requests 14656|59539|9540|1|21|31469.13|0.09|0.00|N|O|1998-01-23|1997-12-23|1998-02-15|DELIVER IN PERSON|AIR|lar deposits. fluffily regular accounts w 14657|33797|3798|1|49|84808.71|0.04|0.08|A|F|1994-03-23|1994-04-18|1994-03-29|COLLECT COD|SHIP| pinto beans boost fu 14657|23550|3551|2|50|73677.50|0.09|0.03|A|F|1994-05-07|1994-05-06|1994-05-28|TAKE BACK RETURN|REG AIR|lar platelet 14658|96747|6748|1|3|5231.22|0.04|0.00|R|F|1994-04-08|1994-06-25|1994-05-03|COLLECT COD|TRUCK|ravely regular pinto beans. 
b 14658|13234|3235|2|40|45889.20|0.03|0.07|A|F|1994-04-10|1994-05-21|1994-05-01|DELIVER IN PERSON|REG AIR|among the sl 14659|73635|8650|1|47|75605.61|0.08|0.08|N|O|1998-03-28|1998-04-22|1998-03-30|TAKE BACK RETURN|SHIP| ironic, unu 14659|172586|5104|2|18|29854.44|0.07|0.04|N|O|1998-06-08|1998-05-01|1998-06-20|TAKE BACK RETURN|FOB|ites. carefully 14659|45273|5274|3|16|19492.32|0.07|0.06|N|O|1998-05-06|1998-04-27|1998-05-22|COLLECT COD|TRUCK|s cajole carefully above the 14659|87575|2592|4|11|17188.27|0.03|0.02|N|O|1998-06-01|1998-05-04|1998-06-05|COLLECT COD|REG AIR|sleep finally fo 14660|125832|857|1|48|89175.84|0.10|0.04|N|O|1997-06-10|1997-07-02|1997-06-19|COLLECT COD|SHIP|accounts. slyly clos 14660|44934|9943|2|12|22547.16|0.04|0.07|N|O|1997-08-26|1997-08-09|1997-09-05|NONE|REG AIR|dolites are furiously. slyly ironic accou 14660|4521|9522|3|8|11404.16|0.06|0.07|N|O|1997-06-20|1997-08-10|1997-06-27|NONE|FOB|ymptotes haggl 14660|172037|9589|4|39|43252.17|0.08|0.08|N|O|1997-06-07|1997-08-20|1997-07-06|DELIVER IN PERSON|REG AIR|en requests. the 14661|27248|7249|1|32|37607.68|0.00|0.02|R|F|1993-03-30|1993-03-24|1993-04-18|DELIVER IN PERSON|FOB|symptotes use. 14662|70059|5074|1|17|17493.85|0.02|0.03|N|O|1996-01-08|1995-12-22|1996-01-21|TAKE BACK RETURN|TRUCK|across the c 14662|70005|7527|2|4|3900.00|0.09|0.04|N|O|1996-01-05|1996-01-01|1996-01-20|TAKE BACK RETURN|TRUCK|lent requests integr 14662|169533|2050|3|19|30448.07|0.03|0.05|N|O|1995-11-21|1996-02-01|1995-12-16|COLLECT COD|REG AIR|y final, ironic instructions. 14662|131473|1474|4|18|27080.46|0.00|0.01|N|O|1996-02-15|1995-12-31|1996-02-19|TAKE BACK RETURN|AIR|unts. express pinto beans c 14662|54401|4402|5|48|65059.20|0.01|0.04|N|O|1995-11-21|1996-01-21|1995-12-13|DELIVER IN PERSON|RAIL|lar packages nag about the ca 14662|113093|627|6|41|45349.69|0.07|0.07|N|O|1996-02-18|1996-01-13|1996-03-09|TAKE BACK RETURN|AIR|he fluffily regular Ti 14663|127337|2362|1|36|49115.88|0.06|0.03|A|F|1993-02-02|1992-12-23|1993-02-23|COLLECT COD|SHIP|elets. slyly 14663|152565|7596|2|1|1617.56|0.00|0.01|A|F|1993-02-02|1992-12-21|1993-03-01|DELIVER IN PERSON|MAIL|ges wake fluffily. 14688|1254|8755|1|11|12707.75|0.04|0.08|N|O|1997-06-19|1997-04-04|1997-07-09|COLLECT COD|FOB|ies are express packages. unusual 14688|65591|8098|2|20|31131.80|0.01|0.01|N|O|1997-02-19|1997-04-11|1997-03-08|DELIVER IN PERSON|SHIP|ages along the carefully idle requests wa 14688|173017|569|3|10|10900.10|0.02|0.08|N|O|1997-03-14|1997-04-22|1997-04-05|COLLECT COD|AIR|riously even packages sleep a 14688|81632|4141|4|40|64545.20|0.00|0.00|N|O|1997-04-08|1997-04-06|1997-05-07|DELIVER IN PERSON|SHIP|ruthless packages. furiously special instru 14689|98171|3190|1|29|33905.93|0.05|0.06|N|O|1998-08-22|1998-09-11|1998-09-09|NONE|TRUCK|ly bold theodolites. t 14689|148396|911|2|6|8666.34|0.01|0.06|N|O|1998-08-30|1998-10-19|1998-09-01|COLLECT COD|FOB|tes are quickly regular packages. bold dep 14689|89849|4866|3|3|5516.52|0.10|0.07|N|O|1998-09-17|1998-10-21|1998-09-27|DELIVER IN PERSON|SHIP|hy requests. unusual, special cou 14690|87337|4862|1|23|30459.59|0.02|0.03|N|O|1997-09-16|1997-07-31|1997-10-03|COLLECT COD|SHIP| pinto beans. 
ironic platele 14691|29610|2113|1|23|35411.03|0.09|0.01|N|O|1998-06-21|1998-05-28|1998-06-28|DELIVER IN PERSON|MAIL|gedly regular ac 14691|45081|5082|2|42|43095.36|0.08|0.07|N|O|1998-07-26|1998-07-07|1998-08-01|DELIVER IN PERSON|SHIP|hlessly final requests-- unusual t 14692|126330|1355|1|18|24413.94|0.01|0.00|N|O|1996-06-17|1996-05-15|1996-07-07|NONE|REG AIR|es alongside of the accounts nag according 14692|24090|9095|2|30|30422.70|0.06|0.02|N|O|1996-05-16|1996-03-29|1996-05-20|NONE|SHIP|arefully always even accounts. 14693|61159|6172|1|31|34724.65|0.01|0.03|A|F|1995-03-07|1995-02-10|1995-03-09|TAKE BACK RETURN|RAIL|packages are upon the packages. careful 14693|88681|8682|2|47|78474.96|0.06|0.04|A|F|1995-01-19|1995-02-22|1995-02-05|COLLECT COD|RAIL|ch slyly furio 14693|50710|3216|3|40|66428.40|0.10|0.03|A|F|1995-04-04|1995-02-25|1995-05-02|COLLECT COD|TRUCK|gainst the express, final accounts. fur 14693|74248|9263|4|16|19555.84|0.07|0.06|R|F|1995-01-24|1995-04-08|1995-02-13|NONE|FOB|e slyly; quickly f 14693|72747|7762|5|22|37834.28|0.01|0.07|A|F|1995-02-14|1995-04-07|1995-02-20|TAKE BACK RETURN|REG AIR|hall have to cajo 14694|50239|240|1|45|53515.35|0.07|0.05|N|O|1998-11-21|1998-10-22|1998-11-26|DELIVER IN PERSON|SHIP|r, express packages wake. furiously re 14694|164487|4488|2|17|26375.16|0.04|0.08|N|O|1998-11-01|1998-10-06|1998-11-04|DELIVER IN PERSON|TRUCK|slyly regular foxes. carefully regular 14694|123164|701|3|47|55796.52|0.05|0.02|N|O|1998-09-22|1998-10-13|1998-10-10|DELIVER IN PERSON|SHIP| the unusual 14694|1927|4428|4|33|60354.36|0.06|0.08|N|O|1998-08-11|1998-09-06|1998-09-10|DELIVER IN PERSON|AIR|rts. slyly even pinto 14694|172742|7777|5|22|39924.28|0.06|0.02|N|O|1998-09-23|1998-08-30|1998-10-13|COLLECT COD|REG AIR|arefully special excuses. blit 14694|71359|1360|6|40|53214.00|0.05|0.02|N|O|1998-10-06|1998-10-03|1998-10-14|COLLECT COD|FOB|venly special requ 14694|124237|6750|7|44|55494.12|0.06|0.07|N|O|1998-11-09|1998-10-25|1998-11-21|NONE|SHIP|the quickly express pinto beans. stealthi 14695|37154|7155|1|32|34916.80|0.08|0.01|A|F|1992-09-23|1992-10-16|1992-10-01|DELIVER IN PERSON|SHIP|g pinto beans are special theodolites. pend 14695|45727|3240|2|11|18399.92|0.01|0.00|R|F|1992-11-07|1992-11-20|1992-12-01|COLLECT COD|REG AIR|slyly final account 14695|127842|5379|3|32|59834.88|0.04|0.04|R|F|1992-10-04|1992-12-01|1992-10-19|COLLECT COD|RAIL|orses. regular theodolites alo 14695|35673|8177|4|43|69172.81|0.09|0.08|A|F|1992-09-28|1992-10-22|1992-10-24|NONE|AIR|ely regular packages s 14695|155411|2957|5|43|63055.63|0.06|0.05|A|F|1992-10-08|1992-12-02|1992-10-30|TAKE BACK RETURN|SHIP|lithely express asymptotes af 14720|44563|4564|1|23|34673.88|0.06|0.00|R|F|1993-10-06|1993-12-01|1993-10-23|TAKE BACK RETURN|RAIL|ges boost slyly sl 14720|156215|1246|2|4|5084.84|0.00|0.01|A|F|1993-09-11|1993-11-27|1993-09-26|DELIVER IN PERSON|AIR|ly silent requests wake fluf 14720|100576|3087|3|20|31531.40|0.08|0.06|R|F|1993-11-27|1993-11-09|1993-12-22|NONE|AIR|slyly unusual request 14720|164764|2313|4|23|42061.48|0.04|0.07|R|F|1993-10-03|1993-11-13|1993-10-29|DELIVER IN PERSON|MAIL|ly ironic foxes cajole furiously. even 14720|185734|5735|5|48|87347.04|0.10|0.08|A|F|1993-12-05|1993-11-24|1993-12-21|COLLECT COD|FOB|bout the ironic accounts sleep carefu 14721|129563|9564|1|39|62109.84|0.08|0.08|N|O|1997-07-31|1997-07-28|1997-08-18|TAKE BACK RETURN|REG AIR|ely alongside of the unusual deposits! 
14721|151799|4315|2|37|68479.23|0.08|0.02|N|O|1997-07-06|1997-06-04|1997-07-28|TAKE BACK RETURN|RAIL|eodolites detect carefully ironic pinto 14721|78952|6474|3|21|40549.95|0.01|0.06|N|O|1997-06-03|1997-07-21|1997-06-16|NONE|AIR|e blithely sly accounts nag quickly abo 14721|141699|4214|4|23|40035.87|0.06|0.03|N|O|1997-05-31|1997-07-08|1997-06-16|COLLECT COD|AIR|ess pinto beans cajo 14721|133327|867|5|3|4080.96|0.00|0.02|N|O|1997-08-16|1997-07-30|1997-09-06|COLLECT COD|MAIL|lar accounts. blithely ironic 14722|79326|6848|1|49|63960.68|0.06|0.01|N|O|1997-07-18|1997-07-14|1997-07-31|DELIVER IN PERSON|REG AIR|ests. unusual requests dete 14722|28501|3506|2|35|50032.50|0.03|0.08|N|O|1997-06-14|1997-07-31|1997-07-14|NONE|REG AIR|thely according to the 14722|12861|2862|3|49|86919.14|0.05|0.03|N|O|1997-08-11|1997-08-01|1997-08-12|TAKE BACK RETURN|SHIP|low foxes. bold deposits against 14722|139463|1977|4|46|69113.16|0.02|0.03|N|O|1997-05-29|1997-07-28|1997-06-20|COLLECT COD|REG AIR|lyly ironic multi 14722|144500|7015|5|25|38612.50|0.08|0.06|N|O|1997-09-08|1997-08-11|1997-10-05|NONE|AIR|ar requests. even platelets thrash blithely 14723|28579|1082|1|34|51257.38|0.02|0.02|N|O|1997-01-16|1997-03-17|1997-02-10|COLLECT COD|TRUCK|ironic accoun 14724|130120|7660|1|36|41404.32|0.06|0.03|N|O|1996-02-06|1996-02-04|1996-02-11|COLLECT COD|MAIL| regular pinto 14725|145744|8259|1|15|26846.10|0.07|0.04|N|O|1995-06-27|1995-07-17|1995-07-24|TAKE BACK RETURN|REG AIR|yly regular packages. 14725|109317|6848|2|10|13263.10|0.03|0.02|N|O|1995-07-05|1995-07-29|1995-07-20|DELIVER IN PERSON|TRUCK|quickly ironic instruct 14725|79070|1578|3|14|14686.98|0.04|0.07|N|O|1995-07-08|1995-09-09|1995-07-20|COLLECT COD|RAIL|y ironic dependencies. sl 14725|125263|5264|4|48|61836.48|0.04|0.00|N|O|1995-09-06|1995-07-28|1995-10-04|DELIVER IN PERSON|SHIP|s cajole furiously permanen 14725|85888|5889|5|50|93694.00|0.05|0.08|N|O|1995-09-06|1995-08-21|1995-09-29|NONE|MAIL|xpress the 14725|118076|8077|6|22|24069.54|0.09|0.05|N|O|1995-08-11|1995-08-20|1995-09-09|TAKE BACK RETURN|SHIP| deposits. carefully sp 14725|89803|9804|7|23|41234.40|0.05|0.06|N|O|1995-06-24|1995-09-07|1995-07-03|COLLECT COD|TRUCK| furiously even theodolites believe bli 14726|111133|6156|1|30|34323.90|0.09|0.05|A|F|1992-02-16|1992-03-27|1992-02-29|NONE|TRUCK| above the caref 14726|2231|9732|2|12|13598.76|0.00|0.08|A|F|1992-03-18|1992-02-08|1992-04-16|DELIVER IN PERSON|TRUCK|ar foxes nag sly 14726|92071|2072|3|30|31892.10|0.08|0.05|R|F|1992-01-13|1992-02-11|1992-01-28|DELIVER IN PERSON|TRUCK|s cajole above the busily 14727|134297|6811|1|12|15975.48|0.10|0.05|R|F|1992-10-21|1992-09-01|1992-10-24|DELIVER IN PERSON|MAIL|ilent theodolites sleep sl 14727|121310|1311|2|11|14644.41|0.03|0.04|R|F|1992-08-03|1992-08-19|1992-08-24|DELIVER IN PERSON|TRUCK|al packages sleep before the flu 14727|56183|6184|3|16|18226.88|0.03|0.06|A|F|1992-07-31|1992-08-21|1992-08-20|DELIVER IN PERSON|RAIL|y quickly 14727|53811|6317|4|38|67062.78|0.08|0.03|R|F|1992-10-26|1992-09-22|1992-11-17|NONE|RAIL|ts haggle slyly asymptotes. i 14752|169824|7373|1|21|39770.22|0.08|0.02|R|F|1994-02-11|1994-03-19|1994-02-20|NONE|MAIL|y on the blithely regular dep 14753|42094|2095|1|23|23830.07|0.10|0.01|N|O|1997-01-13|1997-02-16|1997-01-23|DELIVER IN PERSON|SHIP|ptotes against the carefully pending pinto 14753|61413|3920|2|2|2748.82|0.08|0.06|N|O|1997-04-11|1997-03-31|1997-05-03|NONE|TRUCK|sleep quickly against the 14753|155173|2719|3|10|12281.70|0.03|0.05|N|O|1997-02-02|1997-03-19|1997-02-03|TAKE BACK RETURN|FOB|ccounts. 
ironic instructions are fluf 14754|25056|2563|1|18|17658.90|0.03|0.03|N|O|1996-07-17|1996-08-27|1996-08-10|DELIVER IN PERSON|SHIP|usly final depths wake 14754|102865|396|2|8|14942.88|0.07|0.07|N|O|1996-08-23|1996-08-28|1996-09-03|COLLECT COD|MAIL|sleep carefully according to the regular 14754|48608|6121|3|12|18679.20|0.02|0.04|N|O|1996-06-30|1996-09-07|1996-07-16|TAKE BACK RETURN|MAIL|ests cajole fina 14754|139590|9591|4|3|4888.77|0.06|0.07|N|O|1996-08-16|1996-08-17|1996-08-30|COLLECT COD|MAIL|as. quickly ir 14754|78237|3252|5|36|43748.28|0.10|0.08|N|O|1996-07-16|1996-08-05|1996-08-10|DELIVER IN PERSON|TRUCK|ncies sublate quickly against the final d 14754|122472|2473|6|4|5977.88|0.10|0.08|N|O|1996-07-26|1996-08-23|1996-08-03|DELIVER IN PERSON|FOB|lly thin accounts; carefully 14755|32105|2106|1|48|49780.80|0.05|0.06|A|F|1993-02-24|1993-03-13|1993-03-18|COLLECT COD|TRUCK|s. even theodolites a 14755|188118|5673|2|47|56687.17|0.01|0.05|R|F|1993-01-20|1993-03-13|1993-02-19|TAKE BACK RETURN|SHIP|y against the furiously brave ins 14755|92533|61|3|31|47291.43|0.04|0.07|R|F|1993-04-01|1993-03-23|1993-04-02|DELIVER IN PERSON|AIR|rate regular packages. ca 14755|186278|3833|4|14|19099.78|0.05|0.06|R|F|1993-01-22|1993-04-04|1993-02-11|NONE|MAIL|le blithely. bold, express deposits a 14755|158587|1103|5|29|47721.82|0.08|0.08|R|F|1993-04-03|1993-02-11|1993-04-20|COLLECT COD|FOB| sheaves are. furiously even requests 14755|169276|6825|6|36|48429.72|0.06|0.02|A|F|1993-04-26|1993-04-01|1993-05-17|TAKE BACK RETURN|RAIL|heodolites are. ironic, final theo 14755|14541|4542|7|30|43666.20|0.06|0.05|A|F|1993-02-19|1993-04-04|1993-03-20|COLLECT COD|AIR|y. ironic, ironic 14756|188915|8916|1|43|86168.13|0.01|0.02|R|F|1994-11-12|1994-11-06|1994-11-13|COLLECT COD|TRUCK|ldly even packages; blithely ev 14756|84634|4635|2|23|37228.49|0.05|0.00|A|F|1994-10-05|1994-11-17|1994-10-21|COLLECT COD|REG AIR|tions upon the s 14756|30304|5311|3|9|11108.70|0.10|0.04|R|F|1994-12-27|1994-10-03|1995-01-11|TAKE BACK RETURN|AIR| regular asymptotes wake furiousl 14756|112388|9922|4|6|8402.28|0.01|0.08|A|F|1994-10-09|1994-11-25|1994-10-14|NONE|AIR|he bold accounts boost slyly 14757|136123|1150|1|43|49842.16|0.00|0.03|N|O|1998-04-14|1998-03-21|1998-05-11|TAKE BACK RETURN|REG AIR|tes are fluffily a 14758|113627|8650|1|8|13124.96|0.09|0.07|R|F|1994-01-28|1994-01-04|1994-02-22|COLLECT COD|AIR|se alongside of the 14758|113351|885|2|9|12279.15|0.10|0.08|R|F|1994-02-06|1994-01-10|1994-02-12|DELIVER IN PERSON|AIR|al deposits wake blit 14758|84883|7392|3|6|11207.28|0.04|0.03|A|F|1994-02-21|1994-01-04|1994-03-20|COLLECT COD|TRUCK|efully furiously r 14758|26202|8705|4|2|2256.40|0.00|0.06|R|F|1993-12-13|1994-01-18|1994-01-02|DELIVER IN PERSON|MAIL|posits nag quic 14758|172288|7323|5|5|6801.40|0.10|0.07|R|F|1994-01-03|1993-12-24|1994-01-20|DELIVER IN PERSON|RAIL|ial, pending deposits haggl 14759|139528|9529|1|19|29782.88|0.01|0.03|N|O|1997-04-24|1997-03-22|1997-05-08|COLLECT COD|MAIL| furiously ironic pinto beans cajole expres 14759|35766|8270|2|13|22122.88|0.09|0.06|N|O|1997-02-19|1997-03-31|1997-03-05|DELIVER IN PERSON|RAIL| pinto beans. quickly express attainme 14784|52807|323|1|36|63352.80|0.05|0.03|R|F|1992-05-11|1992-04-23|1992-05-19|DELIVER IN PERSON|FOB|r special accounts. 
fu 14784|114073|9096|2|11|11957.77|0.05|0.01|R|F|1992-05-23|1992-05-31|1992-06-05|DELIVER IN PERSON|FOB|ously furiously special instructi 14784|23878|6381|3|21|37839.27|0.03|0.08|R|F|1992-04-10|1992-04-15|1992-04-27|COLLECT COD|AIR|ccounts haggle furiously beyond the regu 14784|107741|252|4|18|31477.32|0.03|0.02|R|F|1992-05-20|1992-05-21|1992-06-02|DELIVER IN PERSON|RAIL|lithely. blithely ironic dependencies bo 14784|188894|8895|5|24|47589.36|0.06|0.08|R|F|1992-07-03|1992-05-30|1992-08-01|TAKE BACK RETURN|RAIL|n packages. enticing dolphins sleep. ca 14784|127758|2783|6|37|66072.75|0.07|0.00|A|F|1992-06-29|1992-05-26|1992-07-01|COLLECT COD|SHIP|ic accounts. bold excuses integrate careful 14785|114180|6692|1|3|3582.54|0.03|0.07|A|F|1994-12-22|1994-12-03|1995-01-10|NONE|RAIL|thy platelets wake. 14785|13801|6303|2|2|3429.60|0.01|0.05|A|F|1994-10-18|1994-12-07|1994-11-15|DELIVER IN PERSON|MAIL|uriously silent inst 14785|152348|4864|3|3|4201.02|0.02|0.06|A|F|1995-01-05|1995-01-01|1995-01-24|TAKE BACK RETURN|MAIL|theodolites are care 14785|99622|7150|4|1|1621.62|0.01|0.08|R|F|1994-12-28|1994-12-31|1995-01-12|TAKE BACK RETURN|FOB|ages affix furiously excuses. 14785|139259|6799|5|21|27263.25|0.04|0.02|R|F|1994-11-05|1994-11-30|1994-12-01|TAKE BACK RETURN|RAIL|long the ironic requests are about 14785|117214|9726|6|5|6156.05|0.04|0.03|R|F|1994-10-14|1994-11-27|1994-11-12|COLLECT COD|AIR|he platelets can cajole 14785|199722|2242|7|34|61938.48|0.04|0.03|R|F|1994-10-28|1994-12-28|1994-11-04|COLLECT COD|REG AIR| the special, regular theodoli 14786|115117|2651|1|33|37359.63|0.02|0.00|N|O|1997-08-05|1997-05-29|1997-08-31|NONE|MAIL|its. pending, regular asymptotes doze qui 14787|151208|8754|1|6|7555.20|0.10|0.08|N|O|1998-07-24|1998-09-20|1998-07-28|DELIVER IN PERSON|SHIP|al theodolites ca 14787|141155|1156|2|50|59807.50|0.04|0.05|N|O|1998-10-23|1998-08-18|1998-11-08|TAKE BACK RETURN|SHIP|final asymptotes. pending, unusual patte 14787|189227|1746|3|32|42119.04|0.01|0.07|N|O|1998-07-29|1998-09-02|1998-08-01|TAKE BACK RETURN|AIR|efully regular instructions nod slyly. p 14788|136955|6956|1|41|81669.95|0.09|0.08|N|O|1997-11-05|1998-01-07|1997-11-27|DELIVER IN PERSON|SHIP|ording to the regular, pending foxes 14789|6127|8628|1|45|46490.40|0.07|0.06|A|F|1993-12-16|1994-02-05|1994-01-10|COLLECT COD|FOB|ut the final accounts. blith 14789|168042|8043|2|45|49951.80|0.02|0.05|A|F|1994-03-31|1994-02-20|1994-04-30|COLLECT COD|AIR| ideas. requests cajole furiously r 14789|81209|1210|3|49|58319.80|0.02|0.05|A|F|1994-02-13|1994-02-01|1994-02-24|COLLECT COD|TRUCK|ests. special packages sleep; 14789|68542|6061|4|8|12084.32|0.10|0.04|R|F|1994-03-30|1994-01-24|1994-04-06|NONE|TRUCK|equests. 
bold 14790|186591|4146|1|47|78846.73|0.02|0.06|N|O|1996-09-11|1996-10-09|1996-09-20|TAKE BACK RETURN|MAIL|e slowly with the stealthily regula 14790|27309|7310|2|30|37089.00|0.10|0.04|N|O|1996-09-14|1996-09-22|1996-10-04|TAKE BACK RETURN|SHIP|lyly ironic pinto be 14790|166099|1132|3|39|45438.51|0.06|0.07|N|O|1996-09-17|1996-10-12|1996-10-12|DELIVER IN PERSON|AIR|use across the quickly ironic 14790|24169|4170|4|4|4372.64|0.06|0.00|N|O|1996-09-23|1996-11-01|1996-09-28|DELIVER IN PERSON|AIR|fily ironic dinos c 14790|8106|8107|5|10|10141.00|0.06|0.04|N|O|1996-09-23|1996-09-23|1996-09-28|TAKE BACK RETURN|MAIL|lar sauternes 14790|28180|683|6|28|31029.04|0.00|0.02|N|O|1996-12-02|1996-09-27|1996-12-27|TAKE BACK RETURN|SHIP| accounts; careful accounts 14790|17291|9793|7|42|50748.18|0.07|0.00|N|O|1996-10-24|1996-10-18|1996-11-22|TAKE BACK RETURN|FOB|regular platele 14791|4477|4478|1|25|34536.75|0.06|0.05|R|F|1993-03-06|1993-04-10|1993-03-27|DELIVER IN PERSON|AIR|bold packages. furiously pending de 14791|34930|7434|2|50|93246.50|0.05|0.01|R|F|1993-06-05|1993-04-27|1993-06-14|DELIVER IN PERSON|AIR|the regular frays. fluffily ir 14791|67725|2738|3|2|3385.44|0.01|0.02|R|F|1993-04-06|1993-04-06|1993-04-30|COLLECT COD|RAIL|ts. orbits wake blithely furiously re 14816|149711|7254|1|47|82753.37|0.05|0.01|R|F|1993-08-07|1993-09-23|1993-08-13|NONE|FOB|ording to the acc 14816|189125|6680|2|15|18211.80|0.04|0.04|R|F|1993-08-26|1993-09-16|1993-09-08|DELIVER IN PERSON|MAIL|carefully ironic dugou 14817|85190|2715|1|27|31730.13|0.04|0.00|A|F|1992-08-29|1992-09-04|1992-09-18|COLLECT COD|FOB|press pinto beans. iro 14817|186376|3931|2|24|35096.88|0.10|0.04|R|F|1992-08-21|1992-09-14|1992-09-20|TAKE BACK RETURN|AIR|y even ideas wake slyly a 14817|18912|8913|3|5|9154.55|0.05|0.08|A|F|1992-08-15|1992-09-07|1992-08-26|TAKE BACK RETURN|SHIP|into beans sleep furiousl 14817|53342|8353|4|25|32383.50|0.10|0.04|A|F|1992-08-03|1992-10-17|1992-08-07|NONE|TRUCK|. deposits are carefully. slyly expre 14817|105618|8129|5|15|24354.15|0.08|0.08|R|F|1992-08-01|1992-09-28|1992-08-07|NONE|TRUCK| ironic, express gifts-- blith 14818|126888|6889|1|34|65105.92|0.00|0.04|N|O|1997-01-25|1997-02-05|1997-02-07|DELIVER IN PERSON|SHIP|the slyly regula 14818|192495|53|2|24|38099.76|0.10|0.08|N|O|1997-01-05|1997-02-09|1997-01-16|DELIVER IN PERSON|REG AIR|le slyly regular theodolites. furiously eve 14818|48257|3266|3|1|1205.25|0.08|0.05|N|O|1997-02-23|1997-01-06|1997-03-01|NONE|AIR|ilent theodolites print regular, unusu 14818|48176|8177|4|22|24731.74|0.08|0.03|N|O|1997-02-03|1997-02-13|1997-02-19|NONE|MAIL|uts detect regular, regular depo 14818|87761|2778|5|26|45467.76|0.05|0.03|N|O|1996-12-09|1997-01-16|1996-12-21|COLLECT COD|REG AIR|ts sleep carefully after th 14818|184352|9389|6|13|18672.55|0.06|0.06|N|O|1997-02-20|1997-02-09|1997-03-14|COLLECT COD|MAIL|unusual deposits haggle blithely quick 14819|115742|765|1|10|17577.40|0.01|0.04|A|F|1993-05-23|1993-03-16|1993-05-28|DELIVER IN PERSON|FOB|ording to the dogged pearls. quickly pen 14819|63806|1325|2|9|15928.20|0.07|0.04|R|F|1993-02-27|1993-03-23|1993-03-20|COLLECT COD|RAIL|long the accounts. furiously e 14819|170826|8378|3|49|92944.18|0.09|0.03|A|F|1993-05-13|1993-02-25|1993-05-17|COLLECT COD|FOB|kages wake blithely. 14819|59230|1736|4|28|33298.44|0.00|0.04|A|F|1993-04-17|1993-03-16|1993-05-04|TAKE BACK RETURN|SHIP|ular instructions. 
r 14820|131362|3876|1|9|12540.24|0.05|0.02|A|F|1992-03-04|1992-02-13|1992-03-31|COLLECT COD|SHIP| foxes sleep 14820|188254|5809|2|46|61743.50|0.01|0.08|R|F|1992-03-09|1992-03-28|1992-03-16|DELIVER IN PERSON|REG AIR|ges cajole carefully regular request 14820|169352|9353|3|38|54011.30|0.05|0.07|R|F|1992-02-21|1992-02-27|1992-03-17|DELIVER IN PERSON|SHIP|ep slyly after the f 14821|7148|7149|1|28|29543.92|0.00|0.00|N|O|1998-10-03|1998-07-31|1998-10-10|NONE|TRUCK|old packages. furious 14821|135022|5023|2|39|41223.78|0.00|0.04|N|O|1998-07-05|1998-09-09|1998-07-12|COLLECT COD|FOB|requests sleep carefully among the fu 14821|109822|7353|3|22|40300.04|0.09|0.07|N|O|1998-10-10|1998-09-10|1998-10-13|NONE|FOB|le careful 14821|177307|9825|4|27|37376.10|0.06|0.06|N|O|1998-06-13|1998-08-31|1998-07-09|DELIVER IN PERSON|SHIP|posits dazzle blithely quickly express acco 14821|185290|7809|5|45|61888.05|0.02|0.06|N|O|1998-10-07|1998-07-30|1998-11-05|TAKE BACK RETURN|MAIL|instructions. blithely bold deposit 14821|155936|3482|6|39|77685.27|0.08|0.03|N|O|1998-08-14|1998-08-11|1998-08-27|DELIVER IN PERSON|MAIL|ely across the idly spec 14821|4414|4415|7|43|56691.63|0.01|0.03|N|O|1998-09-27|1998-07-17|1998-10-26|COLLECT COD|AIR|fluffily final deposits eat. furiousl 14822|183138|693|1|23|28085.99|0.06|0.04|N|O|1996-07-22|1996-06-05|1996-08-13|DELIVER IN PERSON|AIR|ironic dep 14822|199146|6704|2|29|36109.06|0.08|0.04|N|O|1996-04-21|1996-06-17|1996-05-11|COLLECT COD|TRUCK| fluffily silent multiplier 14822|38796|3803|3|34|58982.86|0.02|0.02|N|O|1996-06-05|1996-05-29|1996-07-05|COLLECT COD|SHIP|old accounts nag quickly ironic, bold req 14822|36742|6743|4|38|63792.12|0.06|0.04|N|O|1996-06-22|1996-05-26|1996-07-16|NONE|MAIL|e about the 14823|16733|1736|1|29|47842.17|0.08|0.04|A|F|1994-03-06|1994-04-12|1994-03-22|TAKE BACK RETURN|FOB|ly final ideas affix daringly across the sl 14823|30091|7601|2|35|35738.15|0.01|0.00|R|F|1994-05-21|1994-04-06|1994-05-25|NONE|TRUCK|al accounts across the 14823|129982|2495|3|31|62371.38|0.01|0.02|A|F|1994-02-22|1994-04-10|1994-03-20|COLLECT COD|SHIP|y bold theodolites-- f 14823|190826|827|4|47|90090.54|0.10|0.06|A|F|1994-04-22|1994-03-27|1994-05-22|NONE|MAIL|uctions x-ray slyly quic 14848|34473|6977|1|4|5629.88|0.00|0.06|N|O|1996-12-06|1996-12-09|1996-12-27|COLLECT COD|MAIL|e. furiously pending req 14848|186170|6171|2|38|47734.46|0.02|0.07|N|O|1996-11-16|1996-12-15|1996-11-17|TAKE BACK RETURN|AIR|ts above t 14848|150696|8242|3|11|19213.59|0.05|0.05|N|O|1997-01-11|1997-01-14|1997-01-25|DELIVER IN PERSON|SHIP|gular packages. quickly regul 14848|140215|7758|4|5|6276.05|0.10|0.01|N|O|1996-10-26|1997-01-01|1996-11-08|COLLECT COD|AIR|on the special instructions wake slyly alon 14848|185114|7633|5|7|8393.77|0.08|0.05|N|O|1996-10-27|1997-01-03|1996-11-01|COLLECT COD|SHIP|ely regular tithes; regular 14848|8441|942|6|6|8096.64|0.01|0.06|N|O|1997-01-15|1996-11-30|1997-01-28|NONE|AIR|among the express pinto beans. fluffily 14849|58262|3273|1|3|3660.78|0.08|0.08|N|O|1997-03-03|1997-04-07|1997-03-11|NONE|AIR|bout the stea 14849|6042|8543|2|27|25597.08|0.07|0.07|N|O|1997-06-03|1997-05-02|1997-06-20|COLLECT COD|RAIL| shall boost? 
quickly special theo 14849|143300|3301|3|2|2686.60|0.07|0.00|N|O|1997-03-20|1997-04-27|1997-04-05|NONE|RAIL|s maintain bli 14849|42412|7421|4|25|33860.25|0.04|0.04|N|O|1997-04-17|1997-03-18|1997-05-10|DELIVER IN PERSON|REG AIR|pinto bean 14849|4389|6890|5|19|24574.22|0.10|0.04|N|O|1997-02-22|1997-05-04|1997-03-11|NONE|AIR|der blithely 14850|177439|7440|1|8|12131.44|0.06|0.07|N|O|1998-03-27|1998-01-08|1998-04-16|TAKE BACK RETURN|REG AIR| after the dolp 14850|5886|8387|2|41|73467.08|0.01|0.03|N|O|1998-02-18|1998-01-04|1998-03-10|COLLECT COD|TRUCK|lar, regular 14850|114564|7076|3|28|44199.68|0.01|0.03|N|O|1998-04-02|1998-02-01|1998-04-29|COLLECT COD|MAIL|s impress never about the slyly express 14850|67055|2068|4|15|15330.75|0.03|0.07|N|O|1998-02-19|1998-01-03|1998-02-27|DELIVER IN PERSON|REG AIR|osits use busily regular, regu 14850|177559|5111|5|47|76917.85|0.07|0.00|N|O|1998-03-22|1998-01-12|1998-04-17|NONE|AIR|requests are furiously above the furiou 14850|32612|122|6|40|61784.40|0.07|0.01|N|O|1997-12-07|1998-02-05|1998-01-06|NONE|FOB|he express instructions. furiously 14851|128017|530|1|23|24035.23|0.09|0.04|R|F|1992-06-06|1992-07-18|1992-06-07|DELIVER IN PERSON|TRUCK|he pending ideas. quickly final pi 14851|166102|6103|2|25|29202.50|0.06|0.03|R|F|1992-08-10|1992-06-27|1992-08-28|DELIVER IN PERSON|RAIL|eodolites unwind blithely according to th 14851|75733|5734|3|23|39300.79|0.08|0.02|R|F|1992-07-27|1992-06-25|1992-07-28|DELIVER IN PERSON|FOB| asymptotes wake. ironic accounts boost ir 14851|31669|4173|4|29|46419.14|0.04|0.05|R|F|1992-08-03|1992-07-12|1992-08-16|NONE|REG AIR|ajole furiously furiously regular acc 14851|58285|3296|5|9|11189.52|0.10|0.02|R|F|1992-08-05|1992-07-13|1992-08-29|DELIVER IN PERSON|SHIP|ously special foxe 14851|187820|2857|6|49|93483.18|0.09|0.04|A|F|1992-07-29|1992-07-26|1992-08-20|NONE|RAIL|cies lose blithely furiously regu 14852|25634|3141|1|22|34311.86|0.03|0.03|N|O|1996-03-26|1996-02-08|1996-04-25|DELIVER IN PERSON|FOB|al pinto beans according 14852|16474|8976|2|9|12514.23|0.05|0.01|N|O|1996-01-12|1996-02-16|1996-01-25|TAKE BACK RETURN|RAIL|hely final deposits a 14853|121211|1212|1|16|19715.36|0.07|0.00|N|O|1996-11-19|1996-12-02|1996-11-26|COLLECT COD|AIR|ld packages acros 14853|192401|7440|2|46|68696.40|0.03|0.08|N|O|1996-12-25|1996-11-02|1996-12-26|DELIVER IN PERSON|MAIL|among the slyly re 14853|82658|183|3|18|29531.70|0.00|0.00|N|O|1996-12-01|1996-11-20|1996-12-08|TAKE BACK RETURN|RAIL|nts. slyly blithe 14853|18576|8577|4|16|23913.12|0.02|0.01|N|O|1996-11-18|1996-10-10|1996-12-04|DELIVER IN PERSON|TRUCK|etly accounts. e 14854|169774|9775|1|20|36875.40|0.10|0.04|A|F|1993-04-30|1993-06-30|1993-05-08|DELIVER IN PERSON|REG AIR|. blithely final instructions ha 14854|183507|1062|2|3|4771.50|0.00|0.07|R|F|1993-08-27|1993-07-26|1993-09-09|DELIVER IN PERSON|AIR| ironic pinto beans use blithely around 14854|186680|4235|3|9|15900.12|0.00|0.00|R|F|1993-06-17|1993-06-21|1993-07-14|COLLECT COD|FOB|s dolphins. ironic 14854|119332|9333|4|23|31080.59|0.04|0.00|R|F|1993-05-24|1993-07-09|1993-06-14|DELIVER IN PERSON|TRUCK|is integrate quickly dep 14855|93981|1509|1|2|3949.96|0.09|0.06|A|F|1993-08-13|1993-06-16|1993-08-28|COLLECT COD|RAIL|he blithely fin 14855|127948|461|2|28|55326.32|0.04|0.01|R|F|1993-08-04|1993-06-11|1993-08-20|DELIVER IN PERSON|AIR|ronic packages. 
slyly 14855|18339|8340|3|49|61609.17|0.01|0.01|R|F|1993-06-30|1993-06-28|1993-07-06|DELIVER IN PERSON|TRUCK|ckages cajole furiously carefull 14855|11873|4375|4|43|76749.41|0.06|0.06|A|F|1993-05-16|1993-06-13|1993-06-03|NONE|SHIP|t the slyly 14855|142111|9654|5|7|8071.77|0.08|0.08|A|F|1993-08-02|1993-07-22|1993-08-14|DELIVER IN PERSON|REG AIR|uickly regular pinto be 14880|179008|9009|1|26|28262.00|0.02|0.03|N|O|1998-02-25|1998-02-18|1998-03-22|DELIVER IN PERSON|RAIL|nts detect. final pinto beans are. furio 14880|172714|2715|2|42|75041.82|0.04|0.03|N|O|1997-12-11|1998-01-14|1997-12-24|COLLECT COD|RAIL|bove the final, spec 14880|197606|126|3|23|39182.80|0.09|0.01|N|O|1998-01-10|1998-01-24|1998-02-08|TAKE BACK RETURN|MAIL|nag furiously. fu 14880|147307|9822|4|17|23023.10|0.06|0.01|N|O|1998-02-07|1998-01-04|1998-02-08|NONE|FOB|out the ideas. foxes cajole carefully 14880|180186|2705|5|13|16460.34|0.01|0.03|N|O|1998-01-25|1998-02-12|1998-02-01|NONE|REG AIR|dencies. bli 14880|177291|4843|6|8|10946.32|0.10|0.06|N|O|1998-01-17|1998-01-20|1998-02-12|NONE|REG AIR|-- final instructi 14881|179247|6799|1|35|46418.40|0.07|0.03|N|O|1996-10-30|1996-12-14|1996-11-25|NONE|AIR|regular accoun 14881|3657|1158|2|35|54622.75|0.04|0.00|N|O|1996-11-18|1997-01-08|1996-12-07|NONE|MAIL|nal deposits alongside of the 14881|61853|1854|3|23|41741.55|0.00|0.08|N|O|1996-11-20|1996-12-21|1996-12-03|TAKE BACK RETURN|REG AIR|cing excuses sleep slyly 14882|139468|7008|1|35|52761.10|0.08|0.01|N|O|1997-05-26|1997-03-22|1997-06-25|DELIVER IN PERSON|TRUCK|r accounts. the 14882|32167|7174|2|41|45065.56|0.08|0.03|N|O|1997-03-18|1997-04-05|1997-03-21|TAKE BACK RETURN|MAIL|s. account 14883|180536|3055|1|37|59811.61|0.02|0.03|N|O|1995-09-05|1995-06-07|1995-09-11|DELIVER IN PERSON|MAIL|into beans wake bl 14883|54835|7341|2|23|41166.09|0.00|0.07|A|F|1995-05-24|1995-07-16|1995-05-31|NONE|SHIP|ts cajole permanently 14883|7868|7869|3|9|15982.74|0.01|0.04|N|O|1995-06-20|1995-07-07|1995-07-17|COLLECT COD|FOB|ong the carefully regul 14883|33615|8622|4|9|13937.49|0.10|0.00|N|O|1995-07-16|1995-07-11|1995-08-06|COLLECT COD|AIR|ccounts. express, unusual foxes wa 14883|166991|6992|5|8|16463.92|0.04|0.04|N|O|1995-07-10|1995-07-31|1995-07-12|NONE|MAIL|g packages. furiously unusual packages s 14883|8118|8119|6|18|18469.98|0.02|0.03|N|O|1995-07-16|1995-06-16|1995-07-20|COLLECT COD|FOB|its. permanentl 14884|28731|1234|1|37|61410.01|0.10|0.00|N|O|1995-11-26|1995-12-03|1995-12-09|DELIVER IN PERSON|TRUCK| quickly slyly final theodolites. qu 14884|67955|462|2|18|34613.10|0.08|0.08|N|O|1995-10-05|1995-11-06|1995-10-17|COLLECT COD|REG AIR|y unusual accounts 14884|107456|2477|3|42|61464.90|0.03|0.01|N|O|1995-12-09|1995-12-24|1995-12-19|TAKE BACK RETURN|MAIL|regular epitaph 14884|654|655|4|17|26429.05|0.00|0.05|N|O|1995-11-26|1995-10-29|1995-12-19|NONE|TRUCK| cajole furiously regular, bol 14884|177747|7748|5|30|54742.20|0.08|0.08|N|O|1995-10-27|1995-12-13|1995-11-04|DELIVER IN PERSON|TRUCK|press dolphins. final excuses about 14884|178070|8071|6|34|39034.38|0.08|0.07|N|O|1995-11-19|1995-10-28|1995-11-29|TAKE BACK RETURN|MAIL|. 
slyly express account 14884|19078|4081|7|42|41876.94|0.01|0.05|N|O|1996-01-11|1995-12-26|1996-02-02|COLLECT COD|FOB|luffily pending dolphins-- fi 14885|36154|3664|1|15|16352.25|0.09|0.02|N|O|1997-11-04|1997-11-16|1997-11-10|TAKE BACK RETURN|RAIL| asymptotes across the 14885|39103|9104|2|28|29178.80|0.07|0.06|N|O|1998-01-15|1997-11-10|1998-02-09|TAKE BACK RETURN|FOB|above the carefully regular 14885|198838|3877|3|2|3873.66|0.00|0.08|N|O|1997-11-21|1997-10-27|1997-12-09|NONE|FOB|e furiously express ideas. furiou 14885|67242|9749|4|37|44741.88|0.09|0.08|N|O|1997-12-23|1997-11-16|1998-01-08|DELIVER IN PERSON|AIR|uests. fluffily pending accounts accord 14885|134174|6688|5|40|48326.80|0.05|0.06|N|O|1997-10-17|1997-12-11|1997-11-15|NONE|RAIL|e fluffily even packages use blithel 14885|47626|5139|6|30|47208.60|0.04|0.01|N|O|1997-12-04|1997-11-17|1997-12-09|COLLECT COD|SHIP| the requests hang slyly a 14885|45991|8496|7|42|81353.58|0.02|0.01|N|O|1997-10-19|1997-11-08|1997-11-14|TAKE BACK RETURN|AIR|ng to the regular, ironic pinto beans. bo 14886|160915|5948|1|50|98795.50|0.02|0.01|A|F|1994-08-08|1994-08-06|1994-08-20|COLLECT COD|AIR| to the packages. 14886|95628|647|2|5|8118.10|0.04|0.08|A|F|1994-07-04|1994-07-24|1994-07-07|COLLECT COD|TRUCK| requests. ironic instructio 14887|148649|1164|1|23|39045.72|0.05|0.04|N|O|1996-07-08|1996-06-05|1996-07-10|NONE|SHIP|unusual grouches. blithely ironic 14887|133470|8497|2|34|51117.98|0.08|0.06|N|O|1996-07-16|1996-05-11|1996-08-04|DELIVER IN PERSON|REG AIR|are quickly alongside of th 14887|26457|8960|3|14|19368.30|0.02|0.05|N|O|1996-07-16|1996-06-16|1996-08-09|COLLECT COD|RAIL|the quickly final excuses? fluffily even 14887|61414|3921|4|23|31634.43|0.06|0.08|N|O|1996-04-02|1996-05-22|1996-04-29|NONE|MAIL|unts above the 14912|35275|2785|1|14|16943.78|0.02|0.02|R|F|1993-11-16|1994-01-24|1993-12-09|COLLECT COD|AIR|the pinto beans-- express deposi 14912|56870|1881|2|7|12788.09|0.02|0.05|R|F|1993-12-20|1993-12-28|1993-12-22|DELIVER IN PERSON|MAIL| even ideas. unusual deposits sleep slyly 14913|169423|1940|1|27|40295.34|0.10|0.06|A|F|1994-03-14|1994-05-03|1994-04-09|NONE|FOB|pinto beans sleep carefull 14913|83528|8545|2|12|18138.24|0.05|0.01|R|F|1994-05-13|1994-04-20|1994-06-09|NONE|MAIL|ticingly regular courts boost. care 14913|41248|6257|3|10|11892.40|0.03|0.04|A|F|1994-04-23|1994-04-30|1994-05-22|NONE|SHIP| even, silent deposits. bold, iro 14913|46748|9253|4|44|74568.56|0.00|0.06|A|F|1994-03-31|1994-03-31|1994-04-05|TAKE BACK RETURN|AIR|cial instructions wake 14913|79152|9153|5|25|28278.75|0.10|0.05|A|F|1994-03-11|1994-03-28|1994-03-31|TAKE BACK RETURN|TRUCK| unusual, express ideas sleep slyly. 14914|86646|4171|1|20|32652.80|0.08|0.08|N|O|1998-05-26|1998-05-05|1998-06-02|NONE|SHIP|ests alongsid 14914|51355|1356|2|38|49641.30|0.04|0.02|N|O|1998-03-17|1998-04-26|1998-04-04|TAKE BACK RETURN|RAIL|even pinto beans against the quickly reg 14914|109131|4152|3|20|22802.60|0.07|0.05|N|O|1998-04-03|1998-04-24|1998-04-10|TAKE BACK RETURN|MAIL|oxes. 
fluffily e 14914|160032|7581|4|19|20748.57|0.06|0.05|N|O|1998-05-28|1998-03-25|1998-06-22|NONE|MAIL|es are always against the ir 14914|49477|6990|5|24|34235.28|0.10|0.02|N|O|1998-02-17|1998-05-09|1998-03-04|TAKE BACK RETURN|MAIL|y permanent foxes b 14914|150197|7743|6|20|24943.80|0.10|0.01|N|O|1998-02-19|1998-04-29|1998-03-21|TAKE BACK RETURN|TRUCK|nding depos 14915|82338|2339|1|1|1320.33|0.06|0.06|N|O|1996-11-22|1996-11-25|1996-12-11|NONE|FOB|ual sheaves sleep 14915|37981|5491|2|45|86354.10|0.00|0.01|N|O|1997-01-08|1996-10-27|1997-01-22|NONE|SHIP| carefully express instructions. carefu 14916|20029|30|1|25|23725.50|0.04|0.07|A|F|1994-05-07|1994-04-20|1994-06-03|TAKE BACK RETURN|FOB|cajole. silent accounts nod iron 14916|13490|3491|2|43|60350.07|0.04|0.03|A|F|1994-06-26|1994-04-18|1994-07-25|DELIVER IN PERSON|REG AIR|ests doubt furiously. bold 14917|44468|9477|1|26|36723.96|0.08|0.01|N|O|1998-04-25|1998-03-14|1998-05-07|COLLECT COD|SHIP|nic theodolites. theodolites 14917|173011|5529|2|29|31436.29|0.05|0.03|N|O|1998-03-24|1998-02-20|1998-03-28|NONE|FOB|nusual theodolit 14917|189874|4911|3|48|94265.76|0.08|0.07|N|O|1998-03-05|1998-04-11|1998-03-13|COLLECT COD|TRUCK|g theodolites affix slyly fina 14917|8532|1033|4|40|57621.20|0.04|0.06|N|O|1998-02-20|1998-03-13|1998-03-14|COLLECT COD|TRUCK|g bravely. fu 14917|191897|1898|5|21|41766.69|0.04|0.05|N|O|1998-02-18|1998-04-01|1998-03-02|DELIVER IN PERSON|AIR|as above the regular excuses ha 14917|6142|8643|6|43|45070.02|0.10|0.03|N|O|1998-05-11|1998-04-07|1998-05-12|DELIVER IN PERSON|AIR|e. final, pending pint 14918|27104|2109|1|33|34026.30|0.04|0.05|N|O|1997-09-04|1997-08-30|1997-09-11|TAKE BACK RETURN|FOB|he blithely 14918|129359|9360|2|24|33320.40|0.00|0.02|N|O|1997-06-20|1997-08-11|1997-07-10|COLLECT COD|FOB| the pending, e 14919|52516|32|1|24|35244.24|0.00|0.07|N|O|1996-09-29|1996-11-09|1996-10-16|COLLECT COD|AIR|jole quickly blith 14944|173320|3321|1|50|69666.00|0.01|0.05|N|O|1997-11-30|1997-12-08|1997-12-11|COLLECT COD|RAIL|ges. slyly special requ 14944|173250|8285|2|21|27788.25|0.05|0.06|N|O|1998-01-11|1997-12-01|1998-01-26|TAKE BACK RETURN|AIR|uriously express platelets 14945|9317|9318|1|28|34336.68|0.05|0.06|N|O|1996-07-04|1996-06-09|1996-07-23|COLLECT COD|MAIL|o the regular, regular asymptotes. c 14945|187788|307|2|24|45018.72|0.10|0.03|N|O|1996-05-27|1996-05-05|1996-06-01|NONE|REG AIR|to the carefully eve 14945|145205|5206|3|38|47507.60|0.01|0.05|N|O|1996-04-29|1996-04-30|1996-05-20|TAKE BACK RETURN|SHIP|eposits. fluff 14945|54657|2173|4|2|3223.30|0.09|0.07|N|O|1996-07-03|1996-05-13|1996-07-16|COLLECT COD|FOB|tween the expres 14945|8458|8459|5|44|60123.80|0.08|0.03|N|O|1996-06-20|1996-05-03|1996-07-14|DELIVER IN PERSON|RAIL|ly around the furiously regular 14945|139229|6769|6|37|46924.14|0.05|0.00|N|O|1996-07-02|1996-05-15|1996-07-31|NONE|FOB|ironic asymptotes. 14946|5229|7730|1|38|43100.36|0.00|0.04|N|O|1996-12-13|1997-02-04|1997-01-08|NONE|MAIL|pon the blithely final courts. bl 14946|79479|4494|2|37|53963.39|0.01|0.01|N|O|1996-11-27|1997-02-01|1996-11-29|COLLECT COD|AIR|sleep furiously after the furio 14947|31184|3688|1|14|15612.52|0.09|0.02|N|O|1995-11-05|1995-09-25|1995-11-27|TAKE BACK RETURN|RAIL|bout the even, iro 14947|107098|7099|2|29|32047.61|0.04|0.06|N|O|1995-11-08|1995-08-30|1995-12-03|TAKE BACK RETURN|FOB|inal sentiments t citus-7.0.3/src/test/regress/data/nation.data000066400000000000000000000042271317107136600211360ustar00rootroot000000000000000|ALGERIA|0| haggle. 
carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu 6|FRANCE|3|refully final requests. regular, ironi 7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco 8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun 9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull 10|IRAN|4|efully alongside of the slyly final dependencies. 11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula 12|JAPAN|2|ously. final, express gifts cajole a 13|JORDAN|4|ic deposits are blithely about the carefully regular pa 14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t 15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? 16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r 17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun 18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos 19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account 20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely 21|VIETNAM|2|hely enticingly express accounts. even, final 22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint 23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull 24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be citus-7.0.3/src/test/regress/data/orders.1.data000066400000000000000000004750241317107136600213120ustar00rootroot000000000000001|370|O|172799.49|1996-01-02|5-LOW|Clerk#000000951|0|nstructions sleep furiously among 2|781|O|38426.09|1996-12-01|1-URGENT|Clerk#000000880|0| foxes. pending accounts at the pending, silent asymptot 3|1234|F|205654.30|1993-10-14|5-LOW|Clerk#000000955|0|sly final accounts boost. carefully regular ideas cajole carefully. depos 4|1369|O|56000.91|1995-10-11|5-LOW|Clerk#000000124|0|sits. slyly regular warthogs cajole. regular, regular theodolites acro 5|445|F|105367.67|1994-07-30|5-LOW|Clerk#000000925|0|quickly. bold deposits sleep slyly. packages use slyly 6|557|F|45523.10|1992-02-21|4-NOT SPECIFIED|Clerk#000000058|0|ggle. special, final requests are against the furiously specia 7|392|O|271885.66|1996-01-10|2-HIGH|Clerk#000000470|0|ly special requests 32|1301|O|198665.57|1995-07-16|2-HIGH|Clerk#000000616|0|ise blithely bold, regular requests. quickly unusual dep 33|670|F|146567.24|1993-10-27|3-MEDIUM|Clerk#000000409|0|uriously. furiously final request 34|611|O|73315.48|1998-07-21|3-MEDIUM|Clerk#000000223|0|ly final packages. fluffily final deposits wake blithely ideas. spe 35|1276|O|194641.93|1995-10-23|4-NOT SPECIFIED|Clerk#000000259|0|zzle. 
carefully enticing deposits nag furio 36|1153|O|42011.04|1995-11-03|1-URGENT|Clerk#000000358|0| quick packages are blithely. slyly silent accounts wake qu 37|862|F|131896.49|1992-06-03|3-MEDIUM|Clerk#000000456|0|kly regular pinto beans. carefully unusual waters cajole never 38|1249|O|71553.08|1996-08-21|4-NOT SPECIFIED|Clerk#000000604|0|haggle blithely. furiously express ideas haggle blithely furiously regular re 39|818|O|326565.37|1996-09-20|3-MEDIUM|Clerk#000000659|0|ole express, ironic requests: ir 64|322|F|35831.73|1994-07-16|3-MEDIUM|Clerk#000000661|0|wake fluffily. sometimes ironic pinto beans about the dolphin 65|163|P|95469.44|1995-03-18|1-URGENT|Clerk#000000632|0|ular requests are blithely pending orbits-- even requests against the deposit 66|1292|F|104190.66|1994-01-20|5-LOW|Clerk#000000743|0|y pending requests integrate 67|568|O|182481.16|1996-12-19|4-NOT SPECIFIED|Clerk#000000547|0|symptotes haggle slyly around the furiously iron 68|286|O|301968.79|1998-04-18|3-MEDIUM|Clerk#000000440|0| pinto beans sleep carefully. blithely ironic deposits haggle furiously acro 69|845|F|204110.73|1994-06-04|4-NOT SPECIFIED|Clerk#000000330|0| depths atop the slyly thin deposits detect among the furiously silent accou 70|644|F|125705.32|1993-12-18|5-LOW|Clerk#000000322|0| carefully ironic request 71|34|O|260603.38|1998-01-24|4-NOT SPECIFIED|Clerk#000000271|0| express deposits along the blithely regul 96|1078|F|64364.30|1994-04-17|2-HIGH|Clerk#000000395|0|oost furiously. pinto 97|211|F|100572.55|1993-01-29|3-MEDIUM|Clerk#000000547|0|hang blithely along the regular accounts. furiously even ideas after the 98|1045|F|71721.40|1994-09-25|1-URGENT|Clerk#000000448|0|c asymptotes. quickly regular packages should have to nag re 99|890|F|108594.87|1994-03-13|4-NOT SPECIFIED|Clerk#000000973|0|e carefully ironic packages. pending 100|1471|O|198978.27|1998-02-28|4-NOT SPECIFIED|Clerk#000000577|0|heodolites detect slyly alongside of the ent 101|280|O|118448.39|1996-03-17|3-MEDIUM|Clerk#000000419|0|ding accounts above the slyly final asymptote 102|8|O|184806.58|1997-05-09|2-HIGH|Clerk#000000596|0| slyly according to the asymptotes. carefully final packages integrate furious 103|292|O|118745.16|1996-06-20|4-NOT SPECIFIED|Clerk#000000090|0|ges. carefully unusual instructions haggle quickly regular f 128|740|F|34997.04|1992-06-15|1-URGENT|Clerk#000000385|0|ns integrate fluffily. ironic asymptotes after the regular excuses nag around 129|712|F|254281.41|1992-11-19|5-LOW|Clerk#000000859|0|ing tithes. carefully pending deposits boost about the silently express 130|370|F|140213.54|1992-05-08|2-HIGH|Clerk#000000036|0|le slyly unusual, regular packages? express deposits det 131|928|F|140726.47|1994-06-08|3-MEDIUM|Clerk#000000625|0|after the fluffily special foxes integrate s 132|265|F|133485.89|1993-06-11|3-MEDIUM|Clerk#000000488|0|sits are daringly accounts. carefully regular foxes sleep slyly about the 133|440|O|95971.06|1997-11-29|1-URGENT|Clerk#000000738|0|usly final asymptotes 134|62|F|208201.46|1992-05-01|4-NOT SPECIFIED|Clerk#000000711|0|lar theodolites boos 135|605|O|230472.84|1995-10-21|4-NOT SPECIFIED|Clerk#000000804|0|l platelets use according t 160|826|O|114742.32|1996-12-19|4-NOT SPECIFIED|Clerk#000000342|0|thely special sauternes wake slyly of t 161|167|F|17668.60|1994-08-31|2-HIGH|Clerk#000000322|0|carefully! special instructions sin 162|142|O|3553.15|1995-05-08|3-MEDIUM|Clerk#000000378|0|nts hinder fluffily ironic instructions. 
express, express excuses 163|878|O|177809.13|1997-09-05|3-MEDIUM|Clerk#000000379|0|y final packages. final foxes since the quickly even 164|8|F|250417.20|1992-10-21|5-LOW|Clerk#000000209|0|cajole ironic courts. slyly final ideas are slyly. blithely final Tiresias sub 165|274|F|193302.35|1993-01-30|4-NOT SPECIFIED|Clerk#000000292|0|across the blithely regular accounts. bold 166|1079|O|158207.39|1995-09-12|2-HIGH|Clerk#000000440|0|lets. ironic, bold asymptotes kindle 167|1195|F|64017.85|1993-01-04|4-NOT SPECIFIED|Clerk#000000731|0|s nag furiously bold excuses. fluffily iron 192|826|O|194637.57|1997-11-25|5-LOW|Clerk#000000483|0|y unusual platelets among the final instructions integrate rut 193|791|F|80834.26|1993-08-08|1-URGENT|Clerk#000000025|0|the furiously final pin 194|619|F|154284.73|1992-04-05|3-MEDIUM|Clerk#000000352|0|egular requests haggle slyly regular, regular pinto beans. asymptote 195|1355|F|216638.92|1993-12-28|3-MEDIUM|Clerk#000000216|0|old forges are furiously sheaves. slyly fi 196|649|F|38660.64|1993-03-17|2-HIGH|Clerk#000000988|0|beans boost at the foxes. silent foxes 197|326|P|155247.48|1995-04-07|2-HIGH|Clerk#000000969|0|solve quickly about the even braids. carefully express deposits affix care 198|1103|O|149551.63|1998-01-02|4-NOT SPECIFIED|Clerk#000000331|0|its. carefully ironic requests sleep. furiously express fox 199|530|O|95867.70|1996-03-07|2-HIGH|Clerk#000000489|0|g theodolites. special packag 224|25|F|234050.44|1994-06-18|4-NOT SPECIFIED|Clerk#000000642|0|r the quickly thin courts. carefully 225|331|P|226028.98|1995-05-25|1-URGENT|Clerk#000000177|0|s. blithely ironic accounts wake quickly fluffily special acc 226|1276|F|256459.40|1993-03-10|2-HIGH|Clerk#000000756|0|s are carefully at the blithely ironic acc 227|100|O|69020.68|1995-11-10|5-LOW|Clerk#000000919|0| express instructions. slyly regul 228|442|F|2744.06|1993-02-25|1-URGENT|Clerk#000000562|0|es was slyly among the regular foxes. blithely regular dependenci 229|1118|F|195619.74|1993-12-29|1-URGENT|Clerk#000000628|0|he fluffily even instructions. furiously i 230|1027|F|147711.01|1993-10-27|1-URGENT|Clerk#000000520|0|odolites. carefully quick requ 231|910|F|234383.86|1994-09-29|2-HIGH|Clerk#000000446|0| packages haggle slyly after the carefully ironic instruct 256|1249|F|132718.67|1993-10-19|4-NOT SPECIFIED|Clerk#000000834|0|he fluffily final ideas might are final accounts. carefully f 257|1228|O|9255.12|1998-03-28|3-MEDIUM|Clerk#000000680|0|ts against the sly warhorses cajole slyly accounts 258|419|F|259466.78|1993-12-29|1-URGENT|Clerk#000000167|0|dencies. blithely quick packages cajole. ruthlessly final accounts 259|433|F|110611.59|1993-09-29|4-NOT SPECIFIED|Clerk#000000601|0|ages doubt blithely against the final foxes. carefully express deposits dazzle 260|1048|O|268084.69|1996-12-10|3-MEDIUM|Clerk#000000960|0|lently regular pinto beans sleep after the slyly e 261|461|F|278279.30|1993-06-29|3-MEDIUM|Clerk#000000310|0|ully fluffily brave instructions. furiousl 262|304|O|173401.63|1995-11-25|4-NOT SPECIFIED|Clerk#000000551|0|l packages. blithely final pinto beans use carefu 263|1162|F|104961.32|1994-05-17|2-HIGH|Clerk#000000088|0| pending instructions. blithely un 288|71|O|239366.68|1997-02-21|1-URGENT|Clerk#000000109|0|uriously final requests. even, final ideas det 289|1039|O|174624.55|1997-02-10|3-MEDIUM|Clerk#000000103|0|sily. slyly special excuse 290|1180|F|67636.54|1994-01-01|4-NOT SPECIFIED|Clerk#000000735|0|efully dogged deposits. 
furiou 291|1411|F|88375.89|1994-03-13|1-URGENT|Clerk#000000923|0|dolites. carefully regular pinto beans cajol 292|223|F|54152.77|1992-01-13|2-HIGH|Clerk#000000193|0|g pinto beans will have to sleep f 293|301|F|46128.56|1992-10-02|2-HIGH|Clerk#000000629|0|re bold, ironic deposits. platelets c 294|505|F|46889.54|1993-07-16|3-MEDIUM|Clerk#000000499|0|kly according to the frays. final dolphins affix quickly 295|190|F|148569.49|1994-09-29|2-HIGH|Clerk#000000155|0| unusual pinto beans play. regular ideas haggle 320|4|O|50202.60|1997-11-21|2-HIGH|Clerk#000000573|0|ar foxes nag blithely 321|1226|F|73024.50|1993-03-21|3-MEDIUM|Clerk#000000289|0|equests run. blithely final dependencies after the deposits wake caref 322|1336|F|165992.05|1992-03-19|1-URGENT|Clerk#000000158|0|fully across the slyly bold packages. packages against the quickly regular i 323|392|F|121127.17|1994-03-26|1-URGENT|Clerk#000000959|0|arefully pending foxes sleep blithely. slyly express accoun 324|1052|F|46327.90|1992-03-20|1-URGENT|Clerk#000000352|0| about the ironic, regular deposits run blithely against the excuses 325|401|F|94638.59|1993-10-17|5-LOW|Clerk#000000844|0|ly sometimes pending pa 326|760|O|325448.68|1995-06-04|2-HIGH|Clerk#000000466|0| requests. furiously ironic asymptotes mold carefully alongside of the blit 327|1447|P|32302.12|1995-04-17|5-LOW|Clerk#000000992|0|ng the slyly final courts. slyly even escapades eat 352|1066|F|25542.02|1994-03-08|2-HIGH|Clerk#000000932|0|ke slyly bold pinto beans. blithely regular accounts against the spe 353|19|F|224983.69|1993-12-31|5-LOW|Clerk#000000449|0| quiet ideas sleep. even instructions cajole slyly. silently spe 354|1384|O|231311.22|1996-03-14|2-HIGH|Clerk#000000511|0|ly regular ideas wake across the slyly silent ideas. final deposits eat b 355|701|F|103949.82|1994-06-14|5-LOW|Clerk#000000532|0|s. sometimes regular requests cajole. regular, pending accounts a 356|1469|F|189160.02|1994-06-30|4-NOT SPECIFIED|Clerk#000000944|0|as wake along the bold accounts. even, 357|604|O|138936.83|1996-10-09|2-HIGH|Clerk#000000301|0|e blithely about the express, final accounts. quickl 358|23|F|362024.17|1993-09-20|2-HIGH|Clerk#000000392|0|l, silent instructions are slyly. silently even de 359|776|F|214770.97|1994-12-19|3-MEDIUM|Clerk#000000934|0|n dolphins. special courts above the carefully ironic requests use 384|1132|F|191275.12|1992-03-03|5-LOW|Clerk#000000206|0|, even accounts use furiously packages. slyly ironic pla 385|331|O|75866.47|1996-03-22|5-LOW|Clerk#000000600|0|hless accounts unwind bold pain 386|602|F|119718.02|1995-01-25|2-HIGH|Clerk#000000648|0| haggle quickly. stealthily bold asymptotes haggle among the furiously even re 387|34|O|197839.44|1997-01-26|4-NOT SPECIFIED|Clerk#000000768|0| are carefully among the quickly even deposits. furiously silent req 388|448|F|161560.04|1992-12-16|4-NOT SPECIFIED|Clerk#000000356|0|ar foxes above the furiously ironic deposits nag slyly final reque 389|1270|F|3266.69|1994-02-17|2-HIGH|Clerk#000000062|0|ing to the regular asymptotes. final, pending foxes about the blithely sil 390|1027|O|232256.36|1998-04-07|5-LOW|Clerk#000000404|0|xpress asymptotes use among the regular, final pinto b 391|1103|F|14517.91|1994-11-17|2-HIGH|Clerk#000000256|0|orges thrash fluffil 416|403|F|106818.50|1993-09-27|5-LOW|Clerk#000000294|0| the accounts. fluffily bold depo 417|547|F|132531.73|1994-02-06|3-MEDIUM|Clerk#000000468|0|ironic, even packages. 
thinly unusual accounts sleep along the slyly unusual 418|949|P|39431.46|1995-04-13|4-NOT SPECIFIED|Clerk#000000643|0|. furiously ironic instruc 419|1163|O|159079.22|1996-10-01|3-MEDIUM|Clerk#000000376|0|osits. blithely pending theodolites boost carefully 420|902|O|269064.47|1995-10-31|4-NOT SPECIFIED|Clerk#000000756|0|leep carefully final excuses. fluffily pending requests unwind carefully above 421|392|F|1292.21|1992-02-22|5-LOW|Clerk#000000405|0|egular, even packages according to the final, un 422|731|O|155533.71|1997-05-31|4-NOT SPECIFIED|Clerk#000000049|0|aggle carefully across the accounts. regular accounts eat fluffi 423|1034|O|31900.60|1996-06-01|1-URGENT|Clerk#000000674|0|quests. deposits cajole quickly. furiously bold accounts haggle q 448|1498|O|157247.56|1995-08-21|3-MEDIUM|Clerk#000000597|0| regular, express foxes use blithely. quic 449|958|O|55082.33|1995-07-20|2-HIGH|Clerk#000000841|0|. furiously regular theodolites affix blithely 450|475|P|213638.07|1995-03-05|4-NOT SPECIFIED|Clerk#000000293|0|d theodolites. boldly bold foxes since the pack 451|988|O|142756.81|1998-05-25|5-LOW|Clerk#000000048|0|nic pinto beans. theodolites poach carefully; 452|596|O|2072.79|1997-10-14|1-URGENT|Clerk#000000498|0|t, unusual instructions above the blithely bold pint 453|442|O|343004.49|1997-05-26|5-LOW|Clerk#000000504|0|ss foxes. furiously regular ideas sleep according to t 454|488|O|24543.95|1995-12-27|5-LOW|Clerk#000000890|0|dolites sleep carefully blithely regular deposits. quickly regul 455|121|O|190711.32|1996-12-04|1-URGENT|Clerk#000000796|0| about the final platelets. dependen 480|715|F|30644.49|1993-05-08|5-LOW|Clerk#000000004|0|ealthy pinto beans. fluffily regular requests along the special sheaves wake 481|304|F|160370.14|1992-10-08|2-HIGH|Clerk#000000230|0|ly final ideas. packages haggle fluffily 482|1252|O|197194.23|1996-03-26|1-URGENT|Clerk#000000295|0|ts. deposits wake: final acco 483|349|O|66194.38|1995-07-11|2-HIGH|Clerk#000000025|0|cross the carefully final e 484|544|O|331553.32|1997-01-03|3-MEDIUM|Clerk#000000545|0|grouches use. furiously bold accounts maintain. bold, regular deposits 485|1006|O|142389.70|1997-03-26|2-HIGH|Clerk#000000105|0| regular ideas nag thinly furiously s 486|509|O|286150.09|1996-03-11|4-NOT SPECIFIED|Clerk#000000803|0|riously dolphins. fluffily ironic requ 487|1079|F|88805.07|1992-08-18|1-URGENT|Clerk#000000086|0|ithely unusual courts eat accordi 512|631|P|183939.48|1995-05-20|5-LOW|Clerk#000000814|0|ding requests. carefully express theodolites was quickly. furious 513|607|O|78769.71|1995-05-01|2-HIGH|Clerk#000000522|0|regular packages. pinto beans cajole carefully against the even 514|749|O|123202.51|1996-04-04|2-HIGH|Clerk#000000094|0| cajole furiously. slyly final excuses cajole. slyly special instructions 515|1420|F|177231.12|1993-08-29|4-NOT SPECIFIED|Clerk#000000700|0|eposits are furiously furiously silent pinto beans. pending pack 516|440|O|13277.79|1998-04-21|2-HIGH|Clerk#000000305|0|lar, unusual platelets are carefully. even courts sleep bold, final pinto bea 517|94|O|109269.47|1997-04-07|5-LOW|Clerk#000000359|0|slyly pending deposits cajole quickly packages. furiou 518|1444|O|335285.37|1998-02-08|2-HIGH|Clerk#000000768|0| the carefully bold accounts. quickly regular excuses are 519|631|O|109395.60|1997-10-31|1-URGENT|Clerk#000000985|0|ains doze furiously against the f 544|934|F|58960.45|1993-02-17|2-HIGH|Clerk#000000145|0|the special, final accounts. dogged dolphins 545|632|O|35129.54|1995-11-07|2-HIGH|Clerk#000000537|0|as. 
blithely final hockey players about th 546|1433|O|26227.74|1996-11-01|2-HIGH|Clerk#000000041|0|osits sleep. slyly special dolphins about the q 547|983|O|137852.72|1996-06-22|3-MEDIUM|Clerk#000000976|0|ing accounts eat. carefully regular packa 548|1240|F|139094.89|1994-09-21|1-URGENT|Clerk#000000435|0|arefully express instru 549|1100|F|211787.30|1992-07-13|1-URGENT|Clerk#000000196|0|ideas alongside of 550|236|O|54818.45|1995-08-02|1-URGENT|Clerk#000000204|0|t requests. blithely 551|898|O|64301.40|1995-05-30|1-URGENT|Clerk#000000179|0|xpress accounts boost quic 576|296|O|24722.97|1997-05-13|3-MEDIUM|Clerk#000000955|0|l requests affix regular requests. final account 577|553|F|47860.53|1994-12-19|5-LOW|Clerk#000000154|0| deposits engage stealthil 578|926|O|103543.00|1997-01-10|5-LOW|Clerk#000000281|0|e blithely even packages. slyly pending platelets bes 579|671|O|146610.11|1998-03-11|2-HIGH|Clerk#000000862|0| regular instructions. blithely even p 580|593|O|144557.44|1997-07-05|2-HIGH|Clerk#000000314|0|tegrate fluffily regular accou 581|688|O|175985.28|1997-02-23|4-NOT SPECIFIED|Clerk#000000239|0| requests. even requests use slyly. blithely ironic 582|494|O|181813.20|1997-10-21|1-URGENT|Clerk#000000378|0|n pinto beans print a 583|472|O|154731.76|1997-03-19|3-MEDIUM|Clerk#000000792|0|efully express requests. a 608|260|O|100151.20|1996-02-28|3-MEDIUM|Clerk#000000995|0|nic waters wake slyly slyly expre 609|1252|F|33980.88|1994-06-01|3-MEDIUM|Clerk#000000348|0|- ironic gifts believe furiously ca 610|508|O|229411.94|1995-08-02|1-URGENT|Clerk#000000610|0|totes. ironic, unusual packag 611|1055|F|83388.31|1993-01-27|1-URGENT|Clerk#000000401|0|ounts detect furiously ac 612|815|F|232560.07|1992-10-21|3-MEDIUM|Clerk#000000759|0|boost quickly quickly final excuses. final foxes use bravely afte 613|1384|O|56355.92|1995-06-18|2-HIGH|Clerk#000000172|0|ts hinder among the deposits. fluffily ironic depos 614|1333|F|325942.94|1992-12-01|2-HIGH|Clerk#000000388|0| deposits! even, daring theodol 615|655|F|30990.93|1992-05-09|5-LOW|Clerk#000000388|0|t to promise asymptotes. packages haggle alongside of the fluffil 640|952|F|210229.36|1993-01-23|2-HIGH|Clerk#000000433|0|r, unusual accounts boost carefully final ideas. slyly silent theod 641|1330|F|175084.81|1993-08-30|5-LOW|Clerk#000000175|0|ents cajole furiously about the quickly silent pac 642|400|F|34647.34|1993-12-16|3-MEDIUM|Clerk#000000357|0| among the requests wake slyly alongside of th 643|578|P|261882.19|1995-03-25|2-HIGH|Clerk#000000354|0|g dependencies. regular accounts 644|74|F|267079.41|1992-05-01|1-URGENT|Clerk#000000550|0| blithely unusual platelets haggle ironic, special excuses. excuses unwi 645|1144|F|350110.21|1994-12-03|2-HIGH|Clerk#000000090|0|quickly daring theodolites across the regu 646|505|F|192105.59|1994-11-22|2-HIGH|Clerk#000000203|0|carefully even foxes. fina 647|1430|O|67696.52|1997-08-07|1-URGENT|Clerk#000000270|0|egular pearls. carefully express asymptotes are. even account 672|1081|F|128581.82|1994-04-14|5-LOW|Clerk#000000106|0|egular requests are furiously according to 673|793|F|34950.94|1994-03-10|1-URGENT|Clerk#000000448|0| special pinto beans use quickly furiously even depende 674|332|F|27390.00|1992-08-29|5-LOW|Clerk#000000448|0|ully special deposits. furiously final warhorses affix carefully. fluffily f 675|118|O|166843.13|1997-07-31|2-HIGH|Clerk#000000168|0|ffily between the careful 676|380|O|254171.66|1996-12-13|2-HIGH|Clerk#000000248|0|the final deposits. 
special, pending 677|1240|F|205917.22|1993-11-24|3-MEDIUM|Clerk#000000824|0|uriously special pinto beans cajole carefully. fi 678|1307|F|208791.37|1993-02-27|5-LOW|Clerk#000000530|0|. blithely final somas about the 679|485|O|14905.06|1995-12-15|2-HIGH|Clerk#000000853|0|tealthy, final pinto beans haggle slyly. pending platelets about the special, 704|844|O|85100.07|1996-11-21|3-MEDIUM|Clerk#000000682|0|blithely pending platelets wake alongside of the final, iron 705|428|O|117587.40|1997-02-13|4-NOT SPECIFIED|Clerk#000000294|0|ithely regular dependencies. express, even packages sleep slyly pending t 706|1474|O|40836.77|1995-09-09|1-URGENT|Clerk#000000448|0|g the packages. deposits caj 707|1162|F|80722.77|1994-11-20|3-MEDIUM|Clerk#000000199|0| ideas about the silent, bold deposits nag dolphins 708|316|O|127925.56|1998-07-03|3-MEDIUM|Clerk#000000101|0|lphins cajole about t 709|361|O|92115.73|1998-04-21|1-URGENT|Clerk#000000461|0|ons alongside of the carefully bold pinto bea 710|1312|F|309878.50|1993-01-02|5-LOW|Clerk#000000026|0| regular, regular requests boost. fluffily re 711|637|F|99553.27|1993-09-23|4-NOT SPECIFIED|Clerk#000000856|0|its. fluffily regular gifts are furi 736|464|O|200720.61|1998-06-21|5-LOW|Clerk#000000881|0|refully of the final pi 737|1198|F|20647.65|1992-04-26|5-LOW|Clerk#000000233|0|ake blithely express, ironic theodolites. blithely special accounts wa 738|208|F|177823.89|1993-03-02|4-NOT SPECIFIED|Clerk#000000669|0|ly even foxes. furiously regular accounts cajole ca 739|4|O|226008.80|1998-05-31|5-LOW|Clerk#000000900|0| against the slyly ironic packages nag slyly ironic 740|436|O|126713.42|1995-07-16|3-MEDIUM|Clerk#000000583|0|courts haggle furiously across the final, regul 741|1045|O|81912.56|1998-07-07|2-HIGH|Clerk#000000295|0|ic instructions. slyly express instructions solv 742|1030|F|258834.44|1994-12-23|5-LOW|Clerk#000000543|0|equests? slyly ironic dolphins boost carefully above the blithely 743|784|O|39284.49|1996-10-04|4-NOT SPECIFIED|Clerk#000000933|0|eans. furiously ironic deposits sleep carefully carefully qui 768|971|O|294534.22|1996-08-20|3-MEDIUM|Clerk#000000411|0|jole slyly ironic packages. slyly even idea 769|800|F|65622.27|1993-06-02|3-MEDIUM|Clerk#000000172|0|ggle furiously. ironic packages haggle slyly. bold platelets affix s 770|320|O|99554.29|1998-05-23|5-LOW|Clerk#000000572|0|heodolites. furiously special pinto beans cajole pac 771|446|O|151561.45|1995-06-17|1-URGENT|Clerk#000000105|0|s. furiously final instructions across the deposit 772|965|F|205442.15|1993-04-17|2-HIGH|Clerk#000000430|0|s boost blithely fluffily idle ideas? fluffily even pin 773|1319|F|190734.21|1993-09-26|3-MEDIUM|Clerk#000000307|0|tions are quickly accounts. accounts use bold, even pinto beans. gifts ag 774|796|O|214484.43|1995-12-04|1-URGENT|Clerk#000000883|0|tealthily even depths 775|1333|F|75392.93|1995-03-18|1-URGENT|Clerk#000000191|0|kly express requests. fluffily silent accounts poach furiously 800|560|O|144932.63|1998-07-14|2-HIGH|Clerk#000000213|0|y alongside of the pending packages? final platelets nag fluffily carefu 801|1178|F|186160.67|1992-02-18|1-URGENT|Clerk#000000186|0|iously from the furiously enticing reques 802|1367|F|192178.48|1995-01-05|1-URGENT|Clerk#000000516|0|posits. ironic, pending requests cajole. even theodol 803|145|O|49471.93|1997-04-29|5-LOW|Clerk#000000260|0|ic instructions. even deposits haggle furiously at the deposits-- regular de 804|500|F|136844.01|1993-03-12|3-MEDIUM|Clerk#000000931|0|s. 
blithely final foxes are about the packag 805|1261|O|145879.67|1995-07-05|4-NOT SPECIFIED|Clerk#000000856|0|y according to the fluffily 806|1309|O|39072.30|1996-06-20|2-HIGH|Clerk#000000240|0| the ironic packages wake carefully fina 807|1436|F|291066.38|1993-11-24|3-MEDIUM|Clerk#000000012|0|refully special tithes. blithely regular accoun 832|290|F|75549.27|1992-04-19|5-LOW|Clerk#000000495|0|xes. bravely regular packages sleep up the furiously bold accou 833|553|F|53948.73|1994-02-13|3-MEDIUM|Clerk#000000437|0|ts haggle quickly across the slyl 834|428|F|57631.12|1994-05-23|3-MEDIUM|Clerk#000000805|0| sleep. quickly even foxes are boldly. slyly express requests use slyly 835|649|O|79671.35|1995-10-08|4-NOT SPECIFIED|Clerk#000000416|0|s about the carefully special foxes haggle quickly about the 836|688|O|103041.70|1996-11-25|4-NOT SPECIFIED|Clerk#000000729|0|ely bold excuses sleep regular ideas. furiously unusual ideas wake furiou 837|1159|F|99107.47|1994-06-15|4-NOT SPECIFIED|Clerk#000000563|0|kages sleep slyly above the ironic, final orbits 838|164|O|119122.55|1998-01-29|5-LOW|Clerk#000000213|0| slyly around the slyly even 839|280|O|109349.68|1995-08-08|1-URGENT|Clerk#000000951|0|the carefully even platelets. furiously unusual fo 864|1378|O|110290.44|1997-08-17|1-URGENT|Clerk#000000036|0|ly after the slyly regular deposits. express, regular asymptotes nag ca 865|26|F|112126.26|1993-05-04|3-MEDIUM|Clerk#000000337|0|. special packages wake after the carefully final accounts. express pinto be 866|383|F|5793.01|1992-11-28|3-MEDIUM|Clerk#000000718|0|ins after the even, even accounts nod blithel 867|253|F|9271.15|1993-11-16|3-MEDIUM|Clerk#000000877|0|pades nag quickly final, 868|1036|F|157914.01|1992-06-09|4-NOT SPECIFIED|Clerk#000000782|0|onic theodolites print carefully. blithely dogge 869|1348|O|88458.72|1997-01-12|2-HIGH|Clerk#000000245|0|ar sheaves are slowly. slyly even attainments boost theodolites. furiously 870|322|F|60182.94|1993-06-20|4-NOT SPECIFIED|Clerk#000000123|0|blithely ironic ideas nod. sly, r 871|145|O|265450.06|1995-11-15|5-LOW|Clerk#000000882|0|oss the ironic theodolites. 896|17|F|248402.10|1993-03-09|1-URGENT|Clerk#000000187|0|inal packages eat blithely according to the warhorses. furiously quiet de 897|490|P|88281.28|1995-03-20|1-URGENT|Clerk#000000316|0| wake quickly against 898|536|F|159708.26|1993-06-03|2-HIGH|Clerk#000000611|0|. unusual pinto beans haggle quickly across 899|1085|O|165248.53|1998-04-08|5-LOW|Clerk#000000575|0|rts engage carefully final theodolites. 900|460|F|173555.09|1994-10-01|4-NOT SPECIFIED|Clerk#000000060|0| fluffily express deposits nag furiousl 901|113|O|106041.48|1998-07-21|4-NOT SPECIFIED|Clerk#000000929|0|lyly even foxes are furious, silent requests. requests about the quickly 902|86|F|49777.81|1994-07-27|4-NOT SPECIFIED|Clerk#000000811|0|yly final requests over the furiously regula 903|109|O|140726.26|1995-07-07|4-NOT SPECIFIED|Clerk#000000793|0|e slyly about the final pl 928|658|F|315638.02|1995-03-02|5-LOW|Clerk#000000450|0|ithely express pinto beans. 929|827|F|149145.44|1992-10-02|2-HIGH|Clerk#000000160|0|its. furiously even foxes affix carefully finally silent accounts. express req 930|1310|F|285619.42|1994-12-17|1-URGENT|Clerk#000000004|0| accounts nag slyly. ironic, ironic accounts wake blithel 931|1030|F|170066.75|1992-12-07|1-URGENT|Clerk#000000881|0|ss packages haggle furiously express, regular deposits. 
even, e 932|404|O|57043.21|1997-05-16|2-HIGH|Clerk#000000218|0|ly express instructions boost furiously reg 933|955|F|106503.56|1992-08-05|4-NOT SPECIFIED|Clerk#000000752|0|ial courts wake permanently against the furiously regular ideas. unusual 934|514|O|18262.86|1996-07-03|1-URGENT|Clerk#000000229|0|ts integrate carefully. sly, regular deposits af 935|493|O|131247.01|1997-09-24|5-LOW|Clerk#000000180|0|iously final deposits cajole. blithely even packages 960|347|F|84356.33|1994-09-21|3-MEDIUM|Clerk#000000120|0|regular accounts. requests 961|551|P|248080.06|1995-06-04|4-NOT SPECIFIED|Clerk#000000720|0|ons nag furiously among the quickl 962|353|F|144898.54|1994-05-06|5-LOW|Clerk#000000463|0|ments nag deposits. fluffily ironic a 963|253|F|98613.54|1994-05-26|3-MEDIUM|Clerk#000000497|0|uses haggle carefully. slyly even dependencies after the packages ha 964|752|O|207649.97|1995-05-20|3-MEDIUM|Clerk#000000657|0|print blithely ironic, careful theodolit 965|685|P|44648.62|1995-05-15|5-LOW|Clerk#000000218|0|iously special packages. slyly pending requests are carefully 966|134|O|142557.51|1998-04-30|2-HIGH|Clerk#000000239|0|special deposits. furious 967|1099|F|236000.05|1992-06-21|3-MEDIUM|Clerk#000000167|0|excuses engage quickly bold dep 992|538|O|189842.36|1997-11-11|3-MEDIUM|Clerk#000000875|0|ts. regular pinto beans thrash carefully sl 993|793|O|269632.17|1995-09-10|3-MEDIUM|Clerk#000000894|0|quickly express accounts among the furiously bol 994|13|F|49870.71|1994-04-20|5-LOW|Clerk#000000497|0|ole. slyly bold excuses nag caref 995|1160|P|189854.55|1995-05-31|3-MEDIUM|Clerk#000000439|0|deas. blithely final deposits play. express accounts wake blithely caref 996|710|O|71877.30|1997-12-29|1-URGENT|Clerk#000000497|0|arefully final packages into the slyly final requests affix blit 997|1087|O|39832.61|1997-05-19|2-HIGH|Clerk#000000651|0|ly express depths. furiously final requests haggle furiously. carefu 998|317|F|81009.81|1994-11-26|4-NOT SPECIFIED|Clerk#000000956|0|ronic dolphins. ironic, bold ideas haggle furiously furious 999|602|F|221018.87|1993-09-05|5-LOW|Clerk#000000464|0|pitaphs sleep. regular accounts use. f 1024|35|O|248306.79|1997-12-23|5-LOW|Clerk#000000903|0| blithely. even, express theodolites cajole slyly across 1025|1027|F|112960.50|1995-05-05|2-HIGH|Clerk#000000376|0|ross the slyly final pa 1026|712|O|49485.22|1997-06-04|5-LOW|Clerk#000000223|0|s wake blithely. special acco 1027|1280|F|145806.89|1992-06-03|3-MEDIUM|Clerk#000000241|0|equests cajole. slyly final pinto bean 1028|685|F|190755.43|1994-01-01|2-HIGH|Clerk#000000131|0|ts are. final, silent deposits are among the fl 1029|1288|F|57789.24|1994-06-21|2-HIGH|Clerk#000000700|0|quests sleep. slyly even foxes wake quickly final theodolites. clo 1030|1340|F|26164.34|1994-06-15|5-LOW|Clerk#000000422|0|ully ironic accounts sleep carefully. requests are carefully alongside of the 1031|37|F|203268.37|1994-09-01|3-MEDIUM|Clerk#000000448|0|s; ironic theodolites along the carefully ex 1056|275|F|41838.38|1995-02-11|1-URGENT|Clerk#000000125|0|t, even deposits hang about the slyly special i 1057|760|F|176625.96|1992-02-20|1-URGENT|Clerk#000000124|0|cuses dazzle carefully careful, ironic pinto beans. carefully even theod 1058|530|F|132338.09|1993-04-26|3-MEDIUM|Clerk#000000373|0|kly pending courts haggle. blithely regular sheaves integrate carefully fi 1059|1268|F|291519.65|1994-02-27|1-URGENT|Clerk#000000104|0|en accounts. 
carefully bold packages cajole daringly special depende 1060|1397|F|155247.64|1993-02-21|3-MEDIUM|Clerk#000000989|0|l platelets sleep quickly slyly special requests. furiously 1061|1027|O|197424.17|1998-05-15|5-LOW|Clerk#000000576|0|uests sleep at the packages. fur 1062|1051|O|48449.37|1997-01-15|1-URGENT|Clerk#000000152|0|eposits use blithely 1063|370|F|76957.40|1994-04-02|2-HIGH|Clerk#000000024|0|deposits nag quickly regular deposits. quickl 1088|1469|F|53063.07|1992-05-21|5-LOW|Clerk#000000347|0|counts are blithely. platelets print. carefully 1089|481|O|140302.14|1996-05-04|4-NOT SPECIFIED|Clerk#000000226|0|ns haggle ruthlessly. even requests are quickly abov 1090|172|O|34290.36|1997-11-15|2-HIGH|Clerk#000000300|0| furiously regular platelets haggle along the slyly unusual foxes! 1091|829|O|48591.79|1996-08-27|1-URGENT|Clerk#000000549|0| even pinto beans haggle quickly alongside of the eve 1092|1232|P|131664.83|1995-03-04|3-MEDIUM|Clerk#000000006|0|re quickly along the blithe 1093|1007|O|124792.59|1997-07-31|4-NOT SPECIFIED|Clerk#000000159|0| after the carefully ironic requests. carefully ironic packages wake fluffil 1094|1438|O|9281.56|1997-12-24|3-MEDIUM|Clerk#000000570|0|beans affix furiously about the pending, even deposits. finally pendi 1095|1433|O|225466.98|1995-08-22|3-MEDIUM|Clerk#000000709|0|sly bold requests cajole carefully according to 1120|1391|O|152580.64|1997-11-07|3-MEDIUM|Clerk#000000319|0|lly special requests. slyly pending platelets are quickly pending requ 1121|281|O|368220.47|1997-01-13|3-MEDIUM|Clerk#000000541|0|r escapades. deposits above the fluffily bold requests hag 1122|1196|O|225660.19|1997-01-10|1-URGENT|Clerk#000000083|0|uffily carefully final theodolites. furiously express packages affix 1123|715|O|115157.87|1996-08-03|3-MEDIUM|Clerk#000000929|0|uriously pending requests. slyly regular instruction 1124|796|O|214762.97|1998-07-30|5-LOW|Clerk#000000326|0|regular pinto beans along the fluffily silent packages 1125|242|F|112869.93|1994-10-27|2-HIGH|Clerk#000000510|0|ithely final requests. i 1126|1433|O|81025.22|1998-01-28|4-NOT SPECIFIED|Clerk#000000928|0|d slyly regular ideas: special ideas believe slyly. slyly ironic sheaves w 1127|575|O|125368.14|1995-09-19|4-NOT SPECIFIED|Clerk#000000397|0|usly silent, regular pinto beans. blithely express requests boos 1152|490|F|78973.34|1994-08-14|4-NOT SPECIFIED|Clerk#000000496|0|equests. deposits ab 1153|1196|O|328207.15|1996-04-18|5-LOW|Clerk#000000059|0| across the pending deposi 1154|352|F|264213.46|1992-02-15|1-URGENT|Clerk#000000268|0|old asymptotes are special requests. blithely even deposits sleep furiously 1155|1498|O|170273.99|1997-10-06|2-HIGH|Clerk#000000164|0|c deposits haggle among the ironic, even requests. carefully ironic sheaves n 1156|1318|O|333319.23|1996-10-19|1-URGENT|Clerk#000000200|0| blithely ironic dolphins. furiously pendi 1157|968|O|136306.85|1998-01-14|4-NOT SPECIFIED|Clerk#000000207|0|out the regular excuses boost carefully against the furio 1158|1414|O|43176.15|1996-06-30|2-HIGH|Clerk#000000549|0|integrate slyly furiously ironic deposit 1159|685|F|68999.67|1992-09-18|3-MEDIUM|Clerk#000000992|0|ts may sleep. requests according to the 1184|886|O|55815.77|1997-10-26|5-LOW|Clerk#000000777|0|iously even packages haggle fluffily care 1185|736|F|68659.75|1992-08-24|5-LOW|Clerk#000000344|0| even escapades are. 
package 1186|589|O|88428.13|1996-08-15|4-NOT SPECIFIED|Clerk#000000798|0|ingly regular pinto beans: instructi 1187|1337|F|133699.36|1992-11-20|3-MEDIUM|Clerk#000000047|0|s after the furiously final deposits boost slyly under the 1188|199|O|79030.59|1996-04-11|2-HIGH|Clerk#000000256|0|ully ironic deposits. slyl 1189|445|F|90219.24|1994-04-09|1-URGENT|Clerk#000000243|0|f the even accounts. courts print blithely ironic accounts. sile 1190|121|O|54852.43|1997-03-16|5-LOW|Clerk#000000575|0|ccounts above the foxes integrate carefully after the 1191|1103|O|41725.75|1995-11-07|3-MEDIUM|Clerk#000000011|0|uests nag furiously. carefully even requests 1216|1220|F|111175.17|1992-12-07|5-LOW|Clerk#000000918|0|nal foxes around the e 1217|64|F|63714.18|1992-04-26|4-NOT SPECIFIED|Clerk#000000538|0| foxes nag quickly. ironic excuses nod. blithely pending 1218|83|F|157442.11|1994-06-20|4-NOT SPECIFIED|Clerk#000000994|0|s cajole. special, silent deposits about the theo 1219|271|O|11895.23|1995-10-05|3-MEDIUM|Clerk#000000800|0|od carefully. slyly final dependencies across the even fray 1220|485|O|164363.11|1996-08-29|1-URGENT|Clerk#000000712|0|inal theodolites wake. fluffily ironic asymptotes cajol 1221|134|F|164863.12|1992-04-19|4-NOT SPECIFIED|Clerk#000000852|0| detect against the silent, even deposits. carefully ironic 1222|98|F|61432.27|1993-02-05|3-MEDIUM|Clerk#000000811|0|theodolites use quickly even accounts. carefully final asympto 1223|91|O|50645.67|1996-05-25|4-NOT SPECIFIED|Clerk#000000238|0|posits was blithely fr 1248|472|F|295111.15|1992-01-02|1-URGENT|Clerk#000000890|0|t the carefully regular dugouts. s 1249|1498|F|70939.22|1994-01-05|1-URGENT|Clerk#000000095|0|al ideas sleep above the pending pin 1250|359|F|13036.54|1992-09-29|4-NOT SPECIFIED|Clerk#000000652|0|ts after the fluffily pending instructions use slyly about the s 1251|379|O|168646.06|1997-10-30|1-URGENT|Clerk#000000276|0|, brave sauternes. deposits boost fluffily. 1252|1499|O|135540.70|1997-08-04|5-LOW|Clerk#000000348|0|ng the slyly regular excuses. special courts nag furiously blithely e 1253|1145|F|135358.41|1993-01-26|1-URGENT|Clerk#000000775|0| requests sleep furiously even foxes. ruthless packag 1254|698|O|143889.73|1995-12-22|1-URGENT|Clerk#000000607|0| pinto beans. carefully regular request 1255|1217|F|104935.74|1994-05-30|4-NOT SPECIFIED|Clerk#000000798|0|ct slyly regular accounts. quick 1280|956|F|137579.92|1993-01-11|5-LOW|Clerk#000000160|0|posits thrash quickly after the theodolites. furiously iro 1281|611|F|226132.40|1994-12-11|1-URGENT|Clerk#000000430|0|counts. carefully pending accounts eat 1282|1160|F|85558.32|1992-02-29|4-NOT SPECIFIED|Clerk#000000168|0|he quickly special packages. furiously final re 1283|1165|O|296055.80|1996-08-30|4-NOT SPECIFIED|Clerk#000000260|0| pinto beans boost slyly ac 1284|1340|O|147647.95|1996-01-07|2-HIGH|Clerk#000000492|0|s. blithely silent deposits s 1285|104|F|209656.04|1992-06-01|1-URGENT|Clerk#000000423|0|cial deposits cajole after the ironic requests. p 1286|1087|F|308115.46|1993-05-14|4-NOT SPECIFIED|Clerk#000000939|0| deposits use carefully from the excuses. slyly bold p 1287|173|F|181586.61|1994-07-05|2-HIGH|Clerk#000000288|0|ly ironic dolphins integrate furiously among the final packages. st 1312|1111|F|81611.04|1994-05-19|3-MEDIUM|Clerk#000000538|0|n, express accounts across the ironic 1313|1477|F|69234.03|1994-09-13|1-URGENT|Clerk#000000774|0|ld accounts. regular deposits cajole. 
ironically pending theodolites use car 1314|1424|F|63623.22|1994-05-13|3-MEDIUM|Clerk#000000485|0|ickly blithe packages nod ideas. furiously bold braids boost around the car 1315|215|O|182956.83|1998-03-22|5-LOW|Clerk#000000840|0|final theodolites alongside of the carefu 1316|160|F|232915.54|1993-12-03|1-URGENT|Clerk#000000857|0|ully bold theodolites? pending, bold pin 1317|991|P|170654.55|1995-05-19|2-HIGH|Clerk#000000373|0|sts. furiously special deposits lose fur 1318|1279|O|98170.50|1998-06-27|3-MEDIUM|Clerk#000000581|0|s hang bold requests. pending, re 1319|313|O|46533.50|1996-09-27|2-HIGH|Clerk#000000257|0|y across the ruthlessly ironic accounts. unusu 1344|170|F|66338.64|1992-04-16|5-LOW|Clerk#000000178|0|omise close, silent requests. pending theodolites boost pending 1345|943|F|165361.31|1992-10-28|5-LOW|Clerk#000000447|0| regular tithes. quickly fluffy de 1346|748|F|224106.48|1992-06-18|2-HIGH|Clerk#000000374|0|ges sleep quickly-- even pint 1347|403|O|249530.87|1997-06-20|5-LOW|Clerk#000000977|0|he furiously even foxes use carefully express req 1348|175|O|145456.56|1998-04-18|5-LOW|Clerk#000000206|0|tly. quickly even deposi 1349|637|O|49656.84|1997-10-26|1-URGENT|Clerk#000000543|0|yly! blithely special theodolites cajole. unusual, reg 1350|508|F|71653.64|1993-08-24|1-URGENT|Clerk#000000635|0|iously about the blithely special a 1351|1060|O|23806.26|1998-04-20|1-URGENT|Clerk#000000012|0| cajole. regular, special re 1376|461|O|35617.06|1997-05-04|4-NOT SPECIFIED|Clerk#000000730|0|der furiously final, final frets. carefull 1377|199|O|145303.74|1998-04-24|4-NOT SPECIFIED|Clerk#000000625|0|lly across the blithely express accounts. ironic excuses promise carefully de 1378|196|O|184205.63|1996-03-09|4-NOT SPECIFIED|Clerk#000000705|0| furiously even tithes cajole slyly among the quick 1379|643|O|98253.79|1998-05-25|5-LOW|Clerk#000000861|0|y deposits are caref 1380|1363|O|136526.77|1996-07-07|3-MEDIUM|Clerk#000000969|0|inal deposits wake slyly daringly even requests. bold, even foxe 1381|1264|O|74771.86|1998-05-25|3-MEDIUM|Clerk#000000107|0|even requests breach after the bold, ironic instructions. slyly even 1382|1324|F|246263.02|1993-08-17|5-LOW|Clerk#000000241|0|fully final packages sl 1383|1196|F|53197.34|1993-04-27|2-HIGH|Clerk#000000785|0|ts. express requests sleep blithel 1408|539|O|255424.00|1997-12-26|4-NOT SPECIFIED|Clerk#000000942|0|t the quickly final asymptotes. unusual 1409|1429|F|119571.58|1992-12-31|4-NOT SPECIFIED|Clerk#000000065|0|ructions. furiously unusual excuses are regular, unusual theodolites. fin 1410|1123|O|153160.36|1997-04-12|5-LOW|Clerk#000000123|0|iously along the bravely regular dolphins. pinto beans cajole furiously sp 1411|946|F|222485.73|1994-12-21|2-HIGH|Clerk#000000566|0|s. furiously special excuses across the pending pinto beans haggle sp 1412|529|F|114697.28|1993-03-13|4-NOT SPECIFIED|Clerk#000000083|0|uffily daring theodolit 1413|907|O|111589.54|1997-06-14|3-MEDIUM|Clerk#000000342|0|, ironic instructions. carefully even packages dazzle 1414|763|O|50012.42|1995-08-16|1-URGENT|Clerk#000000883|0|ccounts. ironic foxes haggle car 1415|787|F|32535.28|1994-05-29|4-NOT SPECIFIED|Clerk#000000601|0|rays. blithely final ideas affix quickl 1440|979|O|53447.02|1995-08-10|5-LOW|Clerk#000000956|0| pending requests. closely s 1441|1219|O|243310.98|1997-03-06|4-NOT SPECIFIED|Clerk#000000156|0|ter the excuses. ironic dependencies m 1442|1106|F|8836.99|1994-07-05|4-NOT SPECIFIED|Clerk#000000935|0|nal pinto beans. 
slyly ironic ideas cajol 1443|439|O|59178.01|1996-12-16|5-LOW|Clerk#000000185|0|x blithely against the carefully final somas. even asymptotes are. quickly spe 1444|1333|F|290512.20|1994-12-06|3-MEDIUM|Clerk#000000783|0|ove the bold accounts cajole fluffily about 1445|1136|F|235888.22|1995-01-10|3-MEDIUM|Clerk#000000211|0|even packages wake fluffily 1446|401|O|46093.69|1998-02-16|5-LOW|Clerk#000000274|0|lly regular notornis above the requests sleep final accounts! 1447|901|F|158371.04|1992-10-15|2-HIGH|Clerk#000000880|0|inly against the blithely pending excuses. regular, pe 1472|1499|O|72730.13|1996-10-06|5-LOW|Clerk#000000303|0|y special dolphins around the final dependencies wake quick 1473|923|O|125427.75|1997-03-17|3-MEDIUM|Clerk#000000960|0|furiously close accoun 1474|691|F|69600.97|1995-01-09|1-URGENT|Clerk#000000438|0|detect quickly above the carefully even 1475|49|O|252931.74|1997-11-12|2-HIGH|Clerk#000000972|0|cally final packages boost. blithely ironic packa 1476|1447|O|24352.78|1996-06-27|2-HIGH|Clerk#000000673|0|ding accounts hinder alongside of the quickly pending requests. fluf 1477|755|O|288752.23|1997-08-24|5-LOW|Clerk#000000612|0|ly bold foxes. final ideas would cajo 1478|499|O|27542.96|1997-08-03|2-HIGH|Clerk#000000827|0|lessly. carefully express 1479|146|O|41710.22|1995-12-16|4-NOT SPECIFIED|Clerk#000000697|0|he furiously even foxes. thinly bold deposits 1504|19|F|125349.58|1992-08-28|3-MEDIUM|Clerk#000000381|0|, brave deposits. bold de 1505|352|F|61594.91|1992-08-21|2-HIGH|Clerk#000000544|0|s. slyly ironic packages cajole. carefully regular packages haggle 1506|1462|F|263170.47|1992-09-21|3-MEDIUM|Clerk#000000620|0| dependencies. accounts affix blithely slowly unusual deposits. slyly regular 1507|1205|F|155226.23|1993-10-14|3-MEDIUM|Clerk#000000305|0|stealthy, ironic de 1508|1013|O|207727.99|1998-04-10|5-LOW|Clerk#000000117|0| after the furiously regular pinto beans hang slyly quickly ironi 1509|625|F|252994.90|1993-07-08|5-LOW|Clerk#000000770|0|the regular ideas. regul 1510|523|O|235151.19|1996-09-17|5-LOW|Clerk#000000128|0|ld carefully. furiously final asymptotes haggle furiously 1511|785|O|103042.07|1996-12-22|4-NOT SPECIFIED|Clerk#000000386|0|ts above the depend 1536|932|O|8717.58|1997-01-26|3-MEDIUM|Clerk#000000117|0|ges are! furiously final deposits cajole iron 1537|1076|F|144582.24|1992-02-15|4-NOT SPECIFIED|Clerk#000000862|0|g to the even deposits. ironic, final packages 1538|290|O|214050.85|1995-06-18|4-NOT SPECIFIED|Clerk#000000258|0| instructions. regular theod 1539|1118|F|67801.22|1995-03-10|5-LOW|Clerk#000000840|0|nstructions boost pa 1540|142|F|191775.25|1992-08-05|2-HIGH|Clerk#000000927|0|r ideas hinder blithe 1541|940|P|72808.67|1995-05-18|1-URGENT|Clerk#000000906|0|y. slyly ironic warhorses around the furiously regul 1542|1427|F|177573.43|1993-09-15|3-MEDIUM|Clerk#000000435|0|t the furiously close deposits do was f 1543|503|O|221716.54|1997-02-20|1-URGENT|Clerk#000000398|0|unts. furiously pend 1568|167|O|108945.79|1997-01-30|4-NOT SPECIFIED|Clerk#000000554|0|d notornis. carefully 1569|1036|O|133309.13|1998-04-02|5-LOW|Clerk#000000786|0|orbits. fluffily even decoys serve blithely. furiously furious realms nag acro 1570|1223|O|58046.02|1998-03-16|1-URGENT|Clerk#000000745|0|pinto beans haggle furiousl 1571|1027|F|194190.45|1992-12-05|2-HIGH|Clerk#000000565|0|ously furiously bold warthogs. slyly ironic instructions are quickly a 1572|104|O|64264.28|1996-02-24|2-HIGH|Clerk#000000994|0|fluffily ironic accounts haggle blithely final platelets! 
slyly regular foxes 1573|1480|F|127138.20|1992-12-28|2-HIGH|Clerk#000000940|0|ess, ironic deposits use along the carefu 1574|1334|O|255009.95|1996-12-12|3-MEDIUM|Clerk#000000809|0| ideas hinder after the carefully unusual 1575|1447|O|238537.34|1995-09-13|3-MEDIUM|Clerk#000000497|0|. furiously regular dep 1600|926|F|188076.26|1993-03-03|3-MEDIUM|Clerk#000000627|0|tions cajole quietly above the regular, silent requests. slyly fin 1601|524|F|116922.47|1994-08-27|5-LOW|Clerk#000000469|0|ent deposits are ca 1602|10|F|6751.57|1993-08-05|5-LOW|Clerk#000000660|0|deposits. busily silent instructions haggle furiously. fin 1603|13|F|46920.46|1993-07-31|4-NOT SPECIFIED|Clerk#000000869|0|s. slyly silent deposits boo 1604|1123|F|135098.39|1993-07-17|5-LOW|Clerk#000000512|0|lithely silent waters. blithely unusual packages alongside 1605|577|O|190137.68|1998-04-24|4-NOT SPECIFIED|Clerk#000000616|0|sleep furiously? ruthless, even pinto beans 1606|530|O|184091.64|1997-04-17|4-NOT SPECIFIED|Clerk#000000550|0|r requests. quickly even platelets breach before the ironically 1607|1493|O|228124.53|1995-12-16|2-HIGH|Clerk#000000498|0| bold, pending foxes haggle. slyly silent 1632|659|O|272854.60|1997-01-08|3-MEDIUM|Clerk#000000351|0|onic requests are accounts. bold a 1633|143|O|74276.27|1995-10-14|2-HIGH|Clerk#000000666|0|y silent accounts sl 1634|689|O|202866.17|1996-09-10|1-URGENT|Clerk#000000360|0|arefully blithely ironic requests. slyly unusual instructions alongside 1635|38|O|107289.95|1997-02-13|3-MEDIUM|Clerk#000000958|0|s. slyly ironic requests affix slyly 1636|787|O|232036.04|1997-06-17|3-MEDIUM|Clerk#000000457|0|ding requests. slyly ironic courts wake quickl 1637|728|F|264917.81|1995-02-08|4-NOT SPECIFIED|Clerk#000000189|0| final accounts. blithely silent ideas cajole bravely. carefully express 1638|1390|O|222330.55|1997-08-13|2-HIGH|Clerk#000000643|0|he fluffily regular asymp 1639|44|O|155953.46|1995-08-20|4-NOT SPECIFIED|Clerk#000000939|0|haggle furiously. final requests detect furious 1664|640|O|229693.92|1996-03-03|1-URGENT|Clerk#000000090|0|y quickly even asymptotes. furiously regular packages haggle quickly fin 1665|757|F|7215.86|1994-05-08|2-HIGH|Clerk#000000920|0|ly regular packages are fluffily even ideas. fluffily final 1666|941|O|187461.04|1995-10-18|1-URGENT|Clerk#000000849|0|ffily pending dependencies wake fluffily. pending, final accounts 1667|46|O|193695.84|1997-10-10|2-HIGH|Clerk#000000103|0|e accounts. slyly express accounts must are a 1668|1415|O|178694.86|1997-07-12|4-NOT SPECIFIED|Clerk#000000148|0|eodolites. carefully dogged dolphins haggle q 1669|19|O|41922.71|1997-06-09|3-MEDIUM|Clerk#000000663|0|er ironic requests detect furiously blithely sp 1670|245|O|129082.05|1997-05-24|2-HIGH|Clerk#000000320|0|unusual dependencies. furiously special platelets main 1671|343|O|155157.94|1996-07-27|4-NOT SPECIFIED|Clerk#000000275|0|ly. slyly pending requests was above the 1696|35|O|159110.93|1998-01-08|4-NOT SPECIFIED|Clerk#000000041|0|bravely bold accounts above the quickly bold 1697|758|O|173054.70|1996-10-07|1-URGENT|Clerk#000000815|0|o x-ray blithely. pl 1698|395|O|212919.29|1997-04-23|2-HIGH|Clerk#000000432|0|slyly. carefully express deposit 1699|835|F|87704.83|1993-12-30|1-URGENT|Clerk#000000125|0|jole blithely. furiously un 1700|649|O|118049.58|1996-06-15|3-MEDIUM|Clerk#000000328|0|ely final dolphins wake sometimes above the quietly regular deposits. fur 1701|1285|F|97290.23|1992-05-19|2-HIGH|Clerk#000000395|0|furiously. 
regular, close theodoli 1702|655|P|313394.52|1995-05-07|2-HIGH|Clerk#000000300|0|around the carefully final deposits cajole carefully according to the b 1703|1334|F|151075.27|1993-01-28|3-MEDIUM|Clerk#000000463|0| pinto beans poach. bold courts boost. regular, express deposits at 1728|631|O|181415.94|1996-05-22|2-HIGH|Clerk#000000711|0|beans. slyly regular instructions sleep! slyly final packages 1729|1312|F|16895.97|1992-05-19|2-HIGH|Clerk#000000158|0|pending foxes wake. accounts 1730|1234|O|196574.65|1998-07-24|5-LOW|Clerk#000000794|0| fluffily pending deposits serve. furiously even requests wake furiou 1731|1279|O|274023.72|1996-01-06|1-URGENT|Clerk#000000268|0|lithely regular, final instructions. ironic, express packages are above 1732|1459|F|261007.93|1993-11-29|5-LOW|Clerk#000000903|0|inal requests integrate dolph 1733|1475|O|195828.98|1996-05-12|2-HIGH|Clerk#000000789|0|e carefully according to the accounts. furiously pending instructions sleep 1734|64|F|59250.30|1994-06-11|2-HIGH|Clerk#000000722|0| final ideas haggle. blithely quick foxes sleep busily bold ideas. i 1735|206|F|128698.48|1992-12-27|1-URGENT|Clerk#000000458|0|ully idle requests wake qu 1760|1142|O|122830.52|1996-05-17|5-LOW|Clerk#000000917|0| deposits. busily regular deposits wake blithely along the furiously even re 1761|1049|F|310359.22|1993-12-24|2-HIGH|Clerk#000000817|0|efully slyly bold frets. packages boost b 1762|767|F|293674.75|1994-08-20|4-NOT SPECIFIED|Clerk#000000653|0|ly ironic packages. furi 1763|1205|O|194261.80|1996-10-29|2-HIGH|Clerk#000000321|0|es. bold dependencies haggle furiously along 1764|283|F|68855.70|1992-03-25|1-URGENT|Clerk#000000182|0|. slyly final packages integrate carefully acro 1765|727|O|51928.97|1995-12-03|4-NOT SPECIFIED|Clerk#000000490|0| regular excuses wake slyly 1766|1385|O|67803.59|1996-10-12|2-HIGH|Clerk#000000983|0|unusual deposits affix quickly beyond the carefully s 1767|247|P|184646.16|1995-03-14|2-HIGH|Clerk#000000327|0|eposits use carefully carefully regular platelets. quickly regular packages al 1792|472|F|175299.11|1993-11-09|5-LOW|Clerk#000000102|0|ructions haggle along the pending packages. carefully speci 1793|178|F|106527.58|1992-07-12|4-NOT SPECIFIED|Clerk#000000291|0|regular packages cajole. blithely special packages according to the final d 1794|1396|O|259572.58|1997-09-28|1-URGENT|Clerk#000000686|0|ally silent pinto beans. regular package 1795|928|F|178189.68|1994-03-19|2-HIGH|Clerk#000000815|0| quickly final packages! blithely dogged accounts c 1796|463|F|41893.26|1992-11-21|2-HIGH|Clerk#000000245|0|eans use furiously around th 1797|1243|O|63183.36|1996-05-07|3-MEDIUM|Clerk#000000508|0|quiet platelets haggle since the quickly ironic instructi 1798|517|O|45519.51|1997-07-28|1-URGENT|Clerk#000000741|0|al foxes are blithe 1799|601|F|60595.26|1994-03-07|4-NOT SPECIFIED|Clerk#000000339|0|ns sleep furiously final waters. blithely regular instructions h 1824|482|F|107580.47|1994-05-05|1-URGENT|Clerk#000000972|0|e blithely fluffily 1825|1465|F|203057.19|1993-12-05|3-MEDIUM|Clerk#000000345|0|ironic, final accou 1826|814|F|150931.79|1992-04-16|4-NOT SPECIFIED|Clerk#000000718|0|the even asymptotes dazzle fluffily slyly regular asymptotes. final, unu 1827|1051|O|320537.08|1996-06-22|4-NOT SPECIFIED|Clerk#000000369|0|luffily even requests haggle sly 1828|314|F|193253.78|1994-04-18|3-MEDIUM|Clerk#000000840|0|y quickly bold packag 1829|1114|F|138691.03|1994-05-08|2-HIGH|Clerk#000000537|0| accounts wake above the furiously unusual requests. 
pending package 1830|1315|F|116412.14|1995-02-23|1-URGENT|Clerk#000000045|0|according to the even, 1831|706|F|83108.48|1993-12-02|1-URGENT|Clerk#000000854|0| accounts. carefully even accounts boost furiously. regular ideas engage. 1856|1045|F|267665.56|1992-03-20|4-NOT SPECIFIED|Clerk#000000952|0|. special pinto beans run acr 1857|1321|F|165861.54|1993-01-13|2-HIGH|Clerk#000000083|0|hely final ideas slee 1858|1423|O|34660.99|1997-12-13|1-URGENT|Clerk#000000389|0|thely. slyly final deposits sleep 1859|598|O|162087.77|1997-04-11|4-NOT SPECIFIED|Clerk#000000949|0| the foxes. bravely special excuses nag carefully special r 1860|97|O|9265.31|1996-04-04|3-MEDIUM|Clerk#000000556|0|osits. quickly bold deposits according to 1861|685|F|113123.65|1994-01-03|3-MEDIUM|Clerk#000000847|0|r the fluffily close sauternes. furio 1862|325|O|123620.54|1998-02-24|5-LOW|Clerk#000000348|0|ts snooze ironically abou 1863|731|F|142539.37|1993-09-23|4-NOT SPECIFIED|Clerk#000000658|0|old sentiments. careful, 1888|1192|F|360144.81|1993-10-31|4-NOT SPECIFIED|Clerk#000000659|0|olites. pinto beans cajole. regular deposits affix. slyly regular 1889|244|O|128067.39|1997-03-16|1-URGENT|Clerk#000000854|0|p around the regular notornis. unusual deposits 1890|95|O|297716.58|1996-12-18|4-NOT SPECIFIED|Clerk#000000627|0|romise final, regular deposits. regular fox 1891|610|F|128912.17|1994-12-15|5-LOW|Clerk#000000495|0|unusual foxes sleep regular deposits. requests wake special pac 1892|235|F|165541.41|1994-03-26|5-LOW|Clerk#000000733|0|sts. slyly regular dependencies use slyly. ironic, spec 1893|1244|O|169481.16|1997-10-30|2-HIGH|Clerk#000000111|0|olites. silent, special deposits eat slyly quickly express packages; hockey p 1894|751|F|65789.57|1992-03-30|1-URGENT|Clerk#000000626|0|e furiously. furiously even accounts are slyly final accounts. closely speci 1895|67|F|62954.37|1994-05-30|3-MEDIUM|Clerk#000000878|0|ress accounts. bold accounts cajole. slyly final pinto beans poach regul 1920|1096|O|195949.19|1998-06-24|5-LOW|Clerk#000000018|0|hely; furiously regular excuses 1921|862|F|82692.09|1994-01-18|3-MEDIUM|Clerk#000000293|0|counts. slyly quiet requests along the ruthlessly regular accounts are 1922|556|O|12696.20|1996-07-13|3-MEDIUM|Clerk#000000984|0|side of the blithely final re 1923|1345|O|262475.36|1997-07-07|1-URGENT|Clerk#000000471|0| express dolphins. 1924|748|O|235357.72|1996-09-07|4-NOT SPECIFIED|Clerk#000000823|0| of the ironic accounts. instructions near the final instr 1925|167|F|192587.39|1992-03-05|1-URGENT|Clerk#000000986|0|e slyly regular deposits. furiously 1926|931|O|132894.56|1996-01-31|2-HIGH|Clerk#000000568|0|cajole. even warhorses sleep carefully. 1927|1399|O|38474.28|1995-09-30|3-MEDIUM|Clerk#000000616|0|riously special packages. permanent pearls wake furiously. even packages alo 1952|662|F|17837.86|1994-03-16|2-HIGH|Clerk#000000254|0| silent accounts boost 1953|1495|F|65277.06|1993-11-30|3-MEDIUM|Clerk#000000891|0| fluffily along the quickly even packages. 1954|559|O|244793.78|1997-05-31|4-NOT SPECIFIED|Clerk#000000104|0| unusual excuses cajole according to the blithely regular theodolites. 1955|121|F|132951.01|1992-04-20|1-URGENT|Clerk#000000792|0|ly special ideas. sometimes final 1956|1258|F|109978.44|1992-09-20|4-NOT SPECIFIED|Clerk#000000600|0|ironic ideas are silent ideas. furiously final deposits sleep slyly carefu 1957|302|O|113050.82|1998-07-21|2-HIGH|Clerk#000000639|0|nding excuses about the 1958|523|O|240252.23|1995-09-22|5-LOW|Clerk#000000343|0| haggle blithely. 
flu 1959|424|O|86342.43|1997-01-13|4-NOT SPECIFIED|Clerk#000000631|0| cajole about the blithely express requests. even excuses mold bl 1984|508|O|123878.05|1998-04-01|1-URGENT|Clerk#000000416|0| slyly special instructions. unusual foxes use packages. carefully regular req 1985|59|F|220942.19|1994-09-02|4-NOT SPECIFIED|Clerk#000000741|0|slyly slyly even pains. slyly reg 1986|1486|F|51251.50|1994-05-05|2-HIGH|Clerk#000000609|0|across the theodolites. quick 1987|982|F|7351.41|1994-04-30|2-HIGH|Clerk#000000652|0|gular platelets alongside 1988|1075|O|189572.71|1995-10-06|4-NOT SPECIFIED|Clerk#000000011|0|ly ironic dolphins serve quickly busy accounts. bu 1989|1162|F|43106.73|1994-03-16|4-NOT SPECIFIED|Clerk#000000747|0|ely bold pinto beans ha 1990|1181|F|44391.50|1994-12-16|2-HIGH|Clerk#000000114|0|e bold patterns. always regul 1991|187|F|191273.88|1992-09-07|4-NOT SPECIFIED|Clerk#000000854|0|ing accounts can haggle at the carefully final Tiresias-- pending, regular 2016|76|O|34243.46|1996-08-16|3-MEDIUM|Clerk#000000641|0|the carefully ironic foxes. requests nag bold, r 2017|1006|O|83524.96|1998-05-13|3-MEDIUM|Clerk#000000427|0|nusual requests. blit 2018|176|P|29982.40|1995-04-05|4-NOT SPECIFIED|Clerk#000000920|0|gular accounts wake fur 2019|1342|F|52971.22|1992-10-23|1-URGENT|Clerk#000000565|0| furiously bold packages. fluffily fi 2020|725|F|190262.97|1993-06-21|3-MEDIUM|Clerk#000000192|0|es. furiously regular packages above the furiously special theodolites are a 2021|700|O|41487.67|1995-07-15|1-URGENT|Clerk#000000155|0|ong the furiously regular requests. unusual deposits wake fluffily inside 2022|619|F|319320.43|1992-03-15|1-URGENT|Clerk#000000268|0| dependencies sleep fluffily even, ironic deposits. express, silen 2023|1180|F|181994.65|1992-05-06|5-LOW|Clerk#000000137|0|ular courts engage according to the 2048|169|F|43446.23|1993-11-15|1-URGENT|Clerk#000000934|0|s cajole after the blithely final accounts. f 2049|298|O|206620.70|1995-12-07|2-HIGH|Clerk#000000859|0|ly regular requests thrash blithely about the fluffily even theodolites. r 2050|274|F|289457.13|1994-06-02|4-NOT SPECIFIED|Clerk#000000821|0|d accounts against the furiously regular packages use bli 2051|382|O|104971.81|1996-03-18|4-NOT SPECIFIED|Clerk#000000333|0|ctions sleep blithely. blithely regu 2052|895|F|223352.03|1992-04-13|2-HIGH|Clerk#000000767|0| requests sleep around the even, even courts. ironic theodolites affix furious 2053|1406|F|162584.35|1995-02-07|1-URGENT|Clerk#000000717|0|ar requests: blithely sly accounts boost carefully across t 2054|401|F|162781.21|1992-06-08|4-NOT SPECIFIED|Clerk#000000103|0|l requests affix carefully about the furiously special 2055|970|F|70155.57|1993-09-04|1-URGENT|Clerk#000000067|0|. warhorses affix slyly blithely express instructions? fur 2080|946|F|75292.75|1993-06-18|5-LOW|Clerk#000000190|0|ironic, pending theodolites are carefully about the quickly regular theodolite 2081|1193|O|202626.98|1997-07-05|2-HIGH|Clerk#000000136|0|ong the regular theo 2082|487|F|70224.72|1995-01-10|2-HIGH|Clerk#000000354|0|cial accounts. ironic, express dolphins nod slyly sometimes final reques 2083|1009|F|39235.65|1993-07-14|3-MEDIUM|Clerk#000000361|0|al patterns. bold, final foxes nag bravely about the furiously express 2084|800|F|277607.01|1993-03-17|2-HIGH|Clerk#000000048|0|zle furiously final, careful packages. 
slyly ironic ideas amo 2085|488|F|62710.56|1993-11-21|3-MEDIUM|Clerk#000000818|0|ress, express ideas haggle 2086|1405|F|259355.86|1994-10-19|1-URGENT|Clerk#000000046|0| permanently regular 2087|493|O|79220.54|1998-01-31|2-HIGH|Clerk#000000626|0|e always regular packages nod against the furiously spec 2112|634|O|29666.67|1997-02-05|2-HIGH|Clerk#000000351|0|against the slyly even id 2113|319|O|70044.40|1997-11-08|2-HIGH|Clerk#000000527|0|slyly regular instruct 2114|787|F|159876.50|1995-01-16|5-LOW|Clerk#000000751|0|r, unusual accounts haggle across the busy platelets. carefully 2115|1042|O|213137.77|1998-05-23|4-NOT SPECIFIED|Clerk#000000101|0|odolites boost. carefully regular excuses cajole. quickly ironic pinto be 2116|223|F|79971.53|1994-08-26|1-URGENT|Clerk#000000197|0|efully after the asymptotes. furiously sp 2117|220|O|215355.06|1997-04-26|2-HIGH|Clerk#000000887|0|ely even dependencies. regular foxes use blithely. 2118|1333|O|54388.61|1996-10-09|1-URGENT|Clerk#000000196|0|ial requests wake carefully special packages. f 2119|622|O|31760.98|1996-08-20|2-HIGH|Clerk#000000434|0|uickly pending escapades. fluffily ir 2144|1348|F|176382.12|1994-03-29|3-MEDIUM|Clerk#000000546|0|t. carefully quick requests across the deposits wake regu 2145|1336|F|30239.30|1992-10-03|1-URGENT|Clerk#000000886|0|sts would snooze blithely alongside of th 2146|1177|F|254476.65|1992-09-14|4-NOT SPECIFIED|Clerk#000000476|0|ven packages. dependencies wake slyl 2147|988|F|117795.98|1992-09-06|4-NOT SPECIFIED|Clerk#000000424|0| haggle carefully furiously final foxes. pending escapades thrash. bold theod 2148|1283|F|20481.54|1995-04-19|4-NOT SPECIFIED|Clerk#000000517|0|ross the furiously unusual theodolites. always expre 2149|1007|F|151151.22|1993-03-13|5-LOW|Clerk#000000555|0|nusual accounts nag furiously special reques 2150|817|F|230677.37|1994-06-03|3-MEDIUM|Clerk#000000154|0|ect slyly against the even, final packages. quickly regular pinto beans wake c 2151|580|O|166259.86|1996-11-11|3-MEDIUM|Clerk#000000996|0|c requests. ironic platelets cajole across the quickly fluffy deposits. 2176|1036|F|140007.98|1992-11-10|1-URGENT|Clerk#000000195|0|s haggle regularly accor 2177|1351|O|254199.26|1997-01-20|3-MEDIUM|Clerk#000000161|0|ove the blithely unusual packages cajole carefully fluffily special request 2178|80|O|92942.00|1996-12-12|3-MEDIUM|Clerk#000000656|0|thely according to the instructions. furious 2179|410|O|86553.77|1996-09-07|2-HIGH|Clerk#000000935|0|ounts alongside of the furiously unusual braids cajol 2180|751|O|287379.99|1996-09-14|4-NOT SPECIFIED|Clerk#000000650|0|xpress, unusual pains. furiously ironic excu 2181|742|O|174000.86|1995-09-13|3-MEDIUM|Clerk#000000814|0|y against the ironic, even 2182|226|F|174578.47|1994-04-05|2-HIGH|Clerk#000000071|0|ccounts. quickly bold deposits across the excuses sl 2183|1121|O|78568.93|1996-06-22|1-URGENT|Clerk#000000287|0| among the express, ironic packages. slyly ironic platelets integrat 2208|676|P|346978.53|1995-05-01|4-NOT SPECIFIED|Clerk#000000900|0|symptotes wake slyly blithely unusual packages. 2209|892|F|173318.78|1992-07-10|2-HIGH|Clerk#000000056|0|er above the slyly silent requests. furiously reg 2210|319|F|54392.14|1992-01-16|2-HIGH|Clerk#000000941|0| believe carefully quickly express pinto beans. 
deposi 2211|920|F|204877.13|1994-06-30|2-HIGH|Clerk#000000464|0|ffily bold courts e 2212|1163|F|28563.15|1994-03-23|3-MEDIUM|Clerk#000000954|0|structions above the unusual requests use fur 2213|1213|F|204541.38|1993-01-15|4-NOT SPECIFIED|Clerk#000000598|0|osits are carefully reg 2214|1141|O|224136.38|1998-05-05|3-MEDIUM|Clerk#000000253|0|packages. fluffily even accounts haggle blithely. carefully ironic depen 2215|388|O|159169.89|1996-06-16|4-NOT SPECIFIED|Clerk#000000817|0|le final, final foxes. quickly regular gifts are carefully deposit 2240|554|F|265020.95|1992-03-06|4-NOT SPECIFIED|Clerk#000000622|0|accounts against the slyly express foxes are after the slyly regular 2241|1013|F|248564.48|1993-05-11|1-URGENT|Clerk#000000081|0|y about the silent excuses. furiously ironic instructions along the sil 2242|815|O|16543.76|1997-07-20|4-NOT SPECIFIED|Clerk#000000360|0| pending multipliers. carefully express asymptotes use quickl 2243|479|O|11918.84|1995-06-10|2-HIGH|Clerk#000000813|0|ously regular deposits integrate s 2244|1268|F|34877.52|1993-01-09|1-URGENT|Clerk#000001000|0|ckages. ironic, ironic accounts haggle blithely express excuses. 2245|565|F|257990.42|1993-04-28|3-MEDIUM|Clerk#000000528|0|ake carefully. braids haggle slyly quickly b 2246|1124|O|100351.39|1996-05-27|4-NOT SPECIFIED|Clerk#000000739|0| final gifts sleep 2247|947|F|20406.41|1992-08-02|4-NOT SPECIFIED|Clerk#000000947|0|furiously regular packages. final brai 2272|1384|F|186159.70|1993-04-13|2-HIGH|Clerk#000000449|0|s. bold, ironic pinto beans wake. silently specia 2273|1343|O|223151.74|1996-12-14|5-LOW|Clerk#000000155|0|uickly express foxes haggle quickly against 2274|1040|F|62910.69|1993-09-04|4-NOT SPECIFIED|Clerk#000000258|0|nstructions try to hag 2275|1484|F|54526.45|1992-10-22|4-NOT SPECIFIED|Clerk#000000206|0| furiously furious platelets. slyly final packa 2276|415|O|177003.87|1996-04-29|4-NOT SPECIFIED|Clerk#000000821|0|ecial requests. fox 2277|889|F|104747.35|1995-01-02|4-NOT SPECIFIED|Clerk#000000385|0|accounts cajole. even i 2278|1406|O|154671.54|1998-04-25|3-MEDIUM|Clerk#000000186|0|r pinto beans integrate after the carefully even deposits. blit 2279|794|F|187322.08|1993-02-23|3-MEDIUM|Clerk#000000898|0|de of the quickly unusual instructio 2304|451|F|136763.46|1994-01-07|4-NOT SPECIFIED|Clerk#000000415|0|onic platelets. ironic packages haggle. packages nag doggedly according to 2305|421|F|154302.46|1993-01-26|2-HIGH|Clerk#000000440|0|ove the furiously even acco 2306|268|O|341859.20|1995-07-26|2-HIGH|Clerk#000000975|0| wake furiously requests. permanent requests affix. final packages caj 2307|1042|F|79156.35|1993-06-29|5-LOW|Clerk#000000952|0|furiously even asymptotes? carefully regular accounts 2308|239|F|78112.08|1992-10-25|4-NOT SPECIFIED|Clerk#000000609|0|ts. slyly final depo 2309|997|O|194143.51|1995-09-04|5-LOW|Clerk#000000803|0|he carefully pending packages. fluffily stealthy foxes engage carefully 2310|304|O|121341.69|1996-09-20|5-LOW|Clerk#000000917|0|wake carefully. unusual instructions nag ironic, regular excuse 2311|718|P|208795.07|1995-05-02|2-HIGH|Clerk#000000761|0|ly pending asymptotes-- furiously bold excus 2336|1414|O|37197.01|1996-01-07|4-NOT SPECIFIED|Clerk#000000902|0|c, final excuses sleep furiously among the even theodolites. f 2337|1402|O|65214.59|1997-06-18|4-NOT SPECIFIED|Clerk#000000754|0| quickly. final accounts haggle. carefully final acco 2338|1394|O|41743.98|1997-09-15|2-HIGH|Clerk#000000951|0|riously final dugouts. 
final, ironic packages wake express, ironic id 2339|1084|F|87620.47|1993-12-15|5-LOW|Clerk#000000847|0| against the regular 2340|646|O|47690.57|1996-01-12|1-URGENT|Clerk#000000964|0|ter the deposits sleep according to the slyly regular packages. carefully 2341|817|F|90460.36|1993-05-30|5-LOW|Clerk#000000443|0|sts-- blithely bold dolphins through the deposits nag blithely carefully re 2342|365|O|139380.41|1996-06-09|1-URGENT|Clerk#000000615|0|oost carefully across the regular accounts. blithely final d 2343|712|O|119571.52|1995-08-21|3-MEDIUM|Clerk#000000170|0|fluffily over the slyly special deposits. quickl 2368|130|F|130017.09|1993-08-20|1-URGENT|Clerk#000000830|0|t the bold instructions. carefully unusual 2369|1097|O|102225.66|1996-12-24|2-HIGH|Clerk#000000752|0|iously even requests are dogged, express 2370|1405|F|93340.00|1994-01-17|1-URGENT|Clerk#000000231|0|lyly final packages. quickly final deposits haggl 2371|187|O|278700.24|1998-01-07|1-URGENT|Clerk#000000028|0|ckages haggle at th 2372|292|O|145396.99|1997-11-21|5-LOW|Clerk#000000342|0|s: deposits haggle along the final ideas. careful 2373|263|F|79116.08|1994-03-12|4-NOT SPECIFIED|Clerk#000000306|0| even, special courts grow quickly. pending, 2374|40|F|148382.76|1993-10-29|4-NOT SPECIFIED|Clerk#000000081|0| blithely regular packages. blithely unusua 2375|49|O|159126.15|1996-11-20|3-MEDIUM|Clerk#000000197|0|unusual, pending theodolites cajole carefully 2400|364|O|104781.57|1998-07-25|5-LOW|Clerk#000000782|0|nusual courts nag against the carefully unusual pinto b 2401|1477|O|115018.83|1997-07-29|4-NOT SPECIFIED|Clerk#000000531|0|ully unusual instructions boost carefully silently regular requests. 2402|658|O|114097.88|1996-09-06|4-NOT SPECIFIED|Clerk#000000162|0|slyly final sheaves sleep slyly. q 2403|544|O|171679.55|1998-04-11|3-MEDIUM|Clerk#000000820|0|furiously regular deposits use. furiously unusual accounts wake along the 2404|767|O|136444.96|1997-03-13|4-NOT SPECIFIED|Clerk#000000409|0|deposits breach furiously. ironic foxes haggle carefully bold packag 2405|713|O|157476.73|1996-12-23|3-MEDIUM|Clerk#000000535|0|ular, regular asympto 2406|67|O|267248.11|1996-10-28|5-LOW|Clerk#000000561|0|blithely regular accounts u 2407|536|O|165349.80|1998-06-19|2-HIGH|Clerk#000000068|0|uests affix slyly among the slyly regular depos 2432|1021|O|80711.22|1996-07-13|1-URGENT|Clerk#000000115|0|re. slyly even deposits wake bra 2433|299|F|200835.08|1994-08-22|4-NOT SPECIFIED|Clerk#000000324|0|ess patterns are slyly. packages haggle carefu 2434|238|O|160941.81|1997-04-27|3-MEDIUM|Clerk#000000190|0|s. quickly ironic dolphins impress final deposits. blithel 2435|715|F|165964.31|1993-02-21|5-LOW|Clerk#000000112|0|es are carefully along the carefully final instructions. pe 2436|1250|O|95969.65|1995-09-11|4-NOT SPECIFIED|Clerk#000000549|0|arefully. blithely bold deposits affix special accounts. final foxes nag. spe 2437|844|F|207060.97|1993-04-21|4-NOT SPECIFIED|Clerk#000000578|0|. theodolites wake slyly-- ironic, pending platelets above the carefully exp 2438|127|F|308209.95|1993-07-15|2-HIGH|Clerk#000000744|0|the final, regular warhorses. regularly 2439|541|O|68090.09|1997-03-15|2-HIGH|Clerk#000000819|0|lithely after the car 2464|1450|O|32741.64|1997-11-23|5-LOW|Clerk#000000633|0|le about the instructions. courts wake carefully even 2465|325|O|253098.40|1995-06-24|1-URGENT|Clerk#000000078|0|al pinto beans. final, bold packages wake quickly 2466|181|F|221482.78|1994-03-06|1-URGENT|Clerk#000000424|0|c pinto beans. express deposits wake quickly. 
even, final courts nag. package 2467|343|O|8612.24|1995-07-16|4-NOT SPECIFIED|Clerk#000000914|0|pades sleep furiously. sometimes regular packages again 2468|1109|O|253685.87|1997-06-09|4-NOT SPECIFIED|Clerk#000000260|0|ickly regular packages. slyly ruthless requests snooze quickly blithe 2469|1229|O|230050.24|1996-11-26|5-LOW|Clerk#000000730|0| sleep closely regular instructions. furiously ironic instructi 2470|563|O|119774.66|1997-04-19|3-MEDIUM|Clerk#000000452|0|to the furiously final packages? pa 2471|884|O|61766.57|1998-03-12|4-NOT SPECIFIED|Clerk#000000860|0|carefully blithely regular pac 2496|1342|F|188456.20|1994-01-09|2-HIGH|Clerk#000000142|0|slyly. pending instructions sleep. quic 2497|470|F|236003.51|1992-08-27|1-URGENT|Clerk#000000977|0|ily ironic pinto beans. furiously final platelets alongside of t 2498|959|F|57787.08|1993-11-08|5-LOW|Clerk#000000373|0|g the slyly special pinto beans. 2499|1210|O|194133.93|1995-09-24|1-URGENT|Clerk#000000277|0|r the quickly bold foxes. bold instructi 2500|1330|F|210458.54|1992-08-15|2-HIGH|Clerk#000000447|0|integrate slyly pending deposits. furiously ironic accounts across the s 2501|665|O|105442.80|1997-05-25|5-LOW|Clerk#000000144|0|ickly special theodolite 2502|694|F|47966.60|1993-05-28|4-NOT SPECIFIED|Clerk#000000914|0|lyly: carefully pending ideas affix again 2503|67|F|250884.59|1993-06-20|3-MEDIUM|Clerk#000000294|0|ly even packages was. ironic, regular deposits unwind furiously across the p 2528|550|F|140496.46|1994-11-20|1-URGENT|Clerk#000000789|0|ular dependencies? regular frays kindle according to the blith 2529|1349|O|4809.51|1996-08-20|2-HIGH|Clerk#000000511|0|posits across the silent instructions wake blithely across 2530|1271|F|96402.94|1994-03-21|3-MEDIUM|Clerk#000000291|0|ular instructions about the quic 2531|434|O|214816.43|1996-05-06|4-NOT SPECIFIED|Clerk#000000095|0|even accounts. furiously ironic excuses sleep fluffily. carefully silen 2532|937|O|173714.44|1995-10-11|2-HIGH|Clerk#000000498|0|the blithely pending accounts. regular, regular excuses boost aro 2533|496|O|248797.75|1997-03-24|1-URGENT|Clerk#000000594|0|ecial instructions. spec 2534|754|O|265775.52|1996-07-17|3-MEDIUM|Clerk#000000332|0|packages cajole ironic requests. furiously regular 2535|1202|F|99020.66|1993-05-25|5-LOW|Clerk#000000296|0|phins cajole beneath the fluffily express asymptotes. c 2560|1309|F|213993.64|1992-09-05|1-URGENT|Clerk#000000538|0|atelets; quickly sly requests 2561|572|O|180170.50|1997-11-14|1-URGENT|Clerk#000000861|0|ual requests. unusual deposits cajole furiously pending, regular platelets. 2562|82|F|196430.96|1992-08-01|1-URGENT|Clerk#000000467|0|elets. pending dolphins promise slyly. bo 2563|613|F|237151.86|1993-11-19|4-NOT SPECIFIED|Clerk#000000150|0|sly even packages after the furio 2564|761|F|3975.31|1994-09-09|2-HIGH|Clerk#000000718|0|usly regular pinto beans. orbits wake carefully. slyly e 2565|554|O|269678.73|1998-02-28|3-MEDIUM|Clerk#000000032|0|x-ray blithely along 2566|856|F|127226.57|1992-10-10|3-MEDIUM|Clerk#000000414|0|ructions boost bold ideas. idly ironic accounts use according to th 2567|694|O|366949.49|1998-02-27|2-HIGH|Clerk#000000031|0|detect. furiously ironic requests 2592|1006|F|14583.53|1993-03-05|4-NOT SPECIFIED|Clerk#000000524|0|ts nag fluffily. quickly stealthy theodolite 2593|913|F|184358.17|1993-09-04|2-HIGH|Clerk#000000468|0|r the carefully final 2594|776|F|118325.94|1992-12-17|1-URGENT|Clerk#000000550|0|ests. 
theodolites above the blithely even accounts detect furio 2595|737|O|278418.04|1995-12-14|4-NOT SPECIFIED|Clerk#000000222|0|arefully ironic requests nag carefully ideas. 2596|424|O|93640.14|1996-08-17|1-URGENT|Clerk#000000242|0|requests. ironic, bold theodolites wak 2597|1036|F|38810.68|1993-02-04|2-HIGH|Clerk#000000757|0|iously ruthless exc 2598|1120|O|100794.85|1996-03-05|3-MEDIUM|Clerk#000000391|0| ironic notornis according to the blithely final requests should 2599|1483|O|95783.25|1996-11-07|2-HIGH|Clerk#000000722|0|ts. slyly regular theodolites wake sil 2624|512|O|43715.51|1996-11-28|5-LOW|Clerk#000000930|0|ic, regular packages 2625|382|F|46838.33|1992-10-14|4-NOT SPECIFIED|Clerk#000000386|0| final deposits. blithely ironic ideas 2626|1372|O|109208.97|1995-09-08|4-NOT SPECIFIED|Clerk#000000289|0|gside of the carefully special packages are furiously after the slyly express 2627|1486|F|31377.24|1992-03-24|3-MEDIUM|Clerk#000000181|0|s. silent, ruthless requests 2628|553|F|243890.34|1993-10-22|5-LOW|Clerk#000000836|0|ajole across the blithely careful accounts. blithely silent deposits sl 2629|1372|O|124187.64|1998-04-06|5-LOW|Clerk#000000680|0|uches dazzle carefully even, express excuses. ac 2630|841|F|180224.30|1992-10-24|5-LOW|Clerk#000000712|0|inal theodolites. ironic instructions s 2631|352|F|70543.30|1993-09-24|5-LOW|Clerk#000000833|0| quickly unusual deposits doubt around 2656|766|F|119993.15|1993-05-04|1-URGENT|Clerk#000000307|0|elets. slyly final accou 2657|247|O|231309.69|1995-10-17|2-HIGH|Clerk#000000160|0| foxes-- slyly final dependencies around the slyly final theodo 2658|134|O|208449.54|1995-09-23|3-MEDIUM|Clerk#000000400|0|bout the slyly regular accounts. ironic, 2659|821|F|103595.36|1993-12-18|4-NOT SPECIFIED|Clerk#000000758|0|cross the pending requests maintain 2660|1264|O|24605.68|1995-08-05|5-LOW|Clerk#000000480|0|ly finally regular deposits. ironic theodolites cajole 2661|739|O|138682.43|1997-01-04|3-MEDIUM|Clerk#000000217|0|al, regular pinto beans. silently final deposits should have t 2662|370|O|94638.61|1996-08-21|3-MEDIUM|Clerk#000000589|0|bold pinto beans above the slyly final accounts affix furiously deposits. pac 2663|941|O|35825.35|1995-09-06|1-URGENT|Clerk#000000950|0|ar requests. furiously final dolphins along the fluffily spe 2688|974|F|244642.45|1992-01-24|2-HIGH|Clerk#000000720|0|have to nag according to the pending theodolites. sly 2689|1015|F|43940.00|1992-04-09|4-NOT SPECIFIED|Clerk#000000698|0|press pains wake. furiously express theodolites alongsid 2690|935|O|325313.14|1996-03-31|3-MEDIUM|Clerk#000000760|0|ravely even theodolites 2691|59|F|47515.61|1992-04-30|5-LOW|Clerk#000000439|0|es at the regular deposits sleep slyly by the fluffy requests. eve 2692|619|O|25165.18|1997-12-02|3-MEDIUM|Clerk#000000878|0|es. regular asymptotes cajole above t 2693|172|O|64546.06|1996-09-04|1-URGENT|Clerk#000000370|0|ndle never. blithely regular packages nag carefully enticing platelets. ca 2694|1192|O|132476.42|1996-03-14|5-LOW|Clerk#000000722|0| requests. bold deposits above the theodol 2695|563|O|204229.27|1996-08-20|1-URGENT|Clerk#000000697|0|ven deposits around the quickly regular packa 2720|301|F|173898.68|1993-06-08|1-URGENT|Clerk#000000948|0|quickly. special asymptotes are fluffily ironi 2721|773|O|93291.10|1996-01-27|2-HIGH|Clerk#000000401|0| ideas eat even, unusual ideas. theodolites are carefully 2722|347|F|62122.98|1994-04-09|5-LOW|Clerk#000000638|0|rding to the carefully quick deposits. 
bli 2723|592|O|128721.12|1995-10-06|5-LOW|Clerk#000000836|0|nts must have to cajo 2724|1369|F|178028.99|1994-09-14|2-HIGH|Clerk#000000217|0| sleep blithely. blithely idle 2725|884|F|88263.14|1994-05-21|4-NOT SPECIFIED|Clerk#000000835|0|ular deposits. spec 2726|58|F|48071.00|1992-11-27|5-LOW|Clerk#000000470|0| blithely even dinos sleep care 2727|739|O|4148.56|1998-04-19|4-NOT SPECIFIED|Clerk#000000879|0|sual theodolites cajole enticingly above the furiously fin 2752|581|F|252581.73|1993-11-19|2-HIGH|Clerk#000000648|0| carefully regular foxes are quickly quickl 2753|154|F|228723.78|1993-11-30|2-HIGH|Clerk#000000380|0|ending instructions. unusual deposits 2754|1438|F|39260.31|1994-04-03|2-HIGH|Clerk#000000960|0|cies detect slyly. 2755|1177|F|131857.47|1992-02-07|4-NOT SPECIFIED|Clerk#000000177|0|ously according to the sly foxes. blithely regular pinto bean 2756|1166|F|194478.82|1994-04-18|1-URGENT|Clerk#000000537|0|arefully special warho 2757|755|O|125256.68|1995-07-20|2-HIGH|Clerk#000000216|0| regular requests subl 2758|412|O|42052.59|1998-07-12|5-LOW|Clerk#000000863|0|s cajole according to the carefully special 2759|1159|F|101861.98|1993-11-25|4-NOT SPECIFIED|Clerk#000000071|0|ts. regular, pending pinto beans sleep ab 2784|943|O|153115.55|1998-01-07|1-URGENT|Clerk#000000540|0|g deposits alongside of the silent requests s 2785|1462|O|195472.29|1995-07-21|2-HIGH|Clerk#000000098|0|iously pending packages sleep according to the blithely unusual foxe 2786|788|F|247891.00|1992-03-22|2-HIGH|Clerk#000000976|0|al platelets cajole blithely ironic requests. ironic re 2787|1030|O|4893.42|1995-09-30|1-URGENT|Clerk#000000906|0|he ironic, regular 2788|1234|F|26524.32|1994-09-22|1-URGENT|Clerk#000000641|0|nts wake across the fluffily bold accoun 2789|361|O|291634.33|1998-03-14|2-HIGH|Clerk#000000972|0|gular patterns boost. carefully even re 2790|250|F|239747.97|1994-08-19|2-HIGH|Clerk#000000679|0| the carefully express deposits sleep slyly 2791|1207|F|212540.36|1994-10-10|2-HIGH|Clerk#000000662|0|as. slyly ironic accounts play furiously bl 2816|568|F|62180.16|1994-09-20|2-HIGH|Clerk#000000289|0|kages at the final deposits cajole furious foxes. quickly 2817|397|F|107437.21|1994-04-19|3-MEDIUM|Clerk#000000982|0|ic foxes haggle upon the daringly even pinto beans. slyly 2818|482|F|166324.37|1994-12-12|3-MEDIUM|Clerk#000000413|0|eep furiously special ideas. express 2819|1019|F|92864.28|1994-05-05|1-URGENT|Clerk#000000769|0|ngside of the blithely ironic dolphins. furio 2820|181|F|202441.88|1994-05-20|3-MEDIUM|Clerk#000000807|0|equests are furiously. carefu 2821|1166|F|54252.30|1993-08-09|3-MEDIUM|Clerk#000000323|0|ng requests. even instructions are quickly express, silent instructi 2822|788|F|53827.11|1993-07-26|2-HIGH|Clerk#000000510|0|furiously against the accounts. unusual accounts aft 2823|781|O|261004.83|1995-09-09|2-HIGH|Clerk#000000567|0|encies. carefully fluffy accounts m 2848|689|F|165694.90|1992-03-10|1-URGENT|Clerk#000000256|0|ly fluffy foxes sleep furiously across the slyly regu 2849|455|O|272713.22|1996-04-30|2-HIGH|Clerk#000000659|0|al packages are after the quickly bold requests. carefully special 2850|992|O|161737.06|1996-10-02|2-HIGH|Clerk#000000392|0|, regular deposits. 
furiously pending packages hinder carefully carefully u 2851|1445|O|10321.32|1997-09-07|5-LOW|Clerk#000000566|0|Tiresias wake quickly quickly even 2852|892|F|144902.15|1993-01-16|1-URGENT|Clerk#000000740|0|ruthless deposits against the final instructions use quickly al 2853|929|F|137718.56|1994-05-05|2-HIGH|Clerk#000000878|0|the carefully even packages. 2854|1384|F|232446.67|1994-06-27|1-URGENT|Clerk#000000010|0| furiously ironic tithes use furiously 2855|481|F|63484.19|1993-04-04|4-NOT SPECIFIED|Clerk#000000973|0| silent, regular packages sleep 2880|79|F|172033.71|1992-03-15|2-HIGH|Clerk#000000756|0|ves maintain doggedly spec 2881|986|F|74406.92|1992-05-10|5-LOW|Clerk#000000864|0|uriously. slyly express requests according to the silent dol 2882|1201|O|266004.51|1995-08-22|2-HIGH|Clerk#000000891|0|pending deposits. carefully eve 2883|1208|F|253433.41|1995-01-23|5-LOW|Clerk#000000180|0|uses. carefully ironic accounts lose fluffil 2884|914|O|106271.93|1997-10-12|3-MEDIUM|Clerk#000000780|0|efully express instructions sleep against 2885|61|F|195560.03|1992-09-19|4-NOT SPECIFIED|Clerk#000000280|0|ly sometimes special excuses. final requests are 2886|1075|F|129888.20|1994-11-13|4-NOT SPECIFIED|Clerk#000000619|0|uctions. ironic packages sle 2887|1075|O|34769.89|1997-05-26|5-LOW|Clerk#000000566|0|slyly even pinto beans. slyly bold epitaphs cajole blithely above t 2912|922|F|29086.04|1992-03-12|5-LOW|Clerk#000000186|0|jole blithely above the quickly regular packages. carefully regular pinto bean 2913|424|O|168204.36|1997-07-12|3-MEDIUM|Clerk#000000118|0|mptotes doubt furiously slyly regu 2914|1072|F|87611.96|1993-03-03|3-MEDIUM|Clerk#000000543|0|he slyly regular theodolites are furiously sile 2915|925|F|154725.01|1994-03-31|5-LOW|Clerk#000000410|0|ld packages. bold deposits boost blithely. ironic, unusual theodoli 2916|74|O|35450.91|1995-12-27|2-HIGH|Clerk#000000681|0|ithely blithe deposits sleep beyond the 2917|908|O|155542.57|1997-12-09|4-NOT SPECIFIED|Clerk#000000061|0| special dugouts among the special deposi 2918|1175|O|37282.52|1996-09-08|3-MEDIUM|Clerk#000000439|0|ular deposits across th 2919|526|F|154899.61|1993-12-10|2-HIGH|Clerk#000000209|0|es. pearls wake quietly slyly ironic instructions-- 2944|136|O|196529.29|1997-09-24|4-NOT SPECIFIED|Clerk#000000740|0|deas. permanently special foxes haggle carefully ab 2945|283|O|351345.57|1996-01-03|2-HIGH|Clerk#000000499|0|ons are carefully toward the permanent, bold pinto beans. regu 2946|1243|O|146922.98|1996-02-05|5-LOW|Clerk#000000329|0|g instructions about the regular accounts sleep carefully along the pen 2947|697|P|53117.45|1995-04-26|1-URGENT|Clerk#000000464|0|ronic accounts. accounts run furiously d 2948|439|F|145273.22|1994-08-23|5-LOW|Clerk#000000701|0| deposits according to the blithely pending 2949|1363|F|150199.75|1994-04-12|2-HIGH|Clerk#000000184|0|y ironic accounts use. quickly blithe accou 2950|1345|O|275176.74|1997-07-06|1-URGENT|Clerk#000000833|0| dolphins around the furiously 2951|737|O|183228.93|1996-02-06|2-HIGH|Clerk#000000680|0|gular deposits above the finally regular ideas integrate idly stealthil 2976|281|F|170496.06|1993-12-10|4-NOT SPECIFIED|Clerk#000000159|0|. furiously ironic asymptotes haggle ruthlessly silently regular r 2977|712|O|41482.00|1996-08-27|3-MEDIUM|Clerk#000000252|0|quickly special platelets are furio 2978|439|P|192591.22|1995-05-03|1-URGENT|Clerk#000000135|0|d. even platelets are. 
ironic dependencies cajole slow, e 2979|1330|O|159392.83|1996-03-23|3-MEDIUM|Clerk#000000820|0|even, ironic foxes sleep along 2980|38|O|223259.35|1996-09-14|3-MEDIUM|Clerk#000000661|0|y quick pinto beans wake. slyly re 2981|475|O|49188.07|1998-07-29|5-LOW|Clerk#000000299|0|hely among the express foxes. blithely stealthy requests cajole boldly. regu 2982|844|F|81058.61|1995-03-19|2-HIGH|Clerk#000000402|0|lyly. express theodolites affix slyly after the slyly speci 2983|613|F|83588.10|1992-01-07|1-URGENT|Clerk#000000278|0|r the even requests. accounts maintain. regular accounts 3008|394|O|214071.78|1995-11-08|3-MEDIUM|Clerk#000000701|0|ze quickly. blithely regular packages above the slyly bold foxes shall 3009|544|O|155804.55|1997-02-28|1-URGENT|Clerk#000000205|0|r ideas. carefully pe 3010|76|O|208692.43|1996-01-26|2-HIGH|Clerk#000000931|0| blithely final requests. special deposits are slyl 3011|898|F|54626.00|1992-01-14|5-LOW|Clerk#000000515|0|onic deposits kindle slyly. dependencies around the quickly iro 3012|314|F|144301.93|1993-05-05|1-URGENT|Clerk#000000414|0|ts after the regular pinto beans impress blithely s 3013|1423|O|227439.49|1997-02-05|5-LOW|Clerk#000000591|0|the furiously pendin 3014|289|F|254837.16|1992-10-30|4-NOT SPECIFIED|Clerk#000000476|0|ep blithely according to the blith 3015|1016|F|165794.93|1992-09-27|5-LOW|Clerk#000000013|0|ously regular deposits affix carefully. furiousl 3040|1108|F|159027.67|1993-04-12|3-MEDIUM|Clerk#000000544|0|carefully special packages. blithe 3041|1126|O|34175.78|1997-06-03|5-LOW|Clerk#000000092|0|s. unusual, pending deposits use carefully. thinly final 3042|199|F|112621.21|1994-11-21|3-MEDIUM|Clerk#000000573|0| the slyly ironic depo 3043|434|F|113399.95|1992-04-25|5-LOW|Clerk#000000137|0|cajole blithely furiously fina 3044|529|O|60086.93|1996-04-03|2-HIGH|Clerk#000000008|0|cajole final courts. ironic deposits about the quickly final re 3045|499|O|146623.24|1995-09-27|1-URGENT|Clerk#000000405|0| express courts sleep quickly special asymptotes. 3046|314|O|170565.91|1995-11-30|2-HIGH|Clerk#000000522|0|r deposits. platelets use furi 3047|247|O|39892.35|1997-03-21|1-URGENT|Clerk#000000962|0|as. slyly express deposits are dogged pearls. silent ide 3072|226|F|126150.96|1994-01-30|4-NOT SPECIFIED|Clerk#000000370|0|ely final deposits cajole carefully. ironic, re 3073|1354|F|207755.81|1994-01-08|3-MEDIUM|Clerk#000000404|0|kly slyly bold accounts. express courts near the regular ideas sleep bli 3074|658|F|111588.36|1992-11-01|5-LOW|Clerk#000000546|0|yly even asymptotes shall have to haggle fluffily. deposits are 3075|1252|F|41813.90|1994-05-07|3-MEDIUM|Clerk#000000433|0|ackages: carefully unusual reques 3076|914|F|129065.87|1993-07-23|2-HIGH|Clerk#000000099|0|busy foxes. deposits affix quickly ironic, pending pint 3077|1208|O|157557.18|1997-08-06|2-HIGH|Clerk#000000228|0|kly. fluffily ironic requests use qui 3078|473|F|65211.07|1993-02-12|2-HIGH|Clerk#000000110|0|ounts are alongside of the blith 3079|986|O|192511.27|1997-09-12|5-LOW|Clerk#000000505|0|lly ironic accounts 3104|689|F|148848.19|1993-09-16|3-MEDIUM|Clerk#000000871|0|ges boost-- regular accounts are furiousl 3105|1366|O|185113.31|1996-11-13|4-NOT SPECIFIED|Clerk#000000772|0|s. 
blithely final ins 3106|1445|O|192869.71|1997-01-12|3-MEDIUM|Clerk#000000729|0|its use slyly final theodolites; regular dolphins hang above t 3107|254|O|157543.64|1997-08-21|1-URGENT|Clerk#000000669|0|ously even deposits acr 3108|844|F|75046.18|1993-08-05|1-URGENT|Clerk#000000574|0|s packages haggle furiously am 3109|1222|F|296211.71|1993-07-24|5-LOW|Clerk#000000936|0|bold requests sleep quickly according to the slyly final 3110|874|F|148021.35|1994-12-17|2-HIGH|Clerk#000000564|0|round the fluffy instructions. carefully silent packages cajol 3111|1328|O|208880.19|1995-08-25|5-LOW|Clerk#000000922|0|slyly regular theodolites. furious deposits cajole deposits. ironic theodoli 3136|230|F|195429.40|1994-08-10|4-NOT SPECIFIED|Clerk#000000891|0|tructions sleep slyly. pending di 3137|1342|O|9264.77|1995-07-26|3-MEDIUM|Clerk#000000063|0|ymptotes wake carefully above t 3138|1390|F|210941.18|1994-02-09|4-NOT SPECIFIED|Clerk#000000650|0|e fluffily final theodolites. even dependencies wake along the quickly ir 3139|166|F|56421.93|1992-01-02|3-MEDIUM|Clerk#000000855|0|ounts against the ruthlessly unusual dolphins 3140|1450|F|68160.48|1992-04-09|1-URGENT|Clerk#000000670|0|carefully ironic deposits use furiously. blith 3141|253|O|161595.67|1995-11-10|1-URGENT|Clerk#000000475|0|es. furiously bold instructions after the carefully final p 3142|73|F|17194.09|1992-06-28|3-MEDIUM|Clerk#000000043|0|usual accounts about the carefully special requests sleep slyly quickly regul 3143|1069|F|221770.55|1993-02-17|1-URGENT|Clerk#000000519|0| are final, ironic accounts. ironic 3168|1358|F|101697.90|1992-01-30|5-LOW|Clerk#000000352|0|s sleep slyly? ironic, furious instructions detect. quickly final i 3169|181|F|184054.00|1993-12-21|3-MEDIUM|Clerk#000000252|0| even pinto beans are blithely special, special multip 3170|43|O|292168.10|1997-11-09|1-URGENT|Clerk#000000288|0|requests. furiously bold 3171|466|F|110707.33|1993-04-06|5-LOW|Clerk#000000940|0|ar deposits. idly r 3172|883|F|165727.78|1992-06-03|4-NOT SPECIFIED|Clerk#000000771|0|es. slyly ironic packages x-ra 3173|1462|O|105183.55|1996-08-10|5-LOW|Clerk#000000516|0|ial requests lose along t 3174|1264|O|152505.56|1995-11-15|5-LOW|Clerk#000000663|0|rts. silent, regular pinto beans are blithely regular packages. furiousl 3175|439|F|252261.28|1994-07-15|5-LOW|Clerk#000000629|0| across the slyly even realms use carefully ironic deposits: sl 3200|122|O|178687.59|1996-02-07|1-URGENT|Clerk#000000020|0| regular dependencies impress evenly even excuses. blithely 3201|952|F|100522.53|1993-07-02|4-NOT SPECIFIED|Clerk#000000738|0|. busy, express instruction 3202|868|F|72537.65|1992-12-24|5-LOW|Clerk#000000067|0|fluffily express requests affix carefully around th 3203|1259|O|71682.53|1997-11-05|2-HIGH|Clerk#000000493|0|e furiously silent warhorses. slyly silent deposits wake bli 3204|88|F|44713.55|1992-12-26|1-URGENT|Clerk#000000693|0|ess somas cajole slyly. pending accounts cajole 3205|1463|F|215063.16|1992-04-11|5-LOW|Clerk#000000803|0|e furiously. quickly regular dinos about the final pinto be 3206|1213|O|82248.70|1996-08-09|1-URGENT|Clerk#000000755|0|ntegrate furiously final, express 3207|205|O|184595.78|1998-02-16|1-URGENT|Clerk#000000695|0|uriously accounts. fluffily i 3232|809|F|66817.67|1992-10-09|1-URGENT|Clerk#000000314|0|yly final accounts. packages agains 3233|1397|F|70920.20|1994-10-24|5-LOW|Clerk#000000470|0|ly ironic epitaphs use stealthy, express deposits. 
quickly regular instruct 3234|139|O|218988.88|1996-04-05|4-NOT SPECIFIED|Clerk#000000367|0|ents according to the dependencies will sleep after the blithely even p 3235|448|O|160699.91|1995-11-15|5-LOW|Clerk#000000349|0| quickly pinto beans. ironi 3236|1405|O|42427.77|1996-11-06|4-NOT SPECIFIED|Clerk#000000553|0|ithely slyly pending req 3237|182|F|11628.02|1992-06-03|1-URGENT|Clerk#000000606|0|inal requests. slyly even foxes detect about the furiously exp 3238|610|F|64556.89|1993-02-21|5-LOW|Clerk#000000818|0|lly express deposits are. furiously unusual ideas wake carefully somas. instr 3239|343|O|219052.93|1998-01-12|4-NOT SPECIFIED|Clerk#000000619|0| cajole carefully along the furiously pending deposits. 3264|925|O|214961.96|1996-11-02|5-LOW|Clerk#000000244|0|carefully. express, bold 3265|523|F|68739.22|1992-06-27|1-URGENT|Clerk#000000265|0|re quickly quickly pe 3266|37|P|99432.16|1995-03-17|5-LOW|Clerk#000000545|0|refully ironic instructions. slyly final pi 3267|1120|O|54666.17|1997-01-07|5-LOW|Clerk#000000484|0| the packages. regular decoys about the bold dependencies grow fi 3268|1414|F|50963.54|1994-06-25|5-LOW|Clerk#000000746|0|y brave requests unwind furiously accordin 3269|169|O|331533.22|1996-03-01|3-MEDIUM|Clerk#000000378|0|ts. accounts wake carefully. carefully dogged accounts wake slyly slyly i 3270|379|O|240687.99|1997-05-28|1-URGENT|Clerk#000000375|0|uffily pending courts ca 3271|331|F|138053.06|1992-01-01|1-URGENT|Clerk#000000421|0|s. furiously regular requests 3296|1471|F|280148.00|1994-10-19|3-MEDIUM|Clerk#000000991|0|as! carefully final requests wake. furiously even 3297|1385|F|11553.32|1992-11-03|2-HIGH|Clerk#000000220|0| after the theodolites cajole carefully according to the finally 3298|1159|O|91070.97|1996-04-17|5-LOW|Clerk#000000241|0|even accounts boost 3299|892|F|68340.62|1993-12-26|3-MEDIUM|Clerk#000000853|0|bold deposits. special instructions sleep care 3300|1168|O|35168.76|1995-07-15|5-LOW|Clerk#000000198|0|ses. carefully unusual instructions must have to detect about the blithel 3301|1322|F|72199.05|1994-09-04|4-NOT SPECIFIED|Clerk#000000325|0|ular gifts impress enticingly carefully express deposits; instructions boo 3302|334|O|51365.63|1995-11-14|2-HIGH|Clerk#000000367|0|eep blithely ironic requests. quickly even courts haggle slyly 3303|1435|O|155557.55|1997-12-14|4-NOT SPECIFIED|Clerk#000000661|0|nto beans sleep furiously above the carefully ironic 3328|65|F|192264.80|1992-11-19|5-LOW|Clerk#000000384|0|ake among the express accounts? carefully ironic packages cajole never. 3329|34|O|55269.13|1995-07-03|2-HIGH|Clerk#000000236|0|old deposits. special accounts haggle furiousl 3330|65|F|51302.83|1994-12-19|1-URGENT|Clerk#000000124|0|kages use. carefully regular deposits cajole carefully about 3331|907|F|77649.29|1993-05-21|2-HIGH|Clerk#000000901|0|uffily carefully sly accounts. blithely unu 3332|1424|F|103092.90|1994-11-05|1-URGENT|Clerk#000000840|0|ans detect carefully furiously final deposits: regular accoun 3333|913|F|252419.80|1992-09-16|4-NOT SPECIFIED|Clerk#000000157|0|ctions boost slyly quickly even accounts. deposits along 3334|751|O|47161.56|1996-02-18|5-LOW|Clerk#000000532|0|ounts maintain carefully. furiously close request 3335|487|O|162808.07|1995-10-15|3-MEDIUM|Clerk#000000694|0| deposits poach. ironic ideas about the carefully ironi 3360|1030|O|266634.93|1998-01-23|5-LOW|Clerk#000000254|0| the deposits. fluffily bold requests cajole regula 3361|484|F|116078.71|1992-08-23|4-NOT SPECIFIED|Clerk#000000577|0|unts detect furiously instructions. 
slow deposi 3362|1399|O|256039.11|1995-07-29|5-LOW|Clerk#000000011|0|the quickly pending deposits. silent, ev 3363|512|O|114518.93|1995-09-23|2-HIGH|Clerk#000000615|0|posits. ironic, final deposits are furiously slyly pending 3364|448|O|157350.59|1997-06-21|1-URGENT|Clerk#000000280|0|y even foxes? blithely stea 3365|820|F|240272.44|1994-11-09|2-HIGH|Clerk#000000126|0|he slyly regular foxes nag about the accounts. fluffily 3366|508|O|17060.87|1997-05-18|1-URGENT|Clerk#000000160|0| pinto beans upon the quickly expres 3367|727|F|123759.09|1992-12-31|4-NOT SPECIFIED|Clerk#000000029|0|efully blithely ironic pinto beans. carefully close 3392|737|O|124060.27|1995-10-28|1-URGENT|Clerk#000000325|0|es thrash blithely depths. bold multipliers wake f 3393|973|O|270663.25|1995-07-04|2-HIGH|Clerk#000000076|0|even requests. excuses are carefully deposits. fluf 3394|1486|O|231035.67|1996-05-05|4-NOT SPECIFIED|Clerk#000000105|0| blithely among the attainments. carefully final accounts nag blit 3395|1496|F|180443.18|1994-10-30|4-NOT SPECIFIED|Clerk#000000682|0|ideas haggle beside the ev 3396|1492|F|277647.09|1994-05-21|3-MEDIUM|Clerk#000000868|0|uffily regular platelet 3397|1297|F|117210.56|1994-06-23|3-MEDIUM|Clerk#000000048|0|yly. final deposits wake f 3398|668|O|1744.64|1996-09-23|1-URGENT|Clerk#000000818|0|uthless, special courts atop the unusual accounts grow fur 3399|1220|P|70777.98|1995-02-28|4-NOT SPECIFIED|Clerk#000000575|0|the carefully sly accounts. regular, pending theodolites wa 3424|1013|O|67108.17|1996-08-21|1-URGENT|Clerk#000000190|0|ven requests are quickly pending accounts. blithely furious requests 3425|1150|O|230685.11|1996-03-31|4-NOT SPECIFIED|Clerk#000000188|0|ions. deposits nag blithely alongside of the carefully f 3426|521|O|118942.14|1996-10-16|3-MEDIUM|Clerk#000000283|0|alongside of the slyly 3427|28|O|180376.94|1997-05-29|4-NOT SPECIFIED|Clerk#000000404|0|y final pinto beans snooze fluffily bold asymptot 3428|85|O|103376.25|1996-04-07|5-LOW|Clerk#000000953|0|lar excuses. slyly pending ideas detect p 3429|1453|O|207631.78|1997-01-06|4-NOT SPECIFIED|Clerk#000000737|0|l deposits cajole furiously enticing deposits. blithe packages haggle careful 3430|1121|F|270267.01|1994-12-12|4-NOT SPECIFIED|Clerk#000000664|0| regular attainments are at the final foxes. final packages along the blithe 3431|463|F|71445.89|1993-08-22|1-URGENT|Clerk#000000439|0| sleep. slyly busy Tiresias a 3456|448|F|32601.42|1993-06-01|5-LOW|Clerk#000000924|0|es promise slyly. ironicall 3457|241|P|207252.29|1995-04-27|4-NOT SPECIFIED|Clerk#000000849|0|ely thin asymptotes. deposits kindle. pending 3458|946|F|197366.63|1994-12-22|2-HIGH|Clerk#000000392|0|rges snooze. slyly unusua 3459|1187|F|179504.40|1994-07-28|4-NOT SPECIFIED|Clerk#000000777|0|n instructions? carefully regular excuses are blithely. silent, ironi 3460|808|O|354036.76|1995-10-03|2-HIGH|Clerk#000000078|0|ans integrate carefu 3461|986|F|320694.21|1993-01-31|1-URGENT|Clerk#000000504|0|al, bold deposits cajole fluffily fluffily final foxes. pending ideas beli 3462|1328|O|86823.93|1997-05-17|3-MEDIUM|Clerk#000000657|0|uriously express asympto 3463|884|F|147059.62|1993-08-18|1-URGENT|Clerk#000000545|0|ding to the carefully ironic deposits 3488|1477|F|106165.56|1995-01-08|3-MEDIUM|Clerk#000000694|0|cording to the carefully regular deposits. re 3489|1072|F|86269.91|1993-07-29|3-MEDIUM|Clerk#000000307|0|s detect. carefully even platelets across the fur 3490|899|O|180759.57|1997-05-26|5-LOW|Clerk#000000703|0|gular ideas. 
furiously silent deposits across the unusual accounts boost i 3491|826|O|62878.19|1998-06-24|1-URGENT|Clerk#000000560|0|nic orbits believe carefully across the 3492|1019|F|193799.44|1994-11-24|5-LOW|Clerk#000000066|0|packages along the regular foxes lose final dependencie 3493|805|F|69650.05|1993-08-24|2-HIGH|Clerk#000000887|0|lyly special accounts use blithely across the furiously sil 3494|472|F|204991.65|1993-04-04|5-LOW|Clerk#000000559|0|r instructions haggle. accounts cajole. carefully final requests at the 3495|307|O|87879.06|1996-02-26|2-HIGH|Clerk#000000441|0|nticing excuses are carefully 3520|1250|O|217904.72|1997-08-04|1-URGENT|Clerk#000000023|0|hely. ideas nag; even, even fo 3521|68|F|207058.26|1992-10-26|5-LOW|Clerk#000000812|0|y even instructions cajole carefully above the bli 3522|256|F|228929.46|1994-09-26|5-LOW|Clerk#000000250|0|deposits-- slyly stealthy requests boost caref 3523|1499|O|179947.37|1998-04-07|2-HIGH|Clerk#000000688|0|are on the carefully even depe 3524|934|F|28748.76|1992-05-03|2-HIGH|Clerk#000000607|0|efully unusual tithes among the foxes use blithely daringly bold deposits. re 3525|1072|O|151567.42|1995-12-22|4-NOT SPECIFIED|Clerk#000000084|0|s nag among the blithely e 3526|559|F|70730.17|1995-03-16|5-LOW|Clerk#000000364|0|to the quickly special deposits print agai 3527|557|O|173421.31|1997-06-21|5-LOW|Clerk#000000874|0|regular ideas across the quickly bold theodo 3552|343|O|170016.99|1997-04-23|2-HIGH|Clerk#000000973|0| the ironic packages. furiously 3553|908|F|165037.95|1994-05-18|3-MEDIUM|Clerk#000000270|0|counts mold furiously. slyly i 3554|439|O|151890.41|1995-06-17|5-LOW|Clerk#000000931|0|hely ironic requests haggl 3555|455|O|178654.01|1996-07-07|5-LOW|Clerk#000000585|0|s nag carefully regular, even pinto be 3556|148|F|159831.74|1992-09-23|4-NOT SPECIFIED|Clerk#000000140|0|e. dependencies need to haggle alongs 3557|1192|F|116288.27|1992-11-09|2-HIGH|Clerk#000000291|0|ithely courts. furi 3558|265|O|162598.01|1996-02-29|1-URGENT|Clerk#000000841|0|around the furiously even requests. quickl 3559|1054|F|55757.49|1992-10-24|3-MEDIUM|Clerk#000000634|0|sly deposits. fluffily final ideas cajole careful 3584|118|O|101937.04|1997-08-11|1-URGENT|Clerk#000000760|0|fully bold packages. fluffily final braids haggle final, ironic dolphins. b 3585|1381|F|206416.27|1994-11-23|2-HIGH|Clerk#000000988|0|regular asymptotes. bold pains above the carefully pending asymptot 3586|1201|F|171227.96|1993-12-05|2-HIGH|Clerk#000000438|0|he quickly final courts. carefully regular requests nag unusua 3587|775|O|225119.62|1996-05-10|4-NOT SPECIFIED|Clerk#000000443|0|ular patterns detect 3588|1186|F|287191.29|1995-03-19|4-NOT SPECIFIED|Clerk#000000316|0|ong the pains. evenly unusual 3589|307|F|52846.72|1994-05-26|2-HIGH|Clerk#000000023|0|ithe deposits nag furiously. furiously pending packages sleep f 3590|1489|P|352720.59|1995-05-13|5-LOW|Clerk#000000986|0|lyly final deposits. 3591|1345|F|138408.31|1993-12-08|3-MEDIUM|Clerk#000000144|0|ual foxes haggle! unusual request 3616|1273|F|90467.25|1994-02-16|4-NOT SPECIFIED|Clerk#000000268|0|uickly about the quickly final requests. fluffily final packages wake evenly 3617|394|O|181875.51|1996-03-19|3-MEDIUM|Clerk#000000886|0|the carefully regular platelets ha 3618|98|O|185247.02|1997-12-13|3-MEDIUM|Clerk#000000894|0|. ideas run carefully. thin, pending 3619|1489|O|320560.24|1996-11-20|2-HIGH|Clerk#000000211|0|uests mold after the blithely ironic excuses. 
slyly pending pa 3620|440|O|90481.44|1997-03-07|5-LOW|Clerk#000000124|0|le quickly against the epitaphs. requests sleep slyly according to the 3621|1408|F|149743.36|1993-05-06|3-MEDIUM|Clerk#000000643|0|kly unusual deposits. qu 3622|901|O|174438.86|1995-11-27|5-LOW|Clerk#000000012|0|c deposits are fluffily about the blithely final theo 3623|29|O|253649.01|1996-12-26|1-URGENT|Clerk#000000184|0|- ironic excuses boost quickly in place 3648|1249|F|240597.67|1993-06-17|5-LOW|Clerk#000000717|0|foxes. unusual deposits boost quickly. slyly regular asymptotes across t 3649|388|F|202043.25|1994-07-06|5-LOW|Clerk#000000349|0|taphs boost above the final p 3650|445|F|280273.39|1992-05-28|4-NOT SPECIFIED|Clerk#000000454|0|kages sleep fluffily slyly 3651|986|O|125508.45|1998-04-27|1-URGENT|Clerk#000000222|0|ly unusual deposits thrash quickly after the ideas. 3652|1066|O|150080.43|1997-02-25|4-NOT SPECIFIED|Clerk#000000024|0|sly even requests after the 3653|383|F|215841.09|1994-03-27|1-URGENT|Clerk#000000402|0| pearls. bold accounts are along the ironic, 3654|56|F|326681.49|1992-06-03|5-LOW|Clerk#000000475|0|s cajole slyly carefully special theodolites. even deposits haggl 3655|487|F|111868.80|1992-10-06|1-URGENT|Clerk#000000815|0|er the carefully unusual deposits sleep quickly according to 3680|1267|F|172093.20|1992-12-10|4-NOT SPECIFIED|Clerk#000000793|0|ular platelets. carefully regular packages cajole blithely al 3681|517|F|34981.18|1992-04-04|1-URGENT|Clerk#000000566|0|. ironic deposits against the ironic, regular frets use pending plat 3682|317|O|93227.96|1997-01-22|2-HIGH|Clerk#000000001|0|es haggle carefully. decoys nag 3683|874|F|137933.64|1993-03-04|2-HIGH|Clerk#000000248|0|ze across the express foxes. carefully special acco 3684|229|F|109708.71|1993-07-20|2-HIGH|Clerk#000000835|0|bold accounts affix along the carefully ironic requ 3685|158|F|226866.11|1992-01-17|3-MEDIUM|Clerk#000000954|0| sleep fluffily special ide 3686|400|O|124290.79|1998-07-07|2-HIGH|Clerk#000000175|0|s. furiously final pinto beans poach carefully among 3687|422|F|127789.54|1993-02-03|1-URGENT|Clerk#000000585|0|gular accounts. slyly regular instructions can are final ide 3712|640|F|181818.96|1992-01-02|2-HIGH|Clerk#000000032|0| promise according 3713|1498|O|315464.25|1998-05-07|3-MEDIUM|Clerk#000000325|0|s haggle quickly. ironic, regular Tiresi 3714|392|O|115054.72|1998-05-01|3-MEDIUM|Clerk#000000595|0|nding accounts. ironic pinto beans wake slyly. furiously pendin 3715|644|O|88203.51|1996-03-18|1-URGENT|Clerk#000000463|0| always silent requests wake pinto beans. slyly pending foxes are aga 3716|415|O|202853.22|1997-08-19|4-NOT SPECIFIED|Clerk#000000748|0| pending ideas haggle. ironic, 3717|266|O|254701.10|1998-06-03|4-NOT SPECIFIED|Clerk#000000974|0|t the carefully even ideas use sp 3718|305|O|83545.86|1996-10-23|2-HIGH|Clerk#000000016|0|refully. furiously final packages use carefully slyly pending deposits! final, 3719|1180|O|194113.04|1997-02-16|2-HIGH|Clerk#000000034|0|, enticing accounts are blithely among the daringly final asymptotes. furious 3744|643|F|55886.68|1992-01-10|3-MEDIUM|Clerk#000000765|0|osits sublate about the regular requests. fluffily unusual accou 3745|1118|F|23657.43|1993-09-29|5-LOW|Clerk#000000181|0|ckages poach slyly against the foxes. slyly ironic instructi 3746|731|F|109530.26|1994-09-11|4-NOT SPECIFIED|Clerk#000000188|0|. express, special requests nag quic 3747|1492|O|258977.08|1996-08-20|1-URGENT|Clerk#000000226|0|refully across the final theodolites. 
carefully bold accounts cajol 3748|521|O|118550.25|1998-02-28|1-URGENT|Clerk#000000156|0|slyly special packages 3749|376|P|134685.89|1995-02-24|3-MEDIUM|Clerk#000000639|0|y regular instructions haggle blithel 3750|965|P|227479.98|1995-04-30|3-MEDIUM|Clerk#000000885|0|y. express, even packages wake after the ide 3751|91|F|296988.49|1994-04-27|4-NOT SPECIFIED|Clerk#000000925|0|sheaves. express, unusual t 3776|844|F|210295.67|1992-11-20|2-HIGH|Clerk#000000698|0|efully even platelets slee 3777|272|F|118933.43|1994-04-08|3-MEDIUM|Clerk#000000941|0| regular, special dolphins cajole enticingly ca 3778|1054|F|314918.09|1993-05-26|1-URGENT|Clerk#000000187|0| above the express requests. packages maintain fluffily according to 3779|733|O|43092.48|1997-01-05|4-NOT SPECIFIED|Clerk#000000670|0| against the deposits. quickly bold instructions x-ray. pending fox 3780|410|O|95100.73|1996-04-13|5-LOW|Clerk#000000967|0| around the brave, pendin 3781|1384|O|175615.29|1996-06-20|1-URGENT|Clerk#000000394|0|yly after the ruthless packages. pinto beans use slyly: never ironic dependenc 3782|650|O|170815.80|1996-08-24|1-URGENT|Clerk#000000121|0|counts are. pending, regular asym 3783|436|F|242713.60|1993-12-06|4-NOT SPECIFIED|Clerk#000000614|0| along the pinto beans. special packages use. regular theo 3808|778|F|320547.32|1994-04-24|1-URGENT|Clerk#000000717|0|odolites. blithely ironic cour 3809|1474|O|185067.55|1996-05-01|5-LOW|Clerk#000000646|0| regular excuses. even theodolites are fluffily according to t 3810|997|F|181248.66|1992-09-17|1-URGENT|Clerk#000000660|0|ters sleep across the carefully final 3811|799|O|215676.13|1998-04-16|3-MEDIUM|Clerk#000000290|0|sits wake slyly abo 3812|406|O|98698.11|1996-08-13|3-MEDIUM|Clerk#000000727|0|al, final requests cajole 3813|1459|O|102438.30|1998-06-29|1-URGENT|Clerk#000000531|0|g the furiously regular instructions 3814|1162|P|222423.89|1995-02-22|5-LOW|Clerk#000000669|0| the furiously pending theodo 3815|1039|O|18092.99|1997-08-26|1-URGENT|Clerk#000000249|0|es snooze carefully stealth 3840|985|O|270240.36|1998-07-17|4-NOT SPECIFIED|Clerk#000000713|0|yly slow theodolites. enticingly 3841|577|F|176161.65|1994-10-05|4-NOT SPECIFIED|Clerk#000000018|0| bold requests sleep quickly ironic packages. sometimes regular deposits nag 3842|269|F|184717.13|1992-04-09|5-LOW|Clerk#000000418|0|silent ideas. final deposits use furiously. blithely express excuses cajole fu 3843|85|O|35023.79|1997-01-04|4-NOT SPECIFIED|Clerk#000000693|0|eodolites; slyly unusual accounts nag boldly 3844|784|F|6858.13|1994-12-29|1-URGENT|Clerk#000000686|0|r dolphins. slyly ironic theodolites ag 3845|887|F|180711.26|1992-04-26|1-URGENT|Clerk#000000404|0|es among the pending, regular accounts sleep blithely blithely even de 3846|475|O|168131.67|1998-02-05|2-HIGH|Clerk#000000877|0|y alongside of the slyl 3847|337|F|11494.62|1993-03-12|5-LOW|Clerk#000000338|0|uriously even deposits. furiously pe 3872|1331|O|269197.14|1996-09-06|5-LOW|Clerk#000000943|0|counts boost slyly against the ironic platelets-- blithely p 3873|535|O|127910.27|1998-03-30|4-NOT SPECIFIED|Clerk#000000791|0|express deposits-- even ideas 3874|1186|F|85524.46|1993-06-09|3-MEDIUM|Clerk#000000208|0|ular asymptotes sleep blithely ironic ideas. blithel 3875|1177|O|93803.50|1997-09-10|1-URGENT|Clerk#000000587|0| solve among the fluffily even 3876|283|O|113879.01|1996-08-02|5-LOW|Clerk#000000708|0|into beans. blithely 3877|164|F|239783.44|1993-05-21|5-LOW|Clerk#000000652|0|foxes. 
thinly bold reques 3878|866|O|90013.77|1997-03-23|1-URGENT|Clerk#000000314|0|e carefully regular platelets. special, express dependencies slee 3879|1408|O|100898.76|1995-11-23|1-URGENT|Clerk#000000231|0|sts along the quickly ironic sentiments cajole carefully according to t 3904|1483|O|58126.35|1997-11-15|4-NOT SPECIFIED|Clerk#000000883|0|sits haggle furiously across the requests. theodolites ha 3905|211|F|55580.47|1993-12-21|4-NOT SPECIFIED|Clerk#000000573|0|usly even accounts lose quietly above the slyly express p 3906|443|F|208585.62|1992-05-28|3-MEDIUM|Clerk#000000867|0|ironic theodolites haggle blithely above the final re 3907|661|F|318521.56|1992-08-19|3-MEDIUM|Clerk#000000084|0|gular pinto beans sleep f 3908|412|F|100534.13|1993-03-09|3-MEDIUM|Clerk#000000490|0|ounts cajole. regularly 3909|206|O|133975.94|1998-07-27|1-URGENT|Clerk#000000980|0|nic, special theodolites sleep furiously! furiously 3910|629|O|70759.37|1996-08-26|3-MEDIUM|Clerk#000000270|0|ickly. furiously final packag 3911|89|P|45445.54|1995-03-17|4-NOT SPECIFIED|Clerk#000000818|0|he fluffily final forges haggle slyly according to the blithely 3936|314|O|255569.92|1996-11-07|2-HIGH|Clerk#000000200|0|iously express packages engage slyly fina 3937|935|O|254234.45|1997-11-30|4-NOT SPECIFIED|Clerk#000000189|0|ckages boost carefully blithely q 3938|298|F|65808.22|1993-03-03|1-URGENT|Clerk#000000199|0|. unusual, final foxes haggle 3939|682|O|12318.56|1996-01-11|5-LOW|Clerk#000000647|0|ly ruthlessly silent requests. blithely regular requests haggle blithely wh 3940|1495|O|180309.76|1996-02-14|5-LOW|Clerk#000000363|0|e above the ideas. quickly even dependencies along the blithely ir 3941|1354|O|114660.33|1996-08-29|2-HIGH|Clerk#000000503|0|gular theodolites integrate quickly 3942|754|F|56198.29|1993-06-28|4-NOT SPECIFIED|Clerk#000000608|0|eas cajole bold requests. idly silent instructions 3943|397|O|87171.39|1996-10-09|5-LOW|Clerk#000000482|0|se alongside of the final pinto beans. regular packages boost across the ca 3968|242|O|166197.28|1997-02-17|4-NOT SPECIFIED|Clerk#000000431|0| the slyly special accounts; 3969|512|O|269722.89|1997-05-14|2-HIGH|Clerk#000000731|0|uriously final dependencies slee 3970|751|F|185828.35|1992-03-27|3-MEDIUM|Clerk#000000190|0|luffily furiously regular deposits. blithely special requests cajole blithely 3971|1033|O|88799.11|1996-06-28|5-LOW|Clerk#000000287|0|alongside of the instructions ought to are 3972|1225|F|2750.56|1994-04-21|3-MEDIUM|Clerk#000000049|0|y regular requests haggle quickly. pending, express acco 3973|1018|F|112002.24|1992-03-24|4-NOT SPECIFIED|Clerk#000000114|0|somas according to the quickly even instructions wake fu 3974|931|O|74431.82|1996-03-05|4-NOT SPECIFIED|Clerk#000000938|0|deposits are furiously beneath the bl 3975|1175|O|57733.07|1995-04-11|3-MEDIUM|Clerk#000000016|0|ts. regular, regular Tiresias play furiously. ironi 4000|697|F|142251.94|1992-01-04|5-LOW|Clerk#000000339|0|le carefully closely even pinto beans. regular, ironic foxes against the 4001|1141|O|116208.23|1997-05-15|3-MEDIUM|Clerk#000000878|0|detect. asymptotes sleep furio 4002|1036|O|102892.70|1997-04-08|5-LOW|Clerk#000000097|0| regular braids are. furiously even patterns agains 4003|1102|F|26116.74|1993-01-27|1-URGENT|Clerk#000000177|0| blithe theodolites are slyly. 
slyly silent accounts toward 4004|683|F|302888.59|1993-05-07|3-MEDIUM|Clerk#000000273|0|accounts among the blithely regular sentiments 4005|1394|O|158486.49|1996-11-20|2-HIGH|Clerk#000000341|0|ily according to the slyly iron 4006|343|F|87327.23|1995-01-04|3-MEDIUM|Clerk#000000765|0|ly ironic packages integrate. regular requests alongside of 4007|80|F|143238.58|1993-06-18|2-HIGH|Clerk#000000623|0|ecial packages. slyly regular accounts integrate 4032|100|O|78049.89|1998-02-26|3-MEDIUM|Clerk#000000686|0|iresias sleep slyly regular ideas. quickly unusual 4033|830|F|68013.86|1993-06-02|5-LOW|Clerk#000000181|0|ously bold instructions haggle furiously above the fluf 4034|925|F|282204.87|1993-11-14|4-NOT SPECIFIED|Clerk#000000548|0|ts x-ray. express requests affix fluffily regular theodolites. pending, fina 4035|1165|F|35308.69|1992-02-19|5-LOW|Clerk#000000097|0|he ironic deposits sleep blith 4036|469|O|98412.41|1997-04-26|3-MEDIUM|Clerk#000000398|0|ly express deposits nag slyly. ironic, final asymptotes boost bra 4037|1210|F|57520.19|1993-03-24|2-HIGH|Clerk#000000384|0|t carefully above the unusual the 4038|937|O|223001.93|1996-01-06|1-URGENT|Clerk#000000272|0|re slyly. silent requests wake quickly. regular packages play quickly 4039|281|O|201017.09|1997-11-16|1-URGENT|Clerk#000000358|0|ly ironic deposits. ironic reques 4064|1288|O|231293.88|1996-10-10|4-NOT SPECIFIED|Clerk#000000598|0|ccounts. furiously unusual theodolites wake carefully about 4065|794|F|200609.19|1994-06-09|1-URGENT|Clerk#000000131|0|even foxes! slyly final deposits agai 4066|313|O|250572.93|1997-01-27|4-NOT SPECIFIED|Clerk#000000286|0|yly ironic dinos. quickly regular accounts haggle. requests wa 4067|145|F|232299.26|1992-10-07|2-HIGH|Clerk#000000027|0|tes boost furiously quick asymptotes. final deposits of the dolphins solv 4068|1250|O|86200.64|1996-09-18|3-MEDIUM|Clerk#000000203|0|lly even accounts wake furiously across the unusual platelets. unusu 4069|727|F|286148.59|1992-05-13|3-MEDIUM|Clerk#000000359|0|deposits: slyly bold ideas detect furiously. f 4070|284|O|133677.58|1995-06-12|2-HIGH|Clerk#000000713|0|xpress ideas poach ab 4071|1462|O|75727.31|1996-09-15|4-NOT SPECIFIED|Clerk#000000486|0|nal deposits. pending deposits d 4096|1378|F|100856.31|1992-07-03|4-NOT SPECIFIED|Clerk#000000706|0|sits. quickly thin deposits x-ray blith 4097|85|O|218589.15|1996-05-24|1-URGENT|Clerk#000000475|0|ickly under the even accounts. even packages after the furiously express 4098|221|O|83587.82|1996-11-05|4-NOT SPECIFIED|Clerk#000000491|0|otes. quickly final requests after the stealthily ironic pinto bean 4099|161|F|297452.57|1992-08-21|1-URGENT|Clerk#000000379|0|r platelets. slyly regular requests cajole carefully against the 4100|22|O|6545.02|1996-03-12|3-MEDIUM|Clerk#000000429|0|posits. carefully unusual packages use pending deposits. regular she 4101|1417|F|22280.28|1993-11-22|4-NOT SPECIFIED|Clerk#000000704|0|y around the express, careful epitaphs. accounts use fluffily. quickly p 4102|208|O|166220.63|1996-03-17|1-URGENT|Clerk#000000675|0|nding dependencies was slyly about the bl 4103|1048|F|64531.68|1992-07-03|5-LOW|Clerk#000000679|0|fully ironic dependencies. 4128|1390|O|9289.86|1995-10-07|4-NOT SPECIFIED|Clerk#000000635|0|ctions. dependencies from the slyly regular accounts nag slyly fu 4129|317|F|92828.14|1993-06-26|3-MEDIUM|Clerk#000000541|0|nwind. quickly final theodolites use packages. accounts 4130|1033|O|74653.84|1996-03-10|5-LOW|Clerk#000000609|0|omise alongside of the carefully final foxes. 
blithel 4131|436|O|174653.40|1998-01-30|1-URGENT|Clerk#000000612|0| above the foxes hang 4132|179|P|88197.43|1995-05-29|4-NOT SPECIFIED|Clerk#000000158|0|ld asymptotes solve alongside of the express, final packages. fluffily fi 4133|1001|F|39006.98|1992-08-07|4-NOT SPECIFIED|Clerk#000000268|0|al, express foxes. quickly pending deposits might cajole alongsi 4134|961|F|160154.58|1995-01-12|1-URGENT|Clerk#000000171|0|fully even deposits. regular de 4135|361|O|126584.08|1997-03-10|3-MEDIUM|Clerk#000000627|0|ly quietly even ideas. deposits haggle blithely 4160|542|O|110944.85|1996-08-20|5-LOW|Clerk#000000283|0|the carefully special accounts. furiously regular dugouts alongs 4161|1177|F|241960.17|1993-08-21|5-LOW|Clerk#000000047|0|nts. fluffily regular foxes above the quickly daring reques 4162|212|F|125408.74|1992-02-10|5-LOW|Clerk#000000179|0|r packages are slyly accounts. furiously special foxes detect carefully re 4163|628|F|15143.73|1992-12-21|2-HIGH|Clerk#000000268|0| integrate furiously slyly regular depende 4164|940|O|9375.68|1998-07-03|2-HIGH|Clerk#000000720|0| regularly busy theodolites boost furiously quickly bold packages. express, s 4165|29|O|15785.08|1997-07-25|3-MEDIUM|Clerk#000000621|0|special foxes affix never blithely ironic pinto beans; blithely 4166|413|F|145533.27|1993-02-28|5-LOW|Clerk#000000757|0|quickly sly forges impress. careful foxes across the blithely even a 4167|262|O|101266.76|1998-06-17|1-URGENT|Clerk#000000917|0|kly furiously even deposits. unu 4192|1453|O|256886.42|1998-04-19|1-URGENT|Clerk#000000369|0|equests above the slyly regular pinto beans unwi 4193|34|F|189411.15|1994-01-09|2-HIGH|Clerk#000000201|0|ng accounts haggle quickly. packages use fluffily ironic excu 4194|1058|F|102889.89|1994-10-16|3-MEDIUM|Clerk#000000385|0| instructions are quickly even pinto beans. courts boost furiously regular, ev 4195|1037|F|82719.34|1993-05-29|4-NOT SPECIFIED|Clerk#000000777|0| pinto beans cajole furiously theodolites-- slyly regular deposits doub 4196|1058|O|280403.32|1998-05-15|3-MEDIUM|Clerk#000000532|0|affix carefully. quickly final requests 4197|919|O|294678.85|1996-08-13|4-NOT SPECIFIED|Clerk#000000264|0| pinto beans according 4198|1429|O|135740.09|1997-06-16|3-MEDIUM|Clerk#000000583|0|g the special packages haggle pen 4199|41|F|40988.20|1992-02-13|1-URGENT|Clerk#000000309|0|e blithely. special deposits haggle slyly final foxes. carefully even 4224|689|O|237454.70|1997-07-14|1-URGENT|Clerk#000000034|0|jole quickly final dolphins. slyly pending foxes wake furiously bold pl 4225|1277|O|126532.76|1997-06-03|3-MEDIUM|Clerk#000000992|0|r the platelets nag among the special deposits. ironic, ironic re 4226|919|F|48595.07|1993-03-09|5-LOW|Clerk#000000203|0|phins wake slyly regular packages. deposits haggle slowl 4227|1318|F|127357.75|1995-02-24|1-URGENT|Clerk#000000063|0|ng the requests; ideas haggle fluffily. slyly unusual ideas c 4228|1100|O|27801.89|1997-03-28|5-LOW|Clerk#000000309|0|pecial requests aft 4229|133|O|115394.17|1998-03-03|1-URGENT|Clerk#000000301|0|p furiously: final excuses hagg 4230|1396|F|318270.36|1992-03-04|1-URGENT|Clerk#000000364|0|lly ironic deposits integrate carefully about the fu 4231|859|O|139085.25|1997-11-20|4-NOT SPECIFIED|Clerk#000000630|0|ly final accounts cajole furiously accounts. bravely ironic platelets am 4256|1178|F|30843.69|1992-04-05|4-NOT SPECIFIED|Clerk#000000043|0|y alongside of the fluffily iro 4257|163|P|49760.53|1995-03-25|3-MEDIUM|Clerk#000000682|0|r ideas cajole along the blithely regular gifts. 
4258|916|O|184702.51|1996-10-27|4-NOT SPECIFIED|Clerk#000000364|0|efully final platelets around the blit 4259|1037|O|18170.62|1997-10-09|5-LOW|Clerk#000000781|0|es snooze slyly against the furiously unusual ideas. furious 4260|1418|F|22789.85|1992-05-16|4-NOT SPECIFIED|Clerk#000000919|0|e among the fluffily bold accounts. 4261|1174|F|114623.71|1992-10-03|1-URGENT|Clerk#000000662|0| about the even, pending packages. slyly bold deposits boost 4262|868|O|278346.38|1996-08-04|3-MEDIUM|Clerk#000000239|0| of the furious accounts. furiously regular accounts w 4263|25|O|200998.16|1998-03-16|1-URGENT|Clerk#000000265|0|sly ruthless deposits. final packages are instructions. fu 4288|340|F|94099.00|1992-12-04|4-NOT SPECIFIED|Clerk#000000823|0|usly carefully even theodolites: slyly express pac 4289|1246|F|35192.89|1993-10-07|3-MEDIUM|Clerk#000000912|0|e carefully close instructions. slyly special reques 4290|407|F|33841.66|1995-01-15|3-MEDIUM|Clerk#000000688|0| slyly quickly bold requests. final deposits haggle pending ideas! som 4291|881|F|81411.97|1993-11-29|3-MEDIUM|Clerk#000000655|0| sleep fluffily between the bold packages. bold 4292|244|F|177630.99|1992-01-09|3-MEDIUM|Clerk#000000794|0| ruthlessly. slyly bo 4293|1028|O|319796.01|1996-08-20|2-HIGH|Clerk#000000750|0|ly packages. regular packages nag according to t 4294|487|F|320119.78|1992-08-15|3-MEDIUM|Clerk#000000407|0|ng pinto beans breach. slyly express requests bo 4295|43|O|115056.47|1996-02-10|3-MEDIUM|Clerk#000000023|0|e boldly bold dependencies 4320|1144|O|101740.00|1996-12-08|4-NOT SPECIFIED|Clerk#000000223|0|ages haggle after the slowly bold se 4321|151|F|176680.44|1994-07-18|3-MEDIUM|Clerk#000000041|0|ending deposits are carefully carefully regular packa 4322|1412|O|206843.22|1998-03-13|3-MEDIUM|Clerk#000000433|0|totes nag across the fluffily special instructions. quickly silent hockey 4323|1036|F|27690.06|1994-01-23|2-HIGH|Clerk#000000282|0|lve after the slyly regular multipliers. even, regular excus 4324|713|O|264556.68|1995-07-17|1-URGENT|Clerk#000000800|0|ccounts. slyly stealthy requests shall have t 4325|1294|O|28574.21|1996-07-18|2-HIGH|Clerk#000000591|0|y around the always ev 4326|286|O|57111.99|1996-10-27|4-NOT SPECIFIED|Clerk#000000869|0|packages. carefully express deposit 4327|1456|P|174336.63|1995-03-16|2-HIGH|Clerk#000000571|0|yly pending braids. final requests abo 4352|136|O|17780.78|1997-11-26|2-HIGH|Clerk#000000620|0|ly final platelets integrate carefully even requ 4353|721|O|40333.36|1997-12-12|2-HIGH|Clerk#000000790|0|uickly even ideas cajole 4354|1436|F|245015.68|1994-09-30|4-NOT SPECIFIED|Clerk#000000046|0|pending notornis. requests serve 4355|28|O|231360.69|1996-11-16|1-URGENT|Clerk#000000362|0|ndencies use furiously across the regular 4356|961|F|66973.45|1994-04-11|5-LOW|Clerk#000000956|0| asymptotes sleep blithely. asymptotes sleep. blithely regul 4357|461|O|105145.58|1997-10-23|4-NOT SPECIFIED|Clerk#000000031|0|ages nag between the 4358|236|O|51989.52|1997-08-12|1-URGENT|Clerk#000000692|0|according to the fluffily special asymptotes 4359|142|F|163691.40|1993-03-03|1-URGENT|Clerk#000000393|0|sts. special, unusual deposits across the ironic theodo 4384|241|F|92211.63|1992-07-13|1-URGENT|Clerk#000000192|0|onic platelets. furiously regular asymptotes according to the special pac 4385|1217|O|39190.62|1996-08-06|2-HIGH|Clerk#000000597|0|ully final requests. ironic, even dolphins above the regular 4386|610|O|164103.41|1998-02-06|5-LOW|Clerk#000000070|0| dolphins. 
silent, idle pinto beans 4387|1091|O|153390.55|1995-10-23|1-URGENT|Clerk#000000025|0|ter the regular pinto beans. special, final gifts above the requests wi 4388|97|O|114701.49|1996-03-28|2-HIGH|Clerk#000000715|0|ts wake against the carefully final accounts. sly 4389|541|F|167744.85|1994-05-05|3-MEDIUM|Clerk#000000403|0|wly express excuses after the permanently even instructions are 4390|53|P|197545.62|1995-05-23|1-URGENT|Clerk#000000691|0|inal pinto beans. exp 4391|379|F|78494.42|1992-02-18|2-HIGH|Clerk#000000880|0|regular accounts. even depo 4416|1481|F|111380.89|1992-06-30|5-LOW|Clerk#000000391|0| deposits. ideas cajole express theodolites: 4417|670|O|109268.75|1998-07-09|1-URGENT|Clerk#000000365|0|ideas are alongside of the blithely final reque 4418|599|F|62464.13|1993-03-25|3-MEDIUM|Clerk#000000731|0|pecial pinto beans. close foxes affix iron 4419|1033|O|106015.25|1996-06-12|4-NOT SPECIFIED|Clerk#000000410|0|ages wake furiously slyly thin theodolit 4420|1090|F|6531.42|1994-06-18|1-URGENT|Clerk#000000706|0|lly bold deposits along the bold, pending foxes detect blithely after the acco 4421|85|O|401055.62|1997-04-04|3-MEDIUM|Clerk#000000246|0|t the pending warhorses. express waters a 4422|682|P|137446.09|1995-05-22|3-MEDIUM|Clerk#000000938|0|ly bold accounts sleep special, regular foxes. doggedly regular in 4423|635|F|6952.95|1995-02-17|5-LOW|Clerk#000000888|0|excuses are ruthless 4448|685|O|202506.72|1998-05-21|2-HIGH|Clerk#000000428|0|. deposits haggle around the silent packages; slyly unusual packages 4449|86|O|62361.39|1998-02-08|5-LOW|Clerk#000000035|0|ourts are carefully even deposits. pending 4450|1060|O|179497.92|1997-07-15|1-URGENT|Clerk#000000867|0|quests boost. furiously even realms are blithely bold requests. bl 4451|26|F|137267.09|1994-10-01|1-URGENT|Clerk#000000181|0|. carefully final foxes along the quickly express T 4452|122|F|65330.93|1994-06-21|5-LOW|Clerk#000000985|0|oxes are slyly. express, ironic pinto beans wake after the quickly pending re 4453|643|O|180326.79|1997-04-01|3-MEDIUM|Clerk#000000603|0|ages could have to nag slyly furiously even asymptotes! slowly regular 4454|1409|F|237490.06|1994-02-02|5-LOW|Clerk#000000411|0|uriously regular pint 4455|172|F|135599.42|1993-10-11|3-MEDIUM|Clerk#000000924|0|even requests. bravely regular foxes according to the carefully unusual 4480|832|F|27861.42|1994-03-31|4-NOT SPECIFIED|Clerk#000000534|0|press, bold deposits boost blit 4481|1468|O|108087.11|1996-03-30|5-LOW|Clerk#000000443|0|press sheaves cajole furio 4482|808|P|112095.89|1995-05-15|4-NOT SPECIFIED|Clerk#000000534|0|ravely bold accounts. furiously ironic instructions affix quickly. pend 4483|505|F|161222.40|1992-03-07|3-MEDIUM|Clerk#000000615|0|its. blithely idle accounts run; theodolites wake carefully around the fi 4484|1304|O|287136.09|1996-12-24|1-URGENT|Clerk#000000392|0|ct across the pinto beans. quickly pending excuses engage furiously. 4485|524|F|235145.28|1994-11-13|3-MEDIUM|Clerk#000000038|0|es wake slyly even packages. blithely brave requests nag above the regul 4486|367|O|215821.93|1998-03-03|2-HIGH|Clerk#000000656|0|ffily according to the carefully pending acc 4487|446|F|140035.87|1993-02-23|3-MEDIUM|Clerk#000000017|0|s up the never pending excuses wake furiously special pinto beans. furiously i 4512|683|O|193312.17|1995-10-25|5-LOW|Clerk#000000393|0|ending instructions maintain fu 4513|832|O|177644.71|1996-03-15|5-LOW|Clerk#000000154|0|ests. 
final, final ideas 4514|967|F|222536.94|1994-04-30|3-MEDIUM|Clerk#000000074|0|deposits according to the carefull 4515|1397|F|209521.83|1992-03-17|1-URGENT|Clerk#000000191|0|quests among the accounts sleep boldly about the regular f 4516|1294|F|53669.59|1994-03-29|3-MEDIUM|Clerk#000000739|0|ing packages sleep slyly regular attainments 4517|1129|O|66970.94|1998-03-07|4-NOT SPECIFIED|Clerk#000000231|0|uriously final deposits doze furiously furiously reg 4518|1246|O|35494.27|1997-05-01|3-MEDIUM|Clerk#000000187|0|luffily against the spec 4519|1343|F|110005.32|1993-03-30|4-NOT SPECIFIED|Clerk#000000938|0|ccording to the final 4544|1117|O|205337.52|1997-08-07|3-MEDIUM|Clerk#000000435|0|g dependencies dazzle slyly ironic somas. carefu 4545|586|F|191746.13|1993-01-17|4-NOT SPECIFIED|Clerk#000000303|0|ep. requests use sly 4546|418|O|55946.84|1995-07-29|5-LOW|Clerk#000000373|0|ns sleep. regular, regular instructions maintai 4547|1082|F|73135.94|1993-08-23|3-MEDIUM|Clerk#000000519|0|uctions thrash platelets. slyly final foxes wake slyly against th 4548|1265|O|181682.48|1996-06-28|5-LOW|Clerk#000000798|0| in place of the blithely express sentiments haggle slyly r 4549|638|O|61843.03|1998-03-05|4-NOT SPECIFIED|Clerk#000000965|0|ully even deposits dazzle. fluffily pending ideas against the requests 4550|1177|F|41533.95|1994-12-29|2-HIGH|Clerk#000000748|0|s haggle carefully acco 4551|1090|O|124682.71|1996-02-09|2-HIGH|Clerk#000000462|0|ts. slyly quick theodolite 4576|1381|O|87420.18|1996-08-14|5-LOW|Clerk#000000798|0|e pending deposits. 4577|785|O|165340.00|1998-05-02|5-LOW|Clerk#000000409|0|ly. unusual platelets are alw 4578|893|F|142392.37|1992-09-13|5-LOW|Clerk#000000121|0| to the furiously ironic instructions? furiou 4579|1042|O|125085.80|1995-12-01|2-HIGH|Clerk#000000951|0|its wake quickly blithely specia 4580|805|F|170320.34|1993-11-15|4-NOT SPECIFIED|Clerk#000000086|0|rs wake blithely regular requests. fluffily ev 4581|778|F|119792.47|1992-09-04|4-NOT SPECIFIED|Clerk#000000687|0|ges. carefully pending accounts use furiously abo 4582|184|O|30356.19|1996-07-04|1-URGENT|Clerk#000000638|0|g the furiously regular pac 4583|217|F|325377.79|1994-09-25|3-MEDIUM|Clerk#000000240|0|equests. slyly even platelets was qui 4608|796|F|237969.57|1994-06-17|1-URGENT|Clerk#000000259|0|y even instructions detect slyly asymptotes. blithely final packa 4609|1325|O|92624.01|1996-12-05|3-MEDIUM|Clerk#000000239|0|hang slyly slyly expre 4610|259|F|204812.77|1993-06-18|5-LOW|Clerk#000000616|0|e carefully express pinto 4611|286|F|263391.24|1993-01-10|2-HIGH|Clerk#000000152|0|. furiously regular instructions haggle dolphins. even instructions det 4612|604|F|105777.20|1993-09-20|3-MEDIUM|Clerk#000000397|0|bove the deposits. even deposits dazzle. slyly express packages haggle sl 4613|1315|O|265437.03|1998-03-05|3-MEDIUM|Clerk#000000541|0|furiously blithely pending dependen 4614|596|O|187916.73|1996-04-22|1-URGENT|Clerk#000000974|0| sauternes wake thinly special accounts. fur 4615|286|F|19198.41|1993-08-27|3-MEDIUM|Clerk#000000982|0|jole after the fluffily pending foxes. packages affix carefully acco 4640|955|O|110296.53|1996-01-01|5-LOW|Clerk#000000902|0|requests. deposits do detect above the blithely iron 4641|1334|F|165285.20|1993-01-20|4-NOT SPECIFIED|Clerk#000000755|0|ronic, final requests integrate slyly: specia 4642|1474|F|187642.74|1995-02-27|1-URGENT|Clerk#000000295|0|cial requests wake carefully around the regular, unusual ideas. furi 4643|667|O|84614.35|1995-06-30|2-HIGH|Clerk#000000292|0|ously regular packages. 
unusual, special platel 4644|935|O|127086.51|1998-01-17|5-LOW|Clerk#000000961|0|requests. fluffily even ideas bo 4645|434|F|329585.38|1994-09-20|1-URGENT|Clerk#000000764|0|fully even instructions. final gifts sublate quickly final requests. bl 4646|826|O|179979.78|1996-06-18|1-URGENT|Clerk#000000036|0|n place of the blithely qu 4647|265|F|147711.77|1994-05-14|3-MEDIUM|Clerk#000000626|0|out the deposits. slyly final pinto beans haggle idly. slyly s 4672|788|O|303147.86|1995-11-07|1-URGENT|Clerk#000000475|0|lyly final dependencies caj 4673|820|O|56339.98|1996-08-13|4-NOT SPECIFIED|Clerk#000000914|0|c deposits are slyly. bravely ironic deposits cajole carefully after the 4674|364|F|160248.90|1994-04-19|1-URGENT|Clerk#000000122|0|careful hockey players. carefully pending deposits caj 4675|860|F|100736.18|1993-11-25|4-NOT SPECIFIED|Clerk#000000741|0|al deposits haggle slyly final 4676|133|O|221378.75|1995-09-01|2-HIGH|Clerk#000000407|0|s. slyly bold accounts sleep furiously special 4677|382|O|29459.53|1998-02-21|3-MEDIUM|Clerk#000000245|0|ly pending deposits after the carefully regular foxes sleep blithely after t 4678|866|O|191622.17|1998-08-02|4-NOT SPECIFIED|Clerk#000000175|0|side of the bold platelets detect slyly blithely ironic e 4679|865|F|11866.57|1993-01-20|2-HIGH|Clerk#000000905|0|ely regular accounts affix slyly. final dolphins are. furiously final de 4704|13|O|101152.73|1996-08-16|4-NOT SPECIFIED|Clerk#000000256|0|lithely final requests about the fluffily regular 4705|979|F|248055.57|1992-03-22|4-NOT SPECIFIED|Clerk#000000522|0| special instructions poa 4706|238|F|144370.16|1992-12-29|4-NOT SPECIFIED|Clerk#000000722|0| packages above the never regular packages nag packages. deposits c 4707|893|F|74828.14|1995-02-27|2-HIGH|Clerk#000000943|0|ully enticing accounts behind the regular 4708|832|F|96314.41|1994-10-01|1-URGENT|Clerk#000000383|0|ly thinly even accounts. unusu 4709|256|O|69810.47|1996-01-08|3-MEDIUM|Clerk#000000785|0|he furiously even deposits! ironic theodolites haggle blithely. r 4710|985|F|120729.41|1994-12-08|4-NOT SPECIFIED|Clerk#000000734|0|the final, regular foxes. carefully ironic pattern 4711|1417|O|162618.22|1998-05-06|1-URGENT|Clerk#000000818|0|mptotes. unusual packages wake furiously qui 4736|1378|O|88659.44|1995-11-20|2-HIGH|Clerk#000000563|0|blithely regular courts affix into the carefully ironic deposits. slyly exp 4737|787|F|102138.81|1993-03-11|4-NOT SPECIFIED|Clerk#000000275|0|ents use slyly among the unusual, ironic pearls. furiously pending 4738|43|F|248132.99|1992-04-08|2-HIGH|Clerk#000000150|0|deposits. thin acco 4739|1472|F|116385.48|1993-02-21|5-LOW|Clerk#000000872|0|ing to the pending attainments: pending, express account 4740|677|O|51361.39|1996-07-05|2-HIGH|Clerk#000000420|0| dependencies haggle about the 4741|1270|F|261434.58|1992-07-07|4-NOT SPECIFIED|Clerk#000000983|0|ly bold deposits are slyly about the r 4742|637|P|249116.09|1995-03-23|3-MEDIUM|Clerk#000000058|0|n packages. quickly regular ideas cajole blithely 4743|967|F|98430.47|1993-03-31|5-LOW|Clerk#000000048|0|pinto beans above the bold, even idea 4768|1351|F|6485.65|1993-11-22|2-HIGH|Clerk#000000875|0|ctions snooze idly beneath the quick waters. fluffily u 4769|1205|P|202053.23|1995-04-14|4-NOT SPECIFIED|Clerk#000000116|0|pon the asymptotes. idle, final account 4770|589|O|96758.59|1995-06-20|2-HIGH|Clerk#000000461|0|cial instructions believe carefully. 
4771|943|F|58304.87|1992-12-14|1-URGENT|Clerk#000000571|0|lly express deposits serve furiously along the f 4772|277|F|106724.51|1994-09-14|1-URGENT|Clerk#000000708|0|es sleep. regular requests haggle furiously slyly 4773|1213|O|278107.39|1995-12-23|1-URGENT|Clerk#000000327|0|ptotes was slyly along the 4774|508|F|186430.13|1993-04-20|3-MEDIUM|Clerk#000000299|0|eposits use blithely bold deposits. carefully regular gifts about the fin 4775|1273|O|141549.59|1995-08-13|4-NOT SPECIFIED|Clerk#000000609|0|s integrate slyly slyly final instructions. carefully bold pack 4800|352|F|135654.46|1992-01-06|5-LOW|Clerk#000000625|0|ggle furiously along the pending pinto beans. deposits use: final foxe 4801|874|O|168339.96|1996-01-25|1-URGENT|Clerk#000000553|0|r the final sentiments. pending theodolites sleep doggedly across t 4802|1288|O|8238.68|1997-01-23|3-MEDIUM|Clerk#000000400|0| ironic, thin packages wake furiously ironic, ironic deposits. the 4803|1234|O|249267.51|1996-02-08|5-LOW|Clerk#000000892|0|lly unusual courts are ironic 4804|358|F|148287.00|1992-01-28|2-HIGH|Clerk#000000614|0|ly final accounts. blithely unusual theodolite 4805|149|F|238156.01|1992-04-25|4-NOT SPECIFIED|Clerk#000000514|0|even accounts wake furiously slyly final accounts; blithel 4806|53|F|44638.95|1993-04-21|5-LOW|Clerk#000000625|0|ave accounts. furiously pending wa 4807|526|O|184759.45|1997-01-09|3-MEDIUM|Clerk#000000310|0|kly. slyly special accounts 4832|340|O|106440.70|1997-12-04|3-MEDIUM|Clerk#000000548|0|final accounts sleep among the blithe 4833|1319|O|94940.36|1996-05-12|3-MEDIUM|Clerk#000000256|0|r deposits against the slyly final excuses slee 4834|178|O|175765.87|1996-09-12|2-HIGH|Clerk#000000284|0|lar accounts. furiously ironic accounts haggle slyly 4835|1451|F|102173.80|1994-10-25|1-URGENT|Clerk#000000250|0|s integrate furiously blithely expr 4836|643|O|110667.95|1996-12-18|1-URGENT|Clerk#000000691|0|c packages cajole carefully through the accounts. careful 4837|1282|O|109203.24|1998-04-24|4-NOT SPECIFIED|Clerk#000000517|0|n accounts are regular, bold accounts. even instructions use request 4838|436|F|77558.81|1992-08-02|1-URGENT|Clerk#000000569|0|ffily bold sentiments. carefully close dolphins cajole across the 4839|244|F|107759.92|1994-05-10|1-URGENT|Clerk#000000925|0| even somas. slyly express ideas lose carefully. blithely unusu 4864|863|F|192178.70|1992-11-11|5-LOW|Clerk#000000423|0|ests nag within the quickly ironic asymptotes. ironic 4865|839|O|250988.92|1997-06-07|3-MEDIUM|Clerk#000000418|0|sits boost stealthily above the bl 4866|530|O|29343.32|1997-08-07|2-HIGH|Clerk#000000663|0|kages. unusual packages nag fluffily. qui 4867|82|F|15931.91|1992-05-21|1-URGENT|Clerk#000000891|0|ss the slyly regular dependencies. fluffily regular deposits within the car 4868|745|O|253228.69|1997-03-02|5-LOW|Clerk#000000729|0|regular asymptotes. regular packages sublate carefully al 4869|568|F|232130.29|1994-09-26|5-LOW|Clerk#000000802|0|boost! ironic packages un 4870|1016|F|140038.23|1994-08-06|3-MEDIUM|Clerk#000000911|0|nto beans about the blithely regular d 4871|448|O|172274.73|1995-06-12|1-URGENT|Clerk#000000531|0|ven, special instructions across t 4896|850|F|131264.74|1992-08-22|1-URGENT|Clerk#000000622|0|sly pending deposits. final accounts boost above the sly, even 4897|800|F|155933.30|1992-09-17|5-LOW|Clerk#000000184|0|s. bold pinto beans sleep. evenly final accounts daz 4898|137|F|67478.88|1994-07-11|4-NOT SPECIFIED|Clerk#000000841|0|final patterns. 
special theodolites haggle ruthlessly at the blithely spec 4899|601|F|16204.30|1993-10-18|4-NOT SPECIFIED|Clerk#000000348|0| instructions. furiously even packages are furiously speci 4900|1361|F|241094.13|1992-06-30|4-NOT SPECIFIED|Clerk#000000878|0|sleep quickly unusual 4901|790|O|178383.88|1997-12-31|4-NOT SPECIFIED|Clerk#000000980|0|inal dependencies cajole furiously. carefully express accounts na 4902|1372|O|44050.73|1998-07-04|3-MEDIUM|Clerk#000000874|0| the slyly express dolphins. 4903|920|F|39360.53|1992-03-22|4-NOT SPECIFIED|Clerk#000000907|0|yly. multipliers within the fo 4928|22|F|92223.13|1993-10-04|4-NOT SPECIFIED|Clerk#000000952|0|slyly brave instructions after the ironic excuses haggle ruthlessly about 4929|1492|O|198278.90|1996-02-29|3-MEDIUM|Clerk#000000109|0|uests. furiously special ideas poach. pending 4930|1483|F|264157.22|1994-05-06|5-LOW|Clerk#000000593|0| haggle slyly quietly final theodolites. packages are furious 4931|496|F|170930.79|1994-11-17|1-URGENT|Clerk#000000356|0|leep. slyly express dolphins nag slyly. furiously regular s 4932|1219|F|60768.93|1993-08-10|1-URGENT|Clerk#000000830|0|onic foxes. enticingly reg 4933|925|O|56755.81|1995-07-14|3-MEDIUM|Clerk#000000848|0|y special sauternes integr 4934|382|O|254375.72|1997-02-17|1-URGENT|Clerk#000000372|0|nes cajole; carefully special accounts haggle. special pinto beans nag 4935|400|F|225832.93|1993-05-25|4-NOT SPECIFIED|Clerk#000000601|0|c foxes. fluffily pendin 4960|1228|F|204893.89|1995-02-26|5-LOW|Clerk#000000229|0|uriously even excuses. fluffily regular instructions along the furiously ironi 4961|574|O|131816.39|1998-04-06|3-MEDIUM|Clerk#000000731|0| braids. furiously even theodolites 4962|1037|F|52829.69|1993-07-28|3-MEDIUM|Clerk#000000008|0| breach never ironic 4963|329|O|83635.70|1996-11-07|3-MEDIUM|Clerk#000000754|0|ully unusual epitaphs nod s 4964|1003|O|285003.26|1997-07-28|4-NOT SPECIFIED|Clerk#000000144|0|ithely final theodolites. blithely regu 4965|518|F|123088.54|1993-10-21|5-LOW|Clerk#000000638|0|dependencies poach packages. sometim 4966|688|O|80899.69|1996-09-07|2-HIGH|Clerk#000000243|0|accounts. blithely ironic courts wake boldly furiously express 4967|976|O|161892.72|1997-02-17|3-MEDIUM|Clerk#000000397|0|e theodolites; furiously b 4992|613|F|291161.83|1992-05-10|1-URGENT|Clerk#000000166|0|telets nag carefully am 4993|112|F|194811.83|1994-08-04|4-NOT SPECIFIED|Clerk#000000258|0|ing instructions nag furiously. un 4994|425|O|315464.22|1996-06-29|4-NOT SPECIFIED|Clerk#000000868|0|oxes wake above the asymptotes. bold requests sleep br 4995|388|O|259750.37|1996-01-06|4-NOT SPECIFIED|Clerk#000000748|0|s. even deposits boost along the express, even theodolites. stealthily ir 4996|1330|F|139500.09|1992-09-14|3-MEDIUM|Clerk#000000433|0|foxes. carefully special packages haggle quickly fluffi 4997|463|O|179285.35|1998-03-18|5-LOW|Clerk#000000040|0|egrate final pinto beans. fluffily special notornis use blith 4998|319|F|196035.93|1992-01-11|4-NOT SPECIFIED|Clerk#000000054|0|alongside of the quickly final requests hang always 4999|844|F|135073.82|1993-06-26|2-HIGH|Clerk#000000504|0| dolphins cajole blithely above the sly 5024|1222|O|148461.38|1996-10-25|3-MEDIUM|Clerk#000000659|0|r foxes. regular excuses are about the quickly regular theodolites. 
regular, 5025|1198|O|29882.15|1997-02-03|5-LOW|Clerk#000000805|0|ackages are slyly about the quickly 5026|271|O|24577.34|1997-09-06|1-URGENT|Clerk#000000955|0|y final requests us 5027|1463|O|273265.90|1997-08-30|2-HIGH|Clerk#000000751|0|e-- final, pending requests along t 5028|116|F|44919.14|1992-04-17|2-HIGH|Clerk#000000180|0|ickly blithely express deposits. b 5029|104|F|28069.92|1992-11-14|3-MEDIUM|Clerk#000000469|0|. regular accounts haggle slyly. regul 5030|1057|O|106018.58|1998-05-25|4-NOT SPECIFIED|Clerk#000000564|0| wake slyly furiously thin requests. ironic pinto beans ha 5031|1381|F|135672.76|1994-12-02|3-MEDIUM|Clerk#000000788|0|lar instructions haggle blithely pending foxes? sometimes final excuses h 5056|508|O|108095.49|1997-02-15|5-LOW|Clerk#000000828|0|lithely above the express ideas. blithely final deposits are fluffily spec 5057|640|O|91744.27|1997-08-03|1-URGENT|Clerk#000000955|0|r ironic requests of the carefully ironic dependencies wake slyly a 5058|1190|O|28461.94|1998-03-23|1-URGENT|Clerk#000000367|0| the pending packages wake after the quickly speci 5059|413|F|102772.50|1993-11-10|2-HIGH|Clerk#000000058|0|latelets. final, regular accounts cajole furiously ironic pinto beans? do 5060|1111|F|85197.05|1992-07-07|4-NOT SPECIFIED|Clerk#000000333|0|e according to the excuses. express theodo 5061|1006|F|72555.51|1993-08-14|1-URGENT|Clerk#000000009|0|e packages use fluffily according to the carefully ironic deposits. bol 5062|598|F|157769.50|1992-10-08|3-MEDIUM|Clerk#000000012|0|ithely. blithely bold theodolites affix. blithely final deposits haggle ac 5063|224|O|133623.15|1997-05-17|2-HIGH|Clerk#000000745|0|lyly after the pending foxes. express theodolites breach across t 5088|1294|F|160391.21|1993-01-06|5-LOW|Clerk#000000930|0|ole slyly since the quickly ironic br 5089|1295|F|137114.54|1992-07-29|1-URGENT|Clerk#000000677|0|cial platelets. quiet, final ideas cajole carefully. unusu 5090|881|O|174101.49|1997-03-09|1-URGENT|Clerk#000000953|0|ress accounts affix silently carefully quick accounts. carefully f 5091|1477|O|81938.11|1998-05-21|3-MEDIUM|Clerk#000000311|0|egular decoys mold carefully fluffily unus 5092|206|O|288758.19|1995-10-30|5-LOW|Clerk#000000194|0|are blithely along the pin 5093|776|F|249888.04|1993-09-03|3-MEDIUM|Clerk#000000802|0|ully ironic theodolites sleep above the furiously ruthless instructions. bli 5094|1051|F|103588.23|1993-03-29|4-NOT SPECIFIED|Clerk#000000406|0|uickly pending deposits haggle quickly ide 5095|952|F|275920.02|1992-04-22|2-HIGH|Clerk#000000964|0|accounts are carefully! slyly even packages wake slyly a 5120|155|O|33217.93|1996-06-05|1-URGENT|Clerk#000000332|0|against the slyly express requests. furiousl 5121|1328|F|229899.02|1992-05-11|4-NOT SPECIFIED|Clerk#000000736|0|gular requests. furiously final pearls against the permanent, thin courts s 5122|688|O|131621.44|1996-02-10|5-LOW|Clerk#000000780|0|blithely. slyly ironic deposits nag. excuses s 5123|97|O|14822.35|1998-02-10|1-URGENT|Clerk#000000776|0|ic requests. furiously ironic packages grow above the express, ironic inst 5124|238|O|209658.77|1997-04-04|4-NOT SPECIFIED|Clerk#000000749|0|kly even courts. bold packages solve. 5125|274|O|41954.76|1998-02-07|5-LOW|Clerk#000000834|0|ructions. dolphins wake slowly carefully unusual 5126|1120|F|110348.46|1992-10-12|4-NOT SPECIFIED|Clerk#000000270|0|s. unusual deposits 5127|722|O|59050.89|1997-01-15|5-LOW|Clerk#000000829|0|fully express pinto beans. 
slyly final accounts along the ironic dugouts use s 5152|433|O|70131.25|1997-01-04|3-MEDIUM|Clerk#000000963|0| for the blithely reg 5153|1124|O|256816.81|1995-08-26|1-URGENT|Clerk#000000954|0| the furiously ironic foxes. express packages shall cajole carefully across 5154|79|O|40583.07|1997-04-13|3-MEDIUM|Clerk#000000316|0|inal requests. slyly regular deposits nag. even deposits haggle agains 5155|763|F|99230.28|1994-06-12|2-HIGH|Clerk#000000108|0|y pending deposits are ag 5156|1249|O|72450.53|1996-11-04|5-LOW|Clerk#000000117|0|ngside of the multipliers solve slyly requests. regu 5157|1412|O|235193.85|1997-07-06|4-NOT SPECIFIED|Clerk#000000689|0|closely above the unusual deposits. furiously 5158|755|O|349464.54|1997-01-21|1-URGENT|Clerk#000000541|0| regular foxes. even foxes wake blithely 5159|1048|O|195720.25|1996-09-25|1-URGENT|Clerk#000000303|0|tegrate slyly around the slyly sly sauternes. final pa 5184|850|O|311131.08|1998-07-20|5-LOW|Clerk#000000250|0|nding accounts detect final, even 5185|1477|O|316890.58|1997-07-25|3-MEDIUM|Clerk#000000195|0| regular ideas about the even ex 5186|520|O|345577.74|1996-08-03|1-URGENT|Clerk#000000332|0|pecial platelets. slyly final ac 5187|538|O|52047.87|1997-07-16|3-MEDIUM|Clerk#000000682|0|ckly according to t 5188|1400|P|96653.92|1995-03-02|4-NOT SPECIFIED|Clerk#000000029|0|counts. finally ironic requests ab 5189|704|F|247112.62|1993-11-26|5-LOW|Clerk#000000940|0|e after the pending accounts. asymptotes boost. re 5190|580|F|146966.67|1992-04-26|5-LOW|Clerk#000000888|0|equests. slyly unusual 5191|770|F|156365.18|1994-12-11|4-NOT SPECIFIED|Clerk#000000318|0|ing, regular deposits alongside of the deposits boost fluffily quickly ev 5216|587|O|27361.79|1997-08-14|3-MEDIUM|Clerk#000000418|0|des boost across the platelets. slyly busy theodolit 5217|349|O|204315.50|1995-10-13|2-HIGH|Clerk#000000873|0|ons might wake quickly according to th 5218|802|F|109453.09|1992-07-30|4-NOT SPECIFIED|Clerk#000000683|0|y ruthless packages according to the bold, ironic package 5219|875|O|22976.04|1997-02-27|1-URGENT|Clerk#000000510|0|aggle always. foxes above the ironic deposits 5220|95|F|43690.73|1992-07-30|2-HIGH|Clerk#000000051|0| final packages. ideas detect slyly around 5221|125|O|82717.41|1995-06-09|4-NOT SPECIFIED|Clerk#000000324|0|lar accounts above the sl 5222|797|F|1406.50|1994-05-27|4-NOT SPECIFIED|Clerk#000000613|0|along the bold ideas. furiously final foxes snoo 5223|1486|F|125739.86|1994-06-30|1-URGENT|Clerk#000000745|0|e. theodolites serve blithely unusual, final foxes. carefully pending packag 5248|697|P|125759.30|1995-04-15|2-HIGH|Clerk#000000737|0|theodolites cajole according to the silent packages. quickly ironic packages a 5249|1019|F|166411.87|1994-09-06|3-MEDIUM|Clerk#000000019|0|refully bold accounts 5250|956|O|48941.36|1995-07-16|2-HIGH|Clerk#000000307|0|. carefully final instructions sleep among the finally regular dependen 5251|340|O|42259.44|1995-04-12|3-MEDIUM|Clerk#000000687|0| ironic dugouts detect. reque 5252|898|O|247519.93|1996-02-17|1-URGENT|Clerk#000000724|0| ironic accounts among the silent asym 5253|1480|P|144724.33|1995-04-11|2-HIGH|Clerk#000000275|0|egular requests! blithely regular deposits alongside of t 5254|1105|F|265744.68|1992-07-26|4-NOT SPECIFIED|Clerk#000000527|0|he express, even ideas cajole blithely special requests 5255|634|O|98826.70|1996-07-12|5-LOW|Clerk#000000591|0|ly slow forges. express foxes haggle. regular, even asymp 5280|329|O|110094.95|1997-12-03|3-MEDIUM|Clerk#000000604|0|riously ironic instructions. 
ironic ideas according to the accounts boost fur 5281|1240|O|232387.44|1995-11-02|2-HIGH|Clerk#000000158|0|ackages haggle slyly a 5282|499|O|126638.39|1998-01-30|1-URGENT|Clerk#000000030|0|rding to the unusual, bold accounts. regular instructions 5283|1309|F|19969.25|1994-06-04|3-MEDIUM|Clerk#000000579|0|ests. even, final ideas alongside of t 5284|608|O|58961.47|1995-07-09|4-NOT SPECIFIED|Clerk#000000155|0| careful dependencies use sly 5285|700|F|142725.60|1994-01-18|2-HIGH|Clerk#000000976|0|p across the furiously ironic deposits. 5286|1156|O|119200.31|1997-09-26|5-LOW|Clerk#000000606|0|structions are furiously quickly ironic asymptotes. quickly iro 5287|250|F|41064.04|1993-12-22|5-LOW|Clerk#000000406|0|regular packages. bold instructions sleep always. carefully final p 5312|649|F|82451.06|1995-02-24|2-HIGH|Clerk#000000690|0|ter the even, bold foxe 5313|130|O|199395.07|1997-06-17|4-NOT SPECIFIED|Clerk#000000896|0|le. final courts haggle furiously according to the 5314|331|O|29612.61|1995-06-02|2-HIGH|Clerk#000000617|0|ions across the quickly special d 5315|1387|F|84399.32|1992-10-29|4-NOT SPECIFIED|Clerk#000000035|0| furiously. quickly unusual packages use. sly 5316|997|F|68793.55|1994-01-31|1-URGENT|Clerk#000000734|0| requests haggle across the regular, pending deposits. furiously regular requ 5317|367|F|322782.46|1994-09-09|5-LOW|Clerk#000000687|0|jole quickly at the slyly pend 5318|590|F|140892.20|1993-04-04|2-HIGH|Clerk#000000663|0|efully regular dolphins. even ideas nag fluffily furiously even packa 5319|979|O|94253.83|1996-01-21|1-URGENT|Clerk#000000237|0|lent requests. quickly pe 5344|1072|O|139337.42|1998-06-21|3-MEDIUM|Clerk#000000569|0|s. ironic excuses cajole across the 5345|304|O|154585.90|1997-08-24|1-URGENT|Clerk#000000057|0|r the slyly silent packages. pending, even pinto b 5346|367|F|213859.93|1993-12-26|2-HIGH|Clerk#000000220|0|gly close packages against the even, regular escapades boost evenly accordi 5347|478|F|237559.02|1995-02-22|3-MEDIUM|Clerk#000000180|0|onic, regular deposits. packag 5348|524|O|151375.82|1997-11-08|5-LOW|Clerk#000000497|0|totes. accounts after the furiously 5349|670|O|52011.10|1996-09-01|1-URGENT|Clerk#000000960|0|le along the carefully bold dolphins. carefully special packa 5350|745|F|159518.11|1993-10-10|5-LOW|Clerk#000000604|0|ccounts after the carefully pending requests believe 5351|1213|O|91753.68|1998-05-11|1-URGENT|Clerk#000000443|0|to beans sleep furiously after the carefully even 5376|1486|F|166038.52|1994-07-04|5-LOW|Clerk#000000392|0|. quickly ironic deposits integrate along 5377|623|O|162417.75|1997-04-24|2-HIGH|Clerk#000000917|0|ons nag blithely furiously regula 5378|412|F|144404.53|1992-10-25|1-URGENT|Clerk#000000520|0|n ideas. regular accounts haggle. ironic ideas use along the bold ideas. blith 5379|884|O|80830.65|1995-08-08|2-HIGH|Clerk#000000503|0|he unusual accounts. carefully special instructi 5380|1462|O|163456.68|1997-10-12|1-URGENT|Clerk#000000481|0|le slyly about the slyly final dolphins. fu 5381|311|F|309639.39|1993-01-29|5-LOW|Clerk#000000531|0|arefully bold packages are slyly furiously ironic foxes. fluffil 5382|344|F|177903.56|1992-01-13|5-LOW|Clerk#000000809|0|lent deposits are according to the reg 5383|308|O|21345.98|1995-05-26|5-LOW|Clerk#000000409|0|ly bold requests hang furiously furiously unusual accounts. evenly unusu 5408|223|F|175098.30|1992-07-21|5-LOW|Clerk#000000735|0|egular requests according to the 5409|127|F|190710.92|1992-01-09|5-LOW|Clerk#000000171|0|eans. 
regular accounts are regul 5410|202|O|152792.92|1998-07-28|4-NOT SPECIFIED|Clerk#000000117|0|final deposits: pending excuses boost. ironic theodolites cajole furi 5411|598|O|81721.30|1997-05-16|3-MEDIUM|Clerk#000000800|0|equests cajole slyly furious 5412|1408|O|186066.37|1998-01-20|2-HIGH|Clerk#000000151|0|ets boost furiously regular accounts. regular foxes above th 5413|940|O|275955.54|1997-10-17|1-URGENT|Clerk#000000066|0|e even excuses. always final depen 5414|995|F|229650.28|1993-03-25|4-NOT SPECIFIED|Clerk#000000242|0|lent dependencies? carefully express requests sleep furiously ac 5415|221|F|204898.67|1992-08-05|3-MEDIUM|Clerk#000000998|0|ly even ideas nag blithely above the final instructions 5440|1291|O|3309.00|1997-01-12|1-URGENT|Clerk#000000154|0|posits boost regularly ironic packages. regular, ironic deposits wak 5441|401|F|179096.05|1994-07-21|4-NOT SPECIFIED|Clerk#000000257|0|after the furiously ironic 5442|413|O|204762.90|1998-01-13|4-NOT SPECIFIED|Clerk#000000954|0|ully. quickly express accounts against the 5443|1306|O|203531.90|1996-10-10|4-NOT SPECIFIED|Clerk#000000492|0|al foxes could detect. blithely stealthy asymptotes kind 5444|1288|P|247984.02|1995-03-18|1-URGENT|Clerk#000000677|0| asymptotes. asymptotes cajole quickly quickly bo 5445|1150|F|144557.32|1993-07-26|5-LOW|Clerk#000000623|0|s. even, special requests cajole furiously even, 5446|56|F|49289.08|1994-06-21|5-LOW|Clerk#000000304|0| furiously final pac 5447|130|O|54828.65|1996-03-16|3-MEDIUM|Clerk#000000597|0|uternes around the furiously bold accounts wake after 5472|686|F|349883.98|1993-04-11|5-LOW|Clerk#000000552|0|counts. deposits about the slyly dogged pinto beans cajole slyly 5473|641|F|88121.08|1992-03-25|4-NOT SPECIFIED|Clerk#000000306|0|te the quickly stealthy ideas. even, regular deposits above 5474|538|F|217534.09|1992-06-01|4-NOT SPECIFIED|Clerk#000000487|0|gle blithely enticing ideas. final, exp 5475|1372|O|16971.18|1996-07-07|5-LOW|Clerk#000000856|0|es shall boost slyly. furiously even deposits lose. instruc 5476|898|O|35392.02|1997-11-06|1-URGENT|Clerk#000000189|0|furiously final ideas. furiously bold dependencies sleep care 5477|1061|O|198571.43|1997-12-30|5-LOW|Clerk#000000689|0|ckages. ironic deposits caj 5478|1153|O|102065.07|1996-05-17|1-URGENT|Clerk#000000272|0|ckages. quickly pending deposits thrash furiously: bl 5479|697|F|81438.25|1993-12-22|3-MEDIUM|Clerk#000000335|0|ng asymptotes. pinto beans sleep care 5504|178|F|60408.91|1993-01-06|2-HIGH|Clerk#000000221|0|y pending packages. furiousl 5505|943|O|207951.17|1997-10-04|5-LOW|Clerk#000000719|0| final, regular packages according to the slyly ironic accounts nag ironica 5506|907|F|11496.86|1993-11-08|1-URGENT|Clerk#000000292|0|nusual theodolites. sly 5507|17|O|176100.92|1998-05-28|5-LOW|Clerk#000000692|0|the carefully ironic instructions are quickly iro 5508|557|O|4010.45|1996-06-21|1-URGENT|Clerk#000000128|0|y express packages cajole furiously. slyly unusual requests 5509|793|F|235588.34|1994-04-08|5-LOW|Clerk#000000164|0|usual deposits use packages. furiously final requests wake slyly about th 5510|361|F|163179.38|1993-01-08|3-MEDIUM|Clerk#000000819|0| nag slyly. carefully eve 5511|773|F|191746.63|1994-11-29|1-URGENT|Clerk#000000438|0|ng instructions integrate fluffily among the fluffily silent accounts. bli 5536|1151|O|161021.95|1998-03-16|4-NOT SPECIFIED|Clerk#000000076|0| carefully final dolphins. ironic, ironic deposits lose. 
bold, 5537|1178|O|157429.58|1996-10-03|3-MEDIUM|Clerk#000000742|0|ng to the daring, final 5538|1384|F|119417.57|1993-12-25|1-URGENT|Clerk#000000992|0|ttainments. slyly final ideas are about the furiously silent excuses. 5539|1189|F|63099.20|1994-07-31|5-LOW|Clerk#000000675|0|structions. slyly regular patterns solve above the carefully expres 5540|1285|O|144175.37|1996-10-12|4-NOT SPECIFIED|Clerk#000000120|0|y ironic packages cajole blithely 5541|1423|O|69883.38|1997-09-30|3-MEDIUM|Clerk#000000217|0|encies among the silent accounts sleep slyly quickly pending deposits 5542|481|O|10491.88|1996-04-20|4-NOT SPECIFIED|Clerk#000000100|0|riously among the regularly regular pac 5543|1150|F|165445.62|1993-09-25|3-MEDIUM|Clerk#000000644|0|ckly regular epitaphs. carefully bold accounts haggle furiously 5568|295|O|164846.25|1995-06-07|3-MEDIUM|Clerk#000000491|0| nag. fluffily pending de 5569|1088|F|183119.70|1993-04-30|4-NOT SPECIFIED|Clerk#000000759|0|e regular dependencies. furiously unusual ideas b 5570|1106|O|114869.30|1996-08-12|2-HIGH|Clerk#000000795|0|eans. ironic, even requests doze 5571|1021|F|130352.45|1992-12-19|4-NOT SPECIFIED|Clerk#000000184|0|ts cajole furiously carefully regular sheaves. un 5572|73|F|236455.84|1994-07-17|2-HIGH|Clerk#000000163|0|e fluffily express deposits cajole slyly across th 5573|358|O|200471.60|1996-08-15|3-MEDIUM|Clerk#000000055|0|lites. slyly final pinto beans about the carefully regul 5574|268|F|194685.04|1992-03-10|4-NOT SPECIFIED|Clerk#000000002|0|n deposits. special, regular t 5575|1012|O|71308.78|1995-07-24|5-LOW|Clerk#000000948|0|uriously express frays breach 5600|941|O|77497.53|1997-02-08|4-NOT SPECIFIED|Clerk#000000019|0|lly regular deposits. car 5601|110|F|175212.24|1992-01-06|2-HIGH|Clerk#000000827|0|gular deposits wake platelets? blithe 5602|1294|O|108142.13|1997-07-30|3-MEDIUM|Clerk#000000395|0|onic asymptotes haggl 5603|703|F|205166.88|1992-06-20|4-NOT SPECIFIED|Clerk#000000535|0| asymptotes. fluffily ironic instructions are. pending pinto bean 5604|443|O|124425.17|1998-04-14|4-NOT SPECIFIED|Clerk#000000123|0|ously across the blithely ironic pinto beans. sile 5605|346|O|278023.41|1996-08-22|2-HIGH|Clerk#000000538|0|sleep carefully final packages. dependencies wake slyly. theodol 5606|1486|O|330692.08|1996-11-12|5-LOW|Clerk#000000688|0|uriously express pinto beans. packages sh 5607|911|F|29084.44|1992-01-01|4-NOT SPECIFIED|Clerk#000000137|0|c requests promise quickly fluffily ironic deposits. caref 5632|784|O|106861.13|1996-02-05|1-URGENT|Clerk#000000508|0|ons. blithely pending pinto beans thrash. furiously busy theodoli 5633|775|O|302286.82|1998-05-31|3-MEDIUM|Clerk#000000841|0|cial deposits wake final, final 5634|677|O|147338.82|1996-07-31|3-MEDIUM|Clerk#000000915|0|out the accounts. carefully ironic ideas are slyly. sheaves could h 5635|686|F|273995.47|1992-08-16|3-MEDIUM|Clerk#000000734|0|nal platelets sleep daringly. idle, final accounts about 5636|1214|F|200472.48|1995-02-16|3-MEDIUM|Clerk#000000916|0|. boldly even Tiresias sleep. blithely ironic packages among the ca 5637|1030|O|194938.85|1996-06-17|3-MEDIUM|Clerk#000000183|0|nic dolphins are regular packages. ironic pinto beans hagg 5638|1078|F|104992.42|1994-01-17|1-URGENT|Clerk#000000355|0|enly bold deposits eat. special realms play against the regular, speci 5639|1450|F|13962.03|1994-06-02|3-MEDIUM|Clerk#000000005|0|ending packages use after the blithely regular accounts. 
regular package 5664|1186|O|241226.07|1998-07-23|2-HIGH|Clerk#000000789|0|the quickly ironic dolp 5665|994|F|162407.28|1993-06-28|4-NOT SPECIFIED|Clerk#000000513|0| carefully special instructions. ironic pinto beans nag slyly blithe 5666|139|F|158442.98|1994-02-02|2-HIGH|Clerk#000000396|0|mptotes. quickly final instructions are 5667|434|O|48054.70|1995-08-10|1-URGENT|Clerk#000000358|0|s print upon the quickly ironic packa 5668|1088|F|14179.13|1995-03-22|4-NOT SPECIFIED|Clerk#000000047|0|p slyly slyly express accoun 5669|736|O|168537.13|1996-05-06|1-URGENT|Clerk#000000336|0|ng packages nag fluffily furio 5670|64|F|153552.69|1993-04-21|5-LOW|Clerk#000000922|0|he carefully final packages. deposits are slyly among the requests. 5671|418|O|195159.38|1998-02-06|2-HIGH|Clerk#000000838|0|k dependencies. slyly 5696|1411|P|274605.94|1995-05-04|1-URGENT|Clerk#000000447|0|e quickly unusual pack 5697|541|F|136651.75|1992-10-05|1-URGENT|Clerk#000000112|0|pendencies impress furiously. bold, final requests solve ab 5698|943|F|204527.94|1994-05-21|3-MEDIUM|Clerk#000000455|0|he furiously silent accounts haggle blithely against the carefully unusual 5699|1414|F|300221.03|1992-07-30|5-LOW|Clerk#000000311|0|o beans. ironic asymptotes boost. blithe, final courts integrate 5700|1427|O|97397.92|1997-12-25|1-URGENT|Clerk#000000618|0|ly pending dolphins sleep carefully slyly pending i 5701|421|O|25111.71|1997-02-07|5-LOW|Clerk#000000798|0| blithely final pinto beans. blit 5702|967|F|235263.35|1993-09-07|4-NOT SPECIFIED|Clerk#000000743|0|ironic accounts. final accounts wake express deposits. final pac 5703|1196|F|3268.07|1993-05-16|3-MEDIUM|Clerk#000000647|0|ly special instructions. slyly even reque 5728|793|F|120489.88|1994-12-11|4-NOT SPECIFIED|Clerk#000000426|0|furiously express pin 5729|434|F|93102.37|1994-10-10|2-HIGH|Clerk#000000843|0|uffily sly accounts about 5730|110|O|18116.22|1997-12-18|1-URGENT|Clerk#000000181|0|l platelets. ironic pinto beans wake slyly. quickly b 5731|79|O|80961.79|1997-05-17|5-LOW|Clerk#000000841|0| silent excuses among the express accounts wake 5732|352|O|35016.82|1997-08-03|1-URGENT|Clerk#000000910|0|he quickly bold asymptotes: final platelets wake quickly. blithely final pinto 5733|1003|F|50786.52|1993-03-17|2-HIGH|Clerk#000000873|0|osits. pending accounts boost quickly. furiously permanent acco 5734|925|O|71965.89|1997-10-12|3-MEDIUM|Clerk#000000084|0|efully even braids detect blithely alo 5735|395|F|61436.03|1994-12-11|3-MEDIUM|Clerk#000000600|0| bold realms cajole slyly fu 5760|241|F|68043.47|1994-05-25|4-NOT SPECIFIED|Clerk#000000498|0|s among the blithely regular frays haggle ironically bold theodolites. al 5761|151|O|184256.14|1998-07-06|3-MEDIUM|Clerk#000000208|0|s asymptotes cajole boldly. regular, 5762|481|O|217243.20|1997-02-14|1-URGENT|Clerk#000000901|0|ly bold packages: slyly ironic deposits sleep quietly foxes. express a 5763|74|O|165052.79|1998-06-26|4-NOT SPECIFIED|Clerk#000000633|0|according to the furiously regular pinto beans. even accounts wake fu 5764|1306|F|68406.47|1993-10-03|4-NOT SPECIFIED|Clerk#000000363|0| furiously regular deposits haggle fluffily around th 5765|518|F|354366.71|1994-12-15|5-LOW|Clerk#000000959|0|longside of the quickly final packages. instructions so 5766|476|F|62642.11|1993-09-27|5-LOW|Clerk#000000753|0|. quickly final packages print slyly. fu 5767|1174|F|204654.62|1992-04-29|2-HIGH|Clerk#000000225|0|ts wake fluffily above the r 5792|256|F|216427.94|1993-04-04|2-HIGH|Clerk#000000731|0|packages. 
doggedly bold deposits integrate furiously across the 5793|362|O|169572.44|1997-07-13|2-HIGH|Clerk#000000294|0|thely. fluffily even instructi 5794|80|F|152918.94|1993-04-05|5-LOW|Clerk#000000855|0|t accounts kindle about the gifts. as 5795|368|F|59513.90|1992-05-05|2-HIGH|Clerk#000000581|0| even instructions x-ray ironic req 5796|1493|O|35978.09|1996-01-23|3-MEDIUM|Clerk#000000326|0|eodolites. slyly ironic pinto beans at the silent, special request 5797|1216|O|24070.20|1997-10-15|4-NOT SPECIFIED|Clerk#000000381|0|ng! packages against the blithely b 5798|1057|O|149833.20|1998-03-30|5-LOW|Clerk#000000343|0|lent accounts affix quickly! platelets run slyly slyly final packages. f 5799|260|O|133862.33|1995-08-03|1-URGENT|Clerk#000000238|0| unusual deposits sleep blithely along the carefully even requests. care 5824|556|O|255277.52|1996-12-03|2-HIGH|Clerk#000000171|0|unusual packages. even ideas along the even requests are along th 5825|607|F|32267.34|1995-02-21|5-LOW|Clerk#000000494|0|regular packages use bravely. 5826|217|O|32370.45|1998-06-13|1-URGENT|Clerk#000000087|0|even, regular dependenc 5827|305|O|179433.51|1998-07-23|3-MEDIUM|Clerk#000000660|0|hely furiously blithe dolphins. slyly 5828|1270|F|77774.16|1994-03-06|5-LOW|Clerk#000000377|0|ages boost never during the final packa 5829|1246|O|274279.62|1997-01-11|1-URGENT|Clerk#000000196|0|gular accounts. bold accounts are blithely furiously ironic r 5830|836|F|39788.96|1993-03-25|3-MEDIUM|Clerk#000000233|0|lites haggle. ironic, ironic instructions maintain blit 5831|1387|O|159764.61|1996-11-17|5-LOW|Clerk#000000585|0|s final, final pinto beans. unusual depos 5856|361|F|95904.56|1994-11-06|2-HIGH|Clerk#000000634|0|special excuses. slyly final theodolites cajole blithely furiou 5857|1231|O|263158.48|1997-11-06|4-NOT SPECIFIED|Clerk#000000267|0|gage blithely. quickly special ac 5858|625|F|227582.06|1992-07-14|4-NOT SPECIFIED|Clerk#000000580|0|lyly pending dugouts believe through the ironic deposits. silent s 5859|43|O|320141.27|1997-04-23|1-URGENT|Clerk#000000993|0|requests boost. asymptotes across the deposits solve slyly furiously pendin 5860|127|F|14002.56|1992-02-20|4-NOT SPECIFIED|Clerk#000000079|0| beans. bold, special foxes sleep about the ir 5861|1387|O|69464.42|1997-04-10|3-MEDIUM|Clerk#000000094|0|rthogs cajole slyly. express packages sleep blithely final 5862|628|O|30940.39|1997-02-20|1-URGENT|Clerk#000000039|0|leep beneath the quickly busy excuses. ironic theodolit 5863|641|F|96316.78|1993-11-22|3-MEDIUM|Clerk#000000774|0|ets about the slyly pending ideas sleep according to the blithely 5888|448|O|92148.63|1996-09-28|3-MEDIUM|Clerk#000000748|0|quickly against the furiously final requests. evenly fi 5889|209|O|26237.34|1995-05-23|5-LOW|Clerk#000000690|0|ites wake across the slyly ironic 5890|487|F|41609.58|1992-11-04|2-HIGH|Clerk#000000013|0|packages. final, final reques 5891|452|F|68375.28|1992-12-29|3-MEDIUM|Clerk#000000302|0|ounts haggle furiously abo 5892|1003|P|122565.47|1995-05-09|5-LOW|Clerk#000000639|0| pending instruction 5893|19|F|53359.53|1992-07-08|4-NOT SPECIFIED|Clerk#000000560|0|final sentiments. instructions boost above the never speci 5894|703|F|107123.24|1994-08-13|2-HIGH|Clerk#000000776|0|regular deposits wake 5895|622|O|284265.76|1997-01-01|4-NOT SPECIFIED|Clerk#000000747|0| ironic, unusual requests cajole blithely special, special deposits. s 5920|1189|F|227213.95|1994-11-20|2-HIGH|Clerk#000000081|0|ns: even ideas cajole slyly among the packages. 
never ironic patterns 5921|571|F|224503.60|1994-04-07|5-LOW|Clerk#000000125|0|kly special requests breach. 5922|1426|O|224288.29|1996-11-14|5-LOW|Clerk#000000625|0| ironic instructions haggle furiously blithely regular accounts: even platele 5923|1003|O|220218.91|1997-05-27|2-HIGH|Clerk#000000304|0|o beans haggle slyly above the regular, even dependencies 5924|305|O|154229.85|1995-10-10|3-MEDIUM|Clerk#000000433|0|arefully after the pains. blithely ironic pinto 5925|1453|O|373674.80|1995-11-13|5-LOW|Clerk#000000602|0|ourts. boldly regular foxes might sleep. slyly express tithes against 5926|748|F|146768.49|1994-05-20|5-LOW|Clerk#000000071|0| carefully after the furiously even re 5927|1153|O|136294.71|1997-08-28|4-NOT SPECIFIED|Clerk#000000972|0|endencies according to the slyly ironic foxes detect furiously about the furio 5952|1462|O|209693.87|1997-04-14|3-MEDIUM|Clerk#000000950|0| regular, final pla 5953|58|F|117651.40|1992-03-28|1-URGENT|Clerk#000000049|0|ages are furiously. slowly bold requests 5954|268|F|282400.28|1992-12-03|1-URGENT|Clerk#000000968|0|requests along the blith 5955|922|P|79722.29|1995-03-27|5-LOW|Clerk#000000340|0|deas integrate. fluffily regular pa 5956|208|O|166980.55|1998-05-18|1-URGENT|Clerk#000000587|0|le even, express platelets. 5957|883|F|312733.08|1993-12-27|2-HIGH|Clerk#000000020|0| dependencies are slyly. bold accounts according to the carefully regular r 5958|1141|O|191632.81|1995-09-16|3-MEDIUM|Clerk#000000787|0|e final requests detect alongside of the qu 5959|230|F|253103.52|1992-05-15|3-MEDIUM|Clerk#000000913|0|into beans use ironic, unusual foxes. carefully regular excuses boost caref 5984|688|F|114443.53|1994-06-18|5-LOW|Clerk#000000023|0|ickly final pains haggle along the furiously ironic pinto bea 5985|1430|F|7032.58|1995-01-12|3-MEDIUM|Clerk#000000417|0|as nag fluffily slyly permanent accounts. regular depo 5986|1147|F|157431.69|1992-04-22|2-HIGH|Clerk#000000674|0|iously unusual notornis are citus-7.0.3/src/test/regress/data/orders.2.data000066400000000000000000004742201317107136600213100ustar00rootroot000000000000008997|607|F|147614.36|1994-07-02|5-LOW|Clerk#000000404|0|lithely. express, ironic pearls nag permanently. 8998|80|F|147264.16|1993-01-04|5-LOW|Clerk#000000733|0| fluffily pending sauternes cajo 8999|1175|F|113671.53|1994-06-13|2-HIGH|Clerk#000000097|0|ly even foxes. slyly express a 9024|1469|F|298241.36|1992-06-03|3-MEDIUM|Clerk#000000901|0|ar the theodolites. fluffily stealthy requests among the quickly regular asy 9025|739|F|97454.69|1994-05-20|5-LOW|Clerk#000000379|0|ording to the quickly regular ideas integrate above the sly platelets. sly 9026|677|O|63256.87|1996-07-24|5-LOW|Clerk#000000320|0|ironic escapades would wake carefully 9027|514|O|155500.43|1995-09-03|5-LOW|Clerk#000000918|0| sleep carefully with th 9028|1475|F|63063.84|1993-12-22|5-LOW|Clerk#000000364|0| the regular packages. daringly even f 9029|1213|F|78703.86|1992-11-20|3-MEDIUM|Clerk#000000965|0| excuses nag quickly carefully unusual excuse 9030|535|O|225136.87|1998-07-14|3-MEDIUM|Clerk#000000872|0|foxes according to the furiously silent excuses could haggle unusual d 9031|1379|F|270589.18|1993-12-27|5-LOW|Clerk#000000890|0|r packages. slyly express ideas 9056|121|O|170074.18|1996-08-08|3-MEDIUM|Clerk#000000117|0|was carefully after the furiously bold dug 9057|71|F|320227.49|1994-11-05|1-URGENT|Clerk#000000539|0| fluffily quickly ironic packages. furiously ironic accounts a 9058|403|F|63464.13|1993-06-29|2-HIGH|Clerk#000000376|0|ealthily special deposits. 
quickly regular requests wake silently. fur 9059|926|O|187590.77|1996-08-01|2-HIGH|Clerk#000000725|0|ar pinto beans sleep special 9060|463|O|45295.71|1996-06-09|1-URGENT|Clerk#000000438|0|iously. slyly regular dol 9061|325|O|66191.69|1996-01-15|2-HIGH|Clerk#000000428|0|silent excuses! slyl 9062|1102|P|212710.32|1995-03-21|2-HIGH|Clerk#000000201|0|sts cajole slyly according to the carefully slow foxes. furio 9063|1399|O|51019.90|1997-02-12|2-HIGH|Clerk#000000323|0|ffily regular grouche 9088|610|F|224148.01|1994-06-23|1-URGENT|Clerk#000000975|0|ter the blithely final deposits. furiously 9089|370|F|39118.01|1993-05-24|1-URGENT|Clerk#000000538|0|d platelets are deposits. pinto beans cajole boldly. 9090|775|O|9848.22|1996-11-21|4-NOT SPECIFIED|Clerk#000000699|0|cial theodolites at the evenl 9091|679|F|242198.88|1993-11-12|2-HIGH|Clerk#000000426|0|final packages wake carefully. 9092|664|F|202836.36|1994-07-25|4-NOT SPECIFIED|Clerk#000000144|0| slyly final waters. special packages solve 9093|619|O|19431.49|1995-12-10|3-MEDIUM|Clerk#000000333|0|even accounts. special, 9094|1430|O|46784.32|1998-05-27|3-MEDIUM|Clerk#000000805|0|es boost slyly after the platelets. shea 9095|220|O|49946.10|1995-06-11|2-HIGH|Clerk#000000621|0|riously regular accounts wake slyly special theodolites. asymptotes use f 9120|1010|F|128719.48|1992-06-22|3-MEDIUM|Clerk#000000252|0|ly ironic realms. furio 9121|434|O|94637.46|1996-07-05|5-LOW|Clerk#000000930|0| final dependencies. carefully even excuses after the 9122|79|O|234630.52|1996-12-14|4-NOT SPECIFIED|Clerk#000000991|0|across the carefull 9123|367|F|64575.87|1993-06-27|1-URGENT|Clerk#000000988|0|wake carefully pendin 9124|559|O|173690.73|1995-10-17|5-LOW|Clerk#000000381|0|nstructions after the c 9125|151|O|195514.25|1998-05-30|2-HIGH|Clerk#000000591|0|kages around the fluffily bold f 9126|371|F|106144.91|1994-11-18|3-MEDIUM|Clerk#000000319|0|deposits boost carefully against the fluffily final instructions. i 9127|611|O|79003.80|1995-10-11|4-NOT SPECIFIED|Clerk#000000628|0|ress packages across the furio 9152|1210|F|129410.55|1993-08-21|4-NOT SPECIFIED|Clerk#000000612|0|nto beans! final instructions nag slyly. slyly ironic foxes nag blith 9153|1147|O|210245.80|1997-07-19|4-NOT SPECIFIED|Clerk#000000488|0|uffily express instructions haggle carefully. quickly fluffy th 9154|1|O|357345.46|1997-06-23|4-NOT SPECIFIED|Clerk#000000328|0|y ironic packages cajole. blithely final depende 9155|355|F|83263.25|1992-08-31|1-URGENT|Clerk#000000253|0|ickly regular requests sleep alongside of the car 9156|154|F|135183.22|1994-02-07|1-URGENT|Clerk#000000387|0|deposits. dependencies wake ca 9157|595|F|52660.33|1992-06-20|5-LOW|Clerk#000000736|0|requests detect furiously special, silent packages. carefu 9158|271|F|96814.40|1995-04-25|4-NOT SPECIFIED|Clerk#000000195|0|ecial, even packages. even foxes print. regu 9159|1135|O|99594.61|1995-07-26|1-URGENT|Clerk#000000892|0|xcuses. quickly ironic deposits wake alongside of the quickly pending p 9184|1156|O|56334.07|1997-07-19|1-URGENT|Clerk#000000086|0|blithely final packages haggle according to the slyly quick pack 9185|145|F|64122.78|1994-06-16|2-HIGH|Clerk#000000951|0|leep carefully blithely regular orbits. blithely regular foxes cajol 9186|457|F|167929.61|1992-03-30|5-LOW|Clerk#000000044|0|lar, silent asymptotes haggl 9187|769|F|212469.45|1994-08-22|2-HIGH|Clerk#000000985|0|lithely ironic theodolites. furiously ironic instructions print along 9188|619|O|224857.18|1998-01-25|1-URGENT|Clerk#000000765|0|aggle quickly above the accounts. 
excuses 9189|1069|O|109530.95|1996-12-02|3-MEDIUM|Clerk#000000501|0|r waters haggle furiously fluffily ironic id 9190|460|O|272754.44|1998-03-03|2-HIGH|Clerk#000000196|0|side of the permanently ironic courts unwind 9191|535|O|21182.03|1996-03-24|3-MEDIUM|Clerk#000000095|0|final packages wake carefully. 9216|643|O|147294.33|1995-04-15|3-MEDIUM|Clerk#000000320|0|its nag regular deposits. slyly even reques 9217|1489|O|117161.58|1997-05-26|1-URGENT|Clerk#000000051|0| above the blithely regular d 9218|241|O|31407.74|1997-12-10|4-NOT SPECIFIED|Clerk#000000795|0|hely regular foxes are alongside of the furiously regular deposit 9219|412|F|205446.61|1994-09-29|5-LOW|Clerk#000000382|0|nst the furiously regular dinos. regular asymptotes against the slyly fina 9220|482|O|992.46|1998-04-07|1-URGENT|Clerk#000000880|0|y bravely ironic deposits. furiously unusual sentiments about the fluffily ir 9221|85|F|273797.07|1995-01-04|5-LOW|Clerk#000000240|0|ess accounts sleep carefully. carefully final pinto beans sleep f 9222|211|F|258460.61|1994-02-11|2-HIGH|Clerk#000000148|0|ding to the instructions. regular requests nag f 9223|304|O|215658.19|1997-08-05|4-NOT SPECIFIED|Clerk#000000785|0|ully according to the blithely 9248|1163|F|110964.49|1994-12-24|4-NOT SPECIFIED|Clerk#000000259|0|ids cajole regular, regular dependencies. deposits along the dolphi 9249|1348|F|161815.08|1993-05-20|2-HIGH|Clerk#000000756|0|slyly pending accounts. packages wake c 9250|1345|F|85572.38|1992-08-14|1-URGENT|Clerk#000000535|0|y final packages; carefully pendi 9251|323|F|29910.23|1993-08-30|3-MEDIUM|Clerk#000000374|0|ld requests. deposits use blithely ruthlessly unusual packages. fluffil 9252|412|O|89891.37|1997-12-11|2-HIGH|Clerk#000000454|0|encies affix slyly perma 9253|818|F|255856.03|1992-01-31|1-URGENT|Clerk#000000307|0|es. sometimes regular grouches wa 9254|1276|F|56365.27|1993-05-15|3-MEDIUM|Clerk#000000761|0|r deposits. quickly bold requests use- 9255|472|O|153124.60|1995-12-08|4-NOT SPECIFIED|Clerk#000000476|0|y bold asymptotes-- carefully 9280|1223|O|275205.91|1998-06-12|5-LOW|Clerk#000000022|0|beans alongside of the fluffily express asymptotes integrate 9281|904|F|173278.28|1992-02-24|1-URGENT|Clerk#000000530|0|eep furiously according to the requests; ideas integrate 9282|79|O|34765.35|1995-11-26|4-NOT SPECIFIED|Clerk#000000824|0|ses? carefully express platelets sleep blithely against the blithely specia 9283|1255|F|7798.12|1994-06-03|3-MEDIUM|Clerk#000000859|0|ully even platelets. silent packages 9284|661|F|56207.47|1994-06-14|4-NOT SPECIFIED|Clerk#000000544|0| furiously across the final deposits. quickly ironic requests accordin 9285|1012|F|118683.77|1994-01-03|1-URGENT|Clerk#000000553|0|sts. express accounts snooze; furiously final 9286|505|P|155839.92|1995-04-04|1-URGENT|Clerk#000000557|0|ake. carefully bold packages promise with the 9287|790|O|13525.92|1998-01-02|4-NOT SPECIFIED|Clerk#000000636|0|ously unusual packages. regular foxes detect. blithely slow ideas sl 9312|920|F|226584.81|1992-05-19|4-NOT SPECIFIED|Clerk#000000897|0|ckly slyly regular packages. unusual packages are. regular 9313|1348|O|222818.15|1996-04-06|3-MEDIUM|Clerk#000000926|0|s wake slyly against the slyly express packages. ironic packages 9314|1313|F|14188.37|1995-02-15|4-NOT SPECIFIED|Clerk#000000907|0|eas. 
blithely regular asymptotes sleep carefully across the slyly even 9315|587|O|16012.39|1998-03-17|3-MEDIUM|Clerk#000000559|0|ackages are furiously alongside of the slyly regular theodolite 9316|1393|O|225240.88|1995-09-18|4-NOT SPECIFIED|Clerk#000000363|0|inal pinto beans haggle c 9317|100|F|156751.98|1994-03-15|4-NOT SPECIFIED|Clerk#000000347|0|aves across the unusual, regular sauternes wake 9318|1463|F|109251.02|1992-07-28|1-URGENT|Clerk#000000738|0|ss courts across the slyly silent foxes are regular, final pac 9319|880|F|104039.47|1992-09-11|1-URGENT|Clerk#000000155|0|. fluffily ironic instructions detect 9344|664|O|348335.72|1995-12-07|5-LOW|Clerk#000000961|0|ial deposits nag blithely alongside of the caref 9345|1376|O|163627.01|1996-01-22|4-NOT SPECIFIED|Clerk#000000126|0|out the daringly ironic packages affix furiously fluffi 9346|917|O|173943.50|1996-10-09|4-NOT SPECIFIED|Clerk#000000088|0|furiously final packages integrate stealthily expres 9347|673|F|162913.90|1992-08-10|5-LOW|Clerk#000000268|0|he gifts. slyly permanent requests 9348|658|O|120413.73|1996-06-07|5-LOW|Clerk#000000416|0|ackages. carefully ironic pinto beans about the close accoun 9349|1414|O|217101.05|1997-08-29|4-NOT SPECIFIED|Clerk#000000804|0|. fluffily final accounts haggle; furiously reg 9350|145|F|255516.01|1992-11-01|1-URGENT|Clerk#000000253|0|ackages doze evenly across the foxes. quickly ironic reques 9351|898|O|81166.06|1996-05-17|5-LOW|Clerk#000000430|0|ven dependencies. furiously ironic dependencies promise along the slyly 9376|835|O|145196.26|1997-07-24|2-HIGH|Clerk#000000348|0|ng the carefully pending ac 9377|340|F|9021.26|1993-04-22|2-HIGH|Clerk#000000466|0|ers above the carefull 9378|457|O|115914.73|1997-10-16|1-URGENT|Clerk#000000014|0|ultipliers wake furiously never special deposits! 9379|350|F|152004.78|1992-01-04|1-URGENT|Clerk#000000764|0|nusual deposits. furiously final packages wake evenly even tithes. qu 9380|887|F|167911.91|1994-08-26|1-URGENT|Clerk#000000962|0| deposits boost along the carefully ironic deposits. express requests use. 9381|1184|O|94270.85|1995-12-18|4-NOT SPECIFIED|Clerk#000000215|0|y final Tiresias are. requests 9382|337|O|150840.29|1996-02-19|1-URGENT|Clerk#000000308|0|l accounts. furiously ironic deposits use. 9383|140|F|180583.64|1994-03-17|4-NOT SPECIFIED|Clerk#000000237|0|uffily final pinto beans cajole ca 9408|184|F|11891.10|1992-10-21|3-MEDIUM|Clerk#000000664|0|xes cajole carefully furiously final pains. carefully express accou 9409|970|F|155483.25|1992-05-06|1-URGENT|Clerk#000000300|0|ly express platelets above the sl 9410|787|O|88246.41|1997-07-18|5-LOW|Clerk#000000985|0|theodolites solve fluffily. blithely iron 9411|661|F|55576.82|1995-02-07|2-HIGH|Clerk#000000594|0|rses. furiously regular requests haggl 9412|1216|O|245785.65|1998-07-14|2-HIGH|Clerk#000000016|0|requests. carefully regular packages play carefully pending dependencies. ca 9413|148|O|150237.99|1995-07-13|5-LOW|Clerk#000000202|0|latelets are blithely requests. ironic packages boost slyly across t 9414|1333|F|98169.43|1993-09-10|3-MEDIUM|Clerk#000000661|0|above the slyly ironic 9415|1049|F|241463.63|1992-08-24|1-URGENT|Clerk#000000311|0|ccounts boost blithely. final 9440|643|F|4331.68|1994-06-08|5-LOW|Clerk#000000711|0| requests. regular, final pinto beans are. furiously regular accounts det 9441|334|O|3216.31|1997-06-18|4-NOT SPECIFIED|Clerk#000000381|0|iously final deposits. express, regular waters haggle sl 9442|1267|F|103119.65|1993-03-11|4-NOT SPECIFIED|Clerk#000000948|0|hely special courts. 
furiously final foxes sleep quickly. quick, 9443|304|O|166940.25|1997-05-07|4-NOT SPECIFIED|Clerk#000000317|0|y along the carefully reg 9444|106|O|36255.65|1996-07-18|2-HIGH|Clerk#000000387|0| quickly. always silent platelets must sleep f 9445|928|O|76754.58|1997-11-14|4-NOT SPECIFIED|Clerk#000000394|0|as. slyly ironic deposits along 9446|1358|O|216362.20|1997-12-29|1-URGENT|Clerk#000000173|0|ake pinto beans. slyly f 9447|1162|O|79483.65|1995-06-19|2-HIGH|Clerk#000000115|0|theodolites haggle carefully instructio 9472|481|O|44176.66|1995-06-17|1-URGENT|Clerk#000000979|0|bove the quickly final deposits are carefully 9473|23|F|212682.92|1992-10-12|4-NOT SPECIFIED|Clerk#000000541|0|ts sleep above the accounts. slyly final deposits are regularly. r 9474|133|P|246662.69|1995-05-12|3-MEDIUM|Clerk#000000710|0|ely final requests: dependencies cajole slyly. slyly 9475|1442|O|206441.11|1996-11-22|2-HIGH|Clerk#000000233|0| notornis. ironically even instructions en 9476|1283|F|177524.44|1994-05-26|4-NOT SPECIFIED|Clerk#000000088|0|e slyly final requests might integrate carefully against the slyly express p 9477|170|O|269394.57|1998-03-19|3-MEDIUM|Clerk#000000812|0|lites along the even deposits use bold pinto beans. regular requests 9478|940|F|173044.15|1994-03-29|2-HIGH|Clerk#000000033|0|ound the blithely express instructions. furiously 9479|1373|F|76316.92|1994-09-22|3-MEDIUM|Clerk#000000047|0|ter the carefully ironic 9504|736|F|171165.10|1992-11-26|3-MEDIUM|Clerk#000000902|0|among the quickly bold deposits haggle exp 9505|316|F|283831.70|1992-06-08|2-HIGH|Clerk#000000873|0|hely ironic asymptot 9506|1232|F|47720.50|1994-02-11|4-NOT SPECIFIED|Clerk#000000441|0|carefully ironic accou 9507|517|F|34161.51|1994-11-30|5-LOW|Clerk#000000438|0|press pinto beans are slyly. final packages cajole. quickly regular excus 9508|448|O|235237.15|1996-04-22|5-LOW|Clerk#000000312|0|ss attainments slee 9509|37|F|224330.32|1992-10-08|4-NOT SPECIFIED|Clerk#000000350|0|iously final instructions sleep fu 9510|235|O|93930.42|1996-11-20|5-LOW|Clerk#000000302|0|packages sleep furiously. bold theodolites haggle slyly. ironic, 9511|1141|O|96086.28|1996-08-01|5-LOW|Clerk#000000555|0|out the packages may 9536|1138|O|105639.69|1997-08-20|4-NOT SPECIFIED|Clerk#000000224|0|. slyly even ideas doubt slyly. slowly regular orbits cajole. fur 9537|806|F|249384.96|1995-02-14|4-NOT SPECIFIED|Clerk#000000203|0| packages. final, regular foxes haggle bli 9538|1399|F|32388.43|1994-03-11|4-NOT SPECIFIED|Clerk#000000040|0|ly. slyly final accounts nag carefully regular ideas. blithely ironic theodo 9539|262|O|26683.19|1995-11-15|2-HIGH|Clerk#000000917|0|iously ironic deposits affix furiously alongside of the slyly even foxes. even 9540|784|F|96163.55|1994-06-11|1-URGENT|Clerk#000000048|0|t above the stealthily bold theodolites 9541|598|F|105885.33|1992-03-26|4-NOT SPECIFIED|Clerk#000000840|0|xcuses dazzle furiously regular the 9542|1147|O|121584.41|1995-12-16|2-HIGH|Clerk#000000349|0|theodolites. blithely pendin 9543|850|F|145441.29|1992-05-10|1-URGENT|Clerk#000000187|0| unusual warhorses cajole 9568|619|F|80658.16|1993-03-08|4-NOT SPECIFIED|Clerk#000000270|0|ng the quickly unusual pinto beans sublate blithely ca 9569|1264|O|210401.62|1997-11-01|2-HIGH|Clerk#000000856|0|pecial deposits after the final, pending packages wake carefully 9570|1435|O|79124.61|1996-06-04|2-HIGH|Clerk#000000397|0|ccounts. foxes nag special, special theodolites 9571|1366|O|58131.32|1996-04-30|4-NOT SPECIFIED|Clerk#000000263|0| furiously express requests. even requests are. 
carefully final a 9572|49|O|85669.13|1998-03-28|4-NOT SPECIFIED|Clerk#000000452|0|atelets. slyly regular requests nag quickly ev 9573|1361|F|140154.64|1992-06-14|3-MEDIUM|Clerk#000000250|0|ix carefully busily unusual deposits. thin excuses haggle quickly. quickly 9574|790|O|145838.53|1995-07-31|3-MEDIUM|Clerk#000000814|0|efully regular platelets. pending packages unwind carefully among the fur 9575|916|F|27475.15|1992-03-31|5-LOW|Clerk#000000133|0|nts haggle busily unusual, even packages. regular packages u 9600|536|F|99615.09|1993-11-08|4-NOT SPECIFIED|Clerk#000000467|0|he unusual, ironic requests nag furiously ironic accounts 9601|1435|O|143520.49|1996-03-20|1-URGENT|Clerk#000000677|0|ugh the slyly regular requests. furiously even pinto beans are blithely slyl 9602|1481|F|193843.49|1992-11-11|1-URGENT|Clerk#000000086|0| across the slyly ironic ideas. carefully unusual requests 9603|1204|O|315928.32|1998-01-09|5-LOW|Clerk#000000191|0|ng the quickly special requests caj 9604|620|O|48743.62|1996-06-25|2-HIGH|Clerk#000000414|0|usly ironic theodolites! blithely final ideas 9605|1220|F|113112.29|1992-12-26|4-NOT SPECIFIED|Clerk#000000777|0|yly bold pinto beans are furiously packages. slyly express courts play 9606|152|F|149838.50|1994-07-28|4-NOT SPECIFIED|Clerk#000000474|0| above the always regular pinto beans. special, unusual accounts grow. pending 9607|643|O|44978.35|1995-12-18|1-URGENT|Clerk#000000752|0| ironic requests are carefully. silent 9632|1429|F|146044.97|1992-01-17|5-LOW|Clerk#000000857|0|y ironic theodolites are silent foxes. blithely final plate 9633|1336|O|191021.09|1995-09-12|4-NOT SPECIFIED|Clerk#000000918|0|ns around the slyly ironic pinto beans cajole carefully ironic depths. careful 9634|958|O|314926.50|1998-01-13|2-HIGH|Clerk#000000166|0| carefully. slyly ironic courts mold. ironic 9635|847|F|168349.65|1994-04-14|1-URGENT|Clerk#000000127|0|the quickly ironic ideas sleep at the quickly 9636|304|O|109568.95|1997-01-30|1-URGENT|Clerk#000000263|0|lar foxes are requests. quickly even pinto be 9637|1411|F|112613.87|1994-01-04|5-LOW|Clerk#000000953|0|s. furiously final requests believe furiously against the brave 9638|1126|O|112055.66|1996-06-08|2-HIGH|Clerk#000000953|0|requests after the furiously unusual accounts integrate slyly u 9639|722|F|244899.69|1993-09-06|4-NOT SPECIFIED|Clerk#000000666|0|press, express pinto 9664|310|F|105001.15|1993-04-08|2-HIGH|Clerk#000000715|0|ccounts across the slyly ironic pinto be 9665|1067|F|81734.81|1994-04-24|5-LOW|Clerk#000000277|0|sleep furiously carefully ironic foxes. pinto beans wake. final deposits caj 9666|221|O|240666.92|1996-05-09|1-URGENT|Clerk#000000511|0|ss the fluffily express tithes. 9667|1102|O|208860.66|1996-02-24|3-MEDIUM|Clerk#000000142|0|nal instructions. special accounts along the regular 9668|847|F|77447.46|1994-09-07|5-LOW|Clerk#000000469|0|refully ironic accounts. furiously bold packages a 9669|619|F|79738.94|1994-04-23|3-MEDIUM|Clerk#000000297|0|deposits promise blithely ironic theodolites. fluffily bold requests 9670|1387|O|200921.16|1997-11-08|3-MEDIUM|Clerk#000000035|0|unts? furiously express deposits are blithely. regular, special forges sle 9671|136|O|157241.36|1995-09-13|2-HIGH|Clerk#000000417|0|ages boost slyly against the furiously 9696|575|F|211382.08|1995-02-20|4-NOT SPECIFIED|Clerk#000000971|0|xpress requests would are pinto beans. 9697|164|F|174800.10|1995-01-27|3-MEDIUM|Clerk#000000432|0|courts are. 
even platelets was alongside of 9698|1487|O|7676.88|1995-08-07|2-HIGH|Clerk#000000040|0| around the quickly bold packages sle 9699|875|F|10782.39|1995-03-12|3-MEDIUM|Clerk#000000853|0|kly regular platelets. slyly silent accounts alongside of the blithely r 9700|544|O|266611.98|1995-08-09|1-URGENT|Clerk#000000137|0|althy pinto beans wake blithely. qui 9701|112|F|277385.28|1993-02-28|5-LOW|Clerk#000000399|0|bold deposits cajole furiously bold requests. even excuses 9702|1201|O|182347.71|1996-08-22|3-MEDIUM|Clerk#000000852|0|tions haggle slyly among the ironic theodolites. blithel 9703|1126|F|28954.25|1994-10-07|5-LOW|Clerk#000000101|0|realms wake. express, ironic accou 9728|826|F|39398.24|1993-07-03|5-LOW|Clerk#000000647|0|ng requests. fluffily enticing re 9729|970|O|162532.59|1996-07-06|3-MEDIUM|Clerk#000000148|0|ndencies about the care 9730|143|O|278148.33|1997-03-04|5-LOW|Clerk#000000743|0| final dependencies doze furiously slyly silent ideas? furiousl 9731|283|F|21613.76|1993-10-21|5-LOW|Clerk#000000586|0| dolphins nag slyly even deposits-- blithely 9732|820|P|69634.87|1995-03-14|4-NOT SPECIFIED|Clerk#000000599|0|f the special, even instructions. carefully final requests use along the 9733|1000|F|72374.84|1994-03-18|4-NOT SPECIFIED|Clerk#000000997|0|n dolphins haggle. slyly express accounts wake carefully final asymptotes. qui 9734|835|O|124297.04|1997-02-03|1-URGENT|Clerk#000000086|0|equests affix quietly bold deposits. dependencies affix among the quietly 9735|337|O|115276.76|1997-06-28|4-NOT SPECIFIED|Clerk#000000454|0|nic foxes. accounts detect fur 9760|1181|O|158777.10|1998-04-14|5-LOW|Clerk#000000207|0|anent accounts. accounts sleep carefully slyl 9761|433|F|70880.34|1992-01-23|4-NOT SPECIFIED|Clerk#000000468|0| permanent requests. furious requests nag blithely about the q 9762|1492|F|238951.03|1994-01-04|2-HIGH|Clerk#000000861|0|ic requests cajole quickly unusual accounts. regular foxes are blithely. in 9763|1492|O|132201.56|1995-08-14|3-MEDIUM|Clerk#000000374|0| requests detect carefull 9764|1256|F|56685.16|1992-12-30|1-URGENT|Clerk#000000289|0|nts. platelets wake blithe 9765|1153|F|45183.21|1992-10-13|5-LOW|Clerk#000000364|0|ts. even pinto beans haggle alongside of the theodolites. 9766|235|F|165508.82|1992-08-15|1-URGENT|Clerk#000000265|0|s excuses cajole quickly. regul 9767|973|F|46852.11|1994-06-28|5-LOW|Clerk#000000546|0|asymptotes hinder blithely after the busily final fo 9792|1114|O|171667.54|1995-07-24|5-LOW|Clerk#000000929|0| the special accounts. carefully special ideas mainta 9793|1198|F|80712.82|1992-04-04|3-MEDIUM|Clerk#000000770|0| carefully ironic courts haggle evenly. carefully final requests ca 9794|505|F|216443.67|1992-03-25|4-NOT SPECIFIED|Clerk#000000852|0|dolites grow furiously fluffily final requests. blithely special 9795|370|O|215242.29|1998-02-04|3-MEDIUM|Clerk#000000481|0|ng the furiously iron 9796|1052|F|246896.17|1993-09-11|1-URGENT|Clerk#000000150|0|lyly regular packages. furio 9797|43|F|209264.66|1993-09-13|1-URGENT|Clerk#000000296|0|packages. blithe requests affix quickly carefully unusual ideas. blithe 9798|1135|O|23604.81|1996-03-09|1-URGENT|Clerk#000000485|0|ial deposits. accounts detect dependencies. silent accounts cajole flu 9799|461|O|152265.02|1996-04-07|4-NOT SPECIFIED|Clerk#000000526|0|ully final requests cajole carefully according to t 9824|472|F|240691.30|1995-01-23|1-URGENT|Clerk#000000653|0|furiously even theodolites impress even, special asymptotes. 
idle ideas dazzl 9825|28|O|93645.30|1996-04-09|1-URGENT|Clerk#000000990|0|rding to the pending 9826|698|F|105223.73|1992-03-23|3-MEDIUM|Clerk#000000777|0|deas haggle slyly final reques 9827|1289|O|36448.45|1998-07-04|4-NOT SPECIFIED|Clerk#000000085|0|e unusual dependencies. regular accounts alongside of the quickly regular f 9828|1306|F|269649.39|1992-04-25|3-MEDIUM|Clerk#000000355|0| deposits are carefully bold notornis. blith 9829|1379|O|6613.63|1995-12-09|1-URGENT|Clerk#000000004|0|ke across the requests. silently final foxes wake never amo 9830|493|O|202703.99|1997-09-08|2-HIGH|Clerk#000000191|0|final packages are furiously beside the quickly final e 9831|790|O|86410.08|1996-06-20|4-NOT SPECIFIED|Clerk#000000010|0|mas above the even instructions haggle ironical 9856|802|F|236403.75|1993-05-30|3-MEDIUM|Clerk#000000278|0|usual tithes against the express requests sleep across t 9857|172|F|158381.98|1993-05-12|2-HIGH|Clerk#000000627|0|ons haggle quickly above the blithely regular sauternes. slyly express instru 9858|1399|O|103920.84|1997-12-08|3-MEDIUM|Clerk#000000432|0|e of the fluffily final packa 9859|475|O|247788.34|1996-07-28|3-MEDIUM|Clerk#000000989|0|xes boost slyly. furiously regul 9860|623|F|151734.45|1993-10-11|1-URGENT|Clerk#000000345|0|carefully. ironic ideas haggle theodolites. quickly speci 9861|1477|F|66189.04|1992-10-29|5-LOW|Clerk#000000113|0|le after the unusual deposits. even acc 9862|10|O|193015.83|1997-10-10|2-HIGH|Clerk#000000168|0|among the instructions was after the slyly special packages. final, final hoc 9863|247|O|362398.22|1995-09-05|1-URGENT|Clerk#000000473|0|urts nag finally even, stealthy instructions. quickly final ideas 9888|1319|F|74228.41|1994-09-13|5-LOW|Clerk#000000434|0|g to the carefully final foxes. fur 9889|82|F|35362.96|1994-09-09|3-MEDIUM|Clerk#000000786|0|lly atop the foxes. doggedly final packages integrate thin 9890|731|P|87423.78|1995-02-22|2-HIGH|Clerk#000000168|0|uickly silent theodolites. pe 9891|1093|P|61268.10|1995-06-09|1-URGENT|Clerk#000000544|0|ut the theodolites. bold, even grouches across t 9892|691|F|16104.32|1992-07-17|3-MEDIUM|Clerk#000000387|0|ts. blithe deposits wake carefully carefully 9893|412|F|105380.78|1994-08-17|3-MEDIUM|Clerk#000000931|0|y special dependencies run 9894|851|F|100473.64|1994-03-19|2-HIGH|Clerk#000000424|0| asymptotes along the regular, bold instructions eat quickly again 9895|142|F|245503.17|1993-04-21|5-LOW|Clerk#000000965|0|lithely. furiously even pinto beans h 9920|1073|O|75014.21|1996-06-06|1-URGENT|Clerk#000000007|0|ites. special requests believe against the furiously regular requests. furiou 9921|1379|P|143610.80|1995-05-15|5-LOW|Clerk#000000936|0|r, blithe deposits. furiously bold 9922|802|P|186264.65|1995-04-04|4-NOT SPECIFIED|Clerk#000000131|0|ajole blithely across the unusual, unusual packages. car 9923|1217|F|223804.09|1994-10-06|2-HIGH|Clerk#000000818|0|instructions use slyl 9924|799|O|192545.87|1997-05-09|4-NOT SPECIFIED|Clerk#000000639|0|onic excuses wake slyly 9925|52|F|296109.93|1992-01-31|1-URGENT|Clerk#000000751|0|ve the always ironic pinto bean 9926|254|F|225023.94|1994-10-12|2-HIGH|Clerk#000000090|0|sits believe carefully ironic pinto beans. sl 9927|13|O|238640.42|1995-08-17|2-HIGH|Clerk#000000388|0|sts sleep regular, dogged packages. s 9952|521|F|87414.24|1992-05-02|2-HIGH|Clerk#000000549|0|ully about the daringly pending idea 9953|946|F|18034.69|1995-02-28|2-HIGH|Clerk#000000364|0|iresias. furiously bold deposits wake. 
furiously 9954|19|F|207935.24|1994-06-20|4-NOT SPECIFIED|Clerk#000000651|0| furiously express packages. slyly ironic packages nag sly 9955|1010|O|188555.97|1995-09-05|1-URGENT|Clerk#000000588|0|gainst the quickly regular deposits. carefully final accounts are furiously 9956|530|F|38746.89|1993-03-17|2-HIGH|Clerk#000000001|0|s? ironic foxes wake even, ironic de 9957|1465|F|64682.58|1993-12-01|4-NOT SPECIFIED|Clerk#000000874|0| unusual sauternes. fluffily e 9958|764|F|192897.71|1993-11-26|4-NOT SPECIFIED|Clerk#000000199|0| idle depths. quickly even pearls are express theodolites. final pint 9959|689|O|19023.25|1995-09-25|3-MEDIUM|Clerk#000000755|0|y special deposits. slyly final deposits against 9984|376|O|232843.69|1998-07-10|1-URGENT|Clerk#000000161|0|ronic packages-- final packages boost! even, express frets boo 9985|1207|F|82406.34|1993-08-08|5-LOW|Clerk#000000636|0|ing depths boost about the even depo 9986|289|O|150409.70|1997-09-22|3-MEDIUM|Clerk#000000913|0|the ruthlessly regular courts. ironic pack 9987|997|O|241701.18|1995-11-28|5-LOW|Clerk#000000308|0|ding to the regular foxes dazzle slyly f 9988|1372|O|139134.68|1997-12-05|4-NOT SPECIFIED|Clerk#000000015|0|d, final foxes. fluffily even sheaves w 9989|1210|F|113256.41|1993-06-22|3-MEDIUM|Clerk#000000298|0| bold orbits. pinto beans haggle carefully unusual accounts. furiously final 9990|391|F|80673.96|1992-03-28|2-HIGH|Clerk#000000466|0|y express foxes. blithely 9991|823|O|88850.20|1996-06-10|2-HIGH|Clerk#000000542|0|foxes nag fluffily packages. b 10016|1295|F|29180.26|1993-02-02|1-URGENT|Clerk#000000546|0|ptotes. platelets across the blithely regular accounts us 10017|811|O|100168.42|1998-04-09|4-NOT SPECIFIED|Clerk#000000617|0|ly accounts. carefully regular pinto beans wake according to th 10018|307|F|85330.56|1993-06-10|3-MEDIUM|Clerk#000000855|0|ld accounts above the bold, regular requests ha 10019|919|F|169596.33|1994-07-19|4-NOT SPECIFIED|Clerk#000000887|0|ests are carefully blithely ironic instructi 10020|469|O|99184.61|1998-04-08|3-MEDIUM|Clerk#000000162|0|yly against the final warth 10021|1282|F|50708.88|1992-08-01|4-NOT SPECIFIED|Clerk#000000321|0|ide of the slyly even Tiresias. carefully express fox 10022|929|F|71058.00|1994-03-05|5-LOW|Clerk#000000445|0|nto beans. furiously silent deposits nag. bold requests are ironic 10023|1204|O|107965.02|1996-12-02|1-URGENT|Clerk#000000596|0|onically even packages haggle among the slyly brave pin 10048|803|F|63327.47|1994-05-16|4-NOT SPECIFIED|Clerk#000000893|0|nt pinto beans are. ironi 10049|683|O|169774.94|1997-07-23|1-URGENT|Clerk#000000648|0|ly special forges. blithely bo 10050|1046|O|151895.89|1996-09-03|2-HIGH|Clerk#000000530|0|bove the permanently regular multipliers. special req 10051|661|O|168753.53|1996-05-25|2-HIGH|Clerk#000000804|0|ly. quickly unusual de 10052|763|F|195461.73|1994-09-08|4-NOT SPECIFIED|Clerk#000000488|0|carefully final packages. ironic foxes cajo 10053|1441|F|152632.75|1992-01-11|2-HIGH|Clerk#000000508|0|ual deposits haggle against the furiously final realms. b 10054|665|O|111854.62|1995-04-29|2-HIGH|Clerk#000000093|0|furiously regular platele 10055|1276|O|108949.78|1996-02-29|5-LOW|Clerk#000000508|0|e bravely bold notornis. carefully reg 10080|109|F|219815.04|1993-02-13|5-LOW|Clerk#000000362|0|carefully blithely express epitaph 10081|493|F|267898.00|1993-08-08|4-NOT SPECIFIED|Clerk#000000097|0|eans would sleep. carefully iro 10082|1264|F|216021.80|1994-08-31|4-NOT SPECIFIED|Clerk#000000049|0|ayers use special excuses. 
special req 10083|565|O|55168.12|1995-09-26|5-LOW|Clerk#000000949|0|t the brave decoys sleep regularly among the iron 10084|1424|O|5317.21|1997-07-08|1-URGENT|Clerk#000000088|0|grow blithely packages. slyly final requests cajole; accounts 10085|518|O|224630.63|1996-03-31|4-NOT SPECIFIED|Clerk#000000074|0|o beans wake against the blithely final accounts. fluffily silent f 10086|397|O|51011.55|1995-05-16|5-LOW|Clerk#000000728|0|its wake alongside of the f 10087|404|O|78013.94|1997-03-31|2-HIGH|Clerk#000000001|0|ounts integrate despite the regular ideas. furiously quiet idea 10112|223|F|35243.42|1993-12-21|5-LOW|Clerk#000000586|0| deposits. furiously even asymptotes alongside of the slyly regular 10113|56|F|193866.35|1994-06-13|1-URGENT|Clerk#000000094|0|e quickly. regular accounts boost bravely regular instructions. quickly fina 10114|1064|F|46899.44|1993-02-09|1-URGENT|Clerk#000000757|0|al requests. quickly express de 10115|799|O|144986.20|1998-07-07|2-HIGH|Clerk#000000530|0|even accounts. express ideas 10116|118|O|222761.53|1997-03-26|5-LOW|Clerk#000000991|0|mptotes. carefully express depo 10117|734|F|130230.17|1993-11-17|3-MEDIUM|Clerk#000000343|0|ilent theodolites sleep furiously 10118|742|F|24563.35|1994-04-11|1-URGENT|Clerk#000000431|0|ithely pending foxes integrate across the fin 10119|509|O|51762.72|1996-07-11|3-MEDIUM|Clerk#000000868|0|ar depths. slyly sly theodolites sleep slyly. furiously bold foxes integrate 10144|311|F|235400.84|1995-01-03|1-URGENT|Clerk#000000751|0|ironic accounts cajole blithely around the blithe deposits. ir 10145|646|P|302026.89|1995-05-25|5-LOW|Clerk#000000550|0|y unusual deposits sleep furiously i 10146|994|F|237459.34|1994-05-02|5-LOW|Clerk#000000653|0|hin gifts nag. furiously pending deposits after the sly frets sl 10147|1166|F|203640.84|1993-10-10|2-HIGH|Clerk#000000212|0|ilently ironic pinto beans. 10148|1462|F|140605.75|1994-03-26|5-LOW|Clerk#000000334|0|the bold ideas are carefully ruthlessly special pac 10149|26|F|169118.62|1994-09-24|2-HIGH|Clerk#000000681|0|final sentiments. slyly special bra 10150|1325|F|291469.33|1992-04-30|1-URGENT|Clerk#000000802|0|r the requests sleep s 10151|589|F|10295.06|1992-01-15|1-URGENT|Clerk#000000645|0|ideas. deposits haggle quickly accounts; f 10176|985|O|102621.47|1996-08-26|3-MEDIUM|Clerk#000000456|0|unts. bold accounts are carefully according to the quickly p 10177|568|O|209213.44|1996-06-29|3-MEDIUM|Clerk#000000221|0|nstructions; bold, regular attainments among th 10178|1148|F|46311.36|1993-12-15|4-NOT SPECIFIED|Clerk#000000756|0|ray across the ironic, final dependencies. 10179|1157|O|153533.06|1996-04-30|4-NOT SPECIFIED|Clerk#000000803|0|sly bold packages haggle about the the 10180|712|O|76230.32|1997-03-10|1-URGENT|Clerk#000000743|0|ke blithely carefully even packag 10181|874|O|46249.63|1996-02-13|3-MEDIUM|Clerk#000000866|0|, pending deposits. express 10182|268|F|199446.10|1994-05-25|5-LOW|Clerk#000000304|0|s. slyly final pinto beans wake a 10183|250|F|110520.40|1994-11-25|1-URGENT|Clerk#000000534|0| dependencies. blithely pending requests about the furiously regular pac 10208|1028|O|286549.53|1996-08-26|3-MEDIUM|Clerk#000000259|0| ideas. slow, ironic instructions doze furiou 10209|1210|F|400191.77|1993-11-30|3-MEDIUM|Clerk#000000153|0|ts wake. slyly blithe 10210|937|O|148557.25|1995-06-24|4-NOT SPECIFIED|Clerk#000000573|0|ts. furiously bold ideas about the furiously bold pac 10211|797|O|192242.04|1997-07-29|4-NOT SPECIFIED|Clerk#000000363|0| careful theodolites. 
special accounts caj 10212|1471|F|126475.34|1993-05-24|5-LOW|Clerk#000000011|0|uickly according to the pending pinto beans. instructions brea 10213|1288|F|42898.55|1995-02-18|2-HIGH|Clerk#000000845|0|ly ironic dependencies. final excuses acros 10214|913|O|145287.98|1996-01-30|2-HIGH|Clerk#000000991|0|encies after the deposits sleep furiously bold packages. even deposits 10215|1031|O|156012.73|1996-08-17|1-URGENT|Clerk#000000770|0|pecial instructions. fluffily regular packa 10240|691|F|41343.99|1994-05-16|4-NOT SPECIFIED|Clerk#000000109|0|al accounts are along the unusual packages. unusual, even platelets use slyl 10241|271|O|54454.72|1997-09-30|5-LOW|Clerk#000000366|0|y unusual asymptotes. regular dependencies wake fluffily about the 10242|101|F|193761.97|1993-12-13|3-MEDIUM|Clerk#000000234|0|s was slyly alongside of the deposits. 10243|385|O|72189.37|1995-12-25|2-HIGH|Clerk#000000844|0|nic requests. boldly regular deposits haggle blithely. orbits integrate 10244|298|O|129419.40|1996-04-04|3-MEDIUM|Clerk#000000459|0|lent pinto beans. furiously iro 10245|587|F|268471.58|1995-02-14|3-MEDIUM|Clerk#000000785|0| beans. foxes haggle around the car 10246|1231|O|188201.88|1997-06-19|1-URGENT|Clerk#000000571|0|s. blithely unusual packages affix according to the hockey players. regular 10247|866|F|187970.24|1993-01-11|3-MEDIUM|Clerk#000000646|0|fully after the carefully pending requests. even, pending ideas alongside 10272|730|F|303855.22|1994-01-18|3-MEDIUM|Clerk#000000680|0| blithely express accounts. slyly regular pinto beans use slyly. 10273|112|O|272748.60|1998-04-24|1-URGENT|Clerk#000000303|0|n, ironic packages. slyly close accounts sleep. blithe 10274|1369|F|50775.14|1993-12-10|1-URGENT|Clerk#000000939|0|sits. fluffily bold requests cajole accounts 10275|83|O|78422.44|1997-05-16|5-LOW|Clerk#000000202|0|fts at the furiously express accounts are whithout the slyly slow deposits. f 10276|1463|O|264683.22|1996-02-24|3-MEDIUM|Clerk#000000706|0|y even deposits. even accounts nag among the ironic asymptotes. req 10277|538|F|139188.55|1994-04-08|3-MEDIUM|Clerk#000000655|0|counts sleep around the special 10278|359|O|162008.27|1995-09-13|5-LOW|Clerk#000000697|0|le quickly. pending, bold theo 10279|1223|O|227770.72|1996-09-22|5-LOW|Clerk#000000051|0|nal deposits. fluffily silent ideas are across the 10304|814|O|302630.21|1995-11-11|3-MEDIUM|Clerk#000000230|0|l deposits nag pending, regular attainments. ironic ideas detect. unusu 10305|25|F|153676.38|1994-07-08|5-LOW|Clerk#000000429|0|phins use furiously about the quickly bold id 10306|152|F|91506.19|1993-07-22|5-LOW|Clerk#000000345|0| slyly never final requests. perm 10307|298|O|230682.65|1998-01-28|4-NOT SPECIFIED|Clerk#000000488|0|yly ironic foxes. quickly regular 10308|413|P|278555.38|1995-04-21|2-HIGH|Clerk#000001000|0|are above the furiously final deposits. special packages nag s 10309|1312|F|215890.35|1994-11-22|5-LOW|Clerk#000000622|0| above the carefully bold packages. carefully even pinto beans mai 10310|1027|F|213874.91|1994-02-06|5-LOW|Clerk#000000645|0| accounts kindle qu 10311|52|F|5195.95|1992-08-18|2-HIGH|Clerk#000000147|0|ntegrate carefully above the regular pinto beans. quick 10336|1358|O|57610.67|1997-06-15|4-NOT SPECIFIED|Clerk#000000519|0|. carefully special pinto beans are ironic accounts. foxes 10337|1360|O|63068.37|1996-03-19|3-MEDIUM|Clerk#000000499|0|ly silent instructions wake qui 10338|1213|O|100334.47|1995-07-15|4-NOT SPECIFIED|Clerk#000000964|0|gular packages. 
slyly even packages haggle furious 10339|154|O|116081.18|1995-10-12|5-LOW|Clerk#000000585|0|ts. always ironic deposits haggle thinly. 10340|1486|O|243706.09|1995-06-23|2-HIGH|Clerk#000000064|0|hely final warthogs detect blithely regular pa 10341|1276|F|250921.23|1992-12-22|2-HIGH|Clerk#000000964|0|efully final packages cajole f 10342|638|O|273325.12|1995-11-23|3-MEDIUM|Clerk#000000277|0|theodolites. carefully regul 10343|91|F|44177.42|1992-03-27|4-NOT SPECIFIED|Clerk#000000600|0|ackages. carefully pending idea 10368|508|O|64465.48|1996-06-25|3-MEDIUM|Clerk#000000722|0|carefully special theodolites doze. fluffily ironic pinto be 10369|1397|O|120738.63|1995-11-10|5-LOW|Clerk#000000636|0| deposits are quickly; regular requests nag carefully regular i 10370|839|F|112585.48|1994-04-28|4-NOT SPECIFIED|Clerk#000000084|0|thrash. accounts boost never quickly final ac 10371|1022|F|204611.98|1994-09-17|2-HIGH|Clerk#000000231|0|nding requests. ideas af 10372|1241|O|101993.71|1995-06-28|4-NOT SPECIFIED|Clerk#000000276|0|sleep after the carefully ironic platelets. regular, unusual deposits wake qui 10373|671|F|254861.41|1993-06-26|1-URGENT|Clerk#000000681|0|cajole blithely final foxes. carefully 10374|337|F|28405.59|1993-07-23|5-LOW|Clerk#000000885|0|ing courts? carefully unusual ideas 10375|814|O|55990.35|1997-01-08|2-HIGH|Clerk#000000384|0|ending deposits. special, regular tith 10400|230|O|214820.02|1996-10-24|2-HIGH|Clerk#000000375|0|cing accounts sleep slyly regu 10401|1405|O|15159.38|1998-07-20|4-NOT SPECIFIED|Clerk#000000351|0|ts x-ray? slyly unusual a 10402|7|O|69976.43|1997-12-20|1-URGENT|Clerk#000000487|0|ing pinto beans for the carefully regular 10403|820|O|273217.58|1996-05-21|5-LOW|Clerk#000000523|0|ounts. carefully special accounts grow 10404|1255|O|148128.43|1998-03-29|1-URGENT|Clerk#000000142|0|uffy foxes about the qui 10405|929|O|156274.53|1998-01-04|4-NOT SPECIFIED|Clerk#000000343|0|uriously final dolphins. sly 10406|46|O|143470.97|1996-02-29|4-NOT SPECIFIED|Clerk#000000923|0| the slyly express deposits. deposits 10407|442|O|72476.48|1996-03-26|4-NOT SPECIFIED|Clerk#000000011|0|onic foxes. asymptotes across the ironi 10432|1231|F|167753.06|1992-01-06|5-LOW|Clerk#000000429|0|thely bold pinto bea 10433|1295|F|19194.57|1993-02-20|2-HIGH|Clerk#000000566|0|ajole blithely. furiously re 10434|781|F|63237.89|1994-12-24|2-HIGH|Clerk#000000939|0| blithely regular ideas are deposits. requests nod slyly final r 10435|1195|F|185835.34|1993-08-11|3-MEDIUM|Clerk#000000660|0|te. slyly final theodolites are after th 10436|916|F|32528.92|1995-01-03|5-LOW|Clerk#000001000|0|lthily special asymptotes. blithely pending deposits after the unusual, e 10437|305|F|64469.14|1994-08-19|1-URGENT|Clerk#000000098|0|inal requests about the regular, 10438|715|F|189906.48|1992-12-03|5-LOW|Clerk#000000148|0|tect among the blithely 10439|940|F|300103.82|1992-03-29|4-NOT SPECIFIED|Clerk#000000626|0|nal, even requests h 10464|233|F|41683.39|1994-08-11|1-URGENT|Clerk#000000752|0|e slyly after the platelets; furiously even ac 10465|79|O|289512.24|1997-04-10|4-NOT SPECIFIED|Clerk#000000791|0|ic tithes sleep carefully across the furiously ironic ideas. sl 10466|616|F|147826.40|1992-12-26|2-HIGH|Clerk#000000899|0|thely even ideas cajole quickly across the carefu 10467|1088|O|200254.31|1997-03-12|4-NOT SPECIFIED|Clerk#000000125|0|ffily unusual deposits. 
carefully regular asymptotes use fur 10468|574|F|50946.66|1994-12-12|2-HIGH|Clerk#000000429|0|osits wake-- quickly pending foxes alongside of the slyly even 10469|622|O|89980.03|1996-09-03|2-HIGH|Clerk#000000913|0|hely unusual, unusual accounts. furiously fi 10470|184|F|321900.81|1992-05-05|5-LOW|Clerk#000000632|0|even, ironic accounts among the bold theodolites use across the ste 10471|262|F|309852.68|1992-10-10|2-HIGH|Clerk#000000590|0|lites wake doggedly accounts. blithely final deposits sleep evenly. 10496|400|O|66825.60|1997-02-20|2-HIGH|Clerk#000000907|0|iously regular requests. furiously slow hockey players are quickly even ac 10497|119|O|103079.38|1996-06-22|3-MEDIUM|Clerk#000000423|0|ar hockey players sleep furiously until the furious 10498|1081|F|146950.27|1993-10-24|5-LOW|Clerk#000000424|0|lithely bold packages. re 10499|872|F|186269.45|1993-11-17|4-NOT SPECIFIED|Clerk#000000065|0|es haggle slyly even instructions. ironica 10500|998|F|26808.34|1994-01-24|2-HIGH|Clerk#000000179|0| packages wake quickly pearls. slyly even deposits haggle blithe 10501|911|O|227512.69|1998-07-03|3-MEDIUM|Clerk#000000977|0|equests. packages nag slyly. quickly even accounts toward the ironic 10502|1462|F|177583.01|1993-05-15|4-NOT SPECIFIED|Clerk#000000530|0|tructions alongside of the blithely bold foxes could have to sleep slyly a 10503|853|O|150884.39|1996-06-09|5-LOW|Clerk#000000028|0|eodolites play against the pending accounts. special packages are slyly ab 10528|166|F|127054.41|1994-09-14|3-MEDIUM|Clerk#000000724|0|press pinto beans. express theodolites do cajole furiously. slyly f 10529|692|O|141709.31|1997-10-18|1-URGENT|Clerk#000000573|0|yly. doggedly final epitaphs haggle among the flu 10530|1276|F|120484.95|1994-01-28|3-MEDIUM|Clerk#000000879|0|ounts. blithely unus 10531|55|F|176261.61|1992-03-19|2-HIGH|Clerk#000000021|0|regular somas. fluffily special instructions doze blithely. quick 10532|1337|O|318050.97|1997-11-28|5-LOW|Clerk#000000805|0|ly slyly bold accounts. carefully express theodolites nag 10533|961|O|186070.73|1998-05-01|3-MEDIUM|Clerk#000000778|0| nag busily bold dependencies. special, ironic p 10534|1070|F|4438.69|1993-04-19|4-NOT SPECIFIED|Clerk#000000973|0|ly according to the slyly r 10535|1225|O|249751.99|1995-05-28|5-LOW|Clerk#000000967|0|lyly unusual frets nag alongsi 10560|91|O|185333.40|1997-07-29|1-URGENT|Clerk#000000157|0|lar requests among the blithely regular packages doze unusual requests; 10561|718|O|219244.25|1997-04-27|5-LOW|Clerk#000000435|0|r requests. even, even deposits b 10562|1246|F|297373.68|1994-11-12|4-NOT SPECIFIED|Clerk#000000034|0|ests across the courts print r 10563|2|F|143707.70|1993-09-30|1-URGENT|Clerk#000000889|0|etect after the pending requests. packa 10564|1019|O|77547.24|1996-03-14|4-NOT SPECIFIED|Clerk#000000362|0|ests. final, express foxes are slyly regular ideas. darin 10565|613|O|138255.60|1997-08-17|1-URGENT|Clerk#000000126|0| slyly unusual deposits after the carefully 10566|578|O|220319.71|1995-08-20|4-NOT SPECIFIED|Clerk#000000541|0|s sleep blithely permanently even ideas. bl 10567|328|F|104838.06|1992-03-30|2-HIGH|Clerk#000000046|0|y even pinto beans cajole bold instructions. carefully sly wate 10592|778|O|84645.88|1995-11-04|2-HIGH|Clerk#000000435|0|as-- ironic packages 10593|1123|F|160662.68|1992-02-13|2-HIGH|Clerk#000000573|0|excuses after the fina 10594|214|O|6650.94|1996-03-21|3-MEDIUM|Clerk#000000126|0|ly express asymptotes against th 10595|818|O|23233.65|1997-11-22|3-MEDIUM|Clerk#000000425|0|gular excuses. 
slowly ca 10596|308|O|106243.04|1997-03-31|5-LOW|Clerk#000000662|0|oss the furiously regular theodolit 10597|619|O|67904.94|1996-09-13|2-HIGH|Clerk#000000040|0|ch. bold ideas about the deposits wake fluffily 10598|461|F|19331.61|1992-03-08|4-NOT SPECIFIED|Clerk#000000682|0|e among the fluffily regular deposits. slyly final requests haggle furious 10599|194|O|142776.50|1997-07-21|4-NOT SPECIFIED|Clerk#000000559|0|ly regular requests wake regular instructions. pending, pending dolphins 10624|1288|F|192178.14|1994-12-21|1-URGENT|Clerk#000000666|0|re furiously along the slyly final ideas. carefu 10625|259|O|233381.66|1996-01-30|1-URGENT|Clerk#000000688|0|. pending deposits above the car 10626|997|O|19103.82|1995-12-14|3-MEDIUM|Clerk#000000656|0| regular deposits cajo 10627|127|F|210497.85|1993-02-25|5-LOW|Clerk#000000236|0|s. unusual instructions are. furiously express requests a 10628|1429|O|5714.19|1998-06-28|4-NOT SPECIFIED|Clerk#000000437|0|etimes quickly final packa 10629|650|F|55072.00|1994-09-12|1-URGENT|Clerk#000000229|0|endencies wake. accounts are carefully. a 10630|313|F|143006.19|1993-02-05|3-MEDIUM|Clerk#000000607|0|iously special instruct 10631|1243|F|258430.09|1993-05-27|1-URGENT|Clerk#000000844|0|affix blithely blithely regular dinos. furiously blithe instructions ar 10656|371|O|96681.23|1995-10-25|1-URGENT|Clerk#000000233|0| accounts use never blithely bold deposits. i 10657|734|F|91096.35|1993-09-26|3-MEDIUM|Clerk#000000502|0|ts wake. bold packages cajole fluffily according to the furi 10658|1199|F|117872.35|1994-08-28|5-LOW|Clerk#000000204|0|xes cajole thinly ironic requests. theodolites 10659|397|F|290769.79|1994-05-15|1-URGENT|Clerk#000000587|0| slyly across the bold request 10660|469|O|135068.75|1998-05-01|2-HIGH|Clerk#000000352|0|onic depths? furiously 10661|179|F|90250.70|1994-01-24|4-NOT SPECIFIED|Clerk#000000986|0| about the final foxes. eve 10662|1420|P|157252.72|1995-05-13|5-LOW|Clerk#000000241|0|tain blithely quickly regular pinto beans. furiously bold deposits 10663|317|F|153682.58|1994-05-24|2-HIGH|Clerk#000000137|0|s above the furiously final foxes nag idly requests. caref 10688|4|F|43453.24|1992-05-13|3-MEDIUM|Clerk#000000226|0|ly ironic instructions lose quickly alongside of the carefully 10689|203|O|99242.62|1996-07-23|5-LOW|Clerk#000000436|0|usly. furiously ironic instructions wak 10690|103|O|54775.41|1997-12-15|5-LOW|Clerk#000000041|0|ltipliers cajole slyly busy accounts. express theodolites af 10691|664|F|87254.45|1995-03-14|3-MEDIUM|Clerk#000000536|0|al decoys among the carefull 10692|187|F|158201.74|1994-07-27|5-LOW|Clerk#000000832|0|orbits. asymptotes cajole. furiously eve 10693|1030|F|43518.47|1995-04-17|4-NOT SPECIFIED|Clerk#000000797|0|s. slyly regular accounts are among the slyly regular sentiments. 10694|1423|O|183151.32|1997-12-05|2-HIGH|Clerk#000000812|0|olites. quickly bold package 10695|1456|P|93566.64|1995-05-03|2-HIGH|Clerk#000000039|0| asymptotes. quickly unus 10720|736|O|89368.05|1998-03-31|2-HIGH|Clerk#000000597|0|ccounts sleep bold, even theodol 10721|781|O|107381.35|1996-02-27|5-LOW|Clerk#000000599|0|haggle slyly blithely even instructions. quickly even dolphins wa 10722|79|F|218680.31|1995-01-20|4-NOT SPECIFIED|Clerk#000000743|0|ding instructions print. f 10723|1040|O|69126.13|1998-05-21|3-MEDIUM|Clerk#000000144|0| bold accounts. carefully busy platelets sleep fluffily. spe 10724|1492|F|215958.98|1994-09-14|2-HIGH|Clerk#000000165|0|furiously regular dolphins. 
regular foxes along the daringly express 10725|1444|O|77193.41|1998-04-09|5-LOW|Clerk#000000505|0| carefully unusual sauternes after the fluffily regular foxes use pending, 10726|164|F|215356.45|1993-04-25|4-NOT SPECIFIED|Clerk#000000210|0| regular dependencies. quickly spe 10727|496|F|311532.26|1992-08-05|5-LOW|Clerk#000000396|0|nusual pinto beans. special, pending requests breach slyly fina 10752|199|O|223930.05|1995-09-11|1-URGENT|Clerk#000000610|0|ic accounts. carefully ironic courts against the caref 10753|1430|F|20768.66|1994-05-02|1-URGENT|Clerk#000000218|0|ously ironic packag 10754|1061|F|229997.58|1993-03-18|5-LOW|Clerk#000000827|0|r pinto beans sleep even 10755|1435|F|132981.98|1993-07-15|1-URGENT|Clerk#000000257|0|se furiously excuses. always unusual packages above the furiously unusual de 10756|1087|F|40769.85|1992-04-15|2-HIGH|Clerk#000000994|0| even, express platelets nag above 10757|128|O|92703.05|1998-06-06|4-NOT SPECIFIED|Clerk#000001000|0|patterns haggle quickly across the bravely 10758|368|O|136484.05|1996-11-15|2-HIGH|Clerk#000000209|0|es. pinto beans sleep carefully final 10759|34|O|54534.36|1996-09-22|4-NOT SPECIFIED|Clerk#000000258|0| boost evenly final pinto beans. 10784|1396|F|184632.93|1993-07-06|4-NOT SPECIFIED|Clerk#000000982|0|en packages sleep. final foxes detec 10785|148|F|1508.63|1992-06-26|3-MEDIUM|Clerk#000000948|0|iously unusual packages use quickly furiously 10786|154|O|70962.05|1995-08-06|3-MEDIUM|Clerk#000000705|0| ironic, special theodolites caj 10787|1117|O|372954.40|1997-02-12|2-HIGH|Clerk#000000133|0|ckages could have to wake blithely above the final requests. furiously 10788|4|O|147767.08|1997-01-15|3-MEDIUM|Clerk#000000905|0|esides the slyly ironic dependencies. ironic foxes cajole fur 10789|1378|F|93918.24|1993-08-24|4-NOT SPECIFIED|Clerk#000000463|0|s unwind blithely agains 10790|113|F|77512.26|1994-12-15|5-LOW|Clerk#000000381|0|ly alongside of the express, regular foxes. req 10791|319|O|173850.27|1995-08-20|4-NOT SPECIFIED|Clerk#000000131|0|efully special pinto beans dazzle fluffily according to the express, silen 10816|1303|O|45628.88|1996-01-11|2-HIGH|Clerk#000000003|0|after the final, spe 10817|712|O|213714.56|1996-12-09|1-URGENT|Clerk#000000852|0|final ideas across the 10818|157|O|245383.91|1998-05-07|2-HIGH|Clerk#000000735|0|ids nag carefully against the final deposit 10819|571|F|79971.29|1993-08-25|1-URGENT|Clerk#000000144|0|ncies use furiously alongside of the fluffily final instructions. quickly re 10820|661|O|220224.56|1995-12-28|4-NOT SPECIFIED|Clerk#000000630|0|ecial ideas. silently unusual somas are blith 10821|590|O|179780.96|1996-05-13|2-HIGH|Clerk#000000856|0| the furiously final excuses. blithely final pack 10822|1186|F|19208.70|1993-12-01|5-LOW|Clerk#000000046|0|hely regular requests. carefully silent instructions use slyly f 10823|391|F|109958.79|1992-06-13|5-LOW|Clerk#000000869|0|blithely boldly pending deposits. silent hockey players acros 10848|841|O|67233.17|1996-10-26|3-MEDIUM|Clerk#000000131|0| according to the boldly final accounts. slyly bold asymptotes detect qui 10849|56|O|309269.38|1997-01-31|1-URGENT|Clerk#000000279|0|y even requests. blithely silent theodolites cajole carefully ev 10850|1052|O|78474.67|1996-09-23|2-HIGH|Clerk#000000055|0|its cajole quickly. permanently even pinto beans dazz 10851|1201|F|256653.25|1994-04-07|2-HIGH|Clerk#000000841|0|ffix blithely blithely even dugouts? quickly re 10852|559|F|290873.68|1993-10-27|2-HIGH|Clerk#000000623|0|oxes. 
carefully final pinto be 10853|275|F|68787.35|1994-01-06|4-NOT SPECIFIED|Clerk#000000276|0|ronic dugouts wake s 10854|917|O|4885.38|1998-06-20|2-HIGH|Clerk#000000219|0|old, ironic dependencies. fluffily bold excuses shall have to integrate amon 10855|608|O|227086.67|1997-07-16|1-URGENT|Clerk#000000149|0|fully unusual pinto beans sleep sl 10880|278|F|40046.10|1992-05-23|4-NOT SPECIFIED|Clerk#000000553|0|e furiously bold deposits. slyly spe 10881|1001|F|235385.68|1994-02-16|1-URGENT|Clerk#000000872|0|quickly final pinto beans engage quickly pending, final 10882|1462|O|103982.52|1998-06-02|5-LOW|Clerk#000000949|0|gouts sleep fluffily according to the dep 10883|226|O|118687.62|1998-02-03|5-LOW|Clerk#000000759|0|sly silent accounts. carefully express pinto bean 10884|749|F|193592.93|1994-11-26|5-LOW|Clerk#000000802|0| express, regular accounts nag furiously! platele 10885|991|O|253167.35|1998-04-18|5-LOW|Clerk#000000893|0|thely ironic accounts sleep carefully depo 10886|1312|F|275313.31|1994-08-24|1-URGENT|Clerk#000000422|0|along the final epitaphs dazzle carefull 10887|436|O|50293.15|1995-05-26|4-NOT SPECIFIED|Clerk#000000575|0|ag against the dependencies. furiously regular foxes sleep fu 10912|293|O|243339.25|1996-12-25|1-URGENT|Clerk#000000579|0|odolites wake sometimes on the unusual deposits. epitaphs cajole b 10913|514|F|125458.51|1993-02-07|5-LOW|Clerk#000000114|0|ches are blithely around t 10914|689|F|213268.60|1995-01-23|4-NOT SPECIFIED|Clerk#000000582|0| quickly regular packages above the furiously bold pl 10915|1357|F|178357.32|1993-11-11|5-LOW|Clerk#000000649|0|bold packages nag. furiously regular excu 10916|328|P|253026.49|1995-03-11|3-MEDIUM|Clerk#000000047|0|pinto beans cajole furiously according to the dependencies. fi 10917|251|F|127026.23|1992-02-11|1-URGENT|Clerk#000000078|0|beans across the regular theodolites affix silent instructions. even deposits 10918|1030|O|80094.04|1995-10-16|3-MEDIUM|Clerk#000000874|0|structions. ironic, express dep 10919|1339|F|56302.36|1993-03-28|2-HIGH|Clerk#000000180|0|press requests haggle blithely. blithely 10944|319|O|129953.86|1995-11-13|2-HIGH|Clerk#000000077|0|ily unusual requests. special instruct 10945|248|F|168422.71|1992-01-04|3-MEDIUM|Clerk#000000296|0|ages are. final, silent deposits haggle quickly! ironic 10946|1147|O|332313.82|1997-03-22|3-MEDIUM|Clerk#000000975|0|nal deposits detect among the quickly 10947|1070|O|135370.80|1996-01-07|2-HIGH|Clerk#000000347|0|ar excuses x-ray fluffily a 10948|670|F|223943.13|1992-08-14|3-MEDIUM|Clerk#000000489|0|nic grouches detect stealthily fluffily even de 10949|331|F|112812.10|1994-03-31|3-MEDIUM|Clerk#000000874|0|luffily even grouches. regular notornis against the sl 10950|767|O|37856.60|1997-06-11|3-MEDIUM|Clerk#000000015|0|y carefully pending packages. silent accounts nod careful 10951|1159|F|175198.95|1992-11-07|4-NOT SPECIFIED|Clerk#000000619|0|dependencies. sometimes pending foxes cajole. quickly 10976|685|F|186488.10|1992-10-02|4-NOT SPECIFIED|Clerk#000000282|0| deposits: brave, ironic packages maintain fluffily about the regular 10977|757|O|99211.94|1998-06-04|2-HIGH|Clerk#000000089|0|rious accounts about the regular excuses wake ironically special r 10978|1346|F|106794.18|1994-04-02|1-URGENT|Clerk#000000546|0|cial multipliers wake pending, pending theodolites: blithe 10979|1267|O|166156.36|1995-10-05|3-MEDIUM|Clerk#000000527|0|ans cajole carefully. 
regular pinto be 10980|754|O|75950.23|1996-07-29|4-NOT SPECIFIED|Clerk#000000818|0| ironic, stealthy patterns detect 10981|919|F|235684.93|1993-05-15|2-HIGH|Clerk#000000341|0|ly bold attainments nag! furiously special acco 10982|739|F|199304.65|1992-12-11|4-NOT SPECIFIED|Clerk#000000481|0|tithes use slyly silent, unusual a 10983|599|O|106734.94|1997-02-06|1-URGENT|Clerk#000000299|0|sts sleep of the regular foxes. permanent re 11008|523|F|222231.62|1994-02-25|1-URGENT|Clerk#000000927|0|ickly regular deposits detect fluffily: caref 11009|1303|O|65554.60|1997-03-05|3-MEDIUM|Clerk#000000515|0|deposits are blithely furio 11010|440|O|163532.15|1997-03-22|4-NOT SPECIFIED|Clerk#000000659|0|ithely-- express, final ideas sleep slyly to the blithely express reque 11011|14|F|248409.17|1992-05-14|2-HIGH|Clerk#000000948|0|luffily even warhors 11012|1420|O|114085.80|1998-04-07|1-URGENT|Clerk#000000771|0|ze fluffily quickly sly excuses. quickly pending frays haggle accordin 11013|395|F|2591.65|1994-04-18|2-HIGH|Clerk#000000759|0|ng deposits nag. special deposits sublate furio 11014|319|F|150733.48|1993-09-08|2-HIGH|Clerk#000000934|0|yly regular depths. re 11015|514|O|132711.70|1997-09-16|5-LOW|Clerk#000000216|0|its cajole against the unusual ideas? sly deposits boost fluffily. furiously 11040|82|O|211495.01|1995-10-24|4-NOT SPECIFIED|Clerk#000000003|0|arefully pending dolphins print brave 11041|1306|O|128785.82|1997-03-30|4-NOT SPECIFIED|Clerk#000000892|0|instructions. blithely pending packages solve of the furiously ironic requests 11042|857|O|223492.72|1998-05-07|4-NOT SPECIFIED|Clerk#000000677|0|ccounts. quickly unusual pinto beans nag enticingly a 11043|820|F|102166.94|1995-02-20|1-URGENT|Clerk#000000196|0|dolphins. accounts are f 11044|164|F|68884.04|1992-06-25|3-MEDIUM|Clerk#000000861|0|he deposits hinder above the furiously express excuses. silent depo 11045|556|F|226703.02|1992-03-26|3-MEDIUM|Clerk#000000216|0|uriously ironic asy 11046|1411|O|337558.78|1997-06-17|5-LOW|Clerk#000000454|0|lly special theodolites use furiously. carefully 11047|175|F|70365.24|1993-12-11|5-LOW|Clerk#000000223|0|. pending accounts alongside o 11072|265|O|207339.71|1996-07-14|2-HIGH|Clerk#000000804|0|osits: pending requests sleep quietly silently unusual deposits. special fo 11073|548|F|177610.35|1994-12-02|4-NOT SPECIFIED|Clerk#000000009|0|ing deposits-- slyly express dolphins detect fluffily ironic dep 11074|709|O|302642.18|1995-06-10|2-HIGH|Clerk#000000449|0|t the special asymptotes. furiously even dolphins sleep quickly ir 11075|1244|O|215012.92|1996-03-05|5-LOW|Clerk#000000379|0|efully furiously special pinto beans. fluffily unusual acc 11076|1012|F|54498.05|1994-08-18|1-URGENT|Clerk#000000361|0|lyly special notornis. bold requests haggle careful 11077|610|F|76484.04|1994-03-21|5-LOW|Clerk#000000024|0|gularly pending deposits. quick 11078|1301|F|103896.23|1992-05-28|2-HIGH|Clerk#000000227|0|ag. ironic, unusual accounts nag fluffily across the ironic, final ideas. car 11079|218|O|98046.49|1997-08-11|3-MEDIUM|Clerk#000000507|0|tainments affix doggedly slyly special asymptotes. 11104|1315|O|51132.08|1998-07-07|4-NOT SPECIFIED|Clerk#000000062|0|s cajole slyly alon 11105|1124|O|20773.48|1995-11-30|4-NOT SPECIFIED|Clerk#000000769|0|blithely about the closely silent foxes. blithely r 11106|1175|P|59390.31|1995-04-01|2-HIGH|Clerk#000000828|0|fter the quickly bold theodolites are blithely across 11107|445|O|64188.60|1995-10-24|5-LOW|Clerk#000000896|0|ymptotes. escapades use slyly? 
slyly b 11108|1186|F|238113.15|1992-08-17|4-NOT SPECIFIED|Clerk#000000137|0|ond the carefully unusual courts. regul 11109|664|F|5341.59|1995-03-16|3-MEDIUM|Clerk#000000578|0|. silent platelets haggle slyly. quickly ironic asymptotes 11110|757|O|153722.23|1996-06-23|4-NOT SPECIFIED|Clerk#000000243|0|es are quickly alongside of the spe 11111|631|F|121957.72|1995-01-18|3-MEDIUM|Clerk#000000301|0|nic deposits affix quickly regula 11136|1313|F|60530.16|1993-11-14|4-NOT SPECIFIED|Clerk#000000987|0|ular foxes haggle against the unusual frays. i 11137|668|F|204264.90|1992-11-30|2-HIGH|Clerk#000000515|0|bout the final accounts. regular ideas cajole slyly. blithely sly d 11138|181|F|162678.89|1995-02-27|4-NOT SPECIFIED|Clerk#000000983|0|d platelets. carefully ironic packages alongside of the carefully 11139|883|O|115264.28|1998-05-17|5-LOW|Clerk#000000891|0|the furiously silent i 11140|988|O|194048.42|1997-07-26|1-URGENT|Clerk#000000117|0|. fluffily busy sauternes cajole slyly above the quickly express excuses. exp 11141|1030|O|72800.37|1998-01-17|1-URGENT|Clerk#000000084|0|ckages. carefully regul 11142|1375|O|395039.05|1997-10-03|4-NOT SPECIFIED|Clerk#000000457|0| even accounts sublate carefully 11143|97|F|172537.11|1992-11-27|5-LOW|Clerk#000000613|0|unusual instructions. q 11168|1441|F|74602.03|1992-10-22|1-URGENT|Clerk#000000585|0|es. even patterns use fluff 11169|1222|F|121781.45|1993-05-22|3-MEDIUM|Clerk#000000536|0|eodolites wake. fluffily express packages 11170|736|O|152992.55|1997-07-30|1-URGENT|Clerk#000000832|0|eyond the furiously regular pinto beans. car 11171|730|O|104672.68|1995-07-17|1-URGENT|Clerk#000000792|0|l deposits promise according to the express deposits. fluffily final r 11172|1076|O|166824.37|1997-12-30|1-URGENT|Clerk#000000187|0|ructions. furiously pending dependencies wake. deposits 11173|319|F|179045.62|1992-03-31|1-URGENT|Clerk#000000815|0|e ironically silent deposits wake blithely across the regular instructions 11174|1315|F|149433.95|1994-04-10|3-MEDIUM|Clerk#000000321|0|se about the pending, special decoys-- fluffily express 11175|1421|O|44114.12|1998-02-26|2-HIGH|Clerk#000000812|0| slyly even deposits are blithely; packages integrate furiously. bravely ironi 11200|956|O|113819.40|1997-02-07|1-URGENT|Clerk#000000417|0|ajole. silent requests boost carefully blithely thin multiplier 11201|632|O|3297.46|1996-12-08|3-MEDIUM|Clerk#000000746|0|ns cajole after the pending pinto beans. carefully ironic requests wake r 11202|1168|F|157836.48|1992-03-18|3-MEDIUM|Clerk#000000614|0|wake daringly. carefully regular account 11203|1498|O|84795.43|1996-06-15|2-HIGH|Clerk#000000010|0|al instructions haggle slyly. fu 11204|1249|O|29604.53|1998-07-25|2-HIGH|Clerk#000000752|0| express requests. slyly special packages cajole fluffily; quickly ironi 11205|1294|O|93334.58|1996-09-15|1-URGENT|Clerk#000000800|0|ckages. furiously final packages ac 11206|379|F|209373.08|1994-12-29|5-LOW|Clerk#000000139|0|ending packages cajole busily. slyly 11207|1213|O|84129.92|1996-04-03|2-HIGH|Clerk#000000354|0|ress accounts haggle permanently about the furiously unusua 11232|661|O|77549.43|1996-02-01|1-URGENT|Clerk#000000676|0|fully regular accounts. final, unusual deposits sleep eve 11233|680|O|96728.84|1995-08-31|4-NOT SPECIFIED|Clerk#000000820|0|lphins. carefully ironic ideas use after the quickly unusual a 11234|1153|F|117092.84|1993-03-01|4-NOT SPECIFIED|Clerk#000000291|0|e slyly across the carefully even requests. 
slyly 11235|590|O|278256.83|1996-04-03|4-NOT SPECIFIED|Clerk#000000495|0|ackages solve carefully furiously final accounts. fluf 11236|223|F|210654.72|1993-10-31|2-HIGH|Clerk#000000792|0|ess pinto beans. foxes b 11237|1423|O|155158.38|1996-06-16|4-NOT SPECIFIED|Clerk#000000627|0|e the carefully special ideas inte 11238|535|O|126764.00|1996-07-23|1-URGENT|Clerk#000000809|0|gular foxes run after the even instructions. regular instructions wake. unu 11239|475|F|310241.51|1992-02-10|1-URGENT|Clerk#000000240|0|e regular, even excuses. 11264|676|O|152483.91|1996-09-02|4-NOT SPECIFIED|Clerk#000000227|0|usly silent deposits. carefully unusual asymptotes ha 11265|905|O|88998.81|1997-05-20|3-MEDIUM|Clerk#000000427|0|ias. carefully pending platel 11266|292|O|13609.27|1997-08-10|4-NOT SPECIFIED|Clerk#000000315|0|lets nag about the carefully special packages. ironic sheaves 11267|548|F|45572.31|1992-02-15|5-LOW|Clerk#000000096|0| wake slyly even deposits. c 11268|43|O|189596.87|1998-06-30|1-URGENT|Clerk#000000130|0|ptotes are blithely slyly silen 11269|199|F|256118.53|1992-05-27|5-LOW|Clerk#000000752|0|ath the decoys. final excuses must have to wake. ironic asymptotes wake q 11270|160|O|197781.66|1995-07-01|2-HIGH|Clerk#000000757|0|ily pending theodolites a 11271|745|O|255335.88|1995-10-19|2-HIGH|Clerk#000000144|0|sts. furiously ironic foxes wake ca 11296|832|F|378166.33|1992-01-10|1-URGENT|Clerk#000000966|0|s-- ironic, unusual requests haggle furiously. carefully special depend 11297|694|F|109722.85|1993-01-09|4-NOT SPECIFIED|Clerk#000000247|0|hely. express escapades kindle blithely. even requests wake 11298|805|O|228663.94|1998-01-03|2-HIGH|Clerk#000000160|0| blithely express ideas sleep slyly ruthl 11299|50|F|253114.92|1993-11-13|4-NOT SPECIFIED|Clerk#000000885|0|ns haggle bravely alongside of the fluffily express requests. 11300|346|O|77247.48|1996-07-10|5-LOW|Clerk#000000697|0|he final platelets. blithely even theodolites along the car 11301|1358|F|191271.29|1992-03-23|3-MEDIUM|Clerk#000000321|0|hes detect fluffily ironic requests. deposits br 11302|451|F|243997.09|1994-03-02|5-LOW|Clerk#000000492|0|. never ironic requests sleep furiousl 11303|388|F|92968.88|1992-03-30|5-LOW|Clerk#000000788|0|he slyly pending requests. final deposits 11328|466|F|18702.67|1992-03-21|1-URGENT|Clerk#000000101|0|theodolites. blithely pending accounts above the carefully 11329|1285|O|272307.07|1995-08-05|1-URGENT|Clerk#000000903|0|on the daringly express instructions. fluf 11330|281|F|3817.37|1992-03-28|5-LOW|Clerk#000000429|0|he slyly ironic ideas are q 11331|179|O|18118.60|1996-01-21|3-MEDIUM|Clerk#000000616|0|platelets wake carefully ruthless requests. silent deposits believe 11332|422|F|213792.99|1994-11-17|5-LOW|Clerk#000000572|0|o the unusual foxes-- ironic requests wake slyly enticingly re 11333|103|F|15730.53|1994-06-10|2-HIGH|Clerk#000000914|0|excuses cajole blithely against the slyly express r 11334|245|O|343561.63|1997-07-29|3-MEDIUM|Clerk#000000843|0|ronic deposits haggle fu 11335|871|F|133549.00|1994-10-22|2-HIGH|Clerk#000000669|0|ealms. theodolites maintain. regular, even instructions against t 11360|886|O|67314.59|1997-09-07|1-URGENT|Clerk#000000561|0|. slyly regular deposits lose 11361|442|F|251600.98|1994-09-28|1-URGENT|Clerk#000000345|0| final requests boost never pinto beans. special accounts are slyly unusual t 11362|1042|F|250063.55|1992-09-11|5-LOW|Clerk#000000922|0|ly blithe deposits cajole blithely slyly silent deposits. 
regular 11363|1358|O|163511.72|1998-02-09|5-LOW|Clerk#000000844|0| silent pinto beans haggle al 11364|163|O|107551.91|1997-03-11|1-URGENT|Clerk#000000582|0|sts wake fluffily about the slyly special accounts. qu 11365|1214|O|211938.69|1997-08-29|3-MEDIUM|Clerk#000000520|0|de of the blithely final requests. fluffily regular dolphin 11366|863|F|12711.31|1992-06-08|3-MEDIUM|Clerk#000000993|0|lites nag blithely un 11367|418|F|18148.48|1994-12-17|1-URGENT|Clerk#000000566|0| carefully regular deposits. bold theodolites haggle. enticingly final fo 11392|1340|F|135223.44|1994-09-06|5-LOW|Clerk#000000607|0|tect blithely across the express dependencies. instructions after the slyly re 11393|1150|F|138539.25|1992-03-19|5-LOW|Clerk#000000022|0|al packages. carefully speci 11394|1255|O|181795.18|1998-03-06|2-HIGH|Clerk#000000559|0| deposits! fluffily sp 11395|940|O|46826.01|1997-08-26|1-URGENT|Clerk#000000306|0|beans nag carefully even sentiments-- express platelets 11396|1273|F|107605.76|1992-07-22|1-URGENT|Clerk#000000265|0|ously regular accounts haggle 11397|757|O|55531.65|1996-10-29|5-LOW|Clerk#000000603|0|oss the regular pac 11398|587|F|258130.52|1992-08-07|2-HIGH|Clerk#000000407|0|f the theodolites are fu 11399|346|F|39653.25|1994-04-19|5-LOW|Clerk#000000460|0|ffily furiously even pa 11424|343|F|58887.47|1993-04-03|4-NOT SPECIFIED|Clerk#000000547|0|s. quickly final requests around the slyly unusual dependencies cajol 11425|1123|O|156058.42|1995-06-23|1-URGENT|Clerk#000000336|0|arefully special requests cajole bravely fluffy pinto beans. ironic 11426|1357|F|94572.71|1994-05-08|3-MEDIUM|Clerk#000000066|0|ilent notornis boost blithely quickly even pinto beans. carefully speci 11427|427|F|306906.43|1993-11-25|3-MEDIUM|Clerk#000000401|0|y slyly brave excuses. slow packages sleep quickl 11428|628|F|7046.94|1992-01-22|3-MEDIUM|Clerk#000000926|0|ep slyly even, unusual packages. spe 11429|1192|F|145545.35|1993-03-04|5-LOW|Clerk#000000765|0| bold courts sleep blithely. regular, even r 11430|1342|O|3641.35|1998-01-05|5-LOW|Clerk#000000817|0|tions breach. regular, express pinto beans wake across the carefully 11431|10|F|230289.60|1992-08-02|2-HIGH|Clerk#000000055|0|press ideas use slyly regular pinto beans. furiously 11456|271|F|53466.58|1993-04-13|5-LOW|Clerk#000000209|0|ar foxes against the quickly special th 11457|1430|O|232660.01|1995-12-14|1-URGENT|Clerk#000000294|0|the slyly quiet ideas. idly final deposits nag. carefully regu 11458|1468|O|201085.02|1998-04-02|5-LOW|Clerk#000000402|0|l pinto beans cajole carefully 11459|325|O|230925.13|1996-07-15|4-NOT SPECIFIED|Clerk#000000852|0|ular requests use blithely. quickly special packages boost furiously. deposits 11460|763|F|51498.95|1993-08-25|2-HIGH|Clerk#000000014|0| brave dependencies nag. blithely final foxe 11461|1049|O|80204.90|1996-05-01|1-URGENT|Clerk#000000630|0|ymptotes. packages haggle whithout the evenly final requests. req 11462|226|O|164290.80|1996-08-25|2-HIGH|Clerk#000000210|0|grouches. closely dogged deposits sleep. special, pending packages slee 11463|140|O|258622.38|1997-12-28|3-MEDIUM|Clerk#000000886|0|ular deposits. slyly final attainments detect careful 11488|751|F|92903.96|1993-08-23|4-NOT SPECIFIED|Clerk#000000119|0|ickly final requests. furiously even attainments sleep fluffily slyly regu 11489|929|O|100935.67|1996-09-19|4-NOT SPECIFIED|Clerk#000000155|0|ions. ironic foxes wake. 
regular platelets cajole slyly close 11490|745|O|66256.76|1996-09-27|1-URGENT|Clerk#000000489|0|quests affix according to the special p 11491|158|F|242695.19|1993-08-18|5-LOW|Clerk#000000329|0|cial foxes across the final packag 11492|478|O|141195.82|1997-03-06|2-HIGH|Clerk#000000708|0|ely regular instructions was fluffily among the fluffil 11493|1486|O|63852.92|1995-10-21|1-URGENT|Clerk#000000543|0|e blithely regular accounts 11494|1484|O|90512.73|1997-07-20|3-MEDIUM|Clerk#000000486|0|s. blithely unusual pi 11495|1048|O|43956.73|1995-07-05|2-HIGH|Clerk#000000684|0|y bold accounts cajo 11520|1217|F|111920.94|1994-09-28|3-MEDIUM|Clerk#000000106|0|courts. carefully final requests along the carefully final 11521|55|O|19730.00|1996-09-09|1-URGENT|Clerk#000000010|0| express platelets according to the iro 11522|47|F|21454.81|1993-09-16|5-LOW|Clerk#000000121|0|ual foxes x-ray carefully alo 11523|355|O|210039.66|1998-07-27|4-NOT SPECIFIED|Clerk#000000008|0|y: fluffily regular instructions sleep quickly enticing orbits. carefully un 11524|766|O|7445.25|1995-12-14|5-LOW|Clerk#000000943|0|encies. foxes detect around the final, even theodolites. special depos 11525|1495|O|169021.86|1997-03-20|1-URGENT|Clerk#000000159|0|ong the ironic accounts are slyly furiously pending dependencies. even exc 11526|466|O|124012.92|1998-02-23|5-LOW|Clerk#000000798|0|ickly regular packages use slyly ironic, 11527|1108|F|46961.88|1994-07-03|3-MEDIUM|Clerk#000000187|0|ously final packages was slyly above the bli 11552|1421|F|34742.82|1992-12-15|1-URGENT|Clerk#000000960|0|g, dogged notornis. carefully even instructions across the furiou 11553|196|F|207629.22|1994-04-19|3-MEDIUM|Clerk#000000467|0|s thrash blithely carefully ironic requests. quickl 11554|344|O|269777.97|1998-06-07|2-HIGH|Clerk#000000746|0| platelets are fluffily above the ironic theodolites. slyly pending 11555|1318|O|242411.22|1996-05-26|3-MEDIUM|Clerk#000000308|0|hinly bold deposits detect along the courts. express theodo 11556|707|O|237841.53|1996-03-03|5-LOW|Clerk#000000790|0|around the furiously ironic courts. blithely unusual deposits cajo 11557|1412|O|39415.83|1997-04-12|5-LOW|Clerk#000000522|0|ily. fluffily dogged asymptotes wake slyly regular grouches. a 11558|61|F|47438.62|1994-03-28|2-HIGH|Clerk#000000181|0| the slyly final excuses. 11559|931|O|54171.71|1995-09-26|1-URGENT|Clerk#000000582|0| pending excuses; carefully final dependencies 11584|643|F|350276.97|1994-11-23|3-MEDIUM|Clerk#000000150|0|l pinto beans haggle bold packages. fina 11585|895|F|59671.89|1994-03-16|3-MEDIUM|Clerk#000000105|0|g to the slyly unusual deposits. bold, final requests boost f 11586|1013|F|24377.52|1993-11-17|4-NOT SPECIFIED|Clerk#000000180|0|ously special packages solve carefully above th 11587|220|O|149730.78|1998-03-05|4-NOT SPECIFIED|Clerk#000000103|0| the slyly regular platelets. silently final packages may nag. foxes accordi 11588|1396|O|187668.87|1997-10-30|5-LOW|Clerk#000000492|0|ep. final, even excuses dazzle about the carefully special req 11589|1315|F|106954.20|1992-05-19|4-NOT SPECIFIED|Clerk#000000420|0|y express foxes shall have to cajole about the quickly unusu 11590|883|O|251411.55|1996-04-15|2-HIGH|Clerk#000000744|0|dolites sleep across the carefully regular depo 11591|269|F|108716.86|1993-01-21|2-HIGH|Clerk#000000691|0|pinto beans. 
bold deposits a 11616|403|O|261053.80|1996-08-02|2-HIGH|Clerk#000000003|0|cording to the express 11617|1459|F|186543.96|1994-12-11|4-NOT SPECIFIED|Clerk#000000519|0|etect blithely pending 11618|356|O|126867.40|1998-02-04|1-URGENT|Clerk#000000594|0|s kindle fluffily after 11619|55|O|42866.58|1998-07-12|2-HIGH|Clerk#000000245|0|ress instructions. ideas solve carefully special requests. patterns p 11620|353|O|115080.33|1997-07-22|4-NOT SPECIFIED|Clerk#000000961|0|ies nag slyly along 11621|964|O|233849.43|1995-11-11|5-LOW|Clerk#000000316|0|pinto beans. ruthless pinto beans haggle. slyly regular 11622|349|O|169440.18|1996-04-27|4-NOT SPECIFIED|Clerk#000000006|0| never instructions. express Tiresias boost. unusual, even pinto bean 11623|715|O|335923.49|1995-12-16|2-HIGH|Clerk#000000827|0|atelets cajole quietly final requests. slyly final courts are ironic, final 11648|949|P|223995.17|1995-04-10|5-LOW|Clerk#000000916|0| cajole carefully alon 11649|718|O|336620.13|1996-06-30|5-LOW|Clerk#000000666|0|kly even pinto beans cajole fu 11650|667|F|153010.80|1992-09-25|4-NOT SPECIFIED|Clerk#000000444|0|ccording to the ironic, regular excuses are 11651|689|F|66834.76|1995-02-23|3-MEDIUM|Clerk#000000982|0|sly furious sheaves. regular requests c 11652|1442|O|228052.10|1995-11-11|4-NOT SPECIFIED|Clerk#000000098|0|uriously bold foxes promise among the blithely ironic packages. fluffily regu 11653|314|O|191078.29|1997-07-05|2-HIGH|Clerk#000000533|0|kages doubt according to the final theodolites. furiously regular 11654|791|F|115033.02|1993-09-28|4-NOT SPECIFIED|Clerk#000000756|0|onic waters. carefully even packages haggle f 11655|910|O|184768.86|1998-06-27|1-URGENT|Clerk#000000614|0|slyly special asymptotes. final, unusual requests engage carefully pe 11680|1187|F|288910.60|1994-09-05|3-MEDIUM|Clerk#000000731|0|pecial asymptotes. final platelets cajole among the ironic pinto beans. pla 11681|857|F|246493.05|1992-10-17|1-URGENT|Clerk#000000485|0|final packages. carefully final accounts affi 11682|5|F|153588.74|1993-07-05|5-LOW|Clerk#000000852|0|s grow slyly. express, fin 11683|209|F|183381.18|1992-02-08|4-NOT SPECIFIED|Clerk#000000583|0|to beans cajole. furiously even frets detect. unusual pinto beans are 11684|323|F|282136.80|1992-07-28|2-HIGH|Clerk#000000986|0|instructions. final, even packages throughout the epi 11685|445|O|254981.92|1997-03-19|3-MEDIUM|Clerk#000000641|0|leep slyly unusual foxes. blithely even ideas through the t 11686|1039|F|169100.57|1994-01-28|4-NOT SPECIFIED|Clerk#000000169|0|ts wake furiously. accounts about the regular ideas are furiously 11687|1025|O|182611.71|1995-12-26|2-HIGH|Clerk#000000390|0|fluffily ideas. blithely pending packages lose blithely. slyly 11712|173|F|117391.91|1994-04-27|5-LOW|Clerk#000000376|0|special excuses. final accounts use. dependencies detect by the carefu 11713|1397|F|124407.44|1993-12-25|4-NOT SPECIFIED|Clerk#000000481|0|nic packages. unusual, unusual Ti 11714|1250|F|170165.48|1994-08-05|1-URGENT|Clerk#000000877|0|sts could have to wake ironic dependencies. furiously regul 11715|863|F|67076.38|1994-09-07|2-HIGH|Clerk#000000106|0|xpress foxes. pinto beans above the express deposits are final foxes. furio 11716|481|F|178602.59|1993-10-12|3-MEDIUM|Clerk#000000021|0|sly ironic packages haggle blithely around the unusual accounts. slyly re 11717|413|O|153316.94|1998-03-19|5-LOW|Clerk#000000185|0|furiously regular waters 11718|688|F|35335.31|1994-12-19|3-MEDIUM|Clerk#000000131|0|ly regular asymptotes. 
decoys integrate slyly among the bo 11719|1147|F|240503.39|1995-02-06|4-NOT SPECIFIED|Clerk#000000307|0|hely slyly special instr 11744|127|O|183922.62|1996-07-05|1-URGENT|Clerk#000000197|0|pecial packages above the furio 11745|25|F|147334.29|1992-07-20|2-HIGH|Clerk#000000549|0|lar ideas. slyly expre 11746|5|O|177360.54|1998-04-29|3-MEDIUM|Clerk#000000038|0|heodolites. final asymptotes was above the furiously final pin 11747|460|F|46484.18|1993-02-26|3-MEDIUM|Clerk#000000537|0|. requests use across 11748|733|F|43216.31|1994-06-04|4-NOT SPECIFIED|Clerk#000000147|0|uffily regular accounts cajole blithely alongside of the furiously pending p 11749|818|O|75180.45|1998-02-17|5-LOW|Clerk#000000819|0| excuses boost against the final pai 11750|823|O|117385.37|1997-03-05|2-HIGH|Clerk#000000801|0| furiously packages. carefully silent instruc 11751|1282|O|150450.18|1996-05-23|1-URGENT|Clerk#000000605|0|uickly express pinto beans. blithely bold foxes use above the dep 11776|386|F|90162.21|1992-02-25|5-LOW|Clerk#000000548|0|ck theodolites integrate furiously along the bold deposits. even, pending 11777|271|F|56387.90|1994-12-26|5-LOW|Clerk#000000292|0|g dependencies; quickly dogged courts wake quick 11778|217|O|102865.25|1997-03-28|5-LOW|Clerk#000000253|0|ss the final, unusual packages. quickly quick theodolites haggle. slyl 11779|1045|F|183456.75|1993-06-30|4-NOT SPECIFIED|Clerk#000000740|0|according to the care 11780|28|F|129283.92|1993-09-22|1-URGENT|Clerk#000000793|0|ove the regular theodolites are furiously regular notornis. 11781|1484|O|147766.37|1996-04-16|5-LOW|Clerk#000000619|0|s. final, final deposi 11782|427|F|163855.06|1992-07-29|5-LOW|Clerk#000000609|0|bold, unusual requests are slyly against the quickly bold dependencies. s 11783|1498|O|22953.13|1998-04-03|1-URGENT|Clerk#000000028|0|. regular theodolites snooze furiou 11808|874|F|60831.20|1992-08-01|4-NOT SPECIFIED|Clerk#000000710|0|could detect quickly unusu 11809|556|O|161697.76|1996-04-25|2-HIGH|Clerk#000000617|0|old realms alongside of the special dugouts use along the fluffily iro 11810|647|F|77198.94|1992-04-23|1-URGENT|Clerk#000000804|0| final requests boost among the furiously bold accounts. final 11811|527|F|7385.35|1992-10-28|2-HIGH|Clerk#000000940|0| packages haggle furiously pend 11812|940|F|17214.00|1993-06-03|3-MEDIUM|Clerk#000000844|0|y regular accounts. regular, pending requests print careful 11813|376|F|211762.16|1994-02-10|1-URGENT|Clerk#000000813|0|ly regular instructions. quickly ironi 11814|121|F|198422.64|1993-01-26|2-HIGH|Clerk#000000404|0|final ideas sleep sometimes deposits. final, final foxes boost fu 11815|1396|O|199250.12|1995-09-13|5-LOW|Clerk#000000342|0|counts use according to the bli 11840|596|F|283439.67|1994-08-15|1-URGENT|Clerk#000000466|0|ggle slowly across the sly 11841|727|F|158024.06|1993-04-21|3-MEDIUM|Clerk#000000521|0|s sleep carefully unusual ac 11842|1414|O|90262.04|1996-04-08|4-NOT SPECIFIED|Clerk#000000566|0|furiously unusual platelets. express requests 11843|730|F|211984.66|1994-07-18|3-MEDIUM|Clerk#000000747|0|g requests snooze care 11844|121|O|86777.18|1997-01-18|2-HIGH|Clerk#000000609|0|ular deposits haggle 11845|887|O|125342.31|1997-03-29|1-URGENT|Clerk#000000902|0|ts. final accounts detect furiously bold foxes. carefully regular packages h 11846|1046|F|51318.21|1993-04-21|5-LOW|Clerk#000000688|0|uriously ironic packages cajole furiously express, even ideas. 
ironic ideas 11847|1007|O|94551.77|1997-10-02|2-HIGH|Clerk#000000860|0|ely pending deposits haggle furiously across the pending 11872|145|F|75770.24|1994-07-25|5-LOW|Clerk#000000620|0| courts across the furiously 11873|863|O|21009.93|1996-04-06|4-NOT SPECIFIED|Clerk#000000322|0|press, final deposits da 11874|481|F|221467.60|1992-09-10|3-MEDIUM|Clerk#000000163|0|r the slyly bold deposits wake about the c 11875|698|F|98592.08|1992-08-21|3-MEDIUM|Clerk#000000300|0|. quickly special asymptotes after the slowly final pa 11876|611|O|130732.20|1995-07-04|2-HIGH|Clerk#000000325|0| to the quickly even pains: always 11877|268|F|122384.64|1993-05-24|4-NOT SPECIFIED|Clerk#000000312|0|ly above the unusua 11878|1349|F|92047.44|1993-02-25|4-NOT SPECIFIED|Clerk#000000041|0|ole blithely fluffy, unusua 11879|1057|F|185103.30|1993-06-20|2-HIGH|Clerk#000000371|0|to the special excuses detect carefully carefully express accounts. carefully 11904|415|O|279468.40|1997-12-02|4-NOT SPECIFIED|Clerk#000000924|0|accounts. platelets k 11905|343|O|101799.49|1997-07-04|5-LOW|Clerk#000000543|0|thely regular foxes. fluffily regular pinto beans i 11906|524|O|325004.58|1996-12-15|5-LOW|Clerk#000000929|0|deas cajole quickly? blithely final dolphins boost. blithely final deposits wa 11907|1327|F|127812.95|1993-08-24|5-LOW|Clerk#000000062|0|osits above the quickly ironic instructions are carefully after t 11908|1063|F|180189.92|1993-05-26|4-NOT SPECIFIED|Clerk#000000456|0|atelets across the express deposits kindle evenly unu 11909|868|F|51603.50|1994-11-25|4-NOT SPECIFIED|Clerk#000000012|0|lithely unusual accounts aga 11910|644|O|102331.93|1995-06-16|2-HIGH|Clerk#000000664|0|uriously final ideas cajole furi 11911|355|F|219307.51|1993-11-11|1-URGENT|Clerk#000000563|0| requests try to wake according to the carefully regular deposit 11936|749|O|167898.60|1995-09-16|1-URGENT|Clerk#000000869|0|ns wake blithely even, express theodolites; final 11937|1424|O|159471.41|1998-06-27|1-URGENT|Clerk#000000941|0|ic deposits sleep carefully special, regular foxes. special theo 11938|67|O|161199.79|1998-07-25|1-URGENT|Clerk#000000911|0|iously above the reg 11939|1333|O|84765.05|1996-10-01|2-HIGH|Clerk#000000610|0|iously regular dinos cajole furiously foxes. blithely regular foxes acr 11940|683|O|149688.34|1998-05-30|2-HIGH|Clerk#000000152|0|ular ideas cajole quickly 11941|659|F|157391.90|1993-06-25|1-URGENT|Clerk#000000392|0| instructions. carefully final theodolites wake quickly. carefully iro 11942|1324|F|43794.61|1994-05-09|1-URGENT|Clerk#000000317|0|ely bold accounts are carefully alongside of the unusual packages. slyly eve 11943|244|F|235683.21|1993-04-08|5-LOW|Clerk#000000724|0|requests haggle quickly about the carefully 11968|311|F|43428.32|1995-03-17|5-LOW|Clerk#000000526|0|g frets was above the fur 11969|449|F|270998.74|1992-01-20|4-NOT SPECIFIED|Clerk#000000641|0|efully. blithely pending deposits haggle regular, regular 11970|937|O|255762.04|1998-05-24|1-URGENT|Clerk#000000885|0|cial pinto beans. 
blithely expres 11971|1351|O|119482.38|1997-03-21|2-HIGH|Clerk#000000026|0|slyly regular requests lose quickly: depen 11972|1396|F|147645.78|1994-01-04|4-NOT SPECIFIED|Clerk#000000751|0|riously silent gifts affix slyl 11973|631|F|158758.99|1994-03-04|4-NOT SPECIFIED|Clerk#000000545|0| regular theodolites use carefully pen 11974|1366|F|88216.78|1992-10-09|4-NOT SPECIFIED|Clerk#000000595|0|slyly unusual accounts according to the blithely ironic ideas boost furio 11975|322|O|230472.15|1995-04-14|1-URGENT|Clerk#000000551|0|on the bold ideas cajole across the f 12000|1150|F|89148.77|1994-05-13|4-NOT SPECIFIED|Clerk#000000683|0|s against the furiously ironic grouches sleep according to the e 12001|739|F|138635.75|1994-07-07|2-HIGH|Clerk#000000863|0|old, even theodolites. regular, special theodolites use furio 12002|826|F|79579.51|1993-11-30|3-MEDIUM|Clerk#000000431|0| regular packages wake qui 12003|1205|O|16311.25|1998-05-24|1-URGENT|Clerk#000000708|0|s along the quickly regular instructions haggle carefully furiously u 12004|656|F|74814.13|1994-12-05|3-MEDIUM|Clerk#000000633|0|nis against the slyly specia 12005|1102|F|250917.29|1992-06-25|4-NOT SPECIFIED|Clerk#000000904|0| after the ironic, unusua 12006|944|O|129075.40|1997-06-02|4-NOT SPECIFIED|Clerk#000000501|0|n packages. carefully ironic accounts are after the pending platele 12007|613|F|239431.45|1994-09-28|4-NOT SPECIFIED|Clerk#000000363|0|even requests wake carefully unusual packages. quickly 12032|902|O|102348.27|1997-08-10|4-NOT SPECIFIED|Clerk#000000349|0| after the even, regular instructions. blithe tithes use furiously. b 12033|334|F|214360.53|1992-09-12|2-HIGH|Clerk#000000274|0| bold pearls haggle. carefully ironic pinto beans cajole. blit 12034|121|O|88674.23|1996-12-08|2-HIGH|Clerk#000000363|0|ges. deposits sleep slyly. ideas sleep 12035|1010|O|245999.17|1996-11-09|5-LOW|Clerk#000000963|0|furiously special instructions. pending deposits nod. blithely unusual pint 12036|838|O|241236.07|1998-01-16|5-LOW|Clerk#000000041|0|ic requests. unusual pinto beans sleep fluffily about the furiously regular 12037|1462|O|139739.68|1995-07-01|5-LOW|Clerk#000000375|0|pending foxes shall cajol 12038|164|O|59423.94|1995-11-10|2-HIGH|Clerk#000000587|0|ironic asymptotes mainta 12039|1252|F|303373.40|1993-06-17|2-HIGH|Clerk#000000765|0|packages integrate c 12064|38|F|133067.27|1992-09-16|5-LOW|Clerk#000000994|0| blithely quickly pending t 12065|557|O|60068.73|1997-01-30|4-NOT SPECIFIED|Clerk#000000064|0|the final deposits boost pending deposits. pending 12066|1066|F|187325.65|1995-02-07|3-MEDIUM|Clerk#000000084|0|heodolites cajole a 12067|257|F|227467.91|1993-02-05|1-URGENT|Clerk#000000019|0|requests. quickly regular packages run f 12068|1231|O|20347.58|1996-05-08|2-HIGH|Clerk#000000402|0|ctions. furiously even accounts 12069|1381|O|235666.55|1995-11-08|2-HIGH|Clerk#000000460|0|ong the pinto beans. deposits among the excuses cajole 12070|746|O|256785.35|1998-05-23|5-LOW|Clerk#000000287|0|bold pinto beans hagg 12071|956|O|100260.23|1998-06-26|1-URGENT|Clerk#000000205|0|ly bold multipliers cajole quickly re 12096|1004|F|257237.41|1992-09-20|3-MEDIUM|Clerk#000000156|0|hely final requests kindle among the regular foxes. orb 12097|118|F|156644.02|1993-11-10|1-URGENT|Clerk#000000239|0|out the slyly regular theodolites. regularly reg 12098|1325|F|34956.25|1993-05-01|2-HIGH|Clerk#000000214|0| carefully ironic, express deposits. 
ideas are slyly a 12099|112|F|230209.47|1994-09-12|1-URGENT|Clerk#000000042|0|furiously regular accounts haggle quic 12100|1298|O|62857.66|1996-03-15|2-HIGH|Clerk#000000518|0|ress frays use blithely pending requests-- quickly regular somas acros 12101|754|F|103072.24|1995-01-07|1-URGENT|Clerk#000000853|0|fily regular packages w 12102|1264|O|208734.50|1995-11-18|3-MEDIUM|Clerk#000000981|0|eposits are blithely along 12103|229|O|169239.68|1996-11-25|3-MEDIUM|Clerk#000000605|0|ironic foxes. quickly brave pinto beans 12128|1192|O|163301.92|1997-06-27|4-NOT SPECIFIED|Clerk#000000102|0|yly across the furiously ironic accounts. carefully special real 12129|418|F|117561.31|1992-12-26|3-MEDIUM|Clerk#000000119|0|dencies cajole furiously about the 12130|1261|O|157170.80|1995-12-10|4-NOT SPECIFIED|Clerk#000000894|0|according to the even, regular packages. furiously unusual pinto beans wake 12131|1172|O|285746.20|1998-06-29|3-MEDIUM|Clerk#000000074|0|ironic ideas. blithely unus 12132|1277|O|271963.86|1996-09-06|1-URGENT|Clerk#000000831|0|ptotes boost permanently. carefully unusual instr 12133|361|F|52225.60|1992-04-15|1-URGENT|Clerk#000000634|0|st after the furiously special 12134|355|O|56547.11|1996-05-31|3-MEDIUM|Clerk#000000868|0|blithely blithely regular theodolites. slyly even packages nag slyly slyly e 12135|1420|O|268120.03|1995-10-10|5-LOW|Clerk#000000246|0|ual excuses alongside of th 12160|1423|F|67966.80|1993-12-13|5-LOW|Clerk#000000707|0|ully about the furiously ironic braids-- carefully en 12161|1307|O|215072.11|1997-03-15|4-NOT SPECIFIED|Clerk#000000382|0|le blithely across the blithely slow fox 12162|1222|O|47989.76|1997-03-18|3-MEDIUM|Clerk#000000066|0| players according to the fluffily ironic dolphins cajole slyly spe 12163|88|O|205826.29|1997-07-20|5-LOW|Clerk#000000951|0|se furiously carefully special pinto beans. blithely special 12164|1313|F|180895.16|1993-10-17|1-URGENT|Clerk#000000148|0|ests. instructions haggle blithely express dependencies. furiously bold war 12165|1489|F|153853.42|1994-04-11|5-LOW|Clerk#000000091|0|nic pinto beans boost carefully 12166|62|F|18006.96|1995-03-15|4-NOT SPECIFIED|Clerk#000000469|0|of the slyly even pinto bean 12167|91|O|60173.82|1998-05-31|1-URGENT|Clerk#000000618|0|counts. bravely special pac 12192|277|F|27209.81|1994-10-19|1-URGENT|Clerk#000000387|0| bold ideas boost slyly slyly final frays. carefully unusual pinto b 12193|802|O|159382.08|1996-07-28|3-MEDIUM|Clerk#000000559|0|y. blithely final pinto beans according to the theodolites haggle care 12194|271|F|59806.63|1995-03-02|1-URGENT|Clerk#000000922|0|atelets haggle among the regular accounts. furiously even plate 12195|73|O|108946.27|1997-04-27|4-NOT SPECIFIED|Clerk#000000947|0|iously silent foxes eat carefull 12196|1261|F|83958.83|1993-06-03|4-NOT SPECIFIED|Clerk#000000284|0|e furiously ironic ideas affix fluffily above the quickly 12197|577|O|206318.73|1997-06-27|2-HIGH|Clerk#000000976|0|ular packages affix upon the slyly 12198|1300|F|59977.08|1993-02-28|3-MEDIUM|Clerk#000000917|0| ironic deposits. furiously express escapades detect 12199|520|O|103350.42|1995-12-15|5-LOW|Clerk#000000478|0|he ironic accounts. ideas detect. slyly final packa 12224|646|F|212674.36|1994-12-13|2-HIGH|Clerk#000000672|0|. slyly final accounts boost. blithely express deposits haggle sl 12225|656|O|188067.32|1997-01-29|2-HIGH|Clerk#000000903|0|ons. 
blithely bold requests are carefu 12226|1348|O|120809.59|1998-04-01|5-LOW|Clerk#000000415|0| across the final instructi 12227|1282|O|210133.42|1998-06-27|3-MEDIUM|Clerk#000000980|0| packages. quickly ironic accounts affix quickly. ironic, even packages 12228|226|F|276130.19|1994-06-26|4-NOT SPECIFIED|Clerk#000000474|0|fully regular excuses? ironic foxes are across the dogge 12229|1250|O|106159.32|1996-07-24|1-URGENT|Clerk#000000293|0|ptotes boost slyly even accounts. 12230|712|O|161632.17|1998-04-05|2-HIGH|Clerk#000000383|0|s. regular foxes breach regularly pending theodo 12231|1021|O|243683.82|1997-07-30|4-NOT SPECIFIED|Clerk#000000012|0|sual sauternes use across the bold theodolites. req 12256|532|F|146646.67|1992-12-19|3-MEDIUM|Clerk#000000238|0|iously alongside of the 12257|149|O|128270.26|1996-04-17|2-HIGH|Clerk#000000278|0|y above the unusual foxes. slyly even accounts haggl 12258|1408|F|300041.05|1994-12-01|5-LOW|Clerk#000000878|0|ing foxes boost. stealthy 12259|250|F|173853.14|1993-02-15|2-HIGH|Clerk#000000282|0| boost about the carefully ironic ideas. fluffily iron 12260|496|F|172336.18|1992-11-26|3-MEDIUM|Clerk#000000484|0|s along the slyly ruthless pinto beans haggle about the 12261|1126|F|362237.85|1993-10-24|5-LOW|Clerk#000000279|0|ccounts use quickly about the furiously bold foxes. furiously final 12262|1441|O|139469.07|1995-12-11|5-LOW|Clerk#000000886|0|efully. slyly final theodolit 12263|550|O|161275.31|1995-07-07|2-HIGH|Clerk#000000566|0|ts sleep across the carefully bold instruc 12288|1153|O|125448.87|1996-11-06|1-URGENT|Clerk#000000183|0|slyly regular deposits above the foxes are at the packages- 12289|421|O|128835.39|1995-12-25|5-LOW|Clerk#000000269|0| even deposits. ironic, regular deposits haggle blith 12290|1304|O|103022.30|1995-07-10|3-MEDIUM|Clerk#000000947|0|ully pending instructions boost slyly furiously 12291|740|O|19328.06|1998-03-23|1-URGENT|Clerk#000000074|0|al deposits. warhorses inte 12292|739|F|176370.50|1992-06-03|4-NOT SPECIFIED|Clerk#000000375|0|sometimes final foxes after the qui 12293|329|O|102326.14|1995-06-19|2-HIGH|Clerk#000000103|0| quietly ironic instructions sleep carefully furiously iro 12294|784|O|182534.50|1995-07-06|5-LOW|Clerk#000000583|0|ns run about the qui 12295|883|F|84218.70|1993-11-10|5-LOW|Clerk#000000955|0|ost around the slowly iron 12320|298|O|118435.28|1995-12-21|1-URGENT|Clerk#000000356|0|o beans wake carefully theodolites. final gifts haggle quickly 12321|79|F|12511.06|1994-04-04|3-MEDIUM|Clerk#000000705|0|uriously ironic deposits cajole furiously doggedly ironic depend 12322|856|P|42466.03|1995-05-12|1-URGENT|Clerk#000000760|0| the special instructions detect fluffily ac 12323|926|P|139824.58|1995-04-03|4-NOT SPECIFIED|Clerk#000000019|0|ts detect above the even dep 12324|1033|O|202248.40|1998-08-02|3-MEDIUM|Clerk#000000746|0|ending theodolites try to thrash. regular deposits about the fluffily pending 12325|973|F|70150.43|1995-01-05|1-URGENT|Clerk#000000440|0|packages are final p 12326|506|F|181528.08|1995-02-03|1-URGENT|Clerk#000000765|0|ccounts are fluffily carefu 12327|166|O|39113.28|1998-04-23|4-NOT SPECIFIED|Clerk#000000969|0|tions haggle carefully slyly 12352|1466|F|331745.34|1992-03-24|3-MEDIUM|Clerk#000000997|0|eas. regular instructions need to boost slyly re 12353|595|O|219706.88|1996-04-13|4-NOT SPECIFIED|Clerk#000000312|0|ependencies. special dependencies nag quickly even packages. 
slyly final de 12354|1243|O|54975.46|1997-01-21|5-LOW|Clerk#000000859|0|g to the carefully pending excuses cajole 12355|178|P|286003.34|1995-05-29|4-NOT SPECIFIED|Clerk#000000081|0|arhorses. close, final foxes are slyly 12356|656|F|121696.19|1992-10-19|1-URGENT|Clerk#000000540|0|ges haggle quickly! blithely careful courts impress alongside of the bold fox 12357|154|O|90986.72|1995-08-23|2-HIGH|Clerk#000000169|0|instructions poach furiously final requests. quickly express depo 12358|880|O|231011.45|1996-10-10|2-HIGH|Clerk#000000335|0| blithely pending ideas. quickly ironic pinto b 12359|1400|O|215672.79|1997-05-29|4-NOT SPECIFIED|Clerk#000000382|0| express packages grow above the blithely even foxes: 12384|1168|O|213609.26|1998-08-02|3-MEDIUM|Clerk#000000311|0|gle. furiously unus 12385|821|F|92862.99|1992-11-24|5-LOW|Clerk#000000324|0|sts use furiously. ironi 12386|424|F|314497.24|1992-03-21|3-MEDIUM|Clerk#000000709|0|e fluffily ironic dolphins. quick 12387|638|O|52638.42|1997-07-03|2-HIGH|Clerk#000000051|0|o the quickly regular requests use slyly unusual theodolites. 12388|952|O|331123.13|1997-10-16|4-NOT SPECIFIED|Clerk#000000611|0|boost furiously furiously final accounts. slyly regular deposits are am 12389|653|F|242568.31|1994-08-05|3-MEDIUM|Clerk#000000139|0|s boost according to the slyly even dolphins. final depend 12390|1285|O|49792.77|1996-06-24|3-MEDIUM|Clerk#000000421|0|posits doze blithely. fluffily ironic d 12391|1000|O|94747.78|1998-07-01|5-LOW|Clerk#000000433|0|cies are blithely. furious 12416|1150|F|9933.08|1993-06-26|1-URGENT|Clerk#000000400|0|ow ideas cajole furiou 12417|1081|F|230795.43|1993-10-25|1-URGENT|Clerk#000000152|0|requests. furiously express instructions haggle. e 12418|1300|F|124402.47|1992-02-19|4-NOT SPECIFIED|Clerk#000000547|0|tructions. foxes nag furiously abo 12419|982|O|268861.58|1995-10-16|1-URGENT|Clerk#000000104|0|y. furiously final sauternes sleep slyly above the pending 12420|1489|F|179274.43|1993-12-27|3-MEDIUM|Clerk#000000798|0|ackages cajole permanently. blithely quick packages alongs 12421|673|O|212213.47|1995-11-02|1-URGENT|Clerk#000000203|0|pending theodolites wake: ironic, express platelets cajole furiously. 12422|575|P|217235.25|1995-05-22|3-MEDIUM|Clerk#000000349|0|ssly pending requests cajole quietly final i 12423|986|F|90571.46|1992-07-18|1-URGENT|Clerk#000000742|0|nic courts haggle carefully finally regular requests. depo 12448|511|O|158476.25|1995-11-09|1-URGENT|Clerk#000000711|0|he quickly pending accounts. ironic 12449|461|F|133597.11|1993-02-01|3-MEDIUM|Clerk#000000312|0|s hinder carefully across the iro 12450|1321|F|41643.96|1994-03-13|4-NOT SPECIFIED|Clerk#000000550|0|kages use quickly final accounts. carefully regular packages integrate blithe 12451|1150|F|298853.62|1993-04-20|5-LOW|Clerk#000000319|0|uriously even ideas hagg 12452|364|F|173339.75|1993-12-08|3-MEDIUM|Clerk#000000867|0|indle furiously near the quickly regular accounts: silent, 12453|1213|F|180966.45|1994-01-14|5-LOW|Clerk#000000085|0|ic ideas promise. slyly ironic pinto beans above the dinos wake quickly bold 12454|217|O|42464.91|1997-03-21|1-URGENT|Clerk#000000910|0|ic, regular deposits are against the carefully unusual accounts. e 12455|292|F|216570.72|1992-09-04|4-NOT SPECIFIED|Clerk#000000594|0|nusual asymptotes. regular ideas haggle blithely across the ironic ideas. 12480|320|F|122198.33|1994-07-08|3-MEDIUM|Clerk#000000055|0|ructions wake fluffily fluffily final gifts! 
furiou 12481|271|O|85849.06|1995-07-11|1-URGENT|Clerk#000000785|0|uests sleep furiously bold deposits. blithely express acc 12482|998|O|153413.61|1996-01-08|5-LOW|Clerk#000000311|0|y express dependencies along the f 12483|124|F|153915.13|1993-08-05|1-URGENT|Clerk#000000652|0|cial ideas sleep furiously against the final, regular re 12484|1027|O|184195.36|1995-08-22|3-MEDIUM|Clerk#000000268|0|olphins. blithely ironic platelets s 12485|188|F|209369.25|1993-08-08|1-URGENT|Clerk#000000994|0|tions along the ideas 12486|1286|P|184001.37|1995-02-21|2-HIGH|Clerk#000000206|0| deposits; quickly ironic packages use stealthily about the qui 12487|401|F|41222.94|1994-11-30|2-HIGH|Clerk#000000575|0|xes. requests sleep carefully 12512|1402|O|106021.12|1996-04-02|2-HIGH|Clerk#000000813|0|nal foxes are fluffily. foxes snooze about the ironi 12513|419|O|62011.18|1997-09-03|5-LOW|Clerk#000000422|0| wake final, special requests. express instructions cajole after the furi 12514|1495|F|38477.49|1994-04-25|2-HIGH|Clerk#000000120|0|c courts doubt express, bo 12515|1459|F|128077.61|1993-05-14|3-MEDIUM|Clerk#000000624|0|kly special accounts. blithely regular packages boost slyly packages. pl 12516|1373|F|112837.28|1994-05-25|5-LOW|Clerk#000000087|0|y after the quickly unusual gifts. fluffily exp 12517|1303|O|84279.32|1997-02-01|5-LOW|Clerk#000000162|0|packages. express packages impress furiously even, bold theodolite 12518|1082|F|153987.45|1993-07-23|5-LOW|Clerk#000000194|0|ultipliers serve furiously a 12519|913|F|187489.35|1994-01-01|2-HIGH|Clerk#000000387|0|ependencies. carefully unusual deposits use finally ironic deposits. e 12544|440|F|62982.76|1994-03-04|5-LOW|Clerk#000000600|0|thely above the even 12545|175|O|74242.81|1996-07-01|1-URGENT|Clerk#000000472|0|ly. carefully silent excuses use carefully around the reg 12546|217|O|89344.66|1996-02-08|4-NOT SPECIFIED|Clerk#000000547|0|ounts mold. blithely stealthy depths haggle blithely blithe 12547|1141|O|205195.87|1998-04-14|2-HIGH|Clerk#000000729|0|odolites? blithely ir 12548|145|O|209786.08|1997-11-20|4-NOT SPECIFIED|Clerk#000000517|0|s excuses x-ray against 12549|28|O|100471.72|1997-06-03|1-URGENT|Clerk#000000845|0|he furiously regular pint 12550|878|F|39446.33|1993-03-09|4-NOT SPECIFIED|Clerk#000000282|0|as. blithely ironic pinto beans wake. slyly bold deposits detect blithely alon 12551|790|F|176659.70|1992-06-12|5-LOW|Clerk#000000090|0| slyly alongside of the special waters. asymptotes outside the pend 12576|1057|F|33914.11|1993-01-26|4-NOT SPECIFIED|Clerk#000000762|0|n decoys behind the carefully final requests 12577|616|O|134783.72|1998-05-18|4-NOT SPECIFIED|Clerk#000000586|0|special instruction 12578|1207|O|255196.69|1996-11-15|2-HIGH|Clerk#000000150|0|p slyly furious foxes. slyly pending a 12579|193|O|47191.62|1995-06-17|4-NOT SPECIFIED|Clerk#000000536|0|y silent pinto beans-- ironic deposits affix. furiously fina 12580|589|F|41858.68|1993-01-14|5-LOW|Clerk#000000373|0|y bold theodolites above the carefully regular 12581|823|F|106379.64|1994-10-21|2-HIGH|Clerk#000000582|0| final requests after the final patterns wake blithel 12582|538|F|22603.80|1993-01-22|4-NOT SPECIFIED|Clerk#000000671|0| ironic packages against the requests slee 12583|943|O|42860.56|1996-01-18|5-LOW|Clerk#000000676|0|yly fluffily even accounts. quickly careful asymptotes boost. ironic, bo 12608|301|F|61932.81|1993-03-08|1-URGENT|Clerk#000000087|0| have to nag quietly among the carefully bold pinto beans. 
expres 12609|805|O|128069.23|1996-01-17|3-MEDIUM|Clerk#000000207|0|l requests haggle furiously 12610|85|F|10061.76|1993-07-17|5-LOW|Clerk#000000952|0| bold theodolites engage blithely against th 12611|41|O|185736.53|1995-10-16|3-MEDIUM|Clerk#000000579|0|ckly carefully regular deposits 12612|1444|F|185433.93|1993-12-22|5-LOW|Clerk#000000381|0| accounts. furiously ironic requests along the final, ruthless accounts cajol 12613|328|F|116318.24|1993-04-11|3-MEDIUM|Clerk#000000550|0|lithely express ins 12614|670|O|273057.78|1996-02-28|3-MEDIUM|Clerk#000000387|0|wake. quickly stealthy foxes affix slyly ironic requests. 12615|118|F|20866.10|1995-03-06|4-NOT SPECIFIED|Clerk#000000993|0|old packages. accounts use slyly after the foxes. 12640|413|O|76597.62|1998-07-20|2-HIGH|Clerk#000000081|0|ites. slyly express deposits integrate fluffily. ironic courts abo 12641|1097|F|239626.14|1995-02-23|5-LOW|Clerk#000000617|0|its wake according to the slyly unusual excuses. even patterns are carefully 12642|1243|F|247694.31|1994-04-28|1-URGENT|Clerk#000000015|0|symptotes thrash blithely above the furiously regular accounts. q 12643|322|F|124302.25|1993-03-20|5-LOW|Clerk#000000486|0| slyly silent requests. qui 12644|1159|F|150689.23|1992-02-01|2-HIGH|Clerk#000000394|0| the final instructions. blithely ironic asymptotes boost carefully regular 12645|250|F|231538.56|1994-07-07|5-LOW|Clerk#000000705|0|de of the blithely regular accounts use slyly final accounts. fluff 12646|1334|F|116946.28|1993-02-18|1-URGENT|Clerk#000000155|0|regular ideas. slyly ironic re 12647|1198|O|38285.07|1997-02-27|4-NOT SPECIFIED|Clerk#000000190|0|. regular theodolites sleep after the care 12672|257|F|37187.79|1994-07-07|4-NOT SPECIFIED|Clerk#000000963|0|es are carefully blithely unusual dugouts. regular, i 12673|62|F|21911.85|1994-06-12|1-URGENT|Clerk#000000435|0|ffily. express accounts sleep slyly regular deposit 12674|586|F|96952.15|1992-07-05|5-LOW|Clerk#000000811|0|xcuses. express deposits wake quickl 12675|679|F|155533.85|1994-01-28|3-MEDIUM|Clerk#000000921|0|r theodolites. furiously pending gifts alongside 12676|839|F|5884.55|1994-01-16|2-HIGH|Clerk#000000960|0|ake slyly special deposits. slyly un 12677|190|O|187187.63|1996-05-25|5-LOW|Clerk#000000500|0|ely above the furiously silent accounts; carefully regular depend 12678|844|O|77831.02|1998-01-26|5-LOW|Clerk#000000395|0|around the blithely slow packages nag quickly express packages. carefully 12679|1117|O|75859.23|1997-06-22|2-HIGH|Clerk#000000326|0|s. furiously pending deposits sleep furiously a 12704|439|F|143027.03|1993-02-10|5-LOW|Clerk#000000553|0|ackages sleep. pinto beans haggle furiously. quickly final dolphins use blit 12705|295|F|74933.03|1994-09-16|5-LOW|Clerk#000000983|0|grow above the fluffily final packages. slyly pending ideas nag furiou 12706|458|F|157344.88|1994-11-21|1-URGENT|Clerk#000000997|0|ing instructions. deposits cajole. slyly sp 12707|1219|F|253413.30|1993-05-30|2-HIGH|Clerk#000000820|0|ffix furiously according to the final, ironic dolphin 12708|172|F|262921.58|1993-01-08|5-LOW|Clerk#000000882|0|ly ironic deposits. quickly final sentiments cajole car 12709|268|O|74462.32|1996-05-11|5-LOW|Clerk#000000991|0|hely alongside of the a 12710|1169|F|305191.71|1993-08-18|1-URGENT|Clerk#000000329|0|es wake furiously ironic accounts. fluffily ironic pinto bea 12711|85|F|80711.39|1992-02-20|1-URGENT|Clerk#000000326|0|usual instructions. pending, final deposits use. 12736|751|F|346186.75|1993-03-09|5-LOW|Clerk#000000385|0|ithely express deposits. 
pinto beans n 12737|1187|F|320360.43|1994-05-04|1-URGENT|Clerk#000000047|0|ly regular dinos affix slyly. ironic, quick packages boost carefully. specia 12738|1334|O|182506.16|1998-06-12|2-HIGH|Clerk#000000613|0|sts boost: carefully even decoys integrate against the furiously reg 12739|772|F|56336.51|1992-03-02|4-NOT SPECIFIED|Clerk#000000078|0|side the bold, express depend 12740|439|O|61728.22|1997-06-05|2-HIGH|Clerk#000000643|0| the special deposits ca 12741|1348|F|162616.34|1992-07-20|4-NOT SPECIFIED|Clerk#000000800|0| of the instructions. furiou 12742|748|O|36355.83|1997-06-07|5-LOW|Clerk#000000591|0|always final sauternes. carefully regula 12743|695|F|101594.83|1993-06-24|2-HIGH|Clerk#000000015|0|inal foxes cajole quickly. bli 12768|1202|F|121309.88|1994-04-09|1-URGENT|Clerk#000000642|0|sly even accounts haggle slyly. blithely pending epitaphs boost. carefully f 12769|1057|F|29166.32|1992-01-08|4-NOT SPECIFIED|Clerk#000000590|0|blithely across the furiously express foxes; slyly silent pa 12770|1415|F|129333.56|1993-04-18|4-NOT SPECIFIED|Clerk#000000243|0|bove the special, regular ideas. final pinto beans grow? iro 12771|451|F|203114.43|1994-01-18|1-URGENT|Clerk#000000548|0|counts. furiously final dependencies sleep. fluffi 12772|1451|P|221390.02|1995-05-12|4-NOT SPECIFIED|Clerk#000000248|0|ual requests wake quickly even pinto 12773|817|F|304639.93|1995-01-12|5-LOW|Clerk#000000670|0|s, pending ideas. fluffily ironic deposits cajole blithely furiously unusual 12774|1108|F|60657.84|1992-10-10|1-URGENT|Clerk#000000067|0|ounts. quickly final requests 12775|979|O|127678.72|1996-10-15|5-LOW|Clerk#000000553|0|s the quickly unusual 12800|11|F|302431.56|1993-05-23|5-LOW|Clerk#000000898|0|. slyly final accounts cajole i 12801|1412|O|226079.43|1997-08-28|3-MEDIUM|Clerk#000000582|0|bove the fluffily express asymptot 12802|1481|O|233609.74|1998-03-02|1-URGENT|Clerk#000000774|0|ounts. special, pending ideas cajole pe 12803|392|F|27391.77|1993-02-26|4-NOT SPECIFIED|Clerk#000000876|0|sly final accounts use slyly final accounts. blithe 12804|734|F|358058.27|1992-05-26|3-MEDIUM|Clerk#000000544|0|l packages boost quickly ironic, 12805|478|O|76234.32|1996-12-11|3-MEDIUM|Clerk#000000035|0| to the quickly special platelets? 12806|526|O|208746.01|1995-12-25|4-NOT SPECIFIED|Clerk#000000181|0|s about the slyly regular pinto beans nag 12807|391|O|34178.61|1998-01-16|5-LOW|Clerk#000000393|0|uffily final accounts wake fur 12832|362|F|66392.28|1994-12-11|3-MEDIUM|Clerk#000000566|0|unts are after the regular packages. furiously pending 12833|1285|O|102301.35|1998-06-14|5-LOW|Clerk#000000773|0|nts. even, pending foxes are 12834|757|O|263553.51|1996-11-03|4-NOT SPECIFIED|Clerk#000000370|0|ns. unusual ideas are. somas b 12835|370|O|142811.34|1997-01-20|4-NOT SPECIFIED|Clerk#000000894|0|ges. slyly final id 12836|1057|O|222420.89|1996-04-18|3-MEDIUM|Clerk#000000728|0|es sleep furiously against the quickly even inst 12837|590|F|112787.85|1992-12-03|3-MEDIUM|Clerk#000000431|0|ter the slowly unusual foxes. blithely express pinto beans haggle. furiou 12838|112|O|104930.86|1998-07-01|1-URGENT|Clerk#000000424|0|fully ironic foxes. quickly pending accounts about the fin 12839|1424|O|178226.93|1998-01-08|3-MEDIUM|Clerk#000001000|0|ng pinto beans. unusual theodolites engag 12864|647|F|142470.94|1994-05-17|2-HIGH|Clerk#000000620|0| the ironic, silent foxes. special 12865|1271|O|244542.24|1997-05-23|5-LOW|Clerk#000000362|0|requests boost slyly carefully even instructions. 
carefully regular packages c 12866|1433|O|165429.43|1997-01-17|3-MEDIUM|Clerk#000000996|0|y regular requests play carefully against the always final 12867|59|F|197098.68|1995-03-12|4-NOT SPECIFIED|Clerk#000000638|0|lar platelets. blithely unusual deposits cajole carefully s 12868|1042|P|229577.12|1995-03-02|1-URGENT|Clerk#000000993|0|ns sleep above the carefully ex 12869|1396|F|131379.62|1994-01-26|4-NOT SPECIFIED|Clerk#000000871|0|s the pending foxes: fluffily regular requests wake slyly against the slyl 12870|1366|F|51595.17|1993-07-28|1-URGENT|Clerk#000000157|0| furiously dogged theodolites. regular requests among the blithely 12871|1174|O|194923.17|1995-10-05|4-NOT SPECIFIED|Clerk#000000646|0|ally ironic packages. blithely regular asymptotes are a 12896|370|O|321570.67|1996-12-22|1-URGENT|Clerk#000000337|0|nic theodolites. slyly even pinto beans use quickly. care 12897|1169|O|153229.93|1995-07-19|4-NOT SPECIFIED|Clerk#000000854|0|ost carefully regular packages. blithely 12898|1045|F|44854.49|1994-09-17|3-MEDIUM|Clerk#000000873|0|packages maintain carefully after 12899|868|O|40541.89|1996-04-07|4-NOT SPECIFIED|Clerk#000000006|0|efully pending ideas cajole fluf 12900|220|F|41268.96|1993-05-04|2-HIGH|Clerk#000000285|0|s. even requests sleep carefully blithely unusual ac 12901|1283|F|302296.36|1994-05-06|2-HIGH|Clerk#000000994|0|ely final requests. fu 12902|640|F|321334.39|1993-10-30|4-NOT SPECIFIED|Clerk#000000254|0|y regular deposits. regular instructions sleep 12903|301|F|89081.29|1994-09-26|1-URGENT|Clerk#000000914|0|wake furiously fluffily bold dolphins. blithely regular pinto beans sle 12928|634|F|258054.14|1994-03-28|2-HIGH|Clerk#000000465|0|ng slyly after the final requests. furiously special 12929|917|F|138543.10|1993-08-31|1-URGENT|Clerk#000000397|0|g the blithely bold asymptotes. pending dependencies 12930|1072|O|10854.39|1995-11-25|2-HIGH|Clerk#000000162|0|es cajole alongside of the fluffily pending dependencies. regular 12931|775|F|72091.62|1992-04-25|3-MEDIUM|Clerk#000000196|0|final foxes boost across the slyly pending dependencies. even, 12932|238|O|151284.17|1997-08-27|3-MEDIUM|Clerk#000000787|0|ackages. foxes cajole blithely regular 12933|173|O|253181.34|1998-02-17|3-MEDIUM|Clerk#000000224|0|haggle according to 12934|623|F|273516.60|1994-03-06|4-NOT SPECIFIED|Clerk#000000441|0|sual, pending dependencies among 12935|509|F|69433.26|1994-05-10|3-MEDIUM|Clerk#000000549|0|gouts at the even packages cajole slyly final packages. slyly enticin 12960|1243|F|132107.73|1993-10-10|2-HIGH|Clerk#000000340|0|ng theodolites haggle after the final id 12961|1144|F|133684.91|1994-12-13|2-HIGH|Clerk#000000913|0|rding to the enticing, final foxes use blithely furiou 12962|1111|O|130541.81|1995-10-29|1-URGENT|Clerk#000000488|0|. accounts breach carefully blithely ironic deposits. 12963|1232|O|72621.23|1998-07-08|3-MEDIUM|Clerk#000000546|0|uriously regular deposits wake slowly along t 12964|556|O|228128.18|1998-01-13|2-HIGH|Clerk#000000980|0|ronic instructions. quickly regular a 12965|964|F|107134.13|1993-06-26|3-MEDIUM|Clerk#000000650|0|ate slyly against the carefully final packages. furiously final requests alo 12966|224|O|51608.06|1997-10-11|5-LOW|Clerk#000000252|0|ly furiously final packages. 
carefully regular packages integrate quic 12967|1147|F|9777.25|1993-09-22|2-HIGH|Clerk#000000650|0|gly final deposits are slyly carefully ironic cour 12992|139|O|199084.84|1997-02-25|5-LOW|Clerk#000000099|0|xpress theodolites use across the un 12993|1348|O|46875.17|1998-05-14|5-LOW|Clerk#000000435|0| across the depths. idle, final accounts lose furiously regular, 12994|1451|F|54188.56|1994-11-18|5-LOW|Clerk#000000557|0|ily among the slyly unusual deposits. quickly pending acco 12995|1162|O|200689.05|1996-07-29|3-MEDIUM|Clerk#000000027|0|ts except the quickly express request 12996|967|O|107497.47|1995-12-05|4-NOT SPECIFIED|Clerk#000000531|0|ccounts. blithely ironic pinto beans cajol 12997|520|O|23304.56|1996-05-27|1-URGENT|Clerk#000000085|0|accounts boost. final asymptotes affix blithely fluff 12998|799|O|25952.72|1996-10-03|2-HIGH|Clerk#000000213|0|instructions sleep. express, final patterns detect furiously. furi 12999|940|F|216567.07|1993-04-04|2-HIGH|Clerk#000000745|0|nusual foxes. express accounts sublate q 13024|664|F|58015.05|1992-03-14|3-MEDIUM|Clerk#000000700|0| bold requests nag quickly. d 13025|158|O|138165.03|1998-01-28|2-HIGH|Clerk#000000785|0|lly regular instructions serve. special deposits against the careful 13026|1126|O|194927.69|1997-04-01|2-HIGH|Clerk#000000050|0| bold ideas! express ideas across the furiously dogged theodolites use bold, 13027|955|F|160057.71|1992-01-16|1-URGENT|Clerk#000000236|0|ongside of the ideas wak 13028|685|F|231028.06|1994-04-09|1-URGENT|Clerk#000000687|0|regular platelets a 13029|340|O|177062.13|1997-11-11|3-MEDIUM|Clerk#000000699|0|riously above the blithely f 13030|1459|F|146978.50|1994-07-23|3-MEDIUM|Clerk#000000878|0|hely ironic packages are slyly regular ideas. quickly final pack 13031|7|F|237595.83|1992-12-05|5-LOW|Clerk#000000325|0|s are furiously. busy requests haggle furiously pinto beans. asymptotes are. 13056|437|F|193618.95|1994-04-23|4-NOT SPECIFIED|Clerk#000000174|0|ounts. quickly regular packages above the furiously unusual sauternes ha 13057|67|O|102973.85|1997-12-16|1-URGENT|Clerk#000000125|0|y. final requests breach furiously. regular deposits engage. 13058|1156|O|12461.38|1997-03-26|1-URGENT|Clerk#000000685|0|ly! special instructions sleep furiously throughout the deposits. 13059|136|O|197435.35|1997-05-31|5-LOW|Clerk#000000463|0| requests. blithely special 13060|1345|O|272394.13|1997-02-06|4-NOT SPECIFIED|Clerk#000000724|0|ously above the pending, special theodolites. dolphins ar 13061|821|O|76034.36|1997-09-17|1-URGENT|Clerk#000000937|0|regular deposits. s 13062|1291|O|63228.29|1996-05-03|2-HIGH|Clerk#000000216|0|s asymptotes cajole carefully regular depen 13063|1412|O|318204.60|1997-01-15|5-LOW|Clerk#000000506|0|packages are slyly fluffil 13088|1460|F|261581.56|1993-04-23|4-NOT SPECIFIED|Clerk#000000089|0|s. even, final packages on the carefully regular platelets cajole quick 13089|1403|O|277440.38|1995-12-15|4-NOT SPECIFIED|Clerk#000000502|0|e slyly ironic pinto beans. blithely express accounts boost. pen 13090|130|F|146458.92|1993-07-27|4-NOT SPECIFIED|Clerk#000000717|0|packages impress quickly across the fluffily regul 13091|856|F|200741.15|1994-11-16|3-MEDIUM|Clerk#000000786|0|ironic requests nag furiously stealthy pinto beans. 
slyly even pains 13092|1430|O|142733.13|1998-06-24|1-URGENT|Clerk#000000241|0|eans haggle furiously about the slyly daring deposi 13093|1132|O|144898.05|1996-05-05|2-HIGH|Clerk#000000244|0| slyly even sheaves haggl 13094|1129|F|64336.85|1993-08-19|4-NOT SPECIFIED|Clerk#000000376|0|theodolites serve around the slyly special accounts. instruc 13095|1454|O|282335.80|1996-07-22|1-URGENT|Clerk#000000837|0|ve theodolites detect according to the slyly special grouches. un 13120|1039|F|90929.21|1994-12-15|5-LOW|Clerk#000000149|0| to the quickly express requests nag carefully stealthily regular requ 13121|878|F|190958.73|1993-03-08|2-HIGH|Clerk#000000148|0|cross the blithely ironic the 13122|242|F|164977.59|1994-09-24|2-HIGH|Clerk#000000229|0|t blithely furiously special accounts. carefully regular 13123|841|O|208768.85|1998-07-03|2-HIGH|Clerk#000000231|0|fter the slyly speci 13124|10|F|57676.34|1993-12-18|3-MEDIUM|Clerk#000000862|0|ggle quickly according to the carefully even pac 13125|355|F|190034.02|1993-12-14|4-NOT SPECIFIED|Clerk#000000302|0|ly quickly final ideas. carefully bold ideas slee 13126|956|O|131328.54|1995-10-20|4-NOT SPECIFIED|Clerk#000000919|0|ithely bold foxes wake. accounts according to the quickly ev 13127|1165|F|38692.83|1993-08-01|3-MEDIUM|Clerk#000000178|0|cording to the ironic asymptotes sleep care 13152|850|F|283938.70|1994-02-15|2-HIGH|Clerk#000000367|0|ts. blithely final instructions sleep quickly final requ 13153|1144|O|165659.63|1996-11-06|1-URGENT|Clerk#000000666|0|refully across the carefully careful notornis. qu 13154|520|O|286298.87|1997-10-22|5-LOW|Clerk#000000557|0|foxes? quickly blithe ideas nag slyly. blithely regular packages cajole. fluff 13155|1079|O|190720.20|1996-06-18|1-URGENT|Clerk#000000727|0|ns play. express requests cajole quickly theodolites. carefully furious dino 13156|1283|O|180693.69|1995-10-06|5-LOW|Clerk#000000482|0| about the carefully regular a 13157|656|F|277209.62|1992-02-12|3-MEDIUM|Clerk#000000655|0|regular requests wake. never special ac 13158|134|F|198301.99|1992-11-21|1-URGENT|Clerk#000000638|0|ns promise slyly even requests. carefully ironic packages haggle 13159|784|O|175684.88|1996-12-02|1-URGENT|Clerk#000000262|0|efully among the blithely regular instructions. carefu 13184|472|O|21843.01|1998-03-28|4-NOT SPECIFIED|Clerk#000000845|0|ilent deposits haggle furiously about the silent deposits. 13185|805|O|300236.26|1997-07-15|5-LOW|Clerk#000000862|0|ly even tithes sleep daringly. unusual, ironic accou 13186|556|O|206062.94|1996-09-18|2-HIGH|Clerk#000000427|0|ckly pending accounts. quick 13187|923|F|78566.07|1993-12-09|2-HIGH|Clerk#000000421|0|ns haggle slyly. slyly pending instructions at the carefully final 13188|1436|O|95591.25|1998-07-20|3-MEDIUM|Clerk#000000061|0|ckey players. accounts haggle fluffily against the foxes. unusual, expre 13189|94|F|2361.93|1995-04-05|1-URGENT|Clerk#000000002|0|l deposits above the pending pinto beans are 13190|136|O|24775.00|1998-05-04|4-NOT SPECIFIED|Clerk#000000173|0|furiously express accounts. carefully i 13191|214|O|331578.75|1997-12-24|5-LOW|Clerk#000000080|0|regular accounts at the blithely even sh 13216|1075|F|97503.77|1992-04-14|3-MEDIUM|Clerk#000000901|0|e furiously express pinto beans. unusual deposits are. even instructio 13217|946|O|67106.36|1997-09-25|5-LOW|Clerk#000000659|0|se dependencies nag blithely after the blithely regular ideas. 13218|1438|F|210525.16|1994-05-17|2-HIGH|Clerk#000000715|0|ccounts. slyly pending instru 13219|535|O|190074.19|1997-10-15|2-HIGH|Clerk#000000251|0|s. 
regular, special platelets wake. unusual, even 13220|1480|O|134628.97|1998-07-31|1-URGENT|Clerk#000000586|0|refully final platelets: accounts of the busily special 13221|946|O|106536.33|1995-10-15|4-NOT SPECIFIED|Clerk#000000459|0|lthy accounts around the 13222|1363|O|93525.50|1997-05-04|4-NOT SPECIFIED|Clerk#000000435|0|quests. furiously silent packages 13223|352|O|45917.67|1996-08-08|4-NOT SPECIFIED|Clerk#000000713|0| slyly blithely regular requests. furiously regular multipliers wake 13248|892|O|39800.14|1997-02-02|3-MEDIUM|Clerk#000000920|0|eans use. even packages eat after the ca 13249|569|O|222028.79|1997-07-18|5-LOW|Clerk#000000660|0|nstructions nag blithely quick 13250|1325|O|141857.68|1997-08-18|3-MEDIUM|Clerk#000000150|0|sts about the final requests a 13251|982|O|123168.81|1997-03-28|1-URGENT|Clerk#000000928|0|cording to the blithely silent deposit 13252|1309|F|163473.91|1994-03-23|4-NOT SPECIFIED|Clerk#000000026|0|n ideas must boost blithely. expre 13253|790|O|277363.52|1996-10-28|2-HIGH|Clerk#000000767|0|refully even packages print. blithely bold deposits boost fluffily 13254|1066|O|204264.00|1998-02-23|3-MEDIUM|Clerk#000000890|0|blithely silent packages. fin 13255|1115|F|113899.92|1993-07-18|3-MEDIUM|Clerk#000000993|0|metimes across the even deposits. pending accounts affix blithely furi 13280|314|F|236922.45|1994-09-12|2-HIGH|Clerk#000000073|0|pinto beans promise carefully final requests. r 13281|1480|F|164425.24|1992-04-23|1-URGENT|Clerk#000000327|0|slyly. regularly ironic deposits haggle bli 13282|817|F|153262.10|1992-05-10|5-LOW|Clerk#000000436|0|luffily express foxes 13283|946|O|129981.62|1997-12-29|4-NOT SPECIFIED|Clerk#000000842|0|dependencies? quickly quick deposits cajol 13284|851|F|136289.59|1992-08-13|2-HIGH|Clerk#000000186|0|equests. slyly final de 13285|406|O|8165.39|1995-05-17|4-NOT SPECIFIED|Clerk#000000312|0|r packages. silent, even 13286|1114|O|81226.73|1995-10-12|2-HIGH|Clerk#000000186|0|osits. slyly express requests promise quickly. bold, final asymptote 13287|895|O|168246.03|1997-03-10|3-MEDIUM|Clerk#000000335|0|e theodolites. blithely even deposits dazzle care 13312|1324|O|80963.75|1996-11-04|1-URGENT|Clerk#000000904|0|final accounts wake slyl 13313|1438|F|254146.32|1994-07-17|4-NOT SPECIFIED|Clerk#000000036|0|d platelets. slyly express requests print carefully across the quickl 13314|412|F|226838.60|1994-05-14|2-HIGH|Clerk#000000842|0|ajole stealthily even deposits. furiously express foxes was evenly r 13315|449|O|192954.51|1996-10-16|2-HIGH|Clerk#000000759|0|thinly ironic accoun 13316|1316|F|42308.46|1992-10-23|5-LOW|Clerk#000000555|0|tain even, pending instructions. quickly pending pinto beans sleep against the 13317|865|O|43720.88|1996-04-12|3-MEDIUM|Clerk#000000096|0|ggedly unusual packages along the carefully final 13318|325|O|247805.76|1997-06-11|4-NOT SPECIFIED|Clerk#000000381|0|ounts. sometimes ironic packages sleep stealthily. slyly regular accounts are 13319|425|O|138467.85|1996-01-19|1-URGENT|Clerk#000000158|0|ecial excuses boost. carefully regular asymptotes was acco 13344|367|O|90946.06|1997-03-27|4-NOT SPECIFIED|Clerk#000000924|0|de the quickly regular excuses. blithely express packages nod fur 13345|1339|F|136543.60|1992-09-03|4-NOT SPECIFIED|Clerk#000000876|0|thely special dolphins. slyly regular foxes sleep carefull 13346|1012|F|244898.42|1992-11-16|5-LOW|Clerk#000000594|0|thely bold foxes sleep final reque 13347|560|F|250026.98|1993-04-24|4-NOT SPECIFIED|Clerk#000000933|0|uriously against the carefully final foxes. 
carefully 13348|1267|F|94270.40|1993-11-08|3-MEDIUM|Clerk#000000878|0|ns doubt fluffily across the requests. even theodolites around the fluffi 13349|1454|F|91069.45|1994-09-20|2-HIGH|Clerk#000000916|0|usual courts haggle ruthlessly. final deposi 13350|1307|O|246713.06|1998-05-06|2-HIGH|Clerk#000000152|0|gular requests cajole quickl 13351|709|F|203403.50|1993-05-07|3-MEDIUM|Clerk#000000892|0|eposits haggle slowly alongside of 13376|1456|F|245552.88|1992-10-10|3-MEDIUM|Clerk#000000807|0| deposits. furiously regular packages unwind slyly bl 13377|394|O|60077.37|1995-07-19|2-HIGH|Clerk#000000094|0|ily along the bold deposits. carefully unusual accounts use quickl 13378|1151|F|207411.39|1994-09-22|5-LOW|Clerk#000000729|0|nis integrate; deposits wake bravely special frets. furiously expre 13379|286|F|107484.04|1993-11-08|5-LOW|Clerk#000000398|0|hely special dependencies. carefully unusual deposits are regularly. b 13380|934|F|129938.34|1994-07-16|1-URGENT|Clerk#000000683|0|ffy deposits haggle blithely. bold deposits amon 13381|1109|O|102850.64|1995-08-18|3-MEDIUM|Clerk#000000835|0|s platelets nod carefully. final, unusual deposits nag blith 13382|1207|O|64313.96|1995-11-02|4-NOT SPECIFIED|Clerk#000000427|0|kages. regular packages boost slyly alongside 13383|1346|F|165841.68|1992-06-16|4-NOT SPECIFIED|Clerk#000000258|0|pecial ideas boost stealthily about the regular, express pinto bean 13408|1370|F|211990.67|1994-02-15|5-LOW|Clerk#000000772|0| since the pinto beans. furiously pending accounts ru 13409|1279|F|46643.47|1993-07-14|4-NOT SPECIFIED|Clerk#000000002|0|n dependencies detect furi 13410|592|F|71187.66|1993-06-07|4-NOT SPECIFIED|Clerk#000000922|0|lly ironic braids are. quickly final gifts haggle si 13411|1376|F|130230.72|1994-04-13|2-HIGH|Clerk#000000179|0|l foxes. silent theodolit 13412|1240|O|115408.80|1997-07-12|1-URGENT|Clerk#000000124|0|lithely slow packages. packages past the ru 13413|473|F|231772.27|1993-01-30|1-URGENT|Clerk#000000565|0|ironic instructions. special instructions boost quick 13414|1333|O|143244.82|1995-07-07|3-MEDIUM|Clerk#000000693|0|across the blithely regular 13415|1222|O|271662.08|1997-05-31|2-HIGH|Clerk#000000851|0|ully. packages haggle slyly around the 13440|865|F|24449.73|1993-10-14|4-NOT SPECIFIED|Clerk#000000189|0|fily ironic accounts. quietly regular deposits among th 13441|1282|O|60048.20|1995-08-31|1-URGENT|Clerk#000000983|0|tes. blithely regular packages wake enticingly. quickly da 13442|238|O|138794.82|1997-08-06|4-NOT SPECIFIED|Clerk#000000830|0|p quickly fluffily 13443|545|F|216308.57|1993-10-07|3-MEDIUM|Clerk#000000379|0|uriously silent packages nag. ideas cajole quickly carefully regula 13444|176|F|304179.27|1992-01-06|2-HIGH|Clerk#000000199|0|. forges are carefully. furiously final packages are blithely ironic the 13445|682|O|21693.67|1997-12-24|4-NOT SPECIFIED|Clerk#000000599|0|quickly. excuses cajole. idly express packages cajole blithely pending pinto b 13446|484|O|55646.82|1996-02-29|1-URGENT|Clerk#000000050|0|lly regular requests. blithely express excuses integra 13447|244|F|72392.32|1993-01-09|2-HIGH|Clerk#000000925|0|ts. blithely ironic ideas accord 13472|814|O|69332.61|1995-10-27|1-URGENT|Clerk#000000454|0|uests. regular asymptotes haggle quickly aro 13473|655|O|192774.21|1996-03-22|5-LOW|Clerk#000000655|0|usual pinto beans haggle blithely at the carefully regular p 13474|1069|O|101285.14|1997-06-09|3-MEDIUM|Clerk#000000912|0|out the bold instructions. 
requests a 13475|1403|F|295575.64|1994-06-06|1-URGENT|Clerk#000000426|0|ake among the quickly express depo 13476|1459|F|264702.77|1993-10-17|4-NOT SPECIFIED|Clerk#000000203|0|r, permanent accounts. ironic, ironic packages dazzle at the 13477|1276|O|143935.11|1997-11-23|3-MEDIUM|Clerk#000000242|0|express instructions. regular ideas 13478|325|O|167211.22|1998-01-04|2-HIGH|Clerk#000000849|0|ajole ironic, stealthy theodolites. d 13479|1217|F|170168.00|1994-12-05|2-HIGH|Clerk#000000469|0|s the furiously regular accounts. blithely 13504|1495|O|126515.95|1997-06-18|2-HIGH|Clerk#000000486|0|ngly after the pending accounts. final instr 13505|286|O|219315.83|1997-06-03|3-MEDIUM|Clerk#000000880|0|sly against the furiously regular packages. care 13506|1099|O|111048.07|1995-07-22|5-LOW|Clerk#000000974|0|. slyly ironic requests among the 13507|923|F|117398.65|1992-12-30|3-MEDIUM|Clerk#000000127|0|. bold deposits affix furiously. furiously ironi 13508|802|O|2962.11|1997-04-17|4-NOT SPECIFIED|Clerk#000000951|0|accounts. blithely eve 13509|946|O|68465.06|1996-10-07|2-HIGH|Clerk#000000562|0|p. dolphins are. regular reques 13510|710|O|80383.62|1997-10-15|3-MEDIUM|Clerk#000000723|0|tions. slyly ironic in 13511|467|O|134751.21|1998-01-03|4-NOT SPECIFIED|Clerk#000000611|0|ly regular foxes are slyly furiously bold deposits. furiously fin 13536|316|F|192964.26|1994-08-19|3-MEDIUM|Clerk#000000262|0|posits. blithely final i 13537|430|F|26435.99|1993-05-31|5-LOW|Clerk#000000623|0|telets haggle carefully. quickly regular frays are! slyly regular pla 13538|782|O|65147.01|1997-02-01|2-HIGH|Clerk#000000598|0|lly alongside of the pending, unusual reque 13539|1231|O|190664.21|1997-02-13|3-MEDIUM|Clerk#000000022|0|ding dolphins use. final pinto beans 13540|460|F|277929.48|1993-05-24|2-HIGH|Clerk#000000994|0|. blithely even asymptotes poach blithely alongside of the doggedly un 13541|634|O|53890.86|1996-07-29|4-NOT SPECIFIED|Clerk#000000094|0| the furiously quick accounts. final packages nag 13542|1016|F|302527.26|1994-03-09|5-LOW|Clerk#000000170|0|ts are until the furiously 13543|790|F|24097.46|1992-08-09|3-MEDIUM|Clerk#000000636|0|l dependencies nag against the packages. expr 13568|31|F|81474.17|1993-05-07|5-LOW|Clerk#000000024|0|sts. carefully bold dolphins cajole across the even requ 13569|839|P|187147.11|1995-04-11|1-URGENT|Clerk#000000973|0|s? regular platelets cajole above the furiousl 13570|1468|F|226767.02|1992-12-03|4-NOT SPECIFIED|Clerk#000000602|0|e of the theodolites. slyly pending deposits between the regula 13571|1231|O|142142.08|1998-06-04|5-LOW|Clerk#000000452|0|ites? carefully regular 13572|1498|F|115177.91|1992-05-18|1-URGENT|Clerk#000000144|0|inal requests are regularly against th 13573|898|O|78389.93|1997-04-23|5-LOW|Clerk#000000144|0| slyly final packages. carefully e 13574|151|O|72886.11|1997-11-14|1-URGENT|Clerk#000000472|0|y above the fluffily final instructions. express 13575|349|F|92113.36|1992-08-30|1-URGENT|Clerk#000000008|0|! express, express instruct 13600|1129|F|124567.73|1993-07-11|4-NOT SPECIFIED|Clerk#000000910|0|lites boost furiously after the quickly ironic packages. carefully fina 13601|8|F|256717.52|1992-08-17|5-LOW|Clerk#000000608|0| ironic, special foxes- 13602|1348|F|284393.60|1994-01-14|5-LOW|Clerk#000000312|0|nusual packages cajole stealthily regular accounts. regular, 13603|496|O|198335.91|1997-06-09|4-NOT SPECIFIED|Clerk#000000777|0|nic deposits? blithely ironic packages do nag 13604|1141|F|280511.75|1994-02-03|4-NOT SPECIFIED|Clerk#000000798|0|p stealthily. 
requests thrash carefully. accounts wake furiously afte 13605|1388|O|201717.90|1996-06-14|3-MEDIUM|Clerk#000000099|0| regular depths use furiou 13606|160|F|209589.77|1994-04-27|4-NOT SPECIFIED|Clerk#000000310|0|eas. unusual courts haggle slyly ruthless realms. even, unusua 13607|358|F|210043.39|1993-11-19|1-URGENT|Clerk#000000366|0|es mold. blithely express packages integrate after the bold pinto beans. b 13632|1084|F|264841.12|1994-03-11|3-MEDIUM|Clerk#000000635|0|quickly special ideas use bravely bold, bold packag 13633|1373|F|173408.51|1992-04-04|4-NOT SPECIFIED|Clerk#000000419|0|ifts cajole quickly 13634|869|P|43165.51|1995-04-19|2-HIGH|Clerk#000000538|0|ain carefully according to t 13635|269|F|121071.76|1994-05-02|1-URGENT|Clerk#000000172|0|uctions thrash quickly. furiously regular requests about the n 13636|107|F|171401.11|1994-01-23|5-LOW|Clerk#000000855|0|gular ideas wake among th 13637|188|O|66820.97|1995-12-11|1-URGENT|Clerk#000000074|0| quickly ironic requests. care 13638|1361|O|327915.84|1995-10-01|2-HIGH|Clerk#000000261|0| ironic requests haggle quickly ironic packages. final 13639|1009|O|126608.97|1997-06-13|2-HIGH|Clerk#000000848|0|s cajole slyly doggedly ironic asymptotes. ca 13664|814|O|292106.85|1995-12-16|3-MEDIUM|Clerk#000000542|0|ely bold theodolites nag. pinto beans above the p 13665|1039|O|181151.79|1996-04-08|1-URGENT|Clerk#000000591|0|as wake even, pending requests 13666|445|F|59180.35|1992-03-03|4-NOT SPECIFIED|Clerk#000000031|0| the packages. ironi 13667|1216|O|208665.97|1996-12-04|2-HIGH|Clerk#000000893|0| ruthless ideas poach ironically-- unusual 13668|878|F|218731.00|1992-02-25|1-URGENT|Clerk#000000198|0|press ideas. blithely unusual instructions are blithely. carefully final pac 13669|454|O|178957.15|1996-08-15|3-MEDIUM|Clerk#000000955|0| instructions could have to cajol 13670|226|F|153065.25|1992-05-24|1-URGENT|Clerk#000000988|0|s haggle at the even platelets. even 13671|1291|O|218819.92|1996-07-14|1-URGENT|Clerk#000000321|0| blithely regular requests. furiously special idea 13696|722|O|118614.92|1997-08-28|5-LOW|Clerk#000000882|0|ag fluffily carefully even theodolite 13697|1457|O|103807.36|1998-04-06|4-NOT SPECIFIED|Clerk#000000473|0|st the blithely unusual foxes cajole fluf 13698|1483|F|155040.76|1994-08-05|4-NOT SPECIFIED|Clerk#000000640|0|iously bold deposits are carefully blithely unusual 13699|577|O|22846.00|1997-04-08|4-NOT SPECIFIED|Clerk#000000746|0|. ironically ironic foxes among the f 13700|1063|F|20210.10|1992-02-14|1-URGENT|Clerk#000000801|0|heodolites cajole blithely. blithely unusual de 13701|1270|O|222424.50|1998-05-18|2-HIGH|Clerk#000000270|0|final accounts wake furiously among the regular foxes. slyly bold t 13702|1132|O|51969.64|1995-09-11|5-LOW|Clerk#000000834|0|round the fluffily silen 13703|236|O|219200.99|1996-12-29|5-LOW|Clerk#000000778|0| quickly furiously regular accou 13728|4|O|123722.52|1995-12-11|2-HIGH|Clerk#000000094|0|theodolites. ironic deposits boost among the slyly regular instru 13729|79|F|88426.57|1994-01-02|4-NOT SPECIFIED|Clerk#000000866|0|y regular platelets 13730|952|F|36769.57|1995-05-10|2-HIGH|Clerk#000000826|0|- quickly even pinto beans boost fluffily ironic, even requests. ironic, u 13731|1475|F|209159.47|1993-05-10|4-NOT SPECIFIED|Clerk#000000549|0|beans detect boldly. sheaves after the eve 13732|605|F|166563.09|1992-11-28|1-URGENT|Clerk#000000599|0|uriously regular courts 13733|100|O|328307.40|1998-01-03|5-LOW|Clerk#000000431|0|e quickly express foxes. 
blithely ironic instructio 13734|124|O|156502.58|1996-08-14|2-HIGH|Clerk#000000344|0|n deposits should have to thrash fluffily quickly regu 13735|58|F|130804.16|1994-05-22|3-MEDIUM|Clerk#000000589|0|ccounts are slyly blithely final accounts. furiously final accounts w 13760|964|O|130352.76|1996-06-03|5-LOW|Clerk#000000053|0|s wake. blithely bold dependencies 13761|169|F|199387.32|1994-10-18|5-LOW|Clerk#000000226|0|ainst the quickly regular accounts. quickly en 13762|1337|F|210739.37|1993-06-22|3-MEDIUM|Clerk#000000053|0|re blithely ideas. blithely regular ideas haggle above the foxe 13763|1142|O|164241.80|1997-06-27|3-MEDIUM|Clerk#000000027|0|as affix carefully express ideas. carefully ironic asymptotes unwin 13764|598|O|122061.97|1995-07-14|3-MEDIUM|Clerk#000000236|0|ronic deposits? furiously re 13765|715|F|156724.02|1994-04-06|4-NOT SPECIFIED|Clerk#000000969|0|ronic theodolites will have to haggle except the slyl 13766|1387|F|45293.38|1992-10-19|1-URGENT|Clerk#000000274|0|boost unusual accounts. regular reques 13767|923|F|211468.27|1993-08-27|2-HIGH|Clerk#000000679|0|. thinly express dolphins sleep; slyly final instr 13792|1366|O|48602.86|1996-02-04|4-NOT SPECIFIED|Clerk#000000980|0|y slyly final foxes. slyly even ideas affix furiously dependencies. caref 13793|904|O|18039.82|1995-09-18|2-HIGH|Clerk#000000222|0|e blithely above the slyly pending ideas. furiously furious pl 13794|587|F|234351.58|1994-01-05|1-URGENT|Clerk#000000119|0|ages cajole furiously alongside of the slyly express ideas- 13795|1406|P|233445.40|1995-04-11|4-NOT SPECIFIED|Clerk#000000673|0|ld dolphins cajole quickly unusual instructions. 13796|1004|O|245467.47|1997-06-22|4-NOT SPECIFIED|Clerk#000000344|0|special deposits wake 13797|272|O|227061.71|1996-08-26|1-URGENT|Clerk#000000379|0|usly silent theodolites wake slyly across 13798|1298|O|123932.29|1996-02-12|1-URGENT|Clerk#000000778|0|fluffily special, bol 13799|994|F|142646.96|1993-12-02|1-URGENT|Clerk#000000945|0|g according to the regularly even requests. carefully bold accoun 13824|1096|F|115028.49|1994-12-28|5-LOW|Clerk#000000604|0|equests. slow, sly ideas cajole fu 13825|56|F|174217.02|1994-03-22|2-HIGH|Clerk#000000038|0|ests. slyly express pin 13826|232|O|160230.62|1997-02-22|3-MEDIUM|Clerk#000000053|0|ost packages. slyly pending requests are c 13827|479|F|44465.43|1993-06-21|5-LOW|Clerk#000000455|0|busily close requests. ironic, even pinto beans use furiously requests. c 13828|985|F|85589.73|1994-07-13|1-URGENT|Clerk#000000682|0| unusual instructions sleep quickly. packages 13829|1075|O|188654.94|1996-09-22|3-MEDIUM|Clerk#000000010|0|raids alongside of the quick 13830|598|F|169613.65|1994-12-22|2-HIGH|Clerk#000000693|0|impress. even, special requests alongside of the instructions cajole fu 13831|740|O|234924.78|1996-04-17|1-URGENT|Clerk#000000204|0|efully unusual theodolites s 13856|1156|O|154144.16|1997-11-28|4-NOT SPECIFIED|Clerk#000000601|0| special theodolites cajole fluffi 13857|275|O|9265.55|1995-08-15|1-URGENT|Clerk#000000012|0|uses boost furiously even 13858|1105|F|44168.23|1994-07-13|2-HIGH|Clerk#000000003|0| fluffily even, regular accounts. furiously permanent dependencies are. slyl 13859|221|O|228467.72|1997-01-09|4-NOT SPECIFIED|Clerk#000000630|0|; enticingly ironic deposits detect along the blithely bold 13860|454|F|61875.64|1994-09-25|3-MEDIUM|Clerk#000000878|0|ccounts nod carefully. express requests whithout the slyly regular in 13861|1249|F|223838.87|1993-08-12|2-HIGH|Clerk#000000153|0|ng requests. 
quickly ironic packages around th 13862|1240|O|233858.28|1997-09-03|3-MEDIUM|Clerk#000000838|0|odolites integrate carefully unusual requests. sly ideas are 13863|1399|F|132450.97|1992-05-31|2-HIGH|Clerk#000000471|0|y special deposits boost ca 13888|733|F|6632.13|1993-05-04|3-MEDIUM|Clerk#000000635|0|ly regular pinto beans boost about the sheav 13889|848|O|73735.52|1995-09-05|3-MEDIUM|Clerk#000000379|0|ng the silent foxes. even, quiet patterns about th 13890|34|O|50473.48|1996-05-25|5-LOW|Clerk#000000263|0|eposits along the fluffily final accounts wake final, pending foxe 13891|508|O|62430.54|1996-07-05|4-NOT SPECIFIED|Clerk#000000147|0|es are slyly. special packages use slyly against the theodolites. pa 13892|1013|F|216407.28|1992-04-28|2-HIGH|Clerk#000000365|0|ithely ironic packages sleep after the ruthless instructions. quickly dogged t 13893|289|O|132790.59|1998-04-12|2-HIGH|Clerk#000000931|0| blithely regular, unusual dep 13894|1246|F|125452.99|1992-04-11|3-MEDIUM|Clerk#000000524|0|y pending hockey players cajole fluffily special sentiments. closely ironic 13895|671|F|225938.31|1993-11-21|2-HIGH|Clerk#000000381|0|across the fluffily even accounts. carefully silent ideas wake. blithe 13920|37|F|155193.67|1994-10-10|1-URGENT|Clerk#000000060|0| unusual deposits sleep blithely 13921|49|O|152288.97|1997-08-19|4-NOT SPECIFIED|Clerk#000000358|0|elets doubt against the slyly final 13922|241|O|252983.77|1995-08-03|2-HIGH|Clerk#000000301|0|requests use ironic de 13923|1000|F|155909.13|1992-07-09|2-HIGH|Clerk#000000189|0|s. regular packages 13924|328|F|3658.90|1994-12-20|2-HIGH|Clerk#000000251|0|haggle quickly ironic platelets. even tithes wake blit 13925|73|O|162158.06|1997-01-06|1-URGENT|Clerk#000000303|0|uriously ironic accounts around the never regular packages 13926|49|O|52124.55|1997-07-13|4-NOT SPECIFIED|Clerk#000000881|0|ages haggle furiously bold, final accounts. special foxes 13927|562|F|121815.82|1994-08-27|5-LOW|Clerk#000000534|0|lar, silent packages wake quickly 13952|571|O|115051.12|1997-04-25|4-NOT SPECIFIED|Clerk#000000896|0|ly ironic pinto beans. furiously even accounts haggle carefully accordi 13953|388|O|233895.85|1995-06-01|1-URGENT|Clerk#000000337|0|riously deposits. blithely 13954|775|O|132924.82|1996-05-02|5-LOW|Clerk#000000809|0|t furiously above the final requ 13955|1409|O|161957.19|1998-05-12|2-HIGH|Clerk#000000359|0|ar foxes. packages nag blithely fina 13956|716|P|107050.72|1995-03-13|1-URGENT|Clerk#000000062|0|e final deposits are according to the regula 13957|475|O|176779.20|1995-07-22|4-NOT SPECIFIED|Clerk#000000019|0|mpress carefully slyly even requests. blithely pending t 13958|616|F|52000.53|1993-01-24|5-LOW|Clerk#000000863|0|ly. carefully special accounts among the slyly express requests sleep afte 13959|451|F|99030.36|1994-04-05|3-MEDIUM|Clerk#000000952|0|efully bold requests above the busy, express deposits use express p 13984|401|F|197913.83|1992-09-25|5-LOW|Clerk#000000750|0|final, special foxes. carefully express theodolites boost ne 13985|397|O|174830.89|1998-06-14|2-HIGH|Clerk#000000876|0|old accounts wake instructions. furiously ironic requests use quickly. quickl 13986|1333|O|103675.53|1995-10-12|2-HIGH|Clerk#000000793|0|riously final foxes. fur 13987|643|O|64387.39|1998-04-14|4-NOT SPECIFIED|Clerk#000000268|0|es. furiously even 13988|53|F|114321.03|1992-08-02|4-NOT SPECIFIED|Clerk#000000556|0|le daring packages. final, final escapades cajole expre 13989|1480|O|189523.80|1997-07-06|5-LOW|Clerk#000000996|0|ate about the bold, express packages. 
closely brave accou 13990|751|O|143454.90|1998-02-07|1-URGENT|Clerk#000000683|0| slyly according to the furiously spe 13991|1204|F|261061.37|1992-03-07|2-HIGH|Clerk#000000245|0|nic, special accounts. quickly pending accounts wake 14016|1145|F|201366.56|1992-07-12|5-LOW|Clerk#000000150|0|ymptotes: packages around the 14017|1033|F|142714.54|1994-03-08|2-HIGH|Clerk#000000238|0|unusual deposits sleep furiously 14018|367|F|101182.03|1993-06-07|2-HIGH|Clerk#000000376|0|uctions. pending instructions cajole about th 14019|184|O|216427.67|1997-09-08|1-URGENT|Clerk#000000961|0|tect fluffily final pac 14020|841|O|75424.30|1998-01-29|2-HIGH|Clerk#000000275|0|aggle quickly along the blithely bold ins 14021|62|P|206447.05|1995-03-08|2-HIGH|Clerk#000000467|0|sly. carefully regular 14022|685|F|71271.43|1995-02-14|2-HIGH|Clerk#000000732|0|es. quickly express deposits nag across the iron 14023|787|F|127450.06|1993-02-21|4-NOT SPECIFIED|Clerk#000000349|0|ironic, regular dugouts 14048|1016|F|221205.20|1993-05-18|4-NOT SPECIFIED|Clerk#000000548|0|s. blithely even asy 14049|952|O|70228.24|1996-01-29|5-LOW|Clerk#000000189|0|yly express epitaphs are slyly express depo 14050|88|O|161689.12|1997-08-22|5-LOW|Clerk#000000283|0|en courts. carefully express packages 14051|599|F|22238.93|1993-09-17|4-NOT SPECIFIED|Clerk#000000098|0|ss blithely among the closely r 14052|1190|F|23078.95|1995-02-07|4-NOT SPECIFIED|Clerk#000000647|0|mong the regular foxes boost blithely special pinto beans. furious 14053|1453|F|25549.20|1993-02-09|2-HIGH|Clerk#000000597|0| carefully ironic courts sleep quickly pending, pending packag 14054|517|O|285667.97|1996-01-11|4-NOT SPECIFIED|Clerk#000000890|0|unusual waters above the dependencies cajole r 14055|904|F|90239.70|1994-09-02|2-HIGH|Clerk#000000825|0|ounts. carefully final somas unwind fluffily. 14080|791|O|62517.87|1998-05-29|4-NOT SPECIFIED|Clerk#000000106|0|nic packages use final, bold 14081|895|F|114279.17|1993-10-07|2-HIGH|Clerk#000000857|0|uests. fluffily final packages wake slow, ironic dep 14082|875|O|48224.95|1998-05-08|4-NOT SPECIFIED|Clerk#000000700|0|bove the sometimes even deposits. ruthlessly unusual deposit 14083|464|F|11732.48|1994-08-16|4-NOT SPECIFIED|Clerk#000000420|0| quickly ruthless a 14084|647|O|303760.75|1995-08-03|4-NOT SPECIFIED|Clerk#000000160|0|luffily above the final packages. requests 14085|895|F|136353.74|1994-02-05|2-HIGH|Clerk#000000503|0|eas boost carefully evenly pending requests-- furiously thin accounts 14086|346|F|54201.92|1993-10-21|5-LOW|Clerk#000000289|0|ial deposits sleep. express requests nag carefull 14087|275|O|208589.10|1997-02-12|5-LOW|Clerk#000000927|0|dependencies. quickly ironic frets above the fluffily ironic accounts use 14112|112|F|20742.84|1994-07-25|5-LOW|Clerk#000000463|0|ickly dogged accounts breach doggedly 14113|835|O|27091.83|1995-08-05|2-HIGH|Clerk#000000885|0|; carefully pending accounts use among the pending packag 14114|895|F|171306.58|1994-04-07|1-URGENT|Clerk#000000397|0|beans play regular, ironic deposits. instructio 14115|46|F|68488.60|1994-08-15|4-NOT SPECIFIED|Clerk#000000493|0|ckages sleep across the regular, silent pinto beans. bold, un 14116|1291|O|192210.86|1995-10-10|2-HIGH|Clerk#000000065|0|. carefully bold ideas sleep. carefully bold idea 14117|1297|F|281939.12|1993-08-26|4-NOT SPECIFIED|Clerk#000000480|0|uickly ironic ideas boost among the furiously pending deposits. 
14118|385|F|41143.91|1993-05-23|4-NOT SPECIFIED|Clerk#000000730|0| toward the express, unusual instructions sleep accordin 14119|482|F|203405.63|1993-06-01|5-LOW|Clerk#000000989|0|osits. furiously pending accounts across the slyly 14144|301|O|308016.67|1997-04-26|1-URGENT|Clerk#000000089|0|nusual instructions sleep. blithely silent requests thr 14145|7|O|270751.41|1997-04-17|1-URGENT|Clerk#000000920|0|x carefully ideas. evenly silent 14146|592|O|108225.88|1996-01-03|3-MEDIUM|Clerk#000000968|0|xpress accounts. even packages about the pen 14147|728|F|262030.95|1993-03-08|4-NOT SPECIFIED|Clerk#000000152|0|, special epitaphs haggle a 14148|965|O|130798.36|1998-04-30|3-MEDIUM|Clerk#000000188|0|ag carefully special foxes. carefully unus 14149|73|O|157174.97|1998-01-28|1-URGENT|Clerk#000000303|0|ests. ironic, special pinto beans try to print slyly along the pending 14150|89|F|196189.74|1994-02-09|2-HIGH|Clerk#000000695|0|y ironic accounts haggle across the furi 14151|1418|O|286984.16|1997-06-06|4-NOT SPECIFIED|Clerk#000000278|0|special accounts. blithely express deposits cajole slyly slyly ex 14176|991|F|141050.11|1994-03-13|5-LOW|Clerk#000000407|0|eodolites. slyly final platelets ca 14177|565|F|112612.65|1992-04-03|5-LOW|Clerk#000000442|0|nooze carefully slyly final packages. platelets about the q 14178|391|F|11236.45|1995-01-01|4-NOT SPECIFIED|Clerk#000000246|0| quick accounts nag carefully. regular accounts could 14179|458|O|384265.43|1997-09-07|1-URGENT|Clerk#000000053|0|about the bold, final pinto bean 14180|292|O|187330.32|1997-05-25|3-MEDIUM|Clerk#000000478|0|out the carefully regular accoun 14181|94|O|226844.98|1998-02-13|3-MEDIUM|Clerk#000000106|0|efully even depende 14182|934|F|68283.44|1994-06-23|5-LOW|Clerk#000000791|0|arefully. sentiments integrate 14183|875|O|85725.94|1997-03-17|5-LOW|Clerk#000000052|0| final requests detect slyly at the 14208|385|O|121214.10|1995-05-01|4-NOT SPECIFIED|Clerk#000000758|0|sly slyly silent requests. carefully special accounts sleep 14209|1025|O|137491.26|1996-11-04|2-HIGH|Clerk#000000211|0|leep slyly against the carefully bold dolphins. 14210|25|F|23549.76|1992-06-27|4-NOT SPECIFIED|Clerk#000000356|0|atelets nag carefully final foxes. ironic, silent 14211|1202|F|189400.90|1994-02-13|4-NOT SPECIFIED|Clerk#000000937|0|the blithely bold deposits are according to the accounts. f 14212|806|F|196439.01|1992-02-17|1-URGENT|Clerk#000000384|0| packages affix special deposits? carefully special pinto bean 14213|478|F|50949.10|1993-12-28|1-URGENT|Clerk#000000959|0|, careful platelets abo 14214|79|O|128802.82|1997-08-15|1-URGENT|Clerk#000000891|0|thely unusual pinto beans against the blithely special warhorses hagg 14215|1408|F|50568.60|1992-07-14|3-MEDIUM|Clerk#000000823|0|ounts haggle furiously. special gifts am 14240|689|F|210064.90|1992-02-18|2-HIGH|Clerk#000000728|0|sly. express, unusual asymptotes across the never fin 14241|928|F|153112.39|1992-06-15|1-URGENT|Clerk#000000866|0|c packages. instructions a 14242|913|P|115246.93|1995-05-09|2-HIGH|Clerk#000000927|0|d have to haggle according 14243|1073|O|63765.25|1997-02-15|3-MEDIUM|Clerk#000000943|0|sly ironic foxes nag carefully along t 14244|205|O|53072.12|1998-03-05|5-LOW|Clerk#000000139|0|press excuses doubt permanently. 14245|1309|F|73717.72|1993-02-12|1-URGENT|Clerk#000000975|0|furiously final foxes boost silent, final requests. slyly unusual deposi 14246|289|O|133280.40|1997-10-10|4-NOT SPECIFIED|Clerk#000000806|0| bold pinto beans. 
regular accounts 14247|671|O|219431.33|1995-08-17|1-URGENT|Clerk#000000531|0|ests haggle furiously about the pa 14272|1348|F|97840.61|1992-02-13|3-MEDIUM|Clerk#000000075|0|ses. blithely final deposits are. stealthy accounts engage. slyly 14273|1033|O|90555.37|1996-03-13|5-LOW|Clerk#000000938|0| special foxes lose quickly; fina 14274|565|F|244782.98|1994-01-07|4-NOT SPECIFIED|Clerk#000000564|0|nt requests are furiously furiously daring accounts. even 14275|1412|F|168905.36|1993-06-14|5-LOW|Clerk#000000789|0|furiously. slyly pending packages sublate furious 14276|499|F|241009.94|1994-06-19|3-MEDIUM|Clerk#000000149|0|ly bold requests haggle slyly according to the closely 14277|946|O|116805.58|1998-02-14|4-NOT SPECIFIED|Clerk#000000951|0|eep. thin theodolites are bl 14278|559|O|266421.62|1998-06-03|2-HIGH|Clerk#000000768|0|jole slyly ironic theodolites. carefully even deposi 14279|1117|O|157844.27|1998-02-09|2-HIGH|Clerk#000000441|0| after the furiously unusual realms. 14304|788|O|215723.64|1997-11-13|3-MEDIUM|Clerk#000000488|0|egular requests affix. even requ 14305|1427|O|80794.79|1996-07-23|1-URGENT|Clerk#000000396|0|mas cajole above the 14306|587|F|27239.18|1993-10-30|4-NOT SPECIFIED|Clerk#000000981|0|furiously regular e 14307|484|O|178901.14|1997-06-24|1-URGENT|Clerk#000000255|0| deposits wake. regular, special excuses us 14308|5|O|132817.36|1997-08-11|3-MEDIUM|Clerk#000000354|0|iously. packages wake according to the furio 14309|1382|F|103342.31|1994-01-31|1-URGENT|Clerk#000000753|0|s haggle furiously against the carefully final asymptotes. blith 14310|1393|O|59609.50|1997-09-13|5-LOW|Clerk#000000627|0|p furiously evenly final requests. even, ironic excuses are 14311|1412|F|46918.86|1994-08-02|1-URGENT|Clerk#000000916|0|nic pinto beans along the enticingly final reque 14336|760|F|172716.15|1994-11-19|2-HIGH|Clerk#000000699|0|eodolites cajole carefully. quickly ironic foxes about the fluffily ironic 14337|299|O|280177.62|1997-10-09|4-NOT SPECIFIED|Clerk#000000949|0| special ideas into the silently quick dep 14338|752|F|97316.04|1993-02-01|5-LOW|Clerk#000000658|0|l accounts. quickly special packages h 14339|1199|O|209018.11|1997-07-23|4-NOT SPECIFIED|Clerk#000000303|0|ar theodolites after the express, final pinto beans engage car 14340|391|O|222337.16|1995-10-13|5-LOW|Clerk#000000386|0|theodolites. furiously ironic pinto beans are busily 14341|667|F|197341.53|1993-09-03|4-NOT SPECIFIED|Clerk#000000669|0|lphins cajole furiously after the c 14342|160|F|137933.49|1994-04-28|5-LOW|Clerk#000000040|0| haggle quickly. bold excuses against the regular re 14343|1408|O|58414.89|1996-02-25|3-MEDIUM|Clerk#000000792|0| unusual deposits wake slyly even, even packages. slyly final depe 14368|676|O|121598.53|1995-09-11|2-HIGH|Clerk#000000228|0|ly express requests affix furiously. ironically final asymptotes according 14369|1357|O|272333.98|1996-11-26|1-URGENT|Clerk#000000063|0|sublate quickly furiously bold asymptot 14370|958|O|163080.52|1997-05-03|2-HIGH|Clerk#000000475|0|ly regular accounts. fluffil 14371|1192|F|116441.72|1993-06-23|2-HIGH|Clerk#000000831|0|. carefully express packages at th 14372|523|F|166914.23|1992-10-24|1-URGENT|Clerk#000000449|0|y. ironic deposits sleep? finally special pinto bean 14373|511|F|53131.83|1993-09-18|3-MEDIUM|Clerk#000000099|0|nto beans. fluffy requests affix bravely fluffily iron 14374|1180|O|121655.04|1995-05-17|1-URGENT|Clerk#000000237|0|ross the blithely final deposits. asymptotes are. slyl 14375|733|F|140929.00|1993-02-01|3-MEDIUM|Clerk#000000046|0|ans sleep. 
blithely regular foxes cajole before the ironic packa 14400|632|F|262307.57|1994-02-05|3-MEDIUM|Clerk#000000479|0|uests. furiously unusual platelets hinder final packages. bold 14401|1489|O|156911.31|1995-08-05|4-NOT SPECIFIED|Clerk#000000059|0|ly carefully even instructions. epitaphs solve instructions! bli 14402|838|F|109228.63|1993-10-15|3-MEDIUM|Clerk#000000672|0|azzle slyly. carefully regular instructions affix carefully deposits. careful 14403|241|O|120881.79|1998-03-02|4-NOT SPECIFIED|Clerk#000000202|0|hely packages. blithely pending dependencies wake furiously 14404|7|O|354885.81|1996-11-03|5-LOW|Clerk#000000657|0| the furiously unus 14405|269|O|94417.57|1996-04-24|5-LOW|Clerk#000000460|0| beans until the final, regular theodolites 14406|409|F|194997.99|1993-03-09|3-MEDIUM|Clerk#000000623|0|s. excuses boost bl 14407|952|F|21880.64|1993-11-05|2-HIGH|Clerk#000000124|0|cajole ruthless theodolites. carefully ironic req 14432|1226|O|44225.65|1996-11-12|4-NOT SPECIFIED|Clerk#000000784|0|re carefully against the fluffily final theodolites. furiously unusual d 14433|1082|O|203663.86|1996-05-13|2-HIGH|Clerk#000000011|0|es. furiously final deposits wake b 14434|1015|F|171433.55|1995-02-16|4-NOT SPECIFIED|Clerk#000000769|0|he unusual pinto beans. special realms cajole. quietly blith 14435|269|F|190900.78|1992-12-02|4-NOT SPECIFIED|Clerk#000000308|0|quests nag. final platelets haggle among the st 14436|658|F|231251.51|1994-03-13|5-LOW|Clerk#000000906|0|regular requests run furiously. unusual, 14437|721|F|285484.64|1994-05-15|1-URGENT|Clerk#000000348|0|refully even excuses alongside of the packages are busily final 14438|1117|O|107298.91|1995-08-23|3-MEDIUM|Clerk#000000059|0| the silent ideas wake after the express requests. unusual, final inst 14439|523|F|4033.74|1992-08-31|2-HIGH|Clerk#000000944|0|riously even courts. ev 14464|1429|O|43902.48|1998-01-23|5-LOW|Clerk#000000197|0| blithely. special deposits 14465|617|F|108913.02|1994-06-12|2-HIGH|Clerk#000000816|0|ndencies nag against the furiously special pin 14466|353|O|19731.95|1996-05-22|3-MEDIUM|Clerk#000000795|0|lithely after the slyly special deposits-- quickly regular gifts acros 14467|1126|O|160773.46|1996-05-31|4-NOT SPECIFIED|Clerk#000000927|0|regular, brave foxes 14468|716|O|60718.43|1998-07-13|3-MEDIUM|Clerk#000000164|0|g pinto beans after the special accounts sleep carefully after the regula 14469|1076|O|82731.79|1997-02-01|2-HIGH|Clerk#000000837|0|ep regular, final accounts? fluffily pending the 14470|949|F|140145.86|1995-01-25|3-MEDIUM|Clerk#000000018|0| asymptotes cajole carefully final a 14471|94|P|324194.82|1995-05-27|4-NOT SPECIFIED|Clerk#000001000|0|l pinto beans ought to nag carefully carefully ironic foxes. 14496|1153|O|206156.59|1996-04-11|3-MEDIUM|Clerk#000000350|0|g theodolites eat c 14497|1492|F|130424.80|1992-05-21|3-MEDIUM|Clerk#000000053|0|symptotes. slyly fluffy excuses ought to wake according to the slyly even de 14498|115|O|172949.99|1996-10-05|3-MEDIUM|Clerk#000000709|0| deposits! quickly pend 14499|361|F|306903.43|1993-11-15|5-LOW|Clerk#000000358|0|packages. carefully final ide 14500|1222|O|100773.58|1995-06-25|4-NOT SPECIFIED|Clerk#000000451|0| slyly regular ideas haggle slyly unusual packages. quickly unusual pinto 14501|607|O|83186.27|1997-08-31|2-HIGH|Clerk#000000826|0|e ideas. ironic deposits sleep according to the blithely ironi 14502|799|O|87836.22|1996-07-21|2-HIGH|Clerk#000000875|0| furiously ironic deposits cajole around the carefully silent accounts. 
expre 14503|671|F|198098.70|1995-02-06|4-NOT SPECIFIED|Clerk#000000519|0|ely regular depths haggle carefull 14528|334|F|155412.29|1994-06-16|1-URGENT|Clerk#000000568|0|gle instead of the carefully pending pinto beans. express, ex 14529|1372|F|193341.12|1993-09-21|5-LOW|Clerk#000000521|0|arefully unusual packages haggle carefully slyly final pinto beans. exp 14530|133|F|240313.77|1994-04-07|2-HIGH|Clerk#000000444|0|eodolites may wake final requests. furious 14531|1331|O|222583.58|1997-06-11|2-HIGH|Clerk#000000003|0|across the blithely even instructions. carefull 14532|1252|O|15069.98|1996-04-17|5-LOW|Clerk#000000906|0|ep furiously according to the quickly final deposits. reg 14533|43|O|161212.58|1996-05-06|4-NOT SPECIFIED|Clerk#000000394|0|ans alongside of the carefully ironic re 14534|1414|F|248752.30|1993-07-19|3-MEDIUM|Clerk#000000324|0|furiously. silent foxes boost finally. dependencies among the furiously fi 14535|1306|F|95671.80|1993-10-02|4-NOT SPECIFIED|Clerk#000000983|0|thely unusual accounts are fluffi 14560|1258|O|33048.76|1997-12-15|3-MEDIUM|Clerk#000000587|0|ckages grow furiously-- carefully final foxes according 14561|697|O|274551.87|1998-06-29|3-MEDIUM|Clerk#000000840|0|onic accounts. even, express excuses cajole carefully. bl 14562|973|O|150256.16|1996-07-20|5-LOW|Clerk#000000189|0|ve the blithely even dependencies-- quickly busy realms wake quickly. permane 14563|568|O|236613.87|1995-12-03|1-URGENT|Clerk#000000904|0|quests. quickly even theodolites nag blit 14564|520|F|1358.25|1993-05-22|4-NOT SPECIFIED|Clerk#000000799|0|carefully even deposits. furiously express acco 14565|1301|O|50022.91|1997-11-23|1-URGENT|Clerk#000000409|0| special foxes. blithely ironic ideas sleep carefully quietly final instruct 14566|1480|O|221337.42|1996-10-25|3-MEDIUM|Clerk#000000118|0|ns? slyly regular theodolites haggle quickly. fluffily bol 14567|1060|O|176372.95|1996-10-18|1-URGENT|Clerk#000000527|0|efully blithe excuses. slyly regular ideas promise. escapad 14592|926|O|93588.02|1995-07-06|3-MEDIUM|Clerk#000000402|0|ts affix alongside of the blithely ironic pinto beans. 14593|184|O|61820.82|1998-01-23|1-URGENT|Clerk#000000880|0| dependencies. slyly 14594|143|F|16529.84|1993-09-07|3-MEDIUM|Clerk#000000022|0| across the carefull 14595|202|O|32654.01|1997-12-03|3-MEDIUM|Clerk#000000854|0|kages. asymptotes dazzle. final, bold pinto be 14596|1198|F|167802.21|1994-01-11|1-URGENT|Clerk#000000754|0| even accounts above the furiously brave dep 14597|1192|F|293673.06|1993-08-31|1-URGENT|Clerk#000000768|0|haggle ironic, quick accounts. quickly special foxes nag slyly above the furi 14598|898|O|171449.52|1998-02-10|4-NOT SPECIFIED|Clerk#000000229|0|slyly even deposits are carefully. final, pe 14599|814|F|13810.80|1992-05-09|2-HIGH|Clerk#000000704|0|ke final theodolites 14624|1177|O|210247.47|1998-04-17|4-NOT SPECIFIED|Clerk#000000014|0|heodolites. quickly regular theodolites haggle ag 14625|1075|F|2059.98|1992-02-12|1-URGENT|Clerk#000000491|0|instructions play blithely? silent excuses snooze quick 14626|1453|O|118335.57|1997-08-12|3-MEDIUM|Clerk#000000450|0|egular platelets are. even requ 14627|679|F|212366.75|1994-02-05|3-MEDIUM|Clerk#000000520|0|quickly according to the b 14628|668|F|75317.77|1993-06-07|3-MEDIUM|Clerk#000000076|0|accounts hang furiously. furiously regular theodolites 14629|1469|F|78503.51|1993-01-08|2-HIGH|Clerk#000000344|0|uests sleep quickly. furiously ironic accounts against the quickly ironic id 14630|277|F|189160.54|1992-10-17|5-LOW|Clerk#000000575|0| theodolites. 
carefully 14631|1291|O|34502.66|1997-04-14|2-HIGH|Clerk#000000022|0|ly above the pendin 14656|1|O|28599.83|1997-11-18|2-HIGH|Clerk#000000270|0|uests. blithely even platelet 14657|370|F|116160.53|1994-02-28|1-URGENT|Clerk#000000756|0|ly across the ironic, ironic instructions. bold ideas 14658|1381|F|48274.02|1994-04-07|1-URGENT|Clerk#000000175|0|kly regular requests? regular theod 14659|25|O|145504.68|1998-02-10|3-MEDIUM|Clerk#000000260|0|l, ironic attainment 14660|899|O|139267.14|1997-05-24|4-NOT SPECIFIED|Clerk#000000605|0|c pinto beans. fluff 14661|1468|F|38295.53|1993-01-25|4-NOT SPECIFIED|Clerk#000000429|0| believe. silent packages haggle express instructio 14662|1330|O|200128.12|1995-11-17|2-HIGH|Clerk#000000988|0|efully pending accounts about the bold 14663|592|F|42406.05|1992-11-17|1-URGENT|Clerk#000000596|0|ietly above the packages. regular frets haggle slyly blithely regular pinto b 14688|427|O|127535.78|1997-02-18|5-LOW|Clerk#000000822|0|unusual, bold deposits. furiously bold ideas cajole fluffily ironic theodo 14689|226|O|68912.05|1998-07-31|2-HIGH|Clerk#000000100|0|quickly regular realms are along the carefully spe 14690|935|O|41205.72|1997-05-24|1-URGENT|Clerk#000000342|0|ic packages affix sly 14691|1381|O|81185.28|1998-04-11|5-LOW|Clerk#000000211|0|ffily even instructions use blithely. careful 14692|478|O|53591.54|1996-02-27|5-LOW|Clerk#000000309|0|xpress deposits wake slyly after the deposits. slyly re 14693|67|F|246072.95|1995-01-10|1-URGENT|Clerk#000000039|0|to beans. Tiresias above the special, bold packages sleep 14694|379|O|318967.92|1998-07-28|1-URGENT|Clerk#000000742|0|nic pinto beans sleep blithely pending, unusual somas. blithely ironic id 14695|1406|F|205288.40|1992-09-06|2-HIGH|Clerk#000000475|0|. ideas boost carefully around the even, final instruc 14720|401|F|171435.70|1993-09-05|3-MEDIUM|Clerk#000000354|0|ongside of the quickly final excuses sleep quickly dolphins. dinos 14721|1087|O|166081.83|1997-05-05|4-NOT SPECIFIED|Clerk#000000701|0|instructions haggle slyly. 14722|742|O|264702.15|1997-05-28|5-LOW|Clerk#000000048|0|blithely bold requests ar 14723|1282|O|40317.37|1997-01-04|5-LOW|Clerk#000000822|0|ic deposits affix carefully above th 14724|1396|O|41941.26|1995-11-18|1-URGENT|Clerk#000000886|0|bold dependencies about the busy instructions haggle regular ins 14725|569|O|261801.45|1995-06-17|2-HIGH|Clerk#000000177|0|ng asymptotes. final, ironic accounts cajole after 14726|1279|F|93802.35|1992-01-09|5-LOW|Clerk#000000590|0| foxes. deposits cajole blithely even grouches. b 14727|316|F|102382.66|1992-07-20|3-MEDIUM|Clerk#000000383|0|structions. daringly even packages wake slyly final requests. c 14752|1051|F|31543.83|1994-01-30|5-LOW|Clerk#000000802|0|. carefully regular pinto beans grow idly abou 14753|1118|O|45387.36|1997-01-05|1-URGENT|Clerk#000000855|0|n deposits across the 14754|86|O|112289.25|1996-06-23|3-MEDIUM|Clerk#000000283|0|ns. quickly ironic packages sleep furiously fluffily unusual excuses. de 14755|592|F|358175.60|1993-01-06|2-HIGH|Clerk#000000867|0|egular requests sleep careful packages. quickly r 14756|328|F|132718.02|1994-09-01|5-LOW|Clerk#000000491|0|olites. ironic, final instructions pro 14757|1420|O|55954.21|1997-12-24|4-NOT SPECIFIED|Clerk#000000011|0|ggle furiously. carefully special packages are c 14758|1225|F|37812.49|1993-10-27|2-HIGH|Clerk#000000687|0|ages nag about the furio 14759|70|O|40915.46|1997-01-05|3-MEDIUM|Clerk#000000034|0|he dolphins. ruthlessly regular packages play carefully. 
f 14784|1036|F|188067.67|1992-03-15|3-MEDIUM|Clerk#000000479|0|lyly final theodoli 14785|1249|F|107683.78|1994-10-10|2-HIGH|Clerk#000000446|0| slyly about the quickly sp 14786|1255|O|34058.87|1997-04-08|2-HIGH|Clerk#000000656|0|e carefully special deposits can nag blithely express, express accounts 14787|578|O|135287.72|1998-07-19|5-LOW|Clerk#000000522|0|deas against the blithely r 14788|1192|O|51229.59|1997-10-23|1-URGENT|Clerk#000000647|0|e the slyly pending deposits. c 14789|451|F|214256.97|1993-11-30|4-NOT SPECIFIED|Clerk#000000616|0|st furiously about the ca 14790|613|O|270163.54|1996-08-21|2-HIGH|Clerk#000000347|0|p. regular deposits wake. final n 14791|289|F|86492.66|1993-02-15|3-MEDIUM|Clerk#000000770|0|ideas wake blithely regularly regular requests. s 14816|508|F|89977.38|1993-07-15|4-NOT SPECIFIED|Clerk#000000721|0| requests. slyly even requests haggle? unusual, regula 14817|235|F|138541.57|1992-07-28|3-MEDIUM|Clerk#000000963|0|arefully unusual dolphins. furiously final accounts above the slyly 14818|643|O|182026.46|1996-11-18|4-NOT SPECIFIED|Clerk#000000588|0|totes. bold, final requests are according to the deposits. quickly regular 14819|473|F|141776.24|1993-01-26|4-NOT SPECIFIED|Clerk#000000641|0| permanent deposits. blithely final warthogs x-ray blithely slyly ir 14820|1135|F|160021.51|1992-01-10|2-HIGH|Clerk#000000090|0|posits. regular pinto beans detect carefully at the final pinto beans. unu 14821|1435|O|322002.95|1998-06-12|2-HIGH|Clerk#000000630|0|n packages are furiously ironic ideas. d 14822|473|O|182443.15|1996-03-26|2-HIGH|Clerk#000000675|0|es. even, special request 14823|832|F|190065.85|1994-02-13|5-LOW|Clerk#000000844|0| slyly final accounts: packages integrate quickly along the packages 14848|256|O|115009.62|1996-10-17|5-LOW|Clerk#000000567|0|warthogs use furiously across the 14849|739|O|82597.02|1997-02-07|3-MEDIUM|Clerk#000000123|0| cajole fluffily against the final, reg 14850|1348|O|227401.85|1997-12-04|1-URGENT|Clerk#000000960|0| quickly regular braids. ironic, regular accounts are carefully 14851|175|F|229811.50|1992-05-12|4-NOT SPECIFIED|Clerk#000000075|0|. blithely regular requests wake blithely after the furiously final accou 14852|1060|O|34634.76|1995-12-25|3-MEDIUM|Clerk#000000907|0|ages haggle accounts. careful, ironic excuses sleep quickly dogged asymptotes 14853|458|O|152920.35|1996-09-10|5-LOW|Clerk#000000036|0|ding instructions about the quickly pending requests sleep quickly iro 14854|472|F|75626.15|1993-04-29|5-LOW|Clerk#000000872|0|ronic pinto beans? even deposits nag c 14855|836|F|141609.96|1993-05-01|4-NOT SPECIFIED|Clerk#000000655|0|nent accounts sleep dependencies. furiously regular ideas ca 14880|1210|O|209395.33|1997-11-23|4-NOT SPECIFIED|Clerk#000000545|0|equests above the foxes wake blithely about the packages: ironic ex 14881|23|O|126051.68|1996-10-20|2-HIGH|Clerk#000000826|0|ades past the fluffily ironic foxes m 14882|833|O|89650.26|1997-02-13|1-URGENT|Clerk#000000353|0|y against the slyly ironic accounts. blithely silent acc 14883|865|P|148947.38|1995-05-07|5-LOW|Clerk#000000543|0|detect blithely: carefully regular asymptotes caj 14884|1450|O|278225.35|1995-09-28|2-HIGH|Clerk#000000481|0| carefully ironic multipliers. furiously final deposits 14885|19|O|261263.74|1997-09-21|5-LOW|Clerk#000000462|0|p slyly regular excu 14886|337|F|84441.18|1994-06-23|3-MEDIUM|Clerk#000000757|0|slyly above the deposi 14887|245|O|124720.14|1996-03-25|2-HIGH|Clerk#000000176|0|he pinto beans boost slyly regular deposits. 
fi 14912|752|F|28125.10|1993-11-07|1-URGENT|Clerk#000000924|0| ideas cajole slyly around the ironic, bold asymptotes. 14913|1411|F|178192.17|1994-02-15|2-HIGH|Clerk#000000823|0|gainst the carefully final orbits hang furiously above t 14914|1199|O|191598.59|1998-02-08|1-URGENT|Clerk#000000536|0|e carefully carefully ironic requests: never final packag 14915|811|O|59911.87|1996-09-10|4-NOT SPECIFIED|Clerk#000000708|0| accounts are above the enticingly express pin 14916|380|F|72290.87|1994-02-28|5-LOW|Clerk#000000656|0|al requests solve slyly above the never express requests. furious 14917|1148|O|280961.18|1998-01-13|1-URGENT|Clerk#000000974|0|as would use fluffily after the 14918|884|O|68255.07|1997-06-01|5-LOW|Clerk#000000674|0|ets. bold, pending deposits sleep. foxes wake 14919|838|O|36633.03|1996-09-10|2-HIGH|Clerk#000000455|0|as. carefully final ideas cajole finally blithely express foxes. slow 14944|535|O|119586.69|1997-10-14|2-HIGH|Clerk#000000962|0|lly. even instructions against 14945|68|O|210519.05|1996-03-30|1-URGENT|Clerk#000000467|0|nts? fluffily bold grouches after 14946|580|O|100402.47|1996-11-12|1-URGENT|Clerk#000000116|0|ffily bold dependencies wake. furiously regular instructions aro citus-7.0.3/src/test/regress/data/part.data000066400000000000000000003444151317107136600206220ustar00rootroot000000000000001|goldenrod lavender spring chocolate lace|Manufacturer#1|Brand#13|PROMO BURNISHED COPPER|7|JUMBO PKG|901.00|ly. slyly ironi 2|blush thistle blue yellow saddle|Manufacturer#1|Brand#13|LARGE BRUSHED BRASS|1|LG CASE|902.00|lar accounts amo 3|spring green yellow purple cornsilk|Manufacturer#4|Brand#42|STANDARD POLISHED BRASS|21|WRAP CASE|903.00|egular deposits hag 4|cornflower chocolate smoke green pink|Manufacturer#3|Brand#34|SMALL PLATED BRASS|14|MED DRUM|904.00|p furiously r 5|forest brown coral puff cream|Manufacturer#3|Brand#32|STANDARD POLISHED TIN|15|SM PKG|905.00| wake carefully 6|bisque cornflower lawn forest magenta|Manufacturer#2|Brand#24|PROMO PLATED STEEL|4|MED BAG|906.00|sual a 7|moccasin green thistle khaki floral|Manufacturer#1|Brand#11|SMALL PLATED COPPER|45|SM BAG|907.00|lyly. ex 8|misty lace thistle snow royal|Manufacturer#4|Brand#44|PROMO BURNISHED TIN|41|LG DRUM|908.00|eposi 9|thistle dim navajo dark gainsboro|Manufacturer#4|Brand#43|SMALL BURNISHED STEEL|12|WRAP CASE|909.00|ironic foxe 10|linen pink saddle puff powder|Manufacturer#5|Brand#54|LARGE BURNISHED STEEL|44|LG CAN|910.01|ithely final deposit 11|spring maroon seashell almond orchid|Manufacturer#2|Brand#25|STANDARD BURNISHED NICKEL|43|WRAP BOX|911.01|ng gr 12|cornflower wheat orange maroon ghost|Manufacturer#3|Brand#33|MEDIUM ANODIZED STEEL|25|JUMBO CASE|912.01| quickly 13|ghost olive orange rosy thistle|Manufacturer#5|Brand#55|MEDIUM BURNISHED NICKEL|1|JUMBO PACK|913.01|osits. 
14|khaki seashell rose cornsilk navajo|Manufacturer#1|Brand#13|SMALL POLISHED STEEL|28|JUMBO BOX|914.01|kages c 15|blanched honeydew sky turquoise medium|Manufacturer#1|Brand#15|LARGE ANODIZED BRASS|45|LG CASE|915.01|usual ac 16|deep sky turquoise drab peach|Manufacturer#3|Brand#32|PROMO PLATED TIN|2|MED PACK|916.01|unts a 17|indian navy coral pink deep|Manufacturer#4|Brand#43|ECONOMY BRUSHED STEEL|16|LG BOX|917.01| regular accounts 18|turquoise indian lemon lavender misty|Manufacturer#1|Brand#11|SMALL BURNISHED STEEL|42|JUMBO PACK|918.01|s cajole slyly a 19|chocolate navy tan deep brown|Manufacturer#2|Brand#23|SMALL ANODIZED NICKEL|33|WRAP BOX|919.01| pending acc 20|ivory navy honeydew sandy midnight|Manufacturer#1|Brand#12|LARGE POLISHED NICKEL|48|MED BAG|920.02|are across the asympt 21|lemon floral azure frosted lime|Manufacturer#3|Brand#33|SMALL BURNISHED TIN|31|MED BAG|921.02|ss packages. pendin 22|medium forest blue ghost black|Manufacturer#4|Brand#43|PROMO POLISHED BRASS|19|LG DRUM|922.02| even p 23|coral lavender seashell rosy burlywood|Manufacturer#3|Brand#35|MEDIUM BURNISHED TIN|42|JUMBO JAR|923.02|nic, fina 24|seashell coral metallic midnight floral|Manufacturer#5|Brand#52|MEDIUM PLATED STEEL|20|MED CASE|924.02| final the 25|aquamarine steel firebrick light turquoise|Manufacturer#5|Brand#55|STANDARD BRUSHED COPPER|3|JUMBO BAG|925.02|requests wake 26|beige frosted moccasin chocolate snow|Manufacturer#3|Brand#32|SMALL BRUSHED STEEL|32|SM CASE|926.02| instructions i 27|saddle puff beige linen yellow|Manufacturer#1|Brand#14|LARGE ANODIZED TIN|20|MED PKG|927.02|s wake. ir 28|navajo yellow drab white misty|Manufacturer#4|Brand#44|SMALL PLATED COPPER|19|JUMBO PKG|928.02|x-ray pending, iron 29|lemon sky grey salmon orchid|Manufacturer#3|Brand#33|PROMO PLATED COPPER|7|LG DRUM|929.02| carefully fluffi 30|cream misty steel spring medium|Manufacturer#4|Brand#42|PROMO ANODIZED TIN|17|LG BOX|930.03|carefully bus 31|slate seashell steel medium moccasin|Manufacturer#5|Brand#53|STANDARD BRUSHED TIN|10|LG BAG|931.03|uriously s 32|sandy wheat coral spring burnished|Manufacturer#4|Brand#42|ECONOMY PLATED BRASS|31|LG CASE|932.03|urts. carefully fin 33|spring bisque salmon slate pink|Manufacturer#2|Brand#22|ECONOMY PLATED NICKEL|16|LG PKG|933.03|ly eve 34|khaki steel rose ghost salmon|Manufacturer#1|Brand#13|LARGE BRUSHED STEEL|8|JUMBO BOX|934.03|riously ironic 35|green blush tomato burlywood seashell|Manufacturer#4|Brand#43|MEDIUM ANODIZED BRASS|14|JUMBO PACK|935.03|e carefully furi 36|chiffon tan forest moccasin dark|Manufacturer#2|Brand#25|SMALL BURNISHED COPPER|3|JUMBO CAN|936.03|olites o 37|royal coral orange burnished navajo|Manufacturer#4|Brand#45|LARGE POLISHED TIN|48|JUMBO BOX|937.03|silent 38|seashell papaya white mint brown|Manufacturer#4|Brand#43|ECONOMY ANODIZED BRASS|11|SM JAR|938.03|structions inte 39|rose medium floral salmon powder|Manufacturer#5|Brand#53|SMALL POLISHED TIN|43|JUMBO JAR|939.03|se slowly above the fl 40|lemon midnight metallic sienna steel|Manufacturer#2|Brand#25|ECONOMY BURNISHED COPPER|27|SM CASE|940.04|! blithely specia 41|burlywood goldenrod pink peru sienna|Manufacturer#2|Brand#23|ECONOMY ANODIZED TIN|7|WRAP JAR|941.04|uriously. 
furiously cl 42|midnight turquoise lawn beige thistle|Manufacturer#5|Brand#52|MEDIUM BURNISHED TIN|45|LG BOX|942.04|the slow 43|medium lace midnight royal chartreuse|Manufacturer#4|Brand#44|PROMO POLISHED STEEL|5|WRAP CASE|943.04|e slyly along the ir 44|saddle cream wheat lemon burnished|Manufacturer#4|Brand#45|MEDIUM PLATED TIN|48|SM PACK|944.04|pinto beans. carefully 45|lawn peru ghost khaki maroon|Manufacturer#4|Brand#43|SMALL BRUSHED NICKEL|9|WRAP BAG|945.04|nts bo 46|honeydew turquoise aquamarine spring tan|Manufacturer#1|Brand#11|STANDARD POLISHED TIN|45|WRAP CASE|946.04|the blithely unusual 47|honeydew red azure magenta brown|Manufacturer#4|Brand#45|LARGE BURNISHED BRASS|14|JUMBO PACK|947.04| even plate 48|slate thistle cornsilk pale forest|Manufacturer#5|Brand#53|STANDARD BRUSHED STEEL|27|JUMBO CASE|948.04|ng to the depo 49|light firebrick cyan puff blue|Manufacturer#2|Brand#24|SMALL BURNISHED TIN|31|MED DRUM|949.04|ar pack 50|linen blanched tomato slate medium|Manufacturer#3|Brand#33|LARGE ANODIZED TIN|25|WRAP PKG|950.05|kages m 51|lime frosted indian dodger linen|Manufacturer#4|Brand#45|ECONOMY BURNISHED NICKEL|34|JUMBO PACK|951.05|n foxes 52|lemon midnight lace sky deep|Manufacturer#3|Brand#35|STANDARD BURNISHED TIN|25|WRAP CASE|952.05| final deposits. fu 53|bisque rose cornsilk seashell purple|Manufacturer#2|Brand#23|ECONOMY BURNISHED NICKEL|32|MED BAG|953.05|mptot 54|blanched mint yellow papaya cyan|Manufacturer#2|Brand#21|LARGE BURNISHED COPPER|19|WRAP CASE|954.05|e blithely 55|sky cream deep tomato rosy|Manufacturer#2|Brand#23|ECONOMY BRUSHED COPPER|9|MED BAG|955.05|ly final pac 56|antique beige brown deep dodger|Manufacturer#1|Brand#12|MEDIUM PLATED STEEL|20|WRAP DRUM|956.05|ts. blithel 57|purple blue light sienna deep|Manufacturer#3|Brand#32|MEDIUM BURNISHED BRASS|49|MED PKG|957.05|lly abov 58|linen hot cornsilk drab bisque|Manufacturer#5|Brand#53|STANDARD POLISHED TIN|44|LG PACK|958.05| fluffily blithely reg 59|misty brown medium mint salmon|Manufacturer#5|Brand#53|MEDIUM POLISHED TIN|2|LG BAG|959.05|regular exc 60|snow spring sandy olive tomato|Manufacturer#1|Brand#11|LARGE POLISHED COPPER|27|JUMBO CASE|960.06| integ 61|light tan linen tomato peach|Manufacturer#5|Brand#54|SMALL BURNISHED NICKEL|18|WRAP DRUM|961.06|es. blithely en 62|tan cornsilk spring grey chocolate|Manufacturer#3|Brand#35|STANDARD BRUSHED BRASS|39|JUMBO BOX|962.06|ckly across the carefu 63|burnished puff coral light papaya|Manufacturer#3|Brand#32|STANDARD BURNISHED NICKEL|10|JUMBO CAN|963.06| quickly 64|aquamarine coral lemon ivory gainsboro|Manufacturer#2|Brand#21|MEDIUM ANODIZED BRASS|1|JUMBO CAN|964.06|efully regular pi 65|slate drab medium puff gainsboro|Manufacturer#5|Brand#53|MEDIUM BRUSHED COPPER|3|MED CAN|965.06|posits after the quic 66|cornflower pale almond lemon linen|Manufacturer#3|Brand#35|PROMO ANODIZED NICKEL|46|SM CASE|966.06|haggle blithely iro 67|slate salmon rose spring seashell|Manufacturer#2|Brand#21|SMALL BRUSHED TIN|31|WRAP DRUM|967.06| regular, p 68|bisque ivory mint purple almond|Manufacturer#1|Brand#11|PROMO ANODIZED STEEL|10|WRAP BOX|968.06|eposits shall h 69|lace burnished rosy antique metallic|Manufacturer#5|Brand#52|MEDIUM POLISHED BRASS|2|SM BOX|969.06|ely final depo 70|violet seashell firebrick dark navajo|Manufacturer#1|Brand#11|STANDARD BRUSHED STEEL|42|LG PACK|970.07|inal gifts. 
sl 71|violet firebrick cream peru white|Manufacturer#3|Brand#33|STANDARD PLATED BRASS|26|WRAP DRUM|971.07| packages alongside 72|hot spring yellow azure dodger|Manufacturer#2|Brand#23|STANDARD ANODIZED TIN|25|JUMBO PACK|972.07|efully final the 73|cream moccasin royal dim chiffon|Manufacturer#2|Brand#21|SMALL BRUSHED COPPER|35|WRAP DRUM|973.07|ts haggl 74|frosted grey aquamarine thistle papaya|Manufacturer#5|Brand#55|ECONOMY ANODIZED BRASS|25|JUMBO CASE|974.07|ent foxes 75|aquamarine maroon wheat salmon metallic|Manufacturer#3|Brand#35|SMALL BURNISHED NICKEL|39|SM JAR|975.07|s sleep furiou 76|rosy light lime puff sandy|Manufacturer#3|Brand#34|MEDIUM BRUSHED COPPER|9|SM PKG|976.07|n accounts sleep qu 77|mint bisque chiffon snow firebrick|Manufacturer#5|Brand#52|STANDARD BRUSHED COPPER|13|MED PKG|977.07|uests. 78|blush forest slate seashell puff|Manufacturer#1|Brand#14|ECONOMY POLISHED STEEL|24|LG JAR|978.07|icing deposits wake 79|gainsboro pink grey tan almond|Manufacturer#4|Brand#45|PROMO ANODIZED BRASS|22|JUMBO BAG|979.07| foxes are slyly regu 80|tomato chartreuse coral turquoise linen|Manufacturer#4|Brand#44|PROMO PLATED BRASS|28|MED CAN|980.08|unusual dependencies i 81|misty sandy cornsilk dodger blush|Manufacturer#5|Brand#53|ECONOMY BRUSHED TIN|21|MED BAG|981.08|ove the furiou 82|khaki tomato purple almond tan|Manufacturer#1|Brand#15|ECONOMY POLISHED TIN|12|WRAP BOX|982.08|ial requests haggle 83|blush green dim lawn peru|Manufacturer#1|Brand#12|PROMO BURNISHED NICKEL|47|SM CAN|983.08|ly regul 84|salmon floral cream rose dark|Manufacturer#4|Brand#45|SMALL ANODIZED NICKEL|26|JUMBO PACK|984.08|ideas nag 85|dim deep aquamarine smoke pale|Manufacturer#5|Brand#55|PROMO ANODIZED NICKEL|16|LG BAG|985.08| silent 86|green blanched firebrick dim cream|Manufacturer#4|Brand#44|STANDARD PLATED TIN|37|LG CASE|986.08| daring sheaves 87|purple lace seashell antique orange|Manufacturer#4|Brand#41|LARGE PLATED STEEL|41|WRAP PACK|987.08|yly final 88|lime orange bisque chartreuse lemon|Manufacturer#4|Brand#44|PROMO PLATED COPPER|16|SM CASE|988.08|e regular packages. 89|ghost lace lemon sienna saddle|Manufacturer#5|Brand#53|STANDARD BURNISHED STEEL|7|MED JAR|989.08|y final pinto 90|hot rosy violet plum pale|Manufacturer#5|Brand#51|ECONOMY POLISHED STEEL|49|JUMBO CAN|990.09|caref 91|misty bisque lavender spring turquoise|Manufacturer#2|Brand#21|STANDARD BRUSHED TIN|32|JUMBO PKG|991.09|counts dete 92|blush magenta ghost tomato rose|Manufacturer#2|Brand#22|STANDARD ANODIZED TIN|35|JUMBO PKG|992.09|he ironic accounts. 
sp 93|pale yellow cornsilk dodger moccasin|Manufacturer#2|Brand#24|LARGE ANODIZED TIN|2|WRAP DRUM|993.09| platel 94|blanched pink frosted mint snow|Manufacturer#3|Brand#35|STANDARD POLISHED BRASS|32|SM BOX|994.09|s accounts cajo 95|dodger beige wheat orchid navy|Manufacturer#3|Brand#33|LARGE BRUSHED TIN|36|WRAP DRUM|995.09| final pinto beans 96|chocolate light firebrick rose indian|Manufacturer#5|Brand#53|STANDARD BRUSHED STEEL|32|SM CASE|996.09|ng to the bli 97|coral dodger beige black chartreuse|Manufacturer#3|Brand#33|MEDIUM POLISHED BRASS|49|WRAP CAN|997.09|ss excuses sleep am 98|frosted peru chiffon yellow aquamarine|Manufacturer#5|Brand#54|STANDARD ANODIZED BRASS|22|MED JAR|998.09|e the q 99|mint grey purple sienna metallic|Manufacturer#2|Brand#21|SMALL BURNISHED STEEL|11|JUMBO PKG|999.09|press 100|cyan orchid indian cornflower saddle|Manufacturer#3|Brand#33|ECONOMY ANODIZED TIN|4|LG BAG|1000.10|of the steal 101|powder deep lavender violet gainsboro|Manufacturer#3|Brand#32|LARGE ANODIZED STEEL|26|JUMBO JAR|1001.10|ly even, 102|papaya maroon blush powder sky|Manufacturer#3|Brand#31|MEDIUM BURNISHED BRASS|17|SM DRUM|1002.10|ular packa 103|navy sky spring orchid forest|Manufacturer#2|Brand#25|MEDIUM PLATED BRASS|45|WRAP DRUM|1003.10|e blithely blith 104|plum cyan cornflower midnight royal|Manufacturer#1|Brand#13|MEDIUM ANODIZED STEEL|36|JUMBO BAG|1004.10|ites sleep quickly 105|dodger slate pale mint navajo|Manufacturer#1|Brand#15|SMALL POLISHED COPPER|27|LG DRUM|1005.10|odolites was 106|cornsilk bisque seashell lemon frosted|Manufacturer#3|Brand#31|MEDIUM PLATED BRASS|28|WRAP DRUM|1006.10|unts maintain 107|violet honeydew bisque sienna orchid|Manufacturer#5|Brand#53|SMALL BURNISHED TIN|12|MED BOX|1007.10|slyly special depos 108|bisque peach magenta tomato yellow|Manufacturer#1|Brand#12|PROMO PLATED NICKEL|41|MED PKG|1008.10|after the carefully 109|lemon black indian cornflower pale|Manufacturer#3|Brand#33|ECONOMY POLISHED TIN|11|LG PACK|1009.10|instruction 110|firebrick navy rose beige black|Manufacturer#3|Brand#33|STANDARD BURNISHED COPPER|46|LG DRUM|1010.11|t quickly a 111|orange cornflower mint snow peach|Manufacturer#5|Brand#54|LARGE BRUSHED COPPER|28|JUMBO JAR|1011.11|kly bold epitaphs 112|hot aquamarine tomato lace indian|Manufacturer#4|Brand#43|PROMO BRUSHED STEEL|42|JUMBO CAN|1012.11|the express, 113|almond seashell azure blanched light|Manufacturer#3|Brand#31|PROMO POLISHED TIN|23|LG CAN|1013.11|finally even 114|pink black blanched lace chartreuse|Manufacturer#5|Brand#51|MEDIUM POLISHED NICKEL|41|MED PACK|1014.11|ully final foxes. pint 115|spring chiffon cream orchid dodger|Manufacturer#4|Brand#45|STANDARD POLISHED STEEL|24|MED CAN|1015.11|counts nag! 
caref 116|goldenrod black slate forest red|Manufacturer#5|Brand#53|PROMO POLISHED NICKEL|33|SM PACK|1016.11|usly final courts 117|tomato honeydew pale red yellow|Manufacturer#1|Brand#14|SMALL BRUSHED TIN|25|LG BAG|1017.11|ages acc 118|ghost plum brown coral cornsilk|Manufacturer#2|Brand#25|PROMO ANODIZED TIN|31|MED PACK|1018.11|ly ironic pinto 119|olive metallic slate peach green|Manufacturer#4|Brand#43|LARGE POLISHED STEEL|30|WRAP CASE|1019.11|out the quickly r 120|pink powder mint moccasin navajo|Manufacturer#1|Brand#14|SMALL ANODIZED NICKEL|45|WRAP JAR|1020.12|lly a 121|bisque royal goldenrod medium thistle|Manufacturer#1|Brand#14|ECONOMY BRUSHED COPPER|13|SM PKG|1021.12|deposi 122|gainsboro royal forest dark lace|Manufacturer#2|Brand#21|MEDIUM ANODIZED TIN|8|LG DRUM|1022.12|sts c 123|deep dim peach light beige|Manufacturer#1|Brand#12|SMALL BURNISHED TIN|31|JUMBO PKG|1023.12|ray regula 124|wheat blush forest metallic navajo|Manufacturer#3|Brand#32|PROMO ANODIZED STEEL|1|LG BOX|1024.12|g the expr 125|mint ivory saddle peach midnight|Manufacturer#1|Brand#12|STANDARD BRUSHED BRASS|17|WRAP BAG|1025.12|kages against 126|burnished black blue metallic orchid|Manufacturer#4|Brand#45|MEDIUM BRUSHED NICKEL|4|LG BAG|1026.12|es sleep al 127|royal coral orchid spring sky|Manufacturer#5|Brand#52|SMALL BURNISHED NICKEL|14|LG JAR|1027.12|lithely expr 128|dark burlywood burnished snow sky|Manufacturer#2|Brand#22|PROMO PLATED TIN|5|SM BAG|1028.12|e of the furiously ex 129|grey spring chiffon thistle lime|Manufacturer#1|Brand#15|LARGE POLISHED TIN|20|SM JAR|1029.12| careful 130|gainsboro powder cyan pale rosy|Manufacturer#2|Brand#23|SMALL PLATED NICKEL|26|LG BOX|1030.13|ake slyly 131|tomato moccasin cyan brown goldenrod|Manufacturer#5|Brand#52|STANDARD ANODIZED BRASS|43|MED DRUM|1031.13|nts wake dar 132|seashell papaya tomato lime hot|Manufacturer#4|Brand#45|STANDARD BURNISHED BRASS|2|WRAP DRUM|1032.13|ckly expre 133|firebrick black dodger pink salmon|Manufacturer#1|Brand#13|SMALL BRUSHED NICKEL|19|LG PKG|1033.13| final pinto beans 134|steel beige mint maroon indian|Manufacturer#4|Brand#42|SMALL POLISHED STEEL|35|SM PKG|1034.13|es. bold pa 135|thistle chocolate ghost gainsboro peru|Manufacturer#2|Brand#21|MEDIUM BURNISHED STEEL|24|JUMBO CASE|1035.13|l frets 136|cornsilk maroon blanched thistle rosy|Manufacturer#2|Brand#22|SMALL PLATED STEEL|2|WRAP BAG|1036.13|kages print carefully 137|cornsilk drab ghost sandy royal|Manufacturer#3|Brand#31|ECONOMY PLATED STEEL|25|MED PACK|1037.13|the t 138|dark aquamarine tomato medium puff|Manufacturer#1|Brand#13|ECONOMY BURNISHED COPPER|42|JUMBO DRUM|1038.13|ts solve acro 139|floral steel burlywood navy cream|Manufacturer#3|Brand#32|MEDIUM BRUSHED STEEL|7|SM BOX|1039.13|ter t 140|aquamarine lavender maroon slate hot|Manufacturer#5|Brand#53|STANDARD PLATED STEEL|45|SM BOX|1040.14|oss the carefu 141|honeydew magenta tomato spring medium|Manufacturer#3|Brand#35|STANDARD ANODIZED STEEL|23|SM PKG|1041.14|ans nag furiously pen 142|chartreuse linen grey slate saddle|Manufacturer#5|Brand#55|STANDARD ANODIZED BRASS|36|MED JAR|1042.14|he accounts. 
pac 143|bisque dodger blanched steel maroon|Manufacturer#3|Brand#34|ECONOMY PLATED TIN|44|MED BAG|1043.14|nts across the 144|hot midnight orchid dim steel|Manufacturer#1|Brand#14|SMALL ANODIZED TIN|26|SM BOX|1044.14|owly 145|navajo lavender chocolate deep hot|Manufacturer#5|Brand#53|PROMO BRUSHED COPPER|24|SM BAG|1045.14|es wake furiously blit 146|azure smoke mint cream burlywood|Manufacturer#3|Brand#34|STANDARD BRUSHED COPPER|11|WRAP PACK|1046.14|unts cajole 147|honeydew orange dodger linen lace|Manufacturer#1|Brand#11|MEDIUM PLATED COPPER|29|JUMBO PKG|1047.14|wake never bold 148|yellow white ghost lavender salmon|Manufacturer#3|Brand#31|STANDARD PLATED STEEL|20|SM BOX|1048.14|platelets wake fu 149|tan thistle frosted indian lawn|Manufacturer#2|Brand#24|MEDIUM BURNISHED NICKEL|6|MED PKG|1049.14|leep requests. dog 150|pale rose navajo firebrick aquamarine|Manufacturer#3|Brand#35|LARGE BRUSHED TIN|21|SM BAG|1050.15|ironic foxes 151|chartreuse linen violet ghost thistle|Manufacturer#3|Brand#34|LARGE PLATED BRASS|45|MED CAN|1051.15|ccounts nag i 152|white sky antique tomato chartreuse|Manufacturer#5|Brand#53|MEDIUM POLISHED STEEL|48|MED CASE|1052.15|thely regular t 153|linen frosted slate coral peru|Manufacturer#1|Brand#11|STANDARD PLATED TIN|20|MED BAG|1053.15|thlessly. silen 154|peru moccasin peach pale spring|Manufacturer#1|Brand#11|ECONOMY ANODIZED TIN|1|JUMBO BAG|1054.15|posits 155|puff yellow cyan tomato purple|Manufacturer#2|Brand#21|SMALL BRUSHED NICKEL|28|WRAP CASE|1055.15|lly ironic, r 156|almond ghost powder blush forest|Manufacturer#4|Brand#43|SMALL POLISHED NICKEL|2|LG PKG|1056.15| pinto beans. eve 157|navajo linen coral brown forest|Manufacturer#1|Brand#11|ECONOMY ANODIZED STEEL|26|JUMBO PACK|1057.15|ial courts. ru 158|magenta light misty navy honeydew|Manufacturer#4|Brand#45|MEDIUM BURNISHED COPPER|47|LG JAR|1058.15| ideas detect slyl 159|white orange antique beige aquamarine|Manufacturer#4|Brand#43|SMALL ANODIZED BRASS|46|SM BAG|1059.15| ironic requests-- pe 160|frosted cornflower khaki salmon metallic|Manufacturer#5|Brand#55|STANDARD POLISHED COPPER|47|JUMBO CAN|1060.16|nts are carefully 161|metallic khaki navy forest cyan|Manufacturer#2|Brand#22|STANDARD PLATED TIN|17|SM PACK|1061.16|r the bl 162|burlywood cornflower aquamarine misty snow|Manufacturer#3|Brand#33|MEDIUM ANODIZED COPPER|35|JUMBO PACK|1062.16|e slyly around th 163|blush metallic maroon lawn forest|Manufacturer#2|Brand#21|ECONOMY PLATED TIN|34|WRAP DRUM|1063.16|nly s 164|orange cyan magenta navajo indian|Manufacturer#2|Brand#23|LARGE PLATED BRASS|35|JUMBO BAG|1064.16|mong th 165|white dim cornflower sky seashell|Manufacturer#1|Brand#15|STANDARD PLATED STEEL|24|SM CAN|1065.16| carefully fin 166|linen bisque tomato gainsboro goldenrod|Manufacturer#5|Brand#52|LARGE POLISHED COPPER|4|MED BAG|1066.16|ss the 167|almond floral grey dim sky|Manufacturer#3|Brand#32|LARGE ANODIZED STEEL|46|WRAP BOX|1067.16|ic ac 168|lace gainsboro burlywood smoke tomato|Manufacturer#1|Brand#13|SMALL BRUSHED COPPER|20|JUMBO DRUM|1068.16|ss package 169|bisque misty sky cornflower peach|Manufacturer#5|Brand#55|STANDARD POLISHED BRASS|10|JUMBO CASE|1069.16|lets alongside of 170|peru grey blanched goldenrod yellow|Manufacturer#3|Brand#33|LARGE POLISHED COPPER|28|LG DRUM|1070.17|yly s 171|beige violet black magenta chartreuse|Manufacturer#1|Brand#11|STANDARD BURNISHED COPPER|40|LG JAR|1071.17| the r 172|medium goldenrod linen sky coral|Manufacturer#5|Brand#53|PROMO PLATED NICKEL|28|MED CASE|1072.17|quick as 173|chartreuse seashell powder navy 
grey|Manufacturer#1|Brand#12|ECONOMY BURNISHED TIN|17|LG CASE|1073.17|sly bold excuses haggl 174|hot cornflower slate saddle pale|Manufacturer#1|Brand#15|ECONOMY BRUSHED COPPER|25|LG CASE|1074.17| accounts nag ab 175|magenta blue chartreuse tan green|Manufacturer#1|Brand#11|PROMO ANODIZED TIN|45|JUMBO JAR|1075.17|ole against the 176|pink drab ivory papaya grey|Manufacturer#2|Brand#24|SMALL ANODIZED STEEL|40|MED CAN|1076.17|blithely. ironic 177|indian turquoise purple green spring|Manufacturer#2|Brand#21|MEDIUM BRUSHED STEEL|42|LG BAG|1077.17|ermanently eve 178|lace blanched magenta yellow almond|Manufacturer#1|Brand#13|STANDARD POLISHED TIN|10|LG JAR|1078.17|regular instructions. 179|deep puff brown blue burlywood|Manufacturer#4|Brand#43|ECONOMY BRUSHED STEEL|20|LG JAR|1079.17|ely regul 180|seashell maroon lace burnished lavender|Manufacturer#3|Brand#33|STANDARD BURNISHED NICKEL|7|WRAP BAG|1080.18|oss the 181|antique plum smoke pink dodger|Manufacturer#2|Brand#24|MEDIUM PLATED STEEL|19|WRAP CAN|1081.18|al deposits 182|beige cyan burlywood chiffon light|Manufacturer#3|Brand#31|MEDIUM ANODIZED COPPER|11|JUMBO CAN|1082.18|bits are 183|ivory white burnished papaya cornflower|Manufacturer#5|Brand#52|PROMO POLISHED STEEL|35|LG PKG|1083.18|ly regular excus 184|ghost honeydew cyan lawn powder|Manufacturer#5|Brand#53|SMALL POLISHED TIN|42|LG BOX|1084.18|ding courts. idly iro 185|firebrick black ivory spring medium|Manufacturer#4|Brand#44|ECONOMY POLISHED TIN|4|WRAP BAG|1085.18|even foxe 186|grey purple chocolate turquoise plum|Manufacturer#2|Brand#23|ECONOMY BRUSHED TIN|15|JUMBO PKG|1086.18|ly reg 187|white red lace deep pale|Manufacturer#4|Brand#45|PROMO ANODIZED BRASS|45|MED CAN|1087.18|leep slyly s 188|moccasin steel rosy drab white|Manufacturer#5|Brand#54|ECONOMY ANODIZED BRASS|9|MED CAN|1088.18| above the silent p 189|dodger moccasin lemon purple thistle|Manufacturer#2|Brand#22|MEDIUM BRUSHED BRASS|13|WRAP DRUM|1089.18|en requests. sauternes 190|chartreuse goldenrod midnight cornflower blush|Manufacturer#5|Brand#53|LARGE BURNISHED NICKEL|23|WRAP BAG|1090.19| furiously even d 191|mint midnight puff forest peach|Manufacturer#3|Brand#31|MEDIUM POLISHED BRASS|36|WRAP BOX|1091.19| asymptote 192|thistle puff pink cream orange|Manufacturer#3|Brand#34|STANDARD BRUSHED COPPER|17|MED BAG|1092.19|uickly regular, expr 193|turquoise lime royal metallic azure|Manufacturer#4|Brand#45|ECONOMY BURNISHED BRASS|31|SM PKG|1093.19|final ideas wake furi 194|brown black cream navy plum|Manufacturer#5|Brand#51|ECONOMY POLISHED STEEL|7|SM CAN|1094.19|y special accoun 195|bisque sienna hot goldenrod khaki|Manufacturer#4|Brand#41|STANDARD BRUSHED NICKEL|40|MED CASE|1095.19|oxes sleep care 196|pale peru linen hot maroon|Manufacturer#3|Brand#33|SMALL BURNISHED NICKEL|3|JUMBO JAR|1096.19|uickly special 197|lawn lemon khaki rosy blue|Manufacturer#5|Brand#52|SMALL ANODIZED COPPER|18|SM JAR|1097.19|lithely after the eve 198|orange cornflower indian aquamarine white|Manufacturer#4|Brand#41|PROMO BRUSHED NICKEL|43|SM PACK|1098.19|ackages? 
carefully re 199|ivory slate lavender tan royal|Manufacturer#3|Brand#31|ECONOMY PLATED STEEL|23|JUMBO DRUM|1099.19|ickly regul 200|peach cornsilk navy rosy red|Manufacturer#5|Brand#54|MEDIUM POLISHED BRASS|22|LG PKG|1100.20|furiously even depo 201|dodger white chiffon moccasin green|Manufacturer#4|Brand#42|SMALL POLISHED STEEL|18|JUMBO BAG|1101.20| courts sl 202|brown violet turquoise frosted navajo|Manufacturer#4|Brand#45|MEDIUM BRUSHED COPPER|49|MED DRUM|1102.20| use slyly c 203|beige cornflower gainsboro chiffon wheat|Manufacturer#5|Brand#55|STANDARD ANODIZED STEEL|42|MED BAG|1103.20|usual instructio 204|floral powder deep linen hot|Manufacturer#5|Brand#54|LARGE BURNISHED COPPER|22|SM JAR|1104.20|egular pl 205|navajo bisque gainsboro forest coral|Manufacturer#5|Brand#54|ECONOMY PLATED NICKEL|2|JUMBO DRUM|1105.20| blithely regular 206|chiffon coral wheat peru lavender|Manufacturer#2|Brand#25|LARGE ANODIZED NICKEL|23|JUMBO CAN|1106.20|ng theodolites 207|blush chocolate light coral midnight|Manufacturer#3|Brand#35|STANDARD PLATED COPPER|48|MED CAN|1107.20|lar, 208|medium pink metallic honeydew ghost|Manufacturer#3|Brand#35|LARGE PLATED COPPER|5|MED PACK|1108.20|l deposits wak 209|sky plum pink lavender firebrick|Manufacturer#1|Brand#11|ECONOMY PLATED STEEL|6|MED CAN|1109.20| haggle a 210|white sky mint chiffon blanched|Manufacturer#3|Brand#32|ECONOMY BURNISHED TIN|5|LG JAR|1110.21|ly until the flu 211|turquoise forest orchid cream blue|Manufacturer#3|Brand#32|MEDIUM ANODIZED NICKEL|43|LG PKG|1111.21|s cajole 212|light sandy white bisque burlywood|Manufacturer#5|Brand#53|PROMO BRUSHED BRASS|42|JUMBO BOX|1112.21|uffily fin 213|pale midnight light ghost saddle|Manufacturer#3|Brand#35|SMALL POLISHED COPPER|14|WRAP CAN|1113.21|der slyly accor 214|cornsilk tomato firebrick tan puff|Manufacturer#4|Brand#41|PROMO PLATED BRASS|30|LG BAG|1114.21|quests. regular, fi 215|lace pink black orange cornflower|Manufacturer#5|Brand#52|ECONOMY BURNISHED NICKEL|19|SM PACK|1115.21| the carefully i 216|tan azure slate white mint|Manufacturer#4|Brand#45|MEDIUM PLATED NICKEL|35|MED JAR|1116.21|gside of the unus 217|lemon thistle sandy royal peru|Manufacturer#5|Brand#52|PROMO PLATED COPPER|44|MED DRUM|1117.21|heodolites integrat 218|sandy khaki ghost cornflower metallic|Manufacturer#4|Brand#41|PROMO BURNISHED TIN|24|WRAP PACK|1118.21| furiously 219|cyan aquamarine red plum frosted|Manufacturer#4|Brand#45|PROMO ANODIZED BRASS|31|MED JAR|1119.21|riously ironic reque 220|lemon firebrick lime white wheat|Manufacturer#1|Brand#14|PROMO BRUSHED BRASS|25|MED CASE|1120.22|heodolites sleep. c 221|chartreuse azure lemon saddle dark|Manufacturer#5|Brand#51|PROMO BRUSHED COPPER|7|LG DRUM|1121.22|ld asymptotes sleep ca 222|aquamarine puff antique drab beige|Manufacturer#3|Brand#33|LARGE ANODIZED BRASS|35|LG CASE|1122.22|ross the ironic, un 223|rose salmon yellow turquoise grey|Manufacturer#3|Brand#33|LARGE ANODIZED STEEL|29|MED BAG|1123.22|ons. even depos 224|drab lavender moccasin almond purple|Manufacturer#1|Brand#13|STANDARD POLISHED NICKEL|46|WRAP BOX|1124.22|y special depos 225|cornsilk powder bisque chartreuse spring|Manufacturer#4|Brand#45|ECONOMY BRUSHED TIN|23|MED CASE|1125.22|ts ca 226|blush navy indian peru lemon|Manufacturer#3|Brand#33|PROMO ANODIZED STEEL|7|SM PKG|1126.22|ular packages. 
some 227|dim yellow aquamarine lavender peach|Manufacturer#4|Brand#44|SMALL BRUSHED TIN|21|LG PACK|1127.22| silent r 228|saddle lawn blue burlywood white|Manufacturer#4|Brand#44|ECONOMY PLATED STEEL|12|SM DRUM|1128.22|f the fluffily 229|orchid misty cornsilk chartreuse medium|Manufacturer#1|Brand#15|SMALL POLISHED STEEL|19|MED CAN|1129.22|ng to the quick 230|thistle navy lawn sky slate|Manufacturer#5|Brand#51|STANDARD PLATED STEEL|20|SM PKG|1130.23|fter the ironic pin 231|bisque blush beige honeydew slate|Manufacturer#5|Brand#51|MEDIUM BURNISHED COPPER|17|MED PACK|1131.23|ffily. fur 232|ivory peru lavender orange dark|Manufacturer#5|Brand#53|LARGE BURNISHED NICKEL|50|SM PKG|1132.23|r, unusual requests 233|seashell tomato red lemon saddle|Manufacturer#3|Brand#34|MEDIUM ANODIZED BRASS|25|SM PACK|1133.23|ully ironic 234|chocolate slate maroon azure goldenrod|Manufacturer#1|Brand#13|MEDIUM ANODIZED NICKEL|26|WRAP BOX|1134.23| furiously special 235|sky mint aquamarine dark cornsilk|Manufacturer#3|Brand#32|ECONOMY BRUSHED COPPER|4|MED JAR|1135.23|s. carefully regular d 236|salmon antique burlywood linen peach|Manufacturer#5|Brand#55|ECONOMY ANODIZED STEEL|31|LG JAR|1136.23|ss packages hag 237|yellow orchid dark light smoke|Manufacturer#2|Brand#25|LARGE BURNISHED TIN|49|SM DRUM|1137.23|g the furiously 238|purple dark lawn navajo indian|Manufacturer#3|Brand#34|SMALL POLISHED TIN|35|LG CAN|1138.23|d, expres 239|medium olive pink dim firebrick|Manufacturer#2|Brand#24|LARGE PLATED NICKEL|36|WRAP DRUM|1139.23|lites are perman 240|rose beige magenta coral sandy|Manufacturer#3|Brand#32|MEDIUM BURNISHED NICKEL|1|LG CAN|1140.24|ructions. 241|purple drab puff peach tomato|Manufacturer#5|Brand#51|STANDARD BRUSHED COPPER|3|WRAP CASE|1141.24| quickly regular fo 242|spring green lemon medium olive|Manufacturer#3|Brand#35|SMALL POLISHED STEEL|42|LG BAG|1142.24|ously final theodo 243|bisque chiffon orange misty tan|Manufacturer#3|Brand#32|MEDIUM ANODIZED STEEL|49|SM BOX|1143.24|ress sentim 244|seashell ghost cyan burlywood thistle|Manufacturer#5|Brand#51|ECONOMY BURNISHED BRASS|48|LG BOX|1144.24|ns use above the ir 245|beige orchid chartreuse powder slate|Manufacturer#2|Brand#23|PROMO BURNISHED BRASS|39|JUMBO PKG|1145.24| instructions. ca 246|burlywood orchid dark drab dodger|Manufacturer#2|Brand#21|PROMO BRUSHED BRASS|18|SM CAN|1146.24|e caref 247|medium gainsboro lawn coral peach|Manufacturer#5|Brand#53|LARGE BURNISHED BRASS|4|JUMBO PACK|1147.24|r accounts. 
carefully 248|drab aquamarine red papaya pale|Manufacturer#1|Brand#15|SMALL PLATED BRASS|8|MED PACK|1148.24|furiously fluffily 249|hot sandy lavender saddle rosy|Manufacturer#4|Brand#44|ECONOMY BURNISHED BRASS|15|LG JAR|1149.24| excuses kindle f 250|antique goldenrod floral forest slate|Manufacturer#4|Brand#44|PROMO POLISHED NICKEL|35|WRAP CASE|1150.25|nstructions affix furi 251|dodger gainsboro violet steel papaya|Manufacturer#1|Brand#15|STANDARD BRUSHED NICKEL|41|WRAP CAN|1151.25|inal ideas aff 252|royal sienna magenta lace brown|Manufacturer#1|Brand#11|PROMO PLATED NICKEL|22|WRAP DRUM|1152.25|e slyly after the de 253|blanched ghost turquoise burlywood spring|Manufacturer#4|Brand#45|PROMO ANODIZED TIN|16|LG PKG|1153.25|he even, re 254|navy light red royal olive|Manufacturer#4|Brand#44|MEDIUM PLATED TIN|10|MED JAR|1154.25|ly at t 255|aquamarine seashell grey navy white|Manufacturer#2|Brand#21|LARGE ANODIZED NICKEL|24|JUMBO CAN|1155.25|fter the c 256|royal cream lawn purple powder|Manufacturer#3|Brand#32|LARGE BURNISHED TIN|2|SM DRUM|1156.25|bove the fur 257|blue gainsboro maroon green burnished|Manufacturer#4|Brand#41|ECONOMY POLISHED COPPER|11|SM JAR|1157.25|riously final foxes 258|royal frosted blue pale dim|Manufacturer#4|Brand#43|STANDARD ANODIZED COPPER|18|WRAP DRUM|1158.25|leep blith 259|ghost puff sky linen burnished|Manufacturer#2|Brand#23|SMALL BURNISHED BRASS|41|WRAP PACK|1159.25|l platelets. evenly 260|firebrick thistle lime frosted khaki|Manufacturer#5|Brand#55|STANDARD ANODIZED STEEL|10|LG BAG|1160.26| haggle f 261|floral moccasin puff rosy ghost|Manufacturer#4|Brand#41|STANDARD POLISHED TIN|44|JUMBO PACK|1161.26|ges was i 262|drab lavender lawn purple puff|Manufacturer#5|Brand#52|MEDIUM PLATED NICKEL|16|JUMBO BAG|1162.26|lar theodolites. f 263|linen red plum purple steel|Manufacturer#1|Brand#14|ECONOMY BURNISHED COPPER|46|MED CASE|1163.26|ly ironic the 264|plum floral sienna magenta ivory|Manufacturer#1|Brand#14|SMALL BURNISHED TIN|23|SM CAN|1164.26|ake. noto 265|aquamarine magenta seashell orange royal|Manufacturer#4|Brand#43|SMALL POLISHED STEEL|19|WRAP CASE|1165.26| slyly ac 266|medium brown tomato cornflower moccasin|Manufacturer#4|Brand#43|ECONOMY BRUSHED STEEL|25|LG DRUM|1166.26|odoli 267|pink salmon moccasin orange wheat|Manufacturer#2|Brand#24|LARGE POLISHED BRASS|41|JUMBO PACK|1167.26|ckly speci 268|cornsilk seashell lime aquamarine violet|Manufacturer#2|Brand#22|MEDIUM PLATED NICKEL|8|MED JAR|1168.26| carefully sil 269|chartreuse navajo moccasin orchid spring|Manufacturer#1|Brand#13|LARGE PLATED BRASS|5|WRAP BOX|1169.26|ully about the ca 270|mint deep white navajo floral|Manufacturer#2|Brand#25|PROMO BURNISHED COPPER|45|MED BAG|1170.27|ly even pinto beans. c 271|lace floral peru dim magenta|Manufacturer#5|Brand#52|PROMO PLATED COPPER|32|MED PKG|1171.27| fluffy acco 272|red purple tomato medium papaya|Manufacturer#5|Brand#52|LARGE POLISHED BRASS|39|LG PKG|1172.27| furiousl 273|pink white sky burnished coral|Manufacturer#2|Brand#25|STANDARD BRUSHED BRASS|50|LG BOX|1173.27|ackages along the 274|cyan lawn snow sky peru|Manufacturer#2|Brand#23|MEDIUM POLISHED STEEL|46|LG JAR|1174.27|hless accounts prin 275|steel chartreuse red plum sienna|Manufacturer#4|Brand#45|LARGE BURNISHED BRASS|37|SM BOX|1175.27|equests 276|orange yellow drab grey blanched|Manufacturer#2|Brand#25|SMALL BRUSHED COPPER|5|LG JAR|1176.27|iously. 
277|grey navy rosy red burnished|Manufacturer#4|Brand#43|ECONOMY ANODIZED NICKEL|49|JUMBO JAR|1177.27|arefully special dep 278|blanched lime almond moccasin floral|Manufacturer#4|Brand#42|STANDARD BURNISHED NICKEL|49|SM JAR|1178.27| bold p 279|linen olive lawn blanched gainsboro|Manufacturer#3|Brand#35|SMALL ANODIZED BRASS|27|MED CASE|1179.27|eans boos 280|blue azure slate lace burlywood|Manufacturer#2|Brand#21|STANDARD BURNISHED STEEL|33|LG CAN|1180.28|egular, s 281|red mint brown powder rose|Manufacturer#1|Brand#12|SMALL PLATED STEEL|7|MED CAN|1181.28|regula 282|steel burnished cornsilk beige lace|Manufacturer#1|Brand#11|SMALL BRUSHED COPPER|44|WRAP CAN|1182.28|packages. final ex 283|metallic burlywood lavender midnight cornsilk|Manufacturer#1|Brand#12|SMALL PLATED TIN|8|SM DRUM|1183.28| above the i 284|burlywood chocolate almond ivory coral|Manufacturer#5|Brand#51|ECONOMY POLISHED COPPER|9|MED DRUM|1184.28|its haggle fu 285|cream seashell orange frosted brown|Manufacturer#2|Brand#25|LARGE ANODIZED TIN|42|SM PACK|1185.28| the furiously fi 286|chocolate cornsilk goldenrod violet puff|Manufacturer#5|Brand#53|MEDIUM POLISHED TIN|1|WRAP CASE|1186.28| foxes. regular, fi 287|misty snow thistle burlywood wheat|Manufacturer#1|Brand#15|SMALL PLATED NICKEL|27|SM DRUM|1187.28|y even foxes. final, 288|tan purple yellow ivory olive|Manufacturer#2|Brand#23|STANDARD BURNISHED BRASS|31|SM CASE|1188.28|dencies mold among the 289|peach antique deep peru saddle|Manufacturer#4|Brand#43|PROMO POLISHED TIN|40|LG CASE|1189.28|odolites sleep care 290|deep wheat forest powder firebrick|Manufacturer#1|Brand#12|LARGE BURNISHED BRASS|19|JUMBO JAR|1190.29|according to t 291|goldenrod aquamarine olive white pink|Manufacturer#5|Brand#54|ECONOMY PLATED BRASS|26|SM JAR|1191.29|kages alo 292|navajo tan coral slate thistle|Manufacturer#1|Brand#12|SMALL PLATED NICKEL|2|MED CAN|1192.29|zzle among t 293|lawn cyan plum bisque cream|Manufacturer#5|Brand#52|STANDARD POLISHED STEEL|24|SM PACK|1193.29|onic requests 294|midnight misty royal floral brown|Manufacturer#2|Brand#24|ECONOMY POLISHED TIN|35|SM JAR|1194.29|truct 295|honeydew lace powder almond red|Manufacturer#2|Brand#25|MEDIUM BURNISHED NICKEL|49|JUMBO JAR|1195.29|ully ironic 296|tan seashell chocolate lemon orange|Manufacturer#1|Brand#13|LARGE BURNISHED BRASS|28|SM PKG|1196.29|, regul 297|antique powder almond navajo lace|Manufacturer#2|Brand#22|LARGE BURNISHED STEEL|23|LG PACK|1197.29|e of the slyl 298|azure sky tomato midnight lace|Manufacturer#4|Brand#45|PROMO BRUSHED COPPER|13|JUMBO CAN|1198.29|al deposits wake 299|deep coral dodger green peach|Manufacturer#4|Brand#41|STANDARD ANODIZED COPPER|26|WRAP PKG|1199.29|regular dol 300|light goldenrod green lime papaya|Manufacturer#4|Brand#44|PROMO ANODIZED BRASS|1|WRAP PACK|1200.30|tructions int 301|misty snow lace burnished linen|Manufacturer#4|Brand#42|ECONOMY BRUSHED STEEL|6|WRAP CAN|1201.30|orges 302|black seashell sky grey snow|Manufacturer#4|Brand#41|PROMO POLISHED BRASS|34|SM BAG|1202.30| ideas. 
special asy 303|almond chocolate firebrick black bisque|Manufacturer#4|Brand#45|ECONOMY PLATED BRASS|7|JUMBO CASE|1203.30|yly according 304|forest mint sienna navy ghost|Manufacturer#4|Brand#43|SMALL POLISHED NICKEL|12|LG DRUM|1204.30|ly regu 305|moccasin tan chartreuse cornsilk drab|Manufacturer#1|Brand#15|SMALL ANODIZED TIN|6|SM BAG|1205.30|ake above 306|lemon peach spring rose ghost|Manufacturer#3|Brand#33|SMALL BURNISHED NICKEL|25|LG PKG|1206.30| regular req 307|peru beige firebrick royal navy|Manufacturer#2|Brand#21|LARGE BURNISHED BRASS|30|WRAP DRUM|1207.30|eas. e 308|olive ivory turquoise sienna wheat|Manufacturer#2|Brand#21|STANDARD BURNISHED BRASS|40|WRAP JAR|1208.30|ngside of the s 309|red smoke blue pale lace|Manufacturer#4|Brand#42|LARGE ANODIZED NICKEL|49|JUMBO PKG|1209.30|lly even c 310|sandy sky dark navy blanched|Manufacturer#1|Brand#14|ECONOMY BRUSHED NICKEL|32|MED JAR|1210.31|arefully. requ 311|indian orchid navajo dark forest|Manufacturer#4|Brand#44|MEDIUM PLATED TIN|18|LG DRUM|1211.31|final, 312|powder firebrick white peach goldenrod|Manufacturer#3|Brand#32|PROMO BURNISHED STEEL|29|MED CASE|1212.31|nts doze slyly. d 313|moccasin rosy lemon linen royal|Manufacturer#3|Brand#34|PROMO BRUSHED TIN|13|WRAP PACK|1213.31|nly regular 314|grey royal seashell violet misty|Manufacturer#3|Brand#35|STANDARD PLATED STEEL|14|LG DRUM|1214.31|ular deposits. 315|navy cyan orchid seashell moccasin|Manufacturer#2|Brand#23|LARGE ANODIZED BRASS|20|LG JAR|1215.31|s was slyly pac 316|almond rosy green hot midnight|Manufacturer#4|Brand#41|MEDIUM BRUSHED TIN|9|MED CASE|1216.31|deposits haggle 317|linen peru thistle turquoise medium|Manufacturer#1|Brand#15|LARGE POLISHED STEEL|34|JUMBO BOX|1217.31|e furiously regular pa 318|rosy rose indian puff pale|Manufacturer#1|Brand#12|PROMO BURNISHED NICKEL|20|JUMBO CASE|1218.31|ar re 319|royal metallic lawn spring firebrick|Manufacturer#3|Brand#31|MEDIUM BURNISHED TIN|44|LG BOX|1219.31|kages boost along 320|chiffon linen rose white rosy|Manufacturer#5|Brand#53|LARGE ANODIZED STEEL|26|WRAP DRUM|1220.32|structions a 321|tomato drab cream tan navajo|Manufacturer#5|Brand#51|ECONOMY ANODIZED STEEL|9|SM JAR|1221.32|tructions boost car 322|dark black rosy yellow ivory|Manufacturer#3|Brand#35|STANDARD PLATED STEEL|32|JUMBO CASE|1222.32|uses. blithely pendin 323|moccasin goldenrod tan maroon bisque|Manufacturer#4|Brand#41|MEDIUM BRUSHED BRASS|15|MED CASE|1223.32|ular pi 324|aquamarine ivory magenta linen black|Manufacturer#2|Brand#25|SMALL PLATED TIN|36|SM BAG|1224.32|ages. requests wake. 325|plum cyan almond dark orchid|Manufacturer#1|Brand#15|LARGE PLATED COPPER|22|MED PACK|1225.32|ackages. slyly pendin 326|sky papaya green azure chiffon|Manufacturer#3|Brand#34|LARGE ANODIZED STEEL|48|JUMBO CAN|1226.32|aggle slyly special f 327|metallic lavender green firebrick ghost|Manufacturer#2|Brand#23|PROMO POLISHED BRASS|14|SM PACK|1227.32|regular 328|turquoise bisque wheat cornflower forest|Manufacturer#3|Brand#34|PROMO POLISHED COPPER|24|JUMBO CASE|1228.32|ronic dependencies 329|mint dodger chocolate papaya azure|Manufacturer#3|Brand#31|MEDIUM ANODIZED NICKEL|21|LG PKG|1229.32|. 
quickly pending pa 330|royal lime misty frosted smoke|Manufacturer#3|Brand#35|LARGE PLATED COPPER|18|LG JAR|1230.33|efully pending depend 331|metallic ivory cornsilk mint hot|Manufacturer#2|Brand#25|SMALL BURNISHED COPPER|15|WRAP JAR|1231.33| the slyly iron 332|frosted blush firebrick linen salmon|Manufacturer#2|Brand#24|STANDARD BRUSHED BRASS|14|SM CAN|1232.33|instru 333|chocolate tomato light chartreuse orange|Manufacturer#2|Brand#21|ECONOMY POLISHED COPPER|1|LG DRUM|1233.33|accordi 334|lawn wheat dodger forest firebrick|Manufacturer#3|Brand#31|ECONOMY PLATED BRASS|32|LG JAR|1234.33|deas. blithely express 335|dark cream plum goldenrod smoke|Manufacturer#3|Brand#32|SMALL BRUSHED COPPER|3|SM JAR|1235.33|ly ironic deposits h 336|maroon medium green tomato rosy|Manufacturer#2|Brand#25|PROMO ANODIZED TIN|3|WRAP JAR|1236.33|nto beans. 337|magenta lavender blush peru aquamarine|Manufacturer#2|Brand#23|MEDIUM BRUSHED COPPER|29|MED DRUM|1237.33|uests. carefully fin 338|turquoise honeydew dim saddle grey|Manufacturer#1|Brand#14|LARGE ANODIZED NICKEL|26|JUMBO JAR|1238.33|of th 339|saddle white red royal midnight|Manufacturer#2|Brand#23|PROMO BURNISHED COPPER|14|LG PACK|1239.33|across the slyly expre 340|sienna purple lawn coral navy|Manufacturer#2|Brand#23|STANDARD BRUSHED TIN|3|JUMBO BOX|1240.34|l ideas wake. quic 341|pale smoke drab spring burnished|Manufacturer#4|Brand#45|PROMO POLISHED COPPER|34|MED PKG|1241.34|ully regular a 342|purple indian burnished khaki puff|Manufacturer#4|Brand#43|LARGE BURNISHED NICKEL|37|JUMBO CAN|1242.34|riously final pac 343|blush thistle chartreuse tomato navy|Manufacturer#3|Brand#35|PROMO ANODIZED NICKEL|7|SM JAR|1243.34|s-- ironic foxes boost 344|lime chiffon sky magenta midnight|Manufacturer#4|Brand#45|LARGE ANODIZED TIN|39|SM BOX|1244.34| according to the pend 345|cyan frosted spring orange puff|Manufacturer#4|Brand#45|ECONOMY BURNISHED NICKEL|22|LG CAN|1245.34|un carefull 346|medium honeydew blush lace lemon|Manufacturer#5|Brand#51|ECONOMY BRUSHED COPPER|8|LG PKG|1246.34|benea 347|deep antique royal bisque rosy|Manufacturer#3|Brand#34|SMALL ANODIZED TIN|28|WRAP BAG|1247.34|r, even instruct 348|blush navajo peru chartreuse dim|Manufacturer#5|Brand#53|MEDIUM POLISHED STEEL|16|WRAP BOX|1248.34| pending 349|ghost black hot salmon lace|Manufacturer#2|Brand#24|ECONOMY BURNISHED NICKEL|12|MED JAR|1249.34|tes. unusual acco 350|peach thistle royal papaya cornsilk|Manufacturer#3|Brand#32|STANDARD ANODIZED NICKEL|25|WRAP CAN|1250.35|luffily p 351|slate pale misty sienna red|Manufacturer#3|Brand#32|STANDARD PLATED TIN|48|WRAP CAN|1251.35|sts integrate. 
bl 352|sandy almond peach ivory violet|Manufacturer#4|Brand#42|MEDIUM BURNISHED BRASS|44|JUMBO CAN|1252.35|lites ne 353|azure linen grey blanched dim|Manufacturer#3|Brand#34|PROMO BURNISHED TIN|43|WRAP JAR|1253.35|final packages use 354|frosted deep powder beige lemon|Manufacturer#4|Brand#42|PROMO BURNISHED STEEL|38|LG CAN|1254.35|d requests wake 355|mint goldenrod sandy burnished olive|Manufacturer#5|Brand#52|ECONOMY POLISHED NICKEL|16|JUMBO JAR|1255.35| slyly bold theodoli 356|hot pink peach lemon chartreuse|Manufacturer#1|Brand#14|STANDARD BRUSHED NICKEL|46|WRAP BAG|1256.35|according to the furio 357|frosted rosy powder floral mint|Manufacturer#4|Brand#42|STANDARD PLATED COPPER|19|SM DRUM|1257.35|s use at 358|purple maroon lavender cornsilk salmon|Manufacturer#2|Brand#24|STANDARD POLISHED BRASS|39|JUMBO PACK|1258.35|g grouches use slyly 359|pink medium olive rosy linen|Manufacturer#3|Brand#35|PROMO PLATED BRASS|19|LG DRUM|1259.35|ithely 360|midnight papaya violet peach cream|Manufacturer#3|Brand#35|SMALL POLISHED COPPER|14|WRAP BOX|1260.36|. bold 361|indian dim tan antique puff|Manufacturer#5|Brand#52|SMALL ANODIZED COPPER|42|WRAP DRUM|1261.36| dolphins. carefully r 362|sienna green ghost rose lawn|Manufacturer#1|Brand#13|ECONOMY POLISHED COPPER|9|JUMBO BAG|1262.36|accounts. furi 363|rosy grey blush dark lime|Manufacturer#2|Brand#22|MEDIUM ANODIZED BRASS|16|WRAP JAR|1263.36|ideas. regular ac 364|puff almond blanched drab misty|Manufacturer#4|Brand#43|LARGE BRUSHED NICKEL|21|MED PKG|1264.36|ach caref 365|olive blue navy frosted violet|Manufacturer#3|Brand#31|SMALL POLISHED STEEL|8|LG CAN|1265.36|efully regula 366|seashell antique beige thistle cream|Manufacturer#5|Brand#53|ECONOMY BURNISHED STEEL|46|WRAP DRUM|1266.36|unts cajole 367|black lavender dark cornflower grey|Manufacturer#2|Brand#23|STANDARD PLATED BRASS|2|JUMBO PACK|1267.36|ly about the tithe 368|cornsilk blush bisque hot cyan|Manufacturer#5|Brand#52|ECONOMY BRUSHED TIN|8|SM PACK|1268.36|t the even, 369|drab plum olive firebrick beige|Manufacturer#2|Brand#21|SMALL POLISHED BRASS|11|SM BOX|1269.36|ilent request 370|almond linen pale aquamarine ghost|Manufacturer#5|Brand#52|LARGE BRUSHED COPPER|46|JUMBO BOX|1270.37|ost quickly against 371|maroon dark beige forest drab|Manufacturer#4|Brand#45|ECONOMY ANODIZED COPPER|27|MED BAG|1271.37|o beans. even inst 372|black frosted aquamarine maroon orange|Manufacturer#3|Brand#32|LARGE ANODIZED STEEL|35|LG CAN|1272.37| regular accounts 373|beige blanched coral cornsilk sky|Manufacturer#5|Brand#53|STANDARD PLATED TIN|37|JUMBO PACK|1273.37| after the fl 374|orange lemon cream dim midnight|Manufacturer#1|Brand#11|LARGE PLATED STEEL|18|MED BOX|1274.37| packages. furiou 375|floral blush lawn puff brown|Manufacturer#1|Brand#15|SMALL POLISHED BRASS|16|JUMBO JAR|1275.37| ironic courts aga 376|lace drab wheat red tan|Manufacturer#4|Brand#44|MEDIUM BRUSHED STEEL|44|SM PKG|1276.37|blithe 377|green forest rose slate cornflower|Manufacturer#4|Brand#44|STANDARD BURNISHED COPPER|16|WRAP BOX|1277.37|ly regula 378|burnished lime green turquoise red|Manufacturer#3|Brand#32|SMALL PLATED COPPER|45|JUMBO PACK|1278.37| slyly careful, ironic 379|cream sienna sandy thistle medium|Manufacturer#3|Brand#33|ECONOMY POLISHED STEEL|5|SM CASE|1279.37|l have to sl 380|khaki powder royal maroon snow|Manufacturer#4|Brand#41|LARGE BRUSHED TIN|6|JUMBO DRUM|1280.38|tions. blithely ev 381|sienna peach wheat linen tomato|Manufacturer#5|Brand#53|ECONOMY ANODIZED COPPER|37|LG CASE|1281.38|ons. 
furiousl 382|honeydew ghost lace plum sky|Manufacturer#4|Brand#41|LARGE POLISHED BRASS|4|SM CAN|1282.38|blithely 383|seashell tan purple almond coral|Manufacturer#2|Brand#23|SMALL BRUSHED COPPER|45|SM PACK|1283.38|telet 384|papaya thistle rosy rose puff|Manufacturer#4|Brand#43|STANDARD BURNISHED TIN|2|WRAP CAN|1284.38|blithel 385|snow chocolate ivory lemon magenta|Manufacturer#1|Brand#12|STANDARD PLATED TIN|41|WRAP JAR|1285.38|nts wake 386|red blanched thistle bisque wheat|Manufacturer#1|Brand#13|STANDARD PLATED STEEL|4|SM BOX|1286.38|ests. final a 387|smoke white metallic sky firebrick|Manufacturer#3|Brand#34|LARGE BRUSHED COPPER|23|WRAP CASE|1287.38|gular w 388|lawn puff grey forest indian|Manufacturer#1|Brand#11|STANDARD POLISHED STEEL|20|LG PKG|1288.38|nt pinto beans after t 389|magenta ivory lawn antique turquoise|Manufacturer#1|Brand#11|PROMO BRUSHED TIN|16|MED PKG|1289.38|cial pinto beans amo 390|maroon magenta saddle linen lime|Manufacturer#5|Brand#54|SMALL ANODIZED COPPER|24|MED DRUM|1290.39|unts integrate 391|seashell cornsilk metallic royal sky|Manufacturer#4|Brand#41|SMALL PLATED STEEL|18|LG PACK|1291.39|ickly ironic f 392|medium plum powder blush yellow|Manufacturer#4|Brand#42|MEDIUM BRUSHED COPPER|21|SM BOX|1292.39|even request 393|blue coral gainsboro slate lawn|Manufacturer#3|Brand#34|STANDARD PLATED NICKEL|36|JUMBO CAN|1293.39|gular pi 394|chocolate royal cornflower papaya yellow|Manufacturer#1|Brand#11|MEDIUM POLISHED STEEL|33|MED JAR|1294.39|ely even excuses. 395|drab steel royal medium chartreuse|Manufacturer#3|Brand#32|MEDIUM BURNISHED BRASS|9|LG BOX|1295.39|sits haggle fluffily 396|medium seashell lavender almond tan|Manufacturer#3|Brand#35|SMALL BURNISHED COPPER|37|SM CASE|1296.39|ctions. silent packag 397|purple spring turquoise goldenrod grey|Manufacturer#1|Brand#15|LARGE ANODIZED COPPER|3|JUMBO JAR|1297.39|ly ironic pains. f 398|red salmon sandy white hot|Manufacturer#3|Brand#32|MEDIUM BRUSHED NICKEL|32|SM DRUM|1298.39|ickly sp 399|orange peru yellow blanched seashell|Manufacturer#2|Brand#21|LARGE BRUSHED BRASS|37|WRAP BAG|1299.39|nts sleep dog 400|rose blue smoke olive sandy|Manufacturer#2|Brand#21|ECONOMY POLISHED BRASS|32|SM BOX|1300.40| nag. 
quick 401|almond chiffon indian green dim|Manufacturer#2|Brand#21|ECONOMY POLISHED STEEL|10|SM PKG|1301.40|ideas cajole 402|black rosy ghost yellow ivory|Manufacturer#4|Brand#45|LARGE ANODIZED BRASS|16|SM PACK|1302.40|s above t 403|indian lemon linen almond tomato|Manufacturer#4|Brand#45|MEDIUM BURNISHED STEEL|19|JUMBO DRUM|1303.40|ic requ 404|maroon olive pink rose cornflower|Manufacturer#4|Brand#41|STANDARD ANODIZED NICKEL|9|SM CASE|1304.40|ckages integrate blit 405|ivory yellow peru almond cornsilk|Manufacturer#3|Brand#34|STANDARD BRUSHED BRASS|17|WRAP JAR|1305.40|e to maintain careful 406|thistle saddle white puff lemon|Manufacturer#2|Brand#21|STANDARD PLATED STEEL|43|JUMBO JAR|1306.40|ake foxes- 407|tomato turquoise dim powder ivory|Manufacturer#5|Brand#53|LARGE BRUSHED NICKEL|39|SM JAR|1307.40|quickl 408|indian metallic peach gainsboro pale|Manufacturer#2|Brand#24|PROMO POLISHED STEEL|4|MED PKG|1308.40|refully b 409|smoke forest metallic saddle hot|Manufacturer#4|Brand#44|SMALL PLATED NICKEL|40|MED CASE|1309.40|usly above 410|tomato sienna hot chartreuse dodger|Manufacturer#2|Brand#25|STANDARD PLATED NICKEL|36|JUMBO BOX|1310.41| ironic pa 411|lemon dark khaki antique slate|Manufacturer#1|Brand#12|ECONOMY BURNISHED STEEL|17|JUMBO DRUM|1311.41|o beans a 412|slate forest spring pink peru|Manufacturer#1|Brand#14|SMALL ANODIZED STEEL|33|WRAP PKG|1312.41|ccording to the fur 413|chiffon ivory lawn metallic beige|Manufacturer#3|Brand#33|PROMO PLATED STEEL|49|MED PKG|1313.41|r deposits wake. alw 414|pink brown purple puff snow|Manufacturer#4|Brand#41|SMALL BURNISHED STEEL|50|WRAP CASE|1314.41|efully. dolph 415|dark chocolate wheat ivory orchid|Manufacturer#2|Brand#22|MEDIUM ANODIZED COPPER|42|SM DRUM|1315.41|blithely blit 416|ghost misty chocolate peach green|Manufacturer#1|Brand#13|MEDIUM POLISHED STEEL|11|WRAP JAR|1316.41|yly pending deposit 417|gainsboro sky turquoise ghost mint|Manufacturer#1|Brand#14|MEDIUM PLATED TIN|31|LG BAG|1317.41| nag blithely: ex 418|lavender forest puff beige tan|Manufacturer#1|Brand#11|MEDIUM ANODIZED NICKEL|32|JUMBO JAR|1318.41| ironic deposits. requ 419|moccasin navy seashell turquoise sienna|Manufacturer#4|Brand#43|ECONOMY BRUSHED TIN|42|WRAP BAG|1319.41|boost slyly among t 420|chocolate cornsilk midnight beige floral|Manufacturer#2|Brand#23|PROMO BRUSHED TIN|15|LG JAR|1320.42|y. fluffi 421|white black burnished brown medium|Manufacturer#1|Brand#12|LARGE PLATED STEEL|36|WRAP PACK|1321.42|regular 422|honeydew smoke violet pink bisque|Manufacturer#3|Brand#33|STANDARD POLISHED TIN|16|LG CAN|1322.42| bold 423|bisque misty dark hot blush|Manufacturer#3|Brand#31|PROMO BRUSHED BRASS|2|SM JAR|1323.42|accou 424|dim almond turquoise beige royal|Manufacturer#3|Brand#34|SMALL BRUSHED TIN|9|LG CASE|1324.42|usly slyl 425|ivory wheat chiffon navy burlywood|Manufacturer#3|Brand#32|PROMO BRUSHED STEEL|23|JUMBO BOX|1325.42|ndenci 426|maroon linen sienna firebrick burnished|Manufacturer#1|Brand#14|STANDARD ANODIZED NICKEL|42|WRAP PACK|1326.42|furiously even pa 427|honeydew azure brown royal sky|Manufacturer#2|Brand#22|PROMO BURNISHED NICKEL|47|JUMBO CAN|1327.42|ts. express, unusual 428|beige spring floral wheat drab|Manufacturer#5|Brand#54|LARGE BURNISHED TIN|14|JUMBO CASE|1328.42|furiously ir 429|red firebrick smoke magenta moccasin|Manufacturer#4|Brand#45|ECONOMY BRUSHED COPPER|21|MED DRUM|1329.42| carefully even 430|snow mint deep brown goldenrod|Manufacturer#3|Brand#34|PROMO BURNISHED NICKEL|34|MED CAN|1330.43| sleep. 
slyly pend 431|grey smoke navy peach white|Manufacturer#4|Brand#45|MEDIUM POLISHED TIN|30|SM PACK|1331.43|iousl 432|white cornflower honeydew red ivory|Manufacturer#2|Brand#21|MEDIUM PLATED BRASS|6|MED JAR|1332.43|s nag furiously fluff 433|olive moccasin almond tomato plum|Manufacturer#1|Brand#12|STANDARD BURNISHED TIN|2|SM PACK|1333.43|ts haggle furiously 434|turquoise coral violet dim midnight|Manufacturer#5|Brand#51|ECONOMY BURNISHED COPPER|20|JUMBO BAG|1334.43|y against the c 435|coral misty brown grey hot|Manufacturer#3|Brand#35|ECONOMY ANODIZED BRASS|17|WRAP BAG|1335.43|carefully ironic pack 436|turquoise yellow dim purple antique|Manufacturer#1|Brand#14|LARGE POLISHED BRASS|50|WRAP CASE|1336.43| the regul 437|purple slate gainsboro powder magenta|Manufacturer#5|Brand#54|ECONOMY PLATED TIN|17|WRAP PACK|1337.43|e furiously 438|antique orchid cornflower puff almond|Manufacturer#1|Brand#15|LARGE BURNISHED TIN|31|LG DRUM|1338.43|epitaphs wake 439|bisque cyan rosy ivory blanched|Manufacturer#2|Brand#21|MEDIUM BURNISHED TIN|32|SM PKG|1339.43|uests wake slyly over 440|khaki turquoise violet maroon misty|Manufacturer#3|Brand#34|ECONOMY BURNISHED BRASS|22|MED CASE|1340.44|furiously above the 441|goldenrod royal slate grey burlywood|Manufacturer#3|Brand#35|PROMO BURNISHED STEEL|24|MED BOX|1341.44|s wake atop the ru 442|honeydew cornsilk powder salmon purple|Manufacturer#4|Brand#41|LARGE BURNISHED TIN|22|MED PACK|1342.44|refully al 443|chocolate burnished orchid mint royal|Manufacturer#4|Brand#44|STANDARD BURNISHED BRASS|10|SM BOX|1343.44|arefully. sl 444|ghost blanched forest khaki rose|Manufacturer#3|Brand#32|MEDIUM ANODIZED COPPER|42|LG JAR|1344.44|ickly special excuse 445|metallic ivory floral cornsilk burnished|Manufacturer#1|Brand#12|LARGE BURNISHED NICKEL|47|SM DRUM|1345.44|s about the re 446|sandy brown midnight tan bisque|Manufacturer#1|Brand#12|STANDARD BURNISHED NICKEL|11|WRAP CASE|1346.44|s sentiments affix 447|forest smoke aquamarine cyan dim|Manufacturer#5|Brand#53|STANDARD BURNISHED COPPER|22|LG JAR|1347.44|s theodolites above th 448|wheat tomato cyan lemon maroon|Manufacturer#1|Brand#13|PROMO POLISHED BRASS|31|LG CAN|1348.44| excuses affix silen 449|almond thistle cornsilk bisque blush|Manufacturer#2|Brand#25|MEDIUM BRUSHED BRASS|47|JUMBO BAG|1349.44|ular, unusual p 450|sky lace green pale lime|Manufacturer#4|Brand#43|MEDIUM BURNISHED TIN|23|SM CASE|1350.45|slyly 451|rosy ghost sky mint smoke|Manufacturer#4|Brand#42|PROMO PLATED TIN|28|WRAP DRUM|1351.45| detect blithely bl 452|saddle sandy cream sienna blue|Manufacturer#1|Brand#11|STANDARD ANODIZED STEEL|48|JUMBO BOX|1352.45|ic requests wake 453|lemon navy seashell khaki sienna|Manufacturer#3|Brand#34|ECONOMY ANODIZED TIN|20|LG CASE|1353.45|y above the pen 454|royal lime saddle magenta violet|Manufacturer#5|Brand#53|MEDIUM BURNISHED STEEL|32|LG BOX|1354.45|ual requests. 455|aquamarine blanched misty moccasin sky|Manufacturer#2|Brand#22|LARGE BRUSHED BRASS|18|MED JAR|1355.45|ongside of the careful 456|antique maroon olive turquoise lace|Manufacturer#5|Brand#53|PROMO POLISHED BRASS|40|SM PKG|1356.45|usual, 457|navy linen forest moccasin slate|Manufacturer#5|Brand#52|SMALL POLISHED TIN|46|SM CAN|1357.45|express accounts. sp 458|beige papaya grey ivory white|Manufacturer#2|Brand#22|PROMO BRUSHED COPPER|38|MED BAG|1358.45|structions sleep 459|beige slate magenta dim salmon|Manufacturer#2|Brand#23|MEDIUM ANODIZED NICKEL|27|SM PACK|1359.45| blithely. 
fur 460|maroon papaya orange spring light|Manufacturer#3|Brand#35|STANDARD ANODIZED BRASS|47|JUMBO DRUM|1360.46|he carefully 461|goldenrod dim wheat tan violet|Manufacturer#5|Brand#54|SMALL BRUSHED TIN|44|JUMBO CASE|1361.46|: slyly express pa 462|lime yellow sandy floral khaki|Manufacturer#5|Brand#54|MEDIUM BURNISHED BRASS|11|WRAP CAN|1362.46|s sleep quickly care 463|steel cream grey mint dark|Manufacturer#2|Brand#21|MEDIUM ANODIZED COPPER|48|MED PACK|1363.46|unts use fluffily 464|azure magenta lace smoke ghost|Manufacturer#4|Brand#42|LARGE BURNISHED NICKEL|34|LG BOX|1364.46|l decoys. close exc 465|smoke sky bisque ghost black|Manufacturer#4|Brand#42|SMALL POLISHED BRASS|12|MED JAR|1365.46|nts. slyly special ac 466|deep ghost cornflower ivory burnished|Manufacturer#3|Brand#33|PROMO POLISHED BRASS|12|LG PKG|1366.46|ly special pains. iron 467|cornflower lime midnight plum forest|Manufacturer#2|Brand#24|STANDARD BURNISHED STEEL|48|JUMBO PACK|1367.46|ependenc 468|spring grey azure khaki lace|Manufacturer#4|Brand#41|MEDIUM BURNISHED STEEL|24|SM PACK|1368.46|o the 469|burlywood dark steel sky blue|Manufacturer#1|Brand#13|MEDIUM POLISHED STEEL|43|WRAP PKG|1369.46|osits. slyly unusua 470|honeydew lime khaki saddle indian|Manufacturer#2|Brand#24|STANDARD BRUSHED TIN|38|JUMBO CASE|1370.47|ess deposits 471|goldenrod honeydew frosted almond chiffon|Manufacturer#1|Brand#11|LARGE BRUSHED STEEL|21|JUMBO BAG|1371.47|sits nag caref 472|dim misty lime purple cornsilk|Manufacturer#3|Brand#31|PROMO BRUSHED COPPER|24|SM JAR|1372.47| according 473|moccasin antique lace peach chiffon|Manufacturer#1|Brand#12|ECONOMY PLATED STEEL|2|MED DRUM|1373.47|e furious 474|papaya green royal burlywood saddle|Manufacturer#1|Brand#14|ECONOMY PLATED STEEL|45|LG PACK|1374.47|ously even reques 475|coral peru forest thistle khaki|Manufacturer#3|Brand#34|STANDARD ANODIZED NICKEL|30|MED PACK|1375.47|thely unusual th 476|cornflower deep turquoise slate sandy|Manufacturer#3|Brand#33|STANDARD POLISHED COPPER|33|WRAP PACK|1376.47| according to the blit 477|plum peru spring firebrick lavender|Manufacturer#1|Brand#11|ECONOMY ANODIZED COPPER|35|JUMBO BAG|1377.47|lent s 478|lemon snow coral lime seashell|Manufacturer#4|Brand#45|STANDARD POLISHED COPPER|11|MED CASE|1378.47|ly bold foxes ca 479|snow frosted slate magenta sky|Manufacturer#2|Brand#25|MEDIUM PLATED STEEL|35|SM BAG|1379.47| pending ac 480|saddle light black drab navajo|Manufacturer#5|Brand#54|MEDIUM ANODIZED STEEL|31|WRAP BOX|1380.48|eans. quickly 481|slate red firebrick beige aquamarine|Manufacturer#1|Brand#14|MEDIUM BURNISHED BRASS|17|JUMBO PACK|1381.48|s cajole som 482|burnished spring azure green chocolate|Manufacturer#5|Brand#53|MEDIUM POLISHED STEEL|11|SM DRUM|1382.48|deposits. fluffily b 483|deep white khaki floral cornflower|Manufacturer#1|Brand#15|MEDIUM ANODIZED COPPER|9|SM PKG|1383.48|usly final instru 484|bisque rosy olive peach chiffon|Manufacturer#1|Brand#15|MEDIUM POLISHED BRASS|34|SM CASE|1384.48|uffily final deposits. 485|orchid dark antique deep ivory|Manufacturer#3|Brand#31|PROMO PLATED STEEL|44|LG JAR|1385.48| requests. 
regularly r 486|tomato lime moccasin turquoise grey|Manufacturer#4|Brand#45|SMALL PLATED COPPER|15|MED PKG|1386.48|ent accounts among t 487|firebrick saddle chocolate peru ghost|Manufacturer#5|Brand#52|SMALL BRUSHED COPPER|43|WRAP PACK|1387.48|iously ruthles 488|forest puff drab beige blue|Manufacturer#2|Brand#21|PROMO PLATED BRASS|47|JUMBO JAR|1388.48| the slyly pending r 489|smoke green blush deep royal|Manufacturer#2|Brand#22|LARGE BURNISHED TIN|36|LG CAN|1389.48|unts. quickly bold id 490|yellow mint saddle dodger orchid|Manufacturer#4|Brand#41|SMALL PLATED NICKEL|11|MED BOX|1390.49|fluffily express packa 491|snow mint rose almond frosted|Manufacturer#3|Brand#31|SMALL POLISHED BRASS|42|MED JAR|1391.49| deposit 492|turquoise rose navajo deep chiffon|Manufacturer#4|Brand#42|MEDIUM BRUSHED COPPER|13|JUMBO DRUM|1392.49|yly slyly express th 493|blue gainsboro sky burnished puff|Manufacturer#4|Brand#45|MEDIUM BURNISHED BRASS|8|MED PKG|1393.49|uses. bold 494|navy pale frosted lawn spring|Manufacturer#4|Brand#42|STANDARD BRUSHED NICKEL|40|MED PKG|1394.49|y pending theodo 495|lemon burlywood chartreuse forest honeydew|Manufacturer#4|Brand#42|SMALL BRUSHED BRASS|28|MED PKG|1395.49|, ironic pac 496|orchid bisque antique ivory lavender|Manufacturer#1|Brand#12|STANDARD BRUSHED BRASS|11|SM BAG|1396.49|ealthily 497|hot powder dim cream metallic|Manufacturer#2|Brand#25|PROMO PLATED TIN|1|SM CAN|1397.49|l accounts sl 498|sandy sky gainsboro peach cornflower|Manufacturer#2|Brand#25|MEDIUM POLISHED STEEL|21|WRAP CASE|1398.49|lthily even co 499|lemon pale blue burnished white|Manufacturer#3|Brand#35|SMALL PLATED COPPER|38|JUMBO PKG|1399.49|furiously across 500|hot medium spring orange violet|Manufacturer#4|Brand#41|MEDIUM POLISHED STEEL|13|SM JAR|1400.50|regul 501|tomato navy midnight cyan chiffon|Manufacturer#4|Brand#45|SMALL BURNISHED COPPER|37|SM BAG|1401.50|press pinto b 502|maroon dark plum brown red|Manufacturer#2|Brand#21|SMALL PLATED TIN|6|JUMBO DRUM|1402.50|quests integrate 503|lavender cyan red lace light|Manufacturer#2|Brand#24|PROMO BRUSHED NICKEL|30|SM PKG|1403.50| the dep 504|pink tan papaya ivory lace|Manufacturer#2|Brand#24|PROMO BRUSHED STEEL|10|SM JAR|1404.50|refully after the fu 505|pink cornsilk antique mint forest|Manufacturer#4|Brand#42|STANDARD PLATED BRASS|32|WRAP JAR|1405.50|ly even 506|beige floral goldenrod lace almond|Manufacturer#1|Brand#14|PROMO BRUSHED NICKEL|30|WRAP PACK|1406.50|y ruthless, fi 507|red misty salmon ivory khaki|Manufacturer#4|Brand#43|SMALL ANODIZED BRASS|20|SM CASE|1407.50|ial foxes. furiously 508|pale peach maroon burlywood tan|Manufacturer#1|Brand#14|LARGE ANODIZED TIN|40|WRAP PKG|1408.50|ect according 509|tomato cornsilk chartreuse blue magenta|Manufacturer#1|Brand#13|MEDIUM BRUSHED NICKEL|30|MED CAN|1409.50|eposits. slyly 510|blush thistle orchid red lace|Manufacturer#4|Brand#43|SMALL PLATED NICKEL|25|JUMBO PACK|1410.51|al platelets. 
iro 511|red pale plum orchid moccasin|Manufacturer#1|Brand#15|STANDARD BRUSHED COPPER|2|LG JAR|1411.51|ss package 512|firebrick azure mint chocolate wheat|Manufacturer#5|Brand#51|PROMO ANODIZED COPPER|18|LG PACK|1412.51|dolphins cajo 513|mint thistle chiffon magenta salmon|Manufacturer#3|Brand#32|MEDIUM BRUSHED STEEL|10|WRAP CAN|1413.51| after th 514|rosy sienna purple light rose|Manufacturer#4|Brand#42|LARGE POLISHED BRASS|10|JUMBO BOX|1414.51|omise 515|cream navajo drab dark peru|Manufacturer#5|Brand#53|MEDIUM PLATED TIN|37|WRAP BOX|1415.51|ular asymptotes im 516|purple linen lace blanched snow|Manufacturer#1|Brand#12|SMALL BRUSHED NICKEL|28|LG CAN|1416.51|ly express 517|aquamarine rosy violet moccasin snow|Manufacturer#3|Brand#34|SMALL BURNISHED COPPER|30|WRAP CAN|1417.51|uses. 518|yellow tomato lawn rosy lemon|Manufacturer#4|Brand#43|PROMO BRUSHED BRASS|33|JUMBO DRUM|1418.51|n requests hag 519|violet medium dark red smoke|Manufacturer#3|Brand#32|LARGE BRUSHED BRASS|24|WRAP PACK|1419.51|xes are fluffily fluf 520|spring honeydew sky sandy almond|Manufacturer#1|Brand#12|PROMO BURNISHED BRASS|49|MED CASE|1420.52| foxes. 521|grey drab honeydew coral pale|Manufacturer#3|Brand#35|PROMO BRUSHED STEEL|21|JUMBO DRUM|1421.52|the blithe 522|frosted lawn violet turquoise coral|Manufacturer#3|Brand#32|STANDARD BRUSHED NICKEL|15|MED CASE|1422.52|deas. brave ac 523|light goldenrod honeydew indian lemon|Manufacturer#4|Brand#42|ECONOMY BRUSHED NICKEL|7|JUMBO JAR|1423.52|e blithe 524|burnished hot drab midnight tan|Manufacturer#1|Brand#14|SMALL ANODIZED STEEL|17|JUMBO PACK|1424.52|ly reg 525|plum midnight coral snow lemon|Manufacturer#3|Brand#33|STANDARD BURNISHED STEEL|47|JUMBO CAN|1425.52|ly except 526|dodger moccasin wheat puff lace|Manufacturer#5|Brand#53|PROMO ANODIZED BRASS|47|JUMBO PKG|1426.52|ronic, iro 527|moccasin light purple sky magenta|Manufacturer#3|Brand#32|MEDIUM POLISHED NICKEL|39|LG JAR|1427.52|counts wake abov 528|dim deep puff pink floral|Manufacturer#2|Brand#24|STANDARD PLATED STEEL|40|LG CAN|1428.52|ly ironic pl 529|powder tan brown royal blush|Manufacturer#3|Brand#31|MEDIUM PLATED NICKEL|33|SM PKG|1429.52|y along the 530|mint grey chiffon magenta saddle|Manufacturer#4|Brand#42|PROMO PLATED STEEL|45|JUMBO CASE|1430.53|kages about t 531|navajo hot forest puff olive|Manufacturer#2|Brand#22|ECONOMY POLISHED NICKEL|16|WRAP DRUM|1431.53| atta 532|spring wheat purple chiffon puff|Manufacturer#1|Brand#14|LARGE BRUSHED NICKEL|45|WRAP BAG|1432.53|s. blithely 533|cream lemon goldenrod rosy aquamarine|Manufacturer#2|Brand#24|PROMO PLATED STEEL|38|JUMBO PACK|1433.53|ng the blithely final 534|bisque saddle hot steel frosted|Manufacturer#5|Brand#53|STANDARD PLATED NICKEL|27|LG CASE|1434.53|rts poach over th 535|mint chocolate tomato peach sky|Manufacturer#5|Brand#53|STANDARD PLATED TIN|17|SM CASE|1435.53|he unusual idea 536|orchid snow thistle peru moccasin|Manufacturer#2|Brand#23|STANDARD PLATED STEEL|36|JUMBO BAG|1436.53|le slyl 537|hot orange red cornsilk goldenrod|Manufacturer#1|Brand#15|ECONOMY BRUSHED TIN|26|WRAP BOX|1437.53|fully regular platelet 538|rose black peach orchid cyan|Manufacturer#1|Brand#15|ECONOMY POLISHED STEEL|24|SM PACK|1438.53|ic epitap 539|dodger midnight salmon drab saddle|Manufacturer#3|Brand#34|MEDIUM BURNISHED TIN|4|SM DRUM|1439.53| deposits. 
fi 540|violet white steel chocolate burlywood|Manufacturer#1|Brand#14|MEDIUM PLATED NICKEL|7|LG PACK|1440.54|cial, pending account 541|magenta tan antique sky pale|Manufacturer#3|Brand#35|STANDARD ANODIZED NICKEL|34|LG JAR|1441.54|s are fluffily above t 542|light lace gainsboro coral lavender|Manufacturer#1|Brand#11|SMALL BRUSHED STEEL|40|WRAP CASE|1442.54|sits bo 543|sky powder saddle metallic moccasin|Manufacturer#5|Brand#54|ECONOMY POLISHED BRASS|4|JUMBO BOX|1443.54| cajole 544|peach red azure indian lavender|Manufacturer#2|Brand#22|STANDARD BURNISHED NICKEL|42|SM CASE|1444.54|leep slyl 545|purple peru ivory gainsboro peach|Manufacturer#3|Brand#31|LARGE PLATED NICKEL|14|MED JAR|1445.54|arefully regular packa 546|metallic grey black rosy papaya|Manufacturer#2|Brand#25|ECONOMY ANODIZED STEEL|8|LG CASE|1446.54|ly unusu 547|white turquoise olive cornflower pale|Manufacturer#4|Brand#43|MEDIUM BURNISHED COPPER|48|WRAP DRUM|1447.54|ial theodolites among 548|cyan lime lawn misty mint|Manufacturer#3|Brand#33|ECONOMY ANODIZED TIN|28|SM CAN|1448.54|gular 549|thistle green indian sandy purple|Manufacturer#5|Brand#53|SMALL BRUSHED BRASS|36|SM CAN|1449.54|ly unus 550|brown blush dark white mint|Manufacturer#4|Brand#44|STANDARD BRUSHED STEEL|27|SM JAR|1450.55|arefully iron 551|tan misty floral firebrick puff|Manufacturer#5|Brand#54|LARGE BURNISHED TIN|10|MED JAR|1451.55|dolites cajole furious 552|lime violet puff snow chiffon|Manufacturer#4|Brand#44|LARGE POLISHED BRASS|1|JUMBO CAN|1452.55|longside of the sl 553|gainsboro tomato lawn peru bisque|Manufacturer#2|Brand#23|SMALL POLISHED NICKEL|1|WRAP DRUM|1453.55|fily regular depen 554|blue medium royal blush ghost|Manufacturer#5|Brand#53|STANDARD BRUSHED STEEL|31|LG JAR|1454.55|ts? ironically 555|midnight honeydew brown beige slate|Manufacturer#4|Brand#42|PROMO BURNISHED TIN|49|SM JAR|1455.55|es hag 556|frosted antique aquamarine seashell powder|Manufacturer#2|Brand#23|SMALL BRUSHED BRASS|49|WRAP JAR|1456.55|ar theodolites ma 557|firebrick ghost steel green chartreuse|Manufacturer#3|Brand#35|LARGE BURNISHED TIN|8|JUMBO PKG|1457.55|ccounts im 558|blanched seashell navy black dim|Manufacturer#3|Brand#31|STANDARD PLATED BRASS|25|MED BOX|1458.55|eposits. slyly 559|wheat coral navajo cornflower dark|Manufacturer#3|Brand#31|LARGE PLATED STEEL|27|SM PKG|1459.55|stealthily express 560|tan indian lime olive dodger|Manufacturer#2|Brand#25|STANDARD BRUSHED COPPER|37|WRAP PKG|1460.56|e special pac 561|firebrick slate grey coral goldenrod|Manufacturer#1|Brand#14|SMALL POLISHED STEEL|44|JUMBO CASE|1461.56|-- ironic, iron 562|tan blue thistle forest cornsilk|Manufacturer#3|Brand#32|SMALL BURNISHED STEEL|16|MED PACK|1462.56| fluffi 563|blanched light indian goldenrod dim|Manufacturer#3|Brand#31|MEDIUM POLISHED STEEL|39|MED DRUM|1463.56|furiously bold, 564|drab bisque puff ghost floral|Manufacturer#2|Brand#22|LARGE ANODIZED BRASS|41|SM PKG|1464.56|blithely after the ev 565|coral mint sky rosy red|Manufacturer#1|Brand#14|PROMO BRUSHED STEEL|9|JUMBO JAR|1465.56| blithely unusual r 566|seashell dark yellow turquoise white|Manufacturer#2|Brand#25|ECONOMY BURNISHED TIN|19|LG PKG|1466.56|ounts wake across t 567|cornsilk lace white medium honeydew|Manufacturer#5|Brand#55|STANDARD BRUSHED STEEL|19|JUMBO BAG|1467.56|pending pinto 568|purple burlywood pale cornsilk chartreuse|Manufacturer#3|Brand#31|PROMO POLISHED STEEL|6|SM BOX|1468.56|ts. 
ideas along t 569|cornflower burlywood blue spring purple|Manufacturer#4|Brand#41|SMALL BRUSHED TIN|46|MED BOX|1469.56|y according to 570|cyan navajo turquoise forest firebrick|Manufacturer#5|Brand#51|ECONOMY BRUSHED COPPER|44|SM PKG|1470.57|le. s 571|medium orchid purple metallic ghost|Manufacturer#2|Brand#22|LARGE PLATED TIN|1|WRAP CAN|1471.57|ly ironic 572|navy plum lime goldenrod saddle|Manufacturer#4|Brand#43|LARGE BURNISHED COPPER|3|SM BOX|1472.57|requests use 573|dark midnight forest lace sandy|Manufacturer#2|Brand#23|SMALL PLATED TIN|42|WRAP PACK|1473.57|expres 574|bisque snow turquoise lace blush|Manufacturer#2|Brand#23|SMALL PLATED BRASS|36|SM PACK|1474.57|oss th 575|brown seashell hot lavender frosted|Manufacturer#1|Brand#14|SMALL BRUSHED NICKEL|10|MED CAN|1475.57|ns integrate a 576|puff lavender black yellow lawn|Manufacturer#3|Brand#32|PROMO BURNISHED TIN|27|SM CAN|1476.57| foxes. accounts bo 577|lime green dark misty chiffon|Manufacturer#2|Brand#22|LARGE BRUSHED STEEL|34|JUMBO BAG|1477.57|ding, fur 578|black lawn light plum blush|Manufacturer#4|Brand#41|MEDIUM ANODIZED NICKEL|9|WRAP DRUM|1478.57| furiously iron 579|spring brown peach bisque sienna|Manufacturer#3|Brand#31|STANDARD POLISHED STEEL|8|WRAP JAR|1479.57|ades. carefully 580|navajo chocolate lemon hot green|Manufacturer#1|Brand#12|LARGE BURNISHED COPPER|30|JUMBO PACK|1480.58|usly expre 581|frosted gainsboro chiffon lawn green|Manufacturer#2|Brand#25|ECONOMY ANODIZED BRASS|44|SM BAG|1481.58| furious foxes. silen 582|tan cyan peach rose salmon|Manufacturer#4|Brand#45|LARGE BRUSHED TIN|23|MED JAR|1482.58|y bold 583|turquoise burnished orchid firebrick blush|Manufacturer#3|Brand#33|LARGE BURNISHED NICKEL|35|JUMBO CAN|1483.58| sleep 584|goldenrod magenta papaya medium chiffon|Manufacturer#1|Brand#12|SMALL POLISHED COPPER|42|MED JAR|1484.58|lar, ironic ac 585|slate hot honeydew midnight burnished|Manufacturer#5|Brand#53|SMALL PLATED COPPER|7|JUMBO PKG|1485.58|sts haggle q 586|ivory rosy royal powder firebrick|Manufacturer#1|Brand#11|PROMO ANODIZED BRASS|11|LG DRUM|1486.58|ts haggle fluffily 587|blanched maroon blush burlywood salmon|Manufacturer#4|Brand#44|SMALL POLISHED STEEL|35|MED PKG|1487.58|c accounts. qui 588|cyan powder brown sandy burlywood|Manufacturer#5|Brand#55|SMALL PLATED COPPER|45|WRAP BAG|1488.58|ss foxes. even, ex 589|red orange salmon rosy thistle|Manufacturer#1|Brand#13|LARGE POLISHED BRASS|3|SM PKG|1489.58|cies. regul 590|azure burnished honeydew navy cyan|Manufacturer#4|Brand#42|LARGE POLISHED COPPER|38|WRAP PKG|1490.59| dependencies sho 591|sienna burlywood salmon navy green|Manufacturer#1|Brand#14|ECONOMY PLATED NICKEL|40|SM PKG|1491.59| the b 592|dodger burnished yellow wheat antique|Manufacturer#2|Brand#23|LARGE POLISHED STEEL|7|MED DRUM|1492.59|ide of the notorni 593|metallic sienna sandy spring frosted|Manufacturer#2|Brand#25|PROMO POLISHED STEEL|20|JUMBO CASE|1493.59|ake after 594|seashell magenta rosy cornsilk smoke|Manufacturer#3|Brand#31|PROMO BRUSHED BRASS|28|SM JAR|1494.59|s detect fu 595|chiffon lace linen steel bisque|Manufacturer#5|Brand#51|PROMO BRUSHED BRASS|23|JUMBO JAR|1495.59|yly fin 596|drab snow chartreuse dim ivory|Manufacturer#5|Brand#52|STANDARD POLISHED TIN|35|JUMBO JAR|1496.59|epend 597|cyan orchid grey tan bisque|Manufacturer#4|Brand#44|STANDARD ANODIZED NICKEL|36|SM DRUM|1497.59|ronic foxes. 
speci 598|deep royal drab violet chartreuse|Manufacturer#2|Brand#24|MEDIUM POLISHED BRASS|9|MED JAR|1498.59|he fluffily regular 599|cornsilk peru steel beige blue|Manufacturer#5|Brand#51|MEDIUM BRUSHED NICKEL|30|JUMBO PACK|1499.59|ckages against t 600|puff bisque midnight seashell burlywood|Manufacturer#4|Brand#42|STANDARD POLISHED NICKEL|4|SM JAR|1500.60|ow furiously agai 601|coral thistle blanched linen tan|Manufacturer#1|Brand#13|STANDARD POLISHED BRASS|13|SM PKG|1501.60|lithely thi 602|metallic smoke purple rosy magenta|Manufacturer#1|Brand#12|PROMO BRUSHED COPPER|8|SM BAG|1502.60|telets a 603|dim blush puff chiffon salmon|Manufacturer#1|Brand#12|PROMO ANODIZED COPPER|41|WRAP PKG|1503.60|uests. fluffily regul 604|bisque light frosted white olive|Manufacturer#4|Brand#41|MEDIUM PLATED STEEL|19|MED PACK|1504.60|are after the regula 605|grey burlywood linen tan tomato|Manufacturer#1|Brand#11|LARGE POLISHED STEEL|30|LG JAR|1505.60| bold, final accou 606|navajo lace sandy aquamarine light|Manufacturer#3|Brand#32|STANDARD PLATED STEEL|36|LG DRUM|1506.60|hins p 607|dark sienna thistle sandy maroon|Manufacturer#5|Brand#54|MEDIUM POLISHED TIN|13|MED JAR|1507.60|ctions wake qu 608|mint medium blush black metallic|Manufacturer#2|Brand#25|PROMO PLATED BRASS|35|JUMBO PKG|1508.60|xes. express r 609|almond lemon olive smoke moccasin|Manufacturer#2|Brand#21|ECONOMY BRUSHED TIN|49|JUMBO DRUM|1509.60|cial deposits haggle 610|plum purple seashell white lemon|Manufacturer#1|Brand#11|ECONOMY BURNISHED TIN|38|JUMBO BOX|1510.61|slyly pending re 611|light burnished linen sienna dim|Manufacturer#4|Brand#42|MEDIUM PLATED TIN|45|SM DRUM|1511.61|iously even packages. 612|midnight azure moccasin rosy lawn|Manufacturer#4|Brand#42|PROMO PLATED STEEL|19|LG BOX|1512.61|se quickly; 613|cream coral sandy thistle spring|Manufacturer#3|Brand#34|LARGE BURNISHED COPPER|22|LG DRUM|1513.61| cajo 614|dodger beige maroon puff medium|Manufacturer#2|Brand#23|STANDARD BURNISHED NICKEL|6|WRAP PKG|1514.61|ending accounts ca 615|rosy seashell brown blush tan|Manufacturer#4|Brand#41|PROMO ANODIZED COPPER|4|WRAP PKG|1515.61| dogged 616|tan powder lavender dim goldenrod|Manufacturer#2|Brand#25|STANDARD BRUSHED STEEL|1|WRAP JAR|1516.61|w packages cajole b 617|peru grey snow papaya firebrick|Manufacturer#2|Brand#22|MEDIUM ANODIZED NICKEL|37|WRAP DRUM|1517.61|furious 618|ivory wheat smoke deep lemon|Manufacturer#2|Brand#22|PROMO BRUSHED BRASS|9|LG BOX|1518.61|ogs. furiously 619|powder medium drab lemon tomato|Manufacturer#3|Brand#34|LARGE ANODIZED STEEL|46|MED JAR|1519.61|cuses are 620|frosted tomato almond salmon powder|Manufacturer#3|Brand#35|MEDIUM BRUSHED COPPER|4|LG PKG|1520.62|efully carefully speci 621|almond azure drab ghost mint|Manufacturer#5|Brand#53|PROMO PLATED TIN|47|JUMBO DRUM|1521.62|ffily furiously fin 622|burlywood dark blue thistle navajo|Manufacturer#5|Brand#51|LARGE POLISHED STEEL|19|WRAP BAG|1522.62|sly deposits. f 623|chiffon bisque steel orange tomato|Manufacturer#3|Brand#32|ECONOMY POLISHED NICKEL|8|MED BOX|1523.62|ly ironic 624|burlywood ghost orchid misty coral|Manufacturer#1|Brand#12|LARGE BURNISHED NICKEL|29|SM BAG|1524.62|dencies. 
even d 625|beige indian lime sienna plum|Manufacturer#3|Brand#35|SMALL PLATED COPPER|38|LG JAR|1525.62|blithely slow multipl 626|cream ivory brown peach honeydew|Manufacturer#1|Brand#12|SMALL ANODIZED COPPER|23|SM JAR|1526.62|furiously-- 627|drab yellow spring turquoise rose|Manufacturer#4|Brand#44|PROMO ANODIZED NICKEL|13|SM BAG|1527.62|y ironic 628|black mint olive drab medium|Manufacturer#3|Brand#33|ECONOMY BRUSHED BRASS|40|LG BOX|1528.62|sits. 629|magenta lace red hot pale|Manufacturer#5|Brand#51|MEDIUM POLISHED COPPER|25|WRAP BOX|1529.62|fully even co 630|lawn lace burnished hot frosted|Manufacturer#2|Brand#25|MEDIUM PLATED NICKEL|20|LG DRUM|1530.63|g to th 631|moccasin lawn cornsilk chartreuse midnight|Manufacturer#2|Brand#22|STANDARD ANODIZED NICKEL|34|WRAP BAG|1531.63|ely silen 632|blush floral tan thistle chiffon|Manufacturer#3|Brand#34|PROMO PLATED NICKEL|2|MED PACK|1532.63|y ironic instru 633|navy burnished wheat slate deep|Manufacturer#3|Brand#33|ECONOMY POLISHED NICKEL|24|MED PKG|1533.63|bold brai 634|grey aquamarine deep violet blush|Manufacturer#3|Brand#35|SMALL ANODIZED NICKEL|34|MED DRUM|1534.63|ions. 635|ghost aquamarine gainsboro lemon peach|Manufacturer#5|Brand#51|PROMO POLISHED STEEL|49|WRAP JAR|1535.63|y ironic excuses. 636|burlywood deep antique snow wheat|Manufacturer#5|Brand#55|PROMO BRUSHED STEEL|36|SM PACK|1536.63|ithely 637|brown smoke sandy honeydew antique|Manufacturer#5|Brand#51|LARGE BURNISHED NICKEL|48|JUMBO CASE|1537.63|oxes after the blithel 638|lace royal pink cornsilk antique|Manufacturer#5|Brand#54|SMALL PLATED TIN|37|JUMBO PACK|1538.63| carefully. blithely 639|black plum tomato cornflower medium|Manufacturer#2|Brand#25|STANDARD POLISHED BRASS|33|SM BAG|1539.63|s cajole quickly. care 640|grey seashell bisque indian deep|Manufacturer#1|Brand#12|MEDIUM BRUSHED TIN|20|WRAP CASE|1540.64|ly regular acco 641|mint saddle yellow sienna frosted|Manufacturer#5|Brand#55|MEDIUM PLATED COPPER|18|SM JAR|1541.64| bold instr 642|moccasin ghost sandy goldenrod cornsilk|Manufacturer#4|Brand#43|ECONOMY BRUSHED BRASS|12|MED PACK|1542.64| bold packages bey 643|frosted chocolate dodger honeydew ghost|Manufacturer#3|Brand#32|MEDIUM POLISHED STEEL|8|MED DRUM|1543.64|refully fina 644|rosy bisque hot burnished dark|Manufacturer#5|Brand#52|LARGE PLATED STEEL|34|SM PACK|1544.64|lphins. blithely 645|thistle sky antique khaki chartreuse|Manufacturer#4|Brand#45|MEDIUM BURNISHED NICKEL|45|LG CASE|1545.64| the theodol 646|tan honeydew lime light white|Manufacturer#5|Brand#52|SMALL POLISHED COPPER|21|LG PACK|1546.64|ajole according to t 647|rose khaki drab smoke peach|Manufacturer#3|Brand#35|LARGE BURNISHED STEEL|38|MED PKG|1547.64|ters. ironic pinto 648|bisque blue drab cyan almond|Manufacturer#3|Brand#34|SMALL BRUSHED STEEL|2|WRAP JAR|1548.64|eas. regular 649|lavender blush rosy beige sky|Manufacturer#1|Brand#12|STANDARD BURNISHED TIN|33|JUMBO DRUM|1549.64|fully! 650|bisque navajo mint medium seashell|Manufacturer#5|Brand#53|PROMO ANODIZED NICKEL|40|MED JAR|1550.65|ar dependen 651|bisque floral dim burlywood moccasin|Manufacturer#5|Brand#52|MEDIUM POLISHED BRASS|4|WRAP BAG|1551.65|efully final instruct 652|navajo white indian yellow grey|Manufacturer#1|Brand#12|LARGE BRUSHED TIN|39|WRAP PKG|1552.65|le furiously unusua 653|lime cornflower sky beige antique|Manufacturer#1|Brand#15|ECONOMY BURNISHED BRASS|28|JUMBO DRUM|1553.65|ias. 
i 654|cyan burnished tomato chiffon navy|Manufacturer#2|Brand#23|PROMO PLATED COPPER|2|WRAP BAG|1554.65|posits haggle 655|tomato firebrick yellow rosy orchid|Manufacturer#1|Brand#12|STANDARD BURNISHED TIN|23|JUMBO CAN|1555.65|ic instructions. 656|maroon dodger ghost gainsboro pink|Manufacturer#1|Brand#15|STANDARD BURNISHED COPPER|17|MED CASE|1556.65|tions wake ca 657|saddle lawn blue cyan cornsilk|Manufacturer#3|Brand#31|ECONOMY BURNISHED COPPER|36|LG PACK|1557.65| packages accord 658|ghost mint hot bisque salmon|Manufacturer#1|Brand#15|MEDIUM PLATED COPPER|27|SM JAR|1558.65| somas haggle quickly 659|black bisque plum pale cyan|Manufacturer#3|Brand#34|MEDIUM BRUSHED BRASS|20|LG JAR|1559.65|across the 660|sky dim thistle saddle pale|Manufacturer#5|Brand#51|STANDARD POLISHED TIN|8|SM CAN|1560.66|ages cajole fluffily 661|snow beige moccasin red lime|Manufacturer#3|Brand#32|PROMO PLATED STEEL|35|SM PKG|1561.66|ng asympt 662|ivory chocolate slate midnight ghost|Manufacturer#4|Brand#45|STANDARD PLATED STEEL|35|LG PACK|1562.66|e eve 663|ivory orchid azure frosted light|Manufacturer#4|Brand#41|PROMO POLISHED COPPER|6|WRAP PACK|1563.66| beans 664|frosted burnished tomato chiffon seashell|Manufacturer#4|Brand#43|MEDIUM BURNISHED TIN|26|LG BAG|1564.66|about the slyly s 665|rose antique cyan cornflower drab|Manufacturer#1|Brand#13|MEDIUM POLISHED STEEL|14|MED DRUM|1565.66|usly final excuse 666|ivory dim orchid antique spring|Manufacturer#1|Brand#13|ECONOMY BURNISHED NICKEL|20|MED PACK|1566.66|ronic theodolites ca 667|wheat ghost honeydew plum grey|Manufacturer#4|Brand#43|MEDIUM PLATED STEEL|42|MED JAR|1567.66|ly pending packages. 668|honeydew pink dodger cream dim|Manufacturer#2|Brand#22|SMALL ANODIZED NICKEL|10|LG PACK|1568.66|odolites. furio 669|khaki metallic plum smoke hot|Manufacturer#1|Brand#11|STANDARD BRUSHED TIN|37|MED BAG|1569.66|bold deposi 670|slate black smoke pale moccasin|Manufacturer#1|Brand#12|ECONOMY PLATED COPPER|24|LG DRUM|1570.67|es use fluffily unusu 671|powder metallic salmon slate chocolate|Manufacturer#5|Brand#53|SMALL PLATED STEEL|31|JUMBO BOX|1571.67|es. s 672|lawn rosy cornsilk floral misty|Manufacturer#2|Brand#23|PROMO BURNISHED STEEL|13|LG PACK|1572.67|, pen 673|navy azure sandy yellow sky|Manufacturer#2|Brand#23|MEDIUM ANODIZED COPPER|26|SM DRUM|1573.67|y final 674|tomato chartreuse cornflower green pale|Manufacturer#3|Brand#35|SMALL BRUSHED TIN|5|LG PKG|1574.67|unts use slyly bold 675|misty hot pale ghost yellow|Manufacturer#1|Brand#11|LARGE BRUSHED COPPER|2|SM CAN|1575.67|ely re 676|moccasin cornflower burlywood tomato light|Manufacturer#1|Brand#11|MEDIUM ANODIZED BRASS|45|SM CAN|1576.67|ges. enticingly ironi 677|pale steel blush mint cream|Manufacturer#4|Brand#44|PROMO ANODIZED BRASS|6|WRAP BAG|1577.67|accounts 678|salmon ivory red dodger spring|Manufacturer#4|Brand#44|PROMO BRUSHED TIN|15|SM BAG|1578.67|ronic pin 679|purple blanched linen metallic indian|Manufacturer#4|Brand#41|SMALL BURNISHED TIN|50|MED BOX|1579.67|iously ironic in 680|burnished ghost coral maroon yellow|Manufacturer#1|Brand#14|SMALL PLATED COPPER|44|LG PACK|1580.68|l depo 681|dark slate almond ghost chartreuse|Manufacturer#3|Brand#32|ECONOMY PLATED COPPER|9|WRAP CAN|1581.68|ic requests wake accor 682|lawn khaki green cornflower sienna|Manufacturer#5|Brand#53|LARGE POLISHED NICKEL|48|SM BOX|1582.68|ackages. 
683|sienna cornsilk chiffon olive blush|Manufacturer#5|Brand#53|LARGE BURNISHED NICKEL|1|MED PACK|1583.68|its hag 684|metallic azure hot orange spring|Manufacturer#3|Brand#33|PROMO BURNISHED TIN|6|WRAP BOX|1584.68|unts are fluff 685|turquoise orchid plum green tomato|Manufacturer#3|Brand#31|SMALL PLATED COPPER|36|LG JAR|1585.68|s. furiously ruthless 686|goldenrod deep cornflower dodger rose|Manufacturer#5|Brand#54|STANDARD BRUSHED COPPER|15|WRAP BAG|1586.68|lly ironic accounts 687|frosted cornsilk tomato burnished smoke|Manufacturer#5|Brand#51|LARGE POLISHED NICKEL|47|JUMBO BOX|1587.68|packages. even, 688|navajo khaki almond royal chartreuse|Manufacturer#4|Brand#45|PROMO BRUSHED COPPER|38|JUMBO CAN|1588.68|rious ideas 689|bisque red peru almond grey|Manufacturer#4|Brand#43|LARGE PLATED COPPER|22|SM PACK|1589.68|ual package 690|cornflower dodger saddle ivory tan|Manufacturer#3|Brand#32|SMALL PLATED BRASS|31|WRAP PACK|1590.69|al, bold fra 691|burnished dim gainsboro thistle blue|Manufacturer#3|Brand#35|PROMO BURNISHED BRASS|3|SM JAR|1591.69|ully slyly unusu 692|puff beige smoke seashell sienna|Manufacturer#1|Brand#14|MEDIUM BRUSHED BRASS|20|SM DRUM|1592.69|tructions. ev 693|honeydew ghost azure yellow magenta|Manufacturer#5|Brand#54|SMALL PLATED TIN|14|WRAP CASE|1593.69|lets sle 694|pink maroon blanched beige cyan|Manufacturer#3|Brand#35|ECONOMY PLATED STEEL|35|JUMBO CASE|1594.69|es. final 695|deep peru lavender antique royal|Manufacturer#2|Brand#23|PROMO ANODIZED COPPER|19|MED DRUM|1595.69|eep blithely 696|forest lemon cream black pink|Manufacturer#3|Brand#33|MEDIUM ANODIZED BRASS|3|LG CAN|1596.69|al acco 697|dark white slate honeydew maroon|Manufacturer#4|Brand#41|LARGE PLATED TIN|8|MED CAN|1597.69| special pa 698|blush sienna honeydew yellow goldenrod|Manufacturer#4|Brand#42|ECONOMY ANODIZED COPPER|44|LG PACK|1598.69|ronic, ironic 699|plum orchid red linen misty|Manufacturer#1|Brand#11|MEDIUM PLATED STEEL|5|WRAP CAN|1599.69| regular notornis b 700|lace hot khaki steel orchid|Manufacturer#3|Brand#34|PROMO BRUSHED STEEL|31|SM JAR|1600.70|ly even foxes. fi 701|frosted lavender black deep ghost|Manufacturer#4|Brand#43|MEDIUM ANODIZED COPPER|36|WRAP DRUM|1601.70| use blithe 702|navajo lavender dim puff bisque|Manufacturer#4|Brand#41|STANDARD BURNISHED BRASS|26|SM CAN|1602.70|yly f 703|puff floral red linen dark|Manufacturer#3|Brand#32|ECONOMY POLISHED TIN|20|WRAP CASE|1603.70|nstructions wake alon 704|salmon lawn chocolate lace honeydew|Manufacturer#3|Brand#34|LARGE ANODIZED BRASS|23|MED CAN|1604.70|l, ironic 705|snow drab lawn dark tan|Manufacturer#3|Brand#33|ECONOMY PLATED TIN|47|LG PKG|1605.70|ironic 706|cream white navajo frosted puff|Manufacturer#3|Brand#33|LARGE PLATED BRASS|6|SM PKG|1606.70|quests. i 707|salmon khaki misty deep peru|Manufacturer#1|Brand#11|SMALL POLISHED BRASS|17|SM BAG|1607.70|osits about the fluf 708|brown midnight plum violet tomato|Manufacturer#3|Brand#32|SMALL BRUSHED NICKEL|29|SM DRUM|1608.70|ully final instru 709|tomato sienna cornflower black misty|Manufacturer#4|Brand#43|SMALL BRUSHED TIN|16|SM BAG|1609.70|ns. 
blithely i 710|chartreuse thistle midnight magenta violet|Manufacturer#1|Brand#15|SMALL POLISHED TIN|27|JUMBO PACK|1610.71|fluffily unu 711|goldenrod sienna linen steel floral|Manufacturer#1|Brand#11|PROMO BRUSHED NICKEL|24|LG DRUM|1611.71|s hang slyly regula 712|blanched rose royal tomato hot|Manufacturer#3|Brand#33|ECONOMY BRUSHED COPPER|25|SM CASE|1612.71|even, fluffy 713|gainsboro plum powder seashell lawn|Manufacturer#2|Brand#25|SMALL ANODIZED COPPER|20|SM CAN|1613.71|onic de 714|chartreuse medium gainsboro honeydew saddle|Manufacturer#1|Brand#11|MEDIUM POLISHED NICKEL|35|SM JAR|1614.71|ions. furiously final 715|deep yellow coral sienna white|Manufacturer#2|Brand#24|LARGE BRUSHED TIN|28|WRAP BAG|1615.71|ons boost. furiou 716|grey floral sienna cyan gainsboro|Manufacturer#1|Brand#12|SMALL POLISHED TIN|37|JUMBO CAN|1616.71|osits! packages hag 717|papaya turquoise spring midnight medium|Manufacturer#2|Brand#22|STANDARD PLATED TIN|28|JUMBO JAR|1617.71| final notornis na 718|khaki navy saddle ghost orchid|Manufacturer#3|Brand#35|PROMO BURNISHED COPPER|24|SM CAN|1618.71|ests. final, exp 719|chocolate honeydew khaki aquamarine white|Manufacturer#3|Brand#35|STANDARD BRUSHED NICKEL|49|JUMBO BAG|1619.71|ect. furiously 720|light khaki dodger dark lawn|Manufacturer#4|Brand#45|SMALL BURNISHED NICKEL|8|JUMBO BAG|1620.72|as. furiously 721|gainsboro chocolate maroon hot sienna|Manufacturer#5|Brand#52|STANDARD ANODIZED STEEL|33|MED PKG|1621.72|ly special instruct 722|forest chartreuse plum royal papaya|Manufacturer#4|Brand#43|ECONOMY BURNISHED NICKEL|43|JUMBO BAG|1622.72|ic packages. fin 723|slate magenta lemon peru pink|Manufacturer#3|Brand#31|LARGE BRUSHED BRASS|34|WRAP CASE|1623.72|unts. excu 724|rosy coral thistle ghost papaya|Manufacturer#3|Brand#35|PROMO POLISHED COPPER|28|JUMBO BOX|1624.72| carefully pend 725|hot blanched salmon slate burlywood|Manufacturer#4|Brand#43|LARGE PLATED NICKEL|11|JUMBO BOX|1625.72|r theo 726|cream aquamarine violet medium midnight|Manufacturer#4|Brand#43|PROMO POLISHED NICKEL|41|JUMBO BAG|1626.72|arefully? carefully i 727|royal pink smoke bisque lime|Manufacturer#3|Brand#35|PROMO PLATED BRASS|35|SM PKG|1627.72| about the furio 728|cornsilk grey sienna blue sky|Manufacturer#3|Brand#33|ECONOMY PLATED TIN|27|LG JAR|1628.72|fily regular package 729|papaya cornsilk gainsboro rose pink|Manufacturer#3|Brand#35|ECONOMY BRUSHED TIN|6|LG JAR|1629.72|egular accou 730|rose blush saddle cyan hot|Manufacturer#4|Brand#45|MEDIUM PLATED TIN|8|SM PKG|1630.73|ironic 731|firebrick indian forest rose khaki|Manufacturer#3|Brand#33|PROMO BURNISHED NICKEL|34|MED CASE|1631.73|snooze above the 732|violet frosted red papaya drab|Manufacturer#2|Brand#21|MEDIUM BRUSHED BRASS|1|SM BAG|1632.73|. sauternes haggl 733|burnished goldenrod royal plum midnight|Manufacturer#4|Brand#43|PROMO ANODIZED STEEL|1|MED CASE|1633.73| special instruct 734|lace light bisque dodger aquamarine|Manufacturer#2|Brand#21|MEDIUM BRUSHED STEEL|34|WRAP DRUM|1634.73|quests 735|wheat white honeydew almond coral|Manufacturer#4|Brand#42|SMALL BRUSHED STEEL|44|SM CAN|1635.73|kages are according 736|slate honeydew pink magenta lace|Manufacturer#3|Brand#32|PROMO ANODIZED BRASS|44|JUMBO DRUM|1636.73|telets wake carefu 737|orange navajo blanched cyan snow|Manufacturer#5|Brand#53|PROMO POLISHED NICKEL|25|SM BAG|1637.73|re slyly. bold, regul 738|peru floral sky steel maroon|Manufacturer#2|Brand#21|ECONOMY BURNISHED COPPER|29|JUMBO CAN|1638.73|ckages wake. 
slyly fin 739|powder lavender wheat pale puff|Manufacturer#3|Brand#31|ECONOMY BRUSHED COPPER|24|JUMBO BOX|1639.73|the regular 740|almond aquamarine mint misty red|Manufacturer#3|Brand#35|STANDARD POLISHED COPPER|7|WRAP PACK|1640.74| even dep 741|thistle seashell cyan cornsilk indian|Manufacturer#4|Brand#44|STANDARD PLATED TIN|33|WRAP PACK|1641.74|oxes slee 742|dodger cream snow pink floral|Manufacturer#5|Brand#51|STANDARD BRUSHED NICKEL|30|SM PACK|1642.74|out the furiously 743|navajo midnight peru orange salmon|Manufacturer#4|Brand#42|LARGE ANODIZED COPPER|4|MED JAR|1643.74|al packages ar 744|burlywood deep hot turquoise chartreuse|Manufacturer#5|Brand#53|MEDIUM BURNISHED COPPER|47|MED PKG|1644.74|uriously brave ideas 745|seashell dark lime midnight puff|Manufacturer#3|Brand#31|PROMO POLISHED COPPER|25|JUMBO BOX|1645.74| silent 746|honeydew brown sienna red chocolate|Manufacturer#2|Brand#23|ECONOMY ANODIZED STEEL|31|MED CAN|1646.74|osits affix furiously 747|lace red hot mint goldenrod|Manufacturer#1|Brand#11|LARGE BURNISHED NICKEL|20|WRAP PKG|1647.74|deposits b 748|forest indian snow grey peru|Manufacturer#5|Brand#54|STANDARD BURNISHED BRASS|32|SM CAN|1648.74|arefully bo 749|snow goldenrod puff violet mint|Manufacturer#2|Brand#24|SMALL PLATED COPPER|16|MED JAR|1649.74| bold re 750|puff slate magenta powder lawn|Manufacturer#4|Brand#43|LARGE POLISHED NICKEL|47|MED PACK|1650.75|tipliers about the q 751|steel blush indian rosy pink|Manufacturer#1|Brand#11|SMALL POLISHED TIN|17|SM DRUM|1651.75|uests haggle bl 752|medium blue midnight misty frosted|Manufacturer#5|Brand#53|MEDIUM ANODIZED COPPER|1|MED PACK|1652.75|lites mold b 753|blanched plum navajo beige indian|Manufacturer#2|Brand#25|STANDARD ANODIZED TIN|13|LG DRUM|1653.75|nal foxes. quickly f 754|thistle steel sky light red|Manufacturer#5|Brand#51|SMALL ANODIZED STEEL|42|WRAP JAR|1654.75|es. blithely dogge 755|spring dodger floral metallic moccasin|Manufacturer#2|Brand#22|STANDARD PLATED TIN|12|WRAP PACK|1655.75| wake. even packages a 756|ghost antique snow cream cyan|Manufacturer#5|Brand#52|STANDARD PLATED BRASS|44|MED BAG|1656.75|rious 757|papaya dim slate saddle navajo|Manufacturer#3|Brand#35|ECONOMY POLISHED NICKEL|12|JUMBO CASE|1657.75|the quickly 758|yellow orchid dim cyan burlywood|Manufacturer#1|Brand#11|ECONOMY ANODIZED TIN|25|MED BOX|1658.75|haggle 759|yellow powder navajo maroon chartreuse|Manufacturer#3|Brand#34|LARGE BURNISHED TIN|20|JUMBO CAN|1659.75|struction 760|orange navy hot aquamarine sienna|Manufacturer#4|Brand#44|MEDIUM POLISHED COPPER|6|LG JAR|1660.76|jole according 761|seashell green dodger beige linen|Manufacturer#3|Brand#34|ECONOMY ANODIZED TIN|7|SM JAR|1661.76|ages according 762|peach grey firebrick dim smoke|Manufacturer#1|Brand#15|LARGE POLISHED TIN|35|MED PKG|1662.76|xpress ideas. fluff 763|wheat seashell azure chartreuse dodger|Manufacturer#4|Brand#44|LARGE BRUSHED TIN|50|SM PKG|1663.76|counts. regu 764|cyan moccasin blanched light purple|Manufacturer#1|Brand#13|SMALL ANODIZED NICKEL|11|LG JAR|1664.76|es. final, bold 765|thistle red smoke chartreuse orange|Manufacturer#3|Brand#35|STANDARD BRUSHED BRASS|3|WRAP BAG|1665.76|ly regular pint 766|midnight sienna orange gainsboro black|Manufacturer#2|Brand#24|MEDIUM BURNISHED NICKEL|20|MED PACK|1666.76|inal ideas. asy 767|blush firebrick misty blanched purple|Manufacturer#2|Brand#24|LARGE POLISHED TIN|50|MED DRUM|1667.76|ts. 
carefully unu 768|maroon gainsboro seashell hot sandy|Manufacturer#4|Brand#43|LARGE BRUSHED COPPER|41|SM DRUM|1668.76|olites haggle: car 769|tomato royal firebrick turquoise cream|Manufacturer#2|Brand#24|MEDIUM BRUSHED TIN|8|LG CASE|1669.76|ake carefull 770|lemon yellow coral deep lime|Manufacturer#2|Brand#25|LARGE BRUSHED TIN|7|SM PACK|1670.77|ave ideas. 771|plum maroon lavender tan firebrick|Manufacturer#1|Brand#15|SMALL BURNISHED TIN|17|LG DRUM|1671.77| cajole slyly fina 772|dodger firebrick peach ivory seashell|Manufacturer#3|Brand#35|SMALL PLATED BRASS|33|MED PKG|1672.77|dolites haggle sp 773|saddle medium beige purple plum|Manufacturer#3|Brand#32|PROMO BURNISHED STEEL|16|LG CASE|1673.77| final, sp 774|wheat chiffon cyan misty moccasin|Manufacturer#2|Brand#24|STANDARD ANODIZED NICKEL|26|WRAP CASE|1674.77|ly express dependen 775|papaya misty orchid snow metallic|Manufacturer#1|Brand#14|LARGE PLATED TIN|2|MED PACK|1675.77|azzle carefully 776|powder indian dodger hot lemon|Manufacturer#5|Brand#51|STANDARD ANODIZED BRASS|21|WRAP DRUM|1676.77|egular orbits haggl 777|blanched indian pink frosted grey|Manufacturer#5|Brand#53|SMALL ANODIZED STEEL|50|JUMBO JAR|1677.77| theodolites 778|saddle mint navy cyan cornflower|Manufacturer#3|Brand#32|PROMO BURNISHED STEEL|20|WRAP BOX|1678.77|nic re 779|salmon burnished orange rose cornsilk|Manufacturer#2|Brand#22|ECONOMY BRUSHED STEEL|26|WRAP DRUM|1679.77| regular epitaphs are 780|rosy chocolate tan moccasin salmon|Manufacturer#5|Brand#55|MEDIUM ANODIZED STEEL|31|MED CASE|1680.78|t quickly sly 781|light orchid purple black navy|Manufacturer#1|Brand#13|ECONOMY PLATED STEEL|22|LG DRUM|1681.78|gular, regu 782|peru firebrick coral chartreuse rosy|Manufacturer#1|Brand#13|LARGE POLISHED BRASS|29|SM BAG|1682.78|hely u 783|cyan turquoise azure pink dark|Manufacturer#1|Brand#13|SMALL PLATED NICKEL|4|MED DRUM|1683.78|ss acc 784|light smoke seashell snow chartreuse|Manufacturer#1|Brand#12|MEDIUM BRUSHED COPPER|47|LG DRUM|1684.78|unts. furiously 785|coral saddle indian lime frosted|Manufacturer#3|Brand#33|PROMO ANODIZED TIN|41|SM PACK|1685.78|use carefully 786|royal azure ivory moccasin salmon|Manufacturer#2|Brand#23|STANDARD PLATED NICKEL|20|SM PACK|1686.78|eodoli 787|pale metallic ivory peach slate|Manufacturer#5|Brand#55|SMALL ANODIZED NICKEL|35|WRAP PKG|1687.78|es alongside of 788|floral dark dodger chartreuse lavender|Manufacturer#3|Brand#34|LARGE PLATED STEEL|10|LG JAR|1688.78|s solv 789|dodger spring antique lace papaya|Manufacturer#3|Brand#35|PROMO BURNISHED STEEL|14|LG PACK|1689.78|ts hagg 790|lavender yellow pink puff olive|Manufacturer#4|Brand#42|PROMO POLISHED COPPER|35|JUMBO BOX|1690.79|s are 791|indian blush medium thistle lime|Manufacturer#2|Brand#24|STANDARD ANODIZED COPPER|24|MED PACK|1691.79| special pinto bean 792|plum indian cornflower frosted purple|Manufacturer#1|Brand#13|SMALL BURNISHED BRASS|8|MED PKG|1692.79|efully unusual deposi 793|white chiffon blue green violet|Manufacturer#5|Brand#51|LARGE POLISHED NICKEL|28|JUMBO CAN|1693.79|tions. 
furiou 794|ivory peach light thistle antique|Manufacturer#5|Brand#51|PROMO PLATED COPPER|47|MED PKG|1694.79|le bl 795|cream royal light yellow hot|Manufacturer#5|Brand#54|MEDIUM POLISHED TIN|46|MED JAR|1695.79|ully regula 796|beige frosted cyan hot puff|Manufacturer#5|Brand#51|ECONOMY BRUSHED STEEL|50|WRAP CAN|1696.79|yly fina 797|violet peach puff orange white|Manufacturer#4|Brand#41|STANDARD BURNISHED STEEL|10|WRAP BAG|1697.79|ments 798|turquoise indian white bisque chartreuse|Manufacturer#1|Brand#12|ECONOMY PLATED NICKEL|18|LG JAR|1698.79|ffily even excuses 799|green azure rose aquamarine floral|Manufacturer#3|Brand#35|PROMO POLISHED TIN|7|MED JAR|1699.79|he slyly brave excuses 800|maroon mint medium lace plum|Manufacturer#1|Brand#11|PROMO BRUSHED BRASS|29|WRAP BAG|1700.80|s sleep about the car 801|linen steel salmon beige lemon|Manufacturer#5|Brand#54|LARGE BRUSHED NICKEL|18|JUMBO DRUM|1701.80|latelets. slyly fi 802|rosy ghost cyan puff dark|Manufacturer#3|Brand#34|ECONOMY PLATED BRASS|41|WRAP JAR|1702.80|equest 803|brown navy tan salmon honeydew|Manufacturer#5|Brand#52|SMALL ANODIZED TIN|50|MED PKG|1703.80|ly at the accou 804|bisque grey honeydew goldenrod ghost|Manufacturer#2|Brand#23|SMALL POLISHED BRASS|9|SM BAG|1704.80|itaphs sle 805|tan ghost cyan salmon goldenrod|Manufacturer#5|Brand#52|STANDARD PLATED BRASS|8|JUMBO PKG|1705.80|t blithe 806|blue violet yellow khaki azure|Manufacturer#3|Brand#32|PROMO BURNISHED TIN|45|LG BAG|1706.80| ironic theodolites a 807|turquoise snow navy brown lime|Manufacturer#2|Brand#24|LARGE ANODIZED NICKEL|25|WRAP JAR|1707.80| quietly express pi 808|rosy indian sky frosted blush|Manufacturer#5|Brand#51|SMALL BRUSHED TIN|36|MED PKG|1708.80| pending asymptotes a 809|almond steel maroon chiffon frosted|Manufacturer#3|Brand#32|STANDARD BRUSHED COPPER|17|SM BAG|1709.80|n pla 810|blanched lemon magenta medium dark|Manufacturer#5|Brand#54|LARGE ANODIZED TIN|13|WRAP BOX|1710.81|instructions. 811|ivory bisque black chiffon gainsboro|Manufacturer#3|Brand#33|PROMO BRUSHED NICKEL|32|SM BOX|1711.81|pecial, ironic pac 812|firebrick olive chartreuse frosted ivory|Manufacturer#3|Brand#33|LARGE PLATED TIN|11|WRAP BAG|1712.81|asymptot 813|sandy ghost khaki frosted goldenrod|Manufacturer#4|Brand#43|MEDIUM POLISHED STEEL|2|WRAP DRUM|1713.81|riously ironic deposi 814|burnished seashell floral moccasin antique|Manufacturer#5|Brand#54|ECONOMY PLATED NICKEL|43|MED PKG|1714.81|xpress requests. 
815|light peach saddle medium firebrick|Manufacturer#2|Brand#22|ECONOMY BRUSHED NICKEL|5|LG PACK|1715.81|nt requests a 816|thistle ivory pink olive puff|Manufacturer#1|Brand#13|STANDARD POLISHED NICKEL|35|LG CAN|1716.81| thin 817|azure chocolate lavender blanched burnished|Manufacturer#4|Brand#41|STANDARD ANODIZED TIN|36|JUMBO PKG|1717.81|carefu 818|navajo saddle indian cornflower steel|Manufacturer#4|Brand#43|PROMO BRUSHED BRASS|36|MED JAR|1718.81|ckages 819|light magenta indian khaki lavender|Manufacturer#5|Brand#54|ECONOMY BRUSHED STEEL|27|SM CAN|1719.81|as are slyly against 820|wheat salmon forest lavender papaya|Manufacturer#1|Brand#11|STANDARD BURNISHED STEEL|20|WRAP JAR|1720.82|ely regular theodo 821|smoke drab rose hot spring|Manufacturer#4|Brand#42|PROMO POLISHED TIN|15|MED BOX|1721.82|gle across t 822|purple lawn ivory black slate|Manufacturer#5|Brand#52|PROMO ANODIZED BRASS|21|LG CASE|1722.82|ding to t 823|maroon salmon puff medium brown|Manufacturer#2|Brand#25|SMALL BURNISHED COPPER|21|WRAP CASE|1723.82|inal the 824|dodger snow orange almond magenta|Manufacturer#1|Brand#15|LARGE BURNISHED TIN|34|MED BAG|1724.82|s. ironic 825|puff deep hot tomato powder|Manufacturer#1|Brand#13|PROMO PLATED TIN|47|LG CAN|1725.82|lar ide 826|ghost rosy beige salmon lemon|Manufacturer#3|Brand#34|ECONOMY PLATED TIN|21|SM CASE|1726.82|ifts could 827|linen ghost smoke blanched cream|Manufacturer#1|Brand#13|PROMO POLISHED COPPER|15|JUMBO PACK|1727.82|sits; depe 828|lemon light grey burlywood drab|Manufacturer#4|Brand#44|LARGE BRUSHED NICKEL|11|LG PKG|1728.82|symptotes. furiously 829|cream indian deep linen antique|Manufacturer#3|Brand#35|MEDIUM BURNISHED BRASS|33|SM JAR|1729.82|tegrate. 830|cornflower medium salmon coral magenta|Manufacturer#5|Brand#54|SMALL POLISHED TIN|37|JUMBO CAN|1730.83|ep final instructi 831|violet dark midnight lawn cream|Manufacturer#5|Brand#51|STANDARD POLISHED COPPER|8|LG CAN|1731.83|se blithely. special, 832|orange peach midnight burnished lavender|Manufacturer#4|Brand#44|STANDARD ANODIZED COPPER|3|WRAP BAG|1732.83|fily bo 833|thistle puff cream papaya deep|Manufacturer#4|Brand#41|SMALL BRUSHED STEEL|34|JUMBO CAN|1733.83|s thrash furiously fin 834|aquamarine frosted orange dodger khaki|Manufacturer#5|Brand#52|MEDIUM POLISHED TIN|43|JUMBO CAN|1734.83|mptotes integrate 835|turquoise peru light aquamarine dark|Manufacturer#5|Brand#53|MEDIUM PLATED STEEL|21|SM BOX|1735.83|uses. furi 836|almond sky red lime smoke|Manufacturer#3|Brand#31|LARGE BURNISHED TIN|32|SM JAR|1736.83|lyly bold accounts. t 837|papaya goldenrod burlywood purple brown|Manufacturer#1|Brand#13|MEDIUM BURNISHED BRASS|27|WRAP PKG|1737.83|hely furious packages 838|lime peach puff papaya olive|Manufacturer#2|Brand#22|MEDIUM ANODIZED TIN|9|WRAP BAG|1738.83| final pac 839|cornflower white papaya violet navajo|Manufacturer#2|Brand#25|LARGE BRUSHED NICKEL|19|JUMBO CAN|1739.83|ggle a 840|khaki mint chartreuse dark frosted|Manufacturer#4|Brand#44|ECONOMY ANODIZED BRASS|42|WRAP PKG|1740.84|ending ideas w 841|floral khaki grey metallic firebrick|Manufacturer#1|Brand#15|MEDIUM BURNISHED BRASS|48|WRAP BOX|1741.84|y regul 842|gainsboro dim navajo chartreuse olive|Manufacturer#1|Brand#11|SMALL ANODIZED BRASS|4|MED PACK|1742.84|en accounts! 
carefu 843|frosted metallic mint lawn blanched|Manufacturer#1|Brand#15|MEDIUM POLISHED STEEL|50|LG PACK|1743.84| the 844|honeydew smoke lemon ghost navajo|Manufacturer#3|Brand#35|LARGE PLATED BRASS|22|SM CASE|1744.84|ix furiously beyond 845|papaya burlywood bisque linen navy|Manufacturer#4|Brand#41|LARGE PLATED STEEL|25|LG JAR|1745.84|ully regula 846|purple papaya saddle cream medium|Manufacturer#2|Brand#24|ECONOMY POLISHED NICKEL|6|SM BAG|1746.84|sometimes bold foxes 847|chartreuse pink azure tan slate|Manufacturer#2|Brand#25|SMALL PLATED STEEL|21|SM PACK|1747.84|ely blith 848|orange olive puff midnight almond|Manufacturer#5|Brand#53|LARGE BURNISHED TIN|26|LG JAR|1748.84|gular requests u 849|lawn cornflower puff rosy saddle|Manufacturer#4|Brand#44|STANDARD POLISHED TIN|41|MED DRUM|1749.84|nts among the pending 850|peach goldenrod honeydew moccasin sienna|Manufacturer#4|Brand#41|STANDARD BRUSHED NICKEL|31|JUMBO DRUM|1750.85|kages are carefu 851|maroon beige navy forest honeydew|Manufacturer#5|Brand#54|ECONOMY BURNISHED COPPER|30|LG CAN|1751.85| asympto 852|lemon slate khaki misty hot|Manufacturer#2|Brand#24|PROMO PLATED NICKEL|47|JUMBO BOX|1752.85|side of the 853|peach honeydew cyan peru light|Manufacturer#5|Brand#51|SMALL ANODIZED NICKEL|42|JUMBO CASE|1753.85|ts sleep! 854|pale wheat lace midnight papaya|Manufacturer#4|Brand#41|SMALL POLISHED COPPER|29|JUMBO CASE|1754.85|ests cajole furious 855|dim blue slate dodger yellow|Manufacturer#3|Brand#31|STANDARD BURNISHED TIN|46|MED BOX|1755.85|ly express fox 856|blush frosted powder antique blanched|Manufacturer#1|Brand#13|MEDIUM BRUSHED STEEL|12|MED BAG|1756.85|posits integrate sly 857|goldenrod beige lemon midnight cornflower|Manufacturer#3|Brand#35|PROMO PLATED TIN|8|MED PACK|1757.85|final de 858|papaya maroon hot blue pink|Manufacturer#3|Brand#33|ECONOMY BURNISHED COPPER|14|SM CAN|1758.85|elets breach f 859|ghost blue indian cornsilk drab|Manufacturer#3|Brand#35|ECONOMY ANODIZED COPPER|10|WRAP BAG|1759.85| even Tiresias wake fu 860|burlywood peach papaya violet midnight|Manufacturer#3|Brand#32|SMALL POLISHED STEEL|36|LG PKG|1760.86|oss the f 861|blue sienna navy coral violet|Manufacturer#4|Brand#43|ECONOMY BRUSHED COPPER|48|JUMBO BOX|1761.86| furiousl 862|tan dim chiffon steel purple|Manufacturer#5|Brand#54|LARGE POLISHED NICKEL|12|LG PKG|1762.86| pending depo 863|light khaki lime lemon burnished|Manufacturer#2|Brand#25|STANDARD ANODIZED COPPER|41|LG CAN|1763.86| regular pinto beans 864|linen drab blush maroon sienna|Manufacturer#1|Brand#11|STANDARD POLISHED TIN|39|JUMBO PACK|1764.86|boost. 865|brown seashell red bisque sandy|Manufacturer#1|Brand#11|SMALL ANODIZED BRASS|12|JUMBO PACK|1765.86|s along t 866|dodger antique tan coral honeydew|Manufacturer#1|Brand#11|STANDARD PLATED COPPER|13|WRAP BOX|1766.86|eas sleep quickly 867|snow spring black white burnished|Manufacturer#5|Brand#54|PROMO POLISHED NICKEL|4|LG BOX|1767.86|nts wake care 868|navajo azure beige aquamarine blush|Manufacturer#3|Brand#33|PROMO BRUSHED BRASS|48|JUMBO PKG|1768.86|packages. 
e 869|snow blush violet lace ghost|Manufacturer#2|Brand#24|STANDARD ANODIZED COPPER|28|LG BAG|1769.86|y fina 870|antique hot snow burnished ghost|Manufacturer#5|Brand#54|SMALL PLATED NICKEL|11|JUMBO CAN|1770.87|ldly unusual depth 871|midnight dark coral ivory burlywood|Manufacturer#5|Brand#55|SMALL ANODIZED BRASS|27|SM PKG|1771.87|olites snooze quickl 872|papaya frosted cornflower green almond|Manufacturer#1|Brand#15|MEDIUM PLATED STEEL|41|LG CASE|1772.87|gular inst 873|goldenrod maroon snow cream indian|Manufacturer#1|Brand#11|MEDIUM BRUSHED BRASS|45|MED CAN|1773.87|ecial ideas. slyly 874|drab slate hot black khaki|Manufacturer#1|Brand#15|MEDIUM PLATED TIN|9|MED BOX|1774.87|e regular req 875|pale peru red orchid almond|Manufacturer#2|Brand#24|ECONOMY BRUSHED COPPER|1|MED DRUM|1775.87|e furiously. b 876|frosted lace turquoise sky sandy|Manufacturer#3|Brand#31|MEDIUM PLATED NICKEL|25|SM CAN|1776.87|s after the fur 877|lime violet tomato drab blush|Manufacturer#3|Brand#31|STANDARD ANODIZED NICKEL|20|MED PACK|1777.87|thely rut 878|steel lace wheat orchid linen|Manufacturer#3|Brand#32|STANDARD POLISHED NICKEL|40|JUMBO CASE|1778.87| slyly ironic g 879|spring red indian floral sky|Manufacturer#3|Brand#34|STANDARD BURNISHED TIN|23|MED PKG|1779.87|ar, even request 880|sandy turquoise cream firebrick rose|Manufacturer#2|Brand#25|STANDARD ANODIZED COPPER|10|LG CASE|1780.88|ly stealthy deposits 881|bisque frosted khaki linen royal|Manufacturer#2|Brand#21|LARGE BRUSHED BRASS|12|MED PACK|1781.88|ully for 882|chiffon royal lime almond midnight|Manufacturer#4|Brand#44|ECONOMY BRUSHED STEEL|12|MED BAG|1782.88| silent, pendi 883|smoke grey dark yellow brown|Manufacturer#3|Brand#31|STANDARD PLATED NICKEL|18|LG PACK|1783.88|fluffily unusual pint 884|peach medium goldenrod cyan ghost|Manufacturer#1|Brand#11|SMALL POLISHED NICKEL|38|WRAP BAG|1784.88|luffy accounts. 885|dim peach moccasin snow floral|Manufacturer#4|Brand#42|ECONOMY BRUSHED STEEL|31|WRAP DRUM|1785.88|lar, even ideas caj 886|turquoise dodger lemon antique green|Manufacturer#5|Brand#55|LARGE BRUSHED COPPER|46|MED BAG|1786.88|tornis haggle! 887|navajo cream salmon orange smoke|Manufacturer#4|Brand#41|SMALL BRUSHED STEEL|48|JUMBO CAN|1787.88|x slyly aroun 888|pale turquoise rose olive chiffon|Manufacturer#3|Brand#31|STANDARD BRUSHED NICKEL|44|SM DRUM|1788.88|y according t 889|saddle midnight almond drab blanched|Manufacturer#5|Brand#51|LARGE BURNISHED BRASS|19|JUMBO DRUM|1789.88|efully regular 890|sandy lawn chartreuse peru blue|Manufacturer#5|Brand#52|SMALL BRUSHED STEEL|23|MED PKG|1790.89|ests d 891|plum floral dodger lemon almond|Manufacturer#4|Brand#42|ECONOMY POLISHED STEEL|5|MED CASE|1791.89|yly regular, expr 892|thistle chocolate sandy powder drab|Manufacturer#2|Brand#22|SMALL BRUSHED NICKEL|3|LG DRUM|1792.89|quickly regular r 893|dark moccasin bisque cyan azure|Manufacturer#1|Brand#14|PROMO BRUSHED STEEL|9|LG JAR|1793.89|ly regular 894|chocolate blue smoke azure sandy|Manufacturer#3|Brand#35|SMALL BURNISHED TIN|43|MED BAG|1794.89|s. 
carefully expr 895|thistle azure magenta mint burnished|Manufacturer#4|Brand#41|MEDIUM BRUSHED STEEL|17|SM BOX|1795.89|eaves might 896|peach ivory rose honeydew lace|Manufacturer#4|Brand#43|PROMO ANODIZED TIN|7|LG CASE|1796.89|e across 897|misty olive purple cream red|Manufacturer#4|Brand#41|SMALL BRUSHED COPPER|26|SM JAR|1797.89|cajole bli 898|olive burlywood royal cream drab|Manufacturer#4|Brand#45|PROMO POLISHED NICKEL|8|WRAP BAG|1798.89|e against the pack 899|gainsboro dim misty violet coral|Manufacturer#2|Brand#25|SMALL ANODIZED NICKEL|50|WRAP CASE|1799.89|lithely pending excuse 900|medium antique khaki cream slate|Manufacturer#2|Brand#21|STANDARD PLATED NICKEL|40|LG CAN|1800.90|foxes. carefully s 901|white honeydew orange saddle black|Manufacturer#2|Brand#23|ECONOMY PLATED TIN|38|LG CAN|1801.90|ffily final theodoli 902|red chartreuse firebrick burlywood papaya|Manufacturer#2|Brand#23|LARGE ANODIZED COPPER|14|MED CAN|1802.90|inal pint 903|navajo chiffon mint ghost burlywood|Manufacturer#4|Brand#44|ECONOMY BURNISHED BRASS|4|SM BAG|1803.90|the i 904|black olive light sandy linen|Manufacturer#1|Brand#12|STANDARD ANODIZED TIN|20|WRAP DRUM|1804.90|ar theodolites sleep b 905|burlywood honeydew cornsilk indian green|Manufacturer#2|Brand#25|PROMO BRUSHED BRASS|27|MED CASE|1805.90|es. furiously ir 906|rosy forest navajo light turquoise|Manufacturer#4|Brand#44|MEDIUM BURNISHED COPPER|45|MED DRUM|1806.90|unusua 907|plum lawn black chartreuse sandy|Manufacturer#5|Brand#52|ECONOMY ANODIZED STEEL|8|LG CASE|1807.90|es. carefu 908|pink navy lace papaya brown|Manufacturer#2|Brand#25|MEDIUM BURNISHED TIN|13|JUMBO JAR|1808.90|tions ca 909|cornsilk tomato chiffon bisque navy|Manufacturer#1|Brand#12|STANDARD PLATED TIN|40|WRAP PKG|1809.90|ht boost quic 910|coral antique peach navy violet|Manufacturer#4|Brand#42|STANDARD ANODIZED TIN|15|JUMBO JAR|1810.91|ly final 911|blush coral medium ivory snow|Manufacturer#1|Brand#14|ECONOMY BURNISHED TIN|23|MED CAN|1811.91|foxes cajole after the 912|indian tomato rose powder turquoise|Manufacturer#5|Brand#55|STANDARD POLISHED NICKEL|17|SM BAG|1812.91|ar theodo 913|seashell medium burnished moccasin blue|Manufacturer#2|Brand#23|LARGE PLATED BRASS|49|SM DRUM|1813.91|yly even dol 914|snow rose moccasin tomato linen|Manufacturer#3|Brand#35|ECONOMY POLISHED STEEL|8|LG PKG|1814.91|ckly busil 915|aquamarine purple spring gainsboro salmon|Manufacturer#2|Brand#21|LARGE ANODIZED BRASS|25|WRAP BAG|1815.91|depos 916|chartreuse tomato rose chocolate magenta|Manufacturer#2|Brand#21|MEDIUM BURNISHED BRASS|28|MED CAN|1816.91|ons. bravely ironic 917|midnight khaki medium linen peru|Manufacturer#2|Brand#24|SMALL ANODIZED COPPER|3|SM CASE|1817.91|riously past th 918|ghost steel beige turquoise ivory|Manufacturer#5|Brand#55|ECONOMY PLATED TIN|15|LG PKG|1818.91|losely according to 919|sandy cream royal cyan orchid|Manufacturer#1|Brand#13|PROMO BRUSHED COPPER|49|MED BOX|1819.91|along 920|sky lemon misty slate lawn|Manufacturer#4|Brand#44|MEDIUM BRUSHED STEEL|19|WRAP CAN|1820.92|y regu 921|khaki yellow plum cyan forest|Manufacturer#1|Brand#11|LARGE BURNISHED STEEL|35|JUMBO DRUM|1821.92|pecial in 922|frosted dodger cyan medium indian|Manufacturer#3|Brand#34|LARGE BURNISHED BRASS|30|SM CASE|1822.92|uctions. 
sl 923|moccasin lace rose navajo forest|Manufacturer#2|Brand#24|STANDARD PLATED TIN|26|WRAP CASE|1823.92|he packag 924|indian smoke orange ghost papaya|Manufacturer#1|Brand#11|ECONOMY POLISHED STEEL|41|JUMBO PKG|1824.92|the ac 925|tomato antique plum snow burlywood|Manufacturer#4|Brand#44|STANDARD POLISHED NICKEL|44|MED PACK|1825.92|aggle slyly. blithe 926|tan deep medium smoke slate|Manufacturer#4|Brand#45|STANDARD BRUSHED TIN|39|MED JAR|1826.92|gle quickly ironic 927|dodger purple green peach navajo|Manufacturer#1|Brand#14|LARGE ANODIZED TIN|13|WRAP BAG|1827.92|otes snooz 928|violet dodger cyan orange moccasin|Manufacturer#4|Brand#43|SMALL ANODIZED TIN|43|SM BOX|1828.92|ully ev 929|burnished pale medium turquoise lace|Manufacturer#3|Brand#33|LARGE PLATED COPPER|40|MED DRUM|1829.92|hely iron 930|violet ivory orchid rose frosted|Manufacturer#2|Brand#23|SMALL BRUSHED BRASS|26|LG DRUM|1830.93|uickly ironic 931|red puff chiffon sienna dark|Manufacturer#3|Brand#31|STANDARD ANODIZED NICKEL|37|LG CAN|1831.93|are fluffily. 932|cornflower sky powder cyan linen|Manufacturer#2|Brand#24|SMALL ANODIZED COPPER|12|LG BOX|1832.93|ly regular depos 933|pink magenta frosted blanched lime|Manufacturer#3|Brand#33|STANDARD BRUSHED STEEL|25|LG CASE|1833.93|he blit 934|beige saddle magenta cyan dim|Manufacturer#4|Brand#45|STANDARD ANODIZED BRASS|40|MED JAR|1834.93|ual packages 935|royal cornflower maroon khaki cornsilk|Manufacturer#5|Brand#53|STANDARD BRUSHED STEEL|8|JUMBO DRUM|1835.93|arefully ironic 936|white beige firebrick dodger olive|Manufacturer#2|Brand#24|MEDIUM PLATED BRASS|6|WRAP DRUM|1836.93|longside of the reg 937|drab peru royal pink peach|Manufacturer#3|Brand#33|PROMO ANODIZED TIN|2|JUMBO CAN|1837.93|ithely 938|chartreuse peach spring cornflower tan|Manufacturer#2|Brand#22|LARGE BRUSHED NICKEL|1|JUMBO CASE|1838.93|ajole slyly r 939|salmon magenta orange coral aquamarine|Manufacturer#1|Brand#14|PROMO BURNISHED BRASS|41|WRAP PACK|1839.93|ages wake doggedly of 940|violet coral blanched orange light|Manufacturer#1|Brand#15|PROMO POLISHED TIN|49|JUMBO JAR|1840.94|ate blithely bold, si 941|lace cornsilk pink medium slate|Manufacturer#2|Brand#25|MEDIUM PLATED NICKEL|8|LG PKG|1841.94|fully 942|coral peach bisque green seashell|Manufacturer#3|Brand#35|PROMO POLISHED TIN|44|MED BAG|1842.94|nic pinto b 943|smoke sky chiffon green steel|Manufacturer#5|Brand#55|ECONOMY BRUSHED BRASS|46|SM PKG|1843.94|s wake 944|medium sandy olive wheat turquoise|Manufacturer#3|Brand#33|SMALL PLATED STEEL|1|LG CAN|1844.94| requests nag furiou 945|chiffon steel peach black moccasin|Manufacturer#5|Brand#52|MEDIUM PLATED NICKEL|22|MED JAR|1845.94|uffily abo 946|medium drab rosy spring salmon|Manufacturer#2|Brand#25|MEDIUM PLATED TIN|30|LG BOX|1846.94|y along 947|black tan salmon maroon chartreuse|Manufacturer#1|Brand#13|MEDIUM BURNISHED STEEL|16|JUMBO JAR|1847.94|usual accounts. 
b 948|gainsboro chocolate bisque sandy steel|Manufacturer#2|Brand#22|MEDIUM ANODIZED BRASS|31|WRAP PKG|1848.94|furious 949|thistle burnished pink yellow wheat|Manufacturer#5|Brand#51|LARGE BURNISHED TIN|47|LG PKG|1849.94|l theodolit 950|steel purple midnight beige coral|Manufacturer#3|Brand#31|SMALL ANODIZED STEEL|26|WRAP CASE|1850.95|kages cajole quickly 951|gainsboro indian aquamarine lemon blush|Manufacturer#2|Brand#21|STANDARD BURNISHED STEEL|40|JUMBO JAR|1851.95| final deposits nag a 952|frosted purple sky steel papaya|Manufacturer#3|Brand#31|MEDIUM BRUSHED TIN|4|MED PACK|1852.95|l deposits sleep bli 953|tomato cornflower honeydew sandy grey|Manufacturer#4|Brand#41|PROMO ANODIZED TIN|2|WRAP JAR|1853.95|ly quickly final acco 954|turquoise bisque khaki coral antique|Manufacturer#5|Brand#55|ECONOMY ANODIZED STEEL|35|JUMBO CASE|1854.95|furious 955|blue olive tan rosy navajo|Manufacturer#3|Brand#32|PROMO BURNISHED STEEL|43|SM CASE|1855.95|tions 956|orchid yellow dark lawn thistle|Manufacturer#1|Brand#13|STANDARD BRUSHED STEEL|7|LG CASE|1856.95|furiously pending d 957|snow turquoise coral lime goldenrod|Manufacturer#3|Brand#33|SMALL ANODIZED COPPER|23|SM CAN|1857.95|ndencies wake s 958|turquoise sandy drab dodger cyan|Manufacturer#3|Brand#35|LARGE PLATED NICKEL|13|LG BOX|1858.95|quickly careful i 959|sky snow peach olive spring|Manufacturer#4|Brand#41|SMALL BURNISHED STEEL|5|WRAP JAR|1859.95|iously bold depos 960|sienna royal light orchid moccasin|Manufacturer#5|Brand#55|LARGE PLATED COPPER|26|LG CASE|1860.96|lly b 961|cornflower chocolate floral burnished green|Manufacturer#1|Brand#15|PROMO BRUSHED BRASS|1|SM PACK|1861.96|fully. f 962|magenta linen ghost almond floral|Manufacturer#1|Brand#13|MEDIUM PLATED BRASS|38|JUMBO BOX|1862.96|lithely. 963|violet beige lime dark metallic|Manufacturer#4|Brand#44|MEDIUM PLATED BRASS|44|LG CASE|1863.96|y final deposits wake. 964|sky orange indian pale burlywood|Manufacturer#5|Brand#54|SMALL POLISHED BRASS|48|MED BOX|1864.96|nal asymptotes. regul 965|dim cyan bisque black smoke|Manufacturer#3|Brand#34|MEDIUM PLATED STEEL|39|LG PKG|1865.96|dolite 966|turquoise antique brown violet plum|Manufacturer#4|Brand#42|PROMO BURNISHED TIN|24|MED CAN|1866.96| the carefully 967|cyan snow azure slate papaya|Manufacturer#5|Brand#55|SMALL BURNISHED COPPER|16|JUMBO CAN|1867.96|leep fur 968|rose chiffon chocolate lace papaya|Manufacturer#1|Brand#11|LARGE PLATED TIN|23|LG PKG|1868.96|press accounts acro 969|hot azure royal red dark|Manufacturer#5|Brand#55|PROMO POLISHED STEEL|34|JUMBO CASE|1869.96|le aga 970|sienna azure light medium green|Manufacturer#1|Brand#11|STANDARD BURNISHED STEEL|44|SM CAN|1870.97|ironic 971|orange misty green aquamarine forest|Manufacturer#5|Brand#54|LARGE POLISHED BRASS|19|SM DRUM|1871.97|ugouts. bl 972|tomato burlywood pale moccasin plum|Manufacturer#3|Brand#32|MEDIUM PLATED BRASS|21|JUMBO JAR|1872.97|yly ironic deposits 973|medium lavender blue tomato slate|Manufacturer#2|Brand#22|STANDARD ANODIZED STEEL|42|LG PACK|1873.97|along 974|almond steel slate sky rose|Manufacturer#4|Brand#44|MEDIUM ANODIZED BRASS|35|WRAP BAG|1874.97|s. 
caref 975|moccasin tomato peach yellow drab|Manufacturer#3|Brand#34|MEDIUM ANODIZED COPPER|22|LG JAR|1875.97|torni 976|white maroon firebrick snow light|Manufacturer#5|Brand#51|STANDARD BRUSHED NICKEL|19|JUMBO PACK|1876.97| special reque 977|frosted tomato deep burnished indian|Manufacturer#5|Brand#53|STANDARD BRUSHED STEEL|2|LG CASE|1877.97|the quick 978|cream tomato rose floral steel|Manufacturer#4|Brand#45|MEDIUM POLISHED TIN|46|LG DRUM|1878.97|quests sleep abo 979|blush cyan azure snow sandy|Manufacturer#4|Brand#45|STANDARD ANODIZED TIN|4|MED CAN|1879.97| accounts. blithe 980|saddle burlywood drab deep red|Manufacturer#4|Brand#41|SMALL ANODIZED STEEL|45|LG CASE|1880.98|ronic deposi 981|floral blanched papaya khaki light|Manufacturer#2|Brand#25|MEDIUM POLISHED COPPER|31|WRAP CAN|1881.98| ironic as 982|drab steel linen puff rosy|Manufacturer#3|Brand#31|SMALL POLISHED BRASS|28|SM BAG|1882.98|boost blithely 983|violet mint moccasin black olive|Manufacturer#4|Brand#43|ECONOMY POLISHED TIN|39|MED BOX|1883.98| pack 984|tan frosted antique midnight thistle|Manufacturer#2|Brand#22|LARGE PLATED TIN|40|SM BAG|1884.98|ly. furiously pe 985|drab rosy orange peach medium|Manufacturer#5|Brand#53|MEDIUM BURNISHED STEEL|10|JUMBO PKG|1885.98|s beside 986|forest purple lawn yellow azure|Manufacturer#3|Brand#33|PROMO BRUSHED BRASS|50|JUMBO DRUM|1886.98|ronic foxes 987|seashell pink linen salmon khaki|Manufacturer#5|Brand#53|PROMO POLISHED BRASS|39|LG BOX|1887.98|furiou 988|rose saddle linen peach cream|Manufacturer#2|Brand#24|STANDARD BURNISHED COPPER|27|WRAP PKG|1888.98|special requests sleep 989|misty ivory plum tan steel|Manufacturer#5|Brand#55|LARGE PLATED TIN|9|JUMBO JAR|1889.98|lets. e 990|azure cornsilk indian floral aquamarine|Manufacturer#1|Brand#14|SMALL BURNISHED COPPER|14|SM BAG|1890.99|quick 991|indian yellow red lime seashell|Manufacturer#3|Brand#32|MEDIUM POLISHED BRASS|9|SM CASE|1891.99|ickly slowly spe 992|hot peru beige magenta metallic|Manufacturer#5|Brand#51|PROMO BURNISHED COPPER|39|JUMBO BOX|1892.99| haggle 993|chiffon papaya lavender tomato spring|Manufacturer#5|Brand#54|MEDIUM BRUSHED NICKEL|2|MED JAR|1893.99|bold pear 994|almond saddle papaya seashell burlywood|Manufacturer#2|Brand#25|PROMO ANODIZED STEEL|38|JUMBO BOX|1894.99|ost slyly. boldly bo 995|papaya violet navy khaki sky|Manufacturer#3|Brand#34|ECONOMY BRUSHED TIN|47|MED DRUM|1895.99|ke furiously fluffily 996|slate pink navajo orange firebrick|Manufacturer#3|Brand#31|STANDARD POLISHED COPPER|13|LG CAN|1896.99|ously 997|khaki lawn rose drab cornsilk|Manufacturer#5|Brand#51|STANDARD ANODIZED BRASS|22|JUMBO BAG|1897.99|riously final 998|ivory maroon cream red peru|Manufacturer#5|Brand#53|STANDARD BRUSHED NICKEL|2|MED BOX|1898.99|s. bold requests wake 999|indian honeydew chartreuse navy cyan|Manufacturer#4|Brand#44|STANDARD PLATED STEEL|16|WRAP CAN|1899.99|ans. slyly expre 1000|wheat frosted chiffon aquamarine saddle|Manufacturer#2|Brand#24|ECONOMY BRUSHED NICKEL|10|SM DRUM|901.00|g fluf citus-7.0.3/src/test/regress/data/part.more.data000066400000000000000000003467231317107136600215670ustar00rootroot000000000000006001|black lavender tomato brown violet|Manufacturer#3|Brand#32|STANDARD BURNISHED STEEL|41|LG BAG|907.00|eposits. 6002|cyan saddle plum navajo tan|Manufacturer#5|Brand#53|MEDIUM POLISHED BRASS|19|MED BOX|908.00|final pinto beans 6003|chocolate wheat cornflower spring linen|Manufacturer#4|Brand#42|LARGE PLATED BRASS|50|LG JAR|909.00|structions. 
fluffily 6004|white indian burlywood smoke medium|Manufacturer#1|Brand#13|PROMO BURNISHED TIN|19|JUMBO CAN|910.00|about the care 6005|powder green red peru lavender|Manufacturer#2|Brand#25|PROMO BURNISHED TIN|40|SM CAN|911.00|lar notorni 6006|maroon green blanched indian ghost|Manufacturer#5|Brand#51|SMALL BRUSHED COPPER|46|LG CASE|912.00|es. slyly re 6007|steel cornsilk puff navajo drab|Manufacturer#5|Brand#54|PROMO BURNISHED STEEL|37|SM BOX|913.00|t excus 6008|medium midnight orange papaya brown|Manufacturer#5|Brand#53|PROMO POLISHED STEEL|15|LG BAG|914.00|s unwind unusual 6009|navy floral puff misty salmon|Manufacturer#5|Brand#54|STANDARD ANODIZED STEEL|27|SM BAG|915.00|arefull 6010|saddle green salmon red olive|Manufacturer#3|Brand#32|STANDARD ANODIZED COPPER|42|LG JAR|916.01|bold, spec 6011|turquoise royal papaya azure grey|Manufacturer#1|Brand#15|PROMO BRUSHED NICKEL|50|SM BAG|917.01|ts maintai 6012|burlywood chocolate peru deep rose|Manufacturer#4|Brand#42|STANDARD BRUSHED COPPER|18|SM CASE|918.01|final deposits doubt 6013|gainsboro lime rose hot sky|Manufacturer#3|Brand#32|PROMO ANODIZED COPPER|42|JUMBO JAR|919.01|ate slyly silent, iro 6014|red frosted powder medium sienna|Manufacturer#5|Brand#55|STANDARD ANODIZED NICKEL|46|MED DRUM|920.01|egular packages 6015|chartreuse chocolate peru smoke lace|Manufacturer#1|Brand#11|MEDIUM BRUSHED NICKEL|50|LG DRUM|921.01|nts. evenly unusual 6016|red linen orange floral tomato|Manufacturer#1|Brand#14|ECONOMY POLISHED BRASS|41|MED BAG|922.01| pinto be 6017|firebrick light smoke mint midnight|Manufacturer#3|Brand#32|LARGE BRUSHED NICKEL|44|LG BOX|923.01|ly against the 6018|lavender plum gainsboro mint salmon|Manufacturer#3|Brand#33|ECONOMY POLISHED BRASS|46|WRAP BAG|924.01|ckly r 6019|plum rose midnight lawn papaya|Manufacturer#5|Brand#51|PROMO BRUSHED TIN|38|MED CASE|925.01|press dolp 6020|olive forest purple magenta smoke|Manufacturer#5|Brand#55|STANDARD POLISHED TIN|17|WRAP BOX|926.02|ar packages hagg 6021|lavender slate sienna blush tomato|Manufacturer#5|Brand#52|LARGE PLATED STEEL|17|LG PACK|927.02|ve furiou 6022|cream royal white lace rose|Manufacturer#1|Brand#14|PROMO POLISHED COPPER|18|SM DRUM|928.02|ickly 6023|blush chartreuse gainsboro beige maroon|Manufacturer#1|Brand#15|ECONOMY BURNISHED BRASS|43|SM PKG|929.02|ave packa 6024|almond blush snow salmon midnight|Manufacturer#5|Brand#55|LARGE BURNISHED COPPER|5|LG CAN|930.02|l accoun 6025|purple medium light aquamarine dark|Manufacturer#5|Brand#55|MEDIUM PLATED STEEL|34|SM BAG|931.02|ake after t 6026|coral blush honeydew rose dark|Manufacturer#5|Brand#53|STANDARD BRUSHED STEEL|10|JUMBO BAG|932.02|final 6027|snow cream salmon misty tan|Manufacturer#3|Brand#32|LARGE BURNISHED NICKEL|6|JUMBO JAR|933.02|al dep 6028|orchid almond honeydew grey lace|Manufacturer#1|Brand#12|SMALL PLATED NICKEL|38|SM BOX|934.02|ss the pe 6029|seashell moccasin bisque goldenrod violet|Manufacturer#1|Brand#13|MEDIUM ANODIZED TIN|20|MED CASE|935.02| the special pac 6030|linen yellow orchid drab bisque|Manufacturer#5|Brand#51|SMALL PLATED STEEL|28|JUMBO DRUM|936.03|ly across 6031|green drab coral metallic orchid|Manufacturer#1|Brand#14|LARGE POLISHED BRASS|36|MED CAN|937.03|e. regularly ironic 6032|papaya cream cornsilk khaki navajo|Manufacturer#3|Brand#33|ECONOMY BRUSHED TIN|29|SM PACK|938.03|dolites. 
evenly ir 6033|drab green smoke salmon navy|Manufacturer#3|Brand#32|STANDARD BRUSHED STEEL|31|JUMBO DRUM|939.03|he fur 6034|forest lawn steel smoke dodger|Manufacturer#4|Brand#42|ECONOMY ANODIZED BRASS|44|MED DRUM|940.03|usual packages. depe 6035|royal orange pink burnished salmon|Manufacturer#1|Brand#11|MEDIUM POLISHED BRASS|39|WRAP CAN|941.03|y thin 6036|chartreuse antique linen puff medium|Manufacturer#3|Brand#34|PROMO ANODIZED NICKEL|1|WRAP BAG|942.03|slyly afte 6037|chocolate rose black steel cyan|Manufacturer#1|Brand#13|ECONOMY PLATED STEEL|34|WRAP BAG|943.03|s are speci 6038|dim almond red burnished lace|Manufacturer#2|Brand#24|MEDIUM ANODIZED TIN|1|WRAP PKG|944.03| deposits. 6039|lavender grey dark drab purple|Manufacturer#4|Brand#45|STANDARD BURNISHED TIN|14|LG CAN|945.03|ress packages cajole q 6040|honeydew dark midnight khaki bisque|Manufacturer#4|Brand#42|MEDIUM PLATED NICKEL|29|LG CASE|946.04|l, even asympt 6041|seashell forest linen brown goldenrod|Manufacturer#2|Brand#21|STANDARD ANODIZED NICKEL|19|MED DRUM|947.04|ecial 6042|peach chocolate firebrick frosted papaya|Manufacturer#5|Brand#54|LARGE BURNISHED STEEL|48|WRAP PKG|948.04|y quiet theodol 6043|khaki misty deep almond medium|Manufacturer#4|Brand#41|SMALL ANODIZED TIN|38|SM CASE|949.04|ideas affix 6044|dodger turquoise midnight papaya seashell|Manufacturer#2|Brand#22|SMALL BURNISHED TIN|4|JUMBO CASE|950.04|ccounts. blith 6045|turquoise blanched chocolate blush almond|Manufacturer#4|Brand#42|SMALL BRUSHED NICKEL|37|WRAP DRUM|951.04| final theodolites 6046|dim green wheat light honeydew|Manufacturer#5|Brand#51|ECONOMY BURNISHED BRASS|14|JUMBO PKG|952.04|l packages 6047|orchid dodger magenta beige snow|Manufacturer#2|Brand#22|MEDIUM POLISHED NICKEL|8|JUMBO BOX|953.04|ut the warthog 6048|ivory saddle drab steel pink|Manufacturer#1|Brand#12|MEDIUM BURNISHED STEEL|34|SM PACK|954.04|oss the carefully exp 6049|khaki floral yellow lemon metallic|Manufacturer#5|Brand#55|MEDIUM POLISHED COPPER|17|LG PKG|955.04|uests use 6050|dark thistle orchid slate brown|Manufacturer#4|Brand#42|ECONOMY BRUSHED BRASS|7|JUMBO BAG|956.05|nding packages 6051|linen steel brown medium floral|Manufacturer#3|Brand#33|ECONOMY POLISHED STEEL|20|LG PKG|957.05|usual requests. regul 6052|almond purple burnished dim coral|Manufacturer#2|Brand#21|STANDARD PLATED BRASS|20|SM BOX|958.05| the quickly special 6053|puff chocolate snow sky pale|Manufacturer#2|Brand#24|PROMO BRUSHED TIN|10|MED CAN|959.05|tes. even, 6054|aquamarine antique firebrick saddle cornsilk|Manufacturer#2|Brand#24|MEDIUM ANODIZED COPPER|45|WRAP PACK|960.05|ecial 6055|gainsboro firebrick coral lace hot|Manufacturer#3|Brand#33|SMALL PLATED BRASS|25|MED BOX|961.05|ses are dolphins. qu 6056|green dark tomato blanched cornflower|Manufacturer#4|Brand#43|PROMO PLATED COPPER|42|WRAP CAN|962.05|s. 
furiously re 6057|cornflower lawn royal linen cyan|Manufacturer#4|Brand#44|STANDARD BURNISHED NICKEL|11|SM CASE|963.05|ly pend 6058|light tomato ivory white medium|Manufacturer#5|Brand#51|LARGE BRUSHED COPPER|21|WRAP CASE|964.05|o the slyly regula 6059|azure peach peru pale ghost|Manufacturer#3|Brand#33|STANDARD POLISHED NICKEL|15|SM BAG|965.05|sits affix around 6060|bisque firebrick antique sandy white|Manufacturer#3|Brand#35|ECONOMY POLISHED COPPER|6|JUMBO BOX|966.06|lyly re 6061|snow coral green midnight floral|Manufacturer#1|Brand#12|SMALL BRUSHED NICKEL|23|MED BOX|967.06|fully iron 6062|lace pink lavender frosted snow|Manufacturer#5|Brand#54|PROMO BURNISHED BRASS|11|JUMBO DRUM|968.06|al ideas use slyly 6063|pale plum aquamarine violet cornflower|Manufacturer#2|Brand#23|PROMO ANODIZED TIN|28|SM BAG|969.06|y unus 6064|dark navajo floral navy chocolate|Manufacturer#5|Brand#55|STANDARD PLATED NICKEL|2|SM BAG|970.06|es-- always regu 6065|royal sienna khaki frosted dim|Manufacturer#1|Brand#11|STANDARD BURNISHED BRASS|3|WRAP BAG|971.06|ronic Tiresias. e 6066|brown papaya seashell magenta burnished|Manufacturer#3|Brand#34|SMALL ANODIZED COPPER|24|JUMBO BOX|972.06| haggle caref 6067|spring sandy blue orange cyan|Manufacturer#4|Brand#43|MEDIUM PLATED BRASS|12|WRAP DRUM|973.06|ly carefully silent 6068|cyan sienna moccasin grey tan|Manufacturer#4|Brand#44|SMALL BURNISHED TIN|2|LG PKG|974.06|gedly regular platel 6069|gainsboro lime maroon cornflower olive|Manufacturer#2|Brand#22|STANDARD ANODIZED TIN|32|WRAP PKG|975.06|ding to t 6070|orange cornsilk khaki salmon snow|Manufacturer#3|Brand#33|MEDIUM POLISHED TIN|35|MED CAN|976.07|s boo 6071|burlywood pink lime turquoise orchid|Manufacturer#2|Brand#24|PROMO BRUSHED NICKEL|35|JUMBO PACK|977.07| maintain ag 6072|pink orange cornflower floral light|Manufacturer#4|Brand#44|SMALL BRUSHED NICKEL|46|MED BAG|978.07|sauternes 6073|cornsilk puff metallic cyan peru|Manufacturer#3|Brand#35|LARGE PLATED STEEL|18|WRAP BOX|979.07|nic instructions d 6074|puff cornflower bisque black khaki|Manufacturer#3|Brand#35|MEDIUM POLISHED STEEL|44|JUMBO CAN|980.07|equests after t 6075|wheat mint chartreuse papaya spring|Manufacturer#3|Brand#35|MEDIUM BRUSHED COPPER|17|WRAP BAG|981.07|unusual, regular 6076|thistle magenta green floral olive|Manufacturer#5|Brand#51|PROMO PLATED NICKEL|41|JUMBO BOX|982.07|ously iron 6077|peru ivory dim pink blush|Manufacturer#1|Brand#15|LARGE BRUSHED NICKEL|32|LG BAG|983.07|arefully. 6078|pale navy coral lace peach|Manufacturer#2|Brand#21|STANDARD BURNISHED NICKEL|23|LG PACK|984.07|y bold d 6079|olive hot magenta maroon thistle|Manufacturer#2|Brand#23|PROMO ANODIZED COPPER|13|MED BOX|985.07|as. express accounts 6080|coral red dim maroon green|Manufacturer#5|Brand#51|PROMO POLISHED NICKEL|44|WRAP CAN|986.08|al instructions wake 6081|blush puff midnight wheat navajo|Manufacturer#5|Brand#53|SMALL PLATED TIN|27|WRAP DRUM|987.08|sts. 
fluffily fi 6082|bisque blue burnished turquoise steel|Manufacturer#1|Brand#15|LARGE BURNISHED BRASS|34|MED PKG|988.08|lly even courts wake 6083|cyan slate dim purple sky|Manufacturer#2|Brand#25|LARGE ANODIZED COPPER|25|WRAP BAG|989.08|nal requests along 6084|gainsboro floral goldenrod rose papaya|Manufacturer#4|Brand#42|SMALL BURNISHED STEEL|42|SM PKG|990.08| dolphi 6085|blush burnished lime purple mint|Manufacturer#2|Brand#24|ECONOMY POLISHED NICKEL|29|WRAP PKG|991.08|foxes 6086|misty hot tomato gainsboro azure|Manufacturer#1|Brand#11|SMALL BURNISHED COPPER|30|JUMBO CAN|992.08|ly dogged accounts 6087|indian cornsilk khaki burlywood pale|Manufacturer#1|Brand#12|MEDIUM POLISHED BRASS|7|SM BAG|993.08|ly. ex 6088|slate wheat orange orchid chartreuse|Manufacturer#4|Brand#45|SMALL BURNISHED BRASS|8|WRAP PACK|994.08|ffix 6089|light royal green yellow dark|Manufacturer#5|Brand#52|PROMO BURNISHED STEEL|31|JUMBO BAG|995.08|ing excu 6090|turquoise thistle cornsilk forest linen|Manufacturer#4|Brand#42|MEDIUM ANODIZED NICKEL|26|LG BAG|996.09| accounts 6091|wheat violet saddle royal metallic|Manufacturer#5|Brand#51|ECONOMY PLATED COPPER|41|LG PACK|997.09|es. carefully pendin 6092|forest goldenrod deep maroon misty|Manufacturer#3|Brand#34|PROMO ANODIZED STEEL|22|JUMBO JAR|998.09|ns run f 6093|bisque antique cream gainsboro navajo|Manufacturer#4|Brand#43|LARGE ANODIZED BRASS|40|JUMBO CASE|999.09|g depende 6094|bisque frosted drab sky snow|Manufacturer#5|Brand#55|ECONOMY BRUSHED TIN|28|JUMBO BAG|1000.09|ess, pending requests 6095|green orchid dark misty moccasin|Manufacturer#1|Brand#14|ECONOMY BRUSHED STEEL|30|JUMBO JAR|1001.09|. furiously 6096|khaki gainsboro black firebrick royal|Manufacturer#3|Brand#31|SMALL BURNISHED BRASS|30|MED CAN|1002.09|nd the blithely 6097|chocolate violet gainsboro smoke puff|Manufacturer#3|Brand#31|PROMO ANODIZED NICKEL|17|SM CASE|1003.09|cajole blithely 6098|ghost chocolate steel moccasin goldenrod|Manufacturer#1|Brand#13|MEDIUM BRUSHED NICKEL|26|MED PKG|1004.09|. daringly 6099|royal green plum honeydew medium|Manufacturer#2|Brand#25|LARGE BURNISHED STEEL|11|LG PACK|1005.09|ng packages hag 6100|papaya yellow chocolate sky indian|Manufacturer#5|Brand#55|MEDIUM PLATED NICKEL|20|LG BOX|1006.10|ns integrate. regu 6101|goldenrod rosy metallic plum seashell|Manufacturer#2|Brand#25|PROMO ANODIZED COPPER|34|MED BAG|1007.10|. furiously 6102|rosy yellow violet sienna snow|Manufacturer#5|Brand#51|ECONOMY ANODIZED BRASS|50|SM BOX|1008.10|ts are. c 6103|olive mint deep dodger bisque|Manufacturer#4|Brand#44|STANDARD ANODIZED BRASS|8|WRAP PKG|1009.10|leep blith 6104|aquamarine bisque cornflower cyan pale|Manufacturer#5|Brand#52|SMALL BRUSHED STEEL|37|WRAP JAR|1010.10|gedly a 6105|goldenrod cornflower light sandy rosy|Manufacturer#4|Brand#42|MEDIUM BRUSHED TIN|14|MED BAG|1011.10|ns. 
regular ideas 6106|lace pale dark blush frosted|Manufacturer#3|Brand#35|LARGE ANODIZED COPPER|26|SM BOX|1012.10|ccounts doze careful 6107|dim chartreuse brown salmon antique|Manufacturer#4|Brand#42|ECONOMY ANODIZED TIN|40|MED BOX|1013.10|are carefully among th 6108|cream medium rosy navy brown|Manufacturer#5|Brand#54|SMALL PLATED COPPER|42|SM CAN|1014.10| ironic requests inte 6109|saddle lawn goldenrod yellow dark|Manufacturer#1|Brand#13|STANDARD PLATED COPPER|19|SM PKG|1015.10|y among th 6110|chiffon snow ivory blanched cream|Manufacturer#1|Brand#12|SMALL BURNISHED NICKEL|42|LG JAR|1016.11|ual requests-- fluffi 6111|deep yellow dark lawn antique|Manufacturer#5|Brand#52|PROMO BRUSHED TIN|18|SM JAR|1017.11|tain above the 6112|metallic dark indian brown chartreuse|Manufacturer#4|Brand#44|ECONOMY ANODIZED NICKEL|30|MED PACK|1018.11|equests amo 6113|peach purple puff beige spring|Manufacturer#4|Brand#45|PROMO PLATED COPPER|10|JUMBO PKG|1019.11|he slyly ironic 6114|pale powder chartreuse lime moccasin|Manufacturer#4|Brand#41|STANDARD BRUSHED BRASS|41|LG CASE|1020.11|thely s 6115|chiffon rose plum puff peach|Manufacturer#1|Brand#11|ECONOMY BURNISHED STEEL|33|SM JAR|1021.11|n, unus 6116|burnished khaki wheat sienna violet|Manufacturer#5|Brand#54|PROMO BRUSHED TIN|43|LG BOX|1022.11|ing acc 6117|brown dim lace honeydew burlywood|Manufacturer#5|Brand#51|STANDARD PLATED COPPER|13|SM JAR|1023.11|sual instructions. 6118|rose violet linen burnished frosted|Manufacturer#3|Brand#32|ECONOMY ANODIZED STEEL|37|LG BOX|1024.11|ly acr 6119|royal almond magenta puff navy|Manufacturer#3|Brand#32|MEDIUM BURNISHED BRASS|36|LG BAG|1025.11|atelets. quickly regul 6120|peach white puff dim yellow|Manufacturer#2|Brand#23|ECONOMY ANODIZED COPPER|28|MED BAG|1026.12|bold instructions. 
6121|sky papaya bisque gainsboro aquamarine|Manufacturer#1|Brand#13|MEDIUM BRUSHED TIN|29|LG DRUM|1027.12|ly pending pinto b 6122|tomato drab misty grey medium|Manufacturer#5|Brand#55|ECONOMY PLATED NICKEL|41|JUMBO BAG|1028.12|sublate furi 6123|cornsilk sandy khaki indian lawn|Manufacturer#3|Brand#33|PROMO POLISHED STEEL|23|JUMBO CAN|1029.12|bove 6124|red papaya lime cornsilk maroon|Manufacturer#1|Brand#13|SMALL ANODIZED TIN|15|MED DRUM|1030.12|efully fi 6125|ghost cornflower forest indian antique|Manufacturer#4|Brand#44|ECONOMY BURNISHED BRASS|34|MED BAG|1031.12|ve the express pint 6126|yellow smoke cyan khaki steel|Manufacturer#5|Brand#55|MEDIUM PLATED NICKEL|20|WRAP BOX|1032.12|of the blithel 6127|dim lavender thistle magenta sienna|Manufacturer#3|Brand#35|STANDARD BURNISHED TIN|15|WRAP PKG|1033.12| bold deposits ca 6128|sandy tomato light cream midnight|Manufacturer#2|Brand#24|LARGE POLISHED COPPER|8|SM JAR|1034.12|ans integrate quickl 6129|grey cyan sky seashell light|Manufacturer#4|Brand#43|LARGE POLISHED TIN|44|MED JAR|1035.12| should boos 6130|coral azure chartreuse tomato turquoise|Manufacturer#3|Brand#31|MEDIUM BURNISHED STEEL|9|WRAP PACK|1036.13|ggle furiousl 6131|coral brown forest puff lemon|Manufacturer#1|Brand#11|ECONOMY POLISHED NICKEL|1|WRAP BAG|1037.13|es must nag across the 6132|spring lime beige chiffon ghost|Manufacturer#1|Brand#13|MEDIUM PLATED COPPER|41|SM DRUM|1038.13|eas among the slyl 6133|turquoise goldenrod coral hot chartreuse|Manufacturer#3|Brand#32|STANDARD ANODIZED NICKEL|32|LG BOX|1039.13|ully special 6134|brown steel magenta blush sky|Manufacturer#4|Brand#42|LARGE BURNISHED NICKEL|18|MED CAN|1040.13|uests dazzle 6135|cream yellow puff lavender almond|Manufacturer#4|Brand#44|LARGE BURNISHED BRASS|48|JUMBO JAR|1041.13|riously regular 6136|slate peach moccasin mint forest|Manufacturer#4|Brand#41|ECONOMY POLISHED COPPER|16|LG BAG|1042.13|ar pinto 6137|snow misty honeydew thistle ivory|Manufacturer#1|Brand#11|ECONOMY PLATED COPPER|19|JUMBO DRUM|1043.13|riously 6138|forest black deep mint navy|Manufacturer#1|Brand#11|MEDIUM BRUSHED COPPER|8|SM JAR|1044.13|ges play after t 6139|rose lavender snow forest cream|Manufacturer#4|Brand#45|ECONOMY BRUSHED STEEL|4|JUMBO CASE|1045.13|es. asympt 6140|almond medium midnight cornflower beige|Manufacturer#2|Brand#24|SMALL PLATED NICKEL|6|MED CASE|1046.14|equests 6141|purple honeydew midnight lavender red|Manufacturer#3|Brand#35|STANDARD POLISHED TIN|32|SM BOX|1047.14|ole carefully speci 6142|drab deep ivory ghost lace|Manufacturer#2|Brand#23|SMALL BURNISHED NICKEL|30|MED JAR|1048.14|endencies sleep furio 6143|lawn peru medium maroon blue|Manufacturer#1|Brand#12|ECONOMY ANODIZED BRASS|8|LG BAG|1049.14|equests. fluffily 6144|burlywood grey sky sienna gainsboro|Manufacturer#1|Brand#12|STANDARD BURNISHED STEEL|33|WRAP DRUM|1050.14|erns are. sl 6145|dodger peru metallic ghost cornsilk|Manufacturer#1|Brand#15|STANDARD PLATED BRASS|30|WRAP CASE|1051.14|etly ironic accounts. 6146|pink powder white firebrick smoke|Manufacturer#3|Brand#34|SMALL BURNISHED NICKEL|37|JUMBO CASE|1052.14|rhorses wake. caref 6147|yellow bisque cream light medium|Manufacturer#5|Brand#54|ECONOMY POLISHED BRASS|34|MED BOX|1053.14|ously r 6148|sienna spring turquoise almond orange|Manufacturer#3|Brand#34|PROMO BRUSHED STEEL|50|LG PACK|1054.14|y silent 6149|deep papaya blanched honeydew snow|Manufacturer#3|Brand#35|ECONOMY BRUSHED TIN|50|MED CAN|1055.14|coys. 
6150|orange powder tomato lemon brown|Manufacturer#1|Brand#11|STANDARD ANODIZED TIN|26|SM PKG|1056.15| blithe do 6151|floral ivory cyan sienna pale|Manufacturer#4|Brand#42|LARGE BRUSHED NICKEL|7|JUMBO CASE|1057.15|hins should cajole. 6152|burnished chiffon cornsilk linen orange|Manufacturer#3|Brand#32|PROMO BURNISHED TIN|42|WRAP PKG|1058.15|ly. unusua 6153|almond orange indian ghost chocolate|Manufacturer#2|Brand#23|MEDIUM BURNISHED COPPER|25|MED BAG|1059.15|instr 6154|lavender metallic black chiffon orchid|Manufacturer#5|Brand#54|PROMO PLATED BRASS|1|LG DRUM|1060.15|ss, express accoun 6155|cyan orange yellow papaya moccasin|Manufacturer#5|Brand#52|PROMO ANODIZED COPPER|2|JUMBO CAN|1061.15|es affi 6156|red maroon lavender burnished brown|Manufacturer#3|Brand#34|PROMO BRUSHED STEEL|36|SM PKG|1062.15|nal instructions 6157|blanched ghost deep sky red|Manufacturer#2|Brand#21|MEDIUM ANODIZED NICKEL|38|JUMBO CASE|1063.15|s wake 6158|navajo peach pink orchid midnight|Manufacturer#3|Brand#34|STANDARD POLISHED STEEL|13|WRAP PACK|1064.15|lly b 6159|orchid salmon tomato honeydew indian|Manufacturer#3|Brand#34|LARGE POLISHED STEEL|15|JUMBO DRUM|1065.15|cording to the final, 6160|navy navajo gainsboro chocolate pale|Manufacturer#2|Brand#22|SMALL POLISHED TIN|40|LG BAG|1066.16|ithely ironic 6161|smoke medium green gainsboro violet|Manufacturer#2|Brand#22|STANDARD PLATED STEEL|12|MED JAR|1067.16| fluffily. slyly 6162|seashell dim snow saddle grey|Manufacturer#3|Brand#32|STANDARD ANODIZED COPPER|16|JUMBO BAG|1068.16|hely slyly regu 6163|linen ghost dodger drab burlywood|Manufacturer#1|Brand#11|MEDIUM BURNISHED TIN|1|WRAP BOX|1069.16|t, final foxes cajo 6164|white drab saddle snow purple|Manufacturer#1|Brand#14|PROMO BRUSHED TIN|8|JUMBO BAG|1070.16|sly slow pearls wake 6165|frosted moccasin red orchid khaki|Manufacturer#1|Brand#12|SMALL ANODIZED BRASS|44|JUMBO JAR|1071.16|carefully b 6166|indian orange orchid turquoise blush|Manufacturer#4|Brand#45|MEDIUM PLATED NICKEL|25|WRAP PACK|1072.16|ven pinto b 6167|lawn seashell cornflower coral indian|Manufacturer#5|Brand#54|SMALL PLATED COPPER|16|LG BOX|1073.16|packages mold slyly r 6168|midnight plum blue spring floral|Manufacturer#1|Brand#14|STANDARD BURNISHED BRASS|12|JUMBO BOX|1074.16|ideas affix 6169|dim violet sienna bisque saddle|Manufacturer#4|Brand#43|STANDARD PLATED BRASS|26|LG PKG|1075.16|even accounts boost. 
f 6170|honeydew yellow deep blanched smoke|Manufacturer#3|Brand#31|LARGE BRUSHED BRASS|5|LG DRUM|1076.17| fluffily ab 6171|peru light sandy sky antique|Manufacturer#4|Brand#42|ECONOMY ANODIZED STEEL|48|JUMBO JAR|1077.17| along the c 6172|khaki rose dodger metallic orange|Manufacturer#5|Brand#54|PROMO BRUSHED COPPER|34|JUMBO CAN|1078.17|ests integrate 6173|rosy mint cream pale lavender|Manufacturer#4|Brand#43|SMALL PLATED STEEL|10|WRAP BOX|1079.17|ang slyly of 6174|dark seashell spring lavender sienna|Manufacturer#2|Brand#25|SMALL BRUSHED STEEL|8|JUMBO DRUM|1080.17|onic, ironic requests 6175|plum olive papaya purple orange|Manufacturer#1|Brand#15|STANDARD POLISHED TIN|43|SM JAR|1081.17|y against the slyly p 6176|tomato cyan chartreuse sky navy|Manufacturer#5|Brand#55|SMALL BRUSHED STEEL|47|SM CAN|1082.17|ully care 6177|ghost peach turquoise goldenrod spring|Manufacturer#5|Brand#51|ECONOMY BURNISHED COPPER|45|SM CASE|1083.17|ids integrate sil 6178|blush aquamarine black moccasin slate|Manufacturer#3|Brand#33|STANDARD POLISHED COPPER|46|JUMBO PKG|1084.17|ial instructions 6179|beige dark peru moccasin brown|Manufacturer#3|Brand#33|LARGE ANODIZED STEEL|3|LG PKG|1085.17|nts of the unu 6180|mint gainsboro powder maroon royal|Manufacturer#2|Brand#21|MEDIUM POLISHED BRASS|14|LG BOX|1086.18|e carefully unusual th 6181|brown mint drab lemon orange|Manufacturer#2|Brand#23|ECONOMY POLISHED COPPER|8|WRAP BAG|1087.18|ans cajole. 6182|burlywood light dodger honeydew cyan|Manufacturer#3|Brand#34|MEDIUM BRUSHED NICKEL|5|LG BOX|1088.18|ly inside the pla 6183|sienna sandy aquamarine cornsilk deep|Manufacturer#5|Brand#52|SMALL BRUSHED STEEL|39|WRAP BAG|1089.18|ly ironic accounts int 6184|dark orchid blush goldenrod khaki|Manufacturer#5|Brand#55|LARGE PLATED BRASS|15|MED JAR|1090.18|fully final 6185|light snow tan cornsilk misty|Manufacturer#4|Brand#41|STANDARD POLISHED COPPER|39|LG BOX|1091.18|ts bo 6186|antique burnished moccasin cornflower light|Manufacturer#1|Brand#13|LARGE BRUSHED NICKEL|29|LG JAR|1092.18| careful 6187|black burnished cornflower light saddle|Manufacturer#1|Brand#11|LARGE PLATED STEEL|5|JUMBO BAG|1093.18|sits. 6188|indian steel magenta misty olive|Manufacturer#2|Brand#23|SMALL BRUSHED STEEL|13|MED CASE|1094.18| beans boost. r 6189|chocolate midnight deep papaya salmon|Manufacturer#5|Brand#55|SMALL BURNISHED COPPER|29|JUMBO CASE|1095.18|bold foxes wake 6190|chiffon slate navy orchid almond|Manufacturer#4|Brand#43|PROMO BURNISHED COPPER|4|WRAP BOX|1096.19|e fluffily a 6191|papaya cyan purple aquamarine ghost|Manufacturer#4|Brand#43|STANDARD BURNISHED STEEL|4|SM BAG|1097.19|ts. idl 6192|frosted medium mint light drab|Manufacturer#5|Brand#52|PROMO POLISHED BRASS|1|MED PACK|1098.19|s. express pl 6193|aquamarine thistle green almond spring|Manufacturer#4|Brand#45|MEDIUM POLISHED COPPER|38|MED BOX|1099.19|gside of the ruthless, 6194|gainsboro violet pink blanched royal|Manufacturer#1|Brand#11|LARGE BURNISHED COPPER|24|WRAP CAN|1100.19| foxes cajole. s 6195|violet tan salmon moccasin linen|Manufacturer#4|Brand#45|LARGE ANODIZED NICKEL|10|SM JAR|1101.19|ular theodolites. 
r 6196|cream maroon metallic orange peru|Manufacturer#4|Brand#43|ECONOMY POLISHED TIN|48|MED JAR|1102.19|d, unu 6197|violet saddle cyan orange ivory|Manufacturer#4|Brand#44|SMALL POLISHED STEEL|27|MED JAR|1103.19|gular pinto beans nag 6198|saddle cyan mint linen turquoise|Manufacturer#3|Brand#32|LARGE POLISHED NICKEL|33|JUMBO CASE|1104.19|accounts across t 6199|chartreuse snow saddle ghost medium|Manufacturer#1|Brand#12|LARGE BURNISHED STEEL|18|MED JAR|1105.19|r courts. sl 6200|sky indian lavender coral midnight|Manufacturer#4|Brand#41|ECONOMY BURNISHED COPPER|25|JUMBO BAG|1106.20|ccounts are deposits. 6201|orchid sky frosted yellow lemon|Manufacturer#2|Brand#25|LARGE ANODIZED STEEL|46|SM CAN|1107.20|ole carefully sile 6202|cornsilk metallic sky puff papaya|Manufacturer#5|Brand#53|PROMO BRUSHED BRASS|45|WRAP PACK|1108.20|courts 6203|navy plum linen gainsboro midnight|Manufacturer#5|Brand#54|MEDIUM BRUSHED COPPER|50|WRAP DRUM|1109.20| tithes. furiously ev 6204|hot gainsboro beige rosy chiffon|Manufacturer#5|Brand#51|SMALL PLATED COPPER|40|LG CASE|1110.20|ecial depos 6205|antique gainsboro floral puff blush|Manufacturer#1|Brand#13|LARGE ANODIZED BRASS|16|SM PKG|1111.20|ar requests sleep abov 6206|salmon sandy floral khaki mint|Manufacturer#3|Brand#31|SMALL ANODIZED BRASS|35|LG JAR|1112.20|refully final 6207|antique goldenrod rosy metallic cornsilk|Manufacturer#4|Brand#42|STANDARD POLISHED NICKEL|45|MED PKG|1113.20|nal packa 6208|bisque light saddle black sandy|Manufacturer#1|Brand#11|PROMO BRUSHED TIN|17|SM CAN|1114.20|y unusual deposits a 6209|maroon sienna peach tomato deep|Manufacturer#2|Brand#25|MEDIUM PLATED BRASS|47|JUMBO DRUM|1115.20|ial theodoli 6210|lime lavender grey chiffon burlywood|Manufacturer#5|Brand#51|MEDIUM ANODIZED TIN|9|MED BAG|1116.21|ons lose 6211|orange cyan light beige chiffon|Manufacturer#4|Brand#42|STANDARD POLISHED NICKEL|2|SM JAR|1117.21|ss asymptot 6212|lace medium burnished peach tan|Manufacturer#2|Brand#25|LARGE POLISHED BRASS|9|SM BAG|1118.21|lithely sly 6213|steel snow slate papaya dark|Manufacturer#3|Brand#35|MEDIUM ANODIZED BRASS|15|LG BAG|1119.21|across the furiously 6214|floral spring light firebrick blush|Manufacturer#5|Brand#55|MEDIUM PLATED NICKEL|42|MED CASE|1120.21|ideas. 
accou 6215|midnight cornflower cornsilk brown antique|Manufacturer#3|Brand#35|STANDARD POLISHED TIN|28|LG JAR|1121.21|e fluffi 6216|forest beige red blush deep|Manufacturer#5|Brand#51|MEDIUM BRUSHED BRASS|47|MED JAR|1122.21|ly about the furiou 6217|aquamarine peru chartreuse green pink|Manufacturer#4|Brand#43|PROMO POLISHED STEEL|36|LG JAR|1123.21|egular pinto beans caj 6218|powder coral olive maroon lace|Manufacturer#3|Brand#33|STANDARD ANODIZED NICKEL|8|WRAP BOX|1124.21|ounts must 6219|honeydew wheat chartreuse drab sky|Manufacturer#4|Brand#44|ECONOMY ANODIZED NICKEL|37|LG PKG|1125.21|nic de 6220|purple light misty frosted puff|Manufacturer#5|Brand#54|LARGE PLATED NICKEL|18|SM BAG|1126.22|ly iron 6221|chartreuse rosy peach tan goldenrod|Manufacturer#4|Brand#41|STANDARD ANODIZED NICKEL|42|MED PKG|1127.22|lly final idea 6222|yellow lemon blush beige light|Manufacturer#2|Brand#25|SMALL POLISHED TIN|16|LG JAR|1128.22| quickly expre 6223|bisque sienna drab yellow almond|Manufacturer#3|Brand#32|PROMO POLISHED COPPER|40|SM BAG|1129.22|lly ironic th 6224|sky dim orchid snow pink|Manufacturer#5|Brand#53|LARGE BRUSHED NICKEL|23|LG PKG|1130.22|realms nag carefu 6225|red cyan green dodger burnished|Manufacturer#5|Brand#54|LARGE BRUSHED STEEL|9|JUMBO BAG|1131.22|usly unusual 6226|drab black tomato pink almond|Manufacturer#3|Brand#34|LARGE ANODIZED TIN|18|JUMBO CAN|1132.22|ls. foxes i 6227|sienna firebrick misty linen sandy|Manufacturer#4|Brand#42|ECONOMY POLISHED TIN|40|LG CASE|1133.22|lly regular theodolit 6228|frosted burlywood dodger red aquamarine|Manufacturer#1|Brand#13|PROMO PLATED STEEL|16|LG BOX|1134.22|d excuses c 6229|bisque white khaki honeydew azure|Manufacturer#4|Brand#43|PROMO BRUSHED BRASS|29|WRAP PKG|1135.22|lar fo 6230|powder navajo linen salmon turquoise|Manufacturer#2|Brand#22|SMALL POLISHED COPPER|35|SM PACK|1136.23|ickly express i 6231|green dodger almond gainsboro indian|Manufacturer#2|Brand#21|PROMO ANODIZED STEEL|20|MED PACK|1137.23| even accounts m 6232|blanched linen cream almond orange|Manufacturer#1|Brand#11|MEDIUM ANODIZED BRASS|30|LG JAR|1138.23|unusual, regular acco 6233|blush floral bisque dodger drab|Manufacturer#3|Brand#33|ECONOMY BURNISHED BRASS|49|JUMBO BOX|1139.23| the final foxes s 6234|green steel grey olive thistle|Manufacturer#4|Brand#45|STANDARD BURNISHED BRASS|50|LG PACK|1140.23|phins wake caref 6235|white tomato goldenrod wheat grey|Manufacturer#2|Brand#25|SMALL PLATED TIN|50|WRAP PACK|1141.23|packages boost 6236|misty lace metallic ivory pink|Manufacturer#4|Brand#45|ECONOMY BRUSHED NICKEL|41|MED PKG|1142.23|onic grouches detect 6237|firebrick white papaya sandy magenta|Manufacturer#1|Brand#14|ECONOMY PLATED TIN|15|SM PKG|1143.23|y after the 6238|powder lace deep lime steel|Manufacturer#1|Brand#15|LARGE ANODIZED TIN|30|MED PACK|1144.23|under the final cour 6239|chocolate spring wheat lawn tan|Manufacturer#5|Brand#51|LARGE BURNISHED STEEL|8|SM JAR|1145.23|y regular multipliers 6240|chartreuse grey thistle azure moccasin|Manufacturer#2|Brand#22|MEDIUM ANODIZED STEEL|34|JUMBO CAN|1146.24|y eve 6241|misty cornflower plum slate medium|Manufacturer#1|Brand#14|SMALL ANODIZED TIN|23|WRAP JAR|1147.24|ptotes across the eve 6242|linen powder brown lace red|Manufacturer#2|Brand#22|PROMO POLISHED STEEL|14|MED BAG|1148.24|ly regular the 6243|royal blush forest papaya navajo|Manufacturer#5|Brand#53|SMALL POLISHED TIN|1|SM CASE|1149.24|he express, reg 6244|azure plum magenta rosy sandy|Manufacturer#5|Brand#55|SMALL ANODIZED TIN|35|LG CAN|1150.24|lly express packages 
6245|almond burnished beige peru brown|Manufacturer#1|Brand#12|STANDARD PLATED BRASS|38|WRAP PKG|1151.24|al platele 6246|aquamarine azure smoke lime rose|Manufacturer#4|Brand#44|PROMO PLATED BRASS|47|MED BOX|1152.24|y iro 6247|rose yellow chiffon lemon thistle|Manufacturer#3|Brand#33|LARGE PLATED BRASS|18|MED BOX|1153.24|ong the unusual acco 6248|orange goldenrod chocolate papaya smoke|Manufacturer#3|Brand#35|SMALL POLISHED NICKEL|29|MED PKG|1154.24|efully stealthy req 6249|metallic chiffon ghost goldenrod salmon|Manufacturer#4|Brand#42|MEDIUM BRUSHED BRASS|25|JUMBO BOX|1155.24|e unusua 6250|navajo lace slate steel purple|Manufacturer#3|Brand#34|PROMO BURNISHED NICKEL|2|MED DRUM|1156.25| requests. blithel 6251|lemon gainsboro khaki lavender ivory|Manufacturer#3|Brand#34|PROMO POLISHED BRASS|40|SM BOX|1157.25|egular, unusual re 6252|salmon peach chocolate sky blush|Manufacturer#2|Brand#23|MEDIUM ANODIZED STEEL|45|SM CAN|1158.25|ong the deposits. 6253|seashell navajo snow sky grey|Manufacturer#2|Brand#21|STANDARD BRUSHED STEEL|18|LG CASE|1159.25|uffily ironic 6254|dark tan beige black moccasin|Manufacturer#3|Brand#32|SMALL BRUSHED BRASS|17|WRAP DRUM|1160.25|ular acc 6255|misty olive saddle peach navajo|Manufacturer#5|Brand#55|LARGE BURNISHED BRASS|38|LG PKG|1161.25|ymptotes about the fr 6256|steel lace lavender hot tomato|Manufacturer#3|Brand#33|ECONOMY ANODIZED STEEL|21|LG PACK|1162.25|sly pending gifts ha 6257|tan smoke azure rosy firebrick|Manufacturer#2|Brand#25|ECONOMY ANODIZED NICKEL|22|SM BOX|1163.25|wake. ironic pearls i 6258|red plum tan snow navajo|Manufacturer#5|Brand#55|ECONOMY PLATED STEEL|48|JUMBO BAG|1164.25|eposits. ironic, 6259|chartreuse cream saddle hot spring|Manufacturer#3|Brand#35|STANDARD ANODIZED TIN|33|WRAP PKG|1165.25|ly ironic ideas will 6260|dark cornsilk thistle cornflower saddle|Manufacturer#1|Brand#11|LARGE PLATED STEEL|23|SM PACK|1166.26|ss requests sleep fl 6261|khaki pale blush turquoise mint|Manufacturer#2|Brand#25|ECONOMY BURNISHED COPPER|11|MED PACK|1167.26|ns wake regu 6262|goldenrod aquamarine forest beige purple|Manufacturer#1|Brand#11|MEDIUM POLISHED STEEL|28|WRAP PKG|1168.26|le. pinto beans d 6263|saddle cornsilk aquamarine coral magenta|Manufacturer#5|Brand#52|STANDARD BRUSHED COPPER|14|MED PACK|1169.26|s haggle final pinto 6264|khaki ghost azure antique red|Manufacturer#2|Brand#21|ECONOMY POLISHED TIN|1|WRAP PKG|1170.26|heodolite 6265|lawn lemon indian thistle firebrick|Manufacturer#1|Brand#12|PROMO PLATED COPPER|27|SM PACK|1171.26|ons are careful 6266|powder lace indian rose brown|Manufacturer#1|Brand#13|ECONOMY BURNISHED BRASS|38|SM CAN|1172.26|iously. final instr 6267|azure sky peach slate frosted|Manufacturer#3|Brand#35|ECONOMY POLISHED STEEL|13|WRAP DRUM|1173.26|. carefully b 6268|spring puff coral sandy drab|Manufacturer#1|Brand#11|MEDIUM ANODIZED TIN|17|LG CASE|1174.26|es. 
fi 6269|navajo saddle thistle papaya black|Manufacturer#3|Brand#31|LARGE ANODIZED NICKEL|41|LG PKG|1175.26|g the 6270|wheat coral chiffon puff blanched|Manufacturer#3|Brand#34|STANDARD BURNISHED STEEL|28|LG BOX|1176.27|l deposits cajole ca 6271|azure sandy blue dodger honeydew|Manufacturer#5|Brand#52|LARGE BURNISHED NICKEL|42|JUMBO JAR|1177.27|onic platelets above 6272|blue floral forest steel salmon|Manufacturer#3|Brand#32|SMALL BRUSHED COPPER|20|JUMBO DRUM|1178.27|among the carefully re 6273|ghost saddle black seashell moccasin|Manufacturer#5|Brand#52|ECONOMY PLATED TIN|26|WRAP BAG|1179.27|inal 6274|lace azure blush wheat frosted|Manufacturer#2|Brand#24|MEDIUM BURNISHED STEEL|18|WRAP DRUM|1180.27|o the even grouches 6275|gainsboro ghost tan rosy ivory|Manufacturer#1|Brand#13|MEDIUM PLATED STEEL|10|SM DRUM|1181.27|foxes. f 6276|khaki slate red frosted snow|Manufacturer#5|Brand#53|SMALL ANODIZED TIN|47|WRAP JAR|1182.27|sts dazzle blit 6277|black forest orange pink medium|Manufacturer#1|Brand#11|SMALL POLISHED TIN|18|LG BAG|1183.27|ily fi 6278|saddle navy orchid forest olive|Manufacturer#5|Brand#54|STANDARD PLATED BRASS|23|WRAP PKG|1184.27|nts. sl 6279|steel ivory blush chiffon cornsilk|Manufacturer#4|Brand#41|SMALL BURNISHED STEEL|15|JUMBO PACK|1185.27|elets 6280|azure antique thistle dark pink|Manufacturer#1|Brand#11|LARGE ANODIZED COPPER|1|MED CASE|1186.28|he regular, regu 6281|tomato honeydew lawn black orange|Manufacturer#3|Brand#33|LARGE BRUSHED BRASS|4|JUMBO PKG|1187.28|deposits haggle 6282|navajo linen dodger honeydew misty|Manufacturer#5|Brand#51|MEDIUM BRUSHED STEEL|3|LG PKG|1188.28|y. even req 6283|puff dim sandy chocolate snow|Manufacturer#5|Brand#52|PROMO ANODIZED NICKEL|50|WRAP CASE|1189.28|ecial orbits bo 6284|maroon spring dodger black blush|Manufacturer#3|Brand#33|MEDIUM PLATED BRASS|38|LG CAN|1190.28|deposits 6285|steel plum mint purple green|Manufacturer#2|Brand#23|STANDARD PLATED STEEL|1|MED PKG|1191.28|nder 6286|wheat pale thistle chartreuse brown|Manufacturer#3|Brand#32|PROMO BRUSHED BRASS|21|JUMBO BOX|1192.28|s. sl 6287|frosted peru bisque powder hot|Manufacturer#5|Brand#55|SMALL PLATED COPPER|49|WRAP DRUM|1193.28|anent deposits haggle 6288|yellow lawn pink peach mint|Manufacturer#2|Brand#21|PROMO BRUSHED TIN|27|MED DRUM|1194.28|the slyly pending the 6289|beige aquamarine dodger chiffon black|Manufacturer#4|Brand#45|MEDIUM BURNISHED BRASS|38|SM BAG|1195.28|s. final, even requ 6290|orange black tomato powder purple|Manufacturer#2|Brand#24|LARGE ANODIZED STEEL|44|WRAP CASE|1196.29|ts boost car 6291|burnished powder chiffon orange wheat|Manufacturer#3|Brand#34|MEDIUM BURNISHED NICKEL|39|WRAP CAN|1197.29|eaves sle 6292|bisque brown moccasin snow maroon|Manufacturer#4|Brand#44|PROMO PLATED BRASS|28|MED DRUM|1198.29|ugouts. final accounts 6293|floral azure brown blue tomato|Manufacturer#5|Brand#54|PROMO BRUSHED COPPER|28|MED CASE|1199.29|dependencies. even 6294|metallic magenta aquamarine midnight linen|Manufacturer#4|Brand#44|MEDIUM POLISHED TIN|44|SM BOX|1200.29|excuses above the 6295|blanched cyan turquoise saddle white|Manufacturer#4|Brand#43|LARGE BRUSHED BRASS|40|SM BAG|1201.29|kly fluffily unusual 6296|coral metallic yellow plum goldenrod|Manufacturer#1|Brand#13|PROMO BRUSHED NICKEL|47|MED BOX|1202.29|cial deposits d 6297|steel burnished dodger cornflower coral|Manufacturer#4|Brand#45|ECONOMY POLISHED TIN|2|JUMBO DRUM|1203.29|ly unusual theodol 6298|cornsilk sky violet green burnished|Manufacturer#3|Brand#35|MEDIUM POLISHED NICKEL|47|SM BAG|1204.29|tes. 
furiously pend 6299|antique wheat blue seashell hot|Manufacturer#3|Brand#34|PROMO BRUSHED STEEL|43|JUMBO JAR|1205.29|totes. regularly 6300|navy black cornflower white aquamarine|Manufacturer#4|Brand#41|LARGE POLISHED BRASS|10|JUMBO BAG|1206.30|quests haggle 6301|blush ghost azure peach lemon|Manufacturer#4|Brand#45|LARGE ANODIZED BRASS|8|LG CAN|1207.30| pinto b 6302|lace frosted mint misty cream|Manufacturer#4|Brand#42|LARGE PLATED TIN|28|LG JAR|1208.30|blithely even instru 6303|dark powder almond honeydew burnished|Manufacturer#1|Brand#11|SMALL ANODIZED STEEL|46|JUMBO BOX|1209.30|ckages; 6304|chocolate dodger coral maroon beige|Manufacturer#2|Brand#21|MEDIUM BURNISHED BRASS|8|JUMBO DRUM|1210.30|ackages cajole at th 6305|white metallic hot antique olive|Manufacturer#3|Brand#33|ECONOMY BURNISHED COPPER|11|MED PKG|1211.30|ing packages 6306|navajo papaya metallic antique tan|Manufacturer#5|Brand#54|SMALL BRUSHED COPPER|5|WRAP PKG|1212.30|ound the care 6307|orchid magenta papaya cornflower wheat|Manufacturer#5|Brand#52|PROMO ANODIZED STEEL|25|JUMBO CAN|1213.30|kages. caref 6308|seashell red medium violet misty|Manufacturer#2|Brand#22|ECONOMY BRUSHED BRASS|14|LG CAN|1214.30|lose requests. s 6309|hot gainsboro cornflower chartreuse chiffon|Manufacturer#4|Brand#44|PROMO BRUSHED NICKEL|22|SM BOX|1215.30|realms cajole fu 6310|brown cream olive black goldenrod|Manufacturer#4|Brand#44|STANDARD ANODIZED TIN|12|WRAP PKG|1216.31|bout the 6311|rose misty ghost burlywood blush|Manufacturer#2|Brand#25|PROMO PLATED NICKEL|6|LG BOX|1217.31|ully even accounts 6312|burnished smoke metallic mint purple|Manufacturer#2|Brand#24|LARGE BURNISHED COPPER|48|JUMBO CAN|1218.31|ccounts-- sly 6313|yellow salmon gainsboro beige lace|Manufacturer#1|Brand#14|SMALL BURNISHED STEEL|27|LG CAN|1219.31|gular inst 6314|white turquoise plum goldenrod smoke|Manufacturer#5|Brand#51|MEDIUM PLATED TIN|3|LG CAN|1220.31|ake after the sly 6315|purple olive slate pale rosy|Manufacturer#5|Brand#51|LARGE PLATED COPPER|20|JUMBO PKG|1221.31|ecial a 6316|light peach wheat medium orange|Manufacturer#2|Brand#22|LARGE ANODIZED TIN|29|MED DRUM|1222.31|phs. iro 6317|orchid snow maroon ghost smoke|Manufacturer#2|Brand#22|PROMO BRUSHED NICKEL|34|WRAP JAR|1223.31|en accounts. 6318|dim indian coral seashell tomato|Manufacturer#5|Brand#54|ECONOMY BRUSHED COPPER|43|JUMBO BOX|1224.31|dolites. slyly iro 6319|tan pink salmon lavender spring|Manufacturer#2|Brand#23|PROMO BURNISHED COPPER|29|JUMBO CAN|1225.31|sits after the caref 6320|smoke orange purple white dark|Manufacturer#2|Brand#24|ECONOMY BRUSHED COPPER|21|MED BAG|1226.32|nly. furiously unu 6321|green medium goldenrod yellow honeydew|Manufacturer#2|Brand#23|LARGE PLATED COPPER|3|JUMBO PACK|1227.32|ts sleep along 6322|yellow violet medium firebrick turquoise|Manufacturer#5|Brand#52|ECONOMY POLISHED TIN|16|WRAP CAN|1228.32|y final foxes. bli 6323|aquamarine linen moccasin coral medium|Manufacturer#3|Brand#35|MEDIUM POLISHED BRASS|13|JUMBO PKG|1229.32|ng to the 6324|beige dodger burnished misty pink|Manufacturer#3|Brand#35|SMALL BURNISHED TIN|22|MED PKG|1230.32|gularly bo 6325|puff honeydew blush coral magenta|Manufacturer#1|Brand#11|LARGE ANODIZED STEEL|20|WRAP DRUM|1231.32|even deposits. 6326|blue olive lemon goldenrod slate|Manufacturer#3|Brand#35|PROMO POLISHED TIN|45|JUMBO CAN|1232.32|ounts dazzle 6327|forest navy powder black papaya|Manufacturer#1|Brand#11|MEDIUM BRUSHED BRASS|37|MED CAN|1233.32|ncies. 
even, even p 6328|chocolate red papaya lavender honeydew|Manufacturer#2|Brand#25|MEDIUM BURNISHED TIN|9|WRAP BOX|1234.32|kages. a 6329|lawn hot brown sienna tomato|Manufacturer#4|Brand#41|LARGE PLATED COPPER|47|JUMBO BOX|1235.32| special the 6330|firebrick lime ghost honeydew coral|Manufacturer#1|Brand#14|SMALL ANODIZED STEEL|15|SM PACK|1236.33|ts haggle furiousl 6331|maroon floral blanched burlywood red|Manufacturer#5|Brand#53|LARGE ANODIZED NICKEL|8|WRAP CAN|1237.33|ackages. bo 6332|honeydew sienna rose aquamarine moccasin|Manufacturer#4|Brand#44|LARGE PLATED STEEL|27|MED BOX|1238.33|among the regular 6333|blanched pink black drab wheat|Manufacturer#2|Brand#23|LARGE BURNISHED TIN|28|JUMBO BAG|1239.33|ages. careful 6334|indian wheat turquoise blanched linen|Manufacturer#4|Brand#42|ECONOMY PLATED STEEL|46|JUMBO DRUM|1240.33|e furiously unus 6335|lime burnished purple lemon linen|Manufacturer#3|Brand#31|ECONOMY BURNISHED TIN|12|JUMBO JAR|1241.33|ep. permanent, fina 6336|lemon dodger peach cyan blue|Manufacturer#2|Brand#24|SMALL BRUSHED STEEL|18|SM BAG|1242.33|. express 6337|steel orchid ivory antique navajo|Manufacturer#5|Brand#55|PROMO BURNISHED STEEL|5|SM DRUM|1243.33|xpress packa 6338|plum lace almond cyan seashell|Manufacturer#1|Brand#11|ECONOMY PLATED NICKEL|45|MED PKG|1244.33|unts again 6339|orange navajo coral mint black|Manufacturer#3|Brand#33|ECONOMY ANODIZED BRASS|10|WRAP DRUM|1245.33|eans cajo 6340|lavender midnight cornflower misty red|Manufacturer#2|Brand#21|LARGE POLISHED BRASS|27|MED DRUM|1246.34|nusual pac 6341|sky slate ghost smoke bisque|Manufacturer#5|Brand#51|STANDARD BRUSHED TIN|34|WRAP CASE|1247.34|ely thin accoun 6342|navy puff olive hot violet|Manufacturer#5|Brand#54|ECONOMY BRUSHED BRASS|30|MED CAN|1248.34|slyly unusual asymp 6343|saddle yellow moccasin tomato plum|Manufacturer#2|Brand#22|SMALL BURNISHED BRASS|6|SM CAN|1249.34|sts. fu 6344|blue frosted black linen chocolate|Manufacturer#3|Brand#33|SMALL BURNISHED BRASS|19|WRAP PKG|1250.34|s cajole fluffily. 6345|thistle sienna almond grey lemon|Manufacturer#5|Brand#53|SMALL BURNISHED COPPER|46|LG BAG|1251.34|kly f 6346|saddle ghost orchid brown blush|Manufacturer#5|Brand#55|STANDARD BRUSHED TIN|45|MED JAR|1252.34|nts haggle. 
ev 6347|honeydew cornsilk rosy linen blue|Manufacturer#4|Brand#44|LARGE BURNISHED BRASS|49|LG BOX|1253.34| alongside of 6348|purple rosy tomato maroon black|Manufacturer#5|Brand#54|ECONOMY POLISHED NICKEL|18|JUMBO PACK|1254.34| blithel 6349|forest frosted bisque orchid sky|Manufacturer#2|Brand#21|SMALL BURNISHED TIN|19|WRAP BAG|1255.34| the f 6350|blush papaya salmon sky gainsboro|Manufacturer#5|Brand#54|ECONOMY BRUSHED BRASS|28|JUMBO BAG|1256.35| carefully 6351|tomato orange honeydew blanched metallic|Manufacturer#1|Brand#11|SMALL BRUSHED BRASS|32|JUMBO BAG|1257.35|refully regular, unu 6352|rosy plum light tomato wheat|Manufacturer#5|Brand#53|MEDIUM BURNISHED TIN|14|LG DRUM|1258.35|t ideas affix along 6353|drab linen blue steel floral|Manufacturer#5|Brand#54|LARGE PLATED COPPER|46|MED CAN|1259.35|ain qui 6354|lawn midnight olive plum blush|Manufacturer#4|Brand#43|SMALL PLATED NICKEL|22|MED PACK|1260.35| final theodolites caj 6355|burlywood moccasin beige dodger lace|Manufacturer#5|Brand#55|STANDARD BURNISHED COPPER|41|LG PACK|1261.35|reful 6356|moccasin magenta dim slate orchid|Manufacturer#3|Brand#35|STANDARD POLISHED STEEL|8|MED CASE|1262.35|according to the 6357|chartreuse snow sandy saddle blush|Manufacturer#1|Brand#12|LARGE BURNISHED NICKEL|14|LG BAG|1263.35|ng foxe 6358|slate royal ghost cyan dark|Manufacturer#5|Brand#51|ECONOMY BURNISHED TIN|35|JUMBO PACK|1264.35|slyly fluffily 6359|cream mint yellow grey sandy|Manufacturer#3|Brand#33|PROMO BRUSHED BRASS|10|WRAP CAN|1265.35|es are blithely. 6360|floral drab salmon purple navy|Manufacturer#3|Brand#32|PROMO BRUSHED COPPER|26|JUMBO PACK|1266.36|al instructions a 6361|orchid magenta indian cream ivory|Manufacturer#5|Brand#55|STANDARD PLATED TIN|35|MED PACK|1267.36|ts use. slyly exp 6362|maroon peru navy drab lawn|Manufacturer#2|Brand#21|SMALL PLATED COPPER|7|JUMBO PACK|1268.36|ecial accounts at 6363|midnight orange maroon forest tomato|Manufacturer#1|Brand#12|ECONOMY BRUSHED TIN|16|LG JAR|1269.36|press the 6364|yellow almond tan chartreuse cream|Manufacturer#5|Brand#54|ECONOMY ANODIZED COPPER|11|LG BAG|1270.36|longside of th 6365|maroon coral magenta salmon mint|Manufacturer#5|Brand#55|SMALL BRUSHED TIN|37|SM CASE|1271.36|le fluffily reque 6366|lawn blue rose gainsboro navajo|Manufacturer#5|Brand#54|STANDARD BURNISHED BRASS|23|SM BAG|1272.36|he furiously d 6367|dark papaya misty forest violet|Manufacturer#4|Brand#44|LARGE PLATED STEEL|14|MED DRUM|1273.36|sleep after th 6368|moccasin linen cornflower light chartreuse|Manufacturer#3|Brand#31|MEDIUM POLISHED BRASS|2|SM CASE|1274.36|r the depo 6369|chocolate lawn navajo violet antique|Manufacturer#4|Brand#44|MEDIUM POLISHED TIN|4|WRAP PKG|1275.36|riously specia 6370|dodger sienna pink aquamarine sky|Manufacturer#5|Brand#53|LARGE ANODIZED TIN|28|LG PKG|1276.37|ctions 6371|peach chocolate dark midnight frosted|Manufacturer#2|Brand#24|STANDARD PLATED BRASS|7|LG JAR|1277.37|ely even reques 6372|plum wheat tomato blue dim|Manufacturer#2|Brand#22|STANDARD PLATED BRASS|5|SM CAN|1278.37|e fluf 6373|royal gainsboro pale lemon misty|Manufacturer#5|Brand#52|STANDARD ANODIZED BRASS|40|MED DRUM|1279.37| across the slyly ex 6374|navajo white plum lemon midnight|Manufacturer#5|Brand#53|ECONOMY BRUSHED STEEL|40|MED CAN|1280.37|xpress 6375|dim peach cornsilk magenta gainsboro|Manufacturer#3|Brand#33|MEDIUM ANODIZED NICKEL|3|LG CAN|1281.37|eodolites. 
ironic r 6376|white coral burlywood ivory ghost|Manufacturer#3|Brand#32|LARGE BURNISHED STEEL|22|JUMBO JAR|1282.37|to the busily fi 6377|magenta black burnished lawn blush|Manufacturer#1|Brand#12|MEDIUM BURNISHED COPPER|38|LG BAG|1283.37|fily eve 6378|turquoise dim pale red tomato|Manufacturer#5|Brand#52|STANDARD ANODIZED COPPER|21|JUMBO BAG|1284.37|furiously final ide 6379|almond magenta light violet chiffon|Manufacturer#3|Brand#31|LARGE ANODIZED COPPER|27|JUMBO CAN|1285.37|hout the 6380|sky saddle azure yellow cornflower|Manufacturer#3|Brand#33|LARGE BRUSHED NICKEL|29|WRAP JAR|1286.38|the quic 6381|seashell chocolate midnight cyan misty|Manufacturer#2|Brand#25|LARGE BURNISHED BRASS|49|WRAP BOX|1287.38|nag quickly q 6382|orange peru chartreuse linen honeydew|Manufacturer#4|Brand#42|MEDIUM BRUSHED BRASS|8|SM JAR|1288.38|s lose blithely 6383|slate light spring magenta snow|Manufacturer#2|Brand#22|MEDIUM POLISHED STEEL|4|WRAP BAG|1289.38|gular asymptotes. 6384|dark snow maroon spring cornsilk|Manufacturer#5|Brand#51|SMALL ANODIZED BRASS|18|LG PKG|1290.38|unts. blithely dar 6385|sienna white gainsboro peach aquamarine|Manufacturer#1|Brand#11|LARGE BURNISHED NICKEL|24|WRAP BOX|1291.38|s slee 6386|navy azure blue chiffon papaya|Manufacturer#2|Brand#24|STANDARD POLISHED NICKEL|18|MED BAG|1292.38|furiously. 6387|slate firebrick orchid light azure|Manufacturer#4|Brand#45|STANDARD BURNISHED COPPER|25|SM PKG|1293.38|ar instructions. caref 6388|seashell pale maroon floral coral|Manufacturer#2|Brand#23|PROMO POLISHED TIN|47|SM DRUM|1294.38|onic package 6389|antique blush green khaki purple|Manufacturer#5|Brand#51|STANDARD PLATED BRASS|12|JUMBO PACK|1295.38|r foxes. blithely b 6390|bisque pink gainsboro rose lavender|Manufacturer#3|Brand#32|PROMO BRUSHED BRASS|22|MED BOX|1296.39|ctions cajole about 6391|rosy powder yellow linen cyan|Manufacturer#2|Brand#25|LARGE BURNISHED COPPER|1|LG DRUM|1297.39|unusua 6392|linen orchid navajo white ivory|Manufacturer#5|Brand#53|ECONOMY POLISHED COPPER|28|MED BAG|1298.39|furiousl 6393|cornflower dodger khaki rose blush|Manufacturer#4|Brand#42|PROMO ANODIZED TIN|32|JUMBO PACK|1299.39|oxes. regular accou 6394|salmon pink dim azure tomato|Manufacturer#1|Brand#12|PROMO BRUSHED TIN|26|MED PACK|1300.39|cording to the pe 6395|hot turquoise rose ghost burnished|Manufacturer#3|Brand#33|LARGE BRUSHED TIN|22|MED BOX|1301.39|quickly i 6396|dim cornflower frosted pale purple|Manufacturer#3|Brand#34|MEDIUM PLATED BRASS|8|SM BAG|1302.39|t the enticingly fina 6397|turquoise ivory hot rosy violet|Manufacturer#3|Brand#34|STANDARD PLATED COPPER|9|SM JAR|1303.39|accounts sle 6398|papaya sandy black forest lace|Manufacturer#1|Brand#13|ECONOMY BURNISHED BRASS|31|SM BOX|1304.39|ve the iro 6399|papaya saddle midnight sky aquamarine|Manufacturer#4|Brand#42|LARGE BURNISHED COPPER|30|SM JAR|1305.39|nstruction 6400|royal papaya navajo rose thistle|Manufacturer#4|Brand#42|SMALL ANODIZED COPPER|14|SM DRUM|1306.40|d the ironic instruc 6401|floral smoke saddle antique mint|Manufacturer#3|Brand#33|SMALL BRUSHED STEEL|11|SM BAG|1307.40|ial dolphins abov 6402|lawn light puff dark deep|Manufacturer#1|Brand#12|STANDARD POLISHED NICKEL|35|MED JAR|1308.40|he carefully ironi 6403|chiffon tan navajo grey plum|Manufacturer#2|Brand#21|LARGE ANODIZED BRASS|28|WRAP BOX|1309.40|e blithely. 
sly 6404|rosy turquoise saddle ghost lace|Manufacturer#2|Brand#22|PROMO BURNISHED COPPER|29|SM DRUM|1310.40| requ 6405|wheat metallic sandy ivory spring|Manufacturer#3|Brand#33|LARGE ANODIZED STEEL|24|LG BAG|1311.40| the ironic frays 6406|dim metallic burnished plum salmon|Manufacturer#2|Brand#25|PROMO ANODIZED BRASS|41|LG CAN|1312.40|furio 6407|blue red forest azure violet|Manufacturer#2|Brand#25|STANDARD POLISHED TIN|36|LG CASE|1313.40|y regular requests sl 6408|turquoise frosted tomato navy moccasin|Manufacturer#5|Brand#54|PROMO PLATED TIN|23|JUMBO PKG|1314.40| accounts 6409|frosted plum dark purple aquamarine|Manufacturer#1|Brand#12|PROMO POLISHED TIN|7|SM CAN|1315.40|osits. regu 6410|violet misty rosy antique olive|Manufacturer#4|Brand#44|PROMO BURNISHED TIN|12|MED CAN|1316.41|pecial instruc 6411|lemon deep almond red yellow|Manufacturer#1|Brand#14|STANDARD PLATED TIN|11|WRAP PKG|1317.41| the furiously sp 6412|misty plum moccasin slate orchid|Manufacturer#1|Brand#11|MEDIUM PLATED COPPER|37|JUMBO CAN|1318.41|its. ev 6413|peach azure cornsilk chocolate brown|Manufacturer#5|Brand#51|SMALL PLATED TIN|11|SM CASE|1319.41|inal a 6414|ghost floral khaki aquamarine hot|Manufacturer#4|Brand#43|PROMO BURNISHED TIN|20|WRAP DRUM|1320.41|tructions haggle 6415|forest saddle royal maroon slate|Manufacturer#5|Brand#55|STANDARD BRUSHED STEEL|28|WRAP BOX|1321.41|pending packag 6416|firebrick ivory tomato chiffon floral|Manufacturer#3|Brand#31|PROMO PLATED BRASS|22|JUMBO CASE|1322.41|lyly around t 6417|lace dark thistle medium navajo|Manufacturer#1|Brand#13|PROMO BURNISHED STEEL|23|SM PKG|1323.41|its nod 6418|rose puff orange moccasin royal|Manufacturer#2|Brand#22|PROMO PLATED NICKEL|6|JUMBO CASE|1324.41|furio 6419|chartreuse seashell lemon floral pale|Manufacturer#1|Brand#11|STANDARD POLISHED TIN|47|MED CAN|1325.41|s. perman 6420|azure sky honeydew puff papaya|Manufacturer#2|Brand#24|ECONOMY POLISHED BRASS|36|WRAP CAN|1326.42|reful 6421|sienna peru lace dodger mint|Manufacturer#2|Brand#23|LARGE POLISHED STEEL|10|MED PKG|1327.42|tructions. dogged 6422|olive dark black wheat navy|Manufacturer#2|Brand#24|MEDIUM ANODIZED STEEL|39|MED PACK|1328.42| the s 6423|magenta ghost pale blue honeydew|Manufacturer#3|Brand#33|SMALL PLATED TIN|36|LG BOX|1329.42| even packages hagg 6424|rosy khaki black burlywood floral|Manufacturer#1|Brand#13|LARGE POLISHED BRASS|17|JUMBO BOX|1330.42| pinto beans h 6425|dodger lawn honeydew ghost blush|Manufacturer#1|Brand#12|STANDARD POLISHED BRASS|50|LG PKG|1331.42|ackages. blithel 6426|green steel frosted burlywood puff|Manufacturer#2|Brand#23|LARGE PLATED TIN|44|LG CASE|1332.42|long the carefull 6427|thistle powder floral bisque sienna|Manufacturer#1|Brand#14|MEDIUM POLISHED BRASS|28|SM JAR|1333.42| the 6428|chartreuse lime burlywood magenta lawn|Manufacturer#4|Brand#43|SMALL POLISHED NICKEL|38|JUMBO CAN|1334.42|ironic dependenci 6429|olive tomato rose blue maroon|Manufacturer#2|Brand#22|SMALL ANODIZED TIN|36|JUMBO DRUM|1335.42|ess blithely around t 6430|turquoise tan indian olive ghost|Manufacturer#2|Brand#22|SMALL PLATED STEEL|3|LG JAR|1336.43| regular asymptotes 6431|yellow beige pale gainsboro orchid|Manufacturer#4|Brand#45|ECONOMY BRUSHED COPPER|50|WRAP BOX|1337.43|ironic epitaphs. pint 6432|floral spring snow papaya black|Manufacturer#2|Brand#23|LARGE PLATED TIN|33|SM BOX|1338.43|s. quick, spec 6433|khaki peru turquoise blanched lemon|Manufacturer#1|Brand#14|ECONOMY BRUSHED BRASS|19|LG DRUM|1339.43|dencies doubt. 
unusu 6434|midnight aquamarine chartreuse powder khaki|Manufacturer#3|Brand#35|MEDIUM ANODIZED NICKEL|46|SM BOX|1340.43|e slyly 6435|frosted wheat royal grey ghost|Manufacturer#5|Brand#51|ECONOMY ANODIZED TIN|12|MED JAR|1341.43|press pack 6436|navy saddle misty blanched gainsboro|Manufacturer#3|Brand#34|PROMO BURNISHED BRASS|40|MED CAN|1342.43|across 6437|brown drab mint sky chocolate|Manufacturer#3|Brand#31|PROMO POLISHED BRASS|16|LG CAN|1343.43| special re 6438|ivory coral misty steel salmon|Manufacturer#5|Brand#52|ECONOMY POLISHED BRASS|23|LG CASE|1344.43|ly express instr 6439|thistle steel lace chocolate chiffon|Manufacturer#5|Brand#55|SMALL ANODIZED COPPER|41|LG JAR|1345.43|arefully bold 6440|floral dim almond ghost thistle|Manufacturer#1|Brand#12|SMALL BURNISHED COPPER|32|LG PKG|1346.44|ously pending r 6441|peach honeydew red blush puff|Manufacturer#1|Brand#13|MEDIUM POLISHED COPPER|16|SM JAR|1347.44|deposi 6442|magenta ivory orange coral grey|Manufacturer#4|Brand#41|ECONOMY BRUSHED TIN|22|WRAP PACK|1348.44|ckages. ca 6443|rose misty brown lavender orange|Manufacturer#5|Brand#55|LARGE PLATED BRASS|26|JUMBO JAR|1349.44|ep quietly across 6444|blanched frosted blush forest burnished|Manufacturer#4|Brand#41|ECONOMY ANODIZED NICKEL|15|SM JAR|1350.44| ironic accounts si 6445|navy lemon grey sky rosy|Manufacturer#1|Brand#11|LARGE ANODIZED STEEL|9|SM CAN|1351.44|pinto beans haggle c 6446|almond floral metallic blush azure|Manufacturer#5|Brand#52|PROMO ANODIZED STEEL|29|SM BOX|1352.44|y regula 6447|olive purple chiffon linen magenta|Manufacturer#2|Brand#21|SMALL PLATED COPPER|48|WRAP BOX|1353.44| sleep. blithely 6448|tomato chartreuse chocolate seashell moccasin|Manufacturer#1|Brand#12|SMALL ANODIZED NICKEL|17|LG BOX|1354.44| fluffily. regula 6449|yellow orchid cream puff dark|Manufacturer#3|Brand#34|SMALL BURNISHED STEEL|12|LG PACK|1355.44|special d 6450|puff light red goldenrod salmon|Manufacturer#5|Brand#55|PROMO ANODIZED BRASS|14|LG PKG|1356.45|equests w 6451|chartreuse cream deep moccasin hot|Manufacturer#1|Brand#13|LARGE POLISHED COPPER|27|WRAP CASE|1357.45| haggle slyly qui 6452|red steel magenta frosted firebrick|Manufacturer#2|Brand#22|PROMO ANODIZED STEEL|36|MED DRUM|1358.45|regular reque 6453|peach wheat sienna dark black|Manufacturer#5|Brand#51|MEDIUM BURNISHED TIN|37|MED JAR|1359.45|y. blithely silen 6454|wheat olive papaya pale goldenrod|Manufacturer#4|Brand#41|LARGE BRUSHED BRASS|20|MED DRUM|1360.45|ackages. 
slyl 6455|rose snow olive grey navajo|Manufacturer#5|Brand#51|LARGE BURNISHED COPPER|14|WRAP JAR|1361.45|nic dep 6456|lemon almond spring ghost forest|Manufacturer#4|Brand#43|SMALL POLISHED STEEL|22|JUMBO JAR|1362.45|e furiously bold depo 6457|antique pink yellow salmon coral|Manufacturer#3|Brand#33|LARGE BURNISHED NICKEL|50|MED BAG|1363.45| unusual, 6458|turquoise wheat honeydew maroon midnight|Manufacturer#2|Brand#25|MEDIUM POLISHED STEEL|11|SM CAN|1364.45|s sleep blithely abov 6459|navajo almond ghost chartreuse plum|Manufacturer#1|Brand#14|MEDIUM BRUSHED STEEL|14|JUMBO BOX|1365.45|d the ideas nag 6460|peach rose spring snow moccasin|Manufacturer#4|Brand#44|PROMO ANODIZED STEEL|36|MED DRUM|1366.46| reque 6461|black navy khaki sandy ivory|Manufacturer#2|Brand#21|MEDIUM BURNISHED BRASS|33|JUMBO PKG|1367.46|ular accounts hag 6462|papaya lemon lime misty light|Manufacturer#4|Brand#45|MEDIUM BRUSHED NICKEL|16|JUMBO DRUM|1368.46|ns sleep 6463|linen rose blue white honeydew|Manufacturer#3|Brand#31|PROMO BRUSHED BRASS|11|LG BOX|1369.46| sometime 6464|blanched spring chiffon lace indian|Manufacturer#3|Brand#31|PROMO PLATED BRASS|20|JUMBO PKG|1370.46|thely ideas. 6465|metallic cornsilk purple light firebrick|Manufacturer#3|Brand#35|SMALL ANODIZED NICKEL|41|LG JAR|1371.46|he busily regu 6466|thistle azure orange honeydew ivory|Manufacturer#5|Brand#54|LARGE ANODIZED NICKEL|33|MED DRUM|1372.46|lithely 6467|forest sandy azure thistle pale|Manufacturer#4|Brand#41|MEDIUM BURNISHED TIN|3|SM BOX|1373.46|lites. caref 6468|grey honeydew cornsilk light firebrick|Manufacturer#1|Brand#15|SMALL ANODIZED BRASS|1|JUMBO CASE|1374.46| regular a 6469|turquoise navajo indian olive forest|Manufacturer#1|Brand#14|MEDIUM ANODIZED STEEL|8|WRAP JAR|1375.46|ged req 6470|cornflower khaki powder blue black|Manufacturer#4|Brand#43|SMALL PLATED COPPER|45|WRAP CAN|1376.47|ts int 6471|cyan drab almond red burnished|Manufacturer#4|Brand#41|ECONOMY ANODIZED COPPER|40|WRAP PACK|1377.47|g pinto beans dete 6472|cornflower metallic cream bisque plum|Manufacturer#2|Brand#23|PROMO POLISHED STEEL|36|SM PACK|1378.47|al courts wake fur 6473|royal linen antique cornsilk cyan|Manufacturer#3|Brand#31|ECONOMY ANODIZED COPPER|35|LG CASE|1379.47| packages wake s 6474|navy sandy khaki gainsboro tan|Manufacturer#5|Brand#51|MEDIUM ANODIZED BRASS|25|SM BAG|1380.47|nding attainments. 6475|peru peach navajo white lemon|Manufacturer#4|Brand#41|LARGE BURNISHED COPPER|39|LG BAG|1381.47|special reque 6476|peru thistle moccasin olive smoke|Manufacturer#4|Brand#42|MEDIUM POLISHED NICKEL|42|JUMBO JAR|1382.47|uriously 6477|antique black seashell deep burnished|Manufacturer#3|Brand#34|SMALL BURNISHED NICKEL|6|MED BAG|1383.47| careful, ironic 6478|bisque royal seashell floral coral|Manufacturer#1|Brand#12|ECONOMY ANODIZED TIN|29|MED DRUM|1384.47| deposits. pending, do 6479|rose maroon powder orange snow|Manufacturer#3|Brand#33|MEDIUM BURNISHED NICKEL|22|WRAP JAR|1385.47| the final, regul 6480|floral spring beige black cornsilk|Manufacturer#5|Brand#54|STANDARD ANODIZED TIN|3|WRAP BAG|1386.48|re. 
quickly ir 6481|green khaki cornsilk misty lavender|Manufacturer#4|Brand#42|SMALL BRUSHED BRASS|26|LG JAR|1387.48|ages sleep flu 6482|indian sky orchid orange chocolate|Manufacturer#4|Brand#44|MEDIUM POLISHED NICKEL|24|LG CASE|1388.48| ironic asymptotes wak 6483|almond blush navy mint ghost|Manufacturer#3|Brand#35|PROMO ANODIZED STEEL|5|JUMBO CAN|1389.48|ccounts nag 6484|smoke drab peach pale misty|Manufacturer#2|Brand#25|PROMO PLATED COPPER|1|WRAP BAG|1390.48|w slyly pendi 6485|cream saddle slate floral frosted|Manufacturer#2|Brand#24|MEDIUM POLISHED TIN|26|MED BAG|1391.48|. quickl 6486|frosted indian drab seashell ghost|Manufacturer#2|Brand#23|PROMO BRUSHED TIN|22|WRAP PACK|1392.48|ithely regular dep 6487|slate dodger linen drab green|Manufacturer#1|Brand#11|ECONOMY ANODIZED TIN|46|SM BOX|1393.48|e even 6488|puff royal spring firebrick honeydew|Manufacturer#4|Brand#44|ECONOMY BURNISHED BRASS|4|LG BOX|1394.48|atelet 6489|ivory dodger floral antique plum|Manufacturer#5|Brand#53|STANDARD ANODIZED BRASS|28|LG CASE|1395.48|hes. ironically unusua 6490|cream gainsboro slate puff spring|Manufacturer#3|Brand#34|SMALL PLATED COPPER|36|MED CASE|1396.49|packages. furiously fi 6491|blue goldenrod rose lawn lavender|Manufacturer#2|Brand#21|SMALL ANODIZED STEEL|13|SM DRUM|1397.49|ests boost sly 6492|navy burnished deep cornflower sandy|Manufacturer#5|Brand#55|SMALL POLISHED STEEL|47|WRAP JAR|1398.49|s play carefully 6493|antique turquoise snow gainsboro slate|Manufacturer#5|Brand#52|SMALL PLATED COPPER|36|MED PACK|1399.49|s. silent, even d 6494|brown cyan light peach ivory|Manufacturer#1|Brand#12|LARGE BRUSHED BRASS|11|LG BOX|1400.49|packages. bl 6495|bisque dark orange lawn frosted|Manufacturer#4|Brand#42|LARGE BURNISHED NICKEL|27|SM PACK|1401.49|posits around the 6496|sienna blue goldenrod bisque magenta|Manufacturer#3|Brand#35|MEDIUM BRUSHED BRASS|2|JUMBO CAN|1402.49|e quick 6497|lace plum black deep almond|Manufacturer#4|Brand#45|PROMO BURNISHED TIN|46|WRAP CAN|1403.49|ly above the 6498|grey tomato rosy blanched brown|Manufacturer#1|Brand#14|SMALL POLISHED TIN|26|WRAP PACK|1404.49| packages 6499|cornflower magenta dodger metallic spring|Manufacturer#2|Brand#23|STANDARD POLISHED BRASS|46|WRAP PKG|1405.49|sts are s 6500|antique deep almond black aquamarine|Manufacturer#2|Brand#23|ECONOMY ANODIZED STEEL|1|WRAP DRUM|1406.50|detect slyly 6501|medium snow floral yellow sky|Manufacturer#2|Brand#22|SMALL ANODIZED TIN|46|JUMBO BOX|1407.50|sleep quickly e 6502|cornflower saddle lemon spring peru|Manufacturer#2|Brand#23|LARGE POLISHED COPPER|6|LG BOX|1408.50|ending, brave 6503|royal almond papaya gainsboro orchid|Manufacturer#4|Brand#42|SMALL BRUSHED NICKEL|1|MED CASE|1409.50| deposits cajole furio 6504|orange salmon dark wheat purple|Manufacturer#3|Brand#34|PROMO BRUSHED TIN|48|SM PACK|1410.50|gle slyly ironic, sile 6505|lavender metallic burlywood almond khaki|Manufacturer#4|Brand#42|ECONOMY ANODIZED STEEL|4|LG JAR|1411.50| special i 6506|slate green navy burlywood almond|Manufacturer#5|Brand#52|MEDIUM PLATED COPPER|44|LG CAN|1412.50|de of the express 6507|thistle lemon orchid lace beige|Manufacturer#1|Brand#14|STANDARD PLATED COPPER|47|WRAP CAN|1413.50|uctions haggl 6508|beige maroon almond misty cornflower|Manufacturer#4|Brand#44|SMALL BURNISHED STEEL|10|SM JAR|1414.50|even accou 6509|sienna misty thistle medium powder|Manufacturer#3|Brand#32|MEDIUM ANODIZED STEEL|27|LG PKG|1415.50|ackages for t 6510|indian aquamarine hot yellow drab|Manufacturer#5|Brand#52|MEDIUM BURNISHED BRASS|29|LG JAR|1416.51|sts 
across the 6511|burnished hot navy steel peru|Manufacturer#3|Brand#33|STANDARD BURNISHED TIN|39|MED JAR|1417.51|gular excuses. fluf 6512|light coral wheat powder rosy|Manufacturer#2|Brand#25|STANDARD PLATED BRASS|3|JUMBO CASE|1418.51|efully expres 6513|moccasin cornsilk misty frosted honeydew|Manufacturer#5|Brand#53|ECONOMY BURNISHED STEEL|21|LG BAG|1419.51| ironic dolphins i 6514|violet spring powder saddle burnished|Manufacturer#5|Brand#51|SMALL PLATED BRASS|28|JUMBO PACK|1420.51|al excuses ca 6515|sienna khaki brown linen purple|Manufacturer#2|Brand#21|SMALL BURNISHED BRASS|2|MED CAN|1421.51|accoun 6516|azure sandy ghost orange antique|Manufacturer#3|Brand#35|ECONOMY BURNISHED TIN|9|LG CASE|1422.51|ar pa 6517|metallic grey plum olive cyan|Manufacturer#4|Brand#41|SMALL BURNISHED COPPER|21|MED PACK|1423.51|nstructi 6518|honeydew goldenrod floral white yellow|Manufacturer#3|Brand#31|SMALL BRUSHED NICKEL|16|WRAP CASE|1424.51|according t 6519|indian seashell cream azure coral|Manufacturer#4|Brand#45|STANDARD ANODIZED NICKEL|17|LG PACK|1425.51|ckly regular 6520|green honeydew cyan yellow peru|Manufacturer#2|Brand#22|PROMO PLATED STEEL|4|LG DRUM|1426.52|y steal 6521|misty powder black linen forest|Manufacturer#2|Brand#24|PROMO BURNISHED NICKEL|42|SM CASE|1427.52|osits after the 6522|lavender almond spring navy blanched|Manufacturer#2|Brand#24|ECONOMY BURNISHED TIN|16|SM PACK|1428.52|s dazzle: careful 6523|hot maroon lawn moccasin ivory|Manufacturer#1|Brand#11|MEDIUM ANODIZED STEEL|43|MED BOX|1429.52|out the quickly 6524|powder blue linen brown peru|Manufacturer#1|Brand#15|PROMO BURNISHED TIN|34|MED CASE|1430.52|unts detect quick 6525|ghost dim antique honeydew dodger|Manufacturer#1|Brand#11|PROMO POLISHED COPPER|11|JUMBO BOX|1431.52|rding t 6526|forest black azure ghost beige|Manufacturer#4|Brand#41|ECONOMY PLATED COPPER|26|MED PKG|1432.52|accounts wake careful 6527|lace coral royal deep sienna|Manufacturer#2|Brand#21|ECONOMY PLATED STEEL|33|SM BOX|1433.52|theodolite 6528|midnight orange floral dim cyan|Manufacturer#5|Brand#51|PROMO ANODIZED COPPER|34|SM DRUM|1434.52|decoys 6529|sienna turquoise olive dark tomato|Manufacturer#1|Brand#15|SMALL BRUSHED STEEL|7|SM BOX|1435.52|ld accou 6530|lawn maroon azure rosy blue|Manufacturer#3|Brand#33|ECONOMY ANODIZED COPPER|33|JUMBO CASE|1436.53|ual packages 6531|floral metallic medium honeydew firebrick|Manufacturer#2|Brand#22|LARGE BURNISHED COPPER|28|WRAP CASE|1437.53|ths after the 6532|navajo burnished misty blue sandy|Manufacturer#5|Brand#54|SMALL BRUSHED COPPER|20|SM DRUM|1438.53| theodolites detect 6533|misty slate cream forest olive|Manufacturer#3|Brand#35|SMALL ANODIZED NICKEL|41|WRAP PKG|1439.53|symptotes 6534|blue metallic midnight forest powder|Manufacturer#3|Brand#32|STANDARD BRUSHED STEEL|11|WRAP JAR|1440.53|reful 6535|linen thistle sandy brown chocolate|Manufacturer#5|Brand#53|SMALL POLISHED COPPER|8|JUMBO JAR|1441.53|ular packa 6536|rosy plum violet peru lemon|Manufacturer#2|Brand#22|ECONOMY BRUSHED NICKEL|31|WRAP BAG|1442.53|aggle. reg 6537|olive cornsilk blanched seashell magenta|Manufacturer#5|Brand#53|PROMO POLISHED NICKEL|20|MED BOX|1443.53|furiously unu 6538|powder turquoise bisque grey steel|Manufacturer#5|Brand#51|PROMO ANODIZED NICKEL|10|JUMBO BAG|1444.53|hins wake ac 6539|black olive metallic azure spring|Manufacturer#1|Brand#13|SMALL POLISHED COPPER|29|MED JAR|1445.53|symptotes? 
quickly 6540|papaya salmon sienna ivory lime|Manufacturer#3|Brand#32|MEDIUM ANODIZED COPPER|5|JUMBO PACK|1446.54|beans wake re 6541|metallic thistle plum frosted light|Manufacturer#2|Brand#23|SMALL POLISHED BRASS|49|JUMBO DRUM|1447.54|its haggle along 6542|violet peach sienna blue rosy|Manufacturer#3|Brand#34|MEDIUM BRUSHED BRASS|38|SM BAG|1448.54|asymptote 6543|sky red goldenrod bisque tomato|Manufacturer#1|Brand#14|STANDARD POLISHED COPPER|28|JUMBO PACK|1449.54| packages haggle car 6544|firebrick aquamarine black midnight red|Manufacturer#2|Brand#25|STANDARD BRUSHED TIN|13|SM PKG|1450.54|e slyl 6545|powder blue orchid slate lemon|Manufacturer#4|Brand#45|SMALL PLATED COPPER|36|LG PACK|1451.54|ording to the slyly 6546|magenta sandy pale lavender steel|Manufacturer#2|Brand#25|LARGE BRUSHED BRASS|41|MED PKG|1452.54|ic pinto beans use 6547|forest khaki firebrick brown light|Manufacturer#2|Brand#23|LARGE PLATED COPPER|33|WRAP CASE|1453.54|ructions 6548|green slate brown orange sandy|Manufacturer#5|Brand#54|PROMO BURNISHED STEEL|7|WRAP BAG|1454.54|atele 6549|gainsboro sandy slate beige burlywood|Manufacturer#3|Brand#33|SMALL BRUSHED NICKEL|45|LG CAN|1455.54|ckages. 6550|cyan cornflower magenta cornsilk lawn|Manufacturer#2|Brand#24|LARGE POLISHED STEEL|23|WRAP PACK|1456.55|requests 6551|firebrick deep smoke mint peach|Manufacturer#5|Brand#51|PROMO ANODIZED BRASS|35|JUMBO BOX|1457.55|s. carefully 6552|cream lemon chocolate sienna black|Manufacturer#5|Brand#51|LARGE ANODIZED NICKEL|25|LG BOX|1458.55|ronic 6553|white green misty wheat indian|Manufacturer#2|Brand#24|SMALL POLISHED COPPER|33|JUMBO CASE|1459.55|ending excu 6554|drab maroon deep cyan floral|Manufacturer#1|Brand#13|MEDIUM POLISHED NICKEL|42|JUMBO PKG|1460.55|pending accounts 6555|brown thistle frosted sienna moccasin|Manufacturer#2|Brand#25|LARGE ANODIZED STEEL|23|WRAP BAG|1461.55|are furiou 6556|forest misty seashell navy ghost|Manufacturer#3|Brand#32|MEDIUM BRUSHED NICKEL|42|MED CAN|1462.55|nusual ideas. 
sp 6557|lawn blue navy rose navajo|Manufacturer#1|Brand#15|STANDARD BURNISHED NICKEL|28|SM BOX|1463.55|pendin 6558|floral cornflower burlywood rose firebrick|Manufacturer#5|Brand#52|SMALL ANODIZED BRASS|3|LG DRUM|1464.55|d ideas are 6559|honeydew steel seashell peach cornflower|Manufacturer#3|Brand#31|PROMO POLISHED TIN|8|JUMBO DRUM|1465.55|pades boost blithel 6560|salmon dim moccasin frosted indian|Manufacturer#3|Brand#34|STANDARD BRUSHED BRASS|7|SM BAG|1466.56|d the daringly regula 6561|dodger bisque blush peach lawn|Manufacturer#3|Brand#35|MEDIUM PLATED BRASS|42|LG BOX|1467.56|r the blithely spec 6562|spring hot blanched chocolate orange|Manufacturer#5|Brand#52|ECONOMY ANODIZED STEEL|3|LG PACK|1468.56|g accounts slee 6563|peach royal lemon frosted tomato|Manufacturer#5|Brand#55|ECONOMY POLISHED COPPER|43|LG PACK|1469.56|yly unusual patt 6564|slate brown floral firebrick grey|Manufacturer#4|Brand#44|PROMO BURNISHED TIN|44|WRAP JAR|1470.56|pendencies boost blith 6565|cyan lace chiffon papaya coral|Manufacturer#5|Brand#53|MEDIUM POLISHED STEEL|32|MED DRUM|1471.56|ing to the s 6566|misty salmon frosted antique honeydew|Manufacturer#4|Brand#42|PROMO ANODIZED BRASS|38|LG CASE|1472.56| furiously fur 6567|snow magenta dark saddle sky|Manufacturer#1|Brand#13|STANDARD POLISHED NICKEL|47|WRAP BAG|1473.56|express deposits 6568|dark lime sandy burnished blanched|Manufacturer#2|Brand#23|MEDIUM POLISHED BRASS|50|MED JAR|1474.56|t, pendi 6569|ivory snow red olive lawn|Manufacturer#4|Brand#44|SMALL BURNISHED COPPER|14|JUMBO JAR|1475.56|g the brave foxes dazz 6570|medium lawn puff magenta saddle|Manufacturer#2|Brand#25|SMALL BURNISHED NICKEL|43|WRAP PACK|1476.57|kages. r 6571|lace saddle lavender khaki pink|Manufacturer#5|Brand#51|SMALL PLATED BRASS|34|JUMBO CASE|1477.57|en dolphi 6572|green salmon lace powder lemon|Manufacturer#3|Brand#33|PROMO ANODIZED NICKEL|7|JUMBO PKG|1478.57|quickly final foxe 6573|cyan smoke firebrick papaya brown|Manufacturer#1|Brand#13|LARGE ANODIZED BRASS|32|JUMBO PACK|1479.57|ges affix. 6574|lavender mint olive honeydew lemon|Manufacturer#3|Brand#33|LARGE BURNISHED COPPER|45|MED PACK|1480.57|c pinto beans haggle c 6575|aquamarine red lime pale sandy|Manufacturer#3|Brand#34|STANDARD ANODIZED NICKEL|41|SM BAG|1481.57|into beans 6576|lawn black cyan tomato cornsilk|Manufacturer#5|Brand#51|LARGE BRUSHED NICKEL|9|LG CAN|1482.57|. carefully silent pi 6577|firebrick royal purple khaki burnished|Manufacturer#2|Brand#22|ECONOMY BURNISHED COPPER|41|WRAP CASE|1483.57|oost quickly acro 6578|yellow cyan moccasin aquamarine cornflower|Manufacturer#3|Brand#35|PROMO ANODIZED BRASS|46|SM CASE|1484.57| requests wake blithel 6579|rosy chartreuse green red navy|Manufacturer#4|Brand#45|STANDARD BRUSHED STEEL|17|SM CAN|1485.57|special accounts wil 6580|dark thistle coral cornsilk red|Manufacturer#5|Brand#52|PROMO BRUSHED STEEL|43|JUMBO BOX|1486.58|s caj 6581|khaki purple lawn royal slate|Manufacturer#3|Brand#33|LARGE BRUSHED COPPER|20|MED PKG|1487.58|ily qui 6582|rosy frosted puff drab dim|Manufacturer#1|Brand#15|LARGE BURNISHED BRASS|19|JUMBO CASE|1488.58|y bold ideas. 
bold 6583|magenta blue burnished azure midnight|Manufacturer#5|Brand#52|SMALL BRUSHED BRASS|10|SM CAN|1489.58|y unusual deposit 6584|peach hot bisque white snow|Manufacturer#4|Brand#42|STANDARD BRUSHED BRASS|44|LG BAG|1490.58|ent th 6585|tomato snow cornflower black mint|Manufacturer#4|Brand#43|SMALL BRUSHED NICKEL|39|JUMBO BOX|1491.58|ly regular deposit 6586|navy yellow frosted tomato wheat|Manufacturer#3|Brand#32|MEDIUM POLISHED COPPER|46|WRAP DRUM|1492.58| nod stealthily 6587|tan cornflower plum dark peru|Manufacturer#1|Brand#14|SMALL PLATED TIN|30|WRAP PKG|1493.58|e slyly 6588|floral tomato white medium burlywood|Manufacturer#3|Brand#34|LARGE BURNISHED BRASS|31|JUMBO BAG|1494.58|refully pending pin 6589|olive white spring misty cream|Manufacturer#1|Brand#11|ECONOMY PLATED NICKEL|30|LG PACK|1495.58| even pin 6590|firebrick chartreuse goldenrod honeydew steel|Manufacturer#1|Brand#13|SMALL BRUSHED COPPER|28|MED DRUM|1496.59|es sleep about th 6591|gainsboro khaki chocolate rosy navajo|Manufacturer#1|Brand#11|ECONOMY PLATED NICKEL|29|SM BAG|1497.59|x daringly bol 6592|puff olive drab grey red|Manufacturer#4|Brand#44|LARGE POLISHED NICKEL|4|MED BAG|1498.59| acco 6593|blanched lime lace magenta rosy|Manufacturer#1|Brand#14|ECONOMY POLISHED COPPER|41|MED PKG|1499.59| slyly regular the 6594|black khaki navajo purple turquoise|Manufacturer#2|Brand#21|MEDIUM BURNISHED TIN|20|WRAP PACK|1500.59|pendenci 6595|bisque lemon firebrick gainsboro pale|Manufacturer#3|Brand#31|SMALL PLATED NICKEL|36|MED CASE|1501.59|ar theod 6596|mint misty coral turquoise cornsilk|Manufacturer#2|Brand#24|LARGE POLISHED TIN|15|SM DRUM|1502.59|y unusual deposits ar 6597|metallic tan khaki beige salmon|Manufacturer#4|Brand#44|LARGE POLISHED TIN|11|WRAP CAN|1503.59|xes against the care 6598|cream brown goldenrod lawn burnished|Manufacturer#3|Brand#32|LARGE PLATED COPPER|33|SM PACK|1504.59|odolites 6599|plum midnight turquoise tan violet|Manufacturer#5|Brand#54|LARGE ANODIZED NICKEL|49|MED BOX|1505.59|fluffily against the f 6600|aquamarine blanched drab floral burnished|Manufacturer#3|Brand#31|PROMO PLATED STEEL|17|JUMBO CASE|1506.60| are around 6601|cyan orange metallic blush navajo|Manufacturer#2|Brand#21|LARGE PLATED NICKEL|8|MED CASE|1507.60|ingly brave dol 6602|blush violet azure black tan|Manufacturer#5|Brand#53|SMALL BURNISHED COPPER|45|JUMBO CASE|1508.60|ly silent deposits. a 6603|wheat blanched grey slate brown|Manufacturer#4|Brand#42|PROMO BRUSHED NICKEL|33|MED JAR|1509.60|posits are furiously 6604|powder smoke rose maroon cornsilk|Manufacturer#4|Brand#43|STANDARD BURNISHED NICKEL|48|SM CASE|1510.60|ckly bold ideas. 6605|peru pale spring orange papaya|Manufacturer#3|Brand#32|MEDIUM ANODIZED STEEL|10|SM PKG|1511.60|lyly. furiously 6606|rosy grey midnight wheat lavender|Manufacturer#5|Brand#51|SMALL ANODIZED STEEL|7|SM DRUM|1512.60|stealthily even ideas 6607|khaki goldenrod floral misty navy|Manufacturer#1|Brand#14|LARGE BRUSHED BRASS|39|SM CAN|1513.60|d dependencies. 
6608|lemon ivory cornsilk navy seashell|Manufacturer#2|Brand#21|SMALL PLATED TIN|26|WRAP DRUM|1514.60|lyly among the fu 6609|cornflower medium grey royal slate|Manufacturer#3|Brand#33|SMALL ANODIZED BRASS|41|WRAP BOX|1515.60|cross the fluff 6610|moccasin lavender khaki dim white|Manufacturer#3|Brand#31|ECONOMY BRUSHED COPPER|46|SM JAR|1516.61|are furio 6611|chocolate puff rose indian purple|Manufacturer#4|Brand#42|PROMO ANODIZED BRASS|32|MED BOX|1517.61|fix slyly 6612|blue hot wheat misty gainsboro|Manufacturer#1|Brand#14|MEDIUM PLATED TIN|44|JUMBO JAR|1518.61|dinos. blithely 6613|rose slate antique maroon beige|Manufacturer#3|Brand#31|SMALL BURNISHED COPPER|7|WRAP PACK|1519.61|slyly 6614|misty peach blue cornflower indian|Manufacturer#5|Brand#53|MEDIUM BURNISHED TIN|46|JUMBO DRUM|1520.61|ly bold packages cajo 6615|rosy drab sienna misty royal|Manufacturer#5|Brand#54|SMALL ANODIZED BRASS|16|MED PACK|1521.61|ar requests. blithe 6616|azure coral sienna midnight lavender|Manufacturer#5|Brand#51|ECONOMY ANODIZED STEEL|9|LG PKG|1522.61| the q 6617|dodger black tomato powder peru|Manufacturer#3|Brand#33|ECONOMY BURNISHED COPPER|3|WRAP PACK|1523.61|ithely a 6618|floral spring antique moccasin peru|Manufacturer#4|Brand#43|PROMO BRUSHED BRASS|25|SM CASE|1524.61|r the unusual, 6619|violet frosted medium ivory cornsilk|Manufacturer#2|Brand#22|LARGE ANODIZED COPPER|21|JUMBO BOX|1525.61|s hang bli 6620|puff khaki peru midnight chocolate|Manufacturer#5|Brand#53|STANDARD ANODIZED BRASS|34|MED PACK|1526.62|ounts. pen 6621|steel deep brown cornflower burnished|Manufacturer#3|Brand#31|SMALL BRUSHED COPPER|7|JUMBO CAN|1527.62|tructions bre 6622|rose burlywood blue deep salmon|Manufacturer#2|Brand#22|ECONOMY POLISHED COPPER|16|LG PACK|1528.62| foxes are b 6623|salmon lavender blue medium papaya|Manufacturer#4|Brand#41|ECONOMY POLISHED NICKEL|5|WRAP DRUM|1529.62|lyly above the 6624|tan blanched cream dodger grey|Manufacturer#2|Brand#23|MEDIUM BRUSHED TIN|26|SM CASE|1530.62|s above the slyly cl 6625|chartreuse ghost dim frosted sandy|Manufacturer#2|Brand#23|MEDIUM BRUSHED BRASS|18|MED CASE|1531.62| at the slyly iro 6626|chartreuse lime hot saddle rose|Manufacturer#3|Brand#31|MEDIUM BURNISHED COPPER|17|LG DRUM|1532.62|ctions 6627|medium peru chartreuse orange royal|Manufacturer#4|Brand#42|STANDARD POLISHED NICKEL|32|JUMBO PACK|1533.62|ronic, even dep 6628|lemon sienna medium pink saddle|Manufacturer#5|Brand#55|PROMO PLATED TIN|3|SM CASE|1534.62| across the silent, 6629|deep bisque lavender snow ivory|Manufacturer#4|Brand#43|STANDARD PLATED TIN|29|JUMBO JAR|1535.62|ggle blithely expres 6630|spring metallic chocolate deep blush|Manufacturer#1|Brand#14|STANDARD ANODIZED BRASS|47|MED BAG|1536.63|t slyly al 6631|drab moccasin lace white ghost|Manufacturer#3|Brand#33|SMALL PLATED BRASS|28|WRAP CASE|1537.63|es nag. multi 6632|sandy orchid misty navajo antique|Manufacturer#1|Brand#15|ECONOMY PLATED COPPER|35|JUMBO CAN|1538.63|eas cajole bl 6633|orchid thistle sky linen khaki|Manufacturer#2|Brand#21|ECONOMY ANODIZED STEEL|27|WRAP JAR|1539.63|ounts thrash 6634|lace olive indian orange dim|Manufacturer#4|Brand#41|STANDARD BURNISHED STEEL|34|SM CAN|1540.63| above the 6635|frosted midnight khaki tomato sandy|Manufacturer#4|Brand#43|PROMO PLATED NICKEL|38|SM CAN|1541.63|side 6636|red linen dodger floral hot|Manufacturer#5|Brand#52|STANDARD POLISHED NICKEL|45|MED BOX|1542.63|nstruct 6637|chiffon bisque frosted magenta red|Manufacturer#1|Brand#13|LARGE PLATED BRASS|41|MED CAN|1543.63|. 
blithely 6638|sandy antique cornflower dim honeydew|Manufacturer#1|Brand#13|PROMO BRUSHED NICKEL|50|LG BAG|1544.63|uickly final 6639|turquoise midnight smoke maroon chiffon|Manufacturer#1|Brand#11|MEDIUM POLISHED STEEL|34|LG CASE|1545.63|y deposits 6640|pink sandy red mint black|Manufacturer#5|Brand#54|ECONOMY POLISHED TIN|3|JUMBO PKG|1546.64|ld depos 6641|cyan indian grey drab chartreuse|Manufacturer#2|Brand#23|ECONOMY ANODIZED COPPER|11|SM BOX|1547.64|ts. quickly 6642|gainsboro navajo cream medium maroon|Manufacturer#3|Brand#33|ECONOMY BURNISHED NICKEL|34|JUMBO BOX|1548.64|al accounts. carefull 6643|white lemon blanched deep yellow|Manufacturer#3|Brand#33|PROMO ANODIZED TIN|44|SM BOX|1549.64|the ironic ins 6644|khaki sandy moccasin goldenrod almond|Manufacturer#2|Brand#24|SMALL BRUSHED COPPER|25|MED CAN|1550.64|ts nag blithely above 6645|blush antique lemon plum dim|Manufacturer#4|Brand#41|STANDARD POLISHED STEEL|43|LG JAR|1551.64|pendencies affix. regu 6646|cornflower green slate spring medium|Manufacturer#5|Brand#55|STANDARD BRUSHED STEEL|2|WRAP BAG|1552.64|equests. de 6647|turquoise forest misty dodger aquamarine|Manufacturer#1|Brand#13|ECONOMY PLATED TIN|3|MED PKG|1553.64|ptotes. slyly ir 6648|forest beige chiffon hot peach|Manufacturer#1|Brand#14|STANDARD PLATED NICKEL|2|SM DRUM|1554.64|gside of the 6649|rosy saddle beige almond light|Manufacturer#2|Brand#22|ECONOMY ANODIZED NICKEL|18|WRAP BOX|1555.64|t. regular ideas are: 6650|almond white misty goldenrod blanched|Manufacturer#3|Brand#32|MEDIUM ANODIZED COPPER|27|SM PACK|1556.65| after the warhors 6651|burlywood sandy olive orange smoke|Manufacturer#2|Brand#23|ECONOMY ANODIZED NICKEL|20|WRAP PKG|1557.65| frays. 6652|red puff green sandy smoke|Manufacturer#1|Brand#12|ECONOMY BRUSHED TIN|33|SM JAR|1558.65|. blithely bold ac 6653|sienna papaya cornflower coral peru|Manufacturer#3|Brand#32|ECONOMY POLISHED NICKEL|10|LG PACK|1559.65|bout the unusual, i 6654|gainsboro dim goldenrod maroon light|Manufacturer#5|Brand#52|STANDARD BURNISHED COPPER|40|MED BAG|1560.65|en decoy 6655|burlywood snow frosted dark drab|Manufacturer#4|Brand#42|SMALL BURNISHED BRASS|36|WRAP PACK|1561.65|? blithel 6656|chiffon indian lace purple cream|Manufacturer#2|Brand#22|LARGE PLATED BRASS|31|MED PACK|1562.65|al excuses 6657|tomato lavender blush drab lawn|Manufacturer#2|Brand#24|SMALL POLISHED NICKEL|19|LG CASE|1563.65|uests. bravely unusua 6658|thistle goldenrod yellow beige black|Manufacturer#4|Brand#43|ECONOMY PLATED BRASS|20|MED BAG|1564.65|nding pl 6659|yellow pink pale rose honeydew|Manufacturer#2|Brand#21|LARGE BRUSHED NICKEL|22|MED JAR|1565.65|xpress asympto 6660|lime blanched rose salmon forest|Manufacturer#2|Brand#23|STANDARD PLATED BRASS|39|SM PKG|1566.66|p. qu 6661|beige coral ghost papaya antique|Manufacturer#2|Brand#23|PROMO BURNISHED STEEL|2|MED CASE|1567.66|ly special instr 6662|almond salmon hot sandy forest|Manufacturer#4|Brand#43|SMALL PLATED TIN|7|SM PKG|1568.66|r requests. 6663|spring seashell violet aquamarine dodger|Manufacturer#2|Brand#25|LARGE ANODIZED COPPER|32|LG JAR|1569.66|c ideas. 6664|lace peach snow pale spring|Manufacturer#3|Brand#35|LARGE POLISHED NICKEL|21|LG CAN|1570.66| quickly pending dol 6665|goldenrod chocolate peru midnight lemon|Manufacturer#5|Brand#55|LARGE BURNISHED BRASS|22|WRAP JAR|1571.66| furiously to 6666|azure blue smoke metallic powder|Manufacturer#1|Brand#15|LARGE PLATED STEEL|42|SM CAN|1572.66|y deposits. 
even, regu 6667|coral beige turquoise chiffon burnished|Manufacturer#1|Brand#14|MEDIUM POLISHED STEEL|13|MED PKG|1573.66|ents along the quic 6668|turquoise burnished honeydew aquamarine ghost|Manufacturer#5|Brand#55|STANDARD PLATED BRASS|23|WRAP BOX|1574.66|ong the furiously 6669|rose forest lace chocolate peru|Manufacturer#1|Brand#12|STANDARD PLATED BRASS|40|LG CAN|1575.66|ckly regular do 6670|snow wheat steel mint pink|Manufacturer#4|Brand#43|PROMO PLATED TIN|3|WRAP JAR|1576.67|iously regul 6671|lemon linen wheat frosted honeydew|Manufacturer#5|Brand#52|SMALL BURNISHED TIN|35|JUMBO PACK|1577.67|ncies? care 6672|slate dodger pink blanched thistle|Manufacturer#3|Brand#31|MEDIUM BURNISHED STEEL|32|LG CASE|1578.67| pending accou 6673|lavender forest grey slate chiffon|Manufacturer#3|Brand#31|LARGE PLATED BRASS|20|LG PACK|1579.67|ic dependenc 6674|lace frosted rosy chiffon blanched|Manufacturer#2|Brand#21|SMALL ANODIZED NICKEL|47|JUMBO JAR|1580.67|refully even deco 6675|burlywood purple deep goldenrod pink|Manufacturer#5|Brand#53|SMALL POLISHED COPPER|39|WRAP JAR|1581.67| furious foxe 6676|saddle tan sienna burlywood green|Manufacturer#5|Brand#53|SMALL BURNISHED NICKEL|41|WRAP CAN|1582.67|e final asymptotes. s 6677|chiffon royal lace yellow drab|Manufacturer#3|Brand#31|MEDIUM ANODIZED BRASS|31|WRAP CASE|1583.67|riously. unusual depen 6678|spring peru burnished moccasin khaki|Manufacturer#3|Brand#35|PROMO PLATED TIN|11|SM JAR|1584.67|ages ha 6679|lace moccasin puff violet plum|Manufacturer#2|Brand#21|SMALL PLATED TIN|37|LG BOX|1585.67| cour 6680|dim lace cream honeydew wheat|Manufacturer#1|Brand#14|STANDARD POLISHED BRASS|24|SM BAG|1586.68|n dolphins. slyly reg 6681|steel gainsboro burlywood cream hot|Manufacturer#2|Brand#23|STANDARD ANODIZED STEEL|7|LG PACK|1587.68|en excuses. sl 6682|slate sandy mint chiffon peru|Manufacturer#3|Brand#33|MEDIUM BRUSHED COPPER|30|MED CAN|1588.68|e. even packa 6683|cream sandy dodger dim cornflower|Manufacturer#1|Brand#13|SMALL POLISHED STEEL|49|LG BAG|1589.68| pending, silent pac 6684|green firebrick blanched goldenrod grey|Manufacturer#1|Brand#12|MEDIUM BURNISHED STEEL|2|MED CAN|1590.68|y about the c 6685|royal sienna mint green khaki|Manufacturer#4|Brand#45|ECONOMY ANODIZED BRASS|49|MED PKG|1591.68|detect 6686|frosted cornsilk misty light lime|Manufacturer#5|Brand#53|MEDIUM BURNISHED NICKEL|6|WRAP PACK|1592.68| the requests wake sl 6687|bisque pink navy ivory orange|Manufacturer#4|Brand#42|ECONOMY BURNISHED COPPER|8|SM CASE|1593.68|ully silent pa 6688|sienna olive lemon dark blue|Manufacturer#5|Brand#53|STANDARD BRUSHED NICKEL|34|JUMBO PACK|1594.68|ully regular theo 6689|maroon gainsboro cream turquoise cyan|Manufacturer#5|Brand#53|MEDIUM PLATED BRASS|23|WRAP PKG|1595.68| blithely special r 6690|azure wheat lawn midnight spring|Manufacturer#4|Brand#44|SMALL BRUSHED TIN|20|JUMBO CASE|1596.69|deposits. closely ev 6691|seashell chiffon peru chocolate antique|Manufacturer#2|Brand#24|MEDIUM PLATED COPPER|7|JUMBO BAG|1597.69| depo 6692|white deep cornflower purple ivory|Manufacturer#3|Brand#32|ECONOMY PLATED COPPER|24|MED CAN|1598.69|iously ironic court 6693|moccasin cornflower lemon goldenrod light|Manufacturer#4|Brand#44|MEDIUM ANODIZED COPPER|26|JUMBO DRUM|1599.69|regular d 6694|misty linen white spring metallic|Manufacturer#4|Brand#45|LARGE BRUSHED BRASS|26|MED PACK|1600.69|gular requests. 
6695|brown yellow mint burlywood medium|Manufacturer#4|Brand#45|STANDARD POLISHED STEEL|13|WRAP BOX|1601.69|riously un 6696|moccasin mint saddle gainsboro seashell|Manufacturer#2|Brand#24|SMALL PLATED STEEL|1|MED DRUM|1602.69|ithely stealt 6697|cream smoke orange brown lawn|Manufacturer#4|Brand#42|PROMO POLISHED STEEL|8|SM PKG|1603.69|ic deposits. slyly spe 6698|orange lace lawn pink papaya|Manufacturer#4|Brand#45|PROMO BURNISHED BRASS|2|SM PACK|1604.69|s. carefu 6699|almond antique metallic honeydew green|Manufacturer#4|Brand#43|MEDIUM BRUSHED TIN|30|LG BAG|1605.69|wake around 6700|frosted metallic khaki coral steel|Manufacturer#4|Brand#44|MEDIUM BURNISHED BRASS|16|SM JAR|1606.70| even theodolit 6701|antique pale honeydew spring maroon|Manufacturer#2|Brand#23|PROMO BRUSHED NICKEL|10|WRAP PKG|1607.70|nto beans doubt qu 6702|violet sienna gainsboro brown cornsilk|Manufacturer#4|Brand#44|SMALL POLISHED NICKEL|44|MED CAN|1608.70|hy reques 6703|light coral sky rose almond|Manufacturer#1|Brand#15|LARGE BRUSHED BRASS|37|JUMBO BOX|1609.70|ic pinto be 6704|lemon burnished firebrick hot royal|Manufacturer#5|Brand#51|SMALL POLISHED NICKEL|23|SM DRUM|1610.70|lyly ironic p 6705|drab sienna cream yellow navajo|Manufacturer#1|Brand#15|STANDARD BURNISHED NICKEL|11|LG BAG|1611.70|ing to the quickly fi 6706|black drab peru almond moccasin|Manufacturer#4|Brand#44|STANDARD ANODIZED STEEL|8|SM PKG|1612.70|fluffil 6707|burlywood snow dark peru saddle|Manufacturer#2|Brand#22|MEDIUM BRUSHED NICKEL|14|SM BOX|1613.70|nusual 6708|goldenrod linen orchid antique azure|Manufacturer#2|Brand#24|SMALL POLISHED BRASS|3|SM DRUM|1614.70|ic deposits 6709|lawn pale red steel rose|Manufacturer#4|Brand#45|LARGE BURNISHED STEEL|16|WRAP CAN|1615.70|iously 6710|plum brown peru cornflower red|Manufacturer#3|Brand#33|LARGE ANODIZED TIN|45|WRAP DRUM|1616.71| are blithely slyly i 6711|honeydew brown powder blue plum|Manufacturer#1|Brand#15|PROMO PLATED NICKEL|35|MED BOX|1617.71|ctions are furiously 6712|moccasin burlywood peru dodger smoke|Manufacturer#3|Brand#35|ECONOMY ANODIZED COPPER|41|WRAP JAR|1618.71|ests hag 6713|sandy midnight honeydew dodger orchid|Manufacturer#5|Brand#55|MEDIUM PLATED NICKEL|31|JUMBO BOX|1619.71|deposits was 6714|midnight lemon blue frosted misty|Manufacturer#3|Brand#31|LARGE BURNISHED COPPER|50|SM BAG|1620.71|detect 6715|ivory azure lavender cyan dim|Manufacturer#3|Brand#33|MEDIUM ANODIZED COPPER|13|WRAP BOX|1621.71|y final deposits a 6716|frosted almond azure hot dim|Manufacturer#3|Brand#31|LARGE ANODIZED STEEL|11|MED CAN|1622.71|ly. platelets are ca 6717|lace sienna plum gainsboro blanched|Manufacturer#1|Brand#15|LARGE BRUSHED STEEL|14|JUMBO DRUM|1623.71|usly ironic pinto 6718|lawn coral papaya steel chartreuse|Manufacturer#4|Brand#41|SMALL BRUSHED COPPER|5|LG CAN|1624.71|eep ironic war 6719|ghost black papaya ivory coral|Manufacturer#2|Brand#21|ECONOMY ANODIZED NICKEL|21|LG CAN|1625.71|kages. 6720|beige burlywood spring olive orange|Manufacturer#5|Brand#53|SMALL BRUSHED NICKEL|8|MED JAR|1626.72|refully regular p 6721|burlywood chocolate midnight puff pink|Manufacturer#2|Brand#23|PROMO POLISHED BRASS|20|LG PKG|1627.72|ely ironic courts. 
i 6722|ghost moccasin lemon bisque chartreuse|Manufacturer#2|Brand#23|MEDIUM PLATED TIN|10|LG CAN|1628.72|ly even deposits 6723|mint firebrick sienna yellow chocolate|Manufacturer#5|Brand#53|SMALL ANODIZED STEEL|27|SM CAN|1629.72|quickly 6724|seashell sandy pink lemon magenta|Manufacturer#3|Brand#33|PROMO ANODIZED COPPER|27|MED CAN|1630.72|y regular dep 6725|bisque khaki pink chartreuse goldenrod|Manufacturer#4|Brand#45|ECONOMY POLISHED BRASS|50|LG PACK|1631.72|ronic, re 6726|dodger metallic honeydew cream lavender|Manufacturer#3|Brand#33|ECONOMY POLISHED NICKEL|19|SM DRUM|1632.72|fluff 6727|tomato antique brown smoke dark|Manufacturer#2|Brand#24|ECONOMY PLATED TIN|41|WRAP BOX|1633.72|ly. packages 6728|cream antique slate honeydew plum|Manufacturer#5|Brand#53|SMALL BRUSHED COPPER|14|LG PACK|1634.72|riously regular 6729|forest mint cyan goldenrod azure|Manufacturer#1|Brand#12|LARGE BURNISHED TIN|36|LG BOX|1635.72|ong the warthogs i 6730|slate rose cyan blue khaki|Manufacturer#2|Brand#21|STANDARD BURNISHED STEEL|38|SM PKG|1636.73|lites. 6731|steel hot blue dodger ivory|Manufacturer#2|Brand#21|MEDIUM ANODIZED STEEL|10|SM BAG|1637.73|es caj 6732|dark smoke navy dodger papaya|Manufacturer#3|Brand#32|STANDARD PLATED COPPER|16|LG CAN|1638.73|nts-- fluffily i 6733|sienna coral lemon gainsboro grey|Manufacturer#1|Brand#12|SMALL BRUSHED STEEL|11|LG BAG|1639.73|y. slyl 6734|yellow plum powder navajo lavender|Manufacturer#5|Brand#52|SMALL BRUSHED TIN|32|JUMBO DRUM|1640.73|above 6735|sky wheat goldenrod spring plum|Manufacturer#4|Brand#41|MEDIUM BURNISHED STEEL|44|LG PACK|1641.73|ly regular asympto 6736|olive burlywood azure cyan plum|Manufacturer#2|Brand#24|MEDIUM POLISHED TIN|16|SM BOX|1642.73|ts. ex 6737|frosted plum saddle black maroon|Manufacturer#5|Brand#52|PROMO BURNISHED NICKEL|9|WRAP BOX|1643.73|c forges. quickly s 6738|drab slate steel sienna papaya|Manufacturer#3|Brand#32|STANDARD PLATED TIN|50|SM PKG|1644.73|ajole regul 6739|bisque tan coral blue salmon|Manufacturer#1|Brand#11|STANDARD POLISHED COPPER|43|JUMBO CAN|1645.73|blithely unusual 6740|gainsboro mint slate blue coral|Manufacturer#2|Brand#23|LARGE POLISHED STEEL|43|LG JAR|1646.74|xcuses. fluff 6741|brown salmon medium aquamarine magenta|Manufacturer#3|Brand#32|MEDIUM BURNISHED TIN|20|SM BOX|1647.74|osits. furio 6742|wheat yellow dark maroon black|Manufacturer#5|Brand#54|LARGE PLATED BRASS|34|JUMBO PACK|1648.74|ithely special deposi 6743|misty gainsboro sky turquoise forest|Manufacturer#5|Brand#54|STANDARD ANODIZED COPPER|38|MED JAR|1649.74|hely final 6744|navy lavender magenta white dodger|Manufacturer#1|Brand#15|SMALL POLISHED STEEL|6|WRAP DRUM|1650.74| carefully final a 6745|cream steel magenta pale bisque|Manufacturer#3|Brand#33|ECONOMY POLISHED COPPER|12|WRAP BOX|1651.74| theodol 6746|thistle rosy deep mint plum|Manufacturer#5|Brand#51|ECONOMY BURNISHED COPPER|24|MED DRUM|1652.74|refully? slyly 6747|brown drab puff dim thistle|Manufacturer#1|Brand#12|MEDIUM ANODIZED TIN|40|WRAP CAN|1653.74|ymptotes mai 6748|sienna plum papaya green drab|Manufacturer#3|Brand#35|SMALL BURNISHED TIN|26|LG CAN|1654.74|ing to th 6749|yellow royal thistle beige lawn|Manufacturer#4|Brand#41|MEDIUM POLISHED COPPER|44|JUMBO BAG|1655.74|e fluffily after the b 6750|indian dark rosy navy dodger|Manufacturer#5|Brand#51|LARGE POLISHED BRASS|50|SM DRUM|1656.75|arefully ironic packag 6751|sandy steel tomato pink royal|Manufacturer#1|Brand#13|ECONOMY BURNISHED STEEL|15|WRAP PACK|1657.75| foxes. 
slyl 6752|medium rose sandy puff navy|Manufacturer#4|Brand#42|STANDARD BRUSHED NICKEL|4|SM BOX|1658.75| idea 6753|firebrick lemon burnished ghost maroon|Manufacturer#5|Brand#54|STANDARD BURNISHED STEEL|14|SM PACK|1659.75|ar ideas 6754|medium pink magenta rose burlywood|Manufacturer#1|Brand#15|MEDIUM ANODIZED STEEL|13|WRAP PACK|1660.75|lithely 6755|wheat hot medium honeydew grey|Manufacturer#4|Brand#41|LARGE PLATED COPPER|43|SM BAG|1661.75| to the even p 6756|rosy drab bisque purple ghost|Manufacturer#1|Brand#15|LARGE BURNISHED BRASS|47|JUMBO PACK|1662.75|ickly e 6757|steel brown navajo firebrick medium|Manufacturer#5|Brand#52|LARGE ANODIZED STEEL|5|JUMBO BOX|1663.75|ites serve quickly i 6758|lavender rose honeydew frosted metallic|Manufacturer#5|Brand#55|PROMO PLATED BRASS|38|JUMBO BAG|1664.75|s sleep 6759|burlywood antique bisque brown orchid|Manufacturer#2|Brand#24|ECONOMY BRUSHED COPPER|23|SM DRUM|1665.75|ts nag blithel 6760|rosy azure linen gainsboro chiffon|Manufacturer#5|Brand#52|STANDARD BRUSHED NICKEL|34|MED BAG|1666.76|furiously regula 6761|misty pale aquamarine orange azure|Manufacturer#5|Brand#54|SMALL BURNISHED TIN|26|LG DRUM|1667.76|ular platelets. 6762|rose royal chiffon magenta metallic|Manufacturer#1|Brand#15|SMALL ANODIZED BRASS|34|JUMBO BOX|1668.76|ckage 6763|lace papaya azure aquamarine powder|Manufacturer#4|Brand#43|MEDIUM PLATED STEEL|41|JUMBO PKG|1669.76|wake ca 6764|puff bisque forest almond salmon|Manufacturer#2|Brand#24|PROMO BURNISHED STEEL|50|JUMBO BOX|1670.76|uses cajole furio 6765|maroon tomato royal lavender papaya|Manufacturer#4|Brand#45|MEDIUM ANODIZED TIN|20|JUMBO BAG|1671.76|cing deposits. furi 6766|lace moccasin smoke honeydew burlywood|Manufacturer#2|Brand#22|SMALL POLISHED STEEL|30|MED JAR|1672.76|osits. reg 6767|maroon ghost rosy violet lemon|Manufacturer#2|Brand#25|STANDARD ANODIZED BRASS|12|SM CASE|1673.76|e bli 6768|lace moccasin orchid red beige|Manufacturer#1|Brand#13|SMALL ANODIZED COPPER|40|SM CAN|1674.76|r package 6769|turquoise yellow sky pale olive|Manufacturer#1|Brand#13|PROMO PLATED BRASS|26|JUMBO PKG|1675.76| pack 6770|red pale navajo slate black|Manufacturer#1|Brand#13|LARGE POLISHED COPPER|16|SM DRUM|1676.77|ly ironic asymptotes 6771|misty papaya almond royal chocolate|Manufacturer#3|Brand#31|MEDIUM ANODIZED STEEL|7|JUMBO CASE|1677.77|ully s 6772|lime steel black puff orange|Manufacturer#4|Brand#41|ECONOMY BURNISHED COPPER|8|JUMBO DRUM|1678.77|accounts cajole blithe 6773|orange lime slate powder plum|Manufacturer#2|Brand#23|MEDIUM ANODIZED TIN|24|LG DRUM|1679.77|slyly silent account 6774|cornsilk royal gainsboro powder cream|Manufacturer#5|Brand#51|SMALL ANODIZED NICKEL|40|MED PKG|1680.77|e evenly unusu 6775|rosy navy cream cornflower burnished|Manufacturer#5|Brand#51|LARGE ANODIZED BRASS|22|LG BAG|1681.77|efully expr 6776|puff sienna burlywood midnight drab|Manufacturer#2|Brand#25|SMALL BRUSHED COPPER|26|SM PKG|1682.77|equests boos 6777|honeydew cornflower cyan navajo tan|Manufacturer#1|Brand#15|STANDARD ANODIZED BRASS|35|JUMBO CASE|1683.77| carefully specia 6778|medium goldenrod antique green sandy|Manufacturer#2|Brand#25|MEDIUM POLISHED COPPER|18|MED CASE|1684.77|lways final requests. 6779|floral violet navy tomato maroon|Manufacturer#4|Brand#42|ECONOMY PLATED COPPER|23|LG PACK|1685.77|thely u 6780|rose deep cyan puff navajo|Manufacturer#4|Brand#41|PROMO POLISHED NICKEL|17|SM PKG|1686.78|es. 
carefully stealthy 6781|saddle pale tan lawn sienna|Manufacturer#2|Brand#25|LARGE BRUSHED STEEL|2|MED BAG|1687.78|ecial 6782|spring khaki bisque honeydew pale|Manufacturer#1|Brand#11|STANDARD BURNISHED STEEL|17|SM CASE|1688.78|counts sleep blithely 6783|hot ghost navy peru blush|Manufacturer#4|Brand#45|LARGE ANODIZED BRASS|7|LG BAG|1689.78|c pinto 6784|thistle burlywood papaya gainsboro lemon|Manufacturer#4|Brand#42|LARGE ANODIZED STEEL|32|JUMBO BOX|1690.78|unts nag blithely wit 6785|chartreuse violet pale puff orchid|Manufacturer#5|Brand#54|STANDARD BURNISHED BRASS|32|MED PKG|1691.78|uickly theodoli 6786|chiffon khaki lawn gainsboro turquoise|Manufacturer#1|Brand#12|LARGE ANODIZED NICKEL|11|JUMBO CAN|1692.78|ently. even, 6787|magenta coral red blush green|Manufacturer#4|Brand#43|STANDARD PLATED NICKEL|14|JUMBO BAG|1693.78|le carefully af 6788|blanched tomato hot chiffon steel|Manufacturer#2|Brand#25|LARGE ANODIZED TIN|50|JUMBO DRUM|1694.78|y bold 6789|cream dodger moccasin violet green|Manufacturer#4|Brand#45|STANDARD ANODIZED TIN|11|LG BAG|1695.78| among the final de 6790|bisque firebrick mint plum burnished|Manufacturer#4|Brand#42|PROMO BURNISHED COPPER|39|SM BOX|1696.79| among the b 6791|moccasin magenta chocolate snow beige|Manufacturer#4|Brand#43|LARGE BURNISHED STEEL|48|WRAP CAN|1697.79|s. pinto beans mainta 6792|aquamarine powder green spring honeydew|Manufacturer#1|Brand#13|MEDIUM PLATED NICKEL|48|MED BAG|1698.79|y above the stealthil 6793|misty grey violet plum lace|Manufacturer#3|Brand#35|PROMO BRUSHED NICKEL|14|JUMBO BAG|1699.79|refully r 6794|khaki seashell pale orange smoke|Manufacturer#3|Brand#34|LARGE BRUSHED NICKEL|23|MED CAN|1700.79|es among 6795|plum maroon azure hot slate|Manufacturer#5|Brand#53|ECONOMY BRUSHED NICKEL|48|WRAP CASE|1701.79|xes w 6796|cyan blue lavender puff tan|Manufacturer#2|Brand#21|PROMO BRUSHED STEEL|14|JUMBO PKG|1702.79|accounts detect furio 6797|almond indian cyan antique snow|Manufacturer#1|Brand#14|STANDARD POLISHED STEEL|8|LG JAR|1703.79|ake furiou 6798|sandy cyan navy hot black|Manufacturer#5|Brand#52|MEDIUM POLISHED TIN|36|SM CAN|1704.79|ely ironic accoun 6799|seashell wheat hot almond rose|Manufacturer#3|Brand#32|MEDIUM BURNISHED NICKEL|1|SM DRUM|1705.79|requests wake f 6800|ghost lawn chartreuse drab plum|Manufacturer#1|Brand#13|STANDARD POLISHED TIN|15|MED CASE|1706.80| express instructi 6801|seashell ghost azure firebrick cornsilk|Manufacturer#3|Brand#34|SMALL ANODIZED BRASS|36|WRAP PKG|1707.80|oxes. foxes among t 6802|floral blush grey frosted wheat|Manufacturer#1|Brand#14|STANDARD BRUSHED NICKEL|41|WRAP BAG|1708.80|ing platele 6803|sienna saddle dodger azure smoke|Manufacturer#3|Brand#33|SMALL BRUSHED BRASS|46|MED BAG|1709.80|ular excus 6804|turquoise medium thistle puff peach|Manufacturer#2|Brand#25|MEDIUM BRUSHED COPPER|40|SM PKG|1710.80|lyly pending ideas. 6805|misty azure green orchid orange|Manufacturer#1|Brand#14|PROMO BURNISHED COPPER|30|WRAP PKG|1711.80| furiously 6806|blush bisque antique lemon gainsboro|Manufacturer#5|Brand#51|PROMO ANODIZED BRASS|28|SM BOX|1712.80|ages. 
bold foxe 6807|drab wheat snow moccasin saddle|Manufacturer#1|Brand#12|LARGE PLATED NICKEL|48|JUMBO PACK|1713.80| instructions de 6808|steel turquoise drab sky misty|Manufacturer#3|Brand#34|PROMO BURNISHED STEEL|3|MED PKG|1714.80|ously unusual 6809|puff slate misty powder spring|Manufacturer#5|Brand#54|MEDIUM ANODIZED STEEL|22|SM CASE|1715.80|ar requests cajol 6810|tan tomato seashell navajo lawn|Manufacturer#4|Brand#43|STANDARD BRUSHED NICKEL|21|JUMBO JAR|1716.81|uriously regular r 6811|navy blush cornflower lace dim|Manufacturer#5|Brand#54|STANDARD ANODIZED COPPER|42|WRAP BOX|1717.81|kly blithely even a 6812|lime chartreuse papaya blue blush|Manufacturer#4|Brand#41|MEDIUM BURNISHED STEEL|45|SM CAN|1718.81| accounts haggl 6813|light azure royal brown sienna|Manufacturer#2|Brand#25|STANDARD ANODIZED COPPER|17|MED JAR|1719.81|uickly 6814|dim cornflower yellow lemon almond|Manufacturer#4|Brand#44|STANDARD POLISHED BRASS|4|WRAP BOX|1720.81|kages. blithely 6815|deep burlywood aquamarine puff dark|Manufacturer#1|Brand#11|SMALL POLISHED COPPER|14|JUMBO DRUM|1721.81|ss accounts eat dar 6816|peru brown chartreuse dark frosted|Manufacturer#2|Brand#24|STANDARD BURNISHED NICKEL|26|MED BAG|1722.81|s are! slyly 6817|cornsilk peru lavender goldenrod cream|Manufacturer#5|Brand#52|PROMO ANODIZED TIN|8|SM JAR|1723.81|al pinto be 6818|floral smoke light deep pale|Manufacturer#5|Brand#51|PROMO POLISHED BRASS|40|JUMBO PACK|1724.81|nusua 6819|snow violet royal dark khaki|Manufacturer#2|Brand#24|STANDARD PLATED STEEL|5|JUMBO BOX|1725.81| prom 6820|light smoke orchid plum wheat|Manufacturer#1|Brand#11|STANDARD ANODIZED STEEL|1|SM PACK|1726.82|nal deposi 6821|red chocolate tomato orange black|Manufacturer#2|Brand#22|LARGE PLATED BRASS|48|SM DRUM|1727.82|g accounts cajole car 6822|sky deep medium goldenrod lime|Manufacturer#5|Brand#55|STANDARD BRUSHED NICKEL|4|MED CASE|1728.82|ependencies are thinl 6823|tomato white magenta floral ivory|Manufacturer#4|Brand#43|ECONOMY PLATED STEEL|9|JUMBO BOX|1729.82| ironi 6824|tomato firebrick antique mint lemon|Manufacturer#3|Brand#33|LARGE ANODIZED COPPER|45|WRAP JAR|1730.82|ctions alongs 6825|gainsboro lace steel floral orchid|Manufacturer#4|Brand#45|LARGE POLISHED COPPER|24|LG BAG|1731.82|ns. ex 6826|sienna cornflower green lime tomato|Manufacturer#5|Brand#51|MEDIUM POLISHED STEEL|19|JUMBO BOX|1732.82|nts. careful 6827|bisque indian tan red midnight|Manufacturer#3|Brand#32|SMALL BURNISHED BRASS|8|SM BOX|1733.82|accounts-- furiously 6828|bisque sandy powder cornflower blanched|Manufacturer#1|Brand#15|MEDIUM POLISHED TIN|11|JUMBO JAR|1734.82|ic req 6829|midnight lawn steel chocolate salmon|Manufacturer#1|Brand#13|LARGE BRUSHED BRASS|6|MED BAG|1735.82|e regu 6830|violet grey navajo dim blush|Manufacturer#5|Brand#54|PROMO BRUSHED TIN|44|MED CAN|1736.83|s haggle quickly 6831|medium lime papaya cream sky|Manufacturer#1|Brand#15|SMALL BURNISHED BRASS|21|LG JAR|1737.83| final asymptotes. fi 6832|medium blush aquamarine bisque deep|Manufacturer#2|Brand#22|SMALL POLISHED TIN|39|SM BOX|1738.83|xpress 6833|metallic snow black khaki coral|Manufacturer#4|Brand#41|MEDIUM BURNISHED COPPER|25|LG BOX|1739.83|usly final accounts 6834|grey yellow green honeydew navajo|Manufacturer#3|Brand#34|STANDARD BURNISHED COPPER|17|WRAP PACK|1740.83|s inte 6835|blush floral midnight magenta frosted|Manufacturer#4|Brand#44|SMALL ANODIZED NICKEL|38|SM CASE|1741.83|blithely 6836|pale lavender bisque yellow indian|Manufacturer#3|Brand#31|SMALL PLATED TIN|7|LG JAR|1742.83|ges. 
6837|drab salmon floral mint burlywood|Manufacturer#3|Brand#31|ECONOMY ANODIZED TIN|29|SM CASE|1743.83|sits na 6838|antique gainsboro aquamarine green spring|Manufacturer#1|Brand#15|PROMO ANODIZED STEEL|35|SM PACK|1744.83|ing packages. bold acc 6839|purple metallic saddle midnight magenta|Manufacturer#3|Brand#32|STANDARD PLATED BRASS|4|MED JAR|1745.83|s wake slyly. 6840|light drab snow coral deep|Manufacturer#1|Brand#15|SMALL ANODIZED NICKEL|26|WRAP BAG|1746.84|riously final ac 6841|lime cornflower ivory dim slate|Manufacturer#1|Brand#14|LARGE POLISHED NICKEL|3|SM DRUM|1747.84|y ruthles 6842|lime medium coral bisque pale|Manufacturer#3|Brand#31|ECONOMY PLATED NICKEL|1|WRAP JAR|1748.84|kages alongside 6843|honeydew violet cornsilk sienna antique|Manufacturer#3|Brand#35|STANDARD BRUSHED STEEL|46|LG PKG|1749.84|s use 6844|cyan chiffon orchid plum rose|Manufacturer#2|Brand#22|LARGE POLISHED TIN|49|SM CAN|1750.84|ess deposits sleep 6845|ghost lime violet cornsilk peach|Manufacturer#4|Brand#45|MEDIUM PLATED STEEL|40|WRAP JAR|1751.84|less fo 6846|white maroon chocolate smoke pink|Manufacturer#5|Brand#53|MEDIUM BURNISHED BRASS|35|WRAP CASE|1752.84|egular pinto 6847|pink moccasin peru puff saddle|Manufacturer#1|Brand#14|SMALL ANODIZED NICKEL|45|SM PACK|1753.84| accounts accord 6848|rosy honeydew plum tomato deep|Manufacturer#1|Brand#14|PROMO POLISHED BRASS|9|LG BOX|1754.84|romis 6849|powder pale dodger firebrick wheat|Manufacturer#3|Brand#34|STANDARD BURNISHED STEEL|32|JUMBO CASE|1755.84|ost sly 6850|brown blue tan hot blush|Manufacturer#5|Brand#53|ECONOMY ANODIZED STEEL|26|MED CAN|1756.85|ts integrate accordin 6851|red peach seashell honeydew burlywood|Manufacturer#5|Brand#51|MEDIUM BRUSHED TIN|36|JUMBO BAG|1757.85|uests kindle f 6852|pale cornsilk metallic brown beige|Manufacturer#2|Brand#23|PROMO POLISHED COPPER|25|SM PKG|1758.85| fluffily even ac 6853|purple sandy dark powder indian|Manufacturer#1|Brand#14|ECONOMY POLISHED COPPER|44|MED CASE|1759.85|bout the p 6854|azure navy gainsboro cream ghost|Manufacturer#4|Brand#41|ECONOMY BRUSHED BRASS|15|JUMBO JAR|1760.85|bout the carefully reg 6855|magenta spring orchid azure frosted|Manufacturer#5|Brand#51|PROMO ANODIZED NICKEL|6|WRAP DRUM|1761.85|l orbi 6856|blanched midnight cream papaya drab|Manufacturer#4|Brand#41|SMALL BRUSHED TIN|5|JUMBO CAN|1762.85|s haggle above the flu 6857|red spring blush cornsilk peach|Manufacturer#4|Brand#42|STANDARD POLISHED TIN|46|LG JAR|1763.85| quickly unu 6858|blush puff firebrick beige deep|Manufacturer#1|Brand#11|SMALL BRUSHED BRASS|30|WRAP BOX|1764.85|t evenly express ac 6859|snow lavender navy linen chartreuse|Manufacturer#3|Brand#35|ECONOMY ANODIZED NICKEL|35|MED PKG|1765.85|sual dolphi 6860|snow green linen dim orange|Manufacturer#1|Brand#14|LARGE POLISHED STEEL|9|LG JAR|1766.86|ly caref 6861|white violet coral olive metallic|Manufacturer#2|Brand#23|STANDARD POLISHED BRASS|32|MED BAG|1767.86| the slyly 6862|violet cornflower pink rose burnished|Manufacturer#1|Brand#13|PROMO POLISHED TIN|36|JUMBO CAN|1768.86|ckages cajole sl 6863|brown bisque linen cyan drab|Manufacturer#3|Brand#35|ECONOMY BURNISHED NICKEL|25|JUMBO CAN|1769.86|aggle slyly ste 6864|burnished misty firebrick cream papaya|Manufacturer#4|Brand#45|SMALL PLATED NICKEL|20|WRAP CAN|1770.86|y even accounts acros 6865|seashell salmon lace mint aquamarine|Manufacturer#3|Brand#35|MEDIUM BRUSHED NICKEL|17|SM BOX|1771.86|l depend 6866|goldenrod midnight metallic cornsilk wheat|Manufacturer#5|Brand#54|ECONOMY POLISHED COPPER|23|SM DRUM|1772.86|quickly along 
the slyl 6867|green puff blanched light lawn|Manufacturer#2|Brand#21|MEDIUM BRUSHED TIN|21|WRAP DRUM|1773.86|ns. final foxes al 6868|gainsboro goldenrod white forest navy|Manufacturer#5|Brand#52|STANDARD ANODIZED BRASS|17|MED BAG|1774.86|es. furiousl 6869|grey salmon seashell bisque linen|Manufacturer#1|Brand#14|MEDIUM PLATED BRASS|39|WRAP BAG|1775.86|nic ac 6870|chartreuse firebrick indian royal sky|Manufacturer#5|Brand#53|LARGE POLISHED TIN|33|WRAP BOX|1776.87|ar accounts affix bli 6871|peach steel turquoise sky forest|Manufacturer#1|Brand#11|ECONOMY PLATED STEEL|46|WRAP PKG|1777.87|r theodolites. ca 6872|hot almond azure lace forest|Manufacturer#2|Brand#21|PROMO BRUSHED TIN|41|LG PACK|1778.87|l packages boost aft 6873|medium red dark saddle white|Manufacturer#4|Brand#43|PROMO PLATED STEEL|47|MED DRUM|1779.87|unts snooze by the 6874|violet burnished misty medium frosted|Manufacturer#5|Brand#53|SMALL BRUSHED TIN|3|MED PKG|1780.87|above th 6875|blue papaya steel lemon cyan|Manufacturer#2|Brand#25|ECONOMY PLATED NICKEL|22|JUMBO JAR|1781.87|ts are deposits. slyl 6876|lavender sandy mint light deep|Manufacturer#1|Brand#14|LARGE BRUSHED TIN|5|MED PKG|1782.87|ickly regular, silent 6877|lemon ghost green thistle peru|Manufacturer#3|Brand#33|LARGE POLISHED COPPER|16|JUMBO PKG|1783.87|kly b 6878|cream wheat aquamarine yellow burlywood|Manufacturer#1|Brand#12|ECONOMY ANODIZED COPPER|44|LG BOX|1784.87|ular dep 6879|sienna slate chartreuse lavender rose|Manufacturer#5|Brand#51|STANDARD PLATED COPPER|25|JUMBO DRUM|1785.87|r the d 6880|lemon tan orchid white grey|Manufacturer#4|Brand#42|LARGE PLATED NICKEL|11|LG JAR|1786.88|ss deposits. carefull 6881|chiffon indian green salmon antique|Manufacturer#3|Brand#35|STANDARD POLISHED TIN|45|LG CAN|1787.88|he even pinto 6882|burlywood antique light cornflower beige|Manufacturer#2|Brand#21|LARGE ANODIZED BRASS|5|MED CASE|1788.88|e furiously bold 6883|royal aquamarine frosted moccasin beige|Manufacturer#1|Brand#14|ECONOMY BRUSHED BRASS|11|LG BAG|1789.88|venly express ideas. q 6884|smoke misty blanched burnished antique|Manufacturer#4|Brand#41|STANDARD BURNISHED COPPER|35|SM BOX|1790.88|tes use ac 6885|tomato pale dodger ghost orchid|Manufacturer#3|Brand#35|MEDIUM PLATED COPPER|46|JUMBO CASE|1791.88|ly regular d 6886|lace olive navy lime snow|Manufacturer#2|Brand#24|SMALL POLISHED COPPER|16|WRAP PKG|1792.88|e the even d 6887|linen ivory dim smoke seashell|Manufacturer#4|Brand#41|STANDARD BRUSHED BRASS|24|MED BAG|1793.88|s sleep 6888|azure purple moccasin magenta peru|Manufacturer#1|Brand#11|MEDIUM POLISHED TIN|4|SM PKG|1794.88|r request 6889|chiffon royal lemon sandy ivory|Manufacturer#2|Brand#22|MEDIUM PLATED BRASS|4|MED CASE|1795.88|ing to the even pl 6890|magenta misty peach medium peru|Manufacturer#2|Brand#24|ECONOMY POLISHED TIN|22|JUMBO CAN|1796.89|accounts. 6891|salmon brown red lavender sky|Manufacturer#1|Brand#11|SMALL ANODIZED COPPER|31|LG BAG|1797.89|pecia 6892|dim almond deep drab pale|Manufacturer#2|Brand#25|LARGE POLISHED NICKEL|37|WRAP DRUM|1798.89| beans; q 6893|metallic beige azure salmon brown|Manufacturer#2|Brand#21|PROMO BRUSHED BRASS|5|SM PACK|1799.89|ly ironic acc 6894|cyan grey violet deep almond|Manufacturer#1|Brand#13|SMALL PLATED TIN|30|LG JAR|1800.89|. expres 6895|thistle turquoise olive mint antique|Manufacturer#1|Brand#14|MEDIUM BRUSHED BRASS|20|JUMBO CASE|1801.89|sly ironic reque 6896|olive smoke lemon goldenrod moccasin|Manufacturer#3|Brand#35|SMALL PLATED STEEL|43|JUMBO BOX|1802.89|ts. 
enticingly expre 6897|coral magenta mint firebrick white|Manufacturer#4|Brand#41|ECONOMY BRUSHED BRASS|4|LG BAG|1803.89| blithely 6898|green yellow blue peach thistle|Manufacturer#1|Brand#13|LARGE PLATED STEEL|29|MED DRUM|1804.89|. pinto 6899|lemon maroon drab lace peru|Manufacturer#3|Brand#34|MEDIUM BRUSHED BRASS|49|JUMBO CASE|1805.89|sily express theodolit 6900|bisque blue navy turquoise saddle|Manufacturer#2|Brand#25|MEDIUM BRUSHED NICKEL|42|SM BOX|1806.90|le. bold 6901|indian drab lavender purple bisque|Manufacturer#4|Brand#42|PROMO ANODIZED STEEL|25|WRAP CASE|1807.90|e carefully 6902|ghost green violet mint bisque|Manufacturer#2|Brand#23|STANDARD ANODIZED BRASS|19|LG CAN|1808.90| deposits boost ab 6903|mint grey turquoise burnished spring|Manufacturer#3|Brand#32|LARGE POLISHED NICKEL|3|JUMBO BAG|1809.90|its acros 6904|deep salmon olive drab thistle|Manufacturer#3|Brand#32|SMALL PLATED BRASS|6|LG CAN|1810.90|deas promise ab 6905|sandy powder bisque orchid hot|Manufacturer#1|Brand#12|LARGE BURNISHED BRASS|32|LG BAG|1811.90| ironic packages al 6906|black moccasin deep lace blush|Manufacturer#5|Brand#51|STANDARD BURNISHED STEEL|23|SM PACK|1812.90|unts will sleep sp 6907|grey dodger lawn thistle tomato|Manufacturer#1|Brand#12|ECONOMY BRUSHED NICKEL|22|SM CAN|1813.90|ts mold furiously. 6908|royal rose peach rosy azure|Manufacturer#4|Brand#44|ECONOMY BRUSHED BRASS|7|LG CAN|1814.90|. furiously fi 6909|plum cornflower gainsboro tomato lavender|Manufacturer#5|Brand#53|SMALL ANODIZED NICKEL|12|SM CASE|1815.90|y ironic request 6910|coral steel deep sienna orange|Manufacturer#4|Brand#42|MEDIUM BRUSHED STEEL|6|JUMBO BAG|1816.91| silent accounts aft 6911|sky papaya magenta tomato peach|Manufacturer#2|Brand#22|PROMO POLISHED BRASS|31|JUMBO BOX|1817.91|ously through the 6912|olive brown navy chartreuse rose|Manufacturer#5|Brand#54|MEDIUM BRUSHED NICKEL|6|JUMBO PKG|1818.91|eat b 6913|grey rosy tan puff sienna|Manufacturer#1|Brand#13|SMALL PLATED COPPER|15|MED CAN|1819.91| the slyly 6914|peach indian maroon chiffon saddle|Manufacturer#2|Brand#22|STANDARD PLATED NICKEL|18|WRAP PACK|1820.91|ly final depos 6915|linen khaki saddle chocolate medium|Manufacturer#5|Brand#51|LARGE ANODIZED COPPER|37|SM BAG|1821.91|pendenc 6916|azure snow rose sandy chartreuse|Manufacturer#3|Brand#33|MEDIUM PLATED STEEL|1|SM BAG|1822.91|usly unusua 6917|steel black moccasin lace ghost|Manufacturer#3|Brand#34|SMALL ANODIZED NICKEL|42|LG PACK|1823.91| theo 6918|bisque mint drab gainsboro orange|Manufacturer#1|Brand#14|SMALL BRUSHED TIN|36|SM CASE|1824.91|ges? furiously e 6919|linen smoke light powder papaya|Manufacturer#2|Brand#22|ECONOMY BRUSHED TIN|18|WRAP BAG|1825.91|ully blithely bold id 6920|rose pink chartreuse dim deep|Manufacturer#3|Brand#34|STANDARD PLATED COPPER|50|WRAP CAN|1826.92|carefully. 6921|green puff forest pale lawn|Manufacturer#3|Brand#34|MEDIUM PLATED COPPER|10|SM BOX|1827.92|usly. stealthy courts 6922|firebrick lime cream magenta maroon|Manufacturer#3|Brand#32|LARGE ANODIZED STEEL|25|WRAP JAR|1828.92|the blithely bold th 6923|ivory firebrick seashell peach navajo|Manufacturer#1|Brand#12|STANDARD POLISHED BRASS|49|JUMBO DRUM|1829.92|s. 
quickly unusual 6924|powder blanched maroon violet magenta|Manufacturer#2|Brand#21|MEDIUM BURNISHED COPPER|43|WRAP PACK|1830.92| fluffy depende 6925|dodger forest cream khaki aquamarine|Manufacturer#5|Brand#51|LARGE POLISHED NICKEL|23|WRAP DRUM|1831.92|areful 6926|azure drab floral khaki spring|Manufacturer#5|Brand#52|LARGE PLATED STEEL|10|WRAP PACK|1832.92|ly pendin 6927|plum honeydew azure lime pale|Manufacturer#5|Brand#52|PROMO POLISHED BRASS|23|JUMBO BAG|1833.92| furi 6928|violet magenta peach lemon burnished|Manufacturer#2|Brand#22|MEDIUM ANODIZED STEEL|39|WRAP CAN|1834.92|s. blith 6929|spring goldenrod violet lemon slate|Manufacturer#3|Brand#35|PROMO PLATED STEEL|4|SM PKG|1835.92|ole fluffily. s 6930|chartreuse plum orchid violet lavender|Manufacturer#4|Brand#43|MEDIUM PLATED BRASS|15|WRAP CASE|1836.93| express, sile 6931|ghost black coral magenta mint|Manufacturer#5|Brand#52|SMALL PLATED NICKEL|43|SM CAN|1837.93|l gifts nag slyly af 6932|red pale firebrick turquoise moccasin|Manufacturer#4|Brand#45|PROMO POLISHED COPPER|30|JUMBO JAR|1838.93|nag carefully theodo 6933|mint smoke white olive powder|Manufacturer#3|Brand#34|STANDARD BURNISHED TIN|39|JUMBO BOX|1839.93|old accounts sleep 6934|tan slate orchid peru red|Manufacturer#1|Brand#13|SMALL BURNISHED STEEL|48|SM CAN|1840.93| quickly f 6935|lawn forest blanched burlywood pale|Manufacturer#2|Brand#24|SMALL PLATED COPPER|14|LG DRUM|1841.93|yly. 6936|dim orange indian drab midnight|Manufacturer#1|Brand#14|STANDARD ANODIZED NICKEL|28|WRAP DRUM|1842.93|es. s 6937|drab beige sky cornsilk firebrick|Manufacturer#2|Brand#21|STANDARD BRUSHED STEEL|33|JUMBO PKG|1843.93|asymptotes: final t 6938|linen burnished lemon drab moccasin|Manufacturer#2|Brand#25|LARGE PLATED STEEL|25|MED PKG|1844.93|nic accounts cajole 6939|mint pale antique rose orange|Manufacturer#1|Brand#15|SMALL PLATED TIN|28|WRAP BAG|1845.93|among the fur 6940|linen cyan chiffon tomato red|Manufacturer#3|Brand#33|PROMO BURNISHED NICKEL|5|SM JAR|1846.94|slyly final, 6941|honeydew plum blush lemon chiffon|Manufacturer#1|Brand#11|PROMO PLATED BRASS|40|WRAP PACK|1847.94|e even, expre 6942|thistle deep dim almond maroon|Manufacturer#3|Brand#31|PROMO BURNISHED STEEL|4|WRAP PKG|1848.94| packag 6943|peru rose lavender light puff|Manufacturer#2|Brand#21|STANDARD ANODIZED BRASS|9|SM BAG|1849.94|ts. bli 6944|sky chocolate olive yellow blanched|Manufacturer#5|Brand#51|MEDIUM POLISHED STEEL|41|JUMBO PACK|1850.94| the pending, final 6945|cornsilk slate peru chiffon orchid|Manufacturer#2|Brand#25|PROMO POLISHED STEEL|34|JUMBO JAR|1851.94|pecial package 6946|chartreuse saddle peach plum burnished|Manufacturer#3|Brand#33|PROMO BURNISHED BRASS|44|SM PACK|1852.94|uests boost furiously 6947|indian dim lace papaya blue|Manufacturer#2|Brand#22|MEDIUM BRUSHED COPPER|18|MED CAN|1853.94| unusu 6948|peach seashell black gainsboro deep|Manufacturer#1|Brand#12|SMALL BRUSHED STEEL|28|WRAP BOX|1854.94|s accou 6949|slate dim cyan navajo white|Manufacturer#5|Brand#55|LARGE BRUSHED NICKEL|26|WRAP PACK|1855.94|lets. 6950|sky ivory cream turquoise aquamarine|Manufacturer#1|Brand#13|ECONOMY BURNISHED STEEL|13|WRAP CAN|1856.95|aggle furiou 6951|tan sandy royal sienna puff|Manufacturer#1|Brand#14|MEDIUM ANODIZED TIN|12|SM BAG|1857.95| accounts wak 6952|gainsboro sienna blush forest lemon|Manufacturer#4|Brand#41|MEDIUM ANODIZED BRASS|37|LG CAN|1858.95|nt packages a 6953|pale plum dark seashell orchid|Manufacturer#3|Brand#34|PROMO BRUSHED NICKEL|12|WRAP PKG|1859.95|. 
ironic 6954|cornsilk ivory midnight pale blanched|Manufacturer#3|Brand#34|ECONOMY BURNISHED COPPER|27|WRAP BOX|1860.95|regular dep 6955|drab chartreuse cyan wheat snow|Manufacturer#2|Brand#24|SMALL BURNISHED TIN|26|JUMBO CAN|1861.95|ronic, ironic acco 6956|ivory navajo purple blush green|Manufacturer#2|Brand#24|MEDIUM POLISHED COPPER|17|JUMBO DRUM|1862.95| pinto beans sleep fu 6957|salmon pink sky seashell tomato|Manufacturer#5|Brand#54|LARGE PLATED STEEL|5|JUMBO DRUM|1863.95|thely furiously 6958|misty azure floral aquamarine dim|Manufacturer#1|Brand#15|PROMO ANODIZED COPPER|32|SM JAR|1864.95|ully after 6959|puff green deep dark sienna|Manufacturer#5|Brand#51|SMALL PLATED BRASS|28|SM BAG|1865.95| deposits wake care 6960|antique orchid dim linen peru|Manufacturer#5|Brand#53|ECONOMY ANODIZED BRASS|48|JUMBO PKG|1866.96|beans slee 6961|slate drab maroon thistle puff|Manufacturer#3|Brand#35|ECONOMY BURNISHED NICKEL|18|SM JAR|1867.96|s believe furiously! a 6962|green aquamarine azure midnight chiffon|Manufacturer#1|Brand#11|STANDARD BRUSHED BRASS|44|JUMBO JAR|1868.96|e fluffily. expr 6963|maroon deep frosted hot coral|Manufacturer#1|Brand#13|ECONOMY BRUSHED TIN|31|SM PKG|1869.96|arefully carefull 6964|moccasin dodger ivory plum sky|Manufacturer#4|Brand#41|SMALL PLATED TIN|50|LG BOX|1870.96|hless 6965|snow spring peach dodger green|Manufacturer#1|Brand#13|STANDARD BURNISHED COPPER|45|JUMBO CAN|1871.96| nag. q 6966|olive blanched drab smoke light|Manufacturer#4|Brand#42|STANDARD ANODIZED STEEL|22|WRAP BOX|1872.96|sits haggle bold 6967|puff hot brown sky pink|Manufacturer#5|Brand#53|SMALL BURNISHED BRASS|9|SM PACK|1873.96| beans cajole qu 6968|spring red royal turquoise ghost|Manufacturer#3|Brand#35|ECONOMY BRUSHED NICKEL|24|JUMBO BOX|1874.96|indle furiously regula 6969|powder steel lemon aquamarine deep|Manufacturer#2|Brand#21|MEDIUM ANODIZED BRASS|37|JUMBO BAG|1875.96|ily regular requests. 6970|powder hot moccasin deep goldenrod|Manufacturer#5|Brand#52|LARGE POLISHED BRASS|12|JUMBO PKG|1876.97|ly eve 6971|navajo seashell gainsboro light metallic|Manufacturer#4|Brand#45|SMALL BURNISHED NICKEL|6|JUMBO CAN|1877.97|e slyly r 6972|midnight white smoke lavender goldenrod|Manufacturer#2|Brand#23|ECONOMY POLISHED STEEL|20|SM JAR|1878.97|mptotes. 
depe 6973|grey seashell navajo lace cream|Manufacturer#1|Brand#12|SMALL BRUSHED TIN|16|WRAP PKG|1879.97|t packages inte 6974|beige drab metallic firebrick yellow|Manufacturer#2|Brand#23|MEDIUM ANODIZED STEEL|12|JUMBO BAG|1880.97|ironic ac 6975|smoke maroon peru salmon puff|Manufacturer#4|Brand#41|MEDIUM BRUSHED COPPER|35|JUMBO JAR|1881.97|iously ironic acc 6976|snow blanched midnight smoke olive|Manufacturer#4|Brand#42|PROMO BURNISHED COPPER|20|LG CASE|1882.97|theodolites 6977|papaya lemon olive maroon grey|Manufacturer#4|Brand#42|PROMO POLISHED COPPER|37|LG PACK|1883.97|ld instru 6978|yellow peach lace chiffon linen|Manufacturer#4|Brand#45|MEDIUM ANODIZED BRASS|6|SM PACK|1884.97| abou 6979|dodger saddle thistle drab seashell|Manufacturer#1|Brand#12|STANDARD POLISHED COPPER|2|WRAP PKG|1885.97|e regular, even 6980|thistle linen orange white burlywood|Manufacturer#5|Brand#53|STANDARD BRUSHED STEEL|35|WRAP PACK|1886.98|along the ironi 6981|cyan blue blanched tan hot|Manufacturer#1|Brand#12|PROMO PLATED TIN|34|WRAP PKG|1887.98|ructions are al 6982|brown wheat cream cornsilk pink|Manufacturer#2|Brand#22|SMALL POLISHED BRASS|12|LG PKG|1888.98|ons wake against th 6983|cornsilk royal olive smoke forest|Manufacturer#3|Brand#32|MEDIUM POLISHED BRASS|39|MED CASE|1889.98|uriously special foxes 6984|lime turquoise lavender floral rosy|Manufacturer#2|Brand#21|PROMO BURNISHED TIN|34|JUMBO PACK|1890.98|ly regular gifts det 6985|white black navy turquoise light|Manufacturer#4|Brand#44|MEDIUM BRUSHED COPPER|48|SM CASE|1891.98| packa 6986|lemon sky chocolate green linen|Manufacturer#2|Brand#21|ECONOMY BRUSHED BRASS|14|LG CAN|1892.98|ges. a 6987|grey burnished white antique beige|Manufacturer#2|Brand#23|ECONOMY POLISHED NICKEL|15|JUMBO PACK|1893.98|ctions wake furiously 6988|cream mint drab tomato ghost|Manufacturer#2|Brand#22|STANDARD ANODIZED BRASS|35|MED PKG|1894.98| deposit 6989|puff bisque wheat dim seashell|Manufacturer#5|Brand#55|STANDARD POLISHED COPPER|4|WRAP PACK|1895.98|s use accordi 6990|red coral violet white medium|Manufacturer#5|Brand#53|STANDARD BURNISHED NICKEL|44|JUMBO CASE|1896.99|ously ironic escapades 6991|drab chartreuse plum white goldenrod|Manufacturer#5|Brand#52|MEDIUM BRUSHED STEEL|33|WRAP BAG|1897.99| the 6992|chocolate lawn mint blanched turquoise|Manufacturer#5|Brand#52|SMALL BURNISHED TIN|21|SM BAG|1898.99|refully silent asympt 6993|thistle blanched peach red navy|Manufacturer#2|Brand#23|SMALL BURNISHED BRASS|1|MED JAR|1899.99|cial theodolites alon 6994|plum smoke cream navajo wheat|Manufacturer#4|Brand#43|PROMO BRUSHED NICKEL|35|JUMBO CASE|1900.99|gle. furi 6995|dim sienna navy smoke coral|Manufacturer#5|Brand#54|LARGE PLATED NICKEL|41|MED CASE|1901.99| according to 6996|metallic pale cornsilk antique tan|Manufacturer#2|Brand#21|LARGE ANODIZED TIN|45|WRAP BOX|1902.99|ly. blithely 6997|peru blanched burlywood lavender black|Manufacturer#5|Brand#54|SMALL BRUSHED STEEL|27|SM BAG|1903.99|gular sheaves! ca 6998|metallic khaki purple cyan cream|Manufacturer#1|Brand#12|STANDARD ANODIZED NICKEL|20|JUMBO BAG|1904.99|equest 6999|moccasin burlywood plum tomato azure|Manufacturer#3|Brand#31|PROMO PLATED TIN|18|JUMBO CAN|1905.99|ular deposits a 7000|beige orchid royal lace burnished|Manufacturer#4|Brand#44|ECONOMY ANODIZED NICKEL|37|WRAP CASE|907.00|. 
furiously c citus-7.0.3/src/test/regress/data/supplier.data000066400000000000000000004165771317107136600215300ustar00rootroot000000000000001|Supplier#000000001| N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ|\N|27-918-335-1736|5755.94|each slyly above the careful 2|Supplier#000000002|89eJ5ksX3ImxJQBvxObC,|\N|15-679-861-2259|4032.68| slyly bold instructions. idle dependen 3|Supplier#000000003|q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3|\N|11-383-516-1199|4192.40|blithely silent requests after the express dependencies are sl 4|Supplier#000000004|Bk7ah4CK8SYQTepEmvMkkgMwg|\N|25-843-787-7479|4641.08|riously even requests above the exp 5|Supplier#000000005|Gcdm2rJRzl5qlTVzc|\N|21-151-690-3663|-283.84|. slyly regular pinto bea 6|Supplier#000000006|tQxuVm7s7CnK|\N|24-696-997-4969|1365.79|final accounts. regular dolphins use against the furiously ironic decoys. 7|Supplier#000000007|s,4TicNGB4uO6PaSqNBUq|0|33-990-965-2201|6820.35|s unwind silently furiously regular courts. final requests are deposits. requests wake quietly blit 8|Supplier#000000008|9Sq4bBH2FQEmaFOocY45sRTxo6yuoG|0|27-498-742-3860|7627.85|al pinto beans. asymptotes haggl 9|Supplier#000000009|1KhUgZegwM3ua7dsYmekYBsK|0|20-403-398-8662|5302.37|s. unusual, even requests along the furiously regular pac 10|Supplier#000000010|Saygah3gYWMp72i PY|0|34-852-489-8585|3891.91|ing waters. regular requests ar 11|Supplier#000000011|JfwTs,LZrV, M,9C|18|28-613-996-1505|3393.08|y ironic packages. slyly ironic accounts affix furiously; ironically unusual excuses across the flu 12|Supplier#000000012|aLIW q0HYd|8|18-179-925-7181|1432.69|al packages nag alongside of the bold instructions. express, daring accounts 13|Supplier#000000013|HK71HQyWoqRWOX8GI FpgAifW,2PoH|3|13-727-620-7813|9107.22|requests engage regularly instructions. furiously special requests ar 14|Supplier#000000014|EXsnO5pTNj4iZRm|15|25-656-247-5058|9189.82|l accounts boost. fluffily bold warhorses wake 15|Supplier#000000015|olXVbNBfVzRqgokr1T,Ie|8|18-453-357-6394|308.56| across the furiously regular platelets wake even deposits. quickly express she 16|Supplier#000000016|YjP5C55zHDXL7LalK27zfQnwejdpin4AMpvh|22|32-822-502-4215|2972.26|ously express ideas haggle quickly dugouts? fu 17|Supplier#000000017|c2d,ESHRSkK3WYnxpgw6aOqN0q|19|29-601-884-9219|1687.81|eep against the furiously bold ideas. fluffily bold packa 18|Supplier#000000018|PGGVE5PWAMwKDZw |16|26-729-551-1115|7040.82|accounts snooze slyly furiously bold 19|Supplier#000000019|edZT3es,nBFD8lBXTGeTl|24|34-278-310-2731|6150.38|refully final foxes across the dogged theodolites sleep slyly abou 20|Supplier#000000020|iybAE,RmTymrZVYaFZva2SH,j|3|13-715-945-6730|530.82|n, ironic ideas would nag blithely about the slyly regular accounts. silent, expr 21|Supplier#000000021|81CavellcrJ0PQ3CPBID0Z0JwyJm0ka5igEs|2|12-253-590-5816|9365.80|d. instructions integrate sometimes slyly pending instructions. accounts nag among the 22|Supplier#000000022|okiiQFk 8lm6EVX6Q0,bEcO|4|14-144-830-2814|-966.20| ironically among the deposits. closely expre 23|Supplier#000000023|ssetugTcXc096qlD7 2TL5crEEeS3zk|9|19-559-422-5776|5926.41|ges could have to are ironic deposits. regular, even request 24|Supplier#000000024|C4nPvLrVmKPPabFCj|0|10-620-939-2254|9170.71|usly pending deposits. slyly final accounts run 25|Supplier#000000025|RCQKONXMFnrodzz6w7fObFVV6CUm2q|22|32-431-945-3541|9198.31|ely regular deposits. carefully regular sauternes engage furiously above the regular accounts. 
idly 26|Supplier#000000026|iV,MHzAx6Z939uzFNkq09M0a1 MBfH7|21|31-758-894-4436|21.18| ideas poach carefully after the blithely bold asymptotes. furiously pending theodoli 27|Supplier#000000027|lC4CjKwNHUr6L4xIpzOBK4NlHkFTg|18|28-708-999-2028|1887.62|s according to the quickly regular hockey playe 28|Supplier#000000028|GBhvoRh,7YIN V|0|10-538-384-8460|-891.99|ld requests across the pinto beans are carefully against the quickly final courts. accounts sleep 29|Supplier#000000029|658tEqXLPvRd6xpFdqC2|1|11-555-705-5922|-811.62|y express ideas play furiously. even accounts sleep fluffily across the accounts. careful 30|Supplier#000000030|84NmC1rmQfO0fj3zkobLT|16|26-940-594-4852|8080.14|ias. carefully silent accounts cajole blithely. pending, special accounts cajole quickly above the f 31|Supplier#000000031|fRJimA7zchyApqRLHcQeocVpP|16|26-515-530-4159|5916.91|into beans wake after the special packages. slyly fluffy requests cajole furio 32|Supplier#000000032|yvoD3TtZSx1skQNCK8agk5bZlZLug|23|33-484-637-7873|3556.47|usly even depths. quickly ironic theodolites s 33|Supplier#000000033|gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R|7|17-138-897-9374|8564.12|n sauternes along the regular asymptotes are regularly along the 34|Supplier#000000034|mYRe3KvA2O4lL4HhxDKkkrPUDPMKRCSp,Xpa|10|20-519-982-2343|237.31|eposits. slyly final deposits toward the slyly regular dependencies sleep among the excu 35|Supplier#000000035|QymmGXxjVVQ5OuABCXVVsu,4eF gU0Qc6|21|31-720-790-5245|4381.41| ironic deposits! final, bold platelets haggle quickly quickly pendin 36|Supplier#000000036|mzSpBBJvbjdx3UKTW3bLFewRD78D91lAC879|13|23-273-493-3679|2371.51|ular theodolites must haggle regular, bold accounts. slyly final pinto beans bo 37|Supplier#000000037|cqjyB5h1nV|0|10-470-144-1330|3017.47|iously final instructions. quickly special accounts hang fluffily above the accounts. deposits 38|Supplier#000000038|xEcx45vD0FXHT7c9mvWFY|4|14-361-296-6426|2512.41|ins. fluffily special accounts haggle slyly af 39|Supplier#000000039|ZM, nSYpEPWr1yAFHaC91qjFcijjeU5eH|8|18-851-856-5633|6115.65|le slyly requests. special packages shall are blithely. slyly unusual packages sleep 40|Supplier#000000040|zyIeWzbbpkTV37vm1nmSGBxSgd2Kp|22|32-231-247-6991|-290.06| final patterns. accounts haggle idly pas 41|Supplier#000000041|G 1FKHR435 wMKFmyt|18|28-739-447-2525|6942.67|odolites boost across the furiously regular fo 42|Supplier#000000042|1Y5lwEgpe3j2vbUBYj3SwLhK62JlwEMtDC|22|32-698-298-6317|6565.11| fluffily even requests cajole blithely fu 43|Supplier#000000043|Z5mLuAoTUEeKY5v22VnnA4D87Ao6jF2LvMYnlX8h|12|22-421-568-4862|7773.41|unts. unusual, final asymptotes 44|Supplier#000000044|kERxlLDnlIZJdN66zAPHklyL|7|17-713-930-5667|9759.38|x. carefully quiet account 45|Supplier#000000045|LcKnsa8XGtIO0WYSB7hkOrH rnzRg1|9|19-189-635-8862|2944.23|iously according to the ironic, silent accounts. 46|Supplier#000000046|e0URUXfDOYMdKe16Z5h5StMRbzGmTs,D2cjap|24|34-748-308-3215|3580.35|gular, regular ideas across th 47|Supplier#000000047|3XM1x,Pcxqw,HK4XNlgbnZMbLhBHLA|14|24-810-354-4471|2958.09|sly ironic deposits sleep carefully along t 48|Supplier#000000048|jg0U FNPMQDuyuKvTnLXXaLf3Wl6OtONA6mQlWJ|14|24-722-551-9498|5630.62|xpress instructions affix. fluffily even requests boos 49|Supplier#000000049|Nvq 6macF4GtJvz|24|34-211-567-6800|9915.24|the finally bold dependencies. dependencies after the fluffily final foxes boost fluffi 50|Supplier#000000050|rGobqSMMYz0ErrPhCGS|9|19-561-560-7437|4515.87|warhorses. 
ironic, regular accounts detect slyly after the quickly f 51|Supplier#000000051|rDkBXb01POIKjOwrij62uM8O4|9|19-475-537-1368|7241.40|ges nag at the blithely busy instructions. fluffy packages wake quickly. even, ironic ideas boost b 52|Supplier#000000052|WCk XCHYzBA1dvJDSol4ZJQQcQN,|19|29-974-934-4713|287.16|dolites are slyly against the furiously regular packages. ironic, final deposits cajole quickly 53|Supplier#000000053|i9v3 EsYCfLKFU6PIt8iihBOHBB37yR7b3GD7Rt|7|17-886-101-6083|6177.35|onic, special deposits wake furio 54|Supplier#000000054|J1s,Wxb5pg|12|22-966-435-7200|2733.69|blithely pending dolphins. quickly regular theodolites haggle slyly 55|Supplier#000000055|OqdYSiOQeG4eGi636Tj|24|34-876-912-6007|7162.15|kages. blithely even foxes cajole special, final accounts. blithely even dependencies r 56|Supplier#000000056|fUVtlUVal GiHBOuYoUQ XQ9NfNLQR3Gl|16|26-471-195-5486|-632.16| sleep special deposits. unusual requests wake blithely slyly regular ideas. 57|Supplier#000000057|bEWqUVRR f0mb2o18Y|17|27-681-514-6892|-831.07|detect according to the furiously br 58|Supplier#000000058|01dEADIZoCULZXg|16|26-659-969-5586|92.44|refully final foxes are. even, express courts according to the b 59|Supplier#000000059|N8lKbYjMnVlEHmTPRmBgtLiX8rrJx|17|27-249-395-9123|586.16|ffily along the even decoys. final instructions abov 60|Supplier#000000060|cb08ntDTARo47WmnBcYXu|8|18-550-360-2464|4515.80|thely express ideas use blithely 61|Supplier#000000061|Oz0M1qBR9I|8|18-396-489-9719|6096.58|s the slyly regular ideas shall 62|Supplier#000000062|bSmlFYUKBeRsqJxwC9 zS6xpFdEf5jNTb|19|29-603-653-2494|9202.57|ts. furiously ironic pinto beans are permanently after the bold ideas. regular, express f 63|Supplier#000000063|NlV0OQyIoPvPkw5AYuWGomX,hgqm1|5|15-781-401-3047|5742.03|ar deposits. blithely bold accounts against the slyly final pinto beans sleep about the exp 64|Supplier#000000064|w80JjnIP lGoLdUjRutbv81gGlqqpW4PQBeOtSYU|24|34-278-790-7004|1309.70|uickly regular requests use. carefully i 65|Supplier#000000065|BsAnHUmSFArppKrM|22|32-444-835-2434|-963.79|l ideas wake carefully around the regular packages. furiously ruthless pinto bea 66|Supplier#000000066|qYdruFJQJYYiKvnNVmYfCVydVB8bcW,AW,U6SOV3|23|33-300-836-9529|2455.98|ar requests. express orbits de 67|Supplier#000000067|7YrEKJncHFk5D W7ZaqfAXV|4|14-563-538-1657|3576.55|ray slyly final foxes. furio 68|Supplier#000000068|Ue6N50wH2CwE4PPgTGLmat,ibGYYlDoOb3xQwtgb|21|31-267-327-4328|5119.38|inal requests. ruthlessly ironic packages cajole 69|Supplier#000000069|T2Dl9,f97e333eRuMi2z |4|14-491-707-8310|8466.50|! carefully ironic instructions nag quickly pending requests. fluffily even deposits sleep a 70|Supplier#000000070|INWNH2w,OOWgNDq0BRCcBwOMQc6PdFDc4|6|16-821-608-1166|9508.37|ests sleep quickly express ideas. ironic ideas haggle about the final T 71|Supplier#000000071|YFo8an7P6wi Q|1|11-743-919-7272|8179.68| final accounts. bold, final escapades must have to cajole about the special platelets. fu 72|Supplier#000000072|mKpAJojtawk2alqV4 ZEbJ3PH3wfYqy AM8rGq1|18|28-113-898-6643|7014.50| theodolites sublate furiously about the regularly e 73|Supplier#000000073|HBZA1NHvrswQCxTTjg 5XrfSOGgMRKNCe2ovE|16|26-758-310-7496|3793.13|. never pending asymptotes a 74|Supplier#000000074|uM3yV5NOc6b5wNdpxF69CW 8QvDxqvKubRJtA|20|30-166-486-1559|4170.51|carefully along the quickly regular sentiments. ironic accounts sleep. regular deposits are blith 75|Supplier#000000075|7f3gN4rP1livII|18|28-716-704-8686|-224.84|eans. 
even, silent packages c 76|Supplier#000000076|JBhSBa3cLYvNgHUYtUHmtECCD|14|24-228-763-7840|2971.10|, even instructions. furiously unusual deposits wake slyly about the ev 77|Supplier#000000077|wVtcr0uH3CyrSiWMLsqnB09Syo,UuZxPMeBghlY|7|17-281-345-4863|4186.95|the slyly final asymptotes. blithely pending theodoli 78|Supplier#000000078|9y3OZ2CV hGrsrQxzB7V3zTtygHVHlG3SD6yrz|5|15-670-998-6860|1044.10|, regular packages wake quickly bold requests. carefully unusual requests about the unusual request 79|Supplier#000000079|p0u3tztSXUD2J8vFfLNFNKsrRRv7qyUtTBTA|14|24-402-227-9600|1191.94|nto beans integrate slyly across the fluffily pending multipliers. carefully ste 80|Supplier#000000080|cJ2MHSEJ13rIL2Wj3D5i6hRo30,ZiNUXhqn|21|31-646-289-1906|-40.45|ackages. blithely bold requests wake quickly. carefully regular foxes are slyly instructions. caref 81|Supplier#000000081|SLlacbhgpKmVa,gF3saYv12e0|12|22-535-310-6971|166.32|oost carefully quickly regular packages. carefully final excuses sleep blithely slyly pendi 82|Supplier#000000082|WyTKA7ZpF15t1aCNlT3|18|28-177-572-9691|-724.31|xpress multipliers wake furiously even foxes. furiously iro 83|Supplier#000000083|WRJUkzCn050seVz57oAfrbCuw|14|24-529-559-2461|1467.77|ly pending courts would cajole enticingly even deposits. slyly express 84|Supplier#000000084|DcYjWMiZGQqEKOJi4wAmIV08ikx|24|34-869-118-7803|4780.93|even depths. regular foxes use slyly. theod 85|Supplier#000000085|Ckls9RtlzKSF|7|17-167-806-8199|7174.74|egular packages. bold pinto beans wake fur 86|Supplier#000000086|J1fgg5QaqnN|19|29-903-665-7065|1883.37|cajole furiously special, final requests: furiously spec 87|Supplier#000000087|WCw7URDj8zoZ7tqC3cpm7|24|34-860-229-1674|4746.66|all are quickly after the ironic platelets. pending dolphins are. final the 88|Supplier#000000088|yOshY8wwzMgS|11|21-191-938-9469|3086.13|furiously special excuses aff 89|Supplier#000000089|fhtzZcSorhud1|9|19-259-876-1014|1638.02|en instructions across the slyly ironic requests engage 90|Supplier#000000090|bPE6Uhz1f2m3gwSGMrnRt,g,3gq37r5kxgphqss1|6|16-380-123-9217|6201.77|hely fluffily regular theodoli 91|Supplier#000000091|YV45D7TkfdQanOOZ7q9QxkyGUapU1oOWU6q3|3|13-604-986-9056|6255.87|nstructions use carefully according to the special packages: quickly silent th 92|Supplier#000000092|n48Wy4QI3lml8T217rk|2|12-701-432-8346|2470.84| even theodolites wake against the blithely fluffy packages 93|Supplier#000000093|wd1djjKXT,4zBm|16|26-528-528-1157|368.76|yly final accounts could are carefully. fluffily ironic instruct 94|Supplier#000000094|lK,pLPjAMVGJOXN80zPZuNQjpChliE|4|14-728-888-8882|2766.80|usly. furiously slow theodolites could haggle carefully fina 95|Supplier#000000095|p2 2hBfH5TD|19|29-142-539-1403|2588.83|sual instructions cajole slyly with the final ac 96|Supplier#000000096|Lbxp3WIipye o2wZme1i9iJx,xTt1Mp|12|22-888-654-7193|3437.24|efully unusual excuses wake fluffily regular theodo 97|Supplier#000000097|MrCQha2G6ndX1fp6CA|4|14-618-678-1789|6642.22|s, regular accounts. furiously bold ideas u 98|Supplier#000000098|ogHn8dpXB5Q|21|31-914-775-1978|5873.07|esias use slyly under the ironic foxes. re 99|Supplier#000000099|4SxkTHG28nZrtT0,MnVF9H|18|28-272-909-1617|3671.34|nic dependencies dazzle flu 100|Supplier#000000100|rIlN li8zvW22l2slbcx ECP4fL|21|31-351-324-5062|3191.70|es. regular instructions sleep carefully. slyly ironic packages across the foxes boost 101|Supplier#000000101|8KUTzVw32Pw3PD7 h,YJ0ysOoBtz9JfZI|11|21-970-795-5691|4901.77|ar foxes. carefully final ideas are. 
blithely regular deposits about the carefully regular package 102|Supplier#000000102|pDmsZ3V8W7NR1wW|8|18-884-693-5785|2980.07|ly even notornis nod furiously evenly regular platelets. thinly pending pat 103|Supplier#000000103|hBpQ4GYblzpHKTgATLPAS6ph3|2|12-442-523-5043|1050.66| accounts detect slyly bo 104|Supplier#000000104|Dcl4yGrzqv3OPeRO49bKh78XmQEDR7PBXIs0m|6|16-434-972-6922|1381.97|gular ideas. bravely bold deposits haggle through the carefully final deposits. slyly unusual idea 105|Supplier#000000105|cB YSy5Bla|4|14-951-800-2742|3754.75|sual requests haggle slyly. theodolites 106|Supplier#000000106|50EV3vyfAsWJAjTbT4qwU|21|31-810-990-4600|8091.65|eas affix carefully fluffily silent packages. regular deposits bo 107|Supplier#000000107|fqniA2vC1VZU5DZG2TBiN|16|26-958-723-2164|1378.93|thely special foxes nag carefully. requests cajole along the quickly ironic pinto b 108|Supplier#000000108|eL47lfhfMP7zRw|24|34-341-415-7878|7683.71|, regular packages! bold re 109|Supplier#000000109|4 63llZBLxtWRa A|21|31-405-311-8656|9202.82|al courts. unusual escapades cajol 110|Supplier#000000110|ehv9ObpyN0|15|25-824-874-9077|9804.10|eposits nag thinly furiously even accounts; permanent ideas nag fluf 111|Supplier#000000111|uv56H9j8cNa4qnflVSYbLNN|4|14-256-573-2660|6578.65|ing theodolites are about the slowly pending 112|Supplier#000000112|vdWe5lfgvisRCxdd85DTOZHqAzcuq7f7KKThA|12|22-617-876-1402|4332.95|es eat fluffily bold deposits. furiously unusual i 113|Supplier#000000113|5 YOpqbaHs7dR gG4EmXrI7XtA7DcnRMsWPU1z2D|11|21-211-117-1937|1882.05|blithely regular courts wake quickly even pl 114|Supplier#000000114|Uvz iykaBYOC|20|30-998-334-7841|4559.18|jole slyly blithely regular requests-- even requests haggle alongsid 115|Supplier#000000115|nJ 2t0f7Ve,wL1,6WzGBJLNBUCKlsV|23|33-597-248-1220|9192.10|es across the carefully express accounts boost caref 116|Supplier#000000116|gABGPfNYwB 9g1rMYWSAhpo 4|13|23-188-305-1829|5751.39|xcuses wake quickly above the regular packages; s 117|Supplier#000000117|ZnlLNC,8YN10T4mjI8eq5bJ|18|28-470-879-3141|4589.18|ymptotes. blithely regular theodolites are slyly according to the 118|Supplier#000000118|BYtvNtFpQAHHoBFWF|0|10-475-868-5521|7317.43|y ironic theodolites. furiously bold ideas use along t 119|Supplier#000000119|4CxBrM0o4yt6LYFxZIyZ89Xnf8LZNn6KcYc|18|28-558-264-1202|2060.13|ctions: quickly final courts wake quietly foxes. packages hag 120|Supplier#000000120|TNxoTw0SiMmQzGfZY9fTSkL2kmtPwvtR2L|7|17-645-761-5674|5575.40| beans cajole of the sly, dogged courts. 121|Supplier#000000121|CWGri,tKI 7gDcDsI|6|16-275-849-2485|5733.61|against the ironic, permanent pinto beans. doggedly pending deposits sleep agai 122|Supplier#000000122|2RUSHHspScCVTWC6z vw2XVR|16|26-432-258-4986|2732.95|ackages. carefully special accounts use slyly. slyly silent i 123|Supplier#000000123|IqRn20xsj5ibqAQjb6YNQf0xah|19|29-602-688-1506|5726.19|nts x-ray quickly according to t 124|Supplier#000000124|vmVadCZ xHPbQQA2fLxr68T1YhmjVSuRUJKCrcq6|8|18-313-889-3713|-941.38|ular excuses after the fluffily pending pinto 125|Supplier#000000125|XG eO4Xb4TSF7rj4R6WRQ1v2seTlyga3tvFZaC|2|12-419-430-3983|5157.25|ven accounts. fluffily ironic deposits are carefully. s 126|Supplier#000000126|CaO4YuZ oSkzemn|14|24-728-670-3468|6829.86|unts. carefully regular dolphins s 127|Supplier#000000127|VEqo3HZJ,0ggcaxvqZnW7Rq7l,cPqfKgX2bIRavM|1|11-265-565-1616|2502.95|yly above the fluffily ironic accounts. 128|Supplier#000000128|u,I0ayLPI2HKL|23|33-957-200-2556|1091.04|ng requests. 
slyly final ideas affix slyly even requests. fluffily regular theo 129|Supplier#000000129|9kWUk5K 7TAR40dW5nVeg i60Fy|15|25-314-164-7014|9001.17|even excuses wake even accounts. slyly bold excuses 130|Supplier#000000130|Rnt93MAs0EpPZjPYQIlGOWNUjk|23|33-322-101-6260|8708.17| blithely regular packages sublate. closely idle accounts are fluffily final, brav 131|Supplier#000000131|u3mTHMgBC0yJTLufr01TuHImgflQUXv|14|24-293-181-3975|1301.20|to the regular, pending instructions detect against the p 132|Supplier#000000132|sU92Jd3aQSbV|5|15-776-909-1326|-812.17|al, final accounts cajole requests; fluffily pending instruction 133|Supplier#000000133|QT,semYwdx|4|14-731-952-5158|9754.60|he quickly express instructions 134|Supplier#000000134|Nv7dxj4FGWrdcP56RmNci|6|16-184-952-7778|-329.16|c deposits haggle. dinos a 135|Supplier#000000135|F4Uy ZQNU6ESTmO3mrL,mI|7|17-290-812-8855|9767.99|courts wake slyly instructions. furiously silent requests cajol 136|Supplier#000000136|SyWVom9ZFrTA6BMAS|8|18-175-739-8397|4623.48|requests. boldly regular deposits are. packages are sometimes! c 137|Supplier#000000137|j81M1VQCvKcIVnpMnIUnBNVUOm0XYJuJY|18|28-108-440-4890|837.27|he blithely bold theodolites cajole blithely among the 138|Supplier#000000138|utbplAm g7RmxVfYoNdhcrQGWuzRqPe0qHSwbKw|19|29-533-434-6776|906.07|ickly unusual requests cajole. accounts above the furiously special excuses 139|Supplier#000000139| 2mQLQsVJ8WLBSnl0R bXrcyTgqXKrplgxb|22|32-788-265-2743|2217.93|arefully ironic ideas: slyly regular deposits about the furiously ironic requests 140|Supplier#000000140|mdfYYe8U sSb|16|26-379-377-5829|6727.48|sly final pinto beans affix furiously about the packages. even, bold accounts affix permanently fi 141|Supplier#000000141|5IN1dvjqFhyfKxoslkY3UL7CrNVPCZmq|10|20-557-547-7976|8842.14|quests detect blithely even ideas. unusual, regular accounts sleep blithely carefully regu 142|Supplier#000000142|HvfHnP57pz0w6l|16|26-302-532-7958|8991.41|dly ironic packages cajole amon 143|Supplier#000000143|a3v,6RZMN9p FzRTdV,fm7ehoVgEhifejGnrNY4H|12|22-916-300-5765|9658.99|s lose slyly about the blithely unusual asymptotes. blithely silent instructions cou 144|Supplier#000000144|f8tddEKps816HHqNwsKdn3|20|30-726-423-7363|9806.29| carefully even pinto beans n 145|Supplier#000000145|pSsVRBOlNKVuvXcjAEKxxy0hD8kA aZf|6|16-136-582-9756|-685.94|he carefully silent requests. quickly careful deposits are quickly about the bold, r 146|Supplier#000000146|rBDNgCr04x0sfdzD5,gFOutCiG2|22|32-792-619-3155|8271.39|s cajole quickly special requests. quickly enticing theodolites h 147|Supplier#000000147|oLdl SQpf,ZXebBw5F3g9|23|33-828-583-6731|8387.76|its. carefully pending packages after the ironically special frays wake above the special deposits. 148|Supplier#000000148|bkCBZzewuerw8xHv|7|17-648-666-9156|6089.75|ckly regular grouches. carefully ironic deposits cajole acc 149|Supplier#000000149|pVyWsjOidpHKp4NfKU4yLeym|6|16-660-553-2456|4518.31|ts detect along the foxes. final Tiresias are. idly pending deposits haggle; even, blithe pin 150|Supplier#000000150|kZajmmtJB4g,nNlaHxUvXLfZW0hVtnR35LKA|5|15-169-420-7828|4161.22|l instructions sleep quickly regular requests. final orbits use never furiously ironic reque 151|Supplier#000000151|2hd,3OAKPb39IY7 XuptY|22|32-960-568-5148|8561.72|hely final packages. ironic pinto beans haggle qu 152|Supplier#000000152|jTZy8PjLJqfLxgHm7,fDoJu7ZMs luO2YmN63|24|34-659-493-1274|3846.60|lyly even platelets cajole. 
fluffil 153|Supplier#000000153|qkCHAU1v9CtEBOGXhdFXJsx5L5gViVm5k,|9|19-346-843-5260|850.55|ress quickly. even accounts are around the carefully bold packages. slyly regular pa 154|Supplier#000000154|gB51OPlY yleFDn,pnDRbsDqy1gYM,SNRbo|13|23-471-808-2661|4155.67| special ideas haggle carefully ironic pack 155|Supplier#000000155|NFoZlgq90N33cC4HbLcfIGb1uqIZy85l42qa6|22|32-417-987-3690|3401.43|es! slyly ironic requests cajole above the daring account 156|Supplier#000000156|,KXbCcjqZNjmyHzeqaL4|5|15-106-692-4998|9780.74|sits cajole carefully instead of the final, ironic requests. furiously ironic deposits abo 157|Supplier#000000157|,mEGorBfVIm|3|13-776-259-5994|-963.19|ove the silent deposits. carefully pending packages cajole furiously final packa 158|Supplier#000000158| fkjbx7,DYi|7|17-873-902-6175|1596.44|cuses sleep after the pending, final 159|Supplier#000000159|xftQu5vkiD6BF|4|14-606-224-3002|326.36| blithely quickly ironic platel 160|Supplier#000000160|LG6VM3F8MhGnmoZUpyqHgsV0IP6gOagh|4|14-471-505-8811|5067.64| after the furiously express deposi 161|Supplier#000000161|fsteD4OulIaUNa IPXVesILV|0|10-604-123-7428|7810.78|doze among the slyly even platelets. packages believe blithely furiously final ac 162|Supplier#000000162|hf2fnryOkI9VjFN8R8i5rRWguXzdlMu3o|9|19-905-888-6020|-170.22|sits. even frets believe fluffily. fluffily regular accounts 163|Supplier#000000163|1MhydNirC ,cuLIZezbkUxJhAC53ii,B|12|22-980-516-1217|7999.27|es sleep about the fluffily express platelets. even multipliers must have to sublate. bli 164|Supplier#000000164|7H,0hAZkls5qVS 6sjbnVnQtpjMnGEv3gKXVS|2|12-414-446-6598|-264.20| the stealthy accounts. quickly stealthy warthogs detect. final deposits 165|Supplier#000000165|iPso5qCxSnxaNsRe9AU05Vl9hWm5oHIS|14|24-906-333-8640|1550.62|ions sleep silently. furiously unusual requests are slyly. express requests hind 166|Supplier#000000166|zCr2Z0hHyLjSz|13|23-117-751-3240|258.33|s. regular, unusual requests about the bold packages grow careful 167|Supplier#000000167|FEDldnEfBHVGOmKdFevemmG2|20|30-725-194-1727|5731.87|rding to the slyly bold deposits haggle furiously furiously regular accounts. blithely bold pac 168|Supplier#000000168|NNvNhn6dWzr80Igr|9|19-985-776-2090|6559.35|y final packages! express, ironic foxes snooze carefully above the furiously daring theodolites 169|Supplier#000000169|ycymrfB5JV1vU,swPXggAt|13|23-698-509-1073|-927.50|ss, even accounts. ironic packages sleep blithely after the slyly regular d 170|Supplier#000000170|RtsXQ,SunkA XHy9|23|33-803-340-5398|7392.78|ake carefully across the quickly 171|Supplier#000000171|eRpB8T GdtMBvJOo|10|20-229-200-1299|3549.54|es are. blithely final packages are unusual requests. fluffily expre 172|Supplier#000000172|NckigAXBRUXbJI|22|32-481-329-1585|2077.39|efully ironic packages x-ray thinly. slyly pending hockey players haggle slyly. sly 173|Supplier#000000173|OqQzF6rfxDvkjpMXVCwGfQzj4oTHBHyW5kC5Gjxd|12|22-640-545-4690|9583.11|ly regular escapades use among the express deposits. unusual, silent deposits wake. ins 174|Supplier#000000174|e1NMjwAq6RdyIKeA|22|32-840-184-9487|-561.50|s. blithely special dugouts integrate furiously. furiously bold accounts haggle th 175|Supplier#000000175|Lgv gYbBdu S9|12|22-392-226-3266|9845.98|leep. quickly blithe theodolites wake slyly. 
furiously pending ideas haggle after the slyly 176|Supplier#000000176|OLVnGuOx8m6NfApzODj4 JP01JJIm,qI53BChmgQ|10|20-970-245-2712|6387.89|s according to the carefully expr 177|Supplier#000000177|IUZ 7G x212nzZY5aQS|12|22-246-174-4465|-741.79|fully after the always ironic theodolites 178|Supplier#000000178|VJ9DInoVjbDg|16|26-471-122-2582|4693.27|hely final foxes instead of the express, expres 179|Supplier#000000179|d3le3XaTUC|19|29-560-587-5604|7727.83|kages solve carefully alongside of the furiously regular patterns. blithe 180|Supplier#000000180|JJzFp5wZcS0KpMLM95tYmq5Pv526UBfT8vrfwBk|7|17-600-237-1665|2753.77|ic deposits wake furiously even, express accounts. slyly express packages detect doggedly 181|Supplier#000000181|7g8adZQXXuHAYdoULLDVKYRtBhdddGqYj7pMzva|14|24-682-737-4806|3665.07|usual ideas. silent requests boost across the quickly regular instructi 182|Supplier#000000182|KuSXGdsAdYdkhzVLmgo4Xs|7|17-825-333-7344|7872.30|gular instructions. blithely regular sentiments around the slyly silent dependencies inte 183|Supplier#000000183|zAAIv68BEXvllrfgsW,i8e|0|10-842-403-7954|-192.51| slyly final dependencies alongside of the fluffily iro 184|Supplier#000000184|VTbbcJp9vdqn,tJA5pG5V,596mud6ZdspXgpUWX |1|11-802-685-1889|7448.53|special deposits cajole quickly even asymptotes. quickly ironic d 185|Supplier#000000185|F4C0AWRPk3|17|27-586-282-7422|5435.91| pinto beans. quickly express packages 186|Supplier#000000186|g huGEW5nrQ0Lmd6|1|11-398-253-5445|4397.48|g along the pending deposits. slyly final foxes sleep fluffily above th 187|Supplier#000000187|oMtyTl6hTyLQhiBwrUaa42zKOjfNobEq|18|28-127-320-9230|8517.04|riously along the carefully silent instructions. 188|Supplier#000000188|boP,dP6PjNCADoMv3FaXKRREikgs4J7cYng|20|30-573-811-4001|1020.57|ual requests cajole. final deposits 189|Supplier#000000189|SlPFDFJnn4gtrol|9|19-482-239-6669|1636.09|y regular courts. furiously express deposits haggle slyl 190|Supplier#000000190|qpXGL8oM1 wzQd|10|20-995-359-7606|6705.44|regular deposits haggle across the final, si 191|Supplier#000000191|X dZg6Dtv17X7|18|28-437-699-1621|1445.00|en packages haggle blithely regular requests. silently ironic packages n 192|Supplier#000000192|Tub1t4UlJwZ5U|15|25-585-189-5975|7031.84|ccounts use blithely. unusual, regular ideas use qu 193|Supplier#000000193|0HvoBt,qEF EaKzYNCl|5|15-872-804-8448|9095.64| packages. regular ideas sleep about the fluffily even deposits! special dolphins nag sly 194|Supplier#000000194|MDIkT8cHs7|9|19-741-822-3512|1487.08|nusual platelets cajole according to the 195|Supplier#000000195|xWy21YlUy3R6L01|2|12-395-261-9720|-767.31|e unusual foxes. express, final ideas sleep! carefully regular ideas around the quickly expres 196|Supplier#000000196|TQX4fMwB5tXoz4Fi4CEhALhi6|18|28-430-406-1127|4710.62|ke fluffily regular requests. furiously final 197|Supplier#000000197|YC2Acon6kjY3zj3Fbxs2k4Vdf7X0cd2F|3|13-999-250-8664|2398.30|ithely final ideas within the furiously ironic requests cajole 198|Supplier#000000198|ncWe9nTBqJETno|6|16-355-298-7120|2187.91|ts are blithely stealthily unusual asymptotes: blithely final excuses wake. ca 199|Supplier#000000199|k,8F8FGDuN 3udblO|22|32-268-147-8879|8151.22|requests. slyly express foxes across the blithe 200|Supplier#000000200|MNqafnV52UKvwuk3jFCn,AJkL|11|21-962-509-4762|1589.13| beans x-ray enticingly express accounts. blithely final deposits run; regular packages are 201|Supplier#000000201|e3lZO QY9QpCVdc0HXrqXB5uxXnOi r|18|28-782-585-7326|9537.73|kly above the special ideas. even requests nag carefully. 
quickly pending t 202|Supplier#000000202|NALZjSfea6SY zB1,I09OJYGrA8bwR4pU|23|33-549-918-5721|6739.52|courts cajole bold, special accounts. bold packages haggle re 203|Supplier#000000203|wvdhblw9JkIe52z0gnGSDrDBsMsI3Aidqk|1|11-257-649-3327|2150.78|ar warhorses detect. carefully final requests since the unusual, ironic deposits b 204|Supplier#000000204| 7bts1RIiMsy35F6V7 lC|15|25-718-760-9193|172.87|cording to the furiously even pinto bean 205|Supplier#000000205|rF uV8d0JNEk|3|13-272-651-7242|-670.30| beans cajole regular, pending packages 206|Supplier#000000206|hva5xXEn5j7H27CdKcjFqi,QnunUZyt8,a|19|29-156-330-8311|7136.21|quickly pending packages. regular, quiet packag 207|Supplier#000000207|0QmBvfdQPh7Xy09txlnJcv|2|12-245-833-1389|3285.77|xes. pending, regular deposits x-ray against the unusual deposits. final, bold platel 208|Supplier#000000208|kr uyD ,K95lEvq77tuHVjOJM57|4|14-932-450-9228|1938.69| instructions boost blithely along the 209|Supplier#000000209|fpTboatC6ogrozMPApz2DXmlukC9YVzCLCgQOtgU|18|28-134-789-6780|-395.27|ly ironic ideas sleep carefully even courts. slyly speci 210|Supplier#000000210|eC5 e7DXYBWi8XlnFtBxF,tSWPyQAzqb8gAZ|2|12-385-448-5157|9657.88|ccounts haggle after the carefully pending acc 211|Supplier#000000211|acmexYmhAZhFyM|2|12-150-553-5979|2080.07|quests sleep final pinto beans. blithely bold theodoli 212|Supplier#000000212|ZutN4kNIsv7sPWIV9,7|7|17-617-724-5874|-335.47|ular requests cajole furiously against the spe 213|Supplier#000000213|1s7 4odatD2BWz1QjXR2 2SnFhc,Rvr2Icxh1m7f|18|28-317-938-6979|2616.54|, regular packages. request 214|Supplier#000000214|B3uLKyb, xkfHbTSUBe6HwwaBPdCvhiOqO4y|14|24-114-624-8961|7089.25|omise blithely regular packages 215|Supplier#000000215|YmZQvVCVanip2E|12|22-333-479-3656|6125.89|fully upon the slyly bold excuses. blithely regular deposits promise across the slyl 216|Supplier#000000216|K83M7iWDJx N Y|14|24-182-902-2539|6902.54|luffily final theodolites haggle slyly pending platelets. speci 217|Supplier#000000217|UVrZj7M2mMd7JLjbc tvFmQad14d1oid|15|25-113-702-9259|3084.60|arefully final accounts. slyly even ideas haggle along 218|Supplier#000000218|c7CBVglgEM0XU,8bOU76GjEr5L5EXu9Uxa7U|10|20-180-767-8426|8843.88| fluffily. furiously regular instructions sleep slyly furiously regular packa 219|Supplier#000000219|f0siVrn1T,dOIJgYU|9|19-708-346-1944|5222.69|odolites use carefully about the reg 220|Supplier#000000220|uvWVvuY3tHcE2W34jcO538wy6|19|29-197-645-8102|6746.19|. furiously regular foxes solve blithely. ironic requests mold carefully. blithely unu 221|Supplier#000000221|aU oCBZs0CUUTU|11|21-402-925-9045|4283.34|onic dolphins nag furiously across the silent deposits. carefully even ideas snoo 222|Supplier#000000222|2JQCRHT8coRlrMria2|15|25-899-779-7536|4173.23|ctions x-ray against the s 223|Supplier#000000223|MCS3 zWF3Py8UZK PHdI6LsQD98phHJ7|2|12-790-796-4808|1518.42|y fluffy packages solve amon 224|Supplier#000000224|iD7eF,uwQxxPCAFTmdXYV7N|5|15-446-447-2944|9036.79|atelets. regular, ironic gifts eat furiously across the permanently silent instructions. reg 225|Supplier#000000225|6Ez0sxpUfmtjqFDO|9|19-787-123-9863|2461.11|d packages. unusual requests after the furiously bold packages wa 226|Supplier#000000226|83qOdU2EYRdPQAQhEtn GRZEd|3|13-768-844-6969|1906.46| deposits run fluffily: excuses breach closely against the slyly regular dolp 227|Supplier#000000227|Qo959Dll Bd7xvfq3ELtCq|4|14-215-994-7949|7896.01|ng accounts solve furiously at the furiously express packages. 
carefully bold packages sl 228|Supplier#000000228|pyTY uocaSasIUlrHUbBwM,r,|14|24-920-216-6514|2291.35|s above the final deposits detect along the furiously idle packages. b 229|Supplier#000000229|ycjgLrk,w8DcakfwTS1SO5kVch|14|24-392-655-8913|8751.47|ly according to the carefully regular 230|Supplier#000000230|TgT146Clu9ODbYYBx4Wd8oe|1|11-819-222-2128|2975.98| regular dependencies. packages after the accounts are blithely blithely bold packages. furiously 231|Supplier#000000231|uZfFnyAs9oFJ0No97rtQ OIV1AOYi|5|15-551-432-9958|9287.33|mptotes sleep above the regular dep 232|Supplier#000000232|90YJjotHlfwyieaTfuBJ8kohU5Oc83bESout,p|7|17-478-427-3811|300.80| asymptotes lose slyly evenly ironic asymptotes-- blithely bol 233|Supplier#000000233|Pu9mqssUjJNoqiGG4vfLTn3ENJnNPVrBNnXJM5Eb|6|16-283-737-6972|4544.18|kly pending platelets sleep blithely along the ironic, 234|Supplier#000000234|iMrk7HUD87at3IIh4rBi|24|34-187-193-2209|9957.00|nag furiously pending dependencies. unusual deposits integrate fur 235|Supplier#000000235|W89jjgy458|8|18-835-352-6531|2424.93|ackages run blithely above the fluffily even dolphins. dep 236|Supplier#000000236|dZExtd1dlMyrPdSqDC3|15|25-250-525-2436|9844.00|lar platelets. blithely ironic packages cajole blithely special ins 237|Supplier#000000237|NzEXFiPN5tnrZzs1E,uGeoEqolOlDYE7oQHnQWg|23|33-976-749-2413|6170.98|regular ideas. carefully express ins 238|Supplier#000000238|xjSZNq AIqmrV UfxyGcS04RYOzW|8|18-950-354-3488|5763.17|s. carefully final courts impress furiously among the slyly regular deposits. ideas thrash furi 239|Supplier#000000239|XO101kgHrJagK2FL1U6QCaTE ncCsMbeuTgK6o8|22|32-396-654-6826|4672.25|arls wake furiously deposits. even, regular depen 240|Supplier#000000240|yMzL86zw28z6sMa|10|20-843-630-4161|6537.07|sly. final, regular pinto beans unwind slyl 241|Supplier#000000241|K3sXvTEnHT9yQSbzZmbPNY0,iPQsmySI|8|18-213-514-9357|9594.49|arefully final foxes. regular, pending requests haggle busily special asymptote 242|Supplier#000000242|cpZMlI77TRq|11|21-489-286-5908|3736.64|nic accounts sleep furiously according to the quickly pending requests; slyly bold deposits haggle. 243|Supplier#000000243|8aQ3HGeOXxgYeMAXZQe B5y2RKEF5jdmN3Qb|6|16-554-376-5494|747.88|kly silent requests among the blithely regular foxes use fu 244|Supplier#000000244| c6fBN9a 6EOcB1ZjbImMBAQMwI BKScDNVRP8|1|11-556-986-9638|5489.08|d blithely. pinto beans boost quickly. blithely bold accounts affix. 245|Supplier#000000245|8qUceq1TSsNaTol3Ul6TDNSu9yRddanHEL|8|18-739-793-2096|6943.28|ual deposits x-ray slyly. carefully regular epitaphs about the 246|Supplier#000000246|BatWQwH8DgkBs|4|14-278-988-3903|5740.87|ealms are. even theodolites use regular, unusual ideas. ironic, final ideas sublate 247|Supplier#000000247|0bkES oiL2joJGmxdGwPfVCOL,pIQ4JNZBPnOR|23|33-695-935-2388|3758.64|final requests. final accounts affix. express accounts about the furio 248|Supplier#000000248|vDPBiu4L7hPZxwaEoNW u,zHgXVHvBXFBdfuek|6|16-646-663-7202|7487.64|ss packages are idly about 249|Supplier#000000249|ktpns6Bvu4fP8Wzzlc8|10|20-241-221-7700|2800.60| regular excuses about the blithely pen 250|Supplier#000000250|JkzOvHRi1rEdkZ|9|19-347-403-1571|9681.99| packages cajole blithely pending theodolites. ironic, silent requests 251|Supplier#000000251|Uqi3s, iqzLxI4duoRfgkciiN4XuCvITGIUf|21|31-869-288-1674|283.61|eep blithely even, ironic requests. carefully pending courts above th 252|Supplier#000000252|xcaKgadrug|11|21-860-636-2585|1523.14|tain slyly fluffily bold ideas. 
furiously ironic ideas nag furiously slyly even requests. 253|Supplier#000000253|DbryhLi6gCv8A P9FkjNp56mLP4KnV9Do|21|31-811-659-6792|352.61|ng courts wake quickly against the slyly careful packages. even ideas nag caref 254|Supplier#000000254|c6h4mizJAVT0Oz|7|17-100-212-8737|6230.48|nos. bold ideas wake carefully among the furiously 255|Supplier#000000255|qx16XyCEUh9OawVeQWOlGlhAU32iHFPNkO|18|28-629-327-4139|4663.08|s boost. ironic pinto beans along the slyly unusual foxes haggle regular, final asymptotes. reque 256|Supplier#000000256|ma15idU1PzO7itP67W1Rd|11|21-370-961-2170|6469.78|sly pending deposits haggle 257|Supplier#000000257|BjFRji2XvSyOVe MHMP9r5vC2eP4kfF GXwBe |16|26-806-563-6761|630.74|odolites use slyly finally silent requests. package 258|Supplier#000000258|z8ImnYbuBbohh1 WT|7|17-820-568-1141|5736.09|press packages. final dolphins sleep sly 259|Supplier#000000259|2zzYBzG12K|10|20-901-530-6532|439.97|ts wake slyly along the express dep 260|Supplier#000000260|TQGxRpJe08nPRqPYDIMTnst87mC0HZJ,rlnCQTF|23|33-226-804-7400|597.64|silent, stealthy instructions. furiously final tithes hinder regular requests. expres 261|Supplier#000000261|vUT2UDI,GAqIA|0|10-547-567-3297|-466.40|kages. furiously express requests are. carefully silent deposits at 262|Supplier#000000262| 3kCm2eN3Jra2UzvWWBm,P04b|20|30-952-865-8647|2138.97| ironic escapades. furiously even ideas affix blithely. silent accounts thrash beneath the 263|Supplier#000000263|7Vdou,WjHE|14|24-203-272-3139|-316.81|s quickly regular foxes. stealthily ironic p 264|Supplier#000000264|Oeq2ei9wRqt6yOuAAb0KoMRyoH4v|19|29-870-178-3535|8327.34|e busily ironic theodolites. quick deposits after the unusual dependencies print sly 265|Supplier#000000265|eHF4Edu,B8,NgBSSEV4xNC37i1q08WCNKyOe6jP|19|29-734-865-6334|2638.54|le evenly besides the fluffily fina 266|Supplier#000000266|LGWx4Xcq0GwQVwTR|23|33-256-678-2321|2874.22|y express foxes. special, pending deposits are slyly-- packages detect blithely ironic se 267|Supplier#000000267|,E9NoXeK5qOwVRoutS,X8Gr|10|20-501-962-2896|4068.14|riously close asymptotes are. carefully bold p 268|Supplier#000000268|ApLM6aebtP79LIp|23|33-863-692-9039|6261.99|nic frays use furiously among the final, ironic the 269|Supplier#000000269|LxOgEClzbXDEYHYG2ZMFoWHrD,C|2|12-988-623-6829|6331.84|lar foxes wake quickly about the carefully ironic asymptotes. final, regular do 270|Supplier#000000270|PM P65mhBoeFWkFMi9,|23|33-682-295-4727|3339.18|ss slyly after the fluffily regular packages-- quickly express pinto beans nag blithely 271|Supplier#000000271|U0BSsnILvbk7mL1cim,ZX70QIiTz|15|25-270-292-6296|8576.95|its nag above the silently final escapades. final, even inst 272|Supplier#000000272|ywrDqLLTfKUF93|7|17-209-164-6836|6858.73|of the furiously final accounts. bold dependencies after the fluffily ironic asymptotes hag 273|Supplier#000000273|O0TyoQXlzJx|15|25-247-243-3871|3708.18| pending ideas. carefully silen 274|Supplier#000000274|usxbl9KSW41DTE6FAglxHU|21|31-571-345-4549|6296.15|ecial courts. express asymptotes must have to sleep theodo 275|Supplier#000000275|Mh9eZVjmBPZc5I2KHcMcfZOYbpFqrlVOLR|21|31-446-410-5215|8477.48|packages. ironic packages are. pending deposits are according to th 276|Supplier#000000276|KdVDs6EGfWVsPdjuCh9iep|6|16-752-344-8255|7144.78|cial, ironic theodolites against the decoys cajole slyly ironic foxes. carefull 277|Supplier#000000277|REebuerbQmMxlbCXnl2ihK,VyItkTNxU36 c|12|22-758-939-2357|4300.15|final deposits. 
unusual pinto beans after the even accounts affix idle, final 278|Supplier#000000278|gBQmkVmclUGbjwHVtrdmCk9Dwqd2Dez1|9|19-563-887-6379|665.89|deposits against the final foxes use carefully express ins 279|Supplier#000000279|aVGVO0XNwTEkFwH0OhaQMJC7un0EizPpq|13|23-617-962-7353|2613.77|accounts. quickly special packages nag at the unusual requests-- special d 280|Supplier#000000280|MZAJPsr3TS62ocxtRc|11|21-536-349-7369|4540.53|nticing platelets. ironic theodolites about the regularly final accounts sleep accoun 281|Supplier#000000281|A2sesSQAAj6wvPPKL X4caRp,O|0|10-553-675-3578|3717.19|ooze fluffily express, pending pinto beans. regular instructions haggle ironic 282|Supplier#000000282|Gh,k4vQQPzOyIA9ac9re5IjaV5LyXDtZX9O1kNG|8|18-420-459-8366|3751.04|into beans. carefully special excuses cajole furiously. regular d 283|Supplier#000000283|MWLpGjjs1WoofKSoN7Lm0DXcSY5H4E9CeneF67ZQ|17|27-409-203-4267|785.26|ess instructions. slyly unusual deposits according to the accounts run past the slowly even package 284|Supplier#000000284|7NF0OkJazPIJ7l,LBSw3abv|15|25-955-684-2997|6866.81|quests nag carefully according to the furi 285|Supplier#000000285|Br7e1nnt1yxrw6ImgpJ7YdhFDjuBf|3|13-907-873-9658|7487.27|hely? slyly bold patterns doze carefully according to the fluffily even packages. bol 286|Supplier#000000286|o80iAPvmwqM3WOA93pqBHT4Dsgy1rwG|10|20-459-893-8984|7517.31|iously regular pinto beans sleep carefully slyly even accounts. packages cajol 287|Supplier#000000287|7a9SP7qW5Yku5PvSg|3|13-484-626-4797|6541.85|nic epitaphs are slyly above the foxes. ideas after the quickly bold pin 288|Supplier#000000288|8fUJhFrKTMmi5L|10|20-850-734-7789|1884.76|ual deposits wake furiously i 289|Supplier#000000289|6Tua0IHqN0sod1Tpdax5hD0|16|26-396-901-4544|6144.47|equests can wake: furiously final accounts lose regular, final requests. special somas affix. fi 290|Supplier#000000290|6Bk06GVtwZaKqg01|6|16-675-286-5102|167.56| the theodolites. ironic, ironic deposits above 291|Supplier#000000291|0qDDQst1b1bznHQh5jsmOq8nxf8Pz1Kn|0|10-404-519-2270|9132.12|wake silent, pending accounts. blithely final as 292|Supplier#000000292|POEdp8d ug5bKQa,w4SAMaM2xna1ZtVk4z9loa|21|31-933-112-8506|9598.62|ular excuses. furiously even requests sleep carefully around the fur 293|Supplier#000000293|H2JnUWv1X3s0yI7i2tY5Vd0kd4f0|9|19-143-962-9484|7114.81|t the instructions. daringly bol 294|Supplier#000000294|20vnacv,dtZ0mr5kukNf|18|28-299-788-1836|6700.75|en accounts. silent, regular instructio 295|Supplier#000000295|gpm7fahY9j6YyTr Dozul|23|33-998-989-3147|-435.02|en requests according to the 296|Supplier#000000296|g,WJbekrbjAcpNtn2QRsWtYx2RNVk 9aY|14|24-250-680-5383|1549.59|through the blithely regular depo 297|Supplier#000000297|gfVOI9aT2roNGEgc|1|11-562-493-7660|4954.57|al, express epitaphs? furiously ironic ideas haggle regular, ironic instructions. carefully special 298|Supplier#000000298|oIB13yXjsqGSW|5|15-790-635-6692|-530.05|egularly unusual accounts 299|Supplier#000000299|kvWv3MYn,Q2SqJlckBfhpWYmnSeRwg6|10|20-180-187-8847|-561.38|lar asymptotes integrate darin 300|Supplier#000000300|YU QZvXHJC7,ZspUPGwaIOa|2|12-468-732-4623|811.42|furiously even theodolites haggle along the final, ironic foxes. bold deposits are. 301|Supplier#000000301|YPFTsQOPRAGIlBw|4|14-434-699-9741|6472.62|express foxes sleep carefully even packages. carefully special ideas cajole slyly. carefully r 302|Supplier#000000302|ow8Io1JHkPnIAGCQYCWC|18|28-734-845-8630|4422.77|tructions after the foxes cajole regularly ironic tithes. 
ruthless forges across the slyly express 303|Supplier#000000303|MCFk0WJH4O9Fj,m3as3bdIgnP4pe 8qPj,wfo|8|18-932-912-3102|4500.90|ously carefully final foxes. fluffily silent requests across the fi 304|Supplier#000000304|b4rSMq4y hBDMJgcNp|11|21-261-644-8980|8564.16|endencies across the bold deposits nag 305|Supplier#000000305|pKYTaixBtQ3AGJLwndRNxUlLUkXN667JT|17|27-607-328-4914|974.24|uriously across the requests; ironic requests serve bl 306|Supplier#000000306|T5EUviO4mqrGO6ruLcIoo29M5S|16|26-954-209-5690|3979.54| wake fluffily. furiously final 307|Supplier#000000307|3wL9YHFIvddxzh3mwy6SSrpfmzKvwAGmXK|14|24-499-938-5607|2168.65|s above the unusual theodolites was quickly quickly perma 308|Supplier#000000308|mVY7rtoxj9uSqzAJIp TOWPsv080hbPbOStGZx|8|18-360-691-8400|3513.89|tructions hang carefully according to the furiously close requests. fur 309|Supplier#000000309|gT84r,AFSrZQpiSCnE02B3QK|2|12-950-744-5100|7875.48|riously final deposits boost blithely about the even foxes 310|Supplier#000000310|I5Mw,rGgWQOFVotMHUmVjH|0|10-119-372-1062|9312.63|ccording to the carefully express dolphins! furiously pending dependencies integrate s 311|Supplier#000000311|yjGDnCKi4Wmtim H3n9p|22|32-445-679-8585|7431.00|uriously final requests integrate. sheaves against the furiously final accounts are evenly abo 312|Supplier#000000312|8XYqBq S,OWbSp9Y2qGBWEV4hH l7ywxk|13|23-829-479-5202|7844.41| furiously requests. always bold accounts solve 313|Supplier#000000313|9xrpKrwr9,Jgvm5q rGkZePkpsH5xEXZnuLJug|9|19-648-945-5128|4114.68| beans. special deposits use carefully after the blithely even 314|Supplier#000000314|CW6uXfPdJ6lmcJ|22|32-389-335-6770|7456.49|s dazzle regular, final dolphins. furiously unusu 315|Supplier#000000315|HRLhxvqBP,f,NjeUiT|2|12-574-691-2832|7315.00|eas. blithely pending packages cajole blithely ironic packa 316|Supplier#000000316|uXJ13cI7fXByGLBoQ8VU9AT|0|10-382-551-5105|5189.82|ding to the slyly unusual frets. accounts use carefully slyly silent theodolites. slyly ironic depen 317|Supplier#000000317|EwuMPFkb3IrSqitX8ddgxy QWhXxeM4RoOu2d|17|27-131-276-7910|4277.34| packages according to the deposits sleep carefully regular hockey players. quietly bold 318|Supplier#000000318|Cm5fnQ4M6VeJF17mBbV6iB0XoQL31STrb3Su|24|34-466-917-6708|2268.02|ly express accounts cajole blithely brave instructions. furiously even 319|Supplier#000000319|tcES8Ay3zcHQER9K6gAKFWlDvcg nrbPl|18|28-288-466-7847|1578.18|ts are furiously even, pending pinto beans. fluffily idle courts cajole bl 320|Supplier#000000320|v2FlHgKKiGiDwR7uoJNWF|21|31-984-744-5308|1485.45|ns nag carefully carefully pending platelets. pending deposits dazzle. requests above the i 321|Supplier#000000321|pLngFl5yeMcHyov|19|29-573-279-1406|4941.88|y final requests impress s 322|Supplier#000000322|lB2qcFCrwazl7Qa|6|16-803-605-4129|6102.62| to wake. slyly even ideas breach furiously. ironic foxes haggle carefully. ironic asy 323|Supplier#000000323|0LEOmcTTomY1F0y|22|32-563-275-6438|704.83|accounts. unusual requests haggle slyly special packages. always silent instructions e 324|Supplier#000000324|QDsgOSozg jniYR2HzxofLZyk0qGdJe|18|28-637-452-5085|8042.43|ithely slyly special waters. 325|Supplier#000000325|bQgx8ABSanNFNfCHY,2 uPvGfafaMC5|8|18-671-443-5744|-786.95|unwind carefully among the ironic, final requests! 
quietly unusual foxes hagg 326|Supplier#000000326|9kFiCwhcBldg4xwm|7|17-390-604-7483|4680.75|quests could use furiously across the ironic, even f 327|Supplier#000000327|MoC7Jc7oThpZ34HmJPKuUbOZwOyPOb1ksGlvT8o|0|10-519-344-7532|6408.52|final accounts poach quickly slyly regular requests-- furiously careful requests cajole car 328|Supplier#000000328|SMm24d WG62|7|17-231-513-5721|5069.27|he unusual ideas. slyly final packages a 329|Supplier#000000329|zPg8 aqmUQ4wHGC 8nO39C50AOMB1VxTsJWmpYyz|17|27-571-511-5321|2166.85|endencies. boldly silent pinto beans cajole. carefully unusua 330|Supplier#000000330|MUxmICc8xS41A|22|32-845-516-3236|6199.85|ix bold requests! final deposits against the ironic ideas boost across the bl 331|Supplier#000000331|VkdMTxXHy22069CWmK658|22|32-337-220-5366|6929.91|special theodolites use furious 332|Supplier#000000332|OpvOrxiHWJ6QM|9|19-777-809-6033|1106.07|ve the furiously pending warthogs. slyly special ideas haggle carefully. express 333|Supplier#000000333|MaVf XgwPdkiX4nfJGOis8Uu2zKiIZH|22|32-508-202-6136|8702.02|oss the deposits cajole carefully even pinto beans. regular foxes detect alo 334|Supplier#000000334|NHMGXoP 8cWsNXvV46AkZZJuptUm4mFo8|13|23-767-548-6051|7848.40|efully regular requests: final requests solve quickly. pending deposits across the blithely express 335|Supplier#000000335|JSwzRFY7dDNKfBe8ebMtm|16|26-741-643-2945|5119.09|ong the regular ideas haggle along the close ideas. furiously ironic ideas alongside of the fluff 336|Supplier#000000336|orYe2VXtABdK sUvMCOZ9ZMhBJTPp7W3pffWmjZi|1|11-866-373-2323|3603.62|refully even packages above the unusual accounts detect 337|Supplier#000000337|IRrbCdIS,GB4YYhr|12|22-951-643-8793|9029.85|en theodolites-- special, final deposits should have to boost ca 338|Supplier#000000338|2b6Gj,rSxqeIiAOMquj6c03p GmAzIog BrF05|16|26-565-914-7207|6835.16|fully silent requests cajole idly. even dugouts wake f 339|Supplier#000000339| ESOHA53rEW5G7Z75w5hJ|5|15-967-506-6094|3589.64|ts snooze carefully express accounts. foxes nag carefully 340|Supplier#000000340|Pk5F9dPqsa3k16I3UR282gY|22|32-849-350-3343|5001.23|along the packages. blithely final foxes cajole. unusua 341|Supplier#000000341|sdb9yPRPun Z awDuDPUId2NH0Yrz7dnJ1GBDc|14|24-320-626-2836|2755.92| express deposits against the dependencies use blithel 342|Supplier#000000342|YgcA0MYOSB1ou|2|12-829-736-2942|6791.04|s wake above the quickly pending attainments. furiously special re 343|Supplier#000000343|LTRj tcLNq34je60R7LkZtNrEwmry00DIXguSxMS|4|14-617-526-7362|-905.27|ckages sleep among the slyly express excuses. special, regular pinto beans are carefully: express d 344|Supplier#000000344|F mMVT6PuRj5S|11|21-808-924-2361|7082.37|inal courts nod fluffily for the care 345|Supplier#000000345|ZYuibETx2zArwg|18|28-503-710-4886|351.98|lar dolphins. carefully unusual packages according to the furio 346|Supplier#000000346|9vl7c3azrqt9wITrcglQhIGfwea|8|18-148-870-6674|6710.10|ecial accounts. quickly pending packages print. slyly bold pinto beans detect slyly unus 347|Supplier#000000347|zg0m5HrQtl D|8|18-932-818-1269|2674.43|special escapades promise pending, regular deposits. blithel 348|Supplier#000000348|ZNEHF5c7kP5tPGdQ ZrJZNRdQnu0M9LVyO urMm|8|18-338-824-3860|1172.37|eas. frays nag furiously final accounts. fluffily unusual theodolites use furiously above the slyly 349|Supplier#000000349|21Sp51XIZ9vTChQBWv0CA1o1P,26clhjR|13|23-532-708-7267|1033.10|lithe packages. 
carefully final accounts on the carefully final accounts sleep slyly 350|Supplier#000000350|KIFxV73eovmwhh|7|17-113-181-4017|3294.68|e slyly special foxes. furiously unusual deposits detect carefully carefully ruthless foxes. quick 351|Supplier#000000351|ZLWTvVCSmwsKfElT7K 2O1Ui|12|22-508-407-2628|8684.60|ithely ironic theodolites play. decoys sleep slyly against the deposits. s 352|Supplier#000000352|LsVcltEi9NYu10ByH 5grND|16|26-911-452-7918|1189.55|ickly. platelets sleep slyly blithely slow escapades. special requests boost furiously. slyly enti 353|Supplier#000000353|Bo,HYtujUMsMvE,|15|25-153-823-7261|3646.16|ular packages. deposits nag slyly abo 354|Supplier#000000354|w8fOo5W,aS|3|13-796-527-4255|8965.72|blithely regular accounts around the special foxes kindle blithely across the even dependencies? 355|Supplier#000000355|DuCQn,7qi1KL a99mTWSY4Z9eC53dslWPABGj7|16|26-389-592-6792|6929.22|ackages cajole according to the slyly ironic theo 356|Supplier#000000356|Lefi6RDDtvaVXqvhlbMuUrVm45oJbtkZM,Mup|11|21-198-523-7929|397.74|carefully blithely ironic excuses. enticingly blithe packages along the attainments haggle carefu 357|Supplier#000000357|Vf7Hi5DuzZ6RJ,mfaXBVNqx0|20|30-147-268-1519|5724.04|e slyly among the furious 358|Supplier#000000358|V3yxhHhHSXqOoc5UPv5TwkVDGKQsG|3|13-831-247-2584|6974.74|ans. ironicCustomer requests cajole carefullyComplaintsy regular reque 359|Supplier#000000359|J5HqPZnfLcFMtlgwCnZPUI|24|34-121-923-9858|6476.58|ons. furiously unusual accounts above the blithe 360|Supplier#000000360|3avxIDL4YPrZVHie4rOiPrK8Z IJwEPqZaioHK|5|15-671-472-7761|4956.01|uickly regular asymptotes. packages ar 361|Supplier#000000361|f8IUYRmdVXhQC9qJQjWknCXmzhe38vCbk6|3|13-192-383-9438|1678.56|deposits. carefully final deposits cajole carefully after the furiously regular ideas. 362|Supplier#000000362|XdtN0U5Qm2Z|23|33-445-749-9918|5571.81|e furiously. slowly regular accounts sleep furiously. carefully bo 363|Supplier#000000363|sYpqZxYin8GKkPtNWKOnJMTLm9f5e0lZ61N8wp|1|11-220-343-2951|505.69|express requests cajole furiously blithely final deposits. quickly special foxes breach 364|Supplier#000000364|OhfGUPn8U9oBx5|9|19-653-706-8164|5105.84|regular dolphins promise after the special deposits. blithely final pinto be 365|Supplier#000000365|SZaykm40Np0vOKp|23|33-253-791-9564|901.98|s. deposits use slyly except the slyly final instr 366|Supplier#000000366|AtIdvjsMt9peVyEbpoDerNTteRF|12|22-648-291-8851|-535.40|ts. slyly special asymptotes c 367|Supplier#000000367|E Sv9brQVf43Mzz|22|32-458-198-9557|8488.53|ages. carefully final excuses nag finally. carefully ironic deposits abov 368|Supplier#000000368|3o5w6T5HzjFmSf1|0|10-694-873-8364|5783.96| nag fluffily alongside of the silent depo 369|Supplier#000000369|XKLa3tQT7,TgtuLi2Vme8vGyx|7|17-381-930-4614|2094.34|cording to the special, regular pinto 370|Supplier#000000370|yyNSJAG9UXcWit4SeMkEIrNcdVq5|0|10-602-768-3758|8515.99|ound the unusual foxes sleep finally within the furiously unusual requests. sl 371|Supplier#000000371|7kc0KqnPxrJuGZdrrec7Cn,wrCPdxPemNPZQ|19|29-501-449-3837|5358.50| among the ironic accounts-- regular accounts nod slyly 372|Supplier#000000372|Bdhu5NV4VfPYBxsCmK,YnkoHIaW|5|15-246-325-3001|1311.15|ltipliers. blithely regular deposits was above the furiously even accounts. q 373|Supplier#000000373|oQAQ3UNvyJW|16|26-656-301-9303|1461.85| asymptotes wake quickly around the slyly regular dependencies. regular attainments haggle along th 374|Supplier#000000374|svrrNWiqg1f3tEXZdVbFK CAtLDsW1CidtyS|2|12-699-158-6062|4263.58|ithely. 
ironic pinto beans use furiously abou 375|Supplier#000000375|3CIBgjwAjB A1uxkiJNjc 7pI9AKhvnr1BHV9|11|21-250-668-2735|1962.02|ully regular pinto beans acros 376|Supplier#000000376|L8OWL3jXMCR3Gh|16|26-752-731-5943|6579.41|usual dependencies haggle above the carefully regular platelets. never regular foxes detec 377|Supplier#000000377|L4SF6EzZ xhyZCQ59onlADR4|16|26-634-598-9185|6113.96|ly express accounts wake enticingly special, express frays. furiously 378|Supplier#000000378|FfbhyCxWvcPrO8ltp9|3|13-930-567-5190|4429.27| among the furiously pending excuses. fluffily express deposits except the slyly final packages 379|Supplier#000000379|jyGWzIJTAdI0Iko2o|20|30-202-917-6929|3698.31|sleep? express packages are quietly around the slyly even ideas. express accoun 380|Supplier#000000380|LiXmikqsO6R40FKovSUZpl|20|30-245-487-4913|5421.70|re. grouches against the blithely regular asymptotes sleep slyly daringly p 381|Supplier#000000381|NfyIKLJrXusWgmgkkCS6Gn3RRra|6|16-117-297-1825|7476.55| packages haggle blithely express tithes. blithely final deposits wake bli 382|Supplier#000000382|wNasxwKglHa|15|25-531-651-1411|3486.56|furiously final deposits-- even foxes haggle carefully about the fur 383|Supplier#000000383|pGEPbc70IKUZuTiTaigKuizjt7Y5oUoDL3Bq1|22|32-792-648-3774|-484.12|nic excuses. carefully even requests alongside of the regular p 384|Supplier#000000384|zMr51gtJ0Vu83Dk|7|17-554-428-8511|1342.17|taphs cajole furiously blithely final 385|Supplier#000000385|4RDwKCNc6 yBY|18|28-730-866-8837|5836.17|frays. requests sleep ironic theodolites. carefully even requests doubt furious 386|Supplier#000000386|zxFf8YWdD9ltSzw0NOTFpKcBH8zJrzif9|13|23-535-472-3290|-404.12| foxes wake carefully dependencies. slyly fluffy depen 387|Supplier#000000387|EEmqrW2gNAbuJjKuTPgA8kmKA0bZcQSmV|7|17-797-328-7624|3854.14|ld, unusual packages alongside of the carefully final deposit 388|Supplier#000000388|n27XQohXrXlJRLdsyXNoljPS|18|28-306-827-7902|6540.34|rate around the regular accounts. furiously special pinto beans use bli 389|Supplier#000000389|FW96liSdq3noHJpwM|24|34-885-883-5717|9467.35|nag ironic packages. ironic pinto beans would sleep furiously. regular realms wake across the 390|Supplier#000000390|8Qv7RjLQSFfyt5JpH8fsTf0|7|17-821-610-9726|868.36| to doze along the foxes. final requests are furiously. furiously express accounts use bl 391|Supplier#000000391|HBkwkigT2P9bU2wXBrPnQ|15|25-736-211-2793|6475.66|ckly furious dolphins about the furiously even d 392|Supplier#000000392|5YSB73Q4LMC9648IF1GGJAP|17|27-961-813-1153|527.38|ckages outside the furiously silent deposits sleep within the fin 393|Supplier#000000393|hxGMP2tFry WHEeI5cmEFVF027E|5|15-120-912-1594|6686.84|t the furiously regular deposits. excuses about the ruthless, regular 394|Supplier#000000394|5mGD3d,LeKycAyeYbVlrGMlzmT|9|19-517-731-4139|7685.82| accounts play quickly carefully unusual requests. blithely pe 395|Supplier#000000395|vYD9APwEz6R1BFWJ,GDJ7yCUZJm|18|28-723-165-7801|688.37|xcuses. regular deposits across the blithely final packages haggle slyly u 396|Supplier#000000396|WMtmXuQwBKESTTFH4ru1f|10|20-236-453-5458|6420.86|ts. quickly unusual accounts are fluffily above the express requests. daring, stealthy pi 397|Supplier#000000397|kFrhzYG1SR8aWrHsftcptN,im88pImSkxxFu|24|34-880-360-3462|3425.62|ependencies about the regular pinto beans haggle quickly about the s 398|Supplier#000000398|WTV,rO0S1KZFKlhLsUpAH|10|20-536-426-8920|9354.75|mong the courts nag slyly special foxes. 
furiously regular theodolites w 399|Supplier#000000399|UCzZPQfZXnRhGZcXfnnvkfnqLVEOc|20|30-814-148-6803|345.97| haggle furiously about the close theodolites. foxes along the bra 400|Supplier#000000400|QACx8vfYzPsZHCet2Yjst4e2XzjOXF|21|31-514-285-7013|4624.87|eas sleep furiously among the regular ideas; slyly bold excuses alon 401|Supplier#000000401|9 zStaJ sD|10|20-586-179-6994|2233.53|ages. dolphins integrate blithely final waters. carefully unusual accounts are fluf 402|Supplier#000000402|i9Sw4DoyMhzhKXCH9By,AYSgmD|3|13-109-731-3195|4943.01|around the carefully pending dolp 403|Supplier#000000403|TTVlcRcFrglnhCffA11iw l6bZyyaI9xcRF|9|19-513-268-3908|3102.18|he regularly ironic packages. idly final excuses integrate according to the 404|Supplier#000000404|RQwxP4tpScystYCqOJ,XpCWr4Kox4|7|17-943-161-3434|7311.61|. carefully silent instructions affix. blithely even requests unwind. final dolphins de 405|Supplier#000000405|dJIijVTX n7M0NDEQvCA |24|34-728-545-7374|3213.18|er the foxes. special warhorses nag fluffily 406|Supplier#000000406|zMhU58CDF4aHTeodxg9IgRZgq|21|31-926-216-4352|2867.41|o the even accounts. fluffily ironic asympto 407|Supplier#000000407|WliGC47Vto2nh7mj|11|21-112-803-6707|-459.62|ar asymptotes. carefully regular req 408|Supplier#000000408|qcor1u,vJXAokjnL5,dilyYNmh|22|32-858-724-2950|6173.87|blithely pending packages cajole furiously slyly pending notornis. slyly final 409|Supplier#000000409|LyXUYFz7aXrvy65kKAbTatGzGS,NDBcdtD|7|17-719-517-9836|-820.89|y final, slow theodolites. furiously regular req 410|Supplier#000000410|6V,FO4xJPwvxGzReYzVj6dwTSIechnSSCyz9iY|19|29-822-375-4854|6234.92| beans sleep outside the thin instructions. thinly even soma 411|Supplier#000000411|G9H53XVrdbhRgvQwho1AS|18|28-518-787-9625|2250.74|ial foxes. furiously permanent packa 412|Supplier#000000412|S2onAA,jGtQ3qfpN|12|22-351-499-2131|8183.66| the packages! quickly even warhorses haggle slyly along the final, expre 413|Supplier#000000413|GAufsRQQE P,dVCZWIMEUAsm,7|11|21-875-836-5585|7788.45| the fluffily even pinto beans. closely regular asympt 414|Supplier#000000414|FkmlHgU9pqCboQ32Lcx|21|31-118-322-1371|9547.00|. blithely unusual packages might cajole blithely regular requests. 415|Supplier#000000415|ibzrtLp NIBzzQVh2mc6M7GJj3V2Z5uKlIDw,z|20|30-297-706-6489|8575.44|inal deposits against the ironic Tiresias wake according to th 416|Supplier#000000416|F,9zQapGlzjmqRhVTj1DR|11|21-245-879-3004|3550.06|ic orbits according to the furiously f 417|Supplier#000000417|b3CbQxCMWWu,YyeQU 51fccuv7Mt|6|16-563-597-5520|-113.45|equests hinder quiet courts. carefully 418|Supplier#000000418|l07dIg BFdcW|24|34-689-611-9130|5229.01|re of the carefully final courts. ironic pearls haggle slyly along the bold, regular d 419|Supplier#000000419|FpWtqjkbqEXn|6|16-510-433-1061|2899.03|of the carefully express accounts. even tithe 420|Supplier#000000420|kLWtAMtbSn|12|22-971-269-4753|2968.22|eas cajole around the regular accounts. evenly even escapades 421|Supplier#000000421|tXZPR dOYjjbGjarXxKPn,1|8|18-360-757-8604|-128.86|c dependencies. quick, express deposits cajole quickly. fo 422|Supplier#000000422|iu4c7rkFFNOvmfx,aSs62I|10|20-645-417-6790|-755.64|ly even theodolites. blithely c 423|Supplier#000000423|VCgMjClu4IDaVVMwMW0ARf1ho|24|34-577-174-3894|2937.16|quests; bold deposits lose pending deposits-- slyly pending packages play slyly. regular, ironic mul 424|Supplier#000000424|uOdFKME6fSAI,rvLcpTL|22|32-406-948-7901|5881.52|es. 
furiously pending ideas against the fluffily si 425|Supplier#000000425|a KnEGf,bqEnGd2Wd9Tl|0|10-262-132-6639|2805.77|ular pinto beans are among the fluffily bold acco 426|Supplier#000000426|zjIHPRMAI8vF|23|33-576-289-4702|8621.42| requests nag. slyly regular ideas 427|Supplier#000000427| Hnd2bzQ95, Adq bg7BQAbFVmRV9rQ,jY98|20|30-315-782-5162|6223.17|he regular requests haggle blithely about the forges 428|Supplier#000000428|v,lOR2jikAbT0hNyPuYhhJODDs2VBPp|17|27-542-634-4350|2890.48|ly final packages. silent depo 429|Supplier#000000429|6ITML8w7yXMd5wzp4xUYXX7rb|23|33-283-246-6716|2267.98|l ideas sleep. furiously ironic 430|Supplier#000000430|SMdrDaNv,2XyFOL6oVEfvH|1|11-541-204-3496|939.22|? silent pinto beans are abo 431|Supplier#000000431|WoeV,NINojE6hJjauAdrl5fGcdxX5JUPLnrim|3|13-269-548-1399|9477.34| according to the bravely quick dolphins. deposit 432|Supplier#000000432|ZkBaoMg9n7nXd1fyn |10|20-401-350-6978|3812.16|ven deposits sleep slyly after the blithely busy decoys. slyly ironic deposits 433|Supplier#000000433|At103qyX,VicINJGCOU51mQyfdYBB44Cg0S|14|24-141-780-8375|4988.55|r carefully according to the furiously regu 434|Supplier#000000434|endL6N 85uU0NemLv4L3mSEH4LT2BF|5|15-763-277-6054|9522.03|n, final packages. furiously pending c 435|Supplier#000000435|xKvU,V2SZj3OqEwdlgXs01K jSbJRjYYF|8|18-874-271-6733|6776.54|nic theodolites. blithely dogged accounts haggle furiously express pinto beans 436|Supplier#000000436|MV8Xu3m,93IINpPlE|20|30-253-200-6170|8396.49|. carefully regular request 437|Supplier#000000437|HuqIk0sK4yC6x5be,cTlPrFqqBCHYf|7|17-652-134-3031|9807.53|usly bold deposits about the pending 438|Supplier#000000438|cePboEvTZ6IfUAG 8asHxVbEmZnLSph9z01|24|34-502-705-5611|9291.35|hin deposits. blithely pending deposits sleep slyly. slyl 439|Supplier#000000439|dTnCcwPBKS J WRmt,Emi KnILcwcR9YOrdLuWD2|9|19-415-856-7881|1871.86|ithely ironic packages use special foxes. carefully even packages snooze quickly 440|Supplier#000000440|s4UNoE4WDs9vXLZFJjsCVxZ0W8XU YCgqGBxVX|15|25-475-341-4841|9290.23|uses. requests cajole among the quickly regular fo 441|Supplier#000000441|fvmSClCxNTIEspspva|14|24-252-393-5381|5008.40| the even, unusual ideas. slyly even plat 442|Supplier#000000442|PJSCHXMAsqyRr5aPD9lp4tUl1B1WytbosVY8EdNZ|11|21-463-951-7051|2257.13|es wake. accounts sleep slyly. slyly ironic platelets haggle. slyly 443|Supplier#000000443|nbs9cmnC63bi|10|20-238-345-8589|5569.82|sleep even dolphins. enticing deposits wake. furiously regular deposits acc 444|Supplier#000000444|mHr2VcUpRkvyQ9rjKMaPkeWbVZmEIhxhb8F|21|31-256-493-5644|-12.78|riously final requests sublate slyly. furiously ironic packages 445|Supplier#000000445|WqzJKhnLnF05It4 5TDkGkUwVYszIko|20|30-686-270-2150|3065.22|r the stealthy theodolites. bold, unusual attainmen 446|Supplier#000000446|na LX4kqDQbh|8|18-902-583-3116|2141.08|usual warhorses-- carefully unusual dep 447|Supplier#000000447|UHeJiRfImFw1r4MTrBk0vcwUx9|5|15-104-804-3139|2108.30|deas thrash blithely. unusual packages in 448|Supplier#000000448|cYzWCXDovaNR ,S3PICo3KYKAG3bYm0YKyqaZVu|24|34-817-553-5356|1209.30|e thinly slyly even warthogs; final asymptotes boost unusual pinto b 449|Supplier#000000449|kOYLYC4JQ5tBVlul15gdo6smU,VdIObtXyC|10|20-757-629-3940|5701.21|gular deposits are carefully about the furiously ir 450|Supplier#000000450|t3hxOMnv5AFdpM4|23|33-257-936-2585|8199.71|oss the gifts. final accounts cajole. sometimes special asymptotes are carefully along the package 451|Supplier#000000451|cqMKQiLjokvIFG|6|16-328-146-7253|2503.45|cial packages. 
pinto beans 452|Supplier#000000452|6bT4efJCWyxEtXmA1ZdwmqfrPGK|17|27-445-799-5245|9524.84|ions wake slyly alongside of the carefully ironic theo 453|Supplier#000000453|bpt98PxU5HSQt61bVB695JPjBmJKUv hNzQeHvC|21|31-882-179-6015|-905.25| quickly until the ironic accounts. pending requests doubt bl 454|Supplier#000000454|K8p1uXD3L,L|0|10-453-843-1585|7337.45|ronic requests haggle furiously furiously regular accounts. stealthy asymptotes sh 455|Supplier#000000455|de1QmawQjYipd|9|19-822-816-5632|8563.10|le. slyly even requests thrash blithely across the flu 456|Supplier#000000456|iSqve6KC7t69,jX6,HF B8Hbat11O|5|15-951-880-7133|5154.37|e along the slyly bold theodolites. packages detect. regular requests sleep furiously b 457|Supplier#000000457|Jld2rUj,Xw3u,lLq2EevCRQVYwSnkCT1K7nY1if|8|18-721-125-2213|2458.18|lyly across the slyly even courts. quickly silent 458|Supplier#000000458|IFNkUK1H53HwUHabiONkMFAUDb|21|31-318-754-9316|7654.94|blithely enticing deposits are. furiously final accounts about the regular requests h 459|Supplier#000000459|w12ixcTkh6AtG1LvWxX8El8Nf4vEGQiZrrpy|18|28-295-883-6516|9569.85|arefully even deposits. furiously final requests sleep. packages sleep. q 460|Supplier#000000460|lGEBJPLJaDwOhZpc7DQMY,PTUEv6BVBUsOGK0oF|20|30-851-458-4942|7619.85|ar theodolites use carefully about 461|Supplier#000000461|VLYMztlQim7tjPGSK0xPZXnb91a8,9wqVRwk62BP|22|32-897-799-8437|2922.33|foxes. ironic, ironic packages lose furiously regular accounts. carefully slow excu 462|Supplier#000000462|Bg,C2gIsljPAG|5|15-387-146-3147|9497.29|uriously unusual courts. blithely express deposits could run 463|Supplier#000000463|XOb4DatMUyqMuFM92ZRaapwsEQ|0|10-178-678-7353|9091.71|lay. deposits breach instructions. sl 464|Supplier#000000464|XwZyuXCVeO5wb5izvhfeX|5|15-653-204-7643|1448.94|ly. ideas are slyly after the ironic, regular accounts. platelets among t 465|Supplier#000000465|YOOuLmTfTFNFiipLtt iL7HQ fj lf0xOKDjnu|17|27-586-454-8554|7267.03|ly regular accounts nag. slyly regular deposits run furiously slyly final requests. accoun 466|Supplier#000000466|HEW3DIL,Aw0Ud|22|32-408-942-6024|2051.26|requests. closely final pinto beans according to the quickly ironic instructions breach b 467|Supplier#000000467|aibBbBkbtmDJ988LnMNkCAi|12|22-666-307-4101|-467.16|ackages. even, final dependencies sleep quickly. carefully regular deposits cajole furi 468|Supplier#000000468|T,pwE,skbYjr5DCAD2EfmEHNychqFKw1loF|6|16-494-568-3545|449.82|nusual dependencies. blithely even packages are blit 469|Supplier#000000469|G4Xq2 RFlLP7uDadWjZ96 uyaGNk8 216c6|9|19-527-692-4725|994.99|ely express foxes. carefully unusual packages nod furiously. blithely unusual pinto beans cajole at 470|Supplier#000000470|XckbzsAgBLbUkdfjgJEPjmUMTM8ebSMEvI|19|29-165-289-1523|727.89|gular excuses. furiously regular excuses sleep slyly caref 471|Supplier#000000471|Fc4 FkVkaA8zsUVr,bT3PcTko0n|12|22-925-324-7167|-414.45|hely ironic accounts. ironic dependencies sleep furiously about the bold requests-- q 472|Supplier#000000472|NlJV2a0ovbomfosgHUBx6sgT|20|30-976-134-3524|6238.12|to the quickly even deposits print slyly ironic requests. sp 473|Supplier#000000473|x1skh3uebekXL4BIKGgIGDUfTk CDn5FIJGaq2|15|25-658-329-5141|1094.74|old, unusual grouches. furiou 474|Supplier#000000474|USHBMdX8iFodU|0|10-327-319-7717|5226.21| wake. even pinto beans sleep quickly about the slyly special theodolites. courts 475|Supplier#000000475|xw4V6,4QQW LI5Qg EOKy4JD B4Cq1tjzaOma9Y|3|13-397-755-1516|-115.01|among the slyly regular deposits cajole after the even theodolites. 
carefully unusua 476|Supplier#000000476|ZvT qI2gMbh|0|10-219-531-3554|980.32|o the silent hockey players hang quickly around the furiously special theodolites. carefully bold d 477|Supplier#000000477|VtaNKN5Mqui5yh7j2ldd5waf|7|17-180-144-7991|7205.20| excuses wake express deposits. furiously careful asymptotes according to the carefull 478|Supplier#000000478|4jV maCw9SEt8jyLUsjej60bmMhP6zBv ajTk|11|21-580-334-3987|7901.42| regular asymptotes: fluffily unusual accounts integrate 479|Supplier#000000479|tdLkV2Ks0wBP1VlwdnPUxZnWADmxlbmRVE0a6h|18|28-851-500-5156|5283.98|s. blithely final asymptotes haggle fluffily. regular ideas 480|Supplier#000000480|q8,LH5UQiP3Tv60slOsFzX,HM0JPcwM0rD7eg d|14|24-645-644-2970|2927.68|ular deposits according to the furiously even asymptotes use among the bold deposits. quickly 481|Supplier#000000481|VqFS2DPW Ci2TpkfD|9|19-951-947-8520|539.24|refully stealthy instructions hang blithely ironic pinto beans. ironi 482|Supplier#000000482|LkVra4orMCs|14|24-516-940-6953|7978.18|ages. final ideas need to wake quickly fina 483|Supplier#000000483|ncWfnroE1n639qMrW8|2|12-443-228-5035|8366.89|refully regular ideas. furiously express theodolites across the sl 484|Supplier#000000484|WwSH FFzB2lViwrWli6Z4QVV AN1KH2G8|24|34-506-254-3252|7350.40|t the pending, even instructions. blithely 485|Supplier#000000485|ULR12B9vkQg4g0nFMaW|1|11-779-798-5195|6282.72|tegrate across the pending, special instructions. furiously regular di 486|Supplier#000000486|nCwPqLXZwjH20OVRy,fCQskTo3it2JHEGn7l|20|30-632-817-3179|2709.83|nusual, pending deposits wake blithely ironic deposits. 487|Supplier#000000487|BvDBcJa,jQPslM|5|15-551-730-1265|2124.86|ly about the requests. accounts could use blithely? furiously pending accounts nag regular, even ide 488|Supplier#000000488|AzfObar4VYwnQvsGbISGCshVM AIWYq9|13|23-609-606-3265|4546.13|ly ironic packages use quickly about the ironic theodolites! blithel 489|Supplier#000000489|y9NMoYGxDUPfrB1GwjYhLtCeV7pOt|10|20-375-500-2226|9836.43| quickly carefully pending accounts. fina 490|Supplier#000000490|JNqlktPWJ4|2|12-619-779-5325|10.55|ng to the packages. carefully final 491|Supplier#000000491|mTbDcJHQ7d|0|10-361-729-1693|-836.47| to the blithely even deposits. fluffily silent waters sleep blithely above th 492|Supplier#000000492|8wEulEYM zGvMXfDNNEw4B|14|24-875-296-5180|8368.06|y. slyly express deposits alongside of the accounts nag fluffily after the evenl 493|Supplier#000000493|7tdI3AtlDll57sj5K48WLX j5RDbc|11|21-252-702-2543|4999.17|gular foxes. slyly pending requests hang along 494|Supplier#000000494|6hAiQHDGTy6,8bjpxI i3f|13|23-200-629-1426|1080.57|pending packages. slyly even deposits wake closely. specia 495|Supplier#000000495|p086j79twIlC25BD6A|12|22-881-968-9019|9097.65|y regular theodolites shall cajole. requests cajole slyly 496|Supplier#000000496|be4auZxyqAgF5ysH3nXWcc7bDsNgdZ|20|30-810-880-3654|2266.32|theodolites. slyly regular 497|Supplier#000000497|iLAqlCKDLUGqHrjuOcId7 uYoTmpA|12|22-895-454-2151|5980.87|es nag about the furious 498|Supplier#000000498|4jvUQrC4acOQ82EFM vLNHG|17|27-772-853-6514|485.25|. slyly ironic ideas cajole slyly quickly ironic deposits. blithely even theodolites boo 499|Supplier#000000499|NbcQeBiDiN2tFiVxHIaWU03BVFIuxt |18|28-243-638-7646|2518.34|al, express deposits hang furiously. regular, unusual pinto beans wake a 500|Supplier#000000500|jLfNCVrj7X5h31yfSR02Z4x7K|20|30-229-226-6452|6738.72|ly. carefully final packages boost 501|Supplier#000000501|PSStC43vWlQQpmTku4s|24|34-141-983-6520|-264.89|s nag quickly. 
platelets haggle quickly above the furiously silent packages 502|Supplier#000000502|AutSetu5u6moXK6Y3rpoWREh|4|14-678-262-5636|963.33|al excuses haggle furiously iro 503|Supplier#000000503|OtYqMbur3v7nfzYgFYmMrJvq5YTj1MtXgefj|20|30-263-152-1630|7643.78|players are across the slyly silent requests: even, r 504|Supplier#000000504|P8k2mjRiRUFCJfxw7KrEdRpNNQPDxiI|10|20-322-544-5770|9050.12|y final pinto beans. blithely regular instructions wake abo 505|Supplier#000000505|aqcYZYQD5TYlLDgIxhKZyFCzL3Ch5qKOxj|9|19-480-691-1853|6399.78| requests engage slyly regular ideas. fina 506|Supplier#000000506|hnXNVHB1ao5rlGLkrS64kBz5C5rx7 R4dqO5CNv|23|33-582-741-7991|1202.98|riously even accounts sleep furiously ironic foxes. quickly final requests haggle fu 507|Supplier#000000507|aF2w4JF8qV aaqApYqzTFtIXtKV57Na|9|19-981-569-8699|9464.26|p carefully besides the furiou 508|Supplier#000000508|F9,suuHYbe6kCRCPZaeSHSPAFBk9vOcFX8TUx|14|24-179-400-2422|3878.22|sits. blithely furious requests boost slyly about the quickly even packages. closely 509|Supplier#000000509|SF7dR8V5pK|6|16-298-154-3365|4315.15|ronic orbits are furiously across the requests. quickly express ideas across the special, bold 510|Supplier#000000510|VmXQl ,vY8JiEseo8Mv4zscvNCfsY|19|29-207-852-3454|-314.06| bold deposits. carefully even d 511|Supplier#000000511|RWNdTEe,VJFarN4Pu7Xl|23|33-476-213-8045|-455.12|t the quickly even deposits. carefully careful ideas sleep slyly bold deposits. unusual, even accoun 512|Supplier#000000512|MoNOuMC4QMOnBgD5uLcvtHCcfzf9cW|15|25-617-226-9364|4762.66|special accounts. daring foxes nag quickly silent, special packages. silent, unusual a 513|Supplier#000000513|YMhcTaVkhw0nO9B,|4|14-481-495-8505|8075.30| final requests. slyly final reque 514|Supplier#000000514|Q4ErX,NN,Z2UDP|14|24-677-367-2786|1669.85|arefully regular ideas: pinto beans detect across the slyly pending pinto b 515|Supplier#000000515|5TOuZXAb9df7m3BCW2 TeHMY1Zdf46DqpT2,0t|17|27-470-220-5233|2127.89| beans sleep after the final frays. special ideas across the carefully regular instructi 516|Supplier#000000516|z5Mm65PAP4m|5|15-165-647-2301|371.38|s. idly final theodolites ha 517|Supplier#000000517|NkRQYLe9d8vEXNO KKk8rxK502OI2 |8|18-577-641-3805|2781.03|ing asymptotes. carefully fin 518|Supplier#000000518|g2buDv7WzbkNDU63IN5af0i6SAdUxihirS2X|1|11-853-939-1266|-199.77|egular dependencies integrate carefully among the enticingly f 519|Supplier#000000519|0zgIDeAmk5976RzKiXZi,kobff8IxQn|19|29-133-225-9811|6361.20|onic requests cajole blithely furiously pending ideas. quickly quick accounts sle 520|Supplier#000000520|RHuqyeshPnOa6gwEiV3zDhP0o2aYvZ9glQURu1w|12|22-654-808-2429|1682.84|aggle among the final, pending realms. carefully regular escapades woul 521|Supplier#000000521|jQ648xqiuJaHLQjwrq5b|1|11-887-652-3799|507.99|carefully final asymptotes: carefully regular epitaphs about the blithely u 522|Supplier#000000522|joLGRuiXIsVWk|8|18-355-956-2843|-336.14|tect slyly final instructions. fluffily ironic ideas after the final, fina 523|Supplier#000000523|zzfDhdtZcvmVzA8rNFU,Yctj1zBN|18|28-458-231-8410|2492.06|e, regular deposits eat. fluffily express foxes haggle a 524|Supplier#000000524|rTYNcqhFosNU,4|13|23-993-423-3788|5093.35|c theodolites integrate quickly ironic deposits. furiously even de 525|Supplier#000000525|GtHZ9ooyeKe|11|21-247-956-8546|-79.52|y bold theodolites; express requests across the packages haggle slyly carefully final pinto b 526|Supplier#000000526|x8r7,jgpBRjq6Ns|5|15-539-980-8303|3477.18|as kindle ideas. 
blithely bold requests 527|Supplier#000000527|udKczd6U1Bm79UVDkA8P2Xa1VY qv9mvsXo|18|28-894-961-1780|6938.43| sublate blithely pending instructions. blithely final packages nag blithe 528|Supplier#000000528|QsnGjo7irxCIbN3|2|12-961-772-2408|1285.81|the furiously final tithes are across the pending ideas. car 529|Supplier#000000529|Fj4vm8y2 Tlgd|10|20-521-276-3787|4581.80|wake across the furiously regular excuses. express dolphins are q 530|Supplier#000000530|0qwCMwobKY OcmLyfRXlagA8ukENJv,|3|13-747-781-9694|4327.86|fily pending dependencies wake slyly final deposits. platelets against the slyly ironic requests na 531|Supplier#000000531|fN0ix827c112YajETqxxuofrfl7v VndWB38n|22|32-950-377-4573|4258.42| the carefully ironic gifts. carefully unusual multipliers sleep slyly amo 532|Supplier#000000532|ep92hT7VLaVlDKM7lgbj02kIL|3|13-595-401-8233|3278.71| fluffily fluffily express ideas; blithely special instructions wake quickly among th 533|Supplier#000000533|WF9wtTxzbBa4kv FAeer9I1pQJ0Qe,uJf3f w|7|17-404-617-2581|4213.95|e ironic foxes believe fluffily across the u 534|Supplier#000000534|JBhF3gZcQiNWGxh8DuoAhmVi|9|19-198-519-8383|3930.79|after the furiously even requests haggle thinly a 535|Supplier#000000535|aiq9Honllr6hFt, YJ6|10|20-121-889-4500|7839.46| foxes. carefully thin dependencies sublate furiously. regular instruction 536|Supplier#000000536|dzf PbgzKpWBDim5S1BSPLBNzxFpxZNUE|17|27-749-678-1361|8797.40|cajole blithely slyly even dependencies. carefu 537|Supplier#000000537|KsYYPIw2kWP|13|23-671-207-6720|5046.81| forges breach upon the bold ideas. final foxes nag frets. final instructions eat fluffily 538|Supplier#000000538|KBZ0RSDGTVJQPbWaU6x|17|27-799-369-5739|9669.24|e fluffily regular theodolites. special packages are into the careful 539|Supplier#000000539|GG5N3GIdNmmvhKs52Y|1|11-844-496-3836|5976.60|otes; carefully ironic deposits sleep idly along 540|Supplier#000000540|Tmyis ,xX7XjU2E|16|26-891-481-8993|5704.81|. blithely final instructions shall cajol 541|Supplier#000000541|Nxggufcm ,hR|17|27-914-557-6989|2830.62|blithely ironic accounts poach blithely alongside of the carefu 542|Supplier#000000542|3yWRklEDbAvfVuidQPgOOe,x7f,4 J5lSp4v|5|15-378-700-5884|8142.81|g requests believe carefully dolphins. quickly pending 543|Supplier#000000543|P10rl2 o A0jtJQDcB|15|25-433-303-6328|3222.71| against the carefully ironic excuses boost bli 544|Supplier#000000544|pUS3drDXbPeNqvI kUNlnz5GDSU5,aLI|1|11-471-707-9878|9042.70| sleep ironic accounts. fluffily even dependen 545|Supplier#000000545|D8SE9UgTdgq3oNH8RzaxDKpbsA BlRKsf|2|12-876-408-4086|2723.99|onic packages use slyly about the theodolites. final platelets are. finall 546|Supplier#000000546|YznQGwWvZbNA0O9ZV|22|32-679-789-3863|9036.47|es haggle. blithely final theodolites wake blithely. carefully regular packages 547|Supplier#000000547|MaV373lvwj|8|18-600-279-8954|7937.31| the quickly ironic asymptotes nag carefully 548|Supplier#000000548|btRiQsq qEK0qg0T|12|22-790-987-6339|3456.36|ly even tithes sleep alongside of the asymptotes. blithely ironic requests are 549|Supplier#000000549|oy89mLRUwTVCoU|0|10-377-880-8280|8948.84|iously final ideas. carefully unusual ideas wake fluffily special platelets. furiously unusual pin 550|Supplier#000000550|QQavssDXnYHbvOrg|12|22-648-743-9295|9238.79|en, bold ideas. ironic, unusual deposits boost carefully quick accounts. slyly e 551|Supplier#000000551|ZNiqP1w6Z SGZsLllIhaicTnLCCuAepdNbkm6pJ|8|18-297-775-8421|9364.67|lithely even instructions poach quickly. 
furiously bold accounts sleep final, final accoun 552|Supplier#000000552|JP3ebtH5epPwpU2lVQqKNZC3NTvnTlrhLy5eeGN|12|22-861-905-2401|2114.99|uickly about the deposits. furiously pending accounts wake. packages are slyly bold de 553|Supplier#000000553|a,liVofXbCJ|6|16-599-552-3755|3526.53|lar dinos nag slyly brave 554|Supplier#000000554|FAEEZyNtDGEhZ|7|17-365-531-4630|335.69|enticing accounts against the deposits use q 555|Supplier#000000555|TfB,a5bfl3Ah 3Z 74GqnNs6zKVGM|3|13-607-927-7514|7896.01|uriously regular theodolites according to the carefully f 556|Supplier#000000556|g3QRUaiDAI1nQQPJLJfAa9W|6|16-951-842-4482|2253.90| silent deposits haggle quickly ironic, final theodolites. boldly bold accou 557|Supplier#000000557|jj0wUYh9K3fG5Jhdhrkuy ,4|3|13-704-788-7706|6343.15|ckages. unusual, regular attainments 558|Supplier#000000558|T9hNjrRRRQmkbZomdaeLKDOqmmUcJpAJzXOxq|9|19-494-404-8664|6404.51|s against the carefully pending packages cajole al 559|Supplier#000000559|SkKThClbkbH8mIv|11|21-205-567-6566|3646.46| ideas cajole alongside of the carefully ironic packages. regular pint 560|Supplier#000000560|rYTPbri8qJ49rRfFmChtnDIQ|17|27-261-214-5284|3009.57|slow platelets. quickly pending ideas are requests. even theodolites may nag about the regular, 561|Supplier#000000561|1V3DMQWQpfjPJybZYAP|22|32-547-343-1231|1824.47|d packages. carefully bold ideas are quickly across the platelets. final, express pinto b 562|Supplier#000000562|8TXCtnRQzByqjie|23|33-782-496-5965|4848.52|he furiously special accounts hag 563|Supplier#000000563|Rc7U1cRUhYs03JD|7|17-108-537-2691|-942.73|slyly furiously final decoys; silent, special realms poach f 564|Supplier#000000564|IufyqhG4fmo VkgQT w BF4|2|12-702-995-1506|4410.70|epitaphs. even attainments cajole slyly regular packages. final deposits cajole. furiously final 565|Supplier#000000565|,oYB9wlD3mtL lj3PJC67a RGXaqh69sHK5G4e|20|30-487-989-9411|-334.52|s haggle never furiously special deposits. final attainments breach special pains. fl 566|Supplier#000000566|hBRvnhCUVIiXQK6dyAZYN,TNZItOlBvsVYCisb,O|24|34-396-766-5348|7705.73|rs! special packages cajole-- furiously final packages maintain slyly around the blithely spe 567|Supplier#000000567|fvuRpAap0MvoBguGKBfp|1|11-390-878-2811|5264.91|ke fluffily furiously ironic ideas. qu 568|Supplier#000000568|z70Hj8qVi8jQu|10|20-162-593-6831|1367.90|furiously fluffy instructions about the regular 569|Supplier#000000569|jjFjVCjK91yy2B dj|15|25-803-734-8127|9166.95|al frays until the slyly ironic requests cajole furiously about the quietly final foxes. furiously p 570|Supplier#000000570|8VkNpvXFgKgbKY2ypMKyIOBlK|17|27-658-225-4655|922.72|! regular platelets sleep furiously idly silent foxes. even courts sleep slyly. regular, reg 571|Supplier#000000571|GMifmfVJba|9|19-223-236-6710|7132.44|nto beans haggle carefully after the furiously regular reques 572|Supplier#000000572|J,RTwd9mNOTralFovrCrH99 f9rbvsDf3|19|29-444-247-7800|6039.27|its. carefully even requests along the quickly unusual pinto beans inte 573|Supplier#000000573|83GRV1s,yGFwl1NClLSXnJVRlh0xS8YW8|12|22-306-652-6853|3107.46|icing accounts. carefully regular sauternes according to the accounts wake after the fina 574|Supplier#000000574|2O8 sy9g2mlBOuEjzj0pA2pevk,|22|32-866-246-8752|8096.98|ully after the regular requests. 
slyly final dependencies wake slyly along the busy deposit 575|Supplier#000000575|J24LOV AQiHuYuR|19|29-201-935-5958|2918.54| special packages along the carefully e 576|Supplier#000000576|Plc2DypORn4qNOTpZ|12|22-316-723-5789|817.26|e of the final deposits. regular, unusual requests wake slyly. furio 577|Supplier#000000577|kn5oGAnFD1CQjet8awWorC,UMf37MP71yNcVD|13|23-973-363-7797|5593.17|olites along the quick accounts cajole throughout the regular asymptotes. accounts maintain 578|Supplier#000000578|bn5J0A4426DpcW7m rQ9,qxqJ1KN|20|30-105-334-1726|7428.76|carefully about the slyly regular warthogs. special packages above the regular pa 579|Supplier#000000579|U6sejT6kSPi5p1FUcUxjdJ|19|29-854-341-3857|3144.98|ly regular pinto beans. furiously regular ideas against the accounts nag blithely final pinto b 580|Supplier#000000580|MuRScZH74veaM2|6|16-732-277-6239|614.57|packages. furiously final theodolites integrate according to the carefully silent braids. caref 581|Supplier#000000581|X pNyEcNqxYwiP0gJ7FzkJ,haGkn|18|28-320-345-9799|8998.40|cross the quick packages wake carefully except the accounts? 582|Supplier#000000582|o6h0Bjjwags0FDRSAoKJPCWyt|4|14-341-851-2277|6178.48|ly unusual packages. regular, pending foxes are blithely. fluffily 583|Supplier#000000583|9st8mjB5G7J|15|25-760-126-2928|-339.15| dolphins across the carefully regular instructions hagg 584|Supplier#000000584|XvDYsHYpmY5AkX60fj0bZo4WW|18|28-223-704-2186|6912.86|e requests haggle carefully even ideas. express, bold requests integrate quickly furiously 585|Supplier#000000585|DQZTWEfNYL9UDlMqcQAEThcPdbyD45PYzL|23|33-357-931-8857|433.74|ar, silent instructions i 586|Supplier#000000586|9tfHwYyFe2t2,6pAVpkURXAxtc2cQw4qfGKYJ|2|12-747-610-3099|5850.91|ccording to the stealthily ironi 587|Supplier#000000587|58,gb EuMperMCg2lv XUQ9vi4GzhO2a|7|17-128-699-9949|5322.35|thin pinto beans boost silently. ruthless deposits haggle quickly above the slyly unusual th 588|Supplier#000000588|e3yF5zmSj y81I|14|24-180-601-5741|9760.06|gular, permanent accounts. 589|Supplier#000000589|3C4,WjUCjL59QhMSxyq1|18|28-878-356-5116|3415.90|apades are final, unusual instructions. bold, unusual 590|Supplier#000000590|KhRtsL4Foycp2hUwg bEHkDusXAf|9|19-277-247-1833|-12.84|de of the express requests. pinto beans are 591|Supplier#000000591|iXlVA9y6oX4|24|34-204-742-6291|2082.42|ven instructions try to are slyly about the quickly b 592|Supplier#000000592|tdYqh7rm0Zc7E0etRqHakcg,m34gQX|9|19-220-707-3861|6151.79|sual, express accounts integrate fluffily. dependencies cajole slyly 593|Supplier#000000593|qvlFqgoEMzzksE2uQlchYQ8V|6|16-262-671-5187|2214.36|arefully even ideas sleep quickly. ironic foxes wak 594|Supplier#000000594|8GY0oRK64AFmY7pys51Uqm7YbMn9luO,Z|17|27-826-454-6643|1760.34|quests use fluffily quickly final packages. carefully pending pinto beans are blithely among the ca 595|Supplier#000000595| CURZCs4l306M2ir8rFkgeYVg|24|34-354-570-3604|4922.60|ecial instructions cajole alongside of the requests. i 596|Supplier#000000596|caTnKVKTsCHNEVi1xVPD|4|14-761-106-2656|1180.93|its sleep. carefully unusual somas use furiously above the 597|Supplier#000000597|CKt5G XZ5DBt|24|34-219-790-3864|1828.73|ecoys sleep slyly according to the furiously regular requests. furiously expres 598|Supplier#000000598|p9AGBjg4DZuChQbY8gAj3LtMrxpOWqMpJR|7|17-985-962-2292|-590.83|uickly unusual ideas sleep blithely after the 599|Supplier#000000599|R u1wkvs4 B0wlbPbT8WrSzqoXZG0CjbsuAbPpUs|4|14-208-385-3654|769.29|affix. 
carefully final accounts about the care 600|Supplier#000000600|YHyUzea88sXoNmqmCMamiEfGC54xpdX|12|22-508-410-2758|2342.35|s. fluffily ironic deposits hinder furiousl 601|Supplier#000000601|TS2xMrQuUs9VrgWt4,gpdcEyWNw3K6,P|8|18-528-362-8573|7906.22|atelets cajole according to the pending, ironic orbits. carefully regular packa 602|Supplier#000000602|xEtByOs0Pydp9y75MSgoy6T R6PT8e|20|30-106-955-5651|8924.02|tes. furiously careful pains are. quickly even platelets boost sly, 603|Supplier#000000603|mECtpm1pmMnqK4K0DLZ5Gtkj 5bUydzBak6|12|22-807-182-2059|-846.12|dependencies. slyly regular accounts 604|Supplier#000000604|B53WjrwJCSh14Bx,oCEinGgCJ3ZCc8m|24|34-390-848-6584|227.59|regular asymptotes solve accordin 605|Supplier#000000605|wdwiNoNT8pVHOTHQ8jhVzaOTkU|6|16-835-870-9488|6071.58|foxes poach blithely beneath the excuses: ironic multipliers haggle quickly furiously unu 606|Supplier#000000606|n,iOFy5X,4GFeXNrCCKBmHucz1|19|29-856-255-1441|6988.38|es haggle across the carefully even accounts: unusual instructions x-ray carefully. blit 607|Supplier#000000607|vgEaPkxAonSSdAUn,7usQ c4G3Ho2r0|15|25-370-994-3762|4667.27|ests are closely quickly ironic orbits. carefully regular attainments cajole furiousl 608|Supplier#000000608|SQ,f89cn6x6g|17|27-435-165-2250|-210.13| do was furiously above the accounts. unusual, ironic packages hang about the carefully final 609|Supplier#000000609|n9 nkdqilT|12|22-852-519-5068|8287.95|ronic, regular ideas nag furiously across the final ideas. bold, express do 610|Supplier#000000610|cRikc,rgxAM3yz0IR85OD|20|30-402-585-4900|668.12|gainst the ideas. regular instructions are. theodolites cajole furiously final, un 611|Supplier#000000611| 6BVljZ1HeradVcmcDm90NYkla3iHPBsTSoUJr|8|18-554-185-6487|7048.30|al courts sleep carefully about the blithely express accounts. fluffily even request 612|Supplier#000000612|gt9T2nnuWBiy5zcrWG2iSdZt,sAEYnD6|23|33-377-769-8060|-118.86| regular requests after the slyly regular packages belie 613|Supplier#000000613|DYwZjMQj26Es8D8pxn2zx|11|21-796-340-9401|2201.94|e furiously. final foxes haggle carefully quickly express theodolites. regular deposits affix bli 614|Supplier#000000614|DteCEt557XpSo8CejUUbFm RgTeT4FRz7bC,6l|14|24-185-488-4015|1956.55|inal theodolites shall have to boost. unusual theodolites are 615|Supplier#000000615|dIT3WOBBwUuakVwd965N4logoVW1A|23|33-196-233-6474|2741.54|platelets. pending, regular 616|Supplier#000000616|Ktao GA3 5k7oF,wkDyhc0uatR72dD65pD|6|16-738-270-6883|7119.71|al packages are carefully after the regular p 617|Supplier#000000617|Q4haZeO8aVzl2dXziDw3f|7|17-563-347-4748|1002.43| blithely unusual theodolites haggle furiously. even courts use quickly against the 618|Supplier#000000618|mPrv5o5d22wyXUgUw69x8m dtx7I3mMh|4|14-912-871-9422|4710.51| the furiously pending deposits x-ray about the fluffily unusual accounts 619|Supplier#000000619|CzILYciyb3fdioa9LflK,ADrP|17|27-505-962-3048|559.50|ironic asymptotes. express, final pinto beans are furiously inside the furiously regu 620|Supplier#000000620|5pd GQ2NTM3c2uR,gCg9NspSE|24|34-347-881-4300|7516.12|posits promise. quickly express accounts according to the regularly pending accounts lose blithely 621|Supplier#000000621|fjFomMNvcBWHb|6|16-492-530-5790|8436.37|ns. even, even platelets up the carefully pending platelets u 622|Supplier#000000622|gCQimU1jYHoQiglDmW1FkQM9wzi YC1P15pMy1|21|31-421-544-2948|9199.28|ent instructions. furiously silent packages detect regularly quickly even somas. 
even pearls ha 623|Supplier#000000623|dSSQ3dTYwThbLppbetVUeuPfBIUF|7|17-593-337-7365|5408.07|ial frays use. carefully special foxes wake carefully slyly pending deposits-- final requests a 624|Supplier#000000624|JlCK4aBP3PCO|1|11-487-571-9291|9497.65|the silent, final pinto be 625|Supplier#000000625|0zW5d Hyogg0z,sXxl1PHS0Ya,muKs4N dS7|23|33-946-363-3870|4065.25|unts. silent accounts around the quickly final r 626|Supplier#000000626|uaYN3Mg6sVtD|13|23-672-133-9305|1586.50|ic deposits above the blit 627|Supplier#000000627|k6W51QENyVPe4 6z0ajEMTkEA|11|21-632-574-8701|9318.43|l instructions serve slyly regular deposits. carefully busy excuses cajole quickly pending d 628|Supplier#000000628|Gk75kOa26bzFvztn3rgkiOdL M0jTMU|0|10-599-740-9848|5975.00|ccounts play along the special ideas. blithely final deposi 629|Supplier#000000629|SjPnzOlaF3,4u1QAyH1l57EnL,h1IgnmoG|7|17-533-560-8817|856.17| furiously ironic requests by the furiously regular accounts wake slyly across the 630|Supplier#000000630|6hLBs3Rnd5elLLVv1i p3A2U6G1dkIApKDkiCy|23|33-522-267-9970|-418.50|kly bold notornis; idly even forges wake furiously quickly special pinto bea 631|Supplier#000000631|F1uLHPh2tHB6j1YLrB,vilZ5SIn6P1RFTPoz|15|25-354-834-6526|4127.62|egular, ironic packages. slyly fin 632|Supplier#000000632|pSP0SwLWVUKjWyzZtT|2|12-953-253-6464|4975.09|luffily even warhorses. carefully special requests are furious 633|Supplier#000000633|HhHzqIEl0jP8SQYZ7EybSWCtRFhmytByO7CPNZ|20|30-502-537-4154|3322.37|ent accounts mold. blithely unusual packages wake. furiously f 634|Supplier#000000634|hS62vraooyHWnMKyZV3f1GSPeKJ,7uRK6M5|23|33-105-608-2902|1133.80|equests affix around the blithely special theodolites. unusual accounts wake. pend 635|Supplier#000000635|JNDTs06uwtXvRZUWQVpDgAz|10|20-119-524-2053|1739.90|s. packages wake after the slyly ironic frets; quickly pending reque 636|Supplier#000000636|Kc rcRwa,q,TQx1W 3fu|18|28-357-934-4951|2408.11| wake fluffily above the slyly final ideas. silent instructions wake carefully: blithely silent d 637|Supplier#000000637|V6AMGzXQ7Eqs|5|15-832-253-5581|2002.17|rses haggle blithely about the carefully silent deposits. slyly pending packages th 638|Supplier#000000638|YfXfPM0,m6CdwYYiQjmy9dcN|8|18-147-424-5181|-614.31|quickly unusual instructions would wake carefully. slyly ironic request 639|Supplier#000000639|WGqnQRU1xoC,UV9xDGjc48rC4Cow4|9|19-128-575-2303|2172.39|sual theodolites. slyly even accounts according to the quickly special accounts are f 640|Supplier#000000640|mvvtlQKsTOsJj5Ihk7,cq|3|13-758-222-1059|281.90|nic accounts sleep daringly at t 641|Supplier#000000641| 0L8yoIwSCP4EJyESI 6bVH9k|17|27-393-904-4536|5749.43|l accounts use furiously against the fluffi 642|Supplier#000000642|eldBmBVYRbcB YfMRBlNyuQe8k0zYK,v Obk|11|21-775-952-5836|4622.19|reach carefully against the final, pending instructions-- slyly 643|Supplier#000000643|mJN4aN B Lxz2esIAW0GoxEw1rAU|18|28-782-409-7844|1218.59|gular requests. even, pending notornis thrash fluffily against the enticingly regular i 644|Supplier#000000644|70mM6 QN882bcuY|10|20-367-561-9783|7783.86|regular accounts. quickly final theodolites sl 645|Supplier#000000645|blbALMoy2OihlkvD7FtkLXPvTShY9JVtsWT|7|17-742-832-9508|9459.29|accounts. blithely unusual pinto beans at the blithely 646|Supplier#000000646|IUzsmT,2oBgjhWP2TlXTL6IkJH,4h,1SJRt|6|16-601-220-5489|8430.52|ites among the always final ideas kindle according to the theodolites. 
notornis in 647|Supplier#000000647|x5U7MBZmwfG9|23|33-258-202-4782|9828.21|s the slyly even ideas poach fluffily 648|Supplier#000000648|0RXVM8t80LLWl|20|30-526-602-8400|4877.49|aggle daring instructions. furiously final deposits detect furio 649|Supplier#000000649|8sfoyPTvZbFMXC93ti9qSI6dYN0QuXh3wO|7|17-341-611-2596|1927.21|equests. ironic dependencies are quickly slyl 650|Supplier#000000650|lqBJUDL9EXwh0|4|14-980-933-9338|4624.13|ons are. unusual, pending foxes affi 651|Supplier#000000651|oWekiBV6s,1g|22|32-181-426-4490|683.07|ly regular requests cajole abou 652|Supplier#000000652|Cwyzz7 uW9mWq|12|22-957-225-7894|5235.95|ly fluffily ironic realms. slyly even accounts sleep slyly. carefully even packag 653|Supplier#000000653|IK8OvngBYI1zh9bbK0vLThzVvk7F69hxytOmq|17|27-391-635-2412|9584.63|ar foxes cajole about the quietly final pinto beans. ex 654|Supplier#000000654|T96kVu5bsOeH6|7|17-361-437-5840|2997.61|ial ideas haggle carefully according to the carefully express accounts. ironic accounts pri 655|Supplier#000000655|j8ga9M1KhzXKSk6g,bXi0zbLWjckDEpwBeqs|4|14-708-916-3581|9745.28|stealthily slyly special deposits. final packages behind the regular requests na 656|Supplier#000000656|mQXqRMgstvOI|19|29-633-362-8481|8069.74|ronic packages integrate. even excuses integrate carefully ruthlessly bold packages. regular ideas a 657|Supplier#000000657|nas2fhRwM97W8EEqYpBN|3|13-546-747-5121|7182.24|es wake above the ironic instructi 658|Supplier#000000658|kw9Ts9s38tEPf,IKi6kXNEv02|22|32-991-641-3699|6888.65|ular notornis integrate. permanently final accounts wake final 659|Supplier#000000659|jjVP5XCtV9jbvUnkoeFUTrgB,ke|20|30-917-437-7814|631.86|r, ironic requests. carefully ruthless theodolites across the bravely bold deposits cajole car 660|Supplier#000000660|AmvX 3nxd9r EOYZErE6PvBtFx |20|30-126-295-9200|2555.98|longside of the requests. fluffily bold 661|Supplier#000000661|lZGFurTW1snIqk0oLWMMaeq3L|13|23-401-253-9405|1333.75|s detect quickly. blithely ironic dugouts maintain furiously ironic 662|Supplier#000000662|geJEMlJvE3HdW96Rz3touARh|18|28-497-129-7855|7337.00|the blithely ironic ideas use qui 663|Supplier#000000663|tPtpVTsSAQNw,4GgXR2Hxx5FyCxxgqmW,jE sRT|18|28-798-122-1574|956.88|osits. quickly quiet requests cajole against the slyly regular accounts. ironi 664|Supplier#000000664|ln6wISAnC8Bpj q4V|4|14-244-772-4913|9261.13|ly special foxes cajole slyly ironic reque 665|Supplier#000000665|n4JVAxZUnvT5dVZBK,3CIIDoa|12|22-273-991-9361|-197.51|nding theodolites. fluffily final packages wake? idle req 666|Supplier#000000666|7emVs,4gxuqP95JNK|19|29-330-510-9985|433.93|ously ironic requests haggle. deposits amo 667|Supplier#000000667|La6cVlSLCZZDhhX9FtKsRlylP,,lI3IYjHT8yJJX|23|33-382-268-5150|9009.30|ular accounts after the fluffily pending accounts are according to the 668|Supplier#000000668|lLpUAYxvq5Gu9eLRdlrj|8|18-876-287-5756|2317.52|x quietly among the braids. blithely final asymptotes would a 669|Supplier#000000669|,Csubly4KD59igxGYacW2q7jUvQ4ZaOKQC|20|30-256-757-7811|8577.07| pinto beans nag after the slyly final packages. final requests among the furiousl 670|Supplier#000000670|z2NRPuuVeu1jmuuyzoMOlpoCI6P|20|30-364-864-1141|8887.18|arefully fluffily even pinto beans. pinto bean 671|Supplier#000000671|VlDv51ScrQCe1eVVnz S4Kq5wFZUKJd PyBI|8|18-730-953-4689|6935.18| busy dependencies sleep blithely after the ironic, iron 672|Supplier#000000672|iu9d66fGNBYX|11|21-299-539-7383|1594.77|uickly carefully express foxes. ironic requests cajole about the requests. 
unusual acco 673|Supplier#000000673|GCmswucbTQe2Q3OHcnsNI|20|30-592-284-5403|5335.98|y ruthless requests. furiously regular accounts wake after 674|Supplier#000000674|jMxLRDxoP1Pf kzzyMVIfLB|6|16-128-338-8014|7822.90|thely after the furiously even pains. quietly 675|Supplier#000000675|pbDwRMZ6nNUpcFirCvv|15|25-499-280-9384|5579.98| bold deposits. regular, regular pinto 676|Supplier#000000676|USGIdhKusoe8dcvWdBbZWUfxnVxNnsgY mG|9|19-833-604-9178|5783.61|s use deposits. quickly even packages haggle quickl 677|Supplier#000000677|8mhrffG7D2WJBSQbOGstQ|13|23-290-639-3315|7128.81|nder blithely. slyly unusual theod 678|Supplier#000000678|SLpBfeoHSImv|1|11-465-565-3513|-58.41|he blithely even requests. blithely unusual theodolites sleep furiously against the 679|Supplier#000000679|qLzdFRbVDeEH|4|14-771-110-7666|6291.34|ole slyly against the furiously silent instructions; ironic instruc 680|Supplier#000000680|UhvDfdEfJh,Qbe7VZb8uSGO2TU 0jEa6nXZXE|22|32-522-382-1620|4586.49| the regularly regular dependencies. carefully bold excuses under th 681|Supplier#000000681|ArTNWD5g1KfANCMFDfk83TUlX|12|22-208-790-6946|6159.40|. carefully special ideas promise slyly foxes-- pending accounts about the furiously special the 682|Supplier#000000682|4bD4f4zKh88YutGs|9|19-577-707-8772|9127.14|eep carefully above the slyly final requests. carefully express foxes nag 683|Supplier#000000683|W0rFJpyes6atCIuwAmktnK|0|10-108-564-6160|2956.02|uests. platelets breach blithely among the furiously regular requests. quickly fin 684|Supplier#000000684|nqw,GGxCoNZ3UOuIa0edX3SdoYKER|19|29-345-334-1955|3810.81|sts are slyly. doggedly final warhorses wake carefully after the deposits. reg 685|Supplier#000000685|JgoYDMLdJeM|12|22-599-473-1489|4297.36|o the furiously final braids. ironic requests sleep among the even foxes. regula 686|Supplier#000000686|LxjyC4i3RxAqWnUF|9|19-818-456-6713|8724.42| requests haggle carefully. silent, ironic accounts along 687|Supplier#000000687|PN0ZGBcv2F7yzeOMZflOAoEnhAUMPocj6sc|20|30-168-842-6668|-624.22|usly. regular theodolites along the careful 688|Supplier#000000688|D fw5ocppmZpYBBIPI718hCihLDZ5KhKX|3|13-855-777-3804|602.75|after the busy platelets serve across the even packages. final pinto bean 689|Supplier#000000689|v8MJCOfDDFgJbxjwgtdKLtlZRKlSRGl|2|12-934-814-6084|8436.92|y final deposits. blithely unusual accounts along 690|Supplier#000000690|nK6Lv WWUh59jE525|19|29-330-952-4018|7448.46|nic pinto beans doubt blithely b 691|Supplier#000000691|XU1STWHllW5I5Rw9X,jsZi7X7M 4|12|22-930-512-3497|4239.95|to beans nag around the careful accounts. ideas integrate: daringly ironic pack 692|Supplier#000000692|K8M3uIAEsKuFGIc43sALPKCDSyKXtc0w VcdS|0|10-727-704-5789|845.01|ly ironic packages. excuses 693|Supplier#000000693|S,mnHfsroFOVieQGdcaY5eod,8Zmji8|8|18-231-996-9225|9956.55|wake quickly around the foxes. 694|Supplier#000000694|20i8TNU3K6H0SEL20|10|20-902-352-7633|5776.60|ounts. regular requests are ca 695|Supplier#000000695|xhSCyzMl iQ|21|31-274-635-2607|-590.99|cial asymptotes across the slyly unusual foxes use slyly furiousl 696|Supplier#000000696|hWvK 9N1EQX0kjEI|0|10-745-572-7198|9114.26|regular packages wake slyly after the carefully silent dependencies. packages dou 697|Supplier#000000697|CnQUnxL9Jk1ew4 kK,DqzwwV34c1KUiu4xFVsG|12|22-673-286-5547|6463.10| furious frets. furiously even accounts should affix furiously blithely final theo 698|Supplier#000000698|ciim3Adyrh1gqQlOsw0YYeF9gIfUM|11|21-373-751-4459|9356.72|y ruthless pinto beans. 
slyly final pinto bea 699|Supplier#000000699|CplgysgQzKm7KRFKOJe|4|14-247-404-4838|453.28|sts. express accounts boost. silent platelets boost fl 700|Supplier#000000700|K5l3kvvjnRQJJ,|5|15-648-846-4789|4196.26|es haggle quickly. slyly bold ideas serve at the regular attai 701|Supplier#000000701|ijyXEKJPjoVzpXY9g|0|10-713-854-3832|3513.22|ously ironic accounts nag quickly. car 702|Supplier#000000702|1IfvvCrOk6tDle1AjJisjgmZTSrf6Y 2t,Mdv|9|19-354-412-3179|7655.97|about the unusual, bold foxes. quickl 703|Supplier#000000703|QQ Z27PMXZP|13|23-876-543-9729|4921.83|ar patterns sleep about the instructions. p 704|Supplier#000000704|hQvlBqbqqnA5Dgo1BffRBX78tkkRu|19|29-300-896-5991|-845.44|ctions. carefully sly requ 705|Supplier#000000705|9up,Z78TUVPrp2QdumA8fRjL8PG5H6PG|15|25-673-680-4438|571.85|sual packages. carefully ironic reques 706|Supplier#000000706|oXF4XhSiEJMIJouBmMjLZ|11|21-513-570-8754|6043.19|uickly. special, ironic attainments cajole carefully according 707|Supplier#000000707|gIbGXDVlfL3Zl4dmtnAKrnoO|0|10-971-806-9591|2858.06|counts haggle ruthlessly bold deposits. furiously regular instructions wake near the furi 708|Supplier#000000708|qGdOm1xZczyifQ ,Ba2ptq2L7i2K9oWKXu dO9z|20|30-101-252-7593|7364.29|y ironic instructions. bold packages are after 709|Supplier#000000709|D8Mg5T7enR4HOYbpwPgkdDycdI5FpoTnXWUHB|20|30-921-692-7914|8638.35|nal requests. furiously even requests are 710|Supplier#000000710|f19YPvOyb QoYwjKC,oPycpGfieBAcwKJo|3|13-147-519-9896|4876.86|s hang about the accounts. slyl 711|Supplier#000000711|oG9,,CGt6x5c sDr1tzAdzvq1y|19|29-291-385-3264|2462.97|ts. blithely special dependencies i 712|Supplier#000000712|u0ZzFkqHLbJbJ|10|20-433-125-7032|24.49|y express theodolites are busily. bold theodolites cajole carefully furiously ironic pinto bea 713|Supplier#000000713|DBMIf1HiYY8OyRFcbtHpKIz|21|31-890-482-5331|2587.02| accounts serve furiously ironic deposits. ironically pending 714|Supplier#000000714|q1PPTQ0r9QK4PkYS95S yftFXTypAM 2l|1|11-165-805-5563|6077.06|nic excuses run after the final pinto beans. bl 715|Supplier#000000715|feMb9HkfGM8e,4i|14|24-382-559-6937|149.86|lithely quietly express atta 716|Supplier#000000716|OJtq1HiFQczPdQvmhx0gE2exTEdnJr |16|26-413-887-2014|8617.52|requests boost. carefully ironi 717|Supplier#000000717|hhUrgvyxsdTfzGY4OrQSHeZmMNB2L75xk|14|24-797-880-9149|6741.18|ng to the furiously speci 718|Supplier#000000718|W9byXRtqvNdPivnxG76r6|20|30-551-841-7946|8000.05| around the pending, special excuses are against the final instructions. regular deposits 719|Supplier#000000719|nQoXFQ,ztoTyboWFmO,a|18|28-664-720-1497|1922.82|jole about the requests. quickly ironic 720|Supplier#000000720|82 sGqlCVpJgAKKoW6yTkci95tvt|17|27-183-709-8965|7752.13|ckages sleep stealthily above the blithely special deposits. requests sleep furiously above the 721|Supplier#000000721|yF,pgNxRtqb1uql2l21qj|1|11-788-642-3247|4179.15|cajole slyly. requests hind 722|Supplier#000000722|XWycFRsEF4TGhGrCPnM17JRB|2|12-909-341-4605|1895.71|s are carefully. carefully silent somas over the furiously regular dependencies was ironic dugout 723|Supplier#000000723|ZFI9Pb HTy,8e,mY0mqP6ThbN|5|15-714-811-1747|3117.73|ests around the deposits cajole enticingly among the slyly express deposits. quick 724|Supplier#000000724|P92abZ6rWSfO2cm|18|28-471-255-1476|4696.62|ly final accounts use carefully 725|Supplier#000000725|fFk BGhsIcG6|1|11-382-323-5460|9077.74|pinto beans are carefully. blithely regular depos 726|Supplier#000000726|jASHnVdyT7e7Lxf6a|16|26-825-782-3808|8617.63|sual foxes. 
silent instructions are always across the quickly dogged pinto beans. de 727|Supplier#000000727|kc0Fnr5won8yJhzYC2j4H98m 59CRJTs|2|12-885-980-2162|8518.50|gular pinto beans under the pinto beans haggle around the carefully special pinto bea 728|Supplier#000000728|upr7iaSWGsmneQlVWifcSeJQ4|14|24-789-980-6663|3574.05|as. furiously even deposits are. fluffily ironic pinto beans about the packages 729|Supplier#000000729|pqck2ppy758TQpZCUAjPvlU55K3QjfL7Bi|3|13-627-404-3241|7113.46|haggle furiously among the express accounts. ironic, express warhorses promise. even, expr 730|Supplier#000000730|GQ8t3HK2XoGWSP,Sonc|1|11-468-792-6804|5903.85|kages. furiously ironic accounts are slyly. bold packa 731|Supplier#000000731|dxArCeCKpYV4yBOCZOZb39Y3s7EhIi3d|21|31-306-298-2911|3954.93|s engage carefully alongside of the idly regula 732|Supplier#000000732|3Q117DJd7vC3cBv,L4DAiVqWZNa,nBBoA|17|27-230-428-3838|6432.12|nto beans. carefully ironic dolphins nag reques 733|Supplier#000000733|mUdSdgmayvR|18|28-634-280-5540|5714.85|blithely express packages. final deposits nag above the regular 734|Supplier#000000734|dsoJfURkkt|10|20-428-370-2403|6479.49|ly pending excuses are. even instructions sleep furiously; reque 735|Supplier#000000735|7FqUrdaC732vBX3J7ruv0W4 Jfpx84|17|27-984-128-6691|9895.02|kages. furiously ironic depths unwind permanently slyly regular requests. carefully bold 736|Supplier#000000736|l6i2nMwVuovfKnuVgaSGK2rDy65DlAFLegiL7|3|13-681-806-8650|5700.83|the carefully pending waters wake about the requests. f 737|Supplier#000000737|5fna7sQRrNfLatMkl0Oy,Sps0IWTez|2|12-503-512-3693|1922.80|carefully ironic platelets use across the blithely speci 738|Supplier#000000738|dklvCmHEmlCFTuKU5YNnYAi96v,FMMq|22|32-910-791-4020|7435.07|usual packages use final, even ideas. carefully pending requests integrate carefully regular 739|Supplier#000000739|d7rDQneZCae9E57u3LSq7K|11|21-304-934-6837|-811.26|efully alongside of the slyly f 740|Supplier#000000740|vi82FyqGelGW0G1mpP17eDCBJRgNLvNi |23|33-714-391-9055|3057.29|und the quickly unusual id 741|Supplier#000000741|BLP6zAc29lDLOvSE3 h2|7|17-292-821-2297|824.94|even, unusual instructions b 742|Supplier#000000742|yaX50kwIVnFeY3|0|10-673-629-5928|2697.53|en theodolites are about the blithely unusual requests. bold deposits wake. furiously even packages 743|Supplier#000000743|ccFQShf qHch yPwbryx12DfnIYAp83,F|2|12-841-918-5889|4384.23|e slyly after the quickly final platelets? special, special foxes nag slyl 744|Supplier#000000744|5NKuw5W4mFDMQBbUjUO4k kfokG8yvAe|3|13-573-977-6527|5699.09|ular accounts. blithely ironic th 745|Supplier#000000745|KBaVOy ,RKCWhLiYxW|21|31-469-792-6546|5672.23|thely unusual ideas. pending, final de 746|Supplier#000000746|DI4uPKbEC5 D4LIwwSgGZ8SwqA1hLGJJN6guc|22|32-484-989-2368|5238.76|sly special sentiments-- carefully unusua 747|Supplier#000000747|0XGddxTld5cBDIN4Z30Je siitSCbFC|12|22-543-907-2770|1900.17|dolites haggle closely: theodolites affix after the express, even deposits. even ideas are blithely 748|Supplier#000000748|x3Wq1rbka5LB1UnGMzk0hd5,mWjrLfG49kR|1|11-549-384-3124|1084.18|furiously regular ideas-- express packages sleep quickly quickly e 749|Supplier#000000749|KxSfaofVZcFWnLsjaNszI5GTrlyAW,|5|15-131-224-8530|3839.44|gular pinto beans. blithely even accounts wake blithely accounts. careful 750|Supplier#000000750|uJB curMQwnzz79rXo9v4jy|18|28-361-120-8058|1533.06|y among the blithely regular accounts. 
regular, ironic instructions 751|Supplier#000000751|20kEocUg39iam9T EzquK P7grgp,QD|5|15-773-726-6594|2094.94|carefully special instructions cajole slow re 752|Supplier#000000752|l cHMtEnodxj3FV|3|13-854-821-4478|7588.27|ages. even, regular packages c 753|Supplier#000000753|Kbxpp9hdpX6bgG,|0|10-667-838-1746|151.10|tructions. pending deposits wake. pending dependencies haggle. regular accounts boost. unusua 754|Supplier#000000754|GLSmwjGddmyMx2D BlOKJm1Ji|17|27-971-371-9417|7425.83|leep. pinto beans haggle according to the unusual, e 755|Supplier#000000755|IRW3Y6qorkh4GBy4gHSpVTF5L|15|25-750-724-4757|9046.17|ding accounts was. carefully express ac 756|Supplier#000000756|ySXGqbQpYbXLoaFMKqIdH5Pai|7|17-726-757-7711|6116.81|out the final, express id 757|Supplier#000000757|PE9,2xp10mYiiKvHbHIVG1KIPLDtomT|8|18-152-957-5174|8209.16|s accounts. unusual instr 758|Supplier#000000758|Dko8jlTeGYKaDg s0o a9|17|27-130-847-7866|7448.00|oldly among the quickly regular platelets. furiously ironic packages around the furiously regula 759|Supplier#000000759|1wnIv4xlR2,zrcb495qI0gcXrJyVPrQjNU |8|18-430-878-7018|7602.04|its nod about the deposits. brave pinto beans lose quickly about the ironic foxes. even, even a 760|Supplier#000000760|ob94rzX66tJ35aKv2jR,inK1|21|31-367-390-9165|486.80|aphs. regular asymptotes wake quickly slyly ironic ideas. si 761|Supplier#000000761|zlSLelQUj2XrvTTFnv7WAcYZGvvMTx882d4|3|13-725-649-4070|6610.51| packages boost doggedly. fluffily pending accou 762|Supplier#000000762|GRVedLFC19uN9o8bNyNZWwzcbDUo2eT|20|30-393-711-8623|939.86|bove the bold packages. blithely final dolphins wake against the 763|Supplier#000000763|rpZRD,h5XXPIOe6bX1KqgwvBPoN|17|27-579-968-4858|9206.66|bold ideas. blithely express theodolites haggle blithe 764|Supplier#000000764|2qcwW0V7q3Ipei1tPW3|0|10-917-495-8225|8465.14|counts nag slyly along the deposits. quickly regular accounts brea 765|Supplier#000000765|RLsvd,9rVYPSoMUCDBVoB9|24|34-103-698-2282|8681.24|accounts grow. ideas cajole slyly. ironic requests haggle slyly e 766|Supplier#000000766|wfQTdIQSA7p5PFFvXNjhO|4|14-284-910-3726|-297.76|ickly along the final, pend 767|Supplier#000000767|bHEuqKKdmCMEKOV|21|31-880-346-2583|9504.89|e slyly carefully special accounts. furiously bold foxes sleep regularly. furiously unusual 768|Supplier#000000768|P,qwP7pGhJVoeq LJI|22|32-493-564-7451|4705.25|ffily unusual foxes sleep carefully according to the special platelets. a 769|Supplier#000000769|ak2320fUkG|6|16-655-591-2134|165.76|ly ironic ideas. quickly ironic platelets hag 770|Supplier#000000770|KNKouHfOJPphqjJXncoGYvv|9|19-372-844-8190|830.15|dolites nag blithely blithely final accounts. fluffily regular 771|Supplier#000000771|lwZ I15rq9kmZXUNhl|19|29-986-304-9006|2221.25|nal foxes eat slyly about the fluffily permanent id 772|Supplier#000000772|GxfKyTzgm 6bVmhZPQ6nUMCx5NSsl0 ATg1WccX|11|21-367-987-9338|7540.44|uietly quick packages! furiously bold pinto beans haggle carefull 773|Supplier#000000773|U0Sh9u896MJve84VFCmc6TLb8RUmg9BzJJBks44|23|33-321-732-9512|-707.02| furiously final sauternes about the carefully special packages could engage slyly 774|Supplier#000000774|XVYeiG4,BopCyYAQwld4l0scarsoe8J0cQ|0|10-311-896-5917|6030.51|sheaves. packages about the slyly express pinto beans thrash according to the 775|Supplier#000000775|tmhWTbbv9lv|2|12-946-153-9942|9751.14|final foxes around the blithely reg 776|Supplier#000000776|nklfFoSkCwf,ooSuF|21|31-317-593-4029|7550.40|es haggle instructions. bravely furious deposits haggle blithely. 
quickly regular water 777|Supplier#000000777|oJlJ0xr1b9l3t IHmi9|11|21-502-733-8098|1636.48|ly even pinto beans. slyly expre 778|Supplier#000000778|SHE9zl 2BWMYhH25|20|30-653-194-4012|2200.43|tructions along the furiousl 779|Supplier#000000779|iAtd5nxCjii|17|27-104-960-9666|9550.77|ut the permanently silent sauternes. slyly pending dolphins integr 780|Supplier#000000780|,G6UHU26b8dkvwpYiM,|6|16-367-150-9939|5799.04|express theodolites sleep. final, even instructions across the c 781|Supplier#000000781|kim1Maww3pdircDNv6hnVK21cI|2|12-757-769-5008|2501.73|ar, regular instructions. permanent, pending packages sleep blithely among the 782|Supplier#000000782|z5xIc71Rb5CsrmE0kO11P|11|21-940-365-9213|2493.13|s around the ironic requests engage according to the silent packages. attainments sleep about 783|Supplier#000000783|dMpZwZ95xznZWg4acMQW dK8AQMhB|3|13-785-672-8751|958.07|e the blithely ironic accounts. final, final warhorses along the ironic, expre 784|Supplier#000000784|Or3 KncT1AHPPb|0|10-734-420-5738|7284.90|he slyly even accounts. furiously unusual i 785|Supplier#000000785|W VkHBpQyD3qjQjWGpWicOpmILFehmEdWy67kUGY|22|32-297-653-2203|5364.99| packages boost carefully. express ideas along 786|Supplier#000000786|QiKBtsiRdDZ2xGcwZgOSoMaKSH4HQ360,88L|8|18-280-624-2919|406.37|uests. regular warthogs across the blithely express 787|Supplier#000000787|xaSs5H BquWpG7s38xn Rf5X|24|34-278-568-1942|7830.47|ously silent accounts dazzle carefully furiou 788|Supplier#000000788|jL QLbG475Uszs4 2RxBp4oR|20|30-377-394-1108|827.21|bold requests along the quickly special dependencies use outside the 789|Supplier#000000789|LF,j2pxKPgtbDGaj,l47vutF5Vz|13|23-428-566-8444|7874.25|counts. blithely special accounts haggle furiously carefull 790|Supplier#000000790|HSYD9,gCfAwpsgCLKGhf,Z4SH8GSYOc|5|15-189-744-3618|-415.18|iously ironic packages. final accounts boost blithely aft 791|Supplier#000000791|2dFfV7U1kBADWXl,NpgP0|22|32-320-959-1370|4992.15| even foxes alongside of the pending deposits boost foxes. bold, silent 792|Supplier#000000792|vsmDNbNfu2bAX2lAj4OTNaMKF x3pBq0yHYPzmm|1|11-678-517-4073|4128.40| wake fluffily against the doggedly sly pinto beans. final ideas sleep carefully ironic accounts. 793|Supplier#000000793|Z4N2V ERHL ds3jr9F|18|28-379-479-7140|7044.94|y regular packages sleep. requests slee 794|Supplier#000000794|ZUzPptVa1Vq9Xo9Pq8rPENR,0KGFd1Q214Hq3dJ|4|14-851-370-3696|9271.66|s against the unusual packages use about the 795|Supplier#000000795|1ozaCnTPf4sAV7oY6VE6y7RqC,WLUVXi|8|18-772-967-6194|887.34|efully final pinto beans. 796|Supplier#000000796|xre srJq9Ivai94OkW0yhsfrx|11|21-322-901-8359|6452.60|ts nag furiously fluffily even requests-- regular accounts unwind. regular, express pinto beans in 797|Supplier#000000797|3kcPU9j dU i|14|24-356-955-8704|8908.16|press instructions. ironic, even foxes use carefully 798|Supplier#000000798|Q4atQnxS0XRmpP|18|28-182-490-3136|7588.31| regular courts alongside of the requests believe slyly slyly unusual deposits. regul 799|Supplier#000000799|jwFN7ZB3T9sMF|22|32-579-339-1495|765.69|nusual requests. furiously unusual epitaphs integrate. slyly 800|Supplier#000000800|Z4 hpmBjpjBXREqzixsBCIaF|0|10-497-654-8607|7956.80|he bold foxes boost blithely about the blithely final epitaphs. slyly 801|Supplier#000000801|zohVF4 4GHOJpWy9kdytvYwm27mJEBhk|1|11-673-791-6926|976.53|ckly final accounts wake since the even instructions. regular, permanent accounts are against t 802|Supplier#000000802|,6HYXb4uaHITmtMBj4Ak57Pd|19|29-342-882-6463|9453.01|gular frets. 
permanently special multipliers believe blithely alongs 803|Supplier#000000803|,IXoixqcMluU5OEK7RhD,mDrBE2E3ygs|15|25-347-400-8216|1887.55|uiet ideas. even packages haggle carefully according to the fluffily slow requests. furiously ironi 804|Supplier#000000804|N3B GhiD6nanhYdssmqlpy2XVz5jZH|21|31-830-383-7329|9093.94|osits. regular theodolites are regularly slyly unusu 805|Supplier#000000805|LXWtvxudFJf56Uep17HO,NYC4A4mlr|8|18-407-342-1685|5968.71|ructions haggle. carefully silent foxes must wa 806|Supplier#000000806|mYSUX4mem2|13|23-702-985-4737|5054.15|foxes. blithely special packages are furiously. regular theodolites solve. ironic pinto b 807|Supplier#000000807|ClHvM1nuPUESGg35Ls|14|24-255-894-5069|1077.97|lly. even, pending requests boost furiously furious 808|Supplier#000000808|B3zlGM54ECUk5MgRzKI9f7F bB8|15|25-297-954-4894|9438.28|y even packages. requests sleep quickly fo 809|Supplier#000000809|dPqPaxh,IbS|22|32-172-990-2830|7241.31| accounts. express dolphin 810|Supplier#000000810|RMJoA1yw 1fM|9|19-328-138-9772|9713.42|ithely furiously final dolphins-- furiously ironic warhorses beyond th 811|Supplier#000000811|pLtLc7cdmb|20|30-734-469-7797|558.91|bold ideas sleep against the ideas. silent deposits are furiously even foxes. blithely 812|Supplier#000000812|8qh4tezyScl5bidLAysvutB,,ZI2dn6xP|6|16-585-724-6633|8615.50|y quickly regular deposits? quickly pending packages after the caref 813|Supplier#000000813|6EfZUjqLY8G28PhtbPGfz9FjWIXLx|22|32-887-679-3561|5793.63| silent somas. furiously unusual packages affix blithely along the s 814|Supplier#000000814|GWytN8Vx0IWzB8BByw6thupbonInepu|15|25-147-738-5484|-468.26|accounts. blithely final accounts haggle carefully special instruc 815|Supplier#000000815|3f8XIvP m9v5fv|7|17-984-775-9865|3855.74|ms. final packages use finall 816|Supplier#000000816|uCvvad6NCkXBUkr28t dtq swXPtu|23|33-830-680-6168|361.01|lve furiously according to the final accounts. even accounts on the 817|Supplier#000000817|0GTKh7JybR8sVahPoJT8kbNtDV0TzA79Q|0|10-282-124-6047|4468.89| blithely even requests. blithely ironic deposits wake slyly. ideas haggle! quickly i 818|Supplier#000000818|78Rr rF8zcBGTSud4,5B|16|26-754-547-4185|9594.51|es. carefully final deposits use 819|Supplier#000000819|n1YA v3IWFGmIP tZr|5|15-492-900-7246|4049.06|s use blithely. blithely regular ideas according to the pending theodolites haggle above t 820|Supplier#000000820|QoduMcALdP|15|25-716-340-3140|4604.84|osits. slyly final packages are furiously according to the ironic, pending deposits. ruthl 821|Supplier#000000821|O HVe5AKfowNLnep8qd2brd9fbZm WkJuc9Lli|20|30-472-667-2703|6497.93|old, regular packages about the platelets haggle slyly pending, unusual 822|Supplier#000000822|0NJZiE1bKnpzqT j,0|6|16-795-236-9887|797.90|ly even packages shall cajole up the carefull 823|Supplier#000000823| gC0DrEG5U,v893fp3nj mmXa6rYhJ0tjpJ|21|31-834-127-5277|-723.78|ccounts haggle blithely packages. carefully express pinto beans against the unusual 824|Supplier#000000824|wJnn6YrLnzsQWLOZNdMSBz1utk9EFS6icrvQyy|13|23-197-596-6598|-609.59|o beans are blithely across the bold, fi 825|Supplier#000000825|,9nqq,JWR0ztG0qp4rlDHgtShbP7AViBj|20|30-486-346-1320|4461.68|es haggle since the carefully regular theodolites: bold deposits according 826|Supplier#000000826|n,hapmxkVq19Yy9UQ8BVF00sQD|15|25-731-100-2823|9401.23|sleep furiously. regular deposits wake furio 827|Supplier#000000827|AlFjWDq6jDtaSUKnxn54OwQzt8CPUItYbCGztPQ,|9|19-131-253-5697|515.22|ggle. carefully silent requests lose slyly. 
final, final foxes among the fluffily iro 828|Supplier#000000828|0B2aPqJ6KTEr2fqxuC7z |21|31-911-715-8972|289.32|ions are carefully along the regular, pending pinto beans. special 829|Supplier#000000829|w247SZbFQvw1Fp4f0JFIfVXmIBfulBxfgUg|2|12-123-147-1171|9542.56|nding packages use fluffily above the blith 830|Supplier#000000830|5DHC2QScT6P6lXgRtHajXOfztB7ddjYH1LLUC3|10|20-415-380-4083|-65.23|ideas wake blithely quickly even notornis: furiously pending theodolites sleep. 831|Supplier#000000831|NkkeNeVsWdw8U SeVBnUX 2GB|15|25-275-692-5827|1536.13|carefully express accounts wake quickly about the quickly silent p 832|Supplier#000000832|SwUNp9Yyt5pe 6i5EYiV3hHU9RWJnd3VzBjsbtvO|15|25-658-573-4042|9747.16|old ideas wake carefully above the slowly regular pack 833|Supplier#000000833|ig2vYxu,8xwEzl0UfA4t5VJ|17|27-470-524-5760|6604.79|. quickly bold packages sleep among the packages. theodolites bo 834|Supplier#000000834|fwX0Z5,PgFaauaEXlVQX6UmHM0RDKS4EXe,Tn3nJ|9|19-419-490-3356|3732.75| express foxes nag slyly after the regular pinto beans. regul 835|Supplier#000000835|a7ZBr9561n7CHzwtrfoZnpNWf71uKtH|14|24-772-959-9240|1348.35|final asymptotes are furiously bold deposits: unusual, even accounts across the blithely 836|Supplier#000000836|KU2O25D5,FXdv|1|11-892-817-2809|-256.13|ke finally even asymptotes. accounts x-ray al 837|Supplier#000000837|717LGrDM2ChnIS91,PE4 ycp4mu4HPdcX|12|22-626-153-5392|5167.00|gular instructions are furiously a 838|Supplier#000000838|Zsa9XTlYna7SUIpK0RQAGYSVum|11|21-409-796-7661|5808.87|xcuses. furiously express deposits wake among the furiously ironic instructi 839|Supplier#000000839|1fSx9Sv6LraqnVP3u|6|16-845-687-7291|2761.59|ess, regular accounts haggle slyly across the carefully 840|Supplier#000000840|iYzUIypKhC0Y|19|29-781-337-5584|2963.09|eep blithely regular dependencies. blithely regular platelets sublate alongside o 841|Supplier#000000841|dvQXS7Wi29wVuSUWsknpHVQWVrJ6cUvB8V|7|17-359-161-6634|1685.95| final accounts unwind furiously among the furiously pending req 842|Supplier#000000842|3D3tmBm8zD3A BxuTWfoKXD|4|14-977-614-8564|8732.97|dolites impress quickly about 843|Supplier#000000843|iguHIr15YUL9RMmH7U3XsDfdyEg1441|18|28-503-987-8146|10.33|ss packages among the pinto beans cajole slyly outside the packages. regular, final 844|Supplier#000000844|COb5r2WsvJ0zf 58tJJLgYvEZHJb74EBdp|24|34-454-417-4967|6538.01| pinto beans. carefully stealthy theodol 845|Supplier#000000845|tgajQJpBFV6PGa9bzWSkW1eSGE3q5NsCMYE|23|33-350-532-8789|3408.47|sual courts. carefully special dependencies nag furiously. deposi 846|Supplier#000000846|C1Cs3zHlceRMxlaPIBprCC76x4LKVdH3QhZ|20|30-283-261-2020|7122.90|thely special deposits was furious 847|Supplier#000000847|wMieciw3 YGegk|15|25-908-575-3915|-382.49|ully express, regular instructions. daringly sp 848|Supplier#000000848|tx44JAuF,Jnw1|3|13-169-466-8402|4404.29|olphins. pending packages are during the regular packages. furiously regular requests haggle c 849|Supplier#000000849|uy a2rQl1Bag,cFo5GRTZigHtqh Gguuj2xtlz|1|11-608-566-4693|1955.97|ideas will have to sleep pinto beans. deposits around the dependencies ha 850|Supplier#000000850|l6KnHycBhdFcz58UReuEe1Jf2R40ZICoQ5Qpjh6f|4|14-161-130-9091|3891.72|e across the regular requests. silent ideas alongside of the carefully even ideas integrate slyl 851|Supplier#000000851|fhhjsCI1s8uC|15|25-692-383-2877|157.21|ffily express instructions. 
close deposits after the bold instructions nag regular, even asymptot 852|Supplier#000000852|n3zasd04WljXdo9xMjQRkZKrEB|1|11-574-892-3228|213.41|Customer s. even asympRecommends haggl 853|Supplier#000000853|hQuhL6zQSZmVifUzpnKvRLNEfJhShDomdbGC|8|18-286-624-2526|9797.52|iously. bold accounts cajo 854|Supplier#000000854|omM4Df4DWXSTKmenaUUSr|16|26-346-921-7567|6082.22|n requests believe fluffily. carefully special depen 855|Supplier#000000855|ekQwhb9fh5VGIvMBJ m,yT571ICZpI,LEb1e|9|19-105-166-2849|9964.88|ess patterns sublate blithely-- slyly pending requests use carefully about 856|Supplier#000000856|U,GH2ZjlmA78JRbjWhbf7jCgrU7a0Dx|4|14-216-125-2920|2763.95| special packages. theodolites haggle carefully. special packa 857|Supplier#000000857|srpKWldPZrVWm0dKjc7p 8fMKm1fYF|2|12-715-212-6604|9761.92|y final ideas poach across the special dugouts. furiously pending accounts cajole furiously. fin 858|Supplier#000000858|aA2g9NPEljznwqrZp4Fdw1Z|9|19-525-296-9901|1671.59|ependencies boost regular, regular requests. quickl 859|Supplier#000000859|OJ7rr38hbK1BlZSRXKojpIho8QazivUtIh0|16|26-731-166-4296|9296.31|ggle slyly among the express accounts. slyly unusual accounts above the thi 860|Supplier#000000860|C9FacsR,23JNHd8ioSI5qC7FfnR|17|27-547-426-7236|8210.13|ilent, regular ideas. bold, unus 861|Supplier#000000861|vj9yxpOqUdt3HIxfOWbA9|7|17-581-652-1425|1747.35|against the carefully regular requests sleep blithely fluffily ironic packages. 862|Supplier#000000862|JAH3,OdzNzdiWYK2ODrKfLLR2IQ8YVNJcsds|2|12-159-806-3875|6698.84|ously special pinto beans. deposits 863|Supplier#000000863|TsC9OuodnybJhWXq4PFNdEJf9jx2y181N3ilV|21|31-589-608-3508|487.31|ounts. fluffily special platelets along the even pinto beans boost 864|Supplier#000000864|D95VTylwusz7OYesg,|5|15-293-129-4196|4089.61|ites. quickly even ideas wake always express requests. express theodolites are. carefu 865|Supplier#000000865|zYVm4GPPlvV1MysjqDUItehzU9hD0tIaHeg|17|27-993-155-8321|4111.07|ly ironic packages. slyly pendi 866|Supplier#000000866|CosbyBH1bG81zFspjW|24|34-374-244-3932|1768.34|riously regular excuses. quickly close ideas sleep. final requests haggle along th 867|Supplier#000000867|WTM7RpRoZPk5MKGEtE2dsh|11|21-221-554-8461|7476.72|y final requests. blithely final instructions will haggle fluffily. iron 868|Supplier#000000868|dFPwNYsP 9xC|1|11-255-295-8702|6239.02|ar instructions. blithely ironic foxes doubt against the quickly bo 869|Supplier#000000869|xi6g0llBz3O1ECUHCk7p341ThVdavdqf6PM|13|23-269-725-9523|5303.66| regular theodolites thrash slyly about the sl 870|Supplier#000000870|QIgRinpKvCLPG|21|31-675-338-9417|3689.14|ronic accounts. quickly pending pinto beans after the regular asymptotes sleep furiously 871|Supplier#000000871|gTuPG353pz9sxS6iazJuqQtji6xN,Q9qgG2|8|18-566-173-8686|5418.37|ess, ironic platelets boost furio 872|Supplier#000000872|oJQEy8xyrptE|11|21-271-893-1965|6571.13| the pending, even foxes? blithely ironic depe 873|Supplier#000000873|jBfvdFseU7cz315kGbbocXnCwDKW,3iRlyaj3wn0|1|11-253-186-4222|-951.70|pinto beans. platelets serve slyly bold, pendi 874|Supplier#000000874|xsGhP46dDeavM6wnREIi7Q09jfAUTzucwp|16|26-826-579-8300|7755.50|beans. deposits haggle after the blithely express deposi 875|Supplier#000000875|pBjvc 55kMxfQ3gtYUiuy5TNRyd|13|23-165-986-6088|5757.36|ep. furiously final deposits sleep. regular, regular packages affix slyly 876|Supplier#000000876|PYBAM85Nrb2cuXH8VDrX 3TjZbsIAlIQ,,xA|11|21-432-561-3770|5979.92|ly special packages. 
carefully ironic deposits dazzle furiously acco 877|Supplier#000000877|4yN9i5CbQ8Tw1X5InsMlOjjhGg8MAciTG|4|14-685-851-5424|6489.62|beans. quick packages at the pending dependencies poa 878|Supplier#000000878|cennOpnejXFuwxsxrfoz6U,WN TC7|8|18-462-213-5795|4140.02|gular theodolites wake. blithely bold deposit 879|Supplier#000000879|6DGZ6o7FNRspPhM B1nZiMOAgq9fZT8UHW|17|27-204-329-1068|4269.56|s the ironic, ironic platelets boost carefully bl 880|Supplier#000000880|s7AgxI7139o6arS2SfUX|10|20-178-395-2605|3550.33|s hang always against the pinto beans. blithely spe 881|Supplier#000000881|02RRXg45CBGhuzwpKKz3fi4ewYv|13|23-877-378-2281|4423.06|ounts are according to the express theodolites. silent instructions above the d 882|Supplier#000000882|5op1w94,JerNmOkyPfAVkZEtb7|14|24-437-170-2579|9450.21|ly final requests haggle furiously final, regula 883|Supplier#000000883|5ppzWDz6xcMOO09LkrAOvHzFEMfP3CIpndr|18|28-614-756-7513|9746.01|hely final excuses sleep quickly. slyly pending instructions hang 884|Supplier#000000884|bmhEShejaS|3|13-498-258-4793|9223.93|requests. furiously pending accounts haggle furiously. blithely regular ideas wake along t 885|Supplier#000000885|aJUXiGC6qSAWr0Dl0VBahtF|7|17-578-639-8695|1736.47| furiously. carefully pending pin 886|Supplier#000000886|R52IgT6b0yBuU r8,dNRZVWRY|1|11-329-720-1904|-158.08|ts during the blithely silent packages c 887|Supplier#000000887|urEaTejH5POADP2ARrf|3|13-738-297-6117|3113.73|s. regular realms haggle. special, unusual accounts wake furiously. bold pearls play c 888|Supplier#000000888|JA,f8nt64wdZ4XkiHCYHC0r2FJ|24|34-606-153-4636|3420.34|deas wake blithely. regular, special id 889|Supplier#000000889|saKBdGnsGS9ccCMGJ5NFoKwZ7HkS7|20|30-566-872-3482|8570.93| the furiously final requests 890|Supplier#000000890|yY7XhfdJe4ewh8R, xHR06QABT|11|21-196-765-4250|4685.09|ly unusual asymptotes cajole along the 891|Supplier#000000891|cv64gZAB3Ax5XblZ19gNA zOHb9q2nGjtvUzLm9P|8|18-541-281-5118|2284.88|deposits. slyly final accounts are fluffily slyly 892|Supplier#000000892|j6prA4M3sX9a9xHem3HOZpYy|8|18-893-665-3629|9993.46|mong the regular instructions. regular, regular dependen 893|Supplier#000000893|WxOTCcoe RFwKWyZUCURPNAumww1nW,EYcrVjrj|22|32-328-447-9531|-823.97|ully pending pinto beans affix quickly after the decoys. sl 894|Supplier#000000894|T,9KqHZzFlFVvesdyzvzs9FHOQZjLk|9|19-875-711-4227|9490.22|ss the furiously special packa 895|Supplier#000000895|Tm5QKYFUhtY|2|12-826-730-8247|246.80|y final foxes cajole blithely. packages over the blithely ironic accounts haggle silent, regular dep 896|Supplier#000000896|yvNZycuQYm9d9A8v1m|7|17-790-100-9143|9880.72| regular deposits. carefully unusual accounts haggle ironic, 897|Supplier#000000897|9HoSSwrIPM8ge69XLD81Br993krGbn9aeUW4U|10|20-272-778-2639|7373.28| boost. bold accounts nag furiously 898|Supplier#000000898|Uud3qAUC91Cy,c|4|14-247-832-2299|5737.18|print carefully. blithely regular deposits after the deposits cajole against the sp 899|Supplier#000000899|oLlkiVghtro IwzcwFuzwMCG94rRpux|11|21-980-994-3905|7741.42|equests wake quickly special, express accounts. courts promi 900|Supplier#000000900|,6RXmcRyA48c0yvZ2I|5|15-926-534-2005|165.26|counts cajole carefully pending foxes. bold packages mold carefully unusual 901|Supplier#000000901|dVN377SgJQURQd8,XtVF9|8|18-664-532-4405|3465.20| sly foxes are. special requests x-ray about the slyly unusual foxes. furio 902|Supplier#000000902|VrxG9VHAp45UMWrL|22|32-382-410-6632|3660.22|are slyly unusual excuses. 
pending, special 903|Supplier#000000903|fQVbA9,L6tB9iFrCfk4Yt1dwT0kRWg9DV|22|32-277-492-3117|3238.01| sometimes across the furiously express accounts. unusual accou 904|Supplier#000000904|Wev4Rig5BkWdQIsATpN7W5lG5jh4LQ ,pMxD|15|25-960-333-1191|2152.23|thogs snooze blithely fluffily bold pinto b 905|Supplier#000000905|WAGJHr1OUm95U5|17|27-266-689-2728|2415.66|usly regular deposits. foxes boost blithely ironic theodolit 906|Supplier#000000906|KwvAh8P9RcDPjbx9Qv2xZeWPmkCmK hY|2|12-926-664-9785|7888.41|ructions. slyly final req 907|Supplier#000000907|Y79rRfd5UhgXi6Ahj9ooM0vNHts|12|22-595-604-3254|6558.92|t the instructions. bold, unusual pinto beans above the regular, unusual foxes wake blithely regu 908|Supplier#000000908|05YocPlDfIe SFz7r5BeVAgqOx8i|4|14-669-206-5538|2844.11|s the slyly unusual foxes. furiously 909|Supplier#000000909|BXuLybzUeFLI0GJqMG8xewTuKFqk8n|11|21-989-597-5142|4012.42|ss requests. even packages haggle furiously. 910|Supplier#000000910|0X S 2E55,maqch|5|15-960-448-6633|4763.20|arefully unusual deposits. 911|Supplier#000000911|TpZMQSDVVmgKZX9 wB,HY|11|21-748-770-4721|6026.09|he dependencies. furiously special deposits cajole slyly. theodolites use fluffil 912|Supplier#000000912|ppF7DX,JePrdoo9qYFYwTOwszgUjHk|8|18-633-593-4048|1492.02|the ironic Tiresias. requests above the slyly even deposits affix abo 913|Supplier#000000913|c78mMYZkHE7ktVSoB9D|24|34-601-419-1634|5266.72|s sleep bold, regular accounts. ironic packages integrate 914|Supplier#000000914|li7dM9CrPF213,Jkh3MJRSRhjSB,wRMuOvidQg8u|14|24-682-308-9029|9767.75|ajole. bold theodolites above the quickly ironic frets are quickly along the fur 915|Supplier#000000915|hzB2437Op7JLYX73d3,qU2|18|28-191-772-5459|687.45|ffily. slyly pending pinto beans haggle furiously regular accounts. furiously regular asymp 916|Supplier#000000916|tD 9oW5VNUWTBQCpsISJO2TZCwFzKLdqIZoTWV6|11|21-185-427-4872|1852.85|e packages haggle carefully along the furiously ironic dugouts. unusual frets impres 917|Supplier#000000917|tMr5motk0IFyIXJDwCr98Q O5|22|32-754-782-6474|8877.42|r dugouts? final, ironic packages breach furiously f 918|Supplier#000000918|e0sB7xAU3,cWF7pzXrpIbATUNydCUZup|20|30-303-831-1662|7893.58|ependencies wake carefull 919|Supplier#000000919|xg60CQmqGaVavmevvCjOSGXC 3YuMRVb|16|26-379-721-3359|5428.44|, regular requests: furiously even deposits wake blithely ironic packages. furiously even 920|Supplier#000000920|Ix0QnIqftxMwHW5KHeB,xB|20|30-713-464-9920|-111.84|leep carefully among the e 921|Supplier#000000921|2dsK093unFO|2|12-670-146-9689|-686.97|ounts. blithely final requests wake blithely. regular instructions cajole among th 922|Supplier#000000922|V2KIQXPxtYnOkul|13|23-456-977-9276|2015.59|yly even packages affix quickly! quickly pending foxes haggle at the final, bold deposits. blit 923|Supplier#000000923|aUJaK ezwGVA43Mo0XF|10|20-376-561-9214|5057.87|y. furiously express courts sleep. 924|Supplier#000000924|13INVXLNjpU9eTsoc4dLeid|14|24-768-719-6154|8562.82| shall have to integrate blithely alongside of the fluffily even epitaphs. quickly unusual 925|Supplier#000000925|x3n4pg,q28EckYO,613G7seoAmTPSX0jTvDvM2U|19|29-398-723-8226|406.59|regular packages can haggle acro 926|Supplier#000000926|SEEq DJHLi I6|10|20-500-435-2716|505.92| the furiously ironic dinos. closely ironic instructions al 927|Supplier#000000927|Ype0QDb17eJbg7l35PFzJso|9|19-185-526-3201|8997.88|ily final courts sleep alongside of the ruthlessly regular escapa 928|Supplier#000000928|VL,J8Fq0GI0BnVTaTU9Dcp9Z|18|28-382-849-1505|8512.48|equests are. 
slyly specia 929|Supplier#000000929|XYQyy9rraHvHMCBQcoGnAvfw0iGG4jOCMENI|20|30-190-675-2042|1235.72|ully according to the doggedly even theodolites. f 930|Supplier#000000930|jYRHnCNnk55 CODbFLqby,ewwOQa1M|10|20-435-373-1909|9740.48|ly unusual asymptotes at the fluffily ironic requests hinder slyly regular 931|Supplier#000000931|CqslHrffpOBxqMDxiGH8nz7scX,i3HmUhNlwBBU6|8|18-174-741-5563|4398.36|t carefully. express accounts sleep. ironic, final theodolites haggle regul 932|Supplier#000000932|2Ke5SDwuwZ1y7H2QROIfgQ1d7|10|20-315-146-6687|1278.74|counts wake carefully across the pendi 933|Supplier#000000933|TrYn5zjv6nrT47EwbXi1S,IU|7|17-446-406-8093|6756.59|ully pending platelets. quickly ironic realms cajole slyly across the even ex 934|Supplier#000000934|2o3Fav4osE|9|19-497-408-2402|3103.34|t the fluffily ironic platelets. foxes use. blithely pending p 935|Supplier#000000935|ij98czM 2KzWe7dDTOxB8sq0UfCdvrX|3|13-437-885-9309|4734.47|ly regular pinto beans wake blithely bold pinto beans! warthogs between the 936|Supplier#000000936|CuIXj6RYaHGQ5SOkmhu8ZDRt5IU|17|27-555-580-3995|7304.30|leep. final sheaves affix across the requests. carefully express ideas ar 937|Supplier#000000937|UBfhiW HpJzqh9uTnhXkOqjRvP1vAIWokSeR5|15|25-410-699-7522|1463.21|lyly regular decoys lose packages-- quickly ironic foxes across the express accounts bo 938|Supplier#000000938|3xcs3BuTqx8b|4|14-704-203-8596|5204.43| express platelets integrate slyly above the ca 939|Supplier#000000939|mWBKbdzDn3yJNpOT8p|23|33-487-125-3117|7815.06|efully. final requests after the unusual requests wake fluffily after the furiously r 940|Supplier#000000940|QqKPZBeHgcIKDeOfT6J8sRlg4|2|12-972-343-8810|5789.96|ccounts. quickly final patterns mold 941|Supplier#000000941|gqG2XEnVlzUhjjfQGYGlwk,jcaNsplI8Rleg|21|31-412-752-5573|3846.91|ronic theodolites. final, unusual 942|Supplier#000000942|VkukmyN0Dq3NkC1RMw2ZBk,I5icQfLFFG|24|34-943-753-9952|6074.75| deposits haggle. regular packages unwind fluf 943|Supplier#000000943|AaeNFJbUAF8MOb5VKA7wXB6|16|26-877-341-9002|5339.25|uffily unusual packages wake bl 944|Supplier#000000944|tUVVFs351nHm|13|23-452-536-7876|7776.86|pinto beans. quickly express requests haggle 945|Supplier#000000945|y8LENmYfvNpYOnHG4XdxLQGqjOPJJ4c0CacGxu8|1|11-785-307-6941|1706.26|quests wake slyly quickly ironic deposits. instructions wake 946|Supplier#000000946|RpFjkZmA ScvLe|15|25-522-937-5559|9093.75|shall have to use slyly. carefully ironic ideas alon 947|Supplier#000000947|6xS,tBAgcKGW,nXNhfm2Hv26uYicU3|18|28-277-111-4291|2699.78|ronic ideas. slyly final dolphins wake furiously after the 948|Supplier#000000948|LvcPHBbzYZKySxlda,0McYfaV2bb poGXMF|21|31-562-389-2753|9219.26|grate slyly after the quickly even accounts. bold accounts haggle carefully. quick, b 949|Supplier#000000949|a,UE,6nRVl2fCphkOoetR1ajIzAEJ1Aa1G1HV|23|33-332-697-2768|91.39|pinto beans. carefully express requests hagg 950|Supplier#000000950|MukfcGGpbbRXp5v52Rx43QpPjU8RFB|1|11-888-171-3390|4992.53| the furiously daring dependencies wake blithely blithely regular braids. ironic ac 951|Supplier#000000951|Tuh3kFB,zCuI,jtPmV,IMcXQF|2|12-561-627-9752|-511.17|eans. quickly fluffy accounts are quickly about the ironic 952|Supplier#000000952|n8W6MbJdih Ckh6wDvYJz84ZmabvK4yQDz|22|32-276-558-4960|8621.52|cajole permanently? carefully slow deposits cajole quickly. regular 953|Supplier#000000953|wTTb0ilU6Nba1VLsHj6k0jUt4TFFM6rvtXszzA |21|31-642-490-3022|7916.56|kages are carefully platelets. 
blithely enticing platelets c 954|Supplier#000000954|P3O5p UFz1QsLmZX|6|16-537-341-8517|6721.70|ect blithely blithely final acco 955|Supplier#000000955|7OFLXDHjSgGrFlHTg8VHFS4glUuN|17|27-839-781-6125|-408.45|ave instructions haggle. regular instructions past the theodolites are slyly depos 956|Supplier#000000956|dmmnYeCuIZB7b2pWTOQ9zrAdi6zxwIrj4aT446L|18|28-741-846-4826|8068.26|, regular accounts use against the furiously express ideas. furiously 957|Supplier#000000957|mSpFa,4jJ5R40k10YOvGEtl4KYjo|23|33-616-674-6155|4324.51|hily after the fluffily regular dependencies. deposits nag regular, silent accounts. i 958|Supplier#000000958|uAjh0zBiJ0d|8|18-553-836-9296|3011.31|ress pinto beans cajole carefully among the quickly special requests. fluffily even dep 959|Supplier#000000959|8grA EHBnwOZhO|7|17-108-642-3106|9032.15|nding dependencies nag furiou 960|Supplier#000000960|yk1dgGaQlbTN1YhnJjyM3ULEDDf|12|22-811-163-5363|1480.82|onic theodolites wake carefully. ironic packages use carefully. ironic 961|Supplier#000000961|jlfkD00x6r0M34ctcSTY3cABv4yWKcjvHV|15|25-911-416-1546|4139.88|luffily bold packages wake ste 962|Supplier#000000962|0udWpU30ecw3XMLiEVhaM7I,BhufyzF4i|18|28-105-675-7555|1898.39|ent deposits are furiously slyly ironic es 963|Supplier#000000963|s3WbU6w31FgnuZVtwGnH6PkyZFg|11|21-395-611-8793|4152.04|ily quiet accounts! blithe 964|Supplier#000000964|JpH9YUQjGXD1GChWcPj9LEGMN8xwZkCzpdG8HlNb|7|17-693-102-9498|8411.78| instructions; fluffily ironic pinto beans across the unus 965|Supplier#000000965|aPiA00HXK1,L2FmArcXVT|7|17-117-423-8603|2843.80|ut the express packages. ironic sentiments are qui 966|Supplier#000000966|FRlJWy32I6TsERrGDq,GS7|4|14-681-361-1636|9657.79|beans-- fluffily unusual deposits x-r 967|Supplier#000000967|uPDH,GMFjz|4|14-583-250-9472|502.38| across the quickly regular pinto beans are furiously brave accounts 968|Supplier#000000968|6idVVWn8RbFoZgPeyVJlQOJ|4|14-431-296-9521|4487.96|final accounts. slyly regular deposits wake fluffily after the 969|Supplier#000000969|thRdVx7vCajVFs7gsK8VKxzydPiHvIKK,DtR|21|31-171-204-7902|162.06|ounts nag slyly across the furiously 970|Supplier#000000970|FD,NVDvbAAT7rI6BdKI2rTo9UqH8AghW1e8DSJ|2|12-826-418-6561|4643.76|cording to the regular, ironic depo 971|Supplier#000000971|23XaUVLYuC3tQIPHCuLLgM5UawL|3|13-380-957-8529|709.58|ly final pinto beans among the furiously regular theodolites caj 972|Supplier#000000972|MkZSNXPZf9g8ZW3ez TU, s6S9aPVY|12|22-291-368-8958|-203.99|ing pinto beans integrate. pending 973|Supplier#000000973|5 nhBZ 03rG6EcOEDkZXvt|21|31-385-469-4031|1548.60|al ideas cajole quickly ironic packages. carefully unusual theodolites detect. unusual packa 974|Supplier#000000974|xIscm3sM7v5hU7NioMfSJ9tLeIBvSm3vGc|16|26-370-235-3418|-778.92|x furiously slyly regular 975|Supplier#000000975|,AC e,tBpNwKb5xMUzeohxlRn, hdZJo73gFQF8y|3|13-892-333-9275|7577.42| slyly express deposits. pending asymptotes could have to use furiously. fluff 976|Supplier#000000976|MVpCgFTl7sGge4cFxVXD|24|34-998-900-4911|4744.07|t the pending dependencies sleep blithely about the blithely pending acco 977|Supplier#000000977|Kuud1x4l,UNEkRAQjCEsu|20|30-938-867-9723|3633.55|lithely ironic requests sleep enticingly ironic foxes. deposits along the slyly pending dolphi 978|Supplier#000000978|XzhDlm7Mr3RyWZL7PV6ush|16|26-135-110-8202|6104.45|ly ironic requests. 
carefully bold ideas haggle quick 979|Supplier#000000979|cdvHjrKZR7iDlmSWU2a|10|20-151-688-1408|9538.15|ckages cajole quietly carefully regular in 980|Supplier#000000980|jfgiJfywBW88ZEYM 5V|13|23-105-829-3910|2783.33|xcuses. unusual, special accounts integrate furio 981|Supplier#000000981|uf24XV3FD7J0BY5FQ29Nbco8A|19|29-161-734-6046|5343.95|inal packages sleep along the accounts. fluffily special pains wake fluffily. blithely bold requests 982|Supplier#000000982|2GJow4mz8ZkIPUSibA0NZ3OyR5TkfHx0|10|20-884-330-2979|9763.28|deas cajole carefully furiously regula 983|Supplier#000000983|XOYb xohl2j0U7wTTUaT4F6DShKfH4Hv3p,hnP |0|10-384-209-1825|2576.28|onic requests. slyly unusual ideas wake carefully final depo 984|Supplier#000000984|6H6qqye iYbYzCmwWhj|21|31-519-879-5266|1444.79|iously except the blithely unusual packages. c 985|Supplier#000000985|kzI8mk3jN9F67EStJ 8dlpx 6GwZYwzXPFOKJ5R|1|11-131-656-2612|3524.10|ut the furiously final deposits integrate according to th 986|Supplier#000000986|tKoJtnykz0R39BWTgglt0rZxT|22|32-342-471-2481|3516.69|jole enticingly: regular foxes among the regular deposits shou 987|Supplier#000000987|DAWJ1lDhybbSO3mngqD28aX|18|28-375-179-1732|-40.30|uriously unusual courts. slyly unus 988|Supplier#000000988|dFt73JWMYsSxR3 UQN K3FAz|0|10-630-928-4130|2536.81| according to the ironic packages. 989|Supplier#000000989|e5uX8AJF,,zRIIDgJc YMB59ITz4v|1|11-895-219-9405|9098.64|t the silent, final dependencies use busily above the reg 990|Supplier#000000990|DeOjGX,4Ns1|2|12-647-684-5389|7985.78|y ironic packages sleep fluffily despite the grouches-- bold, special accounts nag along the carefu 991|Supplier#000000991|Bh4Danx VvUpMce x42|16|26-793-462-2874|4026.14|foxes are slyly above the furiously express t 992|Supplier#000000992|iZPAlGecV0uUsxMikQG7s|2|12-663-356-1288|4379.45|silent packages. quickly regular requests against the carefully unusual theodolites affix fu 993|Supplier#000000993|z2NwUJ TPfd9MP8K3Blp1prYQ116 |2|12-316-384-2073|2336.52| asymptotes haggle slowly above the 994|Supplier#000000994|0qF9I2cfv48Cu|4|14-183-331-6019|8855.24|sits boost blithely final instructions. ironic m 995|Supplier#000000995|CgVUX8DtNbtug2M,N|18|28-180-818-2912|9025.90|s nag. furiously even theodolites cajole. 996|Supplier#000000996|Wx4dQwOAwWjfSCGupfrM|7|17-447-811-3282|6329.90| ironic forges cajole blithely agai 997|Supplier#000000997|7eUWMrOCKCp2JYas6P4mL93eaWIOtKKWtTX|3|13-221-322-7971|3659.56|y regular excuses boost slyly furiously final deposits. evenly fi 998|Supplier#000000998|lgaoC,43IUbHf3Ar5odS8wQKp|15|25-430-605-1180|3282.62|hs against the unusual accounts haggle r 999|Supplier#000000999|XIA9uPu,fDZTOC,ItOGKYNXnoTvCuULtzmnSk|2|12-991-892-1050|3898.69| ironic requests snooze? unusual depths alongside of the furiously 1000|Supplier#000001000|sep4GQHrXe|17|27-971-649-2792|7307.62|press deposits boost thinly quickly unusual instructions. unusual forges haggle ruthlessly. 
packa citus-7.0.3/src/test/regress/data/users_table.data000066400000000000000000014756301317107136600221710ustar00rootroot0000000000000020,2014-01-12 21:54:06.407068,694,209,469, 60,2014-01-11 22:26:06.279143,87,624,82, 8,2014-01-18 04:13:25.327728,680,744,389, 8,2014-01-19 00:13:13.75574,941,143,540, 20,2014-01-18 14:25:31.817903,11,382,36, 20,2014-01-17 12:29:23.92051,775,707,573, 60,2014-01-12 05:08:11.923195,404,258,743, 8,2014-01-19 21:32:04.142373,525,417,264, 8,2014-01-20 14:06:39.70089,76,910,863, 8,2014-01-15 10:44:00.073976,511,455,264, 8,2014-01-11 02:42:09.964504,431,536,491, 8,2014-01-12 22:21:12.195373,668,461,88, 60,2014-01-13 03:08:23.408488,586,484,29, 20,2014-01-12 23:30:58.098957,676,236,455, 20,2014-01-12 09:53:03.733107,515,588,45, 60,2014-01-13 20:18:35.657453,995,393,171, 20,2014-01-16 02:24:40.32348,705,122,660, 8,2014-01-11 08:29:01.795058,114,383,984, 60,2014-01-19 15:10:28.893394,344,893,866, 8,2014-01-12 08:44:52.480069,303,175,536, 60,2014-01-20 11:57:36.824796,607,312,906, 8,2014-01-11 15:30:29.850663,383,857,175, 60,2014-01-15 21:34:51.127834,437,459,958, 20,2014-01-13 15:52:00.487557,994,268,462, 60,2014-01-15 04:34:37.693767,26,119,136, 60,2014-01-12 18:23:34.627112,291,193,712, 60,2014-01-17 19:44:49.149707,547,875,491, 8,2014-01-18 22:57:47.284532,901,224,103, 20,2014-01-18 04:04:11.076476,6,691,656, 60,2014-01-19 23:36:51.343179,925,630,955, 60,2014-01-18 23:11:11.479107,482,942,224, 20,2014-01-20 05:58:17.483964,296,621,564, 20,2014-01-21 02:02:57.622741,966,676,665, 8,2014-01-15 21:24:36.807255,229,491,144, 20,2014-01-18 00:11:31.239703,485,468,314, 8,2014-01-18 10:28:29.440644,788,792,908, 8,2014-01-12 03:54:00.727766,302,128,180, 20,2014-01-19 09:19:43.163317,740,966,197, 20,2014-01-17 02:00:33.583017,362,297,96, 8,2014-01-11 12:00:40.428655,289,460,848, 20,2014-01-13 14:03:43.237293,80,498,505, 20,2014-01-17 08:42:43.547522,845,310,74, 60,2014-01-13 14:21:52.624027,660,404,790, 60,2014-01-15 21:12:06.64578,8,100,946, 20,2014-01-19 12:13:41.646479,59,877,160, 20,2014-01-15 04:14:56.357553,348,383,957, 60,2014-01-18 21:30:42.303234,943,992,17, 8,2014-01-14 08:38:21.970378,926,641,755, 60,2014-01-15 16:43:58.152612,976,328,962, 60,2014-01-17 06:41:11.196628,115,342,958, 60,2014-01-15 21:23:14.450446,640,448,47, 20,2014-01-14 22:41:34.977827,76,921,547, 60,2014-01-18 02:11:41.047709,237,603,288, 20,2014-01-20 22:58:05.578281,71,395,820, 60,2014-01-17 18:33:35.465384,402,38,535, 60,2014-01-12 00:46:18.741476,540,746,741, 60,2014-01-12 11:21:40.205393,214,131,73, 60,2014-01-14 08:18:24.615509,714,864,474, 8,2014-01-20 03:44:06.026008,54,811,819, 20,2014-01-19 19:25:51.282289,533,92,511, 8,2014-01-13 01:55:15.958689,438,861,888, 60,2014-01-17 20:55:17.505117,932,598,349, 8,2014-01-18 19:24:08.766252,820,637,236, 8,2014-01-12 21:58:13.581429,380,121,539, 8,2014-01-12 12:19:54.312371,719,851,875, 60,2014-01-15 13:35:40.005954,607,862,734, 60,2014-01-20 12:26:43.022074,570,396,526, 60,2014-01-13 20:13:55.040555,305,596,390, 8,2014-01-13 16:55:23.169271,491,118,836, 60,2014-01-16 08:27:16.605091,408,804,545, 60,2014-01-14 12:14:35.46921,842,536,244, 8,2014-01-16 10:59:06.406564,26,763,724, 20,2014-01-12 11:10:00.152612,975,184,211, 60,2014-01-17 06:36:28.093657,708,873,993, 8,2014-01-11 02:11:13.052764,167,137,598, 60,2014-01-21 03:23:41.79951,700,503,950, 8,2014-01-11 04:51:24.45121,351,62,449, 8,2014-01-19 03:41:55.355997,734,796,778, 20,2014-01-14 06:02:03.769888,267,69,790, 8,2014-01-16 01:35:22.603823,927,663,363, 60,2014-01-15 
04:23:06.553044,698,314,415, 60,2014-01-19 07:25:18.220334,765,504,378, 8,2014-01-20 04:16:57.58165,238,550,198, 20,2014-01-12 00:34:47.600753,851,404,641, 20,2014-01-19 00:23:23.798616,783,133,739, 20,2014-01-16 16:50:33.047452,625,848,401, 60,2014-01-13 05:21:06.342229,70,374,19, 8,2014-01-20 15:45:04.004008,916,995,897, 20,2014-01-20 05:08:57.662962,624,504,179, 8,2014-01-12 03:05:12.368238,109,847,544, 20,2014-01-19 05:10:55.286297,532,542,241, 8,2014-01-12 01:04:13.621651,324,840,760, 20,2014-01-19 04:00:29.873354,960,233,133, 20,2014-01-16 18:35:04.052549,575,13,107, 60,2014-01-14 03:02:27.2035,763,624,244, 8,2014-01-20 20:20:24.186145,676,212,605, 60,2014-01-11 02:10:44.058922,766,276,633, 60,2014-01-13 09:29:10.225573,683,602,661, 60,2014-01-13 10:34:19.2267,32,462,274, 20,2014-01-13 23:06:07.228193,271,822,325, 60,2014-01-18 21:56:26.831083,160,49,798, 8,2014-01-17 02:48:54.69591,273,262,262, 8,2014-01-19 14:05:13.635176,266,618,287, 8,2014-01-20 13:06:26.983695,369,268,60, 60,2014-01-13 03:25:22.789568,228,784,224, 60,2014-01-19 20:16:26.68794,923,444,728, 8,2014-01-20 10:30:08.783205,616,174,984, 60,2014-01-13 12:16:47.240778,877,955,539, 20,2014-01-17 17:58:22.043936,999,141,755, 20,2014-01-13 10:32:12.553093,740,555,554, 20,2014-01-18 17:52:09.84502,113,466,532, 20,2014-01-11 16:21:28.59698,253,909,50, 60,2014-01-11 11:57:30.773427,757,526,821, 20,2014-01-17 16:09:07.42667,998,883,143, 60,2014-01-12 20:56:16.197733,934,834,372, 60,2014-01-19 16:20:54.572043,819,877,211, 8,2014-01-13 02:59:40.474122,520,393,210, 8,2014-01-15 06:17:22.539964,256,657,683, 20,2014-01-19 02:05:58.57647,878,217,765, 8,2014-01-12 02:08:38.137084,453,532,956, 60,2014-01-16 13:22:34.908201,803,865,572, 60,2014-01-17 01:16:53.862767,481,89,294, 8,2014-01-13 07:12:51.759154,659,979,376, 20,2014-01-14 11:23:04.781975,203,173,336, 60,2014-01-12 13:51:57.915734,671,76,830, 8,2014-01-16 14:15:18.962654,813,943,880, 60,2014-01-14 01:43:28.96812,152,445,209, 20,2014-01-12 20:02:41.974656,316,437,897, 20,2014-01-19 03:44:29.188228,584,39,290, 60,2014-01-16 16:17:48.19482,398,914,559, 8,2014-01-15 23:08:49.202849,241,896,252, 20,2014-01-16 19:40:56.019311,981,157,633, 60,2014-01-12 13:06:42.89073,417,499,746, 8,2014-01-14 07:14:02.838025,560,957,669, 20,2014-01-16 02:47:23.003425,991,760,486, 60,2014-01-14 20:32:05.680717,972,455,794, 20,2014-01-12 21:30:29.525965,128,854,953, 8,2014-01-15 07:17:31.78663,221,25,585, 20,2014-01-17 12:48:52.921495,358,447,978, 60,2014-01-19 19:28:51.57032,789,801,568, 60,2014-01-17 21:49:44.340143,773,683,323, 20,2014-01-15 00:41:02.766515,732,47,13, 20,2014-01-20 15:50:20.1673,211,547,812, 60,2014-01-18 13:47:15.11357,864,220,797, 8,2014-01-11 10:50:10.193603,767,609,17, 8,2014-01-12 06:46:36.365453,859,950,49, 60,2014-01-17 00:08:09.686031,279,106,150, 20,2014-01-13 17:49:50.667725,595,675,428, 20,2014-01-16 17:03:58.905417,376,614,729, 8,2014-01-14 20:14:08.262501,79,438,430, 8,2014-01-14 23:58:28.805229,416,234,764, 8,2014-01-12 00:26:33.814036,473,724,976, 20,2014-01-21 01:31:02.125268,942,861,526, 20,2014-01-17 11:11:20.564383,916,130,516, 60,2014-01-15 15:49:38.596011,360,521,834, 20,2014-01-12 09:23:00.041002,737,558,402, 60,2014-01-12 19:26:39.527038,693,356,419, 20,2014-01-18 21:33:07.56413,471,629,560, 8,2014-01-14 09:25:42.015658,712,716,903, 8,2014-01-21 03:11:08.715266,304,42,950, 8,2014-01-14 07:50:55.75895,339,860,658, 8,2014-01-19 12:34:31.218927,679,996,104, 60,2014-01-16 16:52:04.734996,281,514,189, 60,2014-01-16 00:57:38.64968,476,825,447, 20,2014-01-12 
13:48:34.056953,247,875,750, 60,2014-01-11 13:39:27.738421,608,17,137, 20,2014-01-20 01:29:44.330398,440,59,449, 60,2014-01-14 15:19:03.583336,926,71,329, 60,2014-01-16 00:56:59.525052,26,656,813, 8,2014-01-16 08:18:37.252311,155,545,662, 8,2014-01-13 04:47:55.153656,530,134,805, 60,2014-01-12 16:46:43.865195,944,93,835, 60,2014-01-20 12:59:40.018827,912,98,446, 60,2014-01-12 14:38:15.320957,546,223,215, 8,2014-01-20 10:33:58.979184,720,645,682, 20,2014-01-21 03:49:50.21243,669,676,532, 60,2014-01-14 01:24:51.68641,708,75,63, 60,2014-01-16 04:42:08.665216,439,271,339, 20,2014-01-13 15:39:40.880575,6,929,606, 20,2014-01-19 22:28:50.592246,463,707,538, 8,2014-01-20 04:56:16.927717,134,24,801, 20,2014-01-17 19:38:09.685804,600,534,79, 8,2014-01-21 02:55:24.406282,799,848,824, 20,2014-01-20 00:27:19.052985,120,996,531, 8,2014-01-14 00:49:30.250187,987,146,853, 8,2014-01-15 12:45:03.002294,958,384,796, 20,2014-01-11 03:50:19.094407,662,673,821, 20,2014-01-16 00:16:09.777225,32,733,698, 20,2014-01-13 04:18:10.566424,871,476,379, 60,2014-01-14 17:16:01.110066,328,834,317, 60,2014-01-15 21:27:18.492491,74,287,293, 60,2014-01-16 16:09:06.325374,915,303,598, 60,2014-01-12 23:50:32.328993,241,144,914, 20,2014-01-11 08:19:23.227486,931,91,552, 60,2014-01-11 11:06:44.975055,357,269,625, 8,2014-01-14 17:39:06.386365,862,903,202, 20,2014-01-12 01:58:50.965908,237,806,22, 60,2014-01-20 16:36:29.305871,932,108,94, 8,2014-01-18 12:58:09.969701,94,38,298, 8,2014-01-17 06:55:50.491379,810,206,907, 60,2014-01-15 18:55:06.558183,532,771,510, 60,2014-01-20 21:46:05.123357,847,232,686, 60,2014-01-19 03:42:34.356574,485,123,113, 8,2014-01-15 01:54:46.57701,232,390,686, 20,2014-01-12 06:24:20.444315,173,745,978, 20,2014-01-18 08:16:33.335757,649,500,917, 60,2014-01-14 23:44:36.789859,57,308,342, 8,2014-01-15 11:49:12.131145,728,891,644, 20,2014-01-13 06:58:42.000973,833,299,981, 8,2014-01-17 19:24:17.670434,77,955,990, 20,2014-01-14 04:18:02.723391,63,101,579, 60,2014-01-12 05:54:58.92869,807,642,630, 63,2014-01-14 09:02:27.356239,820,562,544, 82,2014-01-14 01:13:27.129673,217,154,503, 63,2014-01-11 00:22:17.982095,328,66,252, 25,2014-01-17 13:51:57.606425,919,406,392, 82,2014-01-18 17:58:30.131967,272,642,874, 82,2014-01-11 08:12:37.076501,589,727,290, 1,2014-01-12 08:08:07.38365,42,776,725, 15,2014-01-21 02:16:40.698391,417,425,82, 1,2014-01-15 05:28:38.186567,269,808,235, 15,2014-01-17 09:35:25.876141,331,347,888, 15,2014-01-16 12:25:47.023766,532,446,583, 25,2014-01-17 09:19:10.515978,305,197,283, 15,2014-01-17 21:54:49.104047,78,919,641, 15,2014-01-17 03:32:31.99882,440,249,533, 15,2014-01-10 20:58:16.902343,976,818,732, 82,2014-01-19 03:53:40.069954,212,444,615, 25,2014-01-16 14:09:01.304692,951,956,928, 1,2014-01-18 13:56:26.872044,471,866,680, 1,2014-01-15 04:49:30.561333,473,83,919, 63,2014-01-11 03:04:07.862875,526,297,219, 63,2014-01-18 05:42:31.995401,789,364,132, 63,2014-01-13 02:32:04.918326,523,608,422, 63,2014-01-15 08:58:54.440304,840,331,397, 82,2014-01-19 16:06:52.439717,94,775,488, 63,2014-01-20 14:48:38.254083,890,64,578, 82,2014-01-19 12:43:31.230164,424,45,612, 82,2014-01-13 21:56:04.57086,710,215,97, 15,2014-01-12 15:47:20.255475,755,405,955, 63,2014-01-16 02:07:48.900598,307,723,822, 82,2014-01-17 06:14:07.294251,681,814,922, 82,2014-01-14 01:42:19.184166,807,876,13, 25,2014-01-19 15:10:16.256836,112,144,199, 63,2014-01-20 11:27:34.423924,188,647,673, 63,2014-01-14 06:04:37.166259,491,632,269, 15,2014-01-15 23:02:13.863261,438,207,342, 15,2014-01-17 23:26:04.555891,230,430,25, 1,2014-01-14 
18:17:14.24276,472,269,323, 63,2014-01-17 11:10:21.246911,486,826,837, 15,2014-01-17 19:42:45.254281,71,945,238, 82,2014-01-19 03:45:52.429747,642,392,624, 82,2014-01-13 14:45:47.123053,100,807,928, 15,2014-01-13 02:08:32.278047,529,925,29, 82,2014-01-15 07:05:02.945725,15,297,188, 15,2014-01-20 16:40:36.2271,383,42,750, 82,2014-01-19 09:41:04.277286,907,334,773, 82,2014-01-15 08:03:19.848068,684,904,692, 15,2014-01-18 14:34:16.297473,740,851,130, 82,2014-01-14 17:50:05.581977,27,10,353, 63,2014-01-12 15:59:46.720111,168,296,713, 63,2014-01-12 13:23:46.858806,588,222,463, 1,2014-01-15 00:54:13.445272,880,0,215, 82,2014-01-20 01:42:18.715514,784,507,32, 15,2014-01-14 19:55:51.777132,425,76,524, 82,2014-01-19 13:53:07.885575,151,302,304, 63,2014-01-18 11:49:11.155229,394,821,202, 1,2014-01-14 04:44:30.031635,184,546,945, 82,2014-01-17 20:36:39.115739,761,557,60, 1,2014-01-11 03:45:15.726509,252,66,830, 1,2014-01-16 00:31:50.28711,622,891,39, 1,2014-01-12 16:44:28.016337,244,914,355, 25,2014-01-17 13:59:23.020759,672,713,509, 15,2014-01-19 06:14:09.471276,802,619,684, 15,2014-01-11 01:54:44.273174,212,960,693, 82,2014-01-16 19:26:57.445103,625,914,456, 1,2014-01-12 06:18:46.637954,800,775,548, 63,2014-01-16 04:56:58.136435,591,287,735, 82,2014-01-13 12:53:02.000993,467,252,240, 82,2014-01-16 04:36:00.880716,638,746,419, 82,2014-01-12 10:07:19.383347,863,114,501, 63,2014-01-20 12:35:47.255694,420,808,656, 1,2014-01-14 02:21:53.310461,16,838,872, 1,2014-01-15 04:53:06.5064,821,251,588, 25,2014-01-12 08:44:19.533741,362,245,841, 1,2014-01-18 13:26:56.256186,762,842,74, 82,2014-01-14 15:33:42.733919,969,780,950, 15,2014-01-20 22:25:23.811027,746,893,220, 63,2014-01-12 15:30:16.104254,622,937,65, 63,2014-01-12 00:07:59.031392,901,113,540, 25,2014-01-14 10:15:29.393423,293,528,6, 82,2014-01-14 11:30:02.824366,863,862,501, 1,2014-01-13 17:31:45.890199,599,563,66, 1,2014-01-18 15:09:42.838695,774,923,271, 82,2014-01-13 07:12:21.539879,648,649,650, 25,2014-01-17 17:27:37.66775,113,389,521, 1,2014-01-16 23:02:50.72427,701,555,218, 1,2014-01-20 23:01:32.695527,800,664,59, 82,2014-01-21 02:12:07.699385,600,972,176, 25,2014-01-13 13:39:29.840009,362,497,536, 1,2014-01-10 20:46:48.422036,946,517,463, 15,2014-01-15 20:43:57.986496,255,435,286, 25,2014-01-15 10:23:57.856347,443,483,187, 25,2014-01-17 14:46:11.442796,942,924,685, 63,2014-01-13 20:58:07.457772,232,363,888, 82,2014-01-15 16:18:42.12952,870,617,545, 25,2014-01-13 04:13:08.887899,512,976,985, 1,2014-01-15 07:16:54.095727,577,41,389, 82,2014-01-21 01:15:40.265955,443,130,979, 15,2014-01-15 21:06:10.889311,288,744,797, 63,2014-01-20 15:52:54.976861,369,8,527, 82,2014-01-12 05:22:59.649303,943,41,855, 1,2014-01-15 03:41:58.145005,667,790,153, 25,2014-01-13 12:14:48.287323,417,383,585, 82,2014-01-16 14:16:06.155703,679,41,274, 1,2014-01-16 16:26:17.678746,541,710,881, 63,2014-01-21 05:41:44.543929,214,434,526, 15,2014-01-20 09:49:48.889622,439,654,43, 82,2014-01-16 08:51:41.490192,279,252,888, 15,2014-01-12 15:12:00.648183,728,811,260, 15,2014-01-11 03:57:47.921015,561,124,116, 82,2014-01-19 23:07:10.883615,754,815,426, 25,2014-01-16 06:42:03.472969,18,670,236, 63,2014-01-14 01:29:33.811632,503,820,439, 25,2014-01-17 08:16:53.722309,343,535,968, 25,2014-01-18 17:54:25.012848,486,234,827, 1,2014-01-20 22:57:11.479382,401,450,462, 25,2014-01-13 01:19:44.446579,331,75,256, 25,2014-01-18 10:55:57.708375,438,786,567, 1,2014-01-20 19:09:19.178768,541,854,321, 82,2014-01-15 18:59:14.286589,111,686,144, 25,2014-01-18 11:42:46.130411,60,350,243, 15,2014-01-15 
09:53:17.165265,926,925,344, 25,2014-01-20 09:23:12.142935,394,33,156, 15,2014-01-14 20:28:57.573627,629,905,78, 82,2014-01-18 10:51:24.623036,205,926,843, 25,2014-01-14 19:41:54.272455,320,54,323, 1,2014-01-17 04:42:06.461526,6,228,730, 1,2014-01-12 12:08:18.719182,551,600,586, 15,2014-01-14 14:57:34.53841,333,577,796, 82,2014-01-11 19:48:17.350837,431,656,527, 1,2014-01-11 22:01:13.696043,507,263,548, 1,2014-01-16 00:20:34.187714,837,439,609, 25,2014-01-16 03:30:15.495843,94,878,677, 63,2014-01-14 14:16:01.983366,488,276,391, 15,2014-01-11 08:36:40.343417,674,401,928, 25,2014-01-11 13:56:33.175007,725,247,608, 82,2014-01-14 13:57:46.527295,425,318,351, 15,2014-01-20 22:26:29.233457,988,66,331, 82,2014-01-17 02:48:14.665198,888,363,700, 1,2014-01-16 09:09:47.175897,282,881,198, 15,2014-01-10 20:24:17.154472,246,632,604, 15,2014-01-15 19:55:25.548814,926,816,241, 1,2014-01-11 09:51:50.648866,541,905,487, 15,2014-01-14 01:53:50.966103,384,705,892, 25,2014-01-11 22:12:19.271123,703,205,624, 15,2014-01-19 07:46:15.661714,627,819,45, 15,2014-01-13 18:51:02.445486,613,990,595, 82,2014-01-14 03:32:03.717702,44,848,544, 1,2014-01-16 12:42:13.370507,387,737,178, 25,2014-01-13 08:00:21.624254,914,833,481, 63,2014-01-19 02:31:18.004291,839,359,606, 15,2014-01-13 18:24:59.500919,459,921,379, 25,2014-01-17 21:53:38.789519,505,498,698, 15,2014-01-18 05:54:30.147226,239,158,115, 25,2014-01-17 18:53:57.074546,731,969,798, 63,2014-01-15 02:45:03.412974,681,290,129, 25,2014-01-11 19:36:24.419681,618,504,535, 82,2014-01-13 17:36:03.536492,34,518,137, 1,2014-01-16 18:53:22.132155,868,18,599, 63,2014-01-15 14:33:58.958092,652,885,101, 15,2014-01-14 17:24:20.887328,941,419,575, 63,2014-01-17 20:54:35.828198,840,138,871, 63,2014-01-20 18:54:33.145805,997,241,54, 1,2014-01-20 00:54:36.38359,509,971,574, 1,2014-01-11 05:10:37.811565,68,7,126, 1,2014-01-10 21:31:13.489221,27,640,285, 25,2014-01-20 18:51:09.558597,605,433,271, 25,2014-01-14 23:08:24.339278,812,321,372, 15,2014-01-20 23:57:42.722678,414,264,213, 25,2014-01-16 15:39:24.223796,779,494,81, 82,2014-01-20 12:18:11.515175,380,734,136, 63,2014-01-21 00:21:59.877569,865,504,433, 82,2014-01-11 05:34:49.772608,712,39,358, 15,2014-01-21 02:10:02.164041,911,133,606, 82,2014-01-13 20:15:50.843673,107,691,374, 63,2014-01-12 07:47:09.043731,138,509,951, 15,2014-01-19 03:56:17.825754,179,50,515, 1,2014-01-16 19:06:53.289577,756,482,461, 63,2014-01-15 15:19:12.761433,113,553,827, 63,2014-01-14 10:38:31.196261,317,801,255, 25,2014-01-19 07:07:14.913831,755,282,679, 82,2014-01-13 11:50:30.765724,545,769,351, 1,2014-01-17 09:03:30.6976,62,921,863, 15,2014-01-15 23:00:53.703769,292,964,636, 1,2014-01-20 21:45:00.91295,79,154,218, 82,2014-01-13 21:57:27.772146,560,691,442, 63,2014-01-20 05:45:57.116743,470,62,605, 25,2014-01-11 11:21:25.332631,427,671,224, 63,2014-01-16 19:33:31.308637,513,105,673, 1,2014-01-15 18:39:19.248898,136,528,604, 63,2014-01-16 05:55:24.290723,974,14,929, 82,2014-01-20 16:57:52.196384,67,839,127, 25,2014-01-12 09:33:55.077096,414,154,134, 63,2014-01-15 18:49:57.436528,455,932,923, 1,2014-01-19 11:52:28.579975,416,986,865, 25,2014-01-12 18:44:32.88866,658,27,38, 1,2014-01-15 20:21:10.925749,458,630,723, 63,2014-01-19 00:43:38.138572,372,630,755, 82,2014-01-16 21:52:57.227939,408,843,748, 25,2014-01-15 14:18:53.648847,795,163,731, 82,2014-01-14 10:23:02.362368,911,312,629, 82,2014-01-16 04:11:08.743114,454,688,429, 1,2014-01-15 08:40:53.526416,488,764,344, 63,2014-01-14 19:57:52.134976,189,406,279, 63,2014-01-16 00:21:10.907154,612,452,757, 
1,2014-01-18 08:56:44.370089,438,852,281, 1,2014-01-16 07:45:01.178707,683,395,225, 82,2014-01-13 22:17:28.733328,597,430,695, 63,2014-01-13 22:03:37.659666,672,702,858, 63,2014-01-21 03:04:13.94014,295,997,748, 1,2014-01-17 12:55:59.92959,857,674,729, 1,2014-01-11 23:10:52.573917,29,795,351, 15,2014-01-13 08:54:44.705865,29,866,98, 15,2014-01-13 15:59:30.62719,594,931,711, 15,2014-01-17 02:11:46.277685,80,432,65, 63,2014-01-13 00:39:45.618815,74,999,38, 82,2014-01-16 17:56:58.399754,543,762,342, 15,2014-01-16 01:57:43.394428,745,652,232, 25,2014-01-13 16:01:10.951446,864,871,84, 25,2014-01-12 07:30:29.708392,841,141,417, 15,2014-01-21 00:37:02.643325,490,911,862, 25,2014-01-19 01:56:35.242169,251,21,535, 82,2014-01-11 18:28:21.904777,517,184,337, 1,2014-01-12 04:10:57.720422,626,933,851, 1,2014-01-13 14:46:32.678697,988,146,62, 1,2014-01-20 10:20:50.484751,484,107,187, 63,2014-01-14 02:55:30.609082,495,213,854, 1,2014-01-18 15:07:43.604446,102,99,663, 63,2014-01-18 05:04:28.623323,365,40,493, 25,2014-01-20 04:48:27.83744,697,969,272, 15,2014-01-12 23:26:37.253293,501,528,684, 1,2014-01-11 09:27:30.985691,918,422,892, 82,2014-01-15 02:59:36.580553,533,152,978, 63,2014-01-17 12:07:30.779708,112,297,29, 82,2014-01-15 09:25:23.120667,753,948,310, 1,2014-01-20 07:20:47.488127,502,824,656, 25,2014-01-14 15:04:15.150216,705,398,923, 25,2014-01-20 21:10:24.299373,4,540,452, 15,2014-01-12 23:38:16.221455,798,21,697, 25,2014-01-17 17:07:52.809881,342,90,878, 15,2014-01-20 18:14:38.239514,570,633,284, 25,2014-01-19 16:34:16.151045,966,920,179, 82,2014-01-18 20:18:45.383797,324,62,600, 25,2014-01-12 21:09:22.945377,438,754,578, 82,2014-01-12 02:33:46.778653,85,724,692, 63,2014-01-14 16:30:31.661482,838,9,84, 15,2014-01-15 01:49:08.564192,861,771,704, 15,2014-01-18 00:30:45.178407,426,146,786, 82,2014-01-19 22:28:15.05591,250,625,601, 1,2014-01-17 21:50:19.515638,823,77,197, 1,2014-01-19 12:01:14.8868,922,842,282, 63,2014-01-19 17:05:17.699235,702,424,633, 82,2014-01-15 17:46:54.757806,344,448,544, 82,2014-01-20 10:29:36.791996,785,967,951, 15,2014-01-21 01:16:15.419657,208,516,567, 25,2014-01-18 12:33:27.436503,812,36,486, 82,2014-01-19 14:50:27.276747,532,468,941, 25,2014-01-13 22:11:46.029158,766,301,343, 25,2014-01-15 21:41:11.041368,392,117,502, 1,2014-01-16 13:54:55.90049,890,200,944, 82,2014-01-12 21:00:13.866598,408,715,352, 15,2014-01-18 01:07:48.29466,852,329,41, 15,2014-01-17 03:22:26.88618,183,686,307, 25,2014-01-17 03:59:50.447152,331,857,635, 82,2014-01-14 07:15:19.074788,62,223,534, 1,2014-01-11 06:47:50.007266,187,188,905, 82,2014-01-16 05:20:37.935279,324,327,707, 25,2014-01-18 02:19:34.225003,313,916,876, 15,2014-01-10 21:58:14.306639,332,802,796, 63,2014-01-18 08:58:54.156734,159,613,188, 15,2014-01-14 13:27:27.034884,689,180,509, 15,2014-01-20 20:12:52.546571,327,753,713, 15,2014-01-16 19:33:10.308198,260,77,818, 25,2014-01-12 03:46:12.4191,993,999,804, 25,2014-01-12 11:22:15.491948,811,295,965, 25,2014-01-18 02:06:57.086851,31,949,431, 1,2014-01-16 00:16:44.080582,770,740,54, 25,2014-01-16 17:11:24.056559,435,316,758, 25,2014-01-14 20:37:42.265259,227,316,360, 63,2014-01-14 16:44:59.136912,330,259,720, 15,2014-01-13 09:01:43.572197,826,270,388, 63,2014-01-13 02:38:57.152058,213,438,975, 63,2014-01-13 03:50:16.836148,413,228,814, 25,2014-01-18 06:48:38.330423,781,664,763, 25,2014-01-12 07:08:33.944054,996,340,863, 1,2014-01-12 23:06:32.256224,789,912,404, 1,2014-01-15 13:22:05.766925,835,858,495, 1,2014-01-21 01:59:01.22122,750,348,393, 25,2014-01-16 01:18:18.285382,969,595,237, 
82,2014-01-20 15:03:16.808294,560,697,209, 1,2014-01-16 09:53:57.121709,791,92,590, 25,2014-01-18 02:18:32.151981,392,797,460, 1,2014-01-17 10:11:05.103373,674,347,948, 25,2014-01-12 07:16:24.00789,264,18,867, 82,2014-01-14 00:18:22.599132,707,779,441, 1,2014-01-20 21:26:24.17816,723,344,386, 63,2014-01-12 18:04:14.015155,19,977,759, 82,2014-01-19 09:39:00.53483,574,912,794, 25,2014-01-17 17:45:58.403164,139,580,770, 1,2014-01-12 20:02:28.322212,735,283,981, 15,2014-01-16 12:37:54.691565,992,43,344, 63,2014-01-11 01:13:25.438047,902,83,120, 82,2014-01-12 10:15:20.868783,950,80,438, 25,2014-01-12 02:11:04.999763,906,633,783, 25,2014-01-12 08:59:37.857148,150,354,29, 63,2014-01-14 01:37:36.360731,608,802,350, 25,2014-01-19 08:18:02.086614,489,933,193, 15,2014-01-17 13:16:21.938149,828,627,574, 1,2014-01-19 22:49:00.417291,287,873,683, 63,2014-01-12 22:55:44.351873,613,779,265, 15,2014-01-11 00:01:21.075061,567,695,81, 25,2014-01-12 01:50:43.989906,711,941,536, 1,2014-01-15 05:34:41.504351,950,598,696, 25,2014-01-13 07:51:37.911209,538,797,209, 15,2014-01-19 12:39:22.320329,503,97,406, 15,2014-01-16 16:43:15.448404,872,90,285, 1,2014-01-15 10:58:10.167433,171,680,95, 1,2014-01-13 20:01:28.087254,620,303,106, 25,2014-01-16 12:42:16.669625,395,520,940, 63,2014-01-20 16:16:28.452816,586,697,380, 63,2014-01-13 05:04:44.895967,157,786,823, 63,2014-01-11 16:36:13.791334,894,669,248, 1,2014-01-17 12:35:00.604797,50,333,855, 63,2014-01-19 19:15:49.99934,687,30,205, 15,2014-01-13 03:52:37.799643,415,787,988, 1,2014-01-20 16:53:23.204348,531,886,630, 25,2014-01-19 10:42:14.1775,372,93,543, 15,2014-01-15 01:56:51.814797,206,212,437, 63,2014-01-18 20:32:23.739179,164,737,650, 25,2014-01-15 22:28:12.580664,846,84,789, 82,2014-01-17 01:59:20.137009,634,473,316, 82,2014-01-14 03:10:18.430744,330,632,417, 1,2014-01-16 03:41:38.018711,257,133,836, 25,2014-01-18 16:14:41.005793,210,615,621, 25,2014-01-15 09:21:23.430925,573,706,218, 82,2014-01-17 16:41:15.876278,670,253,509, 1,2014-01-11 11:52:17.366943,620,125,225, 63,2014-01-13 11:39:25.517539,711,679,102, 1,2014-01-13 23:57:37.814427,867,465,749, 82,2014-01-20 14:41:17.784233,599,640,829, 82,2014-01-15 14:35:09.86983,977,854,492, 15,2014-01-14 03:58:58.889489,68,813,407, 1,2014-01-11 10:32:01.77414,961,673,399, 25,2014-01-20 00:09:51.374181,468,893,837, 63,2014-01-16 15:50:36.800698,873,709,550, 63,2014-01-20 03:11:24.094469,377,973,949, 63,2014-01-15 10:53:06.822585,854,320,772, 82,2014-01-10 20:48:46.96855,778,722,99, 1,2014-01-12 17:12:52.182143,321,124,239, 1,2014-01-21 03:35:23.492629,828,241,377, 82,2014-01-20 17:05:15.421367,44,736,501, 15,2014-01-15 02:17:37.078109,490,663,190, 82,2014-01-11 14:11:37.283964,826,119,424, 25,2014-01-16 23:40:16.026582,261,642,129, 1,2014-01-13 15:33:27.077449,914,29,256, 25,2014-01-13 22:04:15.083606,269,134,776, 63,2014-01-16 10:33:39.23093,316,480,570, 63,2014-01-11 20:15:41.25495,427,729,283, 82,2014-01-18 04:01:06.898823,488,455,67, 63,2014-01-14 01:06:02.970109,746,816,74, 63,2014-01-16 22:43:53.835614,358,993,113, 82,2014-01-14 00:00:27.035832,64,660,222, 25,2014-01-17 08:16:21.400853,411,852,784, 25,2014-01-11 20:25:31.854324,530,982,815, 63,2014-01-11 10:15:08.042044,155,674,463, 82,2014-01-11 11:37:44.831778,417,703,78, 63,2014-01-18 17:06:47.730602,695,634,936, 1,2014-01-12 02:07:25.408986,538,409,212, 82,2014-01-14 03:17:10.349735,880,566,732, 63,2014-01-11 11:04:25.545029,115,551,547, 1,2014-01-11 10:48:43.193221,196,997,573, 25,2014-01-18 21:52:20.219567,653,364,576, 15,2014-01-14 
19:03:24.434518,529,350,606, 1,2014-01-12 01:20:44.967779,647,836,73, 15,2014-01-17 16:02:11.593748,6,766,57, 25,2014-01-20 14:54:01.235634,756,994,809, 82,2014-01-10 22:32:09.062248,816,379,300, 82,2014-01-11 20:55:18.416333,698,277,666, 63,2014-01-20 15:42:48.204185,422,887,185, 1,2014-01-12 19:45:01.244391,464,602,141, 63,2014-01-11 18:30:41.908963,968,248,792, 82,2014-01-20 02:48:03.625551,166,413,199, 15,2014-01-17 02:02:38.3225,972,612,665, 82,2014-01-12 12:42:19.193345,110,323,812, 82,2014-01-15 20:28:19.652133,40,715,193, 15,2014-01-19 21:36:05.399949,109,718,456, 15,2014-01-15 14:46:34.276951,393,467,637, 1,2014-01-11 01:01:58.883064,101,181,873, 25,2014-01-20 21:51:46.654899,864,162,128, 63,2014-01-12 12:47:41.175774,992,753,955, 25,2014-01-14 06:08:01.853173,742,409,381, 25,2014-01-16 14:35:40.490512,843,105,524, 1,2014-01-15 16:48:08.212025,510,241,49, 82,2014-01-20 18:24:23.254445,840,209,880, 15,2014-01-17 15:01:12.345255,982,216,767, 15,2014-01-16 07:03:16.254069,463,489,265, 63,2014-01-11 00:02:08.086223,912,209,737, 1,2014-01-15 02:08:00.075857,441,836,735, 25,2014-01-17 13:10:41.663055,647,539,862, 63,2014-01-14 07:19:18.435958,922,740,64, 15,2014-01-15 17:12:25.621306,250,164,334, 25,2014-01-18 03:59:24.856276,492,472,208, 25,2014-01-11 23:11:38.655524,246,678,947, 82,2014-01-19 16:15:50.055824,667,303,470, 82,2014-01-19 09:20:09.824055,124,519,168, 1,2014-01-18 19:13:50.249272,314,767,437, 25,2014-01-19 01:09:51.291458,625,4,415, 25,2014-01-19 11:52:18.886723,823,82,136, 63,2014-01-19 20:09:08.666024,17,742,203, 1,2014-01-18 10:52:39.495644,600,655,939, 1,2014-01-11 01:37:20.131114,387,938,555, 1,2014-01-20 18:39:50.574986,539,433,249, 25,2014-01-17 07:40:43.121614,518,774,124, 82,2014-01-17 07:39:58.454033,482,139,622, 25,2014-01-12 01:22:09.768332,608,273,428, 82,2014-01-11 22:09:02.773748,93,960,65, 1,2014-01-15 23:16:03.853982,130,343,234, 25,2014-01-16 20:08:44.045702,72,592,759, 25,2014-01-12 03:11:01.65723,191,647,139, 82,2014-01-15 15:07:50.5093,908,773,812, 25,2014-01-18 12:56:25.221476,880,471,148, 1,2014-01-15 13:19:03.510404,920,434,130, 15,2014-01-10 23:43:30.999812,212,577,20, 25,2014-01-12 23:44:33.433501,812,90,254, 63,2014-01-15 01:43:26.764849,521,341,792, 63,2014-01-17 18:44:43.345068,918,640,754, 15,2014-01-18 10:47:49.68757,251,494,146, 63,2014-01-15 05:45:34.851491,207,382,567, 63,2014-01-11 14:52:43.421345,660,531,67, 82,2014-01-14 17:58:31.350625,255,570,53, 15,2014-01-18 17:04:53.287449,413,68,805, 82,2014-01-16 12:05:09.042651,760,364,627, 1,2014-01-11 15:57:56.20732,584,126,660, 25,2014-01-19 20:16:31.943393,672,925,272, 25,2014-01-14 22:20:59.098475,720,600,899, 1,2014-01-20 05:18:06.031375,486,288,164, 1,2014-01-17 09:30:22.192665,84,853,607, 1,2014-01-12 17:30:50.390352,920,204,916, 15,2014-01-18 11:10:24.918098,966,562,924, 1,2014-01-15 23:39:30.858689,78,72,489, 25,2014-01-20 08:23:29.885996,880,517,39, 82,2014-01-18 16:47:45.049631,694,798,846, 25,2014-01-15 12:19:21.434095,209,69,992, 82,2014-01-16 10:04:13.007609,797,354,89, 25,2014-01-14 18:27:43.503664,314,490,844, 25,2014-01-16 17:41:31.202427,237,397,340, 15,2014-01-17 12:13:15.781776,879,655,407, 82,2014-01-19 21:43:47.357646,76,869,239, 63,2014-01-12 07:50:15.248129,989,772,302, 82,2014-01-18 19:24:17.439007,754,454,630, 82,2014-01-14 06:51:37.866946,920,393,549, 82,2014-01-20 00:46:40.470024,867,994,923, 63,2014-01-13 02:43:20.94941,416,998,140, 25,2014-01-14 10:35:08.867177,328,102,86, 1,2014-01-11 18:31:13.903525,57,495,60, 63,2014-01-17 08:26:47.714679,98,633,153, 
15,2014-01-10 23:19:52.212245,417,766,640, 82,2014-01-19 09:19:03.591514,227,227,842, 25,2014-01-11 08:12:22.56617,679,391,533, 82,2014-01-11 18:12:35.63359,956,354,508, 1,2014-01-12 21:17:34.942139,34,191,575, 15,2014-01-19 05:17:15.853619,960,647,452, 25,2014-01-17 10:17:44.676241,191,606,982, 15,2014-01-13 17:15:31.149459,545,283,425, 63,2014-01-17 19:33:47.79701,483,726,791, 82,2014-01-11 02:38:43.775134,426,539,413, 82,2014-01-12 16:33:37.181253,111,787,722, 63,2014-01-13 23:04:09.990095,703,66,455, 82,2014-01-13 00:09:34.165486,995,807,884, 63,2014-01-20 07:44:02.099352,538,724,976, 15,2014-01-19 02:43:40.848783,947,683,602, 25,2014-01-12 02:33:04.051482,107,123,146, 1,2014-01-17 18:31:47.148983,945,62,396, 1,2014-01-13 09:03:02.282878,965,324,781, 25,2014-01-17 16:37:17.059511,341,233,820, 15,2014-01-11 06:59:30.652647,230,308,871, 25,2014-01-19 06:44:33.485724,252,268,241, 25,2014-01-13 22:50:32.841288,121,586,849, 88,2014-01-20 08:43:18.010712,595,274,653, 88,2014-01-20 18:34:48.733853,178,876,877, 88,2014-01-11 12:14:50.280295,623,85,229, 31,2014-01-13 09:34:55.877658,261,334,825, 88,2014-01-19 13:21:29.203877,55,635,851, 88,2014-01-13 18:58:11.229705,37,39,576, 31,2014-01-17 00:10:04.744835,179,481,325, 31,2014-01-20 11:52:43.107821,848,193,282, 88,2014-01-20 07:24:58.944384,740,469,381, 88,2014-01-17 03:29:56.9575,593,506,99, 31,2014-01-18 15:11:46.699335,158,748,735, 88,2014-01-20 19:37:21.510554,600,847,462, 31,2014-01-18 01:42:32.59109,281,934,586, 88,2014-01-20 16:29:21.641474,425,133,557, 31,2014-01-18 18:54:37.364591,241,162,680, 31,2014-01-14 06:00:17.26775,904,722,180, 88,2014-01-13 03:44:52.791352,264,973,24, 88,2014-01-15 08:28:25.161602,504,674,101, 88,2014-01-14 12:39:01.042884,339,399,415, 31,2014-01-15 00:18:29.972605,322,575,825, 31,2014-01-18 11:32:35.151697,80,32,483, 88,2014-01-16 16:48:35.208372,450,382,725, 88,2014-01-14 02:02:32.071957,905,35,989, 31,2014-01-16 08:16:16.000899,314,58,180, 88,2014-01-17 23:21:39.259854,645,434,62, 88,2014-01-21 00:34:19.221359,677,175,655, 31,2014-01-18 21:19:18.283776,303,835,320, 88,2014-01-14 09:58:56.319366,283,695,131, 31,2014-01-11 01:33:49.874006,420,391,290, 31,2014-01-16 22:03:51.7695,561,432,313, 31,2014-01-17 12:49:29.160654,946,496,680, 31,2014-01-20 14:17:07.884719,401,454,449, 88,2014-01-16 10:38:40.503772,678,493,187, 31,2014-01-18 05:04:19.440949,190,84,546, 31,2014-01-12 17:52:03.762796,442,442,517, 88,2014-01-14 18:00:09.707649,183,175,663, 31,2014-01-21 04:02:30.671073,621,48,584, 31,2014-01-18 22:02:08.507631,532,724,118, 31,2014-01-13 23:52:52.81547,229,861,908, 31,2014-01-20 05:27:29.615457,84,660,529, 88,2014-01-14 19:32:05.465131,375,383,475, 31,2014-01-11 09:04:39.514805,64,221,249, 88,2014-01-19 19:04:51.12643,883,939,215, 88,2014-01-11 15:14:38.056641,930,460,610, 31,2014-01-21 05:34:01.156698,968,955,183, 31,2014-01-17 07:59:28.491021,205,933,733, 88,2014-01-15 01:14:55.32439,481,750,158, 88,2014-01-13 03:18:53.94805,534,22,115, 31,2014-01-11 10:27:53.652623,97,513,882, 88,2014-01-18 17:53:56.367274,301,909,745, 31,2014-01-17 07:37:23.920656,559,977,608, 31,2014-01-19 02:00:28.804739,26,42,534, 31,2014-01-14 04:42:31.575646,317,702,473, 31,2014-01-20 13:39:55.992614,573,955,266, 31,2014-01-14 04:16:44.805638,572,53,380, 31,2014-01-10 22:04:10.8355,879,178,377, 88,2014-01-20 08:14:15.213973,373,134,376, 88,2014-01-11 19:36:03.089414,521,238,345, 88,2014-01-14 12:03:07.155284,406,880,492, 31,2014-01-20 13:48:05.088398,383,433,183, 88,2014-01-17 21:39:54.859333,763,75,212, 88,2014-01-21 
04:52:36.315939,955,559,736, 31,2014-01-19 22:05:12.973117,237,85,968, 88,2014-01-13 02:18:35.363104,611,882,965, 31,2014-01-18 03:56:55.757306,552,666,182, 31,2014-01-11 09:57:16.735912,955,320,182, 31,2014-01-17 00:18:45.070753,511,980,875, 31,2014-01-18 01:59:26.428379,711,461,914, 88,2014-01-19 11:59:25.243962,941,859,166, 88,2014-01-20 04:11:37.886643,992,417,597, 31,2014-01-17 01:26:56.043836,909,411,239, 31,2014-01-13 01:31:30.709094,809,119,162, 88,2014-01-20 17:16:17.401448,322,812,317, 31,2014-01-15 14:31:47.170267,195,237,733, 31,2014-01-13 20:46:08.765733,654,568,521, 88,2014-01-20 16:50:18.558146,2,97,945, 31,2014-01-11 16:31:15.661287,993,833,628, 31,2014-01-18 02:01:04.090124,245,409,999, 31,2014-01-12 14:09:12.506197,721,686,55, 31,2014-01-12 06:59:09.31433,45,1,639, 88,2014-01-15 13:55:00.457398,192,608,697, 88,2014-01-19 01:46:36.426852,122,610,251, 88,2014-01-20 12:59:38.119069,456,847,136, 31,2014-01-18 22:37:32.033044,601,370,711, 88,2014-01-18 09:26:32.419884,485,103,912, 31,2014-01-13 11:16:22.924706,184,891,357, 88,2014-01-19 00:41:42.868964,362,955,32, 31,2014-01-17 11:40:47.633857,172,973,715, 88,2014-01-14 10:52:26.01412,796,150,27, 31,2014-01-12 06:44:50.024248,40,422,741, 88,2014-01-16 19:28:52.722255,839,567,915, 88,2014-01-11 02:32:20.873453,261,250,172, 31,2014-01-12 05:37:26.340186,591,988,62, 31,2014-01-15 11:34:05.695791,775,518,446, 31,2014-01-13 08:50:56.236557,883,449,735, 88,2014-01-19 13:34:22.097492,793,963,731, 31,2014-01-16 01:31:22.431703,792,45,6, 88,2014-01-19 13:09:41.30773,252,426,206, 31,2014-01-16 09:33:48.525871,815,211,149, 88,2014-01-14 07:30:47.675665,743,187,369, 31,2014-01-18 11:21:19.194373,998,697,283, 31,2014-01-12 05:00:44.570126,738,413,881, 88,2014-01-16 13:02:18.385178,208,320,765, 88,2014-01-17 22:37:36.59582,995,724,165, 88,2014-01-16 23:32:31.740393,255,209,919, 31,2014-01-19 13:48:27.150911,273,761,329, 88,2014-01-17 09:27:55.154385,527,842,841, 31,2014-01-17 20:03:47.402099,516,40,209, 31,2014-01-16 09:49:31.241454,681,727,665, 31,2014-01-19 03:37:07.660582,255,234,297, 31,2014-01-19 07:02:56.716429,26,742,774, 31,2014-01-21 03:44:31.698853,87,858,761, 88,2014-01-16 23:23:44.087853,300,524,862, 31,2014-01-18 14:02:34.835498,314,94,394, 88,2014-01-18 20:22:03.732316,937,493,420, 31,2014-01-14 02:50:16.507739,788,621,880, 31,2014-01-21 05:18:57.760204,71,216,605, 31,2014-01-16 15:03:46.601279,40,643,483, 88,2014-01-20 18:31:04.141596,328,184,140, 88,2014-01-14 10:11:23.774324,376,586,117, 88,2014-01-18 01:48:36.625526,677,244,45, 88,2014-01-16 07:59:56.86427,874,975,850, 31,2014-01-14 16:43:44.648196,398,863,851, 31,2014-01-19 11:26:02.965713,454,36,295, 88,2014-01-20 23:34:02.56006,300,14,505, 88,2014-01-17 05:34:40.884753,89,363,849, 88,2014-01-17 19:00:25.063624,146,363,806, 31,2014-01-15 19:05:24.991763,857,630,743, 31,2014-01-15 12:44:22.192483,42,956,431, 31,2014-01-12 22:34:13.589495,343,473,38, 88,2014-01-19 06:36:12.667847,888,610,263, 31,2014-01-12 18:05:41.386856,158,873,198, 31,2014-01-14 07:34:58.159621,512,513,655, 88,2014-01-14 13:38:31.053025,602,682,392, 31,2014-01-19 20:43:17.983096,948,485,492, 88,2014-01-20 11:07:29.900433,546,64,964, 88,2014-01-12 21:26:58.203936,198,641,884, 31,2014-01-16 00:11:13.137481,68,20,684, 31,2014-01-17 01:11:17.302532,816,225,913, 88,2014-01-18 11:16:29.445391,916,320,66, 88,2014-01-13 21:48:20.798063,873,765,982, 31,2014-01-15 02:14:14.018961,473,450,796, 31,2014-01-18 09:01:01.144663,23,378,830, 88,2014-01-20 01:12:04.885916,45,557,425, 31,2014-01-12 
10:16:48.854458,290,614,32, 88,2014-01-15 23:23:04.876978,744,637,509, 31,2014-01-12 22:02:21.393654,289,639,515, 31,2014-01-12 09:35:46.614663,37,607,662, 31,2014-01-11 08:26:51.478257,142,27,309, 88,2014-01-12 10:33:25.535669,902,531,0, 88,2014-01-15 23:47:10.388986,5,540,611, 88,2014-01-18 14:15:28.103783,946,314,256, 31,2014-01-17 22:33:22.399939,191,864,510, 31,2014-01-19 20:30:55.037182,350,259,583, 31,2014-01-16 19:41:31.069497,604,551,244, 88,2014-01-17 16:07:24.959999,298,899,983, 31,2014-01-15 20:05:35.922355,324,964,571, 88,2014-01-13 08:41:56.13312,233,25,166, 31,2014-01-12 05:12:49.951761,290,994,870, 88,2014-01-20 12:49:58.114839,611,109,334, 31,2014-01-15 11:16:09.723034,479,276,981, 31,2014-01-20 15:49:02.619608,912,66,677, 88,2014-01-12 00:55:39.502114,367,641,294, 88,2014-01-18 22:51:07.882655,642,833,33, 31,2014-01-13 23:27:33.672633,770,562,283, 88,2014-01-21 01:38:57.485209,723,906,633, 88,2014-01-18 03:58:37.783089,426,821,618, 88,2014-01-16 00:54:31.87699,804,891,102, 88,2014-01-15 19:50:10.62269,362,803,406, 31,2014-01-13 23:09:55.085621,884,673,233, 88,2014-01-13 06:11:01.32238,720,561,376, 31,2014-01-18 21:38:31.420752,134,360,65, 31,2014-01-18 05:24:09.104582,985,144,553, 88,2014-01-20 19:12:02.467043,521,349,502, 31,2014-01-17 16:50:36.307088,489,301,768, 31,2014-01-19 19:40:57.959041,654,983,467, 31,2014-01-15 12:35:07.344021,483,269,485, 88,2014-01-19 18:52:57.700742,344,708,237, 88,2014-01-10 23:16:44.573703,472,886,693, 88,2014-01-16 01:01:58.822278,293,776,832, 88,2014-01-10 23:26:23.23641,15,741,541, 88,2014-01-16 03:03:54.963109,637,683,812, 88,2014-01-13 09:17:26.926062,954,612,361, 88,2014-01-18 01:59:45.636348,429,543,25, 31,2014-01-14 17:34:50.000292,781,392,912, 88,2014-01-19 08:58:57.995978,419,993,684, 88,2014-01-14 12:07:10.596347,829,416,371, 88,2014-01-19 17:40:25.922646,442,543,986, 31,2014-01-11 11:40:54.129098,107,335,121, 88,2014-01-15 21:20:00.548527,531,642,578, 88,2014-01-19 00:30:24.037903,615,857,99, 31,2014-01-16 02:57:03.852132,170,267,863, 31,2014-01-15 07:09:03.168136,151,637,944, 31,2014-01-20 05:26:03.540018,895,526,986, 88,2014-01-13 19:48:11.735207,694,943,906, 31,2014-01-18 10:36:36.841189,664,747,163, 31,2014-01-20 01:05:01.025226,720,573,941, 31,2014-01-21 03:46:49.518296,280,152,906, 31,2014-01-13 05:31:08.718178,494,567,64, 31,2014-01-14 14:55:11.647917,820,572,925, 31,2014-01-13 20:56:44.604335,972,651,944, 31,2014-01-15 15:42:10.040558,207,130,794, 31,2014-01-12 06:33:43.068669,617,116,968, 31,2014-01-10 20:20:53.708918,625,470,514, 88,2014-01-15 04:54:12.508021,208,108,728, 88,2014-01-19 03:24:19.375756,211,560,597, 88,2014-01-19 20:01:51.668378,822,49,160, 79,2014-01-19 21:29:19.852042,674,904,611, 33,2014-01-17 16:17:17.076917,684,698,261, 26,2014-01-19 23:18:36.242081,5,598,968, 26,2014-01-14 16:31:18.67474,297,219,448, 26,2014-01-17 19:43:40.313327,472,237,843, 87,2014-01-14 20:22:31.205191,54,741,999, 24,2014-01-17 05:48:45.600801,674,95,504, 24,2014-01-14 15:43:25.950095,447,192,110, 79,2014-01-18 17:57:21.205901,709,83,469, 33,2014-01-15 08:47:43.596779,36,89,519, 24,2014-01-18 07:50:36.546442,587,779,702, 94,2014-01-17 05:37:47.128547,960,819,240, 87,2014-01-16 00:28:37.726296,658,517,788, 33,2014-01-12 23:10:37.094969,792,914,729, 79,2014-01-15 00:08:11.16645,893,62,458, 94,2014-01-10 21:25:41.578428,442,375,713, 33,2014-01-17 10:19:40.263524,891,141,383, 79,2014-01-13 23:34:14.706468,324,130,677, 94,2014-01-13 21:13:53.313635,779,321,712, 94,2014-01-14 14:56:17.104713,106,796,573, 79,2014-01-12 
18:39:15.731694,334,931,296, 79,2014-01-13 19:00:42.83235,892,948,479, 33,2014-01-17 00:27:25.822891,684,450,972, 24,2014-01-16 13:34:27.379609,777,801,906, 26,2014-01-16 19:57:27.436685,274,386,503, 79,2014-01-11 10:09:35.863449,521,179,674, 94,2014-01-18 00:08:10.448698,282,633,883, 79,2014-01-16 20:18:21.145603,425,445,86, 94,2014-01-15 19:03:48.37147,531,257,647, 87,2014-01-15 21:32:29.824454,913,959,634, 79,2014-01-15 10:20:12.813981,434,524,219, 26,2014-01-14 10:33:08.22393,258,379,51, 79,2014-01-12 07:49:46.901371,297,729,827, 26,2014-01-14 03:38:49.056481,51,351,854, 94,2014-01-18 07:04:26.89867,819,337,424, 87,2014-01-19 07:33:27.215117,401,324,569, 87,2014-01-18 04:01:20.261672,332,170,901, 24,2014-01-14 06:53:12.499471,879,916,117, 94,2014-01-12 17:16:53.165211,274,96,150, 87,2014-01-15 15:58:41.467572,560,265,498, 24,2014-01-18 19:40:56.09667,940,186,684, 26,2014-01-20 05:07:29.711652,154,46,625, 79,2014-01-11 15:36:28.596538,726,458,49, 33,2014-01-13 14:09:33.822966,543,79,599, 87,2014-01-11 22:18:06.807041,175,846,726, 24,2014-01-15 19:44:39.762988,755,103,330, 94,2014-01-13 15:35:15.401394,463,768,433, 79,2014-01-18 12:37:47.070564,714,331,176, 24,2014-01-18 23:18:54.469874,51,370,679, 79,2014-01-16 16:49:08.715448,37,817,86, 87,2014-01-11 21:34:04.175276,218,811,670, 94,2014-01-20 21:58:10.201568,449,142,740, 79,2014-01-19 15:49:51.547798,776,94,89, 33,2014-01-18 02:01:29.998168,562,543,717, 87,2014-01-16 05:32:37.581597,826,156,828, 94,2014-01-15 05:47:18.984483,621,889,655, 26,2014-01-18 16:11:05.861617,917,510,445, 87,2014-01-12 23:40:48.030295,684,357,902, 87,2014-01-10 20:05:40.130087,647,603,252, 26,2014-01-13 05:14:54.233505,433,42,752, 24,2014-01-18 01:13:17.854748,312,528,925, 24,2014-01-15 10:25:52.944486,107,500,482, 26,2014-01-16 19:48:02.457435,741,161,796, 24,2014-01-19 13:03:04.756539,371,48,697, 26,2014-01-18 18:04:42.000969,304,967,900, 33,2014-01-13 20:52:29.356105,314,151,427, 79,2014-01-17 14:36:31.971655,441,128,465, 94,2014-01-15 16:06:02.26264,95,227,711, 24,2014-01-17 07:45:41.855577,843,474,117, 94,2014-01-19 11:53:25.136867,413,42,435, 94,2014-01-20 12:04:43.730631,478,755,28, 79,2014-01-14 21:26:37.952246,448,140,131, 87,2014-01-18 11:00:54.848519,30,520,268, 24,2014-01-10 21:41:12.327169,264,355,725, 79,2014-01-17 15:36:11.775211,886,332,522, 26,2014-01-19 13:19:01.655559,824,188,272, 24,2014-01-15 21:25:52.090156,779,157,700, 33,2014-01-20 11:11:27.177024,636,369,340, 24,2014-01-16 19:56:48.726123,526,334,544, 87,2014-01-13 14:44:46.56003,396,599,292, 79,2014-01-15 22:00:35.892473,216,526,542, 79,2014-01-17 21:30:52.9014,481,367,7, 79,2014-01-13 06:42:56.762018,74,241,997, 79,2014-01-14 07:50:27.440271,112,400,759, 87,2014-01-14 17:32:22.899567,664,910,456, 24,2014-01-18 16:15:34.343615,862,755,809, 26,2014-01-18 17:37:46.424755,784,706,372, 26,2014-01-12 03:43:28.761603,836,88,9, 87,2014-01-20 19:56:22.373909,369,316,926, 24,2014-01-18 17:43:26.554842,268,738,121, 94,2014-01-14 12:58:22.995108,167,720,588, 26,2014-01-17 15:09:40.228657,553,718,758, 87,2014-01-12 22:09:19.499329,58,502,673, 79,2014-01-20 12:46:25.452544,375,989,886, 24,2014-01-15 22:12:44.985196,503,625,135, 26,2014-01-20 20:14:01.500297,898,668,856, 94,2014-01-13 03:38:54.80865,448,220,161, 33,2014-01-12 06:49:16.956851,861,425,75, 87,2014-01-15 06:20:03.763356,173,123,509, 24,2014-01-19 15:24:36.664226,614,12,977, 79,2014-01-20 22:42:42.093718,938,143,677, 33,2014-01-14 12:24:47.493987,205,864,15, 87,2014-01-13 06:51:14.616891,157,842,586, 26,2014-01-18 
03:43:36.942655,693,607,482, 26,2014-01-14 14:05:59.821155,288,895,727, 79,2014-01-20 02:27:26.392102,824,380,707, 87,2014-01-16 11:02:38.598215,267,66,483, 94,2014-01-19 15:31:51.911312,790,397,257, 79,2014-01-19 07:38:53.569127,148,314,544, 24,2014-01-12 00:59:27.324756,610,191,724, 87,2014-01-12 00:16:38.471761,902,331,136, 87,2014-01-13 23:39:29.4616,553,29,102, 33,2014-01-19 02:30:20.226156,591,518,407, 24,2014-01-14 10:59:35.233779,340,661,54, 94,2014-01-17 11:29:56.901871,633,230,474, 24,2014-01-12 14:02:43.126142,184,311,819, 94,2014-01-11 21:15:09.577394,701,957,84, 79,2014-01-14 23:07:43.326626,788,340,92, 87,2014-01-13 21:46:11.887745,981,167,430, 87,2014-01-11 11:11:31.951303,309,196,188, 24,2014-01-12 10:51:09.881887,179,32,745, 94,2014-01-17 14:44:34.882853,988,779,669, 87,2014-01-18 06:21:12.17996,37,819,420, 94,2014-01-14 13:00:29.381216,397,226,326, 94,2014-01-16 21:31:00.335397,490,640,484, 94,2014-01-12 22:33:57.165575,939,536,874, 24,2014-01-14 03:14:30.881932,88,24,522, 26,2014-01-19 05:09:55.144046,486,266,935, 79,2014-01-14 09:23:14.122425,922,97,783, 33,2014-01-18 13:34:34.645289,572,848,301, 26,2014-01-17 14:34:31.808272,651,957,635, 33,2014-01-14 02:05:56.216561,300,630,319, 79,2014-01-11 19:59:22.139275,443,153,990, 33,2014-01-20 01:25:46.425164,94,714,857, 26,2014-01-10 23:49:33.159217,470,948,591, 33,2014-01-15 14:05:21.960431,119,312,504, 94,2014-01-18 21:53:12.817685,443,231,106, 79,2014-01-16 14:52:11.757431,706,911,56, 87,2014-01-13 23:37:13.871743,57,855,158, 26,2014-01-16 23:32:06.386811,569,166,573, 24,2014-01-17 19:51:39.082188,623,468,812, 79,2014-01-15 03:53:52.343505,329,355,982, 26,2014-01-20 03:11:35.848831,376,85,922, 24,2014-01-15 16:21:59.308763,637,812,775, 79,2014-01-18 18:53:27.577284,425,478,566, 26,2014-01-16 08:41:32.750702,884,181,689, 33,2014-01-17 10:24:42.434905,496,240,793, 26,2014-01-19 20:08:37.154678,337,809,471, 79,2014-01-20 11:49:16.077747,343,476,628, 87,2014-01-20 12:10:54.32265,953,435,790, 87,2014-01-20 11:20:09.105981,752,729,783, 79,2014-01-11 16:40:25.959633,245,480,634, 94,2014-01-16 20:55:29.205502,801,115,113, 26,2014-01-17 11:41:21.28636,75,518,887, 87,2014-01-15 09:40:55.34085,428,792,252, 94,2014-01-12 12:26:29.540899,233,922,349, 33,2014-01-19 14:15:18.451935,102,819,716, 87,2014-01-18 16:55:26.222782,628,609,287, 87,2014-01-20 21:36:24.684945,614,115,209, 33,2014-01-12 17:38:32.57436,782,893,991, 24,2014-01-16 00:30:00.86807,557,310,649, 24,2014-01-17 06:10:56.493217,186,421,485, 24,2014-01-15 23:44:28.790922,841,118,637, 26,2014-01-17 00:29:23.007346,540,817,503, 87,2014-01-16 01:36:42.9188,586,746,785, 33,2014-01-16 03:34:01.95014,77,512,294, 26,2014-01-11 08:34:44.967776,734,371,921, 24,2014-01-13 17:29:55.736484,653,158,499, 33,2014-01-11 12:26:13.70799,433,515,415, 33,2014-01-14 12:11:58.839939,234,728,652, 94,2014-01-19 21:02:02.123297,87,533,258, 94,2014-01-18 12:17:52.790178,774,389,791, 87,2014-01-18 20:05:51.183444,740,358,356, 87,2014-01-18 18:13:37.972127,540,19,220, 24,2014-01-12 22:39:52.098941,864,900,772, 94,2014-01-16 08:59:18.761147,274,875,703, 79,2014-01-13 20:55:10.723248,339,990,195, 94,2014-01-19 13:04:34.533846,204,945,817, 26,2014-01-14 23:07:55.915825,548,607,257, 79,2014-01-13 02:44:26.800995,313,675,12, 26,2014-01-18 19:15:28.856496,417,655,270, 87,2014-01-14 04:28:05.022226,165,29,72, 79,2014-01-13 23:24:52.760628,828,758,248, 33,2014-01-14 10:10:58.061998,836,920,182, 24,2014-01-20 20:09:26.308585,276,879,1, 79,2014-01-18 13:05:48.101478,881,562,388, 94,2014-01-16 
02:37:27.602897,637,807,924, 26,2014-01-19 04:24:44.760521,235,826,148, 87,2014-01-16 00:01:14.324679,150,385,541, 87,2014-01-15 18:13:52.287842,264,925,308, 94,2014-01-21 02:03:17.334881,444,713,432, 33,2014-01-21 04:31:15.192749,917,634,970, 26,2014-01-11 18:24:48.781477,84,358,309, 33,2014-01-15 19:47:46.126223,116,170,889, 87,2014-01-16 23:00:38.200514,720,399,782, 94,2014-01-17 00:01:31.700277,430,532,618, 24,2014-01-21 03:21:48.076362,987,683,593, 33,2014-01-17 11:35:23.16829,506,957,171, 94,2014-01-19 21:31:27.436761,801,585,60, 94,2014-01-11 09:48:01.784352,17,943,363, 26,2014-01-21 03:47:22.008228,933,321,920, 26,2014-01-18 12:33:29.560058,800,626,17, 26,2014-01-19 02:05:54.574949,968,312,948, 33,2014-01-18 17:53:13.192092,704,822,127, 87,2014-01-16 00:47:07.532605,550,684,828, 94,2014-01-21 04:45:46.673891,690,314,861, 24,2014-01-13 20:52:31.953239,460,568,935, 26,2014-01-19 01:42:18.255853,129,819,614, 94,2014-01-19 11:50:21.208155,522,860,257, 26,2014-01-18 00:00:27.869064,407,473,705, 94,2014-01-10 22:26:45.056848,666,590,158, 24,2014-01-17 01:05:50.064651,558,322,177, 33,2014-01-10 22:28:32.89129,43,166,917, 87,2014-01-14 01:51:37.817476,536,12,18, 26,2014-01-20 15:16:48.127068,601,207,865, 79,2014-01-20 22:37:59.199875,895,951,11, 24,2014-01-11 08:57:25.918954,631,481,584, 26,2014-01-15 11:54:15.729964,88,974,90, 33,2014-01-18 21:02:43.960396,377,155,485, 79,2014-01-16 12:58:40.243633,420,698,773, 33,2014-01-20 10:08:08.018224,978,566,315, 87,2014-01-18 17:06:01.295697,206,533,285, 24,2014-01-16 11:29:55.436801,505,781,307, 33,2014-01-10 22:32:56.799701,936,731,739, 79,2014-01-13 06:53:47.42192,97,40,467, 33,2014-01-12 04:30:33.637315,488,527,527, 26,2014-01-17 02:34:28.499978,941,984,10, 26,2014-01-13 04:15:35.498282,698,208,817, 26,2014-01-18 20:05:56.805605,40,193,356, 26,2014-01-15 18:05:55.936741,405,22,59, 94,2014-01-13 18:03:37.283053,311,818,678, 24,2014-01-18 17:53:18.814253,248,417,228, 24,2014-01-13 00:39:25.496799,513,887,215, 79,2014-01-11 14:09:31.858003,483,337,164, 94,2014-01-16 05:46:32.006344,725,814,839, 79,2014-01-18 05:26:33.029403,913,854,980, 24,2014-01-11 12:55:18.531893,600,672,52, 87,2014-01-19 06:39:03.959582,58,232,536, 24,2014-01-16 01:08:51.285256,776,185,469, 24,2014-01-20 04:45:39.740048,669,146,440, 94,2014-01-16 00:39:31.828646,139,567,841, 94,2014-01-16 03:35:36.342104,289,823,525, 24,2014-01-15 23:51:29.805118,708,351,593, 26,2014-01-16 03:08:04.720354,996,92,674, 26,2014-01-19 09:27:14.15958,595,644,360, 26,2014-01-15 09:08:17.932184,679,221,317, 79,2014-01-15 19:46:03.920229,85,616,978, 94,2014-01-19 22:24:40.078953,801,725,796, 94,2014-01-20 01:02:33.662149,169,289,79, 33,2014-01-13 10:48:47.881045,857,750,269, 94,2014-01-15 05:23:20.322586,596,988,307, 33,2014-01-19 05:10:41.680373,632,23,497, 79,2014-01-10 21:54:49.176742,536,834,96, 94,2014-01-20 20:53:15.759388,171,480,2, 26,2014-01-19 07:43:38.480074,426,508,694, 26,2014-01-13 08:48:36.59866,789,216,574, 87,2014-01-11 19:23:49.396703,847,265,452, 94,2014-01-15 04:18:06.980053,214,190,616, 33,2014-01-15 17:04:12.096943,670,140,369, 87,2014-01-19 19:29:46.202727,537,860,201, 26,2014-01-20 02:24:02.916793,172,350,989, 94,2014-01-18 15:07:49.379996,490,321,340, 94,2014-01-17 07:23:05.016979,512,446,86, 33,2014-01-11 21:03:28.41401,738,162,384, 26,2014-01-19 09:17:21.237999,797,295,458, 94,2014-01-12 07:09:37.023324,25,753,659, 87,2014-01-19 06:30:01.443414,814,80,725, 33,2014-01-20 02:12:39.77031,618,51,437, 79,2014-01-20 17:48:40.982906,43,480,290, 87,2014-01-14 
01:38:52.72909,267,443,649, 94,2014-01-19 00:58:19.510358,910,988,164, 94,2014-01-15 12:28:12.811552,853,865,19, 24,2014-01-19 09:14:29.071193,131,978,13, 87,2014-01-13 18:49:49.315476,302,431,757, 24,2014-01-20 19:36:17.531906,220,309,361, 79,2014-01-17 12:41:43.231192,696,789,390, 94,2014-01-18 07:58:07.247661,187,629,147, 26,2014-01-15 09:22:21.452554,24,966,760, 94,2014-01-16 05:06:23.310145,533,975,430, 87,2014-01-17 03:00:40.910229,714,871,131, 24,2014-01-18 00:11:09.333599,455,572,214, 87,2014-01-20 14:29:43.632731,833,231,467, 24,2014-01-15 02:11:22.590601,268,2,171, 33,2014-01-18 02:05:58.51034,92,679,388, 33,2014-01-20 05:22:59.392539,309,602,829, 24,2014-01-13 03:55:01.071095,166,697,566, 33,2014-01-20 14:54:35.109001,432,517,565, 87,2014-01-21 04:46:48.789241,55,881,460, 26,2014-01-17 12:13:08.051147,192,13,492, 94,2014-01-15 01:58:47.205944,250,821,398, 33,2014-01-19 18:16:34.991968,412,457,19, 79,2014-01-16 08:37:10.968359,326,369,889, 87,2014-01-12 11:06:36.586359,951,564,869, 87,2014-01-15 19:39:40.008948,443,515,599, 79,2014-01-17 09:40:39.38237,703,917,232, 87,2014-01-21 00:23:57.824357,322,62,776, 33,2014-01-17 06:49:17.032271,155,444,220, 26,2014-01-15 10:10:40.825783,412,399,301, 79,2014-01-19 20:36:37.594669,348,337,99, 87,2014-01-16 18:37:58.015177,883,504,235, 24,2014-01-18 15:49:33.554873,659,249,164, 94,2014-01-17 15:34:57.105446,972,471,55, 33,2014-01-11 01:06:10.827148,495,388,33, 87,2014-01-16 19:04:02.626485,812,975,441, 94,2014-01-20 14:24:46.420922,589,16,809, 94,2014-01-21 00:42:28.359053,693,59,254, 26,2014-01-13 01:45:45.857677,870,752,821, 94,2014-01-17 16:22:53.669002,772,550,201, 79,2014-01-15 04:04:49.811608,532,56,214, 87,2014-01-18 10:52:09.167822,841,536,856, 87,2014-01-13 13:23:34.579231,866,495,973, 33,2014-01-11 22:15:59.145206,120,416,691, 24,2014-01-17 19:21:52.800554,275,402,672, 24,2014-01-17 19:34:57.169832,903,717,34, 33,2014-01-19 04:21:57.655547,335,733,555, 87,2014-01-16 18:44:52.193092,694,203,890, 94,2014-01-20 03:29:58.240927,321,268,237, 94,2014-01-18 13:16:32.764548,249,917,294, 33,2014-01-16 17:31:40.982752,652,935,265, 79,2014-01-16 09:43:06.292493,665,67,300, 87,2014-01-12 09:15:19.97091,186,352,382, 33,2014-01-15 05:48:15.97472,344,405,310, 87,2014-01-11 12:20:17.260852,744,319,508, 79,2014-01-14 00:21:56.557269,949,157,532, 87,2014-01-20 05:27:55.984086,62,744,881, 24,2014-01-18 02:00:56.643222,107,833,404, 33,2014-01-13 18:45:54.381627,697,37,575, 26,2014-01-16 06:17:13.016357,71,63,145, 94,2014-01-12 06:11:37.469006,804,463,664, 26,2014-01-12 09:22:31.976715,843,109,521, 33,2014-01-11 18:55:11.031953,580,685,3, 24,2014-01-20 02:01:11.024298,504,656,339, 79,2014-01-19 04:57:29.08216,218,546,385, 24,2014-01-12 00:01:21.859101,384,792,578, 24,2014-01-15 15:05:13.650783,9,77,475, 26,2014-01-18 13:22:15.503082,634,779,468, 26,2014-01-11 18:43:50.218155,283,648,727, 24,2014-01-17 20:50:59.50846,942,312,831, 26,2014-01-14 23:45:09.172085,911,495,414, 87,2014-01-16 02:48:40.030182,646,47,595, 33,2014-01-15 01:43:08.676282,512,551,738, 26,2014-01-17 17:08:43.751317,143,117,849, 33,2014-01-17 05:04:39.175389,327,218,515, 94,2014-01-11 15:05:01.476836,476,999,354, 87,2014-01-14 06:43:40.921568,511,207,678, 33,2014-01-15 03:26:36.831354,658,248,192, 87,2014-01-17 13:49:53.670348,329,851,66, 33,2014-01-13 04:13:39.162495,592,484,642, 94,2014-01-12 10:43:09.595902,635,654,158, 87,2014-01-13 01:21:34.6531,492,678,766, 26,2014-01-18 17:56:45.454989,94,79,837, 24,2014-01-13 23:58:29.566811,897,721,105, 79,2014-01-17 
11:09:50.62782,508,290,409, 33,2014-01-19 10:17:02.715841,676,725,763, 24,2014-01-17 04:20:26.124081,350,824,168, 79,2014-01-16 10:37:46.611906,829,5,29, 24,2014-01-16 06:17:59.359062,764,652,639, 79,2014-01-20 03:06:20.506127,964,113,82, 94,2014-01-11 10:54:59.628682,487,806,373, 26,2014-01-17 16:29:36.828486,519,57,720, 87,2014-01-11 06:28:52.482842,573,39,13, 94,2014-01-12 09:50:10.660636,705,213,139, 24,2014-01-16 12:30:47.852784,827,61,928, 87,2014-01-19 15:26:21.565002,555,201,867, 94,2014-01-13 13:51:32.519738,310,421,767, 26,2014-01-10 21:36:01.503568,478,500,198, 79,2014-01-16 22:48:37.068084,921,965,528, 94,2014-01-14 12:35:22.737893,421,332,194, 26,2014-01-17 22:27:01.012028,665,796,485, 87,2014-01-21 02:33:46.240589,62,784,512, 33,2014-01-19 19:24:02.768075,996,266,534, 24,2014-01-11 18:10:09.688311,353,584,102, 94,2014-01-17 13:42:29.991905,702,976,695, 94,2014-01-15 18:28:41.943882,259,955,852, 94,2014-01-12 13:15:11.165148,702,682,224, 24,2014-01-21 00:26:10.913474,762,921,66, 33,2014-01-20 01:55:18.775236,861,521,255, 79,2014-01-19 07:05:04.835496,935,797,335, 33,2014-01-12 22:39:50.076388,628,888,658, 94,2014-01-11 06:38:28.371138,870,256,665, 79,2014-01-11 02:26:39.488595,79,958,822, 24,2014-01-20 20:36:35.531376,889,219,648, 94,2014-01-14 10:36:57.938369,177,645,692, 24,2014-01-17 17:36:30.116834,373,691,836, 33,2014-01-19 00:53:38.247635,675,431,602, 33,2014-01-20 18:57:24.06245,735,234,806, 87,2014-01-12 22:14:16.72874,852,788,195, 26,2014-01-14 01:11:37.606697,973,833,67, 94,2014-01-19 16:03:44.568577,192,146,958, 33,2014-01-13 13:09:16.357423,774,410,241, 26,2014-01-20 21:41:14.435183,305,69,117, 87,2014-01-20 02:32:37.051418,415,900,596, 94,2014-01-15 02:59:27.018059,75,820,915, 26,2014-01-16 04:12:02.287969,79,882,607, 24,2014-01-18 11:58:58.61642,343,761,244, 79,2014-01-17 20:50:59.537797,815,562,250, 87,2014-01-16 05:48:03.791536,523,923,270, 33,2014-01-14 04:47:35.684923,733,534,581, 26,2014-01-11 03:26:22.276109,806,940,480, 94,2014-01-12 22:15:04.803565,3,555,614, 94,2014-01-14 01:21:21.925511,72,245,934, 24,2014-01-20 02:50:25.044183,249,424,314, 26,2014-01-13 20:25:14.491876,173,737,46, 24,2014-01-20 19:03:51.917417,970,404,410, 94,2014-01-14 15:19:06.988066,882,327,363, 94,2014-01-15 13:40:25.657443,98,758,736, 79,2014-01-20 13:30:02.831309,217,306,147, 94,2014-01-13 11:14:25.763302,318,53,993, 79,2014-01-13 14:45:30.492938,366,4,917, 24,2014-01-12 06:09:52.907697,482,441,625, 87,2014-01-13 21:52:54.134858,982,881,147, 87,2014-01-13 21:12:09.981952,761,515,929, 87,2014-01-11 20:46:28.439073,2,528,311, 24,2014-01-17 12:29:52.073227,597,158,90, 33,2014-01-20 18:48:40.098786,126,675,686, 26,2014-01-20 01:40:06.686708,863,921,639, 87,2014-01-17 01:27:16.135677,15,682,103, 87,2014-01-12 11:02:56.827526,531,912,611, 87,2014-01-12 20:51:44.293824,887,651,321, 26,2014-01-15 11:31:00.704673,190,384,720, 26,2014-01-15 04:12:13.185368,5,996,791, 94,2014-01-12 12:32:58.729007,684,716,420, 79,2014-01-14 08:03:37.756091,276,1,130, 87,2014-01-19 11:11:40.203427,499,297,793, 79,2014-01-17 20:45:01.017396,455,506,98, 79,2014-01-11 14:02:36.37293,830,84,504, 79,2014-01-16 02:02:39.741644,693,373,308, 87,2014-01-12 20:33:04.808932,495,569,778, 87,2014-01-14 22:50:12.057853,766,825,605, 33,2014-01-16 09:29:02.017752,592,446,70, 79,2014-01-14 22:48:09.612916,646,762,466, 79,2014-01-18 04:11:33.983365,725,12,180, 87,2014-01-15 06:19:27.061935,295,943,12, 79,2014-01-17 23:13:24.104792,367,785,960, 79,2014-01-17 17:15:25.900781,727,853,752, 94,2014-01-19 
01:38:34.050421,133,590,452, 26,2014-01-12 06:53:49.762234,518,346,242, 94,2014-01-17 00:45:28.732092,623,749,335, 94,2014-01-11 06:52:59.813722,322,825,298, 33,2014-01-15 01:39:20.255592,490,228,271, 87,2014-01-18 10:55:21.639789,167,75,653, 26,2014-01-14 08:45:53.948581,237,244,397, 87,2014-01-18 02:51:30.237544,299,454,910, 24,2014-01-19 11:41:50.07928,748,478,984, 33,2014-01-21 01:15:46.021808,445,771,264, 94,2014-01-17 15:40:10.336329,857,188,354, 24,2014-01-18 07:21:56.765989,230,928,359, 94,2014-01-16 20:43:02.157904,91,920,181, 79,2014-01-19 06:43:07.163855,854,461,768, 33,2014-01-20 08:13:41.059813,864,649,240, 79,2014-01-11 02:14:02.862578,478,893,500, 87,2014-01-13 04:55:20.349224,681,598,209, 26,2014-01-11 14:46:39.78924,914,669,970, 24,2014-01-14 14:17:40.618669,633,541,152, 24,2014-01-11 10:07:00.55307,823,601,474, 33,2014-01-18 15:31:40.806634,1,910,826, 26,2014-01-15 08:20:16.991599,66,956,741, 87,2014-01-16 16:09:40.294714,994,949,387, 26,2014-01-20 16:04:45.615567,983,139,968, 33,2014-01-19 11:10:29.049451,419,601,899, 24,2014-01-11 19:38:42.312466,542,294,782, 26,2014-01-14 08:52:55.228482,339,741,967, 26,2014-01-16 09:22:03.032816,457,313,223, 24,2014-01-16 05:58:09.374401,481,341,237, 24,2014-01-11 02:06:19.333693,119,344,917, 94,2014-01-12 20:37:28.934017,386,76,466, 33,2014-01-14 01:36:43.424822,423,154,429, 24,2014-01-12 13:00:09.095927,698,125,802, 24,2014-01-19 01:22:57.666107,843,154,639, 26,2014-01-14 12:29:43.238964,762,988,536, 26,2014-01-16 18:39:29.351519,355,423,327, 79,2014-01-16 06:18:19.306316,249,847,299, 26,2014-01-18 01:15:37.187544,498,736,994, 33,2014-01-13 15:30:59.589061,43,546,615, 87,2014-01-14 12:00:09.385596,573,260,766, 87,2014-01-17 20:31:23.209771,676,83,300, 94,2014-01-20 11:11:09.925392,312,823,974, 94,2014-01-11 13:22:06.151584,939,588,878, 24,2014-01-13 11:14:25.367676,713,556,807, 87,2014-01-18 11:54:17.089666,773,925,819, 94,2014-01-21 01:35:47.211817,900,116,408, 26,2014-01-13 17:28:28.230254,568,160,1000, 79,2014-01-20 20:49:37.43889,522,797,883, 24,2014-01-11 10:22:27.001055,139,585,125, 33,2014-01-17 11:46:08.848923,439,104,733, 79,2014-01-11 00:56:37.991959,431,964,886, 33,2014-01-19 05:54:07.807691,71,440,487, 79,2014-01-11 14:06:25.84094,700,387,583, 33,2014-01-16 21:06:18.286673,213,560,686, 33,2014-01-18 15:58:53.423257,84,614,534, 94,2014-01-20 05:16:54.890392,173,712,895, 87,2014-01-17 20:45:00.599139,478,28,318, 87,2014-01-11 18:51:48.652158,220,449,635, 79,2014-01-15 08:38:57.923626,647,876,616, 79,2014-01-12 20:43:09.973959,510,574,921, 24,2014-01-12 00:58:07.985852,769,348,290, 87,2014-01-17 09:16:26.857643,519,539,869, 79,2014-01-16 02:19:53.398781,371,630,597, 94,2014-01-13 17:58:17.081779,972,828,53, 87,2014-01-15 04:39:24.52375,961,584,371, 26,2014-01-19 18:49:36.637744,119,587,350, 26,2014-01-19 16:37:46.433717,944,339,608, 87,2014-01-20 14:57:43.830066,993,592,438, 24,2014-01-16 14:05:13.825706,168,740,2, 87,2014-01-12 02:08:46.022779,63,544,495, 87,2014-01-13 20:57:53.215661,803,482,191, 33,2014-01-13 04:36:37.035478,528,636,446, 79,2014-01-11 07:19:55.94859,600,157,423, 33,2014-01-14 14:19:59.367665,541,526,626, 26,2014-01-15 19:51:02.403154,766,890,374, 79,2014-01-18 23:14:13.038255,262,56,117, 79,2014-01-14 09:55:46.579481,812,341,592, 26,2014-01-18 17:19:30.633407,490,149,724, 24,2014-01-18 14:03:50.477145,497,112,797, 87,2014-01-15 00:18:13.580536,811,401,864, 33,2014-01-14 23:05:39.48233,347,116,938, 79,2014-01-18 19:00:28.469523,164,479,942, 94,2014-01-13 00:12:21.388227,563,95,799, 24,2014-01-15 
17:12:05.32327,192,780,675, 79,2014-01-14 10:06:46.756196,515,290,265, 94,2014-01-20 20:11:14.811484,674,701,181, 87,2014-01-14 16:29:00.214081,481,857,705, 79,2014-01-11 00:51:47.355753,430,976,247, 24,2014-01-11 09:03:03.463643,723,869,492, 26,2014-01-19 05:07:58.137707,190,221,879, 79,2014-01-13 01:34:57.329712,740,185,848, 94,2014-01-12 14:01:11.449495,436,984,724, 33,2014-01-15 08:24:24.99535,752,193,271, 33,2014-01-18 07:54:50.728493,883,758,899, 24,2014-01-15 11:59:28.531694,982,948,363, 24,2014-01-19 17:03:49.5191,965,814,697, 87,2014-01-16 20:44:27.366656,511,854,326, 33,2014-01-13 22:37:14.96541,7,971,19, 33,2014-01-19 02:01:33.349584,55,304,738, 79,2014-01-12 04:49:41.192363,113,66,393, 79,2014-01-15 04:46:00.98819,526,785,708, 94,2014-01-11 16:59:26.565246,504,322,34, 87,2014-01-14 13:26:18.227841,252,237,506, 26,2014-01-15 16:05:56.936778,763,462,23, 24,2014-01-15 11:19:25.93291,264,654,976, 87,2014-01-19 13:17:20.630995,567,80,645, 26,2014-01-13 09:20:09.975033,177,730,855, 24,2014-01-19 01:15:12.512391,315,804,159, 33,2014-01-17 00:36:51.264402,941,885,111, 79,2014-01-21 03:24:00.452597,951,872,708, 87,2014-01-12 19:33:26.092928,493,529,883, 24,2014-01-21 03:42:30.746732,405,951,27, 87,2014-01-18 16:24:28.922121,242,793,178, 26,2014-01-14 23:45:47.481154,336,423,875, 33,2014-01-15 14:54:36.070422,180,964,641, 33,2014-01-11 20:31:15.678316,631,893,253, 24,2014-01-14 13:57:02.293058,969,590,987, 94,2014-01-19 11:23:36.284503,853,984,42, 26,2014-01-12 01:23:03.03407,58,317,201, 24,2014-01-15 03:00:05.756701,397,547,520, 87,2014-01-17 10:31:34.422209,629,698,412, 33,2014-01-14 06:58:00.363781,597,443,496, 24,2014-01-16 21:01:17.206616,483,197,45, 26,2014-01-11 12:55:59.417559,354,273,704, 33,2014-01-11 08:52:51.092694,993,76,343, 24,2014-01-11 03:00:45.738309,350,812,143, 79,2014-01-20 09:59:48.936659,767,860,365, 87,2014-01-17 09:37:18.45935,624,602,878, 94,2014-01-14 05:38:00.70372,302,971,296, 24,2014-01-18 06:01:22.286243,649,536,265, 24,2014-01-18 18:26:59.651712,771,861,961, 79,2014-01-18 14:24:01.691908,594,143,765, 24,2014-01-19 03:00:48.851907,494,109,106, 94,2014-01-12 01:53:17.879553,611,18,996, 33,2014-01-13 00:29:58.629106,465,56,47, 26,2014-01-13 08:20:14.784818,399,312,63, 33,2014-01-20 19:10:38.510548,471,883,440, 87,2014-01-15 13:50:08.60414,668,292,16, 87,2014-01-11 03:35:27.297628,886,39,516, 33,2014-01-16 13:47:29.77495,234,471,828, 26,2014-01-15 11:14:09.056737,492,32,461, 33,2014-01-13 03:08:53.390555,980,600,420, 87,2014-01-16 11:30:00.521683,18,60,949, 26,2014-01-12 21:38:37.978857,343,940,641, 87,2014-01-17 06:54:40.872129,295,119,53, 33,2014-01-10 20:24:36.592105,115,811,583, 87,2014-01-13 22:09:53.657593,918,148,748, 26,2014-01-21 00:51:43.165187,499,680,681, 79,2014-01-19 11:48:12.876607,690,125,965, 24,2014-01-15 03:32:56.691662,30,523,827, 26,2014-01-14 21:51:48.921889,695,185,462, 26,2014-01-15 16:19:47.298818,492,525,938, 26,2014-01-18 14:30:57.055863,923,955,892, 24,2014-01-20 22:53:06.128504,191,544,308, 26,2014-01-16 09:15:46.716376,569,433,518, 33,2014-01-19 03:23:48.148557,368,995,751, 87,2014-01-21 05:53:51.866813,850,180,199, 94,2014-01-15 13:15:35.653454,159,426,643, 33,2014-01-15 07:01:06.607906,817,659,532, 94,2014-01-14 05:31:52.570532,355,752,738, 33,2014-01-12 13:16:57.939698,413,206,577, 87,2014-01-12 19:28:06.259619,566,911,8, 24,2014-01-11 13:55:54.262859,631,959,890, 26,2014-01-20 20:17:46.791605,352,269,864, 87,2014-01-14 01:21:24.139172,898,844,324, 79,2014-01-13 18:25:52.891966,456,215,949, 24,2014-01-12 
22:38:01.576842,701,579,713, 33,2014-01-13 14:32:02.64972,240,436,244, 94,2014-01-18 12:16:01.496106,543,629,600, 94,2014-01-13 06:13:28.87447,691,38,412, 33,2014-01-19 08:19:32.424671,702,357,890, 87,2014-01-12 17:30:10.553262,217,175,417, 26,2014-01-15 13:22:22.265026,864,716,516, 33,2014-01-14 13:49:32.946353,920,863,830, 24,2014-01-14 19:08:48.53212,829,908,186, 24,2014-01-11 14:17:03.137155,331,942,533, 79,2014-01-14 14:14:09.538878,719,875,14, 26,2014-01-17 21:18:42.189712,891,106,753, 94,2014-01-11 09:08:46.302342,191,411,924, 33,2014-01-12 20:02:22.415485,665,928,252, 94,2014-01-11 18:51:38.881794,975,995,540, 24,2014-01-15 11:00:35.22465,980,886,297, 26,2014-01-17 16:22:09.714722,741,763,375, 26,2014-01-19 13:22:35.937656,190,495,277, 26,2014-01-15 03:53:41.353153,49,46,179, 26,2014-01-12 19:37:56.431098,654,198,514, 24,2014-01-17 10:46:24.086214,554,930,979, 33,2014-01-15 03:47:33.220385,377,296,231, 79,2014-01-17 12:53:32.084552,672,135,119, 94,2014-01-11 11:47:30.69412,281,721,861, 87,2014-01-18 13:19:25.790918,641,45,141, 87,2014-01-19 06:10:30.024669,372,894,722, 87,2014-01-13 11:15:36.95374,35,583,544, 79,2014-01-19 07:15:20.053777,928,804,479, 87,2014-01-18 20:28:16.816274,604,97,960, 87,2014-01-16 16:37:01.092911,62,561,596, 10,2014-01-11 19:41:12.945743,648,942,487, 35,2014-01-20 23:06:18.393117,824,879,49, 92,2014-01-19 11:09:03.742631,705,625,307, 92,2014-01-19 11:57:14.442268,96,574,335, 35,2014-01-12 23:19:47.267586,189,720,645, 92,2014-01-17 13:28:36.167302,146,10,210, 35,2014-01-10 23:27:24.99553,822,627,475, 35,2014-01-17 16:42:09.533031,582,758,511, 35,2014-01-10 21:18:09.113656,332,788,707, 10,2014-01-14 22:36:13.527649,614,594,478, 92,2014-01-18 10:59:12.670185,914,536,249, 92,2014-01-14 15:32:18.652533,340,247,469, 10,2014-01-11 13:54:55.71778,506,73,911, 92,2014-01-19 00:07:58.972947,430,223,369, 35,2014-01-16 15:34:41.068438,236,112,462, 10,2014-01-12 12:46:34.599574,377,157,887, 92,2014-01-13 05:08:34.197597,972,462,474, 92,2014-01-13 01:56:50.783159,264,95,925, 35,2014-01-10 20:09:10.53723,881,585,910, 10,2014-01-17 13:02:15.55075,982,517,432, 10,2014-01-15 01:34:47.214257,294,206,538, 10,2014-01-17 10:55:34.623444,613,376,901, 35,2014-01-11 10:49:48.771135,824,218,853, 35,2014-01-11 08:28:19.299228,884,935,291, 10,2014-01-18 02:43:05.317564,660,568,699, 92,2014-01-19 04:09:14.562472,865,412,221, 92,2014-01-19 18:38:49.323898,313,771,652, 92,2014-01-20 17:58:42.271303,965,862,616, 92,2014-01-17 05:24:34.616249,888,549,62, 35,2014-01-17 09:07:06.140172,409,312,413, 10,2014-01-16 04:35:43.364215,161,169,442, 92,2014-01-18 05:05:47.562411,362,489,142, 35,2014-01-17 02:13:24.533707,817,288,580, 10,2014-01-14 09:44:47.106846,161,314,264, 35,2014-01-16 11:03:02.004679,13,800,379, 10,2014-01-19 05:33:11.801293,877,448,568, 35,2014-01-21 03:13:23.274148,951,582,371, 10,2014-01-16 14:30:27.000209,134,760,463, 10,2014-01-15 16:15:21.334324,792,525,431, 35,2014-01-21 04:31:32.387804,800,489,780, 10,2014-01-20 17:06:40.527858,325,298,699, 35,2014-01-12 21:14:34.004929,563,707,739, 92,2014-01-14 14:03:51.040757,552,616,508, 35,2014-01-11 01:01:36.245639,6,758,358, 92,2014-01-21 01:22:32.977876,348,602,433, 10,2014-01-20 09:38:32.109195,277,793,872, 35,2014-01-12 17:48:10.845214,267,838,570, 10,2014-01-13 00:31:07.175472,342,732,413, 35,2014-01-12 05:35:22.892354,202,173,416, 10,2014-01-12 17:57:21.382444,53,749,772, 92,2014-01-19 17:33:22.726222,432,587,734, 10,2014-01-16 11:10:10.106611,575,62,558, 10,2014-01-19 08:52:56.005887,303,113,20, 10,2014-01-20 
08:23:11.497777,508,422,302, 10,2014-01-16 23:38:29.405841,654,254,215, 92,2014-01-16 05:36:01.323451,479,68,324, 35,2014-01-18 06:32:26.060249,995,828,797, 10,2014-01-15 12:17:18.729738,289,187,569, 10,2014-01-15 17:34:43.594753,700,123,267, 92,2014-01-14 05:57:00.676497,229,466,373, 35,2014-01-11 15:24:24.870328,84,447,896, 92,2014-01-21 02:10:26.958968,94,920,263, 92,2014-01-11 05:02:48.238908,530,992,934, 35,2014-01-17 21:37:49.404035,372,642,829, 35,2014-01-14 05:55:14.065814,4,510,177, 10,2014-01-16 20:05:50.243587,215,699,919, 10,2014-01-15 21:11:01.205328,767,854,469, 92,2014-01-14 03:08:37.339961,49,834,470, 35,2014-01-12 04:36:17.243797,473,430,778, 35,2014-01-20 17:26:22.540072,4,475,82, 10,2014-01-14 01:40:09.728185,117,829,362, 10,2014-01-11 15:42:57.772074,410,780,663, 35,2014-01-12 08:40:56.545001,423,338,671, 92,2014-01-17 19:44:00.768943,137,457,614, 92,2014-01-11 20:44:34.017713,234,9,879, 92,2014-01-12 04:03:29.522876,918,373,478, 10,2014-01-16 23:22:32.878138,719,976,537, 92,2014-01-13 18:32:44.863345,114,696,500, 92,2014-01-14 08:34:36.698348,762,907,781, 35,2014-01-18 08:57:55.770492,432,203,945, 35,2014-01-15 16:30:06.245789,589,2,66, 10,2014-01-12 20:07:59.424571,939,152,43, 35,2014-01-13 14:08:05.877104,856,652,257, 92,2014-01-13 19:23:02.252095,427,358,107, 92,2014-01-11 22:31:10.922347,768,530,782, 92,2014-01-19 17:46:35.283363,379,652,409, 35,2014-01-19 04:59:03.575546,275,567,507, 10,2014-01-19 09:03:36.982596,304,280,828, 35,2014-01-14 00:03:54.013101,961,67,177, 10,2014-01-13 16:33:47.170719,833,206,322, 92,2014-01-12 09:00:37.659512,808,934,689, 10,2014-01-14 19:28:18.883429,778,81,529, 35,2014-01-13 12:44:14.129686,930,356,201, 92,2014-01-12 18:03:25.898421,936,958,738, 10,2014-01-11 11:06:08.287465,536,747,939, 35,2014-01-16 22:39:28.195919,823,307,560, 10,2014-01-18 18:09:16.142427,699,593,528, 35,2014-01-16 12:17:09.493212,634,494,325, 92,2014-01-20 05:48:05.535881,166,567,500, 92,2014-01-20 02:45:33.386223,559,444,395, 92,2014-01-15 23:43:32.033284,366,544,323, 35,2014-01-13 01:28:15.264067,397,325,713, 92,2014-01-20 22:28:31.158297,953,767,607, 35,2014-01-17 12:24:28.578284,680,267,500, 92,2014-01-20 01:12:16.033009,425,615,945, 35,2014-01-11 13:13:05.176429,753,504,198, 92,2014-01-18 20:27:58.10116,647,625,171, 10,2014-01-15 18:34:48.911147,734,400,487, 10,2014-01-14 11:45:50.039774,51,289,409, 35,2014-01-11 23:02:34.799509,612,449,401, 10,2014-01-12 21:32:44.681639,443,596,887, 92,2014-01-19 08:15:56.285563,833,792,807, 35,2014-01-13 23:10:34.224079,194,838,244, 35,2014-01-15 15:40:50.559161,595,643,249, 35,2014-01-11 21:38:58.537657,575,965,90, 10,2014-01-15 01:41:45.146846,870,230,420, 35,2014-01-14 03:27:25.842525,230,457,45, 10,2014-01-20 06:38:02.113623,196,364,383, 10,2014-01-13 04:45:22.129442,239,388,843, 10,2014-01-17 07:31:19.855625,694,828,284, 10,2014-01-12 17:11:49.284341,145,619,997, 92,2014-01-14 17:45:59.788953,334,715,127, 92,2014-01-10 20:59:38.739474,719,855,747, 10,2014-01-15 09:56:03.414027,417,760,972, 10,2014-01-16 15:49:25.687793,732,875,895, 10,2014-01-11 12:05:47.026939,370,772,975, 35,2014-01-11 02:35:31.609946,42,280,128, 35,2014-01-14 03:58:41.83022,375,281,565, 35,2014-01-17 04:22:56.520151,490,9,630, 92,2014-01-20 12:23:37.146246,210,168,797, 92,2014-01-13 00:44:15.216444,287,61,835, 10,2014-01-11 22:06:28.553434,787,334,52, 10,2014-01-12 07:51:52.410313,713,945,206, 10,2014-01-12 17:12:46.375159,955,898,612, 35,2014-01-18 14:30:57.131719,218,934,230, 35,2014-01-11 03:04:08.443741,860,609,731, 92,2014-01-13 
10:25:51.551589,136,282,532, 35,2014-01-16 04:58:55.23288,597,601,494, 92,2014-01-16 01:38:57.354887,299,487,978, 10,2014-01-17 02:11:41.591364,62,584,428, 10,2014-01-17 08:01:30.032389,547,936,512, 35,2014-01-18 03:11:42.036945,999,368,963, 35,2014-01-15 04:27:37.876926,986,261,163, 35,2014-01-20 11:12:04.256888,254,115,914, 10,2014-01-12 12:52:32.596106,973,91,905, 10,2014-01-16 06:06:36.415003,257,593,140, 92,2014-01-14 06:53:49.403733,46,404,906, 35,2014-01-15 20:19:58.438631,360,954,266, 10,2014-01-15 06:44:38.528625,998,394,796, 10,2014-01-16 15:39:11.533175,668,386,934, 92,2014-01-11 21:51:18.294256,428,736,781, 92,2014-01-17 03:56:27.812966,992,445,163, 35,2014-01-20 13:25:11.322548,891,557,958, 92,2014-01-11 22:50:57.03373,27,226,375, 10,2014-01-11 07:52:31.226994,204,5,13, 92,2014-01-15 23:14:37.010341,420,657,556, 92,2014-01-12 14:56:44.061088,336,481,138, 92,2014-01-11 14:28:02.837358,156,494,342, 92,2014-01-19 07:13:18.84056,902,760,908, 92,2014-01-18 23:19:40.581239,495,983,355, 10,2014-01-10 20:51:39.983604,458,286,740, 10,2014-01-11 01:57:34.057004,401,170,779, 92,2014-01-20 01:26:09.135092,849,384,760, 35,2014-01-12 08:43:32.393917,602,215,530, 92,2014-01-12 23:10:20.432165,397,998,93, 35,2014-01-17 09:57:06.266811,210,325,183, 92,2014-01-12 15:47:40.837657,695,789,171, 35,2014-01-15 13:36:11.983753,686,829,540, 10,2014-01-12 08:56:01.50011,501,52,962, 35,2014-01-17 21:26:38.192965,779,3,472, 35,2014-01-11 09:47:53.575117,989,862,711, 92,2014-01-18 20:57:31.532499,456,828,928, 92,2014-01-14 18:38:20.229909,646,157,576, 10,2014-01-15 18:15:31.452461,410,592,129, 10,2014-01-18 02:09:35.789386,579,826,204, 92,2014-01-16 11:30:52.826016,943,451,52, 10,2014-01-21 04:22:07.867464,823,991,999, 10,2014-01-11 03:03:25.19312,780,825,372, 10,2014-01-11 01:50:51.264647,990,475,361, 35,2014-01-15 05:06:46.39609,567,49,475, 92,2014-01-16 22:42:36.726715,518,228,278, 92,2014-01-12 03:42:09.558904,988,857,401, 35,2014-01-11 03:03:14.209055,569,997,264, 10,2014-01-16 06:07:48.049261,322,262,97, 35,2014-01-13 06:33:06.593053,260,958,143, 92,2014-01-11 14:55:45.436049,427,999,727, 10,2014-01-10 23:22:25.059602,86,918,594, 35,2014-01-15 01:29:50.654141,679,712,397, 92,2014-01-12 09:23:48.273407,752,975,816, 10,2014-01-19 10:35:43.900164,357,363,676, 35,2014-01-12 18:49:31.235379,720,712,495, 35,2014-01-12 10:15:28.257011,381,259,837, 10,2014-01-19 16:33:17.957168,529,999,446, 10,2014-01-11 14:15:40.370471,6,91,222, 10,2014-01-13 22:59:00.651348,718,839,852, 35,2014-01-11 09:43:38.389751,984,942,859, 35,2014-01-18 04:12:46.637701,569,510,80, 35,2014-01-15 18:46:41.489005,396,349,65, 10,2014-01-16 03:19:50.373504,811,402,836, 35,2014-01-19 17:08:48.137811,234,936,658, 92,2014-01-12 10:13:19.68197,939,342,443, 10,2014-01-16 17:07:43.94904,690,817,729, 10,2014-01-17 08:06:19.67031,821,336,53, 92,2014-01-16 08:51:39.911879,812,726,585, 92,2014-01-11 05:23:15.401501,410,698,196, 35,2014-01-14 04:15:55.460117,864,614,891, 10,2014-01-11 14:22:32.737895,566,130,578, 10,2014-01-11 03:45:23.268965,357,296,684, 35,2014-01-14 11:19:20.653236,335,913,713, 35,2014-01-11 20:13:24.002543,493,481,599, 10,2014-01-15 12:52:09.665053,723,495,163, 10,2014-01-20 14:01:57.379951,186,542,911, 35,2014-01-13 03:55:33.561866,404,957,377, 92,2014-01-15 19:55:23.874109,911,133,128, 35,2014-01-15 14:09:45.429212,592,644,60, 10,2014-01-15 14:28:40.154919,974,589,107, 35,2014-01-16 14:51:09.310157,936,365,194, 35,2014-01-15 17:32:10.488815,341,538,386, 92,2014-01-19 19:58:30.80906,65,149,638, 92,2014-01-18 
04:14:57.583983,443,513,831, 10,2014-01-13 22:07:54.388978,219,331,319, 10,2014-01-11 08:48:02.04444,931,276,477, 10,2014-01-19 18:30:25.840995,555,416,733, 35,2014-01-12 08:41:12.346565,235,333,216, 92,2014-01-12 03:03:42.415329,931,425,779, 10,2014-01-12 11:29:26.492343,18,190,673, 92,2014-01-12 22:24:50.736315,25,74,478, 35,2014-01-19 11:16:29.05303,50,808,643, 10,2014-01-17 10:16:07.981768,192,970,271, 10,2014-01-18 05:44:41.109819,891,325,846, 10,2014-01-17 22:25:17.190842,765,992,914, 92,2014-01-19 00:29:27.663737,158,932,898, 92,2014-01-13 16:52:25.058858,518,538,394, 10,2014-01-14 00:31:36.861152,193,341,469, 35,2014-01-14 03:21:07.576035,276,807,519, 10,2014-01-14 02:15:40.460358,996,718,593, 92,2014-01-17 08:47:32.321269,404,51,261, 35,2014-01-14 21:43:40.31393,270,261,675, 35,2014-01-14 10:01:03.729323,186,824,136, 92,2014-01-21 00:06:52.974924,839,741,630, 10,2014-01-15 21:57:04.316892,119,94,409, 35,2014-01-19 02:53:13.394377,22,559,314, 35,2014-01-20 08:08:50.354875,766,966,743, 35,2014-01-18 05:52:37.878757,77,101,242, 10,2014-01-13 16:48:37.268485,288,781,176, 92,2014-01-14 16:18:35.784506,197,39,483, 92,2014-01-12 14:21:18.033677,844,254,113, 35,2014-01-19 11:39:46.579062,962,93,866, 10,2014-01-19 13:50:46.273321,550,605,438, 10,2014-01-11 04:19:48.842736,648,573,158, 92,2014-01-16 09:54:44.163046,78,823,309, 10,2014-01-12 05:58:40.662719,801,184,976, 10,2014-01-11 17:07:50.887176,867,223,399, 10,2014-01-14 22:25:10.004041,456,182,354, 92,2014-01-13 18:39:53.009282,359,171,631, 35,2014-01-13 00:11:33.302506,997,134,82, 92,2014-01-16 13:54:36.496803,939,705,891, 92,2014-01-15 21:04:43.745598,298,775,830, 92,2014-01-11 05:28:02.355536,920,971,316, 10,2014-01-12 18:10:44.47857,39,678,645, 92,2014-01-12 20:49:24.855417,488,428,537, 35,2014-01-18 07:53:19.546378,472,209,832, 10,2014-01-20 22:40:12.142307,96,667,906, 35,2014-01-15 17:41:49.914694,540,537,975, 92,2014-01-11 02:24:56.40795,839,920,34, 35,2014-01-13 20:01:19.718343,705,706,164, 10,2014-01-18 23:57:30.375052,379,799,873, 35,2014-01-17 15:12:28.729219,865,735,520, 10,2014-01-17 21:45:00.032693,754,510,914, 10,2014-01-12 03:58:34.104795,445,10,619, 35,2014-01-17 09:19:21.704143,207,616,147, 10,2014-01-12 13:42:04.349584,558,449,651, 92,2014-01-20 10:51:47.499171,354,641,147, 92,2014-01-16 11:28:12.059436,337,212,880, 35,2014-01-19 23:34:42.228343,716,450,182, 10,2014-01-12 21:40:24.768076,596,951,650, 10,2014-01-20 07:46:47.843944,516,387,886, 35,2014-01-11 07:56:00.262019,574,181,313, 10,2014-01-11 03:20:11.347138,824,942,390, 35,2014-01-18 15:37:34.117265,930,179,911, 10,2014-01-11 16:15:49.104756,347,911,305, 10,2014-01-16 17:14:55.510183,486,568,622, 35,2014-01-20 01:36:14.779983,551,723,61, 35,2014-01-12 13:23:39.991932,979,553,947, 10,2014-01-20 19:40:05.514224,435,241,116, 10,2014-01-12 14:16:07.789266,217,546,612, 10,2014-01-14 17:35:13.294857,861,544,689, 35,2014-01-16 03:34:42.011026,73,240,213, 92,2014-01-17 15:20:51.534864,718,500,768, 92,2014-01-15 03:03:15.650394,866,585,774, 35,2014-01-18 01:45:26.489597,517,351,167, 10,2014-01-19 16:10:16.390699,881,590,675, 10,2014-01-12 04:56:35.197192,506,31,535, 10,2014-01-17 18:25:38.632324,242,227,948, 92,2014-01-14 03:52:06.305393,492,88,42, 35,2014-01-12 11:21:31.605142,454,178,372, 10,2014-01-20 18:26:58.350667,837,933,786, 35,2014-01-11 21:49:36.680445,522,150,864, 92,2014-01-19 06:34:00.33436,494,364,163, 92,2014-01-17 10:11:58.383359,570,544,283, 92,2014-01-13 05:48:10.785239,779,642,52, 35,2014-01-15 09:53:22.038922,818,252,606, 10,2014-01-19 
03:54:02.732945,517,927,807, 35,2014-01-12 10:39:58.284829,15,504,446, 10,2014-01-21 01:21:34.098358,255,14,280, 10,2014-01-17 21:28:44.961286,28,963,975, 48,2014-01-14 12:20:23.052905,547,931,259, 48,2014-01-20 03:08:21.942302,41,768,0, 48,2014-01-18 09:24:45.223306,768,496,251, 48,2014-01-14 19:40:34.400043,310,660,696, 48,2014-01-17 12:45:56.059567,661,245,108, 48,2014-01-19 05:40:34.328062,42,724,862, 48,2014-01-20 16:55:29.910226,623,688,173, 48,2014-01-16 08:22:10.839969,101,861,818, 48,2014-01-20 23:04:14.320414,140,635,276, 48,2014-01-20 06:35:35.42445,388,643,492, 48,2014-01-18 02:38:18.629234,729,684,654, 48,2014-01-14 10:39:27.615271,806,661,911, 48,2014-01-15 04:10:17.435895,144,40,16, 48,2014-01-14 11:59:10.164517,856,242,810, 48,2014-01-18 17:42:43.265665,945,407,876, 48,2014-01-11 23:55:43.925492,857,344,767, 48,2014-01-12 22:09:26.555215,172,744,842, 48,2014-01-20 02:39:18.462856,811,458,737, 48,2014-01-18 22:21:22.557816,605,633,434, 48,2014-01-16 06:01:32.860607,424,367,620, 48,2014-01-11 08:00:50.067998,206,424,344, 48,2014-01-18 10:48:20.908901,463,110,758, 48,2014-01-17 07:51:09.541052,258,966,920, 48,2014-01-19 18:34:50.402777,300,567,456, 48,2014-01-14 15:00:19.292261,776,156,542, 48,2014-01-19 17:39:20.32671,963,999,735, 48,2014-01-13 22:28:12.441699,415,931,684, 48,2014-01-12 12:54:22.025625,129,182,663, 48,2014-01-10 22:19:18.611539,600,74,789, 48,2014-01-13 17:49:46.540057,445,826,498, 48,2014-01-19 14:23:06.986911,497,880,736, 48,2014-01-14 14:39:41.664444,432,592,229, 48,2014-01-12 14:58:08.482359,370,244,746, 48,2014-01-16 17:47:52.210636,542,562,354, 48,2014-01-18 14:20:16.064486,870,316,296, 48,2014-01-19 07:44:04.541926,511,275,958, 48,2014-01-14 17:28:26.538699,349,597,597, 48,2014-01-18 01:15:45.974712,486,771,8, 48,2014-01-14 10:06:15.381894,318,474,565, 48,2014-01-14 10:32:40.859112,163,661,217, 48,2014-01-17 01:51:21.399161,894,291,572, 48,2014-01-11 06:44:34.011547,435,529,77, 48,2014-01-18 01:12:08.474383,894,58,910, 48,2014-01-11 00:01:38.835057,565,794,504, 48,2014-01-14 22:43:44.176064,571,995,611, 48,2014-01-15 12:54:51.740467,705,490,23, 48,2014-01-12 03:57:22.760968,571,821,226, 48,2014-01-17 00:53:10.731278,85,982,309, 48,2014-01-14 09:34:10.203323,53,508,198, 48,2014-01-20 06:18:45.318784,840,991,176, 48,2014-01-12 00:54:43.591885,201,111,512, 48,2014-01-14 21:35:00.271741,343,243,928, 48,2014-01-17 11:07:06.227685,413,406,367, 48,2014-01-18 12:45:53.132938,492,388,347, 48,2014-01-13 10:09:50.674517,96,559,134, 48,2014-01-10 20:07:25.520366,851,918,115, 48,2014-01-17 00:25:13.459648,367,882,60, 48,2014-01-16 12:38:03.116216,168,438,243, 48,2014-01-12 13:01:47.545991,268,550,957, 48,2014-01-17 02:44:32.071186,484,261,409, 48,2014-01-19 10:27:49.656273,820,851,673, 48,2014-01-10 21:24:54.532901,184,544,656, 48,2014-01-20 21:24:13.73563,963,688,733, 48,2014-01-21 05:25:58.138631,772,690,839, 48,2014-01-16 19:12:46.743538,573,787,514, 48,2014-01-18 05:44:29.800117,721,160,733, 48,2014-01-19 07:10:02.680557,884,927,660, 48,2014-01-20 16:41:13.282655,48,782,687, 48,2014-01-15 01:00:15.774829,216,892,104, 48,2014-01-12 11:16:18.062871,114,373,219, 48,2014-01-13 21:13:54.141768,315,970,593, 48,2014-01-10 20:51:37.174409,203,28,940, 48,2014-01-12 22:00:52.074418,946,708,773, 48,2014-01-21 02:26:02.61657,617,57,786, 48,2014-01-11 00:53:16.009467,162,558,647, 48,2014-01-17 00:44:36.250481,251,351,816, 48,2014-01-15 09:20:54.357036,331,903,801, 48,2014-01-12 08:50:38.770435,286,463,32, 48,2014-01-12 19:37:46.981759,821,361,653, 48,2014-01-18 
22:55:04.56036,561,883,220, 48,2014-01-11 09:09:24.089638,79,223,268, 48,2014-01-14 00:32:30.573645,138,25,185, 48,2014-01-12 14:30:04.8321,27,458,700, 48,2014-01-18 00:16:30.317323,961,295,767, 48,2014-01-11 07:18:23.706583,159,306,597, 48,2014-01-15 04:39:55.506617,573,325,932, 48,2014-01-18 00:23:55.837689,584,193,907, 48,2014-01-17 11:43:37.16665,497,583,216, 48,2014-01-20 21:17:58.622832,413,198,232, 48,2014-01-19 17:25:43.383679,157,627,967, 48,2014-01-13 08:28:09.237836,387,905,817, 48,2014-01-19 01:45:48.279105,482,884,443, 48,2014-01-19 18:50:37.917,428,850,753, 48,2014-01-12 23:52:22.973886,405,472,93, 48,2014-01-19 01:11:46.417736,699,133,434, 48,2014-01-15 08:03:24.660537,271,542,331, 48,2014-01-20 09:36:52.774003,139,529,509, 48,2014-01-17 02:21:49.098714,177,830,704, 48,2014-01-14 18:44:37.943193,458,17,105, 48,2014-01-14 04:37:08.549251,62,294,940, 48,2014-01-18 17:38:07.161583,78,474,644, 48,2014-01-17 19:58:32.085379,83,970,841, 48,2014-01-14 05:28:45.723661,524,285,780, 48,2014-01-20 19:38:59.236001,421,547,470, 48,2014-01-17 16:24:34.701949,636,377,943, 48,2014-01-14 10:22:01.733128,847,755,637, 48,2014-01-16 14:23:35.486482,457,797,328, 48,2014-01-11 19:45:29.058985,452,663,442, 48,2014-01-15 23:12:40.503982,843,568,269, 48,2014-01-18 14:01:22.468241,7,990,1000, 5,2014-01-19 22:40:33.619346,539,324,73, 53,2014-01-16 12:22:04.59362,793,146,992, 5,2014-01-11 08:33:53.041886,598,903,207, 5,2014-01-11 07:10:38.451865,837,421,414, 5,2014-01-13 06:38:34.910944,980,28,125, 53,2014-01-11 19:52:16.748469,500,245,424, 5,2014-01-15 15:50:33.958481,187,409,796, 5,2014-01-20 11:02:30.749052,767,824,530, 5,2014-01-18 11:35:53.915119,809,573,838, 53,2014-01-15 07:08:32.581733,814,79,630, 5,2014-01-18 22:28:14.132731,420,863,415, 5,2014-01-21 00:04:03.153375,176,540,436, 5,2014-01-13 02:54:20.860839,290,575,475, 5,2014-01-17 11:18:52.04973,762,162,413, 53,2014-01-12 17:56:26.127261,677,342,117, 53,2014-01-10 22:06:07.278994,930,869,243, 5,2014-01-11 13:22:16.710687,610,68,348, 53,2014-01-11 21:33:18.901265,496,156,622, 53,2014-01-17 04:27:56.377707,835,515,250, 53,2014-01-15 12:06:54.65388,867,725,257, 53,2014-01-15 06:10:27.450516,463,966,229, 53,2014-01-14 16:06:03.539291,725,995,533, 53,2014-01-12 02:05:26.739259,353,845,877, 53,2014-01-18 15:39:13.174177,676,709,66, 5,2014-01-14 05:45:02.775292,611,32,129, 53,2014-01-18 22:30:01.441208,935,104,103, 53,2014-01-11 20:01:14.907724,71,398,122, 53,2014-01-20 00:08:38.261774,577,511,743, 53,2014-01-19 22:15:30.500194,126,274,898, 5,2014-01-16 23:13:55.411706,268,68,97, 5,2014-01-17 08:10:00.730015,24,791,404, 53,2014-01-18 14:56:04.119957,700,503,820, 5,2014-01-12 05:36:00.005326,195,525,336, 5,2014-01-17 20:43:53.771901,114,603,253, 5,2014-01-19 02:06:42.571822,130,257,356, 5,2014-01-14 16:14:34.91627,965,594,848, 5,2014-01-18 20:36:10.52037,531,926,459, 53,2014-01-13 11:57:16.530304,298,313,408, 53,2014-01-13 21:17:05.665321,421,299,172, 5,2014-01-16 02:12:04.435908,371,26,604, 53,2014-01-17 23:05:49.112037,584,66,604, 5,2014-01-11 13:45:19.798471,90,485,496, 53,2014-01-15 20:16:07.589283,564,964,262, 53,2014-01-20 06:00:09.973295,582,933,306, 5,2014-01-18 05:04:11.848201,172,302,867, 53,2014-01-17 18:12:33.716545,657,890,788, 5,2014-01-20 08:06:17.252289,492,944,81, 53,2014-01-18 22:26:28.558889,754,493,145, 53,2014-01-18 19:45:52.617809,736,31,321, 53,2014-01-16 06:34:13.629996,639,547,710, 5,2014-01-13 04:33:23.213187,512,452,401, 5,2014-01-12 19:56:20.068745,171,486,688, 53,2014-01-20 02:40:17.169287,482,308,818, 5,2014-01-14 
10:38:49.952447,946,460,5, 53,2014-01-20 15:35:33.242922,577,915,872, 53,2014-01-13 02:25:19.944579,239,290,54, 53,2014-01-12 03:08:51.394073,440,50,818, 53,2014-01-11 05:36:48.150646,600,418,771, 53,2014-01-11 20:33:58.206352,432,941,397, 53,2014-01-21 05:24:21.894267,697,740,396, 53,2014-01-17 08:50:43.562352,829,420,541, 5,2014-01-18 08:43:58.936367,635,192,789, 5,2014-01-18 14:20:26.014224,945,13,935, 5,2014-01-18 18:26:43.567678,661,791,837, 5,2014-01-14 23:27:52.708268,973,853,156, 53,2014-01-16 10:27:08.586047,940,530,323, 5,2014-01-12 04:41:18.484368,541,561,821, 53,2014-01-12 14:04:03.228638,430,856,39, 5,2014-01-19 02:24:25.11677,136,773,513, 53,2014-01-15 05:58:24.149689,528,166,26, 53,2014-01-17 20:16:07.664546,158,591,258, 53,2014-01-15 19:30:14.228807,15,448,319, 5,2014-01-15 23:43:43.948159,55,127,576, 5,2014-01-12 10:32:15.253829,753,250,638, 53,2014-01-14 19:30:24.202102,595,948,777, 5,2014-01-12 22:47:55.79678,947,263,551, 53,2014-01-19 08:44:48.970374,210,401,142, 5,2014-01-13 21:36:41.454391,638,712,999, 5,2014-01-21 01:14:24.355669,567,197,455, 53,2014-01-16 22:30:41.588603,509,327,1, 53,2014-01-19 08:10:55.084386,352,308,614, 53,2014-01-12 23:47:47.568856,823,175,808, 53,2014-01-18 22:27:01.657348,612,629,272, 53,2014-01-18 04:51:12.253674,282,918,291, 5,2014-01-16 14:26:37.521721,724,436,675, 53,2014-01-18 08:02:34.90027,673,938,995, 5,2014-01-20 11:16:32.198253,102,884,290, 5,2014-01-17 21:35:28.915795,389,625,312, 53,2014-01-18 17:39:23.051334,602,992,91, 5,2014-01-11 01:50:30.404605,363,72,872, 5,2014-01-17 20:59:50.810061,816,274,56, 5,2014-01-14 20:30:06.613687,67,746,40, 53,2014-01-18 14:34:29.340972,448,951,568, 5,2014-01-15 05:20:16.824705,335,95,216, 53,2014-01-12 08:56:50.181784,25,758,313, 5,2014-01-12 08:02:22.04924,978,832,614, 53,2014-01-20 19:47:25.410751,554,837,797, 53,2014-01-13 17:38:08.666152,611,547,642, 53,2014-01-14 02:06:25.278297,960,988,554, 5,2014-01-18 16:11:50.527522,42,340,359, 53,2014-01-18 03:36:32.815841,26,252,824, 5,2014-01-21 02:22:32.942843,948,216,419, 5,2014-01-13 05:42:04.756748,274,238,607, 5,2014-01-12 21:20:16.764419,478,904,639, 53,2014-01-12 06:54:48.197092,497,778,13, 5,2014-01-17 05:12:28.958849,285,374,575, 5,2014-01-15 00:08:12.561199,957,973,363, 53,2014-01-20 19:39:37.167466,864,110,782, 5,2014-01-20 06:49:10.413658,672,420,631, 5,2014-01-14 19:22:36.916868,589,214,275, 5,2014-01-16 12:10:18.756069,856,349,874, 53,2014-01-18 09:00:05.498046,640,682,631, 5,2014-01-16 23:10:24.486142,313,839,436, 53,2014-01-14 04:37:20.413417,993,712,559, 5,2014-01-15 07:51:17.752139,876,114,128, 53,2014-01-12 07:37:02.007864,54,480,695, 5,2014-01-11 06:39:55.314105,745,319,357, 5,2014-01-14 13:07:49.950392,215,347,23, 5,2014-01-19 09:12:30.923657,842,162,243, 5,2014-01-19 04:19:18.36544,935,148,77, 53,2014-01-14 18:58:20.355416,707,503,107, 5,2014-01-16 00:12:21.733719,162,571,537, 53,2014-01-12 18:49:24.979127,495,461,506, 53,2014-01-12 03:32:49.696387,891,494,604, 53,2014-01-20 09:32:38.558424,308,772,877, 53,2014-01-14 07:46:15.160911,85,588,94, 5,2014-01-13 15:35:11.746046,760,982,157, 5,2014-01-19 23:20:03.969595,538,11,879, 53,2014-01-17 05:24:23.827064,884,347,32, 5,2014-01-16 21:41:37.024343,743,324,543, 5,2014-01-17 09:31:54.497116,764,63,525, 53,2014-01-14 03:00:56.643323,382,595,478, 5,2014-01-16 18:04:09.967185,134,953,842, 53,2014-01-19 19:13:59.253864,90,454,913, 53,2014-01-16 04:21:13.407742,444,843,928, 53,2014-01-18 04:58:58.164276,788,159,81, 53,2014-01-15 18:26:28.213131,735,806,22, 5,2014-01-20 
08:29:25.96894,826,234,807, 5,2014-01-17 18:38:35.33216,972,967,701, 53,2014-01-14 19:15:38.626791,703,221,590, 5,2014-01-13 21:52:02.885807,379,312,937, 53,2014-01-13 00:48:54.088229,260,834,405, 53,2014-01-11 22:15:44.124836,705,29,205, 53,2014-01-20 01:02:27.371949,158,319,447, 5,2014-01-16 09:26:14.502065,18,954,762, 5,2014-01-16 10:07:01.876975,146,470,533, 5,2014-01-11 02:39:29.379812,711,30,220, 53,2014-01-16 20:06:09.81617,662,88,33, 5,2014-01-20 03:14:51.827785,997,500,745, 5,2014-01-19 15:52:00.30347,494,241,483, 53,2014-01-14 18:25:28.181609,613,437,836, 53,2014-01-13 16:13:12.183201,184,575,249, 5,2014-01-14 10:04:22.037608,991,699,40, 5,2014-01-16 17:14:53.160735,63,661,459, 53,2014-01-14 23:46:01.879588,287,188,558, 5,2014-01-13 13:37:00.596032,896,512,392, 53,2014-01-20 05:01:08.322067,418,432,244, 53,2014-01-17 19:21:13.625634,399,493,104, 5,2014-01-12 06:57:04.565627,299,376,447, 5,2014-01-16 04:25:32.149129,828,565,148, 5,2014-01-13 11:02:50.649976,61,971,945, 5,2014-01-18 20:28:59.063161,524,200,546, 5,2014-01-19 11:26:28.792452,995,334,929, 53,2014-01-19 09:07:00.61758,96,576,327, 5,2014-01-17 09:42:58.317025,268,811,421, 53,2014-01-14 09:47:42.200193,43,482,797, 53,2014-01-16 08:05:58.781856,515,259,626, 53,2014-01-11 22:09:26.530156,5,848,679, 5,2014-01-13 12:17:08.169133,504,928,735, 5,2014-01-12 20:44:34.114018,902,786,28, 53,2014-01-15 21:25:05.156947,553,850,208, 5,2014-01-16 14:09:11.055359,363,877,508, 53,2014-01-15 01:33:28.202666,717,474,527, 53,2014-01-16 23:40:49.282202,82,169,312, 53,2014-01-15 09:11:38.427308,415,204,720, 53,2014-01-20 14:59:42.70473,20,758,621, 53,2014-01-12 03:47:51.159177,371,291,12, 53,2014-01-15 15:51:07.807541,736,840,502, 5,2014-01-16 05:05:52.5209,699,107,385, 5,2014-01-11 01:02:42.986962,921,591,541, 5,2014-01-14 01:43:08.11101,20,599,979, 53,2014-01-20 03:31:20.702509,435,20,841, 53,2014-01-13 21:15:55.170163,202,918,968, 5,2014-01-17 15:47:30.148619,885,431,446, 53,2014-01-15 14:46:13.863245,447,376,982, 53,2014-01-18 01:01:57.05017,352,135,507, 5,2014-01-20 09:24:30.74507,431,350,632, 53,2014-01-14 13:47:22.185311,456,782,155, 53,2014-01-14 14:23:10.675804,431,252,710, 53,2014-01-11 10:21:35.310697,647,879,718, 77,2014-01-19 22:12:54.334859,241,680,678, 77,2014-01-17 05:26:01.326199,196,579,118, 77,2014-01-19 10:50:34.373858,485,364,439, 77,2014-01-18 03:39:23.127311,626,479,674, 50,2014-01-15 08:33:01.943779,136,736,109, 50,2014-01-15 14:33:32.691302,933,109,386, 77,2014-01-11 07:27:05.327924,575,809,950, 50,2014-01-20 20:39:00.725636,641,401,506, 77,2014-01-16 16:42:59.221459,944,626,118, 50,2014-01-13 23:44:13.497057,274,994,423, 77,2014-01-12 11:23:34.840073,465,195,24, 50,2014-01-11 08:08:04.378825,887,57,916, 77,2014-01-19 17:53:24.552417,700,226,219, 50,2014-01-16 16:57:03.042739,563,871,980, 77,2014-01-17 11:48:53.661027,380,491,936, 77,2014-01-13 21:05:02.980145,495,401,968, 77,2014-01-16 01:56:45.74747,265,414,593, 77,2014-01-18 19:36:44.820204,597,445,544, 77,2014-01-18 16:56:10.787684,22,121,162, 50,2014-01-11 01:02:38.26837,705,792,266, 77,2014-01-19 00:39:27.807585,192,99,928, 77,2014-01-11 12:39:18.898695,501,186,165, 50,2014-01-20 08:33:58.970879,316,211,879, 77,2014-01-11 15:55:22.977748,241,681,19, 77,2014-01-18 08:26:49.047733,108,238,601, 50,2014-01-14 17:20:12.834124,313,873,456, 77,2014-01-18 20:57:20.027918,466,89,843, 77,2014-01-17 11:51:19.792803,638,624,137, 50,2014-01-18 11:07:35.019855,616,568,316, 50,2014-01-12 05:20:30.704141,205,513,461, 50,2014-01-18 02:12:55.1035,603,919,8, 77,2014-01-17 
03:20:29.354715,444,778,847, 77,2014-01-18 14:46:32.03034,459,131,961, 50,2014-01-16 07:03:29.477778,822,624,720, 50,2014-01-14 00:59:52.482026,902,780,941, 50,2014-01-12 17:19:33.974119,243,109,738, 77,2014-01-21 01:37:02.169081,443,282,240, 50,2014-01-14 12:26:57.80995,933,752,689, 77,2014-01-12 07:58:34.699755,555,187,943, 50,2014-01-16 12:20:01.39054,305,451,907, 77,2014-01-17 16:11:11.307426,196,69,223, 50,2014-01-13 23:22:09.539828,361,592,138, 50,2014-01-17 00:28:05.769365,47,218,773, 77,2014-01-16 04:04:35.859843,886,173,331, 50,2014-01-19 20:19:12.582567,378,713,919, 50,2014-01-13 06:16:59.430393,851,411,495, 77,2014-01-19 05:09:38.839987,986,972,481, 77,2014-01-14 16:15:58.330036,154,398,298, 50,2014-01-21 05:53:44.251016,246,262,21, 50,2014-01-16 16:05:49.627672,491,4,802, 50,2014-01-14 21:18:36.598406,52,806,745, 77,2014-01-19 00:33:12.058601,786,997,598, 77,2014-01-17 08:45:08.526786,897,249,4, 77,2014-01-13 23:52:35.569285,851,537,531, 77,2014-01-19 20:28:35.036349,615,700,676, 50,2014-01-14 11:11:57.57452,203,943,317, 50,2014-01-17 21:12:48.403828,875,727,62, 77,2014-01-17 11:25:55.064686,400,678,422, 50,2014-01-21 03:03:17.367323,172,30,67, 50,2014-01-15 02:20:23.423683,253,920,612, 77,2014-01-18 20:46:25.768827,533,948,441, 77,2014-01-17 23:16:12.470823,130,141,347, 77,2014-01-10 23:40:52.778398,527,267,734, 50,2014-01-16 05:32:57.799167,981,811,665, 50,2014-01-13 00:19:41.948601,86,397,0, 50,2014-01-14 04:40:45.260424,593,619,622, 77,2014-01-18 02:52:31.773285,954,166,258, 77,2014-01-12 19:56:44.117682,114,132,787, 77,2014-01-17 21:07:43.070794,55,170,921, 50,2014-01-19 14:51:06.473041,862,510,141, 77,2014-01-18 12:16:45.508642,454,519,220, 77,2014-01-14 07:18:54.37822,445,228,257, 77,2014-01-12 08:13:16.013288,334,309,538, 50,2014-01-14 06:44:51.278006,159,136,976, 50,2014-01-19 15:23:30.238063,785,782,779, 77,2014-01-10 22:32:28.595854,905,242,509, 50,2014-01-16 17:01:50.708818,99,581,776, 77,2014-01-17 14:33:09.078469,743,990,180, 77,2014-01-14 18:48:26.92631,970,778,196, 50,2014-01-16 16:55:34.959833,600,750,279, 50,2014-01-13 00:38:58.706141,343,318,249, 77,2014-01-18 20:07:03.524716,549,750,490, 50,2014-01-14 11:28:47.018433,464,371,110, 77,2014-01-19 13:24:07.232927,195,495,11, 77,2014-01-11 13:59:39.094421,970,428,256, 50,2014-01-13 01:57:22.054782,759,364,326, 77,2014-01-12 18:36:04.807446,914,169,344, 77,2014-01-18 15:12:27.498249,883,770,363, 77,2014-01-19 17:23:17.119469,134,923,127, 77,2014-01-12 15:39:22.174769,964,28,32, 50,2014-01-12 11:32:50.921932,28,736,833, 50,2014-01-17 08:09:42.888296,725,128,126, 77,2014-01-19 18:55:34.646011,585,549,399, 77,2014-01-12 15:13:43.70033,446,142,64, 50,2014-01-12 07:42:40.687461,956,578,243, 77,2014-01-11 13:15:16.594613,355,185,773, 50,2014-01-15 23:54:28.960754,448,146,891, 77,2014-01-19 14:35:12.460747,231,491,112, 50,2014-01-13 13:12:00.712714,668,758,79, 77,2014-01-12 15:02:12.031548,874,65,962, 50,2014-01-17 23:26:18.934207,228,632,450, 50,2014-01-21 05:28:46.221355,162,191,222, 77,2014-01-16 02:21:06.409768,640,529,374, 77,2014-01-19 11:39:34.947495,745,934,536, 50,2014-01-14 06:13:37.499361,279,258,429, 77,2014-01-14 11:44:36.648249,248,824,494, 50,2014-01-19 14:12:03.54335,885,783,372, 77,2014-01-20 03:15:28.208179,287,607,883, 50,2014-01-10 20:17:45.726718,557,440,263, 50,2014-01-13 03:00:30.469659,50,998,662, 77,2014-01-15 14:11:03.168011,488,133,765, 50,2014-01-13 00:56:44.432858,363,469,50, 77,2014-01-21 03:07:33.994795,448,430,635, 77,2014-01-19 05:39:50.186446,632,273,404, 50,2014-01-11 
08:20:51.666204,388,726,283, 50,2014-01-11 11:07:13.089216,6,292,425, 50,2014-01-11 01:37:12.241647,202,258,763, 77,2014-01-13 06:56:56.47365,610,509,105, 50,2014-01-19 06:19:40.587465,463,37,916, 50,2014-01-19 23:00:29.361116,748,172,413, 50,2014-01-15 02:36:18.648839,769,216,962, 77,2014-01-20 21:52:31.509397,805,292,148, 77,2014-01-16 01:10:12.249831,328,338,120, 50,2014-01-13 15:31:53.294849,925,139,847, 50,2014-01-12 07:06:15.209726,733,310,696, 77,2014-01-17 12:52:52.937293,11,774,556, 77,2014-01-14 08:47:09.889461,962,53,971, 77,2014-01-17 11:00:44.17048,439,515,448, 50,2014-01-15 21:28:05.398459,120,823,751, 77,2014-01-17 01:59:10.602176,833,891,316, 77,2014-01-19 06:02:56.202447,371,393,243, 50,2014-01-12 14:54:24.332667,712,795,825, 77,2014-01-17 01:27:56.82353,933,275,481, 50,2014-01-14 02:24:02.612215,717,552,581, 50,2014-01-10 20:33:59.280161,458,297,800, 50,2014-01-20 11:41:34.323311,720,389,767, 50,2014-01-17 18:08:39.260464,56,118,81, 77,2014-01-19 14:46:02.82351,114,227,600, 77,2014-01-19 08:57:02.53149,560,881,782, 50,2014-01-17 18:26:24.987182,468,885,979, 77,2014-01-11 11:46:33.29359,355,350,559, 50,2014-01-13 17:08:05.699921,95,288,360, 50,2014-01-19 23:23:09.420459,375,697,682, 50,2014-01-11 08:54:07.288384,334,273,994, 77,2014-01-11 16:47:55.886366,693,852,559, 50,2014-01-20 11:44:01.086664,544,522,394, 77,2014-01-12 00:01:20.3776,600,285,81, 77,2014-01-11 22:25:08.128014,893,468,324, 50,2014-01-12 12:40:57.560314,889,400,549, 77,2014-01-20 10:21:00.965065,661,124,575, 50,2014-01-21 01:25:37.489548,186,234,744, 77,2014-01-16 19:17:16.209152,214,421,602, 50,2014-01-20 02:13:32.47446,928,218,76, 50,2014-01-15 20:35:49.739379,189,932,167, 50,2014-01-19 14:49:09.504001,989,246,876, 77,2014-01-11 03:19:47.684186,891,870,640, 50,2014-01-12 03:28:42.676672,987,378,463, 77,2014-01-12 17:36:19.393882,280,258,326, 77,2014-01-17 18:20:31.854668,848,35,889, 50,2014-01-17 04:56:48.075131,433,437,494, 50,2014-01-18 23:35:29.996057,163,202,395, 50,2014-01-15 18:23:28.057115,658,636,670, 77,2014-01-18 23:51:12.407797,446,73,399, 77,2014-01-14 19:03:26.820007,974,720,890, 50,2014-01-19 00:47:30.669749,784,570,823, 50,2014-01-19 00:25:11.687958,776,784,242, 77,2014-01-14 00:45:01.143319,849,404,273, 50,2014-01-15 12:56:09.930213,357,117,620, 50,2014-01-17 09:11:14.511889,561,25,116, 77,2014-01-12 03:42:03.674808,572,223,71, 50,2014-01-12 01:22:34.917814,975,689,105, 77,2014-01-18 00:57:47.805478,997,383,202, 77,2014-01-15 00:50:09.374728,194,220,922, 50,2014-01-21 04:45:44.338273,124,596,871, 50,2014-01-18 13:51:55.093863,78,712,552, 77,2014-01-15 21:38:05.261514,866,826,131, 77,2014-01-20 10:29:45.424936,832,484,492, 77,2014-01-19 17:53:15.471463,401,639,321, 50,2014-01-17 00:03:13.389526,219,217,387, 77,2014-01-11 17:10:42.98525,408,559,103, 50,2014-01-18 22:14:16.436527,840,818,950, 77,2014-01-16 19:28:50.879075,85,256,875, 50,2014-01-17 16:27:59.194822,182,384,635, 77,2014-01-17 18:27:48.910987,88,590,267, 50,2014-01-11 10:04:40.618454,901,970,520, 50,2014-01-16 01:17:08.698823,305,371,72, 77,2014-01-18 01:47:36.595174,55,780,35, 77,2014-01-12 17:33:23.295545,697,961,563, 77,2014-01-17 22:53:28.092704,579,265,777, 50,2014-01-14 14:08:08.449842,470,693,63, 50,2014-01-19 02:30:11.370675,991,930,873, 50,2014-01-15 16:28:58.089181,170,387,490, 77,2014-01-19 12:31:36.507376,53,459,57, 50,2014-01-16 20:21:23.778892,319,101,975, 50,2014-01-19 15:32:24.909189,79,642,607, 50,2014-01-17 07:19:07.177124,620,441,536, 77,2014-01-14 14:46:35.46685,356,898,844, 50,2014-01-12 
10:17:26.052507,994,634,617, 50,2014-01-11 14:15:17.107336,807,666,65, 77,2014-01-21 03:57:49.978738,1,389,894, 77,2014-01-13 17:59:29.727315,360,148,718, 77,2014-01-12 19:37:52.02515,722,681,468, 50,2014-01-17 22:55:37.784216,704,208,271, 54,2014-01-17 22:49:39.102463,361,295,694, 61,2014-01-12 18:23:36.363423,176,59,50, 14,2014-01-15 06:47:32.878078,929,416,312, 14,2014-01-12 14:27:44.363976,550,402,929, 93,2014-01-11 22:53:21.788359,510,862,783, 54,2014-01-13 18:40:48.349541,703,715,7, 7,2014-01-18 18:30:57.753502,84,275,289, 93,2014-01-12 20:04:04.774029,396,53,771, 93,2014-01-11 10:55:04.786068,440,855,688, 61,2014-01-14 07:59:48.632996,30,379,780, 14,2014-01-19 16:32:03.968851,252,421,387, 4,2014-01-18 09:22:53.697055,991,612,532, 14,2014-01-14 22:04:29.25145,361,728,495, 93,2014-01-14 11:49:12.667673,779,176,278, 54,2014-01-15 05:10:30.292229,205,789,116, 61,2014-01-16 19:37:52.546995,262,519,336, 4,2014-01-11 04:42:40.760797,748,640,138, 7,2014-01-18 23:18:38.742491,37,285,238, 7,2014-01-14 16:08:03.918089,471,640,719, 93,2014-01-16 01:11:38.849978,997,801,296, 4,2014-01-17 05:50:15.249866,352,974,182, 93,2014-01-20 16:29:27.69698,459,552,270, 14,2014-01-14 10:44:03.759167,370,404,144, 54,2014-01-13 07:09:22.426991,575,606,679, 4,2014-01-14 01:16:03.163829,351,993,363, 14,2014-01-16 01:01:29.811673,685,541,971, 61,2014-01-14 01:24:39.534327,680,169,274, 93,2014-01-13 23:13:53.142986,355,171,702, 54,2014-01-18 23:00:59.538989,206,271,292, 61,2014-01-16 01:02:31.559897,563,709,472, 14,2014-01-21 02:09:30.927202,193,718,982, 61,2014-01-15 15:50:38.641451,436,588,741, 4,2014-01-17 23:26:07.92332,311,839,708, 61,2014-01-15 02:57:03.8057,683,147,806, 54,2014-01-17 10:18:23.005427,520,428,852, 4,2014-01-19 02:19:29.712099,782,415,648, 14,2014-01-18 01:37:52.155241,803,16,208, 7,2014-01-14 22:49:20.759348,989,233,323, 4,2014-01-21 02:23:34.486128,583,177,601, 14,2014-01-18 16:32:56.941728,429,733,860, 54,2014-01-18 10:49:09.392344,509,644,998, 4,2014-01-19 12:55:38.454979,898,882,909, 54,2014-01-15 19:55:50.638783,685,482,641, 54,2014-01-12 02:53:38.643794,999,320,803, 54,2014-01-12 18:44:51.122652,484,852,868, 93,2014-01-20 05:06:20.931431,956,226,86, 93,2014-01-18 02:31:31.191207,862,895,853, 93,2014-01-13 03:27:31.883449,685,105,864, 14,2014-01-17 22:24:59.673922,582,992,166, 54,2014-01-11 12:39:35.109296,512,228,979, 7,2014-01-18 08:39:10.733427,57,816,998, 93,2014-01-13 22:15:14.923787,632,466,948, 54,2014-01-10 23:09:02.806276,706,682,848, 93,2014-01-11 13:23:14.492594,604,794,815, 7,2014-01-16 09:24:37.350778,304,549,780, 54,2014-01-14 04:25:05.970105,210,104,452, 4,2014-01-16 18:24:44.304267,498,451,59, 93,2014-01-19 14:49:16.885105,735,499,311, 7,2014-01-17 07:38:59.113092,955,51,485, 54,2014-01-14 11:25:43.843675,161,960,905, 54,2014-01-14 09:51:48.445002,488,1,72, 14,2014-01-17 03:48:30.040714,860,768,423, 4,2014-01-19 07:16:22.485126,857,84,345, 54,2014-01-11 03:17:56.368741,5,558,3, 14,2014-01-11 00:45:33.846414,989,318,682, 14,2014-01-15 11:34:45.490972,301,687,279, 4,2014-01-19 09:37:26.08084,533,813,326, 4,2014-01-18 06:23:26.001654,378,61,464, 14,2014-01-19 14:24:06.25032,198,775,159, 61,2014-01-19 06:01:00.566969,526,179,140, 4,2014-01-15 16:56:22.943382,874,381,420, 4,2014-01-16 19:13:15.642664,636,869,472, 93,2014-01-17 12:56:39.021948,171,602,515, 61,2014-01-20 16:52:13.582584,454,435,65, 4,2014-01-18 02:06:54.286457,577,926,478, 14,2014-01-19 11:41:30.1446,220,559,996, 93,2014-01-19 15:58:34.514016,687,676,384, 54,2014-01-14 22:38:25.477665,740,524,440, 7,2014-01-11 
09:09:02.028469,532,357,55, 14,2014-01-16 08:23:34.187937,501,922,720, 7,2014-01-15 15:18:00.58696,466,298,379, 93,2014-01-18 21:48:12.761896,9,421,43, 54,2014-01-19 10:38:49.111725,197,723,985, 93,2014-01-15 18:27:03.393236,404,935,80, 7,2014-01-19 15:11:27.254909,199,463,623, 4,2014-01-14 14:03:26.462502,224,675,190, 54,2014-01-19 02:52:09.363761,304,528,566, 61,2014-01-15 03:36:11.559176,679,164,180, 7,2014-01-12 22:52:43.347607,357,183,469, 93,2014-01-15 04:31:08.476853,645,143,522, 54,2014-01-18 19:01:55.402851,545,634,805, 4,2014-01-16 12:44:31.793028,752,966,768, 14,2014-01-11 02:19:38.517567,514,462,375, 61,2014-01-16 20:18:17.888396,356,732,253, 7,2014-01-16 20:02:28.16177,112,153,199, 54,2014-01-11 07:05:12.36398,46,127,618, 93,2014-01-11 01:53:03.379367,37,667,176, 54,2014-01-14 23:39:54.242609,292,311,996, 93,2014-01-18 17:28:38.366053,438,477,864, 61,2014-01-19 20:17:09.629686,131,74,81, 14,2014-01-12 23:40:54.809578,939,158,230, 4,2014-01-13 04:25:01.309435,871,888,98, 7,2014-01-15 09:30:25.27235,413,433,428, 14,2014-01-19 16:37:33.831525,781,685,300, 93,2014-01-12 15:17:14.89202,727,774,70, 7,2014-01-12 05:37:19.558807,734,963,22, 7,2014-01-17 22:19:03.976126,697,626,777, 7,2014-01-11 01:15:49.406035,41,197,142, 61,2014-01-16 08:15:45.036891,927,697,938, 61,2014-01-18 11:28:06.004595,486,365,624, 61,2014-01-16 13:39:23.593973,974,672,48, 14,2014-01-21 03:33:45.623852,266,565,516, 93,2014-01-16 03:16:18.76691,513,696,351, 7,2014-01-14 18:18:12.705697,850,889,641, 4,2014-01-15 16:00:49.017507,204,175,762, 14,2014-01-14 12:27:46.021819,766,826,964, 14,2014-01-18 12:21:39.1682,23,540,259, 93,2014-01-13 12:52:58.381269,543,882,783, 7,2014-01-18 20:03:57.580995,325,515,6, 4,2014-01-20 15:14:22.516226,238,153,691, 54,2014-01-17 21:24:06.858122,690,488,185, 7,2014-01-16 09:05:52.984266,746,796,757, 14,2014-01-15 21:58:54.309254,29,190,215, 93,2014-01-18 03:43:45.376107,988,330,384, 93,2014-01-11 23:24:10.87266,500,50,872, 14,2014-01-11 12:01:22.471024,824,774,626, 93,2014-01-18 14:48:57.740087,184,896,564, 61,2014-01-12 05:17:14.252028,230,146,25, 93,2014-01-15 15:41:16.713633,765,774,582, 14,2014-01-16 02:17:36.106141,258,184,712, 93,2014-01-21 05:34:23.881715,788,592,962, 14,2014-01-17 19:22:11.52321,561,484,609, 93,2014-01-18 10:42:37.415995,497,296,493, 93,2014-01-15 09:04:49.154065,407,721,370, 61,2014-01-16 05:59:45.354736,384,263,422, 4,2014-01-20 05:59:52.308015,63,627,935, 93,2014-01-16 18:42:08.712872,369,695,150, 7,2014-01-12 22:18:49.330862,525,334,364, 54,2014-01-20 11:15:41.714469,799,564,359, 93,2014-01-11 20:57:53.749763,578,411,884, 7,2014-01-20 13:46:55.335876,908,718,237, 7,2014-01-15 18:55:05.308442,525,381,528, 14,2014-01-11 18:31:39.374033,909,752,692, 93,2014-01-15 11:03:14.102786,629,462,189, 7,2014-01-19 17:13:18.014557,229,517,845, 7,2014-01-16 14:32:28.391541,526,164,503, 93,2014-01-19 03:31:00.124605,794,669,378, 7,2014-01-16 23:34:57.182757,178,683,193, 61,2014-01-19 07:25:26.772809,179,713,526, 93,2014-01-16 17:34:57.7056,527,952,249, 61,2014-01-16 08:49:19.698983,171,288,272, 54,2014-01-15 22:49:33.63093,121,757,562, 93,2014-01-11 20:40:50.689866,98,144,884, 61,2014-01-11 00:48:14.008239,913,845,876, 93,2014-01-12 20:33:19.007038,1,778,178, 61,2014-01-13 00:05:01.562526,576,122,198, 93,2014-01-11 16:49:36.479262,514,298,918, 14,2014-01-20 15:22:16.747544,354,937,985, 14,2014-01-14 09:22:15.814554,570,895,905, 14,2014-01-16 12:30:53.192895,635,692,279, 4,2014-01-15 11:39:52.853684,884,642,293, 61,2014-01-14 08:56:39.696269,989,658,408, 4,2014-01-13 
01:53:04.716106,852,883,284, 61,2014-01-12 16:22:30.269679,196,414,726, 14,2014-01-18 22:01:28.850332,595,621,598, 7,2014-01-18 11:52:50.070843,67,919,902, 93,2014-01-11 16:22:22.578114,132,737,233, 54,2014-01-14 10:43:37.563624,751,834,119, 4,2014-01-20 14:11:39.402124,538,90,681, 93,2014-01-10 21:38:04.292582,916,626,305, 54,2014-01-15 11:41:31.313386,531,384,395, 61,2014-01-19 21:58:34.738,430,229,48, 14,2014-01-15 20:33:09.601024,69,678,688, 14,2014-01-16 10:13:10.68742,784,299,915, 7,2014-01-14 03:01:48.840786,673,267,674, 4,2014-01-14 07:46:27.615581,820,531,171, 7,2014-01-11 18:45:39.07896,208,665,5, 4,2014-01-12 00:32:48.965392,435,372,119, 7,2014-01-20 11:21:24.798757,703,147,15, 7,2014-01-20 06:11:05.85177,271,155,200, 61,2014-01-17 22:07:46.670992,818,71,251, 61,2014-01-15 14:10:44.497742,695,244,336, 61,2014-01-14 23:00:39.483119,188,539,398, 7,2014-01-18 22:48:37.361277,447,447,979, 7,2014-01-15 18:58:58.506399,748,581,729, 4,2014-01-16 23:33:58.490157,765,340,539, 14,2014-01-21 02:53:38.923803,979,276,696, 7,2014-01-16 15:48:34.98566,26,125,787, 93,2014-01-16 08:56:15.237701,394,537,885, 4,2014-01-14 06:15:54.738776,412,479,704, 4,2014-01-11 22:19:28.178975,284,726,370, 93,2014-01-21 00:36:08.091804,218,808,225, 54,2014-01-17 19:12:34.435045,417,762,256, 93,2014-01-14 04:12:32.895081,900,999,837, 4,2014-01-12 10:58:38.361483,5,112,978, 93,2014-01-15 11:14:03.285797,351,756,231, 4,2014-01-11 10:05:22.966343,79,510,305, 54,2014-01-13 07:21:00.939597,168,417,255, 54,2014-01-19 01:57:40.849421,808,373,626, 61,2014-01-21 04:17:02.368468,248,507,446, 4,2014-01-13 08:59:05.232178,121,36,137, 93,2014-01-13 07:39:12.162806,9,168,318, 7,2014-01-19 20:15:37.106468,272,938,981, 14,2014-01-18 09:32:14.833621,8,703,319, 7,2014-01-18 21:52:22.850646,265,811,207, 54,2014-01-12 17:17:25.947254,437,872,774, 61,2014-01-11 11:18:42.449202,641,596,240, 54,2014-01-19 20:38:01.929606,987,266,975, 7,2014-01-13 21:50:14.912645,963,746,531, 14,2014-01-21 02:40:07.24796,355,758,305, 93,2014-01-18 20:49:07.781795,302,299,82, 7,2014-01-20 23:58:01.583637,294,575,131, 93,2014-01-15 10:50:51.746121,482,497,483, 4,2014-01-12 13:49:47.264915,534,345,341, 4,2014-01-18 16:46:38.944914,102,934,897, 93,2014-01-20 09:49:50.25252,322,988,255, 54,2014-01-18 17:23:45.755071,983,260,226, 93,2014-01-18 13:40:17.869135,835,147,860, 4,2014-01-15 19:38:25.23818,418,384,764, 61,2014-01-13 20:20:00.993191,26,964,720, 93,2014-01-11 13:56:12.607912,154,274,808, 61,2014-01-16 21:57:53.417155,121,944,63, 14,2014-01-13 14:56:09.084995,717,128,490, 7,2014-01-18 13:08:47.042957,591,410,874, 54,2014-01-20 06:10:26.312656,516,22,224, 54,2014-01-15 05:54:47.446478,1000,191,992, 14,2014-01-12 18:22:50.328754,148,64,162, 54,2014-01-20 20:15:49.278999,854,435,488, 7,2014-01-17 17:15:48.386494,294,164,985, 93,2014-01-21 00:20:31.178174,63,839,263, 14,2014-01-20 18:32:51.647467,338,847,307, 7,2014-01-20 06:14:53.618673,15,338,467, 7,2014-01-13 01:59:43.3414,137,967,456, 4,2014-01-19 08:48:28.753933,481,323,299, 14,2014-01-17 09:47:08.452294,220,431,872, 7,2014-01-21 03:52:06.192046,432,623,181, 93,2014-01-10 20:05:54.701187,397,275,641, 54,2014-01-18 01:05:50.901916,98,76,426, 7,2014-01-19 18:30:08.121652,613,741,284, 14,2014-01-13 21:56:09.613832,533,364,3, 93,2014-01-17 21:45:58.149876,866,268,784, 54,2014-01-17 09:19:15.903447,26,205,12, 7,2014-01-13 15:54:11.19747,770,959,303, 54,2014-01-12 02:36:49.895997,390,899,728, 93,2014-01-19 03:09:03.16836,2,739,62, 7,2014-01-11 02:40:50.142802,566,217,667, 7,2014-01-11 
06:26:40.148516,245,558,497, 93,2014-01-16 14:32:48.923851,799,903,529, 4,2014-01-18 20:21:08.011939,404,177,29, 4,2014-01-16 06:05:05.387116,707,543,817, 93,2014-01-19 14:52:49.917042,392,678,879, 61,2014-01-19 14:17:20.61985,103,836,853, 93,2014-01-11 22:02:58.804271,260,245,438, 4,2014-01-11 23:48:59.002036,186,141,21, 93,2014-01-16 21:26:07.662807,13,965,717, 14,2014-01-10 22:13:25.116926,619,465,562, 93,2014-01-16 09:43:46.448515,964,641,883, 93,2014-01-18 19:48:57.99198,748,604,779, 14,2014-01-20 22:29:14.395926,873,469,621, 14,2014-01-12 20:59:34.835008,458,850,467, 7,2014-01-18 14:09:29.170154,161,198,383, 61,2014-01-20 11:02:06.043392,818,978,198, 93,2014-01-11 21:14:28.454101,18,32,237, 4,2014-01-20 20:09:12.511554,831,615,91, 4,2014-01-18 13:50:34.797326,253,963,205, 4,2014-01-18 11:01:36.906395,34,121,46, 14,2014-01-20 18:01:18.7036,220,708,306, 93,2014-01-18 13:56:29.498514,394,402,241, 61,2014-01-15 06:07:27.80831,993,691,697, 54,2014-01-19 06:31:26.825671,336,395,213, 14,2014-01-11 05:52:39.112346,816,867,831, 4,2014-01-11 21:53:25.958605,886,236,265, 7,2014-01-15 09:50:42.729118,375,728,676, 54,2014-01-14 01:46:50.310234,580,951,800, 93,2014-01-13 04:30:15.854601,373,378,176, 7,2014-01-13 06:59:45.897897,347,448,833, 93,2014-01-14 08:27:40.453036,175,437,855, 4,2014-01-13 14:56:56.003536,487,21,903, 61,2014-01-19 01:32:34.821748,678,433,699, 61,2014-01-11 22:48:48.464975,492,825,166, 61,2014-01-19 01:02:01.390651,910,675,510, 61,2014-01-17 10:25:24.73879,84,621,420, 14,2014-01-20 17:06:09.084825,460,345,31, 4,2014-01-20 03:05:00.194922,332,433,25, 93,2014-01-18 14:14:23.740826,756,629,581, 93,2014-01-16 08:32:16.747632,110,6,484, 4,2014-01-20 05:18:25.312268,661,452,264, 14,2014-01-13 17:58:10.189341,504,82,546, 7,2014-01-13 22:21:14.739612,218,626,538, 14,2014-01-19 21:47:39.708193,209,388,144, 4,2014-01-15 18:57:45.024768,499,426,191, 54,2014-01-11 06:30:43.910185,137,359,582, 4,2014-01-19 02:49:45.752004,780,417,657, 7,2014-01-16 20:12:13.478869,852,2,709, 54,2014-01-10 20:39:56.42174,218,478,981, 14,2014-01-16 10:40:20.54933,255,875,906, 54,2014-01-14 01:13:50.385264,261,563,19, 93,2014-01-20 18:41:15.125758,261,647,953, 14,2014-01-13 18:36:50.048262,93,228,864, 14,2014-01-18 11:21:18.193993,402,865,419, 4,2014-01-18 19:12:41.951429,252,706,965, 7,2014-01-14 04:29:29.160608,808,384,304, 93,2014-01-19 13:14:44.152597,779,979,728, 61,2014-01-12 23:03:24.680547,999,92,737, 7,2014-01-17 10:16:19.470843,371,726,906, 61,2014-01-11 11:45:00.007198,832,866,444, 54,2014-01-15 10:03:10.578444,287,470,991, 54,2014-01-20 22:43:59.923879,19,707,418, 93,2014-01-14 06:41:56.010734,610,791,158, 93,2014-01-13 05:35:45.400192,51,807,23, 4,2014-01-11 15:32:48.388854,424,378,480, 54,2014-01-12 01:43:57.401385,750,580,473, 61,2014-01-19 20:01:10.138981,53,116,571, 7,2014-01-11 02:38:57.473679,904,414,501, 93,2014-01-10 22:48:57.596726,440,504,620, 14,2014-01-17 04:15:33.879807,568,410,47, 93,2014-01-16 15:11:14.22173,194,476,268, 4,2014-01-20 08:07:22.908994,689,533,2, 54,2014-01-20 02:13:44.069567,328,221,707, 4,2014-01-19 17:32:28.961343,150,692,718, 7,2014-01-18 23:55:02.617186,678,74,663, 14,2014-01-14 15:11:29.094334,677,827,192, 54,2014-01-20 04:03:12.871528,910,457,748, 61,2014-01-16 20:44:48.369191,704,626,302, 14,2014-01-20 15:23:42.573203,687,758,862, 54,2014-01-20 04:43:09.293687,871,301,680, 4,2014-01-12 01:25:08.91894,356,810,878, 4,2014-01-13 10:37:32.958467,50,690,665, 14,2014-01-19 17:24:24.419446,750,98,189, 93,2014-01-15 00:01:58.967201,131,463,967, 61,2014-01-21 
01:58:51.152459,466,6,210, 61,2014-01-17 06:37:06.370875,838,310,689, 61,2014-01-18 08:31:28.127809,380,635,9, 54,2014-01-19 09:13:35.305056,663,303,961, 61,2014-01-19 09:40:31.051422,469,254,388, 4,2014-01-14 12:47:47.599071,484,696,262, 93,2014-01-20 00:58:35.312674,714,156,518, 4,2014-01-13 13:43:41.630285,757,124,56, 93,2014-01-14 05:31:47.52295,379,610,332, 7,2014-01-13 01:40:31.323408,855,12,179, 93,2014-01-15 23:19:27.030477,686,198,78, 61,2014-01-15 01:04:35.912223,954,311,545, 61,2014-01-14 07:24:28.725213,427,817,218, 54,2014-01-14 13:20:37.169877,701,570,733, 61,2014-01-15 07:43:33.385902,263,137,638, 93,2014-01-14 10:13:26.32194,298,170,608, 93,2014-01-20 21:36:11.049684,222,448,87, 4,2014-01-21 02:54:47.607632,3,919,896, 93,2014-01-13 12:20:49.230933,285,826,193, 93,2014-01-19 17:49:55.11925,795,557,550, 93,2014-01-19 14:27:16.569394,286,119,375, 4,2014-01-11 06:15:51.84812,762,629,308, 54,2014-01-13 03:01:24.213584,688,13,550, 61,2014-01-18 12:30:29.440922,769,179,930, 7,2014-01-17 07:00:40.217729,4,958,435, 61,2014-01-12 12:25:06.786788,496,561,372, 14,2014-01-17 11:13:38.734609,771,822,617, 61,2014-01-18 12:25:49.136669,749,755,579, 7,2014-01-15 03:02:39.745674,534,894,642, 93,2014-01-15 22:38:03.154055,326,707,821, 4,2014-01-12 06:27:48.103871,879,221,345, 54,2014-01-14 23:01:30.898132,939,939,936, 7,2014-01-11 23:15:09.52493,2,64,620, 4,2014-01-19 18:59:16.23168,831,965,953, 14,2014-01-13 02:15:06.203607,153,976,777, 14,2014-01-20 12:55:40.576771,232,538,375, 14,2014-01-13 01:47:03.830751,157,901,274, 14,2014-01-11 21:13:41.516281,83,82,832, 7,2014-01-12 20:39:22.207056,130,10,673, 93,2014-01-16 11:18:51.35412,874,245,723, 93,2014-01-14 02:54:12.839689,458,423,553, 7,2014-01-17 23:58:49.237532,202,818,729, 54,2014-01-20 16:23:27.266343,60,219,65, 93,2014-01-17 14:18:41.564902,355,766,800, 54,2014-01-11 07:19:26.407409,416,543,978, 61,2014-01-14 18:07:00.652245,590,748,972, 61,2014-01-21 04:32:07.886841,792,713,297, 54,2014-01-10 22:55:37.457513,826,396,205, 61,2014-01-14 15:01:48.260296,199,610,456, 54,2014-01-13 10:52:57.117774,747,394,371, 7,2014-01-19 20:45:32.576763,484,324,607, 4,2014-01-12 23:29:04.82969,561,881,844, 61,2014-01-13 21:08:48.966312,802,694,206, 61,2014-01-11 17:46:56.790767,564,159,962, 61,2014-01-20 15:59:34.270611,728,472,111, 7,2014-01-20 08:09:29.184042,522,569,991, 14,2014-01-13 10:12:03.577554,283,633,716, 93,2014-01-16 21:13:13.00564,783,39,564, 93,2014-01-17 14:35:18.32071,122,453,608, 54,2014-01-17 17:14:43.323228,528,176,490, 61,2014-01-11 13:51:16.159695,95,399,386, 93,2014-01-19 01:03:06.42458,680,113,241, 54,2014-01-11 10:16:14.221779,652,852,630, 93,2014-01-12 17:06:25.685044,361,483,44, 14,2014-01-17 14:02:22.65626,576,881,770, 7,2014-01-13 16:31:20.425386,79,294,355, 54,2014-01-12 00:02:06.261815,832,173,11, 4,2014-01-19 19:49:26.48743,704,356,750, 61,2014-01-14 17:45:01.941668,903,132,258, 61,2014-01-14 00:41:28.46887,494,878,775, 54,2014-01-15 01:08:17.84155,596,373,642, 7,2014-01-18 00:39:14.781357,187,817,876, 54,2014-01-21 04:40:17.706403,603,242,896, 4,2014-01-14 11:31:45.107893,939,304,91, 14,2014-01-14 08:57:56.346677,48,807,143, 14,2014-01-11 05:59:44.114231,48,627,504, 61,2014-01-18 09:38:45.760136,552,405,783, 61,2014-01-14 07:30:04.233519,91,614,544, 7,2014-01-11 08:55:21.571744,34,947,784, 4,2014-01-11 18:40:34.020433,979,812,462, 93,2014-01-16 22:23:01.35171,442,979,325, 14,2014-01-20 09:40:54.148507,731,428,439, 4,2014-01-13 22:09:38.850123,322,558,143, 61,2014-01-19 23:31:50.318023,951,490,120, 93,2014-01-10 
21:27:50.939274,273,579,802, 54,2014-01-13 08:09:13.121153,544,954,250, 7,2014-01-19 01:41:19.502065,116,153,63, 14,2014-01-13 11:39:54.517247,961,458,949, 14,2014-01-19 09:22:26.126793,790,690,978, 93,2014-01-15 10:16:37.822775,90,187,450, 93,2014-01-20 08:54:37.840475,668,990,609, 4,2014-01-20 03:13:42.286487,273,320,844, 4,2014-01-13 05:19:44.247355,813,771,141, 61,2014-01-20 23:10:52.062255,673,425,97, 7,2014-01-11 14:20:07.971531,932,688,774, 54,2014-01-19 23:22:06.903616,457,915,795, 54,2014-01-13 09:42:12.487642,785,992,539, 7,2014-01-12 18:22:14.233345,130,908,576, 93,2014-01-18 13:11:33.391046,367,593,264, 7,2014-01-17 07:27:14.429309,241,712,864, 4,2014-01-15 23:03:42.702216,828,461,99, 93,2014-01-12 08:19:51.232595,526,217,773, 4,2014-01-14 02:06:29.211085,929,180,356, 7,2014-01-15 21:44:00.409037,749,704,459, 14,2014-01-15 23:51:36.340488,807,917,104, 4,2014-01-17 15:04:25.557763,55,535,974, 4,2014-01-16 07:43:44.523269,851,213,946, 61,2014-01-13 03:30:22.101044,731,353,304, 61,2014-01-21 02:34:29.791281,975,593,607, 61,2014-01-16 20:39:06.095013,899,976,625, 4,2014-01-14 02:10:56.121476,594,179,975, 61,2014-01-16 18:57:31.142992,464,158,654, 93,2014-01-16 00:20:00.24352,593,115,140, 61,2014-01-17 04:20:34.972018,456,384,374, 93,2014-01-15 12:29:21.461015,806,633,304, 93,2014-01-16 01:47:51.183213,688,474,433, 7,2014-01-19 16:29:48.093171,915,793,638, 14,2014-01-13 08:10:40.963079,306,844,933, 54,2014-01-18 17:27:45.70046,281,53,238, 4,2014-01-17 19:52:14.219965,375,189,766, 93,2014-01-17 22:27:18.785854,896,553,924, 7,2014-01-17 20:22:23.540935,744,772,277, 54,2014-01-16 17:05:56.506452,493,693,48, 61,2014-01-20 07:47:03.033209,237,685,512, 4,2014-01-17 13:33:15.603609,532,391,131, 54,2014-01-17 11:26:04.478401,329,541,347, 61,2014-01-19 01:09:09.937244,934,468,529, 61,2014-01-20 03:15:28.091251,315,325,686, 54,2014-01-19 09:48:18.711747,13,658,130, 54,2014-01-16 08:20:43.32829,526,844,94, 61,2014-01-16 04:42:42.52098,409,667,593, 93,2014-01-14 02:52:01.414381,733,220,374, 54,2014-01-17 20:40:34.561303,601,149,650, 54,2014-01-19 10:49:11.732066,781,176,864, 7,2014-01-19 04:36:01.823418,532,580,715, 4,2014-01-12 14:32:10.901792,71,261,573, 14,2014-01-15 19:53:37.289828,228,733,353, 61,2014-01-14 06:19:46.346686,603,763,748, 61,2014-01-14 22:02:33.002834,258,633,902, 93,2014-01-15 16:28:07.081109,365,617,773, 54,2014-01-20 06:58:52.4417,701,854,984, 61,2014-01-18 04:13:29.12473,895,20,339, 4,2014-01-11 05:25:38.224521,774,120,133, 61,2014-01-15 01:18:52.68522,115,839,294, 61,2014-01-14 02:34:04.096748,804,972,161, 4,2014-01-15 21:54:59.685535,241,121,449, 54,2014-01-20 07:06:43.868432,994,633,670, 61,2014-01-12 13:03:52.18992,94,373,66, 54,2014-01-18 10:05:40.649034,704,899,31, 14,2014-01-17 18:34:29.568893,23,193,19, 54,2014-01-19 12:56:06.409883,868,469,437, 4,2014-01-15 02:32:59.434889,27,331,180, 54,2014-01-14 08:56:53.110247,248,380,253, 61,2014-01-15 00:02:02.916335,330,792,526, 93,2014-01-14 04:20:02.468517,933,34,207, 93,2014-01-21 02:30:08.713856,180,98,192, 93,2014-01-11 05:28:07.394736,524,928,837, 14,2014-01-11 23:29:12.40576,375,2,153, 4,2014-01-19 23:45:36.805108,382,204,482, 4,2014-01-19 19:16:26.106902,149,385,802, 7,2014-01-17 11:49:55.734049,974,442,253, 54,2014-01-14 22:28:19.326088,283,33,764, 14,2014-01-12 16:08:27.521283,203,594,23, 7,2014-01-14 02:30:30.295353,544,270,193, 7,2014-01-13 03:17:31.058153,290,779,148, 7,2014-01-21 00:44:29.344701,403,728,754, 61,2014-01-15 21:02:41.197564,252,349,628, 14,2014-01-18 03:11:08.347981,563,691,966, 93,2014-01-14 
01:04:15.691388,194,907,276, 61,2014-01-19 23:05:14.200398,723,435,241, 93,2014-01-12 13:39:15.429509,601,521,165, 7,2014-01-13 02:03:08.133087,77,498,58, 4,2014-01-16 21:18:43.325128,230,980,284, 54,2014-01-12 23:04:53.65403,997,733,69, 4,2014-01-17 07:22:00.818307,623,168,22, 4,2014-01-20 03:52:47.421876,534,810,79, 61,2014-01-18 00:59:53.339566,934,93,734, 61,2014-01-16 08:28:44.687158,884,347,518, 14,2014-01-11 10:56:39.611796,287,888,700, 61,2014-01-15 05:05:33.9886,351,282,489, 4,2014-01-12 21:03:14.256051,226,395,708, 14,2014-01-20 03:52:46.021679,665,952,76, 7,2014-01-19 11:38:33.423906,399,165,459, 54,2014-01-16 10:00:07.366298,645,173,493, 14,2014-01-13 21:54:48.938433,882,909,676, 14,2014-01-12 09:58:35.892423,593,247,303, 14,2014-01-16 06:30:16.080155,761,462,395, 61,2014-01-14 07:22:56.333171,690,423,717, 14,2014-01-13 13:27:48.298183,810,257,500, 93,2014-01-15 00:15:52.885681,479,20,984, 7,2014-01-12 20:39:22.440073,582,650,1, 93,2014-01-20 05:17:44.032232,604,868,227, 7,2014-01-19 02:44:12.21177,785,508,931, 7,2014-01-14 16:47:49.961356,911,147,898, 54,2014-01-13 01:48:14.328004,797,961,223, 4,2014-01-11 00:01:43.270341,314,559,99, 4,2014-01-14 11:32:19.306057,982,100,227, 14,2014-01-18 02:50:55.525568,19,17,979, 14,2014-01-18 07:12:51.618322,618,847,129, 4,2014-01-17 16:36:34.997445,993,418,180, 61,2014-01-16 19:56:09.725966,265,309,433, 7,2014-01-20 00:52:07.047831,576,227,891, 61,2014-01-19 22:39:43.130532,858,716,224, 14,2014-01-12 11:14:53.051513,681,664,215, 93,2014-01-11 17:57:00.701861,721,169,558, 14,2014-01-16 00:01:43.949258,167,698,100, 93,2014-01-11 09:07:40.47339,201,530,42, 7,2014-01-18 22:56:54.041846,856,278,574, 93,2014-01-11 02:30:28.636417,20,595,621, 61,2014-01-12 00:04:20.085604,653,453,909, 4,2014-01-12 22:02:28.030445,841,492,844, 61,2014-01-13 03:33:42.892468,744,735,465, 54,2014-01-10 21:57:06.107283,459,503,365, 54,2014-01-11 03:41:01.454352,547,1000,657, 4,2014-01-18 17:33:50.259185,431,500,431, 54,2014-01-13 23:51:55.045717,668,749,816, 93,2014-01-12 17:39:37.346775,411,406,618, 7,2014-01-13 18:04:06.33934,533,533,620, 7,2014-01-17 11:14:51.379306,366,676,55, 61,2014-01-15 11:07:25.644957,612,729,945, 93,2014-01-17 22:19:59.225022,228,479,537, 17,2014-01-19 11:54:13.819379,282,110,808, 51,2014-01-14 10:25:09.677609,406,590,490, 51,2014-01-15 19:04:11.43721,345,146,371, 51,2014-01-12 22:42:03.781154,432,568,970, 17,2014-01-16 16:13:24.005614,957,880,919, 17,2014-01-15 23:05:54.707551,114,889,153, 51,2014-01-16 14:14:23.087211,999,137,412, 51,2014-01-13 13:04:19.531181,625,616,652, 17,2014-01-13 00:18:46.325873,406,847,314, 17,2014-01-13 00:50:58.085076,540,47,347, 51,2014-01-19 13:00:29.257147,932,999,935, 17,2014-01-11 19:10:53.374123,615,805,883, 51,2014-01-11 17:30:41.215608,32,289,840, 51,2014-01-21 04:15:22.30866,869,741,202, 17,2014-01-12 17:07:54.075984,702,797,308, 51,2014-01-16 21:32:25.164867,699,863,113, 17,2014-01-11 07:23:02.782468,415,103,959, 17,2014-01-20 20:04:48.11783,713,538,840, 51,2014-01-17 04:02:53.801283,112,987,4, 17,2014-01-12 11:27:22.868072,310,617,829, 17,2014-01-12 12:07:16.148694,233,17,925, 17,2014-01-19 11:36:36.69417,385,989,232, 17,2014-01-12 13:24:28.975355,186,48,212, 17,2014-01-12 19:48:17.603046,13,371,406, 51,2014-01-16 23:10:26.953355,524,268,811, 51,2014-01-15 17:16:24.021492,326,208,287, 51,2014-01-14 17:27:54.950239,344,484,511, 51,2014-01-19 21:14:33.292695,366,265,683, 51,2014-01-11 22:31:15.400798,312,26,622, 17,2014-01-19 08:35:20.595197,924,542,742, 17,2014-01-16 13:34:32.518135,350,492,277, 
17,2014-01-20 14:25:29.220177,885,831,63, 51,2014-01-12 13:00:30.272805,436,478,618, 17,2014-01-11 02:38:43.955346,424,177,26, 51,2014-01-12 07:07:33.001331,458,370,555, 17,2014-01-18 09:13:54.278419,746,688,620, 51,2014-01-16 05:44:38.662897,395,106,170, 51,2014-01-18 01:21:56.088962,879,745,116, 17,2014-01-21 02:18:13.809601,694,591,868, 17,2014-01-18 10:03:24.98877,287,573,929, 17,2014-01-20 06:12:54.174038,683,752,3, 51,2014-01-19 09:18:43.067167,555,923,60, 51,2014-01-19 09:14:18.362893,404,410,778, 51,2014-01-21 03:43:35.389646,397,538,243, 17,2014-01-19 07:34:05.375827,832,798,133, 17,2014-01-10 20:22:12.439296,349,933,720, 17,2014-01-16 19:16:00.554932,813,317,958, 17,2014-01-19 18:57:08.158295,6,2,238, 51,2014-01-20 20:27:00.557126,769,733,633, 17,2014-01-12 17:18:54.356216,782,136,539, 17,2014-01-11 00:24:31.026367,272,606,361, 17,2014-01-12 02:34:16.70582,375,689,10, 51,2014-01-10 22:55:31.050385,53,658,879, 51,2014-01-12 17:49:00.002142,369,561,415, 17,2014-01-14 02:22:34.309284,75,392,580, 17,2014-01-17 02:05:58.003741,20,160,643, 51,2014-01-17 15:05:24.023633,680,57,839, 17,2014-01-17 23:50:29.259523,70,91,728, 51,2014-01-15 17:20:31.296855,838,683,180, 17,2014-01-18 17:36:39.424431,957,964,724, 51,2014-01-16 02:25:49.85472,244,151,290, 51,2014-01-11 00:55:03.814991,1,548,470, 51,2014-01-18 02:02:08.644608,612,31,859, 17,2014-01-17 19:26:20.127945,738,678,22, 51,2014-01-11 07:33:47.770336,439,560,435, 17,2014-01-19 13:09:41.646359,3,103,33, 51,2014-01-14 22:40:14.406364,994,326,880, 17,2014-01-16 17:18:26.433233,469,550,805, 51,2014-01-16 08:31:37.735321,307,286,397, 17,2014-01-14 18:58:28.216383,848,216,414, 17,2014-01-13 21:21:51.422002,442,468,745, 17,2014-01-15 08:44:31.909359,703,288,414, 51,2014-01-12 22:17:11.28355,989,77,436, 17,2014-01-12 00:36:09.785314,632,131,752, 51,2014-01-15 06:28:07.299005,100,673,925, 51,2014-01-10 23:51:16.659376,876,805,976, 51,2014-01-12 00:58:22.22461,510,142,592, 51,2014-01-21 05:44:07.853937,857,483,785, 17,2014-01-19 22:48:24.818091,426,698,576, 51,2014-01-11 15:25:22.781735,122,886,161, 51,2014-01-12 17:03:02.210572,298,19,300, 17,2014-01-20 03:12:55.844457,760,879,284, 17,2014-01-12 21:59:39.487974,696,967,947, 51,2014-01-12 19:58:33.260958,965,683,790, 17,2014-01-11 15:01:55.846599,543,177,287, 51,2014-01-16 04:22:13.797258,209,685,56, 17,2014-01-19 02:04:31.265118,273,949,854, 17,2014-01-18 10:07:19.870232,941,826,968, 17,2014-01-12 22:12:43.056781,147,10,28, 51,2014-01-13 13:25:02.561973,286,564,308, 51,2014-01-15 21:43:59.294664,525,18,604, 51,2014-01-18 04:38:32.911501,298,943,530, 51,2014-01-13 18:20:06.376965,692,375,549, 17,2014-01-12 17:46:07.939691,20,879,297, 51,2014-01-14 18:04:53.039445,172,124,780, 17,2014-01-14 05:53:54.147301,171,961,934, 17,2014-01-11 00:55:49.586049,220,759,364, 17,2014-01-18 20:45:07.446228,586,752,17, 17,2014-01-20 03:12:20.580533,165,418,153, 17,2014-01-16 13:27:27.321371,551,535,852, 51,2014-01-12 09:43:35.662611,419,909,805, 51,2014-01-12 18:34:12.002535,590,402,86, 17,2014-01-21 02:11:59.23073,945,875,124, 17,2014-01-14 12:00:46.946161,255,521,941, 51,2014-01-13 23:10:21.78785,662,831,746, 51,2014-01-15 02:40:06.529735,390,160,530, 51,2014-01-14 15:52:03.605538,101,1000,841, 51,2014-01-15 04:08:44.012459,741,979,106, 17,2014-01-15 02:24:14.384092,42,273,513, 51,2014-01-13 08:40:28.423628,560,315,380, 17,2014-01-15 23:34:06.794615,820,762,247, 51,2014-01-16 23:27:16.594664,106,758,694, 17,2014-01-12 05:53:24.268504,455,870,624, 17,2014-01-18 01:33:46.282589,889,413,49, 51,2014-01-18 
23:25:49.855622,349,610,752, 51,2014-01-13 00:55:20.115103,868,746,240, 17,2014-01-12 23:56:00.079847,781,911,126, 51,2014-01-16 19:30:21.12074,85,137,29, 51,2014-01-20 15:02:39.985754,794,62,877, 51,2014-01-15 02:08:43.136628,467,466,166, 51,2014-01-19 12:55:23.682714,48,94,150, 51,2014-01-15 06:46:39.280418,40,627,231, 17,2014-01-12 00:47:16.048129,584,150,182, 51,2014-01-12 01:15:30.059677,895,895,395, 51,2014-01-17 04:32:47.220108,293,802,63, 17,2014-01-15 22:52:09.087993,73,159,111, 51,2014-01-15 11:09:24.206978,145,481,563, 17,2014-01-17 09:28:36.806157,106,610,703, 17,2014-01-13 13:37:16.534221,684,807,734, 17,2014-01-14 08:21:44.787511,162,135,56, 51,2014-01-12 16:56:04.127528,366,829,652, 17,2014-01-15 03:20:52.196832,575,783,571, 17,2014-01-16 06:55:56.790467,317,930,181, 51,2014-01-12 13:08:03.358258,96,717,119, 17,2014-01-18 19:21:39.142994,175,595,664, 51,2014-01-19 10:06:18.578316,398,608,517, 51,2014-01-16 19:48:09.888412,381,101,312, 17,2014-01-12 05:13:42.748531,313,696,12, 51,2014-01-13 08:15:02.591195,908,503,427, 51,2014-01-21 02:12:24.272503,38,914,13, 51,2014-01-14 17:54:11.172579,398,970,24, 17,2014-01-18 11:49:09.385809,398,814,531, 17,2014-01-16 19:39:40.867168,149,574,375, 17,2014-01-16 03:47:35.441083,146,263,218, 17,2014-01-15 07:22:55.668398,491,538,854, 17,2014-01-14 13:05:30.722789,955,445,303, 17,2014-01-18 08:42:55.556186,254,514,170, 17,2014-01-17 11:18:55.748245,71,137,799, 51,2014-01-20 12:35:51.843948,223,850,853, 51,2014-01-17 17:45:35.541942,592,856,996, 17,2014-01-11 07:27:38.884873,56,687,831, 17,2014-01-18 19:31:15.52666,810,360,733, 51,2014-01-11 18:32:14.822778,506,439,973, 51,2014-01-12 12:14:54.933002,931,284,705, 17,2014-01-20 00:46:45.586757,745,58,619, 17,2014-01-18 03:05:02.042886,993,359,166, 17,2014-01-17 15:07:04.020995,165,386,30, 17,2014-01-14 05:56:09.793735,819,945,758, 51,2014-01-14 06:33:38.849464,867,697,272, 51,2014-01-20 08:44:20.555217,980,222,941, 51,2014-01-17 18:17:54.581666,857,912,40, 17,2014-01-16 03:29:42.976992,544,73,48, 51,2014-01-14 06:05:12.752049,870,69,760, 17,2014-01-12 19:13:51.372132,506,723,961, 17,2014-01-17 20:37:46.335669,745,631,59, 17,2014-01-11 19:26:51.895043,183,638,537, 17,2014-01-10 23:20:09.950448,923,35,957, 51,2014-01-13 10:25:56.224081,298,757,180, 51,2014-01-13 04:40:34.643993,993,341,738, 17,2014-01-13 11:35:12.541643,317,885,272, 51,2014-01-13 06:38:20.496584,470,759,71, 51,2014-01-17 02:34:45.816572,662,743,355, 17,2014-01-21 03:24:21.927452,402,738,395, 17,2014-01-19 06:18:01.363752,72,511,190, 17,2014-01-12 00:22:21.257655,786,575,710, 17,2014-01-15 04:47:17.595851,275,302,434, 17,2014-01-12 13:23:32.086541,247,289,726, 17,2014-01-19 13:05:16.813842,544,248,742, 17,2014-01-11 10:06:13.344096,254,663,222, 17,2014-01-11 19:59:23.930488,825,758,169, 17,2014-01-16 00:50:52.356202,866,497,199, 17,2014-01-11 21:33:52.228969,365,655,419, 17,2014-01-19 19:30:39.457568,706,815,397, 51,2014-01-16 23:23:07.178981,158,698,456, 17,2014-01-13 13:48:47.162391,360,351,85, 51,2014-01-18 14:17:25.044325,696,73,776, 51,2014-01-13 20:28:09.221868,617,44,301, 17,2014-01-20 08:55:51.183385,612,101,946, 17,2014-01-11 14:13:34.83806,981,994,644, 17,2014-01-17 07:01:48.071331,910,602,821, 51,2014-01-19 11:40:11.739021,395,143,811, 51,2014-01-18 12:31:29.419726,313,604,116, 17,2014-01-12 04:31:31.048742,594,149,484, 17,2014-01-12 11:45:24.49107,967,551,416, 17,2014-01-20 11:45:20.791857,507,855,10, 17,2014-01-19 05:09:17.38441,102,130,237, 51,2014-01-13 11:12:16.386531,547,289,124, 17,2014-01-20 
15:05:30.742724,934,381,530, 51,2014-01-11 09:35:13.608492,663,837,534, 17,2014-01-15 19:52:51.030524,756,397,994, 51,2014-01-12 20:40:43.284367,296,276,675, 51,2014-01-13 20:13:34.105076,761,102,675, 17,2014-01-11 16:27:36.847095,277,389,277, 16,2014-01-12 18:05:05.212239,72,680,870, 16,2014-01-11 20:31:35.468828,634,285,538, 96,2014-01-12 20:49:58.10475,984,340,270, 16,2014-01-17 02:52:22.808089,712,717,401, 16,2014-01-13 13:55:07.555368,985,979,320, 16,2014-01-11 03:55:14.918592,132,291,445, 16,2014-01-17 16:58:36.152604,753,473,911, 96,2014-01-14 13:54:31.486275,775,551,560, 16,2014-01-16 08:46:07.274795,854,250,198, 96,2014-01-18 18:32:28.381573,143,340,125, 16,2014-01-13 03:25:10.943843,257,265,331, 16,2014-01-12 02:09:14.453775,383,76,396, 96,2014-01-11 02:21:15.543964,143,989,640, 16,2014-01-20 21:42:35.988168,818,467,117, 96,2014-01-15 02:37:23.676061,194,544,685, 16,2014-01-20 15:17:06.727768,59,432,848, 96,2014-01-11 05:56:10.826228,254,642,440, 96,2014-01-11 03:39:11.747393,494,299,252, 96,2014-01-18 20:57:18.466789,506,843,859, 16,2014-01-18 22:27:40.246373,10,438,290, 16,2014-01-12 12:10:42.796134,4,871,784, 96,2014-01-20 12:42:42.957858,200,37,544, 16,2014-01-18 04:13:01.03823,727,46,157, 16,2014-01-20 21:20:00.180545,903,826,458, 96,2014-01-12 17:54:59.344389,879,595,558, 96,2014-01-17 13:18:31.780954,135,969,863, 16,2014-01-11 00:55:13.789036,69,811,786, 96,2014-01-17 17:47:50.374912,154,40,343, 16,2014-01-19 13:59:15.065741,306,269,184, 96,2014-01-14 01:08:47.894112,961,954,405, 16,2014-01-18 14:15:27.222007,339,484,13, 96,2014-01-11 02:04:20.277979,80,17,865, 96,2014-01-15 01:40:23.362941,413,106,23, 16,2014-01-20 15:05:25.326757,286,69,441, 16,2014-01-17 08:56:43.086068,732,171,151, 96,2014-01-17 19:35:30.918727,466,927,846, 16,2014-01-20 23:00:40.245769,281,816,572, 16,2014-01-13 19:55:19.238672,266,429,303, 16,2014-01-11 03:30:02.405003,637,207,672, 96,2014-01-16 01:46:47.520563,471,507,910, 96,2014-01-11 08:27:47.620664,523,82,830, 96,2014-01-13 10:55:13.348846,902,748,40, 96,2014-01-17 07:56:01.974757,744,480,890, 96,2014-01-11 14:49:03.164627,235,601,97, 96,2014-01-13 02:37:49.337014,517,719,112, 96,2014-01-11 04:33:25.650819,602,764,536, 16,2014-01-11 00:06:09.892395,239,59,90, 16,2014-01-13 12:34:00.163242,495,360,661, 16,2014-01-11 12:12:37.398211,405,917,587, 96,2014-01-19 01:03:28.359183,384,864,199, 16,2014-01-11 05:01:40.409615,186,815,436, 96,2014-01-13 04:23:20.194766,535,950,586, 96,2014-01-18 07:46:11.31746,861,995,207, 96,2014-01-18 13:14:41.448264,178,77,707, 96,2014-01-12 19:43:20.37531,609,3,467, 96,2014-01-20 05:41:10.661849,424,413,448, 16,2014-01-14 20:33:13.229218,862,289,917, 96,2014-01-13 00:38:34.164767,843,120,842, 16,2014-01-16 17:29:01.03718,378,523,709, 96,2014-01-13 04:32:28.294957,758,743,889, 16,2014-01-16 05:47:22.058879,813,37,665, 16,2014-01-14 01:44:28.259188,73,588,539, 96,2014-01-13 10:36:48.572936,604,899,751, 96,2014-01-20 11:27:45.422239,661,253,686, 16,2014-01-13 10:49:53.586365,688,118,919, 16,2014-01-19 23:33:31.659423,649,564,817, 96,2014-01-17 01:03:16.340966,705,212,84, 16,2014-01-13 03:50:33.832133,97,841,673, 16,2014-01-12 13:28:50.898095,85,816,532, 96,2014-01-17 08:33:18.745968,449,972,595, 96,2014-01-18 09:37:21.352696,45,327,821, 96,2014-01-13 01:56:38.518758,452,66,621, 96,2014-01-19 23:28:32.094814,927,758,287, 16,2014-01-14 11:33:23.327453,486,939,827, 96,2014-01-13 20:45:41.683804,407,313,196, 96,2014-01-11 20:06:21.431828,772,240,203, 16,2014-01-14 20:06:48.978272,485,297,441, 96,2014-01-14 
00:51:51.576199,83,155,531, 16,2014-01-14 12:40:21.59507,717,128,898, 16,2014-01-15 12:19:26.376903,102,80,51, 96,2014-01-11 19:55:19.935801,92,528,220, 96,2014-01-14 21:42:02.005103,631,803,364, 96,2014-01-17 20:42:46.571669,295,777,609, 96,2014-01-19 07:41:31.253261,191,715,14, 96,2014-01-12 04:56:43.453366,457,508,250, 96,2014-01-19 20:26:06.946979,219,893,257, 16,2014-01-18 07:22:41.91511,762,368,836, 96,2014-01-16 05:29:56.682583,767,4,315, 16,2014-01-11 15:04:41.111746,136,116,385, 96,2014-01-13 18:51:42.952291,932,420,244, 96,2014-01-18 14:02:24.97796,108,95,760, 96,2014-01-17 00:52:03.171044,189,289,607, 16,2014-01-17 00:36:11.211897,260,883,189, 96,2014-01-21 04:39:13.550897,786,745,525, 96,2014-01-16 06:19:48.593281,823,947,13, 96,2014-01-19 15:26:04.798261,237,687,967, 96,2014-01-19 22:12:45.21032,564,472,942, 16,2014-01-12 01:23:04.934247,903,38,438, 16,2014-01-11 13:16:38.630395,758,1000,588, 96,2014-01-11 05:41:36.108834,945,960,963, 16,2014-01-18 13:56:23.680216,692,475,974, 16,2014-01-19 02:53:59.98309,592,583,903, 16,2014-01-13 11:38:14.627593,896,666,432, 16,2014-01-17 07:24:55.775029,604,507,537, 16,2014-01-12 08:27:23.310961,982,834,929, 16,2014-01-16 12:23:56.311397,305,348,494, 96,2014-01-18 07:31:17.206856,701,740,766, 16,2014-01-16 08:34:12.289233,80,886,758, 16,2014-01-19 17:15:47.888016,479,450,289, 96,2014-01-11 14:11:38.801926,95,880,120, 16,2014-01-21 00:53:38.666136,672,823,207, 96,2014-01-20 17:11:07.823817,212,19,280, 96,2014-01-15 15:53:40.807029,15,979,233, 16,2014-01-17 15:36:25.237805,531,601,571, 96,2014-01-18 18:52:39.077078,716,69,495, 96,2014-01-17 00:50:24.260395,974,332,54, 16,2014-01-16 06:02:32.185203,700,361,503, 96,2014-01-15 20:15:20.992189,920,720,623, 96,2014-01-12 00:20:20.943398,52,28,764, 16,2014-01-17 01:07:13.296949,418,845,535, 96,2014-01-18 19:07:03.944898,636,125,246, 96,2014-01-19 18:22:45.921358,146,188,821, 96,2014-01-12 19:59:16.467993,588,563,895, 96,2014-01-14 13:43:15.156795,951,186,650, 16,2014-01-19 17:01:59.472255,629,967,429, 16,2014-01-18 06:19:05.061274,171,132,276, 96,2014-01-12 23:09:19.955056,284,475,552, 16,2014-01-18 09:14:44.682994,776,195,215, 16,2014-01-19 11:42:09.995521,414,91,534, 96,2014-01-13 16:25:58.585451,116,806,254, 96,2014-01-18 18:56:20.791828,935,113,136, 96,2014-01-16 19:38:33.675735,308,428,698, 96,2014-01-11 13:19:58.568541,790,149,766, 96,2014-01-11 00:34:35.419421,113,478,698, 96,2014-01-12 21:03:29.450765,53,195,215, 96,2014-01-13 01:47:21.879503,529,796,67, 96,2014-01-16 16:58:31.731237,283,646,789, 16,2014-01-20 08:34:46.657622,188,628,732, 96,2014-01-18 14:21:34.169155,995,41,54, 96,2014-01-15 04:14:19.619253,573,332,857, 16,2014-01-21 02:46:25.459967,148,721,118, 96,2014-01-18 09:15:12.835292,955,528,658, 16,2014-01-14 15:25:27.44307,449,310,805, 96,2014-01-15 12:40:06.266996,737,683,830, 96,2014-01-14 18:51:38.073517,703,264,583, 96,2014-01-12 04:18:06.520148,149,84,890, 16,2014-01-11 07:30:30.52781,895,242,889, 96,2014-01-20 04:54:10.25872,48,973,95, 16,2014-01-17 04:33:27.512335,574,143,779, 16,2014-01-12 11:50:51.471207,330,452,857, 16,2014-01-16 00:01:23.555669,941,370,750, 96,2014-01-14 17:40:31.457233,215,280,838, 96,2014-01-11 00:13:37.392566,824,200,268, 16,2014-01-18 00:00:40.02408,594,901,121, 16,2014-01-18 11:23:46.614028,714,47,792, 16,2014-01-19 21:15:36.864821,793,571,944, 96,2014-01-15 00:19:45.085354,69,862,998, 96,2014-01-20 14:33:06.569084,680,117,173, 16,2014-01-17 00:30:21.547815,573,648,841, 16,2014-01-13 06:01:55.081294,708,736,347, 16,2014-01-13 
00:59:05.154535,255,746,946, 96,2014-01-14 13:26:42.339643,161,539,652, 96,2014-01-19 05:40:28.75703,291,848,887, 16,2014-01-13 18:19:03.723495,954,125,736, 96,2014-01-14 18:01:17.759482,553,209,486, 16,2014-01-10 20:43:58.208215,459,887,981, 16,2014-01-16 00:06:25.602998,717,170,771, 96,2014-01-20 14:59:49.49072,220,350,632, 16,2014-01-20 09:18:44.865835,339,542,70, 96,2014-01-13 08:27:59.772153,700,766,133, 96,2014-01-14 13:14:09.109972,363,808,361, 96,2014-01-20 06:05:10.325803,287,227,505, 16,2014-01-20 21:43:12.607865,794,560,875, 16,2014-01-18 08:39:36.553042,730,562,981, 16,2014-01-14 12:45:16.593218,233,504,815, 16,2014-01-14 10:34:50.681381,659,665,930, 16,2014-01-19 16:57:43.07319,94,166,135, 16,2014-01-15 00:15:47.121028,253,556,889, 96,2014-01-13 09:29:00.940101,820,808,345, 16,2014-01-15 15:31:10.585525,439,117,540, 96,2014-01-16 16:06:38.592235,184,704,263, 96,2014-01-18 13:30:24.49577,714,156,534, 96,2014-01-19 13:11:42.043178,970,775,338, 96,2014-01-16 20:20:15.984801,333,413,896, 96,2014-01-15 07:31:04.519851,639,163,682, 96,2014-01-16 18:35:28.657206,28,699,278, 16,2014-01-15 11:35:52.849622,706,8,775, 96,2014-01-19 11:50:49.605624,435,500,479, 96,2014-01-16 03:08:35.226709,769,62,730, 16,2014-01-11 06:06:14.397855,48,654,178, 96,2014-01-11 11:52:44.686919,731,639,723, 96,2014-01-18 08:07:40.381245,725,493,877, 16,2014-01-14 23:32:56.737498,677,263,151, 96,2014-01-19 21:33:13.443948,946,971,427, 16,2014-01-10 20:26:44.104741,282,543,620, 16,2014-01-18 21:34:14.49698,836,128,654, 96,2014-01-19 22:17:11.652163,924,0,71, 16,2014-01-16 00:33:09.708157,228,634,991, 16,2014-01-18 06:34:03.9877,978,101,935, 16,2014-01-19 01:35:56.518418,952,319,834, 16,2014-01-18 13:01:09.480311,928,384,262, 96,2014-01-11 13:48:13.097672,140,245,643, 16,2014-01-18 01:41:06.844221,56,491,390, 16,2014-01-18 04:44:22.088176,658,474,297, 16,2014-01-19 02:27:49.650714,280,416,580, 16,2014-01-11 08:26:23.437439,697,751,613, 16,2014-01-11 09:19:12.769557,903,396,842, 96,2014-01-17 13:25:32.723903,855,200,284, 96,2014-01-15 12:42:10.558467,287,264,368, 16,2014-01-13 22:48:13.709658,16,294,566, 16,2014-01-11 22:56:43.309848,299,781,633, 16,2014-01-10 22:48:49.150702,866,796,359, 96,2014-01-11 06:18:38.205847,182,467,727, 16,2014-01-20 16:08:25.353026,485,426,441, 96,2014-01-16 23:09:05.135503,982,195,331, 16,2014-01-15 17:49:42.725698,926,34,124, 98,2014-01-16 04:43:54.010232,288,27,757, 85,2014-01-11 04:44:57.985544,822,704,58, 85,2014-01-13 23:40:32.331322,755,543,100, 85,2014-01-11 01:52:29.236942,25,931,363, 98,2014-01-11 14:51:12.383399,88,767,537, 98,2014-01-14 15:33:17.01824,54,304,459, 85,2014-01-18 14:00:09.618606,838,476,37, 98,2014-01-15 18:24:09.120897,744,912,716, 85,2014-01-13 07:06:30.462607,42,377,739, 85,2014-01-18 14:26:53.723347,924,647,73, 85,2014-01-13 09:58:23.618296,418,843,731, 85,2014-01-11 23:23:42.11477,572,669,539, 85,2014-01-13 09:00:03.431504,0,196,110, 85,2014-01-20 20:32:27.605996,623,502,433, 85,2014-01-20 04:59:38.633188,250,900,212, 98,2014-01-21 02:01:12.912234,11,254,834, 85,2014-01-11 04:20:40.704086,778,811,145, 85,2014-01-17 00:40:45.477827,724,800,389, 98,2014-01-18 00:45:35.00041,516,257,539, 85,2014-01-19 10:48:30.3548,832,348,822, 98,2014-01-17 13:07:08.915266,519,643,682, 98,2014-01-18 14:04:47.770386,140,393,434, 98,2014-01-15 18:14:03.079123,971,821,145, 98,2014-01-11 19:49:19.474151,157,695,947, 98,2014-01-11 06:53:01.480045,802,482,501, 98,2014-01-16 21:10:46.388971,573,545,57, 98,2014-01-11 22:38:08.624852,940,857,920, 85,2014-01-11 
17:11:39.685892,879,746,169, 98,2014-01-16 07:19:11.741996,91,921,736, 85,2014-01-18 01:47:13.760775,269,899,731, 98,2014-01-16 15:01:22.412008,792,45,250, 85,2014-01-11 06:03:05.752228,720,686,329, 98,2014-01-18 10:32:11.746318,901,880,333, 98,2014-01-19 18:41:54.74333,346,761,877, 85,2014-01-11 11:55:34.989589,397,229,906, 85,2014-01-19 05:23:24.129717,598,857,258, 85,2014-01-13 04:15:11.76199,810,60,598, 98,2014-01-19 05:55:44.608195,24,378,590, 85,2014-01-13 17:47:33.250614,343,510,508, 85,2014-01-15 15:21:42.224597,980,135,678, 98,2014-01-16 14:22:38.331542,766,959,936, 98,2014-01-16 07:45:56.868909,590,723,592, 98,2014-01-16 18:45:24.339367,159,889,917, 98,2014-01-19 03:22:41.763465,771,895,63, 85,2014-01-15 22:18:24.475324,405,815,778, 85,2014-01-15 17:45:02.972974,87,589,796, 85,2014-01-18 23:23:54.675699,863,41,533, 98,2014-01-16 06:39:05.17941,760,823,76, 85,2014-01-11 12:25:48.450801,765,832,711, 85,2014-01-15 18:09:29.676109,471,587,928, 85,2014-01-14 11:27:35.53421,661,677,192, 98,2014-01-18 05:32:57.366067,54,797,418, 85,2014-01-13 02:14:17.446496,654,830,746, 98,2014-01-19 09:41:38.613333,22,718,352, 85,2014-01-19 05:22:16.840218,94,581,764, 85,2014-01-13 13:07:18.926959,736,944,542, 85,2014-01-15 00:52:25.002303,790,48,622, 98,2014-01-20 08:00:25.46549,867,352,212, 85,2014-01-14 10:18:58.612851,708,113,571, 98,2014-01-20 12:11:36.7443,988,473,806, 85,2014-01-17 03:47:39.226264,235,693,627, 85,2014-01-20 05:20:21.024859,683,197,202, 98,2014-01-20 22:14:42.496947,487,610,634, 85,2014-01-14 08:19:50.972582,994,981,559, 98,2014-01-18 18:02:15.768609,913,280,792, 98,2014-01-11 04:10:17.486535,988,869,35, 85,2014-01-12 07:43:15.102299,436,689,155, 98,2014-01-21 02:17:27.530599,250,532,111, 98,2014-01-19 14:06:02.09473,614,535,112, 98,2014-01-15 05:30:48.352913,7,670,273, 98,2014-01-15 11:39:09.755196,543,791,564, 85,2014-01-14 22:28:40.426272,584,72,385, 85,2014-01-20 17:16:45.222242,842,97,789, 98,2014-01-11 00:24:34.094563,664,630,193, 85,2014-01-12 19:51:22.189736,669,507,36, 98,2014-01-15 09:35:09.697566,658,987,232, 85,2014-01-15 22:09:37.067536,914,351,62, 98,2014-01-20 23:15:16.865435,399,975,805, 98,2014-01-20 20:14:14.876976,500,379,133, 85,2014-01-16 14:35:25.518338,798,108,925, 98,2014-01-15 11:24:46.541964,621,775,840, 85,2014-01-14 01:41:50.411187,16,951,162, 98,2014-01-13 14:08:22.884824,136,263,257, 85,2014-01-17 17:39:03.988459,694,223,31, 85,2014-01-12 05:23:29.02494,156,283,28, 85,2014-01-11 13:30:39.725041,489,40,516, 98,2014-01-20 10:46:22.915419,311,65,200, 98,2014-01-16 10:15:54.027244,751,392,954, 98,2014-01-21 01:31:05.19053,413,261,17, 85,2014-01-13 15:05:21.52869,211,505,174, 85,2014-01-15 16:27:30.771962,348,631,870, 98,2014-01-16 23:18:44.416794,274,214,130, 85,2014-01-12 14:25:42.553549,251,554,30, 85,2014-01-15 08:42:13.268908,790,59,262, 98,2014-01-20 11:38:35.389376,603,50,397, 98,2014-01-20 12:27:58.322158,590,410,562, 98,2014-01-15 16:52:30.755444,667,65,190, 98,2014-01-11 13:21:50.491675,50,345,594, 98,2014-01-20 08:45:25.852756,915,952,285, 98,2014-01-14 00:58:32.850174,854,622,633, 98,2014-01-15 22:52:38.845006,743,168,431, 98,2014-01-14 14:24:35.607952,245,172,854, 98,2014-01-18 03:27:13.276446,891,893,570, 85,2014-01-15 10:09:24.067248,605,161,559, 98,2014-01-14 18:49:09.702933,200,732,49, 98,2014-01-20 03:18:35.466601,667,860,541, 98,2014-01-19 23:44:33.764815,350,744,742, 98,2014-01-19 20:58:46.77047,585,163,988, 85,2014-01-19 20:33:52.332036,611,978,972, 85,2014-01-19 13:58:48.641791,424,276,431, 98,2014-01-15 05:34:12.289227,472,993,254, 
98,2014-01-14 01:58:38.874,118,758,737, 98,2014-01-12 09:40:39.052978,845,45,86, 85,2014-01-17 23:42:35.17405,585,114,773, 98,2014-01-20 23:37:42.862879,339,927,849, 85,2014-01-13 19:04:08.077918,740,546,306, 85,2014-01-18 17:13:14.899092,178,986,460, 98,2014-01-20 04:24:05.778298,243,543,775, 98,2014-01-19 09:20:02.10558,89,318,775, 85,2014-01-18 12:44:20.090041,776,227,956, 85,2014-01-12 13:29:27.306986,575,918,779, 98,2014-01-13 19:47:32.877542,194,692,798, 85,2014-01-14 06:03:04.506834,967,437,462, 98,2014-01-14 07:55:09.860955,404,1000,935, 85,2014-01-18 08:29:46.146451,541,823,170, 85,2014-01-13 11:41:39.896629,183,911,228, 98,2014-01-13 14:23:08.183113,804,895,501, 85,2014-01-12 19:22:16.901895,983,540,987, 98,2014-01-14 05:03:30.388304,300,458,954, 98,2014-01-12 17:08:34.036289,52,577,770, 85,2014-01-16 00:20:49.752069,618,968,652, 98,2014-01-19 07:56:09.233311,690,991,797, 98,2014-01-16 11:33:09.644241,132,864,207, 98,2014-01-12 21:48:03.028515,339,752,983, 98,2014-01-13 12:05:33.300559,48,918,753, 85,2014-01-20 10:22:19.347174,673,253,670, 85,2014-01-11 19:06:38.495116,880,289,809, 98,2014-01-12 05:50:07.065373,618,349,125, 98,2014-01-19 01:21:06.118064,257,327,958, 98,2014-01-20 19:40:30.827572,594,656,309, 85,2014-01-20 23:48:55.707164,398,938,34, 98,2014-01-13 00:55:18.407291,465,898,211, 98,2014-01-13 15:39:09.701572,150,277,459, 85,2014-01-12 03:29:34.76056,839,108,94, 85,2014-01-20 04:37:53.581342,195,819,706, 85,2014-01-13 09:16:52.56445,403,173,438, 85,2014-01-15 02:33:42.838478,966,971,85, 98,2014-01-17 15:51:08.480432,728,99,18, 98,2014-01-12 07:40:58.343167,974,158,987, 85,2014-01-13 05:53:44.944058,211,997,796, 98,2014-01-14 22:35:28.570474,262,867,180, 85,2014-01-14 01:10:25.650154,350,445,392, 85,2014-01-16 05:41:17.8216,749,366,630, 98,2014-01-18 08:38:33.077727,903,419,623, 98,2014-01-17 13:05:35.511108,883,712,197, 98,2014-01-13 08:11:03.968051,155,400,548, 98,2014-01-21 00:20:12.974357,581,522,662, 85,2014-01-20 07:28:43.694641,367,569,59, 98,2014-01-15 07:33:20.869946,329,411,617, 85,2014-01-13 23:23:43.362661,760,797,316, 98,2014-01-11 18:37:17.730929,155,917,203, 98,2014-01-20 11:54:10.622015,707,755,576, 98,2014-01-12 01:19:52.596392,155,199,886, 98,2014-01-17 10:10:27.37517,427,867,123, 85,2014-01-12 03:42:13.650949,863,668,41, 98,2014-01-14 17:25:25.89695,944,997,160, 98,2014-01-16 14:32:46.722763,437,754,647, 98,2014-01-13 02:48:52.146066,412,464,49, 98,2014-01-16 03:15:32.962743,817,452,847, 98,2014-01-14 09:53:52.840827,590,175,115, 85,2014-01-12 16:29:22.973636,912,191,819, 85,2014-01-15 21:04:28.669906,113,308,600, 85,2014-01-16 14:49:11.248118,362,197,479, 85,2014-01-15 12:08:32.675627,451,799,581, 98,2014-01-17 04:34:03.430468,543,763,822, 85,2014-01-15 13:27:04.829458,651,200,566, 85,2014-01-18 01:25:25.240077,891,409,199, 85,2014-01-11 01:07:46.269365,304,272,759, 98,2014-01-11 23:18:13.309892,661,127,915, 84,2014-01-19 13:06:23.583245,602,51,294, 36,2014-01-13 11:01:31.213423,87,158,14, 36,2014-01-16 01:53:41.880784,934,788,928, 84,2014-01-12 08:16:49.233817,704,878,112, 36,2014-01-18 20:42:49.035442,234,616,870, 84,2014-01-13 04:32:14.958511,953,352,441, 84,2014-01-19 01:22:24.744926,487,64,268, 89,2014-01-10 22:53:53.003493,142,385,599, 89,2014-01-12 22:52:27.932869,160,705,247, 36,2014-01-18 02:51:08.439566,989,324,675, 89,2014-01-15 10:27:13.873857,983,421,716, 36,2014-01-16 02:16:11.295529,114,166,528, 36,2014-01-19 01:28:26.170495,67,740,191, 89,2014-01-14 16:21:24.495873,54,592,297, 84,2014-01-17 07:36:03.891921,62,710,723, 89,2014-01-15 
05:38:53.546083,707,927,474, 36,2014-01-16 00:03:38.146822,642,967,324, 84,2014-01-21 05:01:29.789291,932,546,897, 36,2014-01-21 00:11:40.268847,168,573,85, 84,2014-01-18 06:52:30.293306,321,716,5, 36,2014-01-16 02:17:02.752033,791,470,677, 84,2014-01-14 04:05:33.109673,373,809,591, 36,2014-01-20 03:21:53.266943,820,11,916, 36,2014-01-21 03:21:31.42194,681,60,559, 89,2014-01-19 22:54:44.357791,659,633,97, 89,2014-01-14 09:30:25.942571,991,453,835, 36,2014-01-17 01:55:34.852826,695,389,870, 36,2014-01-14 06:21:49.187249,148,460,152, 89,2014-01-11 04:55:51.183067,279,528,924, 84,2014-01-17 07:03:21.122191,504,3,869, 89,2014-01-15 09:40:02.497141,610,636,420, 84,2014-01-19 22:02:14.766311,666,762,218, 36,2014-01-19 22:04:52.335614,303,562,124, 36,2014-01-20 15:33:44.377925,756,157,217, 89,2014-01-11 00:19:04.000128,400,755,629, 89,2014-01-17 12:47:41.371055,999,355,511, 84,2014-01-12 14:05:59.336436,581,70,930, 84,2014-01-19 05:41:28.745473,377,229,645, 89,2014-01-17 15:41:34.374967,44,849,456, 89,2014-01-14 16:58:27.269305,98,97,726, 89,2014-01-16 02:32:37.185039,61,816,467, 89,2014-01-11 20:08:48.248825,953,5,783, 84,2014-01-19 23:14:38.565254,958,864,567, 36,2014-01-13 22:01:03.355953,178,546,967, 84,2014-01-15 16:30:12.744697,181,836,639, 89,2014-01-16 00:50:42.457175,624,497,111, 89,2014-01-18 07:39:56.902036,498,410,137, 36,2014-01-20 20:33:50.891939,248,591,692, 84,2014-01-15 23:52:12.246466,102,179,315, 84,2014-01-18 01:51:37.170882,76,820,415, 36,2014-01-17 21:26:21.185245,20,460,858, 84,2014-01-10 20:09:14.998498,473,473,822, 89,2014-01-10 23:57:10.280556,525,508,824, 36,2014-01-16 18:48:14.452187,934,326,138, 84,2014-01-21 03:30:46.420857,342,930,152, 89,2014-01-20 02:51:54.638347,392,291,334, 89,2014-01-20 08:18:40.395177,676,713,873, 84,2014-01-16 23:26:21.273683,158,932,886, 36,2014-01-13 03:13:43.825596,504,17,804, 89,2014-01-20 17:14:31.578244,865,253,257, 89,2014-01-13 00:29:42.395874,311,318,812, 89,2014-01-17 16:53:46.322736,729,696,891, 84,2014-01-19 09:16:46.344555,683,366,504, 84,2014-01-11 16:34:34.731488,845,290,370, 36,2014-01-17 02:27:30.701081,69,496,753, 36,2014-01-19 13:35:50.345102,299,198,523, 84,2014-01-18 09:22:16.102962,436,392,370, 84,2014-01-18 20:33:30.037519,120,644,872, 36,2014-01-17 13:17:19.090575,67,91,204, 84,2014-01-14 19:03:50.477929,987,985,159, 89,2014-01-12 07:31:57.307243,893,967,420, 84,2014-01-12 09:49:56.275615,186,876,85, 84,2014-01-15 19:12:38.726754,34,584,226, 89,2014-01-11 00:46:35.872497,160,386,166, 89,2014-01-15 11:50:59.631567,880,769,672, 36,2014-01-20 15:42:51.471451,437,703,209, 84,2014-01-16 05:37:18.329672,410,13,635, 36,2014-01-12 13:30:56.533603,87,143,63, 84,2014-01-20 06:16:42.363389,265,527,611, 36,2014-01-10 23:29:30.576556,808,450,132, 36,2014-01-19 19:22:33.704486,275,89,482, 84,2014-01-16 21:43:03.548633,923,436,915, 89,2014-01-10 23:38:45.575054,976,489,954, 36,2014-01-19 23:19:43.985042,368,236,467, 84,2014-01-12 10:31:18.00124,280,395,944, 89,2014-01-10 21:09:31.995911,674,897,198, 84,2014-01-18 20:11:38.623389,808,545,148, 36,2014-01-11 12:49:58.396417,331,545,230, 36,2014-01-17 00:35:53.269595,162,931,397, 84,2014-01-21 03:25:22.448984,891,109,986, 36,2014-01-11 00:04:29.97466,622,802,276, 89,2014-01-19 05:05:35.66547,501,701,287, 89,2014-01-17 14:19:08.772141,587,110,320, 89,2014-01-19 13:21:16.319216,396,716,384, 89,2014-01-20 01:40:10.397376,407,272,394, 36,2014-01-13 10:46:39.473222,412,57,77, 36,2014-01-17 20:57:06.664318,463,702,281, 89,2014-01-17 05:02:26.500339,317,226,54, 36,2014-01-11 
01:20:09.51074,695,392,142, 89,2014-01-14 04:14:25.754893,400,577,239, 89,2014-01-21 04:06:16.978268,491,834,907, 36,2014-01-12 12:52:06.817982,858,163,390, 84,2014-01-15 18:04:22.030926,924,719,319, 36,2014-01-15 17:18:55.705022,711,586,985, 36,2014-01-12 17:38:42.690479,936,320,732, 36,2014-01-20 09:55:21.662494,55,974,79, 36,2014-01-15 03:01:47.176472,796,559,370, 36,2014-01-18 03:16:01.02057,646,564,437, 36,2014-01-11 17:26:18.196516,222,971,396, 84,2014-01-14 03:18:29.54028,840,210,880, 89,2014-01-18 06:45:31.597126,417,569,833, 84,2014-01-20 16:48:51.901002,960,729,283, 89,2014-01-20 05:01:33.088914,554,569,736, 89,2014-01-18 10:24:17.17218,752,888,971, 84,2014-01-19 10:08:35.886463,889,814,249, 89,2014-01-11 09:32:51.090153,996,565,75, 89,2014-01-18 11:33:49.168511,92,580,367, 89,2014-01-17 00:20:14.509851,687,15,819, 36,2014-01-12 02:22:49.486571,560,289,849, 36,2014-01-14 06:09:42.438107,441,266,99, 89,2014-01-16 21:45:36.959254,512,722,422, 89,2014-01-12 06:27:19.461231,563,158,476, 36,2014-01-12 05:15:18.103996,361,480,952, 36,2014-01-13 06:04:45.731395,469,125,402, 36,2014-01-20 23:48:35.780866,315,114,582, 84,2014-01-11 00:55:28.501372,513,544,665, 84,2014-01-15 20:51:25.204617,813,461,553, 89,2014-01-17 14:45:42.445183,433,936,261, 89,2014-01-17 09:57:55.00171,684,645,344, 89,2014-01-16 02:11:34.715356,756,795,835, 84,2014-01-20 23:00:08.200496,578,102,599, 89,2014-01-17 08:04:11.979978,690,899,80, 84,2014-01-17 19:03:41.533758,172,683,282, 89,2014-01-15 11:04:30.231422,367,32,488, 84,2014-01-11 19:23:07.685,752,390,478, 84,2014-01-19 16:42:24.224237,225,715,605, 89,2014-01-14 14:59:51.893915,625,917,320, 84,2014-01-16 02:24:54.861472,227,318,704, 89,2014-01-16 13:58:25.244807,472,920,820, 89,2014-01-15 12:26:10.090431,554,922,376, 84,2014-01-19 09:43:24.401752,47,146,653, 89,2014-01-13 14:43:56.841933,57,429,337, 89,2014-01-14 23:15:01.991852,630,68,66, 89,2014-01-18 08:44:57.491085,606,103,440, 84,2014-01-21 05:08:14.014533,902,403,638, 36,2014-01-13 03:23:37.878315,324,556,596, 84,2014-01-18 22:17:48.581239,586,878,449, 36,2014-01-18 10:42:03.183045,542,606,617, 36,2014-01-19 07:43:52.388166,619,483,491, 36,2014-01-20 04:40:38.068228,882,342,640, 89,2014-01-11 10:51:45.621152,239,363,857, 36,2014-01-14 23:29:29.347421,334,807,107, 36,2014-01-11 05:07:57.529459,264,746,537, 36,2014-01-12 20:07:03.725148,197,895,864, 36,2014-01-17 09:34:15.078815,567,973,356, 36,2014-01-10 22:56:33.310324,298,424,437, 89,2014-01-13 01:02:32.22652,148,310,222, 84,2014-01-12 00:25:40.283431,348,796,649, 84,2014-01-17 17:42:15.755927,872,283,171, 84,2014-01-19 15:00:27.22823,112,554,76, 84,2014-01-17 06:37:14.999206,392,642,980, 84,2014-01-17 10:42:23.956422,453,678,273, 89,2014-01-15 17:04:39.208208,192,361,492, 36,2014-01-13 19:40:56.532965,107,427,280, 84,2014-01-11 15:46:54.187844,807,523,209, 89,2014-01-16 16:27:46.893208,151,933,342, 84,2014-01-12 06:23:20.757201,34,414,244, 84,2014-01-15 10:46:46.081758,272,982,513, 84,2014-01-11 12:52:41.7551,494,775,235, 36,2014-01-18 00:21:46.002428,277,191,757, 84,2014-01-20 03:12:56.172609,289,504,552, 84,2014-01-20 02:36:06.156852,213,723,225, 89,2014-01-20 19:05:42.844361,154,859,888, 36,2014-01-13 20:27:58.16446,567,818,940, 36,2014-01-17 05:21:03.647937,455,606,703, 36,2014-01-20 18:13:56.858896,400,584,559, 89,2014-01-16 03:51:36.042776,491,137,7, 89,2014-01-14 21:38:52.229595,677,747,830, 89,2014-01-17 22:56:00.041941,91,551,220, 36,2014-01-14 05:35:28.430942,828,963,918, 84,2014-01-13 20:19:30.297823,113,18,462, 36,2014-01-18 
13:47:45.663511,920,560,793, 89,2014-01-18 09:04:57.778781,861,713,889, 89,2014-01-14 05:27:27.827282,901,734,807, 36,2014-01-20 13:54:49.388659,645,756,354, 36,2014-01-14 12:39:12.857596,918,376,907, 36,2014-01-14 08:24:01.138026,745,977,499, 89,2014-01-12 08:57:21.615598,959,551,824, 36,2014-01-15 17:04:53.141446,535,511,434, 36,2014-01-10 20:06:16.893952,463,196,631, 36,2014-01-21 03:57:48.843828,294,112,667, 84,2014-01-11 17:42:08.140652,469,511,258, 84,2014-01-17 10:48:40.850375,602,667,449, 89,2014-01-15 15:02:28.052456,670,246,768, 89,2014-01-14 17:23:04.673617,563,582,403, 84,2014-01-18 06:35:35.038219,288,158,713, 36,2014-01-11 01:30:14.945665,487,312,581, 89,2014-01-16 03:46:25.430818,126,111,978, 84,2014-01-12 11:22:21.119977,894,503,626, 84,2014-01-11 18:22:56.700764,320,792,954, 36,2014-01-12 22:08:11.433246,496,937,515, 36,2014-01-11 08:35:17.292586,901,615,28, 36,2014-01-21 00:59:02.858034,626,764,535, 84,2014-01-12 11:13:54.278027,556,219,818, 36,2014-01-14 09:03:15.457047,636,380,944, 84,2014-01-17 00:20:06.505972,790,971,149, 36,2014-01-11 23:27:51.136922,440,306,149, 36,2014-01-19 16:54:51.499823,702,419,277, 36,2014-01-21 01:58:58.735567,582,478,398, 36,2014-01-19 02:23:51.179282,311,830,265, 84,2014-01-12 16:30:19.931183,936,933,259, 36,2014-01-13 16:18:29.03339,850,462,471, 89,2014-01-16 10:11:36.842794,763,930,935, 84,2014-01-20 05:35:17.709965,662,761,346, 89,2014-01-17 01:45:56.861091,985,354,33, 84,2014-01-15 18:06:26.231453,310,742,186, 36,2014-01-13 12:14:30.56756,8,726,31, 36,2014-01-20 14:09:57.999117,954,393,820, 36,2014-01-17 07:03:47.847051,608,158,757, 36,2014-01-18 09:19:23.709007,484,549,317, 89,2014-01-20 14:16:14.893069,138,982,259, 89,2014-01-17 05:01:36.691298,280,952,345, 89,2014-01-19 07:01:31.849659,309,479,239, 36,2014-01-16 19:04:55.743444,287,882,317, 89,2014-01-11 14:04:04.743754,590,190,167, 89,2014-01-12 18:24:36.523276,805,304,178, 84,2014-01-13 19:40:30.781662,58,188,531, 84,2014-01-11 19:34:19.689418,931,110,895, 84,2014-01-18 02:11:01.954514,733,580,915, 89,2014-01-15 11:02:51.90164,577,3,895, 36,2014-01-12 17:57:16.390601,146,763,407, 89,2014-01-20 04:19:13.387759,942,734,409, 89,2014-01-15 23:38:09.194644,904,250,575, 89,2014-01-12 12:56:19.248635,669,757,13, 36,2014-01-11 09:33:07.665786,387,494,792, 84,2014-01-19 12:41:24.651691,104,156,287, 89,2014-01-18 17:16:25.754607,656,692,808, 84,2014-01-12 13:00:58.803128,765,432,422, 84,2014-01-17 23:36:16.151932,809,193,94, 89,2014-01-18 13:15:24.490594,117,215,432, 89,2014-01-20 19:24:49.982409,899,645,534, 89,2014-01-19 20:06:36.083117,193,430,149, 36,2014-01-10 23:33:53.523983,448,568,479, 84,2014-01-15 23:36:26.825203,168,746,486, 84,2014-01-18 19:41:53.79308,882,750,778, 36,2014-01-17 05:19:50.385074,607,483,808, 89,2014-01-20 21:42:53.056656,219,442,317, 36,2014-01-11 01:56:24.360642,801,996,130, 36,2014-01-16 13:29:48.384191,481,439,736, 89,2014-01-16 22:46:40.904126,972,827,748, 84,2014-01-18 15:15:48.070067,871,787,178, 36,2014-01-15 21:46:03.27726,430,10,912, 36,2014-01-12 21:48:17.595425,823,735,604, 84,2014-01-16 16:17:19.919726,52,725,148, 36,2014-01-11 10:50:59.020704,896,261,353, 84,2014-01-13 15:52:22.339178,445,206,553, 36,2014-01-18 14:41:56.443422,304,999,626, 89,2014-01-14 10:31:29.802366,245,392,917, 36,2014-01-14 15:26:42.028597,49,886,757, 84,2014-01-15 10:52:58.397934,13,574,35, 36,2014-01-19 01:34:21.704424,642,681,113, 89,2014-01-16 13:23:58.419198,363,759,143, 89,2014-01-14 09:12:11.785694,274,422,802, 84,2014-01-13 19:12:30.899069,781,942,710, 36,2014-01-18 
06:20:17.667833,714,236,428, 36,2014-01-14 22:45:19.451899,468,446,612, 89,2014-01-12 01:53:55.550759,140,568,16, 84,2014-01-15 17:36:43.42286,288,168,286, 36,2014-01-16 15:46:18.255027,780,889,938, 89,2014-01-19 05:30:11.702693,587,440,458, 89,2014-01-13 00:52:07.913454,952,255,377, 89,2014-01-16 05:11:08.237436,193,968,54, 36,2014-01-17 19:36:47.785808,796,932,521, 89,2014-01-13 04:26:01.437436,446,629,672, 36,2014-01-10 22:47:35.062641,333,807,377, 36,2014-01-15 09:18:41.57889,889,225,381, 84,2014-01-19 13:45:51.82293,405,610,325, 36,2014-01-21 00:30:28.119297,523,983,937, 36,2014-01-15 15:15:05.93995,322,813,882, 36,2014-01-14 21:15:40.207121,83,706,338, 84,2014-01-16 17:17:09.023423,778,819,333, 84,2014-01-13 00:30:54.010017,494,289,891, 84,2014-01-19 23:01:43.4848,971,187,928, 36,2014-01-18 19:05:26.618848,861,642,984, 36,2014-01-18 20:48:13.929744,628,116,315, 84,2014-01-20 13:52:42.505504,684,955,409, 84,2014-01-11 04:57:48.958026,694,791,108, 89,2014-01-16 05:30:10.373165,422,520,897, 89,2014-01-13 18:24:12.30829,139,907,776, 84,2014-01-15 00:24:30.987043,209,482,956, 89,2014-01-20 20:23:08.7711,274,167,317, 55,2014-01-11 13:58:34.012715,970,581,237, 0,2014-01-20 17:48:29.406241,750,260,587, 0,2014-01-13 23:35:20.557214,965,312,437, 19,2014-01-14 13:11:04.911782,560,44,729, 64,2014-01-17 18:08:47.074493,820,80,369, 0,2014-01-18 02:20:40.009113,325,260,858, 55,2014-01-15 19:05:00.462543,607,585,533, 0,2014-01-12 05:45:30.497353,633,630,652, 19,2014-01-13 12:06:58.26414,826,41,365, 19,2014-01-13 18:35:12.165654,124,663,954, 19,2014-01-14 10:37:38.410807,529,672,138, 55,2014-01-18 21:18:06.501577,575,844,53, 19,2014-01-20 18:11:59.951463,343,414,655, 0,2014-01-16 19:03:39.848244,2,979,333, 64,2014-01-19 00:05:41.564218,359,59,205, 0,2014-01-14 21:30:41.530771,613,505,922, 19,2014-01-15 02:49:31.671174,459,337,101, 55,2014-01-18 18:36:09.683515,918,773,853, 64,2014-01-19 16:45:47.470721,347,160,190, 19,2014-01-19 04:05:11.878715,915,92,467, 19,2014-01-14 05:53:18.707356,353,810,239, 19,2014-01-11 11:16:41.480738,857,374,622, 19,2014-01-17 21:06:55.363516,230,906,402, 64,2014-01-11 18:58:45.326204,389,100,936, 64,2014-01-19 12:04:55.410481,355,851,74, 64,2014-01-17 04:59:37.86902,572,756,799, 64,2014-01-12 03:56:34.28465,777,472,624, 19,2014-01-14 11:35:05.784066,715,23,87, 19,2014-01-20 03:23:50.177309,799,456,13, 55,2014-01-16 08:21:05.271693,930,40,737, 19,2014-01-14 01:58:14.555166,656,111,591, 64,2014-01-20 21:22:24.190024,331,951,827, 55,2014-01-15 20:09:34.677933,140,616,371, 64,2014-01-17 05:33:35.112378,675,986,69, 55,2014-01-14 04:33:29.101806,381,979,141, 19,2014-01-12 08:18:21.752426,86,767,707, 19,2014-01-14 01:54:15.121491,954,173,912, 19,2014-01-19 03:38:29.564768,321,965,450, 64,2014-01-13 18:03:52.249779,274,862,228, 19,2014-01-16 18:01:13.385631,274,93,482, 19,2014-01-11 16:13:41.730422,521,743,623, 0,2014-01-17 08:41:30.660586,794,777,568, 19,2014-01-14 09:19:19.887207,947,647,775, 19,2014-01-11 04:25:41.681884,784,34,593, 0,2014-01-12 21:45:10.50925,36,522,786, 64,2014-01-12 03:25:01.451425,32,770,726, 0,2014-01-15 05:56:23.212656,668,35,612, 55,2014-01-17 04:34:42.180425,183,190,977, 55,2014-01-20 02:01:11.135358,594,862,507, 0,2014-01-13 16:42:10.683377,502,957,312, 55,2014-01-15 02:39:54.05914,861,168,55, 19,2014-01-13 01:54:29.842715,693,575,537, 64,2014-01-14 07:58:52.164115,508,4,800, 64,2014-01-11 17:46:49.422654,312,309,706, 19,2014-01-14 00:53:15.16892,337,489,715, 0,2014-01-12 14:03:47.575015,155,813,174, 55,2014-01-18 02:46:27.292094,500,834,331, 
55,2014-01-15 08:49:49.45357,843,761,53, 64,2014-01-16 05:38:53.35908,691,686,929, 64,2014-01-17 00:10:17.469403,510,777,178, 55,2014-01-20 21:10:54.725263,678,880,214, 0,2014-01-19 11:37:07.914245,691,129,37, 55,2014-01-16 15:32:41.659846,266,118,330, 64,2014-01-15 11:20:29.403615,192,606,106, 55,2014-01-15 11:10:43.027043,338,359,964, 0,2014-01-20 00:06:10.761651,345,918,570, 55,2014-01-16 23:38:51.156041,35,940,661, 55,2014-01-18 17:04:58.148533,261,741,856, 0,2014-01-17 21:44:40.32642,628,758,554, 19,2014-01-19 21:42:43.40624,562,336,373, 19,2014-01-14 05:06:11.534583,204,700,591, 55,2014-01-18 17:58:22.056842,26,519,329, 55,2014-01-16 00:24:14.066826,19,896,277, 0,2014-01-17 18:25:31.421792,834,672,108, 19,2014-01-19 02:24:03.739145,579,865,120, 55,2014-01-18 02:09:24.576077,523,903,734, 55,2014-01-19 01:50:32.873635,166,406,777, 0,2014-01-13 02:20:26.951801,896,678,876, 55,2014-01-14 00:44:06.756921,861,733,211, 19,2014-01-17 21:51:44.008994,856,967,937, 0,2014-01-15 23:02:37.635597,824,332,355, 55,2014-01-18 07:24:00.81606,28,246,380, 55,2014-01-20 03:46:13.851708,262,62,625, 55,2014-01-19 11:01:29.79971,228,655,4, 0,2014-01-19 05:10:50.239133,61,38,66, 19,2014-01-12 22:39:29.020628,179,34,996, 19,2014-01-21 05:05:17.374725,432,500,252, 19,2014-01-16 01:57:17.531227,884,515,91, 55,2014-01-17 11:29:18.474617,232,311,833, 19,2014-01-16 04:44:10.733805,82,917,221, 19,2014-01-11 20:07:35.00063,320,450,969, 55,2014-01-17 02:40:13.199879,326,342,987, 19,2014-01-14 10:21:18.648469,222,747,993, 64,2014-01-17 15:40:16.660475,152,569,646, 19,2014-01-11 08:00:42.603494,473,233,394, 19,2014-01-19 01:32:01.675511,212,441,135, 64,2014-01-16 09:46:27.422546,799,243,501, 64,2014-01-17 11:39:33.759954,503,706,511, 64,2014-01-16 12:36:59.824046,143,896,127, 0,2014-01-13 01:31:07.748966,969,844,299, 55,2014-01-16 03:22:17.166193,406,559,125, 55,2014-01-19 21:43:11.358629,542,22,216, 55,2014-01-20 23:29:29.806226,665,93,76, 19,2014-01-10 21:46:31.23302,557,659,646, 55,2014-01-16 10:08:42.78084,498,791,590, 64,2014-01-18 19:53:33.545371,453,827,646, 64,2014-01-18 03:55:55.809516,602,984,844, 0,2014-01-14 05:59:15.654475,641,374,887, 19,2014-01-21 02:14:00.497171,980,144,202, 19,2014-01-10 22:40:02.566435,901,420,538, 19,2014-01-10 21:50:59.663469,783,379,898, 19,2014-01-15 19:16:38.132768,942,326,975, 19,2014-01-18 10:04:03.382496,781,75,878, 55,2014-01-20 05:37:13.515177,122,319,741, 55,2014-01-14 00:18:07.932897,259,233,65, 19,2014-01-16 09:14:53.62163,800,638,748, 64,2014-01-11 22:16:42.535805,460,594,227, 55,2014-01-13 23:23:25.307623,678,689,257, 19,2014-01-11 05:12:11.152857,877,72,997, 64,2014-01-18 13:46:01.010422,234,317,523, 64,2014-01-19 08:07:36.041847,918,174,575, 0,2014-01-12 05:19:46.153906,271,894,329, 19,2014-01-14 10:26:14.21072,528,299,24, 0,2014-01-12 12:28:54.690316,655,376,766, 55,2014-01-19 01:00:02.814381,644,973,631, 55,2014-01-14 22:26:56.814214,26,980,895, 64,2014-01-20 18:00:56.365828,81,468,948, 55,2014-01-14 04:46:30.236927,234,348,590, 64,2014-01-11 04:06:30.574168,826,40,275, 64,2014-01-16 00:37:56.189873,194,687,12, 55,2014-01-16 10:17:37.986312,189,222,946, 64,2014-01-16 11:28:47.74036,27,563,191, 0,2014-01-14 16:21:07.548922,241,420,642, 19,2014-01-16 03:47:07.792538,69,592,963, 55,2014-01-16 13:15:18.973799,504,8,408, 0,2014-01-20 06:29:50.329761,132,223,550, 64,2014-01-13 17:40:41.337908,232,959,434, 55,2014-01-13 11:11:14.783315,873,838,233, 19,2014-01-13 06:29:05.984236,689,951,41, 19,2014-01-13 13:54:41.835079,754,981,300, 64,2014-01-13 13:51:17.34975,525,662,406, 
19,2014-01-13 08:20:05.647705,240,325,70, 55,2014-01-18 13:11:19.968266,42,832,902, 55,2014-01-21 03:55:20.732246,122,881,708, 55,2014-01-12 07:57:19.162882,174,578,135, 55,2014-01-11 07:29:27.901164,137,735,158, 55,2014-01-16 07:10:14.354295,949,699,775, 19,2014-01-13 10:14:01.698686,905,970,732, 0,2014-01-14 10:52:53.208785,104,92,188, 64,2014-01-16 16:22:25.507152,906,65,617, 55,2014-01-21 04:00:02.709527,925,61,846, 19,2014-01-12 13:00:29.250632,929,498,764, 64,2014-01-18 01:42:11.661058,272,805,448, 19,2014-01-14 08:26:16.920247,699,156,499, 64,2014-01-14 05:29:23.940949,396,759,288, 19,2014-01-15 20:42:14.475859,348,347,637, 55,2014-01-18 10:53:13.73446,908,881,447, 55,2014-01-13 17:30:20.306776,862,561,502, 19,2014-01-19 05:28:44.712786,573,185,90, 64,2014-01-18 18:59:44.309047,216,624,163, 64,2014-01-18 22:08:16.497069,229,696,771, 55,2014-01-14 09:46:22.699099,54,551,285, 55,2014-01-14 00:28:32.049407,857,964,303, 0,2014-01-12 08:29:24.045991,402,415,317, 0,2014-01-19 17:33:30.491636,747,410,299, 64,2014-01-19 17:43:51.023206,236,379,389, 55,2014-01-11 08:59:14.375752,809,747,606, 19,2014-01-12 05:14:11.829545,73,987,31, 64,2014-01-11 22:55:05.806521,256,281,793, 55,2014-01-13 19:28:20.359989,221,872,417, 19,2014-01-14 23:08:53.665043,563,678,565, 0,2014-01-14 16:46:23.156271,7,16,890, 64,2014-01-16 07:48:26.007694,650,19,333, 0,2014-01-12 06:20:13.633308,575,38,123, 64,2014-01-14 14:41:43.888936,53,384,111, 55,2014-01-17 19:45:45.170576,19,459,495, 55,2014-01-12 17:49:41.534472,579,473,103, 19,2014-01-20 01:51:58.243232,860,621,757, 55,2014-01-20 09:59:46.869263,815,31,617, 55,2014-01-16 08:42:34.743257,590,363,433, 55,2014-01-15 12:14:23.750384,490,958,144, 55,2014-01-20 07:59:49.57879,38,96,488, 64,2014-01-18 01:43:03.994309,302,22,188, 64,2014-01-12 07:56:35.411862,189,672,163, 19,2014-01-13 10:26:06.499036,525,745,364, 64,2014-01-11 01:12:27.935258,18,645,661, 55,2014-01-17 08:38:49.887721,696,715,656, 0,2014-01-21 01:19:20.233917,758,331,735, 19,2014-01-13 22:42:48.242453,170,380,789, 55,2014-01-15 08:07:34.600508,605,933,540, 55,2014-01-18 14:19:04.542964,644,592,765, 55,2014-01-11 14:51:04.739522,506,717,302, 55,2014-01-18 21:53:57.300026,266,810,836, 64,2014-01-11 08:47:36.592371,785,773,995, 64,2014-01-13 03:20:28.785513,774,705,618, 64,2014-01-17 09:27:27.791662,169,321,401, 64,2014-01-20 06:31:27.615997,827,897,7, 19,2014-01-13 16:19:43.161265,749,143,104, 0,2014-01-18 18:41:39.621626,611,720,683, 0,2014-01-10 23:26:33.422518,48,539,436, 64,2014-01-16 15:48:03.521253,434,974,151, 55,2014-01-12 11:50:33.286669,32,196,267, 19,2014-01-14 20:12:56.579208,908,207,132, 55,2014-01-11 17:36:29.528947,139,342,259, 19,2014-01-13 22:10:46.919977,406,401,202, 19,2014-01-18 14:54:40.468145,832,370,659, 0,2014-01-18 17:22:14.699523,522,639,666, 64,2014-01-15 20:00:28.454449,927,100,99, 0,2014-01-17 10:46:38.711377,364,252,672, 55,2014-01-17 21:22:01.568785,553,775,554, 19,2014-01-10 22:43:03.197706,802,477,43, 64,2014-01-11 17:01:02.461761,532,681,915, 64,2014-01-16 23:21:51.147995,229,906,840, 0,2014-01-18 04:26:07.192016,388,215,484, 19,2014-01-13 04:57:37.873624,712,298,626, 64,2014-01-19 13:47:57.647031,45,778,689, 55,2014-01-18 09:38:35.127693,193,726,383, 64,2014-01-19 17:36:27.761345,719,233,551, 55,2014-01-19 09:07:17.880948,607,319,794, 64,2014-01-11 02:21:23.370146,374,527,297, 55,2014-01-13 19:44:02.362272,743,667,144, 19,2014-01-16 17:26:22.423912,183,904,11, 19,2014-01-11 21:12:28.109669,189,70,799, 19,2014-01-11 11:37:59.662298,967,170,852, 19,2014-01-17 
06:13:59.016702,489,831,383, 19,2014-01-14 04:32:56.895181,44,522,649, 64,2014-01-18 01:05:27.45396,93,740,744, 55,2014-01-16 06:45:26.632699,891,487,567, 64,2014-01-17 00:52:40.056447,201,602,186, 19,2014-01-15 13:47:07.075586,828,961,492, 19,2014-01-16 10:12:00.055218,128,971,239, 64,2014-01-12 10:40:43.5777,509,623,153, 64,2014-01-17 05:37:40.362255,387,173,613, 19,2014-01-20 10:24:56.634426,68,620,640, 19,2014-01-13 08:17:13.106648,269,65,564, 19,2014-01-20 07:48:27.282232,828,292,615, 64,2014-01-17 19:19:37.102571,947,810,141, 64,2014-01-21 05:39:27.806171,869,438,937, 55,2014-01-14 21:48:55.736681,354,499,666, 55,2014-01-14 00:06:15.813948,153,311,793, 55,2014-01-17 21:01:29.374956,598,666,976, 55,2014-01-15 00:31:58.934807,276,240,780, 64,2014-01-14 21:07:18.276128,710,539,878, 0,2014-01-13 14:23:20.522951,784,763,438, 19,2014-01-11 22:58:06.126823,346,108,314, 19,2014-01-17 06:04:56.149751,297,436,473, 0,2014-01-11 22:11:18.170401,489,687,87, 19,2014-01-19 12:36:41.254516,149,702,383, 0,2014-01-15 17:41:23.911516,710,552,595, 19,2014-01-20 11:18:36.051349,126,523,449, 19,2014-01-19 18:58:04.624661,225,52,614, 55,2014-01-18 17:25:26.273787,305,267,585, 55,2014-01-15 22:44:58.47568,186,980,457, 64,2014-01-20 20:10:32.73433,46,972,935, 19,2014-01-19 09:03:25.936085,425,501,682, 0,2014-01-11 22:58:57.492383,965,720,209, 0,2014-01-13 18:43:29.629511,680,993,839, 19,2014-01-16 04:08:53.390046,102,493,342, 64,2014-01-17 09:44:24.125082,408,730,551, 19,2014-01-19 23:36:09.685958,414,422,799, 0,2014-01-20 21:56:00.466051,388,691,75, 64,2014-01-12 13:56:24.1803,555,946,724, 0,2014-01-11 04:16:53.263658,256,529,218, 0,2014-01-16 21:33:40.828306,697,356,863, 19,2014-01-11 18:21:20.815144,768,911,0, 55,2014-01-13 16:34:06.370306,181,563,19, 55,2014-01-15 23:22:08.110958,998,449,478, 64,2014-01-18 17:40:57.917715,710,782,525, 64,2014-01-13 16:13:34.176476,158,397,217, 64,2014-01-20 01:11:03.84764,171,537,461, 55,2014-01-11 11:47:13.732081,954,658,647, 0,2014-01-20 17:15:03.551852,112,26,800, 0,2014-01-13 19:43:02.782447,253,446,671, 19,2014-01-15 12:54:32.008209,814,904,955, 64,2014-01-13 01:38:24.074802,156,853,774, 0,2014-01-14 22:41:08.909689,69,390,705, 55,2014-01-11 12:59:28.157961,42,106,910, 64,2014-01-14 03:49:42.245203,895,11,793, 64,2014-01-13 05:17:50.164205,657,915,651, 55,2014-01-16 10:40:52.069477,475,294,885, 64,2014-01-13 09:08:18.296972,148,374,824, 64,2014-01-11 18:15:54.789285,858,973,195, 19,2014-01-13 22:06:18.343264,701,377,508, 55,2014-01-18 11:53:16.772652,311,488,953, 19,2014-01-11 08:26:27.523616,616,202,427, 64,2014-01-12 01:09:44.279349,796,819,600, 55,2014-01-19 14:52:14.265035,75,403,462, 64,2014-01-14 07:09:57.153547,968,576,996, 55,2014-01-17 09:18:37.669815,632,619,792, 0,2014-01-15 18:36:38.390118,940,798,105, 19,2014-01-13 00:46:06.839505,580,869,675, 64,2014-01-17 01:14:38.135866,185,447,537, 55,2014-01-17 12:33:02.570418,415,649,183, 64,2014-01-13 09:03:00.103583,461,265,48, 19,2014-01-12 16:48:18.964173,361,152,483, 64,2014-01-18 10:54:23.385561,158,684,829, 0,2014-01-16 05:37:06.473889,128,402,110, 19,2014-01-17 20:10:27.075131,159,871,657, 0,2014-01-15 22:35:21.303696,536,198,16, 55,2014-01-19 01:50:40.650784,202,303,556, 55,2014-01-16 15:21:30.92319,722,703,596, 0,2014-01-16 14:22:35.035777,808,746,217, 55,2014-01-18 13:05:44.202635,442,447,546, 0,2014-01-19 15:04:33.705636,642,256,691, 55,2014-01-10 21:17:07.043986,941,281,500, 19,2014-01-20 18:44:08.277439,397,555,327, 19,2014-01-13 07:45:42.615326,317,675,593, 19,2014-01-11 14:16:35.201948,831,486,57, 
64,2014-01-13 16:33:50.52306,998,975,113, 55,2014-01-15 17:03:32.77995,640,120,448, 64,2014-01-17 04:57:27.271844,292,460,20, 19,2014-01-16 05:42:08.820032,75,876,846, 64,2014-01-16 15:19:27.569235,61,753,96, 19,2014-01-20 07:03:45.615107,177,995,504, 55,2014-01-13 11:35:25.592685,894,58,206, 55,2014-01-17 03:45:55.092851,56,33,600, 19,2014-01-11 02:13:29.894876,976,190,981, 64,2014-01-11 20:27:39.857721,18,875,338, 55,2014-01-20 14:55:52.246399,272,812,655, 55,2014-01-17 15:32:07.564691,91,666,382, 55,2014-01-16 19:04:18.247839,371,843,894, 55,2014-01-12 09:41:59.086323,520,306,901, 64,2014-01-13 10:46:45.700556,960,947,878, 64,2014-01-13 01:37:20.818255,75,465,88, 64,2014-01-14 22:44:59.189906,235,612,419, 55,2014-01-15 07:35:04.664729,851,412,256, 55,2014-01-20 16:31:44.204236,43,333,340, 19,2014-01-20 08:22:05.663795,588,865,236, 64,2014-01-11 21:45:31.740279,210,71,816, 55,2014-01-15 09:07:05.507931,406,282,989, 64,2014-01-18 04:12:46.314579,687,251,441, 19,2014-01-17 17:07:02.663469,34,632,416, 55,2014-01-21 03:29:40.543708,317,860,835, 55,2014-01-15 11:18:30.517215,717,726,611, 0,2014-01-16 02:11:36.369105,945,485,18, 64,2014-01-21 04:46:47.587695,404,367,765, 64,2014-01-15 00:02:38.795072,75,995,641, 64,2014-01-18 13:57:18.98485,620,488,562, 55,2014-01-11 13:03:22.789643,762,846,759, 64,2014-01-17 20:36:29.318132,844,130,853, 64,2014-01-13 01:00:51.7648,523,487,705, 19,2014-01-17 22:00:50.061486,329,518,670, 19,2014-01-12 20:18:38.138165,185,711,583, 64,2014-01-18 20:20:19.334035,883,668,202, 19,2014-01-16 23:04:35.677014,454,504,430, 55,2014-01-15 11:54:03.73085,827,701,44, 55,2014-01-14 18:06:14.426886,753,215,479, 0,2014-01-17 05:18:05.57189,144,901,416, 19,2014-01-16 12:21:43.588571,838,976,642, 55,2014-01-14 03:02:06.673704,558,83,297, 19,2014-01-13 14:50:13.13658,536,232,732, 19,2014-01-12 01:26:01.836408,217,682,640, 64,2014-01-15 16:44:05.760027,719,647,121, 64,2014-01-16 05:36:58.837136,869,579,700, 55,2014-01-14 07:03:22.654665,205,213,913, 64,2014-01-19 19:29:04.949933,867,616,28, 19,2014-01-20 17:12:03.501866,225,183,843, 0,2014-01-13 17:35:06.8589,265,533,406, 37,2014-01-18 21:51:10.613728,182,898,190, 74,2014-01-11 08:57:35.242145,473,810,622, 74,2014-01-18 06:42:12.366831,308,125,679, 65,2014-01-15 20:03:56.928726,938,426,886, 37,2014-01-18 06:04:37.905614,587,404,638, 74,2014-01-18 04:11:52.91054,913,100,304, 65,2014-01-20 11:22:27.445941,537,776,195, 37,2014-01-13 02:16:14.275139,88,712,869, 65,2014-01-18 02:58:40.498235,841,95,259, 40,2014-01-14 05:25:06.241012,488,151,445, 65,2014-01-20 20:13:33.259989,90,121,370, 40,2014-01-18 20:02:03.288297,483,41,195, 65,2014-01-10 20:01:35.559145,514,415,585, 37,2014-01-12 15:14:25.024789,148,176,522, 40,2014-01-15 12:02:53.349784,872,1000,95, 40,2014-01-12 20:20:13.697309,747,364,511, 74,2014-01-20 15:34:44.358824,584,979,425, 45,2014-01-11 05:07:29.026797,262,381,443, 74,2014-01-17 12:14:17.428159,874,916,681, 45,2014-01-14 03:40:58.785711,80,647,372, 74,2014-01-17 14:25:34.598686,332,492,215, 40,2014-01-12 18:36:01.01673,817,709,434, 40,2014-01-17 10:43:05.459416,835,344,722, 45,2014-01-20 09:15:47.735267,701,334,935, 74,2014-01-14 00:02:02.853139,206,603,323, 65,2014-01-11 21:27:11.219443,923,492,244, 40,2014-01-15 08:52:46.572404,940,955,454, 37,2014-01-17 11:05:25.508222,514,445,713, 40,2014-01-20 20:56:16.169376,1,576,803, 74,2014-01-14 20:04:50.074689,377,602,971, 74,2014-01-20 08:40:32.367123,914,433,959, 40,2014-01-18 12:47:26.783523,43,67,270, 65,2014-01-15 09:02:25.316834,971,654,477, 37,2014-01-17 
09:22:44.734373,833,733,829, 40,2014-01-13 02:51:23.712249,108,138,740, 40,2014-01-12 09:07:03.222867,276,751,304, 65,2014-01-14 07:34:37.644914,562,394,199, 40,2014-01-12 08:13:51.158189,363,136,25, 45,2014-01-14 15:23:17.498005,167,717,393, 40,2014-01-11 04:33:18.143568,313,943,852, 65,2014-01-15 17:38:57.39962,406,273,525, 37,2014-01-14 05:36:50.757995,731,904,776, 37,2014-01-19 04:35:21.431865,528,913,916, 74,2014-01-15 17:40:32.958765,934,877,564, 37,2014-01-16 00:51:15.782783,261,595,345, 40,2014-01-13 10:38:14.781649,987,779,946, 74,2014-01-17 18:00:46.656493,68,29,802, 45,2014-01-15 10:26:00.141608,911,891,262, 40,2014-01-13 19:45:43.808866,36,734,793, 40,2014-01-14 00:15:04.084652,495,270,67, 40,2014-01-18 18:06:58.92732,937,641,236, 37,2014-01-20 14:11:18.407552,770,976,490, 37,2014-01-15 22:51:05.101802,417,65,894, 45,2014-01-14 22:50:04.387154,510,474,528, 40,2014-01-19 17:27:06.143239,784,176,605, 74,2014-01-19 02:53:07.954941,949,757,782, 65,2014-01-16 00:17:15.606597,620,568,174, 40,2014-01-13 20:19:52.715643,871,599,79, 45,2014-01-15 07:58:33.463163,79,310,980, 37,2014-01-15 15:13:31.776392,717,699,925, 74,2014-01-17 20:24:42.790331,228,418,396, 40,2014-01-14 10:39:05.830704,30,398,806, 45,2014-01-12 22:00:58.559915,127,385,8, 65,2014-01-11 23:27:08.107584,103,895,461, 65,2014-01-21 00:01:50.565078,987,707,440, 45,2014-01-15 04:52:22.272164,2,150,478, 37,2014-01-13 12:34:11.330451,32,102,163, 45,2014-01-14 01:36:28.209992,572,831,202, 40,2014-01-16 17:06:13.430352,241,49,927, 65,2014-01-17 07:57:28.828457,138,380,577, 37,2014-01-14 10:09:46.353558,856,805,446, 40,2014-01-11 04:45:10.829972,678,173,343, 74,2014-01-20 17:34:19.586452,701,753,792, 45,2014-01-12 08:45:07.785424,588,606,969, 40,2014-01-16 02:25:43.789155,759,62,6, 40,2014-01-15 12:25:35.369235,112,637,528, 45,2014-01-14 23:23:22.567492,323,92,546, 65,2014-01-12 14:26:30.445648,807,146,284, 45,2014-01-20 02:51:35.510844,913,784,933, 40,2014-01-17 23:09:06.376356,243,411,398, 65,2014-01-15 18:41:34.5303,424,716,927, 65,2014-01-17 14:58:34.438163,343,393,771, 40,2014-01-17 07:20:24.783908,389,743,52, 45,2014-01-20 21:32:39.632102,169,57,241, 40,2014-01-11 07:48:38.825317,523,614,941, 74,2014-01-15 18:47:30.927147,145,294,200, 45,2014-01-18 18:25:47.587461,149,238,140, 74,2014-01-16 12:05:54.432333,135,543,662, 74,2014-01-18 19:07:23.643208,823,919,423, 74,2014-01-12 20:24:21.050625,492,928,955, 37,2014-01-10 21:19:26.208725,769,40,423, 45,2014-01-15 09:32:06.43354,455,707,548, 45,2014-01-16 11:03:26.881329,302,391,254, 40,2014-01-12 23:20:24.76864,660,21,276, 65,2014-01-16 12:59:14.541124,391,187,442, 45,2014-01-16 05:05:17.446407,871,785,292, 40,2014-01-17 08:12:47.040804,173,778,77, 74,2014-01-19 05:33:25.871575,233,615,990, 45,2014-01-19 10:41:45.656818,68,405,338, 45,2014-01-12 19:19:00.471156,978,36,456, 37,2014-01-15 07:30:54.700032,327,790,149, 65,2014-01-12 14:51:32.010376,504,882,606, 74,2014-01-13 04:04:11.301547,35,297,991, 40,2014-01-14 19:05:14.286483,794,526,122, 45,2014-01-14 03:36:39.796219,206,523,964, 65,2014-01-18 10:29:55.090702,140,215,116, 45,2014-01-19 11:30:49.655719,19,616,133, 65,2014-01-18 07:00:02.36371,369,489,168, 65,2014-01-20 04:56:25.536351,340,441,118, 65,2014-01-18 08:22:25.166562,576,905,145, 37,2014-01-15 00:09:08.740067,222,66,794, 40,2014-01-14 17:38:00.066651,683,574,399, 74,2014-01-14 17:20:59.605144,675,603,880, 40,2014-01-11 01:29:33.523975,732,905,895, 37,2014-01-14 09:10:39.699172,456,82,797, 65,2014-01-15 05:09:38.43046,343,625,211, 45,2014-01-16 
00:17:04.451542,247,167,646, 45,2014-01-11 21:36:27.286634,278,137,803, 74,2014-01-20 21:15:32.862794,479,346,696, 74,2014-01-13 13:24:28.094749,657,248,233, 37,2014-01-13 22:00:48.337258,389,761,702, 37,2014-01-20 22:34:59.071519,269,60,673, 74,2014-01-18 02:56:34.528289,771,337,369, 45,2014-01-19 13:04:15.219007,683,891,930, 45,2014-01-12 15:55:23.840158,554,754,960, 40,2014-01-13 09:55:49.069831,80,65,371, 45,2014-01-14 12:09:32.665415,905,260,233, 65,2014-01-19 04:08:10.880962,27,400,151, 74,2014-01-11 09:29:14.941406,907,527,511, 40,2014-01-12 16:51:18.322232,354,921,104, 45,2014-01-21 03:27:11.352537,625,546,736, 37,2014-01-15 21:00:09.641857,512,95,982, 45,2014-01-14 11:42:50.333027,298,89,479, 45,2014-01-13 01:31:22.654084,701,554,628, 65,2014-01-19 20:05:23.928341,125,258,233, 37,2014-01-17 19:19:30.129246,843,97,438, 65,2014-01-20 16:01:17.744787,572,547,788, 37,2014-01-18 01:36:13.584479,924,722,865, 37,2014-01-14 20:19:32.492956,691,789,21, 37,2014-01-19 14:57:43.281138,813,503,744, 45,2014-01-15 03:58:38.751041,919,461,283, 74,2014-01-19 00:28:41.233023,487,482,552, 40,2014-01-13 02:35:43.347789,979,398,608, 37,2014-01-19 01:19:38.356184,124,507,107, 45,2014-01-19 05:58:14.756998,35,88,790, 45,2014-01-16 15:46:23.046961,761,906,611, 65,2014-01-13 00:29:16.787065,38,875,317, 37,2014-01-14 00:15:19.20854,633,305,604, 65,2014-01-17 17:22:50.333595,601,767,708, 40,2014-01-12 15:44:49.649858,174,353,115, 65,2014-01-16 17:39:47.303289,905,473,193, 37,2014-01-20 19:23:38.671273,104,658,701, 37,2014-01-12 08:19:48.721796,652,633,415, 45,2014-01-13 14:36:21.831997,209,448,311, 45,2014-01-19 02:27:53.89028,735,541,167, 40,2014-01-14 04:15:12.561955,864,601,360, 37,2014-01-16 04:32:10.901828,275,238,887, 74,2014-01-12 08:37:26.555695,731,134,163, 65,2014-01-12 02:23:23.443336,425,738,671, 37,2014-01-16 18:01:25.843234,83,100,12, 65,2014-01-14 05:28:44.878346,127,382,213, 40,2014-01-11 23:50:34.795874,442,296,944, 37,2014-01-11 09:01:35.485092,127,383,605, 37,2014-01-17 21:11:35.211374,106,482,95, 45,2014-01-14 05:21:57.449958,177,346,822, 40,2014-01-20 09:06:59.413852,19,448,508, 37,2014-01-14 10:31:05.34062,822,692,661, 65,2014-01-13 15:23:15.194746,552,797,338, 37,2014-01-17 04:43:12.99833,9,698,859, 65,2014-01-18 10:50:37.833576,40,486,516, 45,2014-01-12 00:20:58.475883,5,573,957, 40,2014-01-11 02:41:51.749372,682,220,416, 37,2014-01-16 05:19:19.066599,883,894,752, 40,2014-01-14 06:56:41.823673,853,145,906, 45,2014-01-19 08:01:30.105975,54,96,339, 65,2014-01-14 05:17:33.823597,976,241,651, 37,2014-01-20 02:43:04.871053,246,470,12, 37,2014-01-11 02:30:46.89304,951,166,335, 37,2014-01-17 09:32:53.032555,325,28,512, 74,2014-01-16 14:05:55.204648,928,806,88, 45,2014-01-12 22:15:36.542898,137,265,311, 74,2014-01-12 21:12:40.335845,853,405,741, 74,2014-01-16 03:29:33.875921,712,689,860, 40,2014-01-14 10:35:25.264695,675,740,403, 40,2014-01-15 15:49:02.167841,896,259,134, 45,2014-01-13 23:57:27.766202,657,767,846, 40,2014-01-17 18:50:37.827069,67,157,902, 40,2014-01-21 00:21:13.069669,727,504,839, 45,2014-01-15 12:34:54.322316,299,419,568, 37,2014-01-19 01:14:01.270405,661,993,802, 74,2014-01-16 12:22:38.913323,66,934,176, 37,2014-01-18 22:03:39.200661,433,408,679, 45,2014-01-20 05:04:36.066279,45,737,650, 65,2014-01-17 01:24:14.398415,730,406,674, 40,2014-01-15 13:15:14.412035,52,778,224, 74,2014-01-13 04:26:33.516238,268,385,966, 40,2014-01-16 04:31:13.812267,461,48,623, 37,2014-01-19 03:46:19.752655,383,623,871, 40,2014-01-15 23:49:48.710983,996,194,878, 65,2014-01-12 
03:14:26.810597,11,216,224, 45,2014-01-16 08:36:57.586231,246,699,173, 74,2014-01-17 04:10:47.186867,645,897,448, 40,2014-01-12 09:56:18.560388,729,820,196, 37,2014-01-11 07:56:16.652831,825,821,431, 65,2014-01-20 15:07:29.01054,167,693,596, 37,2014-01-20 21:57:48.666364,249,247,912, 37,2014-01-14 17:13:50.476428,857,162,605, 37,2014-01-19 11:50:33.881593,49,821,427, 65,2014-01-21 04:28:35.559403,542,307,794, 37,2014-01-10 20:46:43.508984,399,418,20, 74,2014-01-14 19:56:29.08666,663,643,501, 45,2014-01-12 20:44:12.102302,593,711,376, 74,2014-01-12 21:59:23.845248,624,1,586, 74,2014-01-20 03:26:02.962581,633,736,18, 45,2014-01-16 11:19:37.367416,939,810,656, 40,2014-01-17 17:48:26.013089,945,579,988, 37,2014-01-12 21:23:30.729202,571,438,392, 65,2014-01-13 00:10:15.194483,318,941,646, 65,2014-01-17 12:09:39.083176,918,669,432, 65,2014-01-17 13:58:25.051518,313,148,983, 74,2014-01-21 05:24:16.464889,443,76,793, 65,2014-01-12 18:32:17.996499,198,229,812, 45,2014-01-15 06:02:04.252179,672,923,407, 65,2014-01-20 04:28:52.531168,47,546,188, 74,2014-01-18 23:56:32.394914,566,379,840, 65,2014-01-19 23:17:18.664215,673,289,727, 37,2014-01-12 02:55:26.047405,161,87,802, 40,2014-01-13 22:27:46.207599,290,290,167, 45,2014-01-17 21:03:38.41687,74,571,772, 45,2014-01-17 06:45:14.75839,877,464,865, 37,2014-01-15 05:42:13.018196,399,135,381, 37,2014-01-12 23:40:36.003102,338,521,349, 65,2014-01-13 04:56:01.945256,509,322,414, 65,2014-01-16 19:38:31.578583,545,764,826, 65,2014-01-13 11:36:52.655932,106,974,386, 45,2014-01-12 14:03:30.955796,416,147,54, 74,2014-01-16 11:36:20.244947,691,283,944, 74,2014-01-17 08:50:43.13236,194,7,942, 74,2014-01-21 05:54:04.837808,246,365,502, 74,2014-01-16 10:04:55.80435,56,690,479, 40,2014-01-17 09:37:26.641764,787,944,531, 37,2014-01-14 19:50:33.924468,98,431,85, 74,2014-01-18 10:49:07.907071,470,194,675, 65,2014-01-19 11:36:50.487011,748,226,195, 40,2014-01-13 17:16:36.887049,780,306,927, 37,2014-01-13 16:08:45.274486,14,106,155, 45,2014-01-15 23:25:16.500099,42,416,42, 74,2014-01-15 18:40:07.616251,704,257,368, 45,2014-01-15 20:19:00.46897,202,111,451, 40,2014-01-12 05:34:55.583276,307,963,872, 65,2014-01-12 02:38:32.667769,88,630,445, 74,2014-01-15 19:43:16.933858,837,891,430, 37,2014-01-14 04:07:13.579775,886,460,651, 45,2014-01-16 12:40:36.919948,139,217,302, 37,2014-01-14 18:12:09.465027,493,899,865, 37,2014-01-11 22:03:45.975107,199,64,92, 45,2014-01-15 05:57:55.584162,624,581,124, 37,2014-01-16 01:07:35.512432,48,378,956, 65,2014-01-15 00:31:32.182706,885,493,863, 37,2014-01-11 21:01:34.001032,889,935,197, 40,2014-01-12 01:52:50.270822,409,811,868, 74,2014-01-19 10:13:45.200902,280,775,577, 37,2014-01-14 00:42:10.004134,669,368,106, 74,2014-01-14 10:48:52.216078,784,413,896, 40,2014-01-14 23:52:16.779905,232,277,25, 45,2014-01-16 16:19:02.660067,445,957,322, 37,2014-01-16 04:52:23.171874,233,17,37, 65,2014-01-20 15:28:37.024852,933,489,257, 37,2014-01-12 19:09:45.792846,105,503,866, 45,2014-01-16 04:46:28.009681,724,939,565, 65,2014-01-15 19:33:32.829621,860,592,14, 65,2014-01-19 08:47:12.43461,792,770,428, 65,2014-01-20 04:37:01.934149,688,732,724, 45,2014-01-13 00:22:40.736692,370,486,95, 40,2014-01-17 14:24:02.921621,991,250,907, 45,2014-01-12 15:53:38.821618,932,910,423, 74,2014-01-15 20:31:26.011178,210,473,580, 74,2014-01-12 07:49:19.421721,749,325,556, 65,2014-01-17 14:33:46.437869,60,109,754, 65,2014-01-20 20:50:26.480148,847,542,970, 40,2014-01-13 17:24:15.005416,765,986,814, 45,2014-01-18 21:12:19.106056,849,430,459, 40,2014-01-15 
10:33:43.414006,643,397,70, 37,2014-01-17 01:31:28.58519,130,620,788, 65,2014-01-14 03:52:56.026004,486,646,822, 74,2014-01-19 08:45:52.879033,654,706,935, 74,2014-01-18 03:35:14.560297,855,93,431, 40,2014-01-18 13:50:51.610585,946,318,311, 74,2014-01-14 03:53:28.391466,666,763,64, 65,2014-01-11 22:06:46.743003,578,614,350, 74,2014-01-19 14:52:25.611618,925,119,128, 45,2014-01-15 09:46:18.662287,248,549,716, 74,2014-01-20 12:20:31.944324,132,196,649, 65,2014-01-12 09:34:35.616171,540,581,809, 65,2014-01-19 00:35:10.878365,290,586,456, 40,2014-01-14 06:12:48.724228,737,753,575, 40,2014-01-18 05:53:38.276237,937,133,261, 45,2014-01-13 23:27:34.050239,760,864,265, 74,2014-01-13 15:41:25.74908,721,85,387, 45,2014-01-20 05:03:24.069083,254,520,696, 65,2014-01-19 08:14:02.06034,637,198,805, 65,2014-01-18 15:14:58.578701,562,396,453, 40,2014-01-18 07:50:36.503694,821,48,455, 37,2014-01-18 06:51:03.994489,996,41,483, 37,2014-01-20 19:37:39.315393,54,759,229, 40,2014-01-14 16:14:39.425315,572,56,702, 37,2014-01-20 02:44:42.816106,236,226,159, 74,2014-01-15 10:09:05.32657,515,568,259, 40,2014-01-16 04:03:58.847454,300,998,350, 37,2014-01-16 11:18:29.254394,797,767,394, 45,2014-01-15 00:59:31.806718,254,268,233, 45,2014-01-19 01:28:13.852869,756,824,33, 37,2014-01-14 02:30:48.36045,971,433,435, 37,2014-01-19 15:33:15.220724,382,652,196, 65,2014-01-14 20:59:42.438058,127,221,874, 40,2014-01-17 10:23:44.386453,330,458,650, 37,2014-01-17 18:19:08.099757,955,627,519, 37,2014-01-11 18:34:56.998775,474,21,445, 40,2014-01-14 18:14:35.997039,66,843,985, 45,2014-01-21 02:12:36.491223,484,673,500, 37,2014-01-12 20:41:43.741777,950,591,958, 40,2014-01-13 03:07:01.609076,512,518,850, 37,2014-01-15 05:58:55.15351,956,138,245, 40,2014-01-12 03:02:15.686101,304,990,733, 65,2014-01-14 16:41:37.225247,789,330,274, 65,2014-01-13 00:34:06.031875,331,316,71, 45,2014-01-15 13:15:04.410329,126,649,425, 37,2014-01-11 16:35:15.501484,192,200,4, 74,2014-01-16 04:01:40.082532,471,80,930, 37,2014-01-18 08:56:30.159828,67,173,339, 74,2014-01-21 01:38:39.570986,9,334,642, 65,2014-01-14 06:15:42.142872,384,292,783, 40,2014-01-15 18:11:28.738529,726,314,361, 37,2014-01-18 03:29:16.07468,62,585,786, 74,2014-01-11 07:06:46.137359,821,88,465, 45,2014-01-15 07:49:08.054341,113,856,953, 65,2014-01-11 13:43:55.500414,924,712,328, 65,2014-01-20 13:51:28.953885,575,182,690, 65,2014-01-19 21:58:13.38091,206,475,621, 40,2014-01-16 21:47:54.347868,948,878,963, 74,2014-01-15 19:09:58.208279,195,928,372, 40,2014-01-13 16:57:45.188048,80,699,137, 74,2014-01-14 17:16:08.200736,861,644,461, 74,2014-01-19 01:40:46.568728,616,392,145, 45,2014-01-12 02:31:00.408772,138,856,692, 65,2014-01-18 18:15:50.638795,879,871,205, 40,2014-01-15 06:04:30.955182,508,338,477, 74,2014-01-19 00:50:08.508948,287,429,45, 74,2014-01-19 16:50:47.63757,998,409,795, 40,2014-01-19 04:19:06.952641,907,743,30, 65,2014-01-18 21:02:45.000171,748,328,394, 37,2014-01-11 07:32:31.379767,125,968,857, 45,2014-01-11 01:26:08.561716,621,926,943, 40,2014-01-12 21:01:40.1541,337,603,853, 74,2014-01-12 14:34:47.065868,681,476,513, 74,2014-01-14 22:07:45.786963,330,163,690, 40,2014-01-15 01:35:46.185975,645,221,872, 40,2014-01-17 07:49:51.476616,473,855,401, 37,2014-01-15 18:43:01.288447,673,802,954, 74,2014-01-20 09:37:26.268507,67,560,142, 37,2014-01-14 10:46:21.636445,18,44,741, 65,2014-01-15 14:21:40.859433,512,110,654, 74,2014-01-13 09:53:08.411379,472,785,745, 45,2014-01-19 08:57:50.374974,170,101,633, 37,2014-01-12 11:50:56.934532,674,20,999, 74,2014-01-13 
20:59:54.549158,467,241,456, 74,2014-01-13 10:46:58.429315,40,683,750, 37,2014-01-13 05:34:52.434946,763,480,111, 40,2014-01-13 04:51:23.503043,876,153,316, 65,2014-01-12 02:45:11.810645,42,719,924, 74,2014-01-19 07:22:46.782814,184,802,42, 37,2014-01-18 04:01:21.711322,198,963,176, 65,2014-01-14 23:42:56.998693,479,903,392, 45,2014-01-12 18:38:54.98355,91,832,957, 40,2014-01-15 23:42:08.28005,961,413,74, 74,2014-01-16 06:13:57.407465,950,78,903, 74,2014-01-20 16:54:45.622345,618,420,540, 40,2014-01-20 09:46:39.235232,660,400,885, 45,2014-01-14 01:04:05.916412,359,260,158, 74,2014-01-19 03:45:33.260334,578,392,708, 37,2014-01-18 08:05:46.187873,351,83,816, 74,2014-01-11 16:06:50.917003,499,530,913, 65,2014-01-19 15:18:04.6401,793,131,495, 74,2014-01-18 13:31:54.749589,518,379,358, 45,2014-01-13 17:08:31.071103,128,979,282, 45,2014-01-10 23:52:51.705968,838,294,39, 37,2014-01-12 05:39:40.536551,520,375,343, 37,2014-01-17 22:44:17.257078,852,985,852, 37,2014-01-17 11:42:43.182583,540,970,51, 40,2014-01-17 04:22:41.825417,643,698,703, 74,2014-01-17 02:21:43.526005,659,419,598, 40,2014-01-21 02:29:04.819028,719,132,388, 40,2014-01-11 12:44:22.68485,282,159,66, 65,2014-01-19 16:14:51.937383,724,497,468, 40,2014-01-19 05:26:55.194421,825,402,668, 65,2014-01-13 04:35:19.619382,248,35,812, 74,2014-01-12 07:14:46.486542,861,473,506, 37,2014-01-11 10:13:53.623735,29,417,964, 37,2014-01-15 14:10:12.054328,729,209,185, 45,2014-01-14 16:06:09.989584,452,324,46, 40,2014-01-12 16:59:05.43438,457,205,899, 45,2014-01-13 15:32:58.837141,569,291,640, 40,2014-01-11 14:07:31.700906,513,426,876, 37,2014-01-16 20:42:02.433073,171,440,856, 40,2014-01-15 14:11:53.820691,144,771,664, 45,2014-01-16 17:49:39.980956,837,220,155, 45,2014-01-11 20:55:59.840956,329,490,336, 40,2014-01-15 01:06:39.443454,370,493,391, 37,2014-01-15 21:36:19.216608,392,198,504, 40,2014-01-15 02:00:05.757369,176,934,558, 65,2014-01-12 22:52:12.703789,771,432,711, 45,2014-01-12 23:42:05.404481,541,855,150, 74,2014-01-15 22:06:56.674371,399,412,519, 40,2014-01-11 08:10:17.343889,4,518,614, 74,2014-01-20 17:14:00.154489,531,503,901, 74,2014-01-18 19:15:27.745474,97,367,121, 37,2014-01-11 12:03:09.049856,659,205,361, 37,2014-01-11 16:53:40.691041,788,481,755, 74,2014-01-15 11:59:45.002972,745,305,692, 74,2014-01-18 03:45:52.232859,270,772,111, 37,2014-01-18 01:16:22.516457,451,817,215, 37,2014-01-11 08:21:28.528976,2,595,276, 40,2014-01-18 00:14:57.051886,784,754,30, 40,2014-01-18 18:00:45.201726,161,401,533, 37,2014-01-20 04:36:20.466359,71,26,275, 45,2014-01-15 23:41:52.246307,837,586,784, 37,2014-01-21 02:36:04.821108,936,389,416, 37,2014-01-11 05:51:06.953321,898,958,243, 40,2014-01-16 13:55:45.870462,423,839,515, 45,2014-01-15 10:46:16.875855,451,46,16, 74,2014-01-15 01:57:16.942905,501,573,921, 65,2014-01-18 10:54:51.304843,466,496,999, 65,2014-01-18 06:19:15.712997,320,135,484, 45,2014-01-15 20:04:48.64423,902,831,466, 37,2014-01-14 01:36:53.738334,541,390,255, 45,2014-01-12 14:31:09.534106,237,317,676, 37,2014-01-11 07:54:28.625186,600,49,587, 45,2014-01-15 02:32:53.579291,853,865,842, 45,2014-01-16 19:37:48.97756,684,588,869, 40,2014-01-16 09:30:47.841795,826,204,222, 37,2014-01-19 08:32:59.336659,490,328,418, 37,2014-01-18 22:30:01.681349,549,474,103, 40,2014-01-18 13:12:53.246695,268,670,593, 65,2014-01-14 00:39:56.011449,781,564,912, 65,2014-01-19 10:40:19.025239,688,918,382, 74,2014-01-18 00:26:53.401183,285,545,942, 37,2014-01-11 13:55:23.756923,646,837,331, 65,2014-01-20 02:43:28.075095,174,651,316, 37,2014-01-18 
21:20:34.092223,828,96,208, 65,2014-01-16 05:55:08.759895,95,200,891, 45,2014-01-17 00:29:20.307953,845,577,109, 45,2014-01-15 16:36:56.6091,60,918,69, 45,2014-01-16 18:16:37.288871,901,855,107, 40,2014-01-13 18:44:17.360259,499,764,363, 45,2014-01-13 04:37:41.810826,578,787,452, 45,2014-01-15 16:52:57.75565,855,489,733, 37,2014-01-18 22:26:09.606567,344,59,608, 65,2014-01-13 01:13:46.631934,433,913,37, 45,2014-01-16 02:44:04.70897,343,753,891, 37,2014-01-14 06:21:55.477029,219,201,853, 37,2014-01-17 16:00:03.50779,263,621,932, 37,2014-01-20 08:41:21.651875,755,926,479, 45,2014-01-11 11:16:46.782292,231,849,801, 40,2014-01-14 16:19:19.221205,775,185,358, 40,2014-01-14 22:46:10.296105,248,896,365, 65,2014-01-14 16:53:40.520626,434,794,197, 45,2014-01-16 10:50:28.755311,529,256,406, 65,2014-01-15 10:40:38.92171,69,99,953, 45,2014-01-18 23:26:34.099917,762,585,878, 40,2014-01-12 00:28:17.732872,428,842,376, 45,2014-01-21 00:11:26.763505,353,781,561, 45,2014-01-17 01:59:33.436575,195,577,708, 45,2014-01-20 02:58:19.414222,560,98,489, 74,2014-01-18 07:24:20.010199,946,138,846, 74,2014-01-20 06:39:29.448024,250,38,842, 37,2014-01-18 07:38:38.439459,224,865,382, 65,2014-01-15 01:51:13.411382,137,9,534, 45,2014-01-21 00:34:53.205367,234,726,320, 74,2014-01-17 04:22:06.514554,197,446,524, 65,2014-01-12 17:11:47.504025,299,684,6, 74,2014-01-16 00:30:01.965261,75,601,914, 40,2014-01-12 22:51:26.822927,753,767,450, 45,2014-01-17 13:48:44.113124,196,531,847, 45,2014-01-11 12:46:39.254551,33,815,493, 65,2014-01-15 21:35:44.183186,286,234,114, 37,2014-01-19 22:26:25.92395,279,170,183, 45,2014-01-16 09:39:37.010201,666,776,737, 65,2014-01-13 14:01:53.789753,278,648,108, 40,2014-01-11 17:40:12.555884,996,774,55, 40,2014-01-11 06:23:41.719171,429,548,9, 65,2014-01-17 00:23:49.267202,443,843,637, 74,2014-01-18 13:40:16.064093,830,865,149, 37,2014-01-20 19:05:03.371046,753,536,572, 65,2014-01-17 15:40:36.049494,738,693,358, 45,2014-01-11 23:59:35.285298,820,684,975, 65,2014-01-14 11:51:13.66757,136,738,71, 45,2014-01-11 02:34:16.57012,821,647,989, 45,2014-01-17 14:50:04.040609,260,853,427, 45,2014-01-19 02:31:52.58928,75,829,652, 65,2014-01-19 06:00:50.670036,215,699,965, 45,2014-01-18 19:18:21.773481,63,284,77, 74,2014-01-18 20:43:19.352785,648,445,719, 45,2014-01-15 02:00:24.106611,84,813,157, 45,2014-01-17 16:16:41.187703,581,899,485, 65,2014-01-15 22:07:39.363404,10,213,690, 40,2014-01-14 02:39:53.555055,5,348,528, 45,2014-01-14 17:55:19.627163,962,468,744, 37,2014-01-20 03:58:52.774786,36,944,390, 45,2014-01-13 21:14:46.760422,936,142,673, 37,2014-01-21 02:17:26.141718,77,313,368, 40,2014-01-11 15:10:40.278811,990,110,986, 37,2014-01-19 01:44:48.725683,311,70,307, 74,2014-01-12 19:08:52.964644,57,455,79, 45,2014-01-18 08:59:24.391935,532,160,377, 37,2014-01-19 18:31:27.980234,557,274,140, 74,2014-01-17 20:44:37.14783,838,287,362, 45,2014-01-17 01:25:50.315885,259,111,843, 40,2014-01-14 22:11:04.990435,958,495,820, 37,2014-01-20 14:46:30.938003,548,261,452, 37,2014-01-17 23:06:02.872189,253,32,51, 74,2014-01-15 08:34:46.709606,746,533,195, 40,2014-01-16 09:10:20.205205,157,573,650, 40,2014-01-15 06:46:18.936281,271,168,631, 91,2014-01-14 21:39:50.081072,790,876,200, 3,2014-01-12 18:50:56.254699,91,648,379, 3,2014-01-16 10:45:54.221579,584,900,178, 3,2014-01-18 13:31:03.748642,257,18,276, 91,2014-01-13 01:25:12.824819,91,522,764, 3,2014-01-12 19:35:58.262189,907,530,156, 91,2014-01-16 10:02:56.337922,952,850,722, 91,2014-01-11 01:26:03.494855,570,737,323, 3,2014-01-20 18:54:20.03567,794,409,763, 
3,2014-01-14 00:46:15.690708,67,14,897, 3,2014-01-15 07:26:27.601468,788,322,344, 91,2014-01-17 05:11:01.223373,333,555,847, 3,2014-01-19 02:53:55.054111,611,162,297, 3,2014-01-18 14:06:21.156523,239,327,644, 3,2014-01-21 03:06:20.850536,889,749,864, 3,2014-01-18 00:52:47.829318,474,911,794, 3,2014-01-11 05:21:07.916944,931,467,675, 91,2014-01-20 23:23:46.992253,892,57,862, 3,2014-01-18 20:03:28.108128,193,372,410, 3,2014-01-19 11:05:56.643047,294,783,490, 91,2014-01-12 12:32:39.957317,591,442,110, 3,2014-01-15 23:02:52.500482,681,691,754, 91,2014-01-17 23:37:24.62328,272,302,300, 3,2014-01-19 13:17:17.105147,424,546,65, 91,2014-01-11 18:28:42.816368,328,984,547, 3,2014-01-11 15:48:29.614135,912,785,670, 3,2014-01-18 22:03:48.043151,554,943,945, 3,2014-01-18 21:34:45.688556,905,203,259, 3,2014-01-16 04:23:16.323742,829,108,781, 91,2014-01-14 01:14:08.248356,472,845,303, 91,2014-01-12 22:21:04.624838,343,908,497, 3,2014-01-20 06:03:06.404813,215,929,577, 3,2014-01-16 00:05:04.503055,23,423,641, 91,2014-01-18 13:06:58.846418,1,458,570, 3,2014-01-17 13:34:10.153454,450,435,275, 3,2014-01-18 05:30:17.327875,612,904,21, 3,2014-01-20 12:42:57.108606,406,587,53, 91,2014-01-12 17:37:06.491377,365,322,848, 91,2014-01-18 10:56:20.82273,173,584,86, 3,2014-01-20 01:37:17.144276,168,928,657, 3,2014-01-15 22:23:22.182504,306,258,318, 91,2014-01-12 12:22:48.424617,267,610,991, 3,2014-01-16 00:48:18.367649,643,242,402, 91,2014-01-13 19:17:17.236616,994,871,847, 91,2014-01-20 06:29:09.58114,567,682,816, 91,2014-01-15 21:54:39.218185,207,31,706, 91,2014-01-21 00:10:05.065933,931,528,979, 3,2014-01-20 15:50:17.498502,138,559,739, 3,2014-01-15 15:18:26.210857,869,692,656, 3,2014-01-18 14:13:33.17448,184,100,886, 3,2014-01-18 20:56:14.141549,377,316,254, 91,2014-01-17 07:51:06.168174,880,710,668, 91,2014-01-13 07:16:25.674963,700,190,678, 3,2014-01-15 14:33:38.764829,621,843,610, 91,2014-01-15 15:08:23.273321,170,761,559, 91,2014-01-14 05:45:08.49133,477,450,561, 91,2014-01-16 10:22:08.378965,437,468,551, 3,2014-01-13 07:12:11.316891,440,500,895, 3,2014-01-11 21:19:54.180306,245,680,380, 91,2014-01-11 08:45:24.703125,205,78,133, 91,2014-01-16 12:26:19.565247,970,978,133, 91,2014-01-13 23:40:58.805144,936,333,142, 3,2014-01-20 18:48:31.107938,585,293,755, 3,2014-01-11 06:31:24.068722,110,738,339, 91,2014-01-11 06:47:57.65198,213,721,93, 3,2014-01-17 02:22:41.261811,830,806,284, 91,2014-01-18 16:01:41.396597,164,593,634, 91,2014-01-20 23:30:54.760586,792,323,729, 91,2014-01-18 23:59:47.753188,263,718,291, 3,2014-01-15 20:58:02.219746,742,233,271, 3,2014-01-19 19:08:11.905281,589,891,255, 91,2014-01-13 16:23:09.935692,723,619,158, 91,2014-01-17 13:20:50.644364,62,439,175, 91,2014-01-14 13:56:30.27293,392,116,759, 3,2014-01-16 15:40:27.172726,122,330,987, 3,2014-01-16 13:50:00.225502,62,670,402, 3,2014-01-19 15:51:09.491535,690,517,336, 91,2014-01-16 09:50:32.238659,433,706,906, 3,2014-01-15 23:40:17.724005,39,349,913, 3,2014-01-14 01:09:35.702393,723,643,444, 3,2014-01-13 18:04:05.41314,831,586,759, 91,2014-01-13 14:36:31.865554,692,101,753, 3,2014-01-20 13:00:41.870567,815,254,778, 91,2014-01-16 05:20:31.088102,81,702,209, 3,2014-01-18 09:10:10.630802,812,409,265, 3,2014-01-14 22:09:05.143889,113,76,764, 91,2014-01-19 15:05:39.579852,114,486,515, 3,2014-01-13 13:32:19.009767,821,487,472, 91,2014-01-17 09:21:16.460779,97,421,654, 3,2014-01-20 16:25:33.760158,165,55,433, 3,2014-01-14 02:17:43.712892,925,456,285, 3,2014-01-12 15:47:36.026446,482,867,424, 3,2014-01-13 10:06:32.565302,426,608,229, 91,2014-01-13 
15:06:14.821249,442,450,949, 3,2014-01-13 02:19:00.095168,454,779,200, 3,2014-01-13 20:54:30.217282,600,509,233, 3,2014-01-19 21:28:56.083059,291,276,210, 91,2014-01-20 22:20:41.491764,43,474,684, 91,2014-01-13 14:25:24.978287,387,291,780, 91,2014-01-17 15:28:43.836248,928,133,328, 91,2014-01-15 13:18:43.71151,172,399,747, 3,2014-01-12 03:33:36.883569,978,315,728, 3,2014-01-20 11:51:53.772358,774,969,581, 91,2014-01-11 20:39:34.355874,471,36,721, 3,2014-01-15 21:30:07.156499,701,611,719, 91,2014-01-15 21:32:20.945084,640,909,677, 91,2014-01-17 14:29:34.581795,851,257,930, 91,2014-01-14 07:21:16.648034,172,666,132, 91,2014-01-11 01:22:53.183744,652,917,601, 91,2014-01-12 08:09:52.3058,871,351,45, 3,2014-01-17 12:30:52.350427,2,513,992, 3,2014-01-13 23:26:58.597302,905,954,518, 3,2014-01-15 02:46:24.171772,241,62,757, 3,2014-01-16 19:31:34.220994,799,141,117, 91,2014-01-19 08:47:29.685405,236,737,500, 91,2014-01-12 05:56:34.802575,411,515,200, 3,2014-01-20 21:40:39.365301,523,954,752, 3,2014-01-17 17:53:09.265256,678,853,975, 3,2014-01-14 23:28:53.812342,14,348,78, 3,2014-01-17 01:01:55.82608,92,997,827, 91,2014-01-17 04:18:43.025415,950,489,922, 3,2014-01-18 05:46:37.525653,789,372,934, 91,2014-01-18 20:49:31.852525,463,660,370, 3,2014-01-19 18:25:15.591135,569,566,381, 91,2014-01-21 00:52:52.346901,184,379,411, 3,2014-01-21 03:08:31.947693,471,764,816, 3,2014-01-12 09:19:45.808418,683,205,263, 3,2014-01-19 16:21:48.429961,137,6,538, 91,2014-01-20 19:29:13.439876,28,590,858, 3,2014-01-15 03:45:10.786705,209,59,259, 91,2014-01-16 01:50:32.266209,803,451,777, 3,2014-01-15 02:47:57.151386,5,455,826, 3,2014-01-16 11:18:47.670273,260,747,795, 3,2014-01-15 07:42:26.038568,456,533,986, 3,2014-01-16 03:27:31.507678,780,994,835, 91,2014-01-11 02:48:54.827192,408,284,471, 91,2014-01-20 09:14:46.983652,385,424,869, 91,2014-01-12 11:57:06.089474,220,937,175, 3,2014-01-14 14:10:11.475226,906,112,958, 3,2014-01-20 14:37:40.167816,331,209,249, 91,2014-01-14 00:06:58.395273,255,574,453, 3,2014-01-10 20:41:03.826072,292,490,915, 91,2014-01-13 08:04:38.765118,544,453,415, 91,2014-01-18 06:53:22.567046,582,712,565, 3,2014-01-16 20:12:38.047066,325,286,472, 3,2014-01-11 10:52:08.450523,98,176,449, 3,2014-01-19 16:49:57.36962,731,802,497, 91,2014-01-16 11:53:17.412366,312,39,139, 3,2014-01-18 08:45:17.716198,852,560,969, 91,2014-01-13 10:18:51.182381,859,980,94, 3,2014-01-12 06:55:13.238866,994,908,385, 3,2014-01-14 07:04:00.741614,132,961,754, 91,2014-01-20 20:05:28.708034,679,141,187, 91,2014-01-20 07:44:45.091391,30,336,114, 91,2014-01-12 19:29:16.332749,617,266,220, 91,2014-01-20 14:58:21.054935,281,894,9, 91,2014-01-20 04:53:17.039502,435,160,750, 3,2014-01-14 08:49:02.141585,230,307,412, 91,2014-01-19 01:20:09.484897,976,32,795, 91,2014-01-19 18:22:30.479378,719,224,80, 91,2014-01-18 16:34:12.928291,34,52,164, 3,2014-01-13 21:10:41.751105,502,963,211, 3,2014-01-13 15:10:27.630764,388,383,876, 3,2014-01-13 21:53:00.598982,239,896,815, 3,2014-01-18 08:53:07.790092,221,437,860, 91,2014-01-18 22:37:59.138443,660,49,93, 91,2014-01-14 04:41:55.426174,763,518,408, 91,2014-01-17 12:07:54.773744,356,572,382, 3,2014-01-20 14:35:05.227916,638,759,288, 91,2014-01-17 22:52:06.901399,714,341,702, 91,2014-01-16 20:45:34.941559,463,518,70, 91,2014-01-13 08:42:03.623608,956,625,74, 91,2014-01-17 23:33:10.727471,385,796,273, 91,2014-01-19 08:50:13.706677,800,829,109, 3,2014-01-20 19:35:26.190653,927,800,642, 91,2014-01-13 13:45:48.774955,271,844,110, 3,2014-01-19 23:42:22.157619,941,469,984, 91,2014-01-19 
06:25:23.560693,578,760,29, 91,2014-01-19 05:39:06.187321,959,114,101, 3,2014-01-17 02:27:39.873817,42,589,849, 3,2014-01-11 10:44:14.743074,282,188,600, 3,2014-01-20 16:34:19.426186,496,967,639, 91,2014-01-20 13:31:40.61543,634,232,288, 91,2014-01-11 00:49:43.451108,317,483,803, 91,2014-01-19 18:19:04.517996,172,765,473, 91,2014-01-12 03:00:56.948598,751,13,366, 91,2014-01-20 19:48:04.506044,301,243,860, 91,2014-01-18 17:12:21.557498,335,123,667, 3,2014-01-15 15:49:59.090183,552,981,230, 3,2014-01-18 15:08:13.990941,908,903,440, 91,2014-01-17 05:34:52.036876,436,403,803, 3,2014-01-13 02:24:12.018474,317,341,10, 3,2014-01-11 06:18:55.742046,436,93,101, 91,2014-01-20 00:45:19.66764,440,980,452, 91,2014-01-16 04:17:12.617456,787,484,109, 91,2014-01-18 19:12:03.532138,811,485,398, 91,2014-01-17 17:23:18.806501,981,919,878, 91,2014-01-19 12:59:08.04363,308,464,799, 91,2014-01-15 01:19:58.306301,708,771,41, 3,2014-01-17 01:58:24.034417,339,647,321, 91,2014-01-16 05:51:14.945448,789,779,948, 3,2014-01-21 02:05:33.24786,970,297,15, 91,2014-01-19 14:40:27.658025,644,500,302, 91,2014-01-12 23:24:25.672919,421,123,988, 3,2014-01-19 04:55:46.954956,602,579,697, 3,2014-01-19 04:15:53.849098,627,264,6, 3,2014-01-15 17:10:14.447874,127,144,253, 91,2014-01-17 22:38:09.112575,787,966,539, 3,2014-01-17 04:41:17.409791,672,916,751, 3,2014-01-13 16:49:20.635615,5,670,816, 91,2014-01-13 19:05:48.986393,759,193,487, 91,2014-01-17 19:25:32.152865,580,203,430, 91,2014-01-13 03:23:40.061801,629,137,665, 3,2014-01-13 02:37:29.601823,524,540,500, 91,2014-01-18 00:15:15.603972,778,696,210, 91,2014-01-11 15:42:44.579797,229,454,438, 91,2014-01-14 09:38:26.550421,577,939,801, 3,2014-01-17 14:03:20.110016,803,970,99, 100,2014-01-19 12:55:06.137295,492,95,206, 100,2014-01-19 05:28:25.640604,104,65,641, 57,2014-01-14 23:11:34.100956,737,831,264, 100,2014-01-15 12:29:58.17417,975,466,832, 57,2014-01-11 01:52:37.659497,691,369,593, 57,2014-01-15 09:30:29.843002,597,622,386, 57,2014-01-14 07:15:17.842229,74,825,962, 100,2014-01-16 10:09:50.276953,495,711,904, 52,2014-01-12 22:42:33.375559,745,544,229, 57,2014-01-21 04:38:36.64873,243,162,586, 100,2014-01-14 17:08:58.321002,589,323,918, 52,2014-01-17 04:02:31.68186,82,984,248, 52,2014-01-17 00:37:00.683147,14,411,455, 100,2014-01-20 03:00:13.266451,637,121,415, 57,2014-01-17 00:08:04.929721,698,777,928, 57,2014-01-15 09:17:28.341592,484,487,139, 52,2014-01-11 20:24:38.93937,102,188,466, 52,2014-01-14 23:03:51.884677,297,652,259, 57,2014-01-13 07:33:22.190689,625,313,970, 52,2014-01-16 17:34:53.387663,998,681,227, 57,2014-01-11 15:42:00.997253,63,727,851, 52,2014-01-19 16:14:39.600479,484,395,988, 57,2014-01-19 14:24:14.023277,988,730,680, 57,2014-01-14 14:47:49.983645,372,758,203, 57,2014-01-16 05:40:11.753344,921,796,150, 57,2014-01-11 11:47:54.085078,252,730,729, 52,2014-01-16 21:25:19.585886,544,98,377, 100,2014-01-12 23:55:27.357316,343,670,767, 100,2014-01-12 07:30:38.665294,205,721,893, 57,2014-01-20 11:03:46.136307,52,667,635, 52,2014-01-19 17:58:47.467331,228,861,283, 57,2014-01-21 00:25:44.802588,307,639,569, 100,2014-01-18 10:32:11.777331,767,606,440, 52,2014-01-13 11:10:21.568288,320,769,917, 57,2014-01-15 06:55:42.977177,969,124,401, 57,2014-01-18 16:24:49.436828,742,559,728, 57,2014-01-18 00:40:51.41129,974,808,273, 100,2014-01-18 18:11:00.819405,968,737,225, 100,2014-01-13 20:34:39.713781,839,967,479, 57,2014-01-20 03:23:24.786848,669,898,805, 57,2014-01-18 16:49:37.468135,505,273,159, 100,2014-01-17 17:43:38.034783,50,638,490, 57,2014-01-16 
01:25:56.468708,975,611,683, 52,2014-01-14 11:26:38.151282,257,346,530, 57,2014-01-16 14:43:51.301234,792,60,714, 52,2014-01-11 19:34:01.398848,932,977,936, 57,2014-01-19 00:44:06.492874,819,337,555, 57,2014-01-17 15:08:30.241023,771,459,531, 52,2014-01-15 22:37:53.283526,246,507,566, 52,2014-01-11 02:17:28.683563,326,565,717, 52,2014-01-13 02:43:23.628686,631,255,675, 52,2014-01-16 18:19:54.281197,196,399,592, 100,2014-01-19 22:32:08.284043,2,384,149, 57,2014-01-11 11:07:37.651964,189,880,871, 52,2014-01-20 13:07:44.264843,638,491,333, 57,2014-01-14 22:12:20.037386,169,110,655, 52,2014-01-12 02:55:31.737042,52,792,139, 57,2014-01-16 04:33:03.850729,889,154,128, 57,2014-01-17 02:07:47.394702,925,815,980, 100,2014-01-13 14:26:10.402335,367,195,711, 52,2014-01-15 09:36:49.987456,247,887,425, 52,2014-01-15 14:06:34.862033,727,139,994, 52,2014-01-13 08:51:55.205342,661,140,653, 52,2014-01-12 14:09:01.764788,927,854,368, 52,2014-01-18 05:16:56.430321,741,618,393, 57,2014-01-17 19:47:38.182519,678,116,99, 57,2014-01-20 10:33:51.201616,452,349,467, 57,2014-01-14 23:57:47.84203,864,724,717, 100,2014-01-15 07:58:39.001925,67,525,689, 52,2014-01-13 01:08:30.915398,5,62,838, 57,2014-01-13 21:21:12.628878,891,938,10, 57,2014-01-12 18:48:16.47006,268,305,790, 57,2014-01-19 22:52:08.9506,941,809,520, 100,2014-01-19 02:47:09.098005,245,754,384, 52,2014-01-16 10:14:54.621343,112,272,159, 57,2014-01-15 07:36:00.251834,46,50,549, 52,2014-01-20 02:21:10.496854,329,385,759, 100,2014-01-14 04:59:01.114635,531,199,963, 100,2014-01-11 16:44:30.492857,394,863,927, 100,2014-01-14 18:59:03.780379,794,541,729, 52,2014-01-14 11:16:29.798199,451,825,980, 100,2014-01-13 23:27:54.121544,159,205,541, 100,2014-01-20 17:18:58.061577,656,435,292, 52,2014-01-13 03:48:38.08266,528,731,886, 100,2014-01-14 14:35:31.773507,58,533,928, 52,2014-01-20 00:26:42.326839,745,960,903, 52,2014-01-17 06:00:58.120046,440,228,449, 100,2014-01-15 21:31:03.510968,254,116,958, 100,2014-01-14 22:59:46.177568,80,797,457, 57,2014-01-13 02:08:45.515168,425,251,555, 57,2014-01-18 15:57:13.913303,784,46,349, 52,2014-01-19 12:36:36.165024,749,557,956, 57,2014-01-17 20:15:20.377201,831,267,974, 57,2014-01-21 04:49:09.118645,866,301,983, 52,2014-01-10 20:45:37.929811,990,640,743, 52,2014-01-14 19:32:16.807942,523,69,625, 52,2014-01-17 18:36:47.301164,802,978,826, 57,2014-01-20 11:19:29.131428,693,444,468, 52,2014-01-18 23:30:04.649972,932,845,226, 57,2014-01-11 20:35:26.303089,873,648,106, 52,2014-01-12 06:28:00.047244,249,176,704, 52,2014-01-11 14:51:17.27885,915,113,752, 52,2014-01-13 19:23:42.773149,260,177,268, 52,2014-01-10 23:20:08.997845,641,611,981, 100,2014-01-19 21:38:26.376855,176,354,140, 57,2014-01-19 09:38:37.39491,221,301,961, 100,2014-01-15 10:56:09.249679,538,683,200, 57,2014-01-18 17:59:36.873709,858,946,867, 52,2014-01-12 08:37:38.509546,581,208,932, 52,2014-01-16 07:40:39.742956,613,442,543, 100,2014-01-12 06:58:40.654507,526,112,615, 57,2014-01-15 23:54:08.307745,770,549,366, 57,2014-01-19 11:08:33.8645,905,710,113, 52,2014-01-11 18:17:38.716084,694,784,436, 52,2014-01-18 07:42:46.390405,899,274,651, 52,2014-01-12 19:44:05.638426,602,593,73, 57,2014-01-20 22:44:21.042922,674,968,405, 57,2014-01-14 07:43:44.51087,168,860,492, 57,2014-01-17 21:15:09.149394,857,586,744, 100,2014-01-14 15:44:07.220909,192,350,437, 52,2014-01-16 13:52:30.026038,300,41,801, 52,2014-01-15 07:12:23.062697,844,129,441, 100,2014-01-12 22:20:43.385933,674,401,988, 57,2014-01-13 04:07:50.403239,448,714,11, 57,2014-01-15 06:01:32.18176,869,904,288, 
52,2014-01-12 23:06:21.316165,772,744,762, 52,2014-01-17 03:40:07.21118,616,901,755, 100,2014-01-11 18:38:19.482924,167,385,237, 57,2014-01-12 04:25:50.447592,633,790,164, 57,2014-01-14 21:10:11.861152,624,537,739, 100,2014-01-12 19:13:45.786431,236,373,28, 57,2014-01-13 14:53:50.494836,11,684,969, 57,2014-01-15 16:01:29.140421,270,653,930, 52,2014-01-15 18:37:28.55958,730,926,912, 57,2014-01-13 18:13:59.49268,404,326,486, 100,2014-01-14 07:39:55.517275,86,258,724, 57,2014-01-13 22:16:05.954491,38,257,933, 57,2014-01-18 09:10:08.742779,768,366,310, 52,2014-01-11 19:39:32.391403,41,500,774, 52,2014-01-15 10:53:44.464036,399,911,396, 100,2014-01-13 10:50:48.485734,249,203,999, 52,2014-01-13 06:38:13.04591,950,222,139, 57,2014-01-20 14:47:52.7722,339,75,454, 52,2014-01-11 15:59:22.350234,600,636,385, 52,2014-01-14 04:55:51.761995,159,670,26, 57,2014-01-17 16:30:39.162605,733,915,175, 57,2014-01-13 15:43:27.98866,880,920,554, 52,2014-01-13 21:40:12.805336,466,521,934, 57,2014-01-21 04:14:23.673475,396,526,279, 57,2014-01-20 16:58:37.138054,837,987,116, 57,2014-01-17 17:24:20.026246,367,844,370, 100,2014-01-16 12:06:53.699513,767,261,213, 57,2014-01-14 18:11:00.20117,33,711,192, 52,2014-01-19 19:45:03.412179,776,933,127, 52,2014-01-18 20:14:44.103171,217,4,414, 100,2014-01-19 04:12:32.38293,681,233,881, 57,2014-01-11 12:51:24.728343,552,783,847, 52,2014-01-14 17:54:51.314351,188,357,304, 57,2014-01-20 02:50:51.865854,199,698,61, 100,2014-01-12 21:17:15.176354,209,577,438, 100,2014-01-18 19:05:03.175922,180,138,228, 52,2014-01-11 16:04:37.652286,87,116,275, 57,2014-01-15 16:11:05.67119,820,298,819, 57,2014-01-13 05:06:32.316343,242,669,905, 57,2014-01-16 14:42:06.212285,272,38,849, 57,2014-01-18 14:25:05.16429,323,646,879, 57,2014-01-16 16:46:27.833618,707,477,558, 57,2014-01-19 16:58:12.166775,163,598,481, 52,2014-01-15 17:35:13.907068,602,808,377, 52,2014-01-17 16:26:00.225022,433,337,617, 57,2014-01-13 21:51:56.631231,550,489,182, 100,2014-01-18 08:26:02.392803,420,401,614, 100,2014-01-20 03:04:13.270932,263,764,828, 57,2014-01-13 06:39:49.403431,137,197,60, 52,2014-01-19 04:25:24.743037,697,57,525, 57,2014-01-13 02:00:05.033346,105,696,344, 57,2014-01-20 03:10:28.566036,4,32,374, 100,2014-01-11 14:08:52.732117,975,511,621, 52,2014-01-16 03:40:17.838682,467,359,726, 57,2014-01-20 01:24:52.239931,405,215,790, 52,2014-01-11 01:07:29.870171,698,767,948, 100,2014-01-12 15:04:37.864928,842,694,337, 52,2014-01-15 07:31:45.939444,185,299,601, 57,2014-01-14 23:18:30.071341,20,674,759, 57,2014-01-11 04:49:41.277107,740,422,252, 100,2014-01-12 21:46:30.042615,529,242,391, 100,2014-01-12 21:31:02.454271,297,40,654, 57,2014-01-11 21:41:06.005869,568,97,590, 57,2014-01-16 19:41:21.357385,642,578,158, 57,2014-01-11 18:21:54.320545,404,392,761, 57,2014-01-13 22:58:21.182223,501,446,874, 57,2014-01-14 08:46:24.533307,238,165,370, 57,2014-01-12 14:26:31.972831,184,665,537, 100,2014-01-18 19:09:26.853413,927,971,654, 57,2014-01-16 17:52:56.84965,797,358,477, 57,2014-01-18 09:08:38.185116,350,645,384, 57,2014-01-16 03:34:32.017702,978,16,447, 52,2014-01-12 04:39:24.683268,369,57,351, 100,2014-01-16 20:06:50.35231,603,362,539, 52,2014-01-21 01:09:45.924771,199,331,616, 57,2014-01-19 01:05:24.908708,112,312,841, 100,2014-01-19 21:58:46.983542,608,667,14, 57,2014-01-18 03:35:48.317574,716,19,568, 57,2014-01-17 22:09:38.179641,826,200,449, 52,2014-01-11 22:38:36.386973,298,208,358, 100,2014-01-16 02:01:13.06103,295,348,184, 52,2014-01-20 04:09:43.212986,654,89,726, 100,2014-01-21 05:49:04.953009,282,731,300, 
100,2014-01-16 20:10:05.793147,560,625,848, 100,2014-01-15 01:50:01.05167,430,640,989, 57,2014-01-20 01:13:57.192941,450,117,60, 57,2014-01-17 01:17:35.663318,958,321,388, 57,2014-01-16 20:54:38.916598,128,416,631, 52,2014-01-14 02:45:43.132804,978,266,751, 52,2014-01-21 04:36:05.735078,451,787,130, 57,2014-01-17 05:44:20.194124,835,221,174, 57,2014-01-16 04:32:13.17542,426,996,545, 100,2014-01-12 20:07:08.189349,321,329,361, 52,2014-01-18 07:25:26.199993,177,737,642, 57,2014-01-11 18:13:34.532804,388,439,186, 52,2014-01-13 18:29:02.509894,704,789,697, 52,2014-01-11 00:23:47.382217,831,506,541, 100,2014-01-15 06:59:59.066111,505,437,409, 57,2014-01-15 12:55:34.483144,593,102,426, 57,2014-01-18 23:33:14.235629,738,898,302, 57,2014-01-21 04:52:55.915761,364,65,190, 52,2014-01-12 16:04:12.66826,499,607,775, 100,2014-01-13 21:07:46.253751,828,959,456, 52,2014-01-12 03:32:20.599449,67,745,287, 100,2014-01-18 16:11:03.020571,56,644,97, 52,2014-01-13 16:17:32.178522,972,223,125, 52,2014-01-20 08:37:45.508157,753,538,469, 52,2014-01-17 08:09:50.004112,30,759,698, 57,2014-01-20 23:53:20.496515,140,565,821, 57,2014-01-17 00:47:23.687798,886,540,439, 100,2014-01-18 10:48:26.391085,454,439,307, 52,2014-01-15 19:54:33.557544,703,389,565, 52,2014-01-15 22:57:06.901204,839,921,722, 57,2014-01-18 10:37:31.344515,59,967,699, 52,2014-01-11 10:04:39.350691,477,734,905, 57,2014-01-20 04:47:07.952873,941,8,892, 52,2014-01-17 05:51:28.537455,424,230,37, 52,2014-01-17 15:22:15.014008,238,131,587, 57,2014-01-15 19:41:46.869891,760,106,999, 62,2014-01-20 12:37:11.670259,112,267,95, 99,2014-01-17 13:58:20.749086,259,699,251, 99,2014-01-11 19:26:07.064015,744,237,94, 99,2014-01-15 11:09:24.846098,458,478,185, 99,2014-01-19 14:05:28.938855,892,78,761, 99,2014-01-19 06:51:33.264007,910,546,205, 62,2014-01-16 09:22:59.378902,14,593,250, 62,2014-01-12 02:34:31.448749,497,16,914, 99,2014-01-19 11:15:20.646223,412,9,148, 62,2014-01-20 20:22:58.445433,335,603,894, 99,2014-01-16 19:30:05.931893,511,20,609, 99,2014-01-17 04:48:34.881853,337,51,235, 99,2014-01-20 19:15:54.361195,62,418,918, 62,2014-01-18 15:34:18.600572,845,616,682, 62,2014-01-20 05:56:21.135603,327,959,609, 62,2014-01-11 16:48:14.960644,205,270,672, 62,2014-01-16 01:45:21.621143,84,195,259, 62,2014-01-12 16:13:53.314544,43,419,790, 99,2014-01-21 05:26:00.468801,753,658,174, 62,2014-01-12 03:55:11.625674,171,151,803, 62,2014-01-12 10:07:13.811058,675,729,788, 99,2014-01-17 00:13:24.157018,455,801,661, 62,2014-01-19 18:43:38.016759,846,533,309, 62,2014-01-17 10:01:47.368602,510,142,437, 99,2014-01-11 17:10:31.058221,961,938,133, 99,2014-01-16 23:21:09.361274,43,357,502, 99,2014-01-18 00:06:26.719293,876,685,150, 62,2014-01-21 01:57:39.011095,757,684,715, 99,2014-01-12 23:12:37.899148,751,386,547, 62,2014-01-14 09:28:41.733302,13,445,315, 62,2014-01-15 15:39:25.880984,422,579,509, 99,2014-01-12 05:49:49.569407,841,675,46, 99,2014-01-21 03:27:02.482807,618,31,22, 62,2014-01-16 15:05:32.944999,444,515,960, 62,2014-01-16 20:59:14.415506,468,166,218, 99,2014-01-19 11:32:31.421662,247,129,82, 99,2014-01-14 15:57:06.209006,749,139,556, 99,2014-01-12 00:22:13.794827,312,756,956, 62,2014-01-20 18:07:02.870411,473,912,590, 99,2014-01-12 21:12:26.855229,218,351,323, 62,2014-01-11 14:45:12.24026,533,170,399, 99,2014-01-16 07:37:08.802724,205,192,828, 99,2014-01-19 06:01:01.737501,361,699,364, 99,2014-01-11 04:01:06.601456,752,768,277, 99,2014-01-13 17:11:27.403296,64,581,216, 99,2014-01-18 05:57:22.873104,811,645,277, 99,2014-01-12 00:49:21.562519,847,907,393, 
99,2014-01-18 22:56:49.024439,175,65,252, 62,2014-01-20 02:11:16.187649,146,217,137, 99,2014-01-12 00:15:22.03132,67,732,740, 62,2014-01-20 06:52:00.650112,774,283,968, 62,2014-01-11 06:18:29.998706,90,857,802, 99,2014-01-18 04:28:46.188337,137,87,441, 99,2014-01-18 19:35:38.667291,904,695,84, 99,2014-01-17 20:20:17.367309,373,49,655, 62,2014-01-19 01:39:17.246558,130,739,458, 62,2014-01-14 12:56:48.028565,837,957,926, 99,2014-01-14 14:26:44.086602,263,417,392, 62,2014-01-18 21:36:56.258072,967,131,200, 99,2014-01-16 16:09:25.927712,683,459,374, 62,2014-01-18 03:55:25.820323,387,805,520, 62,2014-01-13 07:16:22.139057,284,335,700, 62,2014-01-18 01:59:15.497539,931,368,797, 62,2014-01-18 01:22:28.30313,27,60,871, 99,2014-01-19 02:21:55.084475,403,749,464, 62,2014-01-13 16:58:29.913045,451,175,383, 62,2014-01-16 06:54:59.724793,271,483,399, 62,2014-01-12 12:19:01.293481,394,418,509, 62,2014-01-14 21:20:43.707872,840,237,570, 62,2014-01-15 19:02:02.595204,266,117,895, 99,2014-01-14 13:31:28.149129,265,738,567, 62,2014-01-15 16:05:55.948133,490,522,53, 99,2014-01-10 20:39:11.397928,695,851,216, 99,2014-01-12 13:32:29.886631,359,257,416, 99,2014-01-16 00:07:02.550008,547,545,302, 99,2014-01-13 17:50:38.801223,908,277,13, 99,2014-01-19 23:29:52.759735,690,62,545, 62,2014-01-17 04:56:24.112527,964,678,33, 62,2014-01-11 10:47:27.826081,420,137,328, 99,2014-01-18 19:41:08.947803,480,75,396, 62,2014-01-18 09:11:46.143847,89,427,837, 99,2014-01-20 21:39:28.476193,355,240,946, 99,2014-01-19 05:59:38.94651,154,325,30, 99,2014-01-15 07:40:32.332602,13,822,980, 62,2014-01-18 11:15:07.143483,432,274,939, 62,2014-01-15 20:19:56.314238,859,824,843, 62,2014-01-13 03:19:49.57916,178,806,987, 62,2014-01-11 18:11:55.172467,156,588,617, 99,2014-01-19 14:46:40.400841,350,101,289, 62,2014-01-21 04:56:45.837233,197,352,375, 62,2014-01-17 14:21:21.100179,646,700,845, 62,2014-01-16 12:42:06.221164,162,25,402, 99,2014-01-13 06:13:07.976709,504,450,406, 62,2014-01-14 10:20:36.597719,221,622,759, 99,2014-01-13 08:04:34.524294,47,333,920, 62,2014-01-11 02:35:03.061184,892,661,699, 99,2014-01-17 07:19:06.511182,705,37,615, 99,2014-01-18 18:59:34.249086,513,42,811, 99,2014-01-12 18:54:04.354665,758,370,49, 62,2014-01-10 22:39:50.219054,33,875,902, 99,2014-01-13 08:01:36.84471,788,473,255, 62,2014-01-16 12:25:32.503795,327,312,661, 62,2014-01-15 18:45:46.167606,696,440,895, 62,2014-01-13 08:40:48.242639,83,583,741, 62,2014-01-18 05:58:02.390425,379,954,897, 99,2014-01-20 22:52:48.717615,528,307,730, 62,2014-01-16 06:31:27.044281,546,380,996, 62,2014-01-16 23:27:55.15058,22,429,777, 99,2014-01-16 21:49:12.830142,347,493,715, 62,2014-01-16 21:18:54.870361,6,60,201, 99,2014-01-14 13:09:04.098383,995,107,178, 62,2014-01-14 01:00:58.973988,421,864,68, 62,2014-01-16 12:58:23.346554,342,212,345, 62,2014-01-12 13:08:43.044893,352,508,958, 62,2014-01-18 12:41:31.306591,316,158,774, 62,2014-01-13 18:13:30.490457,528,402,687, 99,2014-01-17 13:28:39.359131,441,446,284, 99,2014-01-20 20:01:20.88617,437,373,182, 99,2014-01-14 16:25:25.662924,418,333,536, 62,2014-01-15 22:15:19.760391,336,927,517, 62,2014-01-20 18:58:06.723403,8,865,709, 62,2014-01-11 00:46:46.763103,120,153,267, 62,2014-01-11 04:57:25.981553,158,634,842, 62,2014-01-12 19:11:14.700112,792,918,971, 99,2014-01-14 15:07:23.361241,912,910,364, 99,2014-01-13 17:02:00.506267,712,846,851, 62,2014-01-13 01:46:17.761296,262,661,16, 99,2014-01-21 02:26:29.872423,152,935,965, 99,2014-01-11 06:01:34.755353,554,472,588, 62,2014-01-15 00:40:22.11638,334,151,682, 62,2014-01-21 
05:06:20.091477,707,170,877, 62,2014-01-13 18:03:11.600064,432,488,790, 62,2014-01-20 17:05:54.620175,132,656,38, 62,2014-01-15 17:52:06.259084,442,632,262, 62,2014-01-16 06:43:59.843121,711,182,587, 62,2014-01-17 17:03:57.011019,38,954,980, 99,2014-01-15 10:44:54.976699,513,199,442, 62,2014-01-11 07:15:26.887402,35,157,473, 99,2014-01-13 10:31:52.161599,150,421,342, 99,2014-01-11 02:34:07.80684,926,24,465, 62,2014-01-17 08:34:21.757764,584,307,185, 62,2014-01-17 03:40:56.259982,588,50,618, 62,2014-01-14 07:35:06.781247,12,53,871, 99,2014-01-12 15:32:45.104318,409,228,532, 99,2014-01-18 20:49:39.304875,112,814,676, 99,2014-01-11 14:16:38.087837,414,610,573, 62,2014-01-15 13:46:15.594775,191,52,414, 99,2014-01-15 04:18:18.664426,272,522,423, 62,2014-01-11 04:17:58.974007,531,577,3, 62,2014-01-19 10:11:41.2577,299,960,485, 99,2014-01-20 06:33:38.424816,568,978,257, 99,2014-01-21 03:16:05.69783,514,839,901, 99,2014-01-19 14:58:28.021223,2,223,22, 99,2014-01-20 15:31:04.40637,123,852,245, 99,2014-01-12 16:27:20.397942,619,848,31, 62,2014-01-13 00:05:51.382464,522,675,59, 62,2014-01-13 02:33:04.912636,820,368,733, 99,2014-01-14 22:13:38.159657,228,410,809, 62,2014-01-12 20:32:21.254886,450,697,690, 62,2014-01-13 12:34:39.668408,220,41,678, 62,2014-01-19 02:54:00.276038,649,749,190, 62,2014-01-12 19:38:41.346365,260,811,277, 62,2014-01-16 10:37:51.268473,188,514,685, 99,2014-01-18 13:59:54.896212,260,505,284, 62,2014-01-17 17:30:47.605868,386,984,786, 62,2014-01-11 11:21:51.111594,595,519,305, 62,2014-01-15 01:03:51.907231,709,495,580, 62,2014-01-11 22:15:42.582566,166,248,722, 62,2014-01-11 22:37:17.998996,547,990,274, 62,2014-01-17 15:35:44.06883,148,877,914, 62,2014-01-12 04:49:50.389825,991,758,90, 99,2014-01-18 11:11:39.756759,966,150,238, 99,2014-01-13 13:16:40.328813,362,945,252, 99,2014-01-15 16:24:57.171072,850,590,724, 62,2014-01-20 06:44:24.861496,385,302,945, 62,2014-01-11 04:06:19.634107,629,473,343, 99,2014-01-16 10:41:35.258909,910,413,269, 62,2014-01-14 14:30:40.456272,236,571,588, 62,2014-01-15 12:24:38.298533,807,415,215, 99,2014-01-16 18:59:34.233335,482,668,475, 99,2014-01-12 18:42:21.714391,287,283,831, 62,2014-01-14 12:58:16.723349,987,753,879, 99,2014-01-16 16:15:39.931166,767,797,14, 62,2014-01-11 03:40:49.735613,893,913,74, 62,2014-01-13 22:29:21.129719,990,770,23, 99,2014-01-18 12:43:00.329107,544,78,259, 99,2014-01-13 07:46:41.118076,922,896,954, 99,2014-01-16 05:02:26.042774,947,578,687, 62,2014-01-12 04:56:38.488765,459,420,712, 99,2014-01-15 08:19:02.372963,115,49,55, 99,2014-01-18 21:37:05.711182,312,947,193, 99,2014-01-20 11:50:38.764802,557,271,982, 99,2014-01-17 07:57:43.719327,840,33,754, 62,2014-01-14 02:14:56.979655,563,591,625, 62,2014-01-17 19:50:33.661433,145,990,245, 62,2014-01-13 19:28:31.325195,257,259,838, 99,2014-01-14 17:36:48.091668,204,907,326, 62,2014-01-11 14:54:25.568665,488,978,860, 99,2014-01-14 21:44:13.908179,739,310,245, 62,2014-01-15 20:14:06.090664,566,137,340, 62,2014-01-18 10:30:09.637915,590,462,746, 99,2014-01-16 06:34:04.298004,323,344,249, 99,2014-01-13 01:25:45.847842,379,577,751, 62,2014-01-21 03:46:49.966727,216,441,480, 99,2014-01-21 02:59:01.469076,496,141,861, 46,2014-01-12 02:10:10.709338,42,15,377, 97,2014-01-11 01:53:09.600834,286,374,890, 46,2014-01-16 07:40:36.727985,826,362,467, 46,2014-01-15 20:40:51.165609,499,333,930, 46,2014-01-15 18:17:47.899368,42,769,786, 28,2014-01-11 20:40:10.961321,368,24,949, 28,2014-01-17 19:23:12.879999,515,342,206, 97,2014-01-19 11:16:04.622717,50,745,132, 28,2014-01-17 
16:55:50.892486,414,824,541, 28,2014-01-18 03:04:02.615612,519,550,650, 46,2014-01-12 03:45:25.752854,719,70,491, 46,2014-01-14 23:38:51.222013,335,762,582, 28,2014-01-20 14:50:43.733689,847,920,360, 97,2014-01-17 12:47:51.795628,629,690,531, 97,2014-01-16 08:35:29.710778,757,681,338, 28,2014-01-14 17:09:46.106651,925,421,32, 97,2014-01-15 04:24:57.50681,960,75,446, 97,2014-01-15 14:26:08.475999,31,568,32, 46,2014-01-21 05:07:29.826397,654,415,434, 28,2014-01-18 10:39:54.486466,236,919,968, 46,2014-01-12 04:16:42.137433,67,425,323, 46,2014-01-13 18:36:01.151592,361,471,603, 97,2014-01-11 22:16:42.578133,278,928,65, 46,2014-01-12 23:11:07.706098,163,621,278, 46,2014-01-17 20:20:15.059771,405,769,526, 46,2014-01-16 22:30:48.669216,824,272,268, 97,2014-01-20 13:41:17.344013,62,113,726, 97,2014-01-12 20:54:19.357775,146,432,165, 46,2014-01-19 03:56:34.517057,961,89,559, 28,2014-01-20 11:28:07.311159,953,766,293, 46,2014-01-12 17:53:20.826852,150,748,266, 28,2014-01-20 10:06:45.226395,790,152,895, 46,2014-01-20 17:21:16.911994,150,998,833, 46,2014-01-18 05:33:57.555256,548,52,310, 46,2014-01-15 00:47:36.392003,241,41,419, 46,2014-01-15 05:39:04.81178,629,985,811, 46,2014-01-19 06:14:08.516577,532,316,321, 46,2014-01-11 14:10:49.272422,274,533,109, 46,2014-01-13 10:55:09.434497,227,87,214, 28,2014-01-15 17:09:59.409482,547,307,233, 97,2014-01-18 21:14:51.888034,372,118,846, 28,2014-01-14 18:40:35.187351,834,721,249, 46,2014-01-19 20:48:50.631496,198,583,289, 28,2014-01-18 06:05:35.622142,917,150,339, 97,2014-01-11 01:28:26.982979,464,859,883, 46,2014-01-14 23:24:20.342274,190,134,511, 46,2014-01-11 17:15:21.728793,202,608,950, 46,2014-01-15 09:53:24.489791,306,939,246, 46,2014-01-19 17:50:28.818273,340,890,659, 46,2014-01-11 16:22:51.555191,282,357,932, 46,2014-01-12 14:33:18.976675,223,947,596, 46,2014-01-21 02:07:10.955706,630,918,470, 28,2014-01-14 14:58:52.707201,167,653,211, 97,2014-01-13 16:50:01.554808,940,210,876, 28,2014-01-12 19:18:18.662223,570,327,33, 28,2014-01-11 05:19:07.766972,910,782,719, 97,2014-01-19 19:20:50.224024,964,607,933, 46,2014-01-12 02:59:36.006235,138,62,82, 46,2014-01-13 06:13:27.125167,984,983,587, 28,2014-01-17 17:17:24.741081,677,47,55, 46,2014-01-11 08:27:43.317395,287,419,401, 28,2014-01-15 04:06:47.952019,523,360,741, 28,2014-01-16 21:24:09.967476,850,249,191, 46,2014-01-21 05:49:00.229807,777,230,729, 28,2014-01-12 03:40:45.507275,438,476,741, 28,2014-01-21 02:11:46.359898,378,792,348, 97,2014-01-15 05:28:05.041588,935,549,237, 46,2014-01-20 13:54:54.024271,678,510,413, 46,2014-01-11 10:22:35.632321,692,482,454, 97,2014-01-17 20:23:14.476084,888,674,900, 97,2014-01-15 01:04:53.433753,739,271,649, 28,2014-01-19 11:37:27.520355,88,281,894, 46,2014-01-11 09:03:49.663855,825,956,177, 28,2014-01-13 15:53:44.065249,505,61,487, 28,2014-01-16 11:43:03.142497,312,640,938, 28,2014-01-11 14:32:16.646834,277,39,766, 46,2014-01-17 19:18:04.407523,233,475,721, 97,2014-01-17 08:58:24.871709,397,285,100, 28,2014-01-16 04:25:41.137044,100,581,860, 28,2014-01-16 07:08:33.226215,170,584,544, 28,2014-01-18 05:21:16.426899,412,406,559, 28,2014-01-17 22:59:00.113719,227,987,728, 46,2014-01-16 03:15:44.18192,278,806,194, 97,2014-01-11 14:20:09.134101,729,446,882, 46,2014-01-20 19:49:01.668527,373,174,162, 28,2014-01-18 02:34:02.844143,773,897,448, 46,2014-01-11 23:39:16.901492,950,831,894, 28,2014-01-19 09:09:51.892971,394,279,928, 46,2014-01-19 09:33:38.850798,224,309,264, 46,2014-01-14 09:52:44.026659,55,377,692, 28,2014-01-15 20:27:16.634052,638,614,783, 46,2014-01-19 
22:01:22.168193,791,296,854, 28,2014-01-18 17:59:31.978677,156,837,650, 28,2014-01-11 11:51:26.601947,220,255,747, 28,2014-01-19 21:50:22.398,828,917,400, 46,2014-01-20 01:40:17.486371,318,544,767, 46,2014-01-11 08:03:12.961845,93,177,709, 28,2014-01-13 21:18:27.439588,764,46,221, 46,2014-01-19 09:35:11.510642,183,854,828, 28,2014-01-11 22:25:48.594166,406,999,296, 97,2014-01-20 21:41:41.916092,62,804,239, 97,2014-01-13 04:40:04.944395,581,663,85, 97,2014-01-20 14:03:16.11452,258,886,181, 28,2014-01-11 00:45:31.579947,850,209,516, 97,2014-01-16 00:33:49.009645,599,112,742, 97,2014-01-15 19:46:19.257435,92,541,393, 97,2014-01-11 19:17:48.226781,787,584,119, 97,2014-01-12 13:51:53.417167,317,388,205, 97,2014-01-11 22:44:44.129145,848,686,924, 28,2014-01-17 03:43:29.363826,40,408,837, 28,2014-01-18 01:00:26.643381,612,950,822, 28,2014-01-19 08:06:00.556044,841,470,476, 97,2014-01-13 20:42:29.477545,435,350,494, 97,2014-01-12 22:16:10.825721,39,83,40, 46,2014-01-20 02:26:09.690564,64,234,178, 46,2014-01-13 10:31:31.146492,122,900,978, 28,2014-01-20 04:50:13.669864,711,765,108, 46,2014-01-10 20:05:26.592056,346,272,416, 46,2014-01-11 13:41:23.039462,803,137,239, 46,2014-01-18 08:23:52.520661,69,484,848, 28,2014-01-14 09:58:10.618715,603,972,333, 46,2014-01-16 14:08:39.673514,874,781,555, 28,2014-01-17 00:25:14.689273,964,62,984, 46,2014-01-11 21:57:42.597393,918,746,458, 97,2014-01-17 06:00:06.27546,998,384,859, 28,2014-01-15 16:15:37.087273,159,297,930, 46,2014-01-21 03:38:00.083764,125,503,576, 28,2014-01-17 18:03:19.237305,177,272,290, 97,2014-01-18 17:34:04.527279,56,149,796, 97,2014-01-19 07:13:11.594405,837,118,992, 46,2014-01-18 20:29:07.831471,278,124,187, 28,2014-01-18 09:15:46.443371,315,179,988, 46,2014-01-11 05:53:16.538801,298,881,761, 28,2014-01-18 04:32:23.946409,401,717,345, 97,2014-01-18 14:01:18.023318,869,43,494, 28,2014-01-16 10:27:05.548445,377,173,575, 28,2014-01-12 18:18:43.203846,2,395,60, 28,2014-01-19 13:19:06.250099,225,275,412, 46,2014-01-18 04:18:58.966031,432,401,105, 28,2014-01-13 21:03:27.33299,545,698,913, 46,2014-01-15 11:02:35.614344,603,950,352, 97,2014-01-14 23:19:25.609412,686,413,205, 97,2014-01-11 23:09:27.889034,742,858,758, 28,2014-01-18 11:45:05.091889,967,888,980, 97,2014-01-17 01:35:36.435133,275,818,594, 97,2014-01-21 05:35:37.579598,858,503,33, 46,2014-01-21 02:16:36.23838,508,572,872, 97,2014-01-16 00:25:50.105416,482,714,90, 46,2014-01-21 05:41:04.171654,182,535,959, 46,2014-01-11 09:57:59.277843,793,388,759, 28,2014-01-13 02:49:42.626077,267,501,534, 28,2014-01-14 09:39:14.790369,872,227,399, 46,2014-01-17 04:06:38.951776,35,179,40, 46,2014-01-19 07:14:57.31535,286,345,83, 97,2014-01-15 11:36:57.38818,817,202,576, 46,2014-01-13 04:06:45.227236,343,856,515, 28,2014-01-13 17:30:34.403042,422,922,462, 46,2014-01-15 09:14:57.471944,2,407,664, 28,2014-01-20 02:10:04.46496,851,274,159, 28,2014-01-11 05:04:38.930322,959,474,527, 46,2014-01-13 10:28:09.066769,219,798,826, 46,2014-01-17 16:39:12.296431,476,305,10, 28,2014-01-18 18:20:25.373693,70,894,611, 46,2014-01-13 20:21:25.60557,373,529,527, 28,2014-01-14 15:11:36.242841,218,430,457, 28,2014-01-16 02:21:43.39701,509,125,504, 46,2014-01-19 10:48:31.154433,121,668,2, 46,2014-01-16 13:30:19.446685,899,296,172, 46,2014-01-14 09:40:49.64711,635,981,479, 46,2014-01-16 09:07:30.120465,386,623,356, 97,2014-01-19 14:33:46.779676,232,710,469, 97,2014-01-19 00:43:25.261454,836,194,944, 28,2014-01-20 12:26:55.729877,129,153,861, 97,2014-01-20 17:43:14.669129,539,436,343, 46,2014-01-16 
06:28:30.353761,668,914,373, 46,2014-01-16 08:02:32.165429,210,191,167, 97,2014-01-20 17:18:52.248727,376,285,374, 46,2014-01-16 02:45:06.592143,828,544,495, 97,2014-01-11 02:28:22.270845,371,266,1, 46,2014-01-20 16:59:56.4208,629,850,881, 46,2014-01-16 16:43:05.870404,806,197,360, 28,2014-01-13 09:18:04.896922,824,15,81, 46,2014-01-13 20:39:11.211169,0,235,475, 28,2014-01-12 14:49:44.822179,358,815,912, 46,2014-01-11 10:33:02.212691,264,568,66, 97,2014-01-18 12:16:08.59935,635,297,430, 97,2014-01-14 22:56:30.049416,127,386,900, 28,2014-01-14 08:03:36.615733,88,844,488, 28,2014-01-12 15:31:06.071713,627,509,489, 46,2014-01-13 19:06:34.514376,674,631,757, 97,2014-01-14 17:08:15.546055,544,571,437, 46,2014-01-15 05:59:15.138482,948,948,700, 97,2014-01-20 15:45:46.811226,658,544,309, 97,2014-01-12 05:28:40.919748,907,816,357, 28,2014-01-18 06:20:40.744051,950,703,107, 28,2014-01-14 00:57:23.054067,435,462,31, 97,2014-01-17 11:50:24.316758,553,57,414, 97,2014-01-16 11:09:11.898485,475,502,579, 97,2014-01-19 18:27:42.501172,62,951,907, 28,2014-01-21 01:31:13.963869,927,59,17, 46,2014-01-11 14:16:42.01895,293,722,474, 97,2014-01-18 03:01:29.280848,848,179,255, 46,2014-01-18 20:14:39.225742,320,999,613, 97,2014-01-10 20:43:37.749246,511,44,491, 46,2014-01-17 14:44:43.949976,970,638,160, 97,2014-01-13 20:43:09.579503,986,352,734, 28,2014-01-16 08:46:09.914675,979,74,381, 97,2014-01-17 02:03:36.199122,521,353,920, 46,2014-01-19 03:28:16.171646,887,742,4, 28,2014-01-16 15:14:32.18552,635,6,432, 28,2014-01-16 13:03:32.619922,219,453,629, 28,2014-01-14 14:11:22.042049,643,292,227, 46,2014-01-19 04:32:37.082441,314,333,832, 46,2014-01-19 13:42:43.831092,910,434,350, 28,2014-01-16 09:01:06.864228,513,109,497, 97,2014-01-19 19:05:39.295132,518,892,125, 97,2014-01-16 19:58:52.43086,726,273,847, 28,2014-01-20 11:57:36.914064,305,188,217, 97,2014-01-12 21:09:15.910866,788,959,375, 28,2014-01-18 15:29:58.502573,458,760,750, 97,2014-01-13 01:04:11.42844,236,336,294, 97,2014-01-16 18:17:31.45692,293,657,62, 46,2014-01-12 15:29:13.641055,817,272,59, 97,2014-01-12 10:49:58.239666,672,835,844, 46,2014-01-18 03:46:12.376668,392,665,928, 46,2014-01-20 01:49:54.385106,929,93,355, 97,2014-01-15 15:47:21.294151,731,220,722, 28,2014-01-14 09:36:36.693426,420,406,755, 97,2014-01-15 06:59:06.28401,525,802,984, 46,2014-01-14 04:15:03.795323,142,625,369, 97,2014-01-14 05:07:50.657714,806,318,330, 46,2014-01-16 01:15:48.30296,104,694,497, 46,2014-01-11 01:16:33.07617,432,928,165, 97,2014-01-11 19:22:29.883454,999,491,210, 46,2014-01-16 01:59:26.052206,804,433,997, 28,2014-01-17 20:01:17.026566,518,947,915, 97,2014-01-14 20:05:39.462958,502,104,76, 28,2014-01-11 04:45:35.96688,276,276,795, 28,2014-01-13 16:04:53.225688,53,391,951, 46,2014-01-12 17:33:55.634603,818,238,370, 46,2014-01-17 00:00:08.1524,697,255,307, 28,2014-01-19 09:08:25.84561,32,638,776, 46,2014-01-16 11:45:17.677072,15,873,974, 97,2014-01-14 22:32:45.23526,830,186,295, 97,2014-01-17 16:51:09.676701,211,873,918, 46,2014-01-11 14:46:24.5413,920,872,970, 97,2014-01-13 11:38:24.530393,800,952,80, 28,2014-01-13 06:50:02.107561,845,960,144, 28,2014-01-10 20:44:01.455364,710,512,622, 28,2014-01-15 12:47:40.441676,264,871,131, 97,2014-01-21 02:20:00.610134,124,12,73, 46,2014-01-13 01:48:12.884222,685,637,851, 28,2014-01-11 01:05:11.898597,364,189,665, 28,2014-01-12 11:49:14.25119,583,468,59, 46,2014-01-14 16:38:11.123886,812,747,622, 28,2014-01-18 08:51:24.275683,388,492,215, 46,2014-01-11 07:39:08.636715,782,719,652, 97,2014-01-19 12:25:32.418038,493,752,295, 
28,2014-01-11 12:28:00.969109,164,973,645, 28,2014-01-15 18:38:14.920725,915,429,30, 97,2014-01-12 10:40:36.21336,785,693,143, 97,2014-01-14 21:35:51.626823,373,465,252, 97,2014-01-20 23:54:03.223684,860,288,414, 46,2014-01-12 15:57:09.289949,590,854,777, 97,2014-01-15 20:58:21.510277,365,102,842, 46,2014-01-15 19:53:29.276309,622,446,589, 46,2014-01-19 15:58:26.316515,595,459,843, 28,2014-01-19 21:04:00.973235,476,82,178, 28,2014-01-16 04:39:05.243191,64,574,461, 97,2014-01-12 02:03:19.542202,877,274,864, 46,2014-01-11 08:37:56.608257,669,752,627, 97,2014-01-11 22:39:13.396009,432,293,329, 46,2014-01-20 15:11:45.387812,615,881,277, 97,2014-01-17 00:23:14.285329,547,974,961, 97,2014-01-16 01:11:58.63127,127,293,684, 46,2014-01-17 02:02:55.064514,873,994,521, 46,2014-01-17 19:09:38.82663,344,502,991, 28,2014-01-18 16:50:23.161661,634,941,449, 46,2014-01-19 12:52:57.172494,245,330,157, 97,2014-01-17 19:53:40.282412,242,56,366, 46,2014-01-12 23:38:03.603339,293,257,783, 97,2014-01-19 09:12:57.782628,194,222,402, 28,2014-01-20 01:41:53.166634,26,190,808, 28,2014-01-13 04:43:15.502354,296,37,737, 28,2014-01-21 01:02:12.034236,436,545,70, 97,2014-01-13 12:20:04.290521,390,896,328, 46,2014-01-20 17:34:39.778037,606,912,359, 97,2014-01-11 02:41:20.670951,574,810,753, 28,2014-01-11 18:45:36.708977,704,908,651, 97,2014-01-11 00:02:40.747147,982,992,246, 28,2014-01-16 01:19:35.591676,43,109,700, 46,2014-01-13 09:26:12.922337,452,797,690, 28,2014-01-15 01:38:32.374389,9,81,794, 46,2014-01-15 19:13:38.81536,377,735,804, 97,2014-01-15 05:23:22.212286,449,655,601, 28,2014-01-20 02:36:53.884666,241,713,799, 28,2014-01-20 19:07:08.09167,177,142,593, 46,2014-01-13 15:21:48.528801,227,897,324, 46,2014-01-18 17:40:54.85832,245,964,946, 28,2014-01-15 17:46:13.335279,159,498,205, 28,2014-01-14 21:25:08.071003,677,655,48, 86,2014-01-19 06:18:51.466578,14,712,490, 86,2014-01-16 20:25:26.731289,679,737,683, 86,2014-01-14 06:36:53.458815,834,517,815, 38,2014-01-15 00:42:05.751907,948,226,489, 71,2014-01-11 15:37:25.362558,298,503,28, 71,2014-01-20 12:39:48.523748,699,697,85, 73,2014-01-11 13:51:44.578537,466,47,919, 66,2014-01-19 12:27:48.52422,895,187,360, 66,2014-01-18 19:32:45.696242,352,416,934, 86,2014-01-18 13:45:24.860949,280,770,520, 73,2014-01-11 06:05:52.127977,114,375,2, 38,2014-01-16 22:45:43.47887,894,338,521, 73,2014-01-17 09:27:18.027583,194,653,381, 38,2014-01-13 14:49:07.63033,240,21,618, 66,2014-01-16 17:47:55.513106,914,823,740, 6,2014-01-20 01:47:22.318523,555,120,998, 6,2014-01-13 02:23:47.408368,449,227,785, 71,2014-01-17 00:29:16.184057,919,407,449, 6,2014-01-21 00:32:59.027501,599,57,690, 6,2014-01-13 06:26:28.155515,793,48,854, 71,2014-01-11 19:48:51.775732,245,973,354, 73,2014-01-13 03:59:11.949839,423,704,672, 66,2014-01-17 12:05:00.529903,950,670,57, 71,2014-01-16 19:02:30.591512,88,184,743, 71,2014-01-17 13:22:34.162544,941,845,319, 86,2014-01-16 08:41:54.414988,938,599,608, 66,2014-01-16 08:09:38.683182,388,670,122, 71,2014-01-20 08:44:22.691345,92,627,170, 86,2014-01-13 20:22:49.273308,127,711,641, 71,2014-01-21 05:55:52.018461,815,573,465, 66,2014-01-14 00:09:30.762767,513,636,520, 66,2014-01-11 20:41:40.739885,217,601,895, 86,2014-01-16 20:21:18.749749,858,137,837, 73,2014-01-17 10:46:24.221581,913,428,762, 71,2014-01-16 01:23:46.491794,779,984,515, 71,2014-01-17 15:58:44.112307,930,549,789, 6,2014-01-16 17:26:12.745328,454,35,142, 38,2014-01-16 19:15:31.07033,404,335,419, 73,2014-01-15 22:26:32.636946,571,696,996, 6,2014-01-14 06:58:58.44157,318,316,904, 73,2014-01-14 
03:00:55.931699,59,358,979, 86,2014-01-16 08:32:24.764923,792,270,249, 86,2014-01-20 09:44:41.920858,695,572,42, 73,2014-01-20 16:28:13.959282,496,749,884, 73,2014-01-19 03:21:32.395253,307,74,964, 6,2014-01-15 21:32:37.433965,177,294,71, 6,2014-01-19 12:15:36.277805,495,696,534, 66,2014-01-10 23:45:19.803621,91,695,115, 38,2014-01-11 16:01:53.618441,940,90,772, 6,2014-01-19 06:48:35.305306,830,739,958, 38,2014-01-13 10:11:47.959135,928,621,733, 66,2014-01-12 15:50:45.394174,872,910,438, 71,2014-01-11 04:47:47.255564,560,996,271, 38,2014-01-20 02:16:48.489457,810,108,486, 86,2014-01-18 14:53:15.985685,728,808,513, 6,2014-01-17 22:10:21.418107,428,201,925, 71,2014-01-15 04:58:42.904445,63,182,124, 6,2014-01-13 17:02:54.669286,351,97,409, 38,2014-01-17 00:54:44.109452,857,945,485, 86,2014-01-18 05:21:32.177753,553,768,976, 38,2014-01-13 16:58:46.687748,590,510,426, 38,2014-01-20 05:04:14.872218,627,293,158, 6,2014-01-19 06:03:12.917639,626,859,899, 71,2014-01-19 17:20:05.437916,577,307,917, 71,2014-01-16 09:50:39.094218,527,494,387, 38,2014-01-14 01:26:59.409432,107,427,501, 71,2014-01-16 03:18:49.550223,671,275,585, 73,2014-01-11 21:16:51.839546,603,13,133, 73,2014-01-20 00:42:30.479762,621,233,969, 73,2014-01-21 05:45:22.187169,733,451,147, 6,2014-01-15 08:15:50.281535,126,245,292, 38,2014-01-12 21:43:26.411461,419,103,579, 73,2014-01-16 08:17:46.952092,912,32,705, 66,2014-01-14 12:00:32.202394,848,23,636, 73,2014-01-12 08:11:40.371163,597,848,480, 38,2014-01-14 05:39:19.347345,328,900,862, 66,2014-01-19 13:33:09.636358,47,763,606, 66,2014-01-21 00:27:16.648969,643,928,708, 6,2014-01-14 09:24:39.150966,481,391,736, 66,2014-01-20 09:35:03.254799,738,263,612, 71,2014-01-19 01:15:51.954694,940,797,528, 73,2014-01-16 23:36:27.11052,744,801,922, 38,2014-01-11 19:25:48.649392,6,296,755, 86,2014-01-19 10:03:39.210257,454,402,320, 73,2014-01-15 19:53:15.599977,935,429,35, 66,2014-01-19 14:19:04.635077,628,55,938, 6,2014-01-16 02:14:00.628365,389,625,984, 86,2014-01-20 04:51:58.504422,473,334,31, 66,2014-01-12 01:21:59.304365,195,605,974, 71,2014-01-11 21:08:44.737817,551,355,812, 6,2014-01-17 04:13:30.682175,909,842,771, 6,2014-01-14 22:20:45.992531,380,374,344, 6,2014-01-10 20:12:59.610453,948,437,354, 66,2014-01-15 04:16:43.599814,566,914,188, 73,2014-01-13 09:40:51.430447,671,745,432, 86,2014-01-16 10:03:38.704671,46,40,356, 66,2014-01-18 09:43:43.009246,381,925,360, 71,2014-01-18 16:59:40.980671,794,24,358, 38,2014-01-17 11:20:30.544218,169,77,809, 6,2014-01-17 04:26:13.489007,956,133,140, 6,2014-01-18 16:45:03.168258,501,190,374, 73,2014-01-11 13:36:20.825752,831,816,739, 6,2014-01-19 06:09:39.900888,3,908,688, 66,2014-01-13 19:02:50.120351,630,216,258, 6,2014-01-15 05:36:53.028146,411,983,705, 73,2014-01-20 18:21:20.272051,478,51,153, 38,2014-01-17 04:42:09.468115,513,867,249, 86,2014-01-13 13:10:02.664923,411,113,973, 73,2014-01-20 12:48:36.921019,9,975,274, 86,2014-01-20 18:06:48.61908,468,921,71, 73,2014-01-12 16:45:05.919723,135,823,341, 73,2014-01-18 08:04:28.875713,31,383,561, 66,2014-01-16 11:43:15.729601,166,339,253, 73,2014-01-13 16:10:54.569115,1,590,430, 73,2014-01-16 12:08:08.08597,634,271,760, 71,2014-01-11 01:36:31.329578,526,303,151, 38,2014-01-12 00:29:59.204611,912,675,150, 6,2014-01-11 08:22:08.714334,412,211,856, 38,2014-01-20 10:28:29.834,477,668,98, 66,2014-01-13 05:51:58.508975,176,869,441, 6,2014-01-12 09:30:53.45257,461,410,961, 66,2014-01-16 08:42:00.516175,106,469,528, 38,2014-01-17 08:12:44.501506,447,280,948, 86,2014-01-12 09:43:53.063023,463,393,902, 
71,2014-01-20 16:58:44.115989,895,403,902, 73,2014-01-19 21:53:35.931953,147,299,505, 38,2014-01-17 23:47:31.767694,157,471,885, 73,2014-01-17 20:42:27.125234,512,273,137, 38,2014-01-17 08:53:16.913043,358,947,390, 66,2014-01-14 05:08:02.311911,46,261,660, 66,2014-01-13 19:08:40.61466,713,383,331, 73,2014-01-14 19:38:20.081301,154,386,207, 73,2014-01-14 22:44:23.138083,783,71,96, 71,2014-01-11 19:18:20.515549,531,706,884, 66,2014-01-17 18:41:10.201652,363,159,368, 38,2014-01-19 08:21:16.166229,179,101,212, 86,2014-01-11 07:39:40.787599,467,716,272, 6,2014-01-13 17:23:19.669767,754,595,908, 38,2014-01-11 15:31:18.831152,862,944,310, 73,2014-01-21 00:28:17.708619,195,409,716, 6,2014-01-13 05:30:08.289267,12,140,618, 66,2014-01-13 12:16:24.751294,142,680,683, 73,2014-01-18 02:32:46.58475,337,612,328, 71,2014-01-18 21:13:24.018867,440,376,996, 6,2014-01-16 08:27:19.320408,305,407,275, 71,2014-01-13 08:40:54.67072,670,548,17, 66,2014-01-19 02:49:55.348864,454,823,645, 86,2014-01-17 12:57:18.525019,811,778,955, 71,2014-01-13 21:03:03.385474,727,928,182, 86,2014-01-18 07:18:25.182865,975,974,702, 38,2014-01-19 22:49:17.034414,789,480,299, 6,2014-01-15 10:33:56.838044,386,303,278, 86,2014-01-13 10:00:25.699458,68,838,765, 73,2014-01-16 01:02:01.53592,408,618,885, 73,2014-01-17 00:17:49.901067,714,514,845, 38,2014-01-12 20:59:09.815447,767,532,746, 86,2014-01-14 16:55:37.468292,707,560,816, 71,2014-01-13 18:05:21.66876,342,571,183, 73,2014-01-19 21:41:36.9411,525,718,52, 86,2014-01-21 05:48:54.381334,49,194,475, 73,2014-01-17 03:13:23.980672,100,420,51, 38,2014-01-12 10:50:17.555761,226,245,624, 66,2014-01-14 19:27:14.462636,31,191,601, 6,2014-01-21 05:57:47.118755,361,938,504, 6,2014-01-13 10:08:38.071309,198,299,224, 73,2014-01-11 08:08:24.664289,709,405,967, 38,2014-01-19 08:19:03.285403,514,860,739, 71,2014-01-13 21:48:18.858909,785,923,679, 71,2014-01-14 05:31:44.334474,491,277,899, 71,2014-01-20 03:50:22.116555,24,892,95, 66,2014-01-13 16:16:36.567527,631,13,378, 66,2014-01-16 15:01:52.623741,307,312,359, 38,2014-01-12 10:06:46.867849,923,754,39, 86,2014-01-20 22:49:23.152277,555,794,272, 6,2014-01-14 06:15:16.642607,390,532,760, 86,2014-01-17 22:34:06.188677,713,979,47, 86,2014-01-13 01:30:17.823417,66,134,683, 71,2014-01-12 03:05:11.991472,84,540,830, 86,2014-01-14 05:31:24.713696,376,184,40, 6,2014-01-16 02:33:21.20889,615,180,983, 71,2014-01-19 14:23:37.174754,367,241,905, 38,2014-01-12 22:20:41.748109,623,941,375, 66,2014-01-20 17:07:18.046933,651,561,41, 86,2014-01-11 18:24:02.874213,849,566,146, 38,2014-01-18 03:22:43.284448,934,140,526, 86,2014-01-16 11:25:07.948,64,871,562, 86,2014-01-13 19:23:12.689659,385,99,123, 66,2014-01-11 14:18:20.75274,891,384,904, 73,2014-01-19 09:30:29.61676,422,74,873, 73,2014-01-12 11:04:49.630759,7,292,225, 66,2014-01-11 14:07:15.134074,575,470,736, 71,2014-01-15 06:43:53.597433,683,537,214, 86,2014-01-14 01:55:07.186521,445,580,841, 71,2014-01-15 13:34:29.59671,205,946,457, 38,2014-01-15 06:41:40.716607,547,491,687, 38,2014-01-16 16:03:45.25783,659,399,10, 6,2014-01-16 01:42:54.260999,559,897,423, 73,2014-01-13 09:00:44.002011,571,849,827, 86,2014-01-19 17:52:04.11674,619,52,593, 71,2014-01-19 11:14:38.595473,704,942,208, 86,2014-01-12 06:51:06.118567,117,888,711, 38,2014-01-12 04:08:40.684686,780,120,595, 66,2014-01-14 20:16:31.219213,14,347,655, 66,2014-01-13 20:57:52.986835,568,788,749, 73,2014-01-11 20:58:03.836965,265,949,233, 71,2014-01-18 06:31:47.86224,65,239,550, 38,2014-01-20 23:31:59.175511,389,567,750, 71,2014-01-14 
02:28:21.66038,595,262,18, 86,2014-01-19 13:36:59.85371,501,969,5, 38,2014-01-13 23:03:23.889208,320,675,467, 6,2014-01-19 09:01:42.869271,155,637,643, 71,2014-01-17 22:00:37.028466,690,842,862, 73,2014-01-16 01:24:05.637736,445,65,283, 73,2014-01-18 20:09:00.916204,870,944,778, 86,2014-01-18 20:24:39.902678,21,600,640, 6,2014-01-12 22:46:48.922184,261,235,474, 66,2014-01-14 01:34:08.864204,368,659,421, 66,2014-01-11 09:47:52.592757,752,214,546, 86,2014-01-13 17:05:09.674925,573,357,522, 73,2014-01-12 05:04:38.480965,123,52,183, 86,2014-01-13 00:52:42.223517,650,917,596, 66,2014-01-14 11:12:24.808999,319,682,517, 73,2014-01-16 15:48:32.078817,525,905,21, 86,2014-01-16 06:47:49.410457,622,784,722, 71,2014-01-19 04:46:54.405709,558,790,609, 66,2014-01-21 02:30:12.795424,912,372,587, 38,2014-01-11 16:51:34.668288,19,445,955, 6,2014-01-14 00:29:48.666708,468,187,837, 6,2014-01-13 05:30:56.797435,628,555,939, 71,2014-01-20 14:43:38.785447,878,556,453, 73,2014-01-12 05:44:27.2626,702,818,637, 38,2014-01-14 16:22:02.916002,325,283,212, 73,2014-01-11 12:52:19.470133,27,646,861, 6,2014-01-16 06:00:58.481812,569,649,66, 86,2014-01-17 17:19:55.902837,294,661,195, 66,2014-01-12 13:50:23.307098,860,732,527, 71,2014-01-13 06:32:46.344052,816,167,786, 86,2014-01-17 10:51:55.078349,373,244,628, 6,2014-01-15 20:18:44.967897,195,669,348, 73,2014-01-11 14:09:46.198181,169,51,638, 38,2014-01-20 13:55:18.967976,590,255,357, 6,2014-01-13 23:20:27.837168,677,780,131, 73,2014-01-18 16:10:23.226647,601,558,367, 86,2014-01-15 09:19:24.605713,433,145,392, 6,2014-01-11 13:29:28.753791,608,497,297, 66,2014-01-16 06:35:03.129745,579,647,430, 71,2014-01-17 12:06:13.527898,570,557,644, 38,2014-01-14 19:03:37.617994,512,335,382, 86,2014-01-16 20:22:55.722502,409,941,909, 6,2014-01-20 09:11:23.202822,50,112,828, 38,2014-01-16 04:08:16.099378,904,733,765, 38,2014-01-19 01:15:37.946019,218,542,894, 73,2014-01-13 14:23:48.011821,684,485,467, 6,2014-01-11 13:56:48.178195,754,719,483, 71,2014-01-14 02:03:27.356476,567,533,964, 86,2014-01-11 13:10:42.41753,708,131,687, 73,2014-01-11 10:27:00.973619,428,899,910, 73,2014-01-14 22:55:02.025183,434,25,841, 6,2014-01-14 17:40:31.084239,721,74,567, 38,2014-01-13 19:57:57.771054,836,429,843, 86,2014-01-14 07:38:40.81063,212,14,247, 73,2014-01-16 03:24:58.346839,432,592,358, 71,2014-01-17 16:20:00.687057,716,545,656, 73,2014-01-15 00:31:00.280764,887,954,410, 73,2014-01-11 03:25:56.82907,698,833,425, 71,2014-01-14 03:39:56.590314,998,836,839, 73,2014-01-16 18:21:23.587861,902,665,349, 38,2014-01-13 13:58:43.173122,632,618,93, 6,2014-01-20 18:31:51.668663,228,301,34, 73,2014-01-11 08:40:08.555758,680,168,487, 71,2014-01-14 08:08:29.371303,25,593,48, 73,2014-01-20 02:27:10.63664,877,959,633, 6,2014-01-14 12:00:36.393345,552,832,938, 71,2014-01-11 18:18:52.59837,368,702,55, 38,2014-01-14 05:46:35.242352,706,715,646, 86,2014-01-15 05:30:05.147135,655,627,876, 71,2014-01-17 04:53:55.728115,531,729,534, 6,2014-01-20 21:52:48.77025,157,582,602, 86,2014-01-19 04:33:42.76513,894,191,842, 86,2014-01-12 19:16:51.450616,765,104,846, 73,2014-01-20 01:04:11.973072,892,777,634, 6,2014-01-14 02:41:58.864507,437,850,282, 71,2014-01-21 00:32:29.396634,430,120,124, 86,2014-01-12 09:27:59.984893,664,814,566, 6,2014-01-14 20:38:47.042702,321,308,192, 73,2014-01-13 20:35:56.75353,988,281,562, 6,2014-01-13 02:38:42.402424,272,990,590, 71,2014-01-15 11:05:48.016321,982,495,557, 71,2014-01-17 23:30:58.778713,939,224,152, 73,2014-01-17 00:19:13.487081,181,567,10, 38,2014-01-18 11:03:45.787376,182,839,0, 
86,2014-01-11 01:09:39.589343,868,847,646, 71,2014-01-11 21:44:11.83392,614,448,689, 6,2014-01-14 21:23:46.474852,384,326,380, 71,2014-01-15 05:40:39.870106,114,220,262, 71,2014-01-12 05:10:08.66299,701,167,894, 66,2014-01-18 05:03:43.065165,755,757,593, 6,2014-01-21 04:02:03.457967,282,370,436, 38,2014-01-14 23:08:51.83653,464,910,842, 86,2014-01-17 17:35:34.733828,434,419,229, 73,2014-01-11 06:42:12.014145,769,465,112, 86,2014-01-18 11:17:21.207834,668,294,716, 66,2014-01-16 14:02:45.370886,559,453,521, 86,2014-01-14 22:42:48.40749,517,695,552, 86,2014-01-19 09:36:13.806203,980,42,497, 66,2014-01-19 23:49:20.613239,69,121,96, 73,2014-01-19 08:12:53.554625,489,879,803, 38,2014-01-15 08:30:09.534318,812,278,467, 71,2014-01-19 15:42:09.383906,542,573,135, 86,2014-01-17 06:46:36.319755,632,122,517, 71,2014-01-17 07:47:00.984933,495,448,531, 6,2014-01-18 10:46:21.356978,218,819,772, 71,2014-01-20 13:28:35.184682,267,303,742, 71,2014-01-17 02:19:30.381987,649,888,682, 66,2014-01-20 00:14:21.342291,574,745,177, 73,2014-01-14 04:07:22.227384,489,106,713, 86,2014-01-20 02:55:27.135518,699,116,216, 38,2014-01-11 20:53:03.744714,437,99,328, 73,2014-01-18 19:13:10.244124,616,800,885, 71,2014-01-16 20:26:25.91423,43,325,87, 38,2014-01-18 01:12:17.231796,530,981,561, 38,2014-01-16 00:16:56.0315,709,228,427, 71,2014-01-17 01:36:05.503573,91,766,814, 86,2014-01-19 02:56:29.066134,254,44,958, 6,2014-01-20 01:40:42.506352,226,830,683, 6,2014-01-11 01:16:45.373679,838,767,377, 86,2014-01-20 12:06:37.729124,533,531,183, 86,2014-01-17 00:44:25.571517,931,569,499, 73,2014-01-10 23:18:48.831646,686,4,633, 73,2014-01-14 05:15:29.565655,860,45,42, 71,2014-01-13 12:20:00.305764,670,796,202, 6,2014-01-11 10:01:00.845791,568,821,608, 71,2014-01-11 10:32:50.773907,164,867,516, 38,2014-01-19 06:22:45.67665,901,518,971, 86,2014-01-15 12:43:49.25328,165,889,280, 66,2014-01-20 00:09:04.580111,20,583,632, 86,2014-01-18 00:12:06.289889,942,72,346, 71,2014-01-13 14:56:42.807906,671,474,377, 86,2014-01-14 02:39:14.114848,774,145,203, 6,2014-01-16 09:54:15.673795,569,41,562, 66,2014-01-20 01:43:19.12808,207,854,779, 6,2014-01-20 14:26:15.099781,825,82,624, 73,2014-01-13 14:40:37.031193,793,737,384, 38,2014-01-19 09:11:54.312762,203,159,438, 86,2014-01-16 10:45:45.481769,342,996,892, 86,2014-01-12 08:54:58.373482,790,733,980, 38,2014-01-12 07:19:16.540146,886,729,670, 38,2014-01-15 07:41:12.617286,789,947,222, 86,2014-01-13 09:48:02.118197,388,814,479, 73,2014-01-20 06:32:26.784271,458,412,329, 86,2014-01-10 22:07:38.531517,621,76,721, 38,2014-01-20 15:00:19.350411,635,58,298, 73,2014-01-15 00:49:22.81619,84,558,508, 6,2014-01-17 03:43:44.03509,714,970,395, 38,2014-01-18 11:56:48.416546,730,262,978, 73,2014-01-13 20:30:05.322542,728,285,223, 86,2014-01-17 09:00:29.408769,178,11,111, 71,2014-01-17 18:03:26.14567,536,937,514, 6,2014-01-20 01:14:30.894478,650,988,429, 71,2014-01-17 12:19:18.240834,382,649,943, 86,2014-01-21 03:18:55.711744,937,317,384, 86,2014-01-12 07:34:31.200243,470,344,249, 71,2014-01-18 02:20:19.086625,328,94,206, 86,2014-01-11 07:51:46.485651,806,762,332, 71,2014-01-20 17:57:16.876893,194,908,854, 38,2014-01-12 09:04:08.340325,241,993,226, 66,2014-01-20 12:00:51.065763,216,337,243, 71,2014-01-17 12:09:23.16678,712,5,75, 66,2014-01-15 04:00:51.14823,324,187,454, 66,2014-01-13 08:40:05.18061,53,842,683, 86,2014-01-12 16:03:38.840995,519,324,113, 73,2014-01-13 23:44:10.27631,351,190,775, 73,2014-01-12 17:06:20.280811,690,636,508, 38,2014-01-15 10:44:15.872186,414,866,569, 38,2014-01-12 
02:56:04.589073,128,763,21, 73,2014-01-18 07:52:05.76258,155,201,61, 73,2014-01-16 23:39:14.24567,417,265,118, 86,2014-01-13 14:15:21.129638,535,907,35, 6,2014-01-12 09:33:18.380285,161,508,761, 38,2014-01-19 13:27:16.363867,29,172,623, 73,2014-01-12 14:47:47.913908,4,733,54, 38,2014-01-12 11:40:56.911803,220,526,870, 86,2014-01-18 22:27:35.714277,193,421,186, 73,2014-01-16 19:37:10.730099,81,548,685, 66,2014-01-18 19:24:40.947311,239,972,243, 73,2014-01-16 04:24:24.130823,395,499,552, 86,2014-01-19 20:07:16.053059,884,801,34, 73,2014-01-14 22:25:10.35608,536,234,705, 38,2014-01-12 16:27:50.276912,842,154,608, 73,2014-01-18 15:21:46.947538,147,442,829, 38,2014-01-11 04:44:28.596915,418,548,610, 71,2014-01-12 13:46:45.988656,660,342,931, 86,2014-01-20 02:56:18.14778,135,695,973, 73,2014-01-18 11:04:47.683959,391,514,967, 73,2014-01-13 01:38:32.474307,328,486,186, 6,2014-01-19 14:53:35.024673,310,560,786, 66,2014-01-20 00:08:56.024283,751,200,828, 6,2014-01-12 07:39:23.54007,955,975,782, 73,2014-01-15 21:02:58.191453,873,478,524, 38,2014-01-13 22:09:47.172515,139,45,785, 38,2014-01-14 20:19:28.7211,511,752,163, 66,2014-01-17 17:06:37.032447,304,4,333, 71,2014-01-17 01:53:57.449244,138,209,843, 73,2014-01-16 17:25:49.001911,660,613,86, 86,2014-01-11 21:50:52.904633,476,819,732, 73,2014-01-18 08:50:02.038317,457,783,410, 6,2014-01-13 19:17:54.764492,753,955,857, 71,2014-01-18 01:30:07.150303,195,584,709, 86,2014-01-21 03:05:23.167955,50,406,335, 66,2014-01-15 08:51:13.144777,706,433,85, 71,2014-01-16 08:57:23.514589,52,961,52, 73,2014-01-12 11:53:11.082282,214,637,300, 71,2014-01-17 00:32:10.056999,182,572,15, 71,2014-01-14 01:24:59.228866,99,122,848, 71,2014-01-18 11:30:21.812381,343,973,251, 38,2014-01-14 13:56:51.00431,197,28,464, 86,2014-01-19 09:49:23.360108,436,548,894, 86,2014-01-17 01:37:37.865441,847,173,258, 66,2014-01-18 16:22:01.360392,880,936,918, 71,2014-01-10 20:17:13.637021,764,58,359, 71,2014-01-14 10:59:24.812978,363,878,618, 73,2014-01-19 01:06:29.957725,640,117,304, 71,2014-01-12 14:03:59.625677,771,298,949, 73,2014-01-13 07:55:42.960758,424,248,183, 38,2014-01-16 06:11:17.641684,777,481,429, 71,2014-01-14 19:42:32.099984,532,335,698, 6,2014-01-11 16:49:17.985431,138,658,872, 86,2014-01-15 00:20:13.665968,205,625,807, 71,2014-01-16 07:21:55.640054,970,676,792, 6,2014-01-16 17:52:16.177303,420,216,534, 38,2014-01-18 02:30:00.838482,596,814,977, 86,2014-01-20 07:41:24.361154,440,746,661, 71,2014-01-13 04:58:53.20975,999,612,925, 38,2014-01-13 22:23:58.287726,846,686,566, 38,2014-01-15 19:07:13.363066,263,570,911, 71,2014-01-14 06:49:46.114803,413,839,628, 71,2014-01-11 01:14:00.326461,959,451,391, 71,2014-01-18 18:25:08.127976,879,510,102, 86,2014-01-11 02:19:53.265106,649,70,121, 86,2014-01-10 22:19:23.494417,286,451,190, 73,2014-01-12 21:16:21.272754,497,683,642, 6,2014-01-16 15:17:16.779695,6,978,430, 66,2014-01-12 14:12:34.576699,131,62,285, 86,2014-01-19 01:48:31.329753,367,250,604, 6,2014-01-19 20:42:16.00898,636,546,221, 6,2014-01-20 05:42:56.389499,453,815,177, 38,2014-01-12 09:45:22.334064,465,163,872, 71,2014-01-18 00:31:39.369089,115,287,926, 38,2014-01-16 01:20:34.25494,564,944,397, 66,2014-01-20 06:07:23.694874,115,607,47, 38,2014-01-18 00:48:53.006109,674,847,300, 38,2014-01-19 16:19:59.068336,736,788,118, 71,2014-01-18 01:13:53.6526,98,280,168, 86,2014-01-19 18:52:52.631785,266,985,970, 71,2014-01-11 18:15:42.029096,528,933,386, 38,2014-01-13 01:25:11.294283,842,854,95, 6,2014-01-13 08:35:24.73177,253,866,619, 38,2014-01-12 15:05:00.014946,358,30,932, 
86,2014-01-17 05:45:24.960251,764,194,218, 73,2014-01-18 19:57:20.371824,892,455,339, 6,2014-01-18 12:57:16.192248,337,629,825, 6,2014-01-14 02:15:25.798733,456,141,596, 71,2014-01-17 21:38:44.733397,160,679,772, 6,2014-01-20 21:56:09.401999,173,728,950, 38,2014-01-17 04:39:24.086879,184,16,687, 71,2014-01-12 10:45:58.096463,211,374,388, 73,2014-01-13 22:45:55.516801,526,922,451, 71,2014-01-17 09:53:24.413341,233,381,595, 73,2014-01-20 09:11:06.22444,674,51,434, 73,2014-01-14 05:05:48.782327,510,947,893, 71,2014-01-17 12:12:47.907757,373,967,157, 6,2014-01-12 00:27:27.497611,873,141,266, 86,2014-01-20 00:23:05.562021,971,667,927, 66,2014-01-19 06:25:22.484875,662,581,817, 6,2014-01-20 06:15:58.827365,374,65,892, 73,2014-01-18 15:05:21.571001,541,691,155, 86,2014-01-18 06:08:18.874374,316,134,21, 66,2014-01-11 10:01:21.161848,225,920,446, 73,2014-01-15 09:37:00.94009,903,520,746, 73,2014-01-13 01:28:53.129314,961,131,9, 38,2014-01-20 20:08:44.856722,384,572,499, 73,2014-01-12 04:25:53.946198,637,622,144, 38,2014-01-11 11:48:52.197651,199,234,638, 6,2014-01-17 15:22:38.509322,329,183,82, 86,2014-01-21 03:18:46.577983,769,618,225, 38,2014-01-12 10:04:34.227166,30,452,703, 38,2014-01-19 20:47:49.803605,568,641,658, 73,2014-01-13 05:54:11.309753,17,241,517, 38,2014-01-14 05:09:34.24211,613,839,758, 86,2014-01-15 20:33:14.763856,101,678,10, 66,2014-01-21 05:51:31.681997,773,809,874, 66,2014-01-11 12:06:50.434359,286,220,769, 66,2014-01-19 02:48:40.562591,267,671,196, 6,2014-01-17 21:30:16.415394,332,401,1000, 38,2014-01-11 04:02:59.836357,437,603,21, 38,2014-01-15 01:28:04.649469,824,580,459, 6,2014-01-19 12:16:14.511857,145,35,594, 38,2014-01-14 06:48:55.353578,350,903,166, 66,2014-01-11 05:21:29.062809,276,529,443, 66,2014-01-18 15:27:20.736716,353,684,203, 71,2014-01-17 15:54:44.135905,83,129,781, 38,2014-01-17 21:34:16.970986,583,741,882, 71,2014-01-19 19:54:48.234327,374,151,695, 6,2014-01-16 10:17:49.697926,181,617,621, 73,2014-01-15 21:59:39.455861,39,138,2, 73,2014-01-18 20:10:47.061692,947,495,751, 66,2014-01-13 19:23:11.269346,709,187,456, 73,2014-01-12 22:07:58.330235,468,756,952, 71,2014-01-19 10:12:08.22354,393,713,82, 66,2014-01-18 09:00:12.209435,334,14,382, 86,2014-01-15 03:36:51.45955,502,833,75, 73,2014-01-19 00:20:53.080261,916,182,401, 66,2014-01-19 17:26:06.156053,859,713,220, 66,2014-01-15 19:25:43.65762,469,804,530, 73,2014-01-15 09:43:31.589583,94,946,811, 73,2014-01-19 14:44:52.734036,720,207,634, 73,2014-01-17 09:30:17.884785,757,554,421, 66,2014-01-14 00:31:21.393189,56,143,589, 6,2014-01-11 14:39:04.044209,765,732,654, 71,2014-01-20 18:39:52.126896,825,489,51, 71,2014-01-19 01:04:36.157045,523,452,231, 73,2014-01-11 14:30:35.726206,761,771,57, 71,2014-01-11 00:46:42.561255,54,266,485, 66,2014-01-16 21:53:16.720054,752,875,255, 38,2014-01-18 16:00:52.1416,242,141,544, 38,2014-01-11 08:49:42.397614,567,23,471, 38,2014-01-21 03:21:21.369523,827,689,753, 86,2014-01-16 22:17:06.653457,821,25,380, 6,2014-01-14 19:38:37.751192,918,781,376, 6,2014-01-11 02:42:50.432752,906,911,310, 6,2014-01-14 07:44:27.390172,705,757,140, 6,2014-01-11 05:33:21.887097,443,556,932, 38,2014-01-18 04:17:07.403738,835,34,603, 6,2014-01-12 21:39:15.624499,851,736,391, 6,2014-01-16 19:51:11.585442,937,497,310, 6,2014-01-12 20:16:46.859599,958,456,686, 6,2014-01-20 21:50:02.68661,785,664,931, 71,2014-01-19 19:14:22.854789,379,499,604, 73,2014-01-14 22:24:45.189835,699,895,375, 71,2014-01-19 02:02:10.91015,208,597,198, 6,2014-01-16 22:14:35.064224,325,250,145, 71,2014-01-19 
06:01:36.649385,493,477,282, 66,2014-01-16 20:23:03.99083,240,920,554, 6,2014-01-15 09:40:41.220276,203,667,789, 71,2014-01-13 19:27:20.307005,899,819,780, 38,2014-01-11 00:06:35.580414,139,480,713, 71,2014-01-13 18:25:33.954732,587,720,848, 38,2014-01-20 08:57:38.191791,739,717,450, 71,2014-01-14 04:37:56.974021,463,361,792, 38,2014-01-14 13:04:37.998942,114,288,673, 66,2014-01-19 21:37:30.318686,559,232,944, 86,2014-01-11 23:42:33.131066,260,462,144, 6,2014-01-15 07:35:13.725148,670,985,239, 86,2014-01-20 02:24:12.879943,514,711,324, 71,2014-01-18 01:35:49.85112,492,41,732, 66,2014-01-12 17:36:05.866748,70,605,734, 86,2014-01-20 15:13:55.277975,113,665,154, 71,2014-01-17 22:57:11.221062,581,870,415, 71,2014-01-18 19:53:12.520204,501,987,368, 71,2014-01-14 04:52:33.029167,767,580,38, 6,2014-01-18 05:40:01.653815,850,343,296, 71,2014-01-11 21:37:39.910796,43,738,458, 86,2014-01-14 14:25:54.916681,782,425,439, 6,2014-01-15 03:57:09.057552,985,235,179, 86,2014-01-13 23:16:55.535295,950,642,510, 73,2014-01-20 14:17:06.502124,504,433,640, 73,2014-01-17 04:13:55.917152,121,600,868, 71,2014-01-13 15:06:58.221905,464,558,75, 6,2014-01-19 03:31:29.356911,401,919,626, 38,2014-01-10 20:38:41.106987,845,339,284, 38,2014-01-11 11:09:09.132474,953,28,349, 86,2014-01-14 19:46:04.421135,44,65,758, 71,2014-01-19 06:40:17.756372,751,501,140, 86,2014-01-17 11:32:13.123304,452,655,297, 73,2014-01-19 09:26:45.64183,968,259,814, 6,2014-01-11 20:07:38.063797,435,985,798, 66,2014-01-17 15:38:48.703717,569,101,715, 6,2014-01-11 21:52:19.596563,264,934,879, 73,2014-01-10 23:05:16.255586,805,391,914, 66,2014-01-21 00:16:45.677738,252,618,551, 73,2014-01-15 14:56:57.595504,467,743,391, 38,2014-01-20 00:42:46.574693,439,690,209, 6,2014-01-11 17:59:18.809222,363,913,576, 38,2014-01-20 02:32:11.320652,752,605,87, 66,2014-01-18 21:06:59.454635,943,836,565, 71,2014-01-18 23:35:08.660342,622,910,603, 73,2014-01-11 14:08:17.187399,139,240,390, 71,2014-01-18 06:20:54.732609,661,804,867, 86,2014-01-15 16:32:19.881404,680,680,615, 86,2014-01-19 14:01:29.707604,82,509,392, 6,2014-01-11 05:13:27.762195,137,661,917, 38,2014-01-12 16:12:21.535219,40,154,435, 66,2014-01-20 15:39:09.6184,500,322,356, 71,2014-01-14 23:39:22.678876,455,149,245, 21,2014-01-17 00:09:30.592771,526,120,821, 70,2014-01-13 08:56:05.154114,179,871,211, 21,2014-01-14 07:56:29.181,960,866,289, 21,2014-01-12 22:23:26.509923,428,610,419, 70,2014-01-16 04:03:03.376019,850,121,765, 70,2014-01-12 05:27:58.537911,178,450,701, 34,2014-01-12 23:02:07.616909,889,913,908, 70,2014-01-16 19:12:12.508493,613,498,200, 70,2014-01-16 05:14:02.959047,697,721,84, 34,2014-01-10 23:42:25.373701,230,920,306, 70,2014-01-13 00:44:25.631797,160,269,237, 34,2014-01-14 08:40:48.600877,778,47,507, 21,2014-01-11 23:50:03.437498,381,777,744, 21,2014-01-19 20:23:14.335932,659,300,379, 70,2014-01-15 10:33:08.197441,273,714,832, 21,2014-01-12 02:55:19.693084,86,18,17, 34,2014-01-19 14:40:00.013671,50,340,336, 34,2014-01-20 05:30:05.792945,718,218,241, 21,2014-01-10 21:38:06.267777,653,256,235, 21,2014-01-20 12:39:18.822893,605,396,100, 70,2014-01-19 02:02:17.113596,195,656,241, 34,2014-01-18 22:45:05.722831,934,146,577, 21,2014-01-18 06:14:27.483654,486,60,291, 70,2014-01-19 20:10:34.300995,597,716,296, 21,2014-01-15 23:06:00.455441,173,790,989, 34,2014-01-12 16:46:47.365058,67,409,266, 70,2014-01-18 04:12:04.009018,47,841,805, 34,2014-01-16 08:19:28.217635,609,665,676, 34,2014-01-14 12:59:08.900696,476,196,160, 70,2014-01-17 13:51:13.627418,786,918,423, 21,2014-01-20 
11:58:50.896512,576,961,875, 21,2014-01-20 17:08:39.493468,838,339,501, 21,2014-01-20 02:47:18.781531,338,307,617, 34,2014-01-13 13:55:20.077512,921,277,935, 21,2014-01-12 09:32:06.003809,780,845,236, 70,2014-01-15 00:50:22.157969,152,791,455, 34,2014-01-14 23:23:18.615424,482,79,201, 21,2014-01-14 12:34:13.620719,839,94,256, 21,2014-01-21 00:02:34.666461,95,593,420, 21,2014-01-20 08:37:21.57489,891,316,403, 70,2014-01-14 16:16:38.994421,863,380,842, 70,2014-01-12 18:47:00.298677,95,463,283, 21,2014-01-13 11:18:10.175768,554,764,204, 34,2014-01-15 20:06:42.431919,408,881,269, 21,2014-01-11 09:10:14.634609,587,533,621, 70,2014-01-18 01:51:18.373627,136,874,744, 34,2014-01-17 03:02:02.125422,152,191,930, 21,2014-01-20 03:50:14.64828,437,863,137, 34,2014-01-17 01:21:24.166571,335,997,159, 70,2014-01-17 04:40:08.3932,988,232,172, 70,2014-01-19 10:29:33.471592,209,289,411, 21,2014-01-14 21:23:41.280586,879,841,85, 21,2014-01-14 21:25:14.116031,567,149,568, 21,2014-01-16 10:44:00.955246,285,200,44, 70,2014-01-13 11:34:15.581581,980,767,444, 21,2014-01-20 00:31:14.571892,382,434,559, 21,2014-01-18 07:30:48.320304,675,744,719, 34,2014-01-20 19:46:19.5906,775,446,716, 70,2014-01-15 02:50:42.789527,545,593,444, 34,2014-01-11 14:29:57.221,979,55,356, 34,2014-01-17 03:37:33.218436,653,142,749, 34,2014-01-14 08:49:33.686459,341,903,618, 34,2014-01-11 01:38:36.714886,309,328,765, 70,2014-01-16 00:24:51.999968,92,568,928, 70,2014-01-17 02:44:53.763971,867,304,891, 21,2014-01-12 15:10:42.718696,870,73,807, 70,2014-01-20 05:15:14.157936,542,309,732, 34,2014-01-10 20:08:12.379815,920,359,69, 21,2014-01-16 07:44:56.339415,588,184,166, 21,2014-01-19 23:17:48.824817,239,67,413, 21,2014-01-20 08:45:33.954704,486,913,694, 21,2014-01-20 04:01:35.334254,828,699,427, 70,2014-01-11 12:04:49.123494,776,629,785, 70,2014-01-12 14:03:44.130472,975,120,872, 21,2014-01-14 18:08:17.766173,476,417,231, 34,2014-01-12 01:15:03.758103,332,183,201, 70,2014-01-19 19:55:02.504098,632,917,752, 70,2014-01-21 01:10:19.891595,788,313,274, 34,2014-01-10 23:05:18.406383,450,525,628, 21,2014-01-15 15:16:26.671089,131,475,875, 34,2014-01-16 23:50:28.285214,994,897,278, 21,2014-01-19 13:34:51.877975,776,563,647, 70,2014-01-19 16:40:07.951675,784,22,967, 21,2014-01-21 01:15:42.401245,212,143,822, 21,2014-01-14 18:18:52.833221,326,619,256, 70,2014-01-11 22:14:23.533256,51,722,254, 34,2014-01-19 19:46:56.973136,844,172,856, 21,2014-01-11 19:49:41.153944,508,18,61, 34,2014-01-11 12:00:43.124275,324,417,430, 21,2014-01-13 16:37:39.763083,447,307,40, 34,2014-01-12 14:19:38.374944,819,918,152, 70,2014-01-17 19:38:16.342711,426,839,338, 21,2014-01-17 05:27:13.449542,329,919,89, 34,2014-01-12 19:58:15.08983,102,546,63, 70,2014-01-12 14:03:08.342678,154,981,506, 34,2014-01-13 02:12:07.213514,231,593,244, 21,2014-01-14 15:08:57.808526,169,348,76, 21,2014-01-11 13:18:22.501034,995,950,454, 70,2014-01-13 02:20:19.593328,936,457,812, 21,2014-01-20 02:53:54.14836,359,490,865, 21,2014-01-20 16:36:11.325851,811,443,326, 70,2014-01-12 05:05:53.548032,141,56,389, 21,2014-01-19 00:55:29.482615,0,570,707, 34,2014-01-10 22:41:00.449345,587,208,283, 34,2014-01-13 23:09:37.678923,916,919,397, 34,2014-01-12 13:03:47.248788,960,36,107, 21,2014-01-12 03:56:04.207448,14,134,947, 70,2014-01-12 13:04:40.183021,12,223,150, 21,2014-01-12 08:14:07.140802,95,790,445, 70,2014-01-12 07:01:22.613831,655,564,950, 34,2014-01-17 08:21:06.854109,872,12,242, 34,2014-01-18 12:04:35.426016,664,857,114, 34,2014-01-21 00:36:14.492225,808,914,65, 21,2014-01-15 
19:01:14.805783,539,868,160, 34,2014-01-18 07:20:17.82726,504,145,695, 21,2014-01-14 12:55:07.325446,36,672,898, 34,2014-01-16 21:15:38.339458,88,556,124, 70,2014-01-16 21:07:14.800817,50,442,573, 34,2014-01-15 12:44:48.47939,675,195,677, 21,2014-01-17 13:16:21.463733,939,50,899, 70,2014-01-19 17:44:54.5639,816,981,859, 21,2014-01-17 07:04:26.854752,993,949,39, 34,2014-01-14 02:54:37.806445,928,882,132, 21,2014-01-15 17:12:08.013442,388,2,556, 70,2014-01-19 07:02:41.944582,128,258,666, 34,2014-01-15 20:57:46.149123,118,676,967, 70,2014-01-17 23:24:15.226955,871,914,608, 34,2014-01-12 16:11:39.753527,28,42,246, 21,2014-01-16 14:16:08.650157,16,269,657, 70,2014-01-20 05:44:34.820702,340,848,102, 70,2014-01-11 13:05:33.901887,708,888,953, 70,2014-01-16 00:52:19.976008,327,941,938, 70,2014-01-11 04:50:28.368734,109,813,866, 21,2014-01-19 18:01:03.384502,901,914,952, 34,2014-01-16 03:33:20.425353,129,54,379, 70,2014-01-14 08:00:06.047657,864,664,607, 34,2014-01-11 01:04:50.63329,774,512,454, 34,2014-01-17 11:29:24.632801,644,471,779, 70,2014-01-16 01:04:46.231097,962,848,888, 34,2014-01-12 13:18:57.774091,579,182,753, 34,2014-01-18 22:30:47.247051,360,374,757, 34,2014-01-12 03:25:53.085205,863,782,897, 21,2014-01-20 05:23:33.200106,33,810,716, 70,2014-01-18 17:07:01.739277,881,80,29, 21,2014-01-17 02:27:07.891408,115,84,239, 21,2014-01-17 06:43:51.027786,912,867,579, 70,2014-01-12 00:02:09.064723,476,471,238, 21,2014-01-12 17:42:46.230867,832,783,883, 21,2014-01-12 21:51:05.828603,304,853,427, 34,2014-01-16 16:46:57.544531,967,103,289, 34,2014-01-19 10:59:07.694601,818,404,35, 34,2014-01-11 09:36:00.392503,417,332,260, 21,2014-01-12 17:51:24.399283,568,646,591, 34,2014-01-12 07:53:45.501046,471,108,453, 70,2014-01-16 06:48:08.405945,174,815,721, 34,2014-01-21 04:54:06.343865,276,990,637, 70,2014-01-17 08:51:31.650169,471,769,601, 34,2014-01-13 00:12:23.632901,265,192,586, 70,2014-01-12 15:05:46.097392,597,990,377, 21,2014-01-12 17:07:40.300746,175,320,989, 34,2014-01-11 23:56:58.453603,847,501,542, 21,2014-01-13 08:11:19.999279,853,965,460, 21,2014-01-17 22:00:00.276754,37,116,728, 21,2014-01-12 08:47:26.822757,547,175,489, 21,2014-01-11 20:12:23.38378,905,131,193, 70,2014-01-12 19:33:20.702526,31,885,665, 34,2014-01-15 20:47:32.870414,313,363,507, 34,2014-01-12 01:17:14.017489,405,788,462, 21,2014-01-19 11:02:45.335327,405,56,854, 21,2014-01-21 01:52:19.10151,740,344,231, 70,2014-01-13 18:36:11.791579,727,250,783, 70,2014-01-17 03:33:32.582378,888,942,878, 34,2014-01-11 23:18:12.187135,978,767,229, 21,2014-01-12 17:59:44.992105,546,18,165, 70,2014-01-14 14:40:34.321655,913,234,857, 21,2014-01-18 05:45:20.078544,553,680,702, 21,2014-01-19 04:43:36.019892,135,96,845, 21,2014-01-15 18:42:43.386797,924,100,199, 21,2014-01-20 03:28:06.309411,363,846,488, 70,2014-01-10 20:34:41.848495,469,838,104, 34,2014-01-11 05:29:40.931328,992,421,764, 34,2014-01-18 08:27:14.004011,491,567,124, 21,2014-01-11 10:10:42.240998,626,985,505, 21,2014-01-13 03:21:05.33061,258,507,952, 21,2014-01-19 20:20:59.505057,585,855,53, 34,2014-01-16 20:58:50.646943,217,313,710, 70,2014-01-13 02:15:11.674476,263,753,108, 34,2014-01-15 23:12:31.155226,570,517,931, 21,2014-01-19 01:11:14.279844,258,923,559, 70,2014-01-14 21:20:57.772286,351,960,973, 21,2014-01-17 20:20:11.455972,143,598,767, 21,2014-01-20 05:08:12.733866,109,453,576, 34,2014-01-17 09:32:17.771566,182,843,538, 70,2014-01-14 12:20:11.733145,264,44,272, 21,2014-01-11 07:55:39.556623,563,425,120, 70,2014-01-18 09:44:41.155765,832,424,419, 70,2014-01-16 
11:53:32.43567,52,89,323, 21,2014-01-16 08:43:12.427036,794,39,627, 70,2014-01-19 15:01:55.173254,556,36,175, 21,2014-01-14 16:56:17.770997,818,970,841, 34,2014-01-16 04:35:31.528967,282,568,631, 34,2014-01-12 03:38:06.964833,767,811,682, 21,2014-01-21 00:29:50.353794,277,477,494, 21,2014-01-17 07:53:43.716102,867,331,423, 70,2014-01-14 01:37:51.956939,112,215,272, 21,2014-01-14 09:10:24.675449,918,288,43, 34,2014-01-14 07:39:03.794647,453,712,933, 70,2014-01-12 00:21:27.97683,241,187,273, 21,2014-01-19 07:53:08.062246,111,545,338, 70,2014-01-13 05:07:10.104057,711,214,583, 21,2014-01-12 00:56:09.825325,856,336,907, 70,2014-01-19 17:22:48.993574,126,766,347, 21,2014-01-20 17:34:24.108069,212,972,78, 34,2014-01-12 15:06:52.066323,773,234,961, 70,2014-01-11 14:43:54.324184,913,344,317, 70,2014-01-19 07:55:23.613127,15,940,484, 34,2014-01-18 16:05:42.713266,371,961,102, 21,2014-01-13 20:59:05.999079,687,897,885, 21,2014-01-14 01:07:54.768353,896,226,698, 34,2014-01-16 11:16:56.993528,513,607,729, 21,2014-01-17 22:20:03.771366,323,315,69, 34,2014-01-21 01:28:06.224744,397,802,775, 21,2014-01-15 10:25:09.727394,396,466,428, 34,2014-01-14 01:52:21.542931,546,42,136, 34,2014-01-14 07:48:17.957889,651,382,666, 70,2014-01-15 22:20:49.284017,926,629,984, 70,2014-01-11 05:37:02.698696,344,822,796, 21,2014-01-19 23:41:50.393558,946,128,880, 34,2014-01-11 01:04:01.711054,920,163,694, 70,2014-01-20 00:38:57.87195,31,668,934, 70,2014-01-13 10:38:08.164555,981,101,923, 70,2014-01-16 09:39:33.240021,368,126,25, 34,2014-01-10 22:17:04.837202,640,355,214, 34,2014-01-13 05:07:58.518349,206,535,416, 34,2014-01-12 11:33:16.956123,426,656,221, 34,2014-01-14 03:54:56.794141,248,782,435, 70,2014-01-16 18:18:23.194218,563,964,758, 21,2014-01-15 23:12:20.75077,983,5,122, 21,2014-01-15 08:16:24.771391,788,220,9, 21,2014-01-14 20:11:31.256464,892,441,475, 70,2014-01-18 08:19:30.854827,302,918,859, 70,2014-01-16 13:12:34.596715,983,301,461, 70,2014-01-13 07:34:20.250456,493,139,703, 34,2014-01-17 19:53:54.962897,732,988,489, 70,2014-01-18 08:19:26.663456,550,413,243, 34,2014-01-14 02:18:14.574641,604,52,684, 34,2014-01-15 21:49:18.576023,683,565,160, 21,2014-01-15 18:25:09.376722,845,592,636, 34,2014-01-17 03:17:20.57372,419,283,722, 34,2014-01-19 02:57:13.344795,195,736,527, 34,2014-01-10 23:42:06.37025,227,216,161, 70,2014-01-13 19:37:24.345085,620,639,14, 21,2014-01-18 22:25:19.569539,793,445,681, 21,2014-01-15 14:07:16.097645,940,260,924, 70,2014-01-17 01:29:45.888435,814,454,880, 34,2014-01-12 00:13:37.527427,77,665,574, 34,2014-01-20 16:28:05.381661,504,719,73, 70,2014-01-17 11:06:48.587132,285,607,447, 70,2014-01-21 03:55:27.920985,165,593,699, 83,2014-01-20 21:32:07.093134,274,33,372, 39,2014-01-16 05:45:46.459501,476,708,295, 49,2014-01-13 08:33:36.085541,172,372,447, 39,2014-01-16 01:11:40.333155,812,905,533, 49,2014-01-16 08:02:51.296704,844,995,895, 83,2014-01-15 17:41:34.604309,800,320,133, 39,2014-01-17 16:44:57.289698,271,406,958, 83,2014-01-19 15:57:48.090845,69,917,15, 83,2014-01-11 05:59:57.798527,255,260,387, 39,2014-01-12 09:57:18.040468,338,279,140, 83,2014-01-13 18:14:12.862235,728,973,715, 49,2014-01-15 06:11:29.05499,108,847,204, 49,2014-01-19 22:16:48.895715,828,369,252, 39,2014-01-19 11:26:47.45937,12,118,165, 83,2014-01-17 17:45:49.305446,664,751,49, 83,2014-01-16 12:10:43.858611,847,164,569, 39,2014-01-16 13:46:14.122826,379,78,809, 83,2014-01-21 00:04:03.880505,704,367,47, 83,2014-01-11 04:00:02.435053,295,805,466, 39,2014-01-11 02:11:23.499548,391,572,663, 83,2014-01-16 
21:21:24.454225,350,104,710, 83,2014-01-19 10:57:15.779849,355,981,194, 49,2014-01-11 05:53:29.870218,434,936,221, 49,2014-01-19 20:58:48.799729,291,377,132, 39,2014-01-17 03:22:35.349388,377,527,314, 49,2014-01-16 00:00:45.967862,745,837,963, 39,2014-01-15 16:28:34.688165,869,816,468, 39,2014-01-18 07:36:12.876815,649,311,791, 83,2014-01-15 10:28:51.349943,276,619,487, 39,2014-01-11 21:35:23.275297,615,588,187, 39,2014-01-18 05:31:40.7978,182,391,160, 49,2014-01-15 02:00:58.443078,562,634,814, 39,2014-01-17 07:21:09.734798,598,88,64, 83,2014-01-20 18:05:16.883342,421,658,537, 83,2014-01-20 07:12:38.776233,766,508,289, 39,2014-01-12 09:24:01.031502,909,361,434, 39,2014-01-15 05:46:51.48765,14,657,137, 49,2014-01-16 17:57:36.065932,250,601,493, 49,2014-01-21 05:21:49.122766,908,179,87, 83,2014-01-15 15:46:49.286177,576,684,972, 49,2014-01-18 07:54:54.106819,381,265,842, 39,2014-01-13 17:36:01.985001,30,145,516, 83,2014-01-20 01:58:18.341586,148,684,549, 83,2014-01-17 00:11:43.002533,993,110,287, 49,2014-01-11 23:02:49.44437,744,286,929, 39,2014-01-16 13:44:07.647031,491,898,56, 49,2014-01-12 06:22:26.861145,335,281,597, 83,2014-01-17 16:49:03.567196,562,20,297, 83,2014-01-16 07:48:11.527536,555,246,667, 83,2014-01-12 14:22:29.296198,127,576,722, 49,2014-01-17 23:00:27.067164,555,594,40, 83,2014-01-11 23:09:35.98218,85,705,585, 39,2014-01-21 05:19:45.076047,927,110,807, 83,2014-01-18 08:53:56.937381,144,175,730, 49,2014-01-21 00:08:24.781909,689,763,277, 49,2014-01-17 02:42:20.425434,195,972,700, 83,2014-01-13 02:54:42.905244,840,962,296, 39,2014-01-15 10:36:59.470074,944,736,229, 83,2014-01-14 04:18:33.302249,499,324,235, 83,2014-01-17 17:23:34.255186,703,302,689, 83,2014-01-16 12:12:22.745371,445,353,407, 49,2014-01-11 03:50:14.10005,761,74,910, 49,2014-01-11 13:24:32.698265,768,586,109, 49,2014-01-12 13:33:32.480589,8,131,712, 39,2014-01-21 01:55:30.98381,424,22,663, 83,2014-01-21 00:37:11.474918,265,357,787, 83,2014-01-14 02:57:33.51209,603,790,363, 49,2014-01-15 01:42:22.47146,677,324,615, 49,2014-01-16 12:34:47.54085,929,590,27, 83,2014-01-14 02:19:22.634856,807,437,986, 83,2014-01-19 21:29:11.758057,58,847,692, 49,2014-01-13 14:29:41.647669,407,499,589, 83,2014-01-16 23:55:24.620276,800,179,761, 83,2014-01-18 17:27:30.099643,251,579,19, 83,2014-01-19 18:41:24.650202,526,477,807, 49,2014-01-18 02:58:14.064646,534,825,281, 83,2014-01-14 01:11:37.746675,75,107,32, 83,2014-01-21 05:03:51.511767,331,25,951, 83,2014-01-14 13:47:17.632261,306,714,403, 39,2014-01-19 12:59:49.27463,335,524,433, 39,2014-01-12 13:26:20.807965,305,808,287, 83,2014-01-11 06:47:44.699425,501,707,412, 83,2014-01-20 16:09:25.256811,703,273,526, 49,2014-01-12 12:46:05.884431,586,800,656, 39,2014-01-18 19:41:41.636806,529,420,58, 49,2014-01-20 10:17:50.038721,751,560,967, 49,2014-01-18 19:28:26.309866,62,381,42, 83,2014-01-21 02:36:24.54205,537,914,634, 39,2014-01-14 14:54:49.508796,784,382,262, 49,2014-01-11 17:46:59.612534,274,20,764, 49,2014-01-17 13:59:58.797656,160,298,565, 49,2014-01-20 07:07:12.254167,407,750,427, 49,2014-01-12 01:37:13.712584,611,139,98, 83,2014-01-18 07:24:31.495921,776,739,855, 49,2014-01-11 14:40:44.734755,321,106,52, 83,2014-01-11 21:32:44.696395,589,928,14, 39,2014-01-18 02:01:42.970839,566,762,696, 49,2014-01-14 21:38:18.247264,551,970,27, 83,2014-01-16 03:15:07.168274,369,335,163, 49,2014-01-13 08:36:30.512107,598,530,611, 39,2014-01-18 03:57:40.88212,245,370,74, 39,2014-01-14 18:44:18.926331,240,636,547, 83,2014-01-16 03:06:12.159776,62,132,520, 39,2014-01-13 
21:53:05.502396,472,349,643, 83,2014-01-12 06:11:49.025974,503,580,272, 49,2014-01-14 15:47:36.810398,993,923,862, 39,2014-01-21 04:51:19.567461,639,246,815, 49,2014-01-15 11:23:26.772649,811,161,523, 83,2014-01-14 14:51:28.322165,492,761,939, 39,2014-01-14 12:38:37.199723,768,346,32, 39,2014-01-13 18:23:16.047279,539,479,873, 83,2014-01-16 08:17:49.130129,481,335,2, 49,2014-01-14 23:26:21.899148,955,581,912, 39,2014-01-13 04:32:41.304091,376,871,770, 83,2014-01-18 01:03:55.01456,976,595,559, 39,2014-01-12 13:08:03.535954,39,88,86, 83,2014-01-12 08:50:31.342811,805,914,158, 83,2014-01-15 14:32:21.324846,860,663,734, 49,2014-01-12 09:44:28.078004,219,183,622, 39,2014-01-16 03:45:20.851607,635,250,495, 83,2014-01-16 12:19:20.937379,569,832,375, 83,2014-01-19 03:44:26.87566,853,396,554, 83,2014-01-15 04:52:33.106193,184,45,210, 39,2014-01-17 17:56:34.649964,518,418,345, 83,2014-01-16 05:08:58.372001,37,530,698, 39,2014-01-15 23:33:17.840947,187,402,499, 83,2014-01-18 19:29:19.346777,896,810,748, 49,2014-01-13 01:10:41.34284,201,505,856, 83,2014-01-20 01:11:36.088211,980,588,481, 49,2014-01-13 16:44:26.515051,213,42,563, 39,2014-01-15 13:47:11.854948,657,224,881, 49,2014-01-16 23:09:16.97075,250,712,655, 49,2014-01-17 15:28:45.441382,848,171,166, 49,2014-01-20 20:53:24.015143,588,959,861, 83,2014-01-20 01:02:22.473146,744,331,736, 83,2014-01-19 01:40:34.467356,705,739,492, 83,2014-01-14 06:41:00.825542,237,84,814, 39,2014-01-19 23:53:42.040607,569,495,648, 49,2014-01-13 07:04:01.240004,99,653,372, 83,2014-01-18 01:32:29.147706,686,19,381, 39,2014-01-13 06:32:19.24033,964,283,664, 49,2014-01-16 05:27:17.287703,88,954,93, 83,2014-01-13 03:50:18.277835,580,42,228, 83,2014-01-17 09:58:41.139477,439,659,301, 49,2014-01-18 13:59:58.591794,66,90,135, 49,2014-01-20 08:54:13.292815,737,353,857, 39,2014-01-19 03:06:44.675432,742,284,65, 39,2014-01-20 02:50:29.935024,52,781,245, 49,2014-01-14 17:26:34.61766,467,297,26, 83,2014-01-20 16:51:12.753855,674,388,579, 39,2014-01-15 00:35:50.786631,971,931,92, 83,2014-01-20 09:45:55.55504,199,558,826, 49,2014-01-18 14:35:39.629517,577,886,473, 83,2014-01-19 09:28:23.892823,913,266,753, 83,2014-01-16 21:42:30.205003,396,270,396, 83,2014-01-13 13:44:38.001518,840,412,196, 49,2014-01-14 03:01:41.73419,922,712,906, 83,2014-01-14 11:11:49.55178,406,815,34, 49,2014-01-15 18:55:19.344776,431,243,284, 39,2014-01-12 22:13:17.822401,399,871,650, 49,2014-01-17 07:56:16.066832,950,487,978, 83,2014-01-20 12:42:31.199724,295,214,154, 49,2014-01-19 01:22:34.793151,146,988,560, 49,2014-01-13 17:25:01.508214,941,537,982, 83,2014-01-20 03:35:55.214868,483,286,121, 39,2014-01-17 20:24:57.266716,928,986,209, 39,2014-01-11 13:05:35.97557,429,74,149, 39,2014-01-13 04:16:56.040408,836,245,999, 39,2014-01-16 14:18:39.307323,759,859,933, 39,2014-01-14 00:09:37.215994,409,593,933, 83,2014-01-20 09:49:25.188115,403,153,167, 39,2014-01-19 00:50:58.548072,177,830,509, 83,2014-01-19 09:36:54.503696,757,914,263, 83,2014-01-12 07:39:43.466369,430,588,567, 39,2014-01-15 04:49:39.68755,581,363,688, 39,2014-01-16 17:36:53.09549,623,437,266, 39,2014-01-11 10:33:56.759183,446,270,789, 39,2014-01-13 01:56:24.363401,32,402,933, 39,2014-01-15 14:27:23.030514,901,599,543, 39,2014-01-15 08:00:31.376843,928,773,403, 49,2014-01-12 12:47:37.117256,470,612,415, 49,2014-01-19 19:03:13.817564,502,816,421, 49,2014-01-14 11:46:26.931882,409,913,294, 83,2014-01-20 07:23:16.746773,932,709,152, 39,2014-01-17 22:31:37.710386,522,618,650, 83,2014-01-20 13:28:57.136886,687,767,100, 83,2014-01-12 
15:07:54.748709,463,384,249, 49,2014-01-21 05:33:19.444576,647,334,999, 49,2014-01-13 18:40:46.689086,916,867,960, 39,2014-01-17 14:03:14.093485,734,1,581, 49,2014-01-12 21:46:37.267397,488,807,434, 39,2014-01-20 06:37:02.755918,439,649,870, 39,2014-01-16 20:45:45.293209,674,221,310, 83,2014-01-21 03:09:12.060548,59,332,978, 83,2014-01-12 18:02:04.264551,515,841,176, 49,2014-01-15 18:21:40.508496,608,405,430, 49,2014-01-17 17:34:09.327264,674,584,325, 39,2014-01-13 11:07:40.240121,922,203,596, 49,2014-01-18 02:38:36.548905,325,867,362, 83,2014-01-13 01:52:48.635006,532,241,635, 49,2014-01-16 15:17:17.456115,501,836,224, 49,2014-01-17 06:28:01.737439,301,743,796, 49,2014-01-10 20:43:47.183078,215,991,113, 49,2014-01-14 18:54:11.95981,802,981,794, 83,2014-01-18 18:07:45.203806,265,312,863, 83,2014-01-15 05:33:26.870627,182,155,946, 83,2014-01-20 16:31:05.055719,578,811,199, 39,2014-01-19 08:41:41.96299,473,979,580, 49,2014-01-17 11:29:51.234028,871,961,275, 49,2014-01-15 00:58:28.086232,75,654,311, 39,2014-01-13 10:42:13.339833,39,94,862, 83,2014-01-19 04:17:28.351284,719,11,463, 39,2014-01-13 14:01:41.903796,537,539,485, 49,2014-01-17 02:28:40.271716,777,880,125, 39,2014-01-18 05:40:45.098476,615,545,699, 49,2014-01-20 16:33:19.614602,711,858,43, 39,2014-01-16 09:57:37.408602,365,648,561, 83,2014-01-20 00:48:39.847184,961,814,636, 39,2014-01-20 16:06:39.059178,477,182,146, 49,2014-01-19 08:38:24.097689,221,713,885, 49,2014-01-16 08:51:53.940671,876,323,199, 39,2014-01-12 07:53:16.326574,852,892,469, 39,2014-01-18 09:15:26.854025,379,556,853, 49,2014-01-11 23:37:39.23388,667,296,888, 49,2014-01-12 05:02:28.387541,366,386,948, 49,2014-01-20 07:17:31.118576,692,806,821, 49,2014-01-16 21:59:19.742376,743,114,605, 83,2014-01-19 02:36:37.714804,652,136,400, 39,2014-01-12 12:25:11.358697,727,452,142, 83,2014-01-13 18:37:56.2917,94,171,210, 83,2014-01-10 22:29:26.34981,779,198,246, 39,2014-01-18 07:42:28.81523,218,708,172, 83,2014-01-20 05:05:58.029139,155,238,603, 83,2014-01-10 23:13:13.532889,502,261,416, 39,2014-01-11 20:36:40.775041,774,866,243, 49,2014-01-17 17:13:43.232945,281,315,46, 39,2014-01-15 08:46:40.403934,390,549,345, 83,2014-01-11 07:07:45.830759,717,512,879, 83,2014-01-15 19:55:25.195935,700,752,196, 83,2014-01-11 14:16:31.637963,419,758,325, 39,2014-01-15 12:06:13.917411,295,141,924, 83,2014-01-18 10:37:38.535768,340,114,663, 49,2014-01-19 22:33:59.989666,850,31,350, 39,2014-01-18 06:07:55.821207,143,798,938, 83,2014-01-14 07:06:18.807484,962,707,342, 49,2014-01-16 22:14:45.088142,565,657,371, 83,2014-01-17 16:41:15.435808,163,132,238, 49,2014-01-19 21:03:56.216086,578,321,820, 49,2014-01-15 17:03:24.935326,358,700,406, 39,2014-01-17 02:47:54.495405,84,86,627, 39,2014-01-17 23:42:20.314194,851,691,690, 39,2014-01-21 05:55:18.875997,930,387,998, 39,2014-01-18 14:41:10.82198,300,246,482, 49,2014-01-15 02:57:47.168219,341,550,604, 39,2014-01-11 23:32:58.109877,467,55,649, 39,2014-01-19 23:43:39.209521,897,142,266, 39,2014-01-14 04:15:18.286795,457,576,184, 49,2014-01-18 01:32:17.852672,693,818,989, 83,2014-01-17 20:20:16.924745,497,101,542, 83,2014-01-15 20:40:29.645492,948,108,738, 39,2014-01-21 00:10:14.144372,891,374,432, 49,2014-01-17 22:49:43.274555,833,275,437, 39,2014-01-12 22:22:58.460722,708,172,682, 83,2014-01-19 23:16:12.17351,428,836,86, 49,2014-01-18 02:02:56.807862,153,287,396, 49,2014-01-13 22:59:39.236182,459,807,977, 83,2014-01-16 10:29:55.406455,226,332,493, 83,2014-01-12 04:49:37.211797,112,528,842, 83,2014-01-14 10:07:25.066942,361,871,82, 39,2014-01-11 
00:25:20.602391,713,223,715, 49,2014-01-12 23:06:08.849759,675,803,846, 39,2014-01-19 02:13:38.984353,521,179,557, 83,2014-01-18 15:02:59.138158,49,342,142, 83,2014-01-11 15:40:08.839425,705,262,479, 49,2014-01-16 02:21:34.805979,420,371,26, 83,2014-01-11 16:09:17.945643,607,921,136, 83,2014-01-17 17:54:53.927567,450,207,450, 39,2014-01-12 13:02:50.241788,292,323,195, 49,2014-01-20 17:13:14.161729,330,63,234, 39,2014-01-12 04:58:18.862894,812,610,746, 39,2014-01-18 19:50:44.737194,331,444,696, 39,2014-01-17 10:55:34.475922,651,49,742, 49,2014-01-12 04:53:37.738891,701,602,214, 49,2014-01-16 04:31:55.559593,4,127,959, 83,2014-01-11 07:53:21.644141,710,862,839, 49,2014-01-13 08:26:35.849186,723,509,69, 49,2014-01-14 22:15:34.769114,0,572,205, 49,2014-01-14 16:08:39.930935,440,877,744, 39,2014-01-20 13:58:53.701858,38,63,918, 39,2014-01-11 12:35:51.693858,217,341,819, 83,2014-01-19 16:49:09.576847,263,124,245, 39,2014-01-20 08:09:07.84623,845,678,711, 49,2014-01-18 15:25:34.968831,625,408,948, 39,2014-01-11 09:12:08.037569,223,782,139, 39,2014-01-19 01:25:20.019741,978,743,227, 43,2014-01-15 11:28:31.776694,856,960,206, 56,2014-01-14 12:11:47.27375,11,672,224, 56,2014-01-14 05:55:15.426197,954,955,437, 13,2014-01-16 20:18:08.988491,831,315,636, 43,2014-01-18 02:19:12.341111,551,348,942, 43,2014-01-14 10:20:36.028587,784,104,962, 56,2014-01-18 23:24:17.83825,216,559,614, 13,2014-01-15 22:32:51.325464,533,411,48, 43,2014-01-11 19:23:35.166746,933,641,443, 56,2014-01-19 19:04:26.678095,386,99,672, 56,2014-01-21 04:54:26.131443,629,851,341, 56,2014-01-12 15:32:53.112389,142,589,656, 13,2014-01-16 06:59:20.605661,323,627,87, 56,2014-01-12 11:57:16.37365,534,380,409, 43,2014-01-12 02:46:07.274117,389,753,84, 56,2014-01-17 15:57:39.468975,136,457,856, 43,2014-01-20 11:48:01.110843,842,987,180, 43,2014-01-18 17:41:41.750458,35,763,741, 43,2014-01-19 00:51:17.207865,455,531,99, 43,2014-01-15 10:19:56.670435,810,967,393, 56,2014-01-19 05:35:03.394599,37,158,2, 13,2014-01-11 03:17:53.057051,997,120,98, 43,2014-01-19 12:35:31.439968,561,829,837, 56,2014-01-12 15:43:43.325954,375,608,523, 43,2014-01-20 21:16:46.758909,960,18,140, 43,2014-01-20 05:11:23.133826,276,772,51, 13,2014-01-11 02:32:52.9028,82,38,757, 43,2014-01-19 23:25:54.60514,408,22,240, 56,2014-01-17 14:36:58.102657,344,892,910, 56,2014-01-11 15:45:00.940369,790,547,587, 43,2014-01-17 18:51:14.624881,374,917,11, 43,2014-01-11 20:05:29.879351,798,479,636, 43,2014-01-15 07:56:48.214539,474,591,27, 56,2014-01-21 04:46:30.051077,836,590,375, 43,2014-01-17 20:23:38.867842,53,589,180, 43,2014-01-12 04:16:00.55565,555,479,670, 43,2014-01-14 09:07:06.079665,913,802,162, 56,2014-01-15 13:47:56.706511,668,830,518, 43,2014-01-17 06:48:51.881114,836,23,296, 43,2014-01-15 08:30:41.24641,907,507,362, 13,2014-01-14 02:52:23.384606,603,105,943, 56,2014-01-17 05:43:18.012975,331,798,822, 43,2014-01-17 04:03:34.358798,620,443,511, 43,2014-01-19 13:51:43.990269,103,422,974, 43,2014-01-18 21:40:34.386625,951,844,564, 56,2014-01-18 10:49:41.633334,179,315,573, 56,2014-01-15 23:49:23.459243,293,718,278, 13,2014-01-18 03:28:35.497468,213,75,216, 56,2014-01-15 22:31:23.383793,812,619,465, 13,2014-01-13 18:40:40.667527,693,386,389, 43,2014-01-12 07:48:32.168322,663,633,262, 56,2014-01-13 22:06:26.778393,641,788,554, 56,2014-01-14 01:58:33.724578,66,202,949, 13,2014-01-21 00:24:03.60829,216,605,426, 13,2014-01-15 17:50:10.104346,958,718,370, 13,2014-01-13 17:15:20.483906,271,986,13, 43,2014-01-19 23:35:26.742116,813,571,439, 13,2014-01-16 00:23:03.007147,106,411,270, 
56,2014-01-12 10:41:15.089046,411,313,206, 56,2014-01-16 08:12:24.845192,162,91,755, 43,2014-01-16 20:08:03.947935,834,365,296, 56,2014-01-19 09:32:29.714346,721,154,663, 56,2014-01-17 08:17:54.724544,553,663,494, 43,2014-01-10 22:04:52.162474,317,492,319, 13,2014-01-19 08:18:59.765423,289,611,934, 56,2014-01-13 22:41:33.592804,661,448,854, 13,2014-01-12 06:20:52.718123,766,849,299, 43,2014-01-12 11:26:05.845506,470,838,881, 56,2014-01-18 16:29:30.299317,108,80,778, 56,2014-01-18 17:09:44.599236,143,748,453, 56,2014-01-16 23:56:47.091917,851,609,550, 43,2014-01-11 13:21:53.683923,78,573,329, 56,2014-01-14 16:53:02.612211,535,925,611, 56,2014-01-12 22:00:21.451134,583,758,12, 43,2014-01-20 07:13:37.674191,876,659,153, 13,2014-01-12 08:33:36.998836,730,213,478, 13,2014-01-20 12:50:03.084469,462,422,460, 13,2014-01-15 01:03:01.133853,71,132,423, 56,2014-01-19 16:02:12.496723,20,726,582, 13,2014-01-15 05:21:26.468261,315,798,612, 43,2014-01-17 23:43:41.80138,454,33,707, 56,2014-01-21 03:50:44.665045,178,303,305, 43,2014-01-18 07:27:53.246654,436,217,897, 56,2014-01-21 05:42:15.525957,4,284,80, 13,2014-01-20 22:14:48.273335,666,292,863, 13,2014-01-12 19:18:03.35142,296,834,682, 56,2014-01-13 16:57:36.009864,897,698,954, 43,2014-01-19 15:50:15.015451,667,766,515, 43,2014-01-17 23:41:06.358567,856,413,714, 13,2014-01-15 07:38:51.099329,374,396,596, 43,2014-01-14 18:02:39.860643,669,64,577, 43,2014-01-13 13:49:10.306501,75,12,52, 56,2014-01-13 11:11:20.813675,507,913,918, 43,2014-01-10 20:20:34.585605,126,996,400, 56,2014-01-13 15:54:02.468975,394,971,774, 56,2014-01-11 13:30:20.579098,300,413,185, 56,2014-01-13 23:02:08.17841,618,275,176, 13,2014-01-15 02:14:55.187098,415,200,885, 43,2014-01-13 04:56:26.424604,200,657,115, 13,2014-01-11 09:31:38.477726,203,870,911, 43,2014-01-12 13:24:39.786753,903,883,603, 43,2014-01-19 08:53:13.516941,330,541,769, 56,2014-01-12 02:53:32.161649,890,943,919, 43,2014-01-16 10:17:42.398965,811,945,435, 43,2014-01-11 00:53:34.968075,786,107,223, 56,2014-01-11 04:07:09.83584,453,324,204, 13,2014-01-17 22:51:19.398221,612,381,702, 13,2014-01-20 17:43:38.052544,592,634,362, 13,2014-01-15 09:10:10.969693,472,170,939, 43,2014-01-16 08:53:31.894944,735,775,957, 56,2014-01-14 17:05:04.520805,189,659,814, 13,2014-01-12 02:53:52.771073,376,868,637, 43,2014-01-16 06:44:16.559989,994,277,815, 13,2014-01-11 18:32:57.767459,220,774,534, 43,2014-01-12 02:36:08.297449,577,957,171, 56,2014-01-15 22:59:04.833324,672,393,23, 43,2014-01-13 17:51:01.118879,940,800,465, 13,2014-01-14 23:33:44.307313,154,908,105, 43,2014-01-14 08:49:19.848775,617,286,240, 13,2014-01-20 21:32:07.477865,295,969,402, 13,2014-01-19 11:12:35.406642,925,662,238, 43,2014-01-18 06:51:59.709837,818,412,576, 56,2014-01-13 05:21:17.784366,504,701,250, 13,2014-01-11 16:23:56.220317,895,141,499, 13,2014-01-18 07:12:34.295443,191,228,350, 43,2014-01-16 01:15:20.253759,539,282,657, 43,2014-01-12 09:54:16.799833,555,845,942, 13,2014-01-11 00:14:42.473853,783,108,568, 13,2014-01-20 07:30:15.440857,529,543,856, 43,2014-01-14 18:50:43.224438,33,444,898, 13,2014-01-11 13:46:20.951579,267,779,905, 13,2014-01-11 14:54:55.22761,231,499,291, 13,2014-01-12 21:43:56.741378,148,823,19, 13,2014-01-12 20:39:53.113228,709,213,217, 43,2014-01-17 05:12:37.626995,456,540,280, 43,2014-01-13 02:37:31.709452,381,644,413, 56,2014-01-13 04:47:02.949069,729,278,878, 56,2014-01-13 22:03:57.025215,675,541,191, 43,2014-01-12 14:21:09.761996,697,539,723, 43,2014-01-17 17:57:13.91918,598,188,101, 43,2014-01-19 10:57:28.920158,356,357,894, 
13,2014-01-16 11:26:14.282801,861,116,427, 56,2014-01-19 00:51:06.690253,419,473,597, 13,2014-01-14 11:41:45.480147,127,734,617, 56,2014-01-17 09:59:12.050679,600,413,422, 13,2014-01-20 07:27:14.987702,973,229,842, 13,2014-01-19 14:40:50.313471,559,37,732, 56,2014-01-20 07:50:13.169558,611,774,68, 56,2014-01-14 01:00:59.295433,973,462,179, 56,2014-01-12 17:30:10.162246,675,286,275, 56,2014-01-19 23:22:20.647422,840,966,504, 43,2014-01-12 06:13:34.702075,367,809,933, 13,2014-01-20 04:22:09.872083,179,786,411, 56,2014-01-11 22:43:38.432206,933,410,831, 13,2014-01-13 02:37:30.92281,141,997,724, 13,2014-01-17 05:34:44.167526,741,399,852, 56,2014-01-17 03:58:58.685966,935,765,837, 56,2014-01-14 16:31:47.722644,712,637,925, 43,2014-01-17 09:49:26.641379,521,946,577, 56,2014-01-16 05:29:14.126823,176,848,244, 56,2014-01-18 15:22:30.947081,364,914,428, 13,2014-01-18 03:35:47.592958,427,805,4, 43,2014-01-17 00:24:09.354852,951,999,247, 56,2014-01-20 17:06:27.688878,85,442,863, 13,2014-01-20 04:15:40.706186,262,949,84, 13,2014-01-12 23:36:46.981847,301,129,869, 56,2014-01-12 13:43:59.39833,598,117,398, 43,2014-01-12 03:02:43.655674,969,232,546, 43,2014-01-16 01:40:44.007061,966,176,704, 13,2014-01-14 08:05:09.160745,431,318,834, 43,2014-01-19 00:59:57.574854,51,222,605, 13,2014-01-14 06:38:12.92722,371,223,453, 43,2014-01-19 23:31:23.443546,870,123,72, 56,2014-01-16 19:51:04.265106,59,542,787, 43,2014-01-17 22:19:58.407367,498,533,554, 56,2014-01-16 03:30:35.494225,476,641,224, 13,2014-01-15 21:18:19.253227,183,33,274, 56,2014-01-16 07:00:48.720839,621,469,450, 43,2014-01-15 05:20:48.664202,267,763,910, 13,2014-01-19 02:19:18.548661,251,655,27, 56,2014-01-18 04:30:58.883084,827,705,358, 56,2014-01-13 22:43:09.311624,962,143,720, 43,2014-01-20 12:32:53.250736,808,601,875, 13,2014-01-17 02:53:08.755586,484,67,143, 43,2014-01-15 01:26:47.743831,518,195,585, 56,2014-01-12 09:10:24.173547,597,810,50, 56,2014-01-13 02:27:52.923113,649,205,520, 13,2014-01-10 23:25:46.429797,217,657,969, 43,2014-01-16 05:42:11.896191,434,7,257, 56,2014-01-19 16:17:19.56491,854,665,28, 13,2014-01-16 08:55:00.557039,561,863,447, 56,2014-01-13 15:04:42.843692,666,718,593, 56,2014-01-16 13:53:07.157869,52,368,345, 43,2014-01-12 03:19:09.911892,196,126,170, 13,2014-01-13 02:11:10.53257,121,930,696, 13,2014-01-15 12:08:47.864474,21,918,286, 13,2014-01-14 06:55:56.893738,741,109,325, 13,2014-01-14 19:55:09.9309,399,538,551, 13,2014-01-16 19:11:31.520148,668,358,576, 43,2014-01-19 12:36:40.900799,704,942,710, 56,2014-01-18 08:00:19.091645,582,712,839, 13,2014-01-14 14:11:29.095002,829,734,677, 43,2014-01-12 13:14:53.828019,667,715,849, 13,2014-01-17 01:31:42.53561,929,79,482, 56,2014-01-20 14:02:33.360108,351,697,724, 13,2014-01-19 15:34:52.235386,52,405,149, 56,2014-01-11 23:02:18.029835,289,860,890, 56,2014-01-15 05:20:52.613335,657,903,850, 56,2014-01-14 16:35:40.956224,209,937,909, 43,2014-01-16 08:23:06.694038,981,103,463, 56,2014-01-13 01:40:11.161997,377,747,999, 56,2014-01-11 15:06:39.839308,280,988,263, 43,2014-01-19 11:06:16.006081,200,594,748, 43,2014-01-12 08:13:04.413153,657,568,765, 56,2014-01-17 21:59:48.594895,391,73,233, 56,2014-01-13 06:33:03.749911,501,714,682, 56,2014-01-13 21:23:28.5867,784,393,701, 56,2014-01-20 04:27:41.518427,166,437,57, 13,2014-01-13 09:58:50.180128,767,376,144, 56,2014-01-19 07:05:40.48331,790,568,4, 13,2014-01-18 14:45:01.083338,404,946,184, 43,2014-01-18 22:53:50.737167,759,309,562, 43,2014-01-11 16:10:23.327002,266,229,523, 56,2014-01-13 22:38:08.241206,941,245,953, 43,2014-01-20 
06:13:00.649058,861,151,670, 43,2014-01-13 22:21:33.859572,118,261,259, 56,2014-01-18 14:46:56.10568,347,998,601, 56,2014-01-13 07:08:57.542796,814,514,141, 56,2014-01-17 22:16:43.790472,219,674,200, 13,2014-01-14 03:58:27.625828,25,811,150, 56,2014-01-11 13:45:38.443596,224,159,317, 13,2014-01-15 00:17:02.882536,87,253,831, 13,2014-01-17 22:09:56.720829,351,470,536, 43,2014-01-13 07:00:32.271615,542,99,323, 13,2014-01-21 05:48:45.418146,668,124,971, 43,2014-01-17 06:12:30.081357,609,878,612, 56,2014-01-11 16:35:24.507001,189,978,19, 56,2014-01-11 22:51:03.447981,187,902,509, 56,2014-01-11 05:33:22.694693,82,770,451, 13,2014-01-15 13:11:05.463225,385,995,727, 43,2014-01-17 11:14:10.142439,527,756,224, 43,2014-01-13 11:13:33.857109,162,94,412, 56,2014-01-16 08:17:45.302533,625,71,840, 56,2014-01-15 16:20:26.14852,896,188,448, 56,2014-01-14 23:26:38.270262,946,579,290, 43,2014-01-13 00:17:33.897848,647,102,75, 43,2014-01-18 02:53:29.898431,222,152,215, 56,2014-01-18 00:50:06.856962,399,143,187, 13,2014-01-11 22:45:15.416275,860,67,444, 56,2014-01-20 16:52:20.078559,430,986,142, 43,2014-01-16 01:55:47.340273,265,380,260, 56,2014-01-19 17:30:16.499612,463,847,825, 56,2014-01-18 09:46:10.815726,130,790,845, 56,2014-01-16 22:06:10.667275,800,757,584, 13,2014-01-12 10:08:24.740818,636,890,834, 43,2014-01-17 09:59:11.464784,887,301,426, 13,2014-01-20 00:27:44.526846,90,896,464, 43,2014-01-20 04:55:20.846918,239,753,869, 43,2014-01-19 21:08:09.00758,295,279,979, 43,2014-01-16 16:44:28.317737,119,709,898, 43,2014-01-13 02:53:48.472746,567,809,675, 43,2014-01-20 14:53:47.451176,351,902,726, 13,2014-01-20 21:01:31.200273,176,657,224, 43,2014-01-20 05:03:45.193994,855,870,962, 43,2014-01-12 15:54:19.722791,218,705,642, 56,2014-01-20 20:50:16.61842,70,763,660, 13,2014-01-16 05:16:15.275351,906,334,770, 13,2014-01-13 12:29:44.229792,337,459,939, 56,2014-01-11 13:41:20.066401,688,811,859, 56,2014-01-16 14:49:37.970044,694,818,55, 43,2014-01-18 05:40:49.693016,888,609,577, 13,2014-01-18 04:55:30.20884,81,516,328, 43,2014-01-19 06:03:11.827153,3,709,689, 56,2014-01-13 07:58:34.995969,704,218,539, 13,2014-01-12 15:15:56.357358,301,787,643, 43,2014-01-12 23:29:50.097415,6,682,156, 56,2014-01-15 12:16:08.893816,499,880,916, 13,2014-01-19 22:09:26.256209,2,755,584, 56,2014-01-20 04:19:56.954797,497,845,498, 13,2014-01-16 15:01:24.310091,378,112,892, 43,2014-01-19 09:01:46.334769,871,814,804, 56,2014-01-15 00:15:44.295069,675,632,939, 13,2014-01-15 02:31:40.809703,957,466,695, 56,2014-01-16 12:47:57.150914,307,390,420, 43,2014-01-21 02:21:54.962763,682,235,741, 43,2014-01-16 16:40:05.550521,736,881,935, 13,2014-01-12 16:47:08.615698,748,857,141, 56,2014-01-19 20:49:39.489609,185,327,84, 56,2014-01-15 15:35:26.397439,663,35,114, 13,2014-01-11 07:55:17.623697,818,423,684, 43,2014-01-15 07:34:07.807346,749,108,836, 13,2014-01-17 22:29:14.870603,738,197,869, 56,2014-01-21 02:49:05.074873,215,390,77, 13,2014-01-14 22:35:39.00762,363,80,770, 43,2014-01-16 21:33:00.064597,3,988,69, 43,2014-01-12 12:43:24.798082,126,104,851, 43,2014-01-14 13:25:55.626039,719,128,144, 2,2014-01-11 20:49:15.339948,883,54,921, 2,2014-01-15 05:13:09.027873,458,168,23, 47,2014-01-15 07:07:15.692859,583,347,220, 32,2014-01-17 15:38:53.309992,522,87,699, 47,2014-01-12 04:53:58.721308,313,188,935, 47,2014-01-12 06:02:45.901699,626,969,691, 2,2014-01-15 15:42:05.137565,313,362,670, 47,2014-01-14 16:52:33.717277,234,825,761, 47,2014-01-14 01:18:42.259476,788,120,756, 2,2014-01-17 19:11:55.234979,851,635,589, 47,2014-01-19 
09:08:42.611093,554,650,170, 32,2014-01-12 17:28:08.515685,789,692,178, 32,2014-01-16 17:31:52.189776,479,814,27, 2,2014-01-14 18:10:06.921183,526,333,815, 2,2014-01-10 20:29:54.850454,690,193,585, 47,2014-01-20 21:47:36.485264,966,212,462, 32,2014-01-19 00:41:47.730886,993,92,933, 47,2014-01-16 13:17:52.001367,287,960,746, 2,2014-01-20 18:09:31.448027,920,537,716, 47,2014-01-14 11:21:53.281407,562,455,5, 32,2014-01-18 10:05:00.617483,758,188,175, 32,2014-01-19 08:59:10.937636,78,661,716, 2,2014-01-19 06:57:19.679265,909,753,671, 47,2014-01-18 22:00:18.24118,148,870,964, 47,2014-01-13 10:33:18.745402,160,395,162, 2,2014-01-15 23:26:34.549867,678,614,84, 2,2014-01-18 18:49:23.316053,259,749,183, 47,2014-01-17 13:08:57.753021,715,396,192, 2,2014-01-11 14:59:34.614465,725,67,858, 32,2014-01-20 11:32:48.114135,878,838,72, 2,2014-01-10 20:34:53.379479,666,990,932, 2,2014-01-12 15:48:49.954412,424,477,47, 32,2014-01-14 10:45:57.142428,563,161,610, 2,2014-01-15 07:42:09.072338,379,466,784, 47,2014-01-19 11:27:43.264404,536,138,48, 47,2014-01-15 19:39:55.863736,599,545,604, 2,2014-01-16 17:44:54.974036,989,172,488, 32,2014-01-13 21:09:48.401969,331,34,676, 32,2014-01-19 16:32:29.581014,786,678,921, 47,2014-01-19 23:03:37.233513,428,933,996, 47,2014-01-20 20:21:43.637367,703,287,763, 47,2014-01-17 19:41:12.192106,830,224,134, 32,2014-01-11 10:31:45.749198,19,24,744, 2,2014-01-16 07:53:35.827143,908,597,607, 32,2014-01-11 07:51:19.11329,601,385,688, 47,2014-01-11 11:01:40.600071,662,722,528, 2,2014-01-15 23:41:12.312406,144,523,142, 32,2014-01-19 12:33:06.844175,920,344,296, 2,2014-01-17 04:19:32.601439,619,201,338, 47,2014-01-15 11:50:43.760433,520,729,62, 2,2014-01-12 17:55:00.125582,375,300,748, 47,2014-01-14 08:24:33.218922,109,68,45, 2,2014-01-13 14:49:54.698489,49,562,185, 47,2014-01-21 04:52:19.804847,614,22,775, 32,2014-01-12 00:24:51.460102,256,426,615, 2,2014-01-16 05:23:13.443891,946,953,473, 47,2014-01-15 22:18:54.354714,530,753,921, 32,2014-01-19 23:14:14.776575,49,705,922, 47,2014-01-12 12:32:11.196911,719,248,276, 47,2014-01-16 17:18:28.969178,475,900,594, 2,2014-01-19 04:47:02.89071,527,500,567, 32,2014-01-12 13:07:04.576391,158,624,484, 47,2014-01-18 13:07:18.92359,820,672,862, 47,2014-01-12 09:33:00.033139,888,722,85, 32,2014-01-17 00:49:13.648728,487,593,858, 32,2014-01-16 18:35:02.188414,170,140,320, 2,2014-01-17 09:12:55.896874,727,377,172, 2,2014-01-12 12:34:08.623183,810,342,582, 2,2014-01-19 19:44:50.590381,448,415,101, 32,2014-01-15 19:45:25.477888,486,852,146, 47,2014-01-11 05:37:45.856697,119,477,634, 47,2014-01-19 10:06:34.227748,467,597,837, 2,2014-01-12 09:26:37.669994,739,975,897, 32,2014-01-11 20:09:31.606315,911,745,571, 32,2014-01-14 12:00:10.05531,145,18,735, 32,2014-01-12 21:17:56.783283,500,15,547, 47,2014-01-12 11:11:12.206385,402,88,42, 2,2014-01-19 15:41:22.367717,573,754,255, 32,2014-01-11 03:51:03.627458,55,333,16, 32,2014-01-18 19:30:44.807824,727,99,380, 2,2014-01-13 21:32:06.128569,349,979,987, 47,2014-01-13 01:46:03.753459,405,970,979, 47,2014-01-11 21:55:18.027165,237,812,83, 32,2014-01-16 16:22:00.827057,615,543,156, 32,2014-01-13 00:38:23.558306,323,761,435, 47,2014-01-13 02:20:09.487268,298,193,311, 47,2014-01-11 15:45:14.270948,245,936,659, 2,2014-01-18 02:57:17.913439,516,356,488, 2,2014-01-11 19:34:24.263842,937,965,87, 32,2014-01-13 08:17:25.467859,792,989,506, 2,2014-01-13 14:15:46.882617,298,647,936, 47,2014-01-20 04:21:27.154971,26,806,350, 47,2014-01-15 01:24:30.044669,98,944,544, 2,2014-01-10 21:23:05.806625,880,91,136, 32,2014-01-11 
07:54:27.188109,773,820,937, 32,2014-01-10 20:13:43.693397,661,906,545, 2,2014-01-16 19:58:07.995039,212,116,186, 2,2014-01-17 21:07:23.085403,937,327,507, 47,2014-01-12 12:47:52.31658,285,782,488, 32,2014-01-15 09:42:58.58542,99,648,626, 2,2014-01-12 10:52:48.563291,142,565,69, 2,2014-01-12 22:25:38.173696,306,816,796, 2,2014-01-13 13:49:32.813588,103,908,121, 2,2014-01-14 00:19:26.233703,490,780,628, 32,2014-01-13 22:35:09.78001,626,713,305, 47,2014-01-17 05:49:42.868898,674,231,123, 47,2014-01-16 01:37:23.016986,99,374,447, 32,2014-01-15 13:46:21.986396,905,292,836, 32,2014-01-15 15:31:05.236614,221,484,555, 32,2014-01-16 09:28:26.644864,427,2,226, 2,2014-01-13 03:17:06.79422,838,646,32, 47,2014-01-18 17:03:11.365182,445,914,416, 2,2014-01-18 15:14:30.398323,474,54,981, 2,2014-01-14 05:12:24.821384,244,264,877, 47,2014-01-14 03:25:12.192241,954,972,880, 2,2014-01-20 19:52:53.956629,525,57,318, 32,2014-01-16 11:32:34.309071,889,215,974, 47,2014-01-14 23:10:26.463189,783,606,722, 32,2014-01-17 16:50:11.870068,332,855,956, 2,2014-01-17 11:06:58.572912,353,777,381, 47,2014-01-17 11:27:51.931467,749,955,494, 47,2014-01-20 11:05:58.753103,914,345,540, 2,2014-01-16 09:28:25.727883,915,524,660, 32,2014-01-11 06:52:21.976134,497,177,261, 2,2014-01-20 12:29:04.559729,128,919,610, 47,2014-01-16 21:22:52.916411,122,418,771, 32,2014-01-11 07:06:05.669531,454,252,721, 32,2014-01-16 02:27:12.554768,145,214,896, 32,2014-01-13 12:30:16.001815,421,751,988, 32,2014-01-12 23:53:57.986531,209,589,474, 2,2014-01-20 16:10:11.140187,330,553,780, 47,2014-01-15 03:23:04.565104,792,51,895, 47,2014-01-15 02:19:36.160227,299,394,945, 2,2014-01-12 23:59:43.953775,836,965,581, 47,2014-01-18 07:42:30.798807,0,192,886, 2,2014-01-18 04:54:45.940236,289,606,249, 47,2014-01-19 09:49:26.822673,539,99,489, 2,2014-01-13 03:19:53.816213,866,361,818, 2,2014-01-12 12:41:07.926632,465,688,364, 47,2014-01-13 19:20:32.059705,143,149,981, 2,2014-01-18 16:48:20.461077,67,812,839, 32,2014-01-14 19:58:14.720852,310,230,896, 32,2014-01-11 06:23:43.424889,548,223,16, 47,2014-01-16 02:02:50.8594,18,344,106, 2,2014-01-18 05:10:39.542655,676,321,883, 2,2014-01-14 13:48:55.617548,376,15,736, 47,2014-01-15 15:55:44.816029,781,550,816, 2,2014-01-13 10:43:13.851726,238,678,479, 2,2014-01-18 16:59:22.080736,631,796,579, 2,2014-01-12 02:45:56.686516,2,196,157, 2,2014-01-20 01:50:12.424639,639,932,211, 2,2014-01-14 22:27:14.012202,269,718,977, 32,2014-01-11 07:51:55.43962,155,581,384, 2,2014-01-15 05:18:38.152941,488,226,843, 47,2014-01-15 09:19:35.988337,759,30,122, 47,2014-01-21 00:20:59.999348,550,265,539, 32,2014-01-10 20:41:31.069352,898,948,180, 2,2014-01-15 20:25:41.658288,483,407,847, 32,2014-01-15 20:48:12.554116,199,932,834, 47,2014-01-13 13:11:47.071167,964,990,954, 32,2014-01-18 00:19:39.644819,308,926,367, 47,2014-01-15 06:58:23.694303,837,750,577, 47,2014-01-17 20:34:51.636271,558,514,859, 32,2014-01-11 20:39:15.805046,611,247,882, 32,2014-01-17 10:58:07.648497,888,767,184, 2,2014-01-14 22:17:22.435498,193,189,266, 2,2014-01-19 05:34:01.745282,11,535,265, 2,2014-01-15 14:47:34.47117,376,75,766, 2,2014-01-17 05:37:16.25171,548,457,363, 32,2014-01-20 22:15:09.671915,732,960,700, 2,2014-01-18 14:08:06.530875,677,173,783, 2,2014-01-14 16:25:36.712787,999,512,422, 32,2014-01-14 12:13:24.392767,618,275,811, 32,2014-01-19 00:31:49.956183,834,244,337, 32,2014-01-19 22:28:27.572187,639,536,452, 2,2014-01-11 11:24:03.935422,661,821,479, 32,2014-01-12 08:20:45.573731,262,768,62, 47,2014-01-14 08:24:12.388217,118,810,473, 47,2014-01-14 
02:07:17.787148,704,926,959, 2,2014-01-20 05:20:07.654467,586,88,116, 47,2014-01-15 15:10:09.074732,934,666,507, 32,2014-01-12 21:57:30.212205,176,38,796, 47,2014-01-13 21:47:21.667089,769,178,35, 47,2014-01-16 03:02:04.514352,221,760,9, 32,2014-01-17 07:16:08.365146,957,13,106, 2,2014-01-18 11:06:57.655426,910,622,449, 47,2014-01-15 21:23:04.5137,16,753,418, 47,2014-01-17 07:57:39.434498,223,719,259, 47,2014-01-13 01:32:39.313714,910,356,900, 47,2014-01-20 22:11:17.067816,101,966,697, 2,2014-01-20 01:09:26.505665,636,3,425, 47,2014-01-20 05:52:18.958532,212,530,344, 47,2014-01-14 23:09:40.762538,958,643,3, 32,2014-01-16 15:44:18.141935,210,792,684, 47,2014-01-10 20:31:34.763578,849,724,107, 2,2014-01-11 04:07:48.411034,262,568,862, 47,2014-01-20 18:01:40.577433,705,367,845, 2,2014-01-19 06:05:36.50886,299,999,809, 47,2014-01-15 22:55:22.882204,105,579,992, 2,2014-01-16 17:38:56.829144,244,575,637, 47,2014-01-18 22:20:46.180775,174,49,736, 47,2014-01-13 07:03:29.413499,262,583,108, 32,2014-01-20 14:04:33.541931,252,199,834, 32,2014-01-12 04:34:10.573961,36,712,842, 32,2014-01-11 01:35:19.369682,848,540,205, 47,2014-01-19 06:33:01.114119,336,280,414, 32,2014-01-12 19:58:14.509383,313,945,616, 2,2014-01-12 13:56:04.943413,406,866,7, 32,2014-01-12 08:57:13.502754,307,557,569, 47,2014-01-16 02:05:32.29695,193,741,522, 2,2014-01-11 13:16:12.597879,975,329,592, 32,2014-01-17 04:07:22.577486,142,320,345, 47,2014-01-18 04:03:02.509155,223,146,3, 32,2014-01-14 15:03:34.264968,822,410,575, 32,2014-01-12 01:09:27.092257,145,570,348, 2,2014-01-14 05:19:10.874301,824,783,55, 47,2014-01-11 20:10:31.920814,320,486,316, 47,2014-01-17 02:32:31.605957,227,768,630, 32,2014-01-20 17:16:50.3088,429,800,761, 2,2014-01-14 01:43:11.234527,455,70,848, 47,2014-01-16 18:43:48.674192,837,629,897, 47,2014-01-19 12:26:16.814465,729,96,104, 2,2014-01-13 01:35:30.193478,885,652,368, 32,2014-01-20 21:53:29.436729,546,352,448, 32,2014-01-14 22:10:34.9564,900,245,49, 2,2014-01-13 02:07:04.957056,887,680,107, 2,2014-01-10 20:01:17.847764,441,850,215, 32,2014-01-14 10:12:15.534252,983,35,26, 32,2014-01-11 02:12:41.466335,355,772,365, 32,2014-01-15 22:56:40.729969,69,396,148, 47,2014-01-20 07:51:12.363397,546,518,580, 32,2014-01-19 04:33:27.64711,814,507,106, 2,2014-01-18 10:00:10.143468,103,344,674, 2,2014-01-19 15:55:45.905747,595,750,321, 47,2014-01-20 13:07:38.221071,423,384,436, 32,2014-01-18 15:35:29.513149,397,745,918, 2,2014-01-17 16:28:47.019866,713,291,902, 32,2014-01-12 03:05:52.730874,529,809,279, 2,2014-01-20 09:31:34.456562,826,501,632, 47,2014-01-19 05:26:00.522619,660,822,703, 2,2014-01-17 09:11:25.027824,72,736,945, 47,2014-01-21 02:47:47.05486,273,466,376, 32,2014-01-15 03:33:23.100524,993,949,316, 47,2014-01-14 07:14:27.536979,323,896,893, 47,2014-01-14 11:51:21.319829,270,52,80, 32,2014-01-16 08:42:50.192781,851,645,430, 32,2014-01-17 16:33:38.411281,724,684,563, 47,2014-01-15 12:01:53.240642,844,264,734, 47,2014-01-12 05:15:21.798739,831,976,229, 2,2014-01-17 03:50:28.72008,792,761,828, 32,2014-01-18 17:45:04.475588,686,582,723, 32,2014-01-18 03:59:10.47293,534,385,558, 2,2014-01-15 10:16:45.534545,688,795,107, 32,2014-01-20 23:20:34.669066,135,486,434, 2,2014-01-17 19:52:39.909659,171,477,745, 2,2014-01-19 12:27:20.490945,216,745,133, 2,2014-01-12 19:27:39.626122,48,51,433, 47,2014-01-17 19:53:57.757423,588,72,55, 2,2014-01-12 16:39:36.025197,10,994,312, 2,2014-01-13 01:40:21.092457,80,963,783, 47,2014-01-12 12:50:38.48781,531,91,621, 32,2014-01-11 18:30:48.389012,589,27,481, 2,2014-01-11 
00:13:48.739986,992,982,735, 47,2014-01-20 02:50:48.631278,315,262,899, 2,2014-01-20 14:26:34.294759,704,814,445, 2,2014-01-20 17:21:26.961057,540,779,328, 32,2014-01-17 12:26:18.144847,303,478,772, 2,2014-01-17 00:55:21.314624,892,612,892, 47,2014-01-11 14:27:19.691931,732,427,969, 47,2014-01-16 15:57:52.601408,241,442,607, 47,2014-01-15 00:21:21.837663,839,652,860, 47,2014-01-18 03:38:44.719755,602,666,970, 2,2014-01-16 12:45:39.656269,161,333,492, 47,2014-01-19 07:54:44.938187,896,567,501, 2,2014-01-11 04:53:12.256734,333,756,622, 32,2014-01-20 04:37:00.976517,772,789,488, 32,2014-01-14 10:37:35.130969,201,59,443, 2,2014-01-18 01:26:50.668016,763,62,733, 2,2014-01-14 10:38:54.217158,545,203,997, 32,2014-01-15 19:52:56.929707,865,277,469, 2,2014-01-13 23:17:19.388096,866,299,629, 2,2014-01-11 22:23:58.692746,569,464,962, 2,2014-01-12 17:52:07.403056,642,335,567, 47,2014-01-18 13:34:04.922641,447,316,604, 47,2014-01-11 15:44:33.361812,250,461,889, 2,2014-01-19 17:44:47.312716,73,554,770, 32,2014-01-16 20:01:25.414004,714,738,281, 2,2014-01-13 15:12:12.988353,318,810,447, 32,2014-01-16 07:38:45.070557,575,416,338, 32,2014-01-18 16:41:01.4392,472,498,965, 32,2014-01-15 20:52:34.08081,176,676,659, 95,2014-01-18 00:29:23.558369,892,99,500, 95,2014-01-19 15:11:49.828212,12,238,728, 27,2014-01-16 01:06:22.820796,584,494,127, 95,2014-01-16 21:20:12.189647,546,485,759, 27,2014-01-18 23:38:24.122972,299,522,492, 27,2014-01-15 12:27:49.782273,32,157,444, 27,2014-01-13 03:46:30.334493,576,634,937, 95,2014-01-14 18:33:45.438015,625,768,110, 95,2014-01-16 06:55:09.474204,10,823,789, 95,2014-01-18 23:44:22.936321,70,79,762, 95,2014-01-18 22:55:07.275677,223,140,969, 27,2014-01-13 04:33:54.194378,933,43,974, 27,2014-01-14 06:30:02.59259,819,48,931, 27,2014-01-17 00:49:52.213865,258,261,370, 27,2014-01-13 13:27:06.451112,582,613,325, 95,2014-01-13 05:07:03.569106,312,231,50, 95,2014-01-20 15:27:27.344833,386,16,774, 95,2014-01-20 18:53:57.119547,956,33,69, 27,2014-01-16 19:45:57.786684,251,255,914, 27,2014-01-15 05:20:24.274541,682,416,148, 95,2014-01-13 12:11:16.507643,389,172,12, 27,2014-01-17 22:09:56.479431,834,930,226, 27,2014-01-17 03:12:31.677597,104,892,55, 27,2014-01-21 05:45:21.430702,904,330,812, 95,2014-01-18 17:54:29.841662,758,121,432, 95,2014-01-15 14:57:18.990731,271,947,9, 95,2014-01-16 19:46:46.844706,174,753,492, 95,2014-01-11 03:06:42.830016,575,905,996, 95,2014-01-21 02:36:04.061289,508,267,519, 95,2014-01-14 06:27:48.283907,974,677,185, 95,2014-01-16 03:59:16.910825,421,184,457, 95,2014-01-17 21:05:27.619657,250,364,502, 95,2014-01-12 15:39:38.112539,408,640,450, 27,2014-01-10 23:05:39.732041,506,374,299, 27,2014-01-13 12:25:39.809304,137,983,907, 95,2014-01-20 19:18:02.23551,453,16,227, 27,2014-01-15 15:33:29.514314,241,404,185, 95,2014-01-15 20:12:10.144216,894,879,324, 27,2014-01-14 07:51:47.673525,105,173,341, 27,2014-01-21 02:28:38.988517,93,604,842, 27,2014-01-13 13:56:33.080536,36,434,209, 27,2014-01-12 00:46:54.949202,114,97,710, 95,2014-01-13 01:02:33.182895,596,898,403, 95,2014-01-17 00:26:35.673126,143,420,787, 95,2014-01-18 05:36:47.163067,375,281,945, 27,2014-01-15 18:29:39.634426,718,450,442, 95,2014-01-19 09:33:39.242652,440,545,159, 27,2014-01-17 15:04:14.5079,150,199,790, 27,2014-01-15 07:23:36.753973,851,829,585, 27,2014-01-14 23:19:37.029336,985,507,435, 27,2014-01-11 14:24:38.782859,802,929,109, 27,2014-01-17 23:34:53.261616,954,231,624, 95,2014-01-11 15:29:33.508767,89,35,149, 27,2014-01-17 21:37:10.460456,333,173,794, 27,2014-01-17 23:20:14.692319,57,210,719, 
27,2014-01-19 13:24:03.350429,262,109,519, 27,2014-01-12 06:34:29.451187,600,950,404, 27,2014-01-13 13:07:01.537025,149,911,175, 95,2014-01-19 20:30:46.180444,645,512,624, 95,2014-01-12 03:10:33.512476,650,625,998, 95,2014-01-16 23:34:49.821351,297,410,655, 27,2014-01-14 18:30:03.091689,502,417,264, 95,2014-01-19 04:16:01.132133,678,106,84, 95,2014-01-18 19:14:27.93389,295,726,416, 27,2014-01-14 21:35:42.823729,967,218,361, 27,2014-01-11 10:41:40.941856,134,340,186, 27,2014-01-18 08:32:30.169399,497,703,675, 27,2014-01-19 17:09:12.338042,544,384,635, 27,2014-01-16 10:53:51.086072,132,76,207, 95,2014-01-11 10:24:17.843343,207,69,427, 95,2014-01-19 13:37:51.326559,83,103,624, 27,2014-01-19 04:50:24.166608,757,440,824, 27,2014-01-12 15:11:12.792546,376,697,38, 27,2014-01-11 08:40:24.509873,944,421,852, 95,2014-01-14 23:16:59.839734,507,993,343, 27,2014-01-20 00:47:59.956031,129,707,923, 27,2014-01-16 07:10:04.144299,240,238,283, 27,2014-01-13 02:50:39.082386,878,568,575, 95,2014-01-16 09:52:14.463931,453,730,959, 27,2014-01-20 18:33:40.898272,983,833,484, 27,2014-01-17 06:10:16.111722,226,510,238, 27,2014-01-17 04:16:53.246791,514,525,509, 95,2014-01-17 12:08:34.159889,585,535,259, 95,2014-01-18 01:39:49.620908,988,245,462, 95,2014-01-13 19:54:03.707666,109,951,902, 95,2014-01-14 05:28:48.852626,188,472,46, 95,2014-01-16 09:03:52.971337,697,31,26, 95,2014-01-15 06:28:33.158853,950,646,152, 95,2014-01-16 22:35:50.38965,216,859,676, 27,2014-01-14 23:34:39.152201,839,956,261, 27,2014-01-16 13:39:06.671748,156,940,981, 27,2014-01-12 16:10:40.211001,111,533,906, 95,2014-01-18 22:04:42.24389,714,450,62, 27,2014-01-14 11:55:07.803881,91,971,894, 27,2014-01-20 15:25:08.144891,408,121,272, 95,2014-01-12 13:40:25.067618,580,796,789, 95,2014-01-15 02:36:48.745738,522,750,863, 95,2014-01-17 17:57:38.314709,643,840,852, 95,2014-01-11 00:49:37.40566,216,425,957, 95,2014-01-20 17:30:39.831809,947,547,163, 95,2014-01-18 08:21:56.158052,971,107,722, 95,2014-01-19 18:27:28.732219,355,166,544, 27,2014-01-18 16:21:03.998418,515,877,726, 95,2014-01-20 03:33:08.950598,248,533,25, 95,2014-01-20 07:07:53.242092,143,683,980, 27,2014-01-12 09:38:03.838571,842,502,153, 95,2014-01-18 22:21:08.906629,733,783,79, 95,2014-01-15 08:17:57.386392,963,155,944, 95,2014-01-14 16:28:42.920957,479,253,885, 27,2014-01-14 02:13:23.370979,64,8,612, 27,2014-01-14 20:51:38.284664,243,974,353, 95,2014-01-21 02:38:59.033099,231,130,141, 27,2014-01-20 10:30:16.61777,610,242,388, 95,2014-01-11 03:00:12.444971,202,695,509, 27,2014-01-17 22:18:48.654007,82,207,257, 95,2014-01-13 00:24:20.325435,273,907,884, 27,2014-01-14 12:29:01.297596,906,293,667, 95,2014-01-13 01:22:41.625345,475,976,35, 95,2014-01-17 10:52:53.484708,450,137,702, 27,2014-01-20 15:04:51.687247,558,929,670, 27,2014-01-17 04:57:20.777545,67,501,149, 95,2014-01-12 18:32:00.156456,65,799,418, 95,2014-01-12 01:15:31.898667,595,373,352, 95,2014-01-14 21:02:03.021434,988,917,462, 95,2014-01-16 10:27:07.960338,50,881,637, 95,2014-01-11 10:40:40.043557,258,78,284, 27,2014-01-16 14:42:28.089052,363,749,989, 27,2014-01-20 17:03:56.706495,884,576,167, 27,2014-01-18 08:38:18.358267,651,304,632, 27,2014-01-16 19:32:05.494713,506,579,9, 95,2014-01-20 04:34:36.538304,286,452,600, 95,2014-01-15 11:00:14.51632,823,401,888, 95,2014-01-15 07:59:34.227351,18,557,22, 27,2014-01-17 14:55:40.537142,664,926,299, 95,2014-01-14 08:33:23.467336,975,360,874, 27,2014-01-14 09:07:27.469443,366,626,942, 95,2014-01-19 04:33:44.375712,559,862,119, 27,2014-01-12 00:54:32.373966,247,858,992, 95,2014-01-18 
21:25:24.856253,667,524,999, 27,2014-01-12 15:02:27.29667,422,894,866, 27,2014-01-15 07:07:55.744945,125,594,773, 27,2014-01-12 12:17:03.140917,173,703,888, 27,2014-01-12 11:41:26.329768,389,220,70, 27,2014-01-14 11:38:12.363134,431,590,962, 95,2014-01-12 19:17:15.585887,73,222,424, 27,2014-01-19 14:00:14.983775,414,880,791, 27,2014-01-16 16:02:32.688571,995,632,670, 27,2014-01-16 11:46:16.883483,279,572,922, 27,2014-01-11 09:22:56.609539,381,993,371, 95,2014-01-12 20:55:26.173278,183,782,269, 27,2014-01-15 20:51:08.571149,154,578,824, 95,2014-01-17 18:20:17.387083,411,566,245, 27,2014-01-14 19:27:26.329734,668,148,283, 27,2014-01-17 02:06:40.469816,542,853,328, 27,2014-01-11 09:22:20.408936,809,128,633, 95,2014-01-20 09:54:34.290492,899,297,369, 27,2014-01-17 16:47:20.513373,56,891,578, 27,2014-01-17 04:04:48.49799,675,496,387, 27,2014-01-19 20:58:30.996985,225,14,687, 95,2014-01-14 19:25:38.872059,685,892,418, 95,2014-01-12 17:36:53.993121,890,156,574, 95,2014-01-18 19:33:07.53529,744,69,449, 95,2014-01-19 10:25:53.388379,942,368,861, 95,2014-01-17 05:36:28.220472,404,485,466, 27,2014-01-15 04:28:48.072431,832,71,613, 95,2014-01-12 12:59:16.855716,957,250,494, 95,2014-01-20 18:43:55.689915,544,992,498, 27,2014-01-13 03:02:32.448144,520,696,472, 27,2014-01-13 17:53:49.230101,112,844,214, 27,2014-01-18 10:09:20.546169,794,912,882, 95,2014-01-14 22:04:59.745232,346,377,384, 95,2014-01-18 05:01:44.975046,651,947,682, 95,2014-01-20 02:26:23.687504,190,542,360, 27,2014-01-16 13:46:26.075001,53,626,602, 27,2014-01-11 10:39:57.33818,695,604,735, 95,2014-01-11 15:43:39.273391,685,127,587, 27,2014-01-14 21:46:41.059195,283,587,912, 27,2014-01-17 06:42:30.02717,429,936,540, 27,2014-01-17 07:29:56.157293,712,388,808, 95,2014-01-15 11:09:37.668733,312,659,914, 27,2014-01-19 07:37:56.200448,867,897,142, 27,2014-01-11 22:21:04.728442,480,109,910, 27,2014-01-11 23:29:55.055817,721,899,108, 27,2014-01-12 21:05:22.530601,363,778,960, 95,2014-01-18 04:27:45.198257,443,535,307, 27,2014-01-12 12:52:15.464754,974,902,14, 95,2014-01-12 00:59:56.821093,899,179,763, 95,2014-01-14 15:15:05.71163,298,297,206, 27,2014-01-18 20:57:03.963162,42,366,632, 95,2014-01-21 01:58:27.818078,432,782,655, 27,2014-01-18 14:40:44.58369,422,164,797, 27,2014-01-20 18:33:57.956283,295,568,283, 27,2014-01-18 15:31:35.353369,155,699,162, 27,2014-01-16 19:06:37.972488,541,294,253, 95,2014-01-16 18:10:26.176755,305,94,568, 95,2014-01-12 14:00:23.426219,996,365,869, 95,2014-01-18 12:05:54.828203,307,75,96, 95,2014-01-16 06:54:21.86667,763,492,290, 27,2014-01-14 21:02:55.874363,981,919,492, 27,2014-01-10 23:59:44.058304,110,169,318, 27,2014-01-13 11:03:42.413257,554,905,652, 58,2014-01-18 23:07:55.619596,461,700,279, 58,2014-01-18 09:01:29.033769,270,676,256, 58,2014-01-12 07:30:06.100761,567,156,510, 58,2014-01-14 06:54:21.695016,567,994,122, 58,2014-01-18 23:41:26.371949,175,324,975, 58,2014-01-13 03:13:45.374152,723,910,486, 58,2014-01-18 08:41:02.75421,806,186,512, 58,2014-01-15 00:23:56.399119,185,553,966, 58,2014-01-19 14:43:41.531446,828,128,142, 58,2014-01-12 13:50:40.422944,764,742,719, 58,2014-01-13 02:01:52.599986,223,886,472, 58,2014-01-20 17:04:46.259887,587,178,158, 58,2014-01-13 17:20:35.478761,325,872,782, 58,2014-01-15 03:07:15.130588,626,254,694, 58,2014-01-17 15:32:31.458144,485,292,916, 58,2014-01-15 10:12:50.943934,960,427,511, 58,2014-01-16 08:07:11.95168,891,939,545, 58,2014-01-11 00:47:37.170193,259,643,382, 58,2014-01-13 01:09:54.907097,921,445,313, 58,2014-01-16 04:05:39.769759,832,807,943, 58,2014-01-18 
19:28:21.753882,513,153,657, 58,2014-01-12 13:43:52.863379,478,731,977, 58,2014-01-13 13:37:15.123128,421,558,298, 58,2014-01-14 08:34:59.72637,890,434,977, 58,2014-01-18 11:54:19.040134,697,471,366, 58,2014-01-15 07:37:38.549347,26,205,170, 58,2014-01-11 14:40:54.554993,310,864,510, 58,2014-01-13 12:48:40.906805,117,366,235, 58,2014-01-19 08:40:34.424129,30,658,437, 58,2014-01-11 18:40:38.613297,876,883,403, 58,2014-01-16 03:52:23.320062,34,490,271, 58,2014-01-17 01:48:30.043724,990,104,352, 58,2014-01-19 07:42:07.647066,121,92,811, 58,2014-01-17 15:22:29.420824,189,467,165, 58,2014-01-20 12:42:51.73874,724,594,873, 58,2014-01-17 01:23:34.019434,951,943,511, 58,2014-01-19 22:36:14.795396,2,86,311, 58,2014-01-17 15:23:54.49295,377,757,818, 58,2014-01-21 05:47:30.418553,364,678,687, 58,2014-01-18 07:19:56.326841,202,871,104, 58,2014-01-19 09:14:34.915894,719,13,425, 58,2014-01-13 01:49:23.01854,401,820,967, 58,2014-01-17 18:24:42.587147,789,639,569, 58,2014-01-11 20:35:10.395074,460,555,517, 58,2014-01-17 08:56:38.149127,185,212,545, 58,2014-01-14 03:57:14.045291,310,883,424, 58,2014-01-16 10:48:01.339008,408,239,324, 58,2014-01-12 11:03:50.100807,532,172,182, 58,2014-01-14 08:44:51.215483,287,852,297, 58,2014-01-18 15:57:56.246105,819,230,624, 58,2014-01-17 19:09:29.870567,849,404,458, 58,2014-01-11 22:13:12.969785,296,882,612, 58,2014-01-20 09:41:49.109484,409,90,445, 58,2014-01-20 12:46:44.994113,915,2,222, 58,2014-01-15 10:48:12.696155,768,452,536, 58,2014-01-17 15:36:08.149619,186,924,170, 58,2014-01-14 14:24:23.543459,487,834,505, 58,2014-01-16 05:29:07.251148,798,994,297, 58,2014-01-20 08:24:49.056424,163,561,94, 58,2014-01-12 17:04:57.967589,638,804,519, 58,2014-01-17 04:09:45.864864,455,919,613, 58,2014-01-15 06:17:12.376905,91,576,96, 58,2014-01-18 22:53:28.011732,733,907,144, 58,2014-01-15 05:51:53.511929,851,455,109, 58,2014-01-11 15:39:41.797729,18,626,258, 58,2014-01-18 05:36:19.750472,333,987,191, 58,2014-01-11 01:15:27.531364,57,649,616, 58,2014-01-20 18:15:56.593125,392,351,406, 58,2014-01-14 15:00:14.243841,232,838,520, 58,2014-01-11 01:02:57.950335,354,831,617, 58,2014-01-17 19:35:52.920385,292,182,900, 58,2014-01-12 18:14:49.159735,236,592,946, 58,2014-01-13 06:52:20.968875,424,696,309, 58,2014-01-14 08:00:35.507532,980,369,699, 58,2014-01-13 18:49:59.554809,985,106,801, 58,2014-01-19 19:48:59.118421,837,674,799, 58,2014-01-17 15:57:49.552823,101,585,156, 58,2014-01-19 09:38:00.893818,860,18,233, 58,2014-01-11 00:52:49.219228,69,265,606, 58,2014-01-21 04:42:40.768726,298,117,196, 58,2014-01-16 19:35:57.139922,216,271,139, 58,2014-01-18 00:02:19.090214,71,750,189, 58,2014-01-11 20:55:53.738511,257,198,194, 58,2014-01-15 23:17:46.249406,455,314,912, 58,2014-01-17 06:49:04.084326,410,279,667, 58,2014-01-16 11:44:06.434667,201,883,649, 58,2014-01-12 08:53:54.399026,681,490,746, 58,2014-01-21 01:13:27.627786,786,119,322, 58,2014-01-11 11:13:13.686233,422,611,51, 58,2014-01-11 11:18:43.455869,526,86,365, 58,2014-01-12 12:18:25.595794,344,185,249, 58,2014-01-17 19:22:59.551097,38,578,945, 58,2014-01-15 21:35:55.832775,723,503,660, 58,2014-01-20 15:11:53.607525,502,622,70, 58,2014-01-11 19:14:53.063026,706,242,591, 58,2014-01-16 17:15:37.630504,804,94,502, 58,2014-01-17 14:48:13.357997,130,488,823, 58,2014-01-12 00:30:20.594808,242,924,850, 58,2014-01-16 05:31:34.224048,24,830,642, 58,2014-01-10 23:48:27.601838,260,889,120, 58,2014-01-12 05:33:18.545145,147,387,715, 58,2014-01-12 19:07:27.144433,273,546,575, 58,2014-01-12 22:03:16.761992,242,964,653, 58,2014-01-14 
16:25:39.51402,304,117,111, 42,2014-01-16 07:08:02.651966,11,942,265, 42,2014-01-15 20:53:16.316801,151,952,820, 42,2014-01-13 06:14:38.63244,705,711,203, 42,2014-01-12 17:05:52.204789,522,550,162, 42,2014-01-14 00:31:17.211038,658,346,41, 42,2014-01-13 11:07:27.852087,237,259,49, 42,2014-01-12 15:48:32.973515,965,319,368, 42,2014-01-20 00:07:14.35096,847,309,909, 42,2014-01-20 15:09:46.9423,150,32,242, 42,2014-01-13 16:44:26.712026,766,725,208, 42,2014-01-14 17:25:00.600785,31,921,555, 41,2014-01-16 15:58:51.026626,345,635,354, 41,2014-01-19 08:28:33.147111,817,477,203, 42,2014-01-16 06:18:54.999811,83,93,639, 42,2014-01-16 11:12:18.654412,601,292,408, 42,2014-01-19 23:41:46.833345,28,407,758, 42,2014-01-16 21:37:38.45568,89,538,81, 41,2014-01-18 03:30:44.250206,777,736,69, 42,2014-01-16 13:04:46.384442,613,949,996, 42,2014-01-11 13:13:34.288455,757,669,334, 41,2014-01-17 12:42:37.857731,206,128,528, 42,2014-01-17 12:19:39.447469,931,506,821, 42,2014-01-17 10:29:11.919378,20,309,440, 42,2014-01-13 21:30:51.216148,553,702,852, 41,2014-01-18 16:50:00.042277,446,215,812, 41,2014-01-12 10:00:46.143426,683,310,395, 42,2014-01-14 01:19:18.817985,456,338,267, 42,2014-01-20 02:23:18.587422,36,361,700, 42,2014-01-14 09:08:13.287859,300,835,72, 42,2014-01-16 03:22:35.579977,209,50,914, 41,2014-01-13 12:48:58.10186,25,933,111, 41,2014-01-19 20:16:15.939825,546,320,365, 41,2014-01-21 04:15:51.897197,382,60,719, 41,2014-01-15 23:03:36.7343,158,493,949, 41,2014-01-11 07:22:08.144614,494,636,697, 42,2014-01-13 22:47:09.108234,259,453,590, 41,2014-01-18 14:11:04.586387,60,147,229, 42,2014-01-13 03:10:41.118547,101,91,830, 42,2014-01-12 16:54:23.459195,869,934,242, 41,2014-01-17 23:20:51.528687,642,850,244, 42,2014-01-15 23:55:07.830573,648,96,648, 41,2014-01-16 14:19:24.05998,269,54,65, 41,2014-01-13 09:19:42.555314,253,631,743, 42,2014-01-14 02:23:40.977684,751,456,300, 42,2014-01-11 14:38:19.060211,176,482,280, 41,2014-01-19 00:32:01.210145,358,412,7, 41,2014-01-12 20:05:27.811029,543,146,880, 41,2014-01-17 16:15:57.515892,276,464,72, 42,2014-01-15 22:02:45.460351,853,167,868, 42,2014-01-18 13:10:14.195472,911,9,733, 41,2014-01-18 09:29:31.804767,490,109,850, 41,2014-01-12 04:45:23.318501,180,914,842, 41,2014-01-14 19:29:53.643359,294,692,98, 42,2014-01-14 13:58:43.724145,734,492,699, 42,2014-01-15 06:16:14.534649,55,95,797, 42,2014-01-12 06:19:53.685636,923,638,679, 41,2014-01-16 03:59:29.867571,862,666,562, 41,2014-01-18 11:35:33.352634,125,5,162, 42,2014-01-11 02:43:12.273477,243,990,958, 41,2014-01-19 17:07:43.15543,425,13,923, 42,2014-01-13 08:58:08.93303,471,750,680, 42,2014-01-13 19:32:10.375337,813,722,324, 42,2014-01-18 07:23:59.095255,306,106,584, 42,2014-01-13 07:14:00.830228,437,795,959, 42,2014-01-18 22:35:47.109637,934,475,418, 41,2014-01-18 18:46:07.239869,772,85,428, 42,2014-01-16 10:01:09.938463,779,519,683, 41,2014-01-16 06:46:51.696444,432,769,773, 42,2014-01-21 01:56:48.358416,207,393,794, 42,2014-01-18 06:55:33.397657,278,971,34, 41,2014-01-13 00:07:43.225131,84,423,310, 41,2014-01-15 19:51:56.189407,474,225,514, 41,2014-01-13 15:14:57.458057,533,42,934, 41,2014-01-15 13:27:25.780864,344,320,856, 41,2014-01-19 02:15:37.167092,576,682,413, 41,2014-01-14 09:53:16.518269,224,307,159, 41,2014-01-13 07:59:26.991009,752,92,816, 41,2014-01-21 02:21:04.978541,719,210,336, 41,2014-01-10 20:09:14.03416,54,933,410, 41,2014-01-18 10:02:12.45136,493,599,732, 42,2014-01-18 09:31:19.174012,247,1000,95, 42,2014-01-18 09:38:45.838927,604,870,543, 41,2014-01-19 18:47:35.769861,718,701,472, 
41,2014-01-11 23:01:12.817372,842,422,781, 42,2014-01-11 17:37:29.563072,94,648,658, 42,2014-01-13 19:03:50.30451,15,567,547, 41,2014-01-13 09:21:06.503427,543,157,549, 41,2014-01-17 01:36:59.430643,732,408,567, 42,2014-01-11 00:39:23.657565,488,396,72, 41,2014-01-13 16:04:18.776904,3,754,898, 41,2014-01-15 12:44:42.586073,611,805,848, 41,2014-01-13 13:37:32.590595,300,579,905, 42,2014-01-16 15:36:29.152241,768,108,592, 41,2014-01-12 14:08:41.681328,468,325,331, 41,2014-01-16 00:51:33.420823,159,713,376, 42,2014-01-14 08:12:16.261878,313,302,543, 42,2014-01-20 12:54:48.921617,137,624,333, 42,2014-01-11 04:52:43.359285,986,39,71, 41,2014-01-19 18:59:07.958322,821,582,907, 41,2014-01-20 08:51:37.280032,358,172,498, 42,2014-01-18 15:48:16.756943,563,114,754, 41,2014-01-11 13:06:51.183873,982,140,408, 42,2014-01-14 22:43:33.46944,485,711,423, 42,2014-01-11 01:03:14.215,840,148,793, 42,2014-01-16 06:34:16.964736,356,725,162, 42,2014-01-12 18:59:10.63695,511,954,140, 42,2014-01-14 14:56:30.733268,253,626,335, 41,2014-01-18 18:33:43.955746,137,993,944, 42,2014-01-12 15:20:15.615492,378,518,299, 42,2014-01-14 15:05:44.767848,570,932,838, 42,2014-01-15 22:35:56.407106,448,690,171, 41,2014-01-20 04:51:34.789503,352,635,21, 41,2014-01-11 18:44:30.606774,751,593,379, 42,2014-01-14 11:23:32.177386,459,924,152, 42,2014-01-10 21:52:47.607294,972,695,259, 42,2014-01-12 16:22:00.170265,373,532,910, 41,2014-01-17 10:27:22.481898,411,193,7, 41,2014-01-13 11:13:54.110721,807,976,38, 41,2014-01-18 21:58:59.600907,864,639,809, 41,2014-01-17 15:06:46.139462,201,575,488, 41,2014-01-16 07:18:12.887625,387,999,106, 42,2014-01-13 04:43:42.1874,957,666,630, 42,2014-01-20 08:44:18.730057,48,408,45, 41,2014-01-11 16:54:42.039866,571,378,585, 42,2014-01-14 22:52:23.868728,12,251,453, 42,2014-01-15 03:35:52.150879,297,898,367, 42,2014-01-15 05:06:58.301745,393,647,466, 41,2014-01-14 05:47:12.790345,349,670,98, 41,2014-01-15 12:28:35.510165,255,883,979, 41,2014-01-13 18:06:06.260485,267,851,327, 42,2014-01-13 08:38:50.070796,969,84,666, 41,2014-01-12 22:16:52.267526,358,936,355, 41,2014-01-14 11:12:57.444358,334,862,939, 41,2014-01-17 11:22:23.540234,716,382,265, 42,2014-01-13 03:20:06.482526,713,555,524, 42,2014-01-19 21:47:14.409095,31,753,326, 42,2014-01-19 10:21:34.177185,109,471,185, 42,2014-01-16 22:16:37.216214,786,353,662, 42,2014-01-17 10:20:58.364841,435,994,690, 42,2014-01-21 05:41:49.792677,606,732,681, 41,2014-01-20 17:22:21.98406,703,451,289, 42,2014-01-12 02:56:54.771946,802,605,830, 42,2014-01-20 04:33:24.582599,313,435,794, 42,2014-01-11 06:06:52.590835,119,429,904, 42,2014-01-15 18:20:26.949333,945,380,550, 42,2014-01-20 06:26:12.189893,265,693,703, 41,2014-01-13 02:28:52.7611,99,148,508, 42,2014-01-11 22:47:49.43165,403,753,682, 41,2014-01-12 11:40:06.300614,644,85,564, 41,2014-01-21 04:27:52.362427,716,551,816, 42,2014-01-18 17:54:35.571111,777,371,442, 42,2014-01-17 22:58:19.188239,45,947,974, 42,2014-01-13 03:11:34.549827,350,678,766, 42,2014-01-17 20:38:54.301588,101,389,299, 41,2014-01-18 19:53:01.228104,451,821,768, 42,2014-01-17 06:03:58.418555,215,928,800, 42,2014-01-11 18:14:46.452468,307,321,982, 41,2014-01-12 18:59:59.529849,685,791,831, 42,2014-01-20 15:51:11.209319,286,390,389, 42,2014-01-16 10:43:21.963051,940,458,56, 41,2014-01-15 17:06:05.790334,809,488,276, 41,2014-01-12 18:30:01.280115,274,935,666, 41,2014-01-18 13:00:14.230577,506,582,447, 41,2014-01-19 08:19:03.234693,259,857,289, 41,2014-01-19 09:52:24.820349,832,633,368, 42,2014-01-20 20:20:20.713103,424,881,504, 42,2014-01-18 
00:06:17.643787,156,889,739, 41,2014-01-17 14:13:58.997954,982,557,134, 41,2014-01-16 12:36:57.929317,844,596,706, 42,2014-01-14 04:27:16.008628,978,6,902, 41,2014-01-17 13:55:48.79063,220,252,334, 41,2014-01-15 23:59:19.913377,664,529,983, 41,2014-01-15 11:24:10.780993,455,556,185, 42,2014-01-16 12:29:13.373229,477,596,451, 41,2014-01-16 10:06:12.504631,964,835,509, 41,2014-01-20 09:44:37.730326,997,592,774, 42,2014-01-15 12:55:25.563123,831,354,271, 41,2014-01-18 16:35:05.265731,663,67,94, 41,2014-01-11 02:32:27.161976,501,104,982, 41,2014-01-17 04:35:31.863736,637,139,893, 41,2014-01-18 15:02:57.628158,535,354,427, 41,2014-01-19 00:27:02.733507,431,337,472, 42,2014-01-13 21:33:51.051975,524,592,690, 41,2014-01-20 22:14:32.177985,273,482,762, 42,2014-01-15 15:05:57.035095,371,585,720, 42,2014-01-11 11:26:52.28008,660,985,711, 42,2014-01-16 22:18:30.59696,743,124,198, 41,2014-01-16 13:20:43.487562,880,432,340, 41,2014-01-13 10:26:51.810348,278,825,933, 41,2014-01-16 08:09:41.806279,117,708,581, 42,2014-01-11 18:04:05.450614,875,128,127, 42,2014-01-18 07:32:57.600683,644,131,874, 42,2014-01-18 06:39:43.086394,338,845,328, 41,2014-01-19 11:04:19.681191,469,342,811, 42,2014-01-16 09:52:00.835795,871,95,907, 41,2014-01-16 10:32:07.906743,885,430,888, 42,2014-01-19 01:24:40.394713,166,34,121, 42,2014-01-13 03:58:18.479582,915,195,664, 42,2014-01-12 18:46:06.904697,47,469,504, 42,2014-01-14 08:01:38.324029,106,55,699, 90,2014-01-16 12:25:34.488629,891,186,759, 90,2014-01-19 12:41:55.695746,898,249,738, 81,2014-01-19 12:00:58.237825,966,946,809, 90,2014-01-21 03:49:45.269622,916,224,41, 30,2014-01-14 19:11:09.068975,716,448,315, 76,2014-01-14 16:07:10.742456,224,40,499, 23,2014-01-20 07:34:22.999948,214,991,539, 90,2014-01-19 12:06:34.632517,585,133,264, 9,2014-01-12 02:42:16.008189,54,652,816, 76,2014-01-20 14:06:50.162343,628,824,160, 30,2014-01-15 10:42:06.496254,327,151,357, 76,2014-01-19 21:45:13.636767,653,28,797, 81,2014-01-18 08:33:52.89585,885,198,494, 81,2014-01-18 12:15:57.548649,66,1,913, 90,2014-01-19 13:59:45.814752,316,330,464, 76,2014-01-12 17:39:49.930945,92,348,820, 80,2014-01-19 03:42:49.828728,264,726,509, 80,2014-01-15 06:18:16.411712,674,862,629, 30,2014-01-18 11:00:33.418507,218,76,713, 30,2014-01-11 08:09:41.639077,791,42,720, 76,2014-01-20 18:27:58.217991,361,87,943, 81,2014-01-19 09:04:38.86954,181,139,9, 30,2014-01-18 19:42:39.240179,529,760,745, 23,2014-01-17 19:07:41.304384,94,680,39, 76,2014-01-17 14:08:58.55073,147,388,429, 80,2014-01-13 23:34:40.075974,36,116,224, 30,2014-01-12 23:39:49.211546,521,941,929, 81,2014-01-15 09:33:38.945443,591,530,930, 23,2014-01-16 07:32:58.555556,17,924,180, 80,2014-01-14 22:25:56.116243,59,37,482, 76,2014-01-18 21:35:17.269891,607,393,48, 23,2014-01-11 13:58:33.044185,887,655,408, 81,2014-01-13 05:07:51.811989,406,296,846, 81,2014-01-17 03:36:15.507717,81,695,521, 30,2014-01-11 11:48:18.313807,206,478,973, 9,2014-01-17 04:19:00.881383,528,581,385, 81,2014-01-20 23:43:26.250173,577,153,750, 90,2014-01-20 23:22:41.314175,649,615,492, 90,2014-01-15 10:25:35.5139,121,152,223, 90,2014-01-11 20:25:42.258361,158,152,758, 90,2014-01-20 07:29:31.476518,646,114,108, 30,2014-01-20 01:07:42.010154,371,343,883, 9,2014-01-20 22:10:55.895128,253,369,938, 9,2014-01-17 10:03:24.372368,222,937,403, 81,2014-01-17 07:23:39.558803,906,240,734, 90,2014-01-19 06:10:41.710299,609,764,811, 81,2014-01-19 07:43:14.303313,173,708,184, 90,2014-01-15 05:06:29.38795,339,89,687, 90,2014-01-13 06:28:58.122011,763,850,601, 9,2014-01-16 12:43:47.72182,315,19,79, 
80,2014-01-15 17:16:11.027027,419,865,317, 80,2014-01-12 18:56:56.340422,845,294,652, 76,2014-01-14 15:48:26.591359,181,329,710, 23,2014-01-13 06:58:50.267206,246,140,621, 9,2014-01-19 18:04:37.644806,476,47,257, 81,2014-01-10 23:57:25.142508,190,156,968, 76,2014-01-16 10:33:30.343179,200,242,201, 9,2014-01-11 11:44:26.856352,961,485,845, 80,2014-01-15 13:31:04.087951,908,577,164, 30,2014-01-11 12:06:28.899154,472,9,667, 9,2014-01-15 14:10:22.973014,467,979,347, 90,2014-01-13 05:06:21.357843,21,99,948, 81,2014-01-12 06:05:01.94334,432,469,893, 76,2014-01-17 23:18:14.785003,858,905,265, 23,2014-01-19 12:42:36.865558,162,868,213, 23,2014-01-12 21:53:20.257567,971,57,959, 81,2014-01-13 21:37:15.666385,820,22,770, 81,2014-01-19 06:26:03.115732,534,715,646, 30,2014-01-12 15:16:01.571741,223,936,27, 80,2014-01-18 12:02:51.180285,879,933,425, 30,2014-01-20 06:51:45.374513,880,768,923, 90,2014-01-11 16:45:33.048258,232,882,189, 9,2014-01-17 07:10:33.190859,99,832,150, 81,2014-01-19 23:02:41.269641,64,630,24, 30,2014-01-18 06:48:57.420626,926,485,177, 9,2014-01-13 08:34:12.749661,653,880,807, 30,2014-01-17 23:13:22.979941,162,743,757, 76,2014-01-16 08:32:11.723939,947,534,843, 81,2014-01-17 17:40:42.137612,696,874,591, 9,2014-01-20 09:42:21.102371,753,245,553, 80,2014-01-11 15:15:59.445758,856,208,882, 76,2014-01-12 04:56:53.165058,20,45,785, 80,2014-01-11 22:39:17.442792,671,734,231, 23,2014-01-15 11:04:26.037536,214,900,559, 76,2014-01-14 15:55:43.432264,37,287,818, 81,2014-01-20 20:43:55.087598,61,392,608, 30,2014-01-15 15:01:51.180045,46,568,97, 90,2014-01-20 06:29:13.775863,532,614,543, 23,2014-01-11 02:28:21.94437,848,42,538, 81,2014-01-20 08:32:55.267996,767,381,40, 76,2014-01-20 22:35:42.675018,565,102,786, 23,2014-01-15 20:38:44.917383,513,11,917, 80,2014-01-12 07:39:16.626257,198,572,781, 30,2014-01-11 22:40:44.618357,240,100,393, 90,2014-01-12 13:56:59.702386,857,710,376, 90,2014-01-21 00:21:53.491816,433,971,794, 90,2014-01-14 00:34:04.875923,729,769,370, 90,2014-01-15 15:34:15.369191,452,577,818, 23,2014-01-19 00:47:56.607967,954,940,791, 23,2014-01-15 19:50:06.447664,17,921,728, 76,2014-01-12 21:37:06.549477,284,25,318, 81,2014-01-18 01:39:41.98248,780,79,397, 9,2014-01-16 16:35:39.495922,980,742,689, 90,2014-01-19 08:47:39.740335,405,748,169, 80,2014-01-16 18:42:23.252121,86,994,302, 9,2014-01-13 17:24:36.916548,687,909,585, 9,2014-01-11 11:21:52.490417,476,298,109, 9,2014-01-13 11:55:46.23248,577,227,667, 30,2014-01-19 05:56:48.640486,776,109,966, 81,2014-01-18 09:02:34.628029,542,10,845, 23,2014-01-12 15:38:07.334851,463,926,408, 80,2014-01-20 01:12:48.086664,839,683,854, 80,2014-01-19 17:59:27.793086,61,574,80, 76,2014-01-13 18:17:24.777643,744,902,498, 80,2014-01-14 06:17:14.1242,199,417,870, 80,2014-01-13 03:55:11.22577,109,908,562, 30,2014-01-13 09:01:19.865241,988,553,673, 76,2014-01-19 01:19:05.304245,592,96,492, 23,2014-01-12 04:24:25.001632,367,252,27, 81,2014-01-13 15:29:41.80961,147,776,600, 9,2014-01-18 03:52:00.572241,920,277,648, 30,2014-01-11 21:00:07.67665,767,869,491, 9,2014-01-18 16:08:26.726994,419,451,294, 80,2014-01-19 15:31:17.198498,848,481,751, 80,2014-01-12 23:40:52.295007,596,380,100, 90,2014-01-20 10:05:26.429799,771,354,261, 90,2014-01-19 09:53:10.690733,49,986,145, 23,2014-01-16 04:14:57.171349,104,76,367, 81,2014-01-14 19:39:41.79899,323,990,876, 9,2014-01-17 04:41:07.298701,268,80,496, 9,2014-01-21 04:05:03.619013,469,885,938, 30,2014-01-16 21:16:48.348467,465,103,391, 76,2014-01-14 00:20:49.28118,88,902,690, 76,2014-01-16 14:40:43.115354,495,124,515, 
81,2014-01-15 00:04:28.089222,341,712,395, 30,2014-01-19 23:03:12.533301,756,508,174, 23,2014-01-19 12:05:20.031902,837,490,901, 76,2014-01-15 15:26:20.579639,173,716,691, 81,2014-01-12 04:58:58.765781,779,142,495, 90,2014-01-17 12:02:08.672808,83,661,809, 90,2014-01-12 18:28:55.207668,288,706,856, 76,2014-01-14 00:37:06.100632,737,164,151, 90,2014-01-16 07:14:56.759473,957,113,562, 80,2014-01-11 06:28:23.001173,255,124,619, 80,2014-01-16 22:54:30.878275,307,373,704, 81,2014-01-19 17:32:10.883673,982,191,995, 90,2014-01-13 14:23:34.226944,126,712,420, 76,2014-01-19 11:55:50.743935,19,641,543, 30,2014-01-17 12:51:16.187918,617,250,645, 23,2014-01-14 22:47:59.228576,277,757,998, 76,2014-01-11 21:25:32.553546,597,146,359, 23,2014-01-14 10:43:16.760159,177,386,303, 30,2014-01-15 23:48:06.905226,395,765,383, 90,2014-01-19 17:33:59.280959,53,972,838, 81,2014-01-12 20:14:33.959076,325,842,731, 23,2014-01-18 03:28:59.200232,57,361,763, 90,2014-01-18 21:39:25.710758,127,213,628, 30,2014-01-11 00:07:44.64981,804,143,37, 80,2014-01-13 01:43:56.371581,957,939,820, 81,2014-01-12 11:19:07.509749,107,756,684, 30,2014-01-17 08:48:51.94851,731,38,502, 9,2014-01-12 23:48:59.991014,217,180,171, 81,2014-01-18 12:35:55.858216,348,684,732, 23,2014-01-20 13:09:41.229692,275,971,588, 81,2014-01-18 18:29:43.106368,993,703,383, 90,2014-01-12 06:40:23.947437,709,282,685, 90,2014-01-19 06:12:53.762993,176,799,47, 23,2014-01-17 00:35:03.138271,902,136,307, 23,2014-01-17 02:06:44.527076,400,716,485, 90,2014-01-20 15:11:52.529194,921,294,272, 76,2014-01-13 06:37:11.811079,390,709,994, 76,2014-01-19 00:35:39.735161,840,843,44, 90,2014-01-13 09:48:58.629826,89,180,267, 81,2014-01-18 17:52:08.570551,169,19,89, 23,2014-01-19 11:04:02.736335,177,261,912, 76,2014-01-19 12:43:29.508521,701,479,799, 30,2014-01-17 05:24:19.454224,353,724,582, 30,2014-01-11 19:27:36.963278,491,782,122, 81,2014-01-17 18:39:20.252456,310,775,121, 80,2014-01-13 12:15:35.642143,257,672,667, 81,2014-01-15 22:15:36.191854,97,35,11, 23,2014-01-18 20:04:52.806002,675,755,263, 23,2014-01-17 02:58:52.40272,529,383,514, 76,2014-01-21 02:03:43.09708,370,243,19, 30,2014-01-17 07:38:52.086961,175,289,809, 23,2014-01-19 03:13:26.361797,947,688,913, 23,2014-01-17 23:32:42.297313,479,817,780, 81,2014-01-14 23:18:17.79772,159,767,867, 90,2014-01-19 07:21:11.011606,207,441,671, 23,2014-01-20 05:16:38.669314,66,397,314, 9,2014-01-16 14:37:25.307469,954,153,482, 30,2014-01-15 10:10:02.960117,215,106,126, 90,2014-01-11 23:05:38.660327,473,585,449, 9,2014-01-13 21:13:21.166103,366,736,580, 81,2014-01-14 17:19:44.189808,664,851,122, 80,2014-01-19 21:35:21.766695,251,995,916, 81,2014-01-15 07:53:45.113541,630,42,330, 23,2014-01-12 17:32:37.952801,817,363,667, 23,2014-01-15 16:10:24.904966,336,407,690, 23,2014-01-11 04:00:29.641034,716,685,647, 9,2014-01-12 02:44:30.481995,804,736,805, 23,2014-01-18 02:47:36.716464,186,22,658, 30,2014-01-19 08:36:09.376197,950,590,573, 76,2014-01-14 16:33:29.11224,100,947,989, 9,2014-01-15 14:39:45.287016,534,956,625, 76,2014-01-17 13:40:12.112532,292,147,986, 90,2014-01-12 23:16:58.62076,995,657,106, 23,2014-01-11 14:04:04.741658,604,675,593, 76,2014-01-18 13:07:49.07581,928,384,39, 9,2014-01-19 21:56:18.873217,666,958,183, 23,2014-01-14 06:19:40.383801,746,932,436, 90,2014-01-13 05:23:25.267665,394,619,564, 30,2014-01-17 12:01:11.67922,728,944,331, 9,2014-01-20 13:18:32.786521,959,795,642, 30,2014-01-13 01:27:08.364745,304,208,677, 90,2014-01-13 13:40:03.7666,470,696,449, 81,2014-01-18 10:31:59.148318,393,59,79, 9,2014-01-20 
04:59:50.662057,793,90,360, 30,2014-01-17 16:58:21.564321,13,971,429, 9,2014-01-16 11:53:10.159924,178,408,10, 9,2014-01-19 04:16:29.33137,874,227,182, 90,2014-01-13 01:35:46.872208,600,923,724, 23,2014-01-21 02:03:13.120042,742,768,195, 9,2014-01-20 07:22:07.991697,38,914,115, 81,2014-01-16 02:49:08.038311,957,713,751, 76,2014-01-14 13:22:57.30985,277,662,201, 30,2014-01-18 22:57:29.758393,545,247,359, 81,2014-01-20 14:42:53.151851,481,833,192, 76,2014-01-16 10:55:35.26307,54,584,569, 23,2014-01-13 09:07:54.663777,205,126,900, 76,2014-01-20 22:43:22.792886,966,118,973, 90,2014-01-17 17:40:05.745065,572,750,24, 80,2014-01-20 15:55:31.380241,124,681,453, 81,2014-01-19 01:19:32.169083,411,76,972, 23,2014-01-11 04:13:34.857305,994,124,358, 30,2014-01-15 00:35:16.667256,1000,997,272, 9,2014-01-15 08:59:44.281615,296,361,774, 23,2014-01-13 07:30:33.478066,161,426,978, 80,2014-01-15 18:39:21.408915,601,660,123, 30,2014-01-12 16:07:33.357425,864,983,177, 9,2014-01-11 23:26:52.351282,480,254,56, 81,2014-01-19 04:59:01.793134,617,214,989, 81,2014-01-15 01:30:58.62509,69,708,77, 80,2014-01-18 15:28:04.030922,669,603,70, 90,2014-01-18 12:17:34.579655,113,308,557, 30,2014-01-17 06:58:06.989833,895,247,576, 90,2014-01-10 23:08:07.797522,562,504,796, 23,2014-01-15 16:49:33.728392,173,137,577, 76,2014-01-16 05:57:57.652309,770,938,168, 81,2014-01-17 20:06:29.361843,554,952,391, 81,2014-01-10 22:42:43.888317,816,983,574, 30,2014-01-14 04:14:26.98368,582,661,18, 9,2014-01-20 01:42:16.234051,500,244,516, 23,2014-01-21 04:45:57.008777,19,474,498, 9,2014-01-13 05:36:34.975796,75,706,352, 80,2014-01-14 22:31:24.272361,554,275,85, 80,2014-01-14 12:08:54.318627,780,667,892, 90,2014-01-10 22:34:04.734189,647,56,54, 30,2014-01-14 07:14:17.424632,351,188,890, 76,2014-01-20 03:04:29.581697,223,257,3, 90,2014-01-13 11:41:59.397966,64,99,15, 76,2014-01-13 23:57:40.217519,149,625,885, 81,2014-01-16 14:44:35.32718,268,386,180, 90,2014-01-12 21:37:30.778206,11,458,377, 90,2014-01-11 19:17:12.387021,479,651,327, 23,2014-01-16 22:58:10.184485,581,612,90, 90,2014-01-17 02:12:47.445881,73,427,109, 30,2014-01-16 08:16:56.668635,203,278,660, 76,2014-01-19 10:28:43.66297,4,641,422, 23,2014-01-11 14:52:08.854795,280,92,8, 9,2014-01-18 04:24:30.026479,715,975,20, 30,2014-01-20 13:55:36.014253,935,754,148, 23,2014-01-19 23:51:10.647929,511,431,793, 23,2014-01-11 23:55:28.651569,233,811,24, 90,2014-01-17 23:23:40.045174,567,895,631, 23,2014-01-17 06:08:45.228003,566,571,923, 80,2014-01-18 10:53:35.641822,380,646,275, 80,2014-01-18 02:31:47.842696,938,472,437, 9,2014-01-11 16:58:18.956396,658,456,349, 30,2014-01-13 10:51:33.294131,292,736,314, 30,2014-01-14 16:38:17.204957,325,857,33, 30,2014-01-11 19:41:02.844711,715,255,767, 30,2014-01-16 19:06:00.277811,653,347,244, 81,2014-01-13 12:20:33.439008,628,128,913, 90,2014-01-11 18:26:59.853489,981,675,972, 76,2014-01-19 04:42:35.253608,262,323,834, 90,2014-01-17 14:51:57.71179,599,360,842, 9,2014-01-15 10:35:54.172534,681,182,869, 76,2014-01-19 07:16:39.988216,96,516,725, 81,2014-01-21 02:06:15.136422,81,110,520, 76,2014-01-14 07:40:23.754231,977,696,92, 23,2014-01-11 12:58:39.386181,731,106,366, 23,2014-01-13 20:03:55.353941,525,875,926, 90,2014-01-20 02:24:59.081411,539,72,962, 90,2014-01-13 14:36:10.164806,334,984,596, 23,2014-01-14 19:21:07.740961,760,229,216, 90,2014-01-15 19:23:09.266317,400,898,336, 81,2014-01-19 20:48:57.610687,914,894,708, 81,2014-01-20 07:38:04.410016,811,734,387, 76,2014-01-13 23:51:52.929286,795,705,300, 23,2014-01-20 15:41:06.465482,940,470,817, 
30,2014-01-17 06:02:34.436496,27,713,124, 9,2014-01-13 07:47:28.94354,934,690,16, 9,2014-01-19 09:32:17.113831,940,113,724, 23,2014-01-18 09:58:03.088065,840,697,708, 80,2014-01-20 11:11:08.989132,12,310,859, 9,2014-01-15 09:41:02.341834,254,948,62, 30,2014-01-15 14:51:38.729886,379,665,389, 76,2014-01-17 07:42:56.831829,838,964,390, 30,2014-01-16 06:39:21.298229,563,127,21, 76,2014-01-18 05:43:12.024019,493,691,188, 76,2014-01-21 04:21:14.036786,610,594,431, 81,2014-01-17 06:20:24.142941,636,214,348, 90,2014-01-13 18:49:12.302249,433,498,305, 9,2014-01-13 10:41:47.476214,516,497,216, 23,2014-01-18 04:47:23.996849,711,432,973, 80,2014-01-11 17:31:47.555857,777,480,325, 90,2014-01-20 05:33:45.188003,861,453,453, 23,2014-01-12 09:23:18.169382,216,731,78, 9,2014-01-20 04:48:27.544073,340,454,202, 30,2014-01-20 01:40:00.324424,168,704,187, 80,2014-01-15 21:03:41.923614,651,276,420, 81,2014-01-20 21:47:06.930673,645,700,805, 90,2014-01-12 15:43:55.678365,816,763,98, 23,2014-01-14 17:28:41.005444,307,942,36, 30,2014-01-13 06:23:17.09548,861,893,293, 80,2014-01-16 15:05:03.419745,775,820,18, 76,2014-01-19 16:51:50.27176,215,594,190, 81,2014-01-11 21:12:14.706167,512,596,421, 76,2014-01-15 16:43:07.82976,886,112,414, 76,2014-01-12 10:43:43.201047,313,704,501, 9,2014-01-11 06:53:21.17165,279,776,430, 81,2014-01-11 16:45:42.266256,618,442,472, 9,2014-01-14 22:31:12.144586,445,840,899, 76,2014-01-19 20:25:38.28548,446,118,361, 23,2014-01-19 06:43:45.354321,375,954,204, 30,2014-01-14 03:42:21.133719,290,666,53, 23,2014-01-14 00:06:40.627315,747,237,329, 80,2014-01-13 15:35:24.084626,464,50,638, 30,2014-01-20 15:25:17.965967,343,651,133, 23,2014-01-19 10:46:01.925544,44,544,106, 90,2014-01-21 01:18:36.108645,695,179,753, 80,2014-01-20 13:46:32.002753,420,119,154, 81,2014-01-15 11:06:26.068904,854,839,707, 76,2014-01-13 14:07:48.410894,269,164,805, 23,2014-01-12 18:28:19.478967,581,607,783, 90,2014-01-12 09:53:50.065753,523,994,912, 9,2014-01-14 11:39:35.96717,639,166,490, 76,2014-01-11 18:02:04.66697,512,725,240, 30,2014-01-13 23:17:08.235135,635,566,776, 76,2014-01-13 10:28:03.511244,828,715,437, 81,2014-01-20 23:42:04.991394,373,12,355, 76,2014-01-19 00:20:50.158749,781,111,203, 81,2014-01-13 02:15:10.441917,879,28,784, 30,2014-01-12 09:26:00.670178,548,908,396, 23,2014-01-12 11:49:31.164194,973,996,333, 90,2014-01-15 12:38:27.537397,406,420,840, 90,2014-01-18 04:31:04.089923,865,147,857, 80,2014-01-10 22:41:21.435954,827,86,468, 23,2014-01-16 13:50:42.243564,274,144,907, 23,2014-01-12 15:14:11.919683,827,636,573, 81,2014-01-12 13:25:04.637001,897,736,553, 90,2014-01-17 00:44:03.415632,610,147,426, 76,2014-01-13 11:59:54.18594,874,504,666, 80,2014-01-16 15:56:16.782006,638,993,717, 81,2014-01-15 15:09:41.701113,392,464,562, 9,2014-01-11 12:43:39.54026,579,861,120, 80,2014-01-19 23:38:37.915725,348,249,887, 80,2014-01-18 19:16:22.328847,962,501,45, 9,2014-01-14 08:19:03.624887,738,826,896, 23,2014-01-19 09:03:55.881692,657,971,482, 30,2014-01-17 00:02:24.254391,616,981,680, 23,2014-01-14 03:37:39.733951,354,406,932, 76,2014-01-18 16:50:27.884864,7,384,698, 90,2014-01-11 05:08:50.323295,851,559,414, 81,2014-01-16 21:45:28.144845,292,559,175, 76,2014-01-20 15:18:47.363831,595,896,621, 76,2014-01-12 19:02:40.389048,216,22,892, 76,2014-01-20 13:25:04.112016,466,506,883, 30,2014-01-11 03:20:52.03122,185,469,284, 80,2014-01-15 22:19:48.624183,871,978,443, 30,2014-01-12 17:53:07.623678,953,594,481, 81,2014-01-10 21:02:57.022614,138,461,945, 23,2014-01-13 16:40:38.783352,689,809,185, 30,2014-01-15 
00:08:18.065595,327,460,534, 30,2014-01-12 10:28:57.692791,921,179,483, 90,2014-01-15 08:30:09.947546,50,220,830, 9,2014-01-19 16:46:45.60341,717,8,537, 23,2014-01-19 19:00:01.782714,248,997,123, 30,2014-01-15 11:11:31.383919,216,953,127, 76,2014-01-15 00:37:27.846974,122,626,55, 9,2014-01-11 04:14:13.702397,136,894,271, 23,2014-01-17 04:36:36.020919,656,928,846, 90,2014-01-21 05:21:31.262606,711,916,905, 23,2014-01-13 20:14:07.888336,555,571,761, 80,2014-01-12 14:32:52.802925,311,559,481, 23,2014-01-15 14:31:12.964139,515,547,637, 90,2014-01-14 12:57:47.429017,542,429,451, 23,2014-01-11 08:11:30.719069,231,388,893, 76,2014-01-13 03:47:35.292985,159,127,223, 80,2014-01-18 01:16:51.053904,387,496,56, 30,2014-01-19 21:15:26.600761,683,245,693, 9,2014-01-19 07:49:59.547376,611,585,386, 80,2014-01-10 22:54:30.787855,147,548,249, 30,2014-01-17 08:05:54.485625,573,206,21, 80,2014-01-19 16:58:49.871091,919,947,790, 30,2014-01-17 00:39:58.933119,244,816,677, 81,2014-01-16 17:24:41.849875,945,461,235, 90,2014-01-11 06:01:30.260139,769,416,313, 80,2014-01-16 08:05:03.045134,166,440,319, 80,2014-01-17 00:45:33.881095,529,9,232, 80,2014-01-16 08:21:18.884742,663,334,307, 90,2014-01-18 05:58:10.668812,868,86,333, 9,2014-01-17 01:48:30.903707,525,542,912, 90,2014-01-19 05:01:57.668094,526,936,992, 30,2014-01-12 00:06:28.734826,36,316,473, 23,2014-01-18 16:17:28.596918,989,733,709, 76,2014-01-13 07:32:07.61564,284,480,625, 76,2014-01-20 20:53:14.338236,409,198,833, 23,2014-01-17 05:17:30.379631,840,337,594, 9,2014-01-17 22:43:38.999557,102,285,306, 90,2014-01-14 15:30:42.18521,879,381,693, 9,2014-01-17 13:31:44.082447,888,106,778, 90,2014-01-13 21:20:15.020896,792,726,83, 90,2014-01-14 14:52:13.448235,429,782,989, 80,2014-01-20 13:45:51.970783,411,887,988, 23,2014-01-15 15:53:07.823821,902,69,828, 80,2014-01-19 09:23:26.412374,592,508,335, 23,2014-01-13 20:43:39.3998,712,867,191, 30,2014-01-16 04:04:38.54289,425,540,660, 23,2014-01-11 07:11:01.705359,286,903,965, 23,2014-01-21 02:00:30.454123,833,639,866, 9,2014-01-14 19:20:05.14407,89,753,531, 30,2014-01-19 19:01:01.252735,844,651,522, 9,2014-01-21 04:55:01.241978,882,560,780, 80,2014-01-10 21:25:59.629695,391,870,452, 80,2014-01-18 05:59:51.123826,782,237,426, 23,2014-01-16 23:35:00.175097,602,854,345, 76,2014-01-16 18:50:41.47957,151,257,805, 90,2014-01-18 16:01:21.384384,865,496,591, 90,2014-01-12 01:40:03.22065,669,497,440, 90,2014-01-12 13:36:15.360665,255,385,600, 90,2014-01-13 18:22:40.269126,833,391,686, 30,2014-01-19 11:38:13.889462,287,733,196, 80,2014-01-18 19:24:46.264791,728,257,785, 30,2014-01-11 17:24:37.93722,14,151,775, 81,2014-01-20 15:44:42.624288,65,522,402, 76,2014-01-16 05:42:14.861708,497,138,771, 9,2014-01-14 04:56:45.552858,887,226,842, 23,2014-01-20 06:37:56.962524,241,531,964, 80,2014-01-12 04:59:45.24134,972,307,231, 81,2014-01-10 21:40:24.552835,859,490,845, 90,2014-01-13 16:08:39.148153,471,239,727, 23,2014-01-18 22:31:29.323786,191,21,439, 90,2014-01-13 23:00:39.573731,357,455,83, 9,2014-01-17 11:00:52.596389,732,378,377, 81,2014-01-18 06:17:21.294989,763,28,577, 23,2014-01-18 18:53:47.397552,456,103,203, 81,2014-01-15 14:24:19.008763,399,148,669, 9,2014-01-10 21:01:00.69479,277,753,427, 23,2014-01-13 16:58:25.94086,347,973,915, 80,2014-01-16 01:35:20.714123,234,888,935, 76,2014-01-21 03:01:31.148912,994,160,838, 81,2014-01-17 16:18:31.084931,826,151,627, 23,2014-01-14 14:36:21.966858,474,313,103, 23,2014-01-21 01:56:32.391309,440,628,554, 90,2014-01-17 17:44:30.714626,192,555,922, 80,2014-01-11 
14:36:13.091103,437,336,734, 76,2014-01-16 19:31:32.566406,43,556,989, 76,2014-01-13 06:35:12.194616,778,127,231, 90,2014-01-19 10:37:34.475487,991,706,660, 76,2014-01-18 01:11:35.787056,689,162,385, 80,2014-01-15 00:11:27.555279,633,569,750, 30,2014-01-11 23:00:14.744613,124,345,995, 9,2014-01-16 06:49:49.676937,551,869,52, 90,2014-01-12 13:36:13.820071,522,300,953, 9,2014-01-12 20:24:52.681832,633,810,459, 9,2014-01-15 16:34:32.301225,382,291,802, 80,2014-01-17 23:18:28.681779,620,631,455, 80,2014-01-16 05:21:38.234691,276,905,993, 30,2014-01-14 17:12:29.264169,326,576,97, 81,2014-01-19 08:18:13.923537,880,191,270, 23,2014-01-16 07:02:02.787525,718,978,510, 80,2014-01-17 13:21:08.412322,518,292,1, 9,2014-01-17 00:49:43.247324,106,538,358, 23,2014-01-19 10:02:42.361256,638,992,190, 80,2014-01-13 18:22:01.00871,163,861,15, 80,2014-01-14 01:07:04.542313,95,620,847, 30,2014-01-16 22:56:29.759227,357,7,811, 81,2014-01-18 12:46:20.017473,304,611,569, 30,2014-01-14 02:08:05.237521,236,1000,563, 80,2014-01-19 19:54:55.700089,983,978,893, 80,2014-01-13 08:21:40.731596,314,946,327, 9,2014-01-13 23:09:36.386433,33,94,365, 9,2014-01-16 06:13:26.785019,664,718,106, 30,2014-01-17 02:58:02.698453,382,546,381, 9,2014-01-13 19:06:08.777743,767,412,63, 23,2014-01-12 17:57:57.499645,147,406,396, 30,2014-01-17 21:34:15.789556,980,771,70, 9,2014-01-19 18:37:41.344568,557,853,275, 76,2014-01-15 04:33:09.69426,155,682,737, 90,2014-01-16 02:11:50.265044,647,7,103, 80,2014-01-16 13:49:17.131624,740,75,397, 80,2014-01-19 08:44:37.249959,478,598,477, 30,2014-01-17 05:12:05.009656,768,483,166, 81,2014-01-11 14:39:06.808561,686,206,231, 9,2014-01-21 02:20:51.070029,960,913,906, 9,2014-01-19 05:36:57.691489,694,856,137, 81,2014-01-16 11:13:39.110205,42,512,256, 23,2014-01-17 19:39:19.752228,329,132,399, 81,2014-01-14 04:58:35.926598,32,404,800, 80,2014-01-20 08:26:08.374374,572,52,265, 81,2014-01-15 21:57:33.675766,90,109,244, 90,2014-01-19 16:00:38.714123,908,401,954, 80,2014-01-16 15:47:16.786695,980,420,403, 81,2014-01-11 16:47:16.923089,53,570,836, 30,2014-01-17 20:03:21.075799,145,474,93, 80,2014-01-19 14:09:17.795405,27,195,724, 30,2014-01-14 21:54:21.465821,57,804,273, 76,2014-01-13 12:59:50.835026,651,33,932, 90,2014-01-16 20:55:37.812878,104,900,553, 9,2014-01-18 04:02:26.703342,20,741,503, 81,2014-01-12 02:54:46.535114,323,962,456, 9,2014-01-19 09:17:18.544474,100,319,489, 76,2014-01-10 21:12:03.089775,686,168,182, 90,2014-01-17 13:08:13.320133,883,777,885, 30,2014-01-15 06:15:21.242928,387,458,391, 81,2014-01-13 20:18:11.867936,22,135,699, 80,2014-01-19 11:06:10.819778,24,637,285, 80,2014-01-11 21:49:37.032903,517,410,436, 30,2014-01-12 08:55:53.212503,913,167,916, 23,2014-01-13 09:39:20.514458,112,533,329, 81,2014-01-17 04:01:27.297947,921,296,358, 23,2014-01-18 02:45:10.344127,784,310,1000, 80,2014-01-11 12:23:57.764417,45,964,197, 23,2014-01-13 03:13:32.308023,175,460,756, 80,2014-01-18 21:24:17.153107,308,995,492, 81,2014-01-11 08:44:48.834447,44,361,413, 9,2014-01-11 02:50:29.999511,707,719,906, 30,2014-01-14 02:37:56.263312,434,330,135, 23,2014-01-18 08:24:08.586674,63,981,116, 76,2014-01-14 11:49:05.926108,748,586,491, 9,2014-01-13 05:04:04.637686,432,595,93, 80,2014-01-13 00:21:42.26244,627,411,885, 81,2014-01-12 21:49:44.640651,553,694,394, 90,2014-01-19 00:51:21.424381,70,908,886, 76,2014-01-13 21:08:59.185948,225,827,398, 9,2014-01-19 21:53:05.716449,523,670,503, 80,2014-01-17 09:00:39.219786,605,168,425, 30,2014-01-17 23:03:20.651768,133,868,325, 80,2014-01-12 04:52:56.551475,1,425,506, 
9,2014-01-12 23:56:17.032666,668,486,276, 9,2014-01-14 21:05:47.355109,368,779,121, 76,2014-01-13 11:47:43.08659,96,387,35, 90,2014-01-11 03:13:35.57714,305,577,419, 9,2014-01-14 22:17:50.444884,973,197,514, 9,2014-01-20 04:55:56.406723,468,309,316, 90,2014-01-15 13:28:56.820067,645,933,225, 76,2014-01-17 22:36:02.312821,660,193,198, 23,2014-01-18 10:02:07.226501,499,465,596, 9,2014-01-16 15:18:33.852971,246,988,16, 30,2014-01-19 11:31:55.525324,419,264,419, 90,2014-01-20 23:41:27.740959,40,393,280, 23,2014-01-12 13:20:01.151337,697,490,956, 30,2014-01-16 08:17:05.86987,508,767,332, 23,2014-01-11 06:05:25.505376,432,232,188, 81,2014-01-14 20:33:33.459359,21,665,544, 90,2014-01-13 23:41:23.022977,237,953,134, 23,2014-01-11 18:50:14.340242,832,999,375, 90,2014-01-15 03:24:03.458871,85,632,287, 23,2014-01-17 06:19:19.286289,430,224,313, 80,2014-01-19 07:14:22.926916,323,578,955, 9,2014-01-18 19:13:09.385399,164,258,610, 81,2014-01-19 15:23:23.923975,255,911,966, 76,2014-01-11 01:36:05.189775,525,808,342, 90,2014-01-20 21:02:54.026049,768,444,764, 76,2014-01-17 10:14:45.348356,35,202,859, 23,2014-01-14 02:45:04.375722,528,386,615, 23,2014-01-19 12:55:59.742499,842,216,724, 30,2014-01-13 13:15:24.568143,151,127,369, 90,2014-01-21 05:48:25.027491,620,647,441, 80,2014-01-20 21:48:56.293974,582,743,494, 23,2014-01-15 17:11:41.600808,737,80,785, 23,2014-01-14 20:54:12.3826,523,172,15, 30,2014-01-13 03:36:39.380564,960,457,927, 90,2014-01-16 00:25:17.177947,785,910,238, 30,2014-01-18 23:12:02.827484,517,967,896, 9,2014-01-12 02:32:35.787286,932,294,506, 81,2014-01-20 17:54:13.998015,149,618,959, 9,2014-01-15 15:48:05.140725,617,598,772, 30,2014-01-19 16:34:43.013787,110,152,910, 30,2014-01-16 03:12:47.851404,862,359,398, 30,2014-01-13 21:20:00.666048,908,351,254, 9,2014-01-19 10:16:10.755165,602,629,55, 76,2014-01-17 20:32:49.002742,481,802,632, 80,2014-01-19 09:37:06.535918,890,738,616, 30,2014-01-19 20:21:36.260541,792,978,453, 23,2014-01-11 11:06:22.462101,309,847,470, 80,2014-01-12 03:18:29.558895,770,334,577, 80,2014-01-20 19:11:50.600782,4,150,434, 23,2014-01-15 18:30:25.920972,623,93,726, 9,2014-01-18 13:37:48.845183,137,214,751, 90,2014-01-18 20:26:13.528118,147,801,954, 9,2014-01-13 07:43:35.306371,909,692,70, 90,2014-01-16 23:01:12.769158,372,916,356, 90,2014-01-19 02:02:18.717893,638,543,934, 23,2014-01-12 22:46:29.33242,678,810,481, 90,2014-01-13 03:15:58.117515,188,543,379, 80,2014-01-11 22:47:23.093615,637,91,648, 81,2014-01-11 05:42:29.074919,429,894,894, 90,2014-01-15 20:31:22.685657,63,622,721, 80,2014-01-11 22:35:48.121105,381,841,424, 30,2014-01-10 21:31:25.368893,451,271,104, 81,2014-01-20 17:43:04.286466,585,880,39, 90,2014-01-15 23:30:00.503705,891,337,547, 80,2014-01-13 05:08:04.749456,556,830,286, 76,2014-01-15 12:08:21.464413,806,984,718, 76,2014-01-13 16:42:03.331191,386,717,739, 81,2014-01-14 11:40:40.536743,185,394,572, 90,2014-01-15 00:02:35.462846,806,65,239, 90,2014-01-18 12:30:08.471915,678,741,839, 80,2014-01-12 22:15:23.550949,373,146,19, 81,2014-01-20 07:15:23.31425,193,705,402, 80,2014-01-11 03:50:09.138382,546,178,719, 23,2014-01-11 02:31:34.306114,390,939,113, 90,2014-01-16 21:48:12.316992,863,672,274, 76,2014-01-19 17:27:15.6743,955,60,209, 80,2014-01-20 02:53:10.566654,97,12,539, 76,2014-01-17 12:54:34.779093,536,360,134, 76,2014-01-21 00:45:45.233615,110,472,8, 9,2014-01-19 16:05:01.167856,133,298,853, 80,2014-01-12 01:25:00.700065,759,912,828, 80,2014-01-18 08:23:34.078798,935,159,820, 76,2014-01-17 06:31:14.695974,184,714,918, 23,2014-01-14 
13:08:36.006854,369,96,497, 81,2014-01-14 01:24:46.848376,231,85,771, 81,2014-01-15 02:33:33.413867,887,575,292, 9,2014-01-16 15:55:05.339274,606,336,363, 23,2014-01-16 08:40:44.965891,58,937,301, 76,2014-01-16 05:20:56.507481,792,324,921, 9,2014-01-17 01:37:34.41461,567,540,222, 9,2014-01-10 23:12:07.651968,569,717,440, 90,2014-01-17 07:56:44.628586,353,759,433, 90,2014-01-17 03:08:59.783503,425,898,420, 30,2014-01-20 20:55:11.938433,369,568,383, 76,2014-01-12 01:26:45.132292,575,272,622, 9,2014-01-19 12:17:04.533379,43,957,835, 80,2014-01-15 03:03:33.403265,100,963,17, 80,2014-01-14 22:08:48.463481,823,503,426, 81,2014-01-12 17:57:45.070121,395,838,583, 23,2014-01-19 07:06:08.866111,196,645,179, 80,2014-01-12 04:38:56.935816,166,345,893, 81,2014-01-14 20:13:08.62107,903,732,605, 23,2014-01-18 08:21:32.180361,6,998,380, 80,2014-01-12 12:29:06.074198,770,462,468, 76,2014-01-15 02:44:42.927183,390,568,308, 90,2014-01-14 00:09:44.497353,352,200,291, 23,2014-01-10 23:56:21.748498,585,555,274, 81,2014-01-13 23:37:53.494256,281,259,542, 76,2014-01-20 17:04:19.276445,385,978,709, 23,2014-01-21 04:42:06.982113,631,618,595, 30,2014-01-12 09:42:54.662113,716,185,914, 80,2014-01-11 12:29:19.976509,69,619,846, 23,2014-01-18 07:05:41.06133,653,96,330, 9,2014-01-18 20:14:09.358086,131,849,461, 90,2014-01-15 05:37:55.983364,853,287,402, 80,2014-01-11 02:30:27.909706,723,782,561, 90,2014-01-12 16:47:42.771953,891,76,432, 9,2014-01-21 01:33:01.323056,463,319,853, 30,2014-01-16 15:11:12.875597,854,929,610, 30,2014-01-18 02:08:39.279433,498,607,832, 30,2014-01-16 21:10:35.737667,394,108,470, 30,2014-01-16 18:23:20.527565,710,292,429, 9,2014-01-14 04:05:23.90802,715,548,839, 76,2014-01-12 18:19:35.521171,30,310,936, 30,2014-01-16 09:18:32.466416,377,544,238, 80,2014-01-15 09:32:09.040311,355,356,467, 76,2014-01-11 00:36:40.054549,999,461,535, 30,2014-01-20 16:22:05.869681,219,255,709, 9,2014-01-19 11:40:57.503793,568,742,752, 80,2014-01-12 22:34:25.12509,138,91,55, 9,2014-01-18 17:28:14.735792,366,417,124, 81,2014-01-20 20:19:54.439608,959,9,928, 23,2014-01-16 22:47:33.746158,620,808,551, 23,2014-01-15 19:49:46.916153,395,224,777, 76,2014-01-12 02:49:00.513806,873,215,210, 90,2014-01-21 05:32:16.67376,764,123,586, 23,2014-01-18 23:59:31.413506,74,639,329, 30,2014-01-12 06:45:22.262724,297,310,583, 9,2014-01-13 23:10:10.168018,173,634,613, 9,2014-01-18 11:03:50.689951,82,861,417, 80,2014-01-12 05:27:29.244836,963,470,289, 80,2014-01-15 12:53:04.83013,771,252,891, 90,2014-01-19 03:33:10.66646,978,504,706, 9,2014-01-19 16:33:10.306167,526,471,612, 30,2014-01-13 03:07:14.188217,172,101,405, 80,2014-01-13 03:11:06.650243,113,951,515, 90,2014-01-19 23:03:38.215872,885,668,717, 81,2014-01-14 23:54:56.96017,853,177,810, 76,2014-01-12 22:44:07.973299,808,856,617, 76,2014-01-15 08:14:51.091888,940,137,987, 90,2014-01-11 20:03:36.239603,206,8,886, 90,2014-01-18 23:54:43.710966,898,329,131, 90,2014-01-10 20:38:11.619452,317,43,887, 30,2014-01-15 04:09:00.147623,202,793,610, 23,2014-01-20 22:14:19.232136,491,276,434, 23,2014-01-16 09:56:44.085868,338,991,796, 90,2014-01-19 17:41:09.187934,964,942,131, 30,2014-01-21 02:50:59.287104,933,926,700, 9,2014-01-15 20:18:49.955549,398,167,573, 76,2014-01-17 23:22:06.691728,94,68,633, 23,2014-01-12 19:25:24.412194,160,551,497, 9,2014-01-13 07:47:04.691341,524,571,656, 30,2014-01-17 13:42:01.131755,352,251,394, 22,2014-01-18 22:12:58.158352,31,148,197, 22,2014-01-18 07:36:51.607494,305,540,172, 22,2014-01-18 20:31:01.645561,753,327,762, 22,2014-01-18 21:45:14.832113,936,692,499, 
22,2014-01-16 01:36:23.021,689,175,105, 72,2014-01-20 07:16:23.908285,103,897,705, 72,2014-01-11 14:55:25.00013,702,588,713, 59,2014-01-13 06:40:13.710951,424,54,267, 72,2014-01-11 06:43:53.153541,690,143,619, 59,2014-01-16 07:48:29.83026,768,120,573, 22,2014-01-11 04:13:24.37783,722,242,159, 22,2014-01-20 03:17:03.459707,113,435,746, 22,2014-01-18 14:55:44.018477,821,333,301, 59,2014-01-13 11:24:31.028074,967,396,700, 22,2014-01-18 20:20:41.675998,815,289,718, 59,2014-01-12 08:50:40.978648,864,933,200, 22,2014-01-15 14:08:39.001373,767,540,487, 59,2014-01-12 22:35:32.767885,859,808,226, 72,2014-01-13 08:54:17.218251,546,298,708, 72,2014-01-13 08:03:22.712339,592,813,68, 22,2014-01-12 23:13:44.387338,126,928,758, 22,2014-01-17 17:03:17.365874,787,816,991, 59,2014-01-13 00:17:41.944475,812,452,933, 59,2014-01-18 13:10:28.473625,947,144,424, 22,2014-01-16 04:44:26.553809,763,911,371, 22,2014-01-12 21:08:41.231579,32,126,446, 59,2014-01-13 03:29:18.429173,571,292,806, 22,2014-01-12 22:06:33.245956,403,469,995, 72,2014-01-14 20:34:05.643773,215,54,207, 72,2014-01-15 15:16:23.120933,764,705,669, 59,2014-01-19 15:48:34.377711,844,760,70, 72,2014-01-12 12:47:03.802544,77,121,872, 59,2014-01-12 16:53:14.728427,792,79,739, 59,2014-01-17 06:19:36.023273,290,928,320, 72,2014-01-20 14:32:18.634656,882,609,409, 59,2014-01-17 22:29:37.749846,749,816,318, 72,2014-01-16 07:35:59.931978,466,733,322, 59,2014-01-10 23:27:43.635206,717,91,488, 72,2014-01-20 09:09:51.460797,675,829,234, 59,2014-01-16 18:19:53.085519,605,590,790, 72,2014-01-16 11:16:13.465467,173,283,33, 72,2014-01-20 17:23:15.838627,805,748,178, 59,2014-01-15 15:36:56.545226,976,922,119, 22,2014-01-13 20:11:57.484364,63,655,808, 59,2014-01-12 22:47:46.8667,852,255,91, 72,2014-01-13 05:57:38.221224,75,112,994, 22,2014-01-15 09:02:38.46301,796,542,805, 59,2014-01-17 16:56:25.868073,999,134,235, 22,2014-01-15 08:33:10.98911,79,54,171, 59,2014-01-17 21:56:55.681681,525,427,34, 59,2014-01-20 04:59:48.580411,190,745,125, 59,2014-01-17 11:46:55.376867,964,159,390, 59,2014-01-14 09:00:13.047555,394,30,587, 59,2014-01-11 23:17:30.525305,194,423,488, 59,2014-01-14 18:57:23.850492,177,117,695, 72,2014-01-19 17:44:39.601783,742,471,786, 59,2014-01-14 00:26:11.756884,705,852,306, 22,2014-01-17 02:26:42.279665,129,989,445, 72,2014-01-11 09:51:12.84774,445,844,144, 72,2014-01-18 01:00:17.401076,643,449,442, 59,2014-01-11 11:43:05.400598,126,815,100, 59,2014-01-20 05:39:47.225451,436,515,107, 59,2014-01-19 17:47:21.20362,980,318,156, 59,2014-01-13 08:36:20.129444,313,900,345, 22,2014-01-16 05:59:23.248724,367,635,174, 59,2014-01-19 02:19:39.838276,798,21,588, 59,2014-01-20 11:05:57.87929,980,396,901, 59,2014-01-11 07:35:23.180702,778,536,843, 59,2014-01-19 05:47:23.473482,54,245,932, 72,2014-01-19 14:15:49.340086,957,778,161, 22,2014-01-17 05:55:16.266221,584,186,425, 72,2014-01-14 11:03:36.939369,666,885,712, 22,2014-01-19 01:39:05.179132,762,695,909, 72,2014-01-11 15:32:12.811866,2,742,537, 59,2014-01-17 11:15:34.423732,662,473,529, 72,2014-01-21 04:26:52.045832,769,445,291, 72,2014-01-14 01:29:51.033091,747,265,783, 59,2014-01-11 14:18:12.886743,913,162,632, 22,2014-01-17 15:23:17.914325,726,664,461, 72,2014-01-18 14:03:02.02262,170,709,480, 59,2014-01-18 16:15:08.568424,147,548,501, 59,2014-01-16 14:23:06.494736,952,561,788, 72,2014-01-14 19:49:57.399486,648,536,700, 72,2014-01-11 19:15:21.616399,963,694,446, 72,2014-01-17 17:40:37.020041,782,722,896, 22,2014-01-18 18:47:21.249978,936,266,514, 22,2014-01-20 17:00:01.218182,450,156,706, 22,2014-01-20 
22:06:48.777344,706,470,977, 72,2014-01-14 15:14:03.529643,695,966,621, 22,2014-01-10 20:51:14.065922,842,833,942, 22,2014-01-17 17:07:06.17842,644,673,713, 22,2014-01-15 06:57:08.93066,469,993,403, 22,2014-01-20 06:31:01.291373,225,995,521, 59,2014-01-16 04:54:27.38204,506,841,575, 59,2014-01-17 19:33:29.060104,797,777,11, 22,2014-01-15 06:30:24.540097,885,567,927, 72,2014-01-14 01:14:07.220316,219,276,624, 59,2014-01-17 00:39:26.939395,56,331,881, 22,2014-01-15 18:05:47.721218,376,995,783, 72,2014-01-12 01:01:30.694218,741,543,87, 72,2014-01-15 08:55:16.279481,588,754,164, 72,2014-01-11 18:01:03.987439,680,312,1000, 72,2014-01-15 16:05:07.633585,472,873,723, 59,2014-01-13 04:34:21.458614,386,305,958, 22,2014-01-12 13:33:16.799305,878,433,322, 22,2014-01-11 21:20:42.057318,605,757,511, 72,2014-01-13 03:01:13.504866,228,589,633, 72,2014-01-15 19:03:07.832815,11,783,791, 22,2014-01-12 15:38:54.94406,448,797,201, 22,2014-01-19 22:24:31.41919,62,673,447, 72,2014-01-13 03:06:09.855433,183,162,544, 22,2014-01-20 11:54:03.512904,951,742,642, 22,2014-01-15 06:47:37.913926,80,861,712, 22,2014-01-17 02:56:07.25492,390,109,10, 22,2014-01-11 01:09:25.129302,207,173,47, 72,2014-01-11 18:28:14.934386,69,683,486, 22,2014-01-14 15:43:28.504899,245,490,821, 59,2014-01-20 22:09:26.347484,383,686,168, 59,2014-01-11 10:35:03.71173,510,236,26, 72,2014-01-18 10:57:32.034542,550,98,208, 72,2014-01-20 23:00:40.413406,933,988,59, 72,2014-01-18 07:42:09.89015,469,739,445, 59,2014-01-12 11:54:40.965202,979,531,915, 72,2014-01-19 23:31:41.704779,97,991,218, 22,2014-01-13 06:36:37.27219,234,143,263, 22,2014-01-19 11:28:10.025306,16,308,160, 72,2014-01-14 00:02:06.244876,941,61,671, 59,2014-01-16 11:50:44.492505,188,610,955, 22,2014-01-15 06:07:36.964701,109,996,971, 59,2014-01-18 22:07:53.966094,588,674,221, 22,2014-01-17 16:52:15.186723,912,335,1000, 59,2014-01-19 19:02:53.244601,386,307,260, 72,2014-01-19 20:08:57.953952,808,274,680, 22,2014-01-12 02:57:22.820308,273,752,277, 72,2014-01-11 17:37:14.703215,26,65,318, 22,2014-01-11 03:42:14.753256,785,137,944, 72,2014-01-13 04:18:04.877627,391,888,722, 59,2014-01-14 00:38:28.208079,922,742,86, 72,2014-01-16 02:45:22.586071,702,50,747, 59,2014-01-14 23:56:59.822106,681,874,780, 72,2014-01-12 17:02:59.62727,844,505,572, 59,2014-01-18 09:51:32.441504,756,576,139, 59,2014-01-14 05:51:03.33501,284,653,823, 72,2014-01-17 03:50:37.541615,726,453,850, 59,2014-01-14 06:47:39.696425,617,76,126, 72,2014-01-14 11:00:28.464313,795,389,255, 22,2014-01-18 02:18:52.476,447,486,632, 72,2014-01-18 02:31:08.201323,858,476,5, 59,2014-01-14 03:09:54.811798,399,550,852, 59,2014-01-18 16:53:56.187731,473,22,782, 72,2014-01-15 07:28:40.236284,46,17,300, 22,2014-01-13 20:10:35.225204,351,108,513, 72,2014-01-15 18:36:06.077881,451,311,831, 72,2014-01-16 23:23:21.201485,232,67,737, 22,2014-01-12 13:42:16.929983,442,836,274, 59,2014-01-18 05:12:43.350071,283,535,747, 59,2014-01-15 04:51:31.227211,367,701,615, 22,2014-01-15 17:44:23.174859,535,376,459, 72,2014-01-13 11:03:27.842576,417,755,178, 22,2014-01-19 14:59:08.191912,626,466,845, 59,2014-01-13 09:52:17.141373,102,388,332, 22,2014-01-20 07:55:43.029718,134,239,215, 22,2014-01-18 04:02:01.436514,545,633,234, 22,2014-01-12 00:01:15.095325,94,639,978, 22,2014-01-11 04:53:05.850027,229,455,933, 59,2014-01-19 01:39:16.140147,359,535,623, 72,2014-01-12 07:43:29.848581,263,825,345, 72,2014-01-13 13:11:10.728073,398,475,653, 72,2014-01-11 20:17:44.348227,531,315,305, 59,2014-01-17 14:28:52.435071,597,633,607, 59,2014-01-17 
17:08:10.550179,713,759,69, 22,2014-01-13 17:20:43.975916,239,404,801, 72,2014-01-14 18:20:24.876576,805,512,893, 22,2014-01-21 02:59:13.88519,593,335,556, 22,2014-01-20 01:11:21.51753,138,786,818, 59,2014-01-18 05:08:04.57342,35,802,269, 59,2014-01-14 07:59:42.349502,988,744,674, 72,2014-01-16 21:30:13.99353,539,453,422, 59,2014-01-15 01:39:12.774743,955,712,590, 22,2014-01-17 15:09:37.1613,327,595,69, 59,2014-01-14 08:24:10.181261,860,91,269, 72,2014-01-19 13:07:53.011026,238,442,588, 22,2014-01-20 15:20:12.386504,774,92,523, 22,2014-01-19 07:00:16.259142,374,366,63, 72,2014-01-15 06:31:14.212931,163,712,940, 59,2014-01-11 23:02:29.316487,422,636,610, 72,2014-01-16 06:12:59.609213,402,991,430, 59,2014-01-19 15:22:45.440143,624,62,113, 59,2014-01-16 20:46:52.491347,400,780,927, 72,2014-01-18 21:16:27.451789,670,441,577, 72,2014-01-18 00:21:53.632055,423,402,468, 59,2014-01-19 10:39:09.632719,918,462,314, 59,2014-01-17 23:12:10.481507,180,776,659, 72,2014-01-14 22:23:55.068989,410,777,487, 59,2014-01-20 14:40:24.728044,786,323,655, 59,2014-01-18 08:05:16.331953,210,23,364, 59,2014-01-12 18:03:11.209135,311,325,796, 72,2014-01-11 16:23:54.577044,808,583,926, 22,2014-01-21 01:16:27.060025,36,573,621, 72,2014-01-13 18:20:55.557362,150,628,808, 59,2014-01-18 10:52:47.012116,194,572,269, 22,2014-01-17 12:24:37.610204,264,115,258, 59,2014-01-16 15:41:39.533277,765,446,185, 59,2014-01-11 23:13:11.888691,139,328,901, 72,2014-01-17 09:23:51.495394,878,573,897, 22,2014-01-15 10:53:01.050807,785,242,786, 59,2014-01-19 08:21:16.46211,840,109,963, 22,2014-01-20 21:23:33.844896,761,159,87, 72,2014-01-11 02:23:15.044338,318,152,687, 72,2014-01-13 04:00:29.237272,541,81,475, 22,2014-01-17 06:33:11.006196,290,551,939, 22,2014-01-14 14:47:25.225599,156,975,117, 59,2014-01-11 11:08:22.248298,516,511,846, 59,2014-01-16 15:53:23.3927,875,812,195, 22,2014-01-12 15:47:41.484741,557,202,829, 59,2014-01-15 21:39:36.46123,56,178,806, 59,2014-01-17 18:55:52.709186,790,1000,179, 22,2014-01-18 02:00:41.093953,19,230,877, 59,2014-01-14 07:02:21.901373,539,71,798, 22,2014-01-13 09:42:45.200533,738,773,174, 22,2014-01-15 17:17:08.546161,939,80,470, 59,2014-01-11 01:24:15.533847,110,148,885, 59,2014-01-11 14:21:54.833672,513,463,705, 22,2014-01-12 10:29:19.027668,373,294,783, 72,2014-01-15 03:48:10.602835,470,943,213, 72,2014-01-10 23:02:19.561716,679,255,930, 22,2014-01-19 22:34:35.359621,76,354,791, 72,2014-01-17 01:51:21.811971,342,291,509, 22,2014-01-11 19:26:14.138759,202,194,122, 22,2014-01-19 17:51:02.419646,81,587,197, 59,2014-01-20 00:12:17.369332,22,1,590, 59,2014-01-19 10:19:01.150875,673,290,559, 22,2014-01-16 00:15:40.029851,309,248,2, 22,2014-01-15 09:53:56.903029,330,570,979, 72,2014-01-20 13:32:13.039984,862,967,828, 72,2014-01-12 03:39:31.525245,54,975,816, 72,2014-01-20 00:46:57.953836,490,711,725, 22,2014-01-18 15:53:29.502095,670,756,471, 72,2014-01-11 19:03:05.370141,465,337,592, 59,2014-01-20 07:10:12.998593,933,773,35, 72,2014-01-20 23:53:58.739367,481,426,67, 22,2014-01-18 05:36:16.376336,444,594,767, 22,2014-01-13 15:57:38.224192,605,352,138, 72,2014-01-11 05:02:20.988084,587,130,605, 59,2014-01-13 15:29:39.769036,205,964,956, 59,2014-01-15 11:45:19.709351,386,222,695, 59,2014-01-16 06:41:57.449314,764,253,123, 22,2014-01-20 14:25:32.478223,946,712,762, 59,2014-01-12 07:46:00.803304,597,555,206, 59,2014-01-19 17:44:19.351105,727,491,396, 59,2014-01-12 18:08:17.679175,824,401,203, 72,2014-01-17 05:03:09.349466,350,66,289, 59,2014-01-19 23:08:34.884953,719,13,529, 22,2014-01-13 
12:30:12.512847,689,699,17, 59,2014-01-18 19:32:28.377553,46,51,652, 22,2014-01-13 20:56:45.487789,96,943,491, 72,2014-01-13 15:32:32.074562,911,859,30, 59,2014-01-17 12:07:03.737174,90,420,951, 72,2014-01-20 02:48:07.299758,25,862,32, 59,2014-01-14 14:58:46.213741,425,536,582, 59,2014-01-15 23:58:06.15724,74,732,334, 59,2014-01-18 21:00:24.669091,938,173,461, 59,2014-01-12 19:17:47.364616,62,852,499, 72,2014-01-21 04:13:46.18709,588,20,831, 59,2014-01-13 00:54:21.57212,908,434,829, 22,2014-01-12 02:50:00.4046,857,762,123, 72,2014-01-12 01:53:17.712753,338,343,110, 72,2014-01-11 19:41:19.526375,373,713,127, 72,2014-01-19 22:43:29.907114,959,500,433, 22,2014-01-13 00:56:23.082894,943,79,319, 72,2014-01-10 20:51:32.524968,971,241,751, 22,2014-01-19 16:37:28.64648,409,967,828, 72,2014-01-20 10:32:39.459649,274,345,623, 22,2014-01-13 16:49:10.749159,628,150,700, 59,2014-01-20 01:39:49.634565,170,965,759, 22,2014-01-12 20:02:19.228685,109,848,145, 22,2014-01-18 08:34:30.45851,931,650,588, 59,2014-01-15 02:21:47.084297,107,570,930, 22,2014-01-12 04:27:51.707328,803,681,436, 59,2014-01-19 20:20:31.262234,313,688,985, 22,2014-01-13 14:06:06.435403,870,366,642, 59,2014-01-14 02:36:09.386503,30,620,170, 59,2014-01-15 19:23:40.6117,890,840,275, 22,2014-01-12 07:14:41.320355,483,640,798, 22,2014-01-16 19:06:21.89935,727,790,18, 72,2014-01-13 08:56:08.989253,398,35,53, 72,2014-01-15 08:11:26.808144,64,818,419, 22,2014-01-19 14:38:53.973913,492,222,973, 72,2014-01-20 01:03:12.726847,488,599,758, 72,2014-01-14 04:59:34.107902,790,717,840, 72,2014-01-12 23:37:40.187653,797,767,410, 22,2014-01-14 19:01:18.884085,12,65,54, 22,2014-01-11 19:59:58.776993,387,572,390, 72,2014-01-14 22:55:27.552689,782,846,300, 59,2014-01-14 17:15:05.071175,731,282,372, 22,2014-01-14 00:54:20.349532,657,762,884, 59,2014-01-16 05:45:27.957289,60,814,372, 72,2014-01-15 23:08:22.783929,745,437,247, 22,2014-01-15 00:35:39.875907,663,251,909, 22,2014-01-14 22:28:57.864402,149,534,45, 72,2014-01-18 04:04:45.867242,14,216,155, 72,2014-01-15 01:27:12.400876,731,407,660, 72,2014-01-13 09:06:26.511301,737,644,491, 59,2014-01-17 08:37:25.326892,676,420,977, 22,2014-01-17 22:16:23.150035,712,684,123, 22,2014-01-12 04:46:16.145866,531,355,478, 22,2014-01-19 08:39:44.555577,425,699,724, 72,2014-01-15 00:50:53.608966,385,323,361, 59,2014-01-16 11:08:03.230162,150,546,647, 22,2014-01-20 17:07:36.262904,522,844,398, 59,2014-01-13 15:11:24.8712,317,665,705, 22,2014-01-19 05:14:09.665565,176,950,110, 22,2014-01-13 13:43:45.649407,382,80,531, 59,2014-01-18 14:35:05.4829,788,575,487, 72,2014-01-20 16:28:50.985921,316,783,999, 59,2014-01-19 12:50:07.548757,210,252,463, 59,2014-01-21 03:31:14.472573,413,778,497, 72,2014-01-14 18:40:17.794064,357,841,827, 59,2014-01-17 21:29:01.52309,423,753,348, 72,2014-01-19 22:34:27.199419,290,955,57, 59,2014-01-18 03:39:51.901967,719,636,301, 22,2014-01-20 01:06:41.710743,624,669,791, 22,2014-01-13 11:35:46.083505,770,217,922, 59,2014-01-19 03:39:50.679378,219,406,545, 22,2014-01-13 18:02:09.263431,772,414,78, 72,2014-01-17 08:50:51.15468,56,588,682, 72,2014-01-11 22:34:11.02891,545,830,64, 22,2014-01-19 03:47:37.221139,129,21,389, 22,2014-01-12 01:59:13.939028,59,687,286, 72,2014-01-16 03:09:50.904818,910,58,932, 72,2014-01-12 20:16:35.08554,512,800,864, 22,2014-01-19 10:03:59.80627,461,935,25, 22,2014-01-20 08:37:03.305694,23,45,481, 72,2014-01-15 09:23:01.596842,361,942,869, 72,2014-01-15 12:41:25.133161,846,577,63, 22,2014-01-17 00:53:26.455729,170,309,731, 72,2014-01-16 18:09:17.743126,505,199,509, 
78,2014-01-13 15:21:09.688738,663,865,342, 69,2014-01-21 05:44:20.064694,617,247,646, 29,2014-01-11 23:17:20.973289,71,958,317, 11,2014-01-13 02:28:45.952061,517,600,873, 29,2014-01-13 14:55:44.935894,479,922,374, 29,2014-01-20 08:31:30.639272,128,288,313, 11,2014-01-15 20:12:31.601469,380,984,270, 12,2014-01-10 23:30:50.419213,221,364,138, 12,2014-01-19 19:00:21.625193,702,654,107, 12,2014-01-14 03:02:39.150646,975,605,202, 29,2014-01-10 21:02:04.891785,970,597,594, 29,2014-01-13 07:40:39.419676,129,220,491, 69,2014-01-21 04:31:40.673734,205,774,765, 69,2014-01-19 23:36:32.091204,683,493,875, 11,2014-01-20 15:20:31.321642,819,233,111, 69,2014-01-19 23:38:22.384477,472,576,695, 78,2014-01-12 05:12:18.174709,276,32,709, 69,2014-01-18 13:00:22.001021,604,813,448, 29,2014-01-12 11:40:31.648327,416,791,807, 12,2014-01-18 18:03:09.32939,354,909,419, 78,2014-01-19 15:34:33.029931,873,591,290, 12,2014-01-20 19:28:08.869466,178,530,180, 78,2014-01-20 00:02:23.268418,588,870,902, 11,2014-01-14 12:44:23.934749,803,239,530, 11,2014-01-12 09:44:43.955005,577,769,735, 11,2014-01-18 04:06:23.074687,51,130,633, 69,2014-01-13 15:21:27.240443,845,568,720, 69,2014-01-16 23:07:45.552266,725,549,537, 78,2014-01-12 10:47:48.207849,422,207,132, 12,2014-01-19 20:14:53.696591,752,248,117, 29,2014-01-12 11:17:03.295392,491,220,328, 11,2014-01-15 06:08:57.897005,58,562,680, 69,2014-01-19 19:59:13.761284,903,270,160, 29,2014-01-13 14:34:24.26868,45,710,31, 12,2014-01-17 12:37:43.849067,435,345,9, 69,2014-01-12 04:54:58.697598,405,858,504, 29,2014-01-12 17:05:54.907952,490,92,185, 69,2014-01-12 02:50:15.450535,299,966,309, 69,2014-01-12 08:25:49.11681,96,633,80, 29,2014-01-11 06:06:16.533565,883,641,837, 11,2014-01-15 09:52:54.601181,798,955,922, 78,2014-01-12 09:27:54.008596,8,994,597, 11,2014-01-13 17:46:55.953241,802,525,7, 29,2014-01-15 08:24:35.274916,836,400,725, 11,2014-01-11 03:04:26.099801,276,887,702, 29,2014-01-13 03:07:27.274882,436,392,971, 78,2014-01-14 02:02:57.659812,924,93,548, 12,2014-01-12 12:16:44.274509,177,531,868, 11,2014-01-20 20:07:49.276322,409,432,61, 78,2014-01-15 17:43:29.30814,688,21,900, 78,2014-01-20 10:19:53.603899,883,836,871, 11,2014-01-19 05:42:22.306253,528,857,972, 29,2014-01-15 07:11:38.177605,474,281,970, 12,2014-01-19 04:22:16.872736,510,744,649, 29,2014-01-12 12:26:46.241422,664,810,447, 29,2014-01-16 20:56:22.13261,114,505,606, 69,2014-01-16 02:28:39.947423,100,194,73, 11,2014-01-15 07:48:13.481865,676,859,554, 29,2014-01-12 14:04:07.684877,849,413,877, 69,2014-01-17 17:16:28.155691,752,378,285, 29,2014-01-13 22:03:07.178455,959,437,275, 29,2014-01-14 05:21:10.980269,5,960,285, 78,2014-01-11 17:25:26.052696,338,248,190, 12,2014-01-12 12:02:20.940159,984,259,631, 78,2014-01-16 23:55:35.249368,272,245,529, 29,2014-01-18 10:03:09.901763,288,958,8, 78,2014-01-13 20:57:19.637757,1,279,200, 11,2014-01-18 21:01:30.15732,460,499,552, 12,2014-01-19 16:53:25.352298,470,677,108, 11,2014-01-15 09:23:08.754568,559,453,510, 11,2014-01-19 07:07:46.690883,966,520,685, 12,2014-01-13 20:46:19.953898,103,818,968, 69,2014-01-16 22:51:02.763164,893,27,529, 29,2014-01-11 18:54:42.644124,22,27,505, 11,2014-01-18 09:10:55.228814,329,822,403, 11,2014-01-17 05:55:28.862964,527,775,681, 78,2014-01-14 02:02:09.919425,147,304,49, 29,2014-01-11 05:13:52.888626,680,169,876, 69,2014-01-18 22:12:13.137474,539,998,136, 12,2014-01-13 16:09:59.195748,245,475,554, 12,2014-01-16 02:57:22.196766,113,314,946, 29,2014-01-18 02:32:06.741373,737,613,452, 29,2014-01-11 15:52:21.502001,336,249,146, 78,2014-01-20 
14:09:00.374371,299,631,998, 78,2014-01-16 00:54:23.614109,302,188,309, 11,2014-01-13 08:19:07.743423,726,401,489, 78,2014-01-16 05:05:22.5074,199,496,232, 11,2014-01-10 21:23:03.561952,281,165,831, 12,2014-01-17 20:07:21.225706,266,963,961, 12,2014-01-17 23:09:30.192277,301,292,16, 11,2014-01-17 18:39:31.717643,682,17,117, 69,2014-01-20 22:10:28.404162,769,857,780, 12,2014-01-10 22:30:41.172965,387,557,836, 11,2014-01-18 16:04:57.770339,896,34,111, 29,2014-01-12 04:12:49.344322,744,201,143, 78,2014-01-17 02:26:16.422332,264,866,398, 69,2014-01-15 20:08:07.672102,889,555,436, 69,2014-01-15 05:10:08.982079,37,901,471, 29,2014-01-14 17:27:46.579651,14,923,299, 69,2014-01-14 07:01:33.0244,22,514,905, 78,2014-01-19 18:33:17.736646,935,566,974, 11,2014-01-12 18:35:33.270954,435,415,251, 11,2014-01-17 07:47:52.978298,289,878,184, 12,2014-01-15 11:24:20.499809,337,394,67, 78,2014-01-13 17:30:15.915078,939,226,892, 78,2014-01-14 10:58:48.207531,700,516,109, 29,2014-01-11 11:19:49.362774,34,516,128, 29,2014-01-16 23:32:25.834504,363,673,173, 29,2014-01-14 20:12:41.096156,297,274,319, 78,2014-01-19 13:32:02.500248,697,444,365, 78,2014-01-19 19:42:25.030251,339,811,768, 29,2014-01-20 03:10:03.293342,447,93,433, 29,2014-01-16 10:04:09.24204,992,993,201, 78,2014-01-20 15:34:46.532671,710,141,528, 29,2014-01-19 11:19:03.667712,113,449,395, 11,2014-01-11 04:58:32.856149,258,550,440, 69,2014-01-12 17:53:54.276094,867,603,424, 12,2014-01-14 10:24:26.175113,220,396,872, 29,2014-01-11 06:21:36.418101,501,288,589, 29,2014-01-19 18:01:15.5018,695,138,983, 12,2014-01-11 03:33:56.367808,942,980,298, 29,2014-01-18 05:01:08.135743,18,155,902, 29,2014-01-19 10:11:43.906381,31,908,87, 29,2014-01-11 06:04:37.540773,231,521,383, 78,2014-01-15 15:06:05.906082,708,469,770, 29,2014-01-20 18:24:33.250703,478,416,94, 78,2014-01-17 12:30:53.963105,800,406,1, 12,2014-01-20 15:14:13.578183,881,810,601, 12,2014-01-14 17:34:42.233201,655,410,876, 69,2014-01-10 23:58:40.542756,253,611,869, 78,2014-01-13 16:15:46.603003,969,288,43, 29,2014-01-13 06:07:59.969847,101,140,183, 29,2014-01-12 22:34:13.81371,738,12,595, 69,2014-01-20 04:03:39.581301,107,151,494, 12,2014-01-17 21:32:20.470076,245,699,643, 12,2014-01-15 20:04:29.729208,477,261,908, 12,2014-01-13 09:02:27.788831,844,114,258, 69,2014-01-18 12:52:09.832849,439,134,756, 69,2014-01-11 13:36:55.563711,962,483,848, 69,2014-01-17 09:15:08.885407,423,967,551, 12,2014-01-16 20:24:12.333516,632,141,482, 29,2014-01-20 13:19:20.594382,678,916,162, 29,2014-01-16 06:25:12.178749,30,371,238, 11,2014-01-12 00:28:21.575555,895,862,130, 29,2014-01-19 22:54:07.127053,417,61,117, 12,2014-01-14 11:44:15.84646,296,547,106, 78,2014-01-12 09:26:54.431704,675,925,393, 78,2014-01-11 10:48:01.403147,968,910,668, 12,2014-01-18 02:08:42.021993,365,853,78, 29,2014-01-12 19:48:30.850224,611,365,453, 69,2014-01-20 08:49:16.905366,614,513,980, 29,2014-01-18 09:42:38.389801,202,873,531, 12,2014-01-20 04:49:38.985968,780,508,761, 12,2014-01-18 13:01:00.811747,264,887,336, 78,2014-01-18 19:47:15.930574,358,94,228, 29,2014-01-14 13:55:44.892469,969,647,329, 69,2014-01-18 01:25:34.06245,940,364,961, 78,2014-01-15 02:18:09.893679,423,377,153, 78,2014-01-13 23:09:58.470652,114,657,160, 69,2014-01-11 13:00:16.29565,409,44,524, 12,2014-01-15 06:16:50.436435,57,747,438, 69,2014-01-16 19:25:45.073655,430,260,93, 69,2014-01-13 23:08:16.265917,880,493,911, 11,2014-01-17 08:51:04.250566,298,166,408, 12,2014-01-15 17:29:24.655375,427,878,820, 12,2014-01-21 00:40:36.735992,68,331,745, 11,2014-01-11 
22:55:33.979774,765,827,38, 78,2014-01-18 06:31:52.444206,882,775,835, 29,2014-01-18 07:32:46.568841,148,319,242, 12,2014-01-12 16:32:29.543485,468,463,155, 12,2014-01-14 09:47:01.329615,299,70,312, 69,2014-01-13 21:56:58.902357,991,618,608, 78,2014-01-11 23:51:50.137868,420,719,135, 69,2014-01-19 20:12:13.508363,863,86,288, 12,2014-01-15 02:25:20.477913,412,862,247, 12,2014-01-21 02:45:57.264921,959,382,58, 12,2014-01-13 01:56:29.355243,686,218,288, 69,2014-01-16 15:52:14.910036,320,320,265, 78,2014-01-11 07:33:58.668486,875,782,257, 29,2014-01-20 08:05:11.377235,633,213,69, 29,2014-01-18 15:40:45.76026,804,799,801, 78,2014-01-20 20:23:15.573852,62,74,510, 78,2014-01-17 11:47:49.767036,956,140,664, 69,2014-01-17 14:30:24.746228,86,658,671, 78,2014-01-18 03:24:16.385599,367,76,45, 29,2014-01-15 01:35:05.697611,749,746,571, 78,2014-01-10 22:26:09.638697,586,129,371, 78,2014-01-14 22:49:50.448049,349,277,171, 12,2014-01-19 07:53:15.591291,623,202,711, 11,2014-01-14 01:36:08.109769,57,419,737, 69,2014-01-15 15:50:06.744119,210,837,700, 12,2014-01-13 08:10:06.027726,977,574,579, 78,2014-01-20 01:01:53.183424,719,287,112, 29,2014-01-18 18:58:23.010034,632,567,456, 29,2014-01-19 21:01:10.278292,413,972,247, 78,2014-01-14 12:31:17.838799,794,345,703, 11,2014-01-18 13:38:59.746026,617,268,947, 12,2014-01-20 23:56:44.258066,126,141,311, 12,2014-01-11 13:03:10.283424,370,247,908, 11,2014-01-15 15:11:46.314868,53,149,462, 78,2014-01-12 10:29:13.801971,97,812,725, 69,2014-01-15 02:50:11.613039,399,958,316, 11,2014-01-18 17:08:45.217644,126,558,649, 11,2014-01-13 14:21:03.939839,818,876,491, 69,2014-01-13 17:02:25.121821,82,144,325, 11,2014-01-12 13:34:05.695556,261,911,615, 12,2014-01-13 11:07:01.205179,394,401,938, 11,2014-01-15 22:58:54.477064,482,376,340, 78,2014-01-18 09:26:20.605593,735,644,716, 29,2014-01-13 22:40:59.873666,264,762,120, 12,2014-01-15 01:04:05.854299,158,674,696, 11,2014-01-15 19:07:06.365853,692,535,365, 12,2014-01-13 13:04:15.447518,43,59,188, 11,2014-01-11 06:51:55.621335,719,66,0, 69,2014-01-12 03:37:31.1125,269,696,703, 11,2014-01-20 20:28:31.833118,261,868,652, 78,2014-01-15 12:27:01.318947,48,599,432, 11,2014-01-12 06:03:40.751197,807,293,352, 29,2014-01-14 13:18:22.281167,805,240,427, 69,2014-01-13 14:20:16.910237,627,194,891, 78,2014-01-15 11:39:48.860965,612,507,656, 29,2014-01-19 09:08:29.025285,446,775,421, 29,2014-01-16 02:30:22.937964,689,525,744, 11,2014-01-14 06:41:42.044807,703,568,48, 29,2014-01-16 22:06:52.03532,993,322,103, 78,2014-01-14 17:31:33.216674,91,245,867, 12,2014-01-17 23:12:59.883606,361,262,22, 69,2014-01-14 05:45:51.781346,575,293,514, 11,2014-01-14 11:28:17.47474,92,220,473, 11,2014-01-18 16:16:10.16703,415,333,46, 29,2014-01-19 00:57:38.096632,420,338,629, 29,2014-01-16 01:57:31.27671,350,870,246, 12,2014-01-12 13:06:21.780489,991,556,344, 11,2014-01-16 12:06:23.314276,999,178,905, 11,2014-01-18 20:18:35.216968,816,780,141, 12,2014-01-15 10:08:46.902308,351,694,929, 78,2014-01-18 05:40:29.009831,399,392,884, 69,2014-01-11 01:25:36.422147,115,708,370, 78,2014-01-20 13:07:41.379372,259,230,289, 78,2014-01-15 09:06:49.615843,507,774,423, 78,2014-01-14 04:06:36.295812,582,9,187, 11,2014-01-14 08:11:47.233672,974,361,9, 12,2014-01-20 08:13:55.981696,664,948,212, 78,2014-01-16 21:10:51.74333,77,303,951, 29,2014-01-14 19:03:42.855007,595,695,174, 78,2014-01-11 05:51:27.094196,846,768,803, 78,2014-01-16 11:39:23.576448,481,869,127, 69,2014-01-19 11:30:44.173954,708,613,137, 12,2014-01-12 15:55:07.845811,352,475,685, 78,2014-01-20 
04:57:45.857615,726,240,215, 78,2014-01-11 19:51:01.084191,420,834,717, 69,2014-01-17 07:34:56.706776,724,838,848, 78,2014-01-18 08:06:14.883319,687,85,725, 12,2014-01-17 02:21:24.022154,649,188,210, 69,2014-01-20 18:16:38.751583,579,699,658, 29,2014-01-14 00:13:06.918638,318,298,242, 12,2014-01-20 23:52:57.238828,377,720,329, 69,2014-01-17 11:29:38.635608,765,244,161, 29,2014-01-17 09:58:58.700403,423,756,800, 12,2014-01-14 05:21:14.713987,131,349,638, 12,2014-01-14 21:45:48.802639,989,381,350, 11,2014-01-15 04:56:36.797035,643,950,254, 69,2014-01-19 11:18:45.990697,529,120,722, 69,2014-01-16 14:52:10.583127,102,269,451, 12,2014-01-20 21:03:00.111311,521,272,145, 12,2014-01-17 01:37:21.207666,143,420,684, 11,2014-01-21 05:00:57.485435,891,739,868, 29,2014-01-17 20:43:29.121143,681,86,122, 78,2014-01-17 07:02:57.629811,368,902,395, 78,2014-01-20 12:08:38.864808,486,663,330, 78,2014-01-11 23:50:18.736985,916,14,237, 78,2014-01-20 15:09:33.925624,823,367,852, 11,2014-01-13 14:20:26.098479,876,218,842, 11,2014-01-11 02:04:14.7191,208,133,220, 69,2014-01-16 06:20:25.668954,965,544,69, 11,2014-01-17 13:24:08.953486,668,827,949, 12,2014-01-11 11:55:41.813296,524,899,609, 69,2014-01-11 11:59:49.245401,469,539,21, 69,2014-01-15 18:54:53.127441,203,682,737, 29,2014-01-13 07:50:49.659107,855,728,666, 78,2014-01-20 20:57:35.103434,391,29,166, 69,2014-01-16 18:45:54.211632,730,501,943, 78,2014-01-19 19:25:46.366302,927,583,850, 69,2014-01-17 23:03:49.986753,86,227,837, 12,2014-01-12 15:07:18.234205,975,351,716, 29,2014-01-19 07:42:25.117885,351,223,39, 29,2014-01-21 03:16:56.905391,329,692,71, 69,2014-01-12 09:00:15.473033,168,133,600, 12,2014-01-15 13:12:03.753494,320,444,274, 29,2014-01-17 07:15:55.605794,950,786,591, 12,2014-01-15 18:21:30.187021,984,443,665, 69,2014-01-19 14:57:52.556552,765,923,547, 11,2014-01-11 06:12:32.402829,362,890,411, 11,2014-01-13 23:40:16.178137,725,813,731, 12,2014-01-14 23:50:03.139679,977,380,256, 11,2014-01-10 21:15:32.51414,225,325,640, 69,2014-01-20 05:17:37.385802,644,210,935, 78,2014-01-14 22:51:00.625533,64,522,266, 12,2014-01-17 21:59:01.635702,338,795,777, 12,2014-01-16 06:20:35.016033,387,326,996, 78,2014-01-14 04:59:39.490342,448,405,726, 12,2014-01-19 01:49:20.372688,11,981,896, 11,2014-01-15 15:30:08.941656,754,177,231, 69,2014-01-16 23:20:05.58924,736,156,563, 12,2014-01-19 07:53:35.091789,981,877,530, 12,2014-01-21 01:50:34.611029,60,843,384, 29,2014-01-13 06:44:14.542726,332,207,885, 29,2014-01-19 23:49:16.905503,830,79,912, 11,2014-01-11 07:50:23.856431,441,992,762, 29,2014-01-18 05:39:07.670586,230,385,961, 12,2014-01-12 01:40:06.56461,784,997,946, 78,2014-01-10 22:47:58.959866,91,331,905, 69,2014-01-13 18:25:01.882218,646,228,912, 69,2014-01-21 01:05:52.930913,437,61,456, 29,2014-01-18 01:51:48.946619,428,315,992, 12,2014-01-15 13:32:20.116423,366,273,208, 78,2014-01-19 02:48:18.049218,151,588,692, 69,2014-01-17 23:08:45.852428,978,467,261, 11,2014-01-17 02:32:35.589876,477,3,39, 69,2014-01-13 10:00:21.802711,790,434,274, 69,2014-01-14 00:24:41.458222,69,7,539, 12,2014-01-11 14:54:05.776896,56,882,358, 11,2014-01-11 18:58:14.359264,121,432,618, 11,2014-01-14 10:37:13.86147,717,389,853, 11,2014-01-14 18:34:21.955033,314,831,634, 78,2014-01-15 22:48:17.499361,979,339,533, 69,2014-01-14 11:52:46.37561,455,716,210, 11,2014-01-13 17:51:59.341254,331,817,782, 69,2014-01-20 01:39:18.124895,74,632,107, 12,2014-01-11 03:51:48.011312,641,159,703, 69,2014-01-19 04:12:34.357288,591,235,970, 12,2014-01-13 00:38:57.615235,513,744,210, 29,2014-01-19 
09:41:08.384419,747,682,687, 12,2014-01-13 13:42:43.298945,847,272,304, 69,2014-01-19 03:59:03.204476,385,284,73, 12,2014-01-17 11:34:43.476206,124,882,874, 29,2014-01-13 09:33:17.909974,763,914,899, 11,2014-01-11 04:43:17.747621,709,910,590, 69,2014-01-16 05:24:00.381709,220,778,616, 12,2014-01-13 21:23:41.766824,57,217,492, 12,2014-01-18 14:22:25.418207,802,277,529, 11,2014-01-17 11:04:06.94674,733,65,846, 78,2014-01-14 00:11:40.72669,750,325,169, 29,2014-01-11 02:47:27.300425,668,143,391, 29,2014-01-17 06:09:59.877652,545,710,574, 12,2014-01-10 20:03:29.673727,960,216,934, 29,2014-01-15 20:19:47.417267,231,726,698, 29,2014-01-15 02:58:17.92687,674,273,704, 29,2014-01-17 23:12:15.526155,17,82,687, 12,2014-01-11 16:52:23.007143,971,154,454, 12,2014-01-17 16:58:39.73,775,356,313, 12,2014-01-21 03:36:56.984377,464,627,438, 69,2014-01-12 11:46:28.784039,913,106,966, 12,2014-01-18 15:56:54.089264,566,792,325, 78,2014-01-14 08:14:10.845848,238,272,238, 78,2014-01-16 10:20:50.739491,231,65,519, 78,2014-01-13 08:45:11.588625,643,38,450, 11,2014-01-18 00:06:57.221877,188,482,66, 11,2014-01-19 08:12:50.080746,704,303,825, 78,2014-01-12 04:24:29.71352,92,785,632, 11,2014-01-18 07:58:45.233189,733,377,670, 78,2014-01-17 06:25:24.438033,745,15,929, 69,2014-01-14 09:03:27.329175,67,746,794, 29,2014-01-16 11:39:53.617607,406,577,727, 12,2014-01-20 00:08:07.736978,518,263,463, 69,2014-01-12 07:02:30.53365,785,275,782, 78,2014-01-12 17:14:37.094233,266,657,650, 78,2014-01-12 03:41:25.647371,69,318,102, 69,2014-01-12 15:45:48.281271,846,695,432, 69,2014-01-18 02:38:37.475944,327,791,897, 69,2014-01-15 05:05:07.414195,442,909,334, 12,2014-01-20 10:08:13.699478,902,86,950, 11,2014-01-14 07:42:44.422682,310,704,749, 29,2014-01-18 09:16:48.140884,758,836,536, 11,2014-01-20 16:55:41.000322,878,188,203, 29,2014-01-20 17:52:44.300754,325,366,45, 78,2014-01-18 09:20:17.81461,22,682,36, 11,2014-01-15 07:15:28.417589,579,984,284, 11,2014-01-14 14:51:02.227624,825,428,122, 69,2014-01-15 02:32:33.340767,638,784,98, 12,2014-01-16 04:07:51.424731,822,78,214, 29,2014-01-11 01:49:41.957624,884,535,457, 78,2014-01-15 00:09:30.325563,816,901,199, 12,2014-01-17 19:54:20.20877,894,495,195, 78,2014-01-18 21:46:36.047306,88,162,592, 29,2014-01-18 12:23:41.17141,984,993,161, 11,2014-01-13 00:15:10.94826,929,910,750, 29,2014-01-11 00:31:47.635931,294,40,705, 11,2014-01-15 06:30:38.393287,64,217,670, 12,2014-01-11 02:28:01.029006,654,18,955, 78,2014-01-12 08:56:17.34987,65,634,681, 78,2014-01-12 08:29:23.626896,44,10,227, 69,2014-01-17 12:53:25.46704,55,613,665, 12,2014-01-15 21:59:44.679045,55,70,711, 29,2014-01-18 00:09:17.244503,162,827,726, 78,2014-01-16 07:01:33.204436,581,243,981, 29,2014-01-17 09:02:15.212695,909,872,474, 78,2014-01-19 21:23:54.338736,410,141,175, 12,2014-01-17 14:42:58.851807,960,250,429, 29,2014-01-19 04:48:03.494384,605,22,559, 11,2014-01-16 18:02:31.815099,136,81,371, 78,2014-01-11 13:48:06.266002,258,121,751, 69,2014-01-18 08:56:17.193863,896,914,162, 12,2014-01-20 05:45:16.237781,60,165,45, 12,2014-01-19 03:04:54.406885,544,571,209, 29,2014-01-17 19:51:58.194184,685,814,353, 29,2014-01-19 17:38:00.538535,215,520,604, 69,2014-01-16 06:25:12.221916,863,970,401, 78,2014-01-11 21:07:26.611773,359,714,439, 11,2014-01-13 02:29:02.766578,640,777,548, 69,2014-01-20 12:57:45.562682,736,483,325, 29,2014-01-17 05:15:18.036504,898,416,677, 12,2014-01-13 08:18:44.724202,413,107,572, 11,2014-01-14 07:07:15.888245,362,878,205, 69,2014-01-13 19:09:38.245694,763,378,794, 69,2014-01-11 00:05:20.771508,365,468,61, 
69,2014-01-11 13:30:57.060074,136,344,603, 12,2014-01-15 23:24:49.193954,28,954,201, 12,2014-01-11 04:37:08.407858,407,878,997, 12,2014-01-16 00:01:35.453361,229,629,767, 12,2014-01-16 05:52:50.22338,267,92,569, 12,2014-01-12 17:33:25.757728,127,458,284, 69,2014-01-17 12:30:59.080257,72,978,689, 12,2014-01-12 12:46:15.69042,204,169,569, 29,2014-01-17 19:33:10.436773,878,678,639, 78,2014-01-14 06:40:16.325179,929,598,222, 29,2014-01-17 23:47:48.894856,518,788,746, 11,2014-01-13 22:35:25.649468,850,988,326, 11,2014-01-12 22:04:10.663916,546,86,396, 11,2014-01-14 08:30:47.746664,739,329,973, 78,2014-01-11 21:23:29.143852,550,54,274, 12,2014-01-18 20:06:42.479015,712,645,484, 69,2014-01-15 02:18:54.012665,365,374,945, 78,2014-01-19 10:19:46.338133,607,709,265, 12,2014-01-17 19:51:58.717216,355,726,357, 12,2014-01-12 23:23:48.41997,805,258,987, 11,2014-01-16 00:11:44.532318,957,266,990, 11,2014-01-16 07:29:59.255751,340,489,390, 69,2014-01-18 09:49:00.641885,652,995,586, 11,2014-01-17 01:19:11.14451,379,764,711, 12,2014-01-18 13:59:02.022329,230,624,790, 11,2014-01-17 16:46:46.204567,717,217,321, 11,2014-01-13 00:34:29.181015,116,58,592, 29,2014-01-21 02:17:46.74695,577,663,462, 78,2014-01-21 03:54:02.093231,722,642,92, 11,2014-01-15 23:44:07.426708,443,813,767, 78,2014-01-10 20:23:07.518458,363,372,322, 11,2014-01-11 11:24:59.153305,373,47,902, 12,2014-01-21 03:08:56.621082,671,165,568, 78,2014-01-11 05:00:15.926317,155,198,688, 78,2014-01-16 15:26:34.606667,880,413,21, 12,2014-01-16 03:01:46.844461,712,589,448, 11,2014-01-13 02:33:41.684046,29,843,104, 69,2014-01-12 21:57:33.687343,537,810,157, 69,2014-01-17 19:48:02.534881,822,243,854, 11,2014-01-20 02:06:52.120819,951,88,679, 12,2014-01-16 08:37:50.012522,336,401,473, 29,2014-01-14 13:35:51.430156,424,328,68, 78,2014-01-12 18:42:17.770705,603,421,348, 29,2014-01-18 10:42:00.676437,550,466,908, 78,2014-01-18 02:06:39.17682,413,112,24, 29,2014-01-13 20:05:46.914557,310,551,762, 29,2014-01-16 00:48:43.155871,894,282,891, 11,2014-01-11 22:25:33.189903,598,254,806, 11,2014-01-12 00:25:33.252691,184,778,199, 69,2014-01-12 14:40:41.873085,155,674,803, 69,2014-01-14 01:49:21.609873,932,222,678, 29,2014-01-17 04:37:17.785428,400,698,187, 12,2014-01-18 02:10:41.128836,321,790,411, 11,2014-01-11 05:38:22.251758,507,251,832, 69,2014-01-12 23:56:28.929939,816,796,224, 78,2014-01-15 10:09:43.151584,539,279,738, 69,2014-01-18 02:25:08.456326,274,550,515, 29,2014-01-15 04:30:58.110953,645,368,855, 11,2014-01-15 06:27:29.898534,986,508,803, 69,2014-01-18 00:19:10.549556,645,342,542, 78,2014-01-20 08:15:05.538081,863,768,380, 12,2014-01-15 06:50:37.416993,858,968,94, 11,2014-01-18 15:44:09.702861,424,99,697, 78,2014-01-20 05:24:02.159163,880,259,444, 78,2014-01-15 15:50:53.343728,753,131,826, 78,2014-01-14 01:10:44.309947,812,810,230, 11,2014-01-15 02:25:49.003625,638,742,884, 11,2014-01-17 22:24:35.027774,812,817,214, 69,2014-01-16 03:08:17.997289,48,461,298, 69,2014-01-11 16:13:51.538506,309,181,384, 12,2014-01-16 18:31:27.149011,233,738,430, 11,2014-01-11 05:46:08.00981,582,190,26, 29,2014-01-15 09:49:42.968662,809,146,929, 11,2014-01-18 17:13:44.919716,636,565,290, 11,2014-01-18 20:28:08.686247,448,731,437, 12,2014-01-12 05:56:22.145901,765,837,142, 78,2014-01-11 07:19:31.834273,992,587,109, 29,2014-01-13 15:16:51.842118,167,622,50, 29,2014-01-13 08:21:55.335804,369,841,722, 69,2014-01-12 11:45:05.087382,970,337,851, 12,2014-01-15 09:57:33.715204,964,181,727, 12,2014-01-16 14:11:16.945678,772,500,180, 78,2014-01-18 20:22:22.87281,545,550,611, 
11,2014-01-12 06:08:14.844459,430,102,743, 11,2014-01-16 23:49:39.197436,212,562,620, 78,2014-01-21 00:18:51.802749,188,999,17, 12,2014-01-16 20:17:57.996044,81,919,871, 69,2014-01-13 20:14:47.653761,703,860,486, 29,2014-01-14 22:49:49.913703,17,346,778, 29,2014-01-10 20:45:27.894578,888,937,234, 78,2014-01-21 00:33:58.203737,85,224,314, 12,2014-01-14 01:04:55.451783,523,153,596, 78,2014-01-15 07:36:05.311571,269,271,146, 29,2014-01-18 10:18:07.906598,14,554,761, 69,2014-01-13 00:28:57.610947,135,910,56, 78,2014-01-20 03:26:58.655299,417,136,59, 78,2014-01-11 05:28:52.216545,163,471,224, 29,2014-01-17 06:54:46.614571,106,71,70, 78,2014-01-16 19:51:33.683492,562,596,375, 12,2014-01-16 12:37:10.213833,125,761,965, 12,2014-01-18 03:08:38.153496,664,833,638, 78,2014-01-12 08:23:00.832502,35,294,582, 11,2014-01-16 22:23:18.223644,331,474,101, 69,2014-01-12 06:58:21.122158,443,868,685, 12,2014-01-20 05:36:45.752218,52,747,921, 78,2014-01-14 12:51:26.91031,739,840,950, 12,2014-01-13 16:54:43.268059,403,851,714, 29,2014-01-20 16:56:17.586491,399,178,67, 12,2014-01-17 08:08:18.752428,901,822,147, 12,2014-01-16 05:16:38.603864,25,657,142, 29,2014-01-11 22:41:22.673874,612,896,320, 11,2014-01-11 12:05:52.468051,967,404,339, 69,2014-01-11 13:27:55.549542,256,421,617, 11,2014-01-19 23:03:45.546684,974,940,193, 78,2014-01-12 22:14:07.31251,145,100,305, 78,2014-01-17 17:17:34.746978,221,20,914, 69,2014-01-19 17:22:37.349433,901,897,467, 12,2014-01-18 22:32:05.308554,917,381,299, 69,2014-01-20 17:32:22.401158,729,208,779, 69,2014-01-13 10:12:27.263555,472,328,728, 11,2014-01-18 23:17:33.203132,56,485,465, 78,2014-01-20 12:06:20.604896,48,716,957, 12,2014-01-16 15:17:22.71534,556,796,433, 12,2014-01-13 00:53:38.514702,112,688,424, 78,2014-01-17 16:24:28.511494,180,646,81, 69,2014-01-18 19:46:20.326705,236,593,357, 69,2014-01-11 22:20:37.17042,136,799,595, 12,2014-01-18 01:53:20.728458,448,18,366, 69,2014-01-14 20:41:06.941276,708,63,137, 44,2014-01-17 22:12:10.853911,333,346,540, 67,2014-01-13 08:30:30.942292,949,492,521, 18,2014-01-11 17:49:45.094772,333,602,447, 75,2014-01-19 10:35:11.686414,640,684,927, 67,2014-01-19 10:53:49.166354,642,19,827, 18,2014-01-13 04:48:06.21735,48,669,466, 67,2014-01-18 10:11:57.438632,291,278,698, 18,2014-01-12 17:45:16.076665,356,970,226, 18,2014-01-16 01:42:49.485409,964,594,54, 67,2014-01-17 21:08:15.025123,619,192,105, 44,2014-01-19 05:53:34.829093,4,758,205, 68,2014-01-11 00:59:28.089693,344,944,796, 68,2014-01-18 23:49:37.699416,54,544,355, 75,2014-01-19 21:59:27.297143,487,114,867, 68,2014-01-11 18:27:23.639234,270,151,507, 68,2014-01-17 16:53:23.2461,414,484,661, 75,2014-01-11 14:13:34.609653,39,551,678, 44,2014-01-18 15:44:58.386631,942,204,314, 68,2014-01-16 04:16:00.595953,454,791,436, 67,2014-01-19 16:45:39.918207,300,599,515, 18,2014-01-18 03:17:20.78779,763,338,245, 75,2014-01-18 18:28:27.859508,458,985,586, 75,2014-01-17 10:03:13.121339,979,374,854, 67,2014-01-17 09:23:41.392685,939,291,526, 18,2014-01-14 03:45:50.574848,660,408,997, 75,2014-01-19 14:56:51.63646,616,439,924, 67,2014-01-13 19:48:09.90418,967,404,697, 75,2014-01-11 17:32:10.901553,5,13,16, 67,2014-01-20 17:17:28.806881,678,161,649, 67,2014-01-21 01:41:30.632639,367,290,609, 68,2014-01-15 18:13:17.843248,129,317,328, 67,2014-01-17 09:29:39.660793,413,382,860, 18,2014-01-13 04:12:01.575349,901,778,116, 68,2014-01-16 16:03:02.938021,973,445,705, 44,2014-01-15 14:04:51.347206,236,93,254, 68,2014-01-11 09:05:50.741704,983,466,34, 67,2014-01-19 00:51:09.155371,39,401,943, 67,2014-01-12 
18:16:48.785838,507,588,88, 75,2014-01-13 06:51:06.818369,912,171,834, 44,2014-01-13 20:33:58.641199,752,219,174, 18,2014-01-19 19:25:03.81096,841,912,810, 44,2014-01-11 06:44:41.64746,503,769,937, 68,2014-01-14 01:33:26.730891,643,474,787, 44,2014-01-17 13:14:41.510376,796,395,920, 75,2014-01-20 08:44:08.945023,96,905,805, 68,2014-01-15 00:00:50.370125,75,942,479, 18,2014-01-14 00:08:04.756896,263,67,893, 68,2014-01-21 02:57:43.554676,649,229,774, 75,2014-01-12 09:45:48.756756,30,753,370, 68,2014-01-19 08:24:05.352849,28,948,271, 68,2014-01-19 13:43:23.473302,666,217,903, 75,2014-01-19 17:03:09.544546,128,407,16, 67,2014-01-16 20:52:33.212357,527,832,650, 67,2014-01-15 17:46:36.594642,174,787,561, 75,2014-01-15 20:26:50.937232,614,990,859, 67,2014-01-20 04:38:23.787206,697,732,303, 75,2014-01-14 02:43:28.231102,976,386,220, 18,2014-01-18 20:15:00.841831,954,334,169, 68,2014-01-21 02:10:34.689178,410,600,717, 75,2014-01-13 14:00:57.037982,235,121,736, 18,2014-01-18 15:56:31.47447,978,656,594, 44,2014-01-15 14:23:52.532426,8,204,735, 44,2014-01-20 03:30:36.698775,888,348,55, 67,2014-01-21 00:08:33.049819,879,812,560, 75,2014-01-11 00:26:55.470447,411,638,907, 75,2014-01-14 11:35:28.045981,122,919,544, 18,2014-01-11 03:14:23.791523,362,170,972, 75,2014-01-19 05:18:04.626236,847,383,300, 68,2014-01-16 09:52:16.831819,67,909,528, 67,2014-01-13 14:05:30.609892,471,795,326, 75,2014-01-11 19:52:03.267435,603,93,341, 18,2014-01-14 23:17:20.643199,272,470,159, 44,2014-01-14 00:50:12.257771,635,883,148, 44,2014-01-15 01:25:29.998326,226,187,169, 75,2014-01-11 06:32:02.153575,60,365,567, 67,2014-01-13 03:34:21.202795,849,820,289, 44,2014-01-19 05:26:20.368451,94,805,672, 44,2014-01-14 10:40:06.910471,23,812,301, 44,2014-01-13 00:32:04.757471,351,12,358, 18,2014-01-20 19:12:09.125627,275,502,767, 44,2014-01-12 13:04:12.26332,859,191,619, 18,2014-01-11 08:15:28.230773,367,160,297, 75,2014-01-19 06:15:18.670173,717,134,98, 75,2014-01-18 13:56:45.475678,338,847,551, 18,2014-01-16 06:02:04.825833,530,404,146, 68,2014-01-13 20:42:09.607824,857,949,287, 68,2014-01-17 12:35:09.263303,868,709,791, 44,2014-01-19 12:45:33.056935,931,400,117, 18,2014-01-11 10:57:10.449655,123,553,183, 75,2014-01-17 08:45:43.95248,311,737,639, 44,2014-01-11 20:46:30.095337,852,609,524, 68,2014-01-19 06:53:41.924125,146,737,62, 75,2014-01-11 17:09:36.484907,431,107,14, 68,2014-01-21 04:17:06.794112,548,991,368, 44,2014-01-19 01:02:14.974364,519,523,923, 75,2014-01-11 21:36:31.955354,964,507,125, 68,2014-01-14 09:52:34.840512,961,99,646, 68,2014-01-19 08:16:38.765886,436,367,850, 67,2014-01-20 06:54:36.581591,36,547,918, 68,2014-01-19 23:44:51.672331,645,727,439, 44,2014-01-11 16:22:09.376197,832,367,654, 67,2014-01-10 20:46:39.849026,570,55,146, 44,2014-01-13 17:02:12.31553,634,128,605, 67,2014-01-14 21:12:21.633968,496,893,954, 75,2014-01-15 02:12:09.847352,193,123,549, 75,2014-01-14 03:34:14.469524,567,581,541, 44,2014-01-17 04:46:42.836762,801,600,447, 67,2014-01-13 01:38:30.216223,726,903,550, 75,2014-01-17 18:14:21.379996,754,492,802, 68,2014-01-19 09:18:47.594232,343,277,29, 18,2014-01-12 14:50:39.341849,117,325,109, 18,2014-01-19 11:18:33.643735,953,571,630, 44,2014-01-19 21:34:15.825425,897,170,679, 75,2014-01-21 01:05:58.012023,788,79,38, 67,2014-01-16 19:15:19.119413,850,817,214, 18,2014-01-14 21:36:20.651258,642,478,311, 67,2014-01-13 15:48:07.619847,82,826,842, 44,2014-01-13 01:50:28.382716,464,623,834, 44,2014-01-13 04:21:53.708193,20,442,250, 18,2014-01-14 06:45:18.069502,959,540,914, 67,2014-01-19 
14:36:12.335196,675,634,395, 18,2014-01-14 05:08:23.80353,755,231,786, 18,2014-01-12 07:38:59.994047,770,811,702, 44,2014-01-20 11:45:48.820103,735,856,312, 18,2014-01-14 03:25:30.598061,820,827,342, 18,2014-01-20 12:41:14.96841,950,425,102, 44,2014-01-11 03:22:20.775876,717,23,625, 67,2014-01-17 17:18:05.438574,448,727,996, 67,2014-01-18 14:57:53.734297,418,51,192, 67,2014-01-20 14:16:57.357467,938,616,385, 44,2014-01-16 11:02:57.110905,161,158,257, 67,2014-01-19 11:20:03.110493,987,306,81, 67,2014-01-20 15:03:37.206912,41,131,259, 67,2014-01-19 08:05:09.426854,596,761,83, 67,2014-01-13 02:32:24.74446,244,67,465, 18,2014-01-14 11:15:47.054265,713,150,493, 18,2014-01-12 05:39:23.896378,221,780,404, 44,2014-01-19 11:19:07.581223,66,983,91, 75,2014-01-16 16:54:17.270487,2,830,107, 44,2014-01-19 03:53:45.276374,534,316,531, 67,2014-01-17 14:37:55.175874,403,874,692, 44,2014-01-18 11:44:56.612336,338,419,585, 44,2014-01-17 09:12:18.920108,440,847,77, 67,2014-01-16 06:12:11.001299,164,600,582, 67,2014-01-18 06:50:54.624778,873,669,616, 18,2014-01-12 22:27:38.039941,151,15,61, 68,2014-01-20 07:48:31.652557,259,459,999, 75,2014-01-21 02:39:02.244625,36,480,134, 44,2014-01-15 04:18:06.422656,319,775,258, 44,2014-01-12 06:10:25.36075,945,230,835, 67,2014-01-14 03:24:20.314546,734,675,684, 68,2014-01-13 12:54:18.758272,534,466,938, 44,2014-01-15 15:18:49.164699,529,875,712, 44,2014-01-15 15:03:20.308593,661,591,730, 44,2014-01-12 18:40:07.578375,363,970,412, 68,2014-01-18 22:44:19.762761,353,42,512, 44,2014-01-14 21:44:35.277003,700,400,798, 75,2014-01-13 02:02:28.354251,574,329,375, 44,2014-01-15 10:02:25.201334,308,468,357, 75,2014-01-12 06:42:29.0113,488,30,149, 75,2014-01-12 10:19:25.712138,844,442,39, 68,2014-01-21 01:05:22.31224,963,34,735, 75,2014-01-20 22:02:32.122211,364,9,431, 67,2014-01-11 19:23:02.91905,178,146,64, 75,2014-01-19 03:10:31.739094,998,341,10, 18,2014-01-12 18:34:56.866673,623,417,491, 18,2014-01-15 10:38:49.973314,229,802,462, 67,2014-01-20 12:49:55.635472,812,353,649, 68,2014-01-10 23:54:04.448315,106,412,299, 75,2014-01-10 21:33:07.24422,349,648,557, 67,2014-01-18 10:43:40.912265,432,386,145, 67,2014-01-17 18:31:59.624189,804,201,609, 18,2014-01-18 13:18:03.856556,824,816,728, 18,2014-01-14 13:55:59.832373,510,799,500, 18,2014-01-12 18:44:10.625487,306,535,115, 44,2014-01-15 14:08:58.481334,827,808,551, 44,2014-01-16 16:23:37.872314,644,83,716, 68,2014-01-11 20:32:42.278044,464,636,763, 68,2014-01-15 10:48:00.726378,228,765,656, 75,2014-01-21 00:41:44.295389,604,730,371, 67,2014-01-13 06:43:07.638794,121,480,523, 44,2014-01-18 18:12:21.040925,505,503,320, 18,2014-01-13 07:36:03.05366,740,789,2, 75,2014-01-18 02:01:56.803494,273,906,129, 44,2014-01-13 03:15:41.349518,294,677,957, 44,2014-01-15 06:16:10.632035,86,705,511, 67,2014-01-15 18:46:16.566255,957,59,781, 75,2014-01-17 05:00:16.626521,160,105,46, 67,2014-01-17 12:18:38.986705,919,18,198, 75,2014-01-20 08:48:41.76759,973,424,497, 68,2014-01-18 15:42:45.637821,379,505,672, 75,2014-01-19 02:38:04.698843,997,507,251, 44,2014-01-20 03:54:04.080248,653,143,549, 68,2014-01-18 07:45:17.760032,719,588,942, 75,2014-01-20 02:01:07.617893,603,468,98, 18,2014-01-18 01:04:35.819342,627,300,981, 67,2014-01-20 06:20:14.627124,354,472,28, 68,2014-01-14 06:39:57.591206,864,89,840, 75,2014-01-17 07:54:31.455232,934,562,436, 68,2014-01-20 10:14:19.075439,748,809,971, 44,2014-01-14 08:13:04.835426,304,415,659, 44,2014-01-14 12:38:12.367496,73,535,605, 75,2014-01-16 22:46:18.699627,79,943,293, 67,2014-01-11 15:31:08.692402,526,195,559, 
68,2014-01-18 06:34:12.199869,184,650,321, 44,2014-01-18 21:30:29.325115,930,114,291, 75,2014-01-16 09:40:07.173736,601,764,893, 44,2014-01-13 16:57:50.072602,743,527,886, 18,2014-01-19 22:03:11.603158,22,96,931, 67,2014-01-21 00:28:07.900116,705,851,393, 44,2014-01-13 11:39:34.367991,202,691,578, 75,2014-01-11 22:46:19.241953,540,229,365, 67,2014-01-18 12:40:28.94104,231,556,230, 18,2014-01-15 23:15:37.42165,700,180,482, 18,2014-01-19 04:48:16.045866,882,573,704, 75,2014-01-20 19:56:10.290557,242,574,612, 44,2014-01-20 09:31:48.053685,125,739,525, 44,2014-01-13 17:34:32.61212,467,669,520, 67,2014-01-16 18:56:26.917078,887,198,122, 75,2014-01-16 15:50:27.040391,779,776,375, 68,2014-01-12 20:23:14.380129,986,658,575, 67,2014-01-14 04:39:12.555318,796,437,742, 44,2014-01-14 12:28:31.739233,824,194,392, 75,2014-01-11 18:17:18.460377,194,752,249, 75,2014-01-11 06:24:30.31535,192,512,596, 68,2014-01-13 08:29:39.357126,630,432,922, 18,2014-01-18 23:21:54.279719,967,774,470, 18,2014-01-20 16:44:44.942474,970,42,821, 18,2014-01-16 19:09:36.948751,684,578,412, 18,2014-01-15 01:16:25.73495,878,2,82, 67,2014-01-19 20:59:04.017912,156,657,700, 68,2014-01-20 07:22:41.784178,78,854,461, 67,2014-01-18 17:54:38.102447,617,45,273, 18,2014-01-15 13:45:22.71754,384,184,770, 75,2014-01-21 02:53:50.47658,441,975,97, 67,2014-01-15 18:28:50.302735,228,469,425, 67,2014-01-13 05:15:52.042653,500,869,146, 18,2014-01-16 06:33:57.650736,513,352,872, 68,2014-01-18 15:26:40.375336,437,685,543, 18,2014-01-11 21:19:03.645811,969,608,870, 75,2014-01-16 01:02:05.550851,934,106,653, 44,2014-01-10 21:06:14.743327,564,159,473, 75,2014-01-13 00:05:22.888183,590,61,946, 44,2014-01-13 07:42:34.49231,465,587,857, 68,2014-01-16 00:21:52.164977,65,837,710, 68,2014-01-10 22:53:38.934049,557,393,442, 18,2014-01-12 21:38:44.782868,953,257,777, 68,2014-01-15 03:53:40.218663,119,179,661, 75,2014-01-13 20:28:11.54617,439,975,312, 44,2014-01-18 20:35:11.700365,278,956,957, 68,2014-01-20 23:44:07.259473,442,747,272, 18,2014-01-15 20:51:25.926298,913,832,493, 44,2014-01-11 19:14:24.255682,902,688,903, 18,2014-01-14 06:12:38.998706,395,717,276, 67,2014-01-16 19:08:44.386675,6,321,322, 67,2014-01-12 05:38:54.571032,125,837,182, 68,2014-01-16 18:42:18.356251,152,561,186, 67,2014-01-14 12:30:38.666812,93,382,308, 75,2014-01-11 16:23:39.513926,103,416,73, 75,2014-01-12 07:51:55.305003,817,220,325, 44,2014-01-18 17:47:04.401763,242,672,32, 67,2014-01-20 17:22:43.531839,51,511,88, 68,2014-01-11 09:14:37.089181,83,512,875, 18,2014-01-16 05:41:42.504628,660,183,558, 18,2014-01-15 01:08:06.249797,588,202,141, 44,2014-01-11 06:08:27.56618,33,244,439, 68,2014-01-21 04:10:32.807364,834,950,685, 67,2014-01-17 10:23:58.29245,583,560,499, 68,2014-01-16 16:42:25.216916,475,330,711, 44,2014-01-18 13:37:13.1827,695,511,462, 68,2014-01-18 11:43:01.938681,773,201,59, 18,2014-01-11 11:44:30.767768,442,578,929, 44,2014-01-18 14:43:27.926027,685,107,393, 18,2014-01-20 15:48:24.826865,830,196,23, 67,2014-01-13 23:27:05.260077,722,473,806, 18,2014-01-13 09:05:20.091005,518,291,846, 18,2014-01-20 18:42:03.760914,688,101,71, 67,2014-01-16 01:05:50.043364,851,537,265, 68,2014-01-17 16:59:00.309668,320,148,373, 75,2014-01-13 09:10:15.307083,824,874,152, 67,2014-01-13 15:41:01.743728,816,120,772, 18,2014-01-17 10:43:07.569141,354,373,34, 18,2014-01-18 10:01:41.2338,908,219,548, 18,2014-01-14 14:55:25.99941,339,457,950, 67,2014-01-20 20:55:46.568266,111,873,773, 44,2014-01-13 23:10:25.620475,647,863,286, 75,2014-01-16 00:34:20.570444,477,100,978, 75,2014-01-16 
09:38:04.924518,641,473,691, 18,2014-01-17 15:41:04.287287,940,416,478, 75,2014-01-16 20:58:00.08437,200,123,853, 44,2014-01-17 21:30:00.22952,944,930,674, 18,2014-01-15 03:28:08.689469,462,386,881, 67,2014-01-16 08:20:43.616627,975,41,848, 44,2014-01-18 10:44:37.318701,427,728,73, 18,2014-01-20 13:09:51.194098,200,726,497, 68,2014-01-20 13:28:49.866424,831,81,47, 75,2014-01-18 20:53:04.884881,356,214,684, 44,2014-01-20 11:20:24.001461,226,703,834, 67,2014-01-16 17:52:48.158874,416,845,481, 68,2014-01-14 07:35:30.101797,793,758,615, 75,2014-01-17 18:57:37.184161,438,608,402, 44,2014-01-13 23:35:50.097554,263,157,865, 18,2014-01-14 23:20:00.869983,604,661,723, 75,2014-01-15 03:41:05.11019,460,64,846, 18,2014-01-13 09:24:14.924419,212,799,268, 44,2014-01-18 02:47:06.13006,988,200,614, 18,2014-01-17 16:46:25.201194,528,835,813, 75,2014-01-12 22:06:18.685333,563,695,323, 18,2014-01-12 21:52:56.173425,940,773,878, 68,2014-01-14 03:45:25.510862,540,890,552, 75,2014-01-15 11:16:33.992835,481,525,826, 67,2014-01-15 17:33:57.917153,5,710,877, 75,2014-01-20 18:28:33.080422,54,109,942, 67,2014-01-12 15:18:15.226635,939,517,631, 18,2014-01-19 12:29:23.916564,783,738,235, 67,2014-01-20 09:24:19.648688,680,113,669, 44,2014-01-15 18:28:40.84711,940,804,531, 75,2014-01-14 07:03:44.487426,441,309,869, 44,2014-01-15 13:02:24.573205,309,421,174, 18,2014-01-12 04:09:45.134817,899,698,420, 44,2014-01-20 08:01:44.571796,575,610,957, 67,2014-01-12 04:32:24.802725,288,886,168, 67,2014-01-16 11:37:53.824285,117,431,661, 44,2014-01-15 10:22:28.188423,72,114,209, 18,2014-01-19 19:17:02.121426,629,417,659, 68,2014-01-15 18:47:45.018383,431,887,837, 44,2014-01-14 17:51:18.054846,787,708,390, 75,2014-01-17 10:10:07.006726,615,328,773, 68,2014-01-15 00:08:09.019845,232,66,757, 75,2014-01-20 15:44:06.214139,635,748,214, 18,2014-01-20 21:45:37.108523,48,856,379, 68,2014-01-11 13:05:46.204006,708,812,703, 18,2014-01-13 09:19:56.311694,378,508,208, 67,2014-01-14 15:05:37.978506,187,793,139, 44,2014-01-15 20:46:51.314196,42,479,370, 67,2014-01-15 22:44:11.236531,4,618,415, 67,2014-01-11 11:52:44.108986,277,86,447, 18,2014-01-12 07:33:16.51539,701,475,476, 18,2014-01-18 00:50:29.921865,357,925,456, 68,2014-01-13 13:45:40.28241,198,100,930, 68,2014-01-15 15:18:42.026252,154,810,586, 67,2014-01-12 06:07:03.914699,933,368,234, 68,2014-01-18 11:19:38.199983,78,65,106, 44,2014-01-15 03:47:15.106674,922,758,220, 67,2014-01-14 01:25:19.141334,767,584,258, 67,2014-01-16 17:49:02.116547,294,141,140, 18,2014-01-14 07:11:34.755361,704,677,808, 68,2014-01-18 23:53:59.988864,68,981,41, 75,2014-01-20 04:52:46.603974,430,494,325, 18,2014-01-19 00:13:59.328567,795,717,97, 18,2014-01-20 08:03:45.12368,709,524,344, 68,2014-01-19 06:54:31.176189,388,429,607, 67,2014-01-20 08:46:24.131291,985,523,581, 75,2014-01-15 13:41:38.947966,902,936,291, 67,2014-01-13 11:16:59.364611,592,47,652, 68,2014-01-18 22:03:26.253136,542,353,919, 75,2014-01-20 12:29:23.966349,80,319,569, 18,2014-01-17 09:08:17.419876,687,758,276, 44,2014-01-15 02:13:33.259861,912,105,962, 68,2014-01-14 06:37:32.986194,504,653,303, 18,2014-01-16 18:52:23.634015,306,691,951, 44,2014-01-14 17:59:10.368384,55,763,303, 67,2014-01-14 23:43:19.1902,90,450,122, 75,2014-01-19 08:12:19.945708,963,709,959, 68,2014-01-18 13:04:48.347309,405,400,77, 75,2014-01-20 00:30:10.504396,581,398,727, 75,2014-01-14 00:56:31.18224,777,697,767, 75,2014-01-19 04:57:32.456296,26,359,548, 67,2014-01-11 02:03:27.019785,22,613,419, 44,2014-01-21 05:47:01.104523,526,409,940, 18,2014-01-11 
12:43:12.738706,738,880,943, 75,2014-01-15 21:22:09.046036,962,411,828, 67,2014-01-12 05:54:05.019223,687,189,503, 67,2014-01-19 04:02:50.93869,383,5,667, 44,2014-01-20 05:09:24.153129,396,618,942, 18,2014-01-15 11:19:24.160976,893,367,556, 44,2014-01-14 15:51:53.055655,433,877,150, 68,2014-01-13 06:20:58.908491,857,841,480, 67,2014-01-13 05:13:24.14984,724,424,404, 18,2014-01-13 14:44:39.659629,976,513,783, 18,2014-01-11 00:34:58.237476,397,130,685, 67,2014-01-12 07:17:09.27352,795,845,365, 44,2014-01-11 15:39:10.835818,494,336,153, 44,2014-01-20 13:21:22.368767,396,192,131, 18,2014-01-17 00:58:48.221485,34,89,167, 18,2014-01-14 06:56:10.200848,697,892,950, 75,2014-01-18 05:24:48.621903,825,536,814, 68,2014-01-16 07:28:12.187834,522,152,535, 44,2014-01-20 20:04:27.620724,485,246,374, 67,2014-01-12 01:38:21.881765,185,322,43, 67,2014-01-19 18:05:45.174028,643,631,751, 67,2014-01-16 08:56:51.254739,379,613,895, 67,2014-01-15 23:37:32.250568,261,511,778, 68,2014-01-13 11:49:04.364228,578,873,857, 68,2014-01-14 11:09:11.200447,468,626,729, 75,2014-01-13 06:42:20.597877,123,830,156, 44,2014-01-12 06:19:14.868624,594,533,819, 68,2014-01-17 16:05:42.383105,258,108,990, 44,2014-01-11 05:39:53.054173,50,704,685, 75,2014-01-12 12:22:41.888408,883,713,408, 67,2014-01-17 15:52:43.487629,226,416,368, 67,2014-01-11 22:23:05.792879,974,789,425, 75,2014-01-17 13:44:50.934446,688,597,626, 44,2014-01-19 01:46:48.506851,619,961,791, 44,2014-01-20 06:25:56.731987,695,394,912, 75,2014-01-16 12:54:15.087575,928,609,381, 18,2014-01-13 07:06:12.667827,799,663,231, 68,2014-01-13 16:17:49.787643,812,203,957, 18,2014-01-18 23:15:13.996484,651,821,412, 75,2014-01-15 16:19:36.817667,851,77,316, 44,2014-01-16 11:02:29.447272,108,952,837, 44,2014-01-19 03:50:12.233959,144,923,483, 75,2014-01-17 03:36:46.091185,492,238,205, 18,2014-01-17 06:41:40.283508,953,218,300, 67,2014-01-18 11:11:34.602727,915,193,492, 67,2014-01-12 22:35:34.312671,23,548,621, 75,2014-01-20 17:37:50.484355,466,755,380, 68,2014-01-15 10:36:23.225049,795,720,478, 44,2014-01-18 10:03:46.500505,972,661,253, 18,2014-01-20 07:42:18.105079,921,823,148, 18,2014-01-16 16:14:45.106814,42,247,542, 18,2014-01-16 22:09:31.674533,905,511,374, 18,2014-01-15 10:39:09.359818,693,900,765, 68,2014-01-11 09:52:17.357381,207,525,596, 18,2014-01-19 13:58:36.03876,974,244,726, 18,2014-01-19 01:48:20.560265,867,945,798, 75,2014-01-13 20:34:37.955258,587,564,506, 18,2014-01-21 00:17:50.907384,70,473,267, 75,2014-01-15 11:54:02.943371,768,448,698, 68,2014-01-14 06:14:31.009431,133,135,593, 44,2014-01-12 06:40:32.795793,599,392,888, 44,2014-01-11 21:46:46.430999,960,329,347, 67,2014-01-15 08:37:36.802729,99,388,712, 68,2014-01-19 00:25:23.730657,841,441,115, 67,2014-01-20 03:33:34.93827,126,466,996, 67,2014-01-14 09:03:33.534717,604,374,984, 18,2014-01-14 07:19:38.818232,497,277,157, 18,2014-01-12 04:39:47.606097,374,305,318, 67,2014-01-17 05:21:23.322359,873,741,920, 75,2014-01-12 00:34:52.814717,257,959,90, 75,2014-01-17 00:59:24.424182,525,154,740, 67,2014-01-12 10:23:52.769631,131,129,102, 67,2014-01-20 08:25:05.048676,340,113,340, 18,2014-01-12 22:36:10.515368,127,369,117, 67,2014-01-18 21:05:33.053139,998,717,773, 67,2014-01-17 13:36:39.651822,683,572,738, 8,2014-01-11 03:48:30.443568,947,925,587, 8,2014-01-12 19:46:26.734355,680,150,306, 20,2014-01-14 13:03:16.559532,422,355,353, 8,2014-01-15 21:33:11.665239,774,859,753, 60,2014-01-15 07:15:59.163667,722,101,350, 60,2014-01-17 17:28:58.360332,899,749,39, 60,2014-01-16 23:32:36.366365,31,560,591, 8,2014-01-20 
08:34:54.862248,288,244,468, 20,2014-01-15 14:53:08.098157,195,444,600, 20,2014-01-18 16:00:48.531933,475,374,627, 8,2014-01-17 06:31:12.856146,453,609,400, 8,2014-01-19 19:21:37.008035,883,857,507, 8,2014-01-11 18:34:50.554348,144,476,544, 60,2014-01-11 12:09:46.528925,510,687,304, 60,2014-01-20 06:05:37.390549,332,769,351, 8,2014-01-18 13:25:21.452671,866,757,997, 20,2014-01-13 06:15:57.459629,594,257,291, 60,2014-01-18 18:35:50.818783,178,208,798, 60,2014-01-15 21:07:06.785709,436,865,584, 20,2014-01-14 13:21:17.187178,904,174,458, 60,2014-01-20 20:40:49.982906,145,828,919, 8,2014-01-11 04:03:33.497661,77,15,815, 8,2014-01-20 22:37:16.714103,108,19,109, 20,2014-01-15 16:48:35.184151,44,153,821, 60,2014-01-11 01:38:42.312322,229,195,260, 8,2014-01-17 23:17:19.043251,218,896,71, 60,2014-01-13 20:45:48.914034,911,594,578, 8,2014-01-12 02:04:54.089651,19,372,901, 60,2014-01-20 17:04:06.523799,768,231,513, 20,2014-01-15 21:41:40.986974,127,154,10, 60,2014-01-14 09:26:29.931647,661,832,553, 8,2014-01-21 00:52:36.967785,15,10,868, 20,2014-01-17 21:28:07.721329,391,301,501, 60,2014-01-18 02:29:46.491178,968,416,176, 20,2014-01-15 16:25:48.633024,994,825,673, 60,2014-01-11 22:44:06.885416,414,650,58, 8,2014-01-14 13:58:44.85151,982,414,820, 8,2014-01-11 09:58:24.999389,947,635,701, 20,2014-01-11 01:19:01.747663,220,781,29, 8,2014-01-19 08:51:52.949668,584,171,388, 20,2014-01-19 05:59:13.531322,943,166,303, 8,2014-01-17 11:50:14.603809,530,247,467, 8,2014-01-17 22:13:29.957703,929,236,368, 20,2014-01-20 04:34:04.085671,153,444,623, 20,2014-01-18 04:00:01.133153,154,797,793, 60,2014-01-16 22:19:07.348672,925,663,59, 8,2014-01-17 11:59:25.538342,653,982,286, 8,2014-01-20 14:15:58.592782,314,974,919, 60,2014-01-14 10:54:58.167455,444,484,336, 60,2014-01-12 03:06:32.32447,254,947,747, 20,2014-01-13 21:37:15.77996,292,75,9, 60,2014-01-14 01:35:48.150361,428,152,422, 8,2014-01-12 11:10:05.822131,989,777,467, 8,2014-01-13 14:14:32.494483,246,824,523, 60,2014-01-18 22:24:23.334512,533,637,943, 20,2014-01-12 16:48:48.134453,700,744,646, 20,2014-01-20 17:31:51.537734,7,956,931, 20,2014-01-11 13:10:12.248547,407,679,704, 8,2014-01-13 22:53:42.224523,165,127,519, 20,2014-01-20 04:35:58.061952,165,474,204, 20,2014-01-16 14:51:53.23594,24,907,713, 8,2014-01-17 12:20:12.156169,686,783,448, 20,2014-01-19 23:28:35.029737,892,941,724, 60,2014-01-13 06:20:00.957269,885,278,8, 8,2014-01-14 08:49:58.647767,548,678,848, 20,2014-01-14 09:54:23.662761,719,290,692, 60,2014-01-14 09:04:07.842684,377,828,48, 20,2014-01-18 02:48:43.499277,786,957,668, 8,2014-01-14 23:52:48.66215,251,453,202, 20,2014-01-14 14:23:09.590767,828,497,423, 8,2014-01-16 05:40:36.449365,963,773,399, 8,2014-01-12 23:52:02.193472,334,62,375, 8,2014-01-10 20:13:24.194575,129,48,792, 75,2014-01-15 01:11:44.828457,813,44,967, 68,2014-01-18 08:43:23.537495,251,483,813, 67,2014-01-11 18:13:02.876871,185,330,828, 75,2014-01-12 05:15:31.328962,584,598,367, 75,2014-01-17 10:25:41.642574,649,568,244, 18,2014-01-17 14:27:47.983684,485,609,811, 18,2014-01-18 07:25:03.003915,775,12,554, 75,2014-01-11 15:04:51.002392,789,669,904, 75,2014-01-18 04:20:05.341484,97,236,941, 44,2014-01-16 15:23:39.042675,854,517,17, 75,2014-01-19 20:53:11.563075,843,16,774, 18,2014-01-21 04:54:43.296743,319,909,200, 75,2014-01-16 09:41:29.950479,214,459,292, 18,2014-01-14 02:47:14.506446,731,674,324, 67,2014-01-14 05:09:14.306593,223,634,539, 68,2014-01-17 20:22:02.746272,167,481,557, 18,2014-01-15 04:34:00.937864,815,352,395, 75,2014-01-18 17:46:51.109322,408,102,616, 44,2014-01-15 
14:47:26.476929,684,160,645, 75,2014-01-14 02:07:35.876133,261,225,851, citus-7.0.3/src/test/regress/expected/000077500000000000000000000000001317107136600176765ustar00rootroot00000000000000citus-7.0.3/src/test/regress/expected/.gitignore000066400000000000000000000011451317107136600216670ustar00rootroot00000000000000/multi_agg_distinct.out /multi_agg_type_conversion.out /multi_alter_table_statements.out /multi_alter_table_statements_0.out /multi_append_table_to_shard.out /multi_behavioral_analytics_create_table.out /multi_copy.out /multi_create_schema.out /multi_large_shardid.out /multi_master_delete_protocol.out /multi_outer_join.out /multi_outer_join_reference.out /multi_load_data.out /multi_load_large_records.out /multi_load_more_data.out /worker_copy.out /multi_complex_count_distinct.out /multi_mx_copy_data.out /multi_behavioral_analytics_create_table.out /multi_insert_select_behavioral_analytics_create_table.out citus-7.0.3/src/test/regress/expected/isolation_add_node_vs_reference_table_operations.out000066400000000000000000000274451317107136600324210ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-commit: COMMIT; step s2-copy-to-reference-table: <... completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 10 57638 t 10 master_remove_node starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s2-begin: BEGIN; step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 10 57638 t 10 master_remove_node starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-commit: COMMIT; step s2-insert-to-reference-table: <... 
completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 6 57638 t 6 master_remove_node starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s2-begin: BEGIN; step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 6 57638 t 6 master_remove_node starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s2-begin: BEGIN; step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_reference_table step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-commit: COMMIT; step s2-copy-to-reference-table: <... 
completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 master_remove_node starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-begin: BEGIN; step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s2-commit: <... completed> step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 master_remove_node starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_reference_table step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-commit: COMMIT; step s2-insert-to-reference-table: <... completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-begin: BEGIN; step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_reference_table step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_reference_table step s2-begin: BEGIN; step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... 
completed> nodename nodeport isactive localhost 57638 t step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node citus-7.0.3/src/test/regress/expected/isolation_add_node_vs_reference_table_operations_0.out000066400000000000000000000273671317107136600326430ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-commit: COMMIT; step s2-copy-to-reference-table: <... completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 10 57638 t 10 master_remove_node starting permutation: s2-load-metadata-cache s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s2-begin: BEGIN; step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 10 57638 t 10 master_remove_node starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-commit: COMMIT; step s2-insert-to-reference-table: <... completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 6 57638 t 6 master_remove_node starting permutation: s2-load-metadata-cache s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s2-begin: BEGIN; step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... 
completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 6 57638 t 6 master_remove_node starting permutation: s2-load-metadata-cache s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s2-load-metadata-cache s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_reference_table step s2-load-metadata-cache: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s2-begin: BEGIN; step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s1-begin s1-add-second-worker s2-copy-to-reference-table s1-commit s2-print-content create_reference_table step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-commit: COMMIT; step s2-copy-to-reference-table: <... completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 master_remove_node starting permutation: s2-begin s2-copy-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-begin: BEGIN; step s2-copy-to-reference-table: COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... 
completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 master_remove_node starting permutation: s1-begin s1-add-second-worker s2-insert-to-reference-table s1-commit s2-print-content create_reference_table step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-commit: COMMIT; step s2-insert-to-reference-table: <... completed> step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s2-begin s2-insert-to-reference-table s1-add-second-worker s2-commit s2-print-content create_reference_table step s2-begin: BEGIN; step s2-insert-to-reference-table: INSERT INTO test_reference_table VALUES (6); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s1-begin s1-add-second-worker s2-ddl-on-reference-table s1-commit s2-print-index-count create_reference_table step s1-begin: BEGIN; step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57638 t step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-commit: COMMIT; step s2-ddl-on-reference-table: <... completed> step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node starting permutation: s2-begin s2-ddl-on-reference-table s1-add-second-worker s2-commit s2-print-index-count create_reference_table step s2-begin: BEGIN; step s2-ddl-on-reference-table: CREATE INDEX reference_index ON test_reference_table(test_id); step s1-add-second-worker: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-second-worker: <... completed> nodename nodeport isactive localhost 57638 t step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57638 t 1 master_remove_node citus-7.0.3/src/test/regress/expected/isolation_add_remove_node.out000066400000000000000000000320431317107136600256240ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-begin s1-add-node-1 s2-remove-node-1 s1-commit s1-show-nodes ?column? 1 step s1-begin: BEGIN; step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 
1 step s2-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); step s1-commit: COMMIT; step s2-remove-node-1: <... completed> master_remove_node step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive master_remove_node starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-commit s1-show-nodes ?column? 1 step s1-begin: BEGIN; step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s2-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); step s1-commit: COMMIT; step s2-add-node-2: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 t localhost 57638 t master_remove_node starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-commit s1-show-nodes ?column? 1 step s1-begin: BEGIN; step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); step s1-commit: COMMIT; step s2-add-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 t master_remove_node starting permutation: s1-begin s1-add-node-1 s2-add-node-2 s1-abort s1-show-nodes ?column? 1 step s1-begin: BEGIN; step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s2-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); step s1-abort: ABORT; step s2-add-node-2: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57638 t master_remove_node starting permutation: s1-begin s1-add-node-1 s2-add-node-1 s1-abort s1-show-nodes ?column? 1 step s1-begin: BEGIN; step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s2-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); step s1-abort: ABORT; step s2-add-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 t master_remove_node starting permutation: s1-add-node-1 s1-add-node-2 s1-begin s1-remove-node-1 s2-remove-node-2 s1-commit s1-show-nodes ?column? 1 step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s1-begin: BEGIN; step s1-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); master_remove_node step s2-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); step s1-commit: COMMIT; step s2-remove-node-2: <... completed> master_remove_node step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive master_remove_node starting permutation: s1-add-node-1 s1-begin s1-remove-node-1 s2-remove-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); master_remove_node step s2-remove-node-1: SELECT * FROM master_remove_node('localhost', 57637); step s1-commit: COMMIT; step s2-remove-node-1: <... 
completed> error in steps s1-commit s2-remove-node-1: ERROR: node at "localhost:57637" does not exist step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive master_remove_node starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); ?column? 1 step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); step s1-commit: COMMIT; step s2-activate-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 t master_remove_node starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); ?column? 1 step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); step s1-commit: COMMIT; step s2-disable-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 f master_remove_node starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-activate-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); ?column? 1 step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); step s1-commit: COMMIT; step s2-activate-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 t master_remove_node starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); ?column? 1 step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); step s1-commit: COMMIT; step s2-disable-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 f master_remove_node starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); ?column? 1 step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); step s1-commit: COMMIT; step s2-activate-node-1: <... completed> ?column? 
1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 t master_remove_node starting permutation: s1-add-node-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); ?column? 1 step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); step s1-commit: COMMIT; step s2-disable-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 f master_remove_node starting permutation: s1-add-inactive-1 s1-begin s1-disable-node-1 s2-activate-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); ?column? 1 step s2-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); step s1-commit: COMMIT; step s2-activate-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 t master_remove_node starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-commit s1-show-nodes ?column? 1 step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); ?column? 1 step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); step s1-commit: COMMIT; step s2-disable-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 f master_remove_node starting permutation: s1-add-inactive-1 s1-begin s1-activate-node-1 s2-disable-node-1 s1-abort s1-show-nodes ?column? 1 step s1-add-inactive-1: SELECT 1 FROM master_add_inactive_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-activate-node-1: SELECT 1 FROM master_activate_node('localhost', 57637); ?column? 1 step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); step s1-abort: ABORT; step s2-disable-node-1: <... completed> ?column? 1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 f master_remove_node starting permutation: s1-add-node-1 s1-begin s1-disable-node-1 s2-disable-node-1 s1-abort s1-show-nodes ?column? 1 step s1-add-node-1: SELECT 1 FROM master_add_node('localhost', 57637); ?column? 1 step s1-begin: BEGIN; step s1-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); ?column? 1 step s2-disable-node-1: SELECT 1 FROM master_disable_node('localhost', 57637); step s1-abort: ABORT; step s2-disable-node-1: <... completed> ?column? 
1 step s1-show-nodes: SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; nodename nodeport isactive localhost 57637 f master_remove_node citus-7.0.3/src/test/regress/expected/isolation_append_copy_vs_all.out000066400000000000000000000725521317107136600263640ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM append_copy WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert: INSERT INTO append_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy 
FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-update: UPDATE append_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-delete: DELETE FROM append_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-truncate: TRUNCATE append_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-drop: DROP TABLE append_copy; step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; ERROR: relation "append_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-drop-index: DROP INDEX append_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-copy-additional-column: COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; step s2-ddl-drop-column: ALTER TABLE append_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('append_copy'); citus_total_relation_size 32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM append_copy;'); master_modify_multiple_shards 5 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 starting permutation: s1-initialize s1-begin s1-copy s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); step s1-commit: COMMIT; step s2-master-apply-delete-command: <... 
completed> master_apply_delete_command 1 step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-drop-all-shards: SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards 2 step s1-select-count: SELECT COUNT(*) FROM append_copy; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE append_copy; step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s1-select-count: SELECT COUNT(*) FROM append_copy; count 0 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM append_copy WHERE id = 1; id data int_data 1 b 1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM append_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO 
append_copy VALUES(0, 'k', 0); step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO append_copy SELECT * FROM append_copy; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE append_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM append_copy WHERE id = 1; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE append_copy; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE append_copy; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: relation "append_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM append_copy; ERROR: relation "append_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX append_copy_index ON append_copy(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX append_copy_index; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE append_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE append_copy DROP new_column; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE append_copy RENAME data TO new_column; step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('append_copy'); citus_total_relation_size 32768 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM append_copy;'); master_modify_multiple_shards 5 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); master_apply_delete_command 1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); master_drop_all_shards 1 step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE append_copy; step s1-create-non-distributed-table: CREATE TABLE append_copy(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('append_copy', 'id', 'append'); create_distributed_table step s2-copy: COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM append_copy; count 5 citus-7.0.3/src/test/regress/expected/isolation_cancellation.out000066400000000000000000000050071317107136600251460ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-timeout s1-sleep10000 s1-reset s1-drop step s1-timeout: SET statement_timeout = '100ms'; step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout step s1-reset: RESET ALL; step s1-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-sleep10000 s1-reset s2-drop step s1-timeout: SET statement_timeout = '100ms'; step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout step s1-reset: RESET ALL; step s2-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s1-drop step s1-timeout: SET statement_timeout = '100ms'; step s1-begin: BEGIN; step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout step s1-rollback: ROLLBACK; step s1-reset: RESET ALL; step s1-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-sleep10000 s1-rollback s1-reset s2-drop step s1-timeout: SET statement_timeout = '100ms'; step s1-begin: BEGIN; step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout step s1-rollback: ROLLBACK; step s1-reset: RESET ALL; step s2-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s1-drop step s1-timeout: SET statement_timeout = '100ms'; step s1-begin: BEGIN; step s1-update1: UPDATE cancel_table SET data = '' WHERE test_id = 1; step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout step s1-rollback: ROLLBACK; step s1-reset: RESET ALL; step s1-drop: DROP TABLE cancel_table; starting permutation: s1-timeout s1-begin s1-update1 s1-sleep10000 s1-rollback s1-reset s2-drop step s1-timeout: SET statement_timeout = '100ms'; step s1-begin: BEGIN; step s1-update1: UPDATE cancel_table SET data = '' WHERE test_id = 1; step s1-sleep10000: SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; ERROR: canceling statement due to statement timeout step s1-rollback: ROLLBACK; step s1-reset: RESET ALL; step s2-drop: DROP TABLE cancel_table; citus-7.0.3/src/test/regress/expected/isolation_cluster_management.out000066400000000000000000000006461317107136600263730ustar00rootroot00000000000000Parsed test spec with 1 sessions starting permutation: s1a step s1a: SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57637); SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); nodename nodeport isactive localhost 57637 t nodename nodeport isactive localhost 57638 t citus-7.0.3/src/test/regress/expected/isolation_concurrent_dml.out000066400000000000000000000033541317107136600255330ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-update s1-commit master_create_worker_shards step s1-begin: BEGIN; step s1-insert: INSERT INTO test_concurrent_dml VALUES(1); step s2-update: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; step s1-commit: COMMIT; step s2-update: <... 
completed> starting permutation: s1-insert s2-update master_create_worker_shards step s1-insert: INSERT INTO test_concurrent_dml VALUES(1); step s2-update: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; starting permutation: s1-begin s1-multi-insert s2-update s1-commit master_create_worker_shards step s1-begin: BEGIN; step s1-multi-insert: INSERT INTO test_concurrent_dml VALUES (1), (2); step s2-update: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; step s1-commit: COMMIT; step s2-update: <... completed> starting permutation: s1-begin s1-multi-insert s2-multi-insert-overlap s1-commit master_create_worker_shards step s1-begin: BEGIN; step s1-multi-insert: INSERT INTO test_concurrent_dml VALUES (1), (2); step s2-multi-insert-overlap: INSERT INTO test_concurrent_dml VALUES (1), (4); step s1-commit: COMMIT; starting permutation: s1-begin s2-begin s1-multi-insert s2-multi-insert s1-commit s2-commit master_create_worker_shards step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-multi-insert: INSERT INTO test_concurrent_dml VALUES (1), (2); step s2-multi-insert: INSERT INTO test_concurrent_dml VALUES (3), (4); step s1-commit: COMMIT; step s2-commit: COMMIT; citus-7.0.3/src/test/regress/expected/isolation_copy_placement_vs_copy_placement.out000066400000000000000000000036051317107136600313100ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-load-cache s2-load-cache s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit step s1-load-cache: COPY test_hash_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; step s2-load-cache: COPY test_hash_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); step s2-commit: COMMIT; step s1-repair-placement: <... completed> error in steps s2-commit s1-repair-placement: ERROR: target placement must be in inactive state starting permutation: s2-set-placement-inactive s2-begin s2-repair-placement s1-repair-placement s2-commit step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); step s2-commit: COMMIT; step s1-repair-placement: <... 
completed> error in steps s2-commit s1-repair-placement: ERROR: target placement must be in inactive state citus-7.0.3/src/test/regress/expected/isolation_copy_placement_vs_modification.out000066400000000000000000000320121317107136600307450ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 1 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-update: UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5; step s2-commit: COMMIT; step s1-update: <... completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 starting permutation: s1-load-cache s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 1 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-delete: DELETE FROM test_copy_placement_vs_modification WHERE x = 5; step s2-commit: COMMIT; step s1-delete: <... completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 57638 t starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 0 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); step s2-commit: COMMIT; step s1-insert: <... 
completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 10 57638 t 10 starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 0 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-copy: COPY test_copy_placement_vs_modification FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; step s2-commit: COMMIT; step s1-copy: <... completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 starting permutation: s1-load-cache s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count step s1-load-cache: TRUNCATE test_copy_placement_vs_modification; step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 0 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-ddl: CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x); step s2-commit: COMMIT; step s1-ddl: <... completed> step s1-commit: COMMIT; step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57637 t 1 57638 t 1 57638 t 1 starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-update s2-commit s1-commit s2-print-content step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 1 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-update: UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5; step s2-commit: COMMIT; step s1-update: <... 
completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 starting permutation: s1-insert s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-delete s2-commit s1-commit s2-print-content step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 1 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-delete: DELETE FROM test_copy_placement_vs_modification WHERE x = 5; step s2-commit: COMMIT; step s1-delete: <... completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 57638 t starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-insert s2-commit s1-commit s2-print-content step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 0 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-insert: INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); step s2-commit: COMMIT; step s1-insert: <... completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 10 57638 t 10 starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-copy s2-commit s1-commit s2-print-content step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 0 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-copy: COPY test_copy_placement_vs_modification FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; step s2-commit: COMMIT; step s1-copy: <... 
completed> step s1-commit: COMMIT; step s2-print-content: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; nodeport success result 57637 t 5 57638 t 5 starting permutation: s1-begin s1-select s2-set-placement-inactive s2-begin s2-repair-placement s1-ddl s2-commit s1-commit s2-print-index-count step s1-begin: BEGIN; step s1-select: SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; count 0 step s2-set-placement-inactive: UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; step s2-begin: BEGIN; step s2-repair-placement: SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); master_copy_shard_placement step s1-ddl: CREATE INDEX test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x); step s2-commit: COMMIT; step s1-ddl: <... completed> step s1-commit: COMMIT; step s2-print-index-count: SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; nodeport success result 57637 t 1 57637 t 1 57638 t 1 57638 t 1 citus-7.0.3/src/test/regress/expected/isolation_create_restore_point.out000066400000000000000000000067721317107136600267430ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-begin s1-create-distributed s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-create-distributed: CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text); SELECT create_distributed_table('test_create_distributed_table', 'test_id'); create_distributed_table step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 1 starting permutation: s1-begin s1-insert s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-insert: INSERT INTO restore_table VALUES (1,'hello'); step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 1 starting permutation: s1-begin s1-modify-multiple s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-modify-multiple: SELECT master_modify_multiple_shards($$UPDATE restore_table SET data = 'world'$$); master_modify_multiple_shards 0 step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 1 starting permutation: s1-begin s1-ddl s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-ddl: ALTER TABLE restore_table ADD COLUMN x int; step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 1 starting permutation: s1-begin s1-copy s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-copy: COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV; step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 
1 starting permutation: s1-begin s1-drop s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-drop: DROP TABLE restore_table; step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 1 starting permutation: s1-begin s1-add-node s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-add-node: SELECT 1 FROM master_add_inactive_node('localhost', 9999); ?column? 1 step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 1 starting permutation: s1-begin s1-remove-node s2-create-restore s1-commit create_distributed_table step s1-begin: BEGIN; step s1-remove-node: SELECT master_remove_node('localhost', 9999); master_remove_node step s2-create-restore: SELECT 1 FROM citus_create_restore_point('citus-test'); step s1-commit: COMMIT; step s2-create-restore: <... completed> ?column? 1 citus-7.0.3/src/test/regress/expected/isolation_create_table_vs_add_remove_node.out000066400000000000000000000250411317107136600310260ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select node_name node_port localhost 57637 step s1-begin: BEGIN; step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); step s1-commit: COMMIT; step s2-create-table-1: <... completed> create_distributed_table step s1-show-placements: SELECT nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'dist_table'::regclass ORDER BY nodename, nodeport; nodename nodeport localhost 57637 localhost 57637 localhost 57638 localhost 57638 step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s1-begin s1-add-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select node_name node_port localhost 57637 step s1-begin: BEGIN; step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); step s1-abort: ABORT; step s2-create-table-1: <... completed> create_distributed_table step s1-show-placements: SELECT nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'dist_table'::regclass ORDER BY nodename, nodeport; nodename nodeport localhost 57637 localhost 57637 localhost 57637 localhost 57637 step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s2-begin s2-create-table-1 s1-add-node-2 s2-commit s1-show-placements s2-select node_name node_port localhost 57637 step s2-begin: BEGIN; step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); create_distributed_table step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); step s2-commit: COMMIT; step s1-add-node-2: <... completed> ?column? 
1 step s1-show-placements: SELECT nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'dist_table'::regclass ORDER BY nodename, nodeport; nodename nodeport localhost 57637 localhost 57637 localhost 57637 localhost 57637 step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-commit s1-show-placements s2-select node_name node_port localhost 57637 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s1-begin: BEGIN; step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); step s1-commit: COMMIT; step s2-create-table-1: <... completed> create_distributed_table step s1-show-placements: SELECT nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'dist_table'::regclass ORDER BY nodename, nodeport; nodename nodeport localhost 57637 localhost 57637 localhost 57637 localhost 57637 step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-1 s1-abort s1-show-placements s2-select node_name node_port localhost 57637 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s1-begin: BEGIN; step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); step s1-abort: ABORT; step s2-create-table-1: <... completed> create_distributed_table step s1-show-placements: SELECT nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'dist_table'::regclass ORDER BY nodename, nodeport; nodename nodeport localhost 57637 localhost 57637 localhost 57638 localhost 57638 step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s1-add-node-2 s2-begin s2-create-table-1 s1-remove-node-2 s2-commit s1-show-placements s2-select node_name node_port localhost 57637 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s2-begin: BEGIN; step s2-create-table-1: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); create_distributed_table step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); step s2-commit: COMMIT; step s1-remove-node-2: <... completed> error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements step s1-show-placements: SELECT nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'dist_table'::regclass ORDER BY nodename, nodeport; nodename nodeport localhost 57637 localhost 57637 localhost 57638 localhost 57638 step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-table-2 s1-commit s2-select node_name node_port localhost 57637 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 
1 step s1-begin: BEGIN; step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node step s2-create-table-2: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 2; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); step s1-commit: COMMIT; step s2-create-table-2: <... completed> error in steps s1-commit s2-create-table-2: ERROR: replication_factor (2) exceeds number of worker nodes (1) step s2-select: SELECT * FROM dist_table; ERROR: relation "dist_table" does not exist master_remove_node starting permutation: s1-add-node-2 s2-begin s2-create-table-2 s1-remove-node-2 s2-commit s2-select node_name node_port localhost 57637 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s2-begin: BEGIN; step s2-create-table-2: SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 2; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); create_distributed_table step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); step s2-commit: COMMIT; step s1-remove-node-2: <... completed> error in steps s2-commit s1-remove-node-2: ERROR: you cannot remove the primary node of a node group which has shard placements step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s1-add-node-2 s1-begin s1-remove-node-2 s2-create-append-table s1-commit s2-select node_name node_port localhost 57637 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s1-begin: BEGIN; step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); master_remove_node step s2-create-append-table: SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x', 'append'); SELECT 1 FROM master_create_empty_shard('dist_table'); step s1-commit: COMMIT; step s2-create-append-table: <... completed> create_distributed_table ?column? 1 step s2-select: SELECT * FROM dist_table; x y master_remove_node starting permutation: s1-add-node-2 s2-begin s2-create-append-table s1-remove-node-2 s2-commit s2-select node_name node_port localhost 57637 step s1-add-node-2: SELECT 1 FROM master_add_node('localhost', 57638); ?column? 1 step s2-begin: BEGIN; step s2-create-append-table: SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x', 'append'); SELECT 1 FROM master_create_empty_shard('dist_table'); create_distributed_table ?column? 1 step s1-remove-node-2: SELECT * FROM master_remove_node('localhost', 57638); step s2-commit: COMMIT; step s1-remove-node-2: <... completed> master_remove_node step s2-select: SELECT * FROM dist_table; x y master_remove_node citus-7.0.3/src/test/regress/expected/isolation_data_migration.out000066400000000000000000000064671317107136600255070ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s2-begin s2-copy s1-create_distributed_table s2-commit s2-select step s2-begin: BEGIN; step s2-copy: COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV; step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); step s2-commit: COMMIT; step s1-create_distributed_table: <... 
completed> create_distributed_table step s2-select: SELECT * FROM migration_table ORDER BY test_id; test_id data 1 hello starting permutation: s1-begin s1-create_distributed_table s2-copy s1-commit s2-select step s1-begin: BEGIN; step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table step s2-copy: COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s2-select: SELECT * FROM migration_table ORDER BY test_id; test_id data 1 hello starting permutation: s2-begin s2-insert s1-create_distributed_table s2-commit s2-select step s2-begin: BEGIN; step s2-insert: INSERT INTO migration_table VALUES (1, 'hello'); step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); step s2-commit: COMMIT; step s1-create_distributed_table: <... completed> create_distributed_table step s2-select: SELECT * FROM migration_table ORDER BY test_id; test_id data 1 hello starting permutation: s1-begin s1-create_distributed_table s2-insert s1-commit s2-select step s1-begin: BEGIN; step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table step s2-insert: INSERT INTO migration_table VALUES (1, 'hello'); step s1-commit: COMMIT; step s2-insert: <... completed> step s2-select: SELECT * FROM migration_table ORDER BY test_id; test_id data 1 hello starting permutation: s1-begin-serializable s2-copy s1-create_distributed_table s1-commit s2-select step s1-begin-serializable: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; SELECT 1; ?column? 1 step s2-copy: COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV; step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table step s1-commit: COMMIT; step s2-select: SELECT * FROM migration_table ORDER BY test_id; test_id data 1 hello starting permutation: s1-begin-serializable s2-insert s1-create_distributed_table s1-commit s2-select step s1-begin-serializable: BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; SELECT 1; ?column? 1 step s2-insert: INSERT INTO migration_table VALUES (1, 'hello'); step s1-create_distributed_table: SELECT create_distributed_table('migration_table', 'test_id'); create_distributed_table step s1-commit: COMMIT; step s2-select: SELECT * FROM migration_table ORDER BY test_id; test_id data 1 hello citus-7.0.3/src/test/regress/expected/isolation_ddl_vs_all.out000066400000000000000000000771601317107136600246260ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> error in steps s1-commit s2-ddl-create-index: ERROR: duplicate key value violates unique constraint "pg_class_relname_nsp_index" step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> error in steps s1-commit s2-ddl-create-index-concurrently: ERROR: relation "ddl_hash_index" already exists step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-add-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-ddl-rename-column s1-commit s2-commit s1-show-indexes s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-create-index s1-commit s2-commit s1-show-columns s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-ddl-create-index-concurrently s1-commit s1-show-columns s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> error in steps s1-commit s2-ddl-rename-column: ERROR: column "data" does not exist step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-table-size s1-commit s2-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size 57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-master-modify-multiple-shards s1-commit s2-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> master_modify_multiple_shards 5 step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-create-index s2-distribute-table s1-commit s2-commit s1-show-indexes create_distributed_table step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s2-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,4) (localhost,57638,t,4) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; step s2-table-size: SELECT citus_total_relation_size('ddl_hash'); step s1-commit: COMMIT; step s2-table-size: <... 
completed> citus_total_relation_size 57344 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> master_modify_multiple_shards 5 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-add-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; step s2-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-table-size s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s2-table-size: SELECT citus_total_relation_size('ddl_hash'); step s1-commit: COMMIT; step s2-table-size: <... 
completed> citus_total_relation_size 57344 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-master-modify-multiple-shards s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> master_modify_multiple_shards 5 step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-distribute-table s1-commit s2-commit s1-show-columns create_distributed_table step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s2-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size 57344 step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); master_modify_multiple_shards 5 step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-create-index s1-commit s2-commit s1-show-indexes create_distributed_table step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table step s2-ddl-create-index: CREATE INDEX ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s2-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,4) (localhost,57638,t,4) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-table-size s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size 57344 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); master_modify_multiple_shards 5 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-ddl-create-index-concurrently s1-commit s1-show-indexes create_distributed_table step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... 
completed> step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); run_command_on_workers (localhost,57637,t,4) (localhost,57638,t,4) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size 57344 step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); master_modify_multiple_shards 5 step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-add-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table step s2-ddl-add-column: ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('ddl_hash'); citus_total_relation_size 57344 step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); master_modify_multiple_shards 5 step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-ddl-rename-column s1-commit s2-commit s1-show-columns create_distributed_table step s1-drop: DROP TABLE ddl_hash; step s1-create-non-distributed-table: CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('ddl_hash', 'id'); create_distributed_table step s2-ddl-rename-column: ALTER TABLE ddl_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s2-commit: COMMIT; step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/isolation_delete_vs_all.out000066400000000000000000000441561317107136600253240ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-truncate: TRUNCATE delete_hash; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-drop: DROP TABLE delete_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-delete s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-ddl-drop-index: DROP INDEX delete_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-delete s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY delete_hash_index ON delete_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-delete s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-ddl-drop-column: ALTER TABLE delete_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-ddl-rename-column: ALTER TABLE delete_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-table-size s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-table-size: SELECT citus_total_relation_size('delete_hash'); citus_total_relation_size 57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM delete_hash;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> master_modify_multiple_shards 4 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 0 restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-delete s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE delete_hash; step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM delete_hash WHERE id = 4; step s2-distribute-table: SELECT create_distributed_table('delete_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 8 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE delete_hash; step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE delete_hash; step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> error in steps s1-commit s2-delete: ERROR: relation "delete_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; ERROR: relation "delete_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-delete s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX delete_hash_index ON delete_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX delete_hash_index; step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE delete_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE delete_hash DROP new_column; step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-delete s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE delete_hash RENAME data TO new_column; step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('delete_hash'); citus_total_relation_size 57344 step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 4 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM delete_hash;'); master_modify_multiple_shards 5 step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 0 restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE delete_hash; step s1-create-non-distributed-table: CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('delete_hash', 'id'); create_distributed_table step s2-delete: DELETE FROM delete_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM delete_hash; count 8 restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/isolation_distributed_deadlock_detection.out000066400000000000000000000545321317107136600307270ustar00rootroot00000000000000Parsed test spec with 7 sessions starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s2-update-1 deadlock-checker-call s1-update-2 deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-update-1: <... completed> step s1-update-2: <... completed> error in steps deadlock-checker-call s2-update-1 s1-update-2: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s1-update-1-rep-2 s2-update-2-rep-2 s2-update-1-rep-2 deadlock-checker-call s1-update-2-rep-2 deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update-1-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; step s2-update-2-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; step s2-update-1-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s1-update-2-rep-2: UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-update-1-rep-2: <... completed> step s1-update-2-rep-2: <... 
completed> error in steps deadlock-checker-call s2-update-1-rep-2 s1-update-2-rep-2: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s1-set-2pc s2-set-2pc s1-update-1 s2-update-2 s2-update-1 deadlock-checker-call s1-update-2 deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-set-2pc: set citus.multi_shard_commit_protocol TO '2pc'; step s2-set-2pc: set citus.multi_shard_commit_protocol TO '2pc'; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-update-1: <... completed> step s1-update-2: <... completed> error in steps deadlock-checker-call s2-update-1 s1-update-2: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s1-update-2 deadlock-checker-call s2-upsert-select-all deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s2-upsert-select-all: INSERT INTO deadlock_detection_test SELECT * FROM deadlock_detection_test ON CONFLICT(user_id) DO UPDATE SET some_val = deadlock_detection_test.some_val + 5 RETURNING *; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s1-update-2: <... completed> step s2-upsert-select-all: <... completed> error in steps deadlock-checker-call s1-update-2 s2-upsert-select-all: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s1-update-1 s2-update-2 s1-update-2 deadlock-checker-call s2-ddl deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s2-ddl: ALTER TABLE deadlock_detection_test ADD COLUMN test_col INT; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s1-update-2: <... completed> step s2-ddl: <... 
completed> error in steps deadlock-checker-call s1-update-2 s2-ddl: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s1-insert-dist-10 s2-insert-local-10 s2-insert-dist-10 s1-insert-local-10 deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-insert-dist-10: INSERT INTO deadlock_detection_test VALUES (10, 10); step s2-insert-local-10: INSERT INTO local_deadlock_table VALUES (10, 10); step s2-insert-dist-10: INSERT INTO deadlock_detection_test VALUES (10, 10); step s1-insert-local-10: INSERT INTO local_deadlock_table VALUES (10, 10); step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-insert-dist-10: <... completed> step s1-insert-local-10: <... completed> error in steps deadlock-checker-call s2-insert-dist-10 s1-insert-local-10: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s2-insert-ref-10 s1-insert-ref-11 s2-insert-ref-11 s1-insert-ref-10 deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s2-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); step s1-insert-ref-11: INSERT INTO deadlock_detection_reference VALUES (11, 11); step s2-insert-ref-11: INSERT INTO deadlock_detection_reference VALUES (11, 11); step s1-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-insert-ref-11: <... completed> step s1-insert-ref-10: <... completed> error in steps deadlock-checker-call s2-insert-ref-11 s1-insert-ref-10: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s2-insert-ref-10 s1-update-1 deadlock-checker-call s2-update-1 s1-insert-ref-10 deadlock-checker-call s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s2-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; step s1-insert-ref-10: INSERT INTO deadlock_detection_reference VALUES (10, 10); step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-update-1: <... completed> step s1-insert-ref-10: <... 
completed> error in steps deadlock-checker-call s2-update-1 s1-insert-ref-10: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s1-update-1 s2-update-2 s3-update-3 deadlock-checker-call s1-update-2 s2-update-3 s3-update-1 deadlock-checker-call s3-finish s2-finish s1-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s1-update-2: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; step s3-update-1: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 1; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-update-3: <... completed> step s3-update-1: <... completed> error in steps deadlock-checker-call s2-update-3 s3-update-1: ERROR: canceling the transaction since it has involved in a distributed deadlock step s3-finish: COMMIT; step s2-finish: COMMIT; step s1-update-2: <... completed> step s1-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s2-update-1 s1-update-1 s2-update-2 s3-update-3 s3-update-2 deadlock-checker-call s2-update-3 deadlock-checker-call s3-finish s2-finish s1-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s2-update-1: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s3-update-2: <... completed> step s2-update-3: <... completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it has involved in a distributed deadlock step s3-finish: COMMIT; step s2-finish: COMMIT; step s1-update-1: <... 
completed> step s1-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s1-update-1 s2-update-2 s3-update-3 s3-update-2 deadlock-checker-call s4-update-4 s2-update-3 deadlock-checker-call s3-finish s2-finish s1-finish s4-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s4-begin: BEGIN; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s3-update-2: <... completed> step s2-update-3: <... completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it has involved in a distributed deadlock step s3-finish: COMMIT; step s2-finish: COMMIT; step s1-finish: COMMIT; step s4-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s4-update-1 s1-update-1 deadlock-checker-call s2-update-2 s3-update-3 s2-update-3 s3-update-2 deadlock-checker-call s3-finish s2-finish s4-finish s1-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s4-begin: BEGIN; step s4-update-1: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-update-3: <... completed> step s3-update-2: <... completed> error in steps deadlock-checker-call s2-update-3 s3-update-2: ERROR: canceling the transaction since it has involved in a distributed deadlock step s3-finish: COMMIT; step s2-finish: COMMIT; step s4-finish: COMMIT; step s1-update-1: <... 
completed> step s1-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s1-update-1 s4-update-4 s2-update-2 s3-update-3 s3-update-2 s4-update-1 s1-update-4 deadlock-checker-call s1-finish s4-finish s2-update-3 deadlock-checker-call s2-finish s3-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s4-begin: BEGIN; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; step s4-update-1: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1; step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s4-update-1: <... completed> step s1-update-4: <... completed> error in steps deadlock-checker-call s4-update-1 s1-update-4: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s4-finish: COMMIT; step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s3-update-2: <... completed> step s2-update-3: <... completed> error in steps deadlock-checker-call s3-update-2 s2-update-3: ERROR: canceling the transaction since it has involved in a distributed deadlock step s2-finish: COMMIT; step s3-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s1-update-1 s5-update-5 s3-update-2 s2-update-3 s4-update-4 s3-update-4 deadlock-checker-call s6-update-6 s4-update-6 s1-update-5 s5-update-1 deadlock-checker-call s1-finish s5-finish s6-finish s4-finish s3-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s4-begin: BEGIN; step s5-begin: BEGIN; step s6-begin: BEGIN; step s1-update-1: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; step s3-update-4: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; step s4-update-6: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 6; step s1-update-5: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 5; step s5-update-1: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 1; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s1-update-5: <... completed> step s5-update-1: <... completed> error in steps deadlock-checker-call s1-update-5 s5-update-1: ERROR: canceling the transaction since it has involved in a distributed deadlock step s1-finish: COMMIT; step s5-finish: COMMIT; step s6-finish: COMMIT; step s4-update-6: <... 
completed> step s4-finish: COMMIT; step s3-update-4: <... completed> step s3-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s6-update-6 s5-update-5 s5-update-6 s4-update-4 s1-update-4 s4-update-5 deadlock-checker-call s2-update-3 s3-update-2 s2-update-2 s3-update-3 deadlock-checker-call s6-finish s5-finish s4-finish s1-finish s3-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s4-begin: BEGIN; step s5-begin: BEGIN; step s6-begin: BEGIN; step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; step s5-update-6: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6; step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; step s4-update-5: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s2-update-3: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s3-update-3: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s2-update-2: <... completed> step s3-update-3: <... completed> error in steps deadlock-checker-call s2-update-2 s3-update-3: ERROR: canceling the transaction since it has involved in a distributed deadlock step s6-finish: COMMIT; step s5-update-6: <... completed> step s5-finish: COMMIT; step s4-update-5: <... completed> step s4-finish: COMMIT; step s1-update-4: <... completed> step s1-finish: COMMIT; step s3-finish: COMMIT; step s2-finish: COMMIT; starting permutation: s1-begin s2-begin s3-begin s4-begin s5-begin s6-begin s5-update-5 s3-update-2 s2-update-2 s4-update-4 s3-update-4 s4-update-5 s1-update-4 deadlock-checker-call s6-update-6 s5-update-6 s6-update-5 deadlock-checker-call s5-finish s6-finish s4-finish s3-finish s1-finish s2-finish step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s4-begin: BEGIN; step s5-begin: BEGIN; step s6-begin: BEGIN; step s5-update-5: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; step s3-update-2: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; step s2-update-2: UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; step s4-update-4: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; step s3-update-4: UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4; step s4-update-5: UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5; step s1-update-4: UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks f step s6-update-6: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; step s5-update-6: UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6; step s6-update-5: UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 5; step deadlock-checker-call: SELECT check_distributed_deadlocks(); check_distributed_deadlocks t step s5-update-6: <... 
completed> step s6-update-5: <... completed> error in steps deadlock-checker-call s5-update-6 s6-update-5: ERROR: canceling the transaction since it has involved in a distributed deadlock step s5-finish: COMMIT; step s4-update-5: <... completed> step s6-finish: COMMIT; step s4-finish: COMMIT; step s3-update-4: <... completed> step s3-finish: COMMIT; step s2-update-2: <... completed> step s1-update-4: <... completed> step s1-finish: COMMIT; step s2-finish: COMMIT; citus-7.0.3/src/test/regress/expected/isolation_distributed_transaction_id.out000066400000000000000000000075021317107136600301170ustar00rootroot00000000000000Parsed test spec with 4 sessions starting permutation: s1-begin s1-assign-transaction-id s4-get-all-transactions s2-begin s2-assign-transaction-id s4-get-all-transactions s3-begin s3-assign-transaction-id s4-get-all-transactions s1-commit s4-get-all-transactions s2-commit s4-get-all-transactions s3-commit s4-get-all-transactions step s1-begin: BEGIN; step s1-assign-transaction-id: SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id step s4-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_all_active_transactions() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 1 1 Wed Dec 31 16:00:00 2014 PST step s2-begin: BEGIN; step s2-assign-transaction-id: SELECT assign_distributed_transaction_id(2, 2, '2015-01-02 00:00:00+0'); assign_distributed_transaction_id step s4-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_all_active_transactions() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 1 1 Wed Dec 31 16:00:00 2014 PST 2 2 Thu Jan 01 16:00:00 2015 PST step s3-begin: BEGIN; step s3-assign-transaction-id: SELECT assign_distributed_transaction_id(3, 3, '2015-01-03 00:00:00+0'); assign_distributed_transaction_id step s4-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_all_active_transactions() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 1 1 Wed Dec 31 16:00:00 2014 PST 2 2 Thu Jan 01 16:00:00 2015 PST 3 3 Fri Jan 02 16:00:00 2015 PST step s1-commit: COMMIT; step s4-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_all_active_transactions() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 2 2 Thu Jan 01 16:00:00 2015 PST 3 3 Fri Jan 02 16:00:00 2015 PST step s2-commit: COMMIT; step s4-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_all_active_transactions() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp 3 3 Fri Jan 02 16:00:00 2015 PST step s3-commit: COMMIT; step s4-get-all-transactions: SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_all_active_transactions() ORDER BY 1,2,3; initiator_node_identifiertransaction_numbertransaction_stamp starting permutation: s1-create-table s1-begin s1-insert s1-get-current-transaction-id s2-get-first-worker-active-transactions step s1-create-table: -- some tests also use distributed table CREATE TABLE distributed_transaction_id_table(some_value int, other_value int); SET citus.shard_count TO 4; SELECT create_distributed_table('distributed_transaction_id_table', 'some_value'); create_distributed_table step s1-begin: BEGIN; step s1-insert: INSERT INTO 
distributed_transaction_id_table VALUES (1, 1); step s1-get-current-transaction-id: SELECT row(initiator_node_identifier, transaction_number) FROM get_current_transaction_id(); row (0,287) step s2-get-first-worker-active-transactions: SELECT * FROM run_command_on_workers('SELECT row(initiator_node_identifier, transaction_number) FROM get_all_active_transactions(); ') WHERE nodeport = 57637; ; nodename nodeport success result localhost 57637 t (0,287) citus-7.0.3/src/test/regress/expected/isolation_dml_vs_repair.out000066400000000000000000000164161317107136600253460ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s2-invalidate-57637 s1-begin s1-insertone s2-repair s1-commit master_create_worker_shards step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s1-begin: BEGIN; step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); step s1-commit: COMMIT; step s2-repair: <... completed> master_copy_shard_placement starting permutation: s1-insertone s2-invalidate-57637 s1-begin s1-insertall s2-repair s1-commit master_create_worker_shards step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s1-begin: BEGIN; step s1-insertall: INSERT INTO test_dml_vs_repair SELECT test_id, data+1 FROM test_dml_vs_repair; step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); step s1-commit: COMMIT; step s2-repair: <... completed> master_copy_shard_placement starting permutation: s2-invalidate-57637 s2-begin s2-repair s1-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s2-begin: BEGIN; step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); step s2-commit: COMMIT; step s1-insertone: <... 
completed> step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; test_id data 1 1 step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; test_id data 1 1 starting permutation: s2-invalidate-57637 s1-prepared-insertone s2-begin s2-repair s1-prepared-insertone s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s1-prepared-insertone: EXECUTE insertone; step s2-begin: BEGIN; step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement step s1-prepared-insertone: EXECUTE insertone; step s2-commit: COMMIT; step s1-prepared-insertone: <... completed> step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; test_id data 1 1 1 1 step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; test_id data 1 1 1 1 starting permutation: s2-invalidate-57637 s1-insertone s1-prepared-insertall s2-begin s2-repair s1-prepared-insertall s2-commit s2-invalidate-57638 s1-display s2-invalidate-57637 s2-revalidate-57638 s1-display master_create_worker_shards step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s1-insertone: INSERT INTO test_dml_vs_repair VALUES(1, 1); step s1-prepared-insertall: EXECUTE insertall; step s2-begin: BEGIN; step s2-repair: SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); master_copy_shard_placement step s1-prepared-insertall: EXECUTE insertall; step s2-commit: COMMIT; step s1-prepared-insertall: <... 
completed> step s2-invalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; test_id data 1 1 1 2 1 2 1 3 step s2-invalidate-57637: UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; step s2-revalidate-57638: UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; step s1-display: SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; test_id data 1 1 1 2 1 2 1 3 citus-7.0.3/src/test/regress/expected/isolation_drop_shards.out000066400000000000000000000111311317107136600250150ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-begin s1-drop-all-shards s2-truncate s1-commit ?column? 1 step s1-begin: BEGIN; step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards 16 step s2-truncate: TRUNCATE append_table; step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-drop-all-shards s2-apply-delete-command s1-commit ?column? 1 step s1-begin: BEGIN; step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards 16 step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command 0 starting permutation: s1-begin s1-drop-all-shards s2-drop-all-shards s1-commit ?column? 1 step s1-begin: BEGIN; step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards 16 step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards 0 starting permutation: s1-begin s1-drop-all-shards s2-select s1-commit ?column? 1 step s1-begin: BEGIN; step s1-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); master_drop_all_shards 16 step s2-select: SELECT * FROM append_table; step s1-commit: COMMIT; step s2-select: <... completed> test_id data starting permutation: s1-begin s1-apply-delete-command s2-truncate s1-commit ?column? 1 step s1-begin: BEGIN; step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command 16 step s2-truncate: TRUNCATE append_table; step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-apply-delete-command s2-apply-delete-command s1-commit ?column? 1 step s1-begin: BEGIN; step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command 16 step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command 0 starting permutation: s1-begin s1-apply-delete-command s2-drop-all-shards s1-commit ?column? 
1 step s1-begin: BEGIN; step s1-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); master_apply_delete_command 16 step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards 0 starting permutation: s1-begin s1-truncate s2-truncate s1-commit ?column? 1 step s1-begin: BEGIN; step s1-truncate: TRUNCATE append_table; step s2-truncate: TRUNCATE append_table; step s1-commit: COMMIT; step s2-truncate: <... completed> starting permutation: s1-begin s1-truncate s2-apply-delete-command s1-commit ?column? 1 step s1-begin: BEGIN; step s1-truncate: TRUNCATE append_table; step s2-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM append_table'); step s1-commit: COMMIT; step s2-apply-delete-command: <... completed> master_apply_delete_command 0 starting permutation: s1-begin s1-truncate s2-drop-all-shards s1-commit ?column? 1 step s1-begin: BEGIN; step s1-truncate: TRUNCATE append_table; step s2-drop-all-shards: SELECT master_drop_all_shards('append_table', 'public', 'append_table'); step s1-commit: COMMIT; step s2-drop-all-shards: <... completed> master_drop_all_shards 0 starting permutation: s1-begin s1-truncate s2-select s1-commit ?column? 1 step s1-begin: BEGIN; step s1-truncate: TRUNCATE append_table; step s2-select: SELECT * FROM append_table; step s1-commit: COMMIT; step s2-select: <... completed> test_id data citus-7.0.3/src/test/regress/expected/isolation_drop_vs_all.out000066400000000000000000000374411317107136600250250ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> error in steps s1-commit s2-drop: ERROR: table "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> error in steps s1-commit s2-ddl-create-index: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-drop-index: DROP INDEX drop_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> error in steps s1-commit s2-ddl-drop-index: ERROR: index "drop_hash_index" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY drop_hash_index ON drop_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> error in steps s1-commit s2-ddl-create-index-concurrently: ERROR: relation "drop_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> error in steps s1-commit s2-ddl-add-column: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-drop-column: ALTER TABLE drop_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> error in steps s1-commit s2-ddl-drop-column: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-rename-column: ALTER TABLE drop_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> error in steps s1-commit s2-ddl-rename-column: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-table-size: SELECT citus_total_relation_size('drop_hash'); step s1-commit: COMMIT; step s2-table-size: <... 
completed> error in steps s1-commit s2-table-size: ERROR: could not open relation with OID 23227 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DROP FROM drop_hash;'); ERROR: syntax error at or near "FROM" step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE drop_hash; step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-distribute-table: SELECT create_distributed_table('drop_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> error in steps s1-commit s2-distribute-table: ERROR: could not open relation with OID 23248 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX drop_hash_index; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE drop_hash DROP new_column; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE drop_hash RENAME data TO new_column; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('drop_hash'); citus_total_relation_size 57344 step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DROP FROM drop_hash;'); ERROR: syntax error at or near "FROM" step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE drop_hash; step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('drop_hash', 'id'); create_distributed_table step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/isolation_drop_vs_all_0.out000066400000000000000000000374411317107136600252440ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... 
completed> error in steps s1-commit s2-drop: ERROR: table "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> error in steps s1-commit s2-ddl-create-index: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-drop s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-drop-index: DROP INDEX drop_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> error in steps s1-commit s2-ddl-drop-index: ERROR: index "drop_hash_index" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-drop s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY drop_hash_index ON drop_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> error in steps s1-commit s2-ddl-create-index-concurrently: ERROR: relation "drop_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> error in steps s1-commit s2-ddl-add-column: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-drop s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-drop-column: ALTER TABLE drop_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> error in steps s1-commit s2-ddl-drop-column: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-ddl-rename-column: ALTER TABLE drop_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> error in steps s1-commit s2-ddl-rename-column: ERROR: relation "drop_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-table-size s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-table-size: SELECT citus_total_relation_size('drop_hash'); step s1-commit: COMMIT; step s2-table-size: <... 
completed> error in steps s1-commit s2-table-size: ERROR: could not open relation with OID 22183 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DROP FROM drop_hash;'); ERROR: syntax error at or near "FROM" step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-drop s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE drop_hash; step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE drop_hash; step s2-distribute-table: SELECT create_distributed_table('drop_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> error in steps s1-commit s2-distribute-table: ERROR: could not open relation with OID 22204 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-drop s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX drop_hash_index ON drop_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX drop_hash_index; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE drop_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE drop_hash DROP new_column; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-drop s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE drop_hash RENAME data TO new_column; step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('drop_hash'); citus_total_relation_size 57344 step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DROP FROM drop_hash;'); ERROR: syntax error at or near "FROM" step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE drop_hash; step s1-create-non-distributed-table: CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('drop_hash', 'id'); create_distributed_table step s2-drop: DROP TABLE drop_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/isolation_dump_global_wait_edges.out000066400000000000000000000044701317107136600271750ustar00rootroot00000000000000Parsed test spec with 4 sessions starting permutation: s1-begin s2-begin s1-update s2-update detector-dump-wait-edges s1-abort s2-abort step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE distributed_table SET y = 1 WHERE x = 1; step s2-update: UPDATE distributed_table SET y = 2 WHERE x = 1; step detector-dump-wait-edges: SELECT waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting FROM dump_global_wait_edges() ORDER BY waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting; SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; waiting_transaction_numblocking_transaction_numblocking_transaction_waiting 290 289 f transactionnumberwaitingtransactionnumbers 289 290 289 step s1-abort: ABORT; step s2-update: <... 
completed> step s2-abort: ABORT; starting permutation: s1-begin s2-begin s3-begin s1-update s2-update s3-update detector-dump-wait-edges s1-abort s2-abort s3-abort step s1-begin: BEGIN; step s2-begin: BEGIN; step s3-begin: BEGIN; step s1-update: UPDATE distributed_table SET y = 1 WHERE x = 1; step s2-update: UPDATE distributed_table SET y = 2 WHERE x = 1; step s3-update: UPDATE distributed_table SET y = 3 WHERE x = 1; step detector-dump-wait-edges: SELECT waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting FROM dump_global_wait_edges() ORDER BY waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting; SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; waiting_transaction_numblocking_transaction_numblocking_transaction_waiting 294 293 f 295 293 f 295 294 t transactionnumberwaitingtransactionnumbers 293 294 293 295 293,294 step s1-abort: ABORT; step s2-update: <... completed> step s2-abort: ABORT; step s3-update: <... completed> step s3-abort: ABORT; citus-7.0.3/src/test/regress/expected/isolation_dump_local_wait_edges.out000066400000000000000000000074631317107136600270340ustar00rootroot00000000000000Parsed test spec with 4 sessions starting permutation: dist11-begin dist13-begin dist11-update dist13-update detector-dump-wait-edges dist11-abort dist13-abort step dist11-begin: BEGIN; SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id step dist13-begin: BEGIN; SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id step dist11-update: UPDATE local_table SET y = 1 WHERE x = 1; step dist13-update: UPDATE local_table SET y = 3 WHERE x = 1; step detector-dump-wait-edges: SELECT waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num, blocking_transaction_waiting FROM dump_local_wait_edges() ORDER BY waiting_node_id, blocking_transaction_num, blocking_transaction_waiting; waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting 13 1 11 1 f step dist11-abort: ABORT; step dist13-update: <... completed> step dist13-abort: ABORT; starting permutation: local-begin dist13-begin local-update dist13-update detector-dump-wait-edges local-abort dist13-abort step local-begin: BEGIN; step dist13-begin: BEGIN; SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id step local-update: UPDATE local_table SET y = 2 WHERE x = 1; step dist13-update: UPDATE local_table SET y = 3 WHERE x = 1; step detector-dump-wait-edges: SELECT waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num, blocking_transaction_waiting FROM dump_local_wait_edges() ORDER BY waiting_node_id, blocking_transaction_num, blocking_transaction_waiting; waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting 13 1 0 f step local-abort: ABORT; step dist13-update: <... 
completed> step dist13-abort: ABORT; starting permutation: dist11-begin local-begin dist13-begin dist11-update local-update dist13-update detector-dump-wait-edges dist11-abort local-abort dist13-abort step dist11-begin: BEGIN; SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id step local-begin: BEGIN; step dist13-begin: BEGIN; SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0'); assign_distributed_transaction_id step dist11-update: UPDATE local_table SET y = 1 WHERE x = 1; step local-update: UPDATE local_table SET y = 2 WHERE x = 1; step dist13-update: UPDATE local_table SET y = 3 WHERE x = 1; step detector-dump-wait-edges: SELECT waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num, blocking_transaction_waiting FROM dump_local_wait_edges() ORDER BY waiting_node_id, blocking_transaction_num, blocking_transaction_waiting; waiting_node_idwaiting_transaction_numblocking_node_idblocking_transaction_numblocking_transaction_waiting 0 11 1 f 13 1 0 t step dist11-abort: ABORT; step local-update: <... completed> step local-abort: ABORT; step dist13-update: <... completed> step dist13-abort: ABORT; citus-7.0.3/src/test/regress/expected/isolation_hash_copy_vs_all.out000066400000000000000000000704321317107136600260330ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM hash_copy WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = 
t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert: INSERT INTO hash_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 20 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-delete: DELETE FROM hash_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-truncate: TRUNCATE hash_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-drop: DROP TABLE hash_copy; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; ERROR: relation "hash_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-drop-index: DROP INDEX hash_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY hash_copy_index ON hash_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-copy-additional-column: COPY hash_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; step s2-ddl-drop-column: ALTER TABLE hash_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('hash_copy'); citus_total_relation_size 65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM hash_copy;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... 
completed> master_modify_multiple_shards 10 step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 0 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-drop-all-shards: SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards 4 step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE hash_copy; step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-distribute-table: SELECT create_distributed_table('hash_copy', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 15 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM hash_copy WHERE id = 1; id data int_data 1 b 1 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM hash_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table step 
s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO hash_copy VALUES(0, 'k', 0); step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO hash_copy SELECT * FROM hash_copy; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE hash_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM hash_copy WHERE id = 1; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE hash_copy; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE hash_copy; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> error in steps s1-commit s2-copy: ERROR: relation "hash_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM hash_copy; ERROR: relation "hash_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX hash_copy_index ON hash_copy(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX hash_copy_index; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE hash_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE hash_copy DROP new_column; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE hash_copy RENAME data TO new_column; step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('hash_copy'); citus_total_relation_size 57344 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM hash_copy;'); master_modify_multiple_shards 5 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); master_drop_all_shards 4 step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE hash_copy; step s1-create-non-distributed-table: CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-initialize: COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('hash_copy', 'id'); create_distributed_table step s2-copy: COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM hash_copy; count 15 citus-7.0.3/src/test/regress/expected/isolation_insert_select_vs_all.out000066400000000000000000001772321317107136600267270ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-insert-select s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-update-on-inserted s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-update-on-inserted: UPDATE insert_of_insert_select_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-delete-on-inserted s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' 
WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-delete-on-inserted: DELETE FROM insert_of_insert_select_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-truncate-on-inserted s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-truncate-on-inserted: TRUNCATE insert_of_insert_select_hash; step s1-commit: COMMIT; step s2-truncate-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-drop-on-inserted s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-commit: COMMIT; step s2-drop-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-create-index-on-inserted: CREATE INDEX insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-insert-select s2-ddl-drop-index-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-create-index-on-inserted: CREATE INDEX insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-drop-index-on-inserted: DROP INDEX insert_of_insert_select_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-inserted s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-create-index-concurrently-on-inserted: CREATE INDEX CONCURRENTLY insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-add-column-on-inserted: ALTER TABLE insert_of_insert_select_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-insert-select s2-ddl-drop-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-add-column-on-inserted: ALTER TABLE insert_of_insert_select_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-drop-column-on-inserted: ALTER TABLE insert_of_insert_select_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column-on-inserted: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-inserted s1-commit s1-select-count s1-show-columns-inserted s1-show-columns-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-rename-column-on-inserted: ALTER TABLE insert_of_insert_select_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column-on-inserted: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-inserted s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-table-size-on-inserted: SELECT citus_total_relation_size('insert_of_insert_select_hash'); citus_total_relation_size 65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-modify-multiple-shards-on-inserted s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; 
step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-master-modify-multiple-shards-on-inserted: SELECT master_modify_multiple_shards('DELETE FROM insert_of_insert_select_hash;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards-on-inserted: <... completed> master_modify_multiple_shards 15 step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-drop-all-shards-on-inserted s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-master-drop-all-shards-on-inserted: SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); step s1-commit: COMMIT; step s2-master-drop-all-shards-on-inserted: <... completed> master_drop_all_shards 4 step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-drop-on-inserted s1-create-non-distributed-table-on-inserted s1-initialize s1-begin s1-insert-select s2-distribute-table-on-inserted s1-commit s1-select-count create_distributed_table step s1-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-create-non-distributed-table-on-inserted: CREATE TABLE insert_of_insert_select_hash(id integer, data text); step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; ERROR: cannot plan queries which include both local and distributed relations step s2-distribute-table-on-inserted: SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); create_distributed_table step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-update-on-selected s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-update-on-selected: UPDATE 
select_of_insert_select_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-delete-on-selected s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-delete-on-selected: DELETE FROM select_of_insert_select_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 9 starting permutation: s1-initialize s1-begin s1-insert-select s2-truncate-on-selected s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-truncate-on-selected: TRUNCATE select_of_insert_select_hash; step s1-commit: COMMIT; step s2-truncate-on-selected: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 0 starting permutation: s1-initialize s1-begin s1-insert-select s2-drop-on-selected s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-drop-on-selected: DROP TABLE select_of_insert_select_hash; step s1-commit: COMMIT; step s2-drop-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; ERROR: relation "select_of_insert_select_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-insert-select s2-ddl-drop-index-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-drop-index-on-selected: DROP INDEX select_of_insert_select_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-create-index-concurrently-on-selected s1-commit s1-select-count s1-show-indexes-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-create-index-concurrently-on-selected: CREATE INDEX CONCURRENTLY select_of_insert_select_hash_index ON insert_of_insert_select_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently-on-selected: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-add-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-add-column-on-selected: ALTER TABLE select_of_insert_select_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-insert-select s2-ddl-drop-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-add-column-on-selected: ALTER TABLE select_of_insert_select_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; ERROR: INSERT has more expressions than target columns step s2-ddl-drop-column-on-selected: ALTER TABLE select_of_insert_select_hash DROP new_column; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-insert-select s2-ddl-rename-column-on-selected s1-commit s1-select-count s1-show-columns-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-ddl-rename-column-on-selected: ALTER TABLE select_of_insert_select_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column-on-selected: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-insert-select s2-table-size-on-selected s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-table-size-on-selected: SELECT citus_total_relation_size('select_of_insert_select_hash'); citus_total_relation_size 65536 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-modify-multiple-shards-on-selected s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-master-modify-multiple-shards-on-selected: SELECT master_modify_multiple_shards('DELETE FROM select_of_insert_select_hash;'); master_modify_multiple_shards 10 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 0 starting permutation: s1-initialize s1-begin s1-insert-select s2-master-drop-all-shards-on-selected s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-master-drop-all-shards-on-selected: SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); step s1-commit: COMMIT; step s2-master-drop-all-shards-on-selected: <... 
completed> master_drop_all_shards 4 step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 0 starting permutation: s1-drop-on-selected s1-create-non-distributed-table-on-selected s1-initialize s1-begin s1-insert-select s2-distribute-table-on-selected s1-commit s1-select-count create_distributed_table step s1-drop-on-selected: DROP TABLE select_of_insert_select_hash; step s1-create-non-distributed-table-on-selected: CREATE TABLE select_of_insert_select_hash(id integer, data text); step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; step s2-distribute-table-on-selected: SELECT create_distributed_table('select_of_insert_select_hash', 'id'); create_distributed_table step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-update-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-update-on-inserted: UPDATE insert_of_insert_select_hash SET data = 'l' WHERE id = 4; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-delete-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-delete-on-inserted: DELETE FROM insert_of_insert_select_hash WHERE id = 4; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-truncate-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY 
select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate-on-inserted: TRUNCATE insert_of_insert_select_hash; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-drop-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> error in steps s1-commit s2-insert-select: ERROR: relation "insert_of_insert_select_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-ddl-create-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index-on-inserted: CREATE INDEX insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index-on-inserted s1-begin s1-ddl-drop-index-on-inserted s2-insert-select s1-commit s1-select-count s1-show-indexes-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-create-index-on-inserted: CREATE INDEX insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); step s1-begin: BEGIN; step s1-ddl-drop-index-on-inserted: DROP INDEX insert_of_insert_select_hash_index; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-inserted: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column-on-inserted: ALTER TABLE insert_of_insert_select_hash ADD new_column int DEFAULT 0; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column-on-inserted s1-begin s1-ddl-drop-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-add-column-on-inserted: ALTER TABLE insert_of_insert_select_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column-on-inserted: ALTER TABLE insert_of_insert_select_hash DROP new_column; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-inserted s2-insert-select s1-commit s1-select-count s1-show-columns-inserted create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column-on-inserted: ALTER TABLE insert_of_insert_select_hash RENAME data TO new_column; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-inserted: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size-on-inserted: SELECT citus_total_relation_size('insert_of_insert_select_hash'); citus_total_relation_size 65536 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards-on-inserted: SELECT master_modify_multiple_shards('DELETE FROM insert_of_insert_select_hash;'); master_modify_multiple_shards 10 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards-on-inserted: SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); master_drop_all_shards 4 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
completed> error in steps s1-commit s2-insert-select: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-drop-on-inserted s1-create-non-distributed-table-on-inserted s1-initialize s1-begin s1-distribute-table-on-inserted s2-insert-select s1-commit s1-select-count create_distributed_table step s1-drop-on-inserted: DROP TABLE insert_of_insert_select_hash; step s1-create-non-distributed-table-on-inserted: CREATE TABLE insert_of_insert_select_hash(id integer, data text); step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table-on-inserted: SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); create_distributed_table step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-update-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-update-on-selected: UPDATE select_of_insert_select_hash SET data = 'l' WHERE id = 4; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-delete-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-delete-on-selected: DELETE FROM select_of_insert_select_hash WHERE id = 4; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 9 starting permutation: s1-initialize s1-begin s1-truncate-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY 
insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate-on-selected: TRUNCATE select_of_insert_select_hash; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 0 starting permutation: s1-initialize s1-begin s1-drop-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-drop-on-selected: DROP TABLE select_of_insert_select_hash; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> error in steps s1-commit s2-insert-select: ERROR: relation "select_of_insert_select_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; ERROR: relation "select_of_insert_select_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index-on-selected s1-begin s1-ddl-drop-index-on-selected s2-insert-select s1-commit s1-select-count s1-show-indexes-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-create-index-on-selected: CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); step s1-begin: BEGIN; step 
s1-ddl-drop-index-on-selected: DROP INDEX select_of_insert_select_hash_index; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-indexes-selected: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column-on-selected: ALTER TABLE select_of_insert_select_hash ADD new_column int DEFAULT 0; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> error in steps s1-commit s2-insert-select: ERROR: INSERT has more expressions than target columns step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column-on-selected s1-begin s1-ddl-drop-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-ddl-add-column-on-selected: ALTER TABLE select_of_insert_select_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column-on-selected: ALTER TABLE select_of_insert_select_hash DROP new_column; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column-on-selected s2-insert-select s1-commit s1-select-count s1-show-columns-selected create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column-on-selected: ALTER TABLE select_of_insert_select_hash RENAME data TO new_column; step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 step s1-show-columns-selected: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size-on-selected: SELECT citus_total_relation_size('select_of_insert_select_hash'); citus_total_relation_size 65536 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards-on-selected: SELECT master_modify_multiple_shards('DELETE FROM select_of_insert_select_hash;'); master_modify_multiple_shards 10 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT 
COUNT(*) FROM select_of_insert_select_hash; count 0 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards-on-selected: SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); master_drop_all_shards 4 step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 0 starting permutation: s1-drop-on-selected s1-create-non-distributed-table-on-selected s1-initialize s1-begin s1-distribute-table-on-selected s2-insert-select s1-commit s1-select-count create_distributed_table step s1-drop-on-selected: DROP TABLE select_of_insert_select_hash; step s1-create-non-distributed-table-on-selected: CREATE TABLE select_of_insert_select_hash(id integer, data text); step s1-initialize: COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table-on-selected: SELECT create_distributed_table('select_of_insert_select_hash', 'id'); create_distributed_table step s2-insert-select: INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_of_insert_select_hash; count 10 citus-7.0.3/src/test/regress/expected/isolation_insert_vs_all.out000066400000000000000000001163531317107136600253650ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-insert s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 7 starting permutation: s1-initialize s1-begin s1-insert s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 9 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 
'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 9 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 9 starting permutation: s1-initialize s1-begin s1-insert s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 11 starting permutation: s1-initialize s1-begin s1-insert s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 starting permutation: s1-initialize s1-begin s1-insert s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-delete: DELETE FROM insert_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 5 starting permutation: s1-initialize s1-begin s1-insert s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-truncate: TRUNCATE insert_hash; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 0 starting permutation: s1-initialize s1-begin s1-insert s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-drop: DROP TABLE insert_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-ddl-drop-index: DROP INDEX insert_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-insert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY insert_hash_index ON insert_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-begin s1-insert s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-ddl-drop-column: ALTER TABLE insert_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-insert s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-insert s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size 57344 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 starting permutation: s1-initialize s1-begin s1-insert s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM insert_hash;'); master_modify_multiple_shards 5 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 1 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-insert s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s2-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 11 starting permutation: s1-initialize s1-begin s1-insert-select s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 11 starting permutation: s1-initialize s1-begin s1-update s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 starting permutation: s1-initialize s1-begin s1-delete s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM insert_hash WHERE id = 4; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 5 starting permutation: s1-initialize s1-begin s1-truncate s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE insert_hash; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 1 starting permutation: s1-initialize s1-begin s1-drop s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE insert_hash; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> error in steps s1-commit s2-insert: ERROR: relation "insert_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM insert_hash; ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-insert s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX insert_hash_index; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE insert_hash DROP new_column; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size 57344 step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 6 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM insert_hash;'); master_modify_multiple_shards 5 step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 1 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-insert s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); create_distributed_table step s2-insert: INSERT INTO insert_hash VALUES(7, 'k'); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 11 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 13 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-delete: DELETE FROM insert_hash WHERE id = 4; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 7 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-truncate: TRUNCATE insert_hash; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 0 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-drop: DROP TABLE insert_hash; step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-insert-multi-row s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-ddl-drop-index: DROP INDEX insert_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY insert_hash_index ON insert_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-insert-multi-row s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-ddl-drop-column: ALTER TABLE insert_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size 57344 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 starting permutation: s1-initialize s1-begin s1-insert-multi-row s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM insert_hash;'); master_modify_multiple_shards 5 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 3 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-insert-multi-row s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s2-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 13 starting permutation: s1-initialize s1-begin s1-insert-select s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO insert_hash SELECT * FROM insert_hash; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 13 starting permutation: s1-initialize s1-begin s1-update s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE insert_hash SET data = 'l' WHERE id = 4; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 starting permutation: s1-initialize s1-begin s1-delete s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM insert_hash WHERE id = 4; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 7 starting permutation: s1-initialize s1-begin s1-truncate s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE insert_hash; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 3 starting permutation: s1-initialize s1-begin s1-drop s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE insert_hash; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> error in steps s1-commit s2-insert-multi-row: ERROR: relation "insert_hash" does not exist step s1-select-count: SELECT COUNT(*) FROM insert_hash; ERROR: relation "insert_hash" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-insert-multi-row s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX insert_hash_index ON insert_hash(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX insert_hash_index; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE insert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE insert_hash DROP new_column; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-insert-multi-row s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE insert_hash RENAME data TO new_column; step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('insert_hash'); citus_total_relation_size 57344 step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 8 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM insert_hash;'); master_modify_multiple_shards 5 step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 3 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-insert-multi-row s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE insert_hash; step s1-create-non-distributed-table: CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('insert_hash', 'id'); create_distributed_table step s2-insert-multi-row: INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); step s1-commit: COMMIT; step s2-insert-multi-row: <... completed> step s1-select-count: SELECT COUNT(*) FROM insert_hash; count 13 citus-7.0.3/src/test/regress/expected/isolation_insert_vs_vacuum.out000066400000000000000000000012471317107136600261100ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-begin s1-insert s2-vacuum-analyze s1-commit create_distributed_table step s1-begin: BEGIN; step s1-insert: INSERT INTO test_insert_vacuum VALUES(1, 1); step s2-vacuum-analyze: VACUUM ANALYZE test_insert_vacuum; step s1-commit: COMMIT; starting permutation: s1-begin s1-insert s2-vacuum-full s1-commit create_distributed_table step s1-begin: BEGIN; step s1-insert: INSERT INTO test_insert_vacuum VALUES(1, 1); step s2-vacuum-full: VACUUM FULL test_insert_vacuum; step s1-commit: COMMIT; step s2-vacuum-full: <... 
completed> citus-7.0.3/src/test/regress/expected/isolation_partitioned_copy_vs_all.out000066400000000000000000000621611317107136600274320ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM partitioned_copy WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 
6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 20 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-delete: DELETE FROM partitioned_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-truncate: TRUNCATE partitioned_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-drop: DROP TABLE partitioned_copy; step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; ERROR: relation "partitioned_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-copy-additional-column: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; step s2-ddl-drop-column: ALTER TABLE partitioned_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('partitioned_copy'); citus_total_relation_size 32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM partitioned_copy;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... 
completed> master_modify_multiple_shards 10 step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 0 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards 4 step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE partitioned_copy; step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-distribute-table: SELECT create_distributed_table('partitioned_copy', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 15 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM partitioned_copy WHERE id = 1; id data int_data 1 b 1 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM partitioned_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step 
s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO partitioned_copy VALUES(0, 'k', 0); step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE partitioned_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM partitioned_copy WHERE id = 1; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE partitioned_copy; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE partitioned_copy; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> error in steps s1-commit s2-copy: ERROR: relation "partitioned_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; ERROR: relation "partitioned_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE partitioned_copy DROP new_column; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE partitioned_copy RENAME data TO new_column; step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('partitioned_copy'); citus_total_relation_size 32768 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM partitioned_copy;'); master_modify_multiple_shards 5 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); master_drop_all_shards 4 step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE partitioned_copy; step s1-create-non-distributed-table: CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-initialize: COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('partitioned_copy', 'id'); create_distributed_table step s2-copy: COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM partitioned_copy; count 15 citus-7.0.3/src/test/regress/expected/isolation_partitioned_copy_vs_all_0.out000066400000000000000000000004751317107136600276510ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count setup failed: ERROR: syntax error at or near "PARTITION" LINE 3: ...itioned_copy(id integer, data text, int_data int) PARTITION ... ^ citus-7.0.3/src/test/regress/expected/isolation_progress_monitoring.out000066400000000000000000000055301317107136600266240ustar00rootroot00000000000000Parsed test spec with 5 sessions starting permutation: take-locks s1-start-operation s2-start-operation s3-start-operation show-progress release-locks-1 show-progress release-locks-2 show-progress release-locks-3 step take-locks: -- Locks for steps of sample operation in s1 SELECT pg_advisory_lock(10); SELECT pg_advisory_lock(11); SELECT pg_advisory_lock(12); -- Locks for steps of sample operation in s2 SELECT pg_advisory_lock(20); SELECT pg_advisory_lock(21); SELECT pg_advisory_lock(22); -- Locks for steps of sample operation in s3 SELECT pg_advisory_lock(30); SELECT pg_advisory_lock(31); SELECT pg_advisory_lock(32); pg_advisory_lock pg_advisory_lock pg_advisory_lock pg_advisory_lock pg_advisory_lock pg_advisory_lock pg_advisory_lock pg_advisory_lock pg_advisory_lock step s1-start-operation: SELECT sample_operation(1337, 10, -1); step s2-start-operation: SELECT sample_operation(1337, 20, 2); step s3-start-operation: SELECT sample_operation(3778, 30, 9); step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); show_progress (0,0) (1,0) (0,0) (1,0) show_progress (0,0) (1,0) step release-locks-1: -- Release the locks of first steps of sample operations SELECT pg_advisory_unlock(10); SELECT pg_advisory_unlock(20); SELECT pg_advisory_unlock(30); pg_advisory_unlock t pg_advisory_unlock t pg_advisory_unlock t step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); show_progress (0,-1) (1,0) (0,2) (1,0) show_progress (0,9) (1,0) step release-locks-2: -- Release the locks of second steps of sample operations SELECT pg_advisory_unlock(11); SELECT pg_advisory_unlock(21); SELECT pg_advisory_unlock(31); pg_advisory_unlock t pg_advisory_unlock t pg_advisory_unlock t step show-progress: SELECT show_progress(1337); SELECT show_progress(3778); show_progress (0,-1) (1,-1) (0,2) (1,2) show_progress (0,9) (1,9) step release-locks-3: -- Release the locks of final steps of sample operations SELECT pg_advisory_unlock(12); SELECT pg_advisory_unlock(22); SELECT pg_advisory_unlock(32); pg_advisory_unlock t pg_advisory_unlock t pg_advisory_unlock t step s1-start-operation: <... completed> sample_operation step s2-start-operation: <... completed> sample_operation step s3-start-operation: <... 
completed> sample_operation citus-7.0.3/src/test/regress/expected/isolation_range_copy_vs_all.out000066400000000000000000000724001317107136600262010ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM range_copy WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert: INSERT INTO range_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO range_copy SELECT * FROM range_copy; step s1-commit: 
COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-update: UPDATE range_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-delete: DELETE FROM range_copy WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-truncate: TRUNCATE range_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-drop: DROP TABLE range_copy; step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; ERROR: relation "range_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-drop-index: DROP INDEX range_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY range_copy_index ON range_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-copy-additional-column: COPY range_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; step s2-ddl-drop-column: ALTER TABLE range_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('range_copy'); citus_total_relation_size 32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM range_copy;'); master_modify_multiple_shards 5 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 5 starting permutation: s1-initialize s1-begin s1-copy s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;'); step s1-commit: COMMIT; step s2-master-apply-delete-command: <... completed> master_apply_delete_command 1 step s1-select-count: SELECT COUNT(*) FROM range_copy; count 5 starting permutation: s1-initialize s1-begin s1-copy s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-drop-all-shards: SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); step s1-commit: COMMIT; step s2-master-drop-all-shards: <... completed> master_drop_all_shards 2 step s1-select-count: SELECT COUNT(*) FROM range_copy; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE range_copy; step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-distribute-table: SELECT create_distributed_table('range_copy', 'id', 'range'); step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_distributed_table step s1-select-count: SELECT COUNT(*) FROM range_copy; count 0 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM range_copy WHERE id = 1; id data int_data 1 b 1 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM range_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO range_copy VALUES(0, 'k', 0); step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO range_copy SELECT * FROM range_copy; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE range_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit 
s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM range_copy WHERE id = 1; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE range_copy; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE range_copy; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: relation "range_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM range_copy; ERROR: relation "range_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX range_copy_index ON range_copy(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX range_copy_index; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM range_copy; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE range_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE range_copy DROP new_column; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE range_copy RENAME data TO new_column; step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('range_copy'); citus_total_relation_size 32768 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM range_copy;'); master_modify_multiple_shards 5 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM range_copy; count 5 starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;'); master_apply_delete_command 1 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 5 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s2-copy s1-commit s1-select-count create_distributed_table step s1-initialize: COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); master_drop_all_shards 1 step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM range_copy; count 5 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE range_copy; step s1-create-non-distributed-table: CREATE TABLE range_copy(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('range_copy', 'id', 'range'); create_distributed_table step s2-copy: COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> error in steps s1-commit s2-copy: ERROR: could not find any shards into which to copy step s1-select-count: SELECT COUNT(*) FROM range_copy; count 0 citus-7.0.3/src/test/regress/expected/isolation_reference_copy_vs_all.out000066400000000000000000000675541317107136600270610ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-copy s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 15 starting permutation: s1-initialize s1-begin s1-copy s2-router-select s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-router-select: SELECT * FROM reference_copy WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-real-time-select s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-real-time-select: SELECT * FROM reference_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-task-tracker-select s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-insert s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert: INSERT INTO reference_copy VALUES(0, 'k', 0); step s1-commit: COMMIT; step s2-insert: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 11 starting permutation: s1-initialize s1-begin s1-copy s2-insert-select s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-insert-select: INSERT INTO reference_copy SELECT * FROM reference_copy; step s1-commit: COMMIT; step s2-insert-select: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 20 starting permutation: s1-initialize s1-begin s1-copy s2-update s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-update: UPDATE reference_copy SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s2-update: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-delete s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-delete: DELETE FROM reference_copy WHERE id = 1; step s1-commit: COMMIT; step s2-delete: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 9 starting permutation: s1-initialize s1-begin s1-copy s2-truncate s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-truncate: TRUNCATE reference_copy; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 0 starting permutation: s1-initialize s1-begin s1-copy s2-drop s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-drop: DROP TABLE reference_copy; step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; ERROR: relation "reference_copy" does not exist starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-copy s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-drop-index: DROP INDEX reference_copy_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY reference_copy_index ON reference_copy(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-begin s1-copy s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-copy-additional-column s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-copy-additional-column: COPY reference_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; step s2-ddl-drop-column: ALTER TABLE reference_copy DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-copy s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-ddl-rename-column: ALTER TABLE reference_copy RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-copy s2-table-size s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-table-size: SELECT citus_total_relation_size('reference_copy'); citus_total_relation_size 32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-copy s2-master-modify-multiple-shards s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM reference_copy;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> master_modify_multiple_shards 10 step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-copy s2-distribute-table s1-commit s1-select-count create_reference_table step s1-drop: DROP TABLE reference_copy; step s1-create-non-distributed-table: CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s2-distribute-table: SELECT create_reference_table('reference_copy'); step s1-commit: COMMIT; step s2-distribute-table: <... 
completed> create_reference_table step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 15 starting permutation: s1-initialize s1-begin s1-router-select s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM reference_copy WHERE id = 1; id data int_data 1 b 1 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-real-time-select s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM reference_copy ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-insert s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO reference_copy VALUES(0, 'k', 0); step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 11 starting permutation: s1-initialize s1-begin s1-insert-select s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO reference_copy SELECT * FROM reference_copy; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 15 starting permutation: s1-initialize s1-begin s1-update s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE reference_copy SET data = 'l' WHERE id = 0; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-delete s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM reference_copy WHERE id = 1; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 9 starting permutation: s1-initialize s1-begin s1-truncate s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE reference_copy; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 5 starting permutation: s1-initialize s1-begin s1-drop s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE reference_copy; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: relation "reference_copy" does not exist step s1-select-count: SELECT COUNT(*) FROM reference_copy; ERROR: relation "reference_copy" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-copy s1-commit s1-select-count s1-show-indexes create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX reference_copy_index ON reference_copy(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX reference_copy_index; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> error in steps s1-commit s2-copy: ERROR: missing data for column "new_column" step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE reference_copy ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE reference_copy DROP new_column; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-copy s1-commit s1-select-count s1-show-columns create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE reference_copy RENAME data TO new_column; step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('reference_copy'); citus_total_relation_size 32768 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 10 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-copy s1-commit s1-select-count create_reference_table step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM reference_copy;'); master_modify_multiple_shards 5 step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 5 starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s1-distribute-table s2-copy s1-commit s1-select-count create_reference_table step s1-drop: DROP TABLE reference_copy; step s1-create-non-distributed-table: CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-initialize: COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-distribute-table: SELECT create_reference_table('reference_copy'); create_reference_table step s2-copy: COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; step s1-commit: COMMIT; step s2-copy: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM reference_copy; count 15 citus-7.0.3/src/test/regress/expected/isolation_replace_wait_function.out000066400000000000000000000010461317107136600270550ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-insert-1 s2-insert s1-finish s2-finish create_distributed_table step s1-insert-1: BEGIN; INSERT INTO test_locking (a) VALUES (1); step s2-insert: BEGIN; INSERT INTO test_locking (a) VALUES (1); step s1-finish: COMMIT; step s2-insert: <... completed> error in steps s1-finish s2-insert: ERROR: duplicate key value violates unique constraint "test_locking_a_key_102320" step s2-finish: COMMIT; restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/isolation_select_vs_all.out000066400000000000000000003055161317107136600253410ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s1-router-select s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-router-select s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-router-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: 
BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-router-select 
s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 starting permutation: s1-initialize s1-begin s1-router-select s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-router-select s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-router-select s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-delete: DELETE FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 starting permutation: s1-initialize s1-begin s1-router-select s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-truncate: TRUNCATE select_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-router-select s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-drop: DROP TABLE select_append; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-router-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-ddl-drop-index: DROP INDEX select_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-router-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data new_column 1 b 1 0 step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-router-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-router-select s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size 32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-router-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM select_append;'); master_modify_multiple_shards 5 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s2-master-apply-delete-command s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s2-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); master_apply_delete_command 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s2-master-drop-all-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s2-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); master_drop_all_shards 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-router-select s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-insert s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; 
step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 starting permutation: s1-initialize s1-begin s1-insert-select s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-update s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-delete s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 starting permutation: s1-initialize s1-begin s1-truncate s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... completed> id data int_data step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-drop s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... 
completed> error in steps s1-commit s2-router-select: ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-router-select s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-router-select s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... completed> id data int_data 1 b 1 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... completed> id data int_data new_column 1 b 1 0 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... 
completed> id data int_data 1 b 1 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-router-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-router-select: SELECT * FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s2-router-select: <... completed> id new_column int_data 1 b 1 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size 32768 step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-router-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM select_append;'); master_modify_multiple_shards 5 step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data 1 b 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-master-apply-delete-command s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); master_apply_delete_command 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-master-drop-all-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); master_drop_all_shards 1 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-router-select s1-commit s1-select-count create_distributed_table step 
s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table step s2-router-select: SELECT * FROM select_append WHERE id = 1; id data int_data step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 starting permutation: s1-initialize s1-begin s1-real-time-select s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-delete: DELETE FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 starting permutation: s1-initialize s1-begin s1-real-time-select s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-truncate: TRUNCATE select_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-real-time-select s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-drop: DROP TABLE select_append; step s1-commit: COMMIT; step s2-drop: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-real-time-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-ddl-drop-index: DROP INDEX select_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-real-time-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data new_column 0 a 0 0 1 b 1 0 2 c 2 0 3 d 3 0 4 e 4 0 step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-real-time-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-real-time-select s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size 32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-real-time-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM select_append;'); master_modify_multiple_shards 5 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-real-time-select s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data step s2-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-insert s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 starting permutation: s1-initialize s1-begin s1-insert-select s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-update s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step 
s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-delete s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 starting permutation: s1-initialize s1-begin s1-truncate s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... completed> id data int_data step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-drop s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... completed> error in steps s1-commit s2-real-time-select: ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-real-time-select s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-real-time-select s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... 
completed> id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... completed> id data int_data new_column 0 a 0 0 1 b 1 0 2 c 2 0 3 d 3 0 4 e 4 0 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... completed> id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-real-time-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; step s1-commit: COMMIT; step s2-real-time-select: <... 
completed> id new_column int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size 32768 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM select_append;'); master_modify_multiple_shards 5 step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data 0 a 0 1 b 1 2 c 2 3 d 3 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-real-time-select s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table step s2-real-time-select: SELECT * FROM select_append ORDER BY 1, 2; id data int_data step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-insert-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-insert-select: INSERT INTO select_append SELECT * FROM 
select_append; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-update s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-delete s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-delete: DELETE FROM select_append WHERE id = 1; step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-truncate s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-truncate: TRUNCATE select_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-drop s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-drop: DROP TABLE select_append; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-task-tracker-select s2-ddl-drop-index s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-ddl-drop-index: DROP INDEX select_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-add-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-task-tracker-select s2-ddl-drop-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data new_column id data int_data new_column 0 a 0 0 0 a 0 0 1 b 1 0 1 b 1 0 2 c 2 0 2 c 2 0 3 d 3 0 3 d 3 0 4 e 4 0 4 e 4 0 step s2-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... 
completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-ddl-rename-column s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-table-size s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size 32768 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-task-tracker-select s2-master-modify-multiple-shards s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM select_append;'); master_modify_multiple_shards 5 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-task-tracker-select s2-distribute-table s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data step s2-distribute-table: SELECT 
create_distributed_table('select_append', 'id', 'append'); create_distributed_table step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-insert s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert: INSERT INTO select_append VALUES(0, 'k', 0); step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 6 starting permutation: s1-initialize s1-begin s1-insert-select s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-insert-select: INSERT INTO select_append SELECT * FROM select_append; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-update s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE select_append SET data = 'l' WHERE id = 0; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-delete s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-delete: DELETE FROM select_append WHERE id = 1; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 4 starting permutation: s1-initialize s1-begin s1-truncate s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE select_append; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> id data int_data id data int_data step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-initialize s1-begin s1-drop s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-drop: DROP TABLE select_append; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> error in steps s1-commit s2-task-tracker-select: ERROR: relation "select_append" does not exist step s1-select-count: SELECT COUNT(*) FROM select_append; ERROR: relation "select_append" does not exist starting permutation: s1-initialize s1-begin s1-ddl-create-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,1) (localhost,57638,t,1) starting permutation: s1-initialize s1-ddl-create-index s1-begin s1-ddl-drop-index s2-task-tracker-select s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-create-index: CREATE INDEX select_append_index ON select_append(id); step s1-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX select_append_index; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) starting permutation: s1-initialize s1-begin s1-ddl-add-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> id data int_data new_column id data int_data new_column 0 a 0 0 0 a 0 0 1 b 1 0 1 b 1 0 2 c 2 0 2 c 2 0 3 d 3 0 3 d 3 0 4 e 4 0 4 e 4 0 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-ddl-add-column s1-begin s1-ddl-drop-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-ddl-add-column: ALTER TABLE select_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE select_append DROP new_column; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... completed> id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") starting permutation: s1-initialize s1-begin s1-ddl-rename-column s2-task-tracker-select s1-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE select_append RENAME data TO new_column; step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; step s1-commit: COMMIT; step s2-task-tracker-select: <... 
completed> id new_column int_data id new_column int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) starting permutation: s1-initialize s1-begin s1-table-size s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('select_append'); citus_total_relation_size 32768 step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 5 starting permutation: s1-initialize s1-begin s1-master-modify-multiple-shards s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-initialize: COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; step s1-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM select_append;'); master_modify_multiple_shards 5 step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data 0 a 0 0 a 0 1 b 1 1 b 1 2 c 2 2 c 2 3 d 3 3 d 3 4 e 4 4 e 4 step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 starting permutation: s1-drop s1-create-non-distributed-table s1-begin s1-distribute-table s2-task-tracker-select s1-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE select_append; step s1-create-non-distributed-table: CREATE TABLE select_append(id integer, data text, int_data int); step s1-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('select_append', 'id', 'append'); create_distributed_table step s2-task-tracker-select: SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; id data int_data id data int_data step s1-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM select_append; count 0 citus-7.0.3/src/test/regress/expected/isolation_transaction_recovery.out000066400000000000000000000006501317107136600267540ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-begin s1-recover s2-insert s1-commit create_reference_table step s1-begin: BEGIN; step s1-recover: SELECT recover_prepared_transactions(); recover_prepared_transactions 0 step s2-insert: INSERT INTO test_transaction_recovery VALUES (1,2); step s1-commit: COMMIT; step s2-insert: <... 
completed> citus-7.0.3/src/test/regress/expected/isolation_truncate_vs_all.out000066400000000000000000000517071317107136600257070ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-drop: DROP TABLE truncate_append; step s1-commit: COMMIT; step s2-drop: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-truncate s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-ddl-drop-index: DROP INDEX truncate_append_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-truncate s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY truncate_append_index ON truncate_append(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-truncate s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-ddl-drop-column: ALTER TABLE truncate_append DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-ddl-rename-column: ALTER TABLE truncate_append RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-table-size s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-table-size: SELECT citus_total_relation_size('truncate_append'); step s1-commit: COMMIT; step s2-table-size: <... completed> citus_total_relation_size 0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM truncate_append;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> master_modify_multiple_shards 0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-apply-delete-command s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); step s1-commit: COMMIT; step s2-master-apply-delete-command: <... completed> master_apply_delete_command 0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-master-drop-all-shards s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-master-drop-all-shards: SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); step s1-commit: COMMIT; step s2-master-drop-all-shards: <... 
completed> master_drop_all_shards 0 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-truncate s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE truncate_append; step s1-create-non-distributed-table: CREATE TABLE truncate_append(id integer, data text); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-distribute-table: SELECT create_distributed_table('truncate_append', 'id', 'append'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE truncate_append; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE truncate_append; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> error in steps s1-commit s2-truncate: ERROR: relation "truncate_append" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; ERROR: relation "truncate_append" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-truncate s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX truncate_append_index ON truncate_append(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX truncate_append_index; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE truncate_append ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE truncate_append DROP new_column; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-truncate s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE truncate_append RENAME data TO new_column; step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('truncate_append'); citus_total_relation_size 32768 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM truncate_append;'); master_modify_multiple_shards 5 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-apply-delete-command s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-apply-delete-command: SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); master_apply_delete_command 1 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-drop-all-shards s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-drop-all-shards: SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); master_drop_all_shards 1 step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-begin s2-begin s1-distribute-table s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE truncate_append; step s1-create-non-distributed-table: CREATE TABLE truncate_append(id integer, data text); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('truncate_append', 'id', 'append'); create_distributed_table step s2-truncate: TRUNCATE truncate_append; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM truncate_append; count 0 restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/isolation_update_vs_all.out000066400000000000000000000473011317107136600253370ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-update s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-delete: DELETE FROM update_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 4 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-truncate: TRUNCATE update_hash; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-drop: DROP TABLE update_hash; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-update s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-ddl-drop-index: DROP INDEX update_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-update s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY update_hash_index ON update_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-update s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-ddl-drop-column: ALTER TABLE update_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-ddl-rename-column: ALTER TABLE update_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-table-size s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-table-size: SELECT citus_total_relation_size('update_hash'); citus_total_relation_size 57344 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM update_hash;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... 
completed> master_modify_multiple_shards 5 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 0 restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-update s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE update_hash; step s1-create-non-distributed-table: CREATE TABLE update_hash(id integer, data text); COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s2-distribute-table: SELECT create_distributed_table('update_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 10 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM update_hash WHERE id = 4; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 4 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE update_hash; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE update_hash; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> error in steps s1-commit s2-update: ERROR: relation "update_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; ERROR: relation "update_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-update s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX update_hash_index ON update_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX update_hash_index; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); run_command_on_workers (localhost,57637,t,0) (localhost,57638,t,0) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE update_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE update_hash DROP new_column; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-update s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE update_hash RENAME data TO new_column; step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> error in steps s1-commit s2-update: ERROR: column "data" of relation "update_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('update_hash'); citus_total_relation_size 57344 step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM update_hash;'); master_modify_multiple_shards 5 step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 0 restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-distribute-table s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE update_hash; step s1-create-non-distributed-table: CREATE TABLE update_hash(id integer, data text); COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-initialize: COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-distribute-table: SELECT create_distributed_table('update_hash', 'id'); create_distributed_table step s2-update: UPDATE update_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM update_hash; count 10 restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/isolation_upsert_vs_all.out000066400000000000000000000514551317107136600254040ustar00rootroot00000000000000Parsed test spec with 2 sessions starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-upsert s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-update s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-update: UPDATE upsert_hash SET data = 'l' WHERE id = 4; step s1-commit: COMMIT; step s2-update: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-delete s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-delete: DELETE FROM upsert_hash WHERE id = 4; step s1-commit: COMMIT; step s2-delete: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 4 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-truncate s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-truncate: TRUNCATE upsert_hash; step s1-commit: COMMIT; step s2-truncate: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 0 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-drop s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-drop: DROP TABLE upsert_hash; step s1-commit: COMMIT; step s2-drop: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-create-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers (localhost,57637,t,4) (localhost,57638,t,4) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-upsert s2-ddl-drop-index s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-ddl-drop-index: DROP INDEX upsert_hash_index; step s1-commit: COMMIT; step s2-ddl-drop-index: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s1-upsert s2-ddl-create-index-concurrently s1-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-ddl-create-index-concurrently: CREATE INDEX CONCURRENTLY upsert_hash_index ON upsert_hash(id); step s1-commit: COMMIT; step s2-ddl-create-index-concurrently: <... completed> step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers (localhost,57637,t,4) (localhost,57638,t,4) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-add-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s1-commit: COMMIT; step s2-ddl-add-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-upsert s2-ddl-drop-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-ddl-drop-column: ALTER TABLE upsert_hash DROP new_column; step s1-commit: COMMIT; step s2-ddl-drop-column: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-ddl-rename-column s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-ddl-rename-column: ALTER TABLE upsert_hash RENAME data TO new_column; step s1-commit: COMMIT; step s2-ddl-rename-column: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-table-size s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-table-size: SELECT citus_total_relation_size('upsert_hash'); citus_total_relation_size 114688 step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-upsert s2-master-modify-multiple-shards s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM upsert_hash;'); step s1-commit: COMMIT; step s2-master-modify-multiple-shards: <... completed> master_modify_multiple_shards 5 step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 0 restore_isolation_tester_func starting permutation: s1-drop s1-create-non-distributed-table s1-initialize s1-begin s2-begin s1-upsert s2-distribute-table s1-commit s2-commit s1-select-count create_distributed_table step s1-drop: DROP TABLE upsert_hash; step s1-create-non-distributed-table: CREATE TABLE upsert_hash(id integer PRIMARY KEY, data text); step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s2-distribute-table: SELECT create_distributed_table('upsert_hash', 'id'); step s1-commit: COMMIT; step s2-distribute-table: <... completed> create_distributed_table step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-update s2-upsert s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-update: UPDATE upsert_hash SET data = 'l' WHERE id = 4; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-delete s2-upsert s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-delete: DELETE FROM upsert_hash WHERE id = 4; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-truncate s2-upsert s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-truncate: TRUNCATE upsert_hash; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 1 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-drop s2-upsert s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-drop: DROP TABLE upsert_hash; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> error in steps s1-commit s2-upsert: ERROR: relation "upsert_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; ERROR: relation "upsert_hash" does not exist restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-create-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers (localhost,57637,t,4) (localhost,57638,t,4) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-create-index s1-begin s2-begin s1-ddl-drop-index s2-upsert s1-commit s2-commit s1-select-count s1-show-indexes create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-create-index: CREATE INDEX upsert_hash_index ON upsert_hash(id); step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-index: DROP INDEX upsert_hash_index; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... 
completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-indexes: SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); run_command_on_workers (localhost,57637,t,2) (localhost,57638,t,2) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-add-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-ddl-add-column s1-begin s2-begin s1-ddl-drop-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-ddl-add-column: ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-drop-column: ALTER TABLE upsert_hash DROP new_column; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,"") (localhost,57638,t,"") restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-ddl-rename-column s2-upsert s1-commit s2-commit s1-select-count s1-show-columns create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-ddl-rename-column: ALTER TABLE upsert_hash RENAME data TO new_column; step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... 
completed> error in steps s1-commit s2-upsert: ERROR: column "data" of relation "upsert_hash" does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 step s1-show-columns: SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); run_command_on_workers (localhost,57637,t,new_column) (localhost,57638,t,new_column) restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-table-size s2-upsert s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-table-size: SELECT citus_total_relation_size('upsert_hash'); citus_total_relation_size 114688 step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 5 restore_isolation_tester_func starting permutation: s1-initialize s1-begin s2-begin s1-master-modify-multiple-shards s2-upsert s1-commit s2-commit s1-select-count create_distributed_table step s1-initialize: COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; step s1-begin: BEGIN; step s2-begin: BEGIN; step s1-master-modify-multiple-shards: SELECT master_modify_multiple_shards('DELETE FROM upsert_hash;'); master_modify_multiple_shards 5 step s2-upsert: INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; step s1-commit: COMMIT; step s2-upsert: <... completed> step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM upsert_hash; count 1 restore_isolation_tester_func citus-7.0.3/src/test/regress/expected/multi_703_upgrade.out000066400000000000000000000022171317107136600236630ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000; -- tests that the upgrade from 7.0-2 to 7.0-3 properly migrates shard placements DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '7.0-2'; INSERT INTO pg_dist_shard_placement (placementid, shardid, shardstate, shardlength, nodename, nodeport) VALUES (1, 1, 1, 0, 'localhost', :worker_1_port); -- if there are no worker nodes which match the shards this should fail ALTER EXTENSION citus UPDATE TO '7.0-3'; ERROR: There is no node at "localhost:57637" CONTEXT: PL/pgSQL function citus.find_groupid_for_node(text,integer) line 6 at RAISE -- if you add a matching worker the upgrade should succeed INSERT INTO pg_dist_node (nodename, nodeport, groupid) VALUES ('localhost', :worker_1_port, 1); ALTER EXTENSION citus UPDATE TO '7.0-3'; SELECT * FROM pg_dist_placement; placementid | shardid | shardstate | shardlength | groupid -------------+---------+------------+-------------+--------- 1 | 1 | 1 | 0 | 1 (1 row) -- reset and prepare for the rest of the tests DROP EXTENSION citus; CREATE EXTENSION citus; citus-7.0.3/src/test/regress/expected/multi_agg_approximate_distinct.out000066400000000000000000000135561317107136600267230ustar00rootroot00000000000000-- -- MULTI_AGG_APPROXIMATE_DISTINCT -- -- Create HLL extension if present, print false result otherwise SELECT CASE WHEN COUNT(*) > 0 THEN 'CREATE EXTENSION HLL' ELSE 'SELECT false AS hll_present' END AS create_cmd FROM pg_available_extensions() WHERE name = 'hll' \gset :create_cmd; \c - - - 
:worker_1_port :create_cmd; \c - - - :worker_2_port :create_cmd; \c - - - :master_port -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. -- Check approximate count(distinct) at different precisions / error rates SET citus.count_distinct_error_rate = 0.1; SELECT count(distinct l_orderkey) FROM lineitem; count ------- 2612 (1 row) SET citus.count_distinct_error_rate = 0.01; SELECT count(distinct l_orderkey) FROM lineitem; count ------- 2967 (1 row) -- Check approximate count(distinct) for different data types SELECT count(distinct l_partkey) FROM lineitem; count ------- 11654 (1 row) SELECT count(distinct l_extendedprice) FROM lineitem; count ------- 11691 (1 row) SELECT count(distinct l_shipdate) FROM lineitem; count ------- 2483 (1 row) SELECT count(distinct l_comment) FROM lineitem; count ------- 11788 (1 row) -- Check that we can execute approximate count(distinct) on complex expressions SELECT count(distinct (l_orderkey * 2 + 1)) FROM lineitem; count ------- 2980 (1 row) SELECT count(distinct extract(month from l_shipdate)) AS my_month FROM lineitem; my_month ---------- 12 (1 row) SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; ?column? ---------- 3 (1 row) -- Check that we can execute approximate count(distinct) on select queries that -- contain different filter, join, sort and limit clauses SELECT count(distinct l_orderkey) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; count ------- 2355 (1 row) SELECT count(DISTINCT l_orderkey) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; count ------- 835 (1 row) SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY distinct_order_count ASC, l_quantity ASC LIMIT 10; distinct_order_count | l_quantity ----------------------+------------ 210 | 29.00 216 | 13.00 217 | 16.00 219 | 3.00 220 | 18.00 222 | 14.00 223 | 7.00 223 | 17.00 223 | 26.00 223 | 31.00 (10 rows) -- Check that approximate count(distinct) works at a table in a schema other than public -- create necessary objects CREATE SCHEMA test_count_distinct_schema; CREATE TABLE test_count_distinct_schema.nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2); master_create_worker_shards ----------------------------- (1 row) \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; SET search_path TO public; SET citus.count_distinct_error_rate TO 0.01; SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; count ------- 3 (1 row) -- test with search_path is set SET search_path TO test_count_distinct_schema; SELECT COUNT (DISTINCT n_regionkey) FROM nation_hash; count ------- 3 (1 row) SET search_path TO public; -- If we have an order by on count(distinct) that we intend to push down to -- worker nodes, we need to error out. Otherwise, we are fine. 
SET citus.limit_clause_row_fetch_count = 1000; SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY count_distinct LIMIT 10; ERROR: cannot approximate count(distinct) and order by it HINT: You might need to disable approximations for either count(distinct) or limit through configuration. SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY total LIMIT 10; l_returnflag | count_distinct | total --------------+----------------+------- R | 1103 | 2901 A | 1108 | 2944 N | 1265 | 6155 (3 rows) SELECT l_orderkey, count(l_partkey) FILTER (WHERE l_shipmode = 'AIR'), count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR'), count(DISTINCT CASE WHEN l_shipmode = 'AIR' THEN l_partkey ELSE NULL END) FROM lineitem GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count | count | count ------------+-------+-------+------- 12005 | 4 | 4 | 4 5409 | 4 | 4 | 4 4964 | 4 | 4 | 4 14848 | 3 | 3 | 3 14496 | 3 | 3 | 3 13473 | 3 | 3 | 3 13122 | 3 | 3 | 3 12929 | 3 | 3 | 3 12645 | 3 | 3 | 3 12417 | 3 | 3 | 3 (10 rows) -- Check that we can revert config and disable count(distinct) approximations SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. citus-7.0.3/src/test/regress/expected/multi_agg_approximate_distinct_0.out000066400000000000000000000140711317107136600271330ustar00rootroot00000000000000-- -- MULTI_AGG_APPROXIMATE_DISTINCT -- -- Create HLL extension if present, print false result otherwise SELECT CASE WHEN COUNT(*) > 0 THEN 'CREATE EXTENSION HLL' ELSE 'SELECT false AS hll_present' END AS create_cmd FROM pg_available_extensions() WHERE name = 'hll' \gset :create_cmd; hll_present ------------- f (1 row) \c - - - :worker_1_port :create_cmd; hll_present ------------- f (1 row) \c - - - :worker_2_port :create_cmd; hll_present ------------- f (1 row) \c - - - :master_port -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. -- Check approximate count(distinct) at different precisions / error rates SET citus.count_distinct_error_rate = 0.1; SELECT count(distinct l_orderkey) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SET citus.count_distinct_error_rate = 0.01; SELECT count(distinct l_orderkey) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -- Check approximate count(distinct) for different data types SELECT count(distinct l_partkey) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SELECT count(distinct l_extendedprice) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SELECT count(distinct l_shipdate) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. 
SELECT count(distinct l_comment) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -- Check that we can execute approximate count(distinct) on complex expressions SELECT count(distinct (l_orderkey * 2 + 1)) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SELECT count(distinct extract(month from l_shipdate)) AS my_month FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -- Check that we can execute approximate count(distinct) on select queries that -- contain different filter, join, sort and limit clauses SELECT count(distinct l_orderkey) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SELECT count(DISTINCT l_orderkey) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY distinct_order_count ASC, l_quantity ASC LIMIT 10; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -- Check that approximate count(distinct) works at a table in a schema other than public -- create necessary objects CREATE SCHEMA test_count_distinct_schema; CREATE TABLE test_count_distinct_schema.nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2); master_create_worker_shards ----------------------------- (1 row) \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; SET search_path TO public; SET citus.count_distinct_error_rate TO 0.01; SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -- test with search_path is set SET search_path TO test_count_distinct_schema; SELECT COUNT (DISTINCT n_regionkey) FROM nation_hash; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SET search_path TO public; -- If we have an order by on count(distinct) that we intend to push down to -- worker nodes, we need to error out. Otherwise, we are fine. SET citus.limit_clause_row_fetch_count = 1000; SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY count_distinct LIMIT 10; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY total LIMIT 10; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. 
SELECT l_orderkey, count(l_partkey) FILTER (WHERE l_shipmode = 'AIR'), count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR'), count(DISTINCT CASE WHEN l_shipmode = 'AIR' THEN l_partkey ELSE NULL END) FROM lineitem GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute count (distinct) approximation HINT: You need to have the hll extension loaded. -- Check that we can revert config and disable count(distinct) approximations SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. citus-7.0.3/src/test/regress/expected/multi_alter_table_add_constraints.out000066400000000000000000000523401317107136600273620ustar00rootroot00000000000000-- -- MULTI_ALTER_TABLE_ADD_CONSTRAINTS -- -- Test checks whether constraints of distributed tables can be adjusted using -- the ALTER TABLE ... ADD CONSTRAINT ... command. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1450000; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1450000; -- Check "PRIMARY KEY CONSTRAINT" CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table -------------------------- (1 row) -- Can only add primary key constraint on distribution column (or group of columns -- including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(name); ERROR: cannot create constraint on "products" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' INSERT INTO products VALUES(1, 'product_1', 1); -- Should error out, since we are trying to add a new row having a value on p_key column -- conflicting with the existing row. INSERT INTO products VALUES(1, 'product_1', 1); ERROR: duplicate key value violates unique constraint "p_key_1450001" DETAIL: Key (product_no)=(1) already exists. CONTEXT: while executing command on localhost:57638 ALTER TABLE products DROP CONSTRAINT p_key; INSERT INTO products VALUES(1, 'product_1', 1); -- Can not create constraint since it conflicts with the existing data ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); ERROR: could not create unique index "p_key_1450001" DETAIL: Key (product_no)=(1) is duplicated. 
CONTEXT: while executing command on localhost:57637 DROP TABLE products; -- Check "PRIMARY KEY CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric ); SELECT create_reference_table('products_ref'); create_reference_table ------------------------ (1 row) -- Can add PRIMARY KEY to any column ALTER TABLE products_ref ADD CONSTRAINT p_key PRIMARY KEY(name); ALTER TABLE products_ref DROP CONSTRAINT p_key; ALTER TABLE products_ref ADD CONSTRAINT p_key PRIMARY KEY(product_no); INSERT INTO products_ref VALUES(1, 'product_1', 1); -- Should error out, since we are trying to add new row having a value on p_key column -- conflicting with the existing row. INSERT INTO products_ref VALUES(1, 'product_1', 1); ERROR: duplicate key value violates unique constraint "p_key_1450032" DETAIL: Key (product_no)=(1) already exists. CONTEXT: while executing command on localhost:57637 DROP TABLE products_ref; -- Check "PRIMARY KEY CONSTRAINT" on append table CREATE TABLE products_append ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); create_distributed_table -------------------------- (1 row) -- Can only add primary key constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products_append ADD CONSTRAINT p_key_name PRIMARY KEY(name); WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. ERROR: cannot create constraint on "products_append" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). ALTER TABLE products_append ADD CONSTRAINT p_key PRIMARY KEY(product_no); WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. --- Error out since first and third rows have the same product_no \COPY products_append FROM STDIN DELIMITER AS ','; ERROR: duplicate key value violates unique constraint "p_key_1450033" DETAIL: Key (product_no)=(1) already exists. DROP TABLE products_append; -- Check "UNIQUE CONSTRAINT" CREATE TABLE unique_test_table(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table', 'id'); create_distributed_table -------------------------- (1 row) -- Can only add unique constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE unique_test_table ADD CONSTRAINT unn_name UNIQUE(name); ERROR: cannot create constraint on "unique_test_table" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id); -- Error out, since table can not have two rows with same id. INSERT INTO unique_test_table VALUES(1, 'Ahmet'); INSERT INTO unique_test_table VALUES(1, 'Mehmet'); ERROR: duplicate key value violates unique constraint "unn_id_1450035" DETAIL: Key (id)=(1) already exists. 
CONTEXT: while executing command on localhost:57638 ALTER TABLE unique_test_table DROP CONSTRAINT unn_id; -- Insert row which will conflict with the next unique constraint command INSERT INTO unique_test_table VALUES(1, 'Mehmet'); -- Can not create constraint since it conflicts with the existing data ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id); ERROR: could not create unique index "unn_id_1450035" DETAIL: Key (id)=(1) is duplicated. CONTEXT: while executing command on localhost:57637 -- Can create unique constraint over multiple columns which must include -- distribution column ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name); -- Error out, since tables can not have two rows with same id and name. INSERT INTO unique_test_table VALUES(1, 'Mehmet'); ERROR: duplicate key value violates unique constraint "unn_id_name_1450035" DETAIL: Key (id, name)=(1, Mehmet) already exists. CONTEXT: while executing command on localhost:57638 DROP TABLE unique_test_table; -- Check "UNIQUE CONSTRAINT" with reference table CREATE TABLE unique_test_table_ref(id int, name varchar(20)); SELECT create_reference_table('unique_test_table_ref'); create_reference_table ------------------------ (1 row) -- We can add unique constraint on any column with reference tables ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_name UNIQUE(name); ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id UNIQUE(id); -- Error out. Since the table can not have two rows with the same id. INSERT INTO unique_test_table_ref VALUES(1, 'Ahmet'); INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet'); ERROR: duplicate key value violates unique constraint "unn_id_1450066" DETAIL: Key (id)=(1) already exists. CONTEXT: while executing command on localhost:57637 -- We can add unique constraint with multiple columns ALTER TABLE unique_test_table_ref DROP CONSTRAINT unn_id; ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id_name UNIQUE(id,name); -- Error out, since two rows can not have the same id or name. INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet'); DROP TABLE unique_test_table_ref; -- Check "UNIQUE CONSTRAINT" with append table CREATE TABLE unique_test_table_append(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table_append', 'id', 'append'); create_distributed_table -------------------------- (1 row) -- Can only add unique constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_name UNIQUE(name); WARNING: table "unique_test_table_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. ERROR: cannot create constraint on "unique_test_table_append" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_id UNIQUE(id); WARNING: table "unique_test_table_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. -- Error out. Table can not have two rows with the same id. 
\COPY unique_test_table_append FROM STDIN DELIMITER AS ','; ERROR: duplicate key value violates unique constraint "unn_id_1450067" DETAIL: Key (id)=(1) already exists. DROP TABLE unique_test_table_append; -- Check "CHECK CONSTRAINT" CREATE TABLE products ( product_no integer, name text, price numeric, discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table -------------------------- (1 row) -- Can add column and table check constraints ALTER TABLE products ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. INSERT INTO products VALUES(1, 'product_1', -1, -2); ERROR: new row for relation "products_1450069" violates check constraint "p_check_1450069" DETAIL: Failing row contains (1, product_1, -1, -2). CONTEXT: while executing command on localhost:57638 INSERT INTO products VALUES(1, 'product_1', 5, 3); INSERT INTO products VALUES(1, 'product_1', 2, 3); ERROR: new row for relation "products_1450069" violates check constraint "p_multi_check_1450069" DETAIL: Failing row contains (1, product_1, 2, 3). CONTEXT: while executing command on localhost:57638 DROP TABLE products; -- Check "CHECK CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric, discounted_price numeric ); SELECT create_reference_table('products_ref'); create_reference_table ------------------------ (1 row) -- Can add column and table check constraints ALTER TABLE products_ref ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_ref ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. INSERT INTO products_ref VALUES(1, 'product_1', -1, -2); ERROR: new row for relation "products_ref_1450100" violates check constraint "p_check_1450100" DETAIL: Failing row contains (1, product_1, -1, -2). CONTEXT: while executing command on localhost:57637 INSERT INTO products_ref VALUES(1, 'product_1', 5, 3); INSERT INTO products_ref VALUES(1, 'product_1', 2, 3); ERROR: new row for relation "products_ref_1450100" violates check constraint "p_multi_check_1450100" DETAIL: Failing row contains (1, product_1, 2, 3). CONTEXT: while executing command on localhost:57637 DROP TABLE products_ref; -- Check "CHECK CONSTRAINT" with append table CREATE TABLE products_append ( product_no int, name varchar(20), price int, discounted_price int ); SELECT create_distributed_table('products_append', 'product_no', 'append'); create_distributed_table -------------------------- (1 row) -- Can add column and table check constraints ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- Error out,since the third row conflicting with the p_multi_check \COPY products_append FROM STDIN DELIMITER AS ','; ERROR: new row for relation "products_append_1450101" violates check constraint "p_multi_check" DETAIL: Failing row contains (1, Product_3, 8, 10). 
DROP TABLE products_append; -- Check "EXCLUSION CONSTRAINT" CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table -------------------------- (1 row) -- Can only add exclusion constraint on distribution column (or group of columns -- including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); ERROR: cannot create constraint on "products" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). -- We can add composite exclusion ALTER TABLE products ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- 4th command will error out since it conflicts with exc_pno_name constraint INSERT INTO products VALUES(1,'product_1', 5); INSERT INTO products VALUES(1,'product_2', 10); INSERT INTO products VALUES(2,'product_2', 5); INSERT INTO products VALUES(2,'product_2', 5); ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450126" DETAIL: Key (product_no, name)=(2, product_2) conflicts with existing key (product_no, name)=(2, product_2). CONTEXT: while executing command on localhost:57637 DROP TABLE products; -- Check "EXCLUSION CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric ); SELECT create_reference_table('products_ref'); create_reference_table ------------------------ (1 row) -- We can add exclusion constraint on any column ALTER TABLE products_ref ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); -- We can add composite exclusion because none of pair of rows are conflicting ALTER TABLE products_ref ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- Third insertion will error out, since it has the same name with second insertion INSERT INTO products_ref VALUES(1,'product_1', 5); INSERT INTO products_ref VALUES(1,'product_2', 10); INSERT INTO products_ref VALUES(2,'product_2', 5); ERROR: conflicting key value violates exclusion constraint "exc_name_1450134" DETAIL: Key (name)=(product_2) conflicts with existing key (name)=(product_2). CONTEXT: while executing command on localhost:57637 DROP TABLE products_ref; -- Check "EXCLUSION CONSTRAINT" with append table CREATE TABLE products_append ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products_append', 'product_no','append'); create_distributed_table -------------------------- (1 row) -- Can only add exclusion constraint on distribution column (or group of column -- including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. ERROR: cannot create constraint on "products_append" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). 
ALTER TABLE products_append ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); WARNING: table "products_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. -- Error out since first and third can not pass the exclusion check. \COPY products_append FROM STDIN DELIMITER AS ','; ERROR: conflicting key value violates exclusion constraint "exc_pno_name_1450135" DETAIL: Key (product_no, name)=(1, Product_1) conflicts with existing key (product_no, name)=(1, Product_1). DROP TABLE products_append; -- Check "NOT NULL" CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table -------------------------- (1 row) ALTER TABLE products ALTER COLUMN name SET NOT NULL; -- Insertions will error out since both product_no and name can not have NULL value INSERT INTO products VALUES(1,NULL,5); ERROR: null value in column "name" violates not-null constraint DETAIL: Failing row contains (1, null, 5). CONTEXT: while executing command on localhost:57638 INSERT INTO products VALUES(NULL,'product_1', 5); ERROR: cannot perform an INSERT with NULL in the partition column DROP TABLE products; -- Check "NOT NULL" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric ); SELECT create_reference_table('products_ref'); create_reference_table ------------------------ (1 row) ALTER TABLE products_ref ALTER COLUMN name SET NOT NULL; -- Insertions will error out since both product_no and name can not have NULL value INSERT INTO products_ref VALUES(1,NULL,5); ERROR: null value in column "name" violates not-null constraint DETAIL: Failing row contains (1, null, 5). CONTEXT: while executing command on localhost:57637 INSERT INTO products_ref VALUES(NULL,'product_1', 5); DROP TABLE products_ref; -- Check "NOT NULL" with append table CREATE TABLE products_append ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); create_distributed_table -------------------------- (1 row) ALTER TABLE products_append ALTER COLUMN name SET NOT NULL; -- Error out since name and product_no columns can not handle NULL value. 
\COPY products_append FROM STDIN DELIMITER AS ','; DROP TABLE products_append; -- Tests for ADD CONSTRAINT is not only subcommand CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table -------------------------- (1 row) -- Should error out since add constraint is not the single subcommand ALTER TABLE products ADD CONSTRAINT unn_1 UNIQUE(product_no, price), ADD CONSTRAINT unn_2 UNIQUE(product_no, name); ERROR: cannot execute ADD CONSTRAINT command with other subcommands HINT: You can issue each subcommand separately -- Tests for constraints without name -- Commands below should error out since constraints do not have the name ALTER TABLE products ADD UNIQUE(product_no); ERROR: cannot create constraint without a name on a distributed table ALTER TABLE products ADD PRIMARY KEY(product_no); ERROR: cannot create constraint without a name on a distributed table ALTER TABLE products ADD CHECK(product_no <> 0); ERROR: cannot create constraint without a name on a distributed table ALTER TABLE products ADD EXCLUDE USING btree (product_no with =); ERROR: cannot create constraint without a name on a distributed table DROP TABLE products; -- Tests with transactions CREATE TABLE products ( product_no integer, name text, price numeric, discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); create_distributed_table -------------------------- (1 row) BEGIN; INSERT INTO products VALUES(1,'product_1', 5); -- DDL should pick the right connections after a single INSERT ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); ROLLBACK; BEGIN; -- Add constraints ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); ALTER TABLE products ADD CONSTRAINT check_price CHECK(price > discounted_price); ALTER TABLE products ALTER COLUMN product_no SET NOT NULL; ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); INSERT INTO products VALUES(1,'product_1', 10, 8); ROLLBACK; -- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; Constraint | Definition ------------+------------ (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; Constraint | Definition ------------+------------ (0 rows) \c - - - :master_port -- Tests to check the effect of rollback BEGIN; -- Add constraints (which will be rollbacked) ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE products ADD CONSTRAINT check_price CHECK(price > discounted_price); ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); ROLLBACK; -- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; Constraint | Definition ------------+------------ (0 rows) \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; Constraint | Definition ------------+------------ (0 rows) \c - - - :master_port DROP TABLE products; citus-7.0.3/src/test/regress/expected/multi_array_agg.out000066400000000000000000000155501317107136600236030ustar00rootroot00000000000000-- -- MULTI_ARRAY_AGG -- ALTER SEQUENCE 
pg_catalog.pg_dist_shardid_seq RESTART 520000; CREATE OR REPLACE FUNCTION array_sort (ANYARRAY) RETURNS ANYARRAY LANGUAGE SQL AS $$ SELECT ARRAY(SELECT unnest($1) ORDER BY 1) $$; -- Check multi_cat_agg() aggregate which is used to implement array_agg() SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i); array_cat_agg --------------- {1,2,3,4} (1 row) -- Check that we don't support distinct and order by with array_agg() SELECT array_agg(distinct l_orderkey) FROM lineitem; ERROR: array_agg (distinct) is unsupported SELECT array_agg(l_orderkey ORDER BY l_partkey) FROM lineitem; ERROR: array_agg with order by is unsupported SELECT array_agg(distinct l_orderkey ORDER BY l_orderkey) FROM lineitem; ERROR: array_agg with order by is unsupported -- Check array_agg() for different data types and LIMIT clauses SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort -------------------------------------------------- {2132,15635,24027,63700,67310,155190} {106170} {4297,19036,29380,62143,128449,183095} {88035} {37531,108570,123927} {139636} {79251,94780,145243,151894,157238,163073,182052} {2743,11615,44161,82704,85811,197921} {33918,60519,61336,137469} {88362,89414,169544} (10 rows) SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ----------------------------------------------------------------- {13309.60,21168.23,22824.48,28955.64,45983.16,49620.16} {44694.46} {2618.76,28733.64,32986.52,39890.88,46796.47,54058.05} {30690.90} {23678.55,50723.92,73426.50} {61998.31} {6476.15,11594.16,13608.60,31809.96,43058.75,73943.82,81639.88} {2210.32,6582.96,9159.66,47227.60,64605.44,79059.64} {7532.30,40217.23,47344.32,75928.31} {9681.24,17554.68,30875.02} (10 rows) SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort -------------------------------------------------------------------------------- {01-29-1996,01-30-1996,03-13-1996,03-30-1996,04-12-1996,04-21-1996} {01-28-1997} {10-29-1993,11-09-1993,12-04-1993,12-14-1993,01-16-1994,02-02-1994} {01-10-1996} {08-08-1994,10-16-1994,10-31-1994} {04-27-1992} {01-15-1996,01-16-1996,02-01-1996,02-10-1996,02-11-1996,03-21-1996,05-07-1996} {07-21-1995,08-04-1995,08-07-1995,08-14-1995,08-28-1995,10-23-1995} {10-29-1993,11-09-1993,12-09-1993,12-09-1993} {10-09-1998,10-23-1998,10-30-1998} (10 rows) SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; array_sort ---------------------------------------------------------------------------------------------- {"AIR ","FOB ","MAIL ","MAIL ","REG AIR ","TRUCK "} {"RAIL "} {"AIR ","FOB ","RAIL ","RAIL ","SHIP ","TRUCK "} {"REG AIR "} {"AIR ","AIR ","FOB "} {"TRUCK "} {"FOB ","FOB ","FOB ","FOB ","MAIL ","SHIP ","TRUCK "} {"AIR ","AIR ","AIR ","RAIL ","REG AIR ","TRUCK "} {"AIR ","MAIL ","MAIL ","TRUCK "} {"FOB ","FOB ","REG AIR "} (10 rows) -- Check that we can execute array_agg() within other functions SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem; array_length -------------- 12000 (1 row) -- Check that we can execute array_agg() on select queries that hit multiple -- shards and contain different aggregates, filter clauses and other complex -- expressions. Note that the l_orderkey ranges are such that the matching rows -- lie in different shards. 
SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderkey)) FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | count | avg | array_sort ------------+-------+-----------------------+-------------------------------------------------------------------------------------------------- 1.00 | 17 | 1477.1258823529411765 | {5543,5633,5634,5698,5766,5856,5857,5986,8997,9026,9158,9184,9220,9222,9348,9383,9476} 2.00 | 19 | 3078.4242105263157895 | {5506,5540,5573,5669,5703,5730,5798,5831,5893,5920,5923,9030,9058,9123,9124,9188,9344,9441,9476} 3.00 | 14 | 4714.0392857142857143 | {5509,5543,5605,5606,5827,9124,9157,9184,9223,9254,9349,9414,9475,9477} 4.00 | 19 | 5929.7136842105263158 | {5504,5507,5508,5511,5538,5764,5766,5826,5829,5862,5959,5985,9091,9120,9281,9347,9382,9440,9473} (4 rows) SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my_month FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | my_month ------------+------------------------------------------------ 1.00 | {2,3,4,4,4,5,5,5,6,7,7,7,7,9,9,11,11} 2.00 | {1,3,5,5,5,5,6,6,6,7,7,8,10,10,11,11,11,12,12} 3.00 | {3,4,5,6,7,7,8,8,8,9,9,10,11,11} 4.00 | {1,1,1,2,2,2,5,5,6,6,6,6,8,9,10,10,11,11,12} (4 rows) SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; l_quantity | array_sort ------------+--------------------------------------------- 1.00 | {11269,11397,11713,11715,11973,18317,18445} 2.00 | {11847,18061,18247,18953} 3.00 | {18249,18315,18699,18951,18955} 4.00 | {11653,11659,18241,18765} (4 rows) -- Check that we can execute array_agg() with an expression containing NULL values SELECT array_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 10; array_agg -------------------------------------------------------------------------------------------------------------------------------------------------- {NULL,36.00,NULL,28.00,24.00,32.00,38.00,45.00,49.00,27.00,NULL,28.00,26.00,30.00,NULL,26.00,50.00,37.00,NULL,NULL,46.00,28.00,38.00,35.00,NULL} (1 row) -- Check that we return NULL in case there are no input rows to array_agg() SELECT array_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; array_agg ----------- (1 row) citus-7.0.3/src/test/regress/expected/multi_average_expression.out000066400000000000000000000050061317107136600255330ustar00rootroot00000000000000-- -- MULTI_AVERAGE_EXPRESSION_ORDER -- -- This test checks that the group-by columns don't need to be above an average -- expression, and can be anywhere in the projection order. This is in response -- to a bug we had due to the average expression introducing new columns. 
SELECT sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order, l_returnflag, l_linestatus FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order | l_returnflag | l_linestatus -----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+-------------+--------------+-------------- 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 | A | F 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 | N | F 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 | N | O 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901 | R | F (4 rows) -- These tests check that distributed averages only consider non-null input -- values. This is in response to a bug we had due to the distributed average -- using sum(expression)/count(*) to calculate avg(expression). We now use the -- correct form sum(expression)/count(expression) for average calculations. -- Run avg() on an expression that contains some null values SELECT avg(case when l_quantity > 20 then l_quantity end) FROM lineitem; avg --------------------- 35.3570440077497924 (1 row) -- Run avg() on an expression that contains only null values SELECT avg(case when l_quantity > 5000 then l_quantity end) FROM lineitem; avg ----- (1 row) citus-7.0.3/src/test/regress/expected/multi_basic_queries.out000066400000000000000000000013711317107136600244610ustar00rootroot00000000000000-- -- MULTI_BASIC_QUERIES -- -- Execute simple sum, average, and count queries on data recently uploaded to -- our partitioned table. SELECT count(*) FROM lineitem; count ------- 12000 (1 row) SELECT sum(l_extendedprice) FROM lineitem; sum -------------- 457702024.50 (1 row) SELECT avg(l_extendedprice) FROM lineitem; avg -------------------- 38141.835375000000 (1 row) -- Verify that we can do queries in read-only mode BEGIN; SET TRANSACTION READ ONLY; SELECT count(*) FROM lineitem; count ------- 12000 (1 row) COMMIT; -- Verify temp tables which are used for final result aggregation don't persist. 
SELECT count(*) FROM pg_class WHERE relname LIKE 'pg_merge_job_%' AND relkind = 'r'; count ------- 0 (1 row) citus-7.0.3/src/test/regress/expected/multi_behavioral_analytics_basics.out000066400000000000000000000373141317107136600273600ustar00rootroot00000000000000------------------------------------ ------------------------------------ -- Vanilla funnel query ------------------------------------ ------------------------------------ INSERT INTO agg_results (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 5 | 5 | 15.6000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 8 | 8 | 16.1250000000000000 (1 row) ------------------------------------ ------------------------------------ -- Funnel, grouped by the number of times a user has done an event ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id, value_1_agg, value_2_agg) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND 
events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY count_pay; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 8 | 8 | 45.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 6 | 6 | 42.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 33 | 33 | 50.3939393939393939 (1 row) ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in at least two of X and Y and Z segments ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id) SELECT user_id FROM users_table WHERE (value_1 = 10 OR value_1 = 11 OR value_1 = 12) GROUP BY user_id HAVING count(distinct value_1) >= 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 4 | 4 | 51.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific 
criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 34 | 27 | 40.5588235294117647 (1 row) ------------------------------------ ------------------------------------ -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 8 | 7 | 39.7500000000000000 (1 row) ------------------------------------ ------------------------------------ -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type!=100 AND value_3 > 100 AND user_id=users_table.user_id) AND EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 1202 | 14 | 47.7462562396006656 (1 row) ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id=users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 205 | 2 | 55.2195121951219512 (1 row) ------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 
AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id GROUP BY user_id HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 78 | 34 | 52.4230769230769231 (1 row) ------------------------------------ ------------------------------------ -- Find me all users_table who logged in more than once ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, value_1 from ( SELECT user_id, value_1 From users_table WHERE value_2 > 100 and user_id = 15 GROUP BY value_1, user_id HAVING count(*) > 1 ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 6 | 1 | 15.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Find me all users_table who has done some event and has filters ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And user_id in (select user_id From users_table Where value_1 = 15 And value_2 > 25); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 2 | 2 | 30.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Which events_table did people who has done some specific events_table ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE user_id in (SELECT user_id from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 3084 | 32 | 44.1498054474708171 (1 row) ------------------------------------ ------------------------------------ -- Find me all the users_table who has done some event more than three times ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id) select user_id from ( select user_id from events_table where event_type = 901 group by user_id having count(*) > 3 ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 1 | 1 | 57.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Find my assets that have the highest probability and fetch their metadata ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg, value_3_agg) SELECT users_table.user_id, users_table.value_1, prob FROM 
users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; count | count | avg -------+-------+--------------------- 14371 | 101 | 50.5232064574490293 (1 row) citus-7.0.3/src/test/regress/expected/multi_behavioral_analytics_single_shard_queries.out000066400000000000000000000401711317107136600323060ustar00rootroot00000000000000------------------------------------ ------------------------------------ -- Vanilla funnel query -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q WHERE user_id = 20; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 1 | 1 | 20.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Vanilla funnel query -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND (u.user_id = 13 OR u.user_id = 20) AND (e.user_id = 13 OR e.user_id = 20) AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q WHERE (user_id = 13 OR user_id = 20); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 2 | 2 | 16.5000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event -- single shard query ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) 
t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) WHERE t1.user_id = 20 GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; ------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event -- two shards query ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND (e.user_id = 20 OR e.user_id = 17) AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND (e.user_id = 20 OR e.user_id = 17) AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE (e.user_id = 20 OR e.user_id = 17) AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) WHERE (t1.user_id = 20 OR t1.user_id = 17) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 2 | 2 | 18.5000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table -- single shard query ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true WHERE user_id = 65 GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 1 | 1 | 65.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table -- two shards query ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as 
user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND (user_id = 65 OR user_id = 12) AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND (user_id = 65 OR user_id = 12) AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true WHERE (user_id = 65 OR user_id = 12) GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 2 | 2 | 38.5000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60) AND user_id = 7; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+-------------------- 1 | 1 | 7.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20 AND (user_id = 7 OR user_id = 20)) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40 AND (user_id = 7 OR user_id = 20)) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60 AND (user_id = 7 OR user_id = 20)) AND (user_id = 7 OR user_id = 20); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 2 | 2 | 13.5000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND user_id=users_table.user_id) AND user_id = 61; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 1 | 1 | 61.0000000000000000 (1 row) 
------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND (user_id = 61 OR user_id = 51) AND user_id=users_table.user_id) AND (user_id = 61 OR user_id = 51); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 2 | 2 | 56.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND user_id = 96 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id=users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 110 | 1 | 96.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND (user_id = 96 OR user_id = 8) AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id=users_table.user_id AND (user_id = 96 OR user_id = 8)) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id AND (user_id = 96 OR user_id = 8)); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 205 | 2 | 55.2195121951219512 (1 row) ------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND user_id = 47 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id AND user_id = 47 GROUP BY user_id HAVING Count(*) > 2); -- 
get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 6 | 1 | 47.0000000000000000 (1 row) ------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND (user_id = 47 or user_id = 81) AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id AND (user_id = 47 or user_id = 81) GROUP BY user_id HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; count | count | avg -------+-------+--------------------- 7 | 2 | 51.8571428571428571 (1 row) citus-7.0.3/src/test/regress/expected/multi_binary_master_copy_format.out000066400000000000000000000012601317107136600271010ustar00rootroot00000000000000-- -- MULTI_BINARY_MASTER_COPY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 430000; -- Try binary master copy for different executors SET citus.binary_master_copy_format TO 'on'; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM lineitem; count ------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; l_shipmode ------------ TRUCK MAIL (2 rows) SET citus.task_executor_type TO 'real-time'; SELECT count(*) FROM lineitem; count ------- 12000 (1 row) SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; l_shipmode ------------ TRUCK MAIL (2 rows) citus-7.0.3/src/test/regress/expected/multi_cache_invalidation.out000066400000000000000000000015131317107136600254450ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1601000; CREATE TABLE tab9 (test_id integer NOT NULL, data int); CREATE TABLE tab10 (test_id integer NOT NULL, data int); SELECT master_create_distributed_table('tab9', 'test_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('tab10', 'test_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('tab9', 1, 1); master_create_worker_shards ----------------------------- (1 row) TRUNCATE tab9; UPDATE pg_dist_shard SET logicalrelid = 'tab10'::regclass WHERE logicalrelid = 'tab9'::regclass; TRUNCATE tab10; ERROR: cached metadata for shard 1601000 is inconsistent HINT: Reconnect and try again. DROP TABLE tab9; DROP TABLE tab10; citus-7.0.3/src/test/regress/expected/multi_citus_tools.out000066400000000000000000000542471317107136600242240ustar00rootroot00000000000000-- -- MULTI CITUS TOOLS -- -- tests UDFs created for citus tools -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000; -- test with invalid port, prevent OS dependent warning from being displayed SET client_min_messages to ERROR; -- PG 9.5 does not show context for plpgsql raise -- message whereas PG 9.6 shows. disabling it -- for this test only to have consistent behavior -- b/w PG 9.6+ and PG 9.5. 
\set SHOW_CONTEXT never SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------------------------------ localhost | 666 | f | failed to connect to localhost:666 (1 row) SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------------------------------ localhost | 666 | f | failed to connect to localhost:666 (1 row) RESET client_min_messages; -- store worker node name and port SELECT quote_literal(node_name) as node_name, node_port as node_port FROM master_get_active_worker_nodes() ORDER BY node_port LIMIT 1 \gset -- connect to the first worker and ask for shard count, should return 0 SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 0 (1 row) -- connect to the first worker and ask for shards, should fail with -- expecting a single column error SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------------------------------------ localhost | 57637 | f | expected a single column in query target (1 row) -- query result may only contain a single row SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], false); node_name | node_port | success | result -----------+-----------+---------+--------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) -- send multiple queries SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], false); node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 (2 rows) -- send multiple queries, one fails SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], false); node_name | node_port | success | result -----------+-----------+---------+--------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result (2 rows) -- send multiple queries, both fail SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], false); node_name | node_port | success | result -----------+-----------+---------+--------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result (2 rows) -- can create tables at worker SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['create table first_table(a int, 
b int)', 'create table second_table(a int, b int)']::text[], false); node_name | node_port | success | result -----------+-----------+---------+-------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE (2 rows) -- can insert into table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], false); node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 20 (1 row) -- insert into second table twice SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------- localhost | 57637 | t | INSERT 0 20 (1 row) -- check inserted values at second table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], false); node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 40 (1 row) -- store worker node name and port again -- previously set variables become unusable after some number of uses SELECT quote_literal(node_name) as node_name, node_port as node_port FROM master_get_active_worker_nodes() ORDER BY node_port LIMIT 1 \gset -- create index on tables SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], false); node_name | node_port | success | result -----------+-----------+---------+-------------- localhost | 57637 | t | CREATE INDEX (1 row) -- drop created tables SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------ localhost | 57637 | t | DROP TABLE (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------ localhost | 57637 | t | DROP TABLE (1 row) -- verify table is dropped SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], false); node_name | node_port | success | result -----------+-----------+---------+------------------------------------------------ localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) -- -- Run the same tests in parallel -- -- connect to the first worker and ask for shard count, should return 0 SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); 
node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 0 (1 row) -- connect to the first worker and ask for shards, should fail with -- expecting a single column error SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------------------------------------ localhost | 57637 | f | expected a single column in query target (1 row) -- query result may only contain a single row SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], true); node_name | node_port | success | result -----------+-----------+---------+--------------------------------------- localhost | 57637 | f | expected a single row in query result (1 row) -- send multiple queries SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], true); node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 1 localhost | 57637 | t | 2 (2 rows) -- send multiple queries, one fails SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], true); node_name | node_port | success | result -----------+-----------+---------+--------------------------------------- localhost | 57637 | t | 1 localhost | 57637 | f | expected a single row in query result (2 rows) -- send multiple queries, both fail SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], true); node_name | node_port | success | result -----------+-----------+---------+--------------------------------------- localhost | 57637 | f | expected a single row in query result localhost | 57637 | f | expected a single row in query result (2 rows) -- can create tables at worker SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['create table first_table(a int, b int)', 'create table second_table(a int, b int)']::text[], true); node_name | node_port | success | result -----------+-----------+---------+-------------- localhost | 57637 | t | CREATE TABLE localhost | 57637 | t | CREATE TABLE (2 rows) -- store worker node name and port again -- previously set variables become unusable after some number of uses SELECT quote_literal(node_name) as node_name, node_port as node_port FROM master_get_active_worker_nodes() ORDER BY node_port LIMIT 1 \gset -- can insert into table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], true); node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 20 (1 row) -- 
insert into second table twice SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------- localhost | 57637 | t | INSERT 0 20 (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------- localhost | 57637 | t | INSERT 0 20 (1 row) -- check inserted values at second table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); node_name | node_port | success | result -----------+-----------+---------+-------- localhost | 57637 | t | 40 (1 row) -- create index on tables SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], true); node_name | node_port | success | result -----------+-----------+---------+-------------- localhost | 57637 | t | CREATE INDEX (1 row) -- drop created tables SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------ localhost | 57637 | t | DROP TABLE (1 row) SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------ localhost | 57637 | t | DROP TABLE (1 row) -- verify table is dropped SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); node_name | node_port | success | result -----------+-----------+---------+------------------------------------------------ localhost | 57637 | f | ERROR: relation "second_table" does not exist (1 row) -- run_command_on_XXX tests SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC; nodename | nodeport | success | result -----------+----------+---------+-------- localhost | 57637 | t | 1 localhost | 57638 | t | 1 (2 rows) SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC; nodename | nodeport | success | result -----------+----------+---------+-------- localhost | 57637 | t | 0 localhost | 57638 | t | 0 (2 rows) -- make sure run_on_all_placements respects shardstate CREATE TABLE check_placements (key int); SELECT master_create_distributed_table('check_placements', 'key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('check_placements', 5, 2); master_create_worker_shards ----------------------------- (1 row) SELECT * FROM run_command_on_placements('check_placements', 'select 1'); nodename | nodeport | shardid | success | result -----------+----------+---------+---------+-------- localhost | 57637 | 1240000 | t | 1 localhost | 57638 | 1240000 | t | 1 localhost | 57637 | 1240001 | t | 1 localhost | 57638 | 1240001 | t | 1 localhost | 57637 | 1240002 | t | 1 localhost | 57638 | 1240002 | t | 1 localhost | 57637 | 1240003 | t | 1 localhost | 57638 | 1240003 | t | 1 localhost | 57637 | 1240004 | t | 1 localhost | 57638 | 
1240004 | t | 1 (10 rows) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0 AND nodeport = :worker_1_port; SELECT * FROM run_command_on_placements('check_placements', 'select 1'); nodename | nodeport | shardid | success | result -----------+----------+---------+---------+-------- localhost | 57638 | 1240000 | t | 1 localhost | 57637 | 1240001 | t | 1 localhost | 57638 | 1240001 | t | 1 localhost | 57638 | 1240002 | t | 1 localhost | 57637 | 1240003 | t | 1 localhost | 57638 | 1240003 | t | 1 localhost | 57638 | 1240004 | t | 1 (7 rows) DROP TABLE check_placements CASCADE; -- make sure run_on_all_colocated_placements correctly detects colocation CREATE TABLE check_colocated (key int); SELECT master_create_distributed_table('check_colocated', 'key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('check_colocated', 5, 2); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE second_table (key int); SELECT master_create_distributed_table('second_table', 'key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('second_table', 4, 2); master_create_worker_shards ----------------------------- (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); ERROR: tables check_colocated and second_table are not co-located -- even when the difference is in replication factor, an error is thrown SELECT master_drop_all_shards('second_table'::regclass, current_schema(), 'second_table'); master_drop_all_shards ------------------------ 4 (1 row) SELECT master_create_worker_shards('second_table', 5, 1); master_create_worker_shards ----------------------------- (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); ERROR: tables check_colocated and second_table are not co-located -- when everything matches, the command is run! 
SELECT master_drop_all_shards('second_table'::regclass, current_schema(), 'second_table'); master_drop_all_shards ------------------------ 5 (1 row) SELECT master_create_worker_shards('second_table', 5, 2); master_create_worker_shards ----------------------------- (1 row) SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); nodename | nodeport | shardid1 | shardid2 | success | result -----------+----------+----------+----------+---------+-------- localhost | 57637 | 1240005 | 1240019 | t | 1 localhost | 57638 | 1240005 | 1240019 | t | 1 localhost | 57637 | 1240006 | 1240020 | t | 1 localhost | 57638 | 1240006 | 1240020 | t | 1 localhost | 57637 | 1240007 | 1240021 | t | 1 localhost | 57638 | 1240007 | 1240021 | t | 1 localhost | 57637 | 1240008 | 1240022 | t | 1 localhost | 57638 | 1240008 | 1240022 | t | 1 localhost | 57637 | 1240009 | 1240023 | t | 1 localhost | 57638 | 1240009 | 1240023 | t | 1 (10 rows) -- when a placement is invalid considers the tables to not be colocated UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = ( SELECT shardid FROM pg_dist_shard WHERE nodeport = :worker_1_port AND logicalrelid = 'second_table'::regclass ORDER BY 1 ASC LIMIT 1 ); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); ERROR: tables check_colocated and second_table are not co-located -- when matching placement is also invalid, considers the tables to be colocated UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = ( SELECT shardid FROM pg_dist_shard WHERE nodeport = :worker_1_port AND logicalrelid = 'check_colocated'::regclass ORDER BY 1 ASC LIMIT 1 ); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); nodename | nodeport | shardid1 | shardid2 | success | result -----------+----------+----------+----------+---------+-------- localhost | 57638 | 1240005 | 1240019 | t | 1 localhost | 57637 | 1240006 | 1240020 | t | 1 localhost | 57638 | 1240006 | 1240020 | t | 1 localhost | 57637 | 1240007 | 1240021 | t | 1 localhost | 57638 | 1240007 | 1240021 | t | 1 localhost | 57637 | 1240008 | 1240022 | t | 1 localhost | 57638 | 1240008 | 1240022 | t | 1 localhost | 57637 | 1240009 | 1240023 | t | 1 localhost | 57638 | 1240009 | 1240023 | t | 1 (9 rows) DROP TABLE check_colocated CASCADE; DROP TABLE second_table CASCADE; -- runs on all shards CREATE TABLE check_shards (key int); SELECT master_create_distributed_table('check_shards', 'key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('check_shards', 5, 2); master_create_worker_shards ----------------------------- (1 row) SELECT * FROM run_command_on_shards('check_shards', 'select 1'); shardid | success | result ---------+---------+-------- 1240024 | t | 1 1240025 | t | 1 1240026 | t | 1 1240027 | t | 1 1240028 | t | 1 (5 rows) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0; SELECT * FROM run_command_on_shards('check_shards', 'select 1'); NOTICE: some shards do not have active placements shardid | success | result ---------+---------+-------- 1240025 | t | 1 1240027 | t | 1 (2 rows) DROP TABLE check_shards CASCADE; -- set SHOW_CONTEXT back to default \set SHOW_CONTEXT errors citus-7.0.3/src/test/regress/expected/multi_cluster_management.out000066400000000000000000000451311317107136600255220ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; -- Tests functions 
related to cluster membership -- before starting the test, lets try to create reference table and see a -- meaningful error CREATE TABLE test_reference_table (y int primary key, name text); SELECT create_reference_table('test_reference_table'); ERROR: cannot create reference table "test_reference_table" DETAIL: There are no active worker nodes. -- add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes -------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- try to add a node that is already in the cluster SELECT nodeid, groupid FROM master_add_node('localhost', :worker_1_port); nodeid | groupid --------+--------- 1 | 1 (1 row) -- get the active nodes SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes -------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- try to remove a node (with no placements) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- verify that the node has been deleted SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes -------------------------------- (localhost,57637) (1 row) -- try to disable a node with no placements see that node is removed SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) SELECT master_disable_node('localhost', :worker_2_port); master_disable_node --------------------- (1 row) SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes -------------------------------- (localhost,57637) (1 row) -- add some shard placements to the cluster SELECT isactive FROM master_activate_node('localhost', :worker_2_port); isactive ---------- t (1 row) CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT master_create_distributed_table('cluster_management_test', 'col_1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('cluster_management_test', 16, 1); master_create_worker_shards ----------------------------- (1 row) -- see that there are some active placements in the candidate node SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1220001 | 1 | localhost | 57638 1220003 | 1 | localhost | 57638 1220005 | 1 | localhost | 57638 1220007 | 1 | localhost | 57638 1220009 | 1 | localhost | 57638 1220011 | 1 | localhost | 57638 1220013 | 1 | localhost | 57638 1220015 | 1 | localhost | 57638 (8 rows) -- try to remove a node with active placements and see that node removal is failed SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes -------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- insert a row so that master_disable_node() exercises closing connections INSERT INTO test_reference_table VALUES (1, '1'); -- try to disable a node with active placements see that node is removed -- observe that a notification is displayed SELECT master_disable_node('localhost', :worker_2_port); NOTICE: Node localhost:57638 has active shard 
placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57638) to activate this node back. master_disable_node --------------------- (1 row) SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes -------------------------------- (localhost,57637) (1 row) -- try to disable a node which does not exist and see that an error is thrown SELECT master_disable_node('localhost.noexist', 2345); ERROR: node at "localhost.noexist:2345" does not exist -- restore the node for next tests SELECT isactive FROM master_activate_node('localhost', :worker_2_port); isactive ---------- t (1 row) -- try to remove a node with active placements and see that node removal is failed SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements -- mark all placements in the candidate node as inactive SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1220001 | 3 | localhost | 57638 1220003 | 3 | localhost | 57638 1220005 | 3 | localhost | 57638 1220007 | 3 | localhost | 57638 1220009 | 3 | localhost | 57638 1220011 | 3 | localhost | 57638 1220013 | 3 | localhost | 57638 1220015 | 3 | localhost | 57638 (8 rows) -- try to remove a node with only inactive placements and see that removal still fails SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_get_active_worker_nodes(); master_get_active_worker_nodes -------------------------------- (localhost,57638) (localhost,57637) (2 rows) -- clean-up SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=:worker_2_group; -- when there is no primary we should get a pretty error UPDATE pg_dist_node SET noderole = 'secondary' WHERE nodeport=:worker_2_port; SELECT * FROM cluster_management_test; ERROR: node group 3 does not have a primary node -- when there is no node at all in the group we should get a different error DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port; SELECT * FROM cluster_management_test; ERROR: there is a shard placement in node group 3 but there are no nodes in that group -- clean-up SELECT groupid as new_group FROM master_add_node('localhost', :worker_2_port) \gset UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group; -- test that you are allowed to remove secondary nodes even if there are placements SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary'); ?column? 
---------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); ERROR: you cannot remove the primary node of a node group which has shard placements SELECT master_remove_node('localhost', 9990); master_remove_node -------------------- (1 row) -- clean-up DROP TABLE cluster_management_test; -- check that adding/removing nodes are propagated to nodes with hasmetadata=true SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) UPDATE pg_dist_node SET hasmetadata=true WHERE nodeport=:worker_1_port; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport -----------+---------- localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport ----------+---------- (0 rows) \c - - - :master_port -- check that added nodes are not propagated to nodes with hasmetadata=false UPDATE pg_dist_node SET hasmetadata=false WHERE nodeport=:worker_1_port; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport ----------+---------- (0 rows) \c - - - :master_port -- check that removing two nodes in the same transaction works SELECT master_remove_node('localhost', :worker_1_port), master_remove_node('localhost', :worker_2_port); master_remove_node | master_remove_node --------------------+-------------------- | (1 row) SELECT count(1) FROM pg_dist_node; count ------- 0 (1 row) -- check that adding two nodes in the same transaction works SELECT master_add_node('localhost', :worker_1_port), master_add_node('localhost', :worker_2_port); master_add_node | master_add_node ---------------------------------------------------+--------------------------------------------------- (8,7,localhost,57637,default,f,t,primary,default) | (9,8,localhost,57638,default,f,t,primary,default) (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster --------+---------+-----------+----------+----------+-------------+----------+----------+------------- 8 | 7 | localhost | 57637 | default | f | t | primary | default 9 | 8 | localhost | 57638 | default | f | t | primary | default (2 rows) -- check that mixed add/remove node commands work fine inside transaction BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport ----------+---------- (0 rows) UPDATE pg_dist_node SET hasmetadata=true WHERE nodeport=:worker_1_port; BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
---------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport -----------+---------- localhost | 57638 (1 row) \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; nodename | nodeport -----------+---------- localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; master_remove_node -------------------- (2 rows) SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- check that a distributed table can be created after adding a node in a transaction SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) CREATE TABLE temp(col1 text, col2 int); SELECT create_distributed_table('temp', 'col1'); create_distributed_table -------------------------- (1 row) INSERT INTO temp VALUES ('row1', 1); INSERT INTO temp VALUES ('row2', 2); COMMIT; SELECT col1, col2 FROM temp ORDER BY col1; col1 | col2 ------+------ row1 | 1 row2 | 2 (2 rows) SELECT count(*) FROM pg_dist_shard_placement, pg_dist_shard WHERE pg_dist_shard_placement.shardid = pg_dist_shard.shardid AND pg_dist_shard.logicalrelid = 'temp'::regclass AND pg_dist_shard_placement.nodeport = :worker_2_port; count ------- 32 (1 row) DROP TABLE temp; \c - - - :worker_1_port DELETE FROM pg_dist_partition; DELETE FROM pg_dist_shard; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_node; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ---------------------------- (1 row) -- check that you can't add a primary to a non-default cluster SELECT master_add_node('localhost', 9999, nodecluster => 'olap'); ERROR: primaries must be added to the default cluster -- check that you can't add more than one primary to a group SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 9999, groupid => :worker_1_group, noderole => 'primary'); ERROR: group 12 already has a primary node -- check that you can add secondaries and unavailable nodes to a group SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary'); ?column? ---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable'); ?column? ---------- 1 (1 row) -- add_inactive_node also works with secondaries SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary'); ?column? 
---------- 1 (1 row) -- check that you can add a seconary to a non-default cluster, and activate it, and remove it SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); master_add_inactive_node --------------------------------------------------- (19,14,localhost,9999,default,f,f,secondary,olap) (1 row) SELECT master_activate_node('localhost', 9999); master_activate_node --------------------------------------------------- (19,14,localhost,9999,default,f,t,secondary,olap) (1 row) SELECT master_disable_node('localhost', 9999); master_disable_node --------------------- (1 row) SELECT master_remove_node('localhost', 9999); master_remove_node -------------------- (1 row) -- check that you can't manually add two primaries to a group INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole) VALUES ('localhost', 5000, :worker_1_group, 'primary'); ERROR: there cannot be two primary nodes in a group CONTEXT: PL/pgSQL function citus.pg_dist_node_trigger_func() line 10 at RAISE UPDATE pg_dist_node SET noderole = 'primary' WHERE groupid = :worker_1_group AND nodeport = 9998; ERROR: there cannot be two primary nodes in a group CONTEXT: PL/pgSQL function citus.pg_dist_node_trigger_func() line 18 at RAISE -- check that you can't manually add a primary to a non-default cluster INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster) VALUES ('localhost', 5000, 1000, 'primary', 'olap'); ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster" DETAIL: Failing row contains (17, 1000, localhost, 5000, default, f, t, primary, olap). UPDATE pg_dist_node SET nodecluster = 'olap' WHERE nodeport = :worker_1_port; ERROR: new row for relation "pg_dist_node" violates check constraint "primaries_are_only_allowed_in_the_default_cluster" DETAIL: Failing row contains (13, 12, localhost, 57637, default, f, t, primary, olap). -- check that you /can/ add a secondary node to a non-default cluster SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); master_add_node --------------------------------------------------- (20,12,localhost,8888,default,f,t,secondary,olap) (1 row) -- check that super-long cluster names are truncated SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'overflow' ); master_add_node -------------------------------------------------------------------------------------------------------------- (21,12,localhost,8887,default,f,t,secondary,thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.) (1 row) SELECT * FROM pg_dist_node WHERE nodeport=8887; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster --------+---------+-----------+----------+----------+-------------+----------+-----------+----------------------------------------------------------------- 21 | 12 | localhost | 8887 | default | f | t | secondary | thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars. 
(1 row) -- don't remove the secondary and unavailable nodes, check that no commands are sent to -- them in any of the remaining tests -- master_add_secondary_node lets you skip looking up the groupid SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); master_add_secondary_node ------------------------------------------------------ (22,12,localhost,9995,default,f,t,secondary,default) (1 row) SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); master_add_secondary_node ------------------------------------------------------ (23,14,localhost,9994,default,f,t,secondary,default) (1 row) SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); ERROR: node at "localhost:2000" does not exist SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); master_add_secondary_node ------------------------------------------------------------- (24,12,localhost,9992,default,f,t,secondary,second-cluster) (1 row) citus-7.0.3/src/test/regress/expected/multi_colocated_shard_transfer.out000066400000000000000000000263141317107136600266710ustar00rootroot00000000000000-- -- MULTI_COLOCATED_SHARD_TRANSFER -- -- These tables are created in multi_colocation_utils test -- test repair -- manually set shardstate as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND (shardid = 1300000 OR shardid = 1300004); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid = 1300016; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid = 1300020; -- test repairing colocated shards -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300000 | table1_group1 | 57637 | 1000 | 1 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 1300001 | table1_group1 | 57638 | 1000 | 1 1300002 | table1_group1 | 57637 | 1000 | 1 1300002 | table1_group1 | 57638 | 1000 | 1 1300003 | table1_group1 | 57637 | 1000 | 1 1300003 | table1_group1 | 57638 | 1000 | 1 1300004 | table2_group1 | 57637 | 1000 | 1 1300004 | table2_group1 | 57638 | 1000 | 3 1300005 | table2_group1 | 57637 | 1000 | 1 1300005 | table2_group1 | 57638 | 1000 | 1 1300006 | table2_group1 | 57637 | 1000 | 1 1300006 | table2_group1 | 57638 | 1000 | 1 1300007 | table2_group1 | 57637 | 1000 | 1 1300007 | table2_group1 | 57638 | 1000 | 1 (16 rows) -- repair colocated shards SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ----------------------------- (1 row) -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, 
sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300000 | table1_group1 | 57637 | 1000 | 1 1300000 | table1_group1 | 57638 | 1000 | 1 1300001 | table1_group1 | 57637 | 1000 | 1 1300001 | table1_group1 | 57638 | 1000 | 1 1300002 | table1_group1 | 57637 | 1000 | 1 1300002 | table1_group1 | 57638 | 1000 | 1 1300003 | table1_group1 | 57637 | 1000 | 1 1300003 | table1_group1 | 57638 | 1000 | 1 1300004 | table2_group1 | 57637 | 1000 | 1 1300004 | table2_group1 | 57638 | 1000 | 3 1300005 | table2_group1 | 57637 | 1000 | 1 1300005 | table2_group1 | 57638 | 1000 | 1 1300006 | table2_group1 | 57637 | 1000 | 1 1300006 | table2_group1 | 57638 | 1000 | 1 1300007 | table2_group1 | 57637 | 1000 | 1 1300007 | table2_group1 | 57638 | 1000 | 1 (16 rows) -- test repairing NOT colocated shard -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300016 | table5_groupx | 57637 | 0 | 1 1300016 | table5_groupx | 57638 | 0 | 3 1300017 | table5_groupx | 57637 | 0 | 1 1300017 | table5_groupx | 57638 | 0 | 1 1300018 | table5_groupx | 57637 | 0 | 1 1300018 | table5_groupx | 57638 | 0 | 1 1300019 | table5_groupx | 57637 | 0 | 1 1300019 | table5_groupx | 57638 | 0 | 1 (8 rows) -- repair NOT colocated shard SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ----------------------------- (1 row) -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300016 | table5_groupx | 57637 | 0 | 1 1300016 | table5_groupx | 57638 | 0 | 1 1300017 | table5_groupx | 57637 | 0 | 1 1300017 | table5_groupx | 57638 | 0 | 1 1300018 | table5_groupx | 57637 | 0 | 1 1300018 | table5_groupx | 57638 | 0 | 1 1300019 | table5_groupx | 57637 | 0 | 1 1300019 | table5_groupx | 57638 | 0 | 1 (8 rows) -- test repairing shard in append distributed table -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300020 | table6_append | 57637 | 0 | 1 1300020 | table6_append | 57638 | 0 | 3 1300021 | table6_append | 57637 | 0 | 1 1300021 | table6_append | 57638 | 0 | 1 (4 rows) -- repair shard in append distributed table SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ----------------------------- (1 row) -- status 
after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300020 | table6_append | 57637 | 0 | 1 1300020 | table6_append | 57638 | 0 | 1 1300021 | table6_append | 57637 | 0 | 1 1300021 | table6_append | 57638 | 0 | 1 (4 rows) -- test repair while all placements of one shard in colocation group is unhealthy -- manually set shardstate as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1300000; -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300000 | table1_group1 | 57637 | 1000 | 3 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 1300001 | table1_group1 | 57638 | 1000 | 1 1300002 | table1_group1 | 57637 | 1000 | 1 1300002 | table1_group1 | 57638 | 1000 | 1 1300003 | table1_group1 | 57637 | 1000 | 1 1300003 | table1_group1 | 57638 | 1000 | 1 1300004 | table2_group1 | 57637 | 1000 | 1 1300004 | table2_group1 | 57638 | 1000 | 3 1300005 | table2_group1 | 57637 | 1000 | 1 1300005 | table2_group1 | 57638 | 1000 | 1 1300006 | table2_group1 | 57637 | 1000 | 1 1300006 | table2_group1 | 57638 | 1000 | 1 1300007 | table2_group1 | 57637 | 1000 | 1 1300007 | table2_group1 | 57638 | 1000 | 1 (16 rows) -- repair while all placements of one shard in colocation group is unhealthy SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: source placement must be in finalized state -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; shardid | logicalrelid | nodeport | colocationid | shardstate ---------+---------------+----------+--------------+------------ 1300000 | table1_group1 | 57637 | 1000 | 3 1300000 | table1_group1 | 57638 | 1000 | 3 1300001 | table1_group1 | 57637 | 1000 | 1 1300001 | table1_group1 | 57638 | 1000 | 1 1300002 | table1_group1 | 57637 | 1000 | 1 1300002 | table1_group1 | 57638 | 1000 | 1 1300003 | table1_group1 | 57637 | 1000 | 1 1300003 | table1_group1 | 57638 | 1000 | 1 1300004 | table2_group1 | 57637 | 1000 | 1 1300004 | table2_group1 | 57638 | 1000 | 3 1300005 | table2_group1 | 57637 | 1000 | 1 1300005 | table2_group1 | 57638 | 1000 | 1 1300006 | table2_group1 | 57637 | 1000 | 1 1300006 | table2_group1 | 57638 | 1000 | 1 1300007 | table2_group1 | 57637 | 1000 | 1 1300007 | table2_group1 | 57638 | 1000 | 1 (16 rows) 
citus-7.0.3/src/test/regress/expected/multi_colocation_utils.out000066400000000000000000001133051317107136600252160ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4; -- =================================================================== -- create test utility function -- =================================================================== CREATE SEQUENCE colocation_test_seq MINVALUE 1000 NO CYCLE; /* a very simple UDF that only sets the colocation ids the same * DO NOT USE THIS FUNCTION IN PRODUCTION. It manually sets colocationid column of * pg_dist_partition and it does not check anything about pyshical state about shards. */ CREATE OR REPLACE FUNCTION colocation_test_colocate_tables(source_table regclass, target_table regclass) RETURNS BOOL LANGUAGE plpgsql AS $colocate_tables$ DECLARE nextid INTEGER; BEGIN SELECT nextval('colocation_test_seq') INTO nextid; UPDATE pg_dist_partition SET colocationId = nextid WHERE logicalrelid IN ( (SELECT p1.logicalrelid FROM pg_dist_partition p1, pg_dist_partition p2 WHERE p2.logicalrelid = source_table AND (p1.logicalrelid = source_table OR (p1.colocationId = p2.colocationId AND p1.colocationId != 0))) UNION (SELECT target_table) ); RETURN TRUE; END; $colocate_tables$; -- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION get_table_colocation_id(regclass) RETURNS INTEGER AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION tables_colocated(regclass, regclass) RETURNS bool AS 'citus' LANGUAGE C; CREATE FUNCTION shards_colocated(bigint, bigint) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION find_shard_interval_index(bigint) RETURNS int AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test co-location util functions -- =================================================================== -- create distributed table observe shard pruning CREATE TABLE table1_group1 ( id int ); SELECT master_create_distributed_table('table1_group1', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('table1_group1', 4, 2); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE table2_group1 ( id int ); SELECT master_create_distributed_table('table2_group1', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('table2_group1', 4, 2); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE table3_group2 ( id int ); SELECT master_create_distributed_table('table3_group2', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('table3_group2', 4, 2); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE table4_group2 ( id int ); SELECT master_create_distributed_table('table4_group2', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('table4_group2', 4, 2); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE table5_groupX ( id int ); SELECT master_create_distributed_table('table5_groupX', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT 
master_create_worker_shards('table5_groupX', 4, 2); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE table6_append ( id int ); SELECT master_create_distributed_table('table6_append', 'id', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('table6_append'); master_create_empty_shard --------------------------- 1300020 (1 row) SELECT master_create_empty_shard('table6_append'); master_create_empty_shard --------------------------- 1300021 (1 row) -- make table1_group1 and table2_group1 co-located manually SELECT colocation_test_colocate_tables('table1_group1', 'table2_group1'); colocation_test_colocate_tables --------------------------------- t (1 row) -- check co-location id SELECT get_table_colocation_id('table1_group1'); get_table_colocation_id ------------------------- 1000 (1 row) SELECT get_table_colocation_id('table5_groupX'); get_table_colocation_id ------------------------- 0 (1 row) SELECT get_table_colocation_id('table6_append'); get_table_colocation_id ------------------------- 0 (1 row) -- check self table co-location SELECT tables_colocated('table1_group1', 'table1_group1'); tables_colocated ------------------ t (1 row) SELECT tables_colocated('table5_groupX', 'table5_groupX'); tables_colocated ------------------ t (1 row) SELECT tables_colocated('table6_append', 'table6_append'); tables_colocated ------------------ t (1 row) -- check table co-location with same co-location group SELECT tables_colocated('table1_group1', 'table2_group1'); tables_colocated ------------------ t (1 row) -- check table co-location with different co-location group SELECT tables_colocated('table1_group1', 'table3_group2'); tables_colocated ------------------ f (1 row) -- check table co-location with invalid co-location group SELECT tables_colocated('table1_group1', 'table5_groupX'); tables_colocated ------------------ f (1 row) SELECT tables_colocated('table1_group1', 'table6_append'); tables_colocated ------------------ f (1 row) -- check self shard co-location SELECT shards_colocated(1300000, 1300000); shards_colocated ------------------ t (1 row) SELECT shards_colocated(1300016, 1300016); shards_colocated ------------------ t (1 row) SELECT shards_colocated(1300020, 1300020); shards_colocated ------------------ t (1 row) -- check shard co-location with same co-location group SELECT shards_colocated(1300000, 1300004); shards_colocated ------------------ t (1 row) -- check shard co-location with same table different co-location group SELECT shards_colocated(1300000, 1300001); shards_colocated ------------------ f (1 row) -- check shard co-location with different co-location group SELECT shards_colocated(1300000, 1300005); shards_colocated ------------------ f (1 row) -- check shard co-location with invalid co-location group SELECT shards_colocated(1300000, 1300016); shards_colocated ------------------ f (1 row) SELECT shards_colocated(1300000, 1300020); shards_colocated ------------------ f (1 row) -- check co-located table list SELECT UNNEST(get_colocated_table_array('table1_group1'))::regclass ORDER BY 1; unnest --------------- table1_group1 table2_group1 (2 rows) SELECT UNNEST(get_colocated_table_array('table5_groupX'))::regclass ORDER BY 1; unnest --------------- table5_groupx (1 row) SELECT UNNEST(get_colocated_table_array('table6_append'))::regclass ORDER BY 1; unnest --------------- table6_append (1 row) -- check co-located shard list SELECT get_colocated_shard_array(1300000) ORDER BY 1; 
get_colocated_shard_array --------------------------- {1300000,1300004} (1 row) SELECT get_colocated_shard_array(1300016) ORDER BY 1; get_colocated_shard_array --------------------------- {1300016} (1 row) SELECT get_colocated_shard_array(1300020) ORDER BY 1; get_colocated_shard_array --------------------------- {1300020} (1 row) -- check FindShardIntervalIndex function SELECT find_shard_interval_index(1300000); find_shard_interval_index --------------------------- 0 (1 row) SELECT find_shard_interval_index(1300001); find_shard_interval_index --------------------------- 1 (1 row) SELECT find_shard_interval_index(1300002); find_shard_interval_index --------------------------- 2 (1 row) SELECT find_shard_interval_index(1300003); find_shard_interval_index --------------------------- 3 (1 row) SELECT find_shard_interval_index(1300016); find_shard_interval_index --------------------------- 0 (1 row) -- check external colocation API SET citus.shard_count = 2; CREATE TABLE table1_groupA ( id int ); SELECT create_distributed_table('table1_groupA', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE table2_groupA ( id int ); SELECT create_distributed_table('table2_groupA', 'id'); create_distributed_table -------------------------- (1 row) -- change shard replication factor SET citus.shard_replication_factor = 1; CREATE TABLE table1_groupB ( id int ); SELECT create_distributed_table('table1_groupB', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE table2_groupB ( id int ); SELECT create_distributed_table('table2_groupB', 'id'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='table1_groupB'::regclass; UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='table2_groupB'::regclass; -- revert back to default shard replication factor SET citus.shard_replication_factor to DEFAULT; -- change partition column type CREATE TABLE table1_groupC ( id text ); SELECT create_distributed_table('table1_groupC', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE table2_groupC ( id text ); SELECT create_distributed_table('table2_groupC', 'id'); create_distributed_table -------------------------- (1 row) -- change shard count SET citus.shard_count = 4; CREATE TABLE table1_groupD ( id int ); SELECT create_distributed_table('table1_groupD', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE table2_groupD ( id int ); SELECT create_distributed_table('table2_groupD', 'id'); create_distributed_table -------------------------- (1 row) -- try other distribution methods CREATE TABLE table_append ( id int ); SELECT create_distributed_table('table_append', 'id', 'append'); create_distributed_table -------------------------- (1 row) CREATE TABLE table_range ( id int ); SELECT create_distributed_table('table_range', 'id', 'range'); create_distributed_table -------------------------- (1 row) -- test foreign table creation CREATE FOREIGN TABLE table3_groupD ( id int ) SERVER fake_fdw_server; SELECT create_distributed_table('table3_groupD', 'id'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined create_distributed_table -------------------------- (1 row) -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 
3 | 32 | 2 | 23 4 | 2 | 2 | 23 5 | 2 | 1 | 23 6 | 2 | 2 | 25 7 | 4 | 2 | 23 (5 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY logicalrelid; logicalrelid | colocationid ---------------+-------------- table1_groupa | 4 table2_groupa | 4 table1_groupb | 5 table2_groupb | 5 table1_groupc | 6 table2_groupc | 6 table1_groupd | 7 table2_groupd | 7 table3_groupd | 7 (9 rows) -- check effects of dropping tables DROP TABLE table1_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 4 | 2 | 2 | 23 (1 row) -- dropping all tables in a colocation group also deletes the colocation group DROP TABLE table2_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 4 | 2 | 2 | 23 (1 row) -- create dropped colocation group again SET citus.shard_count = 2; CREATE TABLE table1_groupE ( id int ); SELECT create_distributed_table('table1_groupE', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE table2_groupE ( id int ); SELECT create_distributed_table('table2_groupE', 'id'); create_distributed_table -------------------------- (1 row) -- test different table DDL CREATE TABLE table3_groupE ( dummy_column text, id int ); SELECT create_distributed_table('table3_groupE', 'id'); create_distributed_table -------------------------- (1 row) -- test different schema CREATE SCHEMA schema_collocation; CREATE TABLE schema_collocation.table4_groupE ( id int ); SELECT create_distributed_table('schema_collocation.table4_groupE', 'id'); create_distributed_table -------------------------- (1 row) -- test colocate_with option CREATE TABLE table1_group_none_1 ( id int ); SELECT create_distributed_table('table1_group_none_1', 'id', colocate_with => 'none'); create_distributed_table -------------------------- (1 row) CREATE TABLE table2_group_none_1 ( id int ); SELECT create_distributed_table('table2_group_none_1', 'id', colocate_with => 'table1_group_none_1'); create_distributed_table -------------------------- (1 row) CREATE TABLE table1_group_none_2 ( id int ); SELECT create_distributed_table('table1_group_none_2', 'id', colocate_with => 'none'); create_distributed_table -------------------------- (1 row) CREATE TABLE table4_groupE ( id int ); SELECT create_distributed_table('table4_groupE', 'id', colocate_with => 'default'); create_distributed_table -------------------------- (1 row) SET citus.shard_count = 3; -- check that this new configuration does not have a default group CREATE TABLE table1_group_none_3 ( id int ); SELECT create_distributed_table('table1_group_none_3', 'id', colocate_with => 'NONE'); create_distributed_table -------------------------- (1 row) -- a new table does not use a non-default group CREATE TABLE table1_group_default ( id int ); SELECT create_distributed_table('table1_group_default', 'id', colocate_with => 'DEFAULT'); create_distributed_table -------------------------- (1 row) -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 3 | 32 | 2 | 23 4 | 2 | 2 | 23 5 | 2 | 1 | 23 6 | 2 | 2 | 25 7 | 
4 | 2 | 23 11 | 3 | 2 | 23 (6 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid ----------------------------------+-------------- table1_groupe | 4 table2_groupe | 4 table3_groupe | 4 schema_collocation.table4_groupe | 4 table4_groupe | 4 table1_groupb | 5 table2_groupb | 5 table1_groupc | 6 table2_groupc | 6 table1_groupd | 7 table2_groupd | 7 table3_groupd | 7 table1_group_none_1 | 8 table2_group_none_1 | 8 table1_group_none_2 | 9 table1_group_none_3 | 10 table1_group_default | 11 (17 rows) -- check failing colocate_with options CREATE TABLE table_postgresql( id int ); CREATE TABLE table_failing ( id int ); SELECT create_distributed_table('table_failing', 'id', colocate_with => 'table_append'); ERROR: cannot distribute relation DETAIL: Currently, colocate_with option is only supported for hash distributed tables. SELECT create_distributed_table('table_failing', 'id', 'append', 'table1_groupE'); ERROR: cannot distribute relation DETAIL: Currently, colocate_with option is only supported for hash distributed tables. SELECT create_distributed_table('table_failing', 'id', colocate_with => 'table_postgresql'); ERROR: relation table_postgresql is not distributed SELECT create_distributed_table('table_failing', 'id', colocate_with => 'no_table'); ERROR: relation "no_table" does not exist SELECT create_distributed_table('table_failing', 'id', colocate_with => ''); ERROR: invalid name syntax SELECT create_distributed_table('table_failing', 'id', colocate_with => NULL); create_distributed_table -------------------------- (1 row) -- check with different distribution column types CREATE TABLE table_bigint ( id bigint ); SELECT create_distributed_table('table_bigint', 'id', colocate_with => 'table1_groupE'); ERROR: cannot colocate tables table1_groupe and table_bigint DETAIL: Distribution column types don't match for table1_groupe and table_bigint. 
-- check worker table schemas \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300050'::regclass; Column | Type | Modifiers --------------+---------+----------- dummy_column | text | id | integer | (2 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_collocation.table4_groupE_1300052'::regclass; Column | Type | Modifiers --------+---------+----------- id | integer | (1 row) \c - - - :master_port CREATE TABLE table1_groupF ( id int ); SELECT create_reference_table('table1_groupF'); create_reference_table ------------------------ (1 row) CREATE TABLE table2_groupF ( id int ); SELECT create_reference_table('table2_groupF'); create_reference_table ------------------------ (1 row) -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 3 | 32 | 2 | 23 4 | 2 | 2 | 23 5 | 2 | 1 | 23 6 | 2 | 2 | 25 7 | 4 | 2 | 23 11 | 3 | 2 | 23 (6 rows) -- cross check with internal colocation API SELECT p1.logicalrelid::regclass AS table1, p2.logicalrelid::regclass AS table2, tables_colocated(p1.logicalrelid , p2.logicalrelid) AS colocated FROM pg_dist_partition p1, pg_dist_partition p2 WHERE p1.logicalrelid < p2.logicalrelid AND p1.colocationid != 0 AND p2.colocationid != 0 AND tables_colocated(p1.logicalrelid , p2.logicalrelid) is TRUE ORDER BY table1, table2; table1 | table2 | colocated ----------------------------------+----------------------------------+----------- table1_group1 | table2_group1 | t table1_groupb | table2_groupb | t table1_groupc | table2_groupc | t table1_groupd | table2_groupd | t table1_groupd | table3_groupd | t table2_groupd | table3_groupd | t table1_groupe | table2_groupe | t table1_groupe | table3_groupe | t table1_groupe | schema_collocation.table4_groupe | t table1_groupe | table4_groupe | t table2_groupe | table3_groupe | t table2_groupe | schema_collocation.table4_groupe | t table2_groupe | table4_groupe | t table3_groupe | schema_collocation.table4_groupe | t table3_groupe | table4_groupe | t schema_collocation.table4_groupe | table4_groupe | t table1_group_none_1 | table2_group_none_1 | t table1_groupf | table2_groupf | t (18 rows) -- check created shards SELECT logicalrelid, pg_dist_shard.shardid AS shardid, shardstorage, nodeport, shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_dist_shard_placement WHERE pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND pg_dist_shard.shardid >= 1300026 ORDER BY logicalrelid, shardmaxvalue::integer, shardid, placementid; logicalrelid | shardid | shardstorage | nodeport | shardminvalue | shardmaxvalue ----------------------------------+---------+--------------+----------+---------------+--------------- table1_groupb | 1300026 | t | 57637 | -2147483648 | -1 table1_groupb | 1300027 | t | 57638 | 0 | 2147483647 table2_groupb | 1300028 | t | 57637 | -2147483648 | -1 table2_groupb | 1300029 | t | 57638 | 0 | 2147483647 table1_groupc | 1300030 | t | 57637 | -2147483648 | -1 table1_groupc | 1300030 | t | 57638 | -2147483648 | -1 table1_groupc | 1300031 | t | 57638 | 0 | 2147483647 table1_groupc | 1300031 | t | 57637 | 0 | 2147483647 table2_groupc | 1300032 | t | 57638 | -2147483648 | -1 table2_groupc | 1300032 | t | 57637 | -2147483648 | -1 table2_groupc | 1300033 | t | 57637 | 0 | 2147483647 table2_groupc | 1300033 | t | 57638 | 0 | 
2147483647 table1_groupd | 1300034 | t | 57637 | -2147483648 | -1073741825 table1_groupd | 1300034 | t | 57638 | -2147483648 | -1073741825 table1_groupd | 1300035 | t | 57638 | -1073741824 | -1 table1_groupd | 1300035 | t | 57637 | -1073741824 | -1 table1_groupd | 1300036 | t | 57637 | 0 | 1073741823 table1_groupd | 1300036 | t | 57638 | 0 | 1073741823 table1_groupd | 1300037 | t | 57638 | 1073741824 | 2147483647 table1_groupd | 1300037 | t | 57637 | 1073741824 | 2147483647 table2_groupd | 1300038 | t | 57638 | -2147483648 | -1073741825 table2_groupd | 1300038 | t | 57637 | -2147483648 | -1073741825 table2_groupd | 1300039 | t | 57637 | -1073741824 | -1 table2_groupd | 1300039 | t | 57638 | -1073741824 | -1 table2_groupd | 1300040 | t | 57638 | 0 | 1073741823 table2_groupd | 1300040 | t | 57637 | 0 | 1073741823 table2_groupd | 1300041 | t | 57637 | 1073741824 | 2147483647 table2_groupd | 1300041 | t | 57638 | 1073741824 | 2147483647 table3_groupd | 1300042 | f | 57637 | -2147483648 | -1073741825 table3_groupd | 1300042 | f | 57638 | -2147483648 | -1073741825 table3_groupd | 1300043 | f | 57638 | -1073741824 | -1 table3_groupd | 1300043 | f | 57637 | -1073741824 | -1 table3_groupd | 1300044 | f | 57637 | 0 | 1073741823 table3_groupd | 1300044 | f | 57638 | 0 | 1073741823 table3_groupd | 1300045 | f | 57638 | 1073741824 | 2147483647 table3_groupd | 1300045 | f | 57637 | 1073741824 | 2147483647 table1_groupe | 1300046 | t | 57637 | -2147483648 | -1 table1_groupe | 1300046 | t | 57638 | -2147483648 | -1 table1_groupe | 1300047 | t | 57638 | 0 | 2147483647 table1_groupe | 1300047 | t | 57637 | 0 | 2147483647 table2_groupe | 1300048 | t | 57638 | -2147483648 | -1 table2_groupe | 1300048 | t | 57637 | -2147483648 | -1 table2_groupe | 1300049 | t | 57637 | 0 | 2147483647 table2_groupe | 1300049 | t | 57638 | 0 | 2147483647 table3_groupe | 1300050 | t | 57637 | -2147483648 | -1 table3_groupe | 1300050 | t | 57638 | -2147483648 | -1 table3_groupe | 1300051 | t | 57638 | 0 | 2147483647 table3_groupe | 1300051 | t | 57637 | 0 | 2147483647 schema_collocation.table4_groupe | 1300052 | t | 57638 | -2147483648 | -1 schema_collocation.table4_groupe | 1300052 | t | 57637 | -2147483648 | -1 schema_collocation.table4_groupe | 1300053 | t | 57637 | 0 | 2147483647 schema_collocation.table4_groupe | 1300053 | t | 57638 | 0 | 2147483647 table1_group_none_1 | 1300054 | t | 57637 | -2147483648 | -1 table1_group_none_1 | 1300054 | t | 57638 | -2147483648 | -1 table1_group_none_1 | 1300055 | t | 57638 | 0 | 2147483647 table1_group_none_1 | 1300055 | t | 57637 | 0 | 2147483647 table2_group_none_1 | 1300056 | t | 57638 | -2147483648 | -1 table2_group_none_1 | 1300056 | t | 57637 | -2147483648 | -1 table2_group_none_1 | 1300057 | t | 57637 | 0 | 2147483647 table2_group_none_1 | 1300057 | t | 57638 | 0 | 2147483647 table1_group_none_2 | 1300058 | t | 57637 | -2147483648 | -1 table1_group_none_2 | 1300058 | t | 57638 | -2147483648 | -1 table1_group_none_2 | 1300059 | t | 57638 | 0 | 2147483647 table1_group_none_2 | 1300059 | t | 57637 | 0 | 2147483647 table4_groupe | 1300060 | t | 57637 | -2147483648 | -1 table4_groupe | 1300060 | t | 57638 | -2147483648 | -1 table4_groupe | 1300061 | t | 57638 | 0 | 2147483647 table4_groupe | 1300061 | t | 57637 | 0 | 2147483647 table1_group_none_3 | 1300062 | t | 57637 | -2147483648 | -715827884 table1_group_none_3 | 1300062 | t | 57638 | -2147483648 | -715827884 table1_group_none_3 | 1300063 | t | 57638 | -715827883 | 715827881 table1_group_none_3 | 1300063 | t | 57637 | -715827883 | 
715827881 table1_group_none_3 | 1300064 | t | 57637 | 715827882 | 2147483647 table1_group_none_3 | 1300064 | t | 57638 | 715827882 | 2147483647 table1_group_default | 1300065 | t | 57637 | -2147483648 | -715827884 table1_group_default | 1300065 | t | 57638 | -2147483648 | -715827884 table1_group_default | 1300066 | t | 57638 | -715827883 | 715827881 table1_group_default | 1300066 | t | 57637 | -715827883 | 715827881 table1_group_default | 1300067 | t | 57637 | 715827882 | 2147483647 table1_group_default | 1300067 | t | 57638 | 715827882 | 2147483647 table1_groupf | 1300068 | t | 57637 | | table1_groupf | 1300068 | t | 57638 | | table2_groupf | 1300069 | t | 57637 | | table2_groupf | 1300069 | t | 57638 | | (84 rows) -- reset colocation ids to test mark_tables_colocated ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1; DELETE FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000; UPDATE pg_dist_partition SET colocationid = 0 WHERE colocationid >= 1 AND colocationid < 1000; -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ (0 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid --------------+-------------- (0 rows) -- first check failing cases SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupC']); ERROR: cannot colocate tables table1_groupb and table1_groupc DETAIL: Distribution column types don't match for table1_groupb and table1_groupc. SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupD']); ERROR: cannot colocate tables table1_groupb and table1_groupd DETAIL: Shard counts don't match for table1_groupb and table1_groupd. SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupE']); ERROR: cannot colocate tables table1_groupb and table1_groupe DETAIL: Shard 1300026 of table1_groupb and shard 1300046 of table1_groupe have different number of shard placements. SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupF']); ERROR: cannot colocate tables table1_groupb and table1_groupf DETAIL: Replication models don't match for table1_groupb and table1_groupf. SELECT mark_tables_colocated('table1_groupB', ARRAY['table2_groupB', 'table1_groupD']); ERROR: cannot colocate tables table1_groupb and table1_groupd DETAIL: Shard counts don't match for table1_groupb and table1_groupd. 
-- check metadata to see failing calls didn't have any side effects SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ (0 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid --------------+-------------- (0 rows) -- check successfully colocated tables SELECT mark_tables_colocated('table1_groupB', ARRAY['table2_groupB']); mark_tables_colocated ----------------------- (1 row) SELECT mark_tables_colocated('table1_groupC', ARRAY['table2_groupC']); mark_tables_colocated ----------------------- (1 row) SELECT mark_tables_colocated('table1_groupD', ARRAY['table2_groupD']); mark_tables_colocated ----------------------- (1 row) SELECT mark_tables_colocated('table1_groupE', ARRAY['table2_groupE', 'table3_groupE']); mark_tables_colocated ----------------------- (1 row) SELECT mark_tables_colocated('table1_groupF', ARRAY['table2_groupF']); mark_tables_colocated ----------------------- (1 row) -- check to colocate with itself SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupB']); mark_tables_colocated ----------------------- (1 row) SET citus.shard_count = 2; CREATE TABLE table1_group_none ( id int ); SELECT create_distributed_table('table1_group_none', 'id', colocate_with => 'NONE'); create_distributed_table -------------------------- (1 row) CREATE TABLE table2_group_none ( id int ); SELECT create_distributed_table('table2_group_none', 'id', colocate_with => 'NONE'); create_distributed_table -------------------------- (1 row) -- check metadata to see colocation groups are created successfully SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 2 | 2 | 1 | 23 3 | 2 | 2 | 25 4 | 4 | 2 | 23 5 | 2 | 2 | 23 (4 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid | colocationid -------------------+-------------- table1_groupb | 2 table2_groupb | 2 table1_groupc | 3 table2_groupc | 3 table1_groupd | 4 table2_groupd | 4 table1_groupe | 5 table2_groupe | 5 table3_groupe | 5 table1_group_none | 6 table2_group_none | 7 (11 rows) -- move all the tables in colocation group 5 to colocation group 7 SELECT mark_tables_colocated('table1_group_none', ARRAY['table1_groupE', 'table2_groupE', 'table3_groupE']); mark_tables_colocated ----------------------- (1 row) -- move a table with a colocation id that is no longer in pg_dist_colocation SELECT mark_tables_colocated('table1_group_none', ARRAY['table2_group_none']); mark_tables_colocated ----------------------- (1 row) -- check metadata to see that unused colocation group is deleted SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 2 | 2 | 1 | 23 3 | 2 | 2 | 25 4 | 4 | 2 | 23 (3 rows) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; logicalrelid |
colocationid -------------------+-------------- table1_groupb | 2 table2_groupb | 2 table1_groupc | 3 table2_groupc | 3 table1_groupd | 4 table2_groupd | 4 table1_groupe | 6 table2_groupe | 6 table3_groupe | 6 table1_group_none | 6 table2_group_none | 6 (11 rows) -- try to colocate different replication models CREATE TABLE table1_groupG ( id int ); SELECT create_distributed_table('table1_groupG', 'id'); create_distributed_table -------------------------- (1 row) -- update replication model UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid = 'table1_groupG'::regclass; CREATE TABLE table2_groupG ( id int ); SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'table1_groupG'); ERROR: cannot colocate tables table1_groupg and table2_groupg DETAIL: Replication models don't match for table1_groupg and table2_groupg. CREATE TABLE table2_groupG ( id int ); ERROR: relation "table2_groupg" already exists SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'NONE'); create_distributed_table -------------------------- (1 row) SELECT mark_tables_colocated('table1_groupG', ARRAY['table2_groupG']); ERROR: cannot colocate tables table1_groupg and table2_groupg DETAIL: Replication models don't match for table1_groupg and table2_groupg. -- drop tables to clean test space DROP TABLE table1_groupb; DROP TABLE table2_groupb; DROP TABLE table1_groupc; DROP TABLE table2_groupc; DROP TABLE table1_groupd; DROP TABLE table2_groupd; DROP TABLE table1_groupf; DROP TABLE table2_groupf; DROP TABLE table1_groupe; DROP TABLE table2_groupe; DROP TABLE table3_groupe; DROP TABLE table4_groupe; DROP TABLE schema_collocation.table4_groupe; DROP TABLE table1_group_none_1; DROP TABLE table2_group_none_1; DROP TABLE table1_group_none_2; DROP TABLE table1_group_none_3; DROP TABLE table1_group_none; DROP TABLE table2_group_none; DROP TABLE table1_group_default; citus-7.0.3/src/test/regress/expected/multi_complex_expressions.out000066400000000000000000000334511317107136600257600ustar00rootroot00000000000000-- -- MULTI_COMPLEX_EXPRESSIONS -- -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; ?column? ------------------------ 12000.0000000000000000 (1 row) SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem; ?column? ----------------------- 1200.0000000000000000 (1 row) SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem; ?column? 
----------------------- 1211.0000000000000000 (1 row) SELECT avg(l_quantity) as average FROM lineitem; average --------------------- 25.4462500000000000 (1 row) SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; average_times_hundred ----------------------- 2544.6250000000000000 (1 row) SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; average_times_ten ---------------------- 254.4625000000000000 (1 row) SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; l_quantity | count_quantity ------------+---------------- 44.00 | 2150 38.00 | 2160 45.00 | 2180 13.00 | 2190 47.00 | 2200 29.00 | 2220 36.00 | 2230 49.00 | 2230 3.00 | 2270 35.00 | 2280 18.00 | 2290 31.00 | 2290 43.00 | 2290 14.00 | 2300 16.00 | 2300 17.00 | 2300 26.00 | 2300 7.00 | 2320 10.00 | 2340 34.00 | 2340 15.00 | 2350 25.00 | 2360 33.00 | 2360 42.00 | 2360 2.00 | 2370 12.00 | 2410 37.00 | 2410 6.00 | 2420 22.00 | 2420 1.00 | 2430 19.00 | 2430 4.00 | 2440 20.00 | 2460 48.00 | 2460 41.00 | 2470 24.00 | 2490 27.00 | 2490 8.00 | 2500 11.00 | 2500 5.00 | 2540 21.00 | 2550 32.00 | 2550 9.00 | 2580 39.00 | 2600 46.00 | 2600 50.00 | 2600 23.00 | 2610 30.00 | 2640 40.00 | 2690 28.00 | 2730 (50 rows) -- Check that we can handle complex select clause expressions. SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; count ------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; count ------- 8148 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; count ------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; count ------- 4611 (1 row) SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; count ------- 10008 (1 row) -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; count ------- 0 (1 row) -- boolean tests can be pushed down SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; count ------- 11423 (1 row) -- scalar array operator expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); count ------- 1 (1 row) -- some more scalar array operator expressions SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); count ------- 1 (1 row) -- operator expressions involving arrays SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; count ------- 1 (1 row) -- coerced via io expressions can be pushed down SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool; count ------- 260 (1 row) -- case expressions can be pushed down SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); count ------- 7948 (1 row) -- coalesce expressions can be pushed down SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); count ------- 9122 (1 row) -- nullif expressions can be pushed down SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); count ------- 9122 (1 row) -- null test expressions can be pushed down SELECT count(*) FROM orders WHERE o_comment IS NOT null; count ------- 2984 (1 row) -- functions can be pushed down SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); count ------- 12000 (1 row) -- constant expressions can be pushed down SELECT 
count(*) FROM lineitem WHERE 0 != 0; count ------- 0 (1 row) -- distinct expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; count ------- 11999 (1 row) -- row compare expression can be pushed down SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); count ------- 11882 (1 row) -- combination of different expressions can be pushed down SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool AND CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END AND COALESCE((l_partkey/50000)::bool, false) AND NULLIF((l_partkey/50000)::bool, false) AND isfinite(l_shipdate) AND l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); count ------- 137 (1 row) -- constant expression in the WHERE clause with a column in the target list SELECT l_linenumber FROM lineitem WHERE 1!=0 ORDER BY l_linenumber LIMIT 1; l_linenumber -------------- 1 (1 row) -- constant expression in the WHERE clause with expressions and a column the target list SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount FROM lineitem WHERE 1!=0 GROUP BY l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; total_discount | count | sum | l_discount ----------------+-------+-------+------------ 104.80 | 1048 | 41.08 | 0.10 98.55 | 1095 | 44.15 | 0.09 90.64 | 1133 | 45.94 | 0.08 71.05 | 1015 | 41.19 | 0.07 69.42 | 1157 | 45.75 | 0.06 53.60 | 1072 | 42.82 | 0.05 43.64 | 1091 | 44.40 | 0.04 32.55 | 1085 | 43.30 | 0.03 22.22 | 1111 | 45.07 | 0.02 11.22 | 1122 | 44.54 | 0.01 0.00 | 1071 | 44.00 | 0.00 (11 rows) -- distinct expressions in the WHERE clause with a column in the target list SELECT l_linenumber FROM lineitem WHERE l_linenumber IS DISTINCT FROM 1 AND l_orderkey IS DISTINCT FROM 8997 ORDER BY l_linenumber LIMIT 1; l_linenumber -------------- 2 (1 row) -- distinct expressions in the WHERE clause with expressions and a column the target list SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem WHERE l_linenumber IS DISTINCT FROM 1 AND l_orderkey IS DISTINCT FROM 8997 GROUP BY l_receiptdate ORDER BY l_receiptdate LIMIT 1; max | min | l_receiptdate -----+------+--------------- 3 | 0.07 | 01-09-1992 (1 row) -- Check that we can handle implicit and explicit join clause definitions. SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; count ------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; count ------- 951 (1 row) SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; count ------- 951 (1 row) -- Check that we make sure local joins are between columns only. 
SELECT count(*) FROM lineitem, orders WHERE l_orderkey + 1 = o_orderkey; ERROR: cannot perform local joins that involve expressions DETAIL: local joins can be performed between columns only -- Check that we can issue limit/offset queries -- OFFSET in subqueries are not supported -- Error in the planner when single repartition subquery SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey OFFSET 20) sq; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with offset are not supported yet -- Error in the optimizer when subquery pushdown is on SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with offset are not supported yet -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; o_orderkey ------------ 69 70 71 96 97 98 99 100 101 102 (10 rows) -- LIMIT/OFFSET with a subquery SET citus.task_executor_type TO 'task-tracker'; SELECT customer_keys.o_custkey, SUM(order_count) AS total_order_count FROM (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys GROUP BY customer_keys.o_custkey ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; o_custkey | total_order_count -----------+------------------- 1466 | 1 1465 | 2 1463 | 4 1462 | 10 1460 | 1 1459 | 6 1457 | 1 1456 | 3 1454 | 2 1453 | 5 (10 rows) SET citus.task_executor_type TO 'real-time'; SET client_min_messages TO DEBUG1; -- Ensure that we push down LIMIT and OFFSET properly -- No Group-By -> Push Down CREATE TEMP TABLE temp_limit_test_1 AS SELECT o_custkey FROM orders LIMIT 10 OFFSET 15; DEBUG: push down of limit count: 25 -- GROUP BY without ORDER BY -> No push-down CREATE TEMP TABLE temp_limit_test_2 AS SELECT o_custkey FROM orders GROUP BY o_custkey LIMIT 10 OFFSET 15; -- GROUP BY and ORDER BY non-aggregate -> push-down CREATE TEMP TABLE temp_limit_test_3 AS SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey LIMIT 10 OFFSET 15; DEBUG: push down of limit count: 25 -- GROUP BY and ORDER BY aggregate -> No push-down CREATE TEMP TABLE temp_limit_test_4 AS SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt DESC LIMIT 10 OFFSET 15; -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; o_custkey ----------- 1498 1499 1499 1499 (4 rows) -- LIMIT/OFFSET with Joins SELECT li.l_partkey, o.o_custkey, li.l_quantity FROM lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey WHERE li.l_quantity > 25 ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; DEBUG: push down of limit count: 30 l_partkey | o_custkey | l_quantity -----------+-----------+------------ 655 | 58 | 50.00 669 | 319 | 34.00 699 | 1255 | 50.00 716 | 61 | 45.00 723 | 14 | 36.00 802 | 754 | 50.00 831 | 589 | 32.00 835 | 67 | 33.00 864 | 439 | 32.00 875 | 13 | 43.00 (10 rows) RESET client_min_messages; -- FILTERs SELECT l_orderkey, sum(l_extendedprice), sum(l_extendedprice) FILTER (WHERE l_shipmode = 'AIR'), count(*), count(*) FILTER (WHERE l_shipmode = 'AIR'), max(l_extendedprice), max(l_extendedprice) FILTER (WHERE l_quantity < 30) FROM lineitem GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | sum | sum | count | count | max | max ------------+-----------+-----------+-------+-------+-----------+---------- 12804 | 440012.71 | 45788.16 | 7 | 1 | 94398.00 | 45788.16 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 2567 | 
412076.77 | 59722.26 | 7 | 1 | 94894.00 | 9784.02 11142 | 410502.38 | 44965.95 | 7 | 1 | 83989.44 | 44965.95 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 2306 | 405629.96 | 28032.60 | 7 | 1 | 92838.00 | 44384.50 5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75 11296 | 399079.89 | | 6 | 0 | 102449.00 | 33122.93 11046 | 391163.26 | 31436.34 | 7 | 2 | 94506.24 | 47519.76 4421 | 387313.12 | | 7 | 0 | 67301.52 | 23783.40 (10 rows) SELECT l_orderkey, sum(l_extendedprice), sum(l_extendedprice) FILTER (WHERE l_shipmode = 'AIR'), count(*), count(*) FILTER (WHERE l_shipmode = 'AIR'), max(l_extendedprice), max(l_extendedprice) FILTER (WHERE l_quantity < 30) FROM lineitem GROUP BY l_orderkey HAVING count(*) FILTER (WHERE l_shipmode = 'AIR') > 1 ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | sum | sum | count | count | max | max ------------+-----------+-----------+-------+-------+----------+---------- 9863 | 412560.63 | 175647.63 | 7 | 3 | 85723.77 | 50769.14 12039 | 407048.94 | 76406.30 | 7 | 2 | 94471.02 | 19679.30 5606 | 403595.91 | 36531.51 | 7 | 2 | 94890.18 | 30582.75 11046 | 391163.26 | 31436.34 | 7 | 2 | 94506.24 | 47519.76 14499 | 384140.30 | 67867.08 | 7 | 2 | 84335.36 | 46169.75 11623 | 380598.48 | 133709.82 | 7 | 2 | 93701.54 | 21487.65 10787 | 375688.09 | 99424.78 | 7 | 2 | 76732.67 | 50946.91 12902 | 358191.24 | 76891.00 | 7 | 2 | 82008.08 | 35602.08 3747 | 353701.23 | 68592.23 | 7 | 2 | 67181.10 | 46252.77 5158 | 349889.05 | 159753.19 | 7 | 3 | 78714.67 | 29729.20 (10 rows) citus-7.0.3/src/test/regress/expected/multi_count_type_conversion.out000066400000000000000000000052451317107136600263050ustar00rootroot00000000000000-- -- MULTI_COUNT_TYPE_CONVERSION -- -- Verify that we can sort count(*) results correctly. We perform this check as -- our count() operations execute in two steps: worker nodes report their -- count() results, and the master node sums these counts up. During this sum(), -- the data type changes from int8 to numeric. When we sort the numeric value, -- we get erroneous results on 64-bit architectures. To fix this issue, we -- manually cast back the sum() result to an int8 data type. 
SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity DESC; count_quantity | l_quantity ----------------+------------ 219 | 13.00 222 | 29.00 227 | 3.00 229 | 31.00 229 | 18.00 230 | 26.00 230 | 17.00 230 | 16.00 230 | 14.00 232 | 7.00 234 | 10.00 235 | 15.00 236 | 25.00 237 | 2.00 241 | 12.00 242 | 22.00 242 | 6.00 243 | 19.00 243 | 1.00 244 | 4.00 246 | 20.00 249 | 27.00 249 | 24.00 250 | 11.00 250 | 8.00 254 | 5.00 255 | 21.00 258 | 9.00 261 | 23.00 264 | 30.00 273 | 28.00 (31 rows) SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity ASC; count_quantity | l_quantity ----------------+------------ 273 | 28.00 264 | 30.00 261 | 23.00 258 | 9.00 255 | 21.00 254 | 5.00 250 | 8.00 250 | 11.00 249 | 24.00 249 | 27.00 246 | 20.00 244 | 4.00 243 | 1.00 243 | 19.00 242 | 6.00 242 | 22.00 241 | 12.00 237 | 2.00 236 | 25.00 235 | 15.00 234 | 10.00 232 | 7.00 230 | 14.00 230 | 16.00 230 | 17.00 230 | 26.00 229 | 18.00 229 | 31.00 227 | 3.00 222 | 29.00 219 | 13.00 (31 rows) citus-7.0.3/src/test/regress/expected/multi_create_fdw.out000066400000000000000000000007611317107136600237500ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 390000; -- =================================================================== -- get ready for the foreign data wrapper tests -- =================================================================== -- create fake fdw for use in tests CREATE FUNCTION fake_fdw_handler() RETURNS fdw_handler AS 'citus' LANGUAGE C STRICT; CREATE FOREIGN DATA WRAPPER fake_fdw HANDLER fake_fdw_handler; CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw; citus-7.0.3/src/test/regress/expected/multi_create_insert_proxy.out000066400000000000000000000052621317107136600257360ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 380000; -- =================================================================== -- test INSERT proxy creation functionality -- =================================================================== -- use transaction to permit multiple calls to proxy function in one session BEGIN; -- use "unorthodox" object names to test quoting CREATE SCHEMA "A$AP Mob" CREATE TABLE "Dr. Bronner's ""Magic"" Soaps" ( id bigint PRIMARY KEY, data text NOT NULL DEFAULT 'lorem ipsum' ); \set insert_target '"A$AP Mob"."Dr. 
Bronner''s ""Magic"" Soaps"' -- create proxy and save proxy table name SELECT create_insert_proxy_for_table(:'insert_target') AS proxy_tablename \gset -- insert to proxy, relying on default value INSERT INTO pg_temp.:"proxy_tablename" (id) VALUES (1); -- copy some rows into the proxy COPY pg_temp.:"proxy_tablename" FROM stdin; -- verify rows were copied to target SELECT * FROM :insert_target ORDER BY id ASC; id | data ----+----------------------------- 1 | lorem ipsum 2 | dolor sit amet 3 | consectetur adipiscing elit 4 | sed do eiusmod 5 | tempor incididunt ut 6 | labore et dolore (6 rows) -- and not to proxy SELECT count(*) FROM pg_temp.:"proxy_tablename"; count ------- 0 (1 row) ROLLBACK; -- test behavior with distributed table, (so no transaction) CREATE TABLE insert_target ( id bigint PRIMARY KEY, data text NOT NULL DEFAULT 'lorem ipsum' ); -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; SELECT master_create_distributed_table('insert_target', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('insert_target', 2, 1); master_create_worker_shards ----------------------------- (1 row) CREATE TEMPORARY SEQUENCE rows_inserted; SELECT create_insert_proxy_for_table('insert_target', 'rows_inserted') AS proxy_tablename \gset -- insert to proxy, again relying on default value INSERT INTO pg_temp.:"proxy_tablename" (id) VALUES (1); -- test copy with bad row in middle \set VERBOSITY terse COPY pg_temp.:"proxy_tablename" FROM stdin; ERROR: null value in column "data" violates not-null constraint \set VERBOSITY default -- verify rows were copied to distributed table SELECT * FROM insert_target ORDER BY id ASC; id | data ----+----------------------------- 1 | lorem ipsum 2 | dolor sit amet 3 | consectetur adipiscing elit 4 | sed do eiusmod 5 | tempor incididunt ut 6 | labore et dolore (6 rows) -- the counter should match the number of rows stored SELECT currval('rows_inserted'); currval --------- 6 (1 row) SET client_min_messages TO DEFAULT; citus-7.0.3/src/test/regress/expected/multi_create_shards.out000066400000000000000000000220111317107136600244440ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 370000; -- =================================================================== -- create test functions and types needed for tests -- =================================================================== CREATE FUNCTION sort_names(cstring, cstring, cstring) RETURNS cstring AS 'citus' LANGUAGE C STRICT; -- create a custom type... CREATE TYPE dummy_type AS ( i integer ); -- ... as well as a function to use as its comparator... CREATE FUNCTION dummy_type_function(dummy_type, dummy_type) RETURNS boolean AS 'SELECT TRUE;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom operator... CREATE OPERATOR = ( LEFTARG = dummy_type, RIGHTARG = dummy_type, PROCEDURE = dummy_type_function ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY dummy_op_family USING hash; -- ... finally, build an operator class, designate it as the default operator -- class for the type, but only specify an equality operator. So the type will -- have a default op class but no hash operator in that class. 
CREATE OPERATOR CLASS dummy_op_family_class DEFAULT FOR TYPE dummy_type USING hash FAMILY dummy_op_family AS OPERATOR 1 =; -- =================================================================== -- test shard creation functionality -- =================================================================== CREATE TABLE table_to_distribute ( name text PRIMARY KEY, id bigint, json_data json, test_type_data dummy_type ); -- use the table WITH (OIDS) set ALTER TABLE table_to_distribute SET WITH OIDS; SELECT master_create_distributed_table('table_to_distribute', 'id', 'hash'); ERROR: cannot distribute relation: table_to_distribute DETAIL: Distributed relations must not specify the WITH (OIDS) option in their definitions. -- revert WITH (OIDS) from above ALTER TABLE table_to_distribute SET WITHOUT OIDS; -- use an index instead of table name SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash'); ERROR: table_to_distribute_pkey is not a regular, foreign or partitioned table -- use a bad column name SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash'); ERROR: column "bad_column" of relation "table_to_distribute" does not exist -- use unrecognized partition type SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized'); ERROR: invalid input value for enum citus.distribution_type: "unrecognized" LINE 1: ..._distributed_table('table_to_distribute', 'name', 'unrecogni... ^ -- use a partition column of a type lacking any default operator class SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash'); ERROR: data type json has no default operator class for specified partition method DETAIL: Partition column types must have a default operator class defined. -- use a partition column of type lacking the required support function (hash) SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash'); ERROR: could not identify a hash function for type dummy_type DETAIL: Partition column types must have a hash function defined to use hash partitioning. -- distribute table and inspect side effects SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT partmethod, partkey FROM pg_dist_partition WHERE logicalrelid = 'table_to_distribute'::regclass; partmethod | partkey ------------+-------------------------------------------------------------------------------------------------------------------------- h | {VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} (1 row) -- use a bad shard count SELECT master_create_worker_shards('table_to_distribute', 0, 1); ERROR: shard_count must be positive -- use a bad replication factor SELECT master_create_worker_shards('table_to_distribute', 16, 0); ERROR: replication_factor must be positive -- use a replication factor higher than shard count SELECT master_create_worker_shards('table_to_distribute', 16, 3); ERROR: replication_factor (3) exceeds number of worker nodes (2) HINT: Add more worker nodes or try again with a lower replication factor. 
-- finally, create shards and inspect metadata SELECT master_create_worker_shards('table_to_distribute', 16, 1); master_create_worker_shards ----------------------------- (1 row) SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; shardstorage | shardminvalue | shardmaxvalue --------------+---------------+--------------- t | -2147483648 | -1879048193 t | -1879048192 | -1610612737 t | -1610612736 | -1342177281 t | -1342177280 | -1073741825 t | -1073741824 | -805306369 t | -805306368 | -536870913 t | -536870912 | -268435457 t | -268435456 | -1 t | 0 | 268435455 t | 268435456 | 536870911 t | 536870912 | 805306367 t | 805306368 | 1073741823 t | 1073741824 | 1342177279 t | 1342177280 | 1610612735 t | 1610612736 | 1879048191 t | 1879048192 | 2147483647 (16 rows) -- all shards should have the same size (16 divides evenly into the hash space) SELECT count(*) AS shard_count, shardmaxvalue::integer - shardminvalue::integer AS shard_size FROM pg_dist_shard WHERE logicalrelid='table_to_distribute'::regclass GROUP BY shard_size; shard_count | shard_size -------------+------------ 16 | 268435455 (1 row) SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r'; count ------- 1 (1 row) -- try to create them again SELECT master_create_worker_shards('table_to_distribute', 16, 1); ERROR: table "table_to_distribute" has already had shards created for it -- test list sorting SELECT sort_names('sumedh', 'jason', 'ozgun'); sort_names ------------ jason + ozgun + sumedh + (1 row) SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'throwaway%' AND relkind = 'r'; count ------- 0 (1 row) -- test foreign table creation CREATE FOREIGN TABLE foreign_table_to_distribute ( name text, id bigint ) SERVER fake_fdw_server; SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined master_create_worker_shards ----------------------------- (1 row) SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; shardstorage | shardminvalue | shardmaxvalue --------------+---------------+--------------- f | -2147483648 | -1879048193 f | -1879048192 | -1610612737 f | -1610612736 | -1342177281 f | -1342177280 | -1073741825 f | -1073741824 | -805306369 f | -805306368 | -536870913 f | -536870912 | -268435457 f | -268435456 | -1 f | 0 | 268435455 f | 268435456 | 536870911 f | 536870912 | 805306367 f | 805306368 | 1073741823 f | 1073741824 | 1342177279 f | 1342177280 | 1610612735 f | 1610612736 | 1879048191 f | 1879048192 | 2147483647 (16 rows) -- test shard creation using weird shard count CREATE TABLE weird_shard_count ( name text, id bigint ); SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('weird_shard_count', 7, 1); master_create_worker_shards ----------------------------- (1 row) -- Citus ensures all shards are roughly the same size SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size FROM pg_dist_shard WHERE logicalrelid = 'weird_shard_count'::regclass ORDER BY shardminvalue::integer ASC; 
shard_size ------------ 613566755 613566755 613566755 613566755 613566755 613566755 613566759 (7 rows) -- cleanup foreign table, related shards and shard placements DELETE FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass); DELETE FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass; DELETE FROM pg_dist_partition WHERE logicalrelid = 'foreign_table_to_distribute'::regclass; citus-7.0.3/src/test/regress/expected/multi_create_table.out000066400000000000000000000661761317107136600242730ustar00rootroot00000000000000-- -- MULTI_CREATE_TABLE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000; -- Create new table definitions for use in testing in distributed planning and -- execution functionality. Also create indexes to boost performance. CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append'); WARNING: table "lineitem" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. master_create_distributed_table --------------------------------- (1 row) CREATE INDEX lineitem_time_index ON lineitem (l_shipdate); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE TABLE orders ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT master_create_distributed_table('orders', 'o_orderkey', 'append'); WARNING: table "orders" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. 
master_create_distributed_table --------------------------------- (1 row) CREATE TABLE customer ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT master_create_distributed_table('customer', 'c_custkey', 'append'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE nation ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_reference_table('nation'); create_reference_table ------------------------ (1 row) CREATE TABLE part ( p_partkey integer not null, p_name varchar(55) not null, p_mfgr char(25) not null, p_brand char(10) not null, p_type varchar(25) not null, p_size integer not null, p_container char(10) not null, p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT master_create_distributed_table('part', 'p_partkey', 'append'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE supplier ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); SELECT create_reference_table('supplier'); create_reference_table ------------------------ (1 row) -- create a single shard supplier table which is not -- a reference table CREATE TABLE supplier_single_shard ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE mx_table_test (col1 int, col2 text); -- Since we're superuser, we can set the replication model to 'streaming' to -- create a one-off MX table... but if we forget to set the replication factor to one, -- we should see an error reminding us to fix that SET citus.replication_model TO 'streaming'; SELECT create_distributed_table('mx_table_test', 'col1'); ERROR: replication factors above one are incompatible with the streaming replication model HINT: Try again after reducing "citus.shard_replication_factor" to one or setting "citus.replication_model" to "statement". -- ok, so now actually create the one-off MX table SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('mx_table_test', 'col1'); create_distributed_table -------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass; repmodel ---------- s (1 row) DROP TABLE mx_table_test; -- Show that master_create_distributed_table ignores citus.replication_model GUC CREATE TABLE s_table(a int); SELECT master_create_distributed_table('s_table', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. 
master_create_distributed_table --------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass; repmodel ---------- c (1 row) -- Show that master_create_worker_shards complains when RF>1 and replication model is streaming UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid='s_table'::regclass; SELECT master_create_worker_shards('s_table', 4, 2); ERROR: using replication factor 2 with the streaming replication model is not supported DETAIL: The table s_table is marked as streaming replicated and the shard replication factor of streaming replicated tables must be 1. HINT: Use replication factor 1. DROP TABLE s_table; RESET citus.replication_model; -- Show that create_distributed_table with append and range distributions ignore -- citus.replication_model GUC SET citus.shard_replication_factor TO 2; SET citus.replication_model TO streaming; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table -------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table -------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; -- Show that master_create_distributed_table created statement replicated tables no matter -- what citus.replication_model set to CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table --------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table --------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. 
master_create_distributed_table --------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; -- Check that the replication_model overwrite behavior is the same with RF=1 SET citus.shard_replication_factor TO 1; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table -------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: Streaming replication is supported only for hash-distributed tables. create_distributed_table -------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table --------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table --------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); NOTICE: using statement-based replication DETAIL: The current replication_model setting is 'streaming', which is not supported by master_create_distributed_table. HINT: Use create_distributed_table to use the streaming replication model. master_create_distributed_table --------------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; repmodel ---------- c (1 row) DROP TABLE repmodel_test; RESET citus.replication_model; -- Test initial data loading CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); INSERT INTO data_load_test VALUES (243, 'world'); -- table must be empty when using append- or range-partitioning SELECT create_distributed_table('data_load_test', 'col1', 'append'); ERROR: cannot distribute relation "data_load_test" DETAIL: Relation "data_load_test" contains data. HINT: Empty your table before distributing it. SELECT create_distributed_table('data_load_test', 'col1', 'range'); ERROR: cannot distribute relation "data_load_test" DETAIL: Relation "data_load_test" contains data. 
HINT: Empty your table before distributing it. -- table must be empty when using master_create_distributed_table (no shards created) SELECT master_create_distributed_table('data_load_test', 'col1', 'hash'); ERROR: cannot distribute relation "data_load_test" DETAIL: Relation "data_load_test" contains data. HINT: Empty your table before distributing it. -- create_distributed_table creates shards and copies data into the distributed table SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) SELECT * FROM data_load_test ORDER BY col1; col1 | col2 | col3 ------+-------+------ 132 | hello | 1 243 | world | 2 (2 rows) DROP TABLE data_load_test; -- test queries on distributed tables with no shards CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'append'); create_distributed_table -------------------------- (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; col1 | col2 ------+------ (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'range'); create_distributed_table -------------------------- (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; col1 | col2 ------+------ (0 rows) DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT master_create_distributed_table('no_shard_test', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT * FROM no_shard_test WHERE col1 > 1; col1 | col2 ------+------ (0 rows) DROP TABLE no_shard_test; -- ensure writes in the same transaction as create_distributed_table are visible BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) INSERT INTO data_load_test VALUES (243, 'world'); END; SELECT * FROM data_load_test ORDER BY col1; col1 | col2 | col3 ------+-------+------ 132 | hello | 1 243 | world | 2 (2 rows) DROP TABLE data_load_test; -- creating co-located distributed tables in the same transaction works BEGIN; CREATE TABLE data_load_test1 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test1 VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test1', 'col1'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) CREATE TABLE data_load_test2 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test2 VALUES (132, 'world'); SELECT create_distributed_table('data_load_test2', 'col1'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) SELECT a.col2 ||' '|| b.col2 FROM data_load_test1 a JOIN data_load_test2 b USING (col1) WHERE col1 = 132; ?column? ------------- hello world (1 row) DROP TABLE data_load_test1, data_load_test2; END; -- There should be no table on the worker node \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%'; relname --------- (0 rows) \c - - - :master_port -- creating an index after loading data works BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... 
create_distributed_table -------------------------- (1 row) CREATE INDEX data_load_test_idx ON data_load_test (col2); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' DROP TABLE data_load_test; END; -- popping in and out of existence in the same transaction works BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) DROP TABLE data_load_test; END; -- but dropping after a write on the distributed table is currently disallowed BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) INSERT INTO data_load_test VALUES (243, 'world'); DROP TABLE data_load_test; END; -- Test data loading after dropping a column CREATE TABLE data_load_test (col1 int, col2 text, col3 text, "CoL4"")" int); INSERT INTO data_load_test VALUES (132, 'hello', 'world'); INSERT INTO data_load_test VALUES (243, 'world', 'hello'); ALTER TABLE data_load_test DROP COLUMN col1; SELECT create_distributed_table('data_load_test', 'col3'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) SELECT * FROM data_load_test ORDER BY col2; col2 | col3 | CoL4") -------+-------+-------- hello | world | world | hello | (2 rows) -- make sure the tuple went to the right shard SELECT * FROM data_load_test WHERE col3 = 'world'; col2 | col3 | CoL4") -------+-------+-------- hello | world | (1 row) DROP TABLE data_load_test; SET citus.shard_replication_factor TO default; SET citus.shard_count to 4; CREATE TABLE lineitem_hash_part (like lineitem); SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE orders_hash_part (like orders); SELECT create_distributed_table('orders_hash_part', 'o_orderkey'); create_distributed_table -------------------------- (1 row) CREATE UNLOGGED TABLE unlogged_table ( key text, value text ); SELECT create_distributed_table('unlogged_table', 'key'); create_distributed_table -------------------------- (1 row) SELECT * FROM master_get_table_ddl_events('unlogged_table'); master_get_table_ddl_events -------------------------------------------------------------------- CREATE UNLOGGED TABLE public.unlogged_table (key text, value text) (1 row) \c - - - :worker_1_port SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%'; relpersistence ---------------- u u u u (4 rows) \c - - - :master_port -- Test rollback of create table BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); create_distributed_table -------------------------- (1 row) ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); Column | Type | Modifiers --------+------+----------- (0 rows) \c - - - :master_port -- Insert 3 rows to make sure that copy after shard creation touches the same -- worker node twice. 
BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); INSERT INTO rollback_table VALUES(1, 'Name_1'); INSERT INTO rollback_table VALUES(2, 'Name_2'); INSERT INTO rollback_table VALUES(3, 'Name_3'); SELECT create_distributed_table('rollback_table','id'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); Column | Type | Modifiers --------+------+----------- (0 rows) \c - - - :master_port BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); create_distributed_table -------------------------- (1 row) \copy rollback_table from stdin delimiter ',' CREATE INDEX rollback_index ON rollback_table(id); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' COMMIT; -- Check the table is created SELECT count(*) FROM rollback_table; count ------- 3 (1 row) DROP TABLE rollback_table; BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); create_distributed_table -------------------------- (1 row) \copy rollback_table from stdin delimiter ',' ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); Column | Type | Modifiers --------+------+----------- (0 rows) \c - - - :master_port BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); create_distributed_table -------------------------- (1 row) CREATE TABLE tt2(id int); SELECT create_distributed_table('tt2','id'); create_distributed_table -------------------------- (1 row) INSERT INTO tt1 VALUES(1); INSERT INTO tt2 SELECT * FROM tt1 WHERE id = 1; COMMIT; -- Table should exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360430'::regclass; Column | Type | Modifiers --------+---------+----------- id | integer | (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360462'::regclass; Column | Type | Modifiers --------+---------+----------- id | integer | (1 row) \c - - - :master_port DROP TABLE tt1; DROP TABLE tt2; -- It is known that creating a table with master_create_empty_shard is not -- transactional, so table stay remaining on the worker node after the rollback BEGIN; CREATE TABLE append_tt1(id int); SELECT create_distributed_table('append_tt1','id','append'); create_distributed_table -------------------------- (1 row) SELECT master_create_empty_shard('append_tt1'); master_create_empty_shard --------------------------- 360494 (1 row) ROLLBACK; -- Table exists on the worker node. 
\c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360494'::regclass; Column | Type | Modifiers --------+---------+----------- id | integer | (1 row) \c - - - :master_port -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%'); Column | Type | Modifiers --------+------+----------- (0 rows) \c - - - :master_port -- Queries executing with router executor is allowed in the same transaction -- with create_distributed_table BEGIN; CREATE TABLE tt1(id int); INSERT INTO tt1 VALUES(1); SELECT create_distributed_table('tt1','id'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) INSERT INTO tt1 VALUES(2); SELECT * FROM tt1 WHERE id = 1; id ---- 1 (1 row) COMMIT; -- Placements should be created on the worker \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360495'::regclass; Column | Type | Modifiers --------+---------+----------- id | integer | (1 row) \c - - - :master_port DROP TABLE tt1; BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); create_distributed_table -------------------------- (1 row) DROP TABLE tt1; COMMIT; -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%'); Column | Type | Modifiers --------+------+----------- (0 rows) \c - - - :master_port -- Tests with create_distributed_table & DDL & DML commands -- Test should pass since GetPlacementListConnection can provide connections -- in this order of execution CREATE TABLE sample_table(id int); SELECT create_distributed_table('sample_table','id'); create_distributed_table -------------------------- (1 row) BEGIN; CREATE TABLE stage_table (LIKE sample_table); \COPY stage_table FROM stdin; -- Note that this operation is a local copy SELECT create_distributed_table('stage_table', 'id'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) INSERT INTO sample_table SELECT * FROM stage_table; DROP TABLE stage_table; SELECT * FROM sample_table WHERE id = 3; id ---- 3 (1 row) COMMIT; -- Show that rows of sample_table are updated SELECT count(*) FROM sample_table; count ------- 4 (1 row) DROP table sample_table; -- Test as create_distributed_table - copy - create_distributed_table - copy -- This combination is used by tests written by some ORMs. 
BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); create_distributed_table -------------------------- (1 row) \COPY tt1 from stdin; CREATE TABLE tt2(like tt1); SELECT create_distributed_table('tt2','id'); create_distributed_table -------------------------- (1 row) \COPY tt2 from stdin; INSERT INTO tt1 SELECT * FROM tt2; SELECT * FROM tt1 WHERE id = 3; id ---- 3 (1 row) SELECT * FROM tt2 WHERE id = 6; id ---- 6 (1 row) END; SELECT count(*) FROM tt1; count ------- 6 (1 row) -- the goal of the following test is to make sure that -- both create_reference_table and create_distributed_table -- calls creates the schemas without leading to any deadlocks -- first create reference table, then hash distributed table BEGIN; CREATE SCHEMA sc; CREATE TABLE sc.ref(a int); insert into sc.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc.ref'); NOTICE: Copying data from local table... create_reference_table ------------------------ (1 row) CREATE TABLE sc.hash(a int); insert into sc.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc.hash', 'a'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) COMMIT; -- first create hash distributed table, then reference table BEGIN; CREATE SCHEMA sc2; CREATE TABLE sc2.hash(a int); insert into sc2.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc2.hash', 'a'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) CREATE TABLE sc2.ref(a int); insert into sc2.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc2.ref'); NOTICE: Copying data from local table... create_reference_table ------------------------ (1 row) COMMIT; DROP TABLE tt1; DROP TABLE tt2; DROP SCHEMA sc CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table sc.ref drop cascades to table sc.hash DROP SCHEMA sc2 CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table sc2.hash drop cascades to table sc2.ref citus-7.0.3/src/test/regress/expected/multi_create_table_constraints.out000066400000000000000000000442141317107136600267070ustar00rootroot00000000000000-- -- MULTI_CREATE_TABLE_CONSTRAINTS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 365000; -- test that Citus forbids unique and EXCLUDE constraints on append-partitioned tables. CREATE TABLE uniq_cns_append_tables ( partition_col integer UNIQUE, other_col integer ); SELECT master_create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append'); WARNING: table "uniq_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. master_create_distributed_table --------------------------------- (1 row) CREATE TABLE excl_cns_append_tables ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =) ); SELECT master_create_distributed_table('excl_cns_append_tables', 'partition_col', 'append'); WARNING: table "excl_cns_append_tables" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. 
master_create_distributed_table --------------------------------- (1 row) -- test that Citus cannot distribute unique constraints that do not include -- the partition column on hash-partitioned tables. CREATE TABLE pk_on_non_part_col ( partition_col integer, other_col integer PRIMARY KEY ); SELECT master_create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash'); ERROR: cannot create constraint on "pk_on_non_part_col" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). CREATE TABLE uq_on_non_part_col ( partition_col integer, other_col integer UNIQUE ); SELECT master_create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash'); ERROR: cannot create constraint on "uq_on_non_part_col" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). CREATE TABLE ex_on_non_part_col ( partition_col integer, other_col integer, EXCLUDE (other_col WITH =) ); SELECT master_create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash'); ERROR: cannot create constraint on "ex_on_non_part_col" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). -- now show that Citus can distribute unique and EXCLUDE constraints that -- include the partition column for hash-partitioned tables. -- However, EXCLUDE constraints must include the partition column with -- an equality operator. -- These tests are for UNNAMED constraints. CREATE TABLE pk_on_part_col ( partition_col integer PRIMARY KEY, other_col integer ); SELECT master_create_distributed_table('pk_on_part_col', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE uq_part_col ( partition_col integer UNIQUE, other_col integer ); SELECT master_create_distributed_table('uq_part_col', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE uq_two_columns ( partition_col integer, other_col integer, UNIQUE (partition_col, other_col) ); SELECT master_create_distributed_table('uq_two_columns', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('uq_two_columns', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); ERROR: duplicate key value violates unique constraint "uq_two_columns_partition_col_other_col_key_365000" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. 
CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_part_col ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =) ); SELECT master_create_distributed_table('ex_on_part_col', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_on_part_col', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2); ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_partition_col_excl_365004" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_two_columns ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =, other_col WITH =) ); SELECT master_create_distributed_table('ex_on_two_columns', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_on_two_columns', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_partition_col_other_col_excl_365008" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_two_columns_prt ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100) ); SELECT master_create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_on_two_columns_prt', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_prt_partition_col_other_col_excl_365012" DETAIL: Key (partition_col, other_col)=(1, 101) conflicts with existing key (partition_col, other_col)=(1, 101). CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_wrong_operator ( partition_col tsrange, other_col tsrange, EXCLUDE USING gist (other_col WITH =, partition_col WITH &&) ); SELECT master_create_distributed_table('ex_wrong_operator', 'partition_col', 'hash'); ERROR: cannot create constraint on "ex_wrong_operator" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). 
CREATE TABLE ex_overlaps ( partition_col tsrange, other_col tsrange, EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); SELECT master_create_distributed_table('ex_overlaps', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_overlaps', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); ERROR: conflicting key value violates exclusion constraint "ex_overlaps_other_col_partition_col_excl_365019" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). CONTEXT: while executing command on localhost:57638 -- now show that Citus can distribute unique and EXCLUDE constraints that -- include the partition column, for hash-partitioned tables. -- However, EXCLUDE constraints must include the partition column with -- an equality operator. -- These tests are for NAMED constraints. CREATE TABLE pk_on_part_col_named ( partition_col integer CONSTRAINT pk_on_part_col_named_pk PRIMARY KEY, other_col integer ); SELECT master_create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE uq_part_col_named ( partition_col integer CONSTRAINT uq_part_col_named_uniq UNIQUE, other_col integer ); SELECT master_create_distributed_table('uq_part_col_named', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE uq_two_columns_named ( partition_col integer, other_col integer, CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col) ); SELECT master_create_distributed_table('uq_two_columns_named', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('uq_two_columns_named', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); ERROR: duplicate key value violates unique constraint "uq_two_columns_named_uniq_365020" DETAIL: Key (partition_col, other_col)=(1, 1) already exists. 
CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_part_col_named ( partition_col integer, other_col integer, CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =) ); SELECT master_create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_on_part_col_named', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2); ERROR: conflicting key value violates exclusion constraint "ex_on_part_col_named_exclude_365024" DETAIL: Key (partition_col)=(1) conflicts with existing key (partition_col)=(1). CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_on_two_columns_named ( partition_col integer, other_col integer, CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =) ); SELECT master_create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_on_two_columns_named', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); ERROR: conflicting key value violates exclusion constraint "ex_on_two_columns_named_exclude_365028" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_multiple_excludes ( partition_col integer, other_col integer, other_other_col integer, CONSTRAINT ex_multiple_excludes_excl1 EXCLUDE (partition_col WITH =, other_col WITH =), CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =) ); SELECT master_create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_multiple_excludes', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1); INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2); ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl1_365032" DETAIL: Key (partition_col, other_col)=(1, 1) conflicts with existing key (partition_col, other_col)=(1, 1). CONTEXT: while executing command on localhost:57637 INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1); ERROR: conflicting key value violates exclusion constraint "ex_multiple_excludes_excl2_365032" DETAIL: Key (partition_col, other_other_col)=(1, 1) conflicts with existing key (partition_col, other_other_col)=(1, 1). 
CONTEXT: while executing command on localhost:57637 CREATE TABLE ex_wrong_operator_named ( partition_col tsrange, other_col tsrange, CONSTRAINT ex_wrong_operator_named_exclude EXCLUDE USING gist (other_col WITH =, partition_col WITH &&) ); SELECT master_create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash'); ERROR: cannot create constraint on "ex_wrong_operator_named" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). CREATE TABLE ex_overlaps_named ( partition_col tsrange, other_col tsrange, CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); SELECT master_create_distributed_table('ex_overlaps_named', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('ex_overlaps_named', '4', '2'); master_create_worker_shards ----------------------------- (1 row) INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); ERROR: conflicting key value violates exclusion constraint "ex_overlaps_operator_named_exclude_365039" DETAIL: Key (other_col, partition_col)=(["2016-01-15 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]) conflicts with existing key (other_col, partition_col)=(["2016-01-01 00:00:00","2016-02-01 00:00:00"], ["2016-01-01 00:00:00","2016-02-01 00:00:00"]). CONTEXT: while executing command on localhost:57638 -- now show that Citus allows unique constraints on range-partitioned tables. CREATE TABLE uq_range_tables ( partition_col integer UNIQUE, other_col integer ); SELECT master_create_distributed_table('uq_range_tables', 'partition_col', 'range'); master_create_distributed_table --------------------------------- (1 row) -- show that CHECK constraints are distributed. 
CREATE TABLE check_example ( partition_col integer UNIQUE, other_col integer CHECK (other_col >= 100), other_other_col integer CHECK (abs(other_other_col) >= 100) ); SELECT master_create_distributed_table('check_example', 'partition_col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('check_example', '2', '2'); master_create_worker_shards ----------------------------- (1 row) \c - - - :worker_1_port \d check_example_partition_col_key_365040 Index "public.check_example_partition_col_key_365040" Column | Type | Definition ---------------+---------+--------------- partition_col | integer | partition_col unique, btree, for table "public.check_example_365040" SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass; Constraint | Definition -------------------------------------+------------------------------------- check_example_other_col_check | CHECK (other_col >= 100) check_example_other_other_col_check | CHECK (abs(other_other_col) >= 100) (2 rows) \c - - - :master_port -- drop unnecessary tables DROP TABLE pk_on_non_part_col, uq_on_non_part_col CASCADE; DROP TABLE pk_on_part_col, uq_part_col, uq_two_columns CASCADE; DROP TABLE ex_on_part_col, ex_on_two_columns, ex_on_two_columns_prt, ex_multiple_excludes, ex_overlaps CASCADE; DROP TABLE ex_on_part_col_named, ex_on_two_columns_named, ex_overlaps_named CASCADE; DROP TABLE uq_range_tables, check_example CASCADE; -- test dropping table with foreign keys SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE raw_table_1 (user_id int, UNIQUE(user_id)); SELECT create_distributed_table('raw_table_1', 'user_id'); create_distributed_table -------------------------- (1 row) CREATE TABLE raw_table_2 (user_id int REFERENCES raw_table_1(user_id), UNIQUE(user_id)); SELECT create_distributed_table('raw_table_2', 'user_id'); create_distributed_table -------------------------- (1 row) -- see that the constraint exists SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; Constraint | Definition --------------------------+------------------------------------------------------- raw_table_2_user_id_fkey | FOREIGN KEY (user_id) REFERENCES raw_table_1(user_id) (1 row) -- should be prevented by the foreign key DROP TABLE raw_table_1; ERROR: cannot drop table raw_table_1 because other objects depend on it DETAIL: constraint raw_table_2_user_id_fkey on table raw_table_2 depends on table raw_table_1 HINT: Use DROP ... CASCADE to drop the dependent objects too. -- should cleanly drop the remote shards DROP TABLE raw_table_1 CASCADE; NOTICE: drop cascades to constraint raw_table_2_user_id_fkey on table raw_table_2 -- see that the constraint also dropped SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; Constraint | Definition ------------+------------ (0 rows) -- drop the table as well DROP TABLE raw_table_2; citus-7.0.3/src/test/regress/expected/multi_create_table_new_features.out000066400000000000000000000023001317107136600270150ustar00rootroot00000000000000-- -- MULTI_CREATE_TABLE_NEW_FEATURES -- -- print major version to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+') AS major_version; major_version --------------- 10 (1 row) -- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10 -- is forbidden in distributed tables. 
CREATE TABLE table_identity_col ( id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, payload text ); SELECT master_create_distributed_table('table_identity_col', 'id', 'append'); ERROR: cannot distribute relation: table_identity_col DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY. SELECT create_distributed_table('table_identity_col', 'id'); ERROR: cannot distribute relation: table_identity_col DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY. SELECT create_distributed_table('table_identity_col', 'text'); ERROR: cannot distribute relation: table_identity_col DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY. SELECT create_reference_table('table_identity_col'); ERROR: cannot distribute relation: table_identity_col DETAIL: Distributed relations must not use GENERATED ... AS IDENTITY. citus-7.0.3/src/test/regress/expected/multi_create_table_new_features_0.out000066400000000000000000000027441317107136600272500ustar00rootroot00000000000000-- -- MULTI_CREATE_TABLE_NEW_FEATURES -- -- print major version to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+') AS major_version; major_version --------------- 9 (1 row) -- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10 -- is forbidden in distributed tables. CREATE TABLE table_identity_col ( id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, payload text ); ERROR: syntax error at or near "GENERATED" LINE 2: id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, ^ SELECT master_create_distributed_table('table_identity_col', 'id', 'append'); ERROR: relation "table_identity_col" does not exist LINE 1: SELECT master_create_distributed_table('table_identity_col',... ^ SELECT create_distributed_table('table_identity_col', 'id'); ERROR: relation "table_identity_col" does not exist LINE 1: SELECT create_distributed_table('table_identity_col', 'id'); ^ SELECT create_distributed_table('table_identity_col', 'text'); ERROR: relation "table_identity_col" does not exist LINE 1: SELECT create_distributed_table('table_identity_col', 'text'... ^ SELECT create_reference_table('table_identity_col'); ERROR: relation "table_identity_col" does not exist LINE 1: SELECT create_reference_table('table_identity_col'); ^ citus-7.0.3/src/test/regress/expected/multi_cross_shard.out000066400000000000000000000131361317107136600241570ustar00rootroot00000000000000-- -- MULTI_CROSS_SHARD -- -- Tests to log cross shard queries according to error log level -- -- Create a distributed table and add data to it CREATE TABLE multi_task_table ( id int, name varchar(20) ); SELECT create_distributed_table('multi_task_table', 'id'); create_distributed_table -------------------------- (1 row) INSERT INTO multi_task_table VALUES(1, 'elem_1'); INSERT INTO multi_task_table VALUES(2, 'elem_2'); INSERT INTO multi_task_table VALUES(3, 'elem_3'); -- Shouldn't log anything when the log level is 'off' SHOW citus.multi_task_query_log_level; citus.multi_task_query_log_level ---------------------------------- off (1 row) SELECT * FROM multi_task_table; id | name ----+-------- 1 | elem_1 3 | elem_3 2 | elem_2 (3 rows) -- Get messages with the log level 'notice' SET citus.multi_task_query_log_level TO notice; SELECT * FROM multi_task_table; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. 
id | name ----+-------- 1 | elem_1 3 | elem_3 2 | elem_2 (3 rows) SELECT AVG(id) AS avg_id FROM multi_task_table; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. avg_id -------------------- 2.0000000000000000 (1 row) -- Get messages with the log level 'error' SET citus.multi_task_query_log_level TO error; SELECT * FROM multi_task_table; ERROR: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. -- Check the log message with INSERT INTO ... SELECT CREATE TABLE raw_table ( id int, order_count int ); CREATE TABLE summary_table ( id int, order_sum BIGINT ); SELECT create_distributed_table('raw_table', 'id'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('summary_table', 'id'); create_distributed_table -------------------------- (1 row) INSERT INTO raw_table VALUES(1, '15'); INSERT INTO raw_table VALUES(2, '15'); INSERT INTO raw_table VALUES(3, '15'); INSERT INTO raw_table VALUES(1, '20'); INSERT INTO raw_table VALUES(2, '25'); INSERT INTO raw_table VALUES(3, '35'); -- Should notice user that the query is multi-task one SET citus.multi_task_query_log_level TO notice; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. -- Should error out since the query is multi-task one SET citus.multi_task_query_log_level TO error; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; ERROR: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. -- Shouldn't error out since it is a single task insert-into select query INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id = 1 GROUP BY id; -- Should have four rows (three rows from the query without where and the one from with where) SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table; id | order_sum ----+----------- 1 | 35 1 | 35 3 | 50 2 | 40 (4 rows) -- Set log-level to different levels inside the transaction BEGIN; -- Should notice user that the query is multi-task one SET citus.multi_task_query_log_level TO notice; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. -- Should error out since the query is multi-task one SET citus.multi_task_query_log_level TO error; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; ERROR: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. ROLLBACK; -- Should have only four rows since the transaction is rollbacked. 
SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table; id | order_sum ----+----------- 1 | 35 1 | 35 3 | 50 2 | 40 (4 rows) -- Test router-select query SET citus.multi_task_query_log_level TO notice; -- Shouldn't log since it is a router select query SELECT * FROM raw_table WHERE ID = 1; id | order_count ----+------------- 1 | 15 1 | 20 (2 rows) -- Task tracker query test CREATE TABLE tt1 ( id int, name varchar(20) ); CREATE TABLE tt2 ( id int, name varchar(20), count bigint ); SELECT create_distributed_table('tt1', 'id'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('tt2', 'name'); create_distributed_table -------------------------- (1 row) INSERT INTO tt1 VALUES(1, 'Ahmet'); INSERT INTO tt1 VALUES(2, 'Mehmet'); INSERT INTO tt2 VALUES(1, 'Ahmet', 5); INSERT INTO tt2 VALUES(2, 'Mehmet', 15); -- Should notice since it is a task-tracker query SET citus.task_executor_type to "task-tracker"; SELECT tt1.id, tt2.count from tt1,tt2 where tt1.id = tt2.id; NOTICE: multi-task query about to be executed HINT: Queries are split to multiple tasks if they have to be split into several queries on the workers. id | count ----+------- 1 | 5 2 | 15 (2 rows) SET citus.task_executor_type to DEFAULT; DROP TABLE tt2; DROP TABLE tt1; DROP TABLE multi_task_table; DROP TABLE raw_table; DROP TABLE summary_table; citus-7.0.3/src/test/regress/expected/multi_data_types.out000066400000000000000000000126711317107136600240050ustar00rootroot00000000000000-- =================================================================== -- test composite type, varchar and enum types -- create, distribute, INSERT, SELECT and UPDATE -- =================================================================== ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 530000; -- create a custom type... CREATE TYPE test_composite_type AS ( i integer, i2 integer ); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_composite_type_function(test_composite_type, test_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_eq' IMMUTABLE RETURNS NULL ON NULL INPUT; CREATE FUNCTION cmp_test_composite_type_function(test_composite_type, test_composite_type) RETURNS int LANGUAGE 'internal' AS 'btrecordcmp' IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_composite_type, RIGHTARG = test_composite_type, PROCEDURE = equal_test_composite_type_function, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY cats_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS cats_op_fam_clas3 DEFAULT FOR TYPE test_composite_type USING BTREE AS OPERATOR 3 = (test_composite_type, test_composite_type), FUNCTION 1 cmp_test_composite_type_function(test_composite_type, test_composite_type); CREATE OPERATOR CLASS cats_op_fam_class DEFAULT FOR TYPE test_composite_type USING HASH AS OPERATOR 1 = (test_composite_type, test_composite_type), FUNCTION 1 test_composite_type_hash(test_composite_type); -- create and distribute a table on composite type column CREATE TABLE composite_type_partitioned_table ( id integer, col test_composite_type ); SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1); master_create_worker_shards ----------------------------- (1 row) -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO composite_type_partitioned_table VALUES (1, '(1, 2)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (2, '(3, 4)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (3, '(5, 6)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (4, '(7, 8)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (5, '(9, 10)'::test_composite_type); SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; id | col ----+------- 4 | (7,8) (1 row) UPDATE composite_type_partitioned_table SET id = 6 WHERE col = '(7, 8)'::test_composite_type; SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; id | col ----+------- 6 | (7,8) (1 row) -- create and distribute a table on enum type column CREATE TYPE bug_status AS ENUM ('new', 'open', 'closed'); CREATE TABLE bugs ( id integer, status bug_status ); SELECT master_create_distributed_table('bugs', 'status', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('bugs', 4, 1); master_create_worker_shards ----------------------------- (1 row) -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO bugs VALUES (1, 'new'); INSERT INTO bugs VALUES (2, 'open'); INSERT INTO bugs VALUES (3, 'closed'); INSERT INTO bugs VALUES (4, 'closed'); INSERT INTO bugs VALUES (5, 'open'); SELECT * FROM bugs WHERE status = 'closed'::bug_status; id | status ----+-------- 3 | closed 4 | closed (2 rows) UPDATE bugs SET status = 'closed'::bug_status WHERE id = 2; ERROR: modifying the partition value of rows is not allowed SELECT * FROM bugs WHERE status = 'open'::bug_status; id | status ----+-------- 2 | open 5 | open (2 rows) -- create and distribute a table on varchar column CREATE TABLE varchar_hash_partitioned_table ( id int, name varchar ); SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1); master_create_worker_shards 
----------------------------- (1 row) -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason'); INSERT INTO varchar_hash_partitioned_table VALUES (2, 'Ozgun'); INSERT INTO varchar_hash_partitioned_table VALUES (3, 'Onder'); INSERT INTO varchar_hash_partitioned_table VALUES (4, 'Sumedh'); INSERT INTO varchar_hash_partitioned_table VALUES (5, 'Marco'); SELECT * FROM varchar_hash_partitioned_table WHERE id = 1; id | name ----+------- 1 | Jason (1 row) UPDATE varchar_hash_partitioned_table SET id = 6 WHERE name = 'Jason'; SELECT * FROM varchar_hash_partitioned_table WHERE id = 6; id | name ----+------- 6 | Jason (1 row) citus-7.0.3/src/test/regress/expected/multi_deparse_shard_query.out000066400000000000000000000362221317107136600256770ustar00rootroot00000000000000-- -- MULTI_DEPARSE_SHARD_QUERY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13100000; CREATE FUNCTION deparse_shard_query_test(text) RETURNS VOID AS 'citus' LANGUAGE C STRICT; -- create the first table CREATE TABLE raw_events_1 (tenant_id bigint, value_1 int, value_2 int, value_3 float, value_4 bigint, value_5 text, value_6 int DEfAULT 10, value_7 int, event_at date DEfAULT now() ); SELECT master_create_distributed_table('raw_events_1', 'tenant_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('raw_events_1', 4, 1); master_create_worker_shards ----------------------------- (1 row) -- create the first table CREATE TABLE raw_events_2 (tenant_id bigint, value_1 int, value_2 int, value_3 float, value_4 bigint, value_5 text, value_6 float DEfAULT (random()*100)::float, value_7 int, event_at date DEfAULT now() ); SELECT master_create_distributed_table('raw_events_2', 'tenant_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('raw_events_2', 4, 1); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE aggregated_events (tenant_id bigint, sum_value_1 bigint, average_value_2 float, average_value_3 float, sum_value_4 bigint, sum_value_5 float, average_value_6 int, rollup_hour date); SELECT master_create_distributed_table('aggregated_events', 'tenant_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('aggregated_events', 4, 1); master_create_worker_shards ----------------------------- (1 row) -- start with very simple examples on a single table SELECT deparse_shard_query_test(' INSERT INTO raw_events_1 SELECT * FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, value_5, value_6, value_7, event_at FROM public.raw_events_1 deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(tenant_id, value_4) SELECT tenant_id, value_4 FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, event_at) SELECT tenant_id, value_4, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 deparse_shard_query_test -------------------------- (1 row) -- now that shuffle columns a bit on a single table SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4) SELECT value_2::text, value_5::int, tenant_id, value_4 FROM 
raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1 deparse_shard_query_test -------------------------- (1 row) -- same test on two different tables SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4) SELECT value_2::text, value_5::int, tenant_id, value_4 FROM raw_events_2; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_2, value_4, value_5, value_6, event_at) SELECT tenant_id, (value_5)::integer AS value_5, value_4, (value_2)::text AS value_2, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_2 deparse_shard_query_test -------------------------- (1 row) -- lets do some simple aggregations SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (tenant_id, rollup_hour, sum_value_1, average_value_3, average_value_6, sum_value_4) SELECT tenant_id, date_trunc(\'hour\', event_at) , sum(value_1), avg(value_3), avg(value_6), sum(value_4) FROM raw_events_1 GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, average_value_3, sum_value_4, average_value_6, rollup_hour) SELECT tenant_id, sum(value_1) AS sum, avg(value_3) AS avg, sum(value_4) AS sum, avg(value_6) AS avg, date_trunc('hour'::text, (event_at)::timestamp with time zone) AS date_trunc FROM public.raw_events_1 GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) deparse_shard_query_test -------------------------- (1 row) -- also some subqueries, JOINS with a complicated target lists -- a simple JOIN SELECT deparse_shard_query_test(' INSERT INTO raw_events_1 (value_3, tenant_id) SELECT raw_events_2.value_3, raw_events_1.tenant_id FROM raw_events_1, raw_events_2 WHERE raw_events_1.tenant_id = raw_events_2.tenant_id; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT raw_events_1.tenant_id, raw_events_2.value_3, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id = raw_events_2.tenant_id) deparse_shard_query_test -------------------------- (1 row) -- join with group by SELECT deparse_shard_query_test(' INSERT INTO raw_events_1 (value_3, tenant_id) SELECT max(raw_events_2.value_3), avg(raw_events_1.value_3) FROM raw_events_1, raw_events_2 WHERE raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_3, value_6, event_at) SELECT avg(raw_events_1.value_3) AS avg, max(raw_events_2.value_3) AS max, 10 AS value_6, (now())::date AS event_at FROM public.raw_events_1, public.raw_events_2 WHERE (raw_events_1.tenant_id = raw_events_2.tenant_id) GROUP BY raw_events_1.event_at deparse_shard_query_test -------------------------- (1 row) -- a more complicated JOIN SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_4, tenant_id) SELECT max(r1.value_4), r3.tenant_id FROM raw_events_1 r1, raw_events_2 r2, raw_events_1 r3 WHERE r1.tenant_id = r2.tenant_id AND r2.tenant_id = r3.tenant_id GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4) SELECT r3.tenant_id, max(r1.value_4) AS max FROM public.raw_events_1 r1, public.raw_events_2 r2, 
public.raw_events_1 r3 WHERE ((r1.tenant_id = r2.tenant_id) AND (r2.tenant_id = r3.tenant_id)) GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC deparse_shard_query_test -------------------------- (1 row) -- queries with CTEs are supported SELECT deparse_shard_query_test(' WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1) INSERT INTO aggregated_events (rollup_hour, sum_value_5, tenant_id) SELECT event_at, sum(value_5::int), tenant_id FROM raw_events_1 GROUP BY event_at, tenant_id; '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5, rollup_hour) SELECT tenant_id, sum((value_5)::integer) AS sum, event_at FROM public.raw_events_1 GROUP BY event_at, tenant_id deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(' WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1) INSERT INTO aggregated_events (sum_value_5, tenant_id) SELECT sum(value_5::int), tenant_id FROM raw_events_1 GROUP BY event_at, tenant_id; '); INFO: query: WITH first_tenant AS (SELECT raw_events_1.event_at, raw_events_1.value_5, raw_events_1.tenant_id FROM public.raw_events_1) INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, sum((value_5)::integer) AS sum FROM public.raw_events_1 GROUP BY event_at, tenant_id deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_1, sum_value_5, tenant_id) WITH RECURSIVE hierarchy as ( SELECT value_1, 1 AS LEVEL, tenant_id FROM raw_events_1 WHERE tenant_id = 1 UNION SELECT re.value_2, (h.level+1), re.tenant_id FROM hierarchy h JOIN raw_events_1 re ON (h.tenant_id = re.tenant_id AND h.value_1 = re.value_6)) SELECT * FROM hierarchy WHERE LEVEL <= 2; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) WITH RECURSIVE hierarchy AS (SELECT raw_events_1.value_1, 1 AS level, raw_events_1.tenant_id FROM public.raw_events_1 WHERE (raw_events_1.tenant_id = 1) UNION SELECT re.value_2, (h.level + 1), re.tenant_id FROM (hierarchy h JOIN public.raw_events_1 re ON (((h.tenant_id = re.tenant_id) AND (h.value_1 = re.value_6))))) SELECT tenant_id, value_1, level FROM hierarchy WHERE (level <= 2) deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM raw_events_1; '); INFO: query: INSERT INTO public.aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM public.raw_events_1 deparse_shard_query_test -------------------------- (1 row) -- many filters suffled SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (sum_value_5, sum_value_1, tenant_id) SELECT value_3, value_2, tenant_id FROM raw_events_1 WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000); '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, value_2, value_3 FROM public.raw_events_1 WHERE (((value_5 ~~ '%s'::text) OR (value_5 ~~ '%a'::text)) AND (tenant_id = 1) AND ((value_6 < 3000) OR (value_3 > (8000)::double precision))) deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (sum_value_5, tenant_id) SELECT rank() OVER (PARTITION BY tenant_id 
ORDER BY value_6), tenant_id FROM raw_events_1 WHERE event_at = now(); '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_5) SELECT tenant_id, rank() OVER (PARTITION BY tenant_id ORDER BY value_6) AS rank FROM public.raw_events_1 WHERE (event_at = now()) deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (sum_value_5, tenant_id, sum_value_4) SELECT random(), int4eq(1, max(value_1))::int, value_6 FROM raw_events_1 WHERE event_at = now() GROUP BY event_at, value_7, value_6; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_4, sum_value_5) SELECT (int4eq(1, max(value_1)))::integer AS int4eq, value_6, random() AS random FROM public.raw_events_1 WHERE (event_at = now()) GROUP BY event_at, value_7, value_6 deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_1, tenant_id) SELECT count(DISTINCT CASE WHEN value_1 > 100 THEN tenant_id ELSE value_6 END) as c, max(tenant_id) FROM raw_events_1; '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1) SELECT max(tenant_id) AS max, count(DISTINCT CASE WHEN (value_1 > 100) THEN tenant_id ELSE (value_6)::bigint END) AS c FROM public.raw_events_1 deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(value_7, value_1, tenant_id) SELECT value_7, value_1, tenant_id FROM (SELECT tenant_id, value_2 as value_7, value_1 FROM raw_events_2 ) as foo '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_1, value_6, value_7, event_at) SELECT tenant_id, value_1, 10 AS value_6, value_7, (now())::date AS event_at FROM (SELECT raw_events_2.tenant_id, raw_events_2.value_2 AS value_7, raw_events_2.value_1 FROM public.raw_events_2) foo deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events(sum_value_1, tenant_id, sum_value_5) SELECT sum(value_1), tenant_id, sum(value_5::bigint) FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM raw_events_2, raw_events_1 WHERE raw_events_1.tenant_id = raw_events_2.tenant_id ) as foo GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); INFO: query: INSERT INTO public.aggregated_events (tenant_id, sum_value_1, sum_value_5) SELECT tenant_id, sum(value_1) AS sum, sum((value_5)::bigint) AS sum FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM public.raw_events_2, public.raw_events_1 WHERE (raw_events_1.tenant_id = raw_events_2.tenant_id)) foo GROUP BY tenant_id, (date_trunc('hour'::text, (event_at)::timestamp with time zone)) deparse_shard_query_test -------------------------- (1 row) SELECT deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_2, value_3, value_4) SELECT tenant_id, value_1, value_2, value_3, value_4 FROM (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT tenant_id, value_1, value_2, value_3, value_4, (random() * (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo deparse_shard_query_test 
-------------------------- (1 row) SELECT deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_4, value_2, value_3) SELECT * FROM (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); INFO: query: INSERT INTO public.raw_events_2 (tenant_id, value_1, value_2, value_3, value_4, value_6, event_at) SELECT value_2, value_4, value_1, value_3, tenant_id, (random() * (100)::double precision) AS value_6, (now())::date AS event_at FROM (SELECT raw_events_1.value_2, raw_events_1.value_4, raw_events_1.tenant_id, raw_events_1.value_1, raw_events_1.value_3 FROM public.raw_events_1) foo deparse_shard_query_test -------------------------- (1 row) -- use a column multiple times SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(tenant_id, value_7, value_4) SELECT tenant_id, value_7, value_7 FROM raw_events_1 ORDER BY value_2, value_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_7, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 ORDER BY value_2, value_1 deparse_shard_query_test -------------------------- (1 row) -- test dropped table as well ALTER TABLE raw_events_1 DROP COLUMN value_5; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(tenant_id, value_7, value_4) SELECT tenant_id, value_7, value_4 FROM raw_events_1; '); INFO: query: INSERT INTO public.raw_events_1 (tenant_id, value_4, value_6, value_7, event_at) SELECT tenant_id, value_4, 10 AS value_6, value_7, (now())::date AS event_at FROM public.raw_events_1 deparse_shard_query_test -------------------------- (1 row) citus-7.0.3/src/test/regress/expected/multi_distributed_transaction_id.out000066400000000000000000000136051317107136600272510ustar00rootroot00000000000000-- -- MULTI_DISTRIBUTED_TRANSACTION_ID -- -- Unit tests for distributed transaction id functionality -- -- get the current transaction id, which should be uninitialized -- note that we skip printing the databaseId, which might change -- per run -- set timezone to a specific value to prevent -- different values on different servers SET TIME ZONE 'PST8PDT'; -- should return uninitialized values if not in a transaction SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp ---------------------------+--------------------+------------------- 0 | 0 | (1 row) BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ---------------------------+--------------------+-------------------+---------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(50, 50, '2016-01-01 00:00:00+0'); assign_distributed_transaction_id ----------------------------------- (1 row) -- see the assigned value SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
---------------------------+--------------------+------------------------------+---------- 50 | 50 | Thu Dec 31 16:00:00 2015 PST | t (1 row) -- a backend cannot be assigned another tx id if already assigned SELECT assign_distributed_transaction_id(51, 51, '2017-01-01 00:00:00+0'); ERROR: the backend has already been assigned a transaction id ROLLBACK; -- since the transaction finished, we should see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ---------------------------+--------------------+-------------------+---------- 0 | 0 | | t (1 row) -- also see that ROLLBACK (i.e., failures in the transaction) clears the shared memory BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ---------------------------+--------------------+-------------------+---------- 0 | 0 | | t (1 row) -- now assign a value SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id ----------------------------------- (1 row) SELECT 5 / 0; ERROR: division by zero COMMIT; -- since the transaction errored, we should see the uninitialized values again SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ---------------------------+--------------------+-------------------+---------- 0 | 0 | | t (1 row) -- we should also see that a new connection means an uninitialized transaction id BEGIN; SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id ----------------------------------- (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ---------------------------+--------------------+------------------------------+---------- 52 | 52 | Wed Dec 31 16:00:00 2014 PST | t (1 row) \c - - - :master_port SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ---------------------------+--------------------+-------------------+---------- 0 | 0 | | t (1 row) -- now show that PREPARE resets the distributed transaction id BEGIN; SELECT assign_distributed_transaction_id(120, 120, '2015-01-01 00:00:00+0'); assign_distributed_transaction_id ----------------------------------- (1 row) SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? 
---------------------------+--------------------+------------------------------+---------- 120 | 120 | Wed Dec 31 16:00:00 2014 PST | t (1 row) PREPARE TRANSACTION 'dist_xact_id_test'; -- after the prepare we should see that transaction id is cleared SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); initiator_node_identifier | transaction_number | transaction_stamp | ?column? ---------------------------+--------------------+-------------------+---------- 0 | 0 | | t (1 row) -- cleanup the transaction ROLLBACK PREPARED 'dist_xact_id_test'; -- set back to the original zone SET TIME ZONE DEFAULT; citus-7.0.3/src/test/regress/expected/multi_distribution_metadata.out000066400000000000000000000347571317107136600262400ustar00rootroot00000000000000-- =================================================================== -- create test functions -- =================================================================== ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 540000; CREATE FUNCTION load_shard_id_array(regclass) RETURNS bigint[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION load_shard_interval_array(bigint, anyelement) RETURNS anyarray AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION load_shard_placement_array(bigint, bool) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION partition_column_id(regclass) RETURNS smallint AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION partition_type(regclass) RETURNS "char" AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION is_distributed_table(regclass) RETURNS boolean AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION column_name_to_column_id(regclass, cstring) RETURNS smallint AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION create_monolithic_shard_row(regclass) RETURNS bigint AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION acquire_shared_shard_lock(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test distribution metadata functionality -- =================================================================== -- create hash distributed table CREATE TABLE events_hash ( id bigint, name text ); SELECT master_create_distributed_table('events_hash', 'name', 'hash'); master_create_distributed_table --------------------------------- (1 row) -- create worker shards SELECT master_create_worker_shards('events_hash', 4, 2); master_create_worker_shards ----------------------------- (1 row) -- set shardstate of one replication from each shard to 0 (invalid value) UPDATE pg_dist_placement SET shardstate = 0 WHERE shardid BETWEEN 540000 AND 540003 AND groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port); -- should see above shard identifiers SELECT load_shard_id_array('events_hash'); load_shard_id_array ------------------------------- {540000,540001,540002,540003} (1 row) -- should see array with first shard range SELECT load_shard_interval_array(540000, 0); load_shard_interval_array --------------------------- {-2147483648,-1073741825} (1 row) -- should even work for range-partitioned shards -- create range distributed table CREATE TABLE events_range ( id bigint, name text ); SELECT master_create_distributed_table('events_range', 'name', 'range'); master_create_distributed_table --------------------------------- (1 row) -- create empty shard SELECT master_create_empty_shard('events_range'); master_create_empty_shard --------------------------- 540004 (1 row) UPDATE pg_dist_shard SET shardminvalue 
= 'Aardvark', shardmaxvalue = 'Zebra' WHERE shardid = 540004; SELECT load_shard_interval_array(540004, ''::text); load_shard_interval_array --------------------------- {Aardvark,Zebra} (1 row) -- should see error for non-existent shard SELECT load_shard_interval_array(540005, 0); ERROR: could not find valid entry for shard 540005 -- should see two placements SELECT load_shard_placement_array(540001, false); load_shard_placement_array ----------------------------------- {localhost:57638,localhost:57637} (1 row) -- only one of which is finalized SELECT load_shard_placement_array(540001, true); load_shard_placement_array ---------------------------- {localhost:57637} (1 row) -- should see error for non-existent shard SELECT load_shard_placement_array(540001, false); load_shard_placement_array ----------------------------------- {localhost:57638,localhost:57637} (1 row) -- should see column id of 'name' SELECT partition_column_id('events_hash'); partition_column_id --------------------- 2 (1 row) -- should see hash partition type and fail for non-distributed tables SELECT partition_type('events_hash'); partition_type ---------------- h (1 row) SELECT partition_type('pg_type'); ERROR: relation pg_type is not distributed -- should see true for events_hash, false for others SELECT is_distributed_table('events_hash'); is_distributed_table ---------------------- t (1 row) SELECT is_distributed_table('pg_type'); is_distributed_table ---------------------- f (1 row) SELECT is_distributed_table('pg_dist_shard'); is_distributed_table ---------------------- f (1 row) -- test underlying column name-id translation SELECT column_name_to_column_id('events_hash', 'name'); column_name_to_column_id -------------------------- 2 (1 row) SELECT column_name_to_column_id('events_hash', 'ctid'); ERROR: cannot reference system column "ctid" in relation "events_hash" SELECT column_name_to_column_id('events_hash', 'non_existent'); ERROR: column "non_existent" of relation "events_hash" does not exist -- drop shard rows (must drop placements first) DELETE FROM pg_dist_placement WHERE shardid BETWEEN 540000 AND 540004; DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_hash'::regclass; DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_range'::regclass; -- verify that an eager load shows them missing SELECT load_shard_id_array('events_hash'); load_shard_id_array --------------------- {} (1 row) -- create second table to distribute CREATE TABLE customers ( id bigint, name text ); -- now we'll distribute using function calls but verify metadata manually... -- partition on id and manually inspect partition row INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey) VALUES ('customers'::regclass, 'h', column_name_to_column('customers'::regclass, 'id')); SELECT partmethod, column_to_column_name(logicalrelid, partkey) FROM pg_dist_partition WHERE logicalrelid = 'customers'::regclass; partmethod | column_to_column_name ------------+----------------------- h | id (1 row) -- make one huge shard and manually inspect shard row SELECT create_monolithic_shard_row('customers') AS new_shard_id \gset SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; shardstorage | shardminvalue | shardmaxvalue --------------+---------------+--------------- t | -2147483648 | 2147483647 (1 row) -- now we'll even test our lock methods... 
-- use transaction to bound how long we hold the lock BEGIN; -- pick up a shard lock and look for it in pg_locks SELECT acquire_shared_shard_lock(5); acquire_shared_shard_lock --------------------------- (1 row) SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; objid | mode -------+----------- 5 | ShareLock (1 row) -- commit should drop the lock COMMIT; -- lock should be gone now SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; count ------- 0 (1 row) -- test get_shard_id_for_distribution_column SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table1(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table1', 'column1'); create_distributed_table -------------------------- (1 row) \COPY get_shardid_test_table1 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 1); get_shard_id_for_distribution_column -------------------------------------- 540006 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 2); get_shard_id_for_distribution_column -------------------------------------- 540009 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3); get_shard_id_for_distribution_column -------------------------------------- 540007 (1 row) -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table1_540006; column1 | column2 ---------+--------- 1 | 1 (1 row) SELECT * FROM get_shardid_test_table1_540009; column1 | column2 ---------+--------- 2 | 2 (1 row) SELECT * FROM get_shardid_test_table1_540007; column1 | column2 ---------+--------- 3 | 3 (1 row) \c - - - :master_port -- test non-existing value SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4); get_shard_id_for_distribution_column -------------------------------------- 540007 (1 row) -- test array type SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table2(column1 text[], column2 int); SELECT create_distributed_table('get_shardid_test_table2', 'column1'); create_distributed_table -------------------------- (1 row) \COPY get_shardid_test_table2 FROM STDIN with delimiter '|'; SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{a, b, c}'); get_shard_id_for_distribution_column -------------------------------------- 540013 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f}'); get_shard_id_for_distribution_column -------------------------------------- 540011 (1 row) -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table2_540013; column1 | column2 ---------+--------- {a,b,c} | 1 (1 row) SELECT * FROM get_shardid_test_table2_540011; column1 | column2 ---------+--------- {d,e,f} | 2 (1 row) \c - - - :master_port -- test mismatching data type SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', 'a'); ERROR: malformed array literal: "a" DETAIL: Array value must start with "{" or dimension information. -- test NULL distribution column value for hash distributed table SELECT get_shard_id_for_distribution_column('get_shardid_test_table2'); ERROR: distribution value cannot be NULL for tables other than reference tables. SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', NULL); ERROR: distribution value cannot be NULL for tables other than reference tables. 
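-- Illustration only, not part of the expected regression output in this file: a minimal
-- sketch of how the shard id returned by get_shard_id_for_distribution_column (exercised
-- above) can be resolved to the worker nodes hosting that shard, by joining the
-- pg_dist_placement and pg_dist_node metadata tables that also appear earlier in this
-- test. The aliases p and n are arbitrary; the tables, function, and the value 1 all
-- come from the statements above.
SELECT p.shardid, n.nodename, n.nodeport
FROM pg_dist_placement p
JOIN pg_dist_node n USING (groupid)
WHERE p.shardid = get_shard_id_for_distribution_column('get_shardid_test_table1', 1);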
-- test non-distributed table CREATE TABLE get_shardid_test_table3(column1 int, column2 int); SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); ERROR: relation is not distributed -- test append distributed table SELECT create_distributed_table('get_shardid_test_table3', 'column1', 'append'); create_distributed_table -------------------------- (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); ERROR: finding shard id of given distribution value is only supported for hash partitioned tables, range partitioned tables and reference tables. -- test reference table; CREATE TABLE get_shardid_test_table4(column1 int, column2 int); SELECT create_reference_table('get_shardid_test_table4'); create_reference_table ------------------------ (1 row) -- test NULL distribution column value for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4'); get_shard_id_for_distribution_column -------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', NULL); get_shard_id_for_distribution_column -------------------------------------- 540014 (1 row) -- test different data types for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 1); get_shard_id_for_distribution_column -------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 'a'); get_shard_id_for_distribution_column -------------------------------------- 540014 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', '{a, b, c}'); get_shard_id_for_distribution_column -------------------------------------- 540014 (1 row) -- test range distributed table CREATE TABLE get_shardid_test_table5(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table5', 'column1', 'range'); create_distributed_table -------------------------- (1 row) -- create worker shards SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard --------------------------- 540015 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard --------------------------- 540016 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard --------------------------- 540017 (1 row) SELECT master_create_empty_shard('get_shardid_test_table5'); master_create_empty_shard --------------------------- 540018 (1 row) -- now the comparison is done via the partition column type, which is text UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 1000 WHERE shardid = 540015; UPDATE pg_dist_shard SET shardminvalue = 1001, shardmaxvalue = 2000 WHERE shardid = 540016; UPDATE pg_dist_shard SET shardminvalue = 2001, shardmaxvalue = 3000 WHERE shardid = 540017; UPDATE pg_dist_shard SET shardminvalue = 3001, shardmaxvalue = 4000 WHERE shardid = 540018; SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 5); get_shard_id_for_distribution_column -------------------------------------- 540015 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 1111); get_shard_id_for_distribution_column -------------------------------------- 540016 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 2689); get_shard_id_for_distribution_column -------------------------------------- 540017 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 3248); 
get_shard_id_for_distribution_column -------------------------------------- 540018 (1 row) -- test non-existing value for range distributed tables SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 4001); get_shard_id_for_distribution_column -------------------------------------- 0 (1 row) SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', -999); get_shard_id_for_distribution_column -------------------------------------- 0 (1 row) -- clear unnecessary tables; DROP TABLE get_shardid_test_table1, get_shardid_test_table2, get_shardid_test_table3, get_shardid_test_table4, get_shardid_test_table5; citus-7.0.3/src/test/regress/expected/multi_drop_extension.out000066400000000000000000000030021317107136600246740ustar00rootroot00000000000000-- -- MULTI_DROP_EXTENSION -- -- Tests around dropping and recreating the extension ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 550000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); master_create_distributed_table --------------------------------- (1 row) -- this emits a NOTICE message for every table we are dropping with our CASCADE. It would -- be nice to check that we get those NOTICE messages, but it's nicer to not have to -- change this test every time the previous tests change the set of tables they leave -- around. SET client_min_messages TO 'WARNING'; DROP EXTENSION citus CASCADE; RESET client_min_messages; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- verify that a table can be created after the extension has been dropped and recreated CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); ?column? ---------- 1 (1 row) SELECT * FROM testtableddl; somecol | distributecol ---------+--------------- (0 rows) DROP TABLE testtableddl; citus-7.0.3/src/test/regress/expected/multi_dropped_column_aliases.out000066400000000000000000000075341317107136600263650ustar00rootroot00000000000000-- Tests that check that our query functionality behaves as expected when the -- table schema is modified via ALTER statements. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 620000; SELECT count(*) FROM customer; count ------- 1000 (1 row) SELECT * FROM customer LIMIT 2; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment -----------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+----------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. 
blithely ironic theodolites integrate boldly: caref (2 rows) ALTER TABLE customer ADD COLUMN new_column1 INTEGER; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE customer ADD COLUMN new_column2 INTEGER; SELECT count(*) FROM customer; count ------- 1000 (1 row) SELECT * FROM customer LIMIT 2; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | new_column1 | new_column2 -----------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+-----------------------------------------------------------------+-------------+------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e | | 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref | | (2 rows) ALTER TABLE customer DROP COLUMN new_column1; ALTER TABLE customer DROP COLUMN new_column2; SELECT count(*) FROM customer; count ------- 1000 (1 row) SELECT * FROM customer LIMIT 2; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment -----------+--------------------+--------------------------------+-------------+-----------------+-----------+--------------+----------------------------------------------------------------- 1 | Customer#000000001 | IVhzIApeRb ot,c,E | 15 | 25-989-741-2988 | 711.56 | BUILDING | to the even, regular platelets. regular, ironic epitaphs nag e 2 | Customer#000000002 | XSTf4,NCwDVaWNe6tEgvwfmRchLXak | 13 | 23-768-687-3665 | 121.65 | AUTOMOBILE | l accounts. blithely ironic theodolites integrate boldly: caref (2 rows) -- Verify joins work with dropped columns. SELECT count(*) FROM customer, orders WHERE c_custkey = o_custkey; count ------- 1955 (1 row) -- Test joinExpr aliases by performing an outer-join. This code path is -- currently not exercised, but we are adding this test to catch this bug when -- we start supporting outer joins. SELECT c_custkey FROM (customer LEFT OUTER JOIN orders ON (c_custkey = o_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN lineitem ON (test.c_custkey = l_orderkey) LIMIT 10; ERROR: cannot run outer join query if join is not on the partition column DETAIL: Outer joins requiring repartitioning are not supported. 
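-- Illustration only, not part of the archived expected-output files: a minimal sketch of
-- running the distributed DDL shown in the preceding test under two-phase commit, following
-- the HINT emitted by the ALTER TABLE commands above ("SET citus.multi_shard_commit_protocol
-- TO '2pc'"). The column name new_column3 is hypothetical; the GUC and the customer table
-- are the ones used by the test itself.
SET citus.multi_shard_commit_protocol TO '2pc';
ALTER TABLE customer ADD COLUMN new_column3 INTEGER;
ALTER TABLE customer DROP COLUMN new_column3;
RESET citus.multi_shard_commit_protocol;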
citus-7.0.3/src/test/regress/expected/multi_expire_table_cache.out000066400000000000000000000053441317107136600254350ustar00rootroot00000000000000--- --- MULTI_EXPIRE_TABLE_CACHE --- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; -- create test table CREATE TABLE large_table(a int, b int); SELECT master_create_distributed_table('large_table', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('large_table', 8, 1); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE broadcast_table(a int, b int); SELECT master_create_distributed_table('broadcast_table', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('broadcast_table', 2, 1); master_create_worker_shards ----------------------------- (1 row) -- verify only small tables are supported SELECT master_expire_table_cache('large_table'); ERROR: Must be called on tables smaller than 4 shards SELECT master_expire_table_cache('broadcast_table'); master_expire_table_cache --------------------------- (1 row) -- run a join so that broadcast tables are cached on other workers SELECT * from large_table l, broadcast_table b where l.a = b.b; a | b | a | b ---+---+---+--- (0 rows) -- insert some data INSERT INTO large_table VALUES(1, 1); INSERT INTO large_table VALUES(1, 2); INSERT INTO large_table VALUES(2, 1); INSERT INTO large_table VALUES(2, 2); INSERT INTO large_table VALUES(3, 1); INSERT INTO large_table VALUES(3, 2); INSERT INTO broadcast_table VALUES(1, 1); -- verify returned results are wrong SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; a | b | a | b ---+---+---+--- 1 | 1 | 1 | 1 2 | 1 | 1 | 1 (2 rows) -- expire cache and re-run, results should be correct this time SELECT master_expire_table_cache('broadcast_table'); master_expire_table_cache --------------------------- (1 row) SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; a | b | a | b ---+---+---+--- 1 | 1 | 1 | 1 2 | 1 | 1 | 1 3 | 1 | 1 | 1 (3 rows) -- insert some more data into broadcast table INSERT INTO broadcast_table VALUES(2, 2); -- run the same query, get wrong results SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; a | b | a | b ---+---+---+--- 1 | 1 | 1 | 1 2 | 1 | 1 | 1 3 | 1 | 1 | 1 3 | 2 | 2 | 2 (4 rows) -- expire cache and re-run, results should be correct this time SELECT master_expire_table_cache('broadcast_table'); master_expire_table_cache --------------------------- (1 row) SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; a | b | a | b ---+---+---+--- 1 | 1 | 1 | 1 1 | 2 | 2 | 2 2 | 1 | 1 | 1 2 | 2 | 2 | 2 3 | 1 | 1 | 1 3 | 2 | 2 | 2 (6 rows) DROP TABLE large_table, broadcast_table; citus-7.0.3/src/test/regress/expected/multi_explain.out000066400000000000000000001170361317107136600233110ustar00rootroot00000000000000-- -- MULTI_EXPLAIN -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- t (1 row) \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN 
EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- VACUMM related tables to ensure test outputs are stable VACUUM ANALYZE lineitem; VACUUM ANALYZE orders; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290001 lineitem -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; [ { "Plan": { "Node Type": "Sort", "Parallel Aware": false, "Sort Key": ["COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", "Parent Relationship": "Outer", "Parallel Aware": false, "Group Key": ["remote_scan.l_quantity"], "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Real-Time", "Parallel Aware": false, "Distributed Query": { "Job": { "Task Count": 8, "Tasks Shown": "One of 8", "Tasks": [ { "Node": "host=localhost port=57637 dbname=regression", "Remote Plan": [ [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", "Parallel Aware": false, "Group Key": ["l_quantity"], "Plans": [ { "Node Type": "Seq Scan", "Parent Relationship": "Outer", "Parallel Aware": false, "Relation Name": "lineitem_290001", "Alias": "lineitem" } ] } } ] ] } ] } } } ] } ] } } ] -- Validate JSON format SELECT true AS valid FROM explain_json($$ SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t -- Test XML format EXPLAIN (COSTS FALSE, FORMAT XML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort false COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint) remote_scan.l_quantity Aggregate Hashed Simple Outer false remote_scan.l_quantity Custom Scan Outer Citus Real-Time false 8 One of 8 host=localhost port=57637 dbname=regression Aggregate Hashed Simple false l_quantity Seq Scan Outer false lineitem_290001 lineitem -- Validate XML format SELECT true AS valid FROM explain_xml($$ SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t -- Test YAML format EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - Plan: Node Type: "Sort" Parallel Aware: false Sort Key: - "COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, 
'0'::bigint)" - "remote_scan.l_quantity" Plans: - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parent Relationship: "Outer" Parallel Aware: false Group Key: - "remote_scan.l_quantity" Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Real-Time" Parallel Aware: false Distributed Query: Job: Task Count: 8 Tasks Shown: "One of 8" Tasks: - Node: "host=localhost port=57637 dbname=regression" Remote Plan: - Plan: Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false Group Key: - "l_quantity" Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false Relation Name: "lineitem_290001" Alias: "lineitem" -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290001 lineitem -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; Aggregate Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) -> Custom Scan (Citus Real-Time) Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2" Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_290001 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0 ORDER BY l_quantity LIMIT 10; Limit -> Sort Sort Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Sort Sort Key: lineitem.l_quantity -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Scan using orders_pkey_290008 on orders_290008 orders -> Sort Sort Key: lineitem.l_orderkey -> Seq Scan on lineitem_290001 lineitem Filter: (l_quantity < 5.0) -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0); Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57638 dbname=regression -> Insert on lineitem_290000 citus_table_alias -> Values Scan on "*VALUES*" -- Test update EXPLAIN (COSTS FALSE) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57638 dbname=regression -> Update on lineitem_290000 lineitem -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost 
port=57638 dbname=regression -> Delete on lineitem_290000 lineitem -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- Test zero-shard update EXPLAIN (COSTS FALSE) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_orderkey = 0; Custom Scan (Citus Router) Task Count: 0 Tasks Shown: All -- Test zero-shard delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem WHERE l_orderkey = 1 AND l_orderkey = 0; Custom Scan (Citus Router) Task Count: 0 Tasks Shown: All -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); t SELECT true AS valid FROM explain_json($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); t -- Test CREATE TABLE ... AS EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem; Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Seq Scan on lineitem_290001 lineitem -- Test having EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem HAVING sum(l_quantity) > 100; Aggregate Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) Filter: (sum(remote_scan.worker_column_4) > '100'::numeric) -> Custom Scan (Citus Real-Time) Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4 Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) -> Seq Scan on public.lineitem_290001 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Test having without aggregate EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT l_quantity FROM lineitem GROUP BY l_quantity HAVING l_quantity > (100 * random()); HashAggregate Output: remote_scan.l_quantity Group Key: remote_scan.l_quantity Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random())) -> Custom Scan (Citus Real-Time) Output: remote_scan.l_quantity, remote_scan.worker_column_2 Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Output: l_quantity, l_quantity Group Key: lineitem.l_quantity -> Seq Scan on public.lineitem_290001 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Subquery pushdown tests with explain EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average FROM (SELECT tenant_id, user_id, array_agg(event_type ORDER BY event_time) AS events FROM (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, event_type, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND 
users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type IN ('click', 'submit', 'pay')) AS subquery GROUP BY tenant_id, user_id) AS subquery; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> GroupAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) -> Sort Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) -> Hash Join Hash Cond: (users.composite_id = events.composite_id) -> Seq Scan on users_1400029 users Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Hash -> Seq Scan on events_1400025 events Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[])) -- Union and left join subquery pushdown EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average, hasdone FROM (SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(hasdone, 'Has not done paying') AS hasdone FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id, (composite_id).tenant_id, (composite_id).user_id, 'Has done paying'::TEXT AS hasdone FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay') AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, hasdone) AS subquery_top GROUP BY hasdone; HashAggregate Group Key: remote_scan.hasdone -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> GroupAggregate Group Key: subquery_top.hasdone -> Sort Sort Key: subquery_top.hasdone -> Subquery Scan on subquery_top -> GroupAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone -> Sort Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone -> Hash Left Join Hash Cond: (users.composite_id = subquery_2.composite_id) -> HashAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time -> Append -> Hash Join Hash Cond: (users.composite_id = events.composite_id) -> Seq Scan on users_1400029 users Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Hash -> Seq Scan on events_1400025 events Filter: ((event_type)::text = 
'click'::text) -> Hash Join Hash Cond: (users_1.composite_id = events_1.composite_id) -> Seq Scan on users_1400029 users_1 Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Hash -> Seq Scan on events_1400025 events_1 Filter: ((event_type)::text = 'submit'::text) -> Hash -> Subquery Scan on subquery_2 -> Unique -> Sort Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id) -> Seq Scan on events_1400025 events_2 Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text)) -- Union, left join and having subquery pushdown EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, COUNT(*) AS count_pay FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay' GROUP BY composite_id HAVING COUNT(*) > 2) AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events) > 0 GROUP BY count_pay ORDER BY count_pay; ERROR: bogus varattno for OUTER_VAR var: 3 -- Lateral join subquery pushdown -- set subquery_pushdown due to limit in the query SET citus.subquery_pushdown to ON; EXPLAIN (COSTS OFF) SELECT tenant_id, user_id, user_lastseen, event_array FROM (SELECT tenant_id, user_id, max(lastseen) as user_lastseen, array_agg(event_type ORDER BY event_time) AS event_array FROM (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, lastseen FROM users WHERE composite_id >= '(1, -9223372036854775808)'::user_composite_type AND composite_id <= '(1, 9223372036854775807)'::user_composite_type ORDER BY lastseen DESC LIMIT 10 ) AS subquery_top LEFT JOIN LATERAL (SELECT event_type, event_time FROM events WHERE (composite_id) = subquery_top.composite_id ORDER BY event_time DESC LIMIT 99) AS subquery_lateral ON true GROUP BY tenant_id, user_id ) AS shard_union ORDER BY user_lastseen DESC LIMIT 10; Limit -> Sort Sort Key: remote_scan.user_lastseen DESC -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Sort Sort Key: (max(users.lastseen)) DESC -> GroupAggregate Group Key: ((users.composite_id).tenant_id), 
((users.composite_id).user_id) -> Sort Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) -> Nested Loop Left Join -> Limit -> Sort Sort Key: users.lastseen DESC -> Seq Scan on users_1400029 users Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Limit -> Sort Sort Key: events.event_time DESC -> Seq Scan on events_1400025 events Filter: (composite_id = users.composite_id) -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_290004 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290007 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_290006 lineitem Filter: (l_orderkey > 9030) SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); t SELECT true AS valid FROM explain_json($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); t -- Test track tracker SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 1 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 1 Merge Task Count: 1 -> MapMergeJob Map Task Count: 8 Merge Task Count: 1 EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Plain", "Partial Mode": "Simple", "Parallel Aware": false, "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Task-Tracker", "Parallel Aware": false, "Distributed Query": { "Job": { "Task Count": 1, "Tasks Shown": "None, not supported for re-partition queries", "Depended Jobs": [ { "Map Task Count": 1, "Merge Task Count": 1, "Depended Jobs": [ { "Map Task Count": 8, "Merge Task Count": 1 } ] } ] } } } ] } } ] SELECT true AS valid FROM explain_json($$ SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t EXPLAIN (COSTS FALSE, FORMAT XML) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate Plain Simple false Custom Scan Outer Citus Task-Tracker false 1 None, not 
supported for re-partition queries 1 1 8 1 SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM lineitem, orders, customer, supplier WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t -- make sure that EXPLAIN works without -- problems for queries that inlvolves only -- reference tables SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM nation WHERE n_name = 'CHINA'$$); t SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM nation, supplier WHERE nation.n_nationkey = supplier.s_nationkey$$); t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; - Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Parallel Aware: false Distributed Query: Job: Task Count: 1 Tasks Shown: "None, not supported for re-partition queries" Depended Jobs: - Map Task Count: 1 Merge Task Count: 1 Depended Jobs: - Map Task Count: 8 Merge Task Count: 1 -- test parallel aggregates SET parallel_setup_cost=0; SET parallel_tuple_cost=0; SET min_parallel_relation_size=0; ERROR: unrecognized configuration parameter "min_parallel_relation_size" SET min_parallel_table_scan_size=0; SET max_parallel_workers_per_gather=4; -- ensure local plans display correctly CREATE TABLE lineitem_clone (LIKE lineitem); EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone; Finalize Aggregate -> Gather Workers Planned: 3 -> Partial Aggregate -> Parallel Seq Scan on lineitem_clone -- ensure distributed plans don't break EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290001 lineitem -- ensure EXPLAIN EXECUTE doesn't crash PREPARE task_tracker_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) SET citus.task_executor_type TO 'real-time'; PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; EXPLAIN EXECUTE router_executor_query; Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) Index Cond: (l_orderkey = 5) PREPARE real_time_executor_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) -- EXPLAIN EXECUTE of parametrized prepared statements is broken, but -- at least make sure to fail without crashing PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1; EXPLAIN EXECUTE router_executor_query_param(5); Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 
width=0) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) Index Cond: (l_orderkey = 5) -- test explain in a transaction with alter table to test we use right connections BEGIN; CREATE TABLE explain_table(id int); SELECT create_distributed_table('explain_table', 'id'); ALTER TABLE explain_table ADD COLUMN value int; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ROLLBACK; -- test explain with local INSERT ... SELECT EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part SELECT o_orderkey FROM orders_hash_part LIMIT 3; Custom Scan (Citus INSERT ... SELECT via coordinator) -> Limit -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Seq Scan on orders_hash_part_360295 orders_hash_part SELECT true AS valid FROM explain_json($$ INSERT INTO lineitem_hash_part (l_orderkey) SELECT o_orderkey FROM orders_hash_part LIMIT 3; $$); t EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part (l_orderkey, l_quantity) SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3; Custom Scan (Citus INSERT ... SELECT via coordinator) -> Limit -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Seq Scan on orders_hash_part_360295 orders_hash_part EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part (l_orderkey) SELECT s FROM generate_series(1,5) s; Custom Scan (Citus INSERT ... SELECT via coordinator) -> Function Scan on generate_series s EXPLAIN (COSTS OFF) WITH cte1 AS (SELECT s FROM generate_series(1,10) s) INSERT INTO lineitem_hash_part WITH cte1 AS (SELECT * FROM cte1 LIMIT 5) SELECT s FROM cte1; Custom Scan (Citus INSERT ... SELECT via coordinator) -> Subquery Scan on citus_insert_select_subquery CTE cte1 -> Function Scan on generate_series s -> CTE Scan on cte1 CTE cte1 -> Limit -> CTE Scan on cte1 cte1_1 EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part ( SELECT s FROM generate_series(1,5) s) UNION ( SELECT s FROM generate_series(5,10) s); Custom Scan (Citus INSERT ... 
SELECT via coordinator) -> Subquery Scan on citus_insert_select_subquery -> HashAggregate Group Key: s.s -> Append -> Function Scan on generate_series s -> Function Scan on generate_series s_1 citus-7.0.3/src/test/regress/expected/multi_explain_0.out000066400000000000000000001170401317107136600235230ustar00rootroot00000000000000-- -- MULTI_EXPLAIN -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- f (1 row) \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- VACUMM related tables to ensure test outputs are stable VACUUM ANALYZE lineitem; VACUUM ANALYZE orders; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290001 lineitem -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; [ { "Plan": { "Node Type": "Sort", "Parallel Aware": false, "Sort Key": ["COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", "Parent Relationship": "Outer", "Parallel Aware": false, "Group Key": ["remote_scan.l_quantity"], "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Real-Time", "Parallel Aware": false, "Distributed Query": { "Job": { "Task Count": 8, "Tasks Shown": "One of 8", "Tasks": [ { "Node": "host=localhost port=57637 dbname=regression", "Remote Plan": [ [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", "Parallel Aware": false, "Group Key": ["l_quantity"], "Plans": [ { "Node Type": "Seq Scan", "Parent Relationship": "Outer", "Parallel Aware": false, "Relation Name": "lineitem_290001", "Alias": "lineitem" } ] } } ] ] } ] } } } ] } ] } } ] -- Validate JSON format SELECT true AS valid FROM explain_json($$ SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t -- Test XML format EXPLAIN (COSTS FALSE, FORMAT XML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort false 
COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint) remote_scan.l_quantity Aggregate Hashed Simple Outer false remote_scan.l_quantity Custom Scan Outer Citus Real-Time false 8 One of 8 host=localhost port=57637 dbname=regression Aggregate Hashed Simple false l_quantity Seq Scan Outer false lineitem_290001 lineitem -- Validate XML format SELECT true AS valid FROM explain_xml($$ SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t -- Test YAML format EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - Plan: Node Type: "Sort" Parallel Aware: false Sort Key: - "COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)" - "remote_scan.l_quantity" Plans: - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parent Relationship: "Outer" Parallel Aware: false Group Key: - "remote_scan.l_quantity" Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Real-Time" Parallel Aware: false Distributed Query: Job: Task Count: 8 Tasks Shown: "One of 8" Tasks: - Node: "host=localhost port=57637 dbname=regression" Remote Plan: - Plan: Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false Group Key: - "l_quantity" Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false Relation Name: "lineitem_290001" Alias: "lineitem" -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_290001 lineitem -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; Aggregate Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) -> Custom Scan (Citus Real-Time) Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2" Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_290001 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0 ORDER BY l_quantity LIMIT 10; Limit -> Sort Sort Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Sort Sort Key: lineitem.l_quantity -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Scan using orders_pkey_290008 on orders_290008 orders -> Sort Sort Key: lineitem.l_orderkey -> Seq Scan 
on lineitem_290001 lineitem Filter: (l_quantity < 5.0) -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0); Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57638 dbname=regression -> Insert on lineitem_290000 citus_table_alias -> Values Scan on "*VALUES*" -- Test update EXPLAIN (COSTS FALSE) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57638 dbname=regression -> Update on lineitem_290000 lineitem -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57638 dbname=regression -> Delete on lineitem_290000 lineitem -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- Test zero-shard update EXPLAIN (COSTS FALSE) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_orderkey = 0; Custom Scan (Citus Router) Task Count: 0 Tasks Shown: All -- Test zero-shard delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem WHERE l_orderkey = 1 AND l_orderkey = 0; Custom Scan (Citus Router) Task Count: 0 Tasks Shown: All -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); t SELECT true AS valid FROM explain_json($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); t -- Test CREATE TABLE ... 
AS EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem; Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Seq Scan on lineitem_290001 lineitem -- Test having EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem HAVING sum(l_quantity) > 100; Aggregate Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) Filter: (sum(remote_scan.worker_column_4) > '100'::numeric) -> Custom Scan (Citus Real-Time) Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2", remote_scan.worker_column_4 Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity), sum(l_quantity) -> Seq Scan on public.lineitem_290001 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Test having without aggregate EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT l_quantity FROM lineitem GROUP BY l_quantity HAVING l_quantity > (100 * random()); HashAggregate Output: remote_scan.l_quantity Group Key: remote_scan.l_quantity Filter: ((remote_scan.worker_column_2)::double precision > ('100'::double precision * random())) -> Custom Scan (Citus Real-Time) Output: remote_scan.l_quantity, remote_scan.worker_column_2 Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Output: l_quantity, l_quantity Group Key: lineitem.l_quantity -> Seq Scan on public.lineitem_290001 lineitem Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Subquery pushdown tests with explain EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average FROM (SELECT tenant_id, user_id, array_agg(event_type ORDER BY event_time) AS events FROM (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, event_type, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type IN ('click', 'submit', 'pay')) AS subquery GROUP BY tenant_id, user_id) AS subquery; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> GroupAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) -> Sort Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) -> Hash Join Hash Cond: (users.composite_id = events.composite_id) -> Seq Scan on users_1400029 users Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Hash -> Seq Scan on events_1400025 events Filter: ((event_type)::text = ANY ('{click,submit,pay}'::text[])) -- Union and left join subquery pushdown EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average, hasdone FROM (SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, 
COALESCE(hasdone, 'Has not done paying') AS hasdone FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id, (composite_id).tenant_id, (composite_id).user_id, 'Has done paying'::TEXT AS hasdone FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay') AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, hasdone) AS subquery_top GROUP BY hasdone; HashAggregate Group Key: remote_scan.hasdone -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> GroupAggregate Group Key: subquery_top.hasdone -> Sort Sort Key: subquery_top.hasdone -> Subquery Scan on subquery_top -> GroupAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone -> Sort Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), subquery_2.hasdone -> Hash Left Join Hash Cond: (users.composite_id = subquery_2.composite_id) -> HashAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id), users.composite_id, ('action=>1'::text), events.event_time -> Append -> Hash Join Hash Cond: (users.composite_id = events.composite_id) -> Seq Scan on users_1400029 users Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Hash -> Seq Scan on events_1400025 events Filter: ((event_type)::text = 'click'::text) -> Hash Join Hash Cond: (users_1.composite_id = events_1.composite_id) -> Seq Scan on users_1400029 users_1 Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Hash -> Seq Scan on events_1400025 events_1 Filter: ((event_type)::text = 'submit'::text) -> Hash -> Subquery Scan on subquery_2 -> Unique -> Sort Sort Key: ((events_2.composite_id).tenant_id), ((events_2.composite_id).user_id) -> Seq Scan on events_1400025 events_2 Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type) AND ((event_type)::text = 'pay'::text)) -- Union, left join and having subquery pushdown EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, 
(users.composite_id), 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, COUNT(*) AS count_pay FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay' GROUP BY composite_id HAVING COUNT(*) > 2) AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events) > 0 GROUP BY count_pay ORDER BY count_pay; ERROR: bogus varattno for OUTER_VAR var: 3 -- Lateral join subquery pushdown -- set subquery_pushdown due to limit in the query SET citus.subquery_pushdown to ON; EXPLAIN (COSTS OFF) SELECT tenant_id, user_id, user_lastseen, event_array FROM (SELECT tenant_id, user_id, max(lastseen) as user_lastseen, array_agg(event_type ORDER BY event_time) AS event_array FROM (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, lastseen FROM users WHERE composite_id >= '(1, -9223372036854775808)'::user_composite_type AND composite_id <= '(1, 9223372036854775807)'::user_composite_type ORDER BY lastseen DESC LIMIT 10 ) AS subquery_top LEFT JOIN LATERAL (SELECT event_type, event_time FROM events WHERE (composite_id) = subquery_top.composite_id ORDER BY event_time DESC LIMIT 99) AS subquery_lateral ON true GROUP BY tenant_id, user_id ) AS shard_union ORDER BY user_lastseen DESC LIMIT 10; Limit -> Sort Sort Key: remote_scan.user_lastseen DESC -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Sort Sort Key: (max(users.lastseen)) DESC -> GroupAggregate Group Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) -> Sort Sort Key: ((users.composite_id).tenant_id), ((users.composite_id).user_id) -> Nested Loop Left Join -> Limit -> Sort Sort Key: users.lastseen DESC -> Seq Scan on users_1400029 users Filter: ((composite_id >= '(1,-9223372036854775808)'::user_composite_type) AND (composite_id <= '(1,9223372036854775807)'::user_composite_type)) -> Limit -> Sort Sort Key: events.event_time DESC -> Seq Scan on events_1400025 events Filter: (composite_id = users.composite_id) -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_290004 lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290007 
lineitem Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_290006 lineitem Filter: (l_orderkey > 9030) SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); t SELECT true AS valid FROM explain_json($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); t -- Test track tracker SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 1 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 1 Merge Task Count: 1 -> MapMergeJob Map Task Count: 8 Merge Task Count: 1 EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Plain", "Partial Mode": "Simple", "Parallel Aware": false, "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Task-Tracker", "Parallel Aware": false, "Distributed Query": { "Job": { "Task Count": 1, "Tasks Shown": "None, not supported for re-partition queries", "Depended Jobs": [ { "Map Task Count": 1, "Merge Task Count": 1, "Depended Jobs": [ { "Map Task Count": 8, "Merge Task Count": 1 } ] } ] } } } ] } } ] SELECT true AS valid FROM explain_json($$ SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t EXPLAIN (COSTS FALSE, FORMAT XML) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate Plain Simple false Custom Scan Outer Citus Task-Tracker false 1 None, not supported for re-partition queries 1 1 8 1 SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM lineitem, orders, customer, supplier WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t -- make sure that EXPLAIN works without -- problems for queries that inlvolves only -- reference tables SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM nation WHERE n_name = 'CHINA'$$); t SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM nation, supplier WHERE nation.n_nationkey = supplier.s_nationkey$$); t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; - Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Parallel Aware: false Distributed Query: Job: Task Count: 1 Tasks Shown: "None, not supported for re-partition queries" Depended Jobs: - Map 
Task Count: 1 Merge Task Count: 1 Depended Jobs: - Map Task Count: 8 Merge Task Count: 1 -- test parallel aggregates SET parallel_setup_cost=0; SET parallel_tuple_cost=0; SET min_parallel_relation_size=0; SET min_parallel_table_scan_size=0; ERROR: unrecognized configuration parameter "min_parallel_table_scan_size" SET max_parallel_workers_per_gather=4; -- ensure local plans display correctly CREATE TABLE lineitem_clone (LIKE lineitem); EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone; Finalize Aggregate -> Gather Workers Planned: 3 -> Partial Aggregate -> Parallel Seq Scan on lineitem_clone -- ensure distributed plans don't break EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 8 Tasks Shown: One of 8 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290001 lineitem -- ensure EXPLAIN EXECUTE doesn't crash PREPARE task_tracker_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) SET citus.task_executor_type TO 'real-time'; PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; EXPLAIN EXECUTE router_executor_query; Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) Index Cond: (l_orderkey = 5) PREPARE real_time_executor_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_290005 lineitem Filter: (l_orderkey > 9030) -- EXPLAIN EXECUTE of parametrized prepared statements is broken, but -- at least make sure to fail without crashing PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1; EXPLAIN EXECUTE router_executor_query_param(5); Custom Scan (Citus Router) (cost=0.00..0.00 rows=0 width=0) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem (cost=0.28..11.83 rows=3 width=5) Index Cond: (l_orderkey = 5) -- test explain in a transaction with alter table to test we use right connections BEGIN; CREATE TABLE explain_table(id int); SELECT create_distributed_table('explain_table', 'id'); ALTER TABLE explain_table ADD COLUMN value int; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ROLLBACK; -- test explain with local INSERT ... SELECT EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part SELECT o_orderkey FROM orders_hash_part LIMIT 3; Custom Scan (Citus INSERT ... 
SELECT via coordinator) -> Limit -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Seq Scan on orders_hash_part_360295 orders_hash_part SELECT true AS valid FROM explain_json($$ INSERT INTO lineitem_hash_part (l_orderkey) SELECT o_orderkey FROM orders_hash_part LIMIT 3; $$); t EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part (l_orderkey, l_quantity) SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3; Custom Scan (Citus INSERT ... SELECT via coordinator) -> Limit -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Seq Scan on orders_hash_part_360295 orders_hash_part EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part (l_orderkey) SELECT s FROM generate_series(1,5) s; Custom Scan (Citus INSERT ... SELECT via coordinator) -> Function Scan on generate_series s EXPLAIN (COSTS OFF) WITH cte1 AS (SELECT s FROM generate_series(1,10) s) INSERT INTO lineitem_hash_part WITH cte1 AS (SELECT * FROM cte1 LIMIT 5) SELECT s FROM cte1; Custom Scan (Citus INSERT ... SELECT via coordinator) -> Subquery Scan on citus_insert_select_subquery CTE cte1 -> Function Scan on generate_series s -> CTE Scan on cte1 CTE cte1 -> Limit -> CTE Scan on cte1 cte1_1 EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part ( SELECT s FROM generate_series(1,5) s) UNION ( SELECT s FROM generate_series(5,10) s); Custom Scan (Citus INSERT ... SELECT via coordinator) -> Subquery Scan on citus_insert_select_subquery -> HashAggregate Group Key: s.s -> Append -> Function Scan on generate_series s -> Function Scan on generate_series s_1 citus-7.0.3/src/test/regress/expected/multi_extension.out000066400000000000000000000312501317107136600236560ustar00rootroot00000000000000-- -- MULTI_EXTENSION -- -- Tests around extension creation / upgrades -- -- It'd be nice to script generation of this file, but alas, that's -- not done yet. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 580000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 580000; CREATE SCHEMA test; CREATE OR REPLACE FUNCTION test.maintenance_worker(p_dbname text DEFAULT current_database()) RETURNS pg_stat_activity LANGUAGE plpgsql AS $$ DECLARE activity record; BEGIN LOOP SELECT * INTO activity FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; IF activity.pid IS NOT NULL THEN RETURN activity; ELSE PERFORM pg_sleep(0.1); PERFORM pg_stat_clear_snapshot(); END IF ; END LOOP; END; $$; -- check maintenance daemon is started SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); datname | ?column? | ?column? 
------------+----------+---------- regression | t | t (1 row) -- ensure no objects were created outside pg_catalog SELECT COUNT(*) FROM pg_depend AS pgd, pg_extension AS pge, LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'test'); count ------- 0 (1 row) -- DROP EXTENSION pre-created by the regression suite DROP EXTENSION citus; \c SET citus.enable_version_checks TO 'false'; -- Create extension in oldest version CREATE EXTENSION citus VERSION '5.0'; ALTER EXTENSION citus UPDATE TO '5.0-1'; ALTER EXTENSION citus UPDATE TO '5.0-2'; ALTER EXTENSION citus UPDATE TO '5.1-1'; ALTER EXTENSION citus UPDATE TO '5.1-2'; ALTER EXTENSION citus UPDATE TO '5.1-3'; ALTER EXTENSION citus UPDATE TO '5.1-4'; ALTER EXTENSION citus UPDATE TO '5.1-5'; ALTER EXTENSION citus UPDATE TO '5.1-6'; ALTER EXTENSION citus UPDATE TO '5.1-7'; ALTER EXTENSION citus UPDATE TO '5.1-8'; ALTER EXTENSION citus UPDATE TO '5.2-1'; ALTER EXTENSION citus UPDATE TO '5.2-2'; ALTER EXTENSION citus UPDATE TO '5.2-3'; ALTER EXTENSION citus UPDATE TO '5.2-4'; ALTER EXTENSION citus UPDATE TO '6.0-1'; ALTER EXTENSION citus UPDATE TO '6.0-2'; ALTER EXTENSION citus UPDATE TO '6.0-3'; ALTER EXTENSION citus UPDATE TO '6.0-4'; ALTER EXTENSION citus UPDATE TO '6.0-5'; ALTER EXTENSION citus UPDATE TO '6.0-6'; ALTER EXTENSION citus UPDATE TO '6.0-7'; ALTER EXTENSION citus UPDATE TO '6.0-8'; ALTER EXTENSION citus UPDATE TO '6.0-9'; ALTER EXTENSION citus UPDATE TO '6.0-10'; ALTER EXTENSION citus UPDATE TO '6.0-11'; ALTER EXTENSION citus UPDATE TO '6.0-12'; ALTER EXTENSION citus UPDATE TO '6.0-13'; ALTER EXTENSION citus UPDATE TO '6.0-14'; ALTER EXTENSION citus UPDATE TO '6.0-15'; ALTER EXTENSION citus UPDATE TO '6.0-16'; ALTER EXTENSION citus UPDATE TO '6.0-17'; ALTER EXTENSION citus UPDATE TO '6.0-18'; ALTER EXTENSION citus UPDATE TO '6.1-1'; ALTER EXTENSION citus UPDATE TO '6.1-2'; ALTER EXTENSION citus UPDATE TO '6.1-3'; ALTER EXTENSION citus UPDATE TO '6.1-4'; ALTER EXTENSION citus UPDATE TO '6.1-5'; ALTER EXTENSION citus UPDATE TO '6.1-6'; ALTER EXTENSION citus UPDATE TO '6.1-7'; ALTER EXTENSION citus UPDATE TO '6.1-8'; ALTER EXTENSION citus UPDATE TO '6.1-9'; ALTER EXTENSION citus UPDATE TO '6.1-10'; ALTER EXTENSION citus UPDATE TO '6.1-11'; ALTER EXTENSION citus UPDATE TO '6.1-12'; ALTER EXTENSION citus UPDATE TO '6.1-13'; ALTER EXTENSION citus UPDATE TO '6.1-14'; ALTER EXTENSION citus UPDATE TO '6.1-15'; ALTER EXTENSION citus UPDATE TO '6.1-16'; ALTER EXTENSION citus UPDATE TO '6.1-17'; ALTER EXTENSION citus UPDATE TO '6.2-1'; ALTER EXTENSION citus UPDATE TO '6.2-2'; ALTER EXTENSION citus UPDATE TO '6.2-3'; ALTER EXTENSION citus UPDATE TO '6.2-4'; ALTER EXTENSION citus UPDATE TO '7.0-1'; ALTER EXTENSION citus UPDATE TO '7.0-2'; ALTER EXTENSION citus UPDATE TO '7.0-3'; ALTER EXTENSION citus UPDATE TO '7.0-4'; ALTER EXTENSION citus UPDATE TO '7.0-5'; ALTER EXTENSION citus UPDATE TO '7.0-6'; ALTER EXTENSION citus UPDATE TO '7.0-7'; ALTER EXTENSION citus UPDATE TO '7.0-8'; ALTER EXTENSION citus UPDATE TO '7.0-9'; ALTER EXTENSION citus UPDATE TO '7.0-10'; ALTER EXTENSION citus UPDATE TO '7.0-11'; ALTER EXTENSION citus UPDATE TO '7.0-12'; ALTER EXTENSION citus UPDATE TO '7.0-13'; ALTER EXTENSION citus UPDATE TO '7.0-14'; ALTER EXTENSION citus UPDATE TO '7.0-15'; -- show running version SHOW citus.version; citus.version --------------- 7.0.3 (1 row) -- ensure no objects were 
created outside pg_catalog SELECT COUNT(*) FROM pg_depend AS pgd, pg_extension AS pge, LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'test'); count ------- 0 (1 row) -- see incompatible version errors out RESET citus.enable_version_checks; DROP EXTENSION citus; CREATE EXTENSION citus VERSION '5.0'; ERROR: specified version incompatible with loaded Citus library DETAIL: Loaded library requires 7.0, but 5.0 was specified. HINT: If a newer library is present, restart the database and try the command again. -- Test non-distributed queries work even in version mismatch SET citus.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '6.2-1'; SET citus.enable_version_checks TO 'true'; -- Test CREATE TABLE CREATE TABLE version_mismatch_table(column1 int); -- Test COPY \copy version_mismatch_table FROM STDIN; -- Test INSERT INSERT INTO version_mismatch_table(column1) VALUES(5); -- Test SELECT SELECT * FROM version_mismatch_table ORDER BY column1; column1 --------- 0 1 2 3 4 5 (6 rows) -- Test SELECT from pg_catalog SELECT d.datname as "Name", pg_catalog.pg_get_userbyid(d.datdba) as "Owner", pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" FROM pg_catalog.pg_database d ORDER BY 1; Name | Owner | Access privileges ------------+----------+----------------------- postgres | postgres | regression | postgres | template0 | postgres | =c/postgres + | | postgres=CTc/postgres template1 | postgres | =c/postgres + | | postgres=CTc/postgres (4 rows) -- We should not distribute table in version mistmatch SELECT create_distributed_table('version_mismatch_table', 'column1'); ERROR: loaded Citus library version differs from installed extension version DETAIL: Loaded library requires 7.0, but the installed extension version is 6.2-1. HINT: Run ALTER EXTENSION citus UPDATE and try again. 
-- This function will cause fail in next ALTER EXTENSION CREATE OR REPLACE FUNCTION pg_catalog.citus_table_size(table_name regclass) RETURNS bigint LANGUAGE plpgsql AS $function$ BEGIN END; $function$; SET citus.enable_version_checks TO 'false'; -- This will fail because of previous function declaration ALTER EXTENSION citus UPDATE TO '6.2-2'; ERROR: function "citus_table_size" already exists with same argument types -- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on SET citus.enable_version_checks TO 'true'; DROP FUNCTION citus_table_size(regclass); SET citus.enable_version_checks TO 'false'; ALTER EXTENSION citus UPDATE TO '6.2-2'; -- Test updating to the latest version without specifying the version number ALTER EXTENSION citus UPDATE; -- re-create in newest version DROP EXTENSION citus; \c CREATE EXTENSION citus; -- test cache invalidation in workers \c - - - :worker_1_port DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '5.2-4'; SET citus.enable_version_checks TO 'true'; -- during ALTER EXTENSION, we should invalidate the cache ALTER EXTENSION citus UPDATE; -- if cache is invalidated succesfull, this \d should work without any problem \d List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \c - - - :master_port -- check that maintenance daemon gets (re-)started for the right user DROP EXTENSION citus; CREATE USER testuser SUPERUSER; SET ROLE testuser; CREATE EXTENSION citus; SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); datname | ?column? | ?column? ------------+----------+---------- regression | t | t (1 row) -- and recreate as the right owner RESET ROLE; DROP EXTENSION citus; CREATE EXTENSION citus; -- Check that maintenance daemon can also be started in another database CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to workers HINT: You can manually create a database and its extensions on workers. \c another CREATE EXTENSION citus; CREATE SCHEMA test; CREATE OR REPLACE FUNCTION test.maintenance_worker(p_dbname text DEFAULT current_database()) RETURNS pg_stat_activity LANGUAGE plpgsql AS $$ DECLARE activity record; BEGIN LOOP SELECT * INTO activity FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; IF activity.pid IS NOT NULL THEN RETURN activity; ELSE PERFORM pg_sleep(0.1); PERFORM pg_stat_clear_snapshot(); END IF ; END LOOP; END; $$; -- see that the deamon started SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); datname | ?column? | ?column? ---------+----------+---------- another | t | t (1 row) -- Test that database with active worker can be dropped. 
\c regression CREATE SCHEMA test_deamon; -- we create a similar function on the regression database -- note that this function checks for the existence of the daemon -- when not found, returns true else tries for 5 times and -- returns false CREATE OR REPLACE FUNCTION test_deamon.maintenance_deamon_died(p_dbname text) RETURNS boolean LANGUAGE plpgsql AS $$ DECLARE activity record; BEGIN PERFORM pg_stat_clear_snapshot(); LOOP SELECT * INTO activity FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; IF activity.pid IS NULL THEN RETURN true; ELSE RETURN false; END IF; END LOOP; END; $$; -- drop the database and see that the deamon is dead DROP DATABASE another; SELECT * FROM test_deamon.maintenance_deamon_died('another'); maintenance_deamon_died ------------------------- t (1 row) -- we don't need the schema and the function anymore DROP SCHEMA test_deamon CASCADE; NOTICE: drop cascades to function test_deamon.maintenance_deamon_died(text) -- verify citus does not crash while creating a table when run against an older worker -- create_distributed_table piggybacks multiple commands into single one, if one worker -- did not have the required UDF it should fail instead of crash. -- create a test database, configure citus with single node CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to workers HINT: You can manually create a database and its extensions on workers. \c - - - :worker_1_port CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to workers HINT: You can manually create a database and its extensions on workers. \c - - - :master_port \c another CREATE EXTENSION citus; SELECT FROM master_add_node('localhost', :worker_1_port); -- (1 row) \c - - - :worker_1_port CREATE EXTENSION citus; ALTER FUNCTION assign_distributed_transaction_id(initiator_node_identifier integer, transaction_number bigint, transaction_stamp timestamp with time zone) RENAME TO dummy_assign_function; \c - - - :master_port SET citus.shard_replication_factor to 1; -- create_distributed_table command should fail CREATE TABLE t1(a int, b int); SELECT create_distributed_table('t1', 'a'); WARNING: function assign_distributed_transaction_id(integer, integer, unknown) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
CONTEXT: while executing command on localhost:57637 ERROR: current transaction is aborted, commands ignored until end of transaction block CONTEXT: while executing command on localhost:57637 \c regression \c - - - :worker_1_port DROP DATABASE another; \c - - - :master_port DROP DATABASE another; citus-7.0.3/src/test/regress/expected/multi_follower_configure_followers.out000066400000000000000000000014211317107136600276250ustar00rootroot00000000000000-- prepare for future tests by configuring all the follower nodes \c - - - :follower_master_port ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); pg_reload_conf ---------------- t (1 row) -- also configure the workers, they'll run queries when MX is enabled \c - - - :follower_worker_1_port ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); pg_reload_conf ---------------- t (1 row) \c - - - :follower_worker_2_port ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); pg_reload_conf ---------------- t (1 row) citus-7.0.3/src/test/regress/expected/multi_follower_sanity_check.out000066400000000000000000000006721317107136600262230ustar00rootroot00000000000000-- check that the nodes are all in read-only mode and rejecting write queries \c - - - :follower_master_port CREATE TABLE tab (a int); ERROR: cannot execute CREATE TABLE in a read-only transaction \c - - - :follower_worker_1_port CREATE TABLE tab (a int); ERROR: cannot execute CREATE TABLE in a read-only transaction \c - - - :follower_worker_2_port CREATE TABLE tab (a int); ERROR: cannot execute CREATE TABLE in a read-only transaction citus-7.0.3/src/test/regress/expected/multi_follower_select_statements.out000066400000000000000000000057271317107136600273130ustar00rootroot00000000000000\c - - - :master_port -- do some setup SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) CREATE TABLE the_table (a int, b int); SELECT create_distributed_table('the_table', 'a'); create_distributed_table -------------------------- (1 row) INSERT INTO the_table (a, b) VALUES (1, 1); INSERT INTO the_table (a, b) VALUES (1, 2); -- connect to the follower and check that a simple select query works, the follower -- is still in the default cluster and will send queries to the primary nodes \c - - - :follower_master_port SELECT * FROM the_table; a | b ---+--- 1 | 1 1 | 2 (2 rows) -- now, connect to the follower but tell it to use secondary nodes. There are no -- secondary nodes so this should fail. -- (this is :follower_master_port but substitution doesn't work here) \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" SELECT * FROM the_table; ERROR: node group 2 does not have a secondary node -- add the secondary nodes and try again, the SELECT statement should work this time \c - - - :master_port SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port), noderole => 'secondary'); ?column? ---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port), noderole => 'secondary'); ?column? 
---------- 1 (1 row) \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'" -- now that we've added secondaries this should work SELECT * FROM the_table; a | b ---+--- 1 | 1 1 | 2 (2 rows) SELECT node_name, node_port FROM master_get_active_worker_nodes() ORDER BY node_name, node_port; node_name | node_port -----------+----------- localhost | 9071 localhost | 9072 (2 rows) -- okay, now let's play with nodecluster. If we change the cluster of our follower node -- queries should stat failing again, since there are no worker nodes in the new cluster \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" -- there are no secondary nodes in this cluster, so this should fail! SELECT * FROM the_table; ERROR: there is a shard placement in node group 2 but there are no nodes in that group -- now move the secondary nodes into the new cluster and see that the follower, finally -- correctly configured, can run select queries involving them \c - - - :master_port UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary'; \c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'" SELECT * FROM the_table; a | b ---+--- 1 | 1 1 | 2 (2 rows) -- clean up after ourselves \c - - - :master_port DROP TABLE the_table; citus-7.0.3/src/test/regress/expected/multi_follower_task_tracker.out000066400000000000000000000012211317107136600262230ustar00rootroot00000000000000\c - - - :master_port -- do some setup CREATE TABLE tab(a int, b int); SELECT create_distributed_table('tab', 'a'); create_distributed_table -------------------------- (1 row) INSERT INTO tab (a, b) VALUES (1, 1); INSERT INTO tab (a, b) VALUES (1, 2); \c - - - :follower_master_port SET citus.task_executor_type TO 'real-time'; SELECT * FROM tab; a | b ---+--- 1 | 1 1 | 2 (2 rows) SET citus.task_executor_type TO 'task-tracker'; SELECT * FROM tab; ERROR: task tracker queries are not allowed while citus.use_secondary_nodes is 'always' HINT: try setting citus.task_executor_type TO 'real-time' -- clean up \c - - - :master_port DROP TABLE tab; citus-7.0.3/src/test/regress/expected/multi_foreign_key.out000066400000000000000000001156651317107136600241600ustar00rootroot00000000000000-- -- MULTI_FOREIGN_KEY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1350000; -- set shard_count to 4 for faster tests, because we create/drop lots of shards in this test. SET citus.shard_count TO 4; -- create tables CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) -- test foreign constraint creation with not supported parameters CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation. DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation. 
DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operation. DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operation. DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE CASCADE); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operation. DROP TABLE referencing_table; -- test foreign constraint creation on NOT co-located tables SET citus.shard_count TO 8; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: Foreign key constraint can only be created on co-located tables. DROP TABLE referencing_table; SET citus.shard_count TO 4; -- test foreign constraint creation on non-partition columns CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: Partition column must exist both referencing and referenced side of the foreign constraint statement and it must be in the same ordinal in both sides. DROP TABLE referencing_table; -- test foreign constraint creation while column list are in incorrect order CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: Partition column must exist both referencing and referenced side of the foreign constraint statement and it must be in the same ordinal in both sides. DROP TABLE referencing_table; -- test foreign constraint with replication factor > 1 CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); ERROR: cannot create foreign key constraint DETAIL: Citus Community Edition currently supports foreign key constraints only for "citus.shard_replication_factor = 1". HINT: Please change "citus.shard_replication_factor to 1". To learn more about using foreign keys with other replication factors, please contact us at https://citusdata.com/about/contact_us. 
DROP TABLE referencing_table; DROP TABLE referenced_table; -- test foreign constraint with correct conditions SET citus.shard_replication_factor TO 1; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) -- test inserts -- test insert to referencing table while there is NO corresponding value in referenced table INSERT INTO referencing_table VALUES(1, 1); ERROR: insert or update on table "referencing_table_1350008" violates foreign key constraint "referencing_table_ref_id_fkey_1350008" DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350004". CONTEXT: while executing command on localhost:57637 -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); -- test deletes -- test delete from referenced table while there is corresponding value in referencing table DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_1350004" violates foreign key constraint "referencing_table_ref_id_fkey_1350008" on table "referencing_table_1350008" DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350008". CONTEXT: while executing command on localhost:57637 -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; -- test cascading truncate INSERT INTO referenced_table VALUES(2, 2); INSERT INTO referencing_table VALUES(2, 2); TRUNCATE referenced_table CASCADE; NOTICE: truncate cascades to table "referencing_table" SELECT * FROM referencing_table; id | ref_id ----+-------- (0 rows) -- drop table for next tests DROP TABLE referencing_table; DROP TABLE referenced_table; -- test foreign constraint options -- test ON DELETE CASCADE CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE CASCADE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) -- single shard cascading delete INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; id | ref_id ----+-------- (0 rows) SELECT * FROM referenced_table; id | test_column ----+------------- (0 rows) -- multi shard cascading delete INSERT INTO referenced_table VALUES(2, 2); INSERT INTO referencing_table VALUES(2, 2); SELECT master_modify_multiple_shards('DELETE FROM referenced_table'); master_modify_multiple_shards ------------------------------- 1 (1 row) SELECT * FROM referencing_table; id | ref_id ----+-------- (0 rows) -- multi shard cascading delete with alter table INSERT INTO referenced_table VALUES(3, 3); INSERT INTO referencing_table VALUES(3, 3); BEGIN; ALTER TABLE referencing_table ADD COLUMN 
x int DEFAULT 0; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT master_modify_multiple_shards('DELETE FROM referenced_table'); master_modify_multiple_shards ------------------------------- 1 (1 row) COMMIT; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON DELETE NO ACTION + DEFERABLE + INITIALLY DEFERRED CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_1350020" violates foreign key constraint "referencing_table_ref_id_fkey_1350024" on table "referencing_table_1350024" DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350024". CONTEXT: while executing command on localhost:57637 BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- (0 rows) SELECT * FROM referenced_table; id | test_column ----+------------- (0 rows) DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON DELETE RESTRICT CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_1350028" violates foreign key constraint "referencing_table_ref_id_fkey_1350032" on table "referencing_table_1350032" DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350032". 
CONTEXT: while executing command on localhost:57637 DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- 1 | 1 (1 row) SELECT * FROM referenced_table; id | test_column ----+------------- 1 | 1 (1 row) DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON UPDATE NO ACTION + DEFERABLE + INITIALLY DEFERRED CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); UPDATE referenced_table SET test_column = 10 WHERE id = 1; ERROR: update or delete on table "referenced_table_1350036" violates foreign key constraint "referencing_table_ref_id_fkey_1350040" on table "referencing_table_1350040" DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350040". CONTEXT: while executing command on localhost:57637 BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- 10 | 1 (1 row) SELECT * FROM referenced_table; id | test_column ----+------------- 1 | 10 (1 row) DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON UPDATE RESTRICT CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; ERROR: update or delete on table "referenced_table_1350044" violates foreign key constraint "referencing_table_ref_id_fkey_1350048" on table "referencing_table_1350048" DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350048". 
CONTEXT: while executing command on localhost:57637 UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- 1 | 1 (1 row) SELECT * FROM referenced_table; id | test_column ----+------------- 1 | 1 (1 row) DROP TABLE referencing_table; DROP TABLE referenced_table; -- test MATCH SIMPLE CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; id | ref_id ----+-------- | 2 (1 row) DELETE FROM referencing_table WHERE ref_id = 2; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test MATCH FULL CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) INSERT INTO referencing_table VALUES(null, 2); ERROR: insert or update on table "referencing_table_1350067" violates foreign key constraint "referencing_table_ref_id_fkey_1350067" DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
CONTEXT: while executing command on localhost:57638 SELECT * FROM referencing_table; id | ref_id ----+-------- (0 rows) DROP TABLE referencing_table; DROP TABLE referenced_table; -- Similar tests, but this time we push foreign key constraints created by ALTER TABLE queries -- create tables CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT master_create_distributed_table('referenced_table', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('referenced_table', 4, 1); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE referencing_table(id int, ref_id int); SELECT master_create_distributed_table('referencing_table', 'ref_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('referencing_table', 4, 1); master_create_worker_shards ----------------------------- (1 row) -- verify that we skip foreign key validation when propagation is turned off -- not skipping validation would result in a distributed query, which emits debug messages BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; SET LOCAL client_min_messages TO DEBUG2; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY (ref_id) REFERENCES referenced_table (id); ABORT; -- test foreign constraint creation -- test foreign constraint creation with not supported parameters ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET NULL; ERROR: cannot create foreign key constraint DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation. ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT; ERROR: cannot create foreign key constraint DETAIL: SET NULL or SET DEFAULT is not supported in ON DELETE operation. ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET NULL; ERROR: cannot create foreign key constraint DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operation. ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT; ERROR: cannot create foreign key constraint DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operation. ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE CASCADE; ERROR: cannot create foreign key constraint DETAIL: SET NULL, SET DEFAULT or CASCADE is not supported in ON UPDATE operation. 
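-- a minimal sketch (not taken from this regression suite, assuming co-located tables
-- keyed on the partition column): the same ADD CONSTRAINT is accepted when it sticks
-- to the supported actions, e.g.
--   ALTER TABLE referencing_table
--       ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id)
--       REFERENCES referenced_table(id) ON DELETE CASCADE ON UPDATE NO ACTION;
-- ON DELETE/ON UPDATE RESTRICT and NO ACTION are the other accepted actions, as the
-- tests further below exercise.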
-- test foreign constraint creation with multiple subcommands ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id), ADD CONSTRAINT test_constraint FOREIGN KEY(id) REFERENCES referenced_table(test_column); ERROR: cannot execute ADD CONSTRAINT command with other subcommands HINT: You can issue each subcommand separately -- test foreign constraint creation without giving explicit name ALTER TABLE referencing_table ADD FOREIGN KEY(ref_id) REFERENCES referenced_table(id); ERROR: cannot create constraint without a name on a distributed table -- test foreign constraint creation on NOT co-located tables ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); ERROR: cannot create foreign key constraint DETAIL: Foreign key constraint can only be created on co-located tables. -- create co-located tables DROP TABLE referencing_table; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referenced_table', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); create_distributed_table -------------------------- (1 row) -- columns for the referenced table is empty ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table ON DELETE CASCADE; ERROR: number of referencing and referenced columns for foreign key disagree -- test foreign constraint creation on non-partition columns ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(id) REFERENCES referenced_table(id); ERROR: cannot create foreign key constraint DETAIL: Partition column must exist both referencing and referenced side of the foreign constraint statement and it must be in the same ordinal in both sides. -- test foreign constraint creation while column list are in incorrect order ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column); ERROR: cannot create foreign key constraint DETAIL: Partition column must exist both referencing and referenced side of the foreign constraint statement and it must be in the same ordinal in both sides. -- test foreign constraint creation while column list are not in same length ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id, test_column); ERROR: number of referencing and referenced columns for foreign key disagree -- test foreign constraint creation while existing tables does not satisfy the constraint INSERT INTO referencing_table VALUES(1, 1); ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); ERROR: insert or update on table "referencing_table_1350080" violates foreign key constraint "test_constraint_1350080" DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350076". 
CONTEXT: while executing command on localhost:57637 -- test foreign constraint with correct conditions DELETE FROM referencing_table WHERE ref_id = 1; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); -- test inserts -- test insert to referencing table while there is NO corresponding value in referenced table INSERT INTO referencing_table VALUES(1, 1); ERROR: insert or update on table "referencing_table_1350080" violates foreign key constraint "test_constraint_1350080" DETAIL: Key (ref_id)=(1) is not present in table "referenced_table_1350076". CONTEXT: while executing command on localhost:57637 -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); -- test deletes -- test delete from referenced table while there is corresponding value in referencing table DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_1350076" violates foreign key constraint "test_constraint_1350080" on table "referencing_table_1350080" DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350080". CONTEXT: while executing command on localhost:57637 -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; -- test DROP CONSTRAINT ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test foreign constraint options -- test ON DELETE CASCADE ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE CASCADE; INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; id | ref_id ----+-------- (0 rows) SELECT * FROM referenced_table; id | test_column ----+------------- (0 rows) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON DELETE NO ACTION + DEFERABLE + INITIALLY DEFERRED ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED; INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_1350076" violates foreign key constraint "test_constraint_1350080" on table "referencing_table_1350080" DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350080". 
CONTEXT: while executing command on localhost:57637 BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- (0 rows) SELECT * FROM referenced_table; id | test_column ----+------------- (0 rows) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON DELETE RESTRICT ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE RESTRICT; INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; DELETE FROM referenced_table WHERE id = 1; ERROR: update or delete on table "referenced_table_1350076" violates foreign key constraint "test_constraint_1350080" on table "referencing_table_1350080" DETAIL: Key (id)=(1) is still referenced from table "referencing_table_1350080". CONTEXT: while executing command on localhost:57637 DELETE FROM referencing_table WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- 1 | 1 (1 row) SELECT * FROM referenced_table; id | test_column ----+------------- 1 | 1 (1 row) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON UPDATE NO ACTION + DEFERABLE + INITIALLY DEFERRED ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; UPDATE referenced_table SET test_column = 10 WHERE id = 1; ERROR: update or delete on table "referenced_table_1350076" violates foreign key constraint "test_constraint_1350080" on table "referencing_table_1350080" DETAIL: Key (id, test_column)=(1, 1) is still referenced from table "referencing_table_1350080". CONTEXT: while executing command on localhost:57637 BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- 10 | 1 (1 row) SELECT * FROM referenced_table; id | test_column ----+------------- 1 | 10 (1 row) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON UPDATE RESTRICT ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT; BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; ERROR: update or delete on table "referenced_table_1350076" violates foreign key constraint "test_constraint_1350080" on table "referencing_table_1350080" DETAIL: Key (id, test_column)=(1, 10) is still referenced from table "referencing_table_1350080". 
CONTEXT: while executing command on localhost:57637 UPDATE referencing_table SET id = 20 WHERE ref_id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM referencing_table; id | ref_id ----+-------- 10 | 1 (1 row) SELECT * FROM referenced_table; id | test_column ----+------------- 1 | 10 (1 row) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test MATCH SIMPLE ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE; INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; id | ref_id ----+-------- 10 | 1 | 2 (2 rows) DELETE FROM referencing_table WHERE ref_id = 2; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test MATCH FULL ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL; INSERT INTO referencing_table VALUES(null, 2); ERROR: insert or update on table "referencing_table_1350083" violates foreign key constraint "test_constraint_1350083" DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. CONTEXT: while executing command on localhost:57638 SELECT * FROM referencing_table; id | ref_id ----+-------- 10 | 1 (1 row) ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- we no longer need those tables DROP TABLE referencing_table; DROP TABLE referenced_table; -- test cyclical foreign keys CREATE TABLE cyclic_reference_table1(id int, table2_id int, PRIMARY KEY(id, table2_id)); CREATE TABLE cyclic_reference_table2(id int, table1_id int, PRIMARY KEY(id, table1_id)); SELECT create_distributed_table('cyclic_reference_table1', 'id', 'hash'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('cyclic_reference_table2', 'table1_id', 'hash'); create_distributed_table -------------------------- (1 row) ALTER TABLE cyclic_reference_table1 ADD CONSTRAINT cyclic_constraint1 FOREIGN KEY(id, table2_id) REFERENCES cyclic_reference_table2(table1_id, id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE cyclic_reference_table2 ADD CONSTRAINT cyclic_constraint2 FOREIGN KEY(id, table1_id) REFERENCES cyclic_reference_table1(table2_id, id) DEFERRABLE INITIALLY DEFERRED; -- test insertion to a table which has cyclic foreign constraints, we expect that to fail INSERT INTO cyclic_reference_table1 VALUES(1, 1); ERROR: insert or update on table "cyclic_reference_table1_1350084" violates foreign key constraint "cyclic_constraint1_1350084" DETAIL: Key (id, table2_id)=(1, 1) is not present in table "cyclic_reference_table2_1350088". CONTEXT: while executing command on localhost:57637 -- proper insertion to table with cyclic dependency BEGIN; INSERT INTO cyclic_reference_table1 VALUES(1, 1); INSERT INTO cyclic_reference_table2 VALUES(1, 1); COMMIT; -- verify that rows are actually inserted SELECT * FROM cyclic_reference_table1; id | table2_id ----+----------- 1 | 1 (1 row) SELECT * FROM cyclic_reference_table2; id | table1_id ----+----------- 1 | 1 (1 row) -- test dropping cyclic referenced tables -- we expect those two queries to fail DROP TABLE cyclic_reference_table1; ERROR: cannot drop table cyclic_reference_table1 because other objects depend on it DETAIL: constraint cyclic_constraint2 on table cyclic_reference_table2 depends on table cyclic_reference_table1 HINT: Use DROP ... CASCADE to drop the dependent objects too. 
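-- the second DROP just below fails with the same dependency error; a minimal sketch
-- of the non-CASCADE alternative (assuming the cyclic tables and constraints created
-- above) is to break the cycle explicitly by dropping one constraint first, e.g.
--   ALTER TABLE cyclic_reference_table2 DROP CONSTRAINT cyclic_constraint2;
--   DROP TABLE cyclic_reference_table1;
--   DROP TABLE cyclic_reference_table2;
-- the tests below take the DROP ... CASCADE route instead.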
DROP TABLE cyclic_reference_table2; ERROR: cannot drop table cyclic_reference_table2 because other objects depend on it DETAIL: constraint cyclic_constraint1 on table cyclic_reference_table1 depends on table cyclic_reference_table2 HINT: Use DROP ... CASCADE to drop the dependent objects too. -- proper way of DROP with CASCADE option DROP TABLE cyclic_reference_table1 CASCADE; NOTICE: drop cascades to constraint cyclic_constraint2 on table cyclic_reference_table2 DROP TABLE cyclic_reference_table2 CASCADE; -- test creation of foreign keys in a transaction CREATE TABLE transaction_referenced_table(id int PRIMARY KEY); CREATE TABLE transaction_referencing_table(id int, ref_id int); BEGIN; ALTER TABLE transaction_referencing_table ADD CONSTRAINT transaction_fk_constraint FOREIGN KEY(ref_id) REFERENCES transaction_referenced_table(id); COMMIT; -- test insertion to referencing table, we expect that to fail INSERT INTO transaction_referencing_table VALUES(1, 1); ERROR: insert or update on table "transaction_referencing_table" violates foreign key constraint "transaction_fk_constraint" DETAIL: Key (ref_id)=(1) is not present in table "transaction_referenced_table". -- proper insertion to both referenced and referencing tables INSERT INTO transaction_referenced_table VALUES(1); INSERT INTO transaction_referencing_table VALUES(1, 1); -- verify that rows are actually inserted SELECT * FROM transaction_referenced_table; id ---- 1 (1 row) SELECT * FROM transaction_referencing_table; id | ref_id ----+-------- 1 | 1 (1 row) -- we no longer need those tables DROP TABLE transaction_referencing_table; DROP TABLE transaction_referenced_table; -- test self referencing foreign key CREATE TABLE self_referencing_table1( id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column), FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table1(id, other_column) ); SELECT create_distributed_table('self_referencing_table1', 'id', 'hash'); create_distributed_table -------------------------- (1 row) -- test insertion to self referencing table INSERT INTO self_referencing_table1 VALUES(1, 1, 1); -- we expect this query to fail INSERT INTO self_referencing_table1 VALUES(1, 2, 3); ERROR: insert or update on table "self_referencing_table1_1350092" violates foreign key constraint "self_referencing_table1_id_fkey_1350092" DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table1_1350092". 
CONTEXT: while executing command on localhost:57637 -- verify that rows are actually inserted SELECT * FROM self_referencing_table1; id | other_column | other_column_ref ----+--------------+------------------ 1 | 1 | 1 (1 row) -- we no longer need those tables DROP TABLE self_referencing_table1; -- test self referencing foreign key with ALTER TABLE CREATE TABLE self_referencing_table2(id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column)); SELECT create_distributed_table('self_referencing_table2', 'id', 'hash'); create_distributed_table -------------------------- (1 row) ALTER TABLE self_referencing_table2 ADD CONSTRAINT self_referencing_fk_constraint FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table2(id, other_column); -- test insertion to self referencing table INSERT INTO self_referencing_table2 VALUES(1, 1, 1); -- we expect this query to fail INSERT INTO self_referencing_table2 VALUES(1, 2, 3); ERROR: insert or update on table "self_referencing_table2_1350096" violates foreign key constraint "self_referencing_fk_constraint_1350096" DETAIL: Key (id, other_column_ref)=(1, 3) is not present in table "self_referencing_table2_1350096". CONTEXT: while executing command on localhost:57637 -- verify that rows are actually inserted SELECT * FROM self_referencing_table2; id | other_column | other_column_ref ----+--------------+------------------ 1 | 1 | 1 (1 row) -- we no longer need those tables DROP TABLE self_referencing_table2; -- test reference tables -- test foreign key creation on CREATE TABLE from reference table CREATE TABLE referenced_by_reference_table(id int PRIMARY KEY, other_column int); SELECT create_distributed_table('referenced_by_reference_table', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_by_reference_table(id)); SELECT create_reference_table('reference_table'); ERROR: cannot create foreign key constraint from or to reference tables -- test foreign key creation on CREATE TABLE to reference table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------ (1 row) CREATE TABLE references_to_reference_table(id int, referencing_column int REFERENCES reference_table(id)); SELECT create_distributed_table('references_to_reference_table', 'referencing_column'); ERROR: cannot create foreign key constraint from or to reference tables -- test foreign key creation on CREATE TABLE from + to reference table CREATE TABLE reference_table_second(id int, referencing_column int REFERENCES reference_table(id)); SELECT create_reference_table('reference_table_second'); ERROR: cannot create foreign key constraint from or to reference tables -- test foreign key creation on CREATE TABLE from reference table to local table CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int); DROP TABLE reference_table CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to constraint references_to_reference_table_referencing_column_fkey on table references_to_reference_table drop cascades to constraint reference_table_second_referencing_column_fkey on table reference_table_second CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_local_table(id)); SELECT create_reference_table('reference_table'); ERROR: cannot create foreign key constraint from or to reference 
tables -- test foreign key creation on CREATE TABLE on self referencing reference table CREATE TABLE self_referencing_reference_table( id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column), FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); ERROR: cannot create foreign key constraint from or to reference tables -- test foreign key creation on ALTER TABLE from reference table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------ (1 row) ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_by_reference_table(id); ERROR: cannot create foreign key constraint from or to reference tables -- test foreign key creation on ALTER TABLE to reference table DROP TABLE references_to_reference_table; CREATE TABLE references_to_reference_table(id int, referencing_column int); SELECT create_distributed_table('references_to_reference_table', 'referencing_column'); create_distributed_table -------------------------- (1 row) ALTER TABLE references_to_reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); ERROR: cannot create foreign key constraint from or to reference tables -- test foreign key creation on ALTER TABLE from + to reference table DROP TABLE reference_table_second; CREATE TABLE reference_table_second(id int, referencing_column int); SELECT create_reference_table('reference_table_second'); create_reference_table ------------------------ (1 row) ALTER TABLE reference_table_second ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); ERROR: cannot create foreign key constraint from or to reference tables -- test foreign key creation on ALTER TABLE from reference table to local table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------ (1 row) ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_local_table(id); ERROR: relation referenced_local_table is not distributed -- test foreign key creation on ALTER TABLE on self referencing reference table DROP TABLE self_referencing_reference_table; CREATE TABLE self_referencing_reference_table( id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); create_reference_table ------------------------ (1 row) ALTER TABLE self_referencing_reference_table ADD CONSTRAINT fk FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column); ERROR: cannot create foreign key constraint from or to reference tables -- we no longer need those tables DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, self_referencing_reference_table; citus-7.0.3/src/test/regress/expected/multi_function_evaluation.out000066400000000000000000000142561317107136600257250ustar00rootroot00000000000000-- -- MULTI_FUNCTION_EVALUATION -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1200000; -- nextval() works (no good way to test DEFAULT, or, by extension, SERIAL) CREATE TABLE example (key INT, 
value INT); SELECT master_create_distributed_table('example', 'key', 'hash'); master_create_distributed_table --------------------------------- (1 row) CREATE SEQUENCE example_value_seq; SELECT master_create_worker_shards('example', 1, 2); master_create_worker_shards ----------------------------- (1 row) INSERT INTO example VALUES (1, nextval('example_value_seq')); SELECT * FROM example; key | value -----+------- 1 | 1 (1 row) -- functions called by prepared statements are also evaluated PREPARE stmt AS INSERT INTO example VALUES (2); EXECUTE stmt; EXECUTE stmt; SELECT * FROM example; key | value -----+------- 1 | 1 2 | 2 | (3 rows) -- non-immutable functions inside CASE/COALESCE aren't allowed ALTER TABLE example DROP value; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE example ADD value timestamp; -- this is allowed because there are no mutable funcs in the CASE UPDATE example SET value = (CASE WHEN value > timestamp '12-12-1991' THEN timestamp '12-12-1991' ELSE value + interval '1 hour' END) WHERE key = 1; -- this is allowed because the planner strips away the CASE during constant evaluation UPDATE example SET value = CASE WHEN true THEN now() ELSE now() + interval '1 hour' END WHERE key = 1; -- this is not allowed because there're mutable functions in a CaseWhen clause -- (which we can't easily evaluate on the master) UPDATE example SET value = (CASE WHEN now() > timestamp '12-12-1991' THEN now() ELSE timestamp '10-24-1190' END) WHERE key = 1; ERROR: non-IMMUTABLE functions are not allowed in CASE or COALESCE statements -- make sure we also check defresult (the ELSE clause) UPDATE example SET value = (CASE WHEN now() > timestamp '12-12-1991' THEN timestamp '12-12-1191' ELSE now() END) WHERE key = 1; ERROR: non-IMMUTABLE functions are not allowed in CASE or COALESCE statements -- COALESCE is allowed UPDATE example SET value = COALESCE(null, null, timestamp '10-10-1000') WHERE key = 1; -- COALESCE is not allowed if there are any mutable functions UPDATE example SET value = COALESCE(now(), timestamp '10-10-1000') WHERE key = 1; ERROR: non-IMMUTABLE functions are not allowed in CASE or COALESCE statements UPDATE example SET value = COALESCE(timestamp '10-10-1000', now()) WHERE key = 1; ERROR: non-IMMUTABLE functions are not allowed in CASE or COALESCE statements -- RowCompareExpr's are checked for mutability. These are allowed: ALTER TABLE example DROP value; ALTER TABLE example ADD value boolean; ALTER TABLE example ADD time_col timestamptz; UPDATE example SET value = NULLIF(ROW(1, 2) < ROW(2, 3), true) WHERE key = 1; UPDATE example SET value = NULLIF(ROW(true, 2) < ROW(value, 3), true) WHERE key = 1; -- But this RowCompareExpr is not (it passes Var into STABLE) UPDATE example SET value = NULLIF( ROW(date '10-10-1000', 2) < ROW(time_col, 3), true ) WHERE key = 1; ERROR: STABLE functions used in UPDATE queries cannot be called with column references -- DistinctExpr's are also checked for mutability. 
These are allowed: UPDATE example SET value = 1 IS DISTINCT FROM 2 WHERE key = 1; UPDATE example SET value = date '10-10-1000' IS DISTINCT FROM timestamptz '10-10-1000' WHERE key = 1; -- But this RowCompare references the STABLE = (date, timestamptz) operator UPDATE example SET value = date '10-10-1000' IS DISTINCT FROM time_col WHERE key = 1; ERROR: STABLE functions used in UPDATE queries cannot be called with column references -- this ScalarArrayOpExpr ("scalar op ANY/ALL (array)") is allowed UPDATE example SET value = date '10-10-1000' = ANY ('{10-10-1000}'::date[]) WHERE key = 1; -- this ScalarArrayOpExpr is not, it invokes the STABLE = (timestamptz, date) operator UPDATE example SET value = time_col = ANY ('{10-10-1000}'::date[]) WHERE key = 1; ERROR: STABLE functions used in UPDATE queries cannot be called with column references -- CoerceViaIO (typoutput -> typinput, a type coercion) ALTER TABLE example DROP value; ALTER TABLE example ADD value date; -- this one is allowed UPDATE example SET value = (timestamp '10-19-2000 13:29')::date WHERE key = 1; -- this one is not UPDATE example SET value = time_col::date WHERE key = 1; ERROR: STABLE functions used in UPDATE queries cannot be called with column references -- ArrayCoerceExpr (applies elemfuncid to each elem) ALTER TABLE example DROP value; ALTER TABLE example ADD value date[]; -- this one is allowed UPDATE example SET value = array[timestamptz '10-20-2013 10:20']::date[] WHERE key = 1; -- this one is not UPDATE example SET value = array[time_col]::date[] WHERE key = 1; ERROR: STABLE functions used in UPDATE queries cannot be called with column references -- test that UPDATE and DELETE also have the functions in WHERE evaluated ALTER TABLE example DROP time_col; ALTER TABLE example DROP value; ALTER TABLE example ADD value timestamptz; INSERT INTO example VALUES (3, now()); UPDATE example SET value = timestamp '10-10-2000 00:00' WHERE key = 3 AND value > now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; key | value -----+------------------------------ 3 | Tue Oct 10 00:00:00 2000 PDT (1 row) DELETE FROM example WHERE key = 3 AND value < now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; key | value -----+------- (0 rows) -- test that function evaluation descends into expressions CREATE OR REPLACE FUNCTION stable_fn() RETURNS timestamptz STABLE LANGUAGE plpgsql AS $function$ BEGIN RAISE NOTICE 'stable_fn called'; RETURN timestamp '10-10-2000 00:00'; END; $function$; INSERT INTO example VALUES (44, (ARRAY[stable_fn(),stable_fn()])[1]); NOTICE: stable_fn called CONTEXT: PL/pgSQL function stable_fn() line 3 at RAISE NOTICE: stable_fn called CONTEXT: PL/pgSQL function stable_fn() line 3 at RAISE SELECT * FROM example WHERE key = 44; key | value -----+------------------------------ 44 | Tue Oct 10 00:00:00 2000 PDT (1 row) DROP FUNCTION stable_fn(); DROP TABLE example; citus-7.0.3/src/test/regress/expected/multi_generate_ddl_commands.out000066400000000000000000000224641317107136600261470ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 610000; -- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION table_ddl_command_array(regclass) RETURNS text[] AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test ddl command generation functionality -- 
=================================================================== -- first make sure a simple table works CREATE TABLE simple_table ( first_name text, last_name text, id bigint ); SELECT table_ddl_command_array('simple_table'); table_ddl_command_array ----------------------------------------------------------------------------------- {"CREATE TABLE public.simple_table (first_name text, last_name text, id bigint)"} (1 row) -- ensure not-null constraints are propagated CREATE TABLE not_null_table ( city text, id bigint not null ); SELECT table_ddl_command_array('not_null_table'); table_ddl_command_array ------------------------------------------------------------------------ {"CREATE TABLE public.not_null_table (city text, id bigint NOT NULL)"} (1 row) -- ensure tables not in search path are schema-prefixed CREATE SCHEMA not_in_path CREATE TABLE simple_table (id bigint); SELECT table_ddl_command_array('not_in_path.simple_table'); table_ddl_command_array ------------------------------------------------------------------------------------------------------------------------ {"CREATE SCHEMA IF NOT EXISTS not_in_path AUTHORIZATION postgres","CREATE TABLE not_in_path.simple_table (id bigint)"} (1 row) -- even more complex constraints should be preserved... CREATE TABLE column_constraint_table ( first_name text, last_name text, age int CONSTRAINT non_negative_age CHECK (age >= 0) ); SELECT table_ddl_command_array('column_constraint_table'); table_ddl_command_array ---------------------------------------------------------------------------------------------------------------------------------------------- {"CREATE TABLE public.column_constraint_table (first_name text, last_name text, age integer, CONSTRAINT non_negative_age CHECK (age >= 0))"} (1 row) -- including table constraints CREATE TABLE table_constraint_table ( bid_item_id bigint, min_bid decimal not null, max_bid decimal not null, CONSTRAINT bids_ordered CHECK (min_bid > max_bid) ); SELECT table_ddl_command_array('table_constraint_table'); table_ddl_command_array ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {"CREATE TABLE public.table_constraint_table (bid_item_id bigint, min_bid numeric NOT NULL, max_bid numeric NOT NULL, CONSTRAINT bids_ordered CHECK (min_bid > max_bid))"} (1 row) -- default values are supported CREATE TABLE default_value_table ( name text, price decimal default 0.00 ); SELECT table_ddl_command_array('default_value_table'); table_ddl_command_array ------------------------------------------------------------------------------------- {"CREATE TABLE public.default_value_table (name text, price numeric DEFAULT 0.00)"} (1 row) -- of course primary keys work... CREATE TABLE pkey_table ( first_name text, last_name text, id bigint PRIMARY KEY ); SELECT table_ddl_command_array('pkey_table'); table_ddl_command_array -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {"CREATE TABLE public.pkey_table (first_name text, last_name text, id bigint NOT NULL)","ALTER TABLE public.pkey_table ADD CONSTRAINT pkey_table_pkey PRIMARY KEY (id)"} (1 row) -- as do unique indexes... 
CREATE TABLE unique_table ( user_id bigint not null, username text UNIQUE not null ); SELECT table_ddl_command_array('unique_table'); table_ddl_command_array ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {"CREATE TABLE public.unique_table (user_id bigint NOT NULL, username text NOT NULL)","ALTER TABLE public.unique_table ADD CONSTRAINT unique_table_username_key UNIQUE (username)"} (1 row) -- and indexes used for clustering CREATE TABLE clustered_table ( data json not null, received_at timestamp not null ); CREATE INDEX clustered_time_idx ON clustered_table (received_at); CLUSTER clustered_table USING clustered_time_idx; SELECT table_ddl_command_array('clustered_table'); table_ddl_command_array ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {"CREATE TABLE public.clustered_table (data json NOT NULL, received_at timestamp without time zone NOT NULL)","CREATE INDEX clustered_time_idx ON public.clustered_table USING btree (received_at) TABLESPACE pg_default","ALTER TABLE public.clustered_table CLUSTER ON clustered_time_idx"} (1 row) -- fiddly things like storage type and statistics also work CREATE TABLE fiddly_table ( hostname char(255) not null, os char(255) not null, ip_addr inet not null, traceroute text not null ); ALTER TABLE fiddly_table ALTER hostname SET STORAGE PLAIN, ALTER os SET STORAGE MAIN, ALTER ip_addr SET STORAGE EXTENDED, ALTER traceroute SET STORAGE EXTERNAL, ALTER ip_addr SET STATISTICS 500; SELECT table_ddl_command_array('fiddly_table'); table_ddl_command_array --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {"CREATE TABLE public.fiddly_table (hostname character(255) NOT NULL, os character(255) NOT NULL, ip_addr inet NOT NULL, traceroute text NOT NULL)","ALTER TABLE ONLY public.fiddly_table ALTER COLUMN hostname SET STORAGE PLAIN, ALTER COLUMN os SET STORAGE MAIN, ALTER COLUMN ip_addr SET STORAGE EXTENDED, ALTER COLUMN ip_addr SET STATISTICS 500, ALTER COLUMN traceroute SET STORAGE EXTERNAL"} (1 row) -- test foreign tables using fake FDW CREATE FOREIGN TABLE foreign_table ( id bigint not null, full_name text not null default '' ) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true'); SELECT table_ddl_command_array('foreign_table'); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined table_ddl_command_array -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {"CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw","CREATE FOREIGN TABLE public.foreign_table (id bigint NOT NULL, full_name text DEFAULT ''::text NOT NULL) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true')"} (1 row) -- propagating views is not supported 
CREATE VIEW local_view AS SELECT * FROM simple_table; SELECT table_ddl_command_array('local_view'); ERROR: local_view is not a regular, foreign or partitioned table -- clean up DROP VIEW IF EXISTS local_view; DROP FOREIGN TABLE IF EXISTS foreign_table; DROP TABLE IF EXISTS simple_table, not_null_table, column_constraint_table, table_constraint_table, default_value_table, pkey_table, unique_table, clustered_table, fiddly_table; citus-7.0.3/src/test/regress/expected/multi_hash_pruning.out000066400000000000000000000200521317107136600243250ustar00rootroot00000000000000-- -- MULTI_HASH_PRUNING -- -- Tests for shard and join pruning logic on hash partitioned tables. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000; -- Create a table partitioned on integer column and update partition type to -- hash. Then load data into this table and update shard min max values with -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026, -- 1134484726, -28094569 and -1011077333. CREATE TABLE orders_hash_partitioned ( o_orderkey integer, o_custkey integer, o_orderstatus char(1), o_totalprice decimal(15,2), o_orderdate date, o_orderpriority char(15), o_clerk char(15), o_shippriority integer, o_comment varchar(79) ); SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('orders_hash_partitioned', 4, 1); master_create_worker_shards ----------------------------- (1 row) SET client_min_messages TO DEBUG2; -- Check that we can prune shards for simple cases, boolean expressions and -- immutable functions. SELECT count(*) FROM orders_hash_partitioned; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa'; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) -- disable router planning SET citus.enable_router_execution TO 'false'; SELECT count(*) FROM orders_hash_partitioned; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa'; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1); count ------- 0 (1 row) SET citus.enable_router_execution TO DEFAULT; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL; count ------- 0 (1 row) SELECT count(*) 
FROM orders_hash_partitioned WHERE o_orderkey is not NULL; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey = 2; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_clerk = 'aaa'; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa'); count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey is NULL; count ------- 0 (1 row) SELECT count(*) FROM (SELECT o_orderkey FROM orders_hash_partitioned WHERE o_orderkey = 1) AS orderkeys; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) SET client_min_messages TO DEFAULT; -- Check that we support runing for ANY/IN with literal. SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY ('{1,2,3}'); count ------- 13 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (1,2,3); count ------- 13 (1 row) -- Check whether we can deal with null arrays SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL); count ------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL); count ------- 0 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL) OR TRUE; count ------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL) OR TRUE; count ------- 12000 (1 row) -- Check whether we support IN/ANY in subquery SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem_hash_part); count ------- 12000 (1 row) SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (SELECT l_orderkey FROM lineitem_hash_part); count ------- 12000 (1 row) -- Check whether we support IN/ANY in subquery with append and range distributed table SELECT count(*) FROM lineitem WHERE l_orderkey = ANY ('{1,2,3}'); count ------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey IN (1,2,3); count ------- 13 (1 row) SELECT count(*) FROM lineitem WHERE l_orderkey = ANY(NULL) OR TRUE; count ------- 12000 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY ('{1,2,3}'); count ------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey IN (1,2,3); count ------- 13 (1 row) SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY(NULL) OR TRUE; count ------- 12000 (1 row) SET client_min_messages TO DEBUG2; -- Check that we don't show the message if the operator is not -- equality operator SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey < ALL ('{1,2,3}'); count ------- 0 (1 row) -- Check that we don't give a spurious hint message when non-partition -- columns are used with ANY/IN/ALL SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_totalprice IN (2, 5); count ------- 0 (1 row) -- Check that we cannot prune for mutable functions. SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random(); count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() OR o_orderkey = 1; count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() AND o_orderkey = 1; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) -- Check that we can do join pruning. 
SELECT count(*) FROM orders_hash_partitioned orders1, orders_hash_partitioned orders2 WHERE orders1.o_orderkey = orders2.o_orderkey; DEBUG: join prunable for intervals [-2147483648,-1073741825] and [-1073741824,-1] DEBUG: join prunable for intervals [-2147483648,-1073741825] and [0,1073741823] DEBUG: join prunable for intervals [-2147483648,-1073741825] and [1073741824,2147483647] DEBUG: join prunable for intervals [-1073741824,-1] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [-1073741824,-1] and [0,1073741823] DEBUG: join prunable for intervals [-1073741824,-1] and [1073741824,2147483647] DEBUG: join prunable for intervals [0,1073741823] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [0,1073741823] and [-1073741824,-1] DEBUG: join prunable for intervals [0,1073741823] and [1073741824,2147483647] DEBUG: join prunable for intervals [1073741824,2147483647] and [-2147483648,-1073741825] DEBUG: join prunable for intervals [1073741824,2147483647] and [-1073741824,-1] DEBUG: join prunable for intervals [1073741824,2147483647] and [0,1073741823] count ------- 0 (1 row) SELECT count(*) FROM orders_hash_partitioned orders1, orders_hash_partitioned orders2 WHERE orders1.o_orderkey = orders2.o_orderkey AND orders1.o_orderkey = 1 AND orders2.o_orderkey is NULL; DEBUG: Creating router plan DEBUG: Plan is router executable count ------- 0 (1 row) citus-7.0.3/src/test/regress/expected/multi_index_statements.out000066400000000000000000000416401317107136600252240ustar00rootroot00000000000000-- -- MULTI_INDEX_STATEMENTS -- -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed -- tables. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 640000; -- -- CREATE TEST TABLES -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102080; CREATE TABLE index_test_range(a int, b int, c int); SELECT master_create_distributed_table('index_test_range', 'a', 'range'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('index_test_range'); master_create_empty_shard --------------------------- 102080 (1 row) SELECT master_create_empty_shard('index_test_range'); master_create_empty_shard --------------------------- 102081 (1 row) CREATE TABLE index_test_hash(a int, b int, c int); SELECT master_create_distributed_table('index_test_hash', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('index_test_hash', 8, 2); master_create_worker_shards ----------------------------- (1 row) CREATE TABLE index_test_append(a int, b int, c int); SELECT master_create_distributed_table('index_test_append', 'a', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('index_test_append'); master_create_empty_shard --------------------------- 102090 (1 row) SELECT master_create_empty_shard('index_test_append'); master_create_empty_shard --------------------------- 102091 (1 row) -- -- CREATE INDEX -- -- Verify that we can create different types of indexes CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE INDEX lineitem_partkey_desc_index ON lineitem (l_partkey DESC); CREATE INDEX lineitem_partial_index ON lineitem (l_shipdate) WHERE l_shipdate < '1995-01-01'; CREATE INDEX lineitem_colref_index ON lineitem 
(record_ne(lineitem.*, NULL)); SET client_min_messages = ERROR; -- avoid version dependant warning about WAL CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey); CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range(a); CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range(a,b); CREATE UNIQUE INDEX index_test_hash_index_a ON index_test_hash(a); CREATE UNIQUE INDEX index_test_hash_index_a_b ON index_test_hash(a,b); CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash(a,b) WHERE c IS NOT NULL; CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range(a,b) WHERE c IS NOT NULL; RESET client_min_messages; -- Verify that we handle if not exists statements correctly CREATE INDEX lineitem_orderkey_index on lineitem(l_orderkey); ERROR: relation "lineitem_orderkey_index" already exists CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on lineitem(l_orderkey); NOTICE: relation "lineitem_orderkey_index" already exists, skipping CREATE INDEX IF NOT EXISTS lineitem_orderkey_index_new on lineitem(l_orderkey); -- Verify if not exists behavior with an index with same name on a different table CREATE INDEX lineitem_orderkey_index on index_test_hash(a); ERROR: relation "lineitem_orderkey_index" already exists CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on index_test_hash(a); NOTICE: relation "lineitem_orderkey_index" already exists, skipping -- Verify that we can create indexes concurrently CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON lineitem (l_orderkey); -- Verify that no-name local CREATE INDEX CONCURRENTLY works CREATE TABLE local_table (id integer, name text); CREATE INDEX CONCURRENTLY ON local_table(id); DROP TABLE local_table; -- Verify that all indexes got created on the master node and one of the workers SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef ------------+------------------+------------------------------------+------------+--------------------------------------------------------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON index_test_hash USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON index_test_hash USING btree (a, b) public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash USING btree (a, b) WHERE (c IS NOT NULL) public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range USING btree (a) public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range USING btree (a, b) public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range USING btree (a, b) WHERE (c IS NOT NULL) public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON lineitem USING btree (record_ne(lineitem.*, NULL::record)) public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey) public | lineitem | 
lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date) public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON lineitem USING btree (l_partkey DESC) public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON lineitem USING btree (l_orderkey, l_linenumber) public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON lineitem USING btree (l_shipdate) (15 rows) \c - - - :worker_1_port SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1); count ------- 9 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%'; count ------- 24 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%'; count ------- 6 (1 row) SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%'; count ------- 0 (1 row) \c - - - :master_port -- Verify that we error out on unsupported statement types CREATE UNIQUE INDEX try_index ON lineitem (l_orderkey); ERROR: creating unique indexes on append-partitioned tables is currently unsupported CREATE INDEX try_index ON lineitem (l_orderkey) TABLESPACE newtablespace; ERROR: specifying tablespaces with CREATE INDEX statements is currently unsupported CREATE UNIQUE INDEX try_unique_range_index ON index_test_range(b); ERROR: creating unique indexes on non-partition columns is currently unsupported CREATE UNIQUE INDEX try_unique_range_index_partial ON index_test_range(b) WHERE c IS NOT NULL; ERROR: creating unique indexes on non-partition columns is currently unsupported CREATE UNIQUE INDEX try_unique_hash_index ON index_test_hash(b); ERROR: creating unique indexes on non-partition columns is currently unsupported CREATE UNIQUE INDEX try_unique_hash_index_partial ON index_test_hash(b) WHERE c IS NOT NULL; ERROR: creating unique indexes on non-partition columns is currently unsupported CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(b); ERROR: creating unique indexes on append-partitioned tables is currently unsupported CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(a); ERROR: creating unique indexes on append-partitioned tables is currently unsupported CREATE UNIQUE INDEX try_unique_append_index_a_b ON index_test_append(a,b); ERROR: creating unique indexes on append-partitioned tables is currently unsupported -- Verify that we error out in case of postgres errors on supported statement -- types. CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey); ERROR: relation "lineitem_orderkey_index" already exists CREATE INDEX try_index ON lineitem USING gist (l_orderkey); ERROR: data type bigint has no default operator class for access method "gist" HINT: You must specify an operator class for the index or define a default operator class for the data type. 
CREATE INDEX try_index ON lineitem (non_existent_column); ERROR: column "non_existent_column" does not exist CREATE INDEX ON lineitem (l_orderkey); ERROR: creating index without a name on a distributed table is currently unsupported -- Verify that none of failed indexes got created on the master node SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef ------------+------------------+------------------------------------+------------+--------------------------------------------------------------------------------------------------------------------- public | index_test_hash | index_test_hash_index_a | | CREATE UNIQUE INDEX index_test_hash_index_a ON index_test_hash USING btree (a) public | index_test_hash | index_test_hash_index_a_b | | CREATE UNIQUE INDEX index_test_hash_index_a_b ON index_test_hash USING btree (a, b) public | index_test_hash | index_test_hash_index_a_b_partial | | CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash USING btree (a, b) WHERE (c IS NOT NULL) public | index_test_range | index_test_range_index_a | | CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range USING btree (a) public | index_test_range | index_test_range_index_a_b | | CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range USING btree (a, b) public | index_test_range | index_test_range_index_a_b_partial | | CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range USING btree (a, b) WHERE (c IS NOT NULL) public | lineitem | lineitem_colref_index | | CREATE INDEX lineitem_colref_index ON lineitem USING btree (record_ne(lineitem.*, NULL::record)) public | lineitem | lineitem_concurrently_index | | CREATE INDEX lineitem_concurrently_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_hash_index | | CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey) public | lineitem | lineitem_orderkey_index | | CREATE INDEX lineitem_orderkey_index ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_orderkey_index_new | | CREATE INDEX lineitem_orderkey_index_new ON lineitem USING btree (l_orderkey) public | lineitem | lineitem_partial_index | | CREATE INDEX lineitem_partial_index ON lineitem USING btree (l_shipdate) WHERE (l_shipdate < '01-01-1995'::date) public | lineitem | lineitem_partkey_desc_index | | CREATE INDEX lineitem_partkey_desc_index ON lineitem USING btree (l_partkey DESC) public | lineitem | lineitem_pkey | | CREATE UNIQUE INDEX lineitem_pkey ON lineitem USING btree (l_orderkey, l_linenumber) public | lineitem | lineitem_time_index | | CREATE INDEX lineitem_time_index ON lineitem USING btree (l_shipdate) (15 rows) -- -- DROP INDEX -- -- Verify that we can't drop multiple indexes in a single command DROP INDEX lineitem_orderkey_index, lineitem_partial_index; ERROR: cannot drop multiple distributed objects in a single command HINT: Try dropping each object in a separate DROP command. 
-- Verify that we can successfully drop indexes DROP INDEX lineitem_orderkey_index; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' DROP INDEX lineitem_orderkey_index_new; DROP INDEX lineitem_partkey_desc_index; DROP INDEX lineitem_partial_index; DROP INDEX lineitem_colref_index; -- Verify that we handle if exists statements correctly DROP INDEX non_existent_index; ERROR: index "non_existent_index" does not exist DROP INDEX IF EXISTS non_existent_index; NOTICE: index "non_existent_index" does not exist, skipping DROP INDEX IF EXISTS lineitem_orderkey_hash_index; DROP INDEX lineitem_orderkey_hash_index; ERROR: index "lineitem_orderkey_hash_index" does not exist DROP INDEX index_test_range_index_a; DROP INDEX index_test_range_index_a_b; DROP INDEX index_test_range_index_a_b_partial; DROP INDEX index_test_hash_index_a; DROP INDEX index_test_hash_index_a_b; DROP INDEX index_test_hash_index_a_b_partial; -- Verify that we can drop indexes concurrently DROP INDEX CONCURRENTLY lineitem_concurrently_index; -- Verify that all the indexes are dropped from the master and one worker node. -- As there's a primary key, exclude those from this check. SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; indrelid | indexrelid ----------+------------ (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef ------------+-----------+-----------+------------+---------- (0 rows) \c - - - :worker_1_port SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; indrelid | indexrelid ----------+------------ (0 rows) SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; schemaname | tablename | indexname | tablespace | indexdef ------------+-----------+-----------+------------+---------- (0 rows) -- create index that will conflict with master operations CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b); \c - - - :master_port -- should fail because worker index already exists CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); ERROR: CONCURRENTLY-enabled index command failed DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; Index Valid? -------------- f (1 row) -- we can clean it up and recreate with a DROP IF EXISTS DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; Index Valid?
-------------- t (1 row) \c - - - :worker_1_port -- now drop shard index to test partial master DROP failure DROP INDEX CONCURRENTLY ith_b_idx_102089; \c - - - :master_port DROP INDEX CONCURRENTLY ith_b_idx; ERROR: CONCURRENTLY-enabled index command failed DETAIL: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. HINT: Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index, then retry the original command. -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; Index Valid? -------------- f (1 row) -- final clean up DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; -- Drop created tables DROP TABLE index_test_range; DROP TABLE index_test_hash; DROP TABLE index_test_append; citus-7.0.3/src/test/regress/expected/multi_insert_select.out000066400000000000000000004766671317107136600245360ustar00rootroot00000000000000-- -- MULTI_INSERT_SELECT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13300000; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 13300000; -- create co-located tables SET citus.shard_count = 4; SET citus.shard_replication_factor = 2; CREATE TABLE raw_events_first (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_first', 'user_id'); create_distributed_table -------------------------- (1 row) CREATE TABLE raw_events_second (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_second', 'user_id'); create_distributed_table -------------------------- (1 row) CREATE TABLE agg_events (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp, UNIQUE(user_id, value_1_agg)); SELECT create_distributed_table('agg_events', 'user_id');; create_distributed_table -------------------------- (1 row) -- create the reference table as well CREATE TABLE reference_table (user_id int); SELECT create_reference_table('reference_table'); create_reference_table ------------------------ (1 row) CREATE TABLE insert_select_varchar_test (key varchar, value int); SELECT create_distributed_table('insert_select_varchar_test', 'key', 'hash'); create_distributed_table -------------------------- (1 row) -- set back to the defaults SET citus.shard_count = DEFAULT; SET citus.shard_replication_factor = DEFAULT; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (1, now(), 10, 100, 1000.1, 10000); INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (2, now(), 20, 200, 2000.1, 20000); INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (3, now(), 30, 300, 3000.1, 30000); INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (4, now(), 40, 400, 4000.1, 40000); INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (5, now(), 50, 500, 5000.1, 50000); INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (6, now(), 60, 600, 6000.1, 60000); SET client_min_messages TO DEBUG2; -- raw table to raw table INSERT INTO raw_events_second SELECT * FROM raw_events_first; DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT 
user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) DEBUG: Plan is router executable -- see that our first multi shard INSERT...SELECT works as expected SET client_min_messages TO INFO; SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id; user_id --------- 1 5 3 4 6 2 (6 rows) -- see that we get unique violations INSERT INTO raw_events_second SELECT * FROM raw_events_first; ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300004" DETAIL: Key (user_id, value_1)=(1, 10) already exists.
CONTEXT: while executing command on localhost:57637 -- stable functions should be allowed INSERT INTO raw_events_second (user_id, time) SELECT user_id, now() FROM raw_events_first WHERE user_id < 0; INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE time > now() + interval '1 day'; -- hide version-dependent PL/pgSQL context messages \set VERBOSITY terse -- make sure we evaluate stable functions on the master, once CREATE OR REPLACE FUNCTION evaluate_on_master() RETURNS int LANGUAGE plpgsql STABLE AS $function$ BEGIN RAISE NOTICE 'evaluating on master'; RETURN 0; END; $function$; INSERT INTO raw_events_second (user_id, value_1) SELECT user_id, evaluate_on_master() FROM raw_events_first WHERE user_id < 0; NOTICE: evaluating on master -- make sure we don't evaluate stable functions with column arguments CREATE OR REPLACE FUNCTION evaluate_on_master(x int) RETURNS int LANGUAGE plpgsql STABLE AS $function$ BEGIN RAISE NOTICE 'evaluating on master'; RETURN x; END; $function$; INSERT INTO raw_events_second (user_id, value_1) SELECT user_id, evaluate_on_master(value_1) FROM raw_events_first WHERE user_id = 0; WARNING: function public.evaluate_on_master(integer) does not exist WARNING: function public.evaluate_on_master(integer) does not exist ERROR: could not modify any active placements \set VERBOSITY default -- add one more row INSERT INTO raw_events_first (user_id, time) VALUES (7, now()); -- try a single shard query SET client_min_messages TO DEBUG2; INSERT INTO raw_events_second (user_id, time) SELECT user_id, time FROM raw_events_first WHERE user_id = 7; DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, "time") SELECT user_id, "time" FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id = 7) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away DEBUG: Plan is router executable SET client_min_messages TO INFO; -- add one more row INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (8, now(), 80, 800, 8000, 80000); -- reorder columns SET client_min_messages TO DEBUG2; INSERT INTO raw_events_second (value_2, value_1, value_3, value_4, user_id, time) SELECT value_2, value_1, value_3, value_4, user_id, time FROM raw_events_first WHERE user_id = 8; DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id = 8) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away DEBUG: Plan is router executable -- a zero shard select INSERT INTO raw_events_second (value_2, value_1, value_3, value_4, user_id, time) SELECT value_2, value_1, value_3, value_4, user_id, time FROM raw_events_first WHERE false; DEBUG: Skipping target shard interval 13300004 since 
SELECT query for it pruned away DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away DEBUG: Plan is router executable -- another zero shard select INSERT INTO raw_events_second (value_2, value_1, value_3, value_4, user_id, time) SELECT value_2, value_1, value_3, value_4, user_id, time FROM raw_events_first WHERE 0 != 0; DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away DEBUG: Plan is router executable -- add one more row SET client_min_messages TO INFO; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (9, now(), 90, 900, 9000, 90000); -- show that RETURNING also works SET client_min_messages TO DEBUG2; INSERT INTO raw_events_second (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM raw_events_first WHERE value_3 = 9000 RETURNING *; DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300000 raw_events_first WHERE ((value_3 = (9000)::double precision) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300001 raw_events_first WHERE ((value_3 = (9000)::double precision) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300002 raw_events_first WHERE ((value_3 = (9000)::double precision) AND ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE ((value_3 = (9000)::double precision) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: Plan is router executable user_id | time | value_1 | value_2 | value_3 | value_4 ---------+------+---------+---------+---------+--------- 9 | | 90 | | 9000 | (1 row) -- hits two 
shards INSERT INTO raw_events_second (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM raw_events_first WHERE user_id = 9 OR user_id = 16 RETURNING *; DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300001 raw_events_first WHERE (((user_id = 9) OR (user_id = 16)) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM public.raw_events_first_13300003 raw_events_first WHERE (((user_id = 9) OR (user_id = 16)) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) RETURNING citus_table_alias.user_id, citus_table_alias."time", citus_table_alias.value_1, citus_table_alias.value_2, citus_table_alias.value_3, citus_table_alias.value_4 DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "raw_events_second_user_id_value_1_key_13300007" DETAIL: Key (user_id, value_1)=(9, 90) already exists. CONTEXT: while executing command on localhost:57638 -- now do some aggregations INSERT INTO agg_events SELECT user_id, sum(value_1), avg(value_2), sum(value_3), count(value_4) FROM raw_events_first GROUP BY user_id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_2_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, avg(value_2) AS avg, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) GROUP BY user_id DEBUG: Plan is router executable -- group by column not exists on the SELECT target list INSERT 
INTO agg_events (value_3_agg, value_4_agg, value_1_agg, user_id) SELECT sum(value_3), count(value_4), sum(value_1), user_id FROM raw_events_first GROUP BY value_2, user_id RETURNING *; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_3_agg, value_4_agg) SELECT user_id, sum(value_1) AS sum, sum(value_3) AS sum, count(value_4) AS count FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) GROUP BY value_2, user_id RETURNING citus_table_alias.user_id, citus_table_alias.value_1_agg, citus_table_alias.value_2_agg, citus_table_alias.value_3_agg, citus_table_alias.value_4_agg, citus_table_alias.agg_time DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists. 
CONTEXT: while executing command on localhost:57638 -- some subquery tests INSERT INTO agg_events (value_1_agg, user_id) SELECT SUM(value_1), id FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id) AS foo GROUP BY id ORDER BY id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) GROUP BY id ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) GROUP BY id ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) GROUP BY id ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT id, sum(value_1) AS sum FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id)) foo WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) GROUP BY id ORDER BY id DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists. 
CONTEXT: while executing command on localhost:57638 -- subquery one more level depth INSERT INTO agg_events (value_4_agg, value_1_agg, user_id) SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id) AS foo ORDER BY id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) ORDER BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg, value_4_agg) SELECT id, v1, v4 FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id) foo WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) ORDER BY id DEBUG: Plan is router executable ERROR: duplicate key value violates unique constraint "agg_events_user_id_value_1_agg_key_13300008" DETAIL: Key (user_id, value_1_agg)=(1, 10) already exists. 
CONTEXT: while executing command on localhost:57638 -- join between subqueries INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id); DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((worker_hash(f2.id) >= '-2147483648'::integer) AND (worker_hash(f2.id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((worker_hash(f2.id) >= '-1073741824'::integer) AND (worker_hash(f2.id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((worker_hash(f2.id) >= 0) AND (worker_hash(f2.id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 
raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((worker_hash(f2.id) >= 1073741824) AND (worker_hash(f2.id) <= 2147483647)) DEBUG: Plan is router executable -- add one more level subqueris on top of subquery JOINs INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) GROUP BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) GROUP BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_4_agg) SELECT id, 
max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) GROUP BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) GROUP BY id DEBUG: Plan is router executable -- subqueries in WHERE clause INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE user_id = 2); DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id = 2))) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) DEBUG: Plan is router executable INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE user_id != 2 AND value_1 = 2000) ON conflict (user_id, value_1) DO NOTHING; DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.user_id <> 2) AND (raw_events_second.value_1 = 2000)))) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) ON CONFLICT(user_id, value_1) DO NOTHING DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT 
user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second WHERE ((raw_events_second.user_id <> 2) AND (raw_events_second.value_1 = 2000)))) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) ON CONFLICT(user_id, value_1) DO NOTHING DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second WHERE ((raw_events_second.user_id <> 2) AND (raw_events_second.value_1 = 2000)))) AND ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823))) ON CONFLICT(user_id, value_1) DO NOTHING DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE ((raw_events_second.user_id <> 2) AND (raw_events_second.value_1 = 2000)))) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) ON CONFLICT(user_id, value_1) DO NOTHING DEBUG: Plan is router executable INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE false); DEBUG: Skipping target shard interval 13300004 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away DEBUG: Plan is router executable INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE value_1 = 1000 OR value_1 = 2000 OR value_1 = 3000); DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second WHERE ((raw_events_second.value_1 = 1000) OR (raw_events_second.value_1 = 2000) OR (raw_events_second.value_1 = 3000)))) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second WHERE ((raw_events_second.value_1 = 1000) OR (raw_events_second.value_1 = 2000) OR (raw_events_second.value_1 = 3000)))) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second WHERE ((raw_events_second.value_1 = 1000) OR (raw_events_second.value_1 = 2000) OR (raw_events_second.value_1 = 3000)))) 
AND ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second WHERE ((raw_events_second.value_1 = 1000) OR (raw_events_second.value_1 = 2000) OR (raw_events_second.value_1 = 3000)))) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) DEBUG: Plan is router executable -- lets mix subqueries in FROM clause and subqueries in WHERE INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 1000) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second); DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (1000)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second)) AND ((worker_hash(f2.id) >= '-2147483648'::integer) AND (worker_hash(f2.id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (1000)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second)) AND ((worker_hash(f2.id) >= '-1073741824'::integer) AND (worker_hash(f2.id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, 
public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (1000)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second)) AND ((worker_hash(f2.id) >= 0) AND (worker_hash(f2.id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.reference_table_13300012 reference_table WHERE (raw_events_first.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (1000)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE ((f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second)) AND ((worker_hash(f2.id) >= 1073741824) AND (worker_hash(f2.id) <= 2147483647))) DEBUG: Plan is router executable -- some UPSERTS INSERT INTO agg_events AS ae ( user_id, value_1_agg, agg_time ) SELECT user_id, value_1, time FROM raw_events_first ON conflict (user_id, value_1_agg) DO UPDATE SET agg_time = EXCLUDED.agg_time WHERE ae.agg_time < EXCLUDED.agg_time; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) ON CONFLICT(user_id, 
value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) DEBUG: Plan is router executable -- upserts with returning INSERT INTO agg_events AS ae ( user_id, value_1_agg, agg_time ) SELECT user_id, value_1, time FROM raw_events_first ON conflict (user_id, value_1_agg) DO UPDATE SET agg_time = EXCLUDED.agg_time WHERE ae.agg_time < EXCLUDED.agg_time RETURNING user_id, value_1_agg; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS ae (user_id, value_1_agg, agg_time) SELECT user_id, value_1, "time" FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) ON CONFLICT(user_id, value_1_agg) DO UPDATE SET agg_time = excluded.agg_time WHERE (ae.agg_time < excluded.agg_time) RETURNING ae.user_id, ae.value_1_agg DEBUG: Plan is router executable user_id | value_1_agg ---------+------------- 7 | (1 row) INSERT INTO agg_events (user_id, value_1_agg) SELECT user_id, sum(value_1 + value_2) FROM raw_events_first GROUP BY user_id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) AS sum FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) AS sum FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) AS sum FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) AS sum 
FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) GROUP BY user_id DEBUG: Plan is router executable -- FILTER CLAUSE INSERT INTO agg_events (user_id, value_1_agg) SELECT user_id, sum(value_1 + value_2) FILTER (where value_3 = 15) FROM raw_events_first GROUP BY user_id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) FILTER (WHERE (value_3 = (15)::double precision)) AS sum FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) FILTER (WHERE (value_3 = (15)::double precision)) AS sum FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) FILTER (WHERE (value_3 = (15)::double precision)) AS sum FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) GROUP BY user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT user_id, sum((value_1 + value_2)) FILTER (WHERE (value_3 = (15)::double precision)) AS sum FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) GROUP BY user_id DEBUG: Plan is router executable -- a test with reference table JOINs INSERT INTO agg_events (user_id, value_1_agg) SELECT raw_events_first.user_id, sum(value_1) FROM reference_table, raw_events_first WHERE raw_events_first.user_id = reference_table.user_id GROUP BY raw_events_first.user_id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum FROM public.reference_table_13300012 reference_table, public.raw_events_first_13300000 raw_events_first WHERE ((raw_events_first.user_id = reference_table.user_id) AND ((worker_hash(raw_events_first.user_id) >= '-2147483648'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1073741825'::integer))) GROUP BY raw_events_first.user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum FROM public.reference_table_13300012 reference_table, public.raw_events_first_13300001 raw_events_first WHERE ((raw_events_first.user_id = reference_table.user_id) AND ((worker_hash(raw_events_first.user_id) >= '-1073741824'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1'::integer))) GROUP BY raw_events_first.user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum FROM public.reference_table_13300012 reference_table, public.raw_events_first_13300002 raw_events_first WHERE ((raw_events_first.user_id = reference_table.user_id) AND 
((worker_hash(raw_events_first.user_id) >= 0) AND (worker_hash(raw_events_first.user_id) <= 1073741823))) GROUP BY raw_events_first.user_id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT raw_events_first.user_id, sum(raw_events_first.value_1) AS sum FROM public.reference_table_13300012 reference_table, public.raw_events_first_13300003 raw_events_first WHERE ((raw_events_first.user_id = reference_table.user_id) AND ((worker_hash(raw_events_first.user_id) >= 1073741824) AND (worker_hash(raw_events_first.user_id) <= 2147483647))) GROUP BY raw_events_first.user_id DEBUG: Plan is router executable -- a note on the outer joins is that -- we filter out outer join results -- where the partition column returns -- NULL. Thus, we could INSERT fewer rows -- than we expect from the subquery result. -- see the following tests SET client_min_messages TO INFO; -- we don't want to see constraint violations, so truncate first TRUNCATE agg_events; -- add a row to the second table to make the table contents different INSERT INTO raw_events_second (user_id, time, value_1, value_2, value_3, value_4) VALUES (10, now(), 100, 10000, 10000, 100000); DELETE FROM raw_events_second WHERE user_id = 2; -- we select 10 rows SELECT t1.user_id AS col1, t2.user_id AS col2 FROM raw_events_first t1 FULL JOIN raw_events_second t2 ON t1.user_id = t2.user_id ORDER BY t1.user_id, t2.user_id; col1 | col2 ------+------ 1 | 1 2 | 3 | 3 4 | 4 5 | 5 6 | 6 7 | 7 8 | 8 9 | 9 | 10 (10 rows) SET client_min_messages TO DEBUG2; -- we insert 9 rows since we filtered out -- NULL partition column values INSERT INTO agg_events (user_id, value_1_agg) SELECT t1.user_id AS col1, t2.user_id AS col2 FROM raw_events_first t1 FULL JOIN raw_events_second t2 ON t1.user_id = t2.user_id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT t1.user_id AS col1, t2.user_id AS col2 FROM (public.raw_events_first_13300000 t1 FULL JOIN public.raw_events_second_13300004 t2 ON ((t1.user_id = t2.user_id))) WHERE ((worker_hash(t1.user_id) >= '-2147483648'::integer) AND (worker_hash(t1.user_id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT t1.user_id AS col1, t2.user_id AS col2 FROM (public.raw_events_first_13300001 t1 FULL JOIN public.raw_events_second_13300005 t2 ON ((t1.user_id = t2.user_id))) WHERE ((worker_hash(t1.user_id) >= '-1073741824'::integer) AND (worker_hash(t1.user_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT t1.user_id AS col1, t2.user_id AS col2 FROM (public.raw_events_first_13300002 t1 FULL JOIN public.raw_events_second_13300006 t2 ON ((t1.user_id = t2.user_id))) WHERE ((worker_hash(t1.user_id) >= 0) AND (worker_hash(t1.user_id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT t1.user_id AS col1, t2.user_id AS col2 FROM (public.raw_events_first_13300003 t1 FULL JOIN public.raw_events_second_13300007 t2 ON ((t1.user_id = t2.user_id))) WHERE ((worker_hash(t1.user_id) >= 1073741824) AND (worker_hash(t1.user_id) <= 2147483647)) DEBUG: Plan is router executable SET client_min_messages TO INFO; -- see that the results are different from the SELECT query SELECT user_id, value_1_agg FROM agg_events ORDER BY user_id, value_1_agg; user_id | value_1_agg
---------+------------- 1 | 1 2 | 3 | 3 4 | 4 5 | 5 6 | 6 7 | 7 8 | 8 9 | 9 (9 rows) -- we don't want to see constraint violations, so truncate first SET client_min_messages TO INFO; TRUNCATE agg_events; SET client_min_messages TO DEBUG2; -- DISTINCT clause INSERT INTO agg_events (value_1_agg, user_id) SELECT DISTINCT value_1, user_id FROM raw_events_first; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300000 raw_events_first WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300001 raw_events_first WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300002 raw_events_first WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_1_agg) SELECT DISTINCT user_id, value_1 FROM public.raw_events_first_13300003 raw_events_first WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) DEBUG: Plan is router executable -- we don't want to see constraint violations, so truncate first SET client_min_messages TO INFO; truncate agg_events; SET client_min_messages TO DEBUG2; -- we do not support DISTINCT ON clauses INSERT INTO agg_events (value_1_agg, user_id) SELECT DISTINCT ON (value_1) value_1, user_id FROM raw_events_first; DEBUG: DISTINCT ON clauses are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: could not run distributed query with DISTINCT clause HINT: Consider using an equality filter on the distributed table's partition column. -- We do not support some CTEs WITH fist_table_agg AS (SELECT sum(value_1) as v1_agg, user_id FROM raw_events_first GROUP BY user_id) INSERT INTO agg_events (value_1_agg, user_id) SELECT v1_agg, user_id FROM fist_table_agg; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. -- We don't support CTEs that consist of const values either INSERT INTO agg_events WITH sub_cte AS (SELECT 1) SELECT raw_events_first.user_id, (SELECT * FROM sub_cte) FROM raw_events_first; DEBUG: Subqueries without relations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: could not run distributed query with common table expressions HINT: Consider using an equality filter on the distributed table's partition column. -- We support set operations via the coordinator BEGIN; INSERT INTO raw_events_first(user_id) SELECT user_id FROM ((SELECT user_id FROM raw_events_first) UNION (SELECT user_id FROM raw_events_second)) as foo; DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ...
SELECT results on coordinator ROLLBACK; -- We do not support any set operations INSERT INTO raw_events_first(user_id) (SELECT user_id FROM raw_events_first) INTERSECT (SELECT user_id FROM raw_events_first); DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- If the query is router plannable then it is executed via the coordinator INSERT INTO raw_events_first(user_id) SELECT user_id FROM ((SELECT user_id FROM raw_events_first WHERE user_id = 15) EXCEPT (SELECT user_id FROM raw_events_second where user_id = 17)) as foo; DEBUG: Set operations are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator DEBUG: Creating router plan DEBUG: Plan is router executable -- some supported LEFT joins INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300000 raw_events_first LEFT JOIN public.raw_events_second_13300004 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_first.user_id) >= '-2147483648'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300001 raw_events_first LEFT JOIN public.raw_events_second_13300005 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_first.user_id) >= '-1073741824'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN public.raw_events_second_13300006 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_first.user_id) >= 0) AND (worker_hash(raw_events_first.user_id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300003 raw_events_first LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_first.user_id) >= 1073741824) AND (worker_hash(raw_events_first.user_id) <= 2147483647)) DEBUG: Plan is router executable INSERT INTO agg_events (user_id) SELECT raw_events_second.user_id FROM reference_table LEFT JOIN raw_events_second ON reference_table.user_id = raw_events_second.user_id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_second.user_id FROM (public.reference_table_13300012 reference_table LEFT JOIN public.raw_events_second_13300004 raw_events_second ON ((reference_table.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_second.user_id) >= 
'-2147483648'::integer) AND (worker_hash(raw_events_second.user_id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id) SELECT raw_events_second.user_id FROM (public.reference_table_13300012 reference_table LEFT JOIN public.raw_events_second_13300005 raw_events_second ON ((reference_table.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_second.user_id) >= '-1073741824'::integer) AND (worker_hash(raw_events_second.user_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_second.user_id FROM (public.reference_table_13300012 reference_table LEFT JOIN public.raw_events_second_13300006 raw_events_second ON ((reference_table.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_second.user_id) >= 0) AND (worker_hash(raw_events_second.user_id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_second.user_id FROM (public.reference_table_13300012 reference_table LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((reference_table.user_id = raw_events_second.user_id))) WHERE ((worker_hash(raw_events_second.user_id) >= 1073741824) AND (worker_hash(raw_events_second.user_id) <= 2147483647)) DEBUG: Plan is router executable INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id = 10; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300000 raw_events_first LEFT JOIN public.raw_events_second_13300004 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_first.user_id = 10) AND ((worker_hash(raw_events_first.user_id) >= '-2147483648'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1073741825'::integer))) DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned away DEBUG: Plan is router executable INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_second.user_id = 10 OR raw_events_second.user_id = 11; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300000 raw_events_first LEFT JOIN public.raw_events_second_13300004 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE (((raw_events_second.user_id = 10) OR (raw_events_second.user_id = 11)) AND ((worker_hash(raw_events_first.user_id) >= '-2147483648'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300001 raw_events_first LEFT JOIN (SELECT NULL::integer AS user_id, NULL::timestamp without time zone AS "time", NULL::integer AS value_1, NULL::integer AS value_2, NULL::double precision AS 
value_3, NULL::bigint AS value_4 WHERE false) raw_events_second(user_id, "time", value_1, value_2, value_3, value_4) ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE (((raw_events_second.user_id = 10) OR (raw_events_second.user_id = 11)) AND ((worker_hash(raw_events_first.user_id) >= '-1073741824'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN (SELECT NULL::integer AS user_id, NULL::timestamp without time zone AS "time", NULL::integer AS value_1, NULL::integer AS value_2, NULL::double precision AS value_3, NULL::bigint AS value_4 WHERE false) raw_events_second(user_id, "time", value_1, value_2, value_3, value_4) ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE (((raw_events_second.user_id = 10) OR (raw_events_second.user_id = 11)) AND ((worker_hash(raw_events_first.user_id) >= 0) AND (worker_hash(raw_events_first.user_id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300003 raw_events_first LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE (((raw_events_second.user_id = 10) OR (raw_events_second.user_id = 11)) AND ((worker_hash(raw_events_first.user_id) >= 1073741824) AND (worker_hash(raw_events_first.user_id) <= 2147483647))) DEBUG: Plan is router executable INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id = 10 AND raw_events_first.user_id = 20; DEBUG: Skipping target shard interval 13300008 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned away DEBUG: Plan is router executable INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id = 10 AND raw_events_second.user_id = 20; DEBUG: Skipping target shard interval 13300008 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300009 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300010 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300011 since SELECT query for it pruned away DEBUG: Plan is router executable INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id IN (19, 20, 21); DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300000 raw_events_first LEFT JOIN public.raw_events_second_13300004 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_first.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= '-2147483648'::integer) AND 
(worker_hash(raw_events_first.user_id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300001 raw_events_first LEFT JOIN public.raw_events_second_13300005 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_first.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= '-1073741824'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN public.raw_events_second_13300006 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_first.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= 0) AND (worker_hash(raw_events_first.user_id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM ((SELECT NULL::integer AS user_id, NULL::timestamp without time zone AS "time", NULL::integer AS value_1, NULL::integer AS value_2, NULL::double precision AS value_3, NULL::bigint AS value_4 WHERE false) raw_events_first(user_id, "time", value_1, value_2, value_3, value_4) LEFT JOIN public.raw_events_second_13300007 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_first.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= 1073741824) AND (worker_hash(raw_events_first.user_id) <= 2147483647))) DEBUG: Plan is router executable INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_second.user_id IN (19, 20, 21); DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300000 raw_events_first JOIN public.raw_events_second_13300004 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_second.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= '-2147483648'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300001 raw_events_first JOIN public.raw_events_second_13300005 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_second.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= '-1073741824'::integer) AND (worker_hash(raw_events_first.user_id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300002 raw_events_first JOIN public.raw_events_second_13300006 raw_events_second ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_second.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= 0) AND (worker_hash(raw_events_first.user_id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO 
public.agg_events_13300011 AS citus_table_alias (user_id) SELECT raw_events_first.user_id FROM (public.raw_events_first_13300003 raw_events_first JOIN (SELECT NULL::integer AS user_id, NULL::timestamp without time zone AS "time", NULL::integer AS value_1, NULL::integer AS value_2, NULL::double precision AS value_3, NULL::bigint AS value_4 WHERE false) raw_events_second(user_id, "time", value_1, value_2, value_3, value_4) ON ((raw_events_first.user_id = raw_events_second.user_id))) WHERE ((raw_events_second.user_id = ANY (ARRAY[19, 20, 21])) AND ((worker_hash(raw_events_first.user_id) >= 1073741824) AND (worker_hash(raw_events_first.user_id) <= 2147483647))) DEBUG: Plan is router executable -- the following is a very tricky query for Citus -- although we do not support pushing down JOINs on non-partition -- columns, here it would be safe to push it down given that we're looking for -- a specific value (i.e., value_1 = 12) on the joining column. -- Note that the query always hits the same shard on raw_events_second -- and this query wouldn't have worked if we were to use a different worker -- count or shard replication factor INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1 AND raw_events_first.value_1 = 12; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- some unsupported LEFT/INNER JOINs -- the JOIN is on the partition column of one table but not the other INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- same as the above with INNER JOIN INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- a query that is not meaningful INSERT INTO agg_events (user_id) SELECT raw_events_second.user_id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_first.value_1; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- both tables joined on non-partition columns INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.value_1 = raw_events_second.value_1; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- same as the above with INNER JOIN INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.value_1 = raw_events_second.value_1; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker.
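-- As an informal recap of the failing joins above (an illustrative sketch using
-- the same tables, not output from the regression run): the shape that can be
-- routed shard-by-shard joins both distributed tables on their distribution
-- column, e.g.
--   INSERT INTO agg_events (user_id)
--   SELECT raw_events_first.user_id
--   FROM raw_events_first
--   JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id;
-- whereas replacing either side of the ON clause with a non-distribution column
-- (value_1, value_2, ...) produces the planning errors shown in these cases.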
-- even if there is a filter on the partition key, since the join is not on the partition key we reject -- this query INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1 WHERE raw_events_first.user_id = 10; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- same as the above with INNER JOIN INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1 WHERE raw_events_first.user_id = 10; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- make things a bit more complicated with IN clauses INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1 WHERE raw_events_first.value_1 IN (10, 11,12) OR raw_events_second.user_id IN (1,2,3,4); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- implicit join on non-partition column should also not be pushed down INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- the following is again a tricky query for Citus -- if the given filter was on value_1 as shown in the above, Citus could -- push it down. But here the query is refused INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1 AND raw_events_first.value_2 = 12; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- let's do some unsupported query tests with subqueries -- foo is not joined on the partition key so the query is not -- pushed down INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first LEFT JOIN reference_table ON (raw_events_first.value_1 = reference_table.user_id)) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- if the given filter was on value_1 as shown in the above, Citus could -- push it down. But here the query is refused INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1 AND raw_events_first.value_2 = 12; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker.
-- lets do some unsupported query tests with subqueries -- foo is not joined on the partition key so the query is not -- pushed down INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first LEFT JOIN reference_table ON (raw_events_first.value_1 = reference_table.user_id)) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. INSERT INTO agg_events (value_4_agg, value_1_agg, user_id) SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id != raw_events_second.user_id GROUP BY raw_events_second.user_id) AS foo; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- INSERT partition column does not match with SELECT partition column INSERT INTO agg_events (value_4_agg, value_1_agg, user_id) SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.value_3 AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.value_3) AS foo; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: cannot push down this subquery DETAIL: Group by list without partition column is currently unsupported -- error cases -- no part column at all INSERT INTO raw_events_second (value_1) SELECT value_1 FROM raw_events_first; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: the query doesn't include the target table's partition column DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: the partition column of table public.raw_events_second should have a value INSERT INTO raw_events_second (value_1) SELECT user_id FROM raw_events_first; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: the query doesn't include the target table's partition column DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: the partition column of table public.raw_events_second should have a value INSERT INTO raw_events_second (user_id) SELECT value_1 FROM raw_events_first; DEBUG: cannot perform distributed INSERT INTO ... SELECT becuase the partition columns in the source table and subquery do not match DETAIL: The target table's partition column should correspond to a partition column in the subquery. DEBUG: Collecting INSERT ... 
SELECT results on coordinator ERROR: the partition column of table public.raw_events_second cannot be NULL INSERT INTO raw_events_second (user_id) SELECT user_id * 2 FROM raw_events_first; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an operator in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO raw_events_second (user_id) SELECT user_id :: bigint FROM raw_events_first; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an explicit cast in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO agg_events (value_3_agg, value_4_agg, value_1_agg, value_2_agg, user_id) SELECT SUM(value_3), Count(value_4), user_id, SUM(value_1), Avg(value_2) FROM raw_events_first GROUP BY user_id; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an aggregation in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: the partition column of table public.agg_events cannot be NULL INSERT INTO agg_events (value_3_agg, value_4_agg, value_1_agg, value_2_agg, user_id) SELECT SUM(value_3), Count(value_4), user_id, SUM(value_1), value_2 FROM raw_events_first GROUP BY user_id, value_2; DEBUG: cannot perform distributed INSERT INTO ... SELECT becuase the partition columns in the source table and subquery do not match DETAIL: The target table's partition column should correspond to a partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: the partition column of table public.agg_events cannot be NULL -- tables should be co-located INSERT INTO agg_events (user_id) SELECT user_id FROM reference_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT becuase the partition columns in the source table and subquery do not match DETAIL: The target table's partition column should correspond to a partition column in the subquery. DEBUG: Collecting INSERT ... 
SELECT results on coordinator DEBUG: Creating router plan DEBUG: Plan is router executable -- unsupported joins between subqueries -- we do not return bare partition column on the inner query INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, SUM(raw_events_second.user_id) AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.value_1 HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id); DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- the second part of the query is not routable since -- GROUP BY not on the partition column (i.e., value_1) and thus join -- on f.id = f2.id is not on the partition key (instead on the sum of partition key) INSERT INTO agg_events (user_id) SELECT f.id FROM (SELECT id FROM (SELECT raw_events_first.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, SUM(raw_events_second.user_id) AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.value_1 HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- cannot pushdown the query since the JOIN is not equi JOIN INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id != f2.id)) as outer_most GROUP BY outer_most.id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
-- cannot push down since foo2 is not joined on the partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.value_1 GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- cannot push down since foo doesn't have an equi join INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id != reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- some unsupported LATERAL JOINs -- join on averages is not on the partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT averages.user_id, avg(averages.value_4) FROM (SELECT raw_events_second.user_id FROM reference_table JOIN raw_events_second on (reference_table.user_id = raw_events_second.user_id) ) reference_ids JOIN LATERAL (SELECT user_id, value_4 FROM raw_events_first WHERE value_4 = reference_ids.user_id) as averages ON true GROUP BY averages.user_id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- join among reference_ids and averages is not on the partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT averages.user_id, avg(averages.value_4) FROM (SELECT raw_events_second.user_id FROM reference_table JOIN raw_events_second on (reference_table.user_id = raw_events_second.user_id) ) reference_ids JOIN LATERAL (SELECT user_id, value_4 FROM raw_events_first) as averages ON averages.value_4 = reference_ids.user_id GROUP BY averages.user_id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker.
-- join among the agg_ids and averages is not on the partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT averages.user_id, avg(averages.value_4) FROM (SELECT raw_events_second.user_id FROM reference_table JOIN raw_events_second on (reference_table.user_id = raw_events_second.user_id) ) reference_ids JOIN LATERAL (SELECT user_id, value_4 FROM raw_events_first) as averages ON averages.user_id = reference_ids.user_id JOIN LATERAL (SELECT user_id, value_4 FROM agg_events) as agg_ids ON (agg_ids.value_4 = averages.user_id) GROUP BY averages.user_id; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not supported subqueries in WHERE clause -- since the selected value in the WHERE is not -- partition key INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT value_1 FROM raw_events_second); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- same as above but slightly more complex -- since it also includes subquery in FROM as well INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT value_1 FROM raw_events_second); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- some more semi-anti join tests -- join in where INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT raw_events_second.user_id FROM raw_events_second, raw_events_first WHERE raw_events_second.user_id = raw_events_first.user_id AND raw_events_first.user_id = 200); DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second, public.raw_events_first_13300000 raw_events_first_1 WHERE ((raw_events_second.user_id = raw_events_first_1.user_id) AND (raw_events_first_1.user_id = 200)))) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: Skipping target shard interval 13300005 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300006 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300007 since SELECT query for it pruned away DEBUG: Plan is router executable -- we cannot push this down since it is NOT IN INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id NOT IN (SELECT raw_events_second.user_id FROM raw_events_second, raw_events_first WHERE raw_events_second.user_id = raw_events_first.user_id AND raw_events_first.user_id = 200); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
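-- An informal note on the two semi/anti join cases above (a sketch, not
-- planner output): a semi join expressed on the distribution key, e.g.
--   ... WHERE user_id IN (SELECT user_id FROM raw_events_second ...)
-- keeps per-shard planning (and shard pruning) intact, while the same filter
-- negated with NOT IN is rejected with "Select query cannot be pushed down to
-- the worker", as shown above.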
-- safe to push down INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE EXISTS (SELECT 1 FROM raw_events_second WHERE raw_events_second.user_id =raw_events_first.user_id); DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id))) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id))) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id))) AND ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((EXISTS (SELECT 1 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id))) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) DEBUG: Plan is router executable -- we cannot push down INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE NOT EXISTS (SELECT 1 FROM raw_events_second WHERE raw_events_second.user_id =raw_events_first.user_id); DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300004 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id)))) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300005 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id)))) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300006 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id)))) AND ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first 
WHERE ((NOT (EXISTS (SELECT 1 FROM public.raw_events_second_13300007 raw_events_second WHERE (raw_events_second.user_id = raw_events_first.user_id)))) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) DEBUG: Plan is router executable -- more complex LEFT JOINs INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT raw_events_first.user_id AS id FROM raw_events_first LEFT JOIN reference_table ON (raw_events_first.user_id = reference_table.user_id)) AS foo) as f LEFT JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; DEBUG: distributed statement: INSERT INTO public.agg_events_13300008 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300000 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id = reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= '-2147483648'::integer) AND (worker_hash(id) <= '-1073741825'::integer)) GROUP BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300009 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300001 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id = reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= '-1073741824'::integer) AND (worker_hash(id) <= '-1'::integer)) GROUP BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300010 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300002 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id = reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS 
v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= 0) AND (worker_hash(id) <= 1073741823)) GROUP BY id DEBUG: distributed statement: INSERT INTO public.agg_events_13300011 AS citus_table_alias (user_id, value_4_agg) SELECT id, max(value) AS max FROM (SELECT f2.id, f2.v4 AS value FROM ((SELECT foo.id FROM (SELECT raw_events_first.user_id AS id FROM (public.raw_events_first_13300003 raw_events_first LEFT JOIN public.reference_table_13300012 reference_table ON ((raw_events_first.user_id = reference_table.user_id)))) foo) f LEFT JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id)))) outer_most WHERE ((worker_hash(id) >= 1073741824) AND (worker_hash(id) <= 2147483647)) GROUP BY id DEBUG: Plan is router executable -- cannot push down since the f.id IN is matched with value_1 INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT value_1 FROM raw_events_second)); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
-- same as above, but this time is it safe to push down since -- f.id IN is matched with user_id INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second)); DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300004 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300000 raw_events_first WHERE ((user_id IN (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300000 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300000 raw_events_first_1, public.raw_events_second_13300004 raw_events_second WHERE (raw_events_first_1.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE (f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300004 raw_events_second)))) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300005 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300001 raw_events_first WHERE ((user_id IN (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300001 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300001 raw_events_first_1, public.raw_events_second_13300005 raw_events_second WHERE (raw_events_first_1.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE (f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300005 raw_events_second)))) AND ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300006 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300002 raw_events_first WHERE ((user_id IN (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300002 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT 
sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300002 raw_events_first_1, public.raw_events_second_13300006 raw_events_second WHERE (raw_events_first_1.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE (f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300006 raw_events_second)))) AND ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823))) DEBUG: distributed statement: INSERT INTO public.raw_events_second_13300007 AS citus_table_alias (user_id) SELECT user_id FROM public.raw_events_first_13300003 raw_events_first WHERE ((user_id IN (SELECT f2.id FROM ((SELECT foo.id FROM (SELECT reference_table.user_id AS id FROM public.raw_events_first_13300003 raw_events_first_1, public.reference_table_13300012 reference_table WHERE (raw_events_first_1.user_id = reference_table.user_id)) foo) f JOIN (SELECT foo2.v4, foo2.v1, foo2.id FROM (SELECT sum(raw_events_second.value_4) AS v4, sum(raw_events_first_1.value_1) AS v1, raw_events_second.user_id AS id FROM public.raw_events_first_13300003 raw_events_first_1, public.raw_events_second_13300007 raw_events_second WHERE (raw_events_first_1.user_id = raw_events_second.user_id) GROUP BY raw_events_second.user_id HAVING (sum(raw_events_second.value_4) > (10)::numeric)) foo2) f2 ON ((f.id = f2.id))) WHERE (f.id IN (SELECT raw_events_second.user_id FROM public.raw_events_second_13300007 raw_events_second)))) AND ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647))) DEBUG: Plan is router executable -- cannot push down since top level user_id is matched with NOT IN INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id NOT IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second)); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- cannot push down since join is not equi join (f.id > f2.id) INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id > f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second)); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
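-- Illustrative sketch (not part of the captured test output; assumes a running Citus coordinator
-- with these tables): the worker_hash range filters in the DEBUG statements above come from the
-- four hash shards of the target tables, which split the 32-bit hash space into equal quarters:
--   ..._13300000 / ..._13300004: worker_hash between -2147483648 and -1073741825
--   ..._13300001 / ..._13300005: worker_hash between -1073741824 and -1
--   ..._13300002 / ..._13300006: worker_hash between 0 and 1073741823
--   ..._13300003 / ..._13300007: worker_hash between 1073741824 and 2147483647
-- The same ranges can be read from the shard metadata (query shown for illustration only,
-- output omitted):
-- SELECT shardid, shardminvalue, shardmaxvalue
-- FROM pg_dist_shard
-- WHERE logicalrelid = 'raw_events_first'::regclass
-- ORDER BY shardminvalue::int;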
-- we currently do not support grouping sets INSERT INTO agg_events (user_id, value_1_agg, value_2_agg) SELECT user_id, Sum(value_1) AS sum_val1, Sum(value_2) AS sum_val2 FROM raw_events_second GROUP BY grouping sets ( ( user_id ), ( value_1 ), ( user_id, value_1 ), ( ) ); DEBUG: grouping sets are not allowed in distributed INSERT ... SELECT queries DEBUG: Collecting INSERT ... SELECT results on coordinator ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP HINT: Consider using an equality filter on the distributed table's partition column. -- set back to INFO SET client_min_messages TO INFO; -- avoid constraint violations TRUNCATE raw_events_first; -- we don't support LIMIT even if it exists in the subqueries -- in where clause INSERT INTO agg_events(user_id) SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM ( ( SELECT user_id FROM ( SELECT e1.user_id FROM users_table u1, events_table e1 WHERE e1.user_id = u1.user_id LIMIT 3 ) as f_inner ) ) AS f2); ERROR: cannot push down this subquery DETAIL: Limit in subquery is currently unsupported -- Altering a table and selecting from it using a multi-shard statement -- in the same transaction is allowed because we will use the same -- connections for all co-located placements. BEGIN; ALTER TABLE raw_events_second DROP COLUMN value_4; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' INSERT INTO raw_events_first SELECT * FROM raw_events_second; ROLLBACK; -- Altering a table and selecting from it using a single-shard statement -- in the same transaction is disallowed because we will use a different -- connection. BEGIN; ALTER TABLE raw_events_second DROP COLUMN value_4; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 100; ROLLBACK; -- Altering a reference table and then performing an INSERT ... SELECT which -- joins with the reference table is not allowed, since the INSERT ... SELECT -- would read from the reference table over other connections than the ones -- that performed the DDL. BEGIN; ALTER TABLE reference_table ADD COLUMN z int; INSERT INTO raw_events_first (user_id) SELECT user_id FROM raw_events_second JOIN reference_table USING (user_id); ERROR: cannot establish a new connection for placement 13300024, since DDL has been executed on a connection that is in use ROLLBACK; -- Insert after copy is allowed BEGIN; COPY raw_events_second (user_id, value_1) FROM STDIN DELIMITER ','; INSERT INTO raw_events_first SELECT * FROM raw_events_second; ROLLBACK; -- Insert after copy is currently allowed for single-shard operation. -- Both insert and copy are rolled back successfully. BEGIN; COPY raw_events_second (user_id, value_1) FROM STDIN DELIMITER ','; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 101; SELECT user_id FROM raw_events_first WHERE user_id = 101; user_id --------- 101 (1 row) ROLLBACK; BEGIN; INSERT INTO raw_events_first SELECT * FROM raw_events_second; COPY raw_events_first (user_id, value_1) FROM STDIN DELIMITER ','; ROLLBACK; BEGIN; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 100; COPY raw_events_first (user_id, value_1) FROM STDIN DELIMITER ','; ROLLBACK; -- Similarly, multi-row INSERTs will take part in transactions and reuse connections...
BEGIN; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 100; COPY raw_events_first (user_id, value_1) FROM STDIN DELIMITER ','; INSERT INTO raw_events_first (user_id, value_1) VALUES (105, 105), (106, 106); ROLLBACK; -- selecting from views works CREATE VIEW test_view AS SELECT * FROM raw_events_first; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (16, now(), 60, 600, 6000.1, 60000); SELECT count(*) FROM raw_events_second; count ------- 36 (1 row) INSERT INTO raw_events_second SELECT * FROM test_view; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (17, now(), 60, 600, 6000.1, 60000); INSERT INTO raw_events_second SELECT * FROM test_view WHERE user_id = 17 GROUP BY 1,2,3,4,5,6; SELECT count(*) FROM raw_events_second; count ------- 38 (1 row) -- we need this in our next test truncate raw_events_first; SET client_min_messages TO DEBUG2; -- first show that the query works now INSERT INTO raw_events_first SELECT * FROM raw_events_second; DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300005 raw_events_second WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300007 raw_events_second WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) DEBUG: Plan is router executable SET client_min_messages TO INFO; truncate raw_events_first; SET client_min_messages TO DEBUG2; -- now show that it works for a single shard query as well INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5; DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((user_id = 5) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300002 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300003 since SELECT query for it pruned away DEBUG: Plan is router executable SET client_min_messages TO INFO; -- if a single shard of the SELECT is unhealty, the query should fail 
UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 13300004 AND nodeport = :worker_1_port; truncate raw_events_first; SET client_min_messages TO DEBUG2; -- this should fail INSERT INTO raw_events_first SELECT * FROM raw_events_second; ERROR: cannot perform distributed planning for the given modification DETAIL: Insert query cannot be executed on all placements for shard 13300000 -- this should also fail INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5; ERROR: cannot perform distributed planning for the given modification DETAIL: Insert query cannot be executed on all placements for shard 13300000 -- but this should work given that it hits different shard INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 6; DEBUG: Skipping target shard interval 13300000 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE ((user_id = 6) AND ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823))) DEBUG: Skipping target shard interval 13300003 since SELECT query for it pruned away DEBUG: Plan is router executable SET client_min_messages TO INFO; -- mark the unhealthy placement as healthy again for the next tests UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid = 13300004 AND nodeport = :worker_1_port; -- now that we should show that it works if one of the target shard interval is not healthy UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 13300000 AND nodeport = :worker_1_port; truncate raw_events_first; SET client_min_messages TO DEBUG2; -- this should work INSERT INTO raw_events_first SELECT * FROM raw_events_second; DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer)) DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300001 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300005 raw_events_second WHERE ((worker_hash(user_id) >= '-1073741824'::integer) AND (worker_hash(user_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300002 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300006 raw_events_second WHERE ((worker_hash(user_id) >= 0) AND (worker_hash(user_id) <= 1073741823)) DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300003 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300007 raw_events_second WHERE ((worker_hash(user_id) >= 1073741824) AND (worker_hash(user_id) <= 2147483647)) DEBUG: Plan is router executable SET client_min_messages TO INFO; truncate raw_events_first; SET client_min_messages TO DEBUG2; 
-- this should also work INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5; DEBUG: distributed statement: INSERT INTO public.raw_events_first_13300000 AS citus_table_alias (user_id, "time", value_1, value_2, value_3, value_4) SELECT user_id, "time", value_1, value_2, value_3, value_4 FROM public.raw_events_second_13300004 raw_events_second WHERE ((user_id = 5) AND ((worker_hash(user_id) >= '-2147483648'::integer) AND (worker_hash(user_id) <= '-1073741825'::integer))) DEBUG: Skipping target shard interval 13300001 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300002 since SELECT query for it pruned away DEBUG: Skipping target shard interval 13300003 since SELECT query for it pruned away DEBUG: Plan is router executable SET client_min_messages TO INFO; -- now do some tests with varchars INSERT INTO insert_select_varchar_test VALUES ('test_1', 10); INSERT INTO insert_select_varchar_test VALUES ('test_2', 30); INSERT INTO insert_select_varchar_test (key, value) SELECT *, 100 FROM (SELECT f1.key FROM (SELECT key FROM insert_select_varchar_test GROUP BY 1 HAVING Count(key) < 3) AS f1, (SELECT key FROM insert_select_varchar_test GROUP BY 1 HAVING Sum(COALESCE(insert_select_varchar_test.value, 0)) > 20.0) AS f2 WHERE f1.key = f2.key GROUP BY 1) AS foo; SELECT * FROM insert_select_varchar_test; key | value --------+------- test_2 | 30 test_2 | 100 test_1 | 10 (3 rows) -- some tests with DEFAULT columns and constant values -- this test is mostly importantly intended for deparsing the query correctly -- but still it is preferable to have this test here instead of multi_deparse_shard_query CREATE TABLE table_with_defaults ( store_id int, first_name text, default_1 int DEFAULT 1, last_name text, default_2 text DEFAULT '2' ); -- we don't need many shards SET citus.shard_count = 2; SELECT create_distributed_table('table_with_defaults', 'store_id'); create_distributed_table -------------------------- (1 row) -- let's see the queries SET client_min_messages TO DEBUG2; -- a very simple query INSERT INTO table_with_defaults SELECT * FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, default_1, last_name, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, default_1, last_name, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- see that defaults are filled INSERT INTO table_with_defaults (store_id, first_name) SELECT store_id, first_name FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, 1 AS default_1, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, 
default_1, default_2) SELECT store_id, first_name, 1 AS default_1, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- shuffle one of the defaults and skip the other INSERT INTO table_with_defaults (default_2, store_id, first_name) SELECT default_2, store_id, first_name FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, 1 AS default_1, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, 1 AS default_1, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- shuffle both defaults INSERT INTO table_with_defaults (default_2, store_id, default_1, first_name) SELECT default_2, store_id, default_1, first_name FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, default_1, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, first_name, default_1, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- use constants instead of non-default column INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name) SELECT default_2, 'Freund', store_id, 'Andres' FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- use constants instead of non-default column and skip both defauls INSERT INTO table_with_defaults (last_name, store_id, first_name) SELECT 'Freund', store_id, 'Andres' FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 
'Freund'::text AS last_name, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, 'Freund'::text AS last_name, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- use constants instead of default columns INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name, default_1) SELECT 20, last_name, store_id, first_name, 10 FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, 10, last_name, 20 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, first_name, 10, last_name, 20 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- use constants instead of both default columns and non-default columns INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name, default_1) SELECT 20, 'Freund', store_id, 'Andres', 10 FROM table_with_defaults; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, last_name, default_2) SELECT store_id, 'Andres'::text AS first_name, 10, 'Freund'::text AS last_name, 20 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) DEBUG: Plan is router executable -- some of the the ultimate queries where we have constants, -- defaults and group by entry is not on the target entry INSERT INTO table_with_defaults (default_2, store_id, first_name) SELECT '2000', store_id, 'Andres' FROM table_with_defaults GROUP BY last_name, store_id; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS default_1, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) GROUP BY last_name, store_id DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1 AS 
default_1, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) GROUP BY last_name, store_id DEBUG: Plan is router executable INSERT INTO table_with_defaults (default_1, store_id, first_name, default_2) SELECT 1000, store_id, 'Andres', '2000' FROM table_with_defaults GROUP BY last_name, store_id, first_name; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) GROUP BY last_name, store_id, first_name DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) GROUP BY last_name, store_id, first_name DEBUG: Plan is router executable INSERT INTO table_with_defaults (default_1, store_id, first_name, default_2) SELECT 1000, store_id, 'Andres', '2000' FROM table_with_defaults GROUP BY last_name, store_id, first_name, default_2; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) GROUP BY last_name, store_id, first_name, default_2 DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2000'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) GROUP BY last_name, store_id, first_name, default_2 DEBUG: Plan is router executable INSERT INTO table_with_defaults (default_1, store_id, first_name) SELECT 1000, store_id, 'Andres' FROM table_with_defaults GROUP BY last_name, store_id, first_name, default_2; DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300017 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2'::text AS default_2 FROM public.table_with_defaults_13300017 table_with_defaults WHERE ((worker_hash(store_id) >= '-2147483648'::integer) AND (worker_hash(store_id) <= '-1'::integer)) GROUP BY last_name, store_id, first_name, default_2 DEBUG: distributed statement: INSERT INTO public.table_with_defaults_13300018 AS citus_table_alias (store_id, first_name, default_1, default_2) SELECT store_id, 'Andres'::text AS first_name, 1000, '2'::text AS default_2 FROM public.table_with_defaults_13300018 table_with_defaults WHERE ((worker_hash(store_id) >= 0) AND (worker_hash(store_id) <= 2147483647)) GROUP BY last_name, store_id, first_name, default_2 DEBUG: Plan is router executable RESET client_min_messages; -- Stable function in default should be allowed ALTER TABLE table_with_defaults ADD COLUMN t 
timestamptz DEFAULT now(); INSERT INTO table_with_defaults (store_id, first_name, last_name) SELECT store_id, 'first '||store_id, 'last '||store_id FROM table_with_defaults GROUP BY store_id, first_name, last_name; -- Volatile function in default should be disallowed CREATE TABLE table_with_serial ( store_id int, s bigserial ); SELECT create_distributed_table('table_with_serial', 'store_id'); create_distributed_table -------------------------- (1 row) INSERT INTO table_with_serial (store_id) SELECT store_id FROM table_with_defaults GROUP BY store_id; ERROR: INSERT ... SELECT cannot generate sequence values when selecting from a distributed table -- do some more error/error message checks SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE text_table (part_col text, val int); CREATE TABLE char_table (part_col char[], val int); create table table_with_starts_with_defaults (a int DEFAULT 5, b int, c int); SELECT create_distributed_table('text_table', 'part_col'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('char_table','part_col'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('table_with_starts_with_defaults', 'c'); create_distributed_table -------------------------- (1 row) SET client_min_messages TO DEBUG; INSERT INTO text_table (part_col) SELECT CASE WHEN part_col = 'onder' THEN 'marco' END FROM text_table ; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains a case expression in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT COALESCE(part_col, 'onder') FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains a coalesce expression in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT GREATEST(part_col, 'jason') FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains a min/max expression in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT LEAST(part_col, 'andres') FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains a min/max expression in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... 
SELECT results on coordinator INSERT INTO text_table (part_col) SELECT NULLIF(part_col, 'metin') FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT part_col isnull FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT part_col::text from char_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an explicit coercion in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT (part_col = 'burak') is true FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an expression that is not a simple column reference in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT val FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO text_table (part_col) SELECT val::text FROM text_table; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: Subquery contains an explicit coercion in the same position as the target table's partition column. HINT: Ensure the target table's partition column has a corresponding simple column reference to a distributed table's partition column in the subquery. DEBUG: Collecting INSERT ... 
SELECT results on coordinator RESET client_min_messages; insert into table_with_starts_with_defaults (b,c) select b,c FROM table_with_starts_with_defaults; -- Test on partition column without native hash function CREATE TABLE raw_table ( id BIGINT, time DATE ); CREATE TABLE summary_table ( time DATE, count BIGINT ); SELECT create_distributed_table('raw_table', 'time'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('summary_table', 'time'); create_distributed_table -------------------------- (1 row) INSERT INTO raw_table VALUES(1, '11-11-1980'); INSERT INTO summary_table SELECT time, COUNT(*) FROM raw_table GROUP BY time; SELECT * FROM summary_table; time | count ------------+------- 11-11-1980 | 1 (1 row) -- Test INSERT ... SELECT via coordinator -- Select from constants TRUNCATE raw_events_first; INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM (VALUES (1,2), (3,4), (5,6)) AS v(int,int); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id; user_id | value_1 ---------+--------- 1 | 2 3 | 4 5 | 6 (3 rows) -- Select from local functions TRUNCATE raw_events_first; CREATE SEQUENCE insert_select_test_seq; SET client_min_messages TO DEBUG; INSERT INTO raw_events_first (user_id, value_1, value_2) SELECT s, nextval('insert_select_test_seq'), (random()*10)::int FROM generate_series(1, 5) s; DEBUG: distributed INSERT ... SELECT can only select from distributed tables DEBUG: Collecting INSERT ... SELECT results on coordinator SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 1 | 1 2 | 2 3 | 3 4 | 4 5 | 5 (5 rows) -- ON CONFLICT is unsupported INSERT INTO raw_events_first (user_id, value_1) SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s ON CONFLICT DO NOTHING; DEBUG: distributed INSERT ... SELECT can only select from distributed tables ERROR: ON CONFLICT is not supported in INSERT ... SELECT via coordinator -- RETURNING is unsupported INSERT INTO raw_events_first (user_id, value_1) SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s RETURNING *; DEBUG: distributed INSERT ... SELECT can only select from distributed tables ERROR: RETURNING is not supported in INSERT ... SELECT via coordinator RESET client_min_messages; -- INSERT ... SELECT and multi-shard SELECT in the same transaction is unsupported TRUNCATE raw_events_first; BEGIN; INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first; ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; -- INSERT ... 
SELECT and single-shard SELECT in the same transaction is supported TRUNCATE raw_events_first; BEGIN; INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first WHERE user_id = 1; user_id | value_1 ---------+--------- 1 | 1 (1 row) COMMIT; -- Select from local table TRUNCATE raw_events_first; CREATE TEMPORARY TABLE raw_events_first_local AS SELECT s AS u, 2*s AS v FROM generate_series(1, 5) s; INSERT INTO raw_events_first (user_id, value_1) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 1 | 2 2 | 4 3 | 6 4 | 8 5 | 10 (5 rows) -- Use columns in opposite order TRUNCATE raw_events_first; INSERT INTO raw_events_first (value_1, user_id) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 2 | 1 4 | 2 6 | 3 8 | 4 10 | 5 (5 rows) -- Set operations can work with opposite column order TRUNCATE raw_events_first; INSERT INTO raw_events_first (value_3, user_id) ( SELECT v, u::bigint FROM raw_events_first_local ) UNION ALL ( SELECT v, u FROM raw_events_first_local ); SELECT user_id, value_3 FROM raw_events_first ORDER BY user_id, value_3; user_id | value_3 ---------+--------- 1 | 2 1 | 2 2 | 4 2 | 4 3 | 6 3 | 6 4 | 8 4 | 8 5 | 10 5 | 10 (10 rows) -- Select from other distributed table with limit TRUNCATE raw_events_first; TRUNCATE raw_events_second; INSERT INTO raw_events_second (user_id, value_4) SELECT s, 3*s FROM generate_series (1,5) s; INSERT INTO raw_events_first (user_id, value_1) SELECT user_id, value_4 FROM raw_events_second LIMIT 5; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 1 | 3 2 | 6 3 | 9 4 | 12 5 | 15 (5 rows) -- CTEs are supported in local queries TRUNCATE raw_events_first; WITH removed_rows AS ( DELETE FROM raw_events_first_local RETURNING u ) INSERT INTO raw_events_first (user_id, value_1) WITH value AS (SELECT 1) SELECT * FROM removed_rows, value; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 1 | 1 2 | 1 3 | 1 4 | 1 5 | 1 (5 rows) -- nested CTEs are also supported TRUNCATE raw_events_first; INSERT INTO raw_events_first_local SELECT s, 2*s FROM generate_series(0, 10) s; WITH rows_to_remove AS ( SELECT u FROM raw_events_first_local WHERE u > 0 ), removed_rows AS ( DELETE FROM raw_events_first_local WHERE u IN (SELECT * FROM rows_to_remove) RETURNING u, v ) INSERT INTO raw_events_first (user_id, value_1) WITH ultra_rows AS ( WITH numbers AS ( SELECT s FROM generate_series(1,10) s ), super_rows AS ( SELECT u, v FROM removed_rows JOIN numbers ON (u = s) ) SELECT * FROM super_rows LIMIT 5 ) SELECT u, v FROM ultra_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 1 | 2 2 | 4 3 | 6 4 | 8 5 | 10 (5 rows) -- CTEs with duplicate names are also supported TRUNCATE raw_events_first; WITH super_rows AS ( SELECT u FROM raw_events_first_local ) INSERT INTO raw_events_first (user_id, value_1) WITH super_rows AS ( SELECT * FROM super_rows GROUP BY u ) SELECT u, 5 FROM super_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 0 | 5 (1 row) -- CTEs are supported in router queries TRUNCATE raw_events_first; WITH user_two AS ( SELECT user_id, value_4 FROM 
raw_events_second WHERE user_id = 2 ) INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM user_two; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 2 | 6 (1 row) -- CTEs are supported when there are name collisions WITH numbers AS ( SELECT s FROM generate_series(1,10) s ) INSERT INTO raw_events_first(user_id, value_1) WITH numbers AS ( SELECT s, s FROM generate_series(1,5) s ) SELECT * FROM numbers; -- Select into distributed table with a sequence CREATE TABLE "CaseSensitiveTable" ("UserID" int, "Value1" int); SELECT create_distributed_table('"CaseSensitiveTable"', 'UserID'); create_distributed_table -------------------------- (1 row) INSERT INTO "CaseSensitiveTable" SELECT s, s FROM generate_series(1,10) s; SELECT * FROM "CaseSensitiveTable" ORDER BY "UserID"; UserID | Value1 --------+-------- 1 | 1 2 | 2 3 | 3 4 | 4 5 | 5 6 | 6 7 | 7 8 | 8 9 | 9 10 | 10 (10 rows) DROP TABLE "CaseSensitiveTable"; -- Select into distributed table with a sequence CREATE TABLE dist_table_with_sequence (user_id serial, value_1 serial); SELECT create_distributed_table('dist_table_with_sequence', 'user_id'); create_distributed_table -------------------------- (1 row) -- from local query INSERT INTO dist_table_with_sequence (value_1) SELECT s FROM generate_series(1,5) s; SELECT * FROM dist_table_with_sequence ORDER BY user_id; user_id | value_1 ---------+--------- 1 | 1 2 | 2 3 | 3 4 | 4 5 | 5 (5 rows) -- from a distributed query INSERT INTO dist_table_with_sequence (value_1) SELECT value_1 FROM dist_table_with_sequence; ERROR: INSERT ... SELECT cannot generate sequence values when selecting from a distributed table SELECT * FROM dist_table_with_sequence ORDER BY user_id; user_id | value_1 ---------+--------- 1 | 1 2 | 2 3 | 3 4 | 4 5 | 5 (5 rows) -- Select from distributed table into reference table CREATE TABLE ref_table (user_id int, value_1 int); SELECT create_reference_table('ref_table'); create_reference_table ------------------------ (1 row) INSERT INTO ref_table SELECT user_id, value_1 FROM raw_events_second; SELECT * FROM ref_table ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 1 | 2 | 3 | 4 | 5 | (5 rows) DROP TABLE ref_table; -- Select into an append-partitioned table is not supported CREATE TABLE insert_append_table (user_id int, value_4 bigint); SELECT create_distributed_table('insert_append_table', 'user_id', 'append'); create_distributed_table -------------------------- (1 row) INSERT INTO insert_append_table (user_id, value_4) SELECT user_id, 1 FROM raw_events_second LIMIT 5; ERROR: INSERT ... 
SELECT into an append-distributed table is not supported DROP TABLE insert_append_table; -- Insert from other distributed table as prepared statement TRUNCATE raw_events_first; PREPARE insert_prep(int) AS INSERT INTO raw_events_first (user_id, value_1) SELECT $1, value_4 FROM raw_events_second ORDER BY value_4 LIMIT 1; EXECUTE insert_prep(1); EXECUTE insert_prep(2); EXECUTE insert_prep(3); EXECUTE insert_prep(4); EXECUTE insert_prep(5); EXECUTE insert_prep(6); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; user_id | value_1 ---------+--------- 1 | 3 2 | 3 3 | 3 4 | 3 5 | 3 6 | 3 (6 rows) -- Inserting into views is handled via coordinator TRUNCATE raw_events_first; INSERT INTO test_view SELECT * FROM raw_events_second; SELECT user_id, value_4 FROM test_view ORDER BY user_id, value_4; user_id | value_4 ---------+--------- 1 | 3 2 | 6 3 | 9 4 | 12 5 | 15 (5 rows) -- Drop the view now, because the column we are about to drop depends on it DROP VIEW test_view; -- Make sure we handle dropped columns correctly CREATE TABLE drop_col_table (col1 text, col2 text, col3 text); SELECT create_distributed_table('drop_col_table', 'col2'); create_distributed_table -------------------------- (1 row) ALTER TABLE drop_col_table DROP COLUMN col1; INSERT INTO drop_col_table (col3, col2) SELECT value_4, user_id FROM raw_events_second LIMIT 5; SELECT * FROM drop_col_table ORDER BY col2, col3; col2 | col3 ------+------ 1 | 3 2 | 6 3 | 9 4 | 12 5 | 15 (5 rows) -- make sure the tuple went to the right shard SELECT * FROM drop_col_table WHERE col2 = '1'; col2 | col3 ------+------ 1 | 3 (1 row) RESET client_min_messages; DROP TABLE drop_col_table; DROP TABLE raw_table; DROP TABLE summary_table; DROP TABLE raw_events_first CASCADE; DROP TABLE raw_events_second; DROP TABLE reference_table; DROP TABLE agg_events; DROP TABLE table_with_defaults; DROP TABLE table_with_serial; DROP TABLE text_table; DROP TABLE char_table; DROP TABLE table_with_starts_with_defaults; citus-7.0.3/src/test/regress/expected/multi_insert_select_non_pushable_queries.out000066400000000000000000000631731317107136600310100ustar00rootroot00000000000000------------------------------------ ------------------------------------ -- Vanilla funnel query ------------------------------------ ------------------------------------ -- not pushable since the JOIN is not an equi join INSERT INTO agg_results_third (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id != e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
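-- Illustrative sketch (not part of the captured test output): the failure above is triggered by
-- the non-equi correlation u.user_id != e.user_id. Per the planner requirement quoted in the
-- DETAIL messages of this file (relations must be joined on their distribution keys with an
-- equality operator), the pushable shape of this funnel query would correlate the two tables on
-- user_id, e.g. (shown as a sketch only, output omitted):
-- INSERT INTO agg_results_third (user_id, value_1_agg)
--   SELECT user_id, array_length(events_table, 1)
--   FROM (
--     SELECT user_id, array_agg(event ORDER BY time) AS events_table
--     FROM (
--       SELECT u.user_id, e.event_type::text AS event, e.time
--       FROM users_table AS u, events_table AS e
--       WHERE u.user_id = e.user_id
--         AND u.user_id >= 10 AND u.user_id <= 25
--         AND e.event_type IN (100, 101, 102)
--     ) t
--     GROUP BY user_id
--   ) q;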
------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event ------------------------------------ ------------------------------------ -- not pushable since the JOIN is not an equi join left part of the UNION -- is not equi join INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id != e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- not pushable since the JOIN is not an equi join right part of the UNION -- is not joined on the partition key INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.event_type AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- the LEFT JOIN condition is not on the partition column (i.e., it is part_key divided by 2) INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = (t2.user_id)/2) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. ------------------------------------ ------------------------------------ -- Funnel, grouped by the number of times a user has done an event ------------------------------------ ------------------------------------ -- not pushable since the right of the UNION query is not joined on -- the partition key INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id != events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY count_pay; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator.
-- not pushable since the JOIN condition is not equi JOIN -- (subquery_1 JOIN subquery_2) INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id > subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY count_pay; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ -- not pushable since lateral join is not an equi join INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id != u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since lateral join is not on the partition key INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE event_type = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
-- not pushable since lateral join is not on the partition key INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time, value_3 as val_3 FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE event_type = u.val_3 AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------ ------------------------------------ -- not pushable since partition key is NOT IN INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id NOT IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since partition key is not selected from the second subquery INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT value_1 FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since second subquery does not return bare partition key INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT 3 * user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND user_id!=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker.
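-- For contrast, a sketch of the segment-membership cases above in a shape that
-- is expected to be pushable: every filter is an IN clause whose subquery
-- returns the bare user_id partition column. Kept as a comment (not executed
-- here); illustrative only, against the same schema.
--
-- INSERT INTO agg_results_third (user_id)
-- SELECT DISTINCT user_id
-- FROM users_table
-- WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20)
--   AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40)
--   AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60);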
-- not pushable since the join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND event_type = users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------ ------------------------------------ -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id!=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since the join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND event_type=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------ ------------------------------------ -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type!=100 AND value_3 > 100 AND user_id=users_table.user_id) AND EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id!=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the first join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id!=users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker.
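-- For contrast, a sketch of the preceding EXISTS / NOT EXISTS flavors with the
-- correlation expressed as an equality on the user_id partition column, which is
-- the piece the comments above identify as missing. Kept as a comment (not
-- executed here); illustrative only, against the same schema.
--
-- INSERT INTO agg_results_third(user_id, value_2_agg)
-- SELECT user_id, value_2
-- FROM users_table
-- WHERE value_2 >= 5
--   AND EXISTS (SELECT user_id FROM events_table
--               WHERE event_type > 100 AND event_type <= 300
--                 AND value_3 > 100 AND user_id = users_table.user_id)
--   AND NOT EXISTS (SELECT user_id FROM events_table
--                   WHERE event_type > 300 AND event_type <= 350
--                     AND value_3 > 100 AND user_id = users_table.user_id);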
------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the second join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id != users_table.user_id GROUP BY user_id HAVING Count(*) > 2); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND event_type = users_table.user_id GROUP BY user_id HAVING Count(*) > 2); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.value_1 GROUP BY user_id HAVING Count(*) > 2); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------ ------------------------------------ -- Find me all users_table who has done some event and has filters ------------------------------------ ------------------------------------ -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And user_id NOT in (select user_id From users_table Where value_1 = 15 And value_2 > 25); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since we're not selecting the partition key INSERT INTO agg_results_third(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And user_id in (select value_3 From users_table Where value_1 = 15 And value_2 > 25); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since we're not selecting the partition key -- from the events table INSERT INTO agg_results_third(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And event_type in (select user_id From users_table Where value_1 = 15 And value_2 > 25); ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
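-- For contrast, a sketch of the event filter above in a form that compares the
-- partition key on both sides: the outer user_id is restricted with an IN
-- subquery returning the bare user_id column. Kept as a comment (not executed
-- here); illustrative only, against the same schema.
--
-- INSERT INTO agg_results_third(user_id)
-- SELECT user_id
-- FROM events_table
-- WHERE event_type = 16 AND value_2 > 50
--   AND user_id IN (SELECT user_id FROM users_table
--                   WHERE value_1 = 15 AND value_2 > 25);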
------------------------------------ ------------------------------------ -- Which events_table did people who has done some specific events_table ------------------------------------ ------------------------------------ -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE user_id NOT IN (SELECT user_id from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable due to not selecting the partition key INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE user_id IN (SELECT value_2 from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable due to not comparing user id from the events table INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE event_type IN (SELECT user_id from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. ------------------------------------ ------------------------------------ -- Find my assets that have the highest probability and fetch their metadata ------------------------------------ ------------------------------------ -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_1_agg, value_3_agg) SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id != ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- not pushable since the join is not on the partition key INSERT INTO agg_results_third(user_id, value_1_agg, value_3_agg) SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.value_2 and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
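-- For contrast, a sketch of the probability query above with the inner join also
-- placed on the user_id partition column with equality (short_list.user_id =
-- ma.user_id), the condition the two failing variants relax. Kept as a comment
-- (not executed here); illustrative only, against the same schema.
--
-- INSERT INTO agg_results_third(user_id, value_1_agg, value_3_agg)
-- SELECT users_table.user_id, users_table.value_1, prob
-- FROM users_table
-- JOIN (
--   SELECT ma.user_id,
--          (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob
--   FROM users_table AS ma, events_table AS short_list
--   WHERE short_list.user_id = ma.user_id
--     AND ma.value_1 < 50 AND short_list.event_type < 50
-- ) temp ON users_table.user_id = temp.user_id
-- WHERE users_table.value_1 < 50;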
-- not supported since one of the queries doesn't have a relation INSERT INTO agg_results (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time, value_3 as val_3 FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table, (SELECT 1 as x) as f WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported citus-7.0.3/src/test/regress/expected/multi_join_order_additional.out000066400000000000000000000272771317107136600262020ustar00rootroot00000000000000-- -- MULTI_JOIN_ORDER_ADDITIONAL -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000; -- Set configuration to print table join order and pruned shards SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise SET client_min_messages TO DEBUG2; -- Create new table definitions for use in testing in distributed planning and -- execution functionality. Also create indexes to boost performance. CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "lineitem_hash_pkey" for table "lineitem_hash" DEBUG: building index "lineitem_hash_pkey" on table "lineitem_hash" SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('lineitem_hash', 2, 1); master_create_worker_shards ----------------------------- (1 row) CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate); DEBUG: building index "lineitem_hash_time_index" on table "lineitem_hash" NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE TABLE orders_hash ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); DEBUG: CREATE TABLE / PRIMARY KEY will create implicit index "orders_hash_pkey" for table "orders_hash" DEBUG: building index "orders_hash_pkey" on table "orders_hash" SELECT master_create_distributed_table('orders_hash', 'o_orderkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('orders_hash', 2, 1); master_create_worker_shards 
----------------------------- (1 row) CREATE TABLE customer_hash ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT master_create_distributed_table('customer_hash', 'c_custkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('customer_hash', 2, 1); master_create_worker_shards ----------------------------- (1 row) -- The following query checks that we can correctly handle self-joins EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2 WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5; LOG: join order: [ "lineitem" ][ local partition join "lineitem" ] DEBUG: join prunable for intervals [1,1509] and [2951,4455] DEBUG: join prunable for intervals [1,1509] and [4480,5986] DEBUG: join prunable for intervals [1,1509] and [8997,10560] DEBUG: join prunable for intervals [1,1509] and [10560,12036] DEBUG: join prunable for intervals [1,1509] and [12036,13473] DEBUG: join prunable for intervals [1,1509] and [13473,14947] DEBUG: join prunable for intervals [1509,4964] and [8997,10560] DEBUG: join prunable for intervals [1509,4964] and [10560,12036] DEBUG: join prunable for intervals [1509,4964] and [12036,13473] DEBUG: join prunable for intervals [1509,4964] and [13473,14947] DEBUG: join prunable for intervals [2951,4455] and [1,1509] DEBUG: join prunable for intervals [2951,4455] and [4480,5986] DEBUG: join prunable for intervals [2951,4455] and [8997,10560] DEBUG: join prunable for intervals [2951,4455] and [10560,12036] DEBUG: join prunable for intervals [2951,4455] and [12036,13473] DEBUG: join prunable for intervals [2951,4455] and [13473,14947] DEBUG: join prunable for intervals [4480,5986] and [1,1509] DEBUG: join prunable for intervals [4480,5986] and [2951,4455] DEBUG: join prunable for intervals [4480,5986] and [8997,10560] DEBUG: join prunable for intervals [4480,5986] and [10560,12036] DEBUG: join prunable for intervals [4480,5986] and [12036,13473] DEBUG: join prunable for intervals [4480,5986] and [13473,14947] DEBUG: join prunable for intervals [8997,10560] and [1,1509] DEBUG: join prunable for intervals [8997,10560] and [1509,4964] DEBUG: join prunable for intervals [8997,10560] and [2951,4455] DEBUG: join prunable for intervals [8997,10560] and [4480,5986] DEBUG: join prunable for intervals [8997,10560] and [12036,13473] DEBUG: join prunable for intervals [8997,10560] and [13473,14947] DEBUG: join prunable for intervals [10560,12036] and [1,1509] DEBUG: join prunable for intervals [10560,12036] and [1509,4964] DEBUG: join prunable for intervals [10560,12036] and [2951,4455] DEBUG: join prunable for intervals [10560,12036] and [4480,5986] DEBUG: join prunable for intervals [10560,12036] and [13473,14947] DEBUG: join prunable for intervals [12036,13473] and [1,1509] DEBUG: join prunable for intervals [12036,13473] and [1509,4964] DEBUG: join prunable for intervals [12036,13473] and [2951,4455] DEBUG: join prunable for intervals [12036,13473] and [4480,5986] DEBUG: join prunable for intervals [12036,13473] and [8997,10560] DEBUG: join prunable for intervals [13473,14947] and [1,1509] DEBUG: join prunable for intervals [13473,14947] and [1509,4964] DEBUG: join prunable for intervals [13473,14947] and [2951,4455] DEBUG: join prunable for intervals [13473,14947] and [4480,5986] DEBUG: join prunable for 
intervals [13473,14947] and [8997,10560] DEBUG: join prunable for intervals [13473,14947] and [10560,12036] QUERY PLAN -------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (2 rows) -- Update configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; SET client_min_messages TO LOG; -- The following queries check that we correctly handle joins and OR clauses. In -- particular, these queries check that we factorize out OR clauses if possible, -- and that we default to a cartesian product otherwise. EXPLAIN SELECT count(*) FROM lineitem, orders WHERE (l_orderkey = o_orderkey AND l_quantity > 5) OR (l_orderkey = o_orderkey AND l_quantity < 10); LOG: join order: [ "lineitem" ][ local partition join "orders" ] QUERY PLAN -------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT l_quantity FROM lineitem, orders WHERE (l_orderkey = o_orderkey OR l_quantity > 5); LOG: join order: [ "lineitem" ][ cartesian product "orders" ] ERROR: cannot perform distributed planning on this query DETAIL: Cartesian products are currently unsupported -- The below queries modify the partition method in pg_dist_partition. We thus -- begin a transaction here so the changes don't impact any other parallel -- running tests. BEGIN; -- Validate that we take into account the partition method when building the -- join-order plan. EXPLAIN SELECT count(*) FROM orders, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders" ][ single partition join "lineitem_hash" ] QUERY PLAN -------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Verify we handle local joins between two hash-partitioned tables. EXPLAIN SELECT count(*) FROM orders_hash, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ] QUERY PLAN -------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Validate that we can handle broadcast joins with hash-partitioned tables. EXPLAIN SELECT count(*) FROM customer_hash, nation WHERE c_nationkey = n_nationkey; LOG: join order: [ "customer_hash" ][ broadcast join "nation" ] QUERY PLAN -------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Update the large table shard count for all the following tests. SET citus.large_table_shard_count TO 1; -- Validate that we don't use a single-partition join method for a hash -- re-partitioned table, thus preventing a partition of just the customer table. 
EXPLAIN SELECT count(*) FROM orders, lineitem, customer WHERE o_custkey = l_partkey AND o_custkey = c_nationkey; LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition join "customer" ] QUERY PLAN --------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Validate that we don't choose a single-partition join method with a -- hash-partitioned base table EXPLAIN SELECT count(*) FROM orders, customer_hash WHERE c_custkey = o_custkey; LOG: join order: [ "orders" ][ dual partition join "customer_hash" ] QUERY PLAN --------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Validate that we can re-partition a hash partitioned table to join with a -- range partitioned one. EXPLAIN SELECT count(*) FROM orders_hash, customer WHERE c_custkey = o_custkey; LOG: join order: [ "orders_hash" ][ single partition join "customer" ] QUERY PLAN --------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) COMMIT; -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; DROP TABLE lineitem_hash; DROP TABLE orders_hash; DROP TABLE customer_hash; citus-7.0.3/src/test/regress/expected/multi_join_order_tpch_large.out000066400000000000000000000140111317107136600261600ustar00rootroot00000000000000-- -- MULTI_JOIN_ORDER_TPCH_LARGE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 660000; -- Enable configuration to print table join order SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise SET client_min_messages TO LOG; -- Change configuration to treat lineitem, orders, customer, and part tables as -- large. The following queries are basically the same as the ones in tpch_small -- except that more data has been loaded into customer and part tables. Therefore, -- we will apply different distributed join strategies for these queries.
SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark EXPLAIN SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; LOG: join order: [ "lineitem" ] QUERY PLAN -------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Query #3 from the TPC-H decision support benchmark EXPLAIN SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single partition join "customer" ] QUERY PLAN ------------------------------------------------------------------------------------------------ Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: sum((sum(remote_scan.revenue))) DESC, remote_scan.o_orderdate -> HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.l_orderkey, remote_scan.o_orderdate, remote_scan.o_shippriority -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (6 rows) -- Query #10 from the TPC-H decision support benchmark EXPLAIN SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer, orders, lineitem, nation WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC; LOG: join order: [ "orders" ][ local partition join "lineitem" ][ single partition join "customer" ][ broadcast join "nation" ] QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: sum((sum(remote_scan.revenue))) DESC -> HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.c_custkey, remote_scan.c_name, remote_scan.c_acctbal, remote_scan.c_phone, remote_scan.n_name, remote_scan.c_address, remote_scan.c_comment -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (6 rows) -- Query #19 from the TPC-H decision support benchmark (modified) EXPLAIN SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem, part WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 
'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); LOG: join order: [ "lineitem" ][ single partition join "part" ] QUERY PLAN -------------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Query to test multiple re-partition jobs in a single query EXPLAIN SELECT l_partkey, count(*) FROM lineitem, part, orders, customer WHERE l_orderkey = o_orderkey AND l_partkey = p_partkey AND c_custkey = o_custkey GROUP BY l_partkey; LOG: join order: [ "lineitem" ][ local partition join "orders" ][ single partition join "part" ][ single partition join "customer" ] QUERY PLAN -------------------------------------------------------------------------- HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.l_partkey -> Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (4 rows) -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/expected/multi_join_order_tpch_small.out000066400000000000000000000115161317107136600262050ustar00rootroot00000000000000-- -- MULTI_JOIN_ORDER_TPCH_SMALL -- -- Enable configuration to print table join order SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET client_min_messages TO LOG; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark EXPLAIN SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; LOG: join order: [ "lineitem" ] QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Query #3 from the TPC-H decision support benchmark EXPLAIN SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; LOG: join order: [ "orders" ][ broadcast join "customer" ][ local partition join "lineitem" ] QUERY PLAN ------------------------------------------------------------------------------------------------ Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: sum((sum(remote_scan.revenue))) DESC, remote_scan.o_orderdate -> HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.l_orderkey, remote_scan.o_orderdate, remote_scan.o_shippriority -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (6 rows) -- Query #10 from the TPC-H decision support benchmark EXPLAIN SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer, orders, lineitem, nation WHERE c_custkey = 
o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC; LOG: join order: [ "orders" ][ broadcast join "customer" ][ broadcast join "nation" ][ local partition join "lineitem" ] QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort (cost=0.00..0.00 rows=0 width=0) Sort Key: sum((sum(remote_scan.revenue))) DESC -> HashAggregate (cost=0.00..0.00 rows=0 width=0) Group Key: remote_scan.c_custkey, remote_scan.c_name, remote_scan.c_acctbal, remote_scan.c_phone, remote_scan.n_name, remote_scan.c_address, remote_scan.c_comment -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (6 rows) -- Query #19 from the TPC-H decision support benchmark (modified) EXPLAIN SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem, part WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); LOG: join order: [ "lineitem" ][ broadcast join "part" ] QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/expected/multi_join_pruning.out000066400000000000000000000124071317107136600243460ustar00rootroot00000000000000-- -- MULTI_JOIN_PRUNING -- -- Check that join-pruning works for joins between two large relations. For now -- we only check for join-pruning between locally partitioned relations. In the -- future we want to check for pruning between re-partitioned relations as well. 
SET citus.explain_distributed_queries TO off; SET client_min_messages TO DEBUG2; -- Change configuration to treat all tables as large SET citus.large_table_shard_count TO 2; SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [1,1509] and [8997,14946] DEBUG: join prunable for intervals [1509,2951] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 36086 | 3.0076679446574429 (1 row) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 9030; DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -------+-------------------- 17996 | 3.0194630872483221 (1 row) -- Shards for the lineitem table have been pruned away. Check that join pruning -- works as expected in this case. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 20000; sum | avg -----+----- | (1 row) -- Partition pruning left three shards for the lineitem and one shard for the -- orders table. These shard sets don't overlap, so join pruning should prune -- out all the shards, and leave us with an empty task list. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000; DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] sum | avg -----+----- | (1 row) -- Make sure that we can handle filters without a column SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND false; sum | avg -----+----- | (1 row) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem INNER JOIN orders ON (l_orderkey = o_orderkey) WHERE false; sum | avg -----+----- | (1 row) -- These tests check that we can do join pruning for tables partitioned over -- different type of columns including varchar, array types, composite types -- etc. This is in response to a bug we had where we were not able to resolve -- correct operator types for some kind of column types. 
EXPLAIN SELECT count(*) FROM array_partitioned_table table1, array_partitioned_table table2 WHERE table1.array_column = table2.array_column; DEBUG: join prunable for intervals [{},{AZZXSP27F21T6,AZZXSP27F21T6}] and [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] DEBUG: join prunable for intervals [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] and [{},{AZZXSP27F21T6,AZZXSP27F21T6}] QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM composite_partitioned_table table1, composite_partitioned_table table2 WHERE table1.composite_column = table2.composite_column; DEBUG: join prunable for intervals [(a,3,b),(b,4,c)] and [(c,5,d),(d,6,e)] DEBUG: join prunable for intervals [(c,5,d),(d,6,e)] and [(a,3,b),(b,4,c)] QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Test that large table joins on partition varchar columns work EXPLAIN SELECT count(*) FROM varchar_partitioned_table table1, varchar_partitioned_table table2 WHERE table1.varchar_column = table2.varchar_column; DEBUG: join prunable for intervals [AA1000U2AMO4ZGX,AZZXSP27F21T6] and [BA1000U2AMO4ZGX,BZZXSP27F21T6] DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and [AA1000U2AMO4ZGX,AZZXSP27F21T6] QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) citus-7.0.3/src/test/regress/expected/multi_large_table_join_planning.out000066400000000000000000000422411317107136600270120ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_PLANNING -- -- Tests that cover large table join planning. Note that we explicitly start a -- transaction block here so that we don't emit debug messages with changing -- transaction ids in them. Also, we set the executor type to task tracker -- executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- t (1 row) BEGIN; SET client_min_messages TO DEBUG4; SET citus.large_table_shard_count TO 2; SET citus.task_executor_type TO 'task-tracker'; -- Debug4 log messages display jobIds within them. We explicitly set the jobId -- sequence here so that the regression output becomes independent of the number -- of jobs executed prior to running this test. -- Multi-level repartition join to verify our projection columns are correctly -- referenced and propagated across multiple repartition jobs. The test also -- validates that only the minimal necessary projection columns are transferred -- between jobs. 
SELECT l_partkey, o_orderkey, count(*) FROM lineitem, part, orders, customer WHERE l_orderkey = o_orderkey AND l_partkey = p_partkey AND c_custkey = o_custkey AND (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND p_size > 8 AND o_totalprice > 10.0 AND c_acctbal < 5000.0 AND l_partkey < 1000 GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; DEBUG: join prunable for intervals [1,1509] and [8997,14946] DEBUG: join prunable for intervals [1509,4964] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 9 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 12 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 15 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 18 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 21 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 24 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = 
orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 18 to node localhost:57637 DEBUG: assigned task 15 to node localhost:57638 DEBUG: assigned task 24 to node localhost:57637 DEBUG: assigned task 21 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT "pg_merge_job_0001.task_000025".intermediate_column_1_0, "pg_merge_job_0001.task_000025".intermediate_column_1_1, "pg_merge_job_0001.task_000025".intermediate_column_1_2, "pg_merge_job_0001.task_000025".intermediate_column_1_3, "pg_merge_job_0001.task_000025".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000025 "pg_merge_job_0001.task_000025" JOIN part_290011 part ON (("pg_merge_job_0001.task_000025".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT "pg_merge_job_0001.task_000034".intermediate_column_1_0, "pg_merge_job_0001.task_000034".intermediate_column_1_1, "pg_merge_job_0001.task_000034".intermediate_column_1_2, "pg_merge_job_0001.task_000034".intermediate_column_1_3, "pg_merge_job_0001.task_000034".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000034 "pg_merge_job_0001.task_000034" JOIN part_280002 part ON (("pg_merge_job_0001.task_000034".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 25 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 34 DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT "pg_merge_job_0002.task_000007".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000007".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000007 "pg_merge_job_0002.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000007".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000007".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000007".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000007".intermediate_column_2_0, "pg_merge_job_0002.task_000007".intermediate_column_2_1" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT "pg_merge_job_0002.task_000010".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000010".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000010 "pg_merge_job_0002.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000010".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000010".intermediate_column_2_2 > 5.0) OR 
("pg_merge_job_0002.task_000010".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000010".intermediate_column_2_0, "pg_merge_job_0002.task_000010".intermediate_column_2_1" DEBUG: generated sql query for task 9 DETAIL: query string: "SELECT "pg_merge_job_0002.task_000013".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000013".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000013 "pg_merge_job_0002.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000013".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000013".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000013".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000013".intermediate_column_2_0, "pg_merge_job_0002.task_000013".intermediate_column_2_1" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 10 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 13 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 1 DEBUG: completed cleanup query for job 1 l_partkey | o_orderkey | count -----------+------------+------- 18 | 12005 | 1 79 | 5121 | 1 91 | 2883 | 1 222 | 9413 | 1 278 | 1287 | 1 309 | 2374 | 1 318 | 321 | 1 321 | 5984 | 1 337 | 10403 | 1 350 | 13698 | 1 358 | 4323 | 1 364 | 9347 | 1 416 | 640 | 1 426 | 10855 | 1 450 | 35 | 1 484 | 3843 | 1 504 | 14566 | 1 510 | 13569 | 1 532 | 3175 | 1 641 | 134 | 1 669 | 10944 | 1 716 | 2885 | 1 738 | 4355 | 1 802 | 2534 | 1 824 | 9287 | 1 864 | 3175 | 1 957 | 4293 | 1 960 | 10980 | 1 963 | 4580 | 1 (29 rows) SELECT l_partkey, o_orderkey, count(*) FROM lineitem, orders WHERE l_suppkey = o_shippriority AND l_quantity < 5.0 AND o_totalprice <> 4.0 GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 8 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 10 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 12 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 14 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 16 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)" DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: 
assigned task 8 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 10 to node localhost:57638 DEBUG: assigned task 16 to node localhost:57637 DEBUG: assigned task 14 to node localhost:57638 DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)" DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000017".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000005".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000017 "pg_merge_job_0004.task_000017" JOIN pg_merge_job_0005.task_000005 "pg_merge_job_0005.task_000005" ON (("pg_merge_job_0004.task_000017".intermediate_column_4_1 = "pg_merge_job_0005.task_000005".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000017".intermediate_column_4_0, "pg_merge_job_0005.task_000005".intermediate_column_5_0" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000026".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000008".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000026 "pg_merge_job_0004.task_000026" JOIN pg_merge_job_0005.task_000008 "pg_merge_job_0005.task_000008" ON (("pg_merge_job_0004.task_000026".intermediate_column_4_1 = "pg_merge_job_0005.task_000008".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000026".intermediate_column_4_0, "pg_merge_job_0005.task_000008".intermediate_column_5_0" DEBUG: generated sql query for task 9 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000035".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000011".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000035 "pg_merge_job_0004.task_000035" JOIN pg_merge_job_0005.task_000011 "pg_merge_job_0005.task_000011" ON (("pg_merge_job_0004.task_000035".intermediate_column_4_1 = "pg_merge_job_0005.task_000011".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000035".intermediate_column_4_0, "pg_merge_job_0005.task_000011".intermediate_column_5_0" DEBUG: generated sql query for task 12 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000044".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000014".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000044 "pg_merge_job_0004.task_000044" JOIN pg_merge_job_0005.task_000014 "pg_merge_job_0005.task_000014" ON (("pg_merge_job_0004.task_000044".intermediate_column_4_1 = 
"pg_merge_job_0005.task_000014".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000044".intermediate_column_4_0, "pg_merge_job_0005.task_000014".intermediate_column_5_0" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638 DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 5 DEBUG: completed cleanup query for job 5 l_partkey | o_orderkey | count -----------+------------+------- (0 rows) -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; COMMIT; citus-7.0.3/src/test/regress/expected/multi_large_table_join_planning_0.out000066400000000000000000000430531317107136600272330ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_PLANNING -- -- Tests that cover large table join planning. Note that we explicitly start a -- transaction block here so that we don't emit debug messages with changing -- transaction ids in them. Also, we set the executor type to task tracker -- executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- f (1 row) BEGIN; SET client_min_messages TO DEBUG4; DEBUG: CommitTransactionCommand SET citus.large_table_shard_count TO 2; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand SET citus.task_executor_type TO 'task-tracker'; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand -- Debug4 log messages display jobIds within them. We explicitly set the jobId -- sequence here so that the regression output becomes independent of the number -- of jobs executed prior to running this test. -- Multi-level repartition join to verify our projection columns are correctly -- referenced and propagated across multiple repartition jobs. The test also -- validates that only the minimal necessary projection columns are transferred -- between jobs. 
SELECT l_partkey, o_orderkey, count(*) FROM lineitem, part, orders, customer WHERE l_orderkey = o_orderkey AND l_partkey = p_partkey AND c_custkey = o_custkey AND (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND p_size > 8 AND o_totalprice > 10.0 AND c_acctbal < 5000.0 AND l_partkey < 1000 GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; DEBUG: StartTransactionCommand DEBUG: join prunable for intervals [1,1509] and [8997,14946] DEBUG: join prunable for intervals [1509,4964] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290000 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290001 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 9 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290002 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 12 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290003 lineitem JOIN orders_290008 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 15 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290004 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 18 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290005 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 21 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290006 lineitem JOIN orders_290009 orders ON ((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: generated sql query for task 24 DETAIL: query string: "SELECT lineitem.l_partkey, orders.o_orderkey, lineitem.l_quantity, lineitem.l_extendedprice, orders.o_custkey FROM (lineitem_290007 lineitem JOIN orders_290009 orders ON 
((lineitem.l_orderkey = orders.o_orderkey))) WHERE ((lineitem.l_partkey < 1000) AND (orders.o_totalprice > 10.0))" DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 18 to node localhost:57637 DEBUG: assigned task 15 to node localhost:57638 DEBUG: assigned task 24 to node localhost:57637 DEBUG: assigned task 21 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT "pg_merge_job_0001.task_000025".intermediate_column_1_0, "pg_merge_job_0001.task_000025".intermediate_column_1_1, "pg_merge_job_0001.task_000025".intermediate_column_1_2, "pg_merge_job_0001.task_000025".intermediate_column_1_3, "pg_merge_job_0001.task_000025".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000025 "pg_merge_job_0001.task_000025" JOIN part_290011 part ON (("pg_merge_job_0001.task_000025".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT "pg_merge_job_0001.task_000034".intermediate_column_1_0, "pg_merge_job_0001.task_000034".intermediate_column_1_1, "pg_merge_job_0001.task_000034".intermediate_column_1_2, "pg_merge_job_0001.task_000034".intermediate_column_1_3, "pg_merge_job_0001.task_000034".intermediate_column_1_4 FROM (pg_merge_job_0001.task_000034 "pg_merge_job_0001.task_000034" JOIN part_280002 part ON (("pg_merge_job_0001.task_000034".intermediate_column_1_0 = part.p_partkey))) WHERE (part.p_size > 8)" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 25 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 34 DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT "pg_merge_job_0002.task_000007".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000007".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000007 "pg_merge_job_0002.task_000007" JOIN customer_290010 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000007".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000007".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000007".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000007".intermediate_column_2_0, "pg_merge_job_0002.task_000007".intermediate_column_2_1" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT "pg_merge_job_0002.task_000010".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000010".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000010 "pg_merge_job_0002.task_000010" JOIN customer_280001 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000010".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000010".intermediate_column_2_2 > 5.0) OR 
("pg_merge_job_0002.task_000010".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000010".intermediate_column_2_0, "pg_merge_job_0002.task_000010".intermediate_column_2_1" DEBUG: generated sql query for task 9 DETAIL: query string: "SELECT "pg_merge_job_0002.task_000013".intermediate_column_2_0 AS l_partkey, "pg_merge_job_0002.task_000013".intermediate_column_2_1 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0002.task_000013 "pg_merge_job_0002.task_000013" JOIN customer_280000 customer ON ((customer.c_custkey = "pg_merge_job_0002.task_000013".intermediate_column_2_4))) WHERE ((("pg_merge_job_0002.task_000013".intermediate_column_2_2 > 5.0) OR ("pg_merge_job_0002.task_000013".intermediate_column_2_3 > 1200.0)) AND (customer.c_acctbal < 5000.0)) GROUP BY "pg_merge_job_0002.task_000013".intermediate_column_2_0, "pg_merge_job_0002.task_000013".intermediate_column_2_1" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 10 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 13 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 3 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 2 DEBUG: completed cleanup query for job 1 DEBUG: completed cleanup query for job 1 DEBUG: CommitTransactionCommand l_partkey | o_orderkey | count -----------+------------+------- 18 | 12005 | 1 79 | 5121 | 1 91 | 2883 | 1 222 | 9413 | 1 278 | 1287 | 1 309 | 2374 | 1 318 | 321 | 1 321 | 5984 | 1 337 | 10403 | 1 350 | 13698 | 1 358 | 4323 | 1 364 | 9347 | 1 416 | 640 | 1 426 | 10855 | 1 450 | 35 | 1 484 | 3843 | 1 504 | 14566 | 1 510 | 13569 | 1 532 | 3175 | 1 641 | 134 | 1 669 | 10944 | 1 716 | 2885 | 1 738 | 4355 | 1 802 | 2534 | 1 824 | 9287 | 1 864 | 3175 | 1 957 | 4293 | 1 960 | 10980 | 1 963 | 4580 | 1 (29 rows) SELECT l_partkey, o_orderkey, count(*) FROM lineitem, orders WHERE l_suppkey = o_shippriority AND l_quantity < 5.0 AND o_totalprice <> 4.0 GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; DEBUG: StartTransactionCommand DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290000 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290001 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290002 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 8 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290003 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 10 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290004 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 12 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290005 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 14 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290006 lineitem WHERE (l_quantity < 5.0)" DEBUG: generated sql query for task 16 DETAIL: query string: "SELECT l_partkey, l_suppkey FROM lineitem_290007 lineitem WHERE (l_quantity < 5.0)" DEBUG: assigned task 4 to node 
localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 8 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 10 to node localhost:57638 DEBUG: assigned task 16 to node localhost:57637 DEBUG: assigned task 14 to node localhost:57638 DEBUG: generated sql query for task 2 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290008 orders WHERE (o_totalprice <> 4.0)" DEBUG: generated sql query for task 4 DETAIL: query string: "SELECT o_orderkey, o_shippriority FROM orders_290009 orders WHERE (o_totalprice <> 4.0)" DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: generated sql query for task 3 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000017".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000005".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000017 "pg_merge_job_0004.task_000017" JOIN pg_merge_job_0005.task_000005 "pg_merge_job_0005.task_000005" ON (("pg_merge_job_0004.task_000017".intermediate_column_4_1 = "pg_merge_job_0005.task_000005".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000017".intermediate_column_4_0, "pg_merge_job_0005.task_000005".intermediate_column_5_0" DEBUG: generated sql query for task 6 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000026".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000008".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000026 "pg_merge_job_0004.task_000026" JOIN pg_merge_job_0005.task_000008 "pg_merge_job_0005.task_000008" ON (("pg_merge_job_0004.task_000026".intermediate_column_4_1 = "pg_merge_job_0005.task_000008".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000026".intermediate_column_4_0, "pg_merge_job_0005.task_000008".intermediate_column_5_0" DEBUG: generated sql query for task 9 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000035".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000011".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000035 "pg_merge_job_0004.task_000035" JOIN pg_merge_job_0005.task_000011 "pg_merge_job_0005.task_000011" ON (("pg_merge_job_0004.task_000035".intermediate_column_4_1 = "pg_merge_job_0005.task_000011".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000035".intermediate_column_4_0, "pg_merge_job_0005.task_000011".intermediate_column_5_0" DEBUG: generated sql query for task 12 DETAIL: query string: "SELECT "pg_merge_job_0004.task_000044".intermediate_column_4_0 AS l_partkey, "pg_merge_job_0005.task_000014".intermediate_column_5_0 AS o_orderkey, count(*) AS count FROM (pg_merge_job_0004.task_000044 "pg_merge_job_0004.task_000044" JOIN pg_merge_job_0005.task_000014 
"pg_merge_job_0005.task_000014" ON (("pg_merge_job_0004.task_000044".intermediate_column_4_1 = "pg_merge_job_0005.task_000014".intermediate_column_5_1))) WHERE true GROUP BY "pg_merge_job_0004.task_000044".intermediate_column_4_0, "pg_merge_job_0005.task_000014".intermediate_column_5_0" DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 12 to node localhost:57638 DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 6 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 4 DEBUG: completed cleanup query for job 5 DEBUG: completed cleanup query for job 5 DEBUG: CommitTransactionCommand l_partkey | o_orderkey | count -----------+------------+------- (0 rows) -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; DEBUG: StartTransactionCommand DEBUG: ProcessUtility COMMIT; citus-7.0.3/src/test/regress/expected/multi_large_table_pruning.out000066400000000000000000000227431317107136600256540ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_PRUNING -- -- Tests covering partition and join-pruning for large table joins. Note that we -- set executor type to task tracker executor here, as we cannot run repartition -- jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 700000; SET citus.large_table_shard_count TO 2; SET client_min_messages TO DEBUG2; SET citus.task_executor_type TO 'task-tracker'; -- Single range-repartition join to test join-pruning behaviour. 
EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 11 QUERY PLAN ------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 3 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 2 Merge Task Count: 3 (7 rows) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 11 count ------- 2984 (1 row) -- Single range-repartition join with a selection clause on the partitioned -- table to test the case when all map tasks are pruned away. EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND o_orderkey < 0; QUERY PLAN ------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 0 Merge Task Count: 0 (7 rows) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND o_orderkey < 0; count ------- 0 (1 row) -- Single range-repartition join with a selection clause on the base table to -- test the case when all sql tasks are pruned away. EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND c_custkey < 0; QUERY PLAN ------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 2 Merge Task Count: 3 (7 rows) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND c_custkey < 0; count ------- 0 (1 row) -- Dual hash-repartition join test case. Note that this query doesn't produce -- meaningful results and is only to test hash-partitioning of two large tables -- on non-partition columns. 
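-- A minimal sketch of the two join shapes exercised in this file, assuming the
-- TPC-H style distributed tables (orders, customer, lineitem) and the
-- task-tracker settings set at the top of the file: joining on the
-- range-partitioned key needs a single repartition job, while joining two large
-- tables on non-partition columns repartitions both inputs, which shows up as
-- two MapMergeJob nodes under the Citus custom scan.
SET citus.task_executor_type TO 'task-tracker';
SET citus.large_table_shard_count TO 2;
-- single range-repartition join: one MapMergeJob
EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey;
-- dual hash-repartition join: a MapMergeJob per input table
EXPLAIN (COSTS OFF) SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey;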
EXPLAIN (COSTS OFF) SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 15 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 19 QUERY PLAN ------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 8 Merge Task Count: 4 -> MapMergeJob Map Task Count: 3 Merge Task Count: 4 (10 rows) SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 15 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 19 count ------- 125 (1 row) -- Dual hash-repartition join with a selection clause on one of the tables to -- test the case when all map tasks are pruned away. 
EXPLAIN (COSTS OFF) SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey AND l_orderkey < 0; QUERY PLAN ------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 0 Merge Task Count: 0 -> MapMergeJob Map Task Count: 3 Merge Task Count: 4 (10 rows) SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey AND l_orderkey < 0; count ------- 0 (1 row) -- Test cases with false in the WHERE clause EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey) WHERE false; QUERY PLAN ------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 0 Merge Task Count: 0 (6 rows) -- execute once, to verify that's handled SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey) WHERE false; o_orderkey ------------ (0 rows) EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey) WHERE 1=0 AND c_custkey < 0; QUERY PLAN ------------------------------------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 0 Merge Task Count: 0 (6 rows) EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey AND false); QUERY PLAN ---------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: All (3 rows) EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders, customer WHERE o_custkey = c_custkey AND false; QUERY PLAN ---------------------------------- Custom Scan (Citus Task-Tracker) Task Count: 0 Tasks Shown: All (3 rows) citus-7.0.3/src/test/regress/expected/multi_large_table_task_assignment.out000066400000000000000000000252731317107136600273650ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_TASK_ASSIGNMENT -- -- Tests which cover task assignment for MapMerge jobs for single range repartition -- and dual hash repartition joins. The tests also cover task assignment propagation -- from a sql task to its depended tasks. Note that we set the executor type to task -- tracker executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- t (1 row) BEGIN; SET client_min_messages TO DEBUG3; SET citus.large_table_shard_count TO 2; SET citus.task_executor_type TO 'task-tracker'; -- Single range repartition join to test anchor-shard based task assignment and -- assignment propagation to merge and data-fetch tasks. 
SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 11 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 count ------- 2984 (1 row) -- Single range repartition join, along with a join with a small table containing -- more than one shard. This situation results in multiple sql tasks depending on -- the same merge task, and tests our constraint group creation and assignment -- propagation. Here 'orders' is considered the small table. SET citus.large_table_shard_count TO 3; SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 15 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 18 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: join prunable for intervals [1,1509] and [2951,4455] DEBUG: join prunable for intervals [1,1509] and [4480,5986] DEBUG: join prunable for intervals [1,1509] and [8997,10560] DEBUG: join prunable for intervals [1,1509] and [10560,12036] DEBUG: join prunable for intervals [1,1509] and [12036,13473] DEBUG: join prunable for intervals [1,1509] and [13473,14947] DEBUG: join prunable for intervals [1509,4964] and [8997,10560] DEBUG: join prunable for intervals [1509,4964] and [10560,12036] DEBUG: join prunable for intervals [1509,4964] and [12036,13473] DEBUG: join prunable for intervals [1509,4964] and [13473,14947] DEBUG: join prunable for intervals [2951,4455] and [1,1509] DEBUG: join prunable for intervals [2951,4455] and [4480,5986] DEBUG: join prunable for intervals [2951,4455] and [8997,10560] DEBUG: join prunable for intervals [2951,4455] and [10560,12036] DEBUG: join prunable for intervals [2951,4455] and [12036,13473] DEBUG: join prunable for intervals [2951,4455] and [13473,14947] DEBUG: join prunable for intervals [4480,5986] and [1,1509] DEBUG: join prunable for intervals [4480,5986] and [2951,4455] DEBUG: join prunable for intervals [4480,5986] and [8997,10560] DEBUG: join prunable for intervals [4480,5986] and [10560,12036] DEBUG: join prunable for intervals [4480,5986] and [12036,13473] DEBUG: join prunable for intervals [4480,5986] and [13473,14947] DEBUG: join prunable for intervals [8997,10560] and [1,1509] DEBUG: join prunable for intervals [8997,10560] and [1509,4964] DEBUG: join prunable for intervals [8997,10560] and [2951,4455] DEBUG: join prunable for intervals [8997,10560] and [4480,5986] DEBUG: join prunable for intervals [8997,10560] and [12036,13473] DEBUG: join prunable for intervals [8997,10560] and [13473,14947] DEBUG: join prunable for intervals [10560,12036] and [1,1509] DEBUG: join prunable for intervals 
[10560,12036] and [1509,4964] DEBUG: join prunable for intervals [10560,12036] and [2951,4455] DEBUG: join prunable for intervals [10560,12036] and [4480,5986] DEBUG: join prunable for intervals [10560,12036] and [13473,14947] DEBUG: join prunable for intervals [12036,13473] and [1,1509] DEBUG: join prunable for intervals [12036,13473] and [1509,4964] DEBUG: join prunable for intervals [12036,13473] and [2951,4455] DEBUG: join prunable for intervals [12036,13473] and [4480,5986] DEBUG: join prunable for intervals [12036,13473] and [8997,10560] DEBUG: join prunable for intervals [13473,14947] and [1,1509] DEBUG: join prunable for intervals [13473,14947] and [1509,4964] DEBUG: join prunable for intervals [13473,14947] and [2951,4455] DEBUG: join prunable for intervals [13473,14947] and [4480,5986] DEBUG: join prunable for intervals [13473,14947] and [8997,10560] DEBUG: join prunable for intervals [13473,14947] and [10560,12036] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 19 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 19 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 13 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 16 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 19 DETAIL: Creating dependency on merge taskId 33 DEBUG: pruning merge fetch taskId 22 DETAIL: Creating dependency on merge taskId 33 DEBUG: pruning merge fetch taskId 25 DETAIL: Creating dependency on merge taskId 40 DEBUG: pruning merge fetch taskId 28 DETAIL: Creating dependency on merge taskId 40 DEBUG: pruning merge fetch taskId 31 DETAIL: Creating dependency on merge taskId 47 DEBUG: pruning merge fetch taskId 34 DETAIL: Creating dependency on merge taskId 47 DEBUG: pruning merge fetch taskId 37 DETAIL: Creating dependency on merge taskId 54 DEBUG: pruning merge fetch taskId 40 DETAIL: Creating dependency on merge taskId 54 DEBUG: pruning merge fetch taskId 43 DETAIL: Creating dependency on merge taskId 54 DEBUG: pruning merge fetch taskId 46 DETAIL: Creating dependency on merge taskId 61 DEBUG: pruning merge fetch taskId 49 DETAIL: Creating dependency on merge taskId 61 DEBUG: pruning merge fetch taskId 52 DETAIL: Creating dependency on merge taskId 61 DEBUG: pruning merge fetch taskId 55 DETAIL: Creating dependency on merge taskId 68 DEBUG: pruning merge fetch taskId 58 DETAIL: Creating dependency on merge taskId 68 DEBUG: assigned task 21 to node localhost:57637 DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 27 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 48 to node localhost:57637 DEBUG: assigned task 33 to node localhost:57638 DEBUG: assigned task 39 to node localhost:57637 DEBUG: assigned task 57 to node localhost:57638 DEBUG: propagating assignment from merge task 19 to constrained sql task 6 DEBUG: propagating assignment from merge task 26 to constrained sql task 12 DEBUG: propagating assignment from merge task 26 to constrained sql task 15 DEBUG: propagating assignment from merge task 26 to constrained sql task 18 DEBUG: propagating assignment from merge task 33 to constrained sql task 24 DEBUG: propagating assignment from merge task 40 to constrained sql task 30 DEBUG: propagating assignment from merge task 47 to constrained sql task 36 DEBUG: propagating 
assignment from merge task 54 to constrained sql task 42 DEBUG: propagating assignment from merge task 54 to constrained sql task 45 DEBUG: propagating assignment from merge task 61 to constrained sql task 51 DEBUG: propagating assignment from merge task 61 to constrained sql task 54 DEBUG: propagating assignment from merge task 68 to constrained sql task 60 count ------- 11998 (1 row) SET citus.large_table_shard_count TO 2; -- Dual hash repartition join which tests the separate hash repartition join -- task assignment algorithm. SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 8 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 10 to node localhost:57638 DEBUG: assigned task 16 to node localhost:57637 DEBUG: assigned task 14 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 15 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 19 DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 count ------- 125 (1 row) -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; COMMIT; citus-7.0.3/src/test/regress/expected/multi_large_table_task_assignment_0.out000066400000000000000000000264661317107136600276110ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_TASK_ASSIGNMENT -- -- Tests which cover task assignment for MapMerge jobs for single range repartition -- and dual hash repartition joins. The tests also cover task assignment propagation -- from a sql task to its depended tasks. Note that we set the executor type to task -- tracker executor here, as we cannot run repartition jobs with real time executor. 
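-- A short session sketch using the same settings the transaction below applies:
-- repartition joins only run under the task-tracker executor, and DEBUG3 is the
-- level at which the per-task assignment messages become visible.
SET citus.task_executor_type TO 'task-tracker';  -- required for repartition jobs
SET citus.large_table_shard_count TO 2;          -- large-table threshold used throughout these tests
SET client_min_messages TO DEBUG3;               -- surfaces the "assigned task N to node ..." messages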
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- f (1 row) BEGIN; SET client_min_messages TO DEBUG3; DEBUG: CommitTransactionCommand SET citus.large_table_shard_count TO 2; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand SET citus.task_executor_type TO 'task-tracker'; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand -- Single range repartition join to test anchor-shard based task assignment and -- assignment propagation to merge and data-fetch tasks. SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; DEBUG: StartTransactionCommand DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: join prunable for intervals [1,1000] and [1001,2000] DEBUG: join prunable for intervals [1,1000] and [6001,7000] DEBUG: join prunable for intervals [1001,2000] and [1,1000] DEBUG: join prunable for intervals [1001,2000] and [6001,7000] DEBUG: join prunable for intervals [6001,7000] and [1,1000] DEBUG: join prunable for intervals [6001,7000] and [1001,2000] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 11 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 DEBUG: CommitTransactionCommand count ------- 2984 (1 row) -- Single range repartition join, along with a join with a small table containing -- more than one shard. This situation results in multiple sql tasks depending on -- the same merge task, and tests our constraint group creation and assignment -- propagation. Here 'orders' is considered the small table. 
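-- Sketch of why 'orders' falls on the small side here, using the shard counts
-- visible in this suite's plans (2 for orders, 3 for customer, 8 for lineitem)
-- and pg_dist_shard, the Citus shard catalog:
SELECT logicalrelid, count(*) AS shard_count
FROM pg_dist_shard
WHERE logicalrelid IN ('orders'::regclass, 'customer'::regclass, 'lineitem'::regclass)
GROUP BY logicalrelid;
-- with citus.large_table_shard_count raised to 3, the two-shard orders table is
-- planned as a small table, which is what produces the shared merge-task
-- dependencies in the output below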
SET citus.large_table_shard_count TO 3; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; DEBUG: StartTransactionCommand DEBUG: assigned task 9 to node localhost:57637 DEBUG: assigned task 15 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 18 to node localhost:57638 DEBUG: assigned task 3 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: join prunable for intervals [1,1509] and [2951,4455] DEBUG: join prunable for intervals [1,1509] and [4480,5986] DEBUG: join prunable for intervals [1,1509] and [8997,10560] DEBUG: join prunable for intervals [1,1509] and [10560,12036] DEBUG: join prunable for intervals [1,1509] and [12036,13473] DEBUG: join prunable for intervals [1,1509] and [13473,14947] DEBUG: join prunable for intervals [1509,4964] and [8997,10560] DEBUG: join prunable for intervals [1509,4964] and [10560,12036] DEBUG: join prunable for intervals [1509,4964] and [12036,13473] DEBUG: join prunable for intervals [1509,4964] and [13473,14947] DEBUG: join prunable for intervals [2951,4455] and [1,1509] DEBUG: join prunable for intervals [2951,4455] and [4480,5986] DEBUG: join prunable for intervals [2951,4455] and [8997,10560] DEBUG: join prunable for intervals [2951,4455] and [10560,12036] DEBUG: join prunable for intervals [2951,4455] and [12036,13473] DEBUG: join prunable for intervals [2951,4455] and [13473,14947] DEBUG: join prunable for intervals [4480,5986] and [1,1509] DEBUG: join prunable for intervals [4480,5986] and [2951,4455] DEBUG: join prunable for intervals [4480,5986] and [8997,10560] DEBUG: join prunable for intervals [4480,5986] and [10560,12036] DEBUG: join prunable for intervals [4480,5986] and [12036,13473] DEBUG: join prunable for intervals [4480,5986] and [13473,14947] DEBUG: join prunable for intervals [8997,10560] and [1,1509] DEBUG: join prunable for intervals [8997,10560] and [1509,4964] DEBUG: join prunable for intervals [8997,10560] and [2951,4455] DEBUG: join prunable for intervals [8997,10560] and [4480,5986] DEBUG: join prunable for intervals [8997,10560] and [12036,13473] DEBUG: join prunable for intervals [8997,10560] and [13473,14947] DEBUG: join prunable for intervals [10560,12036] and [1,1509] DEBUG: join prunable for intervals [10560,12036] and [1509,4964] DEBUG: join prunable for intervals [10560,12036] and [2951,4455] DEBUG: join prunable for intervals [10560,12036] and [4480,5986] DEBUG: join prunable for intervals [10560,12036] and [13473,14947] DEBUG: join prunable for intervals [12036,13473] and [1,1509] DEBUG: join prunable for intervals [12036,13473] and [1509,4964] DEBUG: join prunable for intervals [12036,13473] and [2951,4455] DEBUG: join prunable for intervals [12036,13473] and [4480,5986] DEBUG: join prunable for intervals [12036,13473] and [8997,10560] DEBUG: join prunable for intervals [13473,14947] and [1,1509] DEBUG: join prunable for intervals [13473,14947] and [1509,4964] DEBUG: join prunable for intervals [13473,14947] and [2951,4455] DEBUG: join prunable for intervals [13473,14947] and [4480,5986] DEBUG: join prunable for intervals [13473,14947] and [8997,10560] DEBUG: join prunable for intervals [13473,14947] and [10560,12036] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 19 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 19 DEBUG: pruning merge fetch 
taskId 7 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 13 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 16 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 19 DETAIL: Creating dependency on merge taskId 33 DEBUG: pruning merge fetch taskId 22 DETAIL: Creating dependency on merge taskId 33 DEBUG: pruning merge fetch taskId 25 DETAIL: Creating dependency on merge taskId 40 DEBUG: pruning merge fetch taskId 28 DETAIL: Creating dependency on merge taskId 40 DEBUG: pruning merge fetch taskId 31 DETAIL: Creating dependency on merge taskId 47 DEBUG: pruning merge fetch taskId 34 DETAIL: Creating dependency on merge taskId 47 DEBUG: pruning merge fetch taskId 37 DETAIL: Creating dependency on merge taskId 54 DEBUG: pruning merge fetch taskId 40 DETAIL: Creating dependency on merge taskId 54 DEBUG: pruning merge fetch taskId 43 DETAIL: Creating dependency on merge taskId 54 DEBUG: pruning merge fetch taskId 46 DETAIL: Creating dependency on merge taskId 61 DEBUG: pruning merge fetch taskId 49 DETAIL: Creating dependency on merge taskId 61 DEBUG: pruning merge fetch taskId 52 DETAIL: Creating dependency on merge taskId 61 DEBUG: pruning merge fetch taskId 55 DETAIL: Creating dependency on merge taskId 68 DEBUG: pruning merge fetch taskId 58 DETAIL: Creating dependency on merge taskId 68 DEBUG: assigned task 21 to node localhost:57637 DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 27 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 48 to node localhost:57637 DEBUG: assigned task 33 to node localhost:57638 DEBUG: assigned task 39 to node localhost:57637 DEBUG: assigned task 57 to node localhost:57638 DEBUG: propagating assignment from merge task 19 to constrained sql task 6 DEBUG: propagating assignment from merge task 26 to constrained sql task 12 DEBUG: propagating assignment from merge task 26 to constrained sql task 15 DEBUG: propagating assignment from merge task 26 to constrained sql task 18 DEBUG: propagating assignment from merge task 33 to constrained sql task 24 DEBUG: propagating assignment from merge task 40 to constrained sql task 30 DEBUG: propagating assignment from merge task 47 to constrained sql task 36 DEBUG: propagating assignment from merge task 54 to constrained sql task 42 DEBUG: propagating assignment from merge task 54 to constrained sql task 45 DEBUG: propagating assignment from merge task 61 to constrained sql task 51 DEBUG: propagating assignment from merge task 61 to constrained sql task 54 DEBUG: propagating assignment from merge task 68 to constrained sql task 60 DEBUG: CommitTransactionCommand count ------- 11998 (1 row) SET citus.large_table_shard_count TO 2; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand -- Dual hash repartition join which tests the separate hash repartition join -- task assignment algorithm. 
SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; DEBUG: StartTransactionCommand DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 8 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: assigned task 10 to node localhost:57638 DEBUG: assigned task 16 to node localhost:57637 DEBUG: assigned task 14 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 17 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 26 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 35 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 15 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 44 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 19 DEBUG: assigned task 3 to node localhost:57638 DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 9 to node localhost:57638 DEBUG: assigned task 12 to node localhost:57637 DEBUG: CommitTransactionCommand count ------- 125 (1 row) -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; DEBUG: StartTransactionCommand DEBUG: ProcessUtility COMMIT; citus-7.0.3/src/test/regress/expected/multi_limit_clause.out000066400000000000000000000203141317107136600243130ustar00rootroot00000000000000-- -- MULTI_LIMIT_CLAUSE -- -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- Check that we can correctly handle the Limit clause in distributed queries. -- Note that we don't have the limit optimization enabled for these queries, and -- will end up fetching all rows to the master database. 
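-- A compact sketch of when the LIMIT can be pushed to the workers, using the
-- same statements this suite runs: with client_min_messages at DEBUG1, a
-- pushed-down limit is reported as "push down of limit count: N". Ordering by
-- plain columns allows the push down; ordering by an aggregate computed across
-- shards does not, so those rows are all fetched to the master first.
SET client_min_messages TO DEBUG1;
-- pushed down: ORDER BY references a column, so each shard can return its top row
SELECT l_orderkey FROM lineitem ORDER BY l_orderkey DESC LIMIT 1;
-- not pushed down: ORDER BY references count(*), which is only known after merging
SELECT count(*) count_quantity, l_quantity FROM lineitem
	WHERE l_quantity < 32.0 GROUP BY l_quantity
	ORDER BY count_quantity ASC, l_quantity ASC LIMIT 5;
SET client_min_messages TO NOTICE;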
SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; count_quantity | l_quantity ----------------+------------ 219 | 13.00 222 | 29.00 227 | 3.00 229 | 18.00 229 | 31.00 230 | 14.00 230 | 16.00 230 | 17.00 230 | 26.00 232 | 7.00 234 | 10.00 235 | 15.00 236 | 25.00 237 | 2.00 241 | 12.00 242 | 6.00 242 | 22.00 243 | 1.00 243 | 19.00 244 | 4.00 246 | 20.00 249 | 24.00 249 | 27.00 250 | 8.00 250 | 11.00 254 | 5.00 255 | 21.00 258 | 9.00 261 | 23.00 264 | 30.00 273 | 28.00 (31 rows) SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC; count_quantity | l_quantity ----------------+------------ 273 | 28.00 264 | 30.00 261 | 23.00 258 | 9.00 255 | 21.00 254 | 5.00 250 | 11.00 250 | 8.00 249 | 27.00 249 | 24.00 246 | 20.00 244 | 4.00 243 | 19.00 243 | 1.00 242 | 22.00 242 | 6.00 241 | 12.00 237 | 2.00 236 | 25.00 235 | 15.00 234 | 10.00 232 | 7.00 230 | 26.00 230 | 17.00 230 | 16.00 230 | 14.00 229 | 31.00 229 | 18.00 227 | 3.00 222 | 29.00 219 | 13.00 (31 rows) SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 5; count_quantity | l_quantity ----------------+------------ 219 | 13.00 222 | 29.00 227 | 3.00 229 | 18.00 229 | 31.00 (5 rows) SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 10; count_quantity | l_quantity ----------------+------------ 219 | 13.00 222 | 29.00 227 | 3.00 229 | 18.00 229 | 31.00 230 | 14.00 230 | 16.00 230 | 17.00 230 | 26.00 232 | 7.00 (10 rows) SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC LIMIT 10; count_quantity | l_quantity ----------------+------------ 273 | 28.00 264 | 30.00 261 | 23.00 258 | 9.00 255 | 21.00 254 | 5.00 250 | 11.00 250 | 8.00 249 | 27.00 249 | 24.00 (10 rows) -- Check that we can handle limits for simple sort clauses. We order by columns -- in the first two tests, and then by a simple expression in the last test. 
SELECT min(l_orderkey) FROM lineitem; min ----- 1 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey ASC LIMIT 1; DEBUG: push down of limit count: 1 l_orderkey ------------ 1 (1 row) SELECT max(l_orderkey) FROM lineitem; max ------- 14947 (1 row) SELECT l_orderkey FROM lineitem ORDER BY l_orderkey DESC LIMIT 1; DEBUG: push down of limit count: 1 l_orderkey ------------ 14947 (1 row) SELECT * FROM lineitem ORDER BY l_orderkey DESC, l_linenumber DESC LIMIT 3; DEBUG: push down of limit count: 3 l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment ------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+--------------------------------- 14947 | 107098 | 7099 | 2 | 29.00 | 32047.61 | 0.04 | 0.06 | N | O | 11-08-1995 | 08-30-1995 | 12-03-1995 | TAKE BACK RETURN | FOB | inal sentiments t 14947 | 31184 | 3688 | 1 | 14.00 | 15612.52 | 0.09 | 0.02 | N | O | 11-05-1995 | 09-25-1995 | 11-27-1995 | TAKE BACK RETURN | RAIL | bout the even, iro 14946 | 79479 | 4494 | 2 | 37.00 | 53963.39 | 0.01 | 0.01 | N | O | 11-27-1996 | 02-01-1997 | 11-29-1996 | COLLECT COD | AIR | sleep furiously after the furio (3 rows) SELECT max(extract(epoch from l_shipdate)) FROM lineitem; max ----------- 912124800 (1 row) SELECT * FROM lineitem ORDER BY extract(epoch from l_shipdate) DESC, l_orderkey DESC LIMIT 3; DEBUG: push down of limit count: 3 l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment ------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+---------------------------+------------+-------------------------------------- 4678 | 57388 | 9894 | 1 | 35.00 | 47088.30 | 0.04 | 0.08 | N | O | 11-27-1998 | 10-02-1998 | 12-17-1998 | TAKE BACK RETURN | AIR | he accounts. fluffily bold sheaves b 12384 | 84161 | 1686 | 5 | 6.00 | 6870.96 | 0.04 | 0.00 | N | O | 11-26-1998 | 10-04-1998 | 12-08-1998 | COLLECT COD | RAIL | ep blithely. blithely ironic r 1124 | 92298 | 4808 | 3 | 35.00 | 45160.15 | 0.10 | 0.05 | N | O | 11-25-1998 | 10-08-1998 | 12-25-1998 | TAKE BACK RETURN | AIR | ut the slyly bold pinto beans; fi (3 rows) -- Exercise the scenario where order by clauses don't have any aggregates, and -- that we can push down the limit as a result. Check that when this happens, we -- also sort on all group by clauses behind the covers. SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity LIMIT 1; DEBUG: push down of limit count: 1 l_quantity | l_discount | avg ------------+------------+-------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) -- Results from the previous query should match this query's results. 
SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity, l_discount LIMIT 1; DEBUG: push down of limit count: 1 l_quantity | l_discount | avg ------------+------------+-------------------- 1.00 | 0.00 | 99167.304347826087 (1 row) SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/expected/multi_limit_clause_approximate.out000066400000000000000000000126461317107136600267350ustar00rootroot00000000000000-- -- MULTI_LIMIT_CLAUSE_APPROXIMATE -- -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- We first look at results with limit optimization disabled. This first query -- has a group and an order by. The order by clause is a commutative aggregate -- function. SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; l_partkey | aggregate -----------+------------ 194541 | 3727794642 160895 | 3671463005 183486 | 3128069328 179825 | 3093889125 162432 | 2834113536 153937 | 2761321906 199283 | 2726988572 185925 | 2672114100 196629 | 2622637602 157064 | 2614644408 (10 rows) -- Enable limit optimization to fetch one third of each shard's data SET citus.limit_clause_row_fetch_count TO 600; SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; DEBUG: push down of limit count: 600 l_partkey | aggregate -----------+------------ 194541 | 3727794642 160895 | 3671463005 183486 | 3128069328 179825 | 3093889125 162432 | 2834113536 153937 | 2761321906 199283 | 2726988572 185925 | 2672114100 196629 | 2622637602 157064 | 2614644408 (10 rows) -- Disable limit optimization for our second test. This time, we have a query -- that joins several tables, and that groups and orders the results. RESET citus.limit_clause_row_fetch_count; SELECT c_custkey, c_name, count(*) as lineitem_count FROM customer, orders, lineitem WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; c_custkey | c_name | lineitem_count -----------+--------------------+---------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 40 79 | Customer#000000079 | 38 689 | Customer#000000689 | 38 685 | Customer#000000685 | 37 472 | Customer#000000472 | 36 643 | Customer#000000643 | 34 226 | Customer#000000226 | 33 496 | Customer#000000496 | 32 304 | Customer#000000304 | 31 (10 rows) -- Now, enable limit optimization to fetch half of each task's results. For this -- test, we also change a config setting to ensure that we don't repartition any -- of the tables during the query. 
SET citus.limit_clause_row_fetch_count TO 150; SET citus.large_table_shard_count TO 2; SELECT c_custkey, c_name, count(*) as lineitem_count FROM customer, orders, lineitem WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; DEBUG: push down of limit count: 150 c_custkey | c_name | lineitem_count -----------+--------------------+---------------- 43 | Customer#000000043 | 42 370 | Customer#000000370 | 38 79 | Customer#000000079 | 37 689 | Customer#000000689 | 36 472 | Customer#000000472 | 35 685 | Customer#000000685 | 35 643 | Customer#000000643 | 34 226 | Customer#000000226 | 33 496 | Customer#000000496 | 32 304 | Customer#000000304 | 31 (10 rows) RESET citus.large_table_shard_count; -- We now test scenarios where applying the limit optimization wouldn't produce -- meaningful results. First, we check that we don't push down the limit clause -- for non-commutative aggregates. SELECT l_partkey, avg(l_suppkey) AS average FROM lineitem GROUP BY l_partkey ORDER BY average DESC, l_partkey LIMIT 10; l_partkey | average -----------+----------------------- 9998 | 9999.0000000000000000 102466 | 9997.0000000000000000 184959 | 9996.0000000000000000 17492 | 9994.0000000000000000 124966 | 9991.0000000000000000 89989 | 9990.0000000000000000 32479 | 9989.0000000000000000 144960 | 9989.0000000000000000 147473 | 9988.0000000000000000 37481 | 9985.0000000000000000 (10 rows) -- Next, check that we don't apply the limit optimization for expressions that -- have aggregates within them SELECT l_partkey, round(sum(l_suppkey)) AS complex_expression FROM lineitem GROUP BY l_partkey ORDER BY complex_expression DESC LIMIT 10; l_partkey | complex_expression -----------+-------------------- 160895 | 22816 194541 | 19160 37018 | 19044 64284 | 18594 15283 | 18357 1927 | 18284 136884 | 18194 114073 | 18192 1051 | 18156 41555 | 18136 (10 rows) -- Check that query execution works as expected for other queries without limits SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 10.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; count_quantity | l_quantity ----------------+------------ 227 | 3.00 232 | 7.00 237 | 2.00 242 | 6.00 243 | 1.00 244 | 4.00 250 | 8.00 254 | 5.00 258 | 9.00 (9 rows) RESET citus.limit_clause_row_fetch_count; RESET client_min_messages; citus-7.0.3/src/test/regress/expected/multi_master_protocol.out000066400000000000000000000053541317107136600250640ustar00rootroot00000000000000-- -- MULTI_MASTER_PROTOCOL -- -- Tests that check the metadata returned by the master node. 
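-- A quick sketch of the coordinator-side metadata UDFs this file exercises
-- (the distributed 'lineitem' table is the one created earlier in the suite):
SELECT * FROM master_get_table_metadata('lineitem');    -- storage type, partition key, replica count, max size, placement policy
SELECT * FROM master_get_table_ddl_events('lineitem');  -- DDL commands needed to recreate the table
SELECT * FROM master_get_new_shardid();                 -- next value of the shard id sequence
SELECT * FROM master_get_active_worker_nodes();         -- node name/port of each active worker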
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 740000; SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy -------------------+------------+--------------------+---------------+----------------------- t | l_orderkey | 2 | 307200 | 2 (1 row) SELECT * FROM master_get_table_ddl_events('lineitem'); master_get_table_ddl_events ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- CREATE TABLE public.lineitem (l_orderkey bigint NOT NULL, l_partkey integer NOT NULL, l_suppkey integer NOT NULL, l_linenumber integer NOT NULL, l_quantity numeric(15,2) NOT NULL, l_extendedprice numeric(15,2) NOT NULL, l_discount numeric(15,2) NOT NULL, l_tax numeric(15,2) NOT NULL, l_returnflag character(1) NOT NULL, l_linestatus character(1) NOT NULL, l_shipdate date NOT NULL, l_commitdate date NOT NULL, l_receiptdate date NOT NULL, l_shipinstruct character(25) NOT NULL, l_shipmode character(10) NOT NULL, l_comment character varying(44) NOT NULL) CREATE INDEX lineitem_time_index ON public.lineitem USING btree (l_shipdate) TABLESPACE pg_default ALTER TABLE public.lineitem ADD CONSTRAINT lineitem_pkey PRIMARY KEY (l_orderkey, l_linenumber) (3 rows) SELECT * FROM master_get_new_shardid(); master_get_new_shardid ------------------------ 740000 (1 row) SELECT * FROM master_get_active_worker_nodes(); node_name | node_port -----------+----------- localhost | 57638 localhost | 57637 (2 rows) citus-7.0.3/src/test/regress/expected/multi_metadata_access.out000066400000000000000000000015051317107136600247430ustar00rootroot00000000000000-- -- MULTI_METADATA_ACCESS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1360000; CREATE USER no_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. SET ROLE no_access; -- list relations in the citus extension without sufficient privileges SELECT pg_class.oid::regclass FROM pg_class JOIN pg_namespace nsp ON (pg_class.relnamespace = nsp.oid) JOIN pg_depend dep ON(objid = pg_class.oid) JOIN pg_extension ext ON (ext.oid = dep.refobjid) WHERE refclassid = 'pg_extension'::regclass AND classid ='pg_class'::regclass AND ext.extname = 'citus' AND nsp.nspname = 'pg_catalog' AND NOT has_table_privilege(pg_class.oid, 'select'); oid ----- (0 rows) RESET role; DROP USER no_access; citus-7.0.3/src/test/regress/expected/multi_metadata_sync.out000066400000000000000000002102721317107136600244610ustar00rootroot00000000000000-- -- MULTI_METADATA_SYNC -- -- Tests for metadata snapshot functions, metadata syncing functions and propagation of -- metadata changes to MX tables. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000; SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id \gset ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function CREATE FUNCTION master_metadata_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; COMMENT ON FUNCTION master_metadata_snapshot() IS 'commands to create the metadata snapshot'; -- Show that none of the existing tables are qualified to be MX tables SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; logicalrelid | partmethod | partkey | colocationid | repmodel --------------+------------+---------+--------------+---------- (0 rows) -- Show that, with no MX tables, metadata snapshot contains only the delete commands, -- pg_dist_node entries and reference tables SELECT unnest(master_metadata_snapshot()); unnest ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition TRUNCATE pg_dist_node INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default') (3 rows) -- Create a test table with constraints and SERIAL CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL); SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('mx_test_table', 8, 1); master_create_worker_shards ----------------------------- (1 row) -- Set the replication model of the test table to streaming replication so that it is -- considered as an MX table UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; -- Show that the created MX table is included in the metadata snapshot SELECT unnest(master_metadata_snapshot()); unnest -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition TRUNCATE pg_dist_node INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 
'primary'::noderole, 'default') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE') ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres CREATE TABLE public.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL) ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's') SELECT worker_create_truncate_trigger('public.mx_test_table') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') (12 rows) -- Show that CREATE INDEX commands are included in the metadata snapshot CREATE INDEX mx_index ON mx_test_table(col_2); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT unnest(master_metadata_snapshot()); unnest -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition TRUNCATE pg_dist_node INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default') SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE') ALTER SEQUENCE public.mx_test_table_col_3_seq OWNER TO postgres CREATE TABLE public.mx_test_table (col_1 
integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('public.mx_test_table_col_3_seq'::regclass) NOT NULL) CREATE INDEX mx_index ON public.mx_test_table USING btree (col_2) TABLESPACE pg_default ALTER TABLE public.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE public.mx_test_table OWNER TO postgres INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('public.mx_test_table'::regclass, 'h', column_name_to_column('public.mx_test_table','col_1'), 0, 's') SELECT worker_create_truncate_trigger('public.mx_test_table') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('public.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('public.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('public.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('public.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('public.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('public.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('public.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('public.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') (13 rows) -- Show that schema changes are included in the metadata snapshot CREATE SCHEMA mx_testing_schema; ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes HINT: Connect to worker nodes directly to manually change schemas of affected objects. 
SELECT unnest(master_metadata_snapshot()); unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition TRUNCATE pg_dist_node INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default') CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE') ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL) CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') (15 rows) -- Show that append distributed tables are not included in the metadata 
snapshot CREATE TABLE non_mx_test_table (col_1 int, col_2 text); SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); master_create_distributed_table --------------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()); unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition TRUNCATE pg_dist_node INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default') CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE') ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL) CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', 
'536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') (15 rows) -- Show that range distributed tables are not included in the metadata snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()); unnest ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition TRUNCATE pg_dist_node INSERT INTO pg_dist_node (nodeid, groupid, nodename, nodeport, noderack, hasmetadata, isactive, noderole, nodecluster) VALUES (1, 1, 'localhost', 57637, 'default', FALSE, TRUE, 'primary'::noderole, 'default'),(2, 2, 'localhost', 57638, 'default', FALSE, TRUE, 'primary'::noderole, 'default') CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres SELECT worker_apply_sequence_command ('CREATE SEQUENCE IF NOT EXISTS mx_testing_schema.mx_test_table_col_3_seq INCREMENT BY 1 MINVALUE 1 MAXVALUE 9223372036854775807 START WITH 1 NO CYCLE') ALTER SEQUENCE mx_testing_schema.mx_test_table_col_3_seq OWNER TO postgres CREATE SCHEMA IF NOT EXISTS mx_testing_schema AUTHORIZATION postgres CREATE TABLE mx_testing_schema.mx_test_table (col_1 integer, col_2 text NOT NULL, col_3 bigint DEFAULT nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) NOT NULL) CREATE INDEX mx_index ON mx_testing_schema.mx_test_table USING btree (col_2) TABLESPACE pg_default ALTER TABLE mx_testing_schema.mx_test_table ADD CONSTRAINT mx_test_table_col_1_key UNIQUE (col_1) ALTER TABLE mx_testing_schema.mx_test_table OWNER TO postgres INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey, colocationid, repmodel) VALUES ('mx_testing_schema.mx_test_table'::regclass, 'h', column_name_to_column('mx_testing_schema.mx_test_table','col_1'), 0, 's') SELECT worker_create_truncate_trigger('mx_testing_schema.mx_test_table') INSERT INTO pg_dist_placement (shardid, shardstate, shardlength, groupid, placementid) VALUES (1310000, 1, 0, 1, 100000),(1310001, 1, 0, 2, 100001),(1310002, 1, 0, 1, 100002),(1310003, 1, 0, 2, 100003),(1310004, 1, 0, 1, 100004),(1310005, 1, 0, 2, 100005),(1310006, 1, 0, 1, 100006),(1310007, 1, 0, 2, 100007) INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't', '-2147483648', '-1610612737'),('mx_testing_schema.mx_test_table'::regclass, 1310001, 't', '-1610612736', '-1073741825'),('mx_testing_schema.mx_test_table'::regclass, 1310002, 't', '-1073741824', '-536870913'),('mx_testing_schema.mx_test_table'::regclass, 
1310003, 't', '-536870912', '-1'),('mx_testing_schema.mx_test_table'::regclass, 1310004, 't', '0', '536870911'),('mx_testing_schema.mx_test_table'::regclass, 1310005, 't', '536870912', '1073741823'),('mx_testing_schema.mx_test_table'::regclass, 1310006, 't', '1073741824', '1610612735'),('mx_testing_schema.mx_test_table'::regclass, 1310007, 't', '1610612736', '2147483647') (15 rows) -- Test start_metadata_sync_to_node UDF -- Ensure that hasmetadata=false for all nodes SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; count ------- 0 (1 row) -- Ensure it works when run on a secondary node SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); master_add_node ---------------------------------------------------- (4,1,localhost,8888,default,f,t,secondary,default) (1 row) SELECT start_metadata_sync_to_node('localhost', 8888); start_metadata_sync_to_node ----------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; hasmetadata ------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', 8888); stop_metadata_sync_to_node ---------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; hasmetadata ------------- f (1 row) -- Add a node to another cluster to make sure it's also synced SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); master_add_secondary_node ----------------------------------------------------------- (5,1,localhost,8889,default,f,t,secondary,second-cluster) (1 row) -- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; nodeid | hasmetadata --------+------------- 1 | t (1 row) -- Check that the metadata has been copied to the worker \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; groupid --------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster --------+---------+-----------+----------+----------+-------------+----------+-----------+---------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default 2 | 2 | localhost | 57638 | default | f | t | primary | default 4 | 1 | localhost | 8888 | default | f | t | secondary | default 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; logicalrelid | partmethod | partkey | colocationid | repmodel ---------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+--------------+---------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------------------------+---------+--------------+---------------+--------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | 
-1073741825 mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 (8 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ---------+------------+-------------+-----------+----------+------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 1310002 | 1 | 0 | localhost | 57637 | 100002 1310003 | 1 | 0 | localhost | 57638 | 100003 1310004 | 1 | 0 | localhost | 57637 | 100004 1310005 | 1 | 0 | localhost | 57638 | 100005 1310006 | 1 | 0 | localhost | 57637 | 100006 1310007 | 1 | 0 | localhost | 57638 | 100007 (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; Column | Type | Modifiers --------+---------+--------------------------------------------------------------------------------- col_1 | integer | col_2 | text | not null col_3 | bigint | not null default nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) (3 rows) \d mx_testing_schema.mx_test_table_col_1_key Index "mx_testing_schema.mx_test_table_col_1_key" Column | Type | Definition --------+---------+------------ col_1 | integer | col_1 unique, btree, for table "mx_testing_schema.mx_test_table" \d mx_testing_schema.mx_index Index "mx_testing_schema.mx_index" Column | Type | Definition --------+------+------------ col_2 | text | col_2 btree, for table "mx_testing_schema.mx_test_table" -- Check that pg_dist_colocation is not synced SELECT * FROM pg_dist_colocation ORDER BY colocationid; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ (0 rows) -- Make sure that truncate trigger has been set for the MX table on worker SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; count ------- 1 (1 row) -- Make sure that start_metadata_sync_to_node considers foreign key constraints \c - - - :master_port -- Since we're superuser, we can set the replication model to 'streaming' to -- create some MX tables SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE SCHEMA mx_testing_schema_2; CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3)); CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text, FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3)); SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); create_distributed_table -------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) -- Check that foreign key metadata exists on the worker \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass; Constraint | Definition 
---------------------+----------------------------------------------------------------------------- fk_test_2_col1_fkey | FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1(col1, col3) (1 row) \c - - - :master_port DROP TABLE mx_testing_schema_2.fk_test_2; DROP TABLE mx_testing_schema.fk_test_1; RESET citus.shard_replication_factor; RESET citus.replication_model; -- Check that repeated calls to start_metadata_sync_to_node has no side effects \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; groupid --------- 1 (1 row) SELECT * FROM pg_dist_node ORDER BY nodeid; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster --------+---------+-----------+----------+----------+-------------+----------+-----------+---------------- 1 | 1 | localhost | 57637 | default | t | t | primary | default 2 | 2 | localhost | 57638 | default | f | t | primary | default 4 | 1 | localhost | 8888 | default | f | t | secondary | default 5 | 1 | localhost | 8889 | default | f | t | secondary | second-cluster (4 rows) SELECT * FROM pg_dist_partition ORDER BY logicalrelid; logicalrelid | partmethod | partkey | colocationid | repmodel ---------------------------------+------------+------------------------------------------------------------------------------------------------------------------------+--------------+---------- mx_testing_schema.mx_test_table | h | {VAR :varno 1 :varattno 1 :vartype 23 :vartypmod -1 :varcollid 0 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} | 0 | s (1 row) SELECT * FROM pg_dist_shard ORDER BY shardid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue ---------------------------------+---------+--------------+---------------+--------------- mx_testing_schema.mx_test_table | 1310000 | t | -2147483648 | -1610612737 mx_testing_schema.mx_test_table | 1310001 | t | -1610612736 | -1073741825 mx_testing_schema.mx_test_table | 1310002 | t | -1073741824 | -536870913 mx_testing_schema.mx_test_table | 1310003 | t | -536870912 | -1 mx_testing_schema.mx_test_table | 1310004 | t | 0 | 536870911 mx_testing_schema.mx_test_table | 1310005 | t | 536870912 | 1073741823 mx_testing_schema.mx_test_table | 1310006 | t | 1073741824 | 1610612735 mx_testing_schema.mx_test_table | 1310007 | t | 1610612736 | 2147483647 (8 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ---------+------------+-------------+-----------+----------+------------- 1310000 | 1 | 0 | localhost | 57637 | 100000 1310001 | 1 | 0 | localhost | 57638 | 100001 1310002 | 1 | 0 | localhost | 57637 | 100002 1310003 | 1 | 0 | localhost | 57638 | 100003 1310004 | 1 | 0 | localhost | 57637 | 100004 1310005 | 1 | 0 | localhost | 57638 | 100005 1310006 | 1 | 0 | localhost | 57637 | 100006 1310007 | 1 | 0 | localhost | 57638 | 100007 (8 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; Column | Type | Modifiers --------+---------+--------------------------------------------------------------------------------- col_1 | integer | col_2 | text | not null col_3 | bigint | not null default 
nextval('mx_testing_schema.mx_test_table_col_3_seq'::regclass) (3 rows) \d mx_testing_schema.mx_test_table_col_1_key Index "mx_testing_schema.mx_test_table_col_1_key" Column | Type | Definition --------+---------+------------ col_1 | integer | col_1 unique, btree, for table "mx_testing_schema.mx_test_table" \d mx_testing_schema.mx_index Index "mx_testing_schema.mx_index" Column | Type | Definition --------+------+------------ col_2 | text | col_2 btree, for table "mx_testing_schema.mx_test_table" SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; count ------- 1 (1 row) -- Make sure that start_metadata_sync_to_node cannot be called inside a transaction \c - - - :master_port BEGIN; SELECT start_metadata_sync_to_node('localhost', :worker_2_port); ERROR: start_metadata_sync_to_node cannot run inside a transaction block ROLLBACK; SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata ------------- f (1 row) -- Check that the distributed table can be queried from the worker \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) CREATE TABLE mx_query_test (a int, b text, c int); SELECT create_distributed_table('mx_query_test', 'a'); create_distributed_table -------------------------- (1 row) SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass; repmodel ---------- s (1 row) INSERT INTO mx_query_test VALUES (1, 'one', 1); INSERT INTO mx_query_test VALUES (2, 'two', 4); INSERT INTO mx_query_test VALUES (3, 'three', 9); INSERT INTO mx_query_test VALUES (4, 'four', 16); INSERT INTO mx_query_test VALUES (5, 'five', 24); \c - - - :worker_1_port SELECT * FROM mx_query_test ORDER BY a; a | b | c ---+-------+---- 1 | one | 1 2 | two | 4 3 | three | 9 4 | four | 16 5 | five | 24 (5 rows) INSERT INTO mx_query_test VALUES (6, 'six', 36); UPDATE mx_query_test SET c = 25 WHERE a = 5; \c - - - :master_port SELECT * FROM mx_query_test ORDER BY a; a | b | c ---+-------+---- 1 | one | 1 2 | two | 4 3 | three | 9 4 | four | 16 5 | five | 25 6 | six | 36 (6 rows) \c - - - :master_port DROP TABLE mx_query_test; -- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; hasmetadata ------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; hasmetadata ------------- f (1 row) -- Test DDL propagation in MX tables SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) SET citus.shard_count = 5; SET citus.multi_shard_commit_protocol TO '2pc'; CREATE SCHEMA mx_test_schema_1; CREATE SCHEMA mx_test_schema_2; -- Create MX tables SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text); CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1); CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text); CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2); ALTER 
TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Column | Type | Modifiers --------+---------+----------- col1 | integer | col2 | text | (2 rows) \d mx_test_schema_1.mx_table_1_col1_key Index "mx_test_schema_1.mx_table_1_col1_key" Column | Type | Definition --------+---------+------------ col1 | integer | col1 unique, btree, for table "mx_test_schema_1.mx_table_1" \d mx_test_schema_1.mx_index_1 Index "mx_test_schema_1.mx_index_1" Column | Type | Definition --------+---------+------------ col1 | integer | col1 btree, for table "mx_test_schema_1.mx_table_1" SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass; Column | Type | Modifiers --------+---------+----------- col1 | integer | col2 | text | (2 rows) \d mx_test_schema_2.mx_index_2 Index "mx_test_schema_2.mx_index_2" Column | Type | Definition --------+------+------------ col2 | text | col2 btree, for table "mx_test_schema_2.mx_table_2" SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass; Constraint | Definition ------------------+----------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_1.mx_table_1(col1) (1 row) SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1'); create_distributed_table -------------------------- (1 row) -- Check that created tables are marked as streaming replicated tables SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid; logicalrelid | repmodel -----------------------------+---------- mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s (2 rows) -- See the shards and placements of the mx tables SELECT logicalrelid, shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; logicalrelid | shardid | nodename | nodeport -----------------------------+---------+-----------+---------- mx_test_schema_1.mx_table_1 | 1310104 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310105 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310106 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310107 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310108 | localhost | 57637 mx_test_schema_2.mx_table_2 | 1310109 | localhost | 57637 mx_test_schema_2.mx_table_2 | 1310110 | localhost | 57638 mx_test_schema_2.mx_table_2 | 1310111 | localhost | 57637 mx_test_schema_2.mx_table_2 | 1310112 | localhost | 57638 mx_test_schema_2.mx_table_2 | 1310113 | localhost | 57637 (10 rows) -- Check that metadata of MX tables exist on the metadata worker \c - - - :worker_1_port -- Check that tables are created \dt mx_test_schema_?.mx_table_? 
List of relations Schema | Name | Type | Owner ------------------+------------+-------+---------- mx_test_schema_1 | mx_table_1 | table | postgres mx_test_schema_2 | mx_table_2 | table | postgres (2 rows) -- Check that table metadata are created SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass; logicalrelid | repmodel -----------------------------+---------- mx_test_schema_1.mx_table_1 | s mx_test_schema_2.mx_table_2 | s (2 rows) -- Check that shard and placement data are created SELECT logicalrelid, shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; logicalrelid | shardid | nodename | nodeport -----------------------------+---------+-----------+---------- mx_test_schema_1.mx_table_1 | 1310104 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310105 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310106 | localhost | 57637 mx_test_schema_1.mx_table_1 | 1310107 | localhost | 57638 mx_test_schema_1.mx_table_1 | 1310108 | localhost | 57637 mx_test_schema_2.mx_table_2 | 1310109 | localhost | 57637 mx_test_schema_2.mx_table_2 | 1310110 | localhost | 57638 mx_test_schema_2.mx_table_2 | 1310111 | localhost | 57637 mx_test_schema_2.mx_table_2 | 1310112 | localhost | 57638 mx_test_schema_2.mx_table_2 | 1310113 | localhost | 57637 (10 rows) -- Check that metadata of MX tables don't exist on the non-metadata worker \c - - - :worker_2_port \d mx_test_schema_1.mx_table_1 \d mx_test_schema_2.mx_table_2 SELECT * FROM pg_dist_partition; logicalrelid | partmethod | partkey | colocationid | repmodel --------------+------------+---------+--------------+---------- (0 rows) SELECT * FROM pg_dist_shard; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------+---------+--------------+---------------+--------------- (0 rows) SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; shardid | shardstate | shardlength | nodename | nodeport | placementid ---------+------------+-------------+----------+----------+------------- (0 rows) -- Check that CREATE INDEX statement is propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; SET client_min_messages TO 'ERROR'; CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1); ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1); \c - - - :worker_1_port \d mx_test_schema_2.mx_index_3 Index "mx_test_schema_2.mx_index_3" Column | Type | Definition --------+---------+------------ col1 | integer | col1 hash, for table "mx_test_schema_2.mx_table_2" \d mx_test_schema_2.mx_table_2_col1_key Index "mx_test_schema_2.mx_table_2_col1_key" Column | Type | Definition --------+---------+------------ col1 | integer | col1 unique, btree, for table "mx_test_schema_2.mx_table_2" -- Check that DROP INDEX statement is propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; DROP INDEX mx_test_schema_2.mx_index_3; \c - - - :worker_1_port \d mx_test_schema_2.mx_index_3 -- Check that ALTER TABLE statements are propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; ALTER TABLE mx_test_schema_1.mx_table_1 ADD COLUMN col3 NUMERIC; ALTER TABLE mx_test_schema_1.mx_table_1 ALTER COLUMN col3 SET DATA TYPE INT; ALTER TABLE 
mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1); \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Column | Type | Modifiers --------+---------+----------- col1 | integer | col2 | text | col3 | integer | (3 rows) SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Constraint | Definition ------------------+----------------------------------------------------------------- mx_fk_constraint | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) -- Check that foreign key constraint with NOT VALID works as well \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; ALTER TABLE mx_test_schema_1.mx_table_1 DROP CONSTRAINT mx_fk_constraint; ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint_2 FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) NOT VALID; \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; Constraint | Definition --------------------+----------------------------------------------------------------- mx_fk_constraint_2 | FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) (1 row) -- Check that mark_tables_colocated call propagates the changes to the workers \c - - - :master_port SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000; SET citus.shard_count TO 7; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_colocation_test_1 (a int); SELECT create_distributed_table('mx_colocation_test_1', 'a'); create_distributed_table -------------------------- (1 row) CREATE TABLE mx_colocation_test_2 (a int); SELECT create_distributed_table('mx_colocation_test_2', 'a'); create_distributed_table -------------------------- (1 row) -- Check the colocation IDs of the created tables SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass ORDER BY logicalrelid; logicalrelid | colocationid ----------------------+-------------- mx_colocation_test_1 | 10000 mx_colocation_test_2 | 10000 (2 rows) -- Reset the colocation IDs of the test tables DELETE FROM pg_dist_colocation WHERE EXISTS ( SELECT 1 FROM pg_dist_partition WHERE colocationid = pg_dist_partition.colocationid AND pg_dist_partition.logicalrelid = 'mx_colocation_test_1'::regclass); UPDATE pg_dist_partition SET colocationid = 0 WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; -- Mark tables colocated and see the changes on the master and the worker SELECT mark_tables_colocated('mx_colocation_test_1', ARRAY['mx_colocation_test_2']); mark_tables_colocated ----------------------- (1 row) SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; logicalrelid | colocationid ----------------------+-------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 (2 rows) \c - - - :worker_1_port SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; logicalrelid | colocationid 
----------------------+-------------- mx_colocation_test_1 | 10001 mx_colocation_test_2 | 10001 (2 rows) \c - - - :master_port -- Check that DROP TABLE on MX tables works DROP TABLE mx_colocation_test_1; DROP TABLE mx_colocation_test_2; \d mx_colocation_test_1 \d mx_colocation_test_2 \c - - - :worker_1_port \d mx_colocation_test_1 \d mx_colocation_test_2 -- Check that dropped MX table can be recreated again \c - - - :master_port SET citus.shard_count TO 7; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); create_distributed_table -------------------------- (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; logicalrelid | repmodel -------------------+---------- mx_temp_drop_test | s (1 row) DROP TABLE mx_temp_drop_test; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); create_distributed_table -------------------------- (1 row) SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; logicalrelid | repmodel -------------------+---------- mx_temp_drop_test | s (1 row) DROP TABLE mx_temp_drop_test; -- Check that MX tables can be created with SERIAL columns, but error out on metadata sync \c - - - :master_port SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ---------------------------- (1 row) CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); create_distributed_table -------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); ERROR: cannot create an mx table with a serial or smallserial column DETAIL: Only bigserial is supported in mx tables. DROP TABLE mx_table_with_small_sequence; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) -- Show that create_distributed_table errors out if the table has a SERIAL column and -- there are metadata workers CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); ERROR: cannot create an mx table with a serial or smallserial column DETAIL: Only bigserial is supported in mx tables. 
DROP TABLE mx_table_with_small_sequence; -- Create an MX table with (BIGSERIAL) sequences CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); SELECT create_distributed_table('mx_table_with_sequence', 'a'); create_distributed_table -------------------------- (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; Column | Type | Modifiers --------+---------+-------------------------------------------------------------------- a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner --------+------------------------------+----------+---------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------+------------------------------+----------+---------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) -- Check that the sequences created on the metadata worker as well \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; Column | Type | Modifiers --------+---------+-------------------------------------------------------------------- a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner --------+------------------------------+----------+---------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------+------------------------------+----------+---------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) -- Check that the sequences on the worker have their own space SELECT nextval('mx_table_with_sequence_b_seq'); nextval ----------------- 281474976710657 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); nextval ----------------- 281474976710657 (1 row) -- Check that adding a new metadata node sets the sequence space correctly \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ----------------------------- (1 row) \c - - - :worker_2_port SELECT groupid FROM pg_dist_local_group; groupid --------- 2 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; Column | Type | Modifiers --------+---------+-------------------------------------------------------------------- a | integer | b | bigint | not null default nextval('mx_table_with_sequence_b_seq'::regclass) c | bigint | not null default nextval('mx_table_with_sequence_c_seq'::regclass) (3 rows) \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner --------+------------------------------+----------+---------- public | mx_table_with_sequence_b_seq | sequence | postgres (1 row) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------+------------------------------+----------+---------- public | mx_table_with_sequence_c_seq | sequence | postgres (1 row) SELECT nextval('mx_table_with_sequence_b_seq'); nextval ----------------- 562949953421313 (1 row) SELECT nextval('mx_table_with_sequence_c_seq'); 
nextval ----------------- 562949953421313 (1 row) -- Check that dropping the mx table with sequences works as expected, even the metadata -- syncing is stopped to one of the workers \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ---------------------------- (1 row) DROP TABLE mx_table_with_sequence; \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) -- Check that the sequences are dropped from the workers \c - - - :worker_1_port \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) -- Check that the sequences are dropped from the workers \c - - - :worker_2_port \ds mx_table_with_sequence_b_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \ds mx_table_with_sequence_c_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) -- Check that MX sequences play well with non-super users \c - - - :master_port -- Remove a node so that shards and sequences won't be created on table creation. Therefore, -- we can test that start_metadata_sync_to_node can actually create the sequence with proper -- owner CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement; CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_partition; SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- the master user needs superuser permissions to change the replication model CREATE USER mx_user WITH SUPERUSER; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. \c - - - :worker_1_port CREATE USER mx_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. \c - - - :worker_2_port CREATE USER mx_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
\c - mx_user - :master_port -- Create an mx table as a different user CREATE TABLE mx_table (a int, b BIGSERIAL); SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT create_distributed_table('mx_table', 'a'); create_distributed_table -------------------------- (1 row) \c - postgres - :master_port SELECT master_add_node('localhost', :worker_2_port); master_add_node --------------------------------------------------- (6,4,localhost,57638,default,f,t,primary,default) (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ----------------------------- (1 row) \c - mx_user - :worker_1_port SELECT nextval('mx_table_b_seq'); nextval ----------------- 281474976710657 (1 row) INSERT INTO mx_table (a) VALUES (37); INSERT INTO mx_table (a) VALUES (38); SELECT * FROM mx_table ORDER BY a; a | b ----+----------------- 37 | 281474976710658 38 | 281474976710659 (2 rows) \c - mx_user - :worker_2_port SELECT nextval('mx_table_b_seq'); nextval ------------------ 1125899906842625 (1 row) INSERT INTO mx_table (a) VALUES (39); INSERT INTO mx_table (a) VALUES (40); SELECT * FROM mx_table ORDER BY a; a | b ----+------------------ 37 | 281474976710658 38 | 281474976710659 39 | 1125899906842626 40 | 1125899906842627 (4 rows) \c - mx_user - :master_port DROP TABLE mx_table; -- put the metadata back into a consistent state \c - postgres - :master_port INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp; INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp; DROP TABLE pg_dist_placement_temp; DROP TABLE pg_dist_partition_temp; UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :worker_1_port UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :worker_2_port UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ---------------------------- (1 row) DROP USER mx_user; \c - - - :worker_1_port DROP USER mx_user; \c - - - :worker_2_port DROP USER mx_user; -- Check that create_reference_table creates the metadata on workers \c - - - :master_port CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); create_reference_table ------------------------ (1 row) \dt mx_ref List of relations Schema | Name | Type | Owner --------+--------+-------+---------- public | mx_ref | table | postgres (1 row) \c - - - :worker_1_port \dt mx_ref List of relations Schema | Name | Type | Owner --------+--------+-------+---------- public | mx_ref | table | postgres (1 row) SELECT logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport FROM pg_dist_partition NATURAL JOIN pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_ref'::regclass ORDER BY nodeport; logicalrelid | partmethod | repmodel | shardid | placementid | nodename | nodeport --------------+------------+----------+---------+-------------+-----------+---------- mx_ref | n | t | 1310183 | 100183 | localhost | 57637 mx_ref | n | t | 1310183 | 100184 | localhost | 57638 (2 rows) SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset -- Check that DDL commands are 
propagated to reference tables on workers \c - - - :master_port ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE INDEX mx_ref_index ON mx_ref(col_1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; Column | Type | Modifiers --------+---------+----------- col_1 | integer | col_2 | text | col_3 | numeric | default 0 (3 rows) \d mx_ref_index Index "public.mx_ref_index" Column | Type | Definition --------+---------+------------ col_1 | integer | col_1 btree, for table "public.mx_ref" \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; Column | Type | Modifiers --------+---------+----------- col_1 | integer | col_2 | text | col_3 | numeric | default 0 (3 rows) \d mx_ref_index Index "public.mx_ref_index" Column | Type | Definition --------+---------+------------ col_1 | integer | col_1 btree, for table "public.mx_ref" -- Check that metadata is cleaned successfully upon drop table \c - - - :master_port DROP TABLE mx_ref; \d mx_ref \c - - - :worker_1_port \d mx_ref SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------+---------+--------------+---------------+--------------- (0 rows) SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid; shardid | shardstate | shardlength | nodename | nodeport | placementid ---------+------------+-------------+----------+----------+------------- (0 rows) -- Check that master_add_node propagates the metadata about new placements of a reference table \c - - - :master_port SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset CREATE TABLE tmp_placement AS SELECT * FROM pg_dist_placement WHERE groupid = :old_worker_2_group; DELETE FROM pg_dist_placement WHERE groupid = :old_worker_2_group; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); create_reference_table ------------------------ (1 row) SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; shardid | nodename | nodeport ---------+-----------+---------- 1310184 | localhost | 57637 (1 row) \c - - - :worker_1_port SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; shardid | nodename | nodeport ---------+-----------+---------- 1310184 | localhost | 57637 (1 row) \c - - - :master_port SELECT master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "mx_ref" to the node localhost:57638 master_add_node --------------------------------------------------- (7,5,localhost,57638,default,f,t,primary,default) (1 row) SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; shardid | nodename | nodeport ---------+-----------+---------- 1310184 | localhost | 57637 1310184 | localhost | 57638 (2 rows) \c - - - :worker_1_port SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; shardid | nodename |
nodeport ---------+-----------+---------- 1310184 | localhost | 57637 1310184 | localhost | 57638 (2 rows) -- Get the metadata back into a consistent state \c - - - :master_port INSERT INTO pg_dist_placement (SELECT * FROM tmp_placement); DROP TABLE tmp_placement; UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :worker_1_port UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; -- Cleanup \c - - - :master_port DROP TABLE mx_test_schema_2.mx_table_2 CASCADE; NOTICE: drop cascades to constraint mx_fk_constraint_2 on table mx_test_schema_1.mx_table_1 DROP TABLE mx_test_schema_1.mx_table_1 CASCADE; DROP TABLE mx_testing_schema.mx_test_table; DROP TABLE mx_ref; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ---------------------------- (1 row) RESET citus.shard_count; RESET citus.shard_replication_factor; RESET citus.replication_model; RESET citus.multi_shard_commit_protocol; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; citus-7.0.3/src/test/regress/expected/multi_modifications.out000066400000000000000000001342071317107136600245000ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 750000; -- =================================================================== -- test end-to-end modification functionality -- =================================================================== CREATE TYPE order_side AS ENUM ('buy', 'sell'); CREATE TABLE limit_orders ( id bigint PRIMARY KEY, symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, kind order_side NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); CREATE TABLE multiple_hash ( category text NOT NULL, data text NOT NULL ); CREATE TABLE insufficient_shards ( LIKE limit_orders ); CREATE TABLE range_partitioned ( LIKE limit_orders ); CREATE TABLE append_partitioned ( LIKE limit_orders ); SELECT master_create_distributed_table('limit_orders', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('multiple_hash', 'category', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('range_partitioned', 'id', 'range'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('append_partitioned', 'id', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('limit_orders', 2, 2); master_create_worker_shards ----------------------------- (1 row) SELECT master_create_worker_shards('multiple_hash', 2, 2); master_create_worker_shards ----------------------------- (1 row) -- make a single shard that covers no partition values SELECT 
master_create_worker_shards('insufficient_shards', 1, 1); master_create_worker_shards ----------------------------- (1 row) UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0 WHERE logicalrelid = 'insufficient_shards'::regclass; -- create range-partitioned shards SELECT master_create_empty_shard('range_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 49999 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('range_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 50000, shardmaxvalue = 99999 WHERE shardid = :new_shard_id; -- create append-partitioned shards SELECT master_create_empty_shard('append_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('append_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000 WHERE shardid = :new_shard_id; -- basic single-row INSERT INSERT INTO limit_orders VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 32743; count ------- 1 (1 row) -- basic single-row INSERT with RETURNING INSERT INTO limit_orders VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -------+--------+-----------+--------------------------+------+------------- 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) -- try a single-row INSERT with no shard to receive it INSERT INTO insufficient_shards VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); ERROR: cannot find shard interval DETAIL: Hash of the partition column value does not fall into any shards. -- try an insert to a range-partitioned table INSERT INTO range_partitioned VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); -- also insert to an append-partitioned table INSERT INTO append_partitioned VALUES (414123, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); -- ensure the values are where we put them and query to ensure they are properly pruned SET client_min_messages TO 'DEBUG2'; SET citus.task_executor_type TO 'real-time'; SELECT * FROM range_partitioned WHERE id = 32743; DEBUG: Creating router plan DEBUG: Plan is router executable id | symbol | bidder_id | placed_at | kind | limit_price -------+--------+-----------+--------------------------+------+------------- 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) SELECT * FROM append_partitioned WHERE id = 414123; DEBUG: Plan is router executable id | symbol | bidder_id | placed_at | kind | limit_price --------+--------+-----------+--------------------------+------+------------- 414123 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) SET client_min_messages TO DEFAULT; SET citus.task_executor_type TO DEFAULT; -- try inserting without a range-partitioned shard to receive the value INSERT INTO range_partitioned VALUES (999999, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); ERROR: cannot run INSERT command which targets no shards HINT: Make sure you have created a shard which can receive this partition column value. 
-- and insert into an append-partitioned table with a value that spans shards: INSERT INTO append_partitioned VALUES (500000, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); ERROR: cannot run INSERT command which targets multiple shards HINT: Make sure the value for partition column "id" falls into a single shard. -- INSERT with DEFAULT in the target list INSERT INTO limit_orders VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', DEFAULT); SELECT COUNT(*) FROM limit_orders WHERE id = 12756; count ------- 1 (1 row) -- INSERT with expressions in target list INSERT INTO limit_orders VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT COUNT(*) FROM limit_orders WHERE id = 430; count ------- 1 (1 row) -- INSERT without partition key INSERT INTO limit_orders DEFAULT VALUES; ERROR: cannot perform an INSERT without a partition column value -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; -- INSERT violating NOT NULL constraint INSERT INTO limit_orders VALUES (NULL, 'T', 975234, DEFAULT); ERROR: cannot perform an INSERT with NULL in the partition column -- INSERT violating column constraint INSERT INTO limit_orders VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'sell', -5.00); ERROR: new row for relation "limit_orders_750000" violates check constraint "limit_orders_limit_price_check" DETAIL: Failing row contains (18811, BUD, 14962, 2014-04-05 08:32:16, sell, -5.00). CONTEXT: while executing command on localhost:57637 -- INSERT violating primary key constraint INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58); ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001" DETAIL: Key (id)=(32743) already exists. CONTEXT: while executing command on localhost:57638 -- INSERT violating primary key constraint, with RETURNING specified. INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *; ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001" DETAIL: Key (id)=(32743) already exists. CONTEXT: while executing command on localhost:57638 -- INSERT, with RETURNING specified, failing with a non-constraint error INSERT INTO limit_orders VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0; ERROR: could not modify any active placements SET client_min_messages TO DEFAULT; -- commands with non-constant partition values are supported INSERT INTO limit_orders VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45', 'sell', 0.58); -- values for other columns are totally fine INSERT INTO limit_orders VALUES (2036, 'GOOG', 5634, now(), 'buy', random()); -- commands with mutable functions in their quals DELETE FROM limit_orders WHERE id = 246 AND bidder_id = (random() * 1000); ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE -- commands with mutable but non-volatile functions(ie: stable func.) 
in their quals -- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable) DELETE FROM limit_orders WHERE id = 246 AND placed_at = current_timestamp::timestamp; -- multi-row inserts are supported INSERT INTO limit_orders VALUES (12037, 'GOOG', 5634, '2001-04-16 03:37:28', 'buy', 0.50), (12038, 'GOOG', 5634, '2001-04-17 03:37:28', 'buy', 2.50), (12039, 'GOOG', 5634, '2001-04-18 03:37:28', 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 12037 AND 12039; count ------- 3 (1 row) -- even those with functions and returning INSERT INTO limit_orders VALUES (22037, 'GOOG', 5634, now(), 'buy', 0.50), (22038, 'GOOG', 5634, now(), 'buy', 2.50), (22039, 'GOOG', 5634, now(), 'buy', 1.50) RETURNING id; id ------- 22038 22039 22037 (3 rows) SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 22037 AND 22039; count ------- 3 (1 row) -- even those with functions in their partition columns INSERT INTO limit_orders VALUES (random() * 10 + 70000, 'GOOG', 5634, now(), 'buy', 0.50), (random() * 10 + 80000, 'GOOG', 5634, now(), 'buy', 2.50), (random() * 10 + 80090, 'GOOG', 5634, now(), 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 70000 AND 90000; count ------- 3 (1 row) -- Who says that? :) -- INSERT ... SELECT ... FROM commands are unsupported -- INSERT INTO limit_orders SELECT * FROM limit_orders; -- commands containing a CTE are unsupported WITH deleted_orders AS (DELETE FROM limit_orders RETURNING *) INSERT INTO limit_orders DEFAULT VALUES; ERROR: common table expressions are not supported in distributed modifications -- test simple DELETE INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; count ------- 1 (1 row) DELETE FROM limit_orders WHERE id = 246; SELECT COUNT(*) FROM limit_orders WHERE id = 246; count ------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders WHERE id = 430 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+----------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders WHERE id = 430; count ------- 0 (1 row) -- DELETE with expression in WHERE clause INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; count ------- 1 (1 row) DELETE FROM limit_orders WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders WHERE id = 246; count ------- 0 (1 row) -- commands with no constraints on the partition key are not supported DELETE FROM limit_orders WHERE bidder_id = 162; ERROR: cannot run DELETE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). 
-- commands with a USING clause are unsupported CREATE TABLE bidders ( name text, id bigint ); DELETE FROM limit_orders USING bidders WHERE limit_orders.id = 246 AND limit_orders.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; ERROR: cannot plan queries which include both local and distributed relations -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders DEFAULT VALUES RETURNING *) DELETE FROM limit_orders; ERROR: common table expressions are not supported in distributed modifications -- cursors are not supported DELETE FROM limit_orders WHERE CURRENT OF cursor_name; ERROR: cannot run DELETE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); -- simple UPDATE UPDATE limit_orders SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders WHERE id = 246; symbol -------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- expression UPDATE UPDATE limit_orders SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders WHERE id = 246; bidder_id ----------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- multi-column UPDATE UPDATE limit_orders SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders WHERE id = 246; kind | limit_price ------+------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) -- Test that on unique constraint violations, we fail fast INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); ERROR: duplicate key value violates unique constraint "limit_orders_pkey_750001" DETAIL: Key (id)=(275) already exists.
CONTEXT: while executing command on localhost:57638 -- Test that shards which miss a modification are marked unhealthy -- First: Connect to the second worker node \c - - - :worker_2_port -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port -- Fourth: Perform an INSERT on the remaining node INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); WARNING: relation "public.limit_orders_750000" does not exist CONTEXT: while executing command on localhost:57638 -- Last: Verify the insert worked but the deleted placement is now unhealthy SELECT count(*) FROM limit_orders WHERE id = 276; count ------- 1 (1 row) SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'limit_orders'::regclass; count ------- 1 (1 row) -- Test that if all shards miss a modification, no state change occurs -- First: Connect to the first worker node \c - - - :worker_1_port -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port -- Fourth: Perform an INSERT on the remaining node INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); WARNING: relation "public.limit_orders_750000" does not exist CONTEXT: while executing command on localhost:57637 ERROR: could not modify any active placements -- Last: Verify worker is still healthy SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_1_port AND sp.shardstate = 1 AND s.logicalrelid = 'limit_orders'::regclass; count ------- 2 (1 row) -- Undo our change... -- First: Connect to the first worker node \c - - - :worker_1_port -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- Third: Connect back to master node \c - - - :master_port -- commands with no constraints on the partition key are not supported UPDATE limit_orders SET limit_price = 0.00; ERROR: cannot run UPDATE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). 
-- attempting to change the partition key is unsupported UPDATE limit_orders SET id = 0 WHERE id = 246; ERROR: modifying the partition value of rows is not allowed UPDATE limit_orders SET id = 0 WHERE id = 0 OR id = 246; ERROR: modifying the partition value of rows is not allowed -- setting the partition column value to itself is allowed UPDATE limit_orders SET id = 246 WHERE id = 246; UPDATE limit_orders SET id = 246 WHERE id = 246 AND symbol = 'GM'; UPDATE limit_orders SET id = limit_orders.id WHERE id = 246; -- UPDATEs with a FROM clause are unsupported UPDATE limit_orders SET limit_price = 0.00 FROM bidders WHERE limit_orders.id = 246 AND limit_orders.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; ERROR: cannot plan queries which include both local and distributed relations -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders DEFAULT VALUES RETURNING *) UPDATE limit_orders SET symbol = 'GM'; ERROR: common table expressions are not supported in distributed modifications SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; symbol | bidder_id --------+----------- GM | 30 (1 row) -- updates referencing just a var are supported UPDATE limit_orders SET bidder_id = id WHERE id = 246; -- updates referencing a column are supported UPDATE limit_orders SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; symbol | bidder_id --------+----------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; id | lower | symbol -----+-------+-------- 246 | gm | GM (1 row) ALTER TABLE limit_orders ADD COLUMN array_of_values integer[]; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- updates referencing STABLE functions are allowed UPDATE limit_orders SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246; -- so are binary operators UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246; CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; \c - - - :worker_1_port CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; \c - - - :worker_2_port CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; \c - - - :master_port -- immutable function calls with vars are also allowed UPDATE limit_orders SET array_of_values = immutable_append(array_of_values, 2) WHERE id = 246; CREATE FUNCTION stable_append(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; -- but STABLE function calls with vars are not allowed UPDATE limit_orders SET array_of_values = stable_append(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders WHERE id = 246; array_of_values ----------------- {1,2} (1 row) -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 
2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT; UPDATE limit_orders SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint DETAIL: Failing row contains (246, GM, null, 2007-07-02 16:32:15, buy, 999, {1,2}). CONTEXT: while executing command on localhost:57637 SELECT array_of_values FROM limit_orders WHERE id = 246; array_of_values ----------------- {1,2} (1 row) ALTER TABLE limit_orders DROP array_of_values; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- even in RETURNING UPDATE limit_orders SET placed_at = placed_at WHERE id = 246 RETURNING NOW(); ERROR: non-IMMUTABLE functions are not allowed in the RETURNING clause -- cursors are not supported UPDATE limit_orders SET symbol = 'GM' WHERE CURRENT OF cursor_name; ERROR: cannot run UPDATE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). -- check that multi-row UPDATE/DELETEs with RETURNING work INSERT INTO multiple_hash VALUES ('0', '1'); INSERT INTO multiple_hash VALUES ('0', '2'); INSERT INTO multiple_hash VALUES ('0', '3'); INSERT INTO multiple_hash VALUES ('0', '4'); INSERT INTO multiple_hash VALUES ('0', '5'); INSERT INTO multiple_hash VALUES ('0', '6'); UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; category | data ----------+------ 0 | 1-1 0 | 2-1 0 | 3-1 0 | 4-1 0 | 5-1 0 | 6-1 (6 rows) DELETE FROM multiple_hash WHERE category = '0' RETURNING *; category | data ----------+------ 0 | 1-1 0 | 2-1 0 | 3-1 0 | 4-1 0 | 5-1 0 | 6-1 (6 rows) -- ensure returned row counters are correct \set QUIET off INSERT INTO multiple_hash VALUES ('1', '1'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('1', '2'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('1', '3'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '1'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '2'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash VALUES ('2', '3') RETURNING *; category | data ----------+------ 2 | 3 (1 row) INSERT 0 1 -- check that update return the right number of rows -- one row UPDATE multiple_hash SET data = data ||'-1' WHERE category = '1' AND data = '1'; UPDATE 1 -- three rows UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1'; UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING category; category ---------- 1 1 1 (3 rows) UPDATE 3 -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; category | data ----------+--------- 1 | 1-1-2-2 1 | 2-2-2 1 | 3-2-2 (3 rows) -- check that deletes return the right number of rows -- one row DELETE FROM multiple_hash WHERE category = '2' AND data = '1'; DELETE 1 -- two rows DELETE FROM multiple_hash WHERE category = '2'; DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash WHERE category = '1' RETURNING category; category ---------- 1 1 1 (3 rows) DELETE 3 -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; category | data ----------+------ (0 rows) SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; category | data ----------+------ (0 rows) -- verify interaction of default values, SERIAL, and RETURNING \set QUIET on CREATE 
TABLE app_analytics_events (id serial, app_id integer, name text); SELECT master_create_distributed_table('app_analytics_events', 'app_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('app_analytics_events', 4, 1); master_create_worker_shards ----------------------------- (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; id ---- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; id ---- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; id | app_id | name ----+--------+------ 3 | 103 | Mynt (1 row) DROP TABLE app_analytics_events; -- again with serial in the partition column CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'id'); create_distributed_table -------------------------- (1 row) INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; id ---- 1 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; id ---- 2 (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; id | app_id | name ----+--------+------ 3 | 103 | Mynt (1 row) -- Test multi-row insert with serial in the partition column INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; id | app_id | name ----+--------+------ 5 | 105 | Mynt 4 | 104 | Wayz (2 rows) INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, 'Foo'), (300, 'Wah') RETURNING *; id | app_id | name -----+--------+------ 6 | | Foo 300 | | Wah (2 rows) PREPARE prep(varchar) AS INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, $1 || '.1'), (400 , $1 || '.2') RETURNING *; EXECUTE prep('version-1'); id | app_id | name -----+--------+------------- 7 | | version-1.1 400 | | version-1.2 (2 rows) EXECUTE prep('version-2'); id | app_id | name -----+--------+------------- 8 | | version-2.1 400 | | version-2.2 (2 rows) EXECUTE prep('version-3'); id | app_id | name -----+--------+------------- 400 | | version-3.2 9 | | version-3.1 (2 rows) EXECUTE prep('version-4'); id | app_id | name -----+--------+------------- 10 | | version-4.1 400 | | version-4.2 (2 rows) EXECUTE prep('version-5'); id | app_id | name -----+--------+------------- 400 | | version-5.2 11 | | version-5.1 (2 rows) EXECUTE prep('version-6'); id | app_id | name -----+--------+------------- 400 | | version-6.2 12 | | version-6.1 (2 rows) SELECT * FROM app_analytics_events ORDER BY id, name; id | app_id | name -----+--------+----------------- 1 | 101 | Fauxkemon Geaux 2 | 102 | Wayz 3 | 103 | Mynt 4 | 104 | Wayz 5 | 105 | Mynt 6 | | Foo 7 | | version-1.1 8 | | version-2.1 9 | | version-3.1 10 | | version-4.1 11 | | version-5.1 12 | | version-6.1 300 | | Wah 400 | | version-1.2 400 | | version-2.2 400 | | version-3.2 400 | | version-4.2 400 | | version-5.2 400 | | version-6.2 (19 rows) TRUNCATE app_analytics_events; -- Test multi-row insert with a dropped column ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; id | name ----+------ 14 | Mynt 13 | Wayz (2 rows) SELECT * FROM app_analytics_events ORDER BY id; id | name ----+------ 13 | Wayz 14 | Mynt (2 rows) DROP TABLE app_analytics_events; -- Test multi-row insert with a dropped column before the partition column CREATE TABLE 
app_analytics_events (id int default 3, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'name'); create_distributed_table -------------------------- (1 row) ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; id | name ----+------ 3 | Mynt 3 | Wayz (2 rows) SELECT * FROM app_analytics_events WHERE name = 'Wayz'; id | name ----+------ 3 | Wayz (1 row) DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a reference table CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_reference_table('app_analytics_events'); create_reference_table ------------------------ (1 row) INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; id | app_id | name ----+--------+------ 1 | 104 | Wayz 2 | 105 | Mynt (2 rows) SELECT * FROM app_analytics_events ORDER BY id; id | app_id | name ----+--------+------ 1 | 104 | Wayz 2 | 105 | Mynt (2 rows) DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a non-partition column CREATE TABLE app_analytics_events (id int, app_id serial, name text); SELECT create_distributed_table('app_analytics_events', 'id'); create_distributed_table -------------------------- (1 row) INSERT INTO app_analytics_events (id, name) VALUES (99, 'Wayz'), (98, 'Mynt') RETURNING name, app_id; name | app_id ------+-------- Mynt | 2 Wayz | 1 (2 rows) SELECT * FROM app_analytics_events ORDER BY id; id | app_id | name ----+--------+------ 98 | 2 | Mynt 99 | 1 | Wayz (2 rows) DROP TABLE app_analytics_events; -- test UPDATE with subqueries CREATE TABLE raw_table (id bigint, value bigint); CREATE TABLE summary_table ( id bigint, min_value numeric, average_value numeric, count int, uniques int); SELECT create_distributed_table('raw_table', 'id'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('summary_table', 'id'); create_distributed_table -------------------------- (1 row) INSERT INTO raw_table VALUES (1, 100); INSERT INTO raw_table VALUES (1, 200); INSERT INTO raw_table VALUES (1, 200); INSERT INTO raw_table VALUES (1, 300); INSERT INTO raw_table VALUES (2, 400); INSERT INTO raw_table VALUES (2, 500); INSERT INTO summary_table VALUES (1); INSERT INTO summary_table VALUES (2); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+---------------+-------+--------- 1 | | | | 2 | | | | (2 rows) UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | | 200.0000000000000000 | | 2 | | | | (2 rows) -- try different syntax UPDATE summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) UPDATE summary_table SET min_value = 100 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value > 100) AND id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 
100 | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) -- indeed, we don't need filter on UPDATE explicitly if SELECT already prunes to one shard UPDATE summary_table SET uniques = 2 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value IN (100, 200)); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | 200.0000000000000000 | | 2 2 | 400 | 450.0000000000000000 | | (2 rows) -- use inner results for non-partition column UPDATE summary_table SET uniques = NULL WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) -- these should not update anything UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 4 ) average_query WHERE id = 1 AND id = 4; UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1 AND id = 4; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) -- update with NULL value UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 4 ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | | | 2 | 400 | 450.0000000000000000 | | (2 rows) -- unsupported multi-shard updates UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table) average_query; ERROR: cannot run UPDATE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). UPDATE summary_table SET average_value = average_value + 1 WHERE id = (SELECT id FROM raw_table WHERE value > 100); ERROR: cannot run UPDATE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). 
-- test complex queries UPDATE summary_table SET uniques = metrics.expensive_uniques, count = metrics.total_count FROM (SELECT id, count(DISTINCT (CASE WHEN value > 100 then value end)) AS expensive_uniques, count(value) AS total_count FROM raw_table WHERE id = 1 GROUP BY id) metrics WHERE summary_table.id = metrics.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | | 4 | 2 2 | 400 | 450.0000000000000000 | | (2 rows) -- test joins UPDATE summary_table SET count = count + 1 FROM raw_table WHERE raw_table.id = summary_table.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | | 5 | 2 2 | 400 | 450.0000000000000000 | | (2 rows) -- test with prepared statements PREPARE prepared_update_with_subquery(int, int) AS UPDATE summary_table SET count = count + $1 FROM raw_table WHERE raw_table.id = summary_table.id AND summary_table.id = $2; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | | 65 | 2 2 | 400 | 450.0000000000000000 | | (2 rows) -- test with reference tables CREATE TABLE reference_raw_table (id bigint, value bigint); CREATE TABLE reference_summary_table ( id bigint, min_value numeric, average_value numeric, count int, uniques int); SELECT create_reference_table('reference_raw_table'); create_reference_table ------------------------ (1 row) SELECT create_reference_table('reference_summary_table'); create_reference_table ------------------------ (1 row) INSERT INTO reference_raw_table VALUES (1, 100); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *; id | value ----+------- 1 | 300 2 | 400 2 | 500 (3 rows) INSERT INTO reference_summary_table VALUES (1); INSERT INTO reference_summary_table VALUES (2); SELECT * FROM reference_summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+---------------+-------+--------- 1 | | | | 2 | | | | (2 rows) UPDATE reference_summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM reference_raw_table WHERE id = 1 ) average_query WHERE id = 1; UPDATE reference_summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM reference_raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM reference_summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | | (2 rows) -- no need for partition column equalities on reference tables UPDATE reference_summary_table SET (count) = (SELECT id AS inner_id FROM reference_raw_table WHERE value = 500) WHERE min_value = 400; SELECT * FROM reference_summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1
| | 200.0000000000000000 | | 2 | 400 | 450.0000000000000000 | 2 | (2 rows) -- can read from a reference table and update a distributed table UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM reference_raw_table WHERE id = 1 ) average_query WHERE id = 1; -- cannot read from a distributed table and update a reference table UPDATE reference_summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1; ERROR: cannot perform select on a distributed table and modify a reference table UPDATE reference_summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 2 ) average_query WHERE id = 1; ERROR: cannot perform select on a distributed table and modify a reference table -- test master_modify_multiple_shards() with subqueries and expect to fail SELECT master_modify_multiple_shards(' UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1'); ERROR: cannot perform distributed planning for the given modifications DETAIL: Subqueries are not supported in distributed modifications. -- test connection API via using COPY -- COPY on SELECT part BEGIN; \COPY raw_table FROM STDIN WITH CSV INSERT INTO summary_table VALUES (3); UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 3 ) average_query WHERE id = 3; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | (3 rows) -- COPY on UPDATE part BEGIN; INSERT INTO raw_table VALUES (4, 100); INSERT INTO raw_table VALUES (4, 200); \COPY summary_table FROM STDIN WITH CSV UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 4 ) average_query WHERE id = 4; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | (4 rows) -- COPY on both part BEGIN; \COPY raw_table FROM STDIN WITH CSV \COPY summary_table FROM STDIN WITH CSV UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 5 ) average_query WHERE id = 5; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | (5 rows) -- COPY on reference tables BEGIN; \COPY reference_raw_table FROM STDIN WITH CSV \COPY summary_table FROM STDIN WITH CSV UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM reference_raw_table WHERE id = 6 ) average_query WHERE id = 6; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 1 | 100 | 200.0000000000000000 | 65 | 2 2 | 
400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | 6 | | 150.0000000000000000 | | (6 rows) -- test DELETE queries SELECT * FROM raw_table ORDER BY id, value; id | value ----+------- 1 | 100 1 | 200 1 | 200 1 | 300 2 | 400 2 | 500 3 | 100 3 | 200 4 | 100 4 | 200 5 | 100 5 | 200 (12 rows) DELETE FROM summary_table WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 2 | 400 | 450.0000000000000000 | | 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | 6 | | 150.0000000000000000 | | (5 rows) -- test with different syntax DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = 2; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | 6 | | 150.0000000000000000 | | (4 rows) -- cannot read from a distributed table and delete from a reference table DELETE FROM reference_summary_table USING raw_table WHERE reference_summary_table.id = raw_table.id AND raw_table.id = 3; ERROR: cannot perform select on a distributed table and modify a reference table SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | 6 | | 150.0000000000000000 | | (4 rows) -- test connection API via using COPY with DELETEs BEGIN; \COPY summary_table FROM STDIN WITH CSV DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = 1; DELETE FROM summary_table USING reference_raw_table WHERE summary_table.id = reference_raw_table.id AND reference_raw_table.id = 2; COMMIT; SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+----------------------+-------+--------- 3 | | 150.0000000000000000 | | 4 | | 150.0000000000000000 | | 5 | | 150.0000000000000000 | | 6 | | 150.0000000000000000 | | (4 rows) -- test DELETEs with prepared statements PREPARE prepared_delete_with_join(int) AS DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = $1; INSERT INTO raw_table VALUES (6, 100); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_delete_with_join(1); EXECUTE prepared_delete_with_join(2); EXECUTE prepared_delete_with_join(3); EXECUTE prepared_delete_with_join(4); EXECUTE prepared_delete_with_join(5); EXECUTE prepared_delete_with_join(6); SELECT * FROM summary_table ORDER BY id; id | min_value | average_value | count | uniques ----+-----------+---------------+-------+--------- (0 rows) DROP TABLE raw_table; DROP TABLE summary_table; DROP TABLE reference_raw_table; DROP TABLE reference_summary_table; citus-7.0.3/src/test/regress/expected/multi_modifying_xacts.out000066400000000000000000001322541317107136600250370ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1200000; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1200000; -- =================================================================== -- test end-to-end modification functionality -- 
=================================================================== CREATE TABLE researchers ( id bigint NOT NULL, lab_id int NOT NULL, name text NOT NULL ); CREATE TABLE labs ( id bigint NOT NULL, name text NOT NULL ); SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('researchers', 2, 2); master_create_worker_shards ----------------------------- (1 row) SELECT master_create_distributed_table('labs', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('labs', 1, 1); master_create_worker_shards ----------------------------- (1 row) -- might be confusing to have two people in the same lab with the same name CREATE UNIQUE INDEX avoid_name_confusion_idx ON researchers (lab_id, name); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- add some data INSERT INTO researchers VALUES (1, 1, 'Donald Knuth'); INSERT INTO researchers VALUES (2, 1, 'Niklaus Wirth'); INSERT INTO researchers VALUES (3, 2, 'Tony Hoare'); INSERT INTO researchers VALUES (4, 2, 'Kenneth Iverson'); -- replace a researcher, reusing their id in a multi-row INSERT BEGIN; DELETE FROM researchers WHERE lab_id = 1 AND id = 2; INSERT INTO researchers VALUES (2, 1, 'John Backus'), (12, 1, 'Frances E. Allen'); COMMIT; SELECT name FROM researchers WHERE lab_id = 1 AND id % 10 = 2; name ------------------ John Backus Frances E. Allen (2 rows) -- and the other way around BEGIN; INSERT INTO researchers VALUES (14, 2, 'Alan Kay'), (15, 2, 'Barbara Liskov'); DELETE FROM researchers WHERE id = 14 AND lab_id = 2; ROLLBACK; -- should have rolled everything back SELECT * FROM researchers WHERE id = 15 AND lab_id = 2; id | lab_id | name ----+--------+------ (0 rows) -- abort a modification BEGIN; DELETE FROM researchers WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers WHERE lab_id = 1 AND id = 1; name -------------- Donald Knuth (1 row) -- trigger a unique constraint violation BEGIN; UPDATE researchers SET name = 'John Backus' WHERE id = 1 AND lab_id = 1; ERROR: duplicate key value violates unique constraint "avoid_name_confusion_idx_1200000" DETAIL: Key (lab_id, name)=(1, John Backus) already exists. CONTEXT: while executing command on localhost:57637 ABORT; -- creating savepoints should work... BEGIN; INSERT INTO researchers VALUES (5, 3, 'Dennis Ritchie'); SAVEPOINT hire_thompson; INSERT INTO researchers VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers WHERE lab_id = 3 AND id = 6; name -------------- Ken Thompson (1 row) -- even if created by PL/pgSQL... 
\set VERBOSITY terse BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (10, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; COMMIT; -- rollback should also work BEGIN; INSERT INTO researchers VALUES (7, 4, 'Jim Gray'); SAVEPOINT hire_engelbart; INSERT INTO researchers VALUES (8, 4, 'Douglas Engelbart'); ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers WHERE lab_id = 4; name ---------- Jim Gray (1 row) BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (11, 11, 'Whitfield Diffie'); INSERT INTO researchers VALUES (NULL, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; NOTICE: caught not_null_violation COMMIT; \set VERBOSITY default -- should be valid to edit labs after researchers... BEGIN; INSERT INTO researchers VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id; id | lab_id | name | id | name ----+--------+-------------------+----+------------ 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) -- and the other way around is also allowed BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); INSERT INTO researchers VALUES (9, 6, 'Leslie Lamport'); COMMIT; -- we should be able to expand the transaction participants BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); INSERT INTO researchers VALUES (9, 6, 'Leslie Lamport'); ERROR: duplicate key value violates unique constraint "avoid_name_confusion_idx_1200001" DETAIL: Key (lab_id, name)=(6, Leslie Lamport) already exists. CONTEXT: while executing command on localhost:57638 ABORT; -- SELECTs may occur after a modification: First check that selecting -- from the modified node works. 
BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; count ------- 1 (1 row) ABORT; -- then check that SELECT going to new node still is fine BEGIN; UPDATE pg_dist_shard_placement AS sp SET shardstate = 3 FROM pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_1_port AND s.logicalrelid = 'researchers'::regclass; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; count ------- 1 (1 row) ABORT; -- we can mix DDL and INSERT BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); ALTER TABLE labs ADD COLUMN motto text; ABORT; -- whether it occurs first or second BEGIN; ALTER TABLE labs ADD COLUMN motto text; INSERT INTO labs VALUES (6, 'Bell Labs'); ABORT; -- but the DDL should correctly roll back SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.labs'::regclass; Column | Type | Modifiers --------+--------+----------- id | bigint | not null name | text | not null (2 rows) SELECT * FROM labs WHERE id = 6; id | name ----+----------- 6 | Bell Labs (1 row) -- COPY can happen after single row INSERT BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); \copy labs from stdin delimiter ',' COMMIT; -- COPY cannot be performed if multiple shards were modified over the same connection BEGIN; INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'); INSERT INTO researchers VALUES (10, 6, 'Lamport Leslie'); \copy researchers from stdin delimiter ',' ERROR: cannot establish a new connection for placement 1200003, since DML has been executed on a connection that is in use CONTEXT: COPY researchers, line 2: "10,6,Lesport Lampie" ROLLBACK; -- COPY cannot be performed after a multi-row INSERT that uses one connection BEGIN; INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'), (10, 6, 'Lamport Leslie'); \copy researchers from stdin delimiter ',' ERROR: cannot establish a new connection for placement 1200003, since DML has been executed on a connection that is in use CONTEXT: COPY researchers, line 2: "10,6,Lesport Lampie" ROLLBACK; -- after a COPY you can modify multiple shards, since they'll use different connections BEGIN; \copy researchers from stdin delimiter ',' INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'); INSERT INTO researchers VALUES (10, 6, 'Lamport Leslie'); ROLLBACK; -- after a COPY you can perform a multi-row INSERT BEGIN; \copy researchers from stdin delimiter ',' INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'), (10, 6, 'Lamport Leslie'); ROLLBACK; -- COPY can happen before single row INSERT BEGIN; \copy labs from stdin delimiter ',' SELECT name FROM labs WHERE id = 10; name ---------------- Weyland-Yutani Weyland-Yutani (2 rows) INSERT INTO labs VALUES (6, 'Bell Labs'); COMMIT; -- two consecutive COPYs in a transaction are allowed BEGIN; \copy labs from stdin delimiter ',' \copy labs from stdin delimiter ',' COMMIT; SELECT name FROM labs WHERE id = 11 OR id = 12 ORDER BY id; name ---------------- Planet Express fsociety (2 rows) -- 1pc failure test SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- copy with unique index violation BEGIN; \copy researchers FROM STDIN delimiter ',' \copy researchers FROM STDIN delimiter ',' ERROR: duplicate key value violates unique constraint "avoid_name_confusion_idx_1200001" DETAIL: Key (lab_id, name)=(6, 'Bjarne Stroustrup') already exists. 
COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name ----+--------+---------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) -- 2pc failure and success tests SET citus.multi_shard_commit_protocol TO '2pc'; SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- copy with unique index violation BEGIN; \copy researchers FROM STDIN delimiter ',' \copy researchers FROM STDIN delimiter ',' ERROR: duplicate key value violates unique constraint "avoid_name_confusion_idx_1200001" DETAIL: Key (lab_id, name)=(6, 'Bjarne Stroustrup') already exists. COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name ----+--------+---------------- 9 | 6 | Leslie Lamport (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) BEGIN; \copy researchers FROM STDIN delimiter ',' \copy researchers FROM STDIN delimiter ',' COMMIT; -- verify success SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name ----+--------+---------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' 18 | 6 | 'Dennis Ritchie' (3 rows) -- verify 2pc SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) RESET citus.multi_shard_commit_protocol; -- create a check function SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ BEGIN IF (NEW.id > 30) THEN RAISE ''illegal value''; END IF; RETURN NEW; END; $rli$ LANGUAGE plpgsql;') ORDER BY nodeport; nodename | nodeport | success | result -----------+----------+---------+----------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION (2 rows) -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') ORDER BY nodeport, shardid; nodename | nodeport | shardid | success | result -----------+----------+---------+---------+---------------- localhost | 57637 | 1200000 | t | CREATE TRIGGER localhost | 57637 | 1200001 | t | CREATE TRIGGER localhost | 57638 | 1200000 | t | CREATE TRIGGER localhost | 57638 | 1200001 | t | CREATE TRIGGER (4 rows) -- hide postgresql version dependent messages for next test only \set VERBOSITY terse -- deferred check should abort the transaction BEGIN; DELETE FROM researchers WHERE lab_id = 6; \copy researchers FROM STDIN delimiter ',' \copy researchers FROM STDIN delimiter ',' COMMIT; WARNING: illegal value WARNING: failed to commit critical transaction on localhost:57638, metadata is likely out of sync WARNING: illegal value WARNING: failed to commit critical transaction on localhost:57637, metadata is likely out of sync WARNING: could not commit transaction for shard 1200001 on any active node ERROR: could not commit transaction on any active node \unset VERBOSITY -- verify everything including delete is rolled back SELECT * FROM researchers WHERE lab_id = 6; id | lab_id | name ----+--------+---------------------- 9 | 6 | Leslie Lamport 17 | 6 | 'Bjarne Stroustrup' 18 | 6 | 'Dennis Ritchie' (3 rows) -- cleanup triggers and the function SELECT * from run_command_on_placements('researchers', 'drop trigger reject_large_researcher_id on %s') ORDER BY nodeport, shardid; nodename | nodeport | shardid | success | result -----------+----------+---------+---------+-------------- localhost | 57637 |
1200000 | t | DROP TRIGGER localhost | 57637 | 1200001 | t | DROP TRIGGER localhost | 57638 | 1200000 | t | DROP TRIGGER localhost | 57638 | 1200001 | t | DROP TRIGGER (4 rows) SELECT * FROM run_command_on_workers('drop function reject_large_id()') ORDER BY nodeport; nodename | nodeport | success | result -----------+----------+---------+--------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION (2 rows) -- ALTER and copy are compatible BEGIN; ALTER TABLE labs ADD COLUMN motto text; \copy labs from stdin delimiter ',' ROLLBACK; BEGIN; \copy labs from stdin delimiter ',' ALTER TABLE labs ADD COLUMN motto text; ABORT; -- cannot perform DDL once a connection is used for multiple shards BEGIN; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; lab_id -------- (0 rows) SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; lab_id -------- (0 rows) ALTER TABLE researchers ADD COLUMN motto text; ERROR: cannot perform a parallel DDL command because multiple placements have been accessed over the same connection ROLLBACK; -- multi-shard operations can co-exist with DDL in a transactional way BEGIN; ALTER TABLE labs ADD COLUMN motto text; SELECT master_modify_multiple_shards('DELETE FROM labs'); master_modify_multiple_shards ------------------------------- 8 (1 row) ALTER TABLE labs ADD COLUMN score float; ROLLBACK; -- should have rolled everything back SELECT * FROM labs WHERE id = 12; id | name ----+---------- 12 | fsociety (1 row) -- now, for some special failures... CREATE TABLE objects ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT master_create_distributed_table('objects', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('objects', 1, 2); master_create_worker_shards ----------------------------- (1 row) -- test primary key violations BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_pkey_1200003" DETAIL: Key (id)=(1) already exists. CONTEXT: while executing command on localhost:57637 COMMIT; -- data shouldn't have persisted... SELECT * FROM objects WHERE id = 1; id | name ----+------ (0 rows) -- and placements should still be healthy... 
SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND s.logicalrelid = 'objects'::regclass; count ------- 2 (1 row) -- create trigger on one worker to reject certain values \c - - - :worker_2_port CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port -- test partial failure; worker_1 succeeds, 2 fails \set VERBOSITY terse BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); WARNING: illegal value INSERT INTO labs VALUES (7, 'E Corp'); COMMIT; -- data should be persisted SELECT * FROM objects WHERE id = 2; id | name ----+------ 2 | BAD (1 row) SELECT * FROM labs WHERE id = 7; id | name ----+-------- 7 | E Corp (1 row) -- but one placement should be bad SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; count ------- 1 (1 row) DELETE FROM objects; -- mark shards as healthy again; delete all data UPDATE pg_dist_shard_placement AS sp SET shardstate = 1 FROM pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'objects'::regclass; -- what if there are errors on different shards at different times? \c - - - :worker_1_port CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); WARNING: illegal value INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); WARNING: illegal value ERROR: could not modify any active placements COMMIT; -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; id | name ----+------ (0 rows) SELECT * FROM labs WHERE id = 8; id | name ----+------ (0 rows) -- all placements should remain healthy SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); count ------- 3 (1 row) -- what if the failures happen at COMMIT time? 
\c - - - :worker_2_port DROP TRIGGER reject_bad ON objects_1200003; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port -- should be the same story as before, just at COMMIT time BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value WARNING: failed to commit transaction on localhost:57638 -- data should be persisted SELECT * FROM objects WHERE id = 2; id | name ----+------ 2 | BAD (1 row) SELECT * FROM labs WHERE id = 7; id | name ----+-------- 7 | E Corp (1 row) -- but one placement should be bad SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; count ------- 1 (1 row) DELETE FROM objects; -- mark shards as healthy again; delete all data UPDATE pg_dist_shard_placement AS sp SET shardstate = 1 FROM pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'objects'::regclass; -- what if all nodes have failures at COMMIT time? \c - - - :worker_1_port DROP TRIGGER reject_bad ON labs_1200002; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value WARNING: failed to commit transaction on localhost:57637 WARNING: illegal value WARNING: failed to commit transaction on localhost:57638 WARNING: could not commit transaction for shard 1200002 on any active node WARNING: could not commit transaction for shard 1200003 on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; id | name ----+------ (0 rows) SELECT * FROM labs WHERE id = 8; id | name ----+------ (0 rows) -- all placements should remain healthy SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); count ------- 3 (1 row) -- what if one shard (objects) succeeds but another (labs) completely fails? \c - - - :worker_2_port DROP TRIGGER reject_bad ON objects_1200003; \c - - - :master_port BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; WARNING: illegal value WARNING: failed to commit transaction on localhost:57637 WARNING: could not commit transaction for shard 1200002 on any active node \set VERBOSITY default -- data to objects should be persisted, but labs should not... 
SELECT * FROM objects WHERE id = 1; id | name ----+------- 1 | apple (1 row) SELECT * FROM labs WHERE id = 8; id | name ----+------ (0 rows) -- labs should be healthy, but one object placement shouldn't be SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count --------------+------------+------- labs | 1 | 1 objects | 1 | 1 objects | 3 | 1 (3 rows) -- some append-partitioned tests for good measure CREATE TABLE append_researchers ( LIKE researchers ); SELECT master_create_distributed_table('append_researchers', 'id', 'append'); master_create_distributed_table --------------------------------- (1 row) SET citus.shard_replication_factor TO 1; SELECT master_create_empty_shard('append_researchers') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('append_researchers') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000 WHERE shardid = :new_shard_id; SET citus.shard_replication_factor TO DEFAULT; -- try single-shard INSERT BEGIN; INSERT INTO append_researchers VALUES (0, 0, 'John Backus'); COMMIT; SELECT * FROM append_researchers WHERE id = 0; id | lab_id | name ----+--------+------------- 0 | 0 | John Backus (1 row) -- try rollback BEGIN; DELETE FROM append_researchers WHERE id = 0; ROLLBACK; SELECT * FROM append_researchers WHERE id = 0; id | lab_id | name ----+--------+------------- 0 | 0 | John Backus (1 row) -- try hitting shard on other node BEGIN; INSERT INTO append_researchers VALUES (1, 1, 'John McCarthy'); INSERT INTO append_researchers VALUES (500000, 500000, 'Tony Hoare'); ERROR: cannot run INSERT command which targets multiple shards HINT: Make sure the value for partition column "id" falls into a single shard. 
ROLLBACK; SELECT * FROM append_researchers; id | lab_id | name ----+--------+------------- 0 | 0 | John Backus (1 row) -- we use 2PC for reference tables by default -- let's add some tests for them CREATE TABLE reference_modifying_xacts (key int, value int); SELECT create_reference_table('reference_modifying_xacts'); create_reference_table ------------------------ (1 row) -- very basic test, ensure that INSERTs work INSERT INTO reference_modifying_xacts VALUES (1, 1); SELECT * FROM reference_modifying_xacts; key | value -----+------- 1 | 1 (1 row) -- now ensure that it works in a transaction as well BEGIN; INSERT INTO reference_modifying_xacts VALUES (2, 2); SELECT * FROM reference_modifying_xacts; key | value -----+------- 1 | 1 2 | 2 (2 rows) COMMIT; -- we should be able to see the insert outside of the transaction as well SELECT * FROM reference_modifying_xacts; key | value -----+------- 1 | 1 2 | 2 (2 rows) -- rollback should also work BEGIN; INSERT INTO reference_modifying_xacts VALUES (3, 3); SELECT * FROM reference_modifying_xacts; key | value -----+------- 1 | 1 2 | 2 3 | 3 (3 rows) ROLLBACK; -- see that we've not inserted SELECT * FROM reference_modifying_xacts; key | value -----+------- 1 | 1 2 | 2 (2 rows) -- let's fail one of the workers before the commit time \c - - - :worker_1_port CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 999) THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference(); \c - - - :master_port \set VERBOSITY terse -- try without wrapping inside a transaction INSERT INTO reference_modifying_xacts VALUES (999, 3); ERROR: illegal value -- same test within a transaction BEGIN; INSERT INTO reference_modifying_xacts VALUES (999, 3); ERROR: illegal value COMMIT; -- let's fail one of the workers at COMMIT time \c - - - :worker_1_port DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference(); \c - - - :master_port \set VERBOSITY terse -- try without wrapping inside a transaction INSERT INTO reference_modifying_xacts VALUES (999, 3); WARNING: illegal value ERROR: failure on connection marked as essential: localhost:57637 -- same test within a transaction BEGIN; INSERT INTO reference_modifying_xacts VALUES (999, 3); COMMIT; WARNING: illegal value ERROR: failure on connection marked as essential: localhost:57637 -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_modifying_xacts'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count ---------------------------+------------+------- reference_modifying_xacts | 1 | 2 (1 row) -- for the time-being drop the constraint \c - - - :worker_1_port DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006; \c - - - :master_port -- now create a hash distributed table and run tests -- including both the reference table and the hash -- distributed table SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE hash_modifying_xacts (key int, value
int); SELECT create_distributed_table('hash_modifying_xacts', 'key'); create_distributed_table -------------------------- (1 row) -- let's try to expand the xact participants BEGIN; INSERT INTO hash_modifying_xacts VALUES (1, 1); INSERT INTO reference_modifying_xacts VALUES (10, 10); COMMIT; -- it is allowed when turning off deadlock prevention BEGIN; INSERT INTO hash_modifying_xacts VALUES (1, 1); INSERT INTO reference_modifying_xacts VALUES (10, 10); ABORT; BEGIN; INSERT INTO hash_modifying_xacts VALUES (1, 1); INSERT INTO hash_modifying_xacts VALUES (2, 2); ABORT; -- let's fail one of the workers before COMMIT time for the hash table \c - - - :worker_1_port CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 997) THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_hash(); \c - - - :master_port \set VERBOSITY terse -- the transaction as a whole should fail BEGIN; INSERT INTO reference_modifying_xacts VALUES (55, 10); INSERT INTO hash_modifying_xacts VALUES (997, 1); WARNING: illegal value ERROR: could not modify any active placements COMMIT; -- ensure that the value didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 55; key | value -----+------- (0 rows) -- now let's fail one of the workers for the hash distributed table -- when there is a reference table involved \c - - - :worker_1_port DROP TRIGGER reject_bad_hash ON hash_modifying_xacts_1200007; -- the trigger is on execution time CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_hash(); \c - - - :master_port \set VERBOSITY terse -- the transaction as a whole should fail BEGIN; INSERT INTO reference_modifying_xacts VALUES (12, 12); INSERT INTO hash_modifying_xacts VALUES (997, 1); COMMIT; WARNING: illegal value ERROR: failure on connection marked as essential: localhost:57637 -- ensure that the values didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 12; key | value -----+------- (0 rows) -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count ---------------------------+------------+------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 (2 rows) -- now, fail the insert on reference table -- and ensure that hash distributed table's -- change is rolled back as well \c - - - :worker_1_port CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference(); \c - - - :master_port \set VERBOSITY terse BEGIN; -- to expand participants to include all worker nodes INSERT INTO reference_modifying_xacts VALUES (66, 3); INSERT INTO hash_modifying_xacts VALUES (80, 1); INSERT INTO reference_modifying_xacts VALUES (999, 3); ERROR: illegal value COMMIT; SELECT * FROM hash_modifying_xacts WHERE key = 80; key | value -----+------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key
= 66; key | value -----+------- (0 rows) SELECT * FROM reference_modifying_xacts WHERE key = 999; key | value -----+------- (0 rows) -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count ---------------------------+------------+------- reference_modifying_xacts | 1 | 2 hash_modifying_xacts | 1 | 4 (2 rows) -- now show that all modifications to reference -- tables are done in 2PC SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) INSERT INTO reference_modifying_xacts VALUES (70, 70); SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) BEGIN; INSERT INTO reference_modifying_xacts VALUES (71, 71); COMMIT; SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) -- create a hash distributed table which spans all nodes SET citus.shard_count = 4; SET citus.shard_replication_factor = 2; CREATE TABLE hash_modifying_xacts_second (key int, value int); SELECT create_distributed_table('hash_modifying_xacts_second', 'key'); create_distributed_table -------------------------- (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) BEGIN; INSERT INTO hash_modifying_xacts_second VALUES (72, 1); INSERT INTO reference_modifying_xacts VALUES (72, 3); COMMIT; SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) DELETE FROM reference_modifying_xacts; SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) -- reset the transactions table SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) UPDATE reference_modifying_xacts SET key = 10; SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) -- now on to one more type of failure testing -- in which we'll make the remote host unavailable -- first create the new user on all nodes CREATE USER test_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes \c - - - :worker_1_port CREATE USER test_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes \c - - - :worker_2_port CREATE USER test_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -- now connect back to the master with the new user \c - test_user - :master_port CREATE TABLE reference_failure_test (key int, value int); SELECT create_reference_table('reference_failure_test'); create_reference_table ------------------------ (1 row) -- create a hash distributed table SET citus.shard_count TO 4; CREATE TABLE numbers_hash_failure_test(key int, value int); SELECT create_distributed_table('numbers_hash_failure_test', 'key'); create_distributed_table -------------------------- (1 row) -- ensure that the shard is created for this user \c - test_user - :worker_1_port \dt reference_failure_test_1200015 List of relations Schema | Name | Type | Owner
--------+--------------------------------+-------+----------- public | reference_failure_test_1200015 | table | test_user (1 row) -- now connect with the default user, -- and rename the existing user \c - :default_user - :worker_1_port ALTER USER test_user RENAME TO test_user_new; -- connect back to master and query the reference table \c - test_user - :master_port -- should fail since the worker doesn't have test_user anymore INSERT INTO reference_failure_test VALUES (1, '1'); WARNING: connection error: localhost:57637 ERROR: failure on connection marked as essential: localhost:57637 -- the same as the above, but wrapped within a transaction BEGIN; INSERT INTO reference_failure_test VALUES (1, '1'); WARNING: connection error: localhost:57637 ERROR: failure on connection marked as essential: localhost:57637 COMMIT; BEGIN; COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv'); ERROR: connection error: localhost:57637 COMMIT; -- show that no data goes through the table and shard states are good SELECT * FROM reference_failure_test; key | value -----+------- (0 rows) -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_failure_test'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; logicalrelid | shardstate | count ------------------------+------------+------- reference_failure_test | 1 | 2 (1 row) BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); WARNING: connection error: localhost:57637 WARNING: connection error: localhost:57637 -- some placements are invalid before abort SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 1200017 | 1 | localhost | 57638 1200018 | 1 | localhost | 57637 1200018 | 1 | localhost | 57638 1200019 | 3 | localhost | 57637 1200019 | 1 | localhost | 57638 (8 rows) ABORT; -- verify nothing is inserted SELECT count(*) FROM numbers_hash_failure_test; WARNING: connection error: localhost:57637 WARNING: connection error: localhost:57637 count ------- 0 (1 row) -- expect all placements to be marked valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1200016 | 1 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 1200017 | 1 | localhost | 57638 1200018 | 1 | localhost | 57637 1200018 | 1 | localhost | 57638 1200019 | 1 | localhost | 57637 1200019 | 1 | localhost | 57638 (8 rows) BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); WARNING: connection error: localhost:57637 WARNING: connection error: localhost:57637 -- check shard states before commit SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1200016 | 3 |
localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 1200017 | 1 | localhost | 57638 1200018 | 1 | localhost | 57637 1200018 | 1 | localhost | 57638 1200019 | 3 | localhost | 57637 1200019 | 1 | localhost | 57638 (8 rows) COMMIT; -- expect some placements to be marked invalid after commit SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1200016 | 3 | localhost | 57637 1200016 | 1 | localhost | 57638 1200017 | 1 | localhost | 57637 1200017 | 1 | localhost | 57638 1200018 | 1 | localhost | 57637 1200018 | 1 | localhost | 57638 1200019 | 3 | localhost | 57637 1200019 | 1 | localhost | 57638 (8 rows) -- verify data is inserted SELECT count(*) FROM numbers_hash_failure_test; WARNING: connection error: localhost:57637 WARNING: connection error: localhost:57637 count ------- 2 (1 row) -- connect back to the worker and rename the test_user back \c - :default_user - :worker_1_port ALTER USER test_user_new RENAME TO test_user; -- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts_second, reference_failure_test, numbers_hash_failure_test; SELECT * FROM run_command_on_workers('DROP USER test_user'); nodename | nodeport | success | result -----------+----------+---------+----------- localhost | 57637 | t | DROP ROLE localhost | 57638 | t | DROP ROLE (2 rows) DROP USER test_user; -- set up foreign keys to test transactions with co-located and reference tables BEGIN; SET LOCAL citus.shard_replication_factor TO 1; SET LOCAL citus.shard_count TO 4; CREATE TABLE usergroups ( gid int PRIMARY KEY, name text ); SELECT create_reference_table('usergroups'); create_reference_table ------------------------ (1 row) CREATE TABLE itemgroups ( gid int PRIMARY KEY, name text ); SELECT create_reference_table('itemgroups'); create_reference_table ------------------------ (1 row) CREATE TABLE users ( id int PRIMARY KEY, name text, user_group int ); SELECT create_distributed_table('users', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE items ( user_id int REFERENCES users (id) ON DELETE CASCADE, item_name text, item_group int ); SELECT create_distributed_table('items', 'user_id'); create_distributed_table -------------------------- (1 row) -- Table to find values that live in different shards on the same node SELECT id, shard_name('users', shardid), nodename, nodeport FROM pg_dist_shard_placement JOIN ( SELECT id, get_shard_id_for_distribution_column('users', id) shardid FROM generate_series(1,10) id ) ids USING (shardid) ORDER BY id; id | shard_name | nodename | nodeport ----+----------------------+-----------+---------- 1 | public.users_1200022 | localhost | 57637 2 | public.users_1200025 | localhost | 57638 3 | public.users_1200023 | localhost | 57638 4 | public.users_1200023 | localhost | 57638 5 | public.users_1200022 | localhost | 57637 6 | public.users_1200024 | localhost | 57637 7 | public.users_1200023 | localhost | 57638 8 | public.users_1200022 | localhost | 57637 9 | public.users_1200025 | localhost | 57638 10 | public.users_1200022 | localhost | 57637 (10 rows) END; -- the INSERTs into items should see the users BEGIN; \COPY users FROM STDIN WITH CSV INSERT INTO items VALUES (1,
'item-1'); INSERT INTO items VALUES (6, 'item-6'); END; SELECT user_id FROM items ORDER BY user_id; user_id --------- 1 6 (2 rows) -- should not be able to open multiple connections per node after INSERTing over one connection BEGIN; INSERT INTO users VALUES (2, 'burak'); INSERT INTO users VALUES (3, 'burak'); \COPY items FROM STDIN WITH CSV ERROR: cannot establish a new connection for placement 1200042, since DML has been executed on a connection that is in use END; -- cannot perform DDL after a co-located table has been read over 1 connection BEGIN; SELECT id FROM users WHERE id = 1; id ---- 1 (1 row) SELECT id FROM users WHERE id = 6; id ---- 6 (1 row) ALTER TABLE items ADD COLUMN last_update timestamptz; NOTICE: using one-phase commit for distributed DDL commands ERROR: cannot perform a parallel DDL command because multiple placements have been accessed over the same connection END; -- but the other way around is fine BEGIN; ALTER TABLE items ADD COLUMN last_update timestamptz; SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 1; id ---- 1 (1 row) SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 6; id ---- 6 (1 row) END; BEGIN; -- establish multiple connections to a node \COPY users FROM STDIN WITH CSV -- now read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; user_id --------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; user_id --------- (0 rows) -- perform a DDL command on the reference table ALTER TABLE itemgroups ADD COLUMN last_update timestamptz; ERROR: cannot perform DDL on placement 1200036, which has been read over multiple connections END; BEGIN; -- establish multiple connections to a node \COPY users FROM STDIN WITH CSV -- read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; user_id --------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; user_id --------- (0 rows) -- perform a DDL command on a co-located reference table ALTER TABLE usergroups ADD COLUMN last_update timestamptz; ERROR: cannot perform DDL on placement 1200034 since a co-located placement has been read over multiple connections END; BEGIN; -- make a modification over connection 1 INSERT INTO usergroups VALUES (0,'istanbul'); -- copy over connections 1 and 2 \COPY users FROM STDIN WITH CSV -- cannot read modifications made over different connections SELECT id FROM users JOIN usergroups ON (gid = user_group) WHERE id = 3; ERROR: cannot perform query with placements that were modified over multiple connections END; -- make sure we can see cascading deletes BEGIN; SELECT master_modify_multiple_shards('DELETE FROM users'); master_modify_multiple_shards ------------------------------- 2 (1 row) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 1; user_id --------- (0 rows) SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 6; user_id --------- (0 rows) END; -- test visibility after COPY INSERT INTO usergroups VALUES (2,'group'); BEGIN; -- opens two separate connections to node \COPY users FROM STDIN WITH CSV -- Uses first connection, which wrote the row with id = 2 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 2; id | name | user_group | gid | name ----+-------+------------+-----+------- 2 | onder | 2 | 2 | group (1 row) -- Should use second connection, which 
wrote the row with id = 4 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 4; id | name | user_group | gid | name ----+-------+------------+-----+------- 4 | murat | 2 | 2 | group (1 row) END; DROP TABLE items, users, itemgroups, usergroups, researchers, labs; citus-7.0.3/src/test/regress/expected/multi_multiuser.out000066400000000000000000000131051317107136600236720ustar00rootroot00000000000000-- -- MULTI_MULTIUSERS -- -- Test user permissions. -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1420000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000; SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 2; CREATE TABLE test (id integer, val integer); SELECT create_distributed_table('test', 'id'); create_distributed_table -------------------------- (1 row) -- turn off propagation to avoid Enterprise processing the following section SET citus.enable_ddl_propagation TO off; CREATE USER full_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER read_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER no_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT ALL ON TABLE test TO full_access; GRANT SELECT ON TABLE test TO read_access; SET citus.enable_ddl_propagation TO DEFAULT; \c - - - :worker_1_port CREATE USER full_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER read_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER no_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT ALL ON TABLE test_1420000 TO full_access; GRANT SELECT ON TABLE test_1420000 TO read_access; \c - - - :worker_2_port CREATE USER full_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER read_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER no_access; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
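-- Note: the per-shard GRANTs below are issued by hand on each worker because DDL
-- propagation is turned off in this test. A minimal alternative sketch (not used
-- here) is to push the same grants to every shard placement from the coordinator
-- with run_command_on_placements, which substitutes each shard name for %s:
--   SELECT * FROM run_command_on_placements('test',
--     'GRANT ALL ON TABLE %s TO full_access');
-- The manual approach keeps the setup explicit and independent of metadata.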
GRANT ALL ON TABLE test_1420001 TO full_access; GRANT SELECT ON TABLE test_1420001 TO read_access; \c - - - :master_port -- create prepare tests PREPARE prepare_insert AS INSERT INTO test VALUES ($1); PREPARE prepare_select AS SELECT count(*) FROM test; -- not allowed to read absolute paths, even as superuser COPY "/etc/passwd" TO STDOUT WITH (format transmit); ERROR: absolute path not allowed -- check full permission SET ROLE full_access; EXECUTE prepare_insert(1); EXECUTE prepare_select; count ------- 1 (1 row) INSERT INTO test VALUES (2); SELECT count(*) FROM test; count ------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; count ------- 1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM test; count ------- 2 (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; count ------- 0 (1 row) -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); ERROR: operation is not allowed HINT: Run the command with a superuser. SET citus.task_executor_type TO 'real-time'; -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); ERROR: operation is not allowed HINT: Run the command with a superuser. -- check read permission SET ROLE read_access; EXECUTE prepare_insert(1); ERROR: permission denied for relation test EXECUTE prepare_select; count ------- 2 (1 row) INSERT INTO test VALUES (2); ERROR: permission denied for relation test SELECT count(*) FROM test; count ------- 2 (1 row) SELECT count(*) FROM test WHERE id = 1; count ------- 1 (1 row) SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM test; count ------- 2 (1 row) -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; count ------- 0 (1 row) -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); ERROR: operation is not allowed HINT: Run the command with a superuser. SET citus.task_executor_type TO 'real-time'; -- check no permission SET ROLE no_access; EXECUTE prepare_insert(1); ERROR: permission denied for relation test EXECUTE prepare_select; ERROR: permission denied for relation test INSERT INTO test VALUES (2); ERROR: permission denied for relation test SELECT count(*) FROM test; ERROR: permission denied for relation test SELECT count(*) FROM test WHERE id = 1; ERROR: permission denied for relation test SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM test; ERROR: permission denied for relation test -- test re-partition query SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; ERROR: permission denied for relation test -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); ERROR: operation is not allowed HINT: Run the command with a superuser. 
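-- Note: the permission matrix exercised above can also be inspected with the
-- standard has_table_privilege() function; a minimal sketch (not run as part of
-- this test) would be:
--   SELECT has_table_privilege('full_access', 'test', 'INSERT');  -- expected: t
--   SELECT has_table_privilege('read_access', 'test', 'INSERT');  -- expected: f
--   SELECT has_table_privilege('no_access', 'test', 'SELECT');    -- expected: f
-- On the workers the same checks would target the shard tables
-- (test_1420000 and test_1420001).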
SET citus.task_executor_type TO 'real-time'; RESET ROLE; DROP TABLE test; DROP USER full_access; DROP USER read_access; DROP USER no_access; citus-7.0.3/src/test/regress/expected/multi_mx_create_table.out000066400000000000000000000372211317107136600247640ustar00rootroot00000000000000-- -- MULTI_MX_CREATE_TABLE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ----------------------------- (1 row) -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDFs that we're going to use in our tests SET search_path TO public; CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); SET search_path TO public; CREATE COLLATION citus_mx_test_schema.english FROM "en_US"; CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDFs in worker node CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); SET search_path TO public; CREATE COLLATION citus_mx_test_schema.english FROM "en_US"; SET search_path TO public; CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 2 \c - - - :worker_2_port -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDF CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) 
RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); SET search_path TO public; CREATE COLLATION citus_mx_test_schema.english FROM "en_US"; SET search_path TO public; CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); -- connect back to the master, and do some more tests \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET search_path TO public; CREATE TABLE nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SET citus.shard_count TO 16; SELECT create_distributed_table('nation_hash', 'n_nationkey'); create_distributed_table -------------------------- (1 row) SET search_path TO citus_mx_test_schema; -- create mx tables that we're going to use for our tests CREATE TABLE citus_mx_test_schema.nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT create_distributed_table('nation_hash', 'n_nationkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE citus_mx_test_schema_join_1.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SET citus.shard_count TO 4; SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash', 'n_nationkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash_2', 'n_nationkey'); create_distributed_table -------------------------- (1 row) SET search_path TO citus_mx_test_schema_join_2; CREATE TABLE nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('nation_hash', 'n_nationkey'); create_distributed_table -------------------------- (1 row) SET search_path TO citus_mx_test_schema; CREATE TABLE nation_hash_collation_search_path( n_nationkey integer not null, n_name char(25) not null COLLATE english, n_regionkey integer not null, n_comment varchar(152) ); SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey'); create_distributed_table -------------------------- (1 row) \COPY nation_hash_collation_search_path FROM STDIN with delimiter '|'; CREATE TABLE citus_mx_test_schema.nation_hash_composite_types( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152), test_col citus_mx_test_schema.new_composite_type ); SELECT create_distributed_table('citus_mx_test_schema.nation_hash_composite_types', 'n_nationkey'); create_distributed_table -------------------------- (1 row) -- insert some data to verify composite type queries \COPY citus_mx_test_schema.nation_hash_composite_types FROM STDIN with delimiter '|'; -- now create tpch tables -- Create new table definitions for use in testing in 
distributed planning and -- execution functionality. Also create indexes to boost performance. SET search_path TO public; CREATE TABLE lineitem_mx ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SET citus.shard_count TO 16; SELECT create_distributed_table('lineitem_mx', 'l_orderkey'); create_distributed_table -------------------------- (1 row) CREATE INDEX lineitem_mx_time_index ON lineitem_mx (l_shipdate); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE TABLE orders_mx ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT create_distributed_table('orders_mx', 'o_orderkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE customer_mx ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SET citus.shard_count TO 1; SELECT create_distributed_table('customer_mx', 'c_custkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE nation_mx ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('nation_mx', 'n_nationkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE part_mx ( p_partkey integer not null, p_name varchar(55) not null, p_mfgr char(25) not null, p_brand char(10) not null, p_type varchar(25) not null, p_size integer not null, p_container char(10) not null, p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_distributed_table('part_mx', 'p_partkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE supplier_mx ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); SELECT create_distributed_table('supplier_mx', 's_suppkey'); create_distributed_table -------------------------- (1 row) -- Create test table for ddl CREATE TABLE mx_ddl_table ( key int primary key, value int ); SET citus.shard_count TO 4; SELECT create_distributed_table('mx_ddl_table', 'key', 'hash'); create_distributed_table -------------------------- (1 row) -- Load some test data COPY mx_ddl_table (key, value) FROM STDIN WITH (FORMAT 'csv'); -- test table for modifications CREATE TABLE limit_orders_mx ( id bigint PRIMARY KEY, symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, kind order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) 
); SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders_mx', 'id'); create_distributed_table -------------------------- (1 row) -- test table for modifications CREATE TABLE multiple_hash_mx ( category text NOT NULL, data text NOT NULL ); SELECT create_distributed_table('multiple_hash_mx', 'category'); create_distributed_table -------------------------- (1 row) SET citus.shard_count TO 4; CREATE TABLE app_analytics_events_mx (id bigserial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events_mx', 'app_id'); create_distributed_table -------------------------- (1 row) CREATE TABLE researchers_mx ( id bigint NOT NULL, lab_id int NOT NULL, name text NOT NULL ); SET citus.shard_count TO 2; SELECT create_distributed_table('researchers_mx', 'lab_id'); create_distributed_table -------------------------- (1 row) CREATE TABLE labs_mx ( id bigint NOT NULL, name text NOT NULL ); SET citus.shard_count TO 1; SELECT create_distributed_table('labs_mx', 'id'); create_distributed_table -------------------------- (1 row) -- now, for some special failures... CREATE TABLE objects_mx ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT create_distributed_table('objects_mx', 'id', 'hash'); create_distributed_table -------------------------- (1 row) CREATE TABLE articles_hash_mx ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); -- this table is used in router executor tests CREATE TABLE articles_single_shard_hash_mx (LIKE articles_hash_mx); SET citus.shard_count TO 2; SELECT create_distributed_table('articles_hash_mx', 'author_id'); create_distributed_table -------------------------- (1 row) SET citus.shard_count TO 1; SELECT create_distributed_table('articles_single_shard_hash_mx', 'author_id'); create_distributed_table -------------------------- (1 row) SET citus.shard_count TO 4; CREATE TABLE company_employees_mx (company_id int, employee_id int, manager_id int); SELECT create_distributed_table('company_employees_mx', 'company_id'); create_distributed_table -------------------------- (1 row) WITH shard_counts AS ( SELECT logicalrelid, count(*) AS shard_count FROM pg_dist_shard GROUP BY logicalrelid ) SELECT logicalrelid, colocationid, shard_count, partmethod, repmodel FROM pg_dist_partition NATURAL JOIN shard_counts ORDER BY colocationid, logicalrelid; logicalrelid | colocationid | shard_count | partmethod | repmodel --------------------------------------------------------+--------------+-------------+------------+---------- nation_hash | 2 | 16 | h | s citus_mx_test_schema.nation_hash | 2 | 16 | h | s citus_mx_test_schema_join_1.nation_hash | 3 | 4 | h | s citus_mx_test_schema_join_1.nation_hash_2 | 3 | 4 | h | s citus_mx_test_schema_join_2.nation_hash | 3 | 4 | h | s citus_mx_test_schema.nation_hash_collation_search_path | 3 | 4 | h | s citus_mx_test_schema.nation_hash_composite_types | 3 | 4 | h | s mx_ddl_table | 3 | 4 | h | s app_analytics_events_mx | 3 | 4 | h | s company_employees_mx | 3 | 4 | h | s lineitem_mx | 4 | 16 | h | s orders_mx | 4 | 16 | h | s customer_mx | 5 | 1 | h | s nation_mx | 5 | 1 | h | s part_mx | 5 | 1 | h | s supplier_mx | 5 | 1 | h | s limit_orders_mx | 6 | 2 | h | s articles_hash_mx | 6 | 2 | h | s multiple_hash_mx | 7 | 2 | h | s researchers_mx | 8 | 2 | h | s labs_mx | 9 | 1 | h | s objects_mx | 9 | 1 | h | s articles_single_shard_hash_mx | 9 | 1 | h | s (23 rows) 
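-- Note: co-location of any two of the tables above can be verified directly by
-- comparing their colocationid values; a minimal sketch (not part of this test)
-- using the same catalog as the query above:
--   SELECT a.colocationid = b.colocationid AS colocated
--   FROM pg_dist_partition a, pg_dist_partition b
--   WHERE a.logicalrelid = 'lineitem_mx'::regclass
--     AND b.logicalrelid = 'orders_mx'::regclass;
-- For lineitem_mx and orders_mx this should return true, matching the shared
-- colocationid shown in the listing above.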
citus-7.0.3/src/test/regress/expected/multi_mx_ddl.out000066400000000000000000000217071317107136600231170ustar00rootroot00000000000000-- Tests related to distributed DDL commands on mx cluster SELECT * FROM mx_ddl_table ORDER BY key; key | value -----+------- 1 | 10 2 | 11 3 | 21 4 | 37 5 | 60 6 | 100 10 | 200 11 | 230 (8 rows) -- CREATE INDEX CREATE INDEX ddl_test_index ON mx_ddl_table(value); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE INDEX CONCURRENTLY ddl_test_concurrent_index ON mx_ddl_table(value); -- ADD COLUMN ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER; -- SET DEFAULT ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1; SELECT master_modify_multiple_shards('UPDATE mx_ddl_table SET version=0.1 WHERE version IS NULL'); master_modify_multiple_shards ------------------------------- 8 (1 row) -- SET NOT NULL ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ---------+---------+-------------------- key | integer | not null value | integer | version | integer | not null default 1 (3 rows) \d ddl_test*_index Index "public.ddl_test_concurrent_index" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table" Index "public.ddl_test_index" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table" \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ---------+---------+-------------------- key | integer | not null value | integer | version | integer | not null default 1 (3 rows) \d ddl_test*_index Index "public.ddl_test_concurrent_index" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table" Index "public.ddl_test_index" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table" SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; Column | Type | Modifiers ---------+---------+-------------------- key | integer | not null value | integer | version | integer | not null default 1 (3 rows) \d ddl_test*_index_1220088 Index "public.ddl_test_concurrent_index_1220088" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table_1220088" Index "public.ddl_test_index_1220088" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table_1220088" \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers ---------+---------+-------------------- key | integer | not null value | integer | version | integer | not null default 1 (3 rows) \d ddl_test*_index Index "public.ddl_test_concurrent_index" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table" Index "public.ddl_test_index" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table" SELECT "Column", 
"Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; Column | Type | Modifiers ---------+---------+-------------------- key | integer | not null value | integer | version | integer | not null default 1 (3 rows) \d ddl_test*_index_1220089 Index "public.ddl_test_concurrent_index_1220089" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table_1220089" Index "public.ddl_test_index_1220089" Column | Type | Definition --------+---------+------------ value | integer | value btree, for table "public.mx_ddl_table_1220089" INSERT INTO mx_ddl_table VALUES (37, 78, 2); INSERT INTO mx_ddl_table VALUES (38, 78); -- Switch to the coordinator \c - - - :master_port -- SET DATA TYPE ALTER TABLE mx_ddl_table ALTER COLUMN version SET DATA TYPE double precision; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' INSERT INTO mx_ddl_table VALUES (78, 83, 2.1); \c - - - :worker_1_port SELECT * FROM mx_ddl_table ORDER BY key; key | value | version -----+-------+--------- 1 | 10 | 0 2 | 11 | 0 3 | 21 | 0 4 | 37 | 0 5 | 60 | 0 6 | 100 | 0 10 | 200 | 0 11 | 230 | 0 37 | 78 | 2 38 | 78 | 1 78 | 83 | 2.1 (11 rows) -- Switch to the coordinator \c - - - :master_port -- DROP INDEX DROP INDEX ddl_test_index; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' DROP INDEX CONCURRENTLY ddl_test_concurrent_index; -- DROP DEFAULT ALTER TABLE mx_ddl_table ALTER COLUMN version DROP DEFAULT; -- DROP NOT NULL ALTER TABLE mx_ddl_table ALTER COLUMN version DROP NOT NULL; -- DROP COLUMN ALTER TABLE mx_ddl_table DROP COLUMN version; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers --------+---------+----------- key | integer | not null value | integer | (2 rows) \di ddl_test*_index List of relations Schema | Name | Type | Owner | Table --------+------+------+-------+------- (0 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers --------+---------+----------- key | integer | not null value | integer | (2 rows) \di ddl_test*_index List of relations Schema | Name | Type | Owner | Table --------+------+------+-------+------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; Column | Type | Modifiers --------+---------+----------- key | integer | not null value | integer | (2 rows) \di ddl_test*_index_1220088 List of relations Schema | Name | Type | Owner | Table --------+------+------+-------+------- (0 rows) \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; Column | Type | Modifiers --------+---------+----------- key | integer | not null value | integer | (2 rows) \di ddl_test*_index List of relations Schema | Name | Type | Owner | Table --------+------+------+-------+------- (0 rows) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; Column | Type | Modifiers --------+---------+----------- key | integer | not null value | integer | (2 rows) \di ddl_test*_index_1220089 List of relations Schema | Name | Type | 
Owner | Table --------+------+------+-------+------- (0 rows) -- Show that DDL commands are done within a two-phase commit transaction \c - - - :master_port SET client_min_messages TO debug2; CREATE INDEX ddl_test_index ON mx_ddl_table(value); DEBUG: building index "ddl_test_index" on table "mx_ddl_table" NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' RESET client_min_messages; DROP INDEX ddl_test_index; -- show that sequences owned by mx tables result in unique values SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; SET citus.replication_model TO streaming; CREATE TABLE mx_sequence(key INT, value BIGSERIAL); SELECT create_distributed_table('mx_sequence', 'key'); create_distributed_table -------------------------- (1 row) \c - - - :worker_1_port SELECT last_value AS worker_1_lastval FROM mx_sequence_value_seq \gset \c - - - :worker_2_port SELECT last_value AS worker_2_lastval FROM mx_sequence_value_seq \gset \c - - - :master_port -- don't look at the actual values because they rely on the groupids of the nodes -- which can change depending on the tests which have run before this one SELECT :worker_1_lastval = :worker_2_lastval; ?column? ---------- f (1 row) -- the type of sequences can't be changed ALTER TABLE mx_sequence ALTER value TYPE BIGINT; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE mx_sequence ALTER value TYPE INT; citus-7.0.3/src/test/regress/expected/multi_mx_explain.out000066400000000000000000000606221317107136600240130ustar00rootroot00000000000000-- -- MULTI_MX_EXPLAIN -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000; \c - - - :worker_1_port \c - - - :worker_2_port \c - - - :master_port \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; \c - - - :worker_1_port -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; \c - - - :worker_2_port -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 
dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; [ { "Plan": { "Node Type": "Sort", "Parallel Aware": false, "Sort Key": ["COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", "Parent Relationship": "Outer", "Parallel Aware": false, "Group Key": ["remote_scan.l_quantity"], "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Real-Time", "Parallel Aware": false, "Distributed Query": { "Job": { "Task Count": 16, "Tasks Shown": "One of 16", "Tasks": [ { "Node": "host=localhost port=57637 dbname=regression", "Remote Plan": [ [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Hashed", "Partial Mode": "Simple", "Parallel Aware": false, "Group Key": ["l_quantity"], "Plans": [ { "Node Type": "Seq Scan", "Parent Relationship": "Outer", "Parallel Aware": false, "Relation Name": "lineitem_mx_1220052", "Alias": "lineitem_mx" } ] } } ] ] } ] } } } ] } ] } } ] -- Validate JSON format SELECT true AS valid FROM explain_json($$ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t \c - - - :worker_1_port -- Test XML format EXPLAIN (COSTS FALSE, FORMAT XML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort false COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint) remote_scan.l_quantity Aggregate Hashed Simple Outer false remote_scan.l_quantity Custom Scan Outer Citus Real-Time false 16 One of 16 host=localhost port=57637 dbname=regression Aggregate Hashed Simple false l_quantity Seq Scan Outer false lineitem_mx_1220052 lineitem_mx -- Validate XML format SELECT true AS valid FROM explain_xml($$ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t -- Test YAML format EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - Plan: Node Type: "Sort" Parallel Aware: false Sort Key: - "COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)" - "remote_scan.l_quantity" Plans: - Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parent Relationship: "Outer" Parallel Aware: false Group Key: - "remote_scan.l_quantity" Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Real-Time" Parallel Aware: false Distributed Query: Job: Task Count: 16 Tasks Shown: "One of 16" Tasks: - Node: "host=localhost port=57637 dbname=regression" Remote Plan: - Plan: Node Type: "Aggregate" Strategy: "Hashed" Partial Mode: "Simple" Parallel Aware: false Group Key: - "l_quantity" Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Parallel Aware: false Relation Name: "lineitem_mx_1220052" Alias: "lineitem_mx" -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; 
Sort Sort Key: COALESCE((pg_catalog.sum((COALESCE((pg_catalog.sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx \c - - - :worker_2_port -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx; Aggregate Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / pg_catalog.sum(remote_scan."?column?_2"))) -> Custom Scan (Citus Real-Time) Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2" Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem_mx JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0 ORDER BY l_quantity LIMIT 10; Limit -> Sort Sort Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Sort Sort Key: lineitem_mx.l_quantity -> Hash Join Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey) -> Seq Scan on lineitem_mx_1220052 lineitem_mx Filter: (l_quantity < 5.0) -> Hash -> Seq Scan on orders_mx_1220068 orders_mx -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem_mx VALUES(1,0); Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Insert on lineitem_mx_1220052 -> Result -- Test update EXPLAIN (COSTS FALSE) UPDATE lineitem_mx SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Update on lineitem_mx_1220052 lineitem_mx -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem_mx WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Delete on lineitem_mx_1220052 lineitem_mx -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- make the outputs more consistent VACUUM ANALYZE lineitem_mx; -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57638 dbname=regression -> Index Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$); t SELECT true AS valid FROM explain_json($$ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$); t -- Test CREATE TABLE ... 
AS EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem_mx; Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Seq Scan on lineitem_mx_1220052 lineitem_mx -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220053 on lineitem_mx_1220053 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220054 on lineitem_mx_1220054 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220055 on lineitem_mx_1220055 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220056 on lineitem_mx_1220056 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220057 on lineitem_mx_1220057 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220058 on lineitem_mx_1220058 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220059 on lineitem_mx_1220059 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220060 on lineitem_mx_1220060 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220061 on lineitem_mx_1220061 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220062 on lineitem_mx_1220062 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220063 on lineitem_mx_1220063 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220064 on lineitem_mx_1220064 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220065 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220066 on lineitem_mx_1220066 lineitem_mx Index Cond: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220067 on lineitem_mx_1220067 lineitem_mx Index Cond: (l_orderkey > 9030) SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 
9030$$); t SELECT true AS valid FROM explain_json($$ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$); t -- Test track tracker SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Index Only Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 lineitem_mx Index Cond: (l_orderkey > 9030) -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 4 Merge Task Count: 4 -> MapMergeJob Map Task Count: 16 Merge Task Count: 4 -> MapMergeJob Map Task Count: 1 Merge Task Count: 4 -> MapMergeJob Map Task Count: 1 Merge Task Count: 4 EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Plain", "Partial Mode": "Simple", "Parallel Aware": false, "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Task-Tracker", "Parallel Aware": false, "Distributed Query": { "Job": { "Task Count": 4, "Tasks Shown": "None, not supported for re-partition queries", "Depended Jobs": [ { "Map Task Count": 4, "Merge Task Count": 4, "Depended Jobs": [ { "Map Task Count": 16, "Merge Task Count": 4 }, { "Map Task Count": 1, "Merge Task Count": 4 } ] }, { "Map Task Count": 1, "Merge Task Count": 4 } ] } } } ] } } ] SELECT true AS valid FROM explain_json($$ SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t EXPLAIN (COSTS FALSE, FORMAT XML) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate Plain Simple false Custom Scan Outer Citus Task-Tracker false 4 None, not supported for re-partition queries 4 4 16 4 1 4 1 4 SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; - Plan: Node Type: "Aggregate" Strategy: "Plain" Partial Mode: "Simple" Parallel Aware: false Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Parallel Aware: false Distributed Query: Job: Task Count: 4 Tasks Shown: "None, not supported for re-partition queries" Depended Jobs: - Map Task Count: 4 Merge Task Count: 4 Depended Jobs: - Map Task Count: 16 Merge Task Count: 4 - Map Task Count: 1 Merge Task Count: 4 - Map Task Count: 1 Merge Task Count: 4 citus-7.0.3/src/test/regress/expected/multi_mx_explain_0.out000066400000000000000000000545201317107136600242320ustar00rootroot00000000000000-- -- MULTI_MX_EXPLAIN -- 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000; \c - - - :worker_1_port \c - - - :worker_2_port \c - - - :master_port \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; \c - - - :worker_1_port -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; \c - - - :worker_2_port -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort Sort Key: COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; [ { "Plan": { "Node Type": "Sort", "Sort Key": ["COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)", "remote_scan.l_quantity"], "Plans": [ { "Node Type": "Aggregate", "Strategy": "Hashed", "Parent Relationship": "Outer", "Group Key": ["remote_scan.l_quantity"], "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Real-Time", "Distributed Query": { "Job": { "Task Count": 16, "Tasks Shown": "One of 16", "Tasks": [ { "Node": "host=localhost port=57637 dbname=regression", "Remote Plan": [ [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Hashed", "Group Key": ["l_quantity"], "Plans": [ { "Node Type": "Seq Scan", "Parent Relationship": "Outer", "Relation Name": "lineitem_mx_1220052", "Alias": "lineitem_mx" } ] } } ] ] } ] } } } ] } ] } } ] -- Validate JSON format SELECT true AS valid FROM explain_json($$ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t \c - - - :worker_1_port -- Test XML format EXPLAIN (COSTS FALSE, FORMAT XML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint) remote_scan.l_quantity Aggregate Hashed Outer remote_scan.l_quantity Custom Scan Outer Citus Real-Time 16 One of 16 host=localhost port=57637 dbname=regression Aggregate Hashed l_quantity Seq Scan 
Outer lineitem_mx_1220052 lineitem_mx -- Validate XML format SELECT true AS valid FROM explain_xml($$ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); t -- Test YAML format EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; - Plan: Node Type: "Sort" Sort Key: - "COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint)" - "remote_scan.l_quantity" Plans: - Node Type: "Aggregate" Strategy: "Hashed" Parent Relationship: "Outer" Group Key: - "remote_scan.l_quantity" Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Real-Time" Distributed Query: Job: Task Count: 16 Tasks Shown: "One of 16" Tasks: - Node: "host=localhost port=57637 dbname=regression" Remote Plan: - Plan: Node Type: "Aggregate" Strategy: "Hashed" Group Key: - "l_quantity" Plans: - Node Type: "Seq Scan" Parent Relationship: "Outer" Relation Name: "lineitem_mx_1220052" Alias: "lineitem_mx" -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; Sort Sort Key: COALESCE((sum((COALESCE((sum(remote_scan.count_quantity))::bigint, '0'::bigint))))::bigint, '0'::bigint), remote_scan.l_quantity -> HashAggregate Group Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: l_quantity -> Seq Scan on lineitem_mx_1220052 lineitem_mx \c - - - :worker_2_port -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx; Aggregate Output: (sum(remote_scan."?column?") / (sum(remote_scan."?column?_1") / sum(remote_scan."?column?_2"))) -> Custom Scan (Citus Real-Time) Output: remote_scan."?column?", remote_scan."?column?_1", remote_scan."?column?_2" Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate Output: sum(l_quantity), sum(l_quantity), count(l_quantity) -> Seq Scan on public.lineitem_mx_1220052 lineitem_mx Output: l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem_mx JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0 ORDER BY l_quantity LIMIT 10; Limit -> Sort Sort Key: remote_scan.l_quantity -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Sort Sort Key: lineitem_mx.l_quantity -> Hash Join Hash Cond: (lineitem_mx.l_orderkey = orders_mx.o_orderkey) -> Seq Scan on lineitem_mx_1220052 lineitem_mx Filter: (l_quantity < 5.0) -> Hash -> Seq Scan on orders_mx_1220068 orders_mx -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem_mx VALUES(1,0); Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Insert on lineitem_mx_1220052 -> Result -- Test update EXPLAIN (COSTS FALSE) UPDATE lineitem_mx SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Update 
on lineitem_mx_1220052 -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem_mx WHERE l_orderkey = 1 AND l_partkey = 0; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Delete on lineitem_mx_1220052 -> Index Scan using lineitem_mx_pkey_1220052 on lineitem_mx_1220052 Index Cond: (l_orderkey = 1) Filter: (l_partkey = 0) -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5; Custom Scan (Citus Router) Task Count: 1 Tasks Shown: All -> Task Node: host=localhost port=57638 dbname=regression -> Bitmap Heap Scan on lineitem_mx_1220055 lineitem_mx Recheck Cond: (l_orderkey = 5) -> Bitmap Index Scan on lineitem_mx_pkey_1220055 Index Cond: (l_orderkey = 5) SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$); t SELECT true AS valid FROM explain_json($$ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$); t -- Test CREATE TABLE ... AS EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem_mx; Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Seq Scan on lineitem_mx_1220052 lineitem_mx -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Real-Time) Task Count: 16 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220052 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220053 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220054 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220055 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220056 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220057 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220058 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220059 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220060 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220061 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220062 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220063 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220064 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 
dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220065 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220066 lineitem_mx Filter: (l_orderkey > 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220067 lineitem_mx Filter: (l_orderkey > 9030) SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$); t SELECT true AS valid FROM explain_json($$ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$); t -- Test track tracker SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 16 Tasks Shown: One of 16 -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Seq Scan on lineitem_mx_1220052 lineitem_mx Filter: (l_orderkey > 9030) -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate -> Custom Scan (Citus Task-Tracker) Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 4 Merge Task Count: 4 -> MapMergeJob Map Task Count: 16 Merge Task Count: 4 -> MapMergeJob Map Task Count: 1 Merge Task Count: 4 -> MapMergeJob Map Task Count: 1 Merge Task Count: 4 EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; [ { "Plan": { "Node Type": "Aggregate", "Strategy": "Plain", "Plans": [ { "Node Type": "Custom Scan", "Parent Relationship": "Outer", "Custom Plan Provider": "Citus Task-Tracker", "Distributed Query": { "Job": { "Task Count": 4, "Tasks Shown": "None, not supported for re-partition queries", "Depended Jobs": [ { "Map Task Count": 4, "Merge Task Count": 4, "Depended Jobs": [ { "Map Task Count": 16, "Merge Task Count": 4 }, { "Map Task Count": 1, "Merge Task Count": 4 } ] }, { "Map Task Count": 1, "Merge Task Count": 4 } ] } } } ] } } ] SELECT true AS valid FROM explain_json($$ SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t EXPLAIN (COSTS FALSE, FORMAT XML) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; Aggregate Plain Custom Scan Outer Citus Task-Tracker 4 None, not supported for re-partition queries 4 4 16 4 1 4 1 4 SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); t EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; - Plan: Node Type: "Aggregate" Strategy: "Plain" Plans: - Node Type: "Custom Scan" Parent Relationship: "Outer" Custom Plan Provider: "Citus Task-Tracker" Distributed Query: Job: Task Count: 4 Tasks Shown: "None, not supported for re-partition queries" Depended Jobs: - Map Task Count: 4 Merge Task Count: 4 
Depended Jobs: - Map Task Count: 16 Merge Task Count: 4 - Map Task Count: 1 Merge Task Count: 4 - Map Task Count: 1 Merge Task Count: 4 citus-7.0.3/src/test/regress/expected/multi_mx_metadata.out000066400000000000000000000202521317107136600241260ustar00rootroot00000000000000-- Test creation of mx tables and metadata syncing -- get rid of the previously created entries in pg_dist_transaction -- for the sake of getting consistent results in this test file SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) CREATE TABLE distributed_mx_table ( key text primary key, value jsonb ); CREATE INDEX ON distributed_mx_table USING GIN (value); SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 4; SELECT create_distributed_table('distributed_mx_table', 'key'); create_distributed_table -------------------------- (1 row) -- Verify that we've logged commit records SELECT count(*) FROM pg_dist_transaction; count ------- 5 (1 row) -- Confirm that the metadata transactions have been committed SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- Verify that the commit records have been removed SELECT count(*) FROM pg_dist_transaction; count ------- 3 (1 row) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers --------+-------+----------- key | text | not null value | jsonb | (2 rows) \d distributed_mx_table_pkey Index "public.distributed_mx_table_pkey" Column | Type | Definition --------+------+------------ key | text | key primary key, btree, for table "public.distributed_mx_table" \d distributed_mx_table_value_idx Index "public.distributed_mx_table_value_idx" Column | Type | Definition --------+------+------------ value | text | value gin, for table "public.distributed_mx_table" SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; repmodel ---------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; count ------- 4 (1 row) \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; Column | Type | Modifiers --------+-------+----------- key | text | not null value | jsonb | (2 rows) \d distributed_mx_table_pkey Index "public.distributed_mx_table_pkey" Column | Type | Definition --------+------+------------ key | text | key primary key, btree, for table "public.distributed_mx_table" \d distributed_mx_table_value_idx Index "public.distributed_mx_table_value_idx" Column | Type | Definition --------+------+------------ value | text | value gin, for table "public.distributed_mx_table" SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; repmodel ---------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; count ------- 4 (1 row) -- Create a table and then roll back the transaction \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; BEGIN; CREATE TABLE should_not_exist ( key text primary key, value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); create_distributed_table -------------------------- (1 row) ABORT; -- Verify that the table does 
not exist on the worker \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist'; count ------- 0 (1 row) -- Ensure that we don't allow prepare on a metadata transaction \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; BEGIN; CREATE TABLE should_not_exist ( key text primary key, value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); create_distributed_table -------------------------- (1 row) PREPARE TRANSACTION 'this_should_fail'; ERROR: cannot use 2PC in transactions involving multiple servers -- now show that we can create tables and schemas withing a single transaction BEGIN; CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts; SET search_path TO citus_mx_schema_for_xacts; SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 1; CREATE TABLE objects_for_xacts ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts', 'id'); create_distributed_table -------------------------- (1 row) COMMIT; -- see that the table actually created and distributed \c - - - :worker_1_port SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; repmodel ---------- s (1 row) SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; count ------- 1 (1 row) \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; -- now show that we can rollback on creating mx table, but shards remain.... BEGIN; CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts; NOTICE: schema "citus_mx_schema_for_xacts" already exists, skipping SET search_path TO citus_mx_schema_for_xacts; SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 2; CREATE TABLE objects_for_xacts2 ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts2', 'id'); create_distributed_table -------------------------- (1 row) ROLLBACK; -- show that the table not exists on the coordinator SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; count ------- 0 (1 row) \c - - - :worker_1_port -- the distributed table not exists on the worker node SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; count ------- 0 (1 row) -- shard also does not exist since we create shards in a transaction SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts'; count ------- 0 (1 row) -- make sure that master_drop_all_shards does not work from the worker nodes SELECT master_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
-- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- Create some "fake" prepared transactions to recover \c - - - :worker_1_port BEGIN; CREATE TABLE should_abort (value int); PREPARE TRANSACTION 'citus_0_should_abort'; BEGIN; CREATE TABLE should_commit (value int); PREPARE TRANSACTION 'citus_0_should_commit'; BEGIN; CREATE TABLE should_be_sorted_into_middle (value int); PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle'; \c - - - :master_port -- Add "fake" pg_dist_transaction records and run recovery SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_commit'); INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: ROLLBACK PREPARED 'citus_0_should_abort' NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: ROLLBACK PREPARED 'citus_0_should_be_sorted_into_middle' NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: COMMIT PREPARED 'citus_0_should_commit' recover_prepared_transactions ------------------------------- 3 (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 3 (1 row) -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; count ------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; count ------- 1 (1 row) citus-7.0.3/src/test/regress/expected/multi_mx_modifications.out000066400000000000000000000457071317107136600252120ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1330000; -- =================================================================== -- test end-to-end modification functionality for mx tables -- =================================================================== -- basic single-row INSERT INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743; count ------- 1 (1 row) -- now singe-row INSERT from a worker \c - - - :worker_1_port INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744; count ------- 1 (1 row) -- now singe-row INSERT to the other worker \c - - - :worker_2_port INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32745; count ------- 1 (1 row) -- and see all the inserted rows SELECT * FROM limit_orders_mx; id | symbol | bidder_id | placed_at | kind | limit_price -------+--------+-----------+--------------------------+------+------------- 32744 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 32743 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 32745 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (3 rows) -- basic single-row INSERT with RETURNING INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -------+--------+-----------+--------------------------+------+------------- 32746 | AAPL | 9580 | Tue Oct 19 10:23:54 2004 | buy | 20.69 (1 row) -- INSERT with DEFAULT in the target list INSERT INTO 
limit_orders_mx VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', DEFAULT); SELECT * FROM limit_orders_mx WHERE id = 12756; id | symbol | bidder_id | placed_at | kind | limit_price -------+--------+-----------+--------------------------+------+------------- 12756 | MSFT | 10959 | Wed May 08 07:29:23 2013 | sell | 0.00 (1 row) -- INSERT with expressions in target list INSERT INTO limit_orders_mx VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT * FROM limit_orders_mx WHERE id = 430; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+----------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) -- INSERT without partition key INSERT INTO limit_orders_mx DEFAULT VALUES; ERROR: cannot perform an INSERT without a partition column value -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; -- INSERT violating NOT NULL constraint INSERT INTO limit_orders_mx VALUES (NULL, 'T', 975234, DEFAULT); ERROR: cannot perform an INSERT with NULL in the partition column -- INSERT violating column constraint INSERT INTO limit_orders_mx VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'sell', -5.00); ERROR: new row for relation "limit_orders_mx_1220092" violates check constraint "limit_orders_mx_limit_price_check" DETAIL: Failing row contains (18811, BUD, 14962, 2014-04-05 08:32:16, sell, -5.00). CONTEXT: while executing command on localhost:57637 -- INSERT violating primary key constraint INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58); ERROR: duplicate key value violates unique constraint "limit_orders_mx_pkey_1220093" DETAIL: Key (id)=(32743) already exists. CONTEXT: while executing command on localhost:57638 -- INSERT violating primary key constraint, with RETURNING specified. INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *; ERROR: duplicate key value violates unique constraint "limit_orders_mx_pkey_1220093" DETAIL: Key (id)=(32743) already exists. CONTEXT: while executing command on localhost:57638 -- INSERT, with RETURNING specified, failing with a non-constraint error INSERT INTO limit_orders_mx VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0; ERROR: could not modify any active placements SET client_min_messages TO DEFAULT; -- commands with non-constant partition values are unsupported INSERT INTO limit_orders_mx VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45', 'sell', 0.58); -- values for other columns are totally fine INSERT INTO limit_orders_mx VALUES (2036, 'GOOG', 5634, now(), 'buy', random()); -- commands with mutable functions in their quals DELETE FROM limit_orders_mx WHERE id = 246 AND bidder_id = (random() * 1000); ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE -- commands with mutable but non-volatile functions(ie: stable func.) 
in their quals -- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable) DELETE FROM limit_orders_mx WHERE id = 246 AND placed_at = current_timestamp::timestamp; -- commands with multiple rows are supported INSERT INTO limit_orders_mx VALUES (2037, 'GOOG', 5634, now(), 'buy', random()), (2038, 'GOOG', 5634, now(), 'buy', random()), (2039, 'GOOG', 5634, now(), 'buy', random()); -- connect back to the other node \c - - - :worker_1_port -- commands containing a CTE are unsupported WITH deleted_orders AS (DELETE FROM limit_orders_mx RETURNING *) INSERT INTO limit_orders_mx DEFAULT VALUES; ERROR: common table expressions are not supported in distributed modifications -- test simple DELETE INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count ------- 1 (1 row) DELETE FROM limit_orders_mx WHERE id = 246; SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count ------- 0 (1 row) -- test simple DELETE with RETURNING DELETE FROM limit_orders_mx WHERE id = 430 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+----------------- 430 | IBM | 214 | Tue Jan 28 15:31:17 2003 | buy | 1.4142135623731 (1 row) SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430; count ------- 0 (1 row) -- DELETE with expression in WHERE clause INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count ------- 1 (1 row) DELETE FROM limit_orders_mx WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; count ------- 0 (1 row) -- commands with no constraints on the partition key are not supported DELETE FROM limit_orders_mx WHERE bidder_id = 162; ERROR: cannot run DELETE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). -- commands with a USING clause are unsupported CREATE TABLE bidders ( name text, id bigint ); DELETE FROM limit_orders_mx USING bidders WHERE limit_orders_mx.id = 246 AND limit_orders_mx.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; ERROR: cannot plan queries which include both local and distributed relations -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *) DELETE FROM limit_orders_mx; ERROR: common table expressions are not supported in distributed modifications -- cursors are not supported DELETE FROM limit_orders_mx WHERE CURRENT OF cursor_name; ERROR: cannot run DELETE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). 
INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); -- simple UPDATE UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders_mx WHERE id = 246; symbol -------- GM (1 row) -- simple UPDATE with RETURNING UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+------------- 246 | GM | 162 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- expression UPDATE UPDATE limit_orders_mx SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders_mx WHERE id = 246; bidder_id ----------- 18 (1 row) -- expression UPDATE with RETURNING UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | sell | 20.69 (1 row) -- multi-column UPDATE UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders_mx WHERE id = 246; kind | limit_price ------+------------- buy | 0.00 (1 row) -- multi-column UPDATE with RETURNING UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; id | symbol | bidder_id | placed_at | kind | limit_price -----+--------+-----------+--------------------------+------+------------- 246 | GM | 30 | Mon Jul 02 16:32:15 2007 | buy | 999 (1 row) -- Test that on unique contraint violations, we fail fast INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); ERROR: duplicate key value violates unique constraint "limit_orders_mx_pkey_1220093" DETAIL: Key (id)=(275) already exists. CONTEXT: while executing command on localhost:57638 -- commands with no constraints on the partition key are not supported UPDATE limit_orders_mx SET limit_price = 0.00; ERROR: cannot run UPDATE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). 
-- attempting to change the partition key is unsupported UPDATE limit_orders_mx SET id = 0 WHERE id = 246; ERROR: modifying the partition value of rows is not allowed -- UPDATEs with a FROM clause are unsupported UPDATE limit_orders_mx SET limit_price = 0.00 FROM bidders WHERE limit_orders_mx.id = 246 AND limit_orders_mx.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; ERROR: cannot plan queries which include both local and distributed relations -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *) UPDATE limit_orders_mx SET symbol = 'GM'; ERROR: common table expressions are not supported in distributed modifications SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; symbol | bidder_id --------+----------- GM | 30 (1 row) -- updates referencing just a var are supported UPDATE limit_orders_mx SET bidder_id = id WHERE id = 246; -- updates referencing a column are supported UPDATE limit_orders_mx SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders_mx SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; symbol | bidder_id --------+----------- gm | 247 (1 row) -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; id | lower | symbol -----+-------+-------- 246 | gm | GM (1 row) -- connect coordinator to run the DDL \c - - - :master_port ALTER TABLE limit_orders_mx ADD COLUMN array_of_values integer[]; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- connect back to the other node \c - - - :worker_2_port -- updates referencing STABLE functions are allowed UPDATE limit_orders_mx SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246; -- so are binary operators UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246; -- connect back to the other node \c - - - :worker_2_port -- immutable function calls with vars are also allowed UPDATE limit_orders_mx SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246; CREATE FUNCTION stable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; -- but STABLE function calls with vars are not allowed UPDATE limit_orders_mx SET array_of_values = stable_append_mx(array_of_values, 3) WHERE id = 246; ERROR: STABLE functions used in UPDATE queries cannot be called with column references SELECT array_of_values FROM limit_orders_mx WHERE id = 246; array_of_values ----------------- {1,2} (1 row) -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT; UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; ERROR: null value in column "bidder_id" violates not-null constraint DETAIL: Failing row contains (246, GM, null, 2007-07-02 16:32:15, buy, 999, {1,2}). 
CONTEXT: while executing command on localhost:57637 SELECT array_of_values FROM limit_orders_mx WHERE id = 246; array_of_values ----------------- {1,2} (1 row) -- connect coordinator to run the DDL \c - - - :master_port ALTER TABLE limit_orders_mx DROP array_of_values; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- connect back to the other node \c - - - :worker_2_port -- even in RETURNING UPDATE limit_orders_mx SET placed_at = placed_at WHERE id = 246 RETURNING NOW(); ERROR: non-IMMUTABLE functions are not allowed in the RETURNING clause -- cursors are not supported UPDATE limit_orders_mx SET symbol = 'GM' WHERE CURRENT OF cursor_name; ERROR: cannot run UPDATE command which targets multiple shards HINT: Consider using an equality filter on partition column "id" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). -- check that multi-row UPDATE/DELETEs with RETURNING work INSERT INTO multiple_hash_mx VALUES ('0', '1'); INSERT INTO multiple_hash_mx VALUES ('0', '2'); INSERT INTO multiple_hash_mx VALUES ('0', '3'); INSERT INTO multiple_hash_mx VALUES ('0', '4'); INSERT INTO multiple_hash_mx VALUES ('0', '5'); INSERT INTO multiple_hash_mx VALUES ('0', '6'); UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; category | data ----------+------ 0 | 1-1 0 | 2-1 0 | 3-1 0 | 4-1 0 | 5-1 0 | 6-1 (6 rows) DELETE FROM multiple_hash_mx WHERE category = '0' RETURNING *; category | data ----------+------ 0 | 1-1 0 | 2-1 0 | 3-1 0 | 4-1 0 | 5-1 0 | 6-1 (6 rows) -- ensure returned row counters are correct \set QUIET off INSERT INTO multiple_hash_mx VALUES ('1', '1'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('1', '2'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('1', '3'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '1'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '2'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '3'); INSERT 0 1 INSERT INTO multiple_hash_mx VALUES ('2', '3') RETURNING *; category | data ----------+------ 2 | 3 (1 row) INSERT 0 1 -- check that update return the right number of rows -- one row UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '1' AND data = '1'; UPDATE 1 -- three rows UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1'; UPDATE 3 -- three rows, with RETURNING UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING category; category ---------- 1 1 1 (3 rows) UPDATE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; category | data ----------+--------- 1 | 1-1-2-2 1 | 2-2-2 1 | 3-2-2 (3 rows) -- check that deletes return the right number of rows -- one row DELETE FROM multiple_hash_mx WHERE category = '2' AND data = '1'; DELETE 1 -- two rows DELETE FROM multiple_hash_mx WHERE category = '2'; DELETE 3 -- three rows, with RETURNING DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category; category ---------- 1 1 1 (3 rows) DELETE 3 -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; category | data ----------+------ (0 rows) SELECT * FROM multiple_hash_mx WHERE category = '2' ORDER BY category, data; category | data ----------+------ (0 rows) --- INSERT ... SELECT ... 
FROM commands are supported from workers INSERT INTO multiple_hash_mx SELECT s, s*2 FROM generate_series(1,10) s; INSERT 0 10 -- but are never distributed BEGIN; BEGIN SET LOCAL client_min_messages TO DEBUG1; SET INSERT INTO multiple_hash_mx SELECT * FROM multiple_hash_mx; DEBUG: distributed INSERT ... SELECT can only be performed from the coordinator DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT 0 10 END; COMMIT -- verify interaction of default values, SERIAL, and RETURNING \set QUIET on -- make sure this test always returns the same output no matter which tests have run SELECT minimum_value::bigint AS min_value, maximum_value::bigint AS max_value FROM information_schema.sequences WHERE sequence_name = 'app_analytics_events_mx_id_seq' \gset SELECT last_value FROM app_analytics_events_mx_id_seq \gset ALTER SEQUENCE app_analytics_events_mx_id_seq NO MINVALUE NO MAXVALUE; SELECT setval('app_analytics_events_mx_id_seq'::regclass, 3940649673949184); setval ------------------ 3940649673949184 (1 row) INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; id ------------------ 3940649673949185 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (102, 'Wayz') RETURNING id; id ------------------ 3940649673949186 (1 row) INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNING *; id | app_id | name ------------------+--------+------ 3940649673949187 | 103 | Mynt (1 row) -- clean up SELECT setval('app_analytics_events_mx_id_seq'::regclass, :last_value); setval ------------------ 3940649673949185 (1 row) ALTER SEQUENCE app_analytics_events_mx_id_seq MINVALUE :min_value MAXVALUE :max_value; citus-7.0.3/src/test/regress/expected/multi_mx_modifying_xacts.out000066400000000000000000000273261317107136600255460ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1340000; -- =================================================================== -- test end-to-end modification functionality for mx tables in transactions -- =================================================================== -- add some data INSERT INTO researchers_mx VALUES (1, 1, 'Donald Knuth'); INSERT INTO researchers_mx VALUES (2, 1, 'Niklaus Wirth'); INSERT INTO researchers_mx VALUES (3, 2, 'Tony Hoare'); INSERT INTO researchers_mx VALUES (4, 2, 'Kenneth Iverson'); -- replace a researcher, reusing their id on the coordinator BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; name ------------- John Backus (1 row) -- do it on the worker node as well \c - - - :worker_1_port BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; name ---------------------- John Backus Worker 1 (1 row) -- do it on the worker other node as well \c - - - :worker_2_port BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; name ---------------------- John Backus Worker 2 (1 row) \c - - - :master_port -- abort a modification BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; name -------------- Donald Knuth (1 row) \c - - - :worker_1_port -- abort 
a modification on the worker node BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; name -------------- Donald Knuth (1 row) \c - - - :worker_2_port -- abort a modification on the other worker node BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; name -------------- Donald Knuth (1 row) -- switch back to the first worker node \c - - - :worker_1_port -- creating savepoints should work... BEGIN; INSERT INTO researchers_mx VALUES (5, 3, 'Dennis Ritchie'); SAVEPOINT hire_thompson; INSERT INTO researchers_mx VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 3 AND id = 6; name -------------- Ken Thompson (1 row) -- even if created by PL/pgSQL... \set VERBOSITY terse BEGIN; DO $$ BEGIN INSERT INTO researchers_mx VALUES (10, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; COMMIT; -- rollback should also work BEGIN; INSERT INTO researchers_mx VALUES (7, 4, 'Jim Gray'); SAVEPOINT hire_engelbart; INSERT INTO researchers_mx VALUES (8, 4, 'Douglas Engelbart'); ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 4; name ---------- Jim Gray (1 row) BEGIN; DO $$ BEGIN INSERT INTO researchers_mx VALUES (NULL, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; NOTICE: caught not_null_violation COMMIT; \set VERBOSITY default -- should be valid to edit labs_mx after researchers_mx... BEGIN; INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id; id | lab_id | name | id | name ----+--------+-------------------+----+------------ 8 | 5 | Douglas Engelbart | 5 | Los Alamos (1 row) -- and the other way around is also allowed BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport'); COMMIT; -- have the same test on the other worker node \c - - - :worker_2_port -- should be valid to edit labs_mx after researchers_mx... BEGIN; INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id; id | lab_id | name | id | name ----+--------+-------------------+----+------------ 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos 8 | 5 | Douglas Engelbart | 5 | Los Alamos (4 rows) -- and the other way around is also allowed BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport'); COMMIT; -- switch back to the worker node \c - - - :worker_1_port -- this logic doesn't apply to router SELECTs occurring after a modification: -- selecting from the modified node is fine... 
BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); SELECT count(*) FROM researchers_mx WHERE lab_id = 6; count ------- 2 (1 row) ABORT; -- doesn't apply to COPY after modifications BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); \copy labs_mx from stdin delimiter ',' COMMIT; -- copy will also work if before any modifications BEGIN; \copy labs_mx from stdin delimiter ',' SELECT name FROM labs_mx WHERE id = 10; name ------------------ Weyland-Yutani-1 Weyland-Yutani-2 (2 rows) INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); COMMIT; \c - - - :worker_1_port -- test primary key violations BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103" DETAIL: Key (id)=(1) already exists. CONTEXT: while executing command on localhost:57637 COMMIT; -- data shouldn't have persisted... SELECT * FROM objects_mx WHERE id = 1; id | name ----+------ (0 rows) -- same test on the second worker node \c - - - :worker_2_port -- test primary key violations BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); ERROR: duplicate key value violates unique constraint "objects_mx_pkey_1220103" DETAIL: Key (id)=(1) already exists. CONTEXT: while executing command on localhost:57637 COMMIT; -- data shouldn't have persisted... SELECT * FROM objects_mx WHERE id = 1; id | name ----+------ (0 rows) -- create trigger on one worker to reject certain values \c - - - :worker_1_port CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); -- test partial failure; statement 1 successed, statement 2 fails \set VERBOSITY terse BEGIN; INSERT INTO labs_mx VALUES (7, 'E Corp'); INSERT INTO objects_mx VALUES (2, 'BAD'); WARNING: illegal value ERROR: could not modify any active placements COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; id | name ----+------ (0 rows) SELECT * FROM labs_mx WHERE id = 7; id | name ----+------ (0 rows) -- same failure test from worker 2 \c - - - :worker_2_port -- test partial failure; statement 1 successed, statement 2 fails BEGIN; INSERT INTO labs_mx VALUES (7, 'E Corp'); INSERT INTO objects_mx VALUES (2, 'BAD'); WARNING: illegal value ERROR: could not modify any active placements COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; id | name ----+------ (0 rows) SELECT * FROM labs_mx WHERE id = 7; id | name ----+------ (0 rows) \c - - - :worker_1_port -- what if there are errors on different shards at different times? 
\c - - - :worker_1_port CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON labs_mx_1220102 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); WARNING: illegal value ERROR: could not modify any active placements INSERT INTO labs_mx VALUES (8, 'Aperture Science'); ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO labs_mx VALUES (9, 'BAD'); ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; id | name ----+------ (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name ----+------ (0 rows) -- same test from the other worker \c - - - :worker_2_port BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); WARNING: illegal value ERROR: could not modify any active placements INSERT INTO labs_mx VALUES (8, 'Aperture Science'); ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO labs_mx VALUES (9, 'BAD'); ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; id | name ----+------ (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name ----+------ (0 rows) -- what if the failures happen at COMMIT time? \c - - - :worker_1_port DROP TRIGGER reject_bad_mx ON objects_mx_1220103; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); -- should be the same story as before, just at COMMIT time BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (9, 'Umbrella Corporation'); COMMIT; WARNING: illegal value WARNING: failed to commit transaction on localhost:57637 WARNING: could not commit transaction for shard 1220103 on any active node WARNING: could not commit transaction for shard 1220102 on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; id | name ----+------ (0 rows) SELECT * FROM labs_mx WHERE id = 7; id | name ----+------ (0 rows) DROP TRIGGER reject_bad_mx ON labs_mx_1220102; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON labs_mx_1220102 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value WARNING: failed to commit transaction on localhost:57637 WARNING: could not commit transaction for shard 1220103 on any active node WARNING: could not commit transaction for shard 1220102 on any active node ERROR: could not commit transaction on any active node -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; id | name ----+------ (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name ----+------ (0 rows) -- what if one shard (objects_mx) succeeds but another (labs_mx) completely fails? 
\c - - - :worker_1_port DROP TRIGGER reject_bad_mx ON objects_mx_1220103; BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; WARNING: illegal value WARNING: failed to commit transaction on localhost:57637 WARNING: could not commit transaction for shard 1220103 on any active node WARNING: could not commit transaction for shard 1220102 on any active node ERROR: could not commit transaction on any active node -- no data should persists SELECT * FROM objects_mx WHERE id = 1; id | name ----+------ (0 rows) SELECT * FROM labs_mx WHERE id = 8; id | name ----+------ (0 rows) citus-7.0.3/src/test/regress/expected/multi_mx_reference_table.out000066400000000000000000000473361317107136600254670ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; \c - - - :master_port CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test'); create_reference_table ------------------------ (1 row) INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); \c - - - :worker_1_port -- run some queries on top of the data SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 4 | 4 | 4 | Sun Dec 04 00:00:00 2016 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 (5 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) SELECT value_1, value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 3; value_1 | value_2 ---------+--------- 1 | 1 2 | 2 3 | 3 (3 rows) SELECT value_1, value_3 FROM reference_table_test WHERE value_2 >= 4 ORDER BY 2 LIMIT 3; value_1 | value_3 ---------+--------- 4 | 4 5 | 5 (2 rows) SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2; value_1 | ?column? ---------+---------- 1 | 15 2 | 30 (2 rows) SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; value_1 | ?column? 
---------+---------- 3 | 45 4 | 60 (2 rows) SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; value_2 | value_4 ---------+-------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; value_2 | value_4 ---------+--------- (0 rows) SELECT value_2, value_4 FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; value_2 | value_4 ---------+-------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) SELECT value_2, value_4 FROM reference_table_test WHERE ( value_3 = '2' OR value_1 = 3 ) AND FALSE; value_2 | value_4 ---------+--------- (0 rows) SELECT * FROM reference_table_test WHERE value_2 IN ( SELECT value_3::FLOAT FROM reference_table_test ) AND value_1 < 3; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) SELECT value_4 FROM reference_table_test WHERE value_3 IN ( '1', '2' ); value_4 -------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 (2 rows) SELECT date_part('day', value_4) FROM reference_table_test WHERE value_3 IN ( '5', '2' ); date_part ----------- 2 5 (2 rows) SELECT value_4 FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; value_4 --------- (0 rows) SELECT value_4 FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; value_4 -------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (2 rows) SELECT value_4 FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); value_4 --------- (0 rows) SELECT value_1 FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; value_1 --------- 1 2 3 (3 rows) SELECT value_1 FROM reference_table_test WHERE FALSE; value_1 --------- (0 rows) SELECT value_1 FROM reference_table_test WHERE int4eq(1, 2); value_1 --------- (0 rows) -- rename output name and do some operations SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; id | age ----+----- 1 | 15 2 | 30 3 | 45 4 | 60 5 | 75 (5 rows) -- queries with CTEs are supported WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM some_data; value_2 | value_4 ---------+-------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 5 | Mon Dec 05 00:00:00 2016 (3 rows) -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) -- queries which involve functions in FROM clause are supported if it goes to a single worker. 
SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 | position ---------+---------+---------+--------------------------+---------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; value_1 | value_2 | value_3 | value_4 | position ---------+---------+---------+--------------------------+---------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 (2 rows) -- set operations are supported SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 UNION SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 EXCEPT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 INTERSECT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) -- to make the tests more interested for aggregation tests, ingest some more data \c - - - :master_port INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); \c - - - :worker_1_port -- some aggregations SELECT value_4, SUM(value_2) FROM reference_table_test GROUP BY value_4 HAVING SUM(value_2) > 3 ORDER BY 1; value_4 | sum --------------------------+----- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 Sun Dec 04 00:00:00 2016 | 4 Mon Dec 05 00:00:00 2016 | 5 (4 rows) SELECT value_4, value_3, SUM(value_2) FROM reference_table_test GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; value_4 | value_3 | sum --------------------------+---------+----- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 Sat Dec 03 00:00:00 2016 | | 6 Sun Dec 04 00:00:00 2016 | | 4 Mon Dec 05 00:00:00 2016 | | 5 | 1 | 2 | 2 | 4 | 3 | 6 | 4 | 4 | 5 | 5 (10 rows) -- distinct clauses also work fine SELECT DISTINCT value_4 FROM reference_table_test ORDER BY 1; value_4 -------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 Sat Dec 03 00:00:00 2016 Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (5 rows) -- window functions are also supported SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; value_4 | rank --------------------------+------ Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 Sat Dec 03 00:00:00 2016 | 1 Sat Dec 03 00:00:00 2016 | 1 Sun Dec 04 00:00:00 2016 | 1 Mon Dec 05 00:00:00 2016 | 1 (8 rows) -- window functions are also supported SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; value_4 | avg --------------------------+------------------------ Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Fri Dec 02 00:00:00 2016 | 
2.0000000000000000 Fri Dec 02 00:00:00 2016 | 2.0000000000000000 Sat Dec 03 00:00:00 2016 | 3.0000000000000000 Sat Dec 03 00:00:00 2016 | 3.0000000000000000 Sun Dec 04 00:00:00 2016 | 4.0000000000000000 Mon Dec 05 00:00:00 2016 | 5.0000000000000000 (8 rows) SELECT count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test; c --- 3 (1 row) SELECT value_1, count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test GROUP BY value_1 ORDER BY 1; value_1 | c ---------+--- 1 | 0 2 | 0 3 | 1 4 | 1 5 | 1 (5 rows) -- selects inside a transaction works fine as well BEGIN; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 4 | 4 | 4 | Sun Dec 04 00:00:00 2016 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (8 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) END; -- cursor queries also works fine BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM reference_table_test WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (3 rows) FETCH test_cursor; -- fetch one row after the last value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) FETCH BACKWARD test_cursor; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) END; -- table creation queries inside can be router plannable CREATE TEMP TABLE temp_reference_test as SELECT * FROM reference_table_test WHERE value_1 = 1; \c - - - :master_port -- all kinds of joins are supported among reference tables -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); create_reference_table ------------------------ (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); create_reference_table ------------------------ (1 row) -- ingest some data to both tables INSERT INTO reference_table_test_second VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test_second VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test_second VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test_third VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05'); \c - - - :worker_2_port -- some very basic tests SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_2 ORDER BY 1; value_1 --------- 1 2 3 (3 rows) SELECT DISTINCT t1.value_1 FROM 
reference_table_test t1, reference_table_test_third t3 WHERE t1.value_2 = t3.value_2 ORDER BY 1; value_1 --------- 4 5 (2 rows) SELECT DISTINCT t2.value_1 FROM reference_table_test_second t2, reference_table_test_third t3 WHERE t2.value_2 = t3.value_2 ORDER BY 1; value_1 --------- (0 rows) -- join on different columns and different data types via casts SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_1 ORDER BY 1; value_1 --------- 1 2 3 (3 rows) SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_3::int ORDER BY 1; value_1 --------- 1 2 3 (3 rows) SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; value_1 --------- 1 2 3 (3 rows) -- ingest a common row to see more meaningful results with joins involving 3 tables \c - - - :master_port INSERT INTO reference_table_test_third VALUES (3, 3.0, '3', '2016-12-03'); \c - - - :worker_1_port SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; value_1 --------- 3 (1 row) -- same query on different columns SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; value_1 --------- 3 (1 row) -- with the JOIN syntax SELECT DISTINCT t1.value_1 FROM reference_table_test t1 JOIN reference_table_test_second t2 USING (value_1) JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; value_1 --------- 3 (1 row) -- and left/right joins SELECT DISTINCT t1.value_1 FROM reference_table_test t1 LEFT JOIN reference_table_test_second t2 USING (value_1) LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; value_1 --------- 1 2 3 4 5 (5 rows) SELECT DISTINCT t1.value_1 FROM reference_table_test t1 RIGHT JOIN reference_table_test_second t2 USING (value_1) RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; value_1 --------- 3 (2 rows) \c - - - :master_port SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); create_distributed_table -------------------------- (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); create_distributed_table -------------------------- (1 row) DELETE FROM reference_table_test; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test_2 VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test_2 VALUES (2, 2.0, '2', '2016-12-02'); \c - - - :worker_1_port SET client_min_messages TO DEBUG1; SET citus.log_multi_join_order TO TRUE; SELECT reference_table_test.value_1 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_1 = reference_table_test.value_1; LOG: join 
order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ] value_1 --------- 1 2 (2 rows) SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ] value_2 --------- 1 2 (2 rows) SELECT colocated_table_test.value_2 FROM colocated_table_test, reference_table_test WHERE reference_table_test.value_1 = colocated_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ] value_2 --------- 1 2 (2 rows) SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ cartesian product "colocated_table_test_2" ] ERROR: cannot perform distributed planning on this query DETAIL: Cartesian products are currently unsupported SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ local partition join "colocated_table_test_2" ] value_2 --------- 1 2 (2 rows) SET citus.task_executor_type to "task-tracker"; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 --------- 1 2 (2 rows) SELECT reference_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 --------- 1 2 (2 rows) SET client_min_messages TO NOTICE; SET citus.log_multi_join_order TO FALSE; -- clean up tables \c - - - :master_port DROP TABLE reference_table_test, reference_table_test_second, reference_table_test_third;; citus-7.0.3/src/test/regress/expected/multi_mx_repartition_join_w1.out000066400000000000000000000007301317107136600263330ustar00rootroot00000000000000-- Test two concurrent reparttition joins from two different workers -- This test runs the below query from the :worker_1_port and the -- concurrent test runs the same query on :worker_2_port. Note that, both -- tests use the same sequence ids but the queries should not fail. \c - - - :worker_1_port SET citus.task_executor_type TO "task-tracker"; CREATE TEMP TABLE t1 AS SELECT l1.l_comment FROM lineitem_mx l1, orders_mx l2 WHERE l1.l_comment = l2.o_comment; citus-7.0.3/src/test/regress/expected/multi_mx_repartition_join_w2.out000066400000000000000000000007331317107136600263370ustar00rootroot00000000000000-- Test two concurrent reparttition joins from two different workers -- This test runs the below query from the :worker_2_port and the -- concurrent test runs the same query on :worker_1_port. 
Note that, both -- tests use the same sequence ids but the queries should not fail. \c - - - :worker_2_port SET citus.task_executor_type TO "task-tracker"; CREATE TEMP TABLE t1 AS SELECT l1.l_comment FROM lineitem_mx l1, orders_mx l2 WHERE l1.l_comment = l2.o_comment; citus-7.0.3/src/test/regress/expected/multi_mx_repartition_udt_prepare.out000066400000000000000000000165761317107136600273160ustar00rootroot00000000000000-- -- MULTI_MX_REPARTITION_UDT_PREPARE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation CREATE TABLE repartition_udt ( pk integer not null, udtcol test_udt, txtcol text ); CREATE TABLE repartition_udt_other ( pk integer not null, udtcol test_udt, txtcol text ); -- Connect directly to a worker, create and drop the type, then -- proceed with type creation as above; thus the OIDs will be different. -- so that the OID is off. \c - - - :worker_1_port CREATE TYPE test_udt AS (i integer, i2 integer); DROP TYPE test_udt CASCADE; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - - :worker_2_port -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation -- Connect to master \c - - - :master_port -- Distribute and populate the two tables. SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 3; SELECT create_distributed_table('repartition_udt', 'pk'); create_distributed_table -------------------------- (1 row) SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk'); create_distributed_table -------------------------- (1 row) INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (3, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt values (4, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (5, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (6, '(2,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (7, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (8, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (9, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo'); SET client_min_messages = LOG; -- Query that should result in a repartition join on int column, and be empty. 
SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- (0 rows) -- Query that should result in a repartition join on UDT column. SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] QUERY PLAN -------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 3 Merge Task Count: 4 -> MapMergeJob Map Task Count: 5 Merge Task Count: 4 (9 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo 5 | (2,2) | foo | 11 | (2,2) | foo 6 | (2,3) | foo | 12 | (2,3) | foo (5 rows) \c - - - :worker_1_port \c - - - :worker_2_port citus-7.0.3/src/test/regress/expected/multi_mx_repartition_udt_w1.out000066400000000000000000000023651317107136600261760ustar00rootroot00000000000000-- -- MULTI_MX_REPARTITION_W1_UDT -- \c - - - :worker_1_port SET client_min_messages = LOG; -- Query that should result in a repartition join on UDT column. SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; -- Query that should result in a repartition join on int column, and be empty. SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ local partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- (0 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo 5 | (2,2) | foo | 11 | (2,2) | foo 6 | (2,3) | foo | 12 | (2,3) | foo (5 rows) citus-7.0.3/src/test/regress/expected/multi_mx_repartition_udt_w2.out000066400000000000000000000023651317107136600261770ustar00rootroot00000000000000-- -- MULTI_MX_REPARTITION_W2_UDT -- \c - - - :worker_2_port SET client_min_messages = LOG; -- Query that should result in a repartition join on UDT column. SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; -- Query that should result in a repartition join on int column, and be empty. 
SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ local partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- (0 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo 5 | (2,2) | foo | 11 | (2,2) | foo 6 | (2,3) | foo | 12 | (2,3) | foo (5 rows) citus-7.0.3/src/test/regress/expected/multi_mx_router_planner.out000066400000000000000000001322721317107136600254130ustar00rootroot00000000000000-- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== -- run all the router queries from the one of the workers \c - - - :worker_1_port -- this table is used in a CTE test CREATE TABLE authors_hash_mx ( name text, id bigint ); -- create a bunch of test data INSERT INTO articles_hash_mx VALUES ( 1, 1, 'arsenous', 9572); INSERT INTO articles_hash_mx VALUES ( 2, 2, 'abducing', 13642); INSERT INTO articles_hash_mx VALUES ( 3, 3, 'asternal', 10480); INSERT INTO articles_hash_mx VALUES ( 4, 4, 'altdorfer', 14551); INSERT INTO articles_hash_mx VALUES ( 5, 5, 'aruru', 11389); INSERT INTO articles_hash_mx VALUES ( 6, 6, 'atlases', 15459); INSERT INTO articles_hash_mx VALUES ( 7, 7, 'aseptic', 12298); INSERT INTO articles_hash_mx VALUES ( 8, 8, 'agatized', 16368); INSERT INTO articles_hash_mx VALUES ( 9, 9, 'alligate', 438); INSERT INTO articles_hash_mx VALUES (10, 10, 'aggrandize', 17277); INSERT INTO articles_hash_mx VALUES (11, 1, 'alamo', 1347); INSERT INTO articles_hash_mx VALUES (12, 2, 'archiblast', 18185); INSERT INTO articles_hash_mx VALUES (13, 3, 'aseyev', 2255); INSERT INTO articles_hash_mx VALUES (14, 4, 'andesite', 19094); INSERT INTO articles_hash_mx VALUES (15, 5, 'adversa', 3164); INSERT INTO articles_hash_mx VALUES (16, 6, 'allonym', 2); INSERT INTO articles_hash_mx VALUES (17, 7, 'auriga', 4073); INSERT INTO articles_hash_mx VALUES (18, 8, 'assembly', 911); INSERT INTO articles_hash_mx VALUES (19, 9, 'aubergiste', 4981); INSERT INTO articles_hash_mx VALUES (20, 10, 'absentness', 1820); INSERT INTO articles_hash_mx VALUES (21, 1, 'arcading', 5890); INSERT INTO articles_hash_mx VALUES (22, 2, 'antipope', 2728); INSERT INTO articles_hash_mx VALUES (23, 3, 'abhorring', 6799); INSERT INTO articles_hash_mx VALUES (24, 4, 'audacious', 3637); INSERT INTO articles_hash_mx VALUES (25, 5, 'antehall', 7707); INSERT INTO articles_hash_mx VALUES (26, 6, 'abington', 4545); INSERT INTO articles_hash_mx VALUES (27, 7, 'arsenous', 8616); INSERT INTO articles_hash_mx VALUES (28, 8, 'aerophyte', 5454); INSERT INTO articles_hash_mx VALUES (29, 9, 'amateur', 9524); INSERT INTO articles_hash_mx VALUES (30, 10, 'andelee', 6363); INSERT INTO articles_hash_mx VALUES (31, 1, 'athwartships', 7271); INSERT INTO articles_hash_mx VALUES (32, 2, 'amazon', 11342); INSERT INTO articles_hash_mx VALUES (33, 3, 'autochrome', 8180); INSERT INTO articles_hash_mx VALUES (34, 4, 
'amnestied', 12250); INSERT INTO articles_hash_mx VALUES (35, 5, 'aminate', 9089); INSERT INTO articles_hash_mx VALUES (36, 6, 'ablation', 13159); INSERT INTO articles_hash_mx VALUES (37, 7, 'archduchies', 9997); INSERT INTO articles_hash_mx VALUES (38, 8, 'anatine', 14067); INSERT INTO articles_hash_mx VALUES (39, 9, 'anchises', 10906); INSERT INTO articles_hash_mx VALUES (40, 10, 'attemper', 14976); INSERT INTO articles_hash_mx VALUES (41, 1, 'aznavour', 11814); INSERT INTO articles_hash_mx VALUES (42, 2, 'ausable', 15885); INSERT INTO articles_hash_mx VALUES (43, 3, 'affixal', 12723); INSERT INTO articles_hash_mx VALUES (44, 4, 'anteport', 16793); INSERT INTO articles_hash_mx VALUES (45, 5, 'afrasia', 864); INSERT INTO articles_hash_mx VALUES (46, 6, 'atlanta', 17702); INSERT INTO articles_hash_mx VALUES (47, 7, 'abeyance', 1772); INSERT INTO articles_hash_mx VALUES (48, 8, 'alkylic', 18610); INSERT INTO articles_hash_mx VALUES (49, 9, 'anyone', 2681); INSERT INTO articles_hash_mx VALUES (50, 10, 'anjanette', 19519); SET citus.task_executor_type TO 'real-time'; SET citus.large_table_shard_count TO 2; SET client_min_messages TO 'DEBUG2'; -- insert a single row for the test INSERT INTO articles_single_shard_hash_mx VALUES (50, 10, 'anjanette', 19519); DEBUG: Creating router plan DEBUG: Plan is router executable -- single-shard tests -- test simple select for a single row SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-----------+------------ 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles_hash_mx WHERE author_id = 10; DEBUG: Creating router plan DEBUG: Plan is router executable title ------------ aggrandize absentness andelee attemper anjanette (5 rows) -- try ordering them by word count SELECT title, word_count FROM articles_hash_mx WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; DEBUG: Creating router plan DEBUG: Plan is router executable title | word_count ------------+------------ anjanette | 19519 aggrandize | 17277 attemper | 14976 andelee | 6363 absentness | 1820 (5 rows) -- look at last two articles by an author SELECT title, id FROM articles_hash_mx WHERE author_id = 5 ORDER BY id LIMIT 2; DEBUG: Creating router plan DEBUG: Plan is router executable title | id ---------+---- aruru | 5 adversa | 15 (2 rows) -- find all articles by two authors in same shard -- but plan is not router executable due to order by SELECT title, author_id FROM articles_hash_mx WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id -------------+----------- aseptic | 7 auriga | 7 arsenous | 7 archduchies | 7 abeyance | 7 agatized | 8 assembly | 8 aerophyte | 8 anatine | 8 alkylic | 8 (10 rows) -- same query is router executable with no order by SELECT title, author_id FROM articles_hash_mx WHERE author_id = 7 OR author_id = 8; DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id -------------+----------- aseptic | 7 agatized | 8 auriga | 7 assembly | 8 arsenous | 7 aerophyte | 8 archduchies | 7 anatine | 8 abeyance | 7 alkylic | 8 (10 rows) -- add in some grouping expressions, still on same shard -- having queries unsupported in Citus SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10 GROUP BY author_id HAVING 
sum(word_count) > 1000 ORDER BY sum(word_count) DESC; DEBUG: Creating router plan DEBUG: Plan is router executable author_id | corpus_size -----------+------------- 10 | 59955 8 | 55410 7 | 36756 1 | 35894 (4 rows) -- however having clause is supported if it goes to a single shard SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx WHERE author_id = 1 GROUP BY author_id HAVING sum(word_count) > 1000 ORDER BY sum(word_count) DESC; DEBUG: Creating router plan DEBUG: Plan is router executable author_id | corpus_size -----------+------------- 1 | 35894 (1 row) -- query is a single shard query but can't do shard pruning, -- not router-plannable due to <= and IN SELECT * FROM articles_hash_mx WHERE author_id <= 1; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 41 | 1 | aznavour | 11814 43 | 3 | affixal | 12723 (10 rows) -- queries with CTEs are supported WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) SELECT * FROM first_author; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- queries with CTEs are supported even if CTE is not referenced inside query WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) SELECT title FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable title -------------- arsenous alamo arcading athwartships aznavour (5 rows) -- two CTE joins are supported if they go to the same worker WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title ----+-----------+----+-------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading 31 | 1 | 31 | athwartships 41 | 1 | 41 | aznavour (5 rows) WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title ----+-----------+----+------- (0 rows) -- CTE joins are not supported if table shards are at different workers WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. 
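-- Whether such a CTE join stays router-plannable depends on where the filtered shards
-- are placed; a minimal sketch for inspecting that placement (assuming the stock
-- get_shard_id_for_distribution_column() UDF and pg_dist_shard_placement are available)
-- would be:
-- SELECT shardid, nodename, nodeport
-- FROM pg_dist_shard_placement
-- WHERE shardid = get_shard_id_for_distribution_column('articles_hash_mx', 2);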
-- recursive CTEs are supported when filtered on partition column INSERT INTO company_employees_mx values(1, 1, 0); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees_mx values(1, 2, 1); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees_mx values(1, 3, 1); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees_mx values(1, 4, 2); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees_mx values(1, 5, 4); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees_mx values(3, 1, 0); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees_mx values(3, 15, 1); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees_mx values(3, 3, 1); DEBUG: Creating router plan DEBUG: Plan is router executable -- find employees at top 2 level within company hierarchy WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees_mx WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees_mx ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable company_id | employee_id | manager_id | level ------------+-------------+------------+------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 (3 rows) -- query becomes not router plannble and gets rejected -- if filter on company is dropped WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees_mx WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees_mx ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. -- logically wrong query, query involves different shards -- from the same table, but still router plannable due to -- shard being placed on the same worker. WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees_mx WHERE company_id = 3 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees_mx ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. 
-- grouping sets are supported on single shard SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash_mx WHERE author_id = 1 or author_id = 3 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle; DEBUG: Creating router plan DEBUG: Plan is router executable id | subtitle | count ----+----------+------- 1 | | 1 3 | | 1 11 | | 1 13 | | 1 21 | | 1 23 | | 1 31 | | 1 33 | | 1 41 | | 1 43 | | 1 | b | 1 | f | 1 | l | 1 | r | 2 | s | 2 | t | 1 | u | 1 | z | 1 (18 rows) -- grouping sets are not supported on multiple shards SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle; ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP HINT: Consider using an equality filter on the distributed table's partition column. -- queries which involve functions in FROM clause are supported if it goes to a single worker. SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | position ----+-----------+--------------+------------+---------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 31 | 1 | athwartships | 7271 | 3 41 | 1 | aznavour | 11814 | 3 (5 rows) SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | position ----+-----------+--------------+------------+---------- 1 | 1 | arsenous | 9572 | 3 3 | 3 | asternal | 10480 | 3 11 | 1 | alamo | 1347 | 3 13 | 3 | aseyev | 2255 | 3 21 | 1 | arcading | 5890 | 3 23 | 3 | abhorring | 6799 | 3 31 | 1 | athwartships | 7271 | 3 33 | 3 | autochrome | 8180 | 3 41 | 1 | aznavour | 11814 | 3 43 | 3 | affixal | 12723 | 3 (10 rows) -- they are not supported if multiple workers are involved SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. 
-- subqueries are supported in FROM clause but they are not router plannable SELECT articles_hash_mx.id,test.word_count FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id ORDER BY articles_hash_mx.id; DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 14 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". SELECT articles_hash_mx.id,test.word_count FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1 ORDER BY articles_hash_mx.id; DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 9 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". 
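-- NOTE (illustrative sketch, not captured expected output): the HINT above
-- suggests the task-tracker executor for repartition joins. Assuming the same
-- data, the failing subquery join could plausibly be retried as below; the
-- resulting rows are intentionally not reproduced here.
SET citus.task_executor_type TO 'task-tracker';
SELECT articles_hash_mx.id, test.word_count
	FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test
	WHERE test.id = articles_hash_mx.id
	ORDER BY articles_hash_mx.id;
SET citus.task_executor_type TO 'real-time';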
-- subqueries are not supported in SELECT clause SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash_mx a2 WHERE a.id = a2.id LIMIT 1) AS special_price FROM articles_hash_mx a; ERROR: could not run distributed query with subquery outside the FROM and WHERE clauses HINT: Consider using an equality filter on the distributed table's partition column. -- simple lookup query SELECT * FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- below query hits a single shard, router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- below query hits two shards, not router plannable + not router executable -- handled by real-time executor SELECT * FROM articles_hash_mx WHERE author_id = 1 OR author_id = 18; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- rename the output columns SELECT id as article_id, word_count * id as random_value FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable article_id | random_value ------------+-------------- 1 | 9572 11 | 14817 21 | 123690 31 | 225401 41 | 484374 (5 rows) -- we can push down co-located joins to a single worker SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash_mx a, articles_hash_mx b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- 10 | 17277 10 | 1820 10 | 6363 (3 rows) -- following join is router plannable since the same worker -- has both shards SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash_mx a, articles_single_shard_hash_mx b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- 10 | 19519 10 | 19519 10 | 19519 (3 rows) -- following join is not router plannable since there are no -- workers containing both shards, added a CTE to make this fail -- at logical planner WITH single_shard as (SELECT * FROM articles_single_shard_hash_mx) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash_mx a, single_shard b WHERE a.author_id = 2 and a.author_id = b.author_id LIMIT 3; DEBUG: Found no worker with all shard placements ERROR: could not run distributed query with complex table expressions -- single shard select with limit is router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (3 rows) -- single shard select with limit + offset is router 
plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 LIMIT 2 OFFSET 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) -- single shard select with limit + offset + order by is router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id desc LIMIT 2 OFFSET 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) -- single shard select with group by on non-partition column is router plannable SELECT id FROM articles_hash_mx WHERE author_id = 1 GROUP BY id ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- single shard select with distinct is router plannable SELECT distinct id FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- single shard aggregate is router plannable SELECT avg(word_count) FROM articles_hash_mx WHERE author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable avg -------------------- 12356.400000000000 (1 row) -- max, min, sum, count are router plannable on single shard SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles_hash_mx WHERE author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable max | min | sum | cnt -------+------+-------+----- 18185 | 2728 | 61782 | 5 (1 row) -- queries with aggregates and group by supported on single shard SELECT max(word_count) FROM articles_hash_mx WHERE author_id = 1 GROUP BY author_id; DEBUG: Creating router plan DEBUG: Plan is router executable max ------- 11814 (1 row) -- router plannable union queries are supported SELECT * FROM ( SELECT * FROM articles_hash_mx WHERE author_id = 1 UNION SELECT * FROM articles_hash_mx WHERE author_id = 3 ) AS combination ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 41 | 1 | aznavour | 11814 43 | 3 | affixal | 12723 (10 rows) (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) UNION (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable left ------ a (1 row) (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) INTERSECT (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable left ------ a (1 row) SELECT * FROM ( SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1 EXCEPT SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3 ) AS combination ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router executable left ------ al ar at az (4 rows) -- union queries are not supported if not router plannable -- there is an inconsistency on shard pruning between -- ubuntu/mac disabling log messages for this queries only SET client_min_messages to 'NOTICE'; (SELECT * FROM articles_hash_mx WHERE author_id = 1) UNION (SELECT * FROM articles_hash_mx 
WHERE author_id = 2); ERROR: could not run distributed query with UNION, INTERSECT, or EXCEPT HINT: Consider using an equality filter on the distributed table's partition column. SELECT * FROM ( (SELECT * FROM articles_hash_mx WHERE author_id = 1) UNION (SELECT * FROM articles_hash_mx WHERE author_id = 2)) uu; ERROR: cannot push down this subquery DETAIL: Currently all leaf queries need to have same filters on partition column -- error out for queries with repartition jobs SELECT * FROM articles_hash_mx a, articles_hash_mx b WHERE a.id = b.id AND a.author_id = 1; ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". -- queries which hit more than 1 shards are not router plannable or executable -- handled by real-time executor SELECT * FROM articles_hash_mx WHERE author_id >= 1 AND author_id <= 3; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 41 | 1 | aznavour | 11814 43 | 3 | affixal | 12723 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 32 | 2 | amazon | 11342 42 | 2 | ausable | 15885 (15 rows) SET citus.task_executor_type TO 'real-time'; -- Test various filtering options for router plannable check SET client_min_messages to 'DEBUG2'; -- this is definitely single shard -- and router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 and author_id >= 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to or SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 and (id = 1 or id = 41); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) -- router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 and (id = random()::int * 0); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- not router plannable due to function call on the right side SELECT * FROM articles_hash_mx WHERE author_id = (random()::int * 0 + 1); id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to or SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable due to abs(-1) getting converted to 1 by postgresql SELECT * FROM articles_hash_mx WHERE author_id = abs(-1); DEBUG: Creating router 
plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to abs() function SELECT * FROM articles_hash_mx WHERE 1 = abs(author_id); id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to abs() function SELECT * FROM articles_hash_mx WHERE author_id = abs(author_id - 2); id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable, function on different field SELECT * FROM articles_hash_mx WHERE author_id = 1 and (id = abs(id - 2)); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 (1 row) -- not router plannable due to is true SELECT * FROM articles_hash_mx WHERE (author_id = 1) is true; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable, (boolean expression) = true is collapsed to (boolean expression) SELECT * FROM articles_hash_mx WHERE (author_id = 1) = true; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable, between operator is on another column SELECT * FROM articles_hash_mx WHERE (author_id = 1) and id between 0 and 20; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) -- router plannable, partition column expression is and'ed to rest SELECT * FROM articles_hash_mx WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s'; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) -- router plannable, order is changed SELECT * FROM articles_hash_mx WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) -- router plannable SELECT * FROM articles_hash_mx WHERE (title like '%s' or title like 'a%') and (author_id = 1); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable SELECT * FROM articles_hash_mx WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000); DEBUG: Creating router 
plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 (3 rows) -- window functions are supported if query is router plannable SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash_mx WHERE author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable prev | title | word_count ----------+----------+------------ | afrasia | 864 afrasia | adversa | 3164 adversa | antehall | 7707 antehall | aminate | 9089 aminate | aruru | 11389 (5 rows) SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash_mx WHERE author_id = 5 ORDER BY word_count DESC; DEBUG: Creating router plan DEBUG: Plan is router executable prev | title | word_count ----------+----------+------------ aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 afrasia | adversa | 3164 | afrasia | 864 (5 rows) SELECT id, MIN(id) over (order by word_count) FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | min ----+----- 11 | 11 21 | 11 31 | 11 1 | 1 41 | 1 (5 rows) SELECT id, word_count, AVG(word_count) over (order by word_count) FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count | avg ----+------------+----------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 1 | 9572 | 6020.0000000000000000 41 | 11814 | 7178.8000000000000000 (5 rows) SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable word_count | rank ------------+------ 1347 | 1 5890 | 2 7271 | 3 9572 | 4 11814 | 5 (5 rows) -- window functions are not supported for not router plannable queries SELECT id, MIN(id) over (order by word_count) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2; ERROR: could not run distributed query with window functions HINT: Consider using an equality filter on the distributed table's partition column. SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash_mx WHERE author_id = 5 or author_id = 2; ERROR: could not run distributed query with window functions HINT: Consider using an equality filter on the distributed table's partition column. 
-- complex query hitting a single shard SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash_mx WHERE author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable c --- 5 (1 row) -- same query is not router plannable if hits multiple shards SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash_mx GROUP BY author_id; c --- 4 5 5 5 5 5 5 5 5 5 (10 rows) -- queries inside transactions can be router plannable BEGIN; SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) END; -- cursor queries are router plannable BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable FETCH test_cursor; id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 (1 row) FETCH test_cursor; id | author_id | title | word_count ----+-----------+-------+------------ 11 | 1 | alamo | 1347 (1 row) FETCH BACKWARD test_cursor; id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 (1 row) END; -- queries inside copy can be router plannable COPY ( SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id) TO STDOUT; DEBUG: Creating router plan DEBUG: Plan is router executable 1 1 arsenous 9572 11 1 alamo 1347 21 1 arcading 5890 31 1 athwartships 7271 41 1 aznavour 11814 -- table creation queries inside can be router plannable CREATE TEMP TABLE temp_articles_hash_mx as SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable -- router plannable queries may include filter for aggragates SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable count | count -------+------- 5 | 1 (1 row) -- non-router plannable queries support filters as well SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2; count | count -------+------- 10 | 2 (1 row) -- prepare queries can be router plannable PREPARE author_1_articles as SELECT * FROM articles_hash_mx WHERE author_id = 1; EXECUTE author_1_articles; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- parametric prepare queries can be router plannable PREPARE author_articles(int) as SELECT * FROM articles_hash_mx WHERE author_id = $1; EXECUTE author_articles(1); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- queries inside plpgsql functions could be router plannable CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; BEGIN SELECT MAX(id) FROM articles_hash_mx ah WHERE author_id = 1 into max_id; return 
max_id; END; $$ LANGUAGE plpgsql; SELECT author_articles_max_id(); DEBUG: Creating router plan CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement DEBUG: Plan is router executable CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement author_articles_max_id ------------------------ 41 (1 row) -- plpgsql function that return query results are not router plannable CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$ DECLARE BEGIN RETURN QUERY SELECT ah.id, ah.word_count FROM articles_hash_mx ah WHERE author_id = 1; END; $$ LANGUAGE plpgsql; SELECT * FROM author_articles_id_word_count(); DEBUG: Creating router plan CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY DEBUG: Plan is router executable CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash_mx ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY id | word_count ----+------------ 1 | 9572 11 | 1347 21 | 5890 31 | 7271 41 | 11814 (5 rows) -- materialized views can be created for router plannable queries CREATE MATERIALIZED VIEW mv_articles_hash_mx AS SELECT * FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable SELECT * FROM mv_articles_hash_mx; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) SET client_min_messages to 'INFO'; DROP MATERIALIZED VIEW mv_articles_hash_mx; SET client_min_messages to 'DEBUG2'; CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS SELECT * FROM articles_hash_mx WHERE author_id in (1,2); -- router planner/executor is disabled for task-tracker executor -- following query is router plannable, but router planner is disabled -- TODO: Uncomment once we fix task-tracker issue --SET citus.task_executor_type to 'task-tracker'; --SELECT id -- FROM articles_hash_mx -- WHERE author_id = 1; -- insert query is router plannable even under task-tracker INSERT INTO articles_hash_mx VALUES (51, 1, 'amateus', 1814); DEBUG: Creating router plan DEBUG: Plan is router executable -- verify insert is successfull (not router plannable and executable) SELECT id FROM articles_hash_mx WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 51 (6 rows) citus-7.0.3/src/test/regress/expected/multi_mx_schema_support.out000066400000000000000000000410021317107136600253760ustar00rootroot00000000000000-- -- MULTI_MX_SCHEMA_SUPPORT -- -- connect to a worker node and run some queries \c - - - :worker_1_port -- test very basic queries SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. 
carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold (4 rows) SELECT * FROM citus_mx_test_schema.nation_hash ORDER BY n_nationkey LIMIT 4; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold (4 rows) -- test cursors SET search_path TO public; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+--------+-------------+----------- (0 rows) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) END; -- test with search_path is set SET search_path TO citus_mx_test_schema; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+--------+-------------+----------- (0 rows) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) END; -- test inserting to table in different schema SET search_path TO public; INSERT INTO citus_mx_test_schema.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (100, 'TURKEY', 3); -- verify insertion SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+----------- 100 | TURKEY | 3 | (1 row) -- test with search_path is set SET search_path TO citus_mx_test_schema; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (101, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 101; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+----------- 101 | GERMANY | 3 | (1 row) -- TODO: add UPDATE/DELETE/UPSERT -- test UDFs with schemas SET search_path TO public; -- UDF in public, table in a schema other than public, search_path is not set SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction -------------------- 152 151 37 35 34 (5 rows) -- UDF in public, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction -------------------- 152 151 37 35 34 (5 rows) -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction2 --------------------- 152 151 37 35 34 (5 rows) -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; simpletestfunction2 --------------------- 152 151 37 35 34 (5 rows) -- test operators with schema SET search_path TO public; -- test with search_path is not set SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus_mx_test_schema.===) 1; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) -- test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 3 | CANADA | 1 | eas hang ironic, silent packages. 
slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) SELECT n_comment FROM citus_mx_test_schema.nation_hash_collation_search_path ORDER BY n_comment COLLATE citus_mx_test_schema.english; n_comment ------------------------------------------------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai (6 rows) SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; n_comment ------------------------------------------------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = '(a,a)'::citus_mx_test_schema.new_composite_type ORDER BY 1::int DESC; n_nationkey | n_name | n_regionkey | n_comment | test_col -------------+---------------------------+-------------+----------------------------------------------------+---------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) --test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type ORDER BY 1::int DESC; n_nationkey | n_name | n_regionkey | n_comment | test_col -------------+---------------------------+-------------+----------------------------------------------------+---------- 0 | ALGERIA | 0 | haggle. 
carefully final deposits detect slyly agai | (a,a) (1 row) -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 25 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 25 (1 row) -- check when search_path is public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 25 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 25 (1 row) -- single repartition joins SET citus.task_executor_type TO "task-tracker"; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column --SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; count ------- 25 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; count ------- 25 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column and non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; count ------- 25 (1 row) -- hash repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; count ------- 125 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; count ------- 125 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; count ------- 125 (1 row) -- set task_executor back to real-time SET citus.task_executor_type TO "real-time"; 
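-- NOTE (illustrative sketch for the "TODO: add UPDATE/DELETE/UPSERT" comment
-- earlier in this file; an assumption, not captured expected output):
-- router-plannable DML on the schema-qualified table might look like the
-- following, reusing the rows inserted above (n_nationkey 100 and 101).
SET search_path TO citus_mx_test_schema;
UPDATE nation_hash SET n_comment = 'updated comment' WHERE n_nationkey = 100;
DELETE FROM nation_hash WHERE n_nationkey = 101;
SET search_path TO public;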
citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query1.out000066400000000000000000000124531317107136600246160ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY1 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem_mx WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901 (4 rows) -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem_mx WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901 (4 rows) -- connect to the other node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H 
decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem_mx WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901 (4 rows) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query10.out000066400000000000000000000400151317107136600246710ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY10 -- -- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. -- connect to master \c - - - :master_port SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer_mx, orders_mx, lineitem_mx, nation_mx WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment -----------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. 
blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote 101 | Customer#000000101 | 124996.0120 | 7470.96 | BRAZIL | sMmL2rNeHDltovSm Y | 12-514-298-3699 | sleep. pending packages detect slyly ironic pack 671 | Customer#000000671 | 124125.2191 | 3227.87 | VIETNAM | ic6qGrt0giB,HDEiBK,,FYGHXQpc | 31-593-213-9388 | bold ideas above the ironic packages affix blithely about the furiou 526 | Customer#000000526 | 120324.0048 | 705.93 | ARGENTINA | 0oAVPhh1I4JdrDafVG2Z8 | 11-170-679-3115 | ctions cajole after the furiously unusual ideas. ironic packages among the instructions are carefully carefully iro 367 | Customer#000000367 | 118572.6180 | 9108.65 | JORDAN | yZaDoEZCqt2VMTVKoZUkf6gJ4yj | 23-939-319-4691 | eodolites under the ironic, stealthy requests affix furiously among the unusual tit 745 | Customer#000000745 | 113738.6908 | 7115.14 | CHINA | vjuHvDKdaomsivy l | 28-913-438-9403 | o beans. bold, regular theodolites haggle carefully about the quickl 118 | Customer#000000118 | 113149.7832 | 3582.37 | CHINA | OVnFuHygK9wx3xpg8 | 28-639-943-7051 | uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep 50 | Customer#000000050 | 111600.5870 | 4266.13 | FRANCE | 9SzDYlkzxByyJ1QeTI o | 16-658-112-3221 | ts. furiously ironic accounts cajole furiously slyly ironic dinos. 
(20 rows) -- connect one of the workers \c - - - :worker_1_port SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer_mx, orders_mx, lineitem_mx, nation_mx WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment -----------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote 101 | Customer#000000101 | 124996.0120 | 7470.96 | BRAZIL | sMmL2rNeHDltovSm Y | 12-514-298-3699 | sleep. pending packages detect slyly ironic pack 671 | Customer#000000671 | 124125.2191 | 3227.87 | VIETNAM | ic6qGrt0giB,HDEiBK,,FYGHXQpc | 31-593-213-9388 | bold ideas above the ironic packages affix blithely about the furiou 526 | Customer#000000526 | 120324.0048 | 705.93 | ARGENTINA | 0oAVPhh1I4JdrDafVG2Z8 | 11-170-679-3115 | ctions cajole after the furiously unusual ideas. 
ironic packages among the instructions are carefully carefully iro 367 | Customer#000000367 | 118572.6180 | 9108.65 | JORDAN | yZaDoEZCqt2VMTVKoZUkf6gJ4yj | 23-939-319-4691 | eodolites under the ironic, stealthy requests affix furiously among the unusual tit 745 | Customer#000000745 | 113738.6908 | 7115.14 | CHINA | vjuHvDKdaomsivy l | 28-913-438-9403 | o beans. bold, regular theodolites haggle carefully about the quickl 118 | Customer#000000118 | 113149.7832 | 3582.37 | CHINA | OVnFuHygK9wx3xpg8 | 28-639-943-7051 | uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep 50 | Customer#000000050 | 111600.5870 | 4266.13 | FRANCE | 9SzDYlkzxByyJ1QeTI o | 16-658-112-3221 | ts. furiously ironic accounts cajole furiously slyly ironic dinos. (20 rows) -- connect to the other worker \c - - - :worker_2_port SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer_mx, orders_mx, lineitem_mx, nation_mx WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment -----------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. 
quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote 101 | Customer#000000101 | 124996.0120 | 7470.96 | BRAZIL | sMmL2rNeHDltovSm Y | 12-514-298-3699 | sleep. pending packages detect slyly ironic pack 671 | Customer#000000671 | 124125.2191 | 3227.87 | VIETNAM | ic6qGrt0giB,HDEiBK,,FYGHXQpc | 31-593-213-9388 | bold ideas above the ironic packages affix blithely about the furiou 526 | Customer#000000526 | 120324.0048 | 705.93 | ARGENTINA | 0oAVPhh1I4JdrDafVG2Z8 | 11-170-679-3115 | ctions cajole after the furiously unusual ideas. ironic packages among the instructions are carefully carefully iro 367 | Customer#000000367 | 118572.6180 | 9108.65 | JORDAN | yZaDoEZCqt2VMTVKoZUkf6gJ4yj | 23-939-319-4691 | eodolites under the ironic, stealthy requests affix furiously among the unusual tit 745 | Customer#000000745 | 113738.6908 | 7115.14 | CHINA | vjuHvDKdaomsivy l | 28-913-438-9403 | o beans. bold, regular theodolites haggle carefully about the quickl 118 | Customer#000000118 | 113149.7832 | 3582.37 | CHINA | OVnFuHygK9wx3xpg8 | 28-639-943-7051 | uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep 50 | Customer#000000050 | 111600.5870 | 4266.13 | FRANCE | 9SzDYlkzxByyJ1QeTI o | 16-658-112-3221 | ts. furiously ironic accounts cajole furiously slyly ironic dinos. (20 rows) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query12.out000066400000000000000000000057031317107136600247000ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY12 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders_mx, lineitem_mx WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' year GROUP BY l_shipmode ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count ------------+-----------------+---------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders_mx, lineitem_mx WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' 
year GROUP BY l_shipmode ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count ------------+-----------------+---------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) -- connect to the other worker node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders_mx, lineitem_mx WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' year GROUP BY l_shipmode ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count ------------+-----------------+---------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query14.out000066400000000000000000000035471317107136600247060ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY14 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem_mx, part_mx WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue --------------------- 32.1126387112005225 (1 row) -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem_mx, part_mx WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue --------------------- 32.1126387112005225 (1 row) -- connect to the other node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem_mx, part_mx WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue --------------------- 32.1126387112005225 (1 row) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query19.out000066400000000000000000000063561317107136600247140ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY19 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. 
SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem_mx, part_mx WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue ------------- 144747.0857 (1 row) -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem_mx, part_mx WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue ------------- 144747.0857 (1 row) -- connect to the other node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem_mx, part_mx WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue ------------- 144747.0857 (1 row) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query3.out000066400000000000000000000112021317107136600246070ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY3 -- -- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
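-- Illustrative aside (a sketch, not part of the recorded expected output): the
-- effective value of the setting discussed above can be confirmed per session
-- before running the benchmark query, using standard PostgreSQL commands.
SHOW citus.large_table_shard_count;
SELECT current_setting('citus.large_table_shard_count');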
-- connect to the coordinator \c - - - :master_port SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer_mx, orders_mx, lineitem_mx WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority ------------+-------------+-------------+---------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 450 | 221012.3165 | 03-05-1995 | 0 5347 | 198353.7942 | 02-22-1995 | 0 10691 | 112800.1020 | 03-14-1995 | 0 386 | 104975.2484 | 01-25-1995 | 0 5765 | 88222.7556 | 12-15-1994 | 0 4707 | 88143.7774 | 02-27-1995 | 0 5312 | 83750.7028 | 02-24-1995 | 0 5728 | 70101.6400 | 12-11-1994 | 0 577 | 57986.6224 | 12-19-1994 | 0 12706 | 16636.6368 | 11-21-1994 | 0 3844 | 8851.3200 | 12-29-1994 | 0 11073 | 7433.6295 | 12-02-1994 | 0 13924 | 3111.4970 | 12-20-1994 | 0 (16 rows) -- connect one of the workers \c - - - :worker_1_port SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer_mx, orders_mx, lineitem_mx WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority ------------+-------------+-------------+---------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 450 | 221012.3165 | 03-05-1995 | 0 5347 | 198353.7942 | 02-22-1995 | 0 10691 | 112800.1020 | 03-14-1995 | 0 386 | 104975.2484 | 01-25-1995 | 0 5765 | 88222.7556 | 12-15-1994 | 0 4707 | 88143.7774 | 02-27-1995 | 0 5312 | 83750.7028 | 02-24-1995 | 0 5728 | 70101.6400 | 12-11-1994 | 0 577 | 57986.6224 | 12-19-1994 | 0 12706 | 16636.6368 | 11-21-1994 | 0 3844 | 8851.3200 | 12-29-1994 | 0 11073 | 7433.6295 | 12-02-1994 | 0 13924 | 3111.4970 | 12-20-1994 | 0 (16 rows) -- connect to the other node \c - - - :worker_2_port SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer_mx, orders_mx, lineitem_mx WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority ------------+-------------+-------------+---------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 450 | 221012.3165 | 03-05-1995 | 0 5347 | 198353.7942 | 02-22-1995 | 0 10691 | 112800.1020 | 03-14-1995 | 0 386 | 104975.2484 | 01-25-1995 | 0 5765 | 88222.7556 | 12-15-1994 | 0 4707 | 88143.7774 | 02-27-1995 | 0 5312 | 83750.7028 | 02-24-1995 | 0 5728 | 70101.6400 | 12-11-1994 | 0 577 | 57986.6224 | 12-19-1994 | 0 12706 | 16636.6368 | 11-21-1994 | 0 3844 | 8851.3200 | 12-29-1994 | 0 11073 | 7433.6295 | 12-02-1994 | 0 13924 | 3111.4970 | 12-20-1994 | 0 (16 rows) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query6.out000066400000000000000000000030331317107136600246150ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY6 -- -- connect to the 
coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem_mx WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue ------------- 243277.7858 (1 row) -- connect to one of the worker nodes \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem_mx WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue ------------- 243277.7858 (1 row) -- connect to the other worker node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem_mx WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue ------------- 243277.7858 (1 row) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query7.out000066400000000000000000000072141317107136600246230ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY7 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, nation_mx n1, nation_mx n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, nation_mx n1, nation_mx n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date 
'1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- connect to the other worker node \c - - - :worker_2_port -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, nation_mx n1, nation_mx n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) citus-7.0.3/src/test/regress/expected/multi_mx_tpch_query7_nested.out000066400000000000000000000102461317107136600261640ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY7_NESTED -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation_mx n1, nation_mx n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- connect to one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation_mx n1, nation_mx n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' 
AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- connect to the coordinator \c - - - :worker_2_port -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation_mx n1, nation_mx n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) citus-7.0.3/src/test/regress/expected/multi_mx_transaction_recovery.out000066400000000000000000000070561317107136600266200ustar00rootroot00000000000000-- Tests for running transaction recovery from a worker node SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); create_distributed_table -------------------------- (1 row) \c - - - :worker_1_port SET citus.multi_shard_commit_protocol TO '2pc'; -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) -- If the groupid of the worker changes this query will produce a -- different result and the prepared statement names should be adapted -- accordingly. 
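-- Illustrative aside (a sketch, not part of the recorded expected output;
-- assumes the Citus 7.0 catalog layout with pg_dist_transaction(groupid, gid)):
-- the prepared transactions and the recovery records they are matched against
-- can be listed directly on the worker.
SELECT gid, prepared, owner, database FROM pg_prepared_xacts;
SELECT groupid, gid FROM pg_dist_transaction;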
SELECT * FROM pg_dist_local_group; groupid --------- 12 (1 row) BEGIN; CREATE TABLE table_should_abort (value int); PREPARE TRANSACTION 'citus_12_should_abort'; BEGIN; CREATE TABLE table_should_commit (value int); PREPARE TRANSACTION 'citus_12_should_commit'; BEGIN; CREATE TABLE should_be_sorted_into_middle (value int); PREPARE TRANSACTION 'citus_12_should_be_sorted_into_middle'; -- Add "fake" pg_dist_transaction records and run recovery INSERT INTO pg_dist_transaction VALUES (12, 'citus_12_should_commit'); INSERT INTO pg_dist_transaction VALUES (12, 'citus_12_should_be_forgotten'); SELECT recover_prepared_transactions(); NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: ROLLBACK PREPARED 'citus_12_should_abort' NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: ROLLBACK PREPARED 'citus_12_should_be_sorted_into_middle' NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: COMMIT PREPARED 'citus_12_should_commit' recover_prepared_transactions ------------------------------- 3 (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_abort'; count ------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_commit'; count ------- 1 (1 row) -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) -- Multi-statement transactions should write 2 transaction recovery records BEGIN; INSERT INTO test_recovery VALUES ('hello'); INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; count ------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- Committed COPY should write 3 transaction records (2 fall into the same shard) COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; count ------- 3 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) DROP TABLE table_should_commit; \c - - - :master_port DROP TABLE test_recovery_ref; ERROR: table "test_recovery_ref" does not exist DROP TABLE test_recovery; citus-7.0.3/src/test/regress/expected/multi_name_lengths.out000066400000000000000000000455271317107136600243220ustar00rootroot00000000000000-- -- MULTI_NAME_LENGTHS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000; SET citus.multi_shard_commit_protocol = '2pc'; -- Verify that a table name > 56 characters gets hashed properly. 
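-- Illustrative aside (a sketch, not part of the recorded expected output):
-- PostgreSQL truncates identifiers to 63 bytes by default, and Citus appends a
-- "_<hash>_<shardid>" suffix to shard relation names, so a long relation name
-- has to be hashed and shortened to leave room for that suffix.
SHOW max_identifier_length;
SELECT length(repeat('a', 70)::name::text);  -- identifiers are silently cut to 63 bytes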
CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); master_create_worker_shards ----------------------------- (1 row) \c - - - :worker_1_port \dt too_long_* List of relations Schema | Name | Type | Owner --------+-----------------------------------------------------------------+-------+---------- public | too_long_12345678901234567890123456789012345678_e0119164_225000 | table | postgres public | too_long_12345678901234567890123456789012345678_e0119164_225001 | table | postgres (2 rows) \c - - - :master_port -- Verify that the UDF works and rejects bad arguments. SELECT shard_name(NULL, 666666); shard_name ------------ (1 row) SELECT shard_name(0, 666666); ERROR: object_name does not reference a valid relation SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, 666666); shard_name ------------------------------------------------------------------------ public.too_long_12345678901234567890123456789012345678_e0119164_666666 (1 row) SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, NULL); shard_name ------------ (1 row) SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, -21); ERROR: shard_id cannot be zero or negative value DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE; -- Table to use for rename checks. CREATE TABLE name_lengths ( col1 integer not null, col2 integer not null, constraint constraint_a UNIQUE (col1) ); SELECT master_create_distributed_table('name_lengths', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('name_lengths', '2', '2'); master_create_worker_shards ----------------------------- (1 row) -- Verify that we CAN add columns with "too-long names", because -- the columns' names are not extended in the corresponding shard tables. 
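-- Illustrative aside (a sketch, not part of the recorded expected output): on a
-- worker node, the shard relations carry the hashed name suffix while their
-- columns keep the original names verbatim, which is why adding long column
-- names needs no special handling. This can be checked from information_schema:
SELECT table_name, column_name
FROM information_schema.columns
WHERE table_name LIKE 'name_lengths_%'
ORDER BY table_name, ordinal_position;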
ALTER TABLE name_lengths ADD COLUMN float_col_12345678901234567890123456789012345678901234567890 FLOAT; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE name_lengths ADD COLUMN date_col_12345678901234567890123456789012345678901234567890 DATE; ALTER TABLE name_lengths ADD COLUMN int_col_12345678901234567890123456789012345678901234567890 INTEGER DEFAULT 1; -- Placeholders for unsupported ALTER TABLE to add constraints with implicit names that are likely too long ALTER TABLE name_lengths ADD UNIQUE (float_col_12345678901234567890123456789012345678901234567890); ERROR: cannot create constraint without a name on a distributed table ALTER TABLE name_lengths ADD EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =); ERROR: cannot create constraint without a name on a distributed table ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345678901234567890 > '2014-01-01'::date); ERROR: cannot create constraint without a name on a distributed table \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass; Column | Type | Modifiers --------------------------------------------------------------+------------------+----------- col1 | integer | not null col2 | integer | not null float_col_12345678901234567890123456789012345678901234567890 | double precision | date_col_12345678901234567890123456789012345678901234567890 | date | int_col_12345678901234567890123456789012345678901234567890 | integer | default 1 (5 rows) \c - - - :master_port -- Placeholders for unsupported add constraints with EXPLICIT names that are too long ALTER TABLE name_lengths ADD CONSTRAINT nl_unique_12345678901234567890123456789012345678901234567890 UNIQUE (float_col_12345678901234567890123456789012345678901234567890); ERROR: cannot create constraint on "name_lengths" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). ALTER TABLE name_lengths ADD CONSTRAINT nl_exclude_12345678901234567890123456789012345678901234567890 EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =); ERROR: cannot create constraint on "name_lengths" DETAIL: Distributed relations cannot have UNIQUE, EXCLUDE, or PRIMARY KEY constraints that do not include the partition column (with an equality operator if EXCLUDE). 
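-- Illustrative aside (an assumption, not part of the recorded expected output):
-- as the DETAIL message above implies, a uniqueness constraint that does
-- include the partition column (col1) would be accepted, for example:
ALTER TABLE name_lengths ADD CONSTRAINT nl_unique_col1_col2 UNIQUE (col1, col2);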
ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass; Constraint | Definition -----------------------------------------------------------------+------------------------------------------------------------------------------------------- nl_checky_1234567890123456789012345678901234567_b16df46d_225002 | CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '01-01-2014'::date) (1 row) \c - - - :master_port -- Placeholders for RENAME operations ALTER TABLE name_lengths RENAME TO name_len_12345678901234567890123456789012345678901234567890; ERROR: renaming distributed tables is currently unsupported ALTER TABLE name_lengths RENAME CONSTRAINT unique_12345678901234567890123456789012345678901234567890 TO unique2_12345678901234567890123456789012345678901234567890; ERROR: renaming constraints belonging to distributed tables is currently unsupported -- Verify that CREATE INDEX on already distributed table has proper shard names. CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' \c - - - :worker_1_port \d tmp_idx_* Index "public.tmp_idx_123456789012345678901234567890123456789_5e470afa_225002" Column | Type | Definition --------+---------+------------ col2 | integer | col2 btree, for table "public.name_lengths_225002" Index "public.tmp_idx_123456789012345678901234567890123456789_5e470afa_225003" Column | Type | Definition --------+---------+------------ col2 | integer | col2 btree, for table "public.name_lengths_225003" \c - - - :master_port -- Verify that a new index name > 63 characters is auto-truncated -- by the parser/rewriter before further processing, just as in Postgres. 
CREATE INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 ON name_lengths(col2); NOTICE: identifier "tmp_idx_123456789012345678901234567890123456789012345678901234567890" will be truncated to "tmp_idx_1234567890123456789012345678901234567890123456789012345" NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' \c - - - :worker_1_port \d tmp_idx_* Index "public.tmp_idx_123456789012345678901234567890123456789_599636aa_225002" Column | Type | Definition --------+---------+------------ col2 | integer | col2 btree, for table "public.name_lengths_225002" Index "public.tmp_idx_123456789012345678901234567890123456789_599636aa_225003" Column | Type | Definition --------+---------+------------ col2 | integer | col2 btree, for table "public.name_lengths_225003" Index "public.tmp_idx_123456789012345678901234567890123456789_5e470afa_225002" Column | Type | Definition --------+---------+------------ col2 | integer | col2 btree, for table "public.name_lengths_225002" Index "public.tmp_idx_123456789012345678901234567890123456789_5e470afa_225003" Column | Type | Definition --------+---------+------------ col2 | integer | col2 btree, for table "public.name_lengths_225003" \c - - - :master_port -- Verify that distributed tables with too-long names -- for CHECK constraints are no trouble. CREATE TABLE sneaky_name_lengths ( col1 integer not null, col2 integer not null, int_col_12345678901234567890123456789012345678901234567890 integer not null, CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100) ); SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); master_create_worker_shards ----------------------------- (1 row) DROP TABLE sneaky_name_lengths CASCADE; CREATE TABLE sneaky_name_lengths ( int_col_123456789012345678901234567890123456789012345678901234 integer UNIQUE not null, col2 integer not null, CONSTRAINT checky_12345678901234567890123456789012345678901234567890 CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) ); \di public.sneaky_name_lengths* List of relations Schema | Name | Type | Owner | Table --------+-----------------------------------------------------------------+-------+----------+--------------------- public | sneaky_name_lengths_int_col_1234567890123456789012345678901_key | index | postgres | sneaky_name_lengths (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass; Constraint | Definition -----------------------------------------------------------+------------------------------------------------------------------------------ checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); master_create_worker_shards ----------------------------- (1 row) \c - - - :worker_1_port \di public.sneaky*225006 List of relations Schema | Name | Type | Owner | Table 
--------+-----------------------------------------------------------------+-------+----------+---------------------------- public | sneaky_name_lengths_int_col_1234567890123456789_6402d2cd_225006 | index | postgres | sneaky_name_lengths_225006 (1 row) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass; Constraint | Definition -----------------------------------------------------------+------------------------------------------------------------------------------ checky_12345678901234567890123456789012345678901234567890 | CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) (1 row) \c - - - :master_port DROP TABLE sneaky_name_lengths CASCADE; -- verify that named constraint with too-long name gets hashed properly CREATE TABLE sneaky_name_lengths ( col1 integer not null, col2 integer not null, int_col_12345678901234567890123456789012345678901234567890 integer not null, constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1) ); SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); master_create_worker_shards ----------------------------- (1 row) \c - - - :worker_1_port \di unique*225008 List of relations Schema | Name | Type | Owner | Table --------+-----------------------------------------------------------------+-------+----------+---------------------------- public | unique_1234567890123456789012345678901234567890_a5986f27_225008 | index | postgres | sneaky_name_lengths_225008 (1 row) \c - - - :master_port DROP TABLE sneaky_name_lengths CASCADE; -- Verify that much larger shardIds are handled properly ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 2250000000000; CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); master_create_worker_shards ----------------------------- (1 row) \c - - - :worker_1_port \dt *225000000000* List of relations Schema | Name | Type | Owner --------+-----------------------------------------------------------------+-------+---------- public | too_long_1234567890123456789012345678901_e0119164_2250000000000 | table | postgres public | too_long_1234567890123456789012345678901_e0119164_2250000000001 | table | postgres (2 rows) \c - - - :master_port DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE; -- Verify that multi-byte boundaries are respected for databases with UTF8 encoding. CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!' 
( col1 integer not null PRIMARY KEY, col2 integer not null); SELECT master_create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', '2', '2'); master_create_worker_shards ----------------------------- (1 row) -- Verify that quoting is used in shard_name SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass; shard_name ---------------------------------------------------------- public."elephant_ÑлонÑлонÑлонÑло_c8b737c2_2250000000002" (1 row) \c - - - :worker_1_port \dt public.elephant_* List of relations Schema | Name | Type | Owner --------+-------------------------------------------------+-------+---------- public | elephant_ÑлонÑлонÑлонÑло_c8b737c2_2250000000002 | table | postgres public | elephant_ÑлонÑлонÑлонÑло_c8b737c2_2250000000003 | table | postgres (2 rows) \di public.elephant_* List of relations Schema | Name | Type | Owner | Table --------+-------------------------------------------------+-------+----------+------------------------------------------------- public | elephant_ÑлонÑлонÑлонÑло_14d34928_2250000000002 | index | postgres | elephant_ÑлонÑлонÑлонÑло_c8b737c2_2250000000002 public | elephant_ÑлонÑлонÑлонÑло_14d34928_2250000000003 | index | postgres | elephant_ÑлонÑлонÑлонÑло_c8b737c2_2250000000003 (2 rows) \c - - - :master_port -- Verify that shard_name UDF supports schemas CREATE SCHEMA multi_name_lengths; CREATE TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 2, 1); master_create_worker_shards ----------------------------- (1 row) SELECT shard_name('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = 'multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass; shard_name ------------------------------------------------------------------------------------ multi_name_lengths.too_long_1234567890123456789012345678901_e0119164_2250000000004 (1 row) DROP TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890; -- Clean up. DROP TABLE name_lengths CASCADE; DROP TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!' 
CASCADE; citus-7.0.3/src/test/regress/expected/multi_null_minmax_value_pruning.out000066400000000000000000000603401317107136600271250ustar00rootroot00000000000000-- -- MULTI_NULL_MINMAX_VALUE_PRUNING -- -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- t (1 row) SET client_min_messages TO DEBUG2; SET citus.explain_all_tasks TO on; -- to avoid differing explain output - executor doesn't matter, -- because were testing pruning here. SET citus.task_executor_type TO 'real-time'; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; shardminvalue | shardmaxvalue ---------------+--------------- 1 | 1509 (1 row) SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; shardminvalue | shardmaxvalue ---------------+--------------- 1509 | 2951 (1 row) -- Check that partition and join pruning works when min/max values exist -- Adding l_orderkey = 1 to make the query not router executable EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; QUERY PLAN ----------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 2 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Bitmap Heap Scan on lineitem_290000 lineitem Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1)) -> BitmapOr -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 9030) -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 1) -> Task Node: host=localhost port=57638 dbname=regression -> Bitmap Heap Scan on lineitem_290004 lineitem Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1)) -> BitmapOr -> Bitmap Index Scan on lineitem_pkey_290004 Index Cond: (l_orderkey = 9030) -> Bitmap Index Scan on lineitem_pkey_290004 Index Cond: (l_orderkey = 1) (21 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [1,1509] and [8997,14946] DEBUG: join prunable for intervals [1509,2951] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: 
(lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (60 rows) -- Now set the minimum value for a shard to null. Then check that we don't apply -- partition or join pruning for the shard with null min value. 
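-- Illustrative aside (a sketch, not part of the recorded expected output): the
-- per-shard statistics that drive these pruning decisions are stored in
-- pg_dist_shard and can be inspected directly, e.g. for the lineitem table:
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem'::regclass
ORDER BY shardid;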
UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; QUERY PLAN ------------------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 2 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem Index Cond: (l_orderkey = 9030) (11 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [1509,2951] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 9 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290004 
on lineitem_290004 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (67 rows) -- Next, set the maximum value for another shard to null. Then check that we -- don't apply partition or join pruning for this other shard either. UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; QUERY PLAN ------------------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 3 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem Index Cond: (l_orderkey = 9030) (15 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 10 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey 
= orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (74 rows) -- Last, set the minimum value to 0 and check that we don't treat it as null. We -- should apply partition and join pruning for this shard now. UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; QUERY PLAN ------------------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 2 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem Index Cond: (l_orderkey = 9030) (11 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [0,1509] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 9 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only 
Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Index Only Scan using orders_pkey_290009 on orders_290009 orders (67 rows) -- Set minimum and maximum values for two shards back to their original values UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001; SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/expected/multi_null_minmax_value_pruning_0.out000066400000000000000000000603401317107136600273440ustar00rootroot00000000000000-- -- MULTI_NULL_MINMAX_VALUE_PRUNING -- -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- f (1 row) SET client_min_messages TO DEBUG2; SET citus.explain_all_tasks TO on; -- to avoid differing explain output - executor doesn't matter, -- because were testing pruning here. 
SET citus.task_executor_type TO 'real-time'; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; shardminvalue | shardmaxvalue ---------------+--------------- 1 | 1509 (1 row) SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; shardminvalue | shardmaxvalue ---------------+--------------- 1509 | 2951 (1 row) -- Check that partition and join pruning works when min/max values exist -- Adding l_orderkey = 1 to make the query not router executable EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; QUERY PLAN ----------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 2 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Bitmap Heap Scan on lineitem_290000 lineitem Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1)) -> BitmapOr -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 9030) -> Bitmap Index Scan on lineitem_pkey_290000 Index Cond: (l_orderkey = 1) -> Task Node: host=localhost port=57638 dbname=regression -> Bitmap Heap Scan on lineitem_290004 lineitem Recheck Cond: ((l_orderkey = 9030) OR (l_orderkey = 1)) -> BitmapOr -> Bitmap Index Scan on lineitem_pkey_290004 Index Cond: (l_orderkey = 9030) -> Bitmap Index Scan on lineitem_pkey_290004 Index Cond: (l_orderkey = 1) (21 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [1,1509] and [8997,14946] DEBUG: join prunable for intervals [1509,2951] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 8 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join 
Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem (60 rows) -- Now set the minimum value for a shard to null. Then check that we don't apply -- partition or join pruning for the shard with null min value. UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; QUERY PLAN ------------------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 2 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem Index Cond: (l_orderkey = 9030) (11 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [1509,2951] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 9 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey 
= lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem (67 rows) -- Next, set the maximum value for another shard to null. Then check that we -- don't apply partition or join pruning for this other shard either. UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; QUERY PLAN ------------------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 3 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Index Scan using lineitem_pkey_290000 on lineitem_290000 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem Index Cond: (l_orderkey = 9030) (15 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 10 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Task Node: host=localhost port=57638 
dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem (74 rows) -- Last, set the minimum value to 0 and check that we don't treat it as null. We -- should apply partition and join pruning for this shard now. 
UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; QUERY PLAN ------------------------------------------------------------------------------- Custom Scan (Citus Real-Time) Task Count: 2 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Index Scan using lineitem_pkey_290001 on lineitem_290001 lineitem Index Cond: (l_orderkey = 9030) -> Task Node: host=localhost port=57638 dbname=regression -> Index Scan using lineitem_pkey_290004 on lineitem_290004 lineitem Index Cond: (l_orderkey = 9030) (11 rows) EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; DEBUG: join prunable for intervals [0,1509] and [8997,14946] DEBUG: join prunable for intervals [2951,4455] and [8997,14946] DEBUG: join prunable for intervals [4480,5986] and [8997,14946] DEBUG: join prunable for intervals [8997,10560] and [1,5986] DEBUG: join prunable for intervals [10560,12036] and [1,5986] DEBUG: join prunable for intervals [12036,13473] and [1,5986] DEBUG: join prunable for intervals [13473,14947] and [1,5986] QUERY PLAN ------------------------------------------------------------------------------------------------------ Aggregate -> Custom Scan (Citus Real-Time) Task Count: 9 Tasks Shown: All -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (lineitem.l_orderkey = orders.o_orderkey) -> Index Only Scan using lineitem_pkey_290000 on lineitem_290000 lineitem -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290001 on lineitem_290001 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290002 on lineitem_290002 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290008 on orders_290008 orders -> Index Only Scan using lineitem_pkey_290003 on lineitem_290003 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290004 on lineitem_290004 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290005 on lineitem_290005 lineitem -> Task Node: host=localhost port=57638 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on 
orders_290009 orders -> Index Only Scan using lineitem_pkey_290006 on lineitem_290006 lineitem -> Task Node: host=localhost port=57637 dbname=regression -> Aggregate -> Merge Join Merge Cond: (orders.o_orderkey = lineitem.l_orderkey) -> Index Only Scan using orders_pkey_290009 on orders_290009 orders -> Index Only Scan using lineitem_pkey_290007 on lineitem_290007 lineitem (67 rows) -- Set minimum and maximum values for two shards back to their original values UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001; SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/expected/multi_partition_pruning.out000066400000000000000000000163161317107136600254230ustar00rootroot00000000000000-- -- MULTI_PARTITION_PRUNING -- -- Tests to verify that we correctly prune unreferenced shards. For this, we -- need to increase the logging verbosity of messages displayed on the client. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 770000; -- Adding additional l_orderkey = 1 to make this query not router executable SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; l_orderkey | l_linenumber | l_shipdate ------------+--------------+------------ 1 | 1 | 03-13-1996 1 | 2 | 04-12-1996 1 | 3 | 01-29-1996 1 | 4 | 04-21-1996 1 | 5 | 03-30-1996 1 | 6 | 01-30-1996 9030 | 1 | 09-02-1998 9030 | 2 | 08-19-1998 9030 | 3 | 08-27-1998 9030 | 4 | 07-20-1998 9030 | 5 | 09-29-1998 9030 | 6 | 09-03-1998 (12 rows) -- We use the l_linenumber field for the following aggregations. We need to use -- an integer type, as aggregations on numerics or big integers return numerics -- of unknown length. When the numerics are read into our temporary table, they -- trigger the the creation of toasted tables and indexes. This in turn prints -- non-deterministic debug messages. To avoid this chain, we use l_linenumber. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; sum | avg -------+-------------------- 17999 | 3.0189533713518953 (1 row) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE (l_orderkey < 4000 OR l_orderkey > 9030); sum | avg -------+-------------------- 30184 | 3.0159872102318145 (1 row) -- The following query should prune out all shards and return empty results SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 20000; sum | avg -----+----- | (1 row) -- The tests below verify that we can prune shards partitioned over different -- types of columns including varchar, array types, composite types etc. This is -- in response to a bug we had where we were not able to resolve correct operator -- types for some kind of column types. First we create tables partitioned on -- these types and the logical shards and placements for them. 
-- Create varchar partitioned table CREATE TABLE varchar_partitioned_table ( varchar_column varchar(100) ); SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Create logical shards and shard placements with shardid 100,101 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES('varchar_partitioned_table'::regclass, 100, 't', 'AA1000U2AMO4ZGX', 'AZZXSP27F21T6'), ('varchar_partitioned_table'::regclass, 101, 't', 'BA1000U2AMO4ZGX', 'BZZXSP27F21T6'); INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 100, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 101, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; -- Create array partitioned table RESET client_min_messages; -- avoid debug messages about toast index creation CREATE TABLE array_partitioned_table ( array_column text[] ); SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append'); master_create_distributed_table --------------------------------- (1 row) SET client_min_messages TO DEBUG2; -- Create logical shard with shardid 102, 103 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES('array_partitioned_table'::regclass, 102, 't', '{}', '{AZZXSP27F21T6, AZZXSP27F21T6}'), ('array_partitioned_table'::regclass, 103, 't', '{BA1000U2AMO4ZGX, BZZXSP27F21T6}', '{CA1000U2AMO4ZGX, CZZXSP27F21T6}'); INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 102, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 103, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; -- Create composite type partitioned table CREATE TYPE composite_type AS ( text_column text, double_column decimal, varchar_column varchar(50) ); RESET client_min_messages; -- avoid debug messages about toast index creation CREATE TABLE composite_partitioned_table ( composite_column composite_type ); SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); master_create_distributed_table --------------------------------- (1 row) SET client_min_messages TO DEBUG2; -- Create logical shard with shardid 104, 105 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES('composite_partitioned_table'::regclass, 104, 't', '(a,3,b)', '(b,4,c)'), ('composite_partitioned_table'::regclass, 105, 't', '(c,5,d)', '(d,6,e)'); INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 104, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 105, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; -- Verify that shard pruning works. 
Note that these queries should all -- prune one shard (see task count). As these tables don't exist -- remotely, temporarily disable WARNING messages. SET client_min_messages TO ERROR; EXPLAIN (COSTS OFF) SELECT count(*) FROM varchar_partitioned_table WHERE varchar_column = 'BA2'; QUERY PLAN ------------------------------------------------- Aggregate -> Custom Scan (Citus Real-Time) Task Count: 1 Tasks Shown: All -> Task Error: Could not get remote plan. (6 rows) EXPLAIN (COSTS OFF) SELECT count(*) FROM array_partitioned_table WHERE array_column > '{BA1000U2AMO4ZGX, BZZXSP27F21T6}'; QUERY PLAN ------------------------------------------------- Aggregate -> Custom Scan (Citus Real-Time) Task Count: 1 Tasks Shown: All -> Task Error: Could not get remote plan. (6 rows) EXPLAIN (COSTS OFF) SELECT count(*) FROM composite_partitioned_table WHERE composite_column < '(b,5,c)'::composite_type; QUERY PLAN ------------------------------------------------- Aggregate -> Custom Scan (Citus Real-Time) Task Count: 1 Tasks Shown: All -> Task Error: Could not get remote plan. (6 rows) SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/expected/multi_partitioning.out000066400000000000000000001320161317107136600243530ustar00rootroot00000000000000-- -- Distributed Partitioned Table Tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1660000; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; -- -- Distributed Partitioned Table Creation Tests -- -- 1-) Distributing partitioned table -- create partitioned table CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); -- create its partitions CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -- load some data and distribute tables INSERT INTO partitioning_test VALUES (1, '2009-06-06'); INSERT INTO partitioning_test VALUES (2, '2010-07-07'); INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); -- distribute partitioned table SELECT create_distributed_table('partitioning_test', 'id'); NOTICE: Copying data from local table... NOTICE: Copying data from local table... 
create_distributed_table -------------------------- (1 row) -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; id | time ----+------------ 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 4 | 03-03-2010 (4 rows) -- see partitioned table and its partitions are distributed SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; logicalrelid ------------------------ partitioning_test partitioning_test_2009 partitioning_test_2010 (3 rows) SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') GROUP BY logicalrelid ORDER BY 1,2; logicalrelid | count ------------------------+------- partitioning_test | 4 partitioning_test_2009 | 4 partitioning_test_2010 | 4 (3 rows) -- 2-) Creating partition of a distributed table CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01'); -- new partition is automatically distributed as well SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; logicalrelid ------------------------ partitioning_test partitioning_test_2011 (2 rows) SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') GROUP BY logicalrelid ORDER BY 1,2; logicalrelid | count ------------------------+------- partitioning_test | 4 partitioning_test_2011 | 4 (2 rows) -- 3-) Attaching non distributed table to a distributed table CREATE TABLE partitioning_test_2012(id int, time date); -- load some data INSERT INTO partitioning_test_2012 VALUES (5, '2012-06-06'); INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01'); NOTICE: Copying data from local table... 
-- attached partition is distributed as well SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; logicalrelid ------------------------ partitioning_test partitioning_test_2012 (2 rows) SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') GROUP BY logicalrelid ORDER BY 1,2; logicalrelid | count ------------------------+------- partitioning_test | 4 partitioning_test_2012 | 4 (2 rows) -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; id | time ----+------------ 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 4 | 03-03-2010 5 | 06-06-2012 6 | 07-07-2012 (6 rows) -- 4-) Attaching distributed table to distributed table CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); create_distributed_table -------------------------- (1 row) -- load some data INSERT INTO partitioning_test_2013 VALUES (7, '2013-06-06'); INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01'); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; id | time ----+------------ 1 | 06-06-2009 2 | 07-07-2010 3 | 09-09-2009 4 | 03-03-2010 5 | 06-06-2012 6 | 07-07-2012 7 | 06-06-2013 8 | 07-07-2013 (8 rows) -- 5-) Failure cases while creating distributed partitioned tables -- cannot distribute a partition if its parent is not distributed CREATE TABLE partitioning_test_failure(id int, time date) PARTITION BY RANGE (time); CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); ERROR: cannot distribute relation "partitioning_test_failure_2009" which is partition of "partitioning_test_failure" DETAIL: Citus does not support distributing partitions if their parent is not distributed table. HINT: Distribute the partitioned table "partitioning_test_failure" instead. 
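-- A minimal sketch of the workflow the HINT above points at: distribute the
-- partitioned parent first, and partitions created afterwards are picked up
-- automatically (as the partitioning_test examples earlier in this file show).
-- The example_events tables below are hypothetical and not part of the test schema.
CREATE TABLE example_events(id int, event_time date) PARTITION BY RANGE (event_time);
SELECT create_distributed_table('example_events', 'id');
CREATE TABLE example_events_2009 PARTITION OF example_events
    FOR VALUES FROM ('2009-01-01') TO ('2010-01-01');
-- the new partition is distributed automatically, matching its parent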
-- only hash distributed tables can have partitions SELECT create_distributed_table('partitioning_test_failure', 'id', 'append'); ERROR: distributing partitioned tables in only supported for hash-distributed tables SELECT create_distributed_table('partitioning_test_failure', 'id', 'range'); ERROR: distributing partitioned tables in only supported for hash-distributed tables SELECT create_reference_table('partitioning_test_failure'); ERROR: distributing partitioned tables in only supported for hash-distributed tables -- replication factor > 1 is not allowed in distributed partitioned tables SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('partitioning_test_failure', 'id'); ERROR: distributing partitioned tables with replication factor greater than 1 is not supported SET citus.shard_replication_factor TO 1; -- non-distributed tables cannot have distributed partitions; DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009(id int, time date); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); create_distributed_table -------------------------- (1 row) ALTER TABLE partitioning_test_failure ATTACH PARTITION partitioning_test_failure_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); ERROR: non-distributed tables cannot have distributed partitions HINT: Distribute the partitioned table "partitioning_test_failure_2009" instead -- multi-level partitioning is not allowed DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_test_failure', 'id'); ERROR: distributing multi-level partitioned tables is not supported DETAIL: Relation "partitioning_test_failure_2009" is partitioned table itself and it is also partition of relation "partitioning_test_failure". -- multi-level partitioning is not allowed in different order DROP TABLE partitioning_test_failure_2009; SELECT create_distributed_table('partitioning_test_failure', 'id'); create_distributed_table -------------------------- (1 row) CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); ERROR: distributing multi-level partitioned tables is not supported DETAIL: Relation "partitioning_test_failure_2009" is partitioned table itself and it is also partition of relation "partitioning_test_failure". 
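-- The failure cases above reduce to one supported shape: single-level
-- partitioned tables distributed by hash with replication factor 1. As a
-- sketch (assuming the partitioning_test relations created earlier in this
-- file), the distribution method can be confirmed from the catalog, where
-- partmethod 'h' denotes hash distribution:
SELECT logicalrelid, partmethod
FROM pg_dist_partition
WHERE logicalrelid::text LIKE 'partitioning_test%'
ORDER BY 1;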
-- -- DMLs in distributed partitioned tables -- -- test COPY -- COPY data to partitioned table COPY partitioning_test FROM STDIN WITH CSV; -- COPY data to partition directly COPY partitioning_test_2009 FROM STDIN WITH CSV; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 9 ORDER BY 1; id | time ----+------------ 9 | 01-01-2009 10 | 01-01-2010 11 | 01-01-2011 12 | 01-01-2012 13 | 01-02-2009 14 | 01-03-2009 (6 rows) -- test INSERT -- INSERT INTO the partitioned table INSERT INTO partitioning_test VALUES(15, '2009-02-01'); INSERT INTO partitioning_test VALUES(16, '2010-02-01'); INSERT INTO partitioning_test VALUES(17, '2011-02-01'); INSERT INTO partitioning_test VALUES(18, '2012-02-01'); -- INSERT INTO the partitions directly table INSERT INTO partitioning_test VALUES(19, '2009-02-02'); INSERT INTO partitioning_test VALUES(20, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; id | time ----+------------ 15 | 02-01-2009 16 | 02-01-2010 17 | 02-01-2011 18 | 02-01-2012 19 | 02-02-2009 20 | 02-02-2010 (6 rows) -- test INSERT/SELECT -- INSERT/SELECT from partition to partitioned table INSERT INTO partitioning_test SELECT * FROM partitioning_test_2011; -- INSERT/SELECT from partitioned table to partition INSERT INTO partitioning_test_2012 SELECT * FROM partitioning_test WHERE time >= '2012-01-01' AND time < '2013-01-01'; -- see the data is loaded to shards (rows in the given range should be duplicated) SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2013-01-01' ORDER BY 1; id | time ----+------------ 5 | 06-06-2012 5 | 06-06-2012 6 | 07-07-2012 6 | 07-07-2012 11 | 01-01-2011 11 | 01-01-2011 12 | 01-01-2012 12 | 01-01-2012 17 | 02-01-2011 17 | 02-01-2011 18 | 02-01-2012 18 | 02-01-2012 (12 rows) -- test UPDATE -- UPDATE partitioned table UPDATE partitioning_test SET time = '2013-07-07' WHERE id = 7; -- UPDATE partition directly UPDATE partitioning_test_2013 SET time = '2013-08-08' WHERE id = 8; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; id | time ----+------------ 7 | 07-07-2013 8 | 08-08-2013 (2 rows) -- UPDATE that tries to move a row to a non-existing partition (this should fail) UPDATE partitioning_test SET time = '2020-07-07' WHERE id = 7; ERROR: new row for relation "partitioning_test_2013_1660021" violates partition constraint DETAIL: Failing row contains (7, 2020-07-07). 
CONTEXT: while executing command on localhost:57638 -- UPDATE with subqueries on partitioned table UPDATE partitioning_test SET time = time + INTERVAL '1 day' WHERE id IN (SELECT id FROM partitioning_test WHERE id = 1); -- UPDATE with subqueries on partition UPDATE partitioning_test_2009 SET time = time + INTERVAL '1 month' WHERE id IN (SELECT id FROM partitioning_test WHERE id = 2); -- see the data is updated SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER BY 1; id | time ----+------------ 1 | 06-07-2009 2 | 07-07-2010 (2 rows) -- test DELETE -- DELETE from partitioned table DELETE FROM partitioning_test WHERE id = 9; -- DELETE from partition directly DELETE FROM partitioning_test_2010 WHERE id = 10; -- see the data is deleted SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; id | time ----+------ (0 rows) -- test master_modify_multiple_shards -- master_modify_multiple_shards on partitioned table SELECT master_modify_multiple_shards('UPDATE partitioning_test SET time = time + INTERVAL ''1 day'''); master_modify_multiple_shards ------------------------------- 24 (1 row) -- see rows are UPDATED SELECT * FROM partitioning_test ORDER BY 1; id | time ----+------------ 1 | 06-08-2009 2 | 07-08-2010 3 | 09-10-2009 4 | 03-04-2010 5 | 06-07-2012 5 | 06-07-2012 6 | 07-08-2012 6 | 07-08-2012 7 | 07-08-2013 8 | 08-09-2013 11 | 01-02-2011 11 | 01-02-2011 12 | 01-02-2012 12 | 01-02-2012 13 | 01-03-2009 14 | 01-04-2009 15 | 02-02-2009 16 | 02-02-2010 17 | 02-02-2011 17 | 02-02-2011 18 | 02-02-2012 18 | 02-02-2012 19 | 02-03-2009 20 | 02-03-2010 (24 rows) -- master_modify_multiple_shards on partition directly SELECT master_modify_multiple_shards('UPDATE partitioning_test_2009 SET time = time + INTERVAL ''1 day'''); master_modify_multiple_shards ------------------------------- 6 (1 row) -- see rows are UPDATED SELECT * FROM partitioning_test_2009 ORDER BY 1; id | time ----+------------ 1 | 06-09-2009 3 | 09-11-2009 13 | 01-04-2009 14 | 01-05-2009 15 | 02-03-2009 19 | 02-04-2009 (6 rows) -- test master_modify_multiple_shards which fails in workers (updated value is outside of partition bounds) SELECT master_modify_multiple_shards('UPDATE partitioning_test_2009 SET time = time + INTERVAL ''6 month'''); ERROR: new row for relation "partitioning_test_2009_1660005" violates partition constraint DETAIL: Failing row contains (3, 2010-03-11). 
CONTEXT: while executing command on localhost:57638 -- -- DDL in distributed partitioned tables -- -- test CREATE INDEX -- CREATE INDEX on partitioned table - this will error out CREATE INDEX partitioning_index ON partitioning_test(id); ERROR: cannot create index on partitioned table "partitioning_test" -- CREATE INDEX on partition CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id); -- CREATE INDEX CONCURRENTLY on partition CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id); -- see index is created SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test%' ORDER BY indexname; tablename | indexname ------------------------+------------------------- partitioning_test_2010 | partitioned_2010_index partitioning_test_2009 | partitioning_2009_index (2 rows) -- test add COLUMN -- add COLUMN to partitioned table ALTER TABLE partitioning_test ADD new_column int; -- add COLUMN to partition - this will error out ALTER TABLE partitioning_test_2010 ADD new_column_2 int; ERROR: cannot add column to a partition -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; name | type ------------+--------- id | integer new_column | integer time | date (3 rows) SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test_2010'::regclass ORDER BY 1; name | type ------------+--------- id | integer new_column | integer time | date (3 rows) -- test add PRIMARY KEY -- add PRIMARY KEY to partitioned table - this will error out ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_primary PRIMARY KEY (id); ERROR: primary key constraints are not supported on partitioned tables LINE 1: ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_pr... ^ -- ADD PRIMARY KEY to partition ALTER TABLE partitioning_test_2009 ADD CONSTRAINT partitioning_2009_primary PRIMARY KEY (id); -- see PRIMARY KEY is created SELECT table_name, constraint_name, constraint_type FROM information_schema.table_constraints WHERE table_name = 'partitioning_test_2009' AND constraint_name = 'partitioning_2009_primary'; table_name | constraint_name | constraint_type ------------------------+---------------------------+----------------- partitioning_test_2009 | partitioning_2009_primary | PRIMARY KEY (1 row) -- test ADD FOREIGN CONSTRAINT -- add FOREIGN CONSTRAINT to partitioned table -- this will error out ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id); ERROR: foreign key constraints are not supported on partitioned tables LINE 1: ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_fo... 
^ -- add FOREIGN CONSTRAINT to partition INSERT INTO partitioning_test_2009 VALUES (5, '2009-06-06'); INSERT INTO partitioning_test_2009 VALUES (6, '2009-07-07'); INSERT INTO partitioning_test_2009 VALUES(12, '2009-02-01'); INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01'); ALTER TABLE partitioning_test_2012 ADD CONSTRAINT partitioning_2012_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id) ON DELETE CASCADE; -- see FOREIGN KEY is created SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::regclass ORDER BY 1; Constraint --------------------------- partitioning_2012_foreign (1 row) -- test ON DELETE CASCADE works DELETE FROM partitioning_test_2009 WHERE id = 5; -- see that element is deleted from both partitions SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- test DETACH partition ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; -- see DETACHed partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2009-01-01' AND time < '2010-01-01' ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- -- Transaction tests -- -- DDL in transaction BEGIN; ALTER TABLE partitioning_test ADD newer_column int; -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; name | type --------------+--------- id | integer new_column | integer newer_column | integer time | date (4 rows) ROLLBACK; -- see rollback is successful SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; name | type ------------+--------- id | integer new_column | integer time | date (3 rows) -- COPY in transaction BEGIN; COPY partitioning_test FROM STDIN WITH CSV; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 22 ORDER BY 1; id | time | new_column ----+------------+------------ 22 | 01-01-2010 | 22 (1 row) SELECT * FROM partitioning_test WHERE id = 23 ORDER BY 1; id | time | new_column ----+------------+------------ 23 | 01-01-2011 | 23 (1 row) SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1; id | time | new_column ----+------------+------------ 24 | 01-01-2013 | 24 (1 row) ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- DML in transaction BEGIN; -- INSERT in transaction INSERT INTO partitioning_test VALUES(25, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; id | time | new_column ----+------------+------------ 25 | 02-02-2010 | (1 row) -- INSERT/SELECT in transaction INSERT INTO partitioning_test SELECT * FROM partitioning_test WHERE id = 25; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; id | time | new_column ----+------------+------------ 25 | 02-02-2010 | 25 | 02-02-2010 | (2 rows) -- UPDATE in transaction UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; id | time | new_column ----+------------+------------ 25 | 10-10-2010 | 25 | 10-10-2010 | (2 rows) -- perform operations on partition and partioned tables together INSERT INTO partitioning_test 
VALUES(26, '2010-02-02', 26); INSERT INTO partitioning_test_2010 VALUES(26, '2010-02-02', 26); COPY partitioning_test FROM STDIN WITH CSV; COPY partitioning_test_2010 FROM STDIN WITH CSV; -- see the data is loaded to shards (we should see 4 rows with same content) SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; id | time | new_column ----+------------+------------ 26 | 02-02-2010 | 26 26 | 02-02-2010 | 26 26 | 02-02-2010 | 26 26 | 02-02-2010 | 26 (4 rows) ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- DETACH and DROP in a transaction BEGIN; ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2011; DROP TABLE partitioning_test_2011; COMMIT; -- see DROPed partitions content is not accessible SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-01' ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- -- Misc tests -- -- test TRUNCATE -- test TRUNCATE partition TRUNCATE partitioning_test_2012; -- see partition is TRUNCATEd SELECT * FROM partitioning_test_2012 ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- test TRUNCATE partitioned table TRUNCATE partitioning_test; -- see partitioned table is TRUNCATEd SELECT * FROM partitioning_test ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- test DROP -- test DROP partition INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01'); DROP TABLE partitioning_test_2010; -- see DROPped partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2010-01-01' AND time < '2011-01-01' ORDER BY 1; id | time | new_column ----+------+------------ (0 rows) -- test DROP partitioned table DROP TABLE partitioning_test; -- dropping the parent should CASCADE to the children as well SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitioning_test%' ORDER BY 1; table_name --------------------------- partitioning_test_2009 partitioning_test_failure (2 rows) -- test distributing partitioned table colocated with non-partitioned table CREATE TABLE partitioned_users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); CREATE TABLE partitioned_events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioned_users_table', 'user_id', colocate_with => 'users_table'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('partitioned_events_table', 'user_id', colocate_with => 'events_table'); create_distributed_table -------------------------- (1 row) -- INSERT/SELECT from regular table to partitioned table CREATE TABLE partitioned_users_table_2009 PARTITION OF partitioned_users_table FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); CREATE TABLE partitioned_events_table_2009 PARTITION OF partitioned_events_table FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); INSERT INTO partitioned_events_table SELECT * FROM events_table; INSERT INTO partitioned_users_table_2009 SELECT * FROM users_table; -- -- Complex JOINs, subqueries, UNIONs etc... 
-- -- subquery with UNIONs on partitioned table SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM( (SELECT "events"."user_id", "events"."time", 0 AS event FROM partitioned_events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM partitioned_events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM partitioned_events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM partitioned_events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" ) AS final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 55 1 | 38 2 | 70 3 | 58 (4 rows) -- UNION and JOIN on both partitioned and regular tables SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM partitioned_events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_table as "events", users_table as "users" WHERE events.user_id = users.user_id AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM partitioned_events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM partitioned_users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 115 2 | 160 3 | 158 (3 rows) -- test LIST partitioning CREATE TABLE list_partitioned_events_table (user_id int, time date, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY LIST (time); CREATE TABLE list_partitioned_events_table_2014_01_01_05 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-01', '2014-01-02', '2014-01-03', '2014-01-04', '2014-01-05'); CREATE TABLE list_partitioned_events_table_2014_01_06_10 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-06', '2014-01-07', '2014-01-08', '2014-01-09', '2014-01-10'); CREATE TABLE list_partitioned_events_table_2014_01_11_15 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-11', '2014-01-12', '2014-01-13', '2014-01-14', '2014-01-15'); -- test distributing partitioned table colocated with another partitioned table SELECT 
create_distributed_table('list_partitioned_events_table', 'user_id', colocate_with => 'partitioned_events_table'); create_distributed_table -------------------------- (1 row) -- INSERT/SELECT from partitioned table to partitioned table INSERT INTO list_partitioned_events_table SELECT user_id, date_trunc('day', time) as time, event_type, value_2, value_3, value_4 FROM events_table WHERE time >= '2014-01-01' AND time <= '2014-01-15'; -- LEFT JOINs used with INNER JOINs on range partitioned table, list partitioned table and non-partitioned table SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "list_partitioned_events_table"."time", "list_partitioned_events_table"."user_id" as event_user_id FROM list_partitioned_events_table as "list_partitioned_events_table" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM partitioned_users_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" LEFT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM partitioned_users_table as "users") "left_group_by_1" ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; cnt | generated_group_field -----+----------------------- 68 | 551 68 | 569 68 | 645 68 | 713 68 | 734 34 | 3 34 | 5 34 | 15 34 | 32 34 | 68 (10 rows) -- -- Additional partitioning features -- -- test multi column partitioning CREATE TABLE multi_column_partitioning(c1 int, c2 int) PARTITION BY RANGE (c1, c2); CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF multi_column_partitioning FOR VALUES FROM (0, 0) TO (10, 0); SELECT create_distributed_table('multi_column_partitioning', 'c1'); create_distributed_table -------------------------- (1 row) -- test INSERT to multi-column partitioned table INSERT INTO multi_column_partitioning VALUES(1, 1); INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5); -- test INSERT to multi-column partitioned table where no suitable partition exists INSERT INTO multi_column_partitioning VALUES(10, 1); ERROR: no partition of relation "multi_column_partitioning_1660068" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (10, 1). CONTEXT: while executing command on localhost:57637 -- test with MINVALUE/MAXVALUE CREATE TABLE multi_column_partitioning_10_max_20_min PARTITION OF multi_column_partitioning FOR VALUES FROM (10, MAXVALUE) TO (20, MINVALUE); -- test INSERT to partition with MINVALUE/MAXVALUE bounds INSERT INTO multi_column_partitioning VALUES(11, -11); INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19); -- test INSERT to multi-column partitioned table where no suitable partition exists INSERT INTO multi_column_partitioning VALUES(20, -20); ERROR: no partition of relation "multi_column_partitioning_1660068" found for row DETAIL: Partition key of the failing row contains (c1, c2) = (20, -20). 
CONTEXT: while executing command on localhost:57637 -- see data is loaded to multi-column partitioned table SELECT * FROM multi_column_partitioning ORDER BY 1, 2; c1 | c2 ----+----- 1 | 1 5 | -5 11 | -11 19 | -19 (4 rows) -- -- Tests for locks on partitioned tables -- CREATE TABLE partitioning_locks(id int, ref_id int, time date) PARTITION BY RANGE (time); -- create its partitions CREATE TABLE partitioning_locks_2009 PARTITION OF partitioning_locks FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); CREATE TABLE partitioning_locks_2010 PARTITION OF partitioning_locks FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -- distribute partitioned table SELECT create_distributed_table('partitioning_locks', 'id'); create_distributed_table -------------------------- (1 row) -- test locks on router SELECT BEGIN; SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; id | ref_id | time ----+--------+------ (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+----------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock (3 rows) COMMIT; -- test locks on real-time SELECT BEGIN; SELECT * FROM partitioning_locks ORDER BY 1, 2; id | ref_id | time ----+--------+------ (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+----------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock (3 rows) COMMIT; -- test locks on task-tracker SELECT SET citus.task_executor_type TO 'task-tracker'; BEGIN; SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2; id | ref_id | time | id | ref_id | time ----+--------+------+----+--------+------ (0 rows) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+----------------- partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2010 | relation | AccessShareLock (3 rows) COMMIT; SET citus.task_executor_type TO 'real-time'; -- test locks on INSERT BEGIN; INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+------------------ partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2009 | relation | RowExclusiveLock partitioning_locks_2010 | relation | AccessShareLock partitioning_locks_2010 | relation | RowExclusiveLock (6 rows) COMMIT; -- test locks on UPDATE BEGIN; UPDATE partitioning_locks SET time = '2009-02-01' WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; 
relation | locktype | mode -------------------------+----------+------------------ partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2009 | relation | RowExclusiveLock partitioning_locks_2010 | relation | AccessShareLock partitioning_locks_2010 | relation | RowExclusiveLock (6 rows) COMMIT; -- test locks on DELETE BEGIN; DELETE FROM partitioning_locks WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+------------------ partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2009 | relation | RowExclusiveLock partitioning_locks_2010 | relation | AccessShareLock partitioning_locks_2010 | relation | RowExclusiveLock (6 rows) COMMIT; -- test locks on INSERT/SELECT CREATE TABLE partitioning_locks_for_select(id int, ref_id int, time date); SELECT create_distributed_table('partitioning_locks_for_select', 'id'); create_distributed_table -------------------------- (1 row) BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------------+----------+------------------ partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2009 | relation | RowExclusiveLock partitioning_locks_2010 | relation | AccessShareLock partitioning_locks_2010 | relation | RowExclusiveLock partitioning_locks_for_select | relation | AccessShareLock (7 rows) COMMIT; -- test locks on coordinator INSERT/SELECT BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select LIMIT 5; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------------+----------+------------------ partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | RowExclusiveLock partitioning_locks_2010 | relation | RowExclusiveLock partitioning_locks_for_select | relation | AccessShareLock (5 rows) COMMIT; -- test locks on master_modify_multiple_shards BEGIN; SELECT master_modify_multiple_shards('UPDATE partitioning_locks SET time = ''2009-03-01'''); master_modify_multiple_shards ------------------------------- 0 (1 row) SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+------------------ partitioning_locks | relation | AccessShareLock partitioning_locks | relation | RowExclusiveLock partitioning_locks_2009 | relation | RowExclusiveLock partitioning_locks_2010 | relation | RowExclusiveLock (4 rows) COMMIT; -- test locks on DDL BEGIN; ALTER TABLE partitioning_locks ADD COLUMN new_column int; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 
'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+--------------------- partitioning_locks | relation | AccessExclusiveLock partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessExclusiveLock partitioning_locks_2010 | relation | AccessExclusiveLock (4 rows) COMMIT; -- test locks on TRUNCATE BEGIN; TRUNCATE partitioning_locks; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; relation | locktype | mode -------------------------+----------+--------------------- partitioning_locks | relation | AccessExclusiveLock partitioning_locks | relation | AccessShareLock partitioning_locks_2009 | relation | AccessExclusiveLock partitioning_locks_2009 | relation | AccessShareLock partitioning_locks_2009 | relation | RowExclusiveLock partitioning_locks_2009 | relation | ShareLock partitioning_locks_2010 | relation | AccessExclusiveLock partitioning_locks_2010 | relation | AccessShareLock partitioning_locks_2010 | relation | RowExclusiveLock partitioning_locks_2010 | relation | ShareLock (10 rows) COMMIT; -- test shard resource locks with master_modify_multiple_shards BEGIN; SELECT master_modify_multiple_shards('UPDATE partitioning_locks_2009 SET time = ''2009-03-01'''); master_modify_multiple_shards ------------------------------- 0 (1 row) -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; logicalrelid | locktype | mode -------------------------+----------+-------------------------- partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock (12 rows) COMMIT; -- test shard resource locks with TRUNCATE BEGIN; TRUNCATE partitioning_locks_2009; -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; logicalrelid | locktype | mode -------------------------+----------+-------------------------- partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | 
advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock (12 rows) COMMIT; -- test shard resource locks with INSERT/SELECT BEGIN; INSERT INTO partitioning_locks_2009 SELECT * FROM partitioning_locks WHERE time >= '2009-01-01' AND time < '2010-01-01'; -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; logicalrelid | locktype | mode -------------------------+----------+-------------------------- partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock partitioning_locks_2009 | advisory | ShareUpdateExclusiveLock (12 rows) COMMIT; DROP TABLE IF EXISTS partitioning_test_2012, partitioning_test_2013, partitioned_events_table, partitioned_users_table, list_partitioned_events_table, multi_column_partitioning, partitioning_locks, partitioning_locks_for_select; NOTICE: table "partitioning_test_2012" does not exist, skipping NOTICE: table "partitioning_test_2013" does not exist, skipping citus-7.0.3/src/test/regress/expected/multi_partitioning_0.out000066400000000000000000001447331317107136600246030ustar00rootroot00000000000000-- -- Distributed Partitioned Table Tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1660000; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; -- -- Distributed Partitioned Table Creation Tests -- -- 1-) Distributing partitioned table -- create partitioned table CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_test(id int, time date) PARTITION ... ^ -- create its partitions CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_test_2009 PARTITION OF partitionin... ^ CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_test_2010 PARTITION OF partitionin... 
^ -- load some data and distribute tables INSERT INTO partitioning_test VALUES (1, '2009-06-06'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES (1, '2009-06-06'); ^ INSERT INTO partitioning_test VALUES (2, '2010-07-07'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES (2, '2010-07-07'); ^ INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); ERROR: relation "partitioning_test_2009" does not exist LINE 1: INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); ^ INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); ERROR: relation "partitioning_test_2010" does not exist LINE 1: INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); ^ -- distribute partitioned table SELECT create_distributed_table('partitioning_test', 'id'); ERROR: relation "partitioning_test" does not exist LINE 1: SELECT create_distributed_table('partitioning_test', 'id'); ^ -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test ORDER BY 1; ^ -- see partitioned table and its partitions are distributed SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20... ^ SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') GROUP BY logicalrelid ORDER BY 1,2; ERROR: relation "partitioning_test" does not exist LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t... ^ -- 2-) Creating partition of a distributed table CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_test_2011 PARTITION OF partitionin... ^ -- new partition is automatically distributed as well SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20... ^ SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') GROUP BY logicalrelid ORDER BY 1,2; ERROR: relation "partitioning_test" does not exist LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t... ^ -- 3-) Attaching non distributed table to a distributed table CREATE TABLE partitioning_test_2012(id int, time date); -- load some data INSERT INTO partitioning_test_2012 VALUES (5, '2012-06-06'); INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01'); ERROR: syntax error at or near "ATTACH" LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_... ^ -- attached partition is distributed as well SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 6: logicalrelid IN ('partitioning_test', 'partitioning_test_20... 
^ SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') GROUP BY logicalrelid ORDER BY 1,2; ERROR: relation "partitioning_test" does not exist LINE 4: WHERE logicalrelid IN ('partitioning_test', 'partitioning_t... ^ -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test ORDER BY 1; ^ -- 4-) Attaching distributed table to distributed table CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); create_distributed_table -------------------------- (1 row) -- load some data INSERT INTO partitioning_test_2013 VALUES (7, '2013-06-06'); INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01'); ERROR: syntax error at or near "ATTACH" LINE 1: ALTER TABLE partitioning_test ATTACH PARTITION partitioning_... ^ -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test ORDER BY 1; ^ -- 5-) Failure cases while creating distributed partitioned tables -- cannot distribute a partition if its parent is not distributed CREATE TABLE partitioning_test_failure(id int, time date) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: ...ABLE partitioning_test_failure(id int, time date) PARTITION ... ^ CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_test_failure_2009 PARTITION OF par... ^ SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); ERROR: relation "partitioning_test_failure_2009" does not exist LINE 1: SELECT create_distributed_table('partitioning_test_failure_2... ^ -- only hash distributed tables can have partitions SELECT create_distributed_table('partitioning_test_failure', 'id', 'append'); ERROR: relation "partitioning_test_failure" does not exist LINE 1: SELECT create_distributed_table('partitioning_test_failure',... ^ SELECT create_distributed_table('partitioning_test_failure', 'id', 'range'); ERROR: relation "partitioning_test_failure" does not exist LINE 1: SELECT create_distributed_table('partitioning_test_failure',... ^ SELECT create_reference_table('partitioning_test_failure'); ERROR: relation "partitioning_test_failure" does not exist LINE 1: SELECT create_reference_table('partitioning_test_failure'); ^ -- replication factor > 1 is not allowed in distributed partitioned tables SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('partitioning_test_failure', 'id'); ERROR: relation "partitioning_test_failure" does not exist LINE 1: SELECT create_distributed_table('partitioning_test_failure',... 
^ SET citus.shard_replication_factor TO 1; -- non-distributed tables cannot have distributed partitions; DROP TABLE partitioning_test_failure_2009; ERROR: table "partitioning_test_failure_2009" does not exist CREATE TABLE partitioning_test_failure_2009(id int, time date); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); create_distributed_table -------------------------- (1 row) ALTER TABLE partitioning_test_failure ATTACH PARTITION partitioning_test_failure_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); ERROR: syntax error at or near "ATTACH" LINE 1: ALTER TABLE partitioning_test_failure ATTACH PARTITION parti... ^ -- multi-level partitioning is not allowed DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_test_failure_2009 PARTITION OF par... ^ SELECT create_distributed_table('partitioning_test_failure', 'id'); ERROR: relation "partitioning_test_failure" does not exist LINE 1: SELECT create_distributed_table('partitioning_test_failure',... ^ -- multi-level partitioning is not allowed in different order DROP TABLE partitioning_test_failure_2009; ERROR: table "partitioning_test_failure_2009" does not exist SELECT create_distributed_table('partitioning_test_failure', 'id'); ERROR: relation "partitioning_test_failure" does not exist LINE 1: SELECT create_distributed_table('partitioning_test_failure',... ^ CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_test_failure_2009 PARTITION OF par... ^ -- -- DMLs in distributed partitioned tables -- -- test COPY -- COPY data to partitioned table COPY partitioning_test FROM STDIN WITH CSV; ERROR: relation "partitioning_test" does not exist 9,2009-01-01 10,2010-01-01 11,2011-01-01 12,2012-01-01 \. invalid command \. -- COPY data to partition directly COPY partitioning_test_2009 FROM STDIN WITH CSV; ERROR: syntax error at or near "9" LINE 1: 9,2009-01-01 ^ 13,2009-01-02 14,2009-01-03 \. invalid command \. 
-- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 9 ORDER BY 1; ERROR: syntax error at or near "13" LINE 1: 13,2009-01-02 ^ -- test INSERT -- INSERT INTO the partitioned table INSERT INTO partitioning_test VALUES(15, '2009-02-01'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES(15, '2009-02-01'); ^ INSERT INTO partitioning_test VALUES(16, '2010-02-01'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES(16, '2010-02-01'); ^ INSERT INTO partitioning_test VALUES(17, '2011-02-01'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES(17, '2011-02-01'); ^ INSERT INTO partitioning_test VALUES(18, '2012-02-01'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES(18, '2012-02-01'); ^ -- INSERT INTO the partitions directly table INSERT INTO partitioning_test VALUES(19, '2009-02-02'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES(19, '2009-02-02'); ^ INSERT INTO partitioning_test VALUES(20, '2010-02-02'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES(20, '2010-02-02'); ^ -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; ^ -- test INSERT/SELECT -- INSERT/SELECT from partition to partitioned table INSERT INTO partitioning_test SELECT * FROM partitioning_test_2011; ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test SELECT * FROM partitioning_tes... ^ -- INSERT/SELECT from partitioned table to partition INSERT INTO partitioning_test_2012 SELECT * FROM partitioning_test WHERE time >= '2012-01-01' AND time < '2013-01-01'; ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test_2012 SELECT * FROM partitionin... ^ -- see the data is loaded to shards (rows in the given range should be duplicated) SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2013-01-01' ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE time >= '2011-01-01' A... ^ -- test UPDATE -- UPDATE partitioned table UPDATE partitioning_test SET time = '2013-07-07' WHERE id = 7; ERROR: relation "partitioning_test" does not exist LINE 1: UPDATE partitioning_test SET time = '2013-07-07' WHERE id = ... ^ -- UPDATE partition directly UPDATE partitioning_test_2013 SET time = '2013-08-08' WHERE id = 8; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER... ^ -- UPDATE that tries to move a row to a non-existing partition (this should fail) UPDATE partitioning_test SET time = '2020-07-07' WHERE id = 7; ERROR: relation "partitioning_test" does not exist LINE 1: UPDATE partitioning_test SET time = '2020-07-07' WHERE id = ... 
^ -- UPDATE with subqueries on partitioned table UPDATE partitioning_test SET time = time + INTERVAL '1 day' WHERE id IN (SELECT id FROM partitioning_test WHERE id = 1); ERROR: relation "partitioning_test" does not exist LINE 2: partitioning_test ^ -- UPDATE with subqueries on partition UPDATE partitioning_test_2009 SET time = time + INTERVAL '1 month' WHERE id IN (SELECT id FROM partitioning_test WHERE id = 2); ERROR: relation "partitioning_test_2009" does not exist LINE 2: partitioning_test_2009 ^ -- see the data is updated SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER... ^ -- test DELETE -- DELETE from partitioned table DELETE FROM partitioning_test WHERE id = 9; ERROR: relation "partitioning_test" does not exist LINE 1: DELETE FROM partitioning_test WHERE id = 9; ^ -- DELETE from partition directly DELETE FROM partitioning_test_2010 WHERE id = 10; ERROR: relation "partitioning_test_2010" does not exist LINE 1: DELETE FROM partitioning_test_2010 WHERE id = 10; ^ -- see the data is deleted SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDE... ^ -- test master_modify_multiple_shards -- master_modify_multiple_shards on partitioned table SELECT master_modify_multiple_shards('UPDATE partitioning_test SET time = time + INTERVAL ''1 day'''); ERROR: relation "partitioning_test" does not exist -- see rows are UPDATED SELECT * FROM partitioning_test ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test ORDER BY 1; ^ -- master_modify_multiple_shards on partition directly SELECT master_modify_multiple_shards('UPDATE partitioning_test_2009 SET time = time + INTERVAL ''1 day'''); ERROR: relation "partitioning_test_2009" does not exist -- see rows are UPDATED SELECT * FROM partitioning_test_2009 ORDER BY 1; ERROR: relation "partitioning_test_2009" does not exist LINE 1: SELECT * FROM partitioning_test_2009 ORDER BY 1; ^ -- test master_modify_multiple_shards which fails in workers (updated value is outside of partition bounds) SELECT master_modify_multiple_shards('UPDATE partitioning_test_2009 SET time = time + INTERVAL ''6 month'''); ERROR: relation "partitioning_test_2009" does not exist -- -- DDL in distributed partitioned tables -- -- test CREATE INDEX -- CREATE INDEX on partitioned table - this will error out CREATE INDEX partitioning_index ON partitioning_test(id); ERROR: relation "partitioning_test" does not exist -- CREATE INDEX on partition CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id); ERROR: relation "partitioning_test_2009" does not exist -- CREATE INDEX CONCURRENTLY on partition CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id); ERROR: relation "partitioning_test_2010" does not exist -- see index is created SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test%' ORDER BY indexname; tablename | indexname -----------+----------- (0 rows) -- test add COLUMN -- add COLUMN to partitioned table ALTER TABLE partitioning_test ADD new_column int; ERROR: relation "partitioning_test" does not exist -- add COLUMN to partition - this will error out ALTER TABLE partitioning_test_2010 ADD new_column_2 int; ERROR: relation "partitioning_test_2010" does not exist -- see additional column is created SELECT 
name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT name, type FROM table_attrs WHERE relid = 'partitioni... ^ SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test_2010'::regclass ORDER BY 1; ERROR: relation "partitioning_test_2010" does not exist LINE 1: SELECT name, type FROM table_attrs WHERE relid = 'partitioni... ^ -- test add PRIMARY KEY -- add PRIMARY KEY to partitioned table - this will error out ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_primary PRIMARY KEY (id); ERROR: relation "partitioning_test" does not exist -- ADD PRIMARY KEY to partition ALTER TABLE partitioning_test_2009 ADD CONSTRAINT partitioning_2009_primary PRIMARY KEY (id); ERROR: relation "partitioning_test_2009" does not exist -- see PRIMARY KEY is created SELECT table_name, constraint_name, constraint_type FROM information_schema.table_constraints WHERE table_name = 'partitioning_test_2009' AND constraint_name = 'partitioning_2009_primary'; table_name | constraint_name | constraint_type ------------+-----------------+----------------- (0 rows) -- test ADD FOREIGN CONSTRAINT -- add FOREIGN CONSTRAINT to partitioned table -- this will error out ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id); ERROR: relation "partitioning_test" does not exist -- add FOREIGN CONSTRAINT to partition INSERT INTO partitioning_test_2009 VALUES (5, '2009-06-06'); ERROR: relation "partitioning_test_2009" does not exist LINE 1: INSERT INTO partitioning_test_2009 VALUES (5, '2009-06-06'); ^ INSERT INTO partitioning_test_2009 VALUES (6, '2009-07-07'); ERROR: relation "partitioning_test_2009" does not exist LINE 1: INSERT INTO partitioning_test_2009 VALUES (6, '2009-07-07'); ^ INSERT INTO partitioning_test_2009 VALUES(12, '2009-02-01'); ERROR: relation "partitioning_test_2009" does not exist LINE 1: INSERT INTO partitioning_test_2009 VALUES(12, '2009-02-01'); ^ INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01'); ERROR: relation "partitioning_test_2009" does not exist LINE 1: INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01'); ^ ALTER TABLE partitioning_test_2012 ADD CONSTRAINT partitioning_2012_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id) ON DELETE CASCADE; ERROR: relation "partitioning_test_2009" does not exist -- see FOREIGN KEY is created SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::regclass ORDER BY 1; Constraint ------------ (0 rows) -- test ON DELETE CASCADE works DELETE FROM partitioning_test_2009 WHERE id = 5; ERROR: relation "partitioning_test_2009" does not exist LINE 1: DELETE FROM partitioning_test_2009 WHERE id = 5; ^ -- see that element is deleted from both partitions SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1; ERROR: relation "partitioning_test_2009" does not exist LINE 1: SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1... ^ SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1; id | time ----+------------ 5 | 06-06-2012 (1 row) -- test DETACH partition ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; ERROR: syntax error at or near "DETACH" LINE 1: ALTER TABLE partitioning_test DETACH PARTITION partitioning_... 
^ -- see DETACHed partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2009-01-01' AND time < '2010-01-01' ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE time >= '2009-01-01' A... ^ -- -- Transaction tests -- -- DDL in transaction BEGIN; ALTER TABLE partitioning_test ADD newer_column int; ERROR: relation "partitioning_test" does not exist -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; -- see rollback is successful SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT name, type FROM table_attrs WHERE relid = 'partitioni... ^ -- COPY in transaction BEGIN; COPY partitioning_test FROM STDIN WITH CSV; ERROR: relation "partitioning_test" does not exist 22,2010-01-01,22 23,2011-01-01,23 24,2013-01-01,24 \. invalid command \. -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 22 ORDER BY 1; ERROR: syntax error at or near "22" LINE 1: 22,2010-01-01,22 ^ SELECT * FROM partitioning_test WHERE id = 23 ORDER BY 1; ERROR: current transaction is aborted, commands ignored until end of transaction block SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; ^ -- DML in transaction BEGIN; -- INSERT in transaction INSERT INTO partitioning_test VALUES(25, '2010-02-02'); ERROR: relation "partitioning_test" does not exist LINE 1: INSERT INTO partitioning_test VALUES(25, '2010-02-02'); ^ -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; ERROR: current transaction is aborted, commands ignored until end of transaction block -- INSERT/SELECT in transaction INSERT INTO partitioning_test SELECT * FROM partitioning_test WHERE id = 25; ERROR: current transaction is aborted, commands ignored until end of transaction block -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; ERROR: current transaction is aborted, commands ignored until end of transaction block -- UPDATE in transaction UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25; ERROR: current transaction is aborted, commands ignored until end of transaction block -- see the data is updated SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; ERROR: current transaction is aborted, commands ignored until end of transaction block -- perform operations on partition and partioned tables together INSERT INTO partitioning_test VALUES(26, '2010-02-02', 26); ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO partitioning_test_2010 VALUES(26, '2010-02-02', 26); ERROR: current transaction is aborted, commands ignored until end of transaction block COPY partitioning_test FROM STDIN WITH CSV; ERROR: current transaction is aborted, commands ignored until end of transaction block 26,2010-02-02,26 \. invalid command \. 
COPY partitioning_test_2010 FROM STDIN WITH CSV; ERROR: syntax error at or near "26" LINE 1: 26,2010-02-02,26 ^ 26,2010-02-02,26 \. invalid command \. -- see the data is loaded to shards (we should see 4 rows with same content) SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; ERROR: syntax error at or near "26" LINE 1: 26,2010-02-02,26 ^ ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; ^ -- DETACH and DROP in a transaction BEGIN; ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2011; ERROR: syntax error at or near "DETACH" LINE 1: ALTER TABLE partitioning_test DETACH PARTITION partitioning_... ^ DROP TABLE partitioning_test_2011; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- see DROPed partitions content is not accessible SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-01' ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE time >= '2011-01-01' A... ^ -- -- Misc tests -- -- test TRUNCATE -- test TRUNCATE partition TRUNCATE partitioning_test_2012; -- see partition is TRUNCATEd SELECT * FROM partitioning_test_2012 ORDER BY 1; id | time ----+------ (0 rows) -- test TRUNCATE partitioned table TRUNCATE partitioning_test; ERROR: relation "partitioning_test" does not exist -- see partitioned table is TRUNCATEd SELECT * FROM partitioning_test ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test ORDER BY 1; ^ -- test DROP -- test DROP partition INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01'); ERROR: relation "partitioning_test_2010" does not exist LINE 1: INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01'); ^ DROP TABLE partitioning_test_2010; ERROR: table "partitioning_test_2010" does not exist -- see DROPped partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2010-01-01' AND time < '2011-01-01' ORDER BY 1; ERROR: relation "partitioning_test" does not exist LINE 1: SELECT * FROM partitioning_test WHERE time >= '2010-01-01' A... ^ -- test DROP partitioned table DROP TABLE partitioning_test; ERROR: table "partitioning_test" does not exist -- dropping the parent should CASCADE to the children as well SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitioning_test%' ORDER BY 1; table_name ------------------------ partitioning_test_2012 partitioning_test_2013 (2 rows) -- test distributing partitioned table colocated with non-partitioned table CREATE TABLE partitioned_users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: ... int, value_2 int, value_3 float, value_4 bigint) PARTITION ... ^ CREATE TABLE partitioned_events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: ... int, value_2 int, value_3 float, value_4 bigint) PARTITION ... ^ SELECT create_distributed_table('partitioned_users_table', 'user_id', colocate_with => 'users_table'); ERROR: relation "partitioned_users_table" does not exist LINE 1: SELECT create_distributed_table('partitioned_users_table', '... 
^ SELECT create_distributed_table('partitioned_events_table', 'user_id', colocate_with => 'events_table'); ERROR: relation "partitioned_events_table" does not exist LINE 1: SELECT create_distributed_table('partitioned_events_table', ... ^ -- INSERT/SELECT from regular table to partitioned table CREATE TABLE partitioned_users_table_2009 PARTITION OF partitioned_users_table FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioned_users_table_2009 PARTITION OF parti... ^ CREATE TABLE partitioned_events_table_2009 PARTITION OF partitioned_events_table FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioned_events_table_2009 PARTITION OF part... ^ INSERT INTO partitioned_events_table SELECT * FROM events_table; ERROR: relation "partitioned_events_table" does not exist LINE 1: INSERT INTO partitioned_events_table SELECT * FROM events_ta... ^ INSERT INTO partitioned_users_table_2009 SELECT * FROM users_table; ERROR: relation "partitioned_users_table_2009" does not exist LINE 1: INSERT INTO partitioned_users_table_2009 SELECT * FROM users... ^ -- -- Complex JOINs, subqueries, UNIONs etc... -- -- subquery with UNIONs on partitioned table SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM( (SELECT "events"."user_id", "events"."time", 0 AS event FROM partitioned_events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM partitioned_events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM partitioned_events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM partitioned_events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" ) AS final_query GROUP BY types ORDER BY types; ERROR: relation "partitioned_events_table" does not exist LINE 14: partitioned_events_table as "events" ^ -- UNION and JOIN on both partitioned and regular tables SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM partitioned_events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_table as "events", users_table as "users" WHERE events.user_id = users.user_id AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM partitioned_events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as 
"events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM partitioned_users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: relation "partitioned_events_table" does not exist LINE 18: partitioned_events_table as "events" ^ -- test LIST partitioning CREATE TABLE list_partitioned_events_table (user_id int, time date, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY LIST (time); ERROR: syntax error at or near "PARTITION" LINE 1: ... int, value_2 int, value_3 float, value_4 bigint) PARTITION ... ^ CREATE TABLE list_partitioned_events_table_2014_01_01_05 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-01', '2014-01-02', '2014-01-03', '2014-01-04', '2014-01-05'); ERROR: syntax error at or near "PARTITION" LINE 1: ...TABLE list_partitioned_events_table_2014_01_01_05 PARTITION ... ^ CREATE TABLE list_partitioned_events_table_2014_01_06_10 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-06', '2014-01-07', '2014-01-08', '2014-01-09', '2014-01-10'); ERROR: syntax error at or near "PARTITION" LINE 1: ...TABLE list_partitioned_events_table_2014_01_06_10 PARTITION ... ^ CREATE TABLE list_partitioned_events_table_2014_01_11_15 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-11', '2014-01-12', '2014-01-13', '2014-01-14', '2014-01-15'); ERROR: syntax error at or near "PARTITION" LINE 1: ...TABLE list_partitioned_events_table_2014_01_11_15 PARTITION ... ^ -- test distributing partitioned table colocated with another partitioned table SELECT create_distributed_table('list_partitioned_events_table', 'user_id', colocate_with => 'partitioned_events_table'); ERROR: relation "list_partitioned_events_table" does not exist LINE 1: SELECT create_distributed_table('list_partitioned_events_tab... 
^ -- INSERT/SELECT from partitioned table to partitioned table INSERT INTO list_partitioned_events_table SELECT user_id, date_trunc('day', time) as time, event_type, value_2, value_3, value_4 FROM events_table WHERE time >= '2014-01-01' AND time <= '2014-01-15'; ERROR: relation "list_partitioned_events_table" does not exist LINE 2: list_partitioned_events_table ^ -- LEFT JOINs used with INNER JOINs on range partitioned table, list partitioned table and non-partitioned table SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "list_partitioned_events_table"."time", "list_partitioned_events_table"."user_id" as event_user_id FROM list_partitioned_events_table as "list_partitioned_events_table" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM partitioned_users_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" LEFT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM partitioned_users_table as "users") "left_group_by_1" ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; ERROR: relation "list_partitioned_events_table" does not exist LINE 15: list_partitioned_events_table as "list_partitio... ^ -- -- Additional partitioning features -- -- test multi column partitioning CREATE TABLE multi_column_partitioning(c1 int, c2 int) PARTITION BY RANGE (c1, c2); ERROR: syntax error at or near "PARTITION" LINE 1: ...E TABLE multi_column_partitioning(c1 int, c2 int) PARTITION ... ^ CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF multi_column_partitioning FOR VALUES FROM (0, 0) TO (10, 0); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF... ^ SELECT create_distributed_table('multi_column_partitioning', 'c1'); ERROR: relation "multi_column_partitioning" does not exist LINE 1: SELECT create_distributed_table('multi_column_partitioning',... ^ -- test INSERT to multi-column partitioned table INSERT INTO multi_column_partitioning VALUES(1, 1); ERROR: relation "multi_column_partitioning" does not exist LINE 1: INSERT INTO multi_column_partitioning VALUES(1, 1); ^ INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5); ERROR: relation "multi_column_partitioning_0_0_10_0" does not exist LINE 1: INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5)... ^ -- test INSERT to multi-column partitioned table where no suitable partition exists INSERT INTO multi_column_partitioning VALUES(10, 1); ERROR: relation "multi_column_partitioning" does not exist LINE 1: INSERT INTO multi_column_partitioning VALUES(10, 1); ^ -- test with MINVALUE/MAXVALUE CREATE TABLE multi_column_partitioning_10_max_20_min PARTITION OF multi_column_partitioning FOR VALUES FROM (10, MAXVALUE) TO (20, MINVALUE); ERROR: syntax error at or near "PARTITION" LINE 1: ...ATE TABLE multi_column_partitioning_10_max_20_min PARTITION ... 
^ -- test INSERT to partition with MINVALUE/MAXVALUE bounds INSERT INTO multi_column_partitioning VALUES(11, -11); ERROR: relation "multi_column_partitioning" does not exist LINE 1: INSERT INTO multi_column_partitioning VALUES(11, -11); ^ INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19); ERROR: relation "multi_column_partitioning_10_max_20_min" does not exist LINE 1: INSERT INTO multi_column_partitioning_10_max_20_min VALUES(1... ^ -- test INSERT to multi-column partitioned table where no suitable partition exists INSERT INTO multi_column_partitioning VALUES(20, -20); ERROR: relation "multi_column_partitioning" does not exist LINE 1: INSERT INTO multi_column_partitioning VALUES(20, -20); ^ -- see data is loaded to multi-column partitioned table SELECT * FROM multi_column_partitioning ORDER BY 1, 2; ERROR: relation "multi_column_partitioning" does not exist LINE 1: SELECT * FROM multi_column_partitioning ORDER BY 1, 2; ^ -- -- Tests for locks on partitioned tables -- CREATE TABLE partitioning_locks(id int, ref_id int, time date) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: ...partitioning_locks(id int, ref_id int, time date) PARTITION ... ^ -- create its partitions CREATE TABLE partitioning_locks_2009 PARTITION OF partitioning_locks FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_locks_2009 PARTITION OF partitioni... ^ CREATE TABLE partitioning_locks_2010 PARTITION OF partitioning_locks FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE partitioning_locks_2010 PARTITION OF partitioni... ^ -- distribute partitioned table SELECT create_distributed_table('partitioning_locks', 'id'); ERROR: relation "partitioning_locks" does not exist LINE 1: SELECT create_distributed_table('partitioning_locks', 'id'); ^ -- test locks on router SELECT BEGIN; SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; ERROR: relation "partitioning_locks" does not exist LINE 1: SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; ^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on real-time SELECT BEGIN; SELECT * FROM partitioning_locks ORDER BY 1, 2; ERROR: relation "partitioning_locks" does not exist LINE 1: SELECT * FROM partitioning_locks ORDER BY 1, 2; ^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on task-tracker SELECT SET citus.task_executor_type TO 'task-tracker'; BEGIN; SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2; ERROR: relation "partitioning_locks" does not exist LINE 1: SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_lo... 
^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SET citus.task_executor_type TO 'real-time'; -- test locks on INSERT BEGIN; INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); ERROR: relation "partitioning_locks" does not exist LINE 1: INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); ^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on UPDATE BEGIN; UPDATE partitioning_locks SET time = '2009-02-01' WHERE id = 1; ERROR: relation "partitioning_locks" does not exist LINE 1: UPDATE partitioning_locks SET time = '2009-02-01' WHERE id =... ^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on DELETE BEGIN; DELETE FROM partitioning_locks WHERE id = 1; ERROR: relation "partitioning_locks" does not exist LINE 1: DELETE FROM partitioning_locks WHERE id = 1; ^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on INSERT/SELECT CREATE TABLE partitioning_locks_for_select(id int, ref_id int, time date); SELECT create_distributed_table('partitioning_locks_for_select', 'id'); create_distributed_table -------------------------- (1 row) BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select; ERROR: relation "partitioning_locks" does not exist LINE 1: INSERT INTO partitioning_locks SELECT * FROM partitioning_lo... ^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on coordinator INSERT/SELECT BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select LIMIT 5; ERROR: relation "partitioning_locks" does not exist LINE 1: INSERT INTO partitioning_locks SELECT * FROM partitioning_lo... 
^ SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on master_modify_multiple_shards BEGIN; SELECT master_modify_multiple_shards('UPDATE partitioning_locks SET time = ''2009-03-01'''); ERROR: relation "partitioning_locks" does not exist SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on DDL BEGIN; ALTER TABLE partitioning_locks ADD COLUMN new_column int; ERROR: relation "partitioning_locks" does not exist SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test locks on TRUNCATE BEGIN; TRUNCATE partitioning_locks; ERROR: relation "partitioning_locks" does not exist SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test shard resource locks with master_modify_multiple_shards BEGIN; SELECT master_modify_multiple_shards('UPDATE partitioning_locks_2009 SET time = ''2009-03-01'''); ERROR: relation "partitioning_locks_2009" does not exist -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test shard resource locks with TRUNCATE BEGIN; TRUNCATE partitioning_locks_2009; ERROR: relation "partitioning_locks_2009" does not exist -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; -- test shard resource locks with INSERT/SELECT BEGIN; INSERT INTO partitioning_locks_2009 SELECT * FROM partitioning_locks WHERE time >= '2009-01-01' AND time < '2010-01-01'; ERROR: relation "partitioning_locks_2009" does not exist LINE 1: INSERT INTO partitioning_locks_2009 SELECT * FROM partitioni... 
^ -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; DROP TABLE IF EXISTS partitioning_test_2012, partitioning_test_2013, partitioned_events_table, partitioned_users_table, list_partitioned_events_table, multi_column_partitioning, partitioning_locks, partitioning_locks_for_select; NOTICE: table "partitioned_events_table" does not exist, skipping NOTICE: table "partitioned_users_table" does not exist, skipping NOTICE: table "list_partitioned_events_table" does not exist, skipping NOTICE: table "multi_column_partitioning" does not exist, skipping NOTICE: table "partitioning_locks" does not exist, skipping citus-7.0.3/src/test/regress/expected/multi_partitioning_utils.out000066400000000000000000000472011317107136600255740ustar00rootroot00000000000000-- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION generate_alter_table_detach_partition_command(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION generate_alter_table_attach_partition_command(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION generate_partition_information(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION print_partitions(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION table_inherits(regclass) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION table_inherited(regclass) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION detach_and_attach_partition(partition_name regclass, parent_table_name regclass) RETURNS void LANGUAGE plpgsql VOLATILE AS $function$ DECLARE detach_partition_command text := ''; attach_partition_command text := ''; command_result text := ''; BEGIN -- first generate the command SELECT public.generate_alter_table_attach_partition_command(partition_name) INTO attach_partition_command; -- now genereate the detach command SELECT public.generate_alter_table_detach_partition_command(partition_name) INTO detach_partition_command; -- later detach the same partition EXECUTE detach_partition_command; -- not attach it again EXECUTE attach_partition_command; END; $function$; CREATE OR REPLACE FUNCTION drop_and_recreate_partitioned_table(parent_table_name regclass) RETURNS void LANGUAGE plpgsql VOLATILE AS $function$ DECLARE command text := ''; BEGIN -- first generate the command CREATE TABLE partitioned_table_create_commands AS SELECT master_get_table_ddl_events(parent_table_name::text); -- later detach the same partition EXECUTE 'DROP TABLE ' || parent_table_name::text || ';'; FOR command IN SELECT * FROM partitioned_table_create_commands LOOP -- can do some processing here EXECUTE command; END LOOP; DROP TABLE partitioned_table_create_commands; END; $function$; -- create a partitioned table CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('date_partitioned_table'); generate_partition_information -------------------------------- RANGE ("time") (1 row) -- we should be able to drop and re-create the partitioned table using 
the command that Citus generate SELECT drop_and_recreate_partitioned_table('date_partitioned_table'); drop_and_recreate_partitioned_table ------------------------------------- (1 row) -- we should also be able to see the PARTITION BY ... for the parent table SELECT master_get_table_ddl_events('date_partitioned_table'); master_get_table_ddl_events --------------------------------------------------------------------------------------------------- CREATE TABLE public.date_partitioned_table (id integer, "time" date) PARTITION BY RANGE ("time") (1 row) -- now create the partitions CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01'); -- we should be able to get the partitioning information after the partitions are created SELECT generate_partition_information('date_partitioned_table'); generate_partition_information -------------------------------- RANGE ("time") (1 row) -- lets get the attach partition commands SELECT generate_alter_table_attach_partition_command('date_partition_2006'); generate_alter_table_attach_partition_command ----------------------------------------------------------------------------------------------------------------------------------------- ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'); (1 row) SELECT generate_alter_table_attach_partition_command('date_partition_2007'); generate_alter_table_attach_partition_command ----------------------------------------------------------------------------------------------------------------------------------------- ALTER TABLE public.date_partitioned_table ATTACH PARTITION public.date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008'); (1 row) -- detach and attach the partition by the command generated by us \d+ date_partitioned_table Table "public.date_partitioned_table" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'), date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008') SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table'); detach_and_attach_partition ----------------------------- (1 row) -- check that both partitions are visiable \d+ date_partitioned_table Table "public.date_partitioned_table" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") Partitions: date_partition_2006 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'), date_partition_2007 FOR VALUES FROM ('01-01-2007') TO ('01-01-2008') -- make sure that inter shard commands work as expected -- assume that the shardId is 100 CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time); CREATE TABLE date_partition_2007_100 (id int, time date ); -- now create the partitioning hierarcy SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', referenced_shard:=100, 
referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' ); worker_apply_inter_shard_ddl_command -------------------------------------- (1 row) -- the hierarcy is successfully created \d+ date_partitioned_table_100 Table "public.date_partitioned_table_100" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") Partitions: date_partition_2007_100 FOR VALUES FROM ('01-01-2007') TO ('01-02-2008') -- Citus can also get the DDL events for the partitions as regular tables SELECT master_get_table_ddl_events('date_partition_2007_100'); master_get_table_ddl_events ----------------------------------------------------------------------- CREATE TABLE public.date_partition_2007_100 (id integer, "time" date) (1 row) -- now break the partitioning hierarcy SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' ); worker_apply_inter_shard_ddl_command -------------------------------------- (1 row) -- the hierarcy is successfully broken \d+ date_partitioned_table_100 Table "public.date_partitioned_table_100" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- id | integer | | | | plain | | time | date | | | | plain | | Partition key: RANGE ("time") -- now lets have some more complex partitioning hierarcies with -- tables on different schemas and constraints on the tables CREATE SCHEMA partition_parent_schema; CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time); CREATE SCHEMA partition_child_1_schema; CREATE TABLE partition_child_1_schema.child_1 (id int NOT NULL, time date ); CREATE SCHEMA partition_child_2_schema; CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date ); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('partition_parent_schema.parent_table'); generate_partition_information -------------------------------- RANGE ("time") (1 row) -- we should be able to drop and re-create the partitioned table using the command that Citus generate SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table'); NOTICE: schema "partition_parent_schema" already exists, skipping CONTEXT: SQL statement "CREATE SCHEMA IF NOT EXISTS partition_parent_schema AUTHORIZATION postgres" PL/pgSQL function drop_and_recreate_partitioned_table(regclass) line 15 at EXECUTE drop_and_recreate_partitioned_table ------------------------------------- (1 row) ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02'); SET search_path = 'partition_parent_schema'; ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); SELECT public.generate_partition_information('parent_table'); generate_partition_information -------------------------------- RANGE ("time") (1 row) -- lets 
get the attach partition commands SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------------ ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'); (1 row) SET search_path = 'partition_child_2_schema'; SELECT public.generate_alter_table_attach_partition_command('child_2'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------------ ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007'); (1 row) SET search_path = 'partition_parent_schema'; -- detach and attach the partition by the command generated by us \d+ parent_table Table "partition_parent_schema.parent_table" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- id | integer | | not null | | plain | | time | date | | | now() | plain | | Partition key: RANGE ("time") Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'), partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007') SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table'); detach_and_attach_partition ----------------------------- (1 row) -- check that both partitions are visiable \d+ parent_table Table "partition_parent_schema.parent_table" Column | Type | Collation | Nullable | Default | Storage | Stats target | Description --------+---------+-----------+----------+---------+---------+--------------+------------- id | integer | | not null | | plain | | time | date | | | now() | plain | | Partition key: RANGE ("time") Partitions: partition_child_1_schema.child_1 FOR VALUES FROM ('01-01-2009') TO ('01-02-2010'), partition_child_2_schema.child_2 FOR VALUES FROM ('01-01-2006') TO ('01-01-2007') -- some very simple checks that should error out SELECT public.generate_alter_table_attach_partition_command('parent_table'); ERROR: "parent_table" is not a partition SELECT public.generate_partition_information('partition_child_1_schema.child_1'); ERROR: "child_1" is not a parent table SELECT public.print_partitions('partition_child_1_schema.child_1'); ERROR: "child_1" is not a parent table -- now pring the partitions SELECT public.print_partitions('parent_table'); print_partitions ------------------ child_1,child_2 (1 row) SET search_path = 'public'; -- test multi column / expression partitioning with UNBOUNDED ranges CREATE OR REPLACE FUNCTION some_function(input_val text) RETURNS text LANGUAGE plpgsql IMMUTABLE AS $function$ BEGIN return reverse(input_val); END; $function$; CREATE TABLE multi_column_partitioned ( a int, b int, c text ) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c))); CREATE TABLE multi_column_partition_1( a int, b int, c text ); CREATE TABLE multi_column_partition_2( a int, b int, c text ); -- partitioning information SELECT generate_partition_information('multi_column_partitioned'); generate_partition_information ----------------------------------------------------- RANGE (a, (((a + b) + 
1)), some_function(upper(c))) (1 row) SELECT master_get_table_ddl_events('multi_column_partitioned'); master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------------------------------------ CREATE TABLE public.multi_column_partitioned (a integer, b integer, c text) PARTITION BY RANGE (a, (((a + b) + 1)), public.some_function(upper(c))) (1 row) SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); drop_and_recreate_partitioned_table ------------------------------------- (1 row) -- partitions and their ranges ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); SELECT generate_alter_table_attach_partition_command('multi_column_partition_1'); generate_alter_table_attach_partition_command -------------------------------------------------------------------------------------------------------------------------------------------- ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_1 FOR VALUES FROM (1, 10, 250) TO (1, 20, 250); (1 row) ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); SELECT generate_alter_table_attach_partition_command('multi_column_partition_2'); generate_alter_table_attach_partition_command ------------------------------------------------------------------------------------------------------------------------------------------------------------------ ALTER TABLE public.multi_column_partitioned ATTACH PARTITION public.multi_column_partition_2 FOR VALUES FROM (10, 1000, 2500) TO (MAXVALUE, MAXVALUE, MAXVALUE); (1 row) SELECT generate_alter_table_detach_partition_command('multi_column_partition_2'); generate_alter_table_detach_partition_command ----------------------------------------------------------------------------------------------- ALTER TABLE public.multi_column_partitioned DETACH PARTITION public.multi_column_partition_2; (1 row) -- finally a test with LIST partitioning CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ; SELECT generate_partition_information('list_partitioned'); generate_partition_information -------------------------------- LIST (col1) (1 row) SELECT master_get_table_ddl_events('list_partitioned'); master_get_table_ddl_events ------------------------------------------------------------------------------------------------------------------------- CREATE TABLE public.list_partitioned (col1 numeric, col2 numeric, col3 character varying(10)) PARTITION BY LIST (col1) (1 row) SELECT drop_and_recreate_partitioned_table('list_partitioned'); drop_and_recreate_partitioned_table ------------------------------------- (1 row) CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104); SELECT generate_alter_table_attach_partition_command('list_partitioned_1'); generate_alter_table_attach_partition_command ----------------------------------------------------------------------------------------------------------------------------------- ALTER TABLE public.list_partitioned ATTACH PARTITION public.list_partitioned_1 FOR VALUES IN ('100', '101', '102', '103', '104'); (1 row) -- also differentiate partitions and inhereted tables CREATE TABLE cities ( name text, population float, altitude int -- in feet ); CREATE TABLE capitals ( state char(2) ) INHERITS (cities); 
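-- Illustrative aside (not part of the generated test output): the
-- inheritance-vs-partitioning distinction exercised below can also be observed
-- directly in the system catalogs on PostgreSQL 10+, where partition children
-- carry relispartition = true while plain INHERITS children do not. A minimal
-- sketch, assuming the tables created above are still present:
SELECT c.relname, c.relispartition
FROM pg_class c
JOIN pg_inherits i ON i.inhrelid = c.oid
ORDER BY c.relname;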
-- returns true since capitals inherits from cities SELECT table_inherits('capitals'); table_inherits ---------------- t (1 row) -- although date_partition_2006 inherits from its parent -- returns false since the hierarcy is formed via partitioning SELECT table_inherits('date_partition_2006'); table_inherits ---------------- f (1 row) -- returns true since cities inherited by capitals SELECT table_inherited('cities'); table_inherited ----------------- t (1 row) -- although date_partitioned_table inherited by its partitions -- returns false since the hierarcy is formed via partitioning SELECT table_inherited('date_partitioned_table'); table_inherited ----------------- f (1 row) -- also these are not supported SELECT master_get_table_ddl_events('capitals'); ERROR: capitals is not a regular, foreign or partitioned table SELECT master_get_table_ddl_events('cities'); ERROR: cities is not a regular, foreign or partitioned table -- dropping parents frop the partitions DROP TABLE date_partitioned_table, multi_column_partitioned, list_partitioned, partition_parent_schema.parent_table, cities, capitals; citus-7.0.3/src/test/regress/expected/multi_partitioning_utils_0.out000066400000000000000000000407161317107136600260170ustar00rootroot00000000000000-- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION generate_alter_table_detach_partition_command(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION generate_alter_table_attach_partition_command(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION generate_partition_information(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION print_partitions(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION table_inherits(regclass) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION table_inherited(regclass) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION detach_and_attach_partition(partition_name regclass, parent_table_name regclass) RETURNS void LANGUAGE plpgsql VOLATILE AS $function$ DECLARE detach_partition_command text := ''; attach_partition_command text := ''; command_result text := ''; BEGIN -- first generate the command SELECT public.generate_alter_table_attach_partition_command(partition_name) INTO attach_partition_command; -- now genereate the detach command SELECT public.generate_alter_table_detach_partition_command(partition_name) INTO detach_partition_command; -- later detach the same partition EXECUTE detach_partition_command; -- not attach it again EXECUTE attach_partition_command; END; $function$; CREATE OR REPLACE FUNCTION drop_and_recreate_partitioned_table(parent_table_name regclass) RETURNS void LANGUAGE plpgsql VOLATILE AS $function$ DECLARE command text := ''; BEGIN -- first generate the command CREATE TABLE partitioned_table_create_commands AS SELECT master_get_table_ddl_events(parent_table_name::text); -- later detach the same partition EXECUTE 'DROP TABLE ' || parent_table_name::text || ';'; FOR command IN SELECT * FROM partitioned_table_create_commands LOOP -- can do some processing here EXECUTE command; END LOOP; DROP TABLE partitioned_table_create_commands; END; $function$; -- create a partitioned table CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: ...E TABLE date_partitioned_table(id int, time date) PARTITION ... 
^ -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('date_partitioned_table'); ERROR: relation "date_partitioned_table" does not exist LINE 1: SELECT generate_partition_information('date_partitioned_tabl... ^ -- we should be able to drop and re-create the partitioned table using the command that Citus generate SELECT drop_and_recreate_partitioned_table('date_partitioned_table'); ERROR: relation "date_partitioned_table" does not exist LINE 1: SELECT drop_and_recreate_partitioned_table('date_partitioned... ^ -- we should also be able to see the PARTITION BY ... for the parent table SELECT master_get_table_ddl_events('date_partitioned_table'); ERROR: relation "date_partitioned_table" does not exist -- now create the partitions CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE date_partition_2006 PARTITION OF date_partition... ^ CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01'); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE date_partition_2007 PARTITION OF date_partition... ^ -- we should be able to get the partitioning information after the partitions are created SELECT generate_partition_information('date_partitioned_table'); ERROR: relation "date_partitioned_table" does not exist LINE 1: SELECT generate_partition_information('date_partitioned_tabl... ^ -- lets get the attach partition commands SELECT generate_alter_table_attach_partition_command('date_partition_2006'); ERROR: relation "date_partition_2006" does not exist LINE 1: ...ECT generate_alter_table_attach_partition_command('date_part... ^ SELECT generate_alter_table_attach_partition_command('date_partition_2007'); ERROR: relation "date_partition_2007" does not exist LINE 1: ...ECT generate_alter_table_attach_partition_command('date_part... ^ -- detach and attach the partition by the command generated by us \d+ date_partitioned_table SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table'); ERROR: relation "date_partition_2007" does not exist LINE 1: SELECT detach_and_attach_partition('date_partition_2007', 'd... ^ -- check that both partitions are visiable \d+ date_partitioned_table -- make sure that inter shard commands work as expected -- assume that the shardId is 100 CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: ...LE date_partitioned_table_100 (id int, time date) PARTITION ... ^ CREATE TABLE date_partition_2007_100 (id int, time date ); -- now create the partitioning hierarcy SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' ); ERROR: syntax error at or near "ATTACH" LINE 1: SELECT worker_apply_inter_shard_ddl_command(referencing_shar... 
^ -- the hierarcy is successfully created \d+ date_partitioned_table_100 -- Citus can also get the DDL events for the partitions as regular tables SELECT master_get_table_ddl_events('date_partition_2007_100'); master_get_table_ddl_events ----------------------------------------------------------------------- CREATE TABLE public.date_partition_2007_100 (id integer, "time" date) (1 row) -- now break the partitioning hierarcy SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' ); ERROR: syntax error at or near "DETACH" LINE 1: SELECT worker_apply_inter_shard_ddl_command(referencing_shar... ^ -- the hierarcy is successfully broken \d+ date_partitioned_table_100 -- now lets have some more complex partitioning hierarcies with -- tables on different schemas and constraints on the tables CREATE SCHEMA partition_parent_schema; CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time); ERROR: syntax error at or near "PARTITION" LINE 1: ..._table (id int NOT NULL, time date DEFAULT now()) PARTITION ... ^ CREATE SCHEMA partition_child_1_schema; CREATE TABLE partition_child_1_schema.child_1 (id int NOT NULL, time date ); CREATE SCHEMA partition_child_2_schema; CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date ); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('partition_parent_schema.parent_table'); ERROR: relation "partition_parent_schema.parent_table" does not exist LINE 1: SELECT generate_partition_information('partition_parent_sche... ^ -- we should be able to drop and re-create the partitioned table using the command that Citus generate SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table'); ERROR: relation "partition_parent_schema.parent_table" does not exist LINE 1: SELECT drop_and_recreate_partitioned_table('partition_parent... ^ ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02'); ERROR: syntax error at or near "ATTACH" LINE 1: ALTER TABLE partition_parent_schema.parent_table ATTACH PART... ^ SET search_path = 'partition_parent_schema'; ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); ERROR: syntax error at or near "ATTACH" LINE 1: ALTER TABLE parent_table ATTACH PARTITION partition_child_2... ^ SELECT public.generate_partition_information('parent_table'); ERROR: relation "parent_table" does not exist LINE 1: SELECT public.generate_partition_information('parent_table')... 
^ -- lets get the attach partition commands SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1'); generate_alter_table_attach_partition_command ----------------------------------------------- (1 row) SET search_path = 'partition_child_2_schema'; SELECT public.generate_alter_table_attach_partition_command('child_2'); generate_alter_table_attach_partition_command ----------------------------------------------- (1 row) SET search_path = 'partition_parent_schema'; -- detach and attach the partition by the command generated by us \d+ parent_table SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table'); ERROR: relation "parent_table" does not exist LINE 1: ...ach_partition('partition_child_1_schema.child_1', 'parent_ta... ^ -- check that both partitions are visiable \d+ parent_table -- some very simple checks that should error out SELECT public.generate_alter_table_attach_partition_command('parent_table'); ERROR: relation "parent_table" does not exist LINE 1: ...lic.generate_alter_table_attach_partition_command('parent_ta... ^ SELECT public.generate_partition_information('partition_child_1_schema.child_1'); generate_partition_information -------------------------------- (1 row) SELECT public.print_partitions('partition_child_1_schema.child_1'); print_partitions ------------------ (1 row) -- now pring the partitions SELECT public.print_partitions('parent_table'); ERROR: relation "parent_table" does not exist LINE 1: SELECT public.print_partitions('parent_table'); ^ SET search_path = 'public'; -- test multi column / expression partitioning with UNBOUNDED ranges CREATE OR REPLACE FUNCTION some_function(input_val text) RETURNS text LANGUAGE plpgsql IMMUTABLE AS $function$ BEGIN return reverse(input_val); END; $function$; CREATE TABLE multi_column_partitioned ( a int, b int, c text ) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c))); ERROR: syntax error at or near "PARTITION" LINE 5: ) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c))); ^ CREATE TABLE multi_column_partition_1( a int, b int, c text ); CREATE TABLE multi_column_partition_2( a int, b int, c text ); -- partitioning information SELECT generate_partition_information('multi_column_partitioned'); ERROR: relation "multi_column_partitioned" does not exist LINE 1: SELECT generate_partition_information('multi_column_partitio... ^ SELECT master_get_table_ddl_events('multi_column_partitioned'); ERROR: relation "multi_column_partitioned" does not exist SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); ERROR: relation "multi_column_partitioned" does not exist LINE 1: SELECT drop_and_recreate_partitioned_table('multi_column_par... ^ -- partitions and their ranges ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); ERROR: syntax error at or near "ATTACH" LINE 1: ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_... ^ SELECT generate_alter_table_attach_partition_command('multi_column_partition_1'); generate_alter_table_attach_partition_command ----------------------------------------------- (1 row) ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); ERROR: syntax error at or near "ATTACH" LINE 1: ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_... 
^ SELECT generate_alter_table_attach_partition_command('multi_column_partition_2'); generate_alter_table_attach_partition_command ----------------------------------------------- (1 row) SELECT generate_alter_table_detach_partition_command('multi_column_partition_2'); generate_alter_table_detach_partition_command ----------------------------------------------- (1 row) -- finally a test with LIST partitioning CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1) ; ERROR: syntax error at or near "PARTITION" LINE 1: ...ed (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION ... ^ SELECT generate_partition_information('list_partitioned'); ERROR: relation "list_partitioned" does not exist LINE 1: SELECT generate_partition_information('list_partitioned'); ^ SELECT master_get_table_ddl_events('list_partitioned'); ERROR: relation "list_partitioned" does not exist SELECT drop_and_recreate_partitioned_table('list_partitioned'); ERROR: relation "list_partitioned" does not exist LINE 1: SELECT drop_and_recreate_partitioned_table('list_partitioned... ^ CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104); ERROR: syntax error at or near "PARTITION" LINE 1: CREATE TABLE list_partitioned_1 PARTITION OF list_partitione... ^ SELECT generate_alter_table_attach_partition_command('list_partitioned_1'); ERROR: relation "list_partitioned_1" does not exist LINE 1: ...ECT generate_alter_table_attach_partition_command('list_part... ^ -- also differentiate partitions and inhereted tables CREATE TABLE cities ( name text, population float, altitude int -- in feet ); CREATE TABLE capitals ( state char(2) ) INHERITS (cities); -- returns true since capitals inherits from cities SELECT table_inherits('capitals'); table_inherits ---------------- t (1 row) -- although date_partition_2006 inherits from its parent -- returns false since the hierarcy is formed via partitioning SELECT table_inherits('date_partition_2006'); ERROR: relation "date_partition_2006" does not exist LINE 1: SELECT table_inherits('date_partition_2006'); ^ -- returns true since cities inherited by capitals SELECT table_inherited('cities'); table_inherited ----------------- t (1 row) -- although date_partitioned_table inherited by its partitions -- returns false since the hierarcy is formed via partitioning SELECT table_inherited('date_partitioned_table'); ERROR: relation "date_partitioned_table" does not exist LINE 1: SELECT table_inherited('date_partitioned_table'); ^ -- also these are not supported SELECT master_get_table_ddl_events('capitals'); ERROR: capitals is not a regular, foreign or partitioned table SELECT master_get_table_ddl_events('cities'); ERROR: cities is not a regular, foreign or partitioned table -- dropping parents frop the partitions DROP TABLE date_partitioned_table, multi_column_partitioned, list_partitioned, partition_parent_schema.parent_table, cities, capitals; ERROR: table "date_partitioned_table" does not exist citus-7.0.3/src/test/regress/expected/multi_prepare_plsql.out000066400000000000000000000620231317107136600245150ustar00rootroot00000000000000-- -- MULTI_PREPARE_PLSQL -- -- Many of the queries are taken from other regression test files -- and converted into both plain SQL and PL/pgsql functions, which -- use prepared statements internally. 
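-- Illustrative aside (not part of the generated test output): PL/pgsql plans
-- its statements through the plan cache, so repeated calls behave like
-- PREPARE/EXECUTE. PostgreSQL normally runs the first five executions of a
-- cached statement with custom plans and only then considers switching to a
-- generic plan, which is why many functions below are executed six times.
-- A standalone sketch of the same effect, using a hypothetical scratch table:
CREATE TABLE plan_cache_demo (key int);
PREPARE plan_cache_demo_insert (int) AS INSERT INTO plan_cache_demo VALUES ($1);
EXECUTE plan_cache_demo_insert(1);
EXECUTE plan_cache_demo_insert(2);
EXECUTE plan_cache_demo_insert(3);
EXECUTE plan_cache_demo_insert(4);
EXECUTE plan_cache_demo_insert(5);
-- from the sixth execution on, a generic (parameter-independent) plan may be used
EXECUTE plan_cache_demo_insert(6);
DEALLOCATE plan_cache_demo_insert;
DROP TABLE plan_cache_demo;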
CREATE FUNCTION plpgsql_test_1() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_2() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_3() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_4() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_5() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_6(int) RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey AND l_suppkey > $1; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_7(text, text) RETURNS TABLE(supp_natadsion text, cusasdt_nation text, l_yeasdar int, sasdaum double precision) AS $$ DECLARE BEGIN RETURN QUERY SELECT supp_nation::text, cust_nation::text, l_year::int, sum(volume)::double precision AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier, lineitem, orders, customer, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation n1, nation n2 WHERE ( (n1.n_name = $1 AND n2.n_name = $2) OR (n1.n_name = $2 AND n2.n_name = $1) ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; END; $$ LANGUAGE plpgsql; SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); plpgsql_test_1 ---------------- 2984 (1 row) SELECT plpgsql_test_2(); plpgsql_test_2 ---------------- 11998 (1 row) SELECT plpgsql_test_3(); plpgsql_test_3 ---------------- 1955 (1 row) SELECT plpgsql_test_4(); plpgsql_test_4 ---------------- 7804 (1 row) SELECT plpgsql_test_5(); plpgsql_test_5 ---------------- 39 (1 row) -- run PL/pgsql functions with different parameters SELECT plpgsql_test_6(155); plpgsql_test_6 ---------------- 11811 (1 row) SELECT plpgsql_test_6(1555); plpgsql_test_6 ---------------- 10183 (1 row) SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); plpgsql_test_7 ---------------------------------------- ("UNITED KINGDOM",CHINA,1996,18560.22) (1 row) SELECT plpgsql_test_7('FRANCE', 'GERMANY'); plpgsql_test_7 --------------------------------- (GERMANY,FRANCE,1995,2399.2948) (1 row) -- now, PL/pgsql functions with random order SELECT plpgsql_test_6(155); plpgsql_test_6 ---------------- 11811 (1 row) SELECT plpgsql_test_3(); plpgsql_test_3 ---------------- 1955 (1 row) SELECT plpgsql_test_7('FRANCE', 'GERMANY'); plpgsql_test_7 --------------------------------- (GERMANY,FRANCE,1995,2399.2948) (1 row) SELECT plpgsql_test_5(); plpgsql_test_5 ---------------- 39 (1 row) SELECT plpgsql_test_1(); 
plpgsql_test_1 ---------------- 2984 (1 row) SELECT plpgsql_test_6(1555); plpgsql_test_6 ---------------- 10183 (1 row) SELECT plpgsql_test_4(); plpgsql_test_4 ---------------- 7804 (1 row) SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); plpgsql_test_7 ---------------------------------------- ("UNITED KINGDOM",CHINA,1996,18560.22) (1 row) SELECT plpgsql_test_2(); plpgsql_test_2 ---------------- 11998 (1 row) -- run the tests which do not require re-partition -- with real-time executor SET citus.task_executor_type TO 'real-time'; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); plpgsql_test_1 ---------------- 2984 (1 row) SELECT plpgsql_test_2(); plpgsql_test_2 ---------------- 11998 (1 row) -- run PL/pgsql functions with different parameters SELECT plpgsql_test_6(155); plpgsql_test_6 ---------------- 11811 (1 row) SELECT plpgsql_test_6(1555); plpgsql_test_6 ---------------- 10183 (1 row) -- test router executor parameterized PL/pgsql functions CREATE TABLE plpgsql_table ( key int, value int ); SELECT master_create_distributed_table('plpgsql_table','key','hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('plpgsql_table',4,1); master_create_worker_shards ----------------------------- (1 row) CREATE FUNCTION no_parameter_insert() RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key) VALUES (0); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT no_parameter_insert(); no_parameter_insert --------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert --------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert --------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert --------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert --------------------- (1 row) SELECT no_parameter_insert(); no_parameter_insert --------------------- (1 row) CREATE FUNCTION single_parameter_insert(key_arg int) RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key) VALUES (key_arg); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT single_parameter_insert(1); single_parameter_insert ------------------------- (1 row) SELECT single_parameter_insert(2); single_parameter_insert ------------------------- (1 row) SELECT single_parameter_insert(3); single_parameter_insert ------------------------- (1 row) SELECT single_parameter_insert(4); single_parameter_insert ------------------------- (1 row) SELECT single_parameter_insert(5); single_parameter_insert ------------------------- (1 row) SELECT single_parameter_insert(6); single_parameter_insert ------------------------- (1 row) CREATE FUNCTION double_parameter_insert(key_arg int, value_arg int) RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key, value) VALUES (key_arg, value_arg); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT double_parameter_insert(1, 10); double_parameter_insert ------------------------- (1 row) SELECT double_parameter_insert(2, 20); double_parameter_insert ------------------------- (1 row) SELECT double_parameter_insert(3, 30); double_parameter_insert ------------------------- (1 row) SELECT double_parameter_insert(4, 40); double_parameter_insert ------------------------- (1 row) SELECT double_parameter_insert(5, 50); double_parameter_insert ------------------------- (1 row) SELECT double_parameter_insert(6, 60); double_parameter_insert ------------------------- (1 row) CREATE 
FUNCTION non_partition_parameter_insert(value_arg int) RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key, value) VALUES (0, value_arg); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_insert(10); non_partition_parameter_insert -------------------------------- (1 row) SELECT non_partition_parameter_insert(20); non_partition_parameter_insert -------------------------------- (1 row) SELECT non_partition_parameter_insert(30); non_partition_parameter_insert -------------------------------- (1 row) SELECT non_partition_parameter_insert(40); non_partition_parameter_insert -------------------------------- (1 row) SELECT non_partition_parameter_insert(50); non_partition_parameter_insert -------------------------------- (1 row) SELECT non_partition_parameter_insert(60); non_partition_parameter_insert -------------------------------- (1 row) -- check inserted values SELECT * FROM plpgsql_table ORDER BY key, value; key | value -----+------- 0 | 10 0 | 20 0 | 30 0 | 40 0 | 50 0 | 60 0 | 0 | 0 | 0 | 0 | 0 | 1 | 10 1 | 2 | 20 2 | 3 | 30 3 | 4 | 40 4 | 5 | 50 5 | 6 | 60 6 | (24 rows) -- check router executor select CREATE FUNCTION router_partition_column_select(key_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = key_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_partition_column_select(1); router_partition_column_select -------------------------------- (1,10) (1,) (2 rows) SELECT router_partition_column_select(2); router_partition_column_select -------------------------------- (2,20) (2,) (2 rows) SELECT router_partition_column_select(3); router_partition_column_select -------------------------------- (3,30) (3,) (2 rows) SELECT router_partition_column_select(4); router_partition_column_select -------------------------------- (4,40) (4,) (2 rows) SELECT router_partition_column_select(5); router_partition_column_select -------------------------------- (5,50) (5,) (2 rows) SELECT router_partition_column_select(6); router_partition_column_select -------------------------------- (6,60) (6,) (2 rows) CREATE FUNCTION router_non_partition_column_select(value_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = 0 AND plpgsql_table.value = value_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_non_partition_column_select(10); router_non_partition_column_select ------------------------------------ (0,10) (1 row) SELECT router_non_partition_column_select(20); router_non_partition_column_select ------------------------------------ (0,20) (1 row) SELECT router_non_partition_column_select(30); router_non_partition_column_select ------------------------------------ (0,30) (1 row) SELECT router_non_partition_column_select(40); router_non_partition_column_select ------------------------------------ (0,40) (1 row) SELECT router_non_partition_column_select(50); router_non_partition_column_select ------------------------------------ (0,50) (1 row) SELECT router_non_partition_column_select(60); router_non_partition_column_select ------------------------------------ (0,60) (1 row) -- check real-time executor CREATE FUNCTION real_time_non_partition_column_select(value_arg int) RETURNS 
TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.value = value_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_non_partition_column_select(10); real_time_non_partition_column_select --------------------------------------- (0,10) (1,10) (2 rows) SELECT real_time_non_partition_column_select(20); real_time_non_partition_column_select --------------------------------------- (0,20) (2,20) (2 rows) SELECT real_time_non_partition_column_select(30); real_time_non_partition_column_select --------------------------------------- (0,30) (3,30) (2 rows) SELECT real_time_non_partition_column_select(40); real_time_non_partition_column_select --------------------------------------- (0,40) (4,40) (2 rows) SELECT real_time_non_partition_column_select(50); real_time_non_partition_column_select --------------------------------------- (0,50) (5,50) (2 rows) SELECT real_time_non_partition_column_select(60); real_time_non_partition_column_select --------------------------------------- (0,60) (6,60) (2 rows) CREATE FUNCTION real_time_partition_column_select(key_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = key_arg OR plpgsql_table.value = 10 ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_partition_column_select(1); real_time_partition_column_select ----------------------------------- (0,10) (1,10) (1,) (3 rows) SELECT real_time_partition_column_select(2); real_time_partition_column_select ----------------------------------- (0,10) (1,10) (2,20) (2,) (4 rows) SELECT real_time_partition_column_select(3); real_time_partition_column_select ----------------------------------- (0,10) (1,10) (3,30) (3,) (4 rows) SELECT real_time_partition_column_select(4); real_time_partition_column_select ----------------------------------- (0,10) (1,10) (4,40) (4,) (4 rows) SELECT real_time_partition_column_select(5); real_time_partition_column_select ----------------------------------- (0,10) (1,10) (5,50) (5,) (4 rows) SELECT real_time_partition_column_select(6); real_time_partition_column_select ----------------------------------- (0,10) (1,10) (6,60) (6,) (4 rows) -- check task-tracker executor SET citus.task_executor_type TO 'task-tracker'; CREATE FUNCTION task_tracker_non_partition_column_select(value_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.value = value_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_non_partition_column_select(10); task_tracker_non_partition_column_select ------------------------------------------ (0,10) (1,10) (2 rows) SELECT task_tracker_non_partition_column_select(20); task_tracker_non_partition_column_select ------------------------------------------ (0,20) (2,20) (2 rows) SELECT task_tracker_non_partition_column_select(30); task_tracker_non_partition_column_select ------------------------------------------ (0,30) (3,30) (2 rows) SELECT task_tracker_non_partition_column_select(40); task_tracker_non_partition_column_select ------------------------------------------ (0,40) (4,40) (2 rows) SELECT task_tracker_non_partition_column_select(50); 
task_tracker_non_partition_column_select ------------------------------------------ (0,50) (5,50) (2 rows) SELECT real_time_non_partition_column_select(60); real_time_non_partition_column_select --------------------------------------- (0,60) (6,60) (2 rows) CREATE FUNCTION task_tracker_partition_column_select(key_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = key_arg OR plpgsql_table.value = 10 ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_partition_column_select(1); task_tracker_partition_column_select -------------------------------------- (0,10) (1,10) (1,) (3 rows) SELECT task_tracker_partition_column_select(2); task_tracker_partition_column_select -------------------------------------- (0,10) (1,10) (2,20) (2,) (4 rows) SELECT task_tracker_partition_column_select(3); task_tracker_partition_column_select -------------------------------------- (0,10) (1,10) (3,30) (3,) (4 rows) SELECT task_tracker_partition_column_select(4); task_tracker_partition_column_select -------------------------------------- (0,10) (1,10) (4,40) (4,) (4 rows) SELECT task_tracker_partition_column_select(5); task_tracker_partition_column_select -------------------------------------- (0,10) (1,10) (5,50) (5,) (4 rows) SELECT task_tracker_partition_column_select(6); task_tracker_partition_column_select -------------------------------------- (0,10) (1,10) (6,60) (6,) (4 rows) SET citus.task_executor_type TO 'real-time'; -- check updates CREATE FUNCTION partition_parameter_update(int, int) RETURNS void as $$ BEGIN UPDATE plpgsql_table SET value = $2 WHERE key = $1; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_update(1, 11); partition_parameter_update ---------------------------- (1 row) SELECT partition_parameter_update(2, 21); partition_parameter_update ---------------------------- (1 row) SELECT partition_parameter_update(3, 31); partition_parameter_update ---------------------------- (1 row) SELECT partition_parameter_update(4, 41); partition_parameter_update ---------------------------- (1 row) SELECT partition_parameter_update(5, 51); partition_parameter_update ---------------------------- (1 row) SELECT partition_parameter_update(6, 61); partition_parameter_update ---------------------------- (1 row) CREATE FUNCTION non_partition_parameter_update(int, int) RETURNS void as $$ BEGIN UPDATE plpgsql_table SET value = $2 WHERE key = 0 AND value = $1; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_update(10, 12); non_partition_parameter_update -------------------------------- (1 row) SELECT non_partition_parameter_update(20, 22); non_partition_parameter_update -------------------------------- (1 row) SELECT non_partition_parameter_update(30, 32); non_partition_parameter_update -------------------------------- (1 row) SELECT non_partition_parameter_update(40, 42); non_partition_parameter_update -------------------------------- (1 row) SELECT non_partition_parameter_update(50, 52); non_partition_parameter_update -------------------------------- (1 row) SELECT non_partition_parameter_update(60, 62); non_partition_parameter_update -------------------------------- (1 row) -- check table after updates SELECT * FROM plpgsql_table ORDER BY key, value; key | value -----+------- 0 | 12 0 | 22 0 | 32 0 | 42 0 | 
52 0 | 62 0 | 0 | 0 | 0 | 0 | 0 | 1 | 11 1 | 11 2 | 21 2 | 21 3 | 31 3 | 31 4 | 41 4 | 41 5 | 51 5 | 51 6 | 61 6 | 61 (24 rows) -- check deletes CREATE FUNCTION partition_parameter_delete(int, int) RETURNS void as $$ BEGIN DELETE FROM plpgsql_table WHERE key = $1 AND value = $2; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_delete(1, 11); partition_parameter_delete ---------------------------- (1 row) SELECT partition_parameter_delete(2, 21); partition_parameter_delete ---------------------------- (1 row) SELECT partition_parameter_delete(3, 31); partition_parameter_delete ---------------------------- (1 row) SELECT partition_parameter_delete(4, 41); partition_parameter_delete ---------------------------- (1 row) SELECT partition_parameter_delete(5, 51); partition_parameter_delete ---------------------------- (1 row) SELECT partition_parameter_delete(6, 61); partition_parameter_delete ---------------------------- (1 row) CREATE FUNCTION non_partition_parameter_delete(int) RETURNS void as $$ BEGIN DELETE FROM plpgsql_table WHERE key = 0 AND value = $1; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete(12); non_partition_parameter_delete -------------------------------- (1 row) SELECT non_partition_parameter_delete(22); non_partition_parameter_delete -------------------------------- (1 row) SELECT non_partition_parameter_delete(32); non_partition_parameter_delete -------------------------------- (1 row) SELECT non_partition_parameter_delete(42); non_partition_parameter_delete -------------------------------- (1 row) SELECT non_partition_parameter_delete(52); non_partition_parameter_delete -------------------------------- (1 row) SELECT non_partition_parameter_delete(62); non_partition_parameter_delete -------------------------------- (1 row) -- check table after deletes SELECT * FROM plpgsql_table ORDER BY key, value; key | value -----+------- 0 | 0 | 0 | 0 | 0 | 0 | (6 rows) -- check whether we can handle execute parameters CREATE TABLE execute_parameter_test (key int, val date); SELECT create_distributed_table('execute_parameter_test', 'key'); create_distributed_table -------------------------- (1 row) DO $$ BEGIN EXECUTE 'INSERT INTO execute_parameter_test VALUES (3, $1)' USING date '2000-01-01'; EXECUTE 'INSERT INTO execute_parameter_test VALUES (3, $1)' USING NULL::date; END; $$; DROP TABLE execute_parameter_test; -- check whether we can handle parameters + default CREATE TABLE func_parameter_test ( key text NOT NULL, seq int4 NOT NULL, created_at timestamptz NOT NULL DEFAULT now(), updated_at timestamptz NOT NULL DEFAULT now(), PRIMARY KEY (key, seq) ); SELECT create_distributed_table('func_parameter_test', 'key'); create_distributed_table -------------------------- (1 row) CREATE OR REPLACE FUNCTION insert_with_max(pkey text) RETURNS VOID AS $BODY$ DECLARE max_seq int4; BEGIN SELECT MAX(seq) INTO max_seq FROM func_parameter_test WHERE func_parameter_test.key = pkey; IF max_seq IS NULL THEN max_seq := 0; END IF; INSERT INTO func_parameter_test(key, seq) VALUES (pkey, max_seq + 1); END; $BODY$ LANGUAGE plpgsql; SELECT insert_with_max('key'); insert_with_max ----------------- (1 row) SELECT insert_with_max('key'); insert_with_max ----------------- (1 row) SELECT insert_with_max('key'); insert_with_max ----------------- (1 row) SELECT insert_with_max('key'); insert_with_max ----------------- (1 row) SELECT insert_with_max('key'); insert_with_max 
----------------- (1 row) SELECT insert_with_max('key'); insert_with_max ----------------- (1 row) SELECT key, seq FROM func_parameter_test ORDER BY seq; key | seq -----+----- key | 1 key | 2 key | 3 key | 4 key | 5 key | 6 (6 rows) DROP FUNCTION insert_with_max(text); DROP TABLE func_parameter_test; -- test prepared DDL, mainly to verify we don't mess up the query tree SET citus.multi_shard_commit_protocol TO '2pc'; CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); create_distributed_table -------------------------- (1 row) CREATE OR REPLACE FUNCTION ddl_in_plpgsql() RETURNS VOID AS $BODY$ BEGIN CREATE INDEX prepared_index ON public.prepare_ddl(x); DROP INDEX prepared_index; END; $BODY$ LANGUAGE plpgsql; SELECT ddl_in_plpgsql(); ddl_in_plpgsql ---------------- (1 row) SELECT ddl_in_plpgsql(); ddl_in_plpgsql ---------------- (1 row) -- test prepared COPY CREATE OR REPLACE FUNCTION copy_in_plpgsql() RETURNS VOID AS $BODY$ BEGIN COPY prepare_ddl (x) FROM PROGRAM 'echo 1' WITH CSV; END; $BODY$ LANGUAGE plpgsql; SELECT copy_in_plpgsql(); copy_in_plpgsql ----------------- (1 row) SELECT copy_in_plpgsql(); copy_in_plpgsql ----------------- (1 row) DROP FUNCTION ddl_in_plpgsql(); DROP FUNCTION copy_in_plpgsql(); DROP TABLE prepare_ddl; -- clean-up functions DROP FUNCTION plpgsql_test_1(); DROP FUNCTION plpgsql_test_2(); DROP FUNCTION plpgsql_test_3(); DROP FUNCTION plpgsql_test_4(); DROP FUNCTION plpgsql_test_5(); DROP FUNCTION plpgsql_test_6(int); DROP FUNCTION plpgsql_test_7(text, text); DROP FUNCTION no_parameter_insert(); DROP FUNCTION single_parameter_insert(int); DROP FUNCTION double_parameter_insert(int, int); DROP FUNCTION non_partition_parameter_insert(int); DROP FUNCTION router_partition_column_select(int); DROP FUNCTION router_non_partition_column_select(int); DROP FUNCTION real_time_non_partition_column_select(int); DROP FUNCTION real_time_partition_column_select(int); DROP FUNCTION task_tracker_non_partition_column_select(int); DROP FUNCTION task_tracker_partition_column_select(int); DROP FUNCTION partition_parameter_update(int, int); DROP FUNCTION non_partition_parameter_update(int, int); DROP FUNCTION partition_parameter_delete(int, int); DROP FUNCTION non_partition_parameter_delete(int); citus-7.0.3/src/test/regress/expected/multi_prepare_sql.out000066400000000000000000000637551317107136600241760ustar00rootroot00000000000000-- -- MULTI_PREPARE_SQL -- -- Tests covering PREPARE statements. Many of the queries are -- taken from other regression test files and converted into -- prepared statements. 
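-- Illustrative aside (not part of the generated test output): session-level
-- prepared statements such as the ones created below can be inspected through
-- the pg_prepared_statements system view. The statement name in this sketch is
-- hypothetical:
PREPARE prepared_inspection_demo (int) AS SELECT $1;
SELECT name, from_sql FROM pg_prepared_statements WHERE name = 'prepared_inspection_demo';
DEALLOCATE prepared_inspection_demo;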
PREPARE prepared_test_1 AS SELECT count(*) FROM orders; PREPARE prepared_test_2 AS SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey; PREPARE prepared_test_3 AS SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; PREPARE prepared_test_4 AS SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; PREPARE prepared_test_5 AS SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; PREPARE prepared_test_6(int) AS SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey AND l_suppkey > $1; PREPARE prepared_test_7(text, text) AS SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier, lineitem, orders, customer, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation n1, nation n2 WHERE ( (n1.n_name = $1 AND n2.n_name = $2) OR (n1.n_name = $2 AND n2.n_name = $1) ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- execute prepared statements EXECUTE prepared_test_1; count ------- 2984 (1 row) EXECUTE prepared_test_2; count ------- 11998 (1 row) EXECUTE prepared_test_3; count ------- 1955 (1 row) EXECUTE prepared_test_4; count ------- 7804 (1 row) EXECUTE prepared_test_5; count ------- 39 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); count ------- 11811 (1 row) EXECUTE prepared_test_6(1555); count ------- 10183 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+------------ UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) -- now, execute prepared statements with random order EXECUTE prepared_test_6(155); count ------- 11811 (1 row) EXECUTE prepared_test_3; count ------- 1955 (1 row) EXECUTE prepared_test_7('FRANCE', 'GERMANY'); supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) EXECUTE prepared_test_5; count ------- 39 (1 row) EXECUTE prepared_test_1; count ------- 2984 (1 row) EXECUTE prepared_test_6(1555); count ------- 10183 (1 row) EXECUTE prepared_test_4; count ------- 7804 (1 row) EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+------------ UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) EXECUTE prepared_test_2; count ------- 11998 (1 row) -- CREATE TABLE ... 
AS EXECUTE prepared_statement tests CREATE TEMP TABLE prepared_sql_test_7 AS EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); SELECT * from prepared_sql_test_7; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+------------ UNITED KINGDOM | CHINA | 1996 | 18560.2200 (1 row) -- now, run some of the tests with real-time executor SET citus.task_executor_type TO 'real-time'; -- execute prepared statements EXECUTE prepared_test_1; count ------- 2984 (1 row) EXECUTE prepared_test_2; count ------- 11998 (1 row) -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); count ------- 11811 (1 row) EXECUTE prepared_test_6(1555); count ------- 10183 (1 row) -- test router executor with parameterized non-partition columns -- create a custom type which also exists on worker nodes CREATE TYPE test_composite_type AS ( i integer, i2 integer ); CREATE TABLE router_executor_table ( id bigint NOT NULL, comment varchar(20), stats test_composite_type ); SELECT master_create_distributed_table('router_executor_table', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('router_executor_table', 2, 2); master_create_worker_shards ----------------------------- (1 row) -- test parameterized inserts PREPARE prepared_insert(varchar(20)) AS INSERT INTO router_executor_table VALUES (1, $1, $2); EXECUTE prepared_insert('comment-1', '(1, 10)'); EXECUTE prepared_insert('comment-2', '(2, 20)'); EXECUTE prepared_insert('comment-3', '(3, 30)'); EXECUTE prepared_insert('comment-4', '(4, 40)'); EXECUTE prepared_insert('comment-5', '(5, 50)'); EXECUTE prepared_insert('comment-6', '(6, 60)'); SELECT * FROM router_executor_table ORDER BY comment; id | comment | stats ----+-----------+-------- 1 | comment-1 | (1,10) 1 | comment-2 | (2,20) 1 | comment-3 | (3,30) 1 | comment-4 | (4,40) 1 | comment-5 | (5,50) 1 | comment-6 | (6,60) (6 rows) -- test parameterized selects PREPARE prepared_select(integer, integer) AS SELECT count(*) FROM router_executor_table WHERE id = 1 AND stats = ROW($1, $2)::test_composite_type; EXECUTE prepared_select(1, 10); count ------- 1 (1 row) EXECUTE prepared_select(2, 20); count ------- 1 (1 row) EXECUTE prepared_select(3, 30); count ------- 1 (1 row) EXECUTE prepared_select(4, 40); count ------- 1 (1 row) EXECUTE prepared_select(5, 50); count ------- 1 (1 row) EXECUTE prepared_select(6, 60); count ------- 1 (1 row) -- Test that parameterized partition column for an insert is supported PREPARE prepared_partition_column_insert(bigint) AS INSERT INTO router_executor_table VALUES ($1, 'arsenous', '(1,10)'); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_partition_column_insert(1); EXECUTE prepared_partition_column_insert(2); EXECUTE prepared_partition_column_insert(3); EXECUTE prepared_partition_column_insert(4); EXECUTE prepared_partition_column_insert(5); EXECUTE prepared_partition_column_insert(6); DROP TYPE test_composite_type CASCADE; NOTICE: drop cascades to table router_executor_table column stats -- test router executor with prepare statements CREATE TABLE prepare_table ( key int, value int ); SELECT master_create_distributed_table('prepare_table','key','hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('prepare_table',4,1); master_create_worker_shards ----------------------------- (1 row) PREPARE prepared_no_parameter_insert AS INSERT INTO 
prepare_table (key) VALUES (0); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; PREPARE prepared_single_parameter_insert(int) AS INSERT INTO prepare_table (key) VALUES ($1); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_single_parameter_insert(1); EXECUTE prepared_single_parameter_insert(2); EXECUTE prepared_single_parameter_insert(3); EXECUTE prepared_single_parameter_insert(4); EXECUTE prepared_single_parameter_insert(5); EXECUTE prepared_single_parameter_insert(6); PREPARE prepared_double_parameter_insert(int, int) AS INSERT INTO prepare_table (key, value) VALUES ($1, $2); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_double_parameter_insert(1, 10); EXECUTE prepared_double_parameter_insert(2, 20); EXECUTE prepared_double_parameter_insert(3, 30); EXECUTE prepared_double_parameter_insert(4, 40); EXECUTE prepared_double_parameter_insert(5, 50); EXECUTE prepared_double_parameter_insert(6, 60); PREPARE prepared_multi_insert(int, int) AS INSERT INTO prepare_table (key, value) VALUES ($1, $2), ($1 + 1, $2 + 10); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_multi_insert( 7, 70); EXECUTE prepared_multi_insert( 9, 90); EXECUTE prepared_multi_insert(11, 110); EXECUTE prepared_multi_insert(13, 130); EXECUTE prepared_multi_insert(15, 150); EXECUTE prepared_multi_insert(17, 170); PREPARE prepared_non_partition_parameter_insert(int) AS INSERT INTO prepare_table (key, value) VALUES (0, $1); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_non_partition_parameter_insert(10); EXECUTE prepared_non_partition_parameter_insert(20); EXECUTE prepared_non_partition_parameter_insert(30); EXECUTE prepared_non_partition_parameter_insert(40); EXECUTE prepared_non_partition_parameter_insert(50); EXECUTE prepared_non_partition_parameter_insert(60); -- check inserted values SELECT * FROM prepare_table ORDER BY key, value; key | value -----+------- 0 | 10 0 | 20 0 | 30 0 | 40 0 | 50 0 | 60 0 | 0 | 0 | 0 | 0 | 0 | 1 | 10 1 | 2 | 20 2 | 3 | 30 3 | 4 | 40 4 | 5 | 50 5 | 6 | 60 6 | 7 | 70 8 | 80 9 | 90 10 | 100 11 | 110 12 | 120 13 | 130 14 | 140 15 | 150 16 | 160 17 | 170 18 | 180 (36 rows) SELECT master_modify_multiple_shards('DELETE FROM prepare_table WHERE value >= 70'); master_modify_multiple_shards ------------------------------- 12 (1 row) -- check router executor select PREPARE prepared_router_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = $1 ORDER BY key, value; EXECUTE prepared_router_partition_column_select(1); key | value -----+------- 1 | 10 1 | (2 rows) EXECUTE prepared_router_partition_column_select(2); key | value -----+------- 2 | 20 2 | (2 rows) EXECUTE prepared_router_partition_column_select(3); key | value -----+------- 3 | 30 3 | (2 rows) EXECUTE prepared_router_partition_column_select(4); key | value -----+------- 4 | 40 4 | (2 rows) EXECUTE prepared_router_partition_column_select(5); key | value -----+------- 5 | 50 5 | (2 rows) EXECUTE prepared_router_partition_column_select(6); key | value -----+------- 6 | 60 6 | (2 rows) PREPARE prepared_router_non_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = 0 AND 
prepare_table.value = $1 ORDER BY key, value; EXECUTE prepared_router_non_partition_column_select(10); key | value -----+------- 0 | 10 (1 row) EXECUTE prepared_router_non_partition_column_select(20); key | value -----+------- 0 | 20 (1 row) EXECUTE prepared_router_non_partition_column_select(30); key | value -----+------- 0 | 30 (1 row) EXECUTE prepared_router_non_partition_column_select(40); key | value -----+------- 0 | 40 (1 row) EXECUTE prepared_router_non_partition_column_select(50); key | value -----+------- 0 | 50 (1 row) EXECUTE prepared_router_non_partition_column_select(60); key | value -----+------- 0 | 60 (1 row) -- check real-time executor PREPARE prepared_real_time_non_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.value = $1 ORDER BY key, value; EXECUTE prepared_real_time_non_partition_column_select(10); key | value -----+------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(20); key | value -----+------- 0 | 20 2 | 20 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(30); key | value -----+------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(40); key | value -----+------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(50); key | value -----+------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_real_time_non_partition_column_select(60); key | value -----+------- 0 | 60 6 | 60 (2 rows) PREPARE prepared_real_time_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = $1 OR prepare_table.value = 10 ORDER BY key, value; EXECUTE prepared_real_time_partition_column_select(1); key | value -----+------- 0 | 10 1 | 10 1 | (3 rows) EXECUTE prepared_real_time_partition_column_select(2); key | value -----+------- 0 | 10 1 | 10 2 | 20 2 | (4 rows) EXECUTE prepared_real_time_partition_column_select(3); key | value -----+------- 0 | 10 1 | 10 3 | 30 3 | (4 rows) EXECUTE prepared_real_time_partition_column_select(4); key | value -----+------- 0 | 10 1 | 10 4 | 40 4 | (4 rows) EXECUTE prepared_real_time_partition_column_select(5); key | value -----+------- 0 | 10 1 | 10 5 | 50 5 | (4 rows) EXECUTE prepared_real_time_partition_column_select(6); key | value -----+------- 0 | 10 1 | 10 6 | 60 6 | (4 rows) -- check task-tracker executor SET citus.task_executor_type TO 'task-tracker'; PREPARE prepared_task_tracker_non_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.value = $1 ORDER BY key, value; EXECUTE prepared_task_tracker_non_partition_column_select(10); key | value -----+------- 0 | 10 1 | 10 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(20); key | value -----+------- 0 | 20 2 | 20 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(30); key | value -----+------- 0 | 30 3 | 30 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(40); key | value -----+------- 0 | 40 4 | 40 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(50); key | value -----+------- 0 | 50 5 | 50 (2 rows) EXECUTE prepared_task_tracker_non_partition_column_select(60); key | value -----+------- 0 | 60 6 | 60 (2 rows) PREPARE prepared_task_tracker_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = $1 OR prepare_table.value = 10 ORDER BY key, value; EXECUTE 
prepared_task_tracker_partition_column_select(1); key | value -----+------- 0 | 10 1 | 10 1 | (3 rows) EXECUTE prepared_task_tracker_partition_column_select(2); key | value -----+------- 0 | 10 1 | 10 2 | 20 2 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(3); key | value -----+------- 0 | 10 1 | 10 3 | 30 3 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(4); key | value -----+------- 0 | 10 1 | 10 4 | 40 4 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(5); key | value -----+------- 0 | 10 1 | 10 5 | 50 5 | (4 rows) EXECUTE prepared_task_tracker_partition_column_select(6); key | value -----+------- 0 | 10 1 | 10 6 | 60 6 | (4 rows) SET citus.task_executor_type TO 'real-time'; -- check updates PREPARE prepared_partition_parameter_update(int, int) AS UPDATE prepare_table SET value = $2 WHERE key = $1; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_partition_parameter_update(1, 11); EXECUTE prepared_partition_parameter_update(2, 21); EXECUTE prepared_partition_parameter_update(3, 31); EXECUTE prepared_partition_parameter_update(4, 41); EXECUTE prepared_partition_parameter_update(5, 51); EXECUTE prepared_partition_parameter_update(6, 61); PREPARE prepared_non_partition_parameter_update(int, int) AS UPDATE prepare_table SET value = $2 WHERE key = 0 AND value = $1; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_non_partition_parameter_update(10, 12); EXECUTE prepared_non_partition_parameter_update(20, 22); EXECUTE prepared_non_partition_parameter_update(30, 32); EXECUTE prepared_non_partition_parameter_update(40, 42); EXECUTE prepared_non_partition_parameter_update(50, 52); EXECUTE prepared_non_partition_parameter_update(60, 62); -- check after updates SELECT * FROM prepare_table ORDER BY key, value; key | value -----+------- 0 | 12 0 | 22 0 | 32 0 | 42 0 | 52 0 | 62 0 | 0 | 0 | 0 | 0 | 0 | 1 | 11 1 | 11 2 | 21 2 | 21 3 | 31 3 | 31 4 | 41 4 | 41 5 | 51 5 | 51 6 | 61 6 | 61 (24 rows) -- check deletes PREPARE prepared_partition_parameter_delete(int, int) AS DELETE FROM prepare_table WHERE key = $1 AND value = $2; EXECUTE prepared_partition_parameter_delete(1, 11); EXECUTE prepared_partition_parameter_delete(2, 21); EXECUTE prepared_partition_parameter_delete(3, 31); EXECUTE prepared_partition_parameter_delete(4, 41); EXECUTE prepared_partition_parameter_delete(5, 51); EXECUTE prepared_partition_parameter_delete(6, 61); PREPARE prepared_non_partition_parameter_delete(int) AS DELETE FROM prepare_table WHERE key = 0 AND value = $1; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_non_partition_parameter_delete(12); EXECUTE prepared_non_partition_parameter_delete(22); EXECUTE prepared_non_partition_parameter_delete(32); EXECUTE prepared_non_partition_parameter_delete(42); EXECUTE prepared_non_partition_parameter_delete(52); EXECUTE prepared_non_partition_parameter_delete(62); -- check after deletes SELECT * FROM prepare_table ORDER BY key, value; key | value -----+------- 0 | 0 | 0 | 0 | 0 | 0 | (6 rows) -- Testing parameters + function evaluation CREATE TABLE prepare_func_table ( key text, value1 int, value2 text, value3 timestamptz DEFAULT now() ); SELECT create_distributed_table('prepare_func_table', 'key'); create_distributed_table -------------------------- (1 row) -- test function evaluation with parameters in an expression PREPARE prepared_function_evaluation_insert(int) AS INSERT INTO prepare_func_table (key, value1) VALUES ($1+1, 0*random()); -- execute 6 times to 
trigger prepared statement usage EXECUTE prepared_function_evaluation_insert(1); EXECUTE prepared_function_evaluation_insert(2); EXECUTE prepared_function_evaluation_insert(3); EXECUTE prepared_function_evaluation_insert(4); EXECUTE prepared_function_evaluation_insert(5); EXECUTE prepared_function_evaluation_insert(6); SELECT key, value1 FROM prepare_func_table ORDER BY key; key | value1 -----+-------- 2 | 0 3 | 0 4 | 0 5 | 0 6 | 0 7 | 0 (6 rows) TRUNCATE prepare_func_table; -- make it a bit harder: parameter wrapped in a function call PREPARE wrapped_parameter_evaluation(text,text[]) AS INSERT INTO prepare_func_table (key,value2) VALUES ($1,array_to_string($2,'')); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); SELECT key, value2 FROM prepare_func_table; key | value2 -----+-------- key | value key | value key | value key | value key | value key | value (6 rows) DROP TABLE prepare_func_table; -- Text columns can give issues when there is an implicit cast from varchar CREATE TABLE text_partition_column_table ( key text NOT NULL, value int ); SELECT create_distributed_table('text_partition_column_table', 'key'); create_distributed_table -------------------------- (1 row) PREPARE prepared_relabel_insert(varchar) AS INSERT INTO text_partition_column_table VALUES ($1, 1); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); SELECT key, value FROM text_partition_column_table ORDER BY key; key | value ------+------- test | 1 test | 1 test | 1 test | 1 test | 1 test | 1 (6 rows) DROP TABLE text_partition_column_table; -- Domain type columns can give issues CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$'); SELECT run_command_on_workers($$ CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$') $$); run_command_on_workers ------------------------------------- (localhost,57637,t,"CREATE DOMAIN") (localhost,57638,t,"CREATE DOMAIN") (2 rows) CREATE TABLE domain_partition_column_table ( key test_key NOT NULL, value int ); SELECT create_distributed_table('domain_partition_column_table', 'key'); create_distributed_table -------------------------- (1 row) PREPARE prepared_coercion_to_domain_insert(text) AS INSERT INTO domain_partition_column_table VALUES ($1, 1); EXECUTE prepared_coercion_to_domain_insert('test-1'); EXECUTE prepared_coercion_to_domain_insert('test-2'); EXECUTE prepared_coercion_to_domain_insert('test-3'); EXECUTE prepared_coercion_to_domain_insert('test-4'); EXECUTE prepared_coercion_to_domain_insert('test-5'); EXECUTE prepared_coercion_to_domain_insert('test-6'); SELECT key, value FROM domain_partition_column_table ORDER BY key; key | value --------+------- test-1 | 1 test-2 | 1 test-3 | 1 test-4 | 1 test-5 | 1 test-6 | 1 (6 rows) DROP TABLE domain_partition_column_table; -- verify we re-evaluate volatile functions every time CREATE TABLE http_request ( site_id INT, ingest_time TIMESTAMPTZ DEFAULT now(), url TEXT, request_country TEXT, ip_address TEXT, status_code INT, response_time_msec INT ); SELECT create_distributed_table('http_request', 'site_id'); 
create_distributed_table -------------------------- (1 row) PREPARE FOO AS INSERT INTO http_request ( site_id, ingest_time, url, request_country, ip_address, status_code, response_time_msec ) VALUES ( 1, clock_timestamp(), 'http://example.com/path', 'USA', inet '88.250.10.123', 200, 10 ); EXECUTE foo; EXECUTE foo; EXECUTE foo; EXECUTE foo; EXECUTE foo; EXECUTE foo; SELECT count(distinct ingest_time) FROM http_request WHERE site_id = 1; count ------- 6 (1 row) DROP TABLE http_request; -- verify placement state updates invalidate shard state -- -- We use a immutable function to check for that. The planner will -- evaluate it once during planning, during execution it should never -- be reached (no rows). That way we'll see a NOTICE when -- (re-)planning, but not when executing. -- first create helper function CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :worker_1_port CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :worker_2_port CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :master_port -- test table CREATE TABLE test_table (test_id integer NOT NULL, data text); SELECT master_create_distributed_table('test_table', 'test_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_table', 2, 2); master_create_worker_shards ----------------------------- (1 row) -- avoid 9.6+ only context messages \set VERBOSITY terse --plain statement, needs planning SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); NOTICE: replanning count ------- (0 rows) --prepared statement PREPARE countsome AS SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); EXECUTE countsome; -- should indicate planning NOTICE: replanning count ------- (0 rows) EXECUTE countsome; -- no replanning count ------- (0 rows) -- invalidate half of the placements using SQL, should invalidate via trigger UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) AND nodeport = :worker_1_port; EXECUTE countsome; -- should indicate replanning NOTICE: replanning count ------- (0 rows) EXECUTE countsome; -- no replanning count ------- (0 rows) -- repair shards, should invalidate via master_metadata_utility.c SELECT master_copy_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) AND nodeport = :worker_1_port; master_copy_shard_placement ----------------------------- (2 rows) EXECUTE countsome; -- should indicate replanning NOTICE: replanning count ------- (0 rows) EXECUTE countsome; -- no replanning count ------- (0 rows) -- reset \set VERBOSITY default -- clean-up prepared statements DEALLOCATE ALL; DROP TABLE prepare_table; citus-7.0.3/src/test/regress/expected/multi_prune_shard_list.out000066400000000000000000000173751317107136600252230ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 800000; -- =================================================================== -- create test functions -- 
=================================================================== CREATE FUNCTION prune_using_no_values(regclass) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION prune_using_single_value(regclass, text) RETURNS text[] AS 'citus' LANGUAGE C; CREATE FUNCTION prune_using_either_value(regclass, text, text) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION prune_using_both_values(regclass, text, text) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION debug_equality_expression(regclass) RETURNS cstring AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION print_sorted_shard_intervals(regclass) RETURNS text[] AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test shard pruning functionality -- =================================================================== -- create distributed table and observe shard pruning CREATE TABLE pruning ( species text, last_pruned date, plant_id integer ); SELECT master_create_distributed_table('pruning', 'species', 'hash'); master_create_distributed_table --------------------------------- (1 row) -- create worker shards SELECT master_create_worker_shards('pruning', 4, 1); master_create_worker_shards ----------------------------- (1 row) -- with no values, expect all shards SELECT prune_using_no_values('pruning'); prune_using_no_values ------------------------------- {800000,800001,800002,800003} (1 row) -- with a single value, expect a single shard SELECT prune_using_single_value('pruning', 'tomato'); prune_using_single_value -------------------------- {800002} (1 row) -- null values should result in no pruning SELECT prune_using_single_value('pruning', NULL); prune_using_single_value ------------------------------- {800000,800001,800002,800003} (1 row) -- build an OR clause and expect more than one shard SELECT prune_using_either_value('pruning', 'tomato', 'petunia'); prune_using_either_value -------------------------- {800002,800001} (1 row) -- an AND clause with values on different shards returns no shards SELECT prune_using_both_values('pruning', 'tomato', 'petunia'); prune_using_both_values ------------------------- {} (1 row) -- even if both values are on the same shard, a value can't be equal to two others SELECT prune_using_both_values('pruning', 'tomato', 'rose'); prune_using_both_values ------------------------- {} (1 row) -- unit test of the equality expression generation code SELECT debug_equality_expression('pruning'); debug_equality_expression -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- {OPEXPR :opno 98 :opfuncid 67 :opresulttype 16 :opretset false :opcollid 0 :inputcollid 100 :args ({VAR :varno 1 :varattno 1 :vartype 25 :vartypmod -1 :varcollid 100 :varlevelsup 0 :varnoold 1 :varoattno 1 :location -1} {CONST :consttype 25 :consttypmod -1 :constcollid 100 :constlen -1 :constbyval false :constisnull true :location -1 :constvalue <>}) :location -1} (1 row) -- print the initial ordering of shard intervals SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- {800000,800001,800002,800003} (1 row) -- update only min value for one shard UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL
WHERE shardid = 103071; SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- {800000,800001,800002,800003} (1 row) -- now lets have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103072; SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- {800000,800001,800002,800003} (1 row) -- now lets have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103070; SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- {800000,800001,800002,800003} (1 row) -- all shard placements are uninitialized UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103073; SELECT print_sorted_shard_intervals('pruning'); print_sorted_shard_intervals ------------------------------- {800000,800001,800002,800003} (1 row) -- create range distributed table observe shard pruning CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer ); SELECT master_create_distributed_table('pruning_range', 'species', 'range'); master_create_distributed_table --------------------------------- (1 row) -- create worker shards SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- 800004 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- 800005 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- 800006 (1 row) SELECT master_create_empty_shard('pruning_range'); master_create_empty_shard --------------------------- 800007 (1 row) -- now the comparison is done via the partition column type, which is text UPDATE pg_dist_shard SET shardminvalue = 'a', shardmaxvalue = 'b' WHERE shardid = 103074; UPDATE pg_dist_shard SET shardminvalue = 'c', shardmaxvalue = 'd' WHERE shardid = 103075; UPDATE pg_dist_shard SET shardminvalue = 'e', shardmaxvalue = 'f' WHERE shardid = 103076; UPDATE pg_dist_shard SET shardminvalue = 'g', shardmaxvalue = 'h' WHERE shardid = 103077; -- print the ordering of shard intervals with range partitioning as well SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- {800004,800005,800006,800007} (1 row) -- update only min value for one shard UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103075; SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- {800004,800005,800006,800007} (1 row) -- now lets have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103076; SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- {800004,800005,800006,800007} (1 row) -- now lets have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103074; SELECT print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- {800004,800005,800006,800007} (1 row) -- all shard placements are uninitialized UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103077; SELECT 
print_sorted_shard_intervals('pruning_range'); print_sorted_shard_intervals ------------------------------- {800004,800005,800006,800007} (1 row) citus-7.0.3/src/test/regress/expected/multi_query_directory_cleanup.out000066400000000000000000000140301317107136600265770ustar00rootroot00000000000000-- -- MULTI_QUERY_DIRECTORY_CLEANUP -- -- We execute sub-queries on worker nodes, and copy query results to a directory -- on the master node for final processing. When the query completes or fails, -- the resource owner should automatically clean up these intermediate query -- result files. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000; SET citus.enable_unique_job_ids TO off; BEGIN; -- pg_ls_dir() displays jobids. We explicitly set the jobId sequence -- here so that the regression output becomes independent of the -- number of jobs executed prior to running this test. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue --------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue --------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue --------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ----------- (0 rows) COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ----------- (0 rows) BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue --------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue --------------- 22770844.7654 (1 row) SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; revenue --------------- 22770844.7654 (1 row) SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ----------- (0 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ----------- (0 rows) -- Test that multiple job directories are all cleaned up correctly, -- both individually (by closing a cursor) and in bulk when ending a -- transaction. 
BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; revenue --------------- 22770844.7654 (1 row) DECLARE c_01 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; revenue --------------- 22770844.7654 (1 row) DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_02; revenue --------------- 22770844.7654 (1 row) DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_03; revenue --------------- 22770844.7654 (1 row) DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; revenue --------------- 22770844.7654 (1 row) DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; revenue --------------- 22770844.7654 (1 row) DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; revenue --------------- 22770844.7654 (1 row) DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; revenue --------------- 22770844.7654 (1 row) DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; revenue --------------- 22770844.7654 (1 row) DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; revenue --------------- 22770844.7654 (1 row) DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; revenue --------------- 22770844.7654 (1 row) DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; revenue --------------- 22770844.7654 (1 row) DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; revenue --------------- 22770844.7654 (1 row) DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; revenue --------------- 22770844.7654 (1 row) DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; revenue --------------- 22770844.7654 (1 row) DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; revenue --------------- 22770844.7654 (1 row) DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; revenue --------------- 22770844.7654 (1 row) DECLARE c_17 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; revenue --------------- 22770844.7654 (1 row) DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_18; revenue --------------- 22770844.7654 (1 row) DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_19; revenue --------------- 22770844.7654 (1 row) SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; f ----------------- master_job_0007 master_job_0008 master_job_0009 master_job_0010 master_job_0011 master_job_0012 master_job_0013 master_job_0014 master_job_0015 master_job_0016 master_job_0017 master_job_0018 master_job_0019 master_job_0020 master_job_0021 master_job_0022 master_job_0023 master_job_0024 master_job_0025 master_job_0026 (20 rows) -- close first, 
17th (first after re-alloc) and last cursor. CLOSE c_00; CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; f ----------------- master_job_0008 master_job_0009 master_job_0010 master_job_0011 master_job_0012 master_job_0013 master_job_0014 master_job_0015 master_job_0016 master_job_0017 master_job_0018 master_job_0019 master_job_0020 master_job_0021 master_job_0022 master_job_0024 master_job_0025 (17 rows) ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); pg_ls_dir ----------- (0 rows) citus-7.0.3/src/test/regress/expected/multi_read_from_secondaries.out000066400000000000000000000042411317107136600261570ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1600000; \c "dbname=regression options='-c\ citus.use_secondary_nodes=always'" CREATE TABLE dest_table (a int, b int); CREATE TABLE source_table (a int, b int); -- attempts to change metadata should fail while reading from secondaries SELECT create_distributed_table('dest_table', 'a'); ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' \c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" SELECT create_distributed_table('dest_table', 'a'); create_distributed_table -------------------------- (1 row) SELECT create_distributed_table('source_table', 'a'); create_distributed_table -------------------------- (1 row) INSERT INTO dest_table (a, b) VALUES (1, 1); INSERT INTO dest_table (a, b) VALUES (2, 1); INSERT INTO source_table (a, b) VALUES (10, 10); -- simulate actually having secondary nodes SELECT * FROM pg_dist_node; nodeid | groupid | nodename | nodeport | noderack | hasmetadata | isactive | noderole | nodecluster --------+---------+-----------+----------+----------+-------------+----------+----------+------------- 1 | 1 | localhost | 57637 | default | f | t | primary | default 2 | 2 | localhost | 57638 | default | f | t | primary | default (2 rows) UPDATE pg_dist_node SET noderole = 'secondary'; \c "dbname=regression options='-c\ citus.use_secondary_nodes=always'" -- inserts are disallowed INSERT INTO dest_table (a, b) VALUES (1, 2); ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' -- router selects are allowed SELECT a FROM dest_table WHERE a = 1; a --- 1 (1 row) -- real-time selects are also allowed SELECT a FROM dest_table; a --- 1 2 (2 rows) -- insert into is definitely not allowed INSERT INTO dest_table (a, b) SELECT a, b FROM source_table; ERROR: writing to worker nodes is not currently allowed DETAIL: citus.use_secondary_nodes is set to 'always' \c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" UPDATE pg_dist_node SET noderole = 'primary'; DROP TABLE dest_table; citus-7.0.3/src/test/regress/expected/multi_reference_table.out000066400000000000000000001363701317107136600247560ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); -- insert some data, and make sure that cannot be create_distributed_table INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); -- create the reference table SELECT create_reference_table('reference_table_test'); NOTICE: Copying data from local table...
create_reference_table ------------------------ (1 row) -- see that partkey is NULL SELECT partmethod, (partkey IS NULL) as partkeyisnull, repmodel FROM pg_dist_partition WHERE logicalrelid = 'reference_table_test'::regclass; partmethod | partkeyisnull | repmodel ------------+---------------+---------- n | t | t (1 row) -- now see that shard min/max values are NULL SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1250000 | t | t (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass) ORDER BY placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1250000 | 1 | localhost | 57637 1250000 | 1 | localhost | 57638 (2 rows) -- check whether data was copied into distributed table SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) -- now, execute some modification queries INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); -- most of the queries in this file are already tested on multi_router_planner.sql -- However, for the sake of completeness we need to run similar tests with -- reference tables as well -- run some queries on top of the data SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 4 | 4 | 4 | Sun Dec 04 00:00:00 2016 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 (5 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) SELECT value_1, value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 3; value_1 | value_2 ---------+--------- 1 | 1 2 | 2 3 | 3 (3 rows) SELECT value_1, value_3 FROM reference_table_test WHERE value_2 >= 4 ORDER BY 2 LIMIT 3; value_1 | value_3 ---------+--------- 4 | 4 5 | 5 (2 rows) SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2; value_1 | ?column? ---------+---------- 1 | 15 2 | 30 (2 rows) SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; value_1 | ?column? 
---------+---------- 3 | 45 4 | 60 (2 rows) SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; value_2 | value_4 ---------+-------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; value_2 | value_4 ---------+--------- (0 rows) SELECT value_2, value_4 FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; value_2 | value_4 ---------+-------------------------- 2 | Fri Dec 02 00:00:00 2016 3 | Sat Dec 03 00:00:00 2016 (2 rows) SELECT value_2, value_4 FROM reference_table_test WHERE ( value_3 = '2' OR value_1 = 3 ) AND FALSE; value_2 | value_4 ---------+--------- (0 rows) SELECT * FROM reference_table_test WHERE value_2 IN ( SELECT value_3::FLOAT FROM reference_table_test ) AND value_1 < 3; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) SELECT value_4 FROM reference_table_test WHERE value_3 IN ( '1', '2' ); value_4 -------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 (2 rows) SELECT date_part('day', value_4) FROM reference_table_test WHERE value_3 IN ( '5', '2' ); date_part ----------- 2 5 (2 rows) SELECT value_4 FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; value_4 --------- (0 rows) SELECT value_4 FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; value_4 -------------------------- Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (2 rows) SELECT value_4 FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); value_4 --------- (0 rows) SELECT value_1 FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; value_1 --------- 1 2 3 (3 rows) SELECT value_1 FROM reference_table_test WHERE FALSE; value_1 --------- (0 rows) SELECT value_1 FROM reference_table_test WHERE int4eq(1, 2); value_1 --------- (0 rows) -- rename output name and do some operations SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; id | age ----+----- 1 | 15 2 | 30 3 | 45 4 | 60 5 | 75 (5 rows) -- queries with CTEs are supported WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM some_data; value_2 | value_4 ---------+-------------------------- 3 | Sat Dec 03 00:00:00 2016 4 | Sun Dec 04 00:00:00 2016 5 | Mon Dec 05 00:00:00 2016 (3 rows) -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) -- queries which involve functions in FROM clause are supported if it goes to a single worker. 
SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 | position ---------+---------+---------+--------------------------+---------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 (1 row) SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; value_1 | value_2 | value_3 | value_4 | position ---------+---------+---------+--------------------------+---------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 | 3 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 | 3 (2 rows) -- set operations are supported SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 UNION SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (2 rows) SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 EXCEPT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 INTERSECT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) -- to make the aggregation tests more interesting, ingest some more data INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); -- some aggregations SELECT value_4, SUM(value_2) FROM reference_table_test GROUP BY value_4 HAVING SUM(value_2) > 3 ORDER BY 1; value_4 | sum --------------------------+----- Fri Dec 02 00:00:00 2016 | 4 Sat Dec 03 00:00:00 2016 | 6 Sun Dec 04 00:00:00 2016 | 4 Mon Dec 05 00:00:00 2016 | 5 (4 rows) SELECT value_4, value_3, SUM(value_2) FROM reference_table_test GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; value_4 | value_3 | sum --------------------------+---------+----- Thu Dec 01 00:00:00 2016 | | 2 Fri Dec 02 00:00:00 2016 | | 4 Sat Dec 03 00:00:00 2016 | | 6 Sun Dec 04 00:00:00 2016 | | 4 Mon Dec 05 00:00:00 2016 | | 5 | 1 | 2 | 2 | 4 | 3 | 6 | 4 | 4 | 5 | 5 (10 rows) -- distinct clauses also work fine SELECT DISTINCT value_4 FROM reference_table_test ORDER BY 1; value_4 -------------------------- Thu Dec 01 00:00:00 2016 Fri Dec 02 00:00:00 2016 Sat Dec 03 00:00:00 2016 Sun Dec 04 00:00:00 2016 Mon Dec 05 00:00:00 2016 (5 rows) -- window functions are also supported SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; value_4 | rank --------------------------+------ Thu Dec 01 00:00:00 2016 | 1 Thu Dec 01 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 Fri Dec 02 00:00:00 2016 | 1 Sat Dec 03 00:00:00 2016 | 1 Sat Dec 03 00:00:00 2016 | 1 Sun Dec 04 00:00:00 2016 | 1 Mon Dec 05 00:00:00 2016 | 1 (8 rows) -- window functions are also supported SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; value_4 | avg --------------------------+------------------------ Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Thu Dec 01 00:00:00 2016 | 1.00000000000000000000 Fri Dec 02 00:00:00 2016 | 2.0000000000000000 Fri Dec 02 00:00:00 2016 |
2.0000000000000000 Sat Dec 03 00:00:00 2016 | 3.0000000000000000 Sat Dec 03 00:00:00 2016 | 3.0000000000000000 Sun Dec 04 00:00:00 2016 | 4.0000000000000000 Mon Dec 05 00:00:00 2016 | 5.0000000000000000 (8 rows) SELECT count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test; c --- 3 (1 row) SELECT value_1, count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test GROUP BY value_1 ORDER BY 1; value_1 | c ---------+--- 1 | 0 2 | 0 3 | 1 4 | 1 5 | 1 (5 rows) -- selects inside a transaction works fine as well BEGIN; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 4 | 4 | 4 | Sun Dec 04 00:00:00 2016 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 3 | 3 | 3 | Sat Dec 03 00:00:00 2016 (8 rows) SELECT * FROM reference_table_test WHERE value_1 = 1; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) END; -- cursor queries also works fine BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM reference_table_test WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) FETCH ALL test_cursor; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (3 rows) FETCH test_cursor; -- fetch one row after the last value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) FETCH BACKWARD test_cursor; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) END; -- table creation queries inside can be router plannable CREATE TEMP TABLE temp_reference_test as SELECT * FROM reference_table_test WHERE value_1 = 1; -- all kinds of joins are supported among reference tables -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); create_reference_table ------------------------ (1 row) CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); create_reference_table ------------------------ (1 row) -- ingest some data to both tables INSERT INTO reference_table_test_second VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test_second VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test_second VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test_third VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05'); -- some very basic tests SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_2 ORDER BY 1; value_1 --------- 1 2 3 (3 rows) SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_third t3 WHERE t1.value_2 = t3.value_2 ORDER BY 1; 
value_1 --------- 4 5 (2 rows) SELECT DISTINCT t2.value_1 FROM reference_table_test_second t2, reference_table_test_third t3 WHERE t2.value_2 = t3.value_2 ORDER BY 1; value_1 --------- (0 rows) -- join on different columns and different data types via casts SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_1 ORDER BY 1; value_1 --------- 1 2 3 (3 rows) SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_3::int ORDER BY 1; value_1 --------- 1 2 3 (3 rows) SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; value_1 --------- 1 2 3 (3 rows) -- ingest a common row to see more meaningful results with joins involving 3 tables INSERT INTO reference_table_test_third VALUES (3, 3.0, '3', '2016-12-03'); SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; value_1 --------- 3 (1 row) -- same query on different columns SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; value_1 --------- 3 (1 row) -- with the JOIN syntax SELECT DISTINCT t1.value_1 FROM reference_table_test t1 JOIN reference_table_test_second t2 USING (value_1) JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; value_1 --------- 3 (1 row) -- and left/right joins SELECT DISTINCT t1.value_1 FROM reference_table_test t1 LEFT JOIN reference_table_test_second t2 USING (value_1) LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; value_1 --------- 1 2 3 4 5 (5 rows) SELECT DISTINCT t1.value_1 FROM reference_table_test t1 RIGHT JOIN reference_table_test_second t2 USING (value_1) RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; value_1 --------- 3 (2 rows) -- now, let's have some tests on UPSERTs and uniqueness CREATE TABLE reference_table_test_fourth (value_1 int, value_2 float PRIMARY KEY, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fourth'); create_reference_table ------------------------ (1 row) -- insert a row INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01'); -- now get the unique key violation INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01'); ERROR: duplicate key value violates unique constraint "reference_table_test_fourth_pkey_1250003" DETAIL: Key (value_2)=(1) already exists. CONTEXT: while executing command on localhost:57637 -- now get null constraint violation due to primary key INSERT INTO reference_table_test_fourth (value_1, value_3, value_4) VALUES (1, '1.0', '2016-12-01'); ERROR: null value in column "value_2" violates not-null constraint DETAIL: Failing row contains (1, null, 1.0, 2016-12-01 00:00:00).
CONTEXT: while executing command on localhost:57637 -- let's run some upserts INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01') ON CONFLICT DO NOTHING RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3, value_2 = EXCLUDED.value_2 RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 10 | Thu Dec 01 00:00:00 2016 (1 row) -- update all columns INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3 || '+10', value_2 = EXCLUDED.value_2 + 10, value_1 = EXCLUDED.value_1 + 10, value_4 = '2016-12-10' RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 11 | 11 | 10+10 | Sat Dec 10 00:00:00 2016 (1 row) -- finally see that shard healths are OK SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test_fourth'::regclass) ORDER BY placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1250003 | 1 | localhost | 57637 1250003 | 1 | localhost | 57638 (2 rows) -- let's now run some update/delete queries on arbitrary columns DELETE FROM reference_table_test WHERE value_1 = 1 RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (2 rows) DELETE FROM reference_table_test WHERE value_4 = '2016-12-05' RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 5 | 5 | 5 | Mon Dec 05 00:00:00 2016 (1 row) UPDATE reference_table_test SET value_2 = 15 WHERE value_2 = 2 RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 2 | 15 | 2 | Fri Dec 02 00:00:00 2016 (2 rows) -- and some queries without any filters UPDATE reference_table_test SET value_2 = 15, value_1 = 45 RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 45 | 15 | 4 | Sun Dec 04 00:00:00 2016 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 (5 rows) DELETE FROM reference_table_test RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 45 | 15 | 4 | Sun Dec 04 00:00:00 2016 45 | 15 | 3 | Sat Dec 03 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 45 | 15 | 2 | Fri Dec 02 00:00:00 2016 (5 rows) -- some tests with function evaluation and sequences CREATE TABLE reference_table_test_fifth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fifth'); create_reference_table ------------------------ (1 row) CREATE SEQUENCE example_ref_value_seq; -- see that sequences work as expected INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; value_1 | value_2 ---------+--------- 1 | 2 (1 row) INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1,
value_2; value_1 | value_2 ---------+--------- 2 | 2 (1 row) INSERT INTO reference_table_test_fifth (value_2, value_3) VALUES (nextval('example_ref_value_seq'), nextval('example_ref_value_seq')::text) RETURNING value_1, value_2, value_3; value_1 | value_2 | value_3 ---------+---------+--------- 3 | 1 | 2 (1 row) UPDATE reference_table_test_fifth SET value_4 = now() WHERE value_1 = 1 RETURNING value_1, value_2, value_4 > '2000-01-01'; value_1 | value_2 | ?column? ---------+---------+---------- 1 | 2 | t (1 row) -- test copying FROM / TO -- first delete all the data DELETE FROM reference_table_test; COPY reference_table_test FROM STDIN WITH CSV; COPY reference_table_test (value_2, value_3, value_4) FROM STDIN WITH CSV; COPY reference_table_test (value_3) FROM STDIN WITH CSV; COPY reference_table_test FROM STDIN WITH CSV; COPY reference_table_test TO STDOUT WITH CSV; 1,1,1,Fri Jan 01 00:00:00 2016 ,2,2,Sat Jan 02 00:00:00 2016 ,,3, ,,, -- INSERT INTO SELECT among reference tables DELETE FROM reference_table_test_second; INSERT INTO reference_table_test_second SELECT * FROM reference_table_test RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Fri Jan 01 00:00:00 2016 | 2 | 2 | Sat Jan 02 00:00:00 2016 | | 3 | | | | (4 rows) INSERT INTO reference_table_test_second (value_2) SELECT reference_table_test.value_2 FROM reference_table_test JOIN reference_table_test_second USING (value_1) RETURNING *; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- | 1 | | (1 row) SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 2; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); create_distributed_table -------------------------- (1 row) CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); create_distributed_table -------------------------- (1 row) DELETE FROM reference_table_test; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test_2 VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test_2 VALUES (2, 2.0, '2', '2016-12-02'); SET client_min_messages TO DEBUG1; SET citus.log_multi_join_order TO TRUE; SELECT reference_table_test.value_1 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ] value_1 --------- 1 2 (2 rows) SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ] value_2 --------- 1 2 (2 rows) SELECT colocated_table_test.value_2 FROM colocated_table_test, reference_table_test WHERE reference_table_test.value_1 = colocated_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ] value_2 --------- 1 2 (2 rows) SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE 
colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ cartesian product "colocated_table_test_2" ] ERROR: cannot perform distributed planning on this query DETAIL: Cartesian products are currently unsupported SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ local partition join "colocated_table_test_2" ] value_2 --------- 1 2 (2 rows) SET citus.task_executor_type to "task-tracker"; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 --------- 1 2 (2 rows) SELECT reference_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; LOG: join order: [ "colocated_table_test" ][ broadcast join "reference_table_test" ][ dual partition join "colocated_table_test_2" ] value_2 --------- 1 2 (2 rows) SET citus.log_multi_join_order TO FALSE; SET citus.shard_count TO DEFAULT; SET citus.task_executor_type to "real-time"; -- some INSERT .. SELECT queries that involve both hash distributed and reference tables -- should go via coordinator since we're inserting into reference table where -- not all the participants are reference tables INSERT INTO reference_table_test (value_1) SELECT colocated_table_test.value_1 FROM colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1; DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT DEBUG: Collecting INSERT ... SELECT results on coordinator -- should go via coordinator, same as the above INSERT INTO reference_table_test (value_1) SELECT colocated_table_test.value_1 FROM colocated_table_test, reference_table_test WHERE colocated_table_test.value_1 = reference_table_test.value_1; DEBUG: only reference tables may be queried when targeting a reference table with distributed INSERT ... SELECT DEBUG: Collecting INSERT ... SELECT results on coordinator -- not pushable due to lack of equality between partition column and column of reference table INSERT INTO colocated_table_test (value_1, value_2) SELECT colocated_table_test_2.value_1, reference_table_test.value_2 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_4 = reference_table_test.value_4 RETURNING value_1, value_2; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. 
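-- illustrative sketch (not part of the original regression output; result rows and
-- DEBUG lines omitted): per the DEBUG messages above, a distributed INSERT ... SELECT
-- that targets a reference table avoids the coordinator only when every queried
-- relation is itself a reference table, e.g.
INSERT INTO reference_table_test_second (value_1, value_2)
SELECT value_1, value_2
FROM reference_table_test;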
-- some more complex queries (Note that there are more complex queries in multi_insert_select.sql) INSERT INTO colocated_table_test (value_1, value_2) SELECT colocated_table_test_2.value_1, reference_table_test.value_2 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_2 = reference_table_test.value_2 RETURNING value_1, value_2; ERROR: cannot perform distributed planning for the given modification DETAIL: Select query cannot be pushed down to the worker. -- partition column value comes from reference table, goes via coordinator INSERT INTO colocated_table_test (value_1, value_2) SELECT reference_table_test.value_2, colocated_table_test_2.value_1 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_4 = reference_table_test.value_4; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: The data type of the target table's partition column should exactly match the data type of the corresponding simple column reference in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator INSERT INTO colocated_table_test (value_1, value_2) SELECT reference_table_test.value_1, colocated_table_test_2.value_1 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_4 = reference_table_test.value_4; DEBUG: cannot perform distributed INSERT INTO ... SELECT because the partition columns in the source table and subquery do not match DETAIL: The target table's partition column should correspond to a partition column in the subquery. DEBUG: Collecting INSERT ... SELECT results on coordinator RESET client_min_messages; -- some tests for mark_tables_colocated -- should error out SELECT mark_tables_colocated('colocated_table_test_2', ARRAY['reference_table_test']); ERROR: cannot colocate tables colocated_table_test_2 and reference_table_test DETAIL: Replication models don't match for colocated_table_test_2 and reference_table_test.
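-- illustrative sketch (not part of the original regression output, result rows
-- omitted): the colocation group and replication model that mark_tables_colocated
-- compares can be inspected directly in the pg_dist_partition metadata
SELECT logicalrelid, colocationid, repmodel
FROM pg_dist_partition
WHERE logicalrelid IN ('colocated_table_test_2'::regclass, 'reference_table_test'::regclass);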
-- should work silently SELECT mark_tables_colocated('reference_table_test', ARRAY['reference_table_test_fifth']); mark_tables_colocated ----------------------- (1 row) -- ensure that reference tables on -- different schemas work as expected CREATE SCHEMA reference_schema; -- create with schema prefix CREATE TABLE reference_schema.reference_table_test_sixth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_test_sixth'); create_reference_table ------------------------ (1 row) SET search_path TO 'reference_schema'; -- create in the schema CREATE TABLE reference_table_test_seventh (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_seventh'); create_reference_table ------------------------ (1 row) -- ingest some data INSERT INTO reference_table_test_sixth VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test_seventh VALUES (1, 1.0, '1', '2016-12-01'); SET search_path TO 'public'; -- ingest some data INSERT INTO reference_schema.reference_table_test_sixth VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_schema.reference_table_test_seventh VALUES (2, 2.0, '2', '2016-12-02'); -- some basic queries SELECT value_1 FROM reference_schema.reference_table_test_sixth; value_1 --------- 1 2 (2 rows) SET search_path TO 'reference_schema'; SELECT reference_table_test_sixth.value_1 FROM reference_table_test_sixth, reference_table_test_seventh WHERE reference_table_test_sixth.value_4 = reference_table_test_seventh.value_4; value_1 --------- 1 2 (2 rows) -- last test with cross schemas SET search_path TO 'public'; SELECT reftable.value_2, colocated_table_test_2.value_1 FROM colocated_table_test_2, reference_schema.reference_table_test_sixth as reftable WHERE colocated_table_test_2.value_4 = reftable.value_4; value_2 | value_1 ---------+--------- 1 | 1 2 | 2 (2 rows) -- let's now test TRUNCATE and DROP TABLE -- delete all rows and ingest some data DELETE FROM reference_table_test; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); SELECT count(*) FROM reference_table_test; count ------- 5 (1 row) -- truncate it and get the result back TRUNCATE reference_table_test; SELECT count(*) FROM reference_table_test; count ------- 0 (1 row) -- now try dropping one of the existing reference tables -- and check the metadata SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid ---------------------------- reference_table_test_fifth (1 row) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid ---------------------------- reference_table_test_fifth (1 row) DROP TABLE reference_table_test_fifth; SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid -------------- (0 rows) SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; logicalrelid -------------- (0 rows) -- now test DDL changes CREATE TABLE reference_table_ddl (value_1 int, value_2 float, value_3 text, value_4
timestamp); SELECT create_reference_table('reference_table_ddl'); create_reference_table ------------------------ (1 row) -- CREATE & DROP index and check the workers CREATE INDEX reference_index_1 ON reference_table_ddl(value_1); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE INDEX reference_index_2 ON reference_table_ddl(value_2, value_3); -- should be able to create/drop UNIQUE index on a reference table CREATE UNIQUE INDEX reference_index_3 ON reference_table_ddl(value_1); -- should be able to add a column ALTER TABLE reference_table_ddl ADD COLUMN value_5 INTEGER; ALTER TABLE reference_table_ddl ALTER COLUMN value_5 SET DATA TYPE FLOAT; ALTER TABLE reference_table_ddl DROP COLUMN value_1; ALTER TABLE reference_table_ddl ALTER COLUMN value_2 SET DEFAULT 25.0; ALTER TABLE reference_table_ddl ALTER COLUMN value_3 SET NOT NULL; -- see that Citus applied all DDLs to the table SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl'::regclass; Column | Type | Modifiers ---------+-----------------------------+-------------- value_2 | double precision | default 25.0 value_3 | text | not null value_4 | timestamp without time zone | value_5 | double precision | (4 rows) \d reference_index_2 Index "public.reference_index_2" Column | Type | Definition ---------+------------------+------------ value_2 | double precision | value_2 value_3 | text | value_3 btree, for table "public.reference_table_ddl" -- also to the shard placements \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass; Column | Type | Modifiers ---------+-----------------------------+-------------- value_2 | double precision | default 25.0 value_3 | text | not null value_4 | timestamp without time zone | value_5 | double precision | (4 rows) \d reference_index_2_1250019 Index "public.reference_index_2_1250019" Column | Type | Definition ---------+------------------+------------ value_2 | double precision | value_2 value_3 | text | value_3 btree, for table "public.reference_table_ddl_1250019" \c - - - :master_port DROP INDEX reference_index_2; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass; Column | Type | Modifiers ---------+-----------------------------+-------------- value_2 | double precision | default 25.0 value_3 | text | not null value_4 | timestamp without time zone | value_5 | double precision | (4 rows) \di reference_index_2* List of relations Schema | Name | Type | Owner | Table --------+------+------+-------+------- (0 rows) \c - - - :master_port -- as we expect, renaming and setting WITH OIDS does not work for reference tables ALTER TABLE reference_table_ddl RENAME TO reference_table_ddl_test; ERROR: renaming distributed tables is currently unsupported ALTER TABLE reference_table_ddl SET WITH OIDS; ERROR: alter table command is currently unsupported DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP CONSTRAINT, ATTACH|DETACH PARTITION and TYPE subcommands are supported. 
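-- illustrative sketch (not part of the original regression output, result rows
-- omitted; reference_index_4 is a throwaway name): following the HINT printed above,
-- DDL against reference tables can be run under two-phase commit instead of the
-- default one-phase commit
SET citus.multi_shard_commit_protocol TO '2pc';
CREATE INDEX reference_index_4 ON reference_table_ddl(value_3);
DROP INDEX reference_index_4;
RESET citus.multi_shard_commit_protocol;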
-- now test reference tables against some helper UDFs that Citus provides -- cannot delete / drop shards from a reference table SELECT master_apply_delete_command('DELETE FROM reference_table_ddl'); ERROR: cannot delete from distributed table DETAIL: Delete statements on reference tables are not supported. -- cannot add shards SELECT master_create_empty_shard('reference_table_ddl'); ERROR: relation "reference_table_ddl" is a reference table DETAIL: We currently don't support creating shards on reference tables -- master_modify_multiple_shards works, but, does it make sense to use at all? INSERT INTO reference_table_ddl (value_2, value_3) VALUES (7, 'aa'); SELECT master_modify_multiple_shards('DELETE FROM reference_table_ddl WHERE value_2 = 7'); master_modify_multiple_shards ------------------------------- 1 (1 row) INSERT INTO reference_table_ddl (value_2, value_3) VALUES (7, 'bb'); SELECT master_modify_multiple_shards('DELETE FROM reference_table_ddl'); master_modify_multiple_shards ------------------------------- 1 (1 row) -- get/update the statistics SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('reference_table_ddl'); part_storage_type | part_key | part_replica_count | part_max_size | part_placement_policy -------------------+----------+--------------------+---------------+----------------------- t | | 2 | 307200 | 2 (1 row) SELECT shardid AS a_shard_id FROM pg_dist_shard WHERE logicalrelid = 'reference_table_ddl'::regclass \gset SELECT master_update_shard_statistics(:a_shard_id); master_update_shard_statistics -------------------------------- 16384 (1 row) CREATE TABLE append_reference_tmp_table (id INT); SELECT master_append_table_to_shard(:a_shard_id, 'append_reference_tmp_table', 'localhost', :master_port); ERROR: cannot append to shardId 1250019 DETAIL: We currently don't support appending to shards in hash-partitioned or reference tables SELECT master_get_table_ddl_events('reference_table_ddl'); master_get_table_ddl_events ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- CREATE TABLE public.reference_table_ddl (value_2 double precision DEFAULT 25.0, value_3 text NOT NULL, value_4 timestamp without time zone, value_5 double precision) (1 row) -- in reality, we wouldn't need to repair any reference table shard placements -- however, the test could be relevant for other purposes SELECT placementid AS a_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_1_port \gset SELECT placementid AS b_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_2_port \gset UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE placementid = :a_placement_id; SELECT master_copy_shard_placement(:a_shard_id, 'localhost', :worker_2_port, 'localhost', :worker_1_port); master_copy_shard_placement ----------------------------- (1 row) SELECT shardid, shardstate FROM pg_dist_shard_placement WHERE placementid = :a_placement_id; shardid | shardstate ---------+------------ 1250019 | 1 (1 row) -- some queries that are captured in functions CREATE FUNCTION select_count_all() RETURNS bigint AS ' SELECT count(*) FROM reference_table_test; ' LANGUAGE SQL; CREATE FUNCTION insert_into_ref_table(value_1 int, value_2 float, value_3 text, value_4 timestamp) RETURNS void AS ' INSERT INTO reference_table_test VALUES ($1, $2, $3, $4); ' LANGUAGE SQL; 
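-- illustrative sketch (not part of the original regression output; the function
-- below is a throwaway PL/pgSQL variant of select_count_all and is not used by the
-- statements that follow)
CREATE FUNCTION select_count_all_plpgsql() RETURNS bigint AS $$
DECLARE
    row_count bigint;
BEGIN
    SELECT count(*) INTO row_count FROM reference_table_test;
    RETURN row_count;
END;
$$ LANGUAGE plpgsql;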
TRUNCATE reference_table_test; SELECT select_count_all(); select_count_all ------------------ 0 (1 row) SELECT insert_into_ref_table(1, 1.0, '1', '2016-12-01'); insert_into_ref_table ----------------------- (1 row) SELECT insert_into_ref_table(2, 2.0, '2', '2016-12-02'); insert_into_ref_table ----------------------- (1 row) SELECT insert_into_ref_table(3, 3.0, '3', '2016-12-03'); insert_into_ref_table ----------------------- (1 row) SELECT insert_into_ref_table(4, 4.0, '4', '2016-12-04'); insert_into_ref_table ----------------------- (1 row) SELECT insert_into_ref_table(5, 5.0, '5', '2016-12-05'); insert_into_ref_table ----------------------- (1 row) SELECT insert_into_ref_table(6, 6.0, '6', '2016-12-06'); insert_into_ref_table ----------------------- (1 row) SELECT select_count_all(); select_count_all ------------------ 6 (1 row) TRUNCATE reference_table_test; -- some prepared queries and pl/pgsql functions PREPARE insert_into_ref_table_pr (int, float, text, timestamp) AS INSERT INTO reference_table_test VALUES ($1, $2, $3, $4); -- reference tables do not have up-to-five execution limit as other tables EXECUTE insert_into_ref_table_pr(1, 1.0, '1', '2016-12-01'); EXECUTE insert_into_ref_table_pr(2, 2.0, '2', '2016-12-02'); EXECUTE insert_into_ref_table_pr(3, 3.0, '3', '2016-12-03'); EXECUTE insert_into_ref_table_pr(4, 4.0, '4', '2016-12-04'); EXECUTE insert_into_ref_table_pr(5, 5.0, '5', '2016-12-05'); EXECUTE insert_into_ref_table_pr(6, 6.0, '6', '2016-12-06'); -- see the count, then truncate the table SELECT select_count_all(); select_count_all ------------------ 6 (1 row) TRUNCATE reference_table_test; -- reference tables work with composite key -- and we even do not need to create hash -- function etc. -- first create the type on all nodes CREATE TYPE reference_comp_key as (key text, value text); \c - - - :worker_1_port CREATE TYPE reference_comp_key as (key text, value text); \c - - - :worker_2_port CREATE TYPE reference_comp_key as (key text, value text); \c - - - :master_port CREATE TABLE reference_table_composite (id int PRIMARY KEY, data reference_comp_key); SELECT create_reference_table('reference_table_composite'); create_reference_table ------------------------ (1 row) -- insert and query some data INSERT INTO reference_table_composite (id, data) VALUES (1, ('key_1', 'value_1')::reference_comp_key); INSERT INTO reference_table_composite (id, data) VALUES (2, ('key_2', 'value_2')::reference_comp_key); SELECT * FROM reference_table_composite; id | data ----+----------------- 1 | (key_1,value_1) 2 | (key_2,value_2) (2 rows) SELECT (data).key FROM reference_table_composite; key ------- key_1 key_2 (2 rows) -- make sure that reference tables obeys single shard transactions TRUNCATE reference_table_test; BEGIN; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 1 | 1 | 1 | Thu Dec 01 00:00:00 2016 (1 row) ROLLBACK; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+--------- (0 rows) -- now insert a row and commit BEGIN; INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); COMMIT; SELECT * FROM reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 2 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) -- one basic UPDATE test BEGIN; UPDATE reference_table_test SET value_1 = 10 WHERE value_1 = 2; COMMIT; SELECT * FROM 
reference_table_test; value_1 | value_2 | value_3 | value_4 ---------+---------+---------+-------------------------- 10 | 2 | 2 | Fri Dec 02 00:00:00 2016 (1 row) -- DML+master_modify_multiple_shards is allowed BEGIN; INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); SELECT master_modify_multiple_shards('DELETE FROM colocated_table_test'); master_modify_multiple_shards ------------------------------- 6 (1 row) ROLLBACK; -- DDL+DML is allowed BEGIN; ALTER TABLE reference_table_test ADD COLUMN value_dummy INT; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); ROLLBACK; -- clean up tables DROP TABLE reference_table_test, reference_table_test_second, reference_table_test_third, reference_table_test_fourth, reference_table_ddl, reference_table_composite; DROP SCHEMA reference_schema CASCADE; NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table reference_schema.reference_table_test_sixth drop cascades to table reference_schema.reference_table_test_seventh citus-7.0.3/src/test/regress/expected/multi_remove_node_reference_table.out000066400000000000000000000672101317107136600273360ustar00rootroot00000000000000-- -- MULTI_REMOVE_NODE_REFERENCE_TABLE -- -- Tests that check the metadata after master_remove_node. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1380000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1380000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1380000; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1380000; -- create copy of pg_dist_shard_placement to reload after the test CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) -- remove non-existing node SELECT master_remove_node('localhost', 55555); ERROR: node at "localhost:55555" does not exist -- remove a node with no reference tables -- verify node exist before removal SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- verify node is removed SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) -- re-add the node for next tests SELECT groupid AS worker_2_group FROM master_add_node('localhost', :worker_2_port) \gset -- add a secondary to check we don't attempt to replicate the table to it SELECT isactive FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary'); isactive ---------- t (1 row) -- remove a node with reference table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); create_reference_table ------------------------ (1 row) -- make sure when we add a secondary we don't attempt to add placements to it SELECT isactive FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary'); isactive ---------- t (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count ------- 1 (1 row) -- make sure when we disable a secondary we don't remove any 
placements SELECT master_disable_node('localhost', 9001); master_disable_node --------------------- (1 row) SELECT isactive FROM pg_dist_node WHERE nodeport = 9001; isactive ---------- f (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count ------- 1 (1 row) -- make sure when we activate a secondary we don't add any placements SELECT 1 FROM master_activate_node('localhost', 9001); ?column? ---------- 1 (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count ------- 1 (1 row) -- make sure when we remove a secondary we don't remove any placements SELECT master_remove_node('localhost', 9001); master_remove_node -------------------- (1 row) SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; count ------- 1 (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 1 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) \c - - - :master_port -- remove same node twice SELECT master_remove_node('localhost', :worker_2_port); ERROR: node at "localhost:57638" does not exist -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 ?column? 
---------- 1 (1 row) -- try to disable the node before removing it (this used to crash) SELECT master_disable_node('localhost', :worker_2_port); master_disable_node --------------------- (1 row) SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- re-add the node for the next test SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 ?column? ---------- 1 (1 row) -- remove node in a transaction and ROLLBACK -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) ROLLBACK; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port -- remove node in a transaction and COMMIT -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM 
pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 1 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 ?column? 
---------- 1 (1 row) -- test inserting a value then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port BEGIN; INSERT INTO remove_node_reference_table VALUES(1); SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 1 | 0 (1 row) --verify the data is inserted SELECT * FROM remove_node_reference_table; column1 --------- 1 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM remove_node_reference_table; column1 --------- 1 (1 row) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 ?column? 
---------- 1 (1 row) -- test executing DDL command then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) \c - - - :master_port BEGIN; ALTER TABLE remove_node_reference_table ADD column2 int; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 1 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) \c - - - :master_port -- verify table structure is changed SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass; Column | Type | Modifiers ---------+---------+----------- column1 | integer | column2 | integer | (2 rows) -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 ?column? 
---------- 1 (1 row) -- test DROP table after removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380000 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370005 | 1 | 2 | 0 (1 row) BEGIN; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) DROP TABLE remove_node_reference_table; COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ (0 rows) -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- re-create remove_node_reference_table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); create_reference_table ------------------------ (1 row) -- test removing a node while there is a reference table at another schema CREATE SCHEMA remove_node_reference_table_schema; CREATE TABLE remove_node_reference_table_schema.table1(column1 int); SELECT create_reference_table('remove_node_reference_table_schema.table1'); create_reference_table ------------------------ (1 row) -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370004 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) \c - - - :master_port SELECT 
master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370004 | 1 | 1 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 NOTICE: Replicating reference table "table1" to the node localhost:57638 ?column? ---------- 1 (1 row) -- test with master_disable_node -- status before master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370004 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid ASC; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1380001 | 1 | 0 | localhost | 57638 1380002 | 1 | 0 | localhost | 57638 (2 rows) \c - - - :master_port SELECT master_disable_node('localhost', :worker_2_port); master_disable_node --------------------- (1 row) -- status after master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype 
--------------+------------+-------------------+------------------------ 1370004 | 1 | 2 | 0 (1 row) \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "remove_node_reference_table" to the node localhost:57638 NOTICE: Replicating reference table "table1" to the node localhost:57638 ?column? ---------- 1 (1 row) -- DROP tables to clean workspace DROP TABLE remove_node_reference_table; DROP TABLE remove_node_reference_table_schema.table1; DROP SCHEMA remove_node_reference_table_schema CASCADE; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) -- reload pg_dist_shard_placement table INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement); DROP TABLE tmp_shard_placement; citus-7.0.3/src/test/regress/expected/multi_repair_shards.out000066400000000000000000000136121317107136600244720ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 820000; SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport=:worker_1_port \gset -- =================================================================== -- test shard repair functionality -- =================================================================== -- create a table and create its distribution metadata CREATE TABLE customer_engagements ( id integer, created_at date, event_data text ); -- add some indexes CREATE INDEX ON customer_engagements (id); CREATE INDEX ON customer_engagements (created_at); CREATE INDEX ON customer_engagements (event_data); -- distribute the table SELECT master_create_distributed_table('customer_engagements', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) -- create a single shard on the first worker SELECT master_create_worker_shards('customer_engagements', 1, 2); master_create_worker_shards ----------------------------- (1 row) -- ingest some data for the tests INSERT INTO customer_engagements VALUES (1, '01-01-2015', 'first event'); INSERT INTO customer_engagements VALUES (2, '02-01-2015', 'second event'); INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event'); -- the following queries does the following: -- (i) create a new shard -- (ii) mark the second shard placements as unhealthy -- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones -- (iv) do a successful master_copy_shard_placement from the first placement to the second -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement -- get the newshardid SELECT shardid as newshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_engagements'::regclass \gset -- now, update the second placement as unhealthy UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND groupid = :worker_2_group; -- cannot repair a shard after a modification (transaction still open during repair) BEGIN; ALTER TABLE customer_engagements ADD COLUMN value float; NOTICE: 
using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; BEGIN; INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; -- modifications after repairing a shard are fine (will use new metadata) BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ----------------------------- (1 row) ALTER TABLE customer_engagements ADD COLUMN value float; ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ----------------------------- (1 row) INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); ROLLBACK; -- add a fake healthy placement for the tests INSERT INTO pg_dist_placement (groupid, shardid, shardstate, shardlength) VALUES (:worker_2_group, :newshardid, 1, 0); SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: target placement must be in inactive state DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group AND shardid = :newshardid AND shardstate = 1; -- also try to copy from an inactive placement SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port); ERROR: source placement must be in finalized state -- "copy" this shard from the first placement to the second one SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); master_copy_shard_placement ----------------------------- (1 row) -- now, update first placement as unhealthy (and raise a notice) so that queries are not routed there UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND groupid = :worker_1_group; -- get the data from the second placement SELECT * FROM customer_engagements; id | created_at | event_data ----+------------+-------------- 1 | 01-01-2015 | first event 2 | 02-01-2015 | second event 1 | 03-01-2015 | third event (3 rows) -- now do the same test over again with a foreign table CREATE FOREIGN TABLE remote_engagements ( id integer, created_at date, event_data text ) SERVER fake_fdw_server; -- distribute the table SELECT master_create_distributed_table('remote_engagements', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) -- create a single shard on the first worker SELECT master_create_worker_shards('remote_engagements', 1, 2); NOTICE: foreign-data wrapper "fake_fdw" does not have an extension defined master_create_worker_shards ----------------------------- (1 row) -- get the newshardid SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remote_engagements'::regclass \gset -- now, update the second placement as unhealthy UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND groupid = :worker_2_group; -- oops!
we don't support repairing shards backed by foreign tables SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: cannot repair shard DETAIL: Table remote_engagements is a foreign table. Repairing shards backed by foreign tables is not supported. citus-7.0.3/src/test/regress/expected/multi_repartition_udt.out000066400000000000000000000167761317107136600250760ustar00rootroot00000000000000-- -- MULTI_REPARTITION_UDT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation CREATE TABLE repartition_udt ( pk integer not null, udtcol test_udt, txtcol text ); CREATE TABLE repartition_udt_other ( pk integer not null, udtcol test_udt, txtcol text ); -- Connect directly to a worker, create and drop the type, then -- proceed with type creation as above; thus the OIDs will be different. -- so that the OID is off. \c - - - :worker_1_port CREATE TYPE test_udt AS (i integer, i2 integer); DROP TYPE test_udt CASCADE; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - - :worker_2_port -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation -- Connect to master \c - - - :master_port -- Distribute and populate the two tables. SELECT master_create_distributed_table('repartition_udt', 'pk', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('repartition_udt', 3, 1); master_create_worker_shards ----------------------------- (1 row) SELECT master_create_distributed_table('repartition_udt_other', 'pk', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('repartition_udt_other', 5, 1); master_create_worker_shards ----------------------------- (1 row) INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (3, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt values (4, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (5, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (6, '(2,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (7, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (8, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (9, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo'); SET client_min_messages = LOG; -- Query that should result in a repartition join on int column, and be empty. 
SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- (0 rows) -- Query that should result in a repartition join on UDT column. SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] QUERY PLAN -------------------------------------------------------------------- Custom Scan (Citus Task-Tracker) (cost=0.00..0.00 rows=0 width=0) Task Count: 4 Tasks Shown: None, not supported for re-partition queries -> MapMergeJob Map Task Count: 3 Merge Task Count: 4 -> MapMergeJob Map Task Count: 5 Merge Task Count: 4 (9 rows) SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; LOG: join order: [ "repartition_udt" ][ dual partition join "repartition_udt_other" ] pk | udtcol | txtcol | pk | udtcol | txtcol ----+--------+--------+----+--------+-------- 2 | (1,2) | foo | 8 | (1,2) | foo 3 | (1,3) | foo | 9 | (1,3) | foo 4 | (2,1) | foo | 10 | (2,1) | foo 5 | (2,2) | foo | 11 | (2,2) | foo 6 | (2,3) | foo | 12 | (2,3) | foo (5 rows) citus-7.0.3/src/test/regress/expected/multi_repartitioned_subquery_udf.out000066400000000000000000000032071317107136600273110ustar00rootroot00000000000000-- -- MULTI_REPARTITIONED_SUBQUERY_UDF -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 830000; -- Create UDF in master and workers \c - - - :master_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping CREATE FUNCTION median(double precision[]) RETURNS double precision LANGUAGE sql IMMUTABLE AS $_$ SELECT AVG(val) FROM (SELECT val FROM unnest($1) val ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_1_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping CREATE FUNCTION median(double precision[]) RETURNS double precision LANGUAGE sql IMMUTABLE AS $_$ SELECT AVG(val) FROM (SELECT val FROM unnest($1) val ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_2_port DROP FUNCTION IF EXISTS median(double precision[]); NOTICE: function median(pg_catalog.float8[]) does not exist, skipping CREATE FUNCTION median(double precision[]) RETURNS double precision LANGUAGE sql IMMUTABLE AS $_$ SELECT AVG(val) FROM (SELECT val FROM unnest($1) val ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -- Run query on master \c - - - :master_port SET citus.task_executor_type TO 'task-tracker'; SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; median | count --------+------- (0 rows) citus-7.0.3/src/test/regress/expected/multi_replicate_reference_table.out000066400000000000000000000555001317107136600270030ustar00rootroot00000000000000-- -- MULTI_REPLICATE_REFERENCE_TABLE -- -- Tests that check the metadata returned by the master node. 
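-- A quick way to follow the tests in this file is to look at where the
-- reference table placements live. A minimal sketch of such a catalog
-- query (replicate_reference_table_valid is one of the reference tables
-- created further below; any other reference table name could be
-- substituted):
SELECT nodename, nodeport, shardstate
FROM pg_dist_shard_placement
WHERE shardid IN (SELECT shardid
                  FROM pg_dist_shard
                  WHERE logicalrelid = 'replicate_reference_table_valid'::regclass);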
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000; -- remove a node for testing purposes CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- test adding new node with no reference tables -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- verify node is added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 1 (1 row) -- verify nothing is replicated to the new node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) -- test adding new node with a reference table which does not have any healthy placement SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) CREATE TABLE replicate_reference_table_unhealthy(column1 int); SELECT create_reference_table('replicate_reference_table_unhealthy'); create_reference_table ------------------------ (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1370000; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ERROR: could not find any healthy placement for shard 1370000 -- verify node is not added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; count ------- 0 (1 row) -- verify nothing is replicated to the new node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) DROP TABLE replicate_reference_table_unhealthy; -- test replicating a reference table when a new node added CREATE TABLE replicate_reference_table_valid(column1 int); SELECT create_reference_table('replicate_reference_table_valid'); create_reference_table ------------------------ (1 row) -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370000 | 1 | 1 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_valid" to the node localhost:57638 ?column? 
---------- 1 (1 row) -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370001 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370000 | 1 | 2 | 0 (1 row) -- test add same node twice -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370001 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370000 | 1 | 2 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370001 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370000 | 1 | 2 | 0 (1 row) DROP TABLE replicate_reference_table_valid; -- test replicating a reference table when a new node added in TRANSACTION + ROLLBACK SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) CREATE TABLE replicate_reference_table_rollback(column1 int); SELECT create_reference_table('replicate_reference_table_rollback'); create_reference_table ------------------------ (1 row) -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370001 | 1 | 1 | 0 (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_rollback" to the node localhost:57638 ?column? 
---------- 1 (1 row) ROLLBACK; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370001 | 1 | 1 | 0 (1 row) DROP TABLE replicate_reference_table_rollback; -- test replicating a reference table when a new node added in TRANSACTION + COMMIT CREATE TABLE replicate_reference_table_commit(column1 int); SELECT create_reference_table('replicate_reference_table_commit'); create_reference_table ------------------------ (1 row) -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370001 | 1 | 1 | 0 (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_commit" to the node localhost:57638 ?column? ---------- 1 (1 row) COMMIT; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370003 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370001 | 1 | 2 | 0 (1 row) DROP TABLE replicate_reference_table_commit; -- test adding new node + upgrading another hash distributed table to reference table + creating new reference table in TRANSACTION SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) CREATE TABLE replicate_reference_table_reference_one(column1 int); SELECT create_reference_table('replicate_reference_table_reference_one'); create_reference_table ------------------------ (1 row) SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE replicate_reference_table_hash(column1 int); SELECT create_distributed_table('replicate_reference_table_hash', 'column1'); create_distributed_table -------------------------- (1 row) -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='replicate_reference_table_hash'::regclass; CREATE TABLE replicate_reference_table_reference_two(column1 int); -- status before 
master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370002 | 1 | 1 | 0 (1 row) SELECT logicalrelid, partmethod, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash', 'replicate_reference_table_reference_two') ORDER BY logicalrelid; logicalrelid | partmethod | colocationid | repmodel -----------------------------------------+------------+--------------+---------- replicate_reference_table_reference_one | n | 1370002 | t replicate_reference_table_hash | h | 1360004 | c (2 rows) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_reference_one" to the node localhost:57638 ?column? ---------- 1 (1 row) SELECT upgrade_to_reference_table('replicate_reference_table_hash'); NOTICE: Replicating reference table "replicate_reference_table_hash" to the node localhost:57638 upgrade_to_reference_table ---------------------------- (1 row) SELECT create_reference_table('replicate_reference_table_reference_two'); create_reference_table ------------------------ (1 row) COMMIT; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370004 | 1 | 0 | localhost | 57638 1370005 | 1 | 0 | localhost | 57638 1370006 | 1 | 0 | localhost | 57638 (3 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370002 | 1 | 2 | 0 (1 row) SELECT logicalrelid, partmethod, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash', 'replicate_reference_table_reference_two') ORDER BY logicalrelid; logicalrelid | partmethod | colocationid | repmodel -----------------------------------------+------------+--------------+---------- replicate_reference_table_reference_one | n | 1370002 | t replicate_reference_table_hash | n | 1370002 | t replicate_reference_table_reference_two | n | 1370002 | t (3 rows) DROP TABLE replicate_reference_table_reference_one; DROP TABLE replicate_reference_table_hash; DROP TABLE replicate_reference_table_reference_two; -- test inserting a value then adding a new node in a transaction SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) CREATE TABLE replicate_reference_table_insert(column1 int); SELECT create_reference_table('replicate_reference_table_insert'); create_reference_table ------------------------ (1 row) BEGIN; INSERT INTO replicate_reference_table_insert VALUES(1); SELECT 1 FROM 
master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_insert" to the node localhost:57638 ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; DROP TABLE replicate_reference_table_insert; -- test COPY then adding a new node in a transaction CREATE TABLE replicate_reference_table_copy(column1 int); SELECT create_reference_table('replicate_reference_table_copy'); create_reference_table ------------------------ (1 row) BEGIN; COPY replicate_reference_table_copy FROM STDIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_copy" to the node localhost:57638 ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; DROP TABLE replicate_reference_table_copy; -- test executing DDL command then adding a new node in a transaction CREATE TABLE replicate_reference_table_ddl(column1 int); SELECT create_reference_table('replicate_reference_table_ddl'); create_reference_table ------------------------ (1 row) BEGIN; ALTER TABLE replicate_reference_table_ddl ADD column2 int; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_ddl" to the node localhost:57638 ERROR: cannot open new connections after the first modification command within a transaction ROLLBACK; DROP TABLE replicate_reference_table_ddl; -- test DROP table after adding new node in a transaction CREATE TABLE replicate_reference_table_drop(column1 int); SELECT create_reference_table('replicate_reference_table_drop'); create_reference_table ------------------------ (1 row) -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_drop'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370003 | 1 | 1 | 0 (1 row) BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "replicate_reference_table_drop" to the node localhost:57638 ?column? 
---------- 1 (1 row) DROP TABLE replicate_reference_table_drop; COMMIT; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid = 1370009; colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ (0 rows) -- test adding a node while there is a reference table at another schema SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) CREATE SCHEMA replicate_reference_table_schema; CREATE TABLE replicate_reference_table_schema.table1(column1 int); SELECT create_reference_table('replicate_reference_table_schema.table1'); create_reference_table ------------------------ (1 row) -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+----------+---------- (0 rows) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370004 | 1 | 1 | 0 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); NOTICE: Replicating reference table "table1" to the node localhost:57638 ?column? ---------- 1 (1 row) -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370011 | 1 | 0 | localhost | 57638 (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1370004 | 1 | 2 | 0 (1 row) DROP TABLE replicate_reference_table_schema.table1; DROP SCHEMA replicate_reference_table_schema CASCADE; -- do some tests with inactive node SELECT master_remove_node('localhost', :worker_2_port); master_remove_node -------------------- (1 row) CREATE TABLE initially_not_replicated_reference_table (key int); SELECT create_reference_table('initially_not_replicated_reference_table'); create_reference_table ------------------------ (1 row) SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); ?column? 
---------- 1 (1 row) -- we should see only one shard placements SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'initially_not_replicated_reference_table'::regclass) ORDER BY 1,4,5; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370012 | 1 | 0 | localhost | 57637 (1 row) -- we should see the two shard placements after activation SELECT 1 FROM master_activate_node('localhost', :worker_2_port); NOTICE: Replicating reference table "initially_not_replicated_reference_table" to the node localhost:57638 ?column? ---------- 1 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'initially_not_replicated_reference_table'::regclass) ORDER BY 1,4,5; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1370012 | 1 | 0 | localhost | 57637 1370012 | 1 | 0 | localhost | 57638 (2 rows) -- this should have no effect SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- drop unnecassary tables DROP TABLE initially_not_replicated_reference_table; -- reload pg_dist_shard_placement table INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement); DROP TABLE tmp_shard_placement; RESET citus.shard_replication_factor; RESET citus.replication_model; citus-7.0.3/src/test/regress/expected/multi_router_planner.out000066400000000000000000002174171317107136600247140ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000; -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== CREATE TABLE articles_hash ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); CREATE TABLE articles_range ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); CREATE TABLE articles_append ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); -- Check for the existence of line 'DEBUG: Creating router plan' -- to determine if router planner is used. 
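-- A minimal sketch of that check (it presumes the tables have been
-- distributed and client_min_messages lowered to DEBUG2, both of which
-- happen further below): an equality filter on the distribution column is
-- expected to log "DEBUG: Creating router plan", while a range filter
-- falls back to the multi-shard real-time executor.
SELECT count(*) FROM articles_hash WHERE author_id = 1;
SELECT count(*) FROM articles_hash WHERE author_id >= 1 AND author_id <= 3;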
-- this table is used in a CTE test CREATE TABLE authors_hash ( name varchar(20), id bigint ); CREATE TABLE authors_range ( name varchar(20), id bigint ); CREATE TABLE authors_reference ( name varchar(20), id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard_hash (LIKE articles_hash); SELECT master_create_distributed_table('articles_hash', 'author_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('articles_single_shard_hash', 'author_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) -- test when a table is distributed but no shards created yet SELECT count(*) from articles_hash; count ------- 0 (1 row) SELECT master_create_worker_shards('articles_hash', 2, 1); master_create_worker_shards ----------------------------- (1 row) SELECT master_create_worker_shards('articles_single_shard_hash', 1, 1); master_create_worker_shards ----------------------------- (1 row) SELECT create_reference_table('authors_reference'); create_reference_table ------------------------ (1 row) -- create a bunch of test data INSERT INTO articles_hash VALUES ( 1, 1, 'arsenous', 9572); INSERT INTO articles_hash VALUES ( 2, 2, 'abducing', 13642); INSERT INTO articles_hash VALUES ( 3, 3, 'asternal', 10480); INSERT INTO articles_hash VALUES ( 4, 4, 'altdorfer', 14551); INSERT INTO articles_hash VALUES ( 5, 5, 'aruru', 11389); INSERT INTO articles_hash VALUES ( 6, 6, 'atlases', 15459); INSERT INTO articles_hash VALUES ( 7, 7, 'aseptic', 12298); INSERT INTO articles_hash VALUES ( 8, 8, 'agatized', 16368); INSERT INTO articles_hash VALUES ( 9, 9, 'alligate', 438); INSERT INTO articles_hash VALUES (10, 10, 'aggrandize', 17277); INSERT INTO articles_hash VALUES (11, 1, 'alamo', 1347); INSERT INTO articles_hash VALUES (12, 2, 'archiblast', 18185); INSERT INTO articles_hash VALUES (13, 3, 'aseyev', 2255); INSERT INTO articles_hash VALUES (14, 4, 'andesite', 19094); INSERT INTO articles_hash VALUES (15, 5, 'adversa', 3164); INSERT INTO articles_hash VALUES (16, 6, 'allonym', 2); INSERT INTO articles_hash VALUES (17, 7, 'auriga', 4073); INSERT INTO articles_hash VALUES (18, 8, 'assembly', 911); INSERT INTO articles_hash VALUES (19, 9, 'aubergiste', 4981); INSERT INTO articles_hash VALUES (20, 10, 'absentness', 1820); INSERT INTO articles_hash VALUES (21, 1, 'arcading', 5890); INSERT INTO articles_hash VALUES (22, 2, 'antipope', 2728); INSERT INTO articles_hash VALUES (23, 3, 'abhorring', 6799); INSERT INTO articles_hash VALUES (24, 4, 'audacious', 3637); INSERT INTO articles_hash VALUES (25, 5, 'antehall', 7707); INSERT INTO articles_hash VALUES (26, 6, 'abington', 4545); INSERT INTO articles_hash VALUES (27, 7, 'arsenous', 8616); INSERT INTO articles_hash VALUES (28, 8, 'aerophyte', 5454); INSERT INTO articles_hash VALUES (29, 9, 'amateur', 9524); INSERT INTO articles_hash VALUES (30, 10, 'andelee', 6363); INSERT INTO articles_hash VALUES (31, 1, 'athwartships', 7271); INSERT INTO articles_hash VALUES (32, 2, 'amazon', 11342); INSERT INTO articles_hash VALUES (33, 3, 'autochrome', 8180); INSERT INTO articles_hash VALUES (34, 4, 'amnestied', 12250); INSERT INTO articles_hash VALUES (35, 5, 'aminate', 9089); INSERT INTO articles_hash VALUES (36, 6, 'ablation', 13159); INSERT INTO articles_hash VALUES (37, 7, 'archduchies', 9997); INSERT INTO articles_hash VALUES (38, 8, 'anatine', 14067); INSERT INTO articles_hash VALUES (39, 9, 'anchises', 10906); INSERT INTO articles_hash 
VALUES (40, 10, 'attemper', 14976); INSERT INTO articles_hash VALUES (41, 1, 'aznavour', 11814); INSERT INTO articles_hash VALUES (42, 2, 'ausable', 15885); INSERT INTO articles_hash VALUES (43, 3, 'affixal', 12723); INSERT INTO articles_hash VALUES (44, 4, 'anteport', 16793); INSERT INTO articles_hash VALUES (45, 5, 'afrasia', 864); INSERT INTO articles_hash VALUES (46, 6, 'atlanta', 17702); INSERT INTO articles_hash VALUES (47, 7, 'abeyance', 1772); INSERT INTO articles_hash VALUES (48, 8, 'alkylic', 18610); INSERT INTO articles_hash VALUES (49, 9, 'anyone', 2681); INSERT INTO articles_hash VALUES (50, 10, 'anjanette', 19519); SET citus.task_executor_type TO 'real-time'; SET citus.large_table_shard_count TO 2; SET client_min_messages TO 'DEBUG2'; -- insert a single row for the test INSERT INTO articles_single_shard_hash VALUES (50, 10, 'anjanette', 19519); DEBUG: Creating router plan DEBUG: Plan is router executable -- single-shard tests -- test simple select for a single row SELECT * FROM articles_hash WHERE author_id = 10 AND id = 50; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-----------+------------ 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles_hash WHERE author_id = 10; DEBUG: Creating router plan DEBUG: Plan is router executable title ------------ aggrandize absentness andelee attemper anjanette (5 rows) -- try ordering them by word count SELECT title, word_count FROM articles_hash WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; DEBUG: Creating router plan DEBUG: Plan is router executable title | word_count ------------+------------ anjanette | 19519 aggrandize | 17277 attemper | 14976 andelee | 6363 absentness | 1820 (5 rows) -- look at last two articles by an author SELECT title, id FROM articles_hash WHERE author_id = 5 ORDER BY id LIMIT 2; DEBUG: Creating router plan DEBUG: Plan is router executable title | id ---------+---- aruru | 5 adversa | 15 (2 rows) -- find all articles by two authors in same shard -- but plan is not router executable due to order by SELECT title, author_id FROM articles_hash WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id -------------+----------- aseptic | 7 auriga | 7 arsenous | 7 archduchies | 7 abeyance | 7 agatized | 8 assembly | 8 aerophyte | 8 anatine | 8 alkylic | 8 (10 rows) -- same query is router executable with no order by SELECT title, author_id FROM articles_hash WHERE author_id = 7 OR author_id = 8; DEBUG: Creating router plan DEBUG: Plan is router executable title | author_id -------------+----------- aseptic | 7 agatized | 8 auriga | 7 assembly | 8 arsenous | 7 aerophyte | 8 archduchies | 7 anatine | 8 abeyance | 7 alkylic | 8 (10 rows) -- add in some grouping expressions, still on same shard -- having queries unsupported in Citus SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10 GROUP BY author_id HAVING sum(word_count) > 1000 ORDER BY sum(word_count) DESC; DEBUG: Creating router plan DEBUG: Plan is router executable author_id | corpus_size -----------+------------- 10 | 59955 8 | 55410 7 | 36756 1 | 35894 (4 rows) -- however having clause is supported if it goes to a single shard SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash WHERE author_id = 1 GROUP BY author_id HAVING sum(word_count) > 1000 ORDER 
BY sum(word_count) DESC; DEBUG: Creating router plan DEBUG: Plan is router executable author_id | corpus_size -----------+------------- 1 | 35894 (1 row) -- query is a single shard query but can't do shard pruning, -- not router-plannable due to <= and IN SELECT * FROM articles_hash WHERE author_id <= 1; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) SELECT * FROM articles_hash WHERE author_id IN (1, 3); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 41 | 1 | aznavour | 11814 43 | 3 | affixal | 12723 (10 rows) -- queries with CTEs are supported WITH first_author AS ( SELECT id FROM articles_hash WHERE author_id = 1) SELECT * FROM first_author; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- queries with CTEs are supported even if CTE is not referenced inside query WITH first_author AS ( SELECT id FROM articles_hash WHERE author_id = 1) SELECT title FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable title -------------- arsenous alamo arcading athwartships aznavour (5 rows) -- two CTE joins are supported if they go to the same worker WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title ----+-----------+----+-------------- 1 | 1 | 1 | arsenous 11 | 1 | 11 | alamo 21 | 1 | 21 | arcading 31 | 1 | 31 | athwartships 41 | 1 | 41 | aznavour (5 rows) WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 3) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title ----+-----------+----+------- (0 rows) -- CTE joins are not supported if table shards are at different workers WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 2) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. 
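-- As the HINT suggests, the CTE join succeeds when every CTE carries the
-- same equality filter on the distribution column, so both CTEs land on
-- the same worker. A minimal sketch, equivalent to the author_id = 1 case
-- above but written with an explicit JOIN:
WITH id_author AS (
    SELECT id, author_id FROM articles_hash WHERE author_id = 1),
id_title AS (
    SELECT id, title FROM articles_hash WHERE author_id = 1)
SELECT count(*) FROM id_author JOIN id_title USING (id);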
-- recursive CTEs are supported when filtered on partition column CREATE TABLE company_employees (company_id int, employee_id int, manager_id int); SELECT master_create_distributed_table('company_employees', 'company_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('company_employees', 4, 1); master_create_worker_shards ----------------------------- (1 row) INSERT INTO company_employees values(1, 1, 0); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees values(1, 2, 1); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees values(1, 3, 1); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees values(1, 4, 2); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees values(1, 5, 4); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees values(3, 1, 0); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees values(3, 15, 1); DEBUG: Creating router plan DEBUG: Plan is router executable INSERT INTO company_employees values(3, 3, 1); DEBUG: Creating router plan DEBUG: Plan is router executable -- find employees at top 2 level within company hierarchy WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable company_id | employee_id | manager_id | level ------------+-------------+------------+------- 1 | 1 | 0 | 1 1 | 2 | 1 | 2 1 | 3 | 1 | 2 (3 rows) -- query becomes not router plannble and gets rejected -- if filter on company is dropped WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. -- logically wrong query, query involves different shards -- from the same table WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 3 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. -- CTE with queries other than SELECT is not supported WITH new_article AS ( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9572) RETURNING * ) SELECT * FROM new_article; DEBUG: data-modifying statements are not supported in the WITH clauses of distributed queries ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. 
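-- The usual workaround for the data-modifying CTE above is to run the
-- modification as its own statement and read the result back with a
-- router-plannable SELECT. A minimal sketch, wrapped in a rolled-back
-- transaction so the data used by the remaining tests is untouched:
BEGIN;
INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9572);
SELECT count(*) FROM articles_hash WHERE author_id = 1;
ROLLBACK;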
-- Modifying statement in nested CTE case is covered by PostgreSQL itself WITH new_article AS ( WITH nested_cte AS ( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9572) RETURNING * ) SELECT * FROM nested_cte ) SELECT * FROM new_article; ERROR: WITH clause containing a data-modifying statement must be at the top level LINE 2: WITH nested_cte AS ( ^ -- Modifying statement in a CTE in subquery is also covered by PostgreSQL SELECT * FROM ( WITH new_article AS ( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9572) RETURNING * ) SELECT * FROM new_article ) AS subquery_cte; ERROR: WITH clause containing a data-modifying statement must be at the top level LINE 2: WITH new_article AS ( ^ -- grouping sets are supported on single shard SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash WHERE author_id = 1 or author_id = 3 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle; DEBUG: Creating router plan DEBUG: Plan is router executable id | subtitle | count ----+----------+------- 1 | | 1 3 | | 1 11 | | 1 13 | | 1 21 | | 1 23 | | 1 31 | | 1 33 | | 1 41 | | 1 43 | | 1 | b | 1 | f | 1 | l | 1 | r | 2 | s | 2 | t | 1 | u | 1 | z | 1 (18 rows) -- grouping sets are not supported on multiple shards SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash WHERE author_id = 1 or author_id = 2 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle; ERROR: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP HINT: Consider using an equality filter on the distributed table's partition column. -- queries which involve functions in FROM clause are supported if it goes to a single worker. SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | position ----+-----------+--------------+------------+---------- 1 | 1 | arsenous | 9572 | 3 11 | 1 | alamo | 1347 | 3 21 | 1 | arcading | 5890 | 3 31 | 1 | athwartships | 7271 | 3 41 | 1 | aznavour | 11814 | 3 (5 rows) SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | position ----+-----------+--------------+------------+---------- 1 | 1 | arsenous | 9572 | 3 3 | 3 | asternal | 10480 | 3 11 | 1 | alamo | 1347 | 3 13 | 3 | aseyev | 2255 | 3 21 | 1 | arcading | 5890 | 3 23 | 3 | abhorring | 6799 | 3 31 | 1 | athwartships | 7271 | 3 33 | 3 | autochrome | 8180 | 3 41 | 1 | aznavour | 11814 | 3 43 | 3 | affixal | 12723 | 3 (10 rows) -- they are not supported if multiple workers are involved SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2; ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. 
-- unless the query can be transformed into a join SELECT * FROM articles_hash WHERE author_id IN (SELECT author_id FROM articles_hash WHERE author_id = 2) ORDER BY articles_hash.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+------------+------------ 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 32 | 2 | amazon | 11342 42 | 2 | ausable | 15885 (5 rows) -- subqueries are supported in FROM clause but they are not router plannable SELECT articles_hash.id,test.word_count FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE test.id = articles_hash.id ORDER BY articles_hash.id; DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 14 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". 
SELECT articles_hash.id,test.word_count FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE test.id = articles_hash.id and articles_hash.author_id = 1 ORDER BY articles_hash.id; DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 9 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". -- subqueries are not supported in SELECT clause SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash a2 WHERE a.id = a2.id LIMIT 1) AS special_price FROM articles_hash a; ERROR: could not run distributed query with subquery outside the FROM and WHERE clauses HINT: Consider using an equality filter on the distributed table's partition column. 
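-- For the repartition-join errors above, the HINT points at switching the
-- executor. A minimal sketch of that approach (the setting is restored to
-- the value the rest of this file uses):
SET citus.task_executor_type TO 'task-tracker';
SELECT count(*)
FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test
WHERE test.id = articles_hash.id;
SET citus.task_executor_type TO 'real-time';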
-- simple lookup query SELECT * FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- below query hits a single shard, router plannable SELECT * FROM articles_hash WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- below query hits two shards, not router plannable + not router executable -- handled by real-time executor SELECT * FROM articles_hash WHERE author_id = 1 OR author_id = 18; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- rename the output columns SELECT id as article_id, word_count * id as random_value FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable article_id | random_value ------------+-------------- 1 | 9572 11 | 14817 21 | 123690 31 | 225401 41 | 484374 (5 rows) -- we can push down co-located joins to a single worker SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_hash b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- 10 | 17277 10 | 1820 10 | 6363 (3 rows) -- following join is router plannable since the same worker -- has both shards SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- 10 | 19519 10 | 19519 10 | 19519 (3 rows) -- following join is not router plannable since there are no -- workers containing both shards, added a CTE to make this fail -- at logical planner WITH single_shard as (SELECT * FROM articles_single_shard_hash) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, single_shard b WHERE a.author_id = 2 and a.author_id = b.author_id LIMIT 3; DEBUG: Found no worker with all shard placements ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. 
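-- Which worker holds which placement can be checked directly in the
-- metadata; a minimal sketch of the catalog query used when reasoning
-- about the co-located and non-co-located cases above:
SELECT shardid, nodename, nodeport
FROM pg_dist_shard_placement
WHERE shardid IN (SELECT shardid
                  FROM pg_dist_shard
                  WHERE logicalrelid = 'articles_single_shard_hash'::regclass)
ORDER BY shardid;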
-- single shard select with limit is router plannable SELECT * FROM articles_hash WHERE author_id = 1 LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (3 rows) -- single shard select with limit + offset is router plannable SELECT * FROM articles_hash WHERE author_id = 1 LIMIT 2 OFFSET 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 (2 rows) -- single shard select with limit + offset + order by is router plannable SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id desc LIMIT 2 OFFSET 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 31 | 1 | athwartships | 7271 21 | 1 | arcading | 5890 (2 rows) -- single shard select with group by on non-partition column is router plannable SELECT id FROM articles_hash WHERE author_id = 1 GROUP BY id ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- single shard select with distinct is router plannable SELECT DISTINCT id FROM articles_hash WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- single shard aggregate is router plannable SELECT avg(word_count) FROM articles_hash WHERE author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable avg -------------------- 12356.400000000000 (1 row) -- max, min, sum, count are router plannable on single shard SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles_hash WHERE author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable max | min | sum | cnt -------+------+-------+----- 18185 | 2728 | 61782 | 5 (1 row) -- queries with aggregates and group by supported on single shard SELECT max(word_count) FROM articles_hash WHERE author_id = 1 GROUP BY author_id; DEBUG: Creating router plan DEBUG: Plan is router executable max ------- 11814 (1 row) -- router plannable union queries are supported SELECT * FROM ( SELECT * FROM articles_hash WHERE author_id = 1 UNION SELECT * FROM articles_hash WHERE author_id = 3 ) AS combination ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 41 | 1 | aznavour | 11814 43 | 3 | affixal | 12723 (10 rows) (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 1) UNION (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable left ------ a (1 row) (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 1) INTERSECT (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3); DEBUG: Creating router plan DEBUG: Plan is router executable left ------ a (1 row) SELECT * FROM ( SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 1 EXCEPT SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 3 ) AS combination ORDER BY 1; DEBUG: Creating router plan DEBUG: Plan is router 
executable left ------ al ar at az (4 rows) -- union queries are not supported if not router plannable -- there is an inconsistency on shard pruning between -- ubuntu/mac disabling log messages for this queries only SET client_min_messages to 'NOTICE'; (SELECT * FROM articles_hash WHERE author_id = 1) UNION (SELECT * FROM articles_hash WHERE author_id = 2); ERROR: could not run distributed query with UNION, INTERSECT, or EXCEPT HINT: Consider using an equality filter on the distributed table's partition column. SELECT * FROM ( (SELECT * FROM articles_hash WHERE author_id = 1) UNION (SELECT * FROM articles_hash WHERE author_id = 2)) uu; ERROR: cannot push down this subquery DETAIL: Currently all leaf queries need to have same filters on partition column -- error out for queries with repartition jobs SELECT * FROM articles_hash a, articles_hash b WHERE a.id = b.id AND a.author_id = 1; ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". -- queries which hit more than 1 shards are not router plannable or executable -- handled by real-time executor SELECT * FROM articles_hash WHERE author_id >= 1 AND author_id <= 3; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 3 | 3 | asternal | 10480 11 | 1 | alamo | 1347 13 | 3 | aseyev | 2255 21 | 1 | arcading | 5890 23 | 3 | abhorring | 6799 31 | 1 | athwartships | 7271 33 | 3 | autochrome | 8180 41 | 1 | aznavour | 11814 43 | 3 | affixal | 12723 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 32 | 2 | amazon | 11342 42 | 2 | ausable | 15885 (15 rows) SET citus.task_executor_type TO 'real-time'; -- Test various filtering options for router plannable check SET client_min_messages to 'DEBUG2'; -- this is definitely single shard -- and router plannable SELECT * FROM articles_hash WHERE author_id = 1 and author_id >= 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to or SELECT * FROM articles_hash WHERE author_id = 1 or id = 1; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable SELECT * FROM articles_hash WHERE author_id = 1 and (id = 1 or id = 41); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 41 | 1 | aznavour | 11814 (2 rows) -- router plannable SELECT * FROM articles_hash WHERE author_id = 1 and (id = random()::int * 0); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- not router plannable due to function call on the right side SELECT * FROM articles_hash WHERE author_id = (random()::int * 0 + 1); id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to or SELECT * FROM articles_hash WHERE author_id = 1 or id = 1; id | author_id | title | word_count 
----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable due to abs(-1) getting converted to 1 by postgresql SELECT * FROM articles_hash WHERE author_id = abs(-1); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to abs() function SELECT * FROM articles_hash WHERE 1 = abs(author_id); id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- not router plannable due to abs() function SELECT * FROM articles_hash WHERE author_id = abs(author_id - 2); id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable, function on different field SELECT * FROM articles_hash WHERE author_id = 1 and (id = abs(id - 2)); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 (1 row) -- not router plannable due to is true SELECT * FROM articles_hash WHERE (author_id = 1) is true; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable, (boolean expression) = true is collapsed to (boolean expression) SELECT * FROM articles_hash WHERE (author_id = 1) = true; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable, between operator is on another column SELECT * FROM articles_hash WHERE (author_id = 1) and id between 0 and 20; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) -- router plannable, partition column expression is and'ed to rest SELECT * FROM articles_hash WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s'; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) -- router plannable, order is changed SELECT * FROM articles_hash WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 31 | 1 | athwartships | 7271 (2 rows) -- router plannable SELECT * FROM articles_hash WHERE (title like '%s' or title like 'a%') and (author_id = 1); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous 
| 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable SELECT * FROM articles_hash WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 41 | 1 | aznavour | 11814 (3 rows) -- window functions are supported if query is router plannable SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash WHERE author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable prev | title | word_count ----------+----------+------------ | afrasia | 864 afrasia | adversa | 3164 adversa | antehall | 7707 antehall | aminate | 9089 aminate | aruru | 11389 (5 rows) SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash WHERE author_id = 5 ORDER BY word_count DESC; DEBUG: Creating router plan DEBUG: Plan is router executable prev | title | word_count ----------+----------+------------ aminate | aruru | 11389 antehall | aminate | 9089 adversa | antehall | 7707 afrasia | adversa | 3164 | afrasia | 864 (5 rows) SELECT id, MIN(id) over (order by word_count) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | min ----+----- 11 | 11 21 | 11 31 | 11 1 | 1 41 | 1 (5 rows) SELECT id, word_count, AVG(word_count) over (order by word_count) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | word_count | avg ----+------------+----------------------- 11 | 1347 | 1347.0000000000000000 21 | 5890 | 3618.5000000000000000 31 | 7271 | 4836.0000000000000000 1 | 9572 | 6020.0000000000000000 41 | 11814 | 7178.8000000000000000 (5 rows) SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable word_count | rank ------------+------ 1347 | 1 5890 | 2 7271 | 3 9572 | 4 11814 | 5 (5 rows) -- window functions are not supported for not router plannable queries SELECT id, MIN(id) over (order by word_count) FROM articles_hash WHERE author_id = 1 or author_id = 2; ERROR: could not run distributed query with window functions HINT: Consider using an equality filter on the distributed table's partition column. SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash WHERE author_id = 5 or author_id = 2; ERROR: could not run distributed query with window functions HINT: Consider using an equality filter on the distributed table's partition column. 
-- where false queries are router plannable SELECT * FROM articles_hash WHERE false; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) SELECT * FROM articles_hash WHERE author_id = 1 and false; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) SELECT * FROM articles_hash WHERE author_id = 1 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and false; DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- (0 rows) SELECT * FROM articles_hash WHERE null; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- where false with immutable function returning false SELECT * FROM articles_hash a WHERE a.author_id = 10 and int4eq(1, 2); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) SELECT * FROM articles_hash a WHERE int4eq(1, 2); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and int4eq(1, 1); DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- 10 | 19519 10 | 19519 10 | 19519 10 | 19519 10 | 19519 (5 rows) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and int4eq(1, 2); DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- (0 rows) -- partition_column is null clause does not prune out any shards, -- all shards remain after shard pruning, not router plannable SELECT * FROM articles_hash a WHERE a.author_id is null; id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- partition_column equals to null clause prunes out all shards -- no shards after shard pruning, router plannable SELECT * FROM articles_hash a WHERE a.author_id = null; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- stable function returning bool SELECT * FROM articles_hash a WHERE date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- (0 rows) -- 
union/difference /intersection with where false -- this query was not originally router plannable, addition of 1=0 -- makes it router plannable SELECT * FROM ( SELECT * FROM articles_hash WHERE author_id = 1 UNION SELECT * FROM articles_hash WHERE author_id = 2 and 1=0 ) AS combination ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) SELECT * FROM ( SELECT * FROM articles_hash WHERE author_id = 1 EXCEPT SELECT * FROM articles_hash WHERE author_id = 2 and 1=0 ) AS combination ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) (SELECT * FROM articles_hash WHERE author_id = 1) INTERSECT (SELECT * FROM articles_hash WHERE author_id = 2 and 1=0); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- CTEs with where false WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 1 and 1=0) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title ----+-----------+----+------- (0 rows) WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | id | title ----+-----------+----+------- (0 rows) WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable company_id | employee_id | manager_id | level ------------+-------------+------------+------- (0 rows) WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1 AND 1=0)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable company_id | employee_id | manager_id | level ------------+-------------+------------+------- 1 | 1 | 0 | 1 (1 row) WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 AND 1=0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Creating router plan DEBUG: Plan is router executable company_id | employee_id | manager_id | level ------------+-------------+------------+------- (0 rows) -- window functions with where false SELECT 
word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) FROM articles_hash WHERE author_id = 1 and 1=0; DEBUG: Creating router plan DEBUG: Plan is router executable word_count | rank ------------+------ (0 rows) -- function calls in WHERE clause with non-relational arguments SELECT author_id FROM articles_hash WHERE substring('hello world', 1, 5) = 'hello' ORDER BY author_id LIMIT 1; DEBUG: push down of limit count: 1 author_id ----------- 1 (1 row) -- when expression evaluates to false SELECT author_id FROM articles_hash WHERE substring('hello world', 1, 4) = 'hello' ORDER BY author_id LIMIT 1; DEBUG: Creating router plan DEBUG: Plan is router executable author_id ----------- (0 rows) -- verify range partitioned tables can be used in router plannable queries -- just 4 shards to be created for each table to make sure -- they are 'co-located' pairwise SET citus.shard_replication_factor TO 1; SELECT master_create_distributed_table('authors_range', 'id', 'range'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('articles_range', 'author_id', 'range'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue=10 WHERE shardid = :shard_id; SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 11, shardmaxvalue=30 WHERE shardid = :shard_id; SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 21, shardmaxvalue=40 WHERE shardid = :shard_id; SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 31, shardmaxvalue=40 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue=10 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 11, shardmaxvalue=30 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 21, shardmaxvalue=40 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 31, shardmaxvalue=40 WHERE shardid = :shard_id; -- single shard select queries are router plannable SELECT * FROM articles_range where author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) SELECT * FROM articles_range where author_id = 1 or author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- zero shard select query is router plannable SELECT * FROM articles_range where author_id = 1 and author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) -- single shard joins on range partitioned table are router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- zero shard join is 
router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1 and au.id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- multi-shard join is not router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 35; DEBUG: join prunable for intervals [21,40] and [1,10] DEBUG: join prunable for intervals [31,40] and [1,10] DEBUG: join prunable for intervals [31,40] and [11,30] id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- this is a bug, it is a single shard join query but not router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1 or au.id = 5; DEBUG: join prunable for intervals [1,10] and [11,30] DEBUG: join prunable for intervals [1,10] and [21,40] DEBUG: join prunable for intervals [1,10] and [31,40] DEBUG: join prunable for intervals [11,30] and [1,10] DEBUG: join prunable for intervals [11,30] and [31,40] DEBUG: join prunable for intervals [21,40] and [1,10] DEBUG: join prunable for intervals [31,40] and [1,10] DEBUG: join prunable for intervals [31,40] and [11,30] id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- bogus query, join on non-partition column, but router plannable due to filters SELECT * FROM articles_range ar join authors_range au on (ar.id = au.id) WHERE ar.author_id = 1 and au.id < 10; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- join between hash and range partition tables are router plannable -- only if both tables pruned down to single shard and co-located on the same -- node. 
-- router plannable SELECT * FROM articles_hash ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- not router plannable SELECT * FROM articles_hash ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 3; DEBUG: Found no worker with all shard placements DEBUG: join prunable for intervals [1,10] and [11,30] DEBUG: join prunable for intervals [1,10] and [21,40] DEBUG: join prunable for intervals [1,10] and [31,40] DEBUG: join prunable for intervals [11,30] and [1,10] DEBUG: join prunable for intervals [11,30] and [31,40] DEBUG: join prunable for intervals [21,40] and [1,10] DEBUG: join prunable for intervals [31,40] and [1,10] DEBUG: join prunable for intervals [31,40] and [11,30] DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 10 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 13 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 16 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 19 DETAIL: Creating dependency on merge taskId 9 DEBUG: pruning merge fetch taskId 22 DETAIL: Creating dependency on merge taskId 9 ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". -- join between a range partitioned table and reference table is router plannable SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id) WHERE ar.author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- still hits a single shard and router plannable SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id) WHERE ar.author_id = 1 or ar.author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- it is not router plannable if it hits multiple shards SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id) WHERE ar.author_id = 1 or ar.author_id = 15; id | author_id | title | word_count | name | id ----+-----------+-------+------------+------+---- (0 rows) -- following is a bug, function should have been -- evaluated at master before going to worker -- need to use an append distributed table here SELECT master_create_distributed_table('articles_append', 'author_id', 'append'); master_create_distributed_table --------------------------------- (1 row) SET citus.shard_replication_factor TO 1; SELECT master_create_empty_shard('articles_append') AS shard_id \gset UPDATE pg_dist_shard SET shardmaxvalue = 100, shardminvalue=1 WHERE shardid = :shard_id; SELECT author_id FROM articles_append WHERE substring('articles_append'::regclass::text, 1, 5) = 'hello' ORDER BY author_id LIMIT 1; DEBUG: push down of limit count: 1 WARNING: relation "public.articles_append" does not exist CONTEXT: while executing command on localhost:57638 WARNING: relation "public.articles_append" does not exist CONTEXT: while executing command on localhost:57638 
WARNING: relation "public.articles_append" does not exist CONTEXT: while executing command on localhost:57638 ERROR: failed to execute task 2 -- same query with where false but evaluation left to worker SELECT author_id FROM articles_append WHERE substring('articles_append'::regclass::text, 1, 4) = 'hello' ORDER BY author_id LIMIT 1; DEBUG: push down of limit count: 1 WARNING: relation "public.articles_append" does not exist CONTEXT: while executing command on localhost:57638 WARNING: relation "public.articles_append" does not exist CONTEXT: while executing command on localhost:57638 WARNING: relation "public.articles_append" does not exist CONTEXT: while executing command on localhost:57638 ERROR: failed to execute task 2 -- same query on router planner with where false but evaluation left to worker SELECT author_id FROM articles_single_shard_hash WHERE substring('articles_single_shard_hash'::regclass::text, 1, 4) = 'hello' ORDER BY author_id LIMIT 1; DEBUG: Creating router plan DEBUG: Plan is router executable WARNING: relation "public.articles_single_shard_hash" does not exist CONTEXT: while executing command on localhost:57637 ERROR: could not receive query results SELECT author_id FROM articles_hash WHERE author_id = 1 AND substring('articles_hash'::regclass::text, 1, 5) = 'hello' ORDER BY author_id LIMIT 1; DEBUG: Creating router plan DEBUG: Plan is router executable WARNING: relation "public.articles_hash" does not exist CONTEXT: while executing command on localhost:57637 ERROR: could not receive query results -- create a dummy function to be used in filtering CREATE OR REPLACE FUNCTION someDummyFunction(regclass) RETURNS text AS $$ BEGIN RETURN md5($1::text); END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- not router plannable, returns all rows SELECT * FROM articles_hash WHERE someDummyFunction('articles_hash') = md5('articles_hash') ORDER BY author_id, id LIMIT 5; DEBUG: push down of limit count: 5 id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- router plannable, errors SELECT * FROM articles_hash WHERE someDummyFunction('articles_hash') = md5('articles_hash') AND author_id = 1 ORDER BY author_id, id LIMIT 5; DEBUG: Creating router plan DEBUG: Plan is router executable WARNING: relation "public.articles_hash" does not exist CONTEXT: while executing command on localhost:57637 ERROR: could not receive query results -- temporarily turn off debug messages before dropping the function SET client_min_messages TO 'NOTICE'; DROP FUNCTION someDummyFunction(regclass); SET client_min_messages TO 'DEBUG2'; -- complex query hitting a single shard SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash WHERE author_id = 5; DEBUG: Creating router plan DEBUG: Plan is router executable c --- 5 (1 row) -- same query is not router plannable if hits multiple shards SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash GROUP BY author_id; c --- 4 5 5 5 5 5 5 5 5 5 (10 rows) -- queries inside transactions can be router plannable BEGIN; SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 
(5 rows) END; -- queries inside read-only transactions can be router plannable BEGIN; SET TRANSACTION READ ONLY; SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) END; -- cursor queries are router plannable BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable FETCH test_cursor; id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 (1 row) FETCH ALL test_cursor; id | author_id | title | word_count ----+-----------+--------------+------------ 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (4 rows) FETCH test_cursor; -- fetch one row after the last id | author_id | title | word_count ----+-----------+-------+------------ (0 rows) FETCH BACKWARD test_cursor; id | author_id | title | word_count ----+-----------+----------+------------ 41 | 1 | aznavour | 11814 (1 row) END; -- queries inside copy can be router plannable COPY ( SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id) TO STDOUT; DEBUG: Creating router plan DEBUG: Plan is router executable 1 1 arsenous 9572 11 1 alamo 1347 21 1 arcading 5890 31 1 athwartships 7271 41 1 aznavour 11814 -- table creation queries inside can be router plannable CREATE TEMP TABLE temp_articles_hash as SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable -- router plannable queries may include filter for aggregates SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable count | count -------+------- 5 | 1 (1 row) -- non-router plannable queries also support filters SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash WHERE author_id = 1 or author_id = 2; count | count -------+------- 10 | 2 (1 row) -- prepare queries can be router plannable PREPARE author_1_articles as SELECT * FROM articles_hash WHERE author_id = 1; EXECUTE author_1_articles; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- parametric prepare queries can be router plannable PREPARE author_articles(int) as SELECT * FROM articles_hash WHERE author_id = $1; EXECUTE author_articles(1); DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- queries inside plpgsql functions could be router plannable CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; BEGIN SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1 into max_id; return max_id; END; $$ LANGUAGE plpgsql; SELECT author_articles_max_id(); DEBUG: Creating router plan CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function 
author_articles_max_id() line 5 at SQL statement DEBUG: Plan is router executable CONTEXT: SQL statement "SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_max_id() line 5 at SQL statement author_articles_max_id ------------------------ 41 (1 row) -- check that a function returning a setof query is router plannable CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$ DECLARE BEGIN RETURN QUERY SELECT ah.id, ah.word_count FROM articles_hash ah WHERE author_id = 1; END; $$ LANGUAGE plpgsql; SELECT * FROM author_articles_id_word_count(); DEBUG: Creating router plan CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY DEBUG: Plan is router executable CONTEXT: SQL statement "SELECT ah.id, ah.word_count FROM articles_hash ah WHERE author_id = 1" PL/pgSQL function author_articles_id_word_count() line 4 at RETURN QUERY id | word_count ----+------------ 1 | 9572 11 | 1347 21 | 5890 31 | 7271 41 | 11814 (5 rows) -- materialized views can be created for router plannable queries CREATE MATERIALIZED VIEW mv_articles_hash_empty AS SELECT * FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable SELECT * FROM mv_articles_hash_empty; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) CREATE MATERIALIZED VIEW mv_articles_hash_data AS SELECT * FROM articles_hash WHERE author_id in (1,2); SELECT * FROM mv_articles_hash_data; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 2 | 2 | abducing | 13642 12 | 2 | archiblast | 18185 22 | 2 | antipope | 2728 32 | 2 | amazon | 11342 42 | 2 | ausable | 15885 (10 rows) -- router planner/executor is now enabled for task-tracker executor SET citus.task_executor_type to 'task-tracker'; SELECT id FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- insert query is router plannable even under task-tracker INSERT INTO articles_hash VALUES (51, 1, 'amateus', 1814); DEBUG: Creating router plan DEBUG: Plan is router executable -- verify insert is successful (router plannable and executable) SELECT id FROM articles_hash WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 51 (6 rows) SET client_min_messages to 'NOTICE'; -- test that a connection failure marks placements invalid SET citus.shard_replication_factor TO 2; CREATE TABLE failure_test (a int, b int); SELECT master_create_distributed_table('failure_test', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('failure_test', 2); master_create_worker_shards ----------------------------- (1 row) CREATE USER router_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - - - :worker_1_port CREATE USER router_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - router_user - :master_port -- first test that it is marked invalid inside a transaction block -- we will fail to connect to worker 2, since the user does not exist BEGIN; INSERT INTO failure_test VALUES (1, 1); WARNING: connection error: localhost:57638 DETAIL: no connection to the server SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'failure_test'::regclass ) ORDER BY placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 840017 | 1 | localhost | 57637 840017 | 3 | localhost | 57638 840018 | 1 | localhost | 57638 840018 | 1 | localhost | 57637 (4 rows) ROLLBACK; INSERT INTO failure_test VALUES (2, 1); WARNING: connection error: localhost:57638 DETAIL: no connection to the server SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'failure_test'::regclass ) ORDER BY placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 840017 | 1 | localhost | 57637 840017 | 1 | localhost | 57638 840018 | 3 | localhost | 57638 840018 | 1 | localhost | 57637 (4 rows) \c - postgres - :worker_1_port DROP OWNED BY router_user; DROP USER router_user; \c - - - :master_port DROP OWNED BY router_user; DROP USER router_user; DROP TABLE failure_test; DROP FUNCTION author_articles_max_id(); DROP FUNCTION author_articles_id_word_count(); DROP MATERIALIZED VIEW mv_articles_hash_empty; DROP MATERIALIZED VIEW mv_articles_hash_data; DROP TABLE articles_hash; DROP TABLE articles_single_shard_hash; DROP TABLE authors_hash; DROP TABLE authors_range; DROP TABLE authors_reference; DROP TABLE company_employees; DROP TABLE articles_range; DROP TABLE articles_append; citus-7.0.3/src/test/regress/expected/multi_schema_support.out000066400000000000000000001233561317107136600247070ustar00rootroot00000000000000-- -- MULTI_SCHEMA_SUPPORT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1190000; -- create schema to test schema support CREATE SCHEMA test_schema_support; -- test master_append_table_to_shard with schema -- create local table to append CREATE TABLE public.nation_local( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); \copy public.nation_local FROM STDIN with delimiter '|'; CREATE TABLE test_schema_support.nation_append( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_append', 'n_nationkey', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('test_schema_support.nation_append'); master_create_empty_shard --------------------------- 1190000 (1 row) -- append table to shard SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------ 0.0266667 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support.nation_append; count 
------- 6 (1 row) -- test with shard name contains special characters CREATE TABLE test_schema_support."nation._'append" ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT master_create_distributed_table('test_schema_support."nation._''append"', 'n_nationkey', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('test_schema_support."nation._''append"'); master_create_empty_shard --------------------------- 1190001 (1 row) SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------ 0.0266667 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support."nation._'append"; count ------- 6 (1 row) -- test master_append_table_to_shard with schema with search_path is set SET search_path TO test_schema_support; SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------ 0.0266667 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM nation_append; count ------- 12 (1 row) -- test with search_path is set and shard name contains special characters SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); master_append_table_to_shard ------------------------------ 0.0266667 (1 row) -- verify table actually appended to shard SELECT COUNT(*) FROM "nation._'append"; count ------- 12 (1 row) -- test shard creation on append(by data loading) and hash distributed(with UDF) tables -- when search_path is set SET search_path TO test_schema_support; -- create shard with COPY on append distributed table CREATE TABLE nation_append_search_path( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); master_create_distributed_table --------------------------------- (1 row) \copy nation_append_search_path FROM STDIN with delimiter '|'; -- create shard with master_create_worker_shards CREATE TABLE test_schema_support.nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 2); master_create_worker_shards ----------------------------- (1 row) -- test cursors SET search_path TO public; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM test_schema_support.nation_append WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. 
bold requests alon (1 row) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) END; -- test with search_path is set SET search_path TO test_schema_support; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM nation_append WHERE n_nationkey = 1; FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) FETCH BACKWARD test_cursor; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) END; -- test inserting to table in different schema SET search_path TO public; INSERT INTO test_schema_support.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (6, 'FRANCE', 3); -- verify insertion SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey = 6; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+----------- 6 | FRANCE | 3 | (1 row) -- test with search_path is set SET search_path TO test_schema_support; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (7, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 7; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+----------- 7 | GERMANY | 3 | (1 row) -- test UDFs with schemas SET search_path TO public; \copy test_schema_support.nation_hash FROM STDIN with delimiter '|'; -- create UDF in master node CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 1 \c - - - :worker_1_port CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 2 \c - - - :worker_2_port CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; \c - - - :master_port -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction --------------- 1 10 11 2 4 5 7 8 (8 rows) -- UDF in public, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT public.dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction --------------- 1 10 11 2 4 5 7 8 (8 rows) -- create UDF in master node in 
schema SET search_path TO test_schema_support; CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 1 in schema \c - - - :worker_1_port SET search_path TO test_schema_support; CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 2 in schema \c - - - :worker_2_port SET search_path TO test_schema_support; CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; \c - - - :master_port -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; dummyfunction2 ---------------- 1 10 11 2 4 5 7 8 (8 rows) -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT dummyFunction2(n_nationkey) FROM nation_hash GROUP BY 1 ORDER BY 1; dummyfunction2 ---------------- 1 10 11 2 4 5 7 8 (8 rows) -- test operators with schema SET search_path TO public; -- create operator in master CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); -- create operator in worker node 1 \c - - - :worker_1_port CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); -- create operator in worker node 2 \c - - - :worker_2_port CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); \c - - - :master_port -- test with search_path is not set SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_schema_support.===) 1; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) -- test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------ 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon (1 row) -- test with master_modify_multiple_shards SET search_path TO public; SELECT master_modify_multiple_shards('UPDATE test_schema_support.nation_hash SET n_regionkey = n_regionkey + 1'); master_modify_multiple_shards ------------------------------- 8 (1 row) --verify master_modify_multiple_shards SELECT * FROM test_schema_support.nation_hash; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 1 | ARGENTINA | 2 | al foxes promise slyly according to the regular accounts. bold requests alon 5 | ETHIOPIA | 1 | ven packages wake quickly. 
regu 7 | GERMANY | 4 | 0 | ALGERIA | 1 | haggle. carefully final deposits detect slyly agai 3 | CANADA | 2 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 5 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 6 | FRANCE | 4 | 2 | BRAZIL | 2 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (8 rows) --test with search_path is set SET search_path TO test_schema_support; SELECT master_modify_multiple_shards('UPDATE nation_hash SET n_regionkey = n_regionkey + 1'); master_modify_multiple_shards ------------------------------- 8 (1 row) --verify master_modify_multiple_shards SELECT * FROM nation_hash; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 1 | ARGENTINA | 3 | al foxes promise slyly according to the regular accounts. bold requests alon 5 | ETHIOPIA | 2 | ven packages wake quickly. regu 7 | GERMANY | 5 | 0 | ALGERIA | 2 | haggle. carefully final deposits detect slyly agai 3 | CANADA | 3 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 6 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 6 | FRANCE | 5 | 2 | BRAZIL | 3 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (8 rows) --test COLLATION with schema SET search_path TO public; CREATE COLLATION test_schema_support.english FROM "en_US"; -- create COLLATION in worker node 1 in schema \c - - - :worker_1_port CREATE COLLATION test_schema_support.english FROM "en_US"; -- create COLLATION in worker node 2 in schema \c - - - :worker_2_port CREATE COLLATION test_schema_support.english FROM "en_US"; \c - - - :master_port CREATE TABLE test_schema_support.nation_hash_collation( n_nationkey integer not null, n_name char(25) not null COLLATE test_schema_support.english, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', 4, 2); master_create_worker_shards ----------------------------- (1 row) \copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_collation; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. 
slyly special (6 rows) SELECT n_comment FROM test_schema_support.nation_hash_collation ORDER BY n_comment COLLATE test_schema_support.english; n_comment ------------------------------------------------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) --test with search_path is set SET search_path TO test_schema_support; CREATE TABLE nation_hash_collation_search_path( n_nationkey integer not null, n_name char(25) not null COLLATE english, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2); master_create_worker_shards ----------------------------- (1 row) \copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; SELECT * FROM nation_hash_collation_search_path; n_nationkey | n_name | n_regionkey | n_comment -------------+---------------------------+-------------+------------------------------------------------------------------------------------------------------------- 1 | ARGENTINA | 1 | al foxes promise slyly according to the regular accounts. bold requests alon 5 | ETHIOPIA | 0 | ven packages wake quickly. regu 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai 3 | CANADA | 1 | eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 | EGYPT | 4 | y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 2 | BRAZIL | 1 | y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special (6 rows) SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; n_comment ------------------------------------------------------------------------------------------------------------- al foxes promise slyly according to the regular accounts. bold requests alon eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold haggle. carefully final deposits detect slyly agai ven packages wake quickly. regu y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d y alongside of the pending deposits. carefully special packages are about the ironic forges. 
slyly special (6 rows) --test composite types with schema SET search_path TO public; CREATE TYPE test_schema_support.new_composite_type as (key1 text, key2 text); -- create type in worker node 1 in schema \c - - - :worker_1_port CREATE TYPE test_schema_support.new_composite_type as (key1 text, key2 text); -- create type in worker node 2 in schema \c - - - :worker_2_port CREATE TYPE test_schema_support.new_composite_type as (key1 text, key2 text); \c - - - :master_port CREATE TABLE test_schema_support.nation_hash_composite_types( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152), test_col test_schema_support.new_composite_type ); SELECT master_create_distributed_table('test_schema_support.nation_hash_composite_types', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_types', 4, 2); master_create_worker_shards ----------------------------- (1 row) -- insert some data to verify composite type queries \copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = '(a,a)'::test_schema_support.new_composite_type; n_nationkey | n_name | n_regionkey | n_comment | test_col -------------+---------------------------+-------------+----------------------------------------------------+---------- 0 | ALGERIA | 0 | haggle. carefully final deposits detect slyly agai | (a,a) (1 row) --test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type; n_nationkey | n_name | n_regionkey | n_comment | test_col -------------+---------------------------+-------------+----------------------------------------------------+---------- 0 | ALGERIA | 0 | haggle. 
carefully final deposits detect slyly agai | (a,a) (1 row) -- test ALTER TABLE ADD/DROP queries with schemas SET search_path TO public; ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- verify column is added SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :master_port ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS non_existent_column; NOTICE: column "non_existent_column" of relation "nation_hash" does not exist, skipping NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :master_port --test with search_path is set SET search_path TO test_schema_support; ALTER TABLE nation_hash ADD COLUMN new_col INT; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' -- verify column is added SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | new_col | integer | (5 rows) \c - - - :master_port SET search_path TO test_schema_support; ALTER TABLE nation_hash DROP COLUMN IF EXISTS non_existent_column; NOTICE: column "non_existent_column" 
of relation "nation_hash" does not exist, skipping NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; Column | Type | Modifiers -------------+------------------------+----------- n_nationkey | integer | not null n_name | character(25) | not null n_regionkey | integer | not null n_comment | character varying(152) | (4 rows) \c - - - :master_port -- test CREATE/DROP INDEX with schemas SET search_path TO public; -- CREATE index CREATE INDEX index1 ON test_schema_support.nation_hash(n_name); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' --verify INDEX is created \d test_schema_support.index1 Index "test_schema_support.index1" Column | Type | Definition --------+---------------+------------ n_name | character(25) | n_name btree, for table "test_schema_support.nation_hash" \c - - - :worker_1_port \d test_schema_support.index1_1190003 Index "test_schema_support.index1_1190003" Column | Type | Definition --------+---------------+------------ n_name | character(25) | n_name btree, for table "test_schema_support.nation_hash_1190003" \c - - - :master_port -- DROP index DROP INDEX test_schema_support.index1; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' --verify INDEX is dropped \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port --test with search_path is set SET search_path TO test_schema_support; -- CREATE index CREATE INDEX index1 ON nation_hash(n_name); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' --verify INDEX is created \d test_schema_support.index1 Index "test_schema_support.index1" Column | Type | Definition --------+---------------+------------ n_name | character(25) | n_name btree, for table "test_schema_support.nation_hash" \c - - - :worker_1_port \d test_schema_support.index1_1190003 Index "test_schema_support.index1_1190003" Column | Type | Definition --------+---------------+------------ n_name | character(25) | n_name btree, for table "test_schema_support.nation_hash_1190003" \c - - - :master_port -- DROP index SET search_path TO test_schema_support; DROP INDEX index1; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' --verify INDEX is dropped \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port -- test master_copy_shard_placement with schemas SET search_path TO public; -- mark shard as inactive UPDATE 
pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); master_copy_shard_placement ----------------------------- (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport ------------+-----------+---------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) --test with search_path is set SET search_path TO test_schema_support; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); master_copy_shard_placement ----------------------------- (1 row) -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; shardstate | nodename | nodeport ------------+-----------+---------- 1 | localhost | 57637 1 | localhost | 57638 (2 rows) -- test master_apply_delete_command with schemas SET search_path TO public; SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_append') ; master_apply_delete_command ----------------------------- 1 (1 row) -- verify shard is dropped \c - - - :worker_1_port \d test_schema_support.nation_append_119* \c - - - :master_port -- test with search_path is set SET search_path TO test_schema_support; \copy nation_append FROM STDIN with delimiter '|'; SELECT master_apply_delete_command('DELETE FROM nation_append') ; master_apply_delete_command ----------------------------- 1 (1 row) -- verify shard is dropped \c - - - :worker_1_port \d test_schema_support.nation_append_119* \c - - - :master_port -- check joins of tables which are in schemas other than public -- we create new tables with replication factor of 1 -- so that we guarantee to have repartitions when necessary -- create necessary objects and load data to them CREATE SCHEMA test_schema_support_join_1; CREATE SCHEMA test_schema_support_join_2; CREATE TABLE test_schema_support_join_1.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); CREATE TABLE test_schema_support_join_1.nation_hash_2 ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); CREATE TABLE test_schema_support_join_2.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, 1); master_create_worker_shards ----------------------------- (1 row) \copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4, 1); master_create_worker_shards ----------------------------- (1 row) \copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; SELECT 
master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4, 1); master_create_worker_shards ----------------------------- (1 row) \copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 6 (1 row) -- check when search_path is public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; count ------- 6 (1 row) -- single repartition joins SET citus.task_executor_type TO "task-tracker"; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; count ------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; count ------- 6 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column and non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; count ------- 6 (1 row) -- hash repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; count ------- 14 (1 row) -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; count ------- 14 (1 row) -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on non-partition column SET search_path 
TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; count ------- 14 (1 row) -- set task_executor back to real-time SET citus.task_executor_type TO "real-time"; -- test ALTER TABLE SET SCHEMA -- we expect that it will warn out SET search_path TO public; ALTER TABLE test_schema_support.nation_hash SET SCHEMA public; WARNING: not propagating ALTER ... SET SCHEMA commands to worker nodes HINT: Connect to worker nodes directly to manually change schemas of affected objects. -- we will use this function in next test CREATE FUNCTION run_command_on_coordinator_and_workers(p_sql text) RETURNS void LANGUAGE plpgsql AS $$ BEGIN EXECUTE p_sql; PERFORM run_command_on_workers(p_sql); END;$$; -- test schema propagation with user other than current user SELECT run_command_on_coordinator_and_workers('CREATE USER "test-user"'); NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CONTEXT: SQL statement "CREATE USER "test-user"" PL/pgSQL function run_command_on_coordinator_and_workers(text) line 3 at EXECUTE run_command_on_coordinator_and_workers ---------------------------------------- (1 row) SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); run_command_on_coordinator_and_workers ---------------------------------------- (1 row) CREATE SCHEMA schema_with_user AUTHORIZATION "test-user"; CREATE TABLE schema_with_user.test_table(column1 int); SELECT create_reference_table('schema_with_user.test_table'); create_reference_table ------------------------ (1 row) -- verify that owner of the created schema is test-user \c - - - :worker_1_port \dn schema_with_user List of schemas Name | Owner ------------------+----------- schema_with_user | test-user (1 row) \c - - - :master_port -- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock DROP OWNED BY "test-user" CASCADE; NOTICE: drop cascades to table schema_with_user.test_table SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); run_command_on_workers ---------------------------------- (localhost,57637,t,"DROP OWNED") (localhost,57638,t,"DROP OWNED") (2 rows) SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); run_command_on_coordinator_and_workers ---------------------------------------- (1 row) DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); -- test run_command_on_* UDFs with schema CREATE SCHEMA run_test_schema; CREATE TABLE run_test_schema.test_table(id int); SELECT create_distributed_table('run_test_schema.test_table','id'); create_distributed_table -------------------------- (1 row) -- randomly insert data to evaluate below UDFs better INSERT INTO run_test_schema.test_table VALUES(1); INSERT INTO run_test_schema.test_table VALUES(7); INSERT INTO run_test_schema.test_table VALUES(9); -- try UDFs which call shard_name as a subroutine SELECT sum(result::int) FROM run_command_on_placements('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); sum ------- 49152 (1 row) SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); sum ------- 24576 (1 row) -- Clean up the created schema DROP SCHEMA run_test_schema CASCADE; NOTICE: drop cascades to table run_test_schema.test_table 
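-- Illustrative sketch (kept entirely in comments; not produced by this regression run,
-- and run_test_schema was dropped above): run_command_on_placements() and
-- run_command_on_shards() substitute the fully qualified shard name for the '%s'
-- placeholder before executing the given command, which is why the pg_table_size()
-- calls above report per-placement and per-shard sizes respectively, e.g.:
--   SELECT shardid, result
--   FROM run_command_on_shards('run_test_schema.test_table',
--                              'SELECT pg_table_size(''%s'')');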
citus-7.0.3/src/test/regress/expected/multi_shard_modify.out000066400000000000000000000257711317107136600243250ustar00rootroot00000000000000-- -- MULTI_SHARD_MODIFY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000; -- Create a new hash partitioned multi_shard_modify_test table and load data into it. CREATE TABLE multi_shard_modify_test ( t_key integer not null, t_name varchar(25) not null, t_value integer not null); SELECT master_create_distributed_table('multi_shard_modify_test', 't_key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('multi_shard_modify_test', 4, 2); master_create_worker_shards ----------------------------- (1 row) COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv'); -- Testing master_modify_multiple_shards -- Verify that master_modify_multiple_shards can be rolled back BEGIN; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 10 AND t_key <= 13'); master_modify_multiple_shards ------------------------------- 3 (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 202'); master_modify_multiple_shards ------------------------------- 1 (1 row) ROLLBACK; SELECT count(*) FROM multi_shard_modify_test; count ------- 27 (1 row) -- Check that master_modify_multiple_shards cannot be called with non-distributed tables CREATE TEMPORARY TABLE temporary_nondistributed_table (col_1 integer,col_2 text); INSERT INTO temporary_nondistributed_table VALUES (37, 'eren'), (31, 'onder'); SELECT master_modify_multiple_shards('DELETE FROM temporary_nondistributed_table WHERE col_1 = 37'); ERROR: relation "temporary_nondistributed_table" is not a distributed table -- commands with volatile functions in their quals SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (random() * 1000)'); ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_value = (random() * 1000)'); ERROR: functions used in the WHERE clause of modification queries on distributed tables must not be VOLATILE -- commands with immutable functions in their quals SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = abs(-3)'); master_modify_multiple_shards ------------------------------- 1 (1 row) -- DELETE with expression in WHERE clause SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (3*18-40)'); master_modify_multiple_shards ------------------------------- 1 (1 row) -- commands with a USING a non distributed table error out CREATE TABLE temp_nations(name text, key integer); SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' '); ERROR: relation temp_nations is not distributed -- commands with a USING clause are unsupported SELECT master_create_distributed_table('temp_nations', 'name', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('temp_nations', 4, 2); master_create_worker_shards ----------------------------- (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = 
''foobar'' '); ERROR: cannot perform distributed planning for the given modification DETAIL: Joins are not supported in distributed modifications. -- commands with a RETURNING clause are unsupported SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 3 RETURNING *'); ERROR: master_modify_multiple_shards() does not support RETURNING -- commands containing a CTE are unsupported SELECT master_modify_multiple_shards('WITH deleted_stuff AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) DELETE FROM multi_shard_modify_test'); ERROR: common table expressions are not supported in distributed modifications -- Check that we can successfully delete from multiple shards with 1PC SET citus.multi_shard_commit_protocol TO '1pc'; SELECT count(*) FROM multi_shard_modify_test; count ------- 25 (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 200'); master_modify_multiple_shards ------------------------------- 2 (1 row) SELECT count(*) FROM multi_shard_modify_test; count ------- 23 (1 row) -- Check that we can successfully delete from multiple shards with 2PC SET citus.multi_shard_commit_protocol TO '2pc'; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 100'); master_modify_multiple_shards ------------------------------- 2 (1 row) SELECT count(*) FROM multi_shard_modify_test; count ------- 21 (1 row) -- Check that shard pruning works SET client_min_messages TO DEBUG2; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 15'); master_modify_multiple_shards ------------------------------- 1 (1 row) SET client_min_messages TO NOTICE; -- Check that master_modify_multiple_shards works without partition keys SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_name LIKE ''barce%'' '); master_modify_multiple_shards ------------------------------- 1 (1 row) -- Simple, Single Shard Update SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''warsaw'' WHERE t_key=17'); master_modify_multiple_shards ------------------------------- 1 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key=17; t_name -------- warsaw (1 row) -- Simple, Multi Shard Update SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''???'' WHERE t_key>30 AND t_key<35'); master_modify_multiple_shards ------------------------------- 4 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; t_name -------- ??? ??? ??? ??? 
(4 rows) -- expression UPDATE SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=8*37 WHERE t_key>30 AND t_key<35'); master_modify_multiple_shards ------------------------------- 4 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; t_value --------- 296 296 296 296 (4 rows) -- multi-column UPDATE SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''somename'', t_value=333 WHERE t_key>30 AND t_key<35'); master_modify_multiple_shards ------------------------------- 4 (1 row) SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; t_name | t_value ----------+--------- somename | 333 somename | 333 somename | 333 somename | 333 (4 rows) -- commands with no constraints on the partition key are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''nice city'' WHERE t_value < 0'); master_modify_multiple_shards ------------------------------- 2 (1 row) SELECT t_name FROM multi_shard_modify_test WHERE t_value < 0; t_name ----------- nice city nice city (2 rows) -- attempting to change the partition key is unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_key=3000 WHERE t_key < 10 '); ERROR: modifying the partition value of rows is not allowed -- UPDATEs with a FROM clause are unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL'' FROM temp_nations WHERE multi_shard_modify_test.t_key = 3 AND multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''dummy'' '); ERROR: cannot perform distributed planning for the given modification DETAIL: Joins are not supported in distributed modifications. -- commands with a RETURNING clause are unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''FAIL'' WHERE t_key=4 RETURNING *'); ERROR: master_modify_multiple_shards() does not support RETURNING -- commands containing a CTE are unsupported SELECT master_modify_multiple_shards('WITH t AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) UPDATE multi_shard_modify_test SET t_name = ''FAIL'' '); ERROR: common table expressions are not supported in distributed modifications -- updates referencing just a var are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=t_key WHERE t_key = 10'); master_modify_multiple_shards ------------------------------- 1 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; t_value --------- 10 (1 row) -- updates referencing a column are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = t_value + 37 WHERE t_key = 10'); master_modify_multiple_shards ------------------------------- 1 (1 row) SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; t_value --------- 47 (1 row) CREATE FUNCTION temp_stable_func() RETURNS integer AS 'SELECT 10;' LANGUAGE SQL STABLE; -- updates referencing non-IMMUTABLE functions are unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL!'' WHERE t_key = temp_stable_func()'); master_modify_multiple_shards ------------------------------- 1 (1 row) -- updates referencing IMMUTABLE functions in SET section are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = abs(-78) WHERE t_key = 10'); master_modify_multiple_shards ------------------------------- 1 (1 row) SELECT t_value 
FROM multi_shard_modify_test WHERE t_key=10; t_value --------- 78 (1 row) -- updates referencing STABLE functions in SET section are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = temp_stable_func() * 2 WHERE t_key = 10'); master_modify_multiple_shards ------------------------------- 1 (1 row) -- updates referencing VOLATILE functions in SET section are not supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = random() WHERE t_key = 10'); ERROR: functions used in UPDATE queries on distributed tables must not be VOLATILE -- commands with stable functions in their quals are allowed SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = temp_stable_func()'); master_modify_multiple_shards ------------------------------- 1 (1 row) ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102046; citus-7.0.3/src/test/regress/expected/multi_simple_queries.out000066400000000000000000000437631317107136600247040ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 850000; -- =================================================================== -- test end-to-end query functionality -- =================================================================== CREATE TABLE articles ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer NOT NULL CHECK (word_count > 0) ); -- this table is used in a CTE test CREATE TABLE authors ( name text, id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('articles', 2, 1); master_create_worker_shards ----------------------------- (1 row) SELECT master_create_worker_shards('articles_single_shard', 1, 1); master_create_worker_shards ----------------------------- (1 row) -- create a bunch of test data INSERT INTO articles VALUES ( 1, 1, 'arsenous', 9572); INSERT INTO articles VALUES ( 2, 2, 'abducing', 13642); INSERT INTO articles VALUES ( 3, 3, 'asternal', 10480); INSERT INTO articles VALUES ( 4, 4, 'altdorfer', 14551); INSERT INTO articles VALUES ( 5, 5, 'aruru', 11389); INSERT INTO articles VALUES ( 6, 6, 'atlases', 15459); INSERT INTO articles VALUES ( 7, 7, 'aseptic', 12298); INSERT INTO articles VALUES ( 8, 8, 'agatized', 16368); INSERT INTO articles VALUES ( 9, 9, 'alligate', 438); INSERT INTO articles VALUES (10, 10, 'aggrandize', 17277); INSERT INTO articles VALUES (11, 1, 'alamo', 1347); INSERT INTO articles VALUES (12, 2, 'archiblast', 18185); INSERT INTO articles VALUES (13, 3, 'aseyev', 2255); INSERT INTO articles VALUES (14, 4, 'andesite', 19094); INSERT INTO articles VALUES (15, 5, 'adversa', 3164); INSERT INTO articles VALUES (16, 6, 'allonym', 2); INSERT INTO articles VALUES (17, 7, 'auriga', 4073); INSERT INTO articles VALUES (18, 8, 'assembly', 911); INSERT INTO articles VALUES (19, 9, 'aubergiste', 4981); INSERT INTO articles VALUES (20, 10, 'absentness', 1820); INSERT INTO articles VALUES (21, 1, 'arcading', 5890); INSERT INTO articles VALUES (22, 2, 'antipope', 2728); INSERT INTO articles VALUES (23, 3, 'abhorring', 6799); INSERT INTO articles VALUES (24, 4, 'audacious', 3637); 
INSERT INTO articles VALUES (25, 5, 'antehall', 7707); INSERT INTO articles VALUES (26, 6, 'abington', 4545); INSERT INTO articles VALUES (27, 7, 'arsenous', 8616); INSERT INTO articles VALUES (28, 8, 'aerophyte', 5454); INSERT INTO articles VALUES (29, 9, 'amateur', 9524); INSERT INTO articles VALUES (30, 10, 'andelee', 6363); INSERT INTO articles VALUES (31, 1, 'athwartships', 7271); INSERT INTO articles VALUES (32, 2, 'amazon', 11342); INSERT INTO articles VALUES (33, 3, 'autochrome', 8180); INSERT INTO articles VALUES (34, 4, 'amnestied', 12250); INSERT INTO articles VALUES (35, 5, 'aminate', 9089); INSERT INTO articles VALUES (36, 6, 'ablation', 13159); INSERT INTO articles VALUES (37, 7, 'archduchies', 9997); INSERT INTO articles VALUES (38, 8, 'anatine', 14067); INSERT INTO articles VALUES (39, 9, 'anchises', 10906); INSERT INTO articles VALUES (40, 10, 'attemper', 14976); INSERT INTO articles VALUES (41, 1, 'aznavour', 11814); INSERT INTO articles VALUES (42, 2, 'ausable', 15885); INSERT INTO articles VALUES (43, 3, 'affixal', 12723); INSERT INTO articles VALUES (44, 4, 'anteport', 16793); INSERT INTO articles VALUES (45, 5, 'afrasia', 864); INSERT INTO articles VALUES (46, 6, 'atlanta', 17702); INSERT INTO articles VALUES (47, 7, 'abeyance', 1772); INSERT INTO articles VALUES (48, 8, 'alkylic', 18610); INSERT INTO articles VALUES (49, 9, 'anyone', 2681); INSERT INTO articles VALUES (50, 10, 'anjanette', 19519); -- insert a single row for the test INSERT INTO articles_single_shard VALUES (50, 10, 'anjanette', 19519); -- zero-shard modifications should succeed UPDATE articles SET title = '' WHERE author_id = 1 AND author_id = 2; UPDATE articles SET title = '' WHERE 0 = 1; DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- single-shard tests -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; id | author_id | title | word_count ----+-----------+-----------+------------ 50 | 10 | anjanette | 19519 (1 row) -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; title ------------ aggrandize absentness andelee attemper anjanette (5 rows) -- try ordering them by word count SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; title | word_count ------------+------------ anjanette | 19519 aggrandize | 17277 attemper | 14976 andelee | 6363 absentness | 1820 (5 rows) -- look at last two articles by an author SELECT title, id FROM articles WHERE author_id = 5 ORDER BY id LIMIT 2; title | id ---------+---- aruru | 5 adversa | 15 (2 rows) -- find all articles by two authors in same shard SELECT title, author_id FROM articles WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; title | author_id -------------+----------- aseptic | 7 auriga | 7 arsenous | 7 archduchies | 7 abeyance | 7 agatized | 8 assembly | 8 aerophyte | 8 anatine | 8 alkylic | 8 (10 rows) -- add in some grouping expressions SELECT author_id, sum(word_count) AS corpus_size FROM articles WHERE author_id = 1 OR author_id = 2 OR author_id = 8 OR author_id = 10 GROUP BY author_id HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; author_id | corpus_size -----------+------------- 2 | 61782 10 | 59955 8 | 55410 (3 rows) -- UNION/INTERSECT queries are unsupported if on multiple shards SELECT * FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2; ERROR: could not run distributed query with UNION, INTERSECT, or EXCEPT HINT: Consider using an equality 
filter on the distributed table's partition column. -- queries using CTEs are unsupported WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM articles; ERROR: cannot plan queries which include both local and distributed relations -- queries which involve functions in FROM clause are unsupported. SELECT * FROM articles, position('om' in 'Thomas'); ERROR: could not run distributed query with complex table expressions HINT: Consider using an equality filter on the distributed table's partition column. -- subqueries are not supported in WHERE clause in Citus SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIKE '%a'); ERROR: cannot plan queries which include both local and distributed relations -- subqueries are supported in FROM clause SELECT articles.id,test.word_count FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id ORDER BY articles.id; id | word_count ----+------------ 1 | 9572 2 | 13642 3 | 10480 4 | 14551 5 | 11389 6 | 15459 7 | 12298 8 | 16368 9 | 438 10 | 17277 11 | 1347 12 | 18185 13 | 2255 14 | 19094 15 | 3164 16 | 2 17 | 4073 18 | 911 19 | 4981 20 | 1820 21 | 5890 22 | 2728 23 | 6799 24 | 3637 25 | 7707 26 | 4545 27 | 8616 28 | 5454 29 | 9524 30 | 6363 31 | 7271 32 | 11342 33 | 8180 34 | 12250 35 | 9089 36 | 13159 37 | 9997 38 | 14067 39 | 10906 40 | 14976 41 | 11814 42 | 15885 43 | 12723 44 | 16793 45 | 864 46 | 17702 47 | 1772 48 | 18610 49 | 2681 50 | 19519 (50 rows) -- subqueries are not supported in SELECT clause SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard a2 WHERE a.id = a2.id LIMIT 1) AS special_price FROM articles a; ERROR: could not run distributed query with subquery outside the FROM and WHERE clauses HINT: Consider using an equality filter on the distributed table's partition column. -- joins are not supported between local and distributed tables SELECT title, authors.name FROM authors, articles WHERE authors.id = articles.author_id; ERROR: cannot plan queries which include both local and distributed relations -- inner joins are not supported (I think) SELECT * FROM (articles INNER JOIN authors ON articles.id = authors.id); ERROR: cannot plan queries which include both local and distributed relations -- test use of EXECUTE statements within plpgsql DO $sharded_execute$ BEGIN EXECUTE 'SELECT COUNT(*) FROM articles ' || 'WHERE author_id = $1 AND author_id = $2' USING 1, 2; END $sharded_execute$; -- test use of bare SQL within plpgsql DO $sharded_sql$ BEGIN SELECT COUNT(*) FROM articles WHERE author_id = 1 AND author_id = 2; END $sharded_sql$; ERROR: query has no destination for result data HINT: If you want to discard the results of a SELECT, use PERFORM instead. 
CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -- test cross-shard queries SELECT COUNT(*) FROM articles; count ------- 50 (1 row) -- test with empty target list SELECT FROM articles; -- (50 rows) SELECT FROM articles WHERE author_id = 3737; -- (0 rows) SELECT FROM articles WHERE word_count = 65500; -- (0 rows) -- having queries supported in Citus SELECT author_id, sum(word_count) AS corpus_size FROM articles GROUP BY author_id HAVING sum(word_count) > 25000 ORDER BY sum(word_count) DESC LIMIT 5; author_id | corpus_size -----------+------------- 4 | 66325 2 | 61782 10 | 59955 8 | 55410 6 | 50867 (5 rows) SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 ORDER BY author_id; author_id ----------- 2 4 6 8 10 (5 rows) SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; author_id ----------- 2 4 (2 rows) SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; author_id ----------- 1 2 3 4 6 8 10 (7 rows) SELECT author_id FROM articles GROUP BY author_id HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; author_id ----------- 1 2 8 (3 rows) SELECT o_orderstatus, count(*), avg(o_totalprice) FROM orders GROUP BY o_orderstatus HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; o_orderstatus | count | avg ---------------+-------+--------------------- O | 1460 | 143355.847013698630 P | 75 | 164847.914533333333 (2 rows) SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 GROUP BY o_orderstatus HAVING sum(l_linenumber) > 1000 ORDER BY o_orderstatus; o_orderstatus | sum | avg ---------------+------+-------------------- F | 8559 | 3.0126715945089757 O | 8901 | 3.0050641458474004 (2 rows) -- now, test the cases where Citus do or do not need to create -- the master queries SET citus.large_table_shard_count TO 2; SET client_min_messages TO 'DEBUG2'; SET citus.task_executor_type TO 'real-time'; -- start with the simple lookup query SELECT * FROM articles WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- below query hits a single shard, so no need to create the master query SELECT * FROM articles WHERE author_id = 1 OR author_id = 17; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- below query hits two shards, so needs to create the master query SELECT * FROM articles WHERE author_id = 1 OR author_id = 18; id | author_id | title | word_count ----+-----------+--------------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 21 | 1 | arcading | 5890 31 | 1 | athwartships | 7271 41 | 1 | aznavour | 11814 (5 rows) -- rename the output columns on a no master query case SELECT id as article_id, word_count * id as random_value FROM articles WHERE author_id = 1; DEBUG: Creating router plan DEBUG: Plan is router executable article_id | random_value ------------+-------------- 1 | 9572 11 | 14817 21 | 123690 31 | 225401 41 | 484374 (5 rows) -- we 
can push down co-located joins to a single worker without the -- master query being required for only the same tables SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles a, articles b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- 10 | 17277 10 | 1820 10 | 6363 (3 rows) -- now show that JOINs with multiple tables are not router executable -- they are executed by real-time executor SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles a, articles_single_shard b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; DEBUG: Creating router plan DEBUG: Plan is router executable first_author | second_word_count --------------+------------------- 10 | 19519 10 | 19519 10 | 19519 (3 rows) -- do not create the master query for LIMIT on a single shard SELECT SELECT * FROM articles WHERE author_id = 1 LIMIT 2; DEBUG: Creating router plan DEBUG: Plan is router executable id | author_id | title | word_count ----+-----------+----------+------------ 1 | 1 | arsenous | 9572 11 | 1 | alamo | 1347 (2 rows) -- This query hits a single shard. So GROUP BY can be -- pushed down to the workers directly. This query is -- equivalent to SELECT DISTINCT on a single shard. SELECT id FROM articles WHERE author_id = 1 GROUP BY id ORDER BY id; DEBUG: Creating router plan DEBUG: Plan is router executable id ---- 1 11 21 31 41 (5 rows) -- copying from a single shard table does not require the master query COPY articles_single_shard TO stdout; DEBUG: Creating router plan DEBUG: Plan is router executable 50 10 anjanette 19519 -- error out for queries with aggregates SELECT avg(word_count) FROM articles WHERE author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable avg -------------------- 12356.400000000000 (1 row) -- max, min, sum, count is somehow implemented -- differently in distributed planning SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles WHERE author_id = 2; DEBUG: Creating router plan DEBUG: Plan is router executable max | min | sum | cnt -------+------+-------+----- 18185 | 2728 | 61782 | 5 (1 row) -- error out for queries with repartition jobs SELECT * FROM articles a, articles b WHERE a.id = b.id AND a.author_id = 1; DEBUG: join prunable for task partitionId 0 and 1 DEBUG: join prunable for task partitionId 0 and 2 DEBUG: join prunable for task partitionId 0 and 3 DEBUG: join prunable for task partitionId 1 and 0 DEBUG: join prunable for task partitionId 1 and 2 DEBUG: join prunable for task partitionId 1 and 3 DEBUG: join prunable for task partitionId 2 and 0 DEBUG: join prunable for task partitionId 2 and 1 DEBUG: join prunable for task partitionId 2 and 3 DEBUG: join prunable for task partitionId 3 and 0 DEBUG: join prunable for task partitionId 3 and 1 DEBUG: join prunable for task partitionId 3 and 2 DEBUG: pruning merge fetch taskId 1 DETAIL: Creating dependency on merge taskId 3 DEBUG: pruning merge fetch taskId 2 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 4 DETAIL: Creating dependency on merge taskId 5 DEBUG: pruning merge fetch taskId 5 DETAIL: Creating dependency on merge taskId 8 DEBUG: pruning merge fetch taskId 7 DETAIL: Creating dependency on merge taskId 7 DEBUG: pruning merge fetch taskId 8 DETAIL: Creating dependency on merge taskId 11 DEBUG: pruning merge 
fetch taskId 10 DETAIL: Creating dependency on merge taskId 9 DEBUG: pruning merge fetch taskId 11 DETAIL: Creating dependency on merge taskId 14 ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". -- system columns from shard tables can be queried and retrieved SELECT count(*) FROM ( SELECT tableoid, ctid, cmin, cmax, xmin, xmax FROM articles WHERE tableoid IS NOT NULL OR ctid IS NOT NULL OR cmin IS NOT NULL OR cmax IS NOT NULL OR xmin IS NOT NULL OR xmax IS NOT NULL ) x; count ------- 50 (1 row) SET client_min_messages to 'NOTICE'; citus-7.0.3/src/test/regress/expected/multi_single_relation_subquery.out000066400000000000000000000144131317107136600267610ustar00rootroot00000000000000-- -- MULTI_SINGLE_RELATION_SUBQUERY -- -- This test checks that we are able to run selected set of distributed SQL subqueries. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 860000; SET citus.task_executor_type TO 'task-tracker'; select number_sum, count(*) as total, avg(total_count) avg_count from (select l_suppkey, l_linestatus, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem group by l_suppkey, l_linestatus) as distributed_table where number_sum >= 10 group by number_sum order by total desc, number_sum desc limit 10; number_sum | total | avg_count ------------+-------+-------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 12 | 56 | 2.8392857142857143 13 | 42 | 2.8809523809523810 14 | 21 | 3.2857142857142857 16 | 10 | 3.5000000000000000 15 | 10 | 3.3000000000000000 17 | 6 | 3.3333333333333333 18 | 3 | 4.0000000000000000 19 | 2 | 4.0000000000000000 (10 rows) -- same query above, just replace outer where clause with inner having clause select number_sum, count(*) as total, avg(total_count) avg_count from (select l_suppkey, l_linestatus, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem group by l_suppkey, l_linestatus having sum(l_linenumber) >= 10) as distributed_table group by number_sum order by total desc, number_sum desc limit 10; number_sum | total | avg_count ------------+-------+-------------------- 10 | 136 | 2.3970588235294118 11 | 97 | 2.6082474226804124 12 | 56 | 2.8392857142857143 13 | 42 | 2.8809523809523810 14 | 21 | 3.2857142857142857 16 | 10 | 3.5000000000000000 15 | 10 | 3.3000000000000000 17 | 6 | 3.3333333333333333 18 | 3 | 4.0000000000000000 19 | 2 | 4.0000000000000000 (10 rows) select (l_suppkey / 100) as suppkey_bin, avg(total_count) avg_count from (select l_suppkey, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem group by l_suppkey, l_linestatus) as distributed_table group by suppkey_bin order by avg_count desc limit 20; suppkey_bin | avg_count -------------+-------------------- 95 | 1.4851485148514851 90 | 1.4761904761904762 52 | 1.4680851063829787 40 | 1.4659090909090909 15 | 1.4642857142857143 75 | 1.4444444444444444 72 | 1.4375000000000000 84 | 1.4242424242424242 35 | 1.4226804123711340 64 | 1.4166666666666667 74 | 1.4117647058823529 21 | 1.4000000000000000 18 | 1.4000000000000000 26 | 1.3932584269662921 96 | 1.3913043478260870 71 | 1.3913043478260870 86 | 1.3894736842105263 55 | 1.3882352941176471 57 | 1.3875000000000000 1 | 1.3846153846153846 (20 rows) select total, avg(avg_count) as total_avg_count from (select number_sum, count(*) as total, avg(total_count) avg_count from (select l_suppkey, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem where l_partkey > 100 and l_quantity > 2 and l_orderkey < 
10000 group by l_suppkey) as distributed_table where number_sum >= 10 group by number_sum) as distributed_table_2 group by total order by total; total | total_avg_count -------+-------------------- 1 | 4.8000000000000000 6 | 3.0000000000000000 10 | 3.5000000000000000 27 | 2.9259259259259259 32 | 2.8125000000000000 57 | 2.4912280701754386 77 | 2.3896103896103896 (7 rows) -- Check that we support subquery even though group by clause is an expression -- and it is not referred in the target list. select avg(count) from (select l_suppkey, count(*) as count from lineitem group by (l_orderkey/4)::int, l_suppkey ) as distributed_table; avg ------------------------ 1.00083402835696413678 (1 row) -- Check that we don't support subqueries with limit. select l_suppkey, sum(suppkey_count) as total_suppkey_count from (select l_suppkey, count(*) as suppkey_count from lineitem group by l_suppkey order by l_suppkey limit 100) as distributed_table group by l_suppkey; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with limit are not supported yet -- Check that we don't support subqueries without aggregates. select rounded_tax from (select round(l_tax) as rounded_tax from lineitem group by l_tax) as distributed_table; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries without aggregates are not supported yet -- Check that we support subqueries with count(distinct). select avg(different_shipment_days) from (select count(distinct l_shipdate) as different_shipment_days from lineitem group by l_partkey) as distributed_table; avg ------------------------ 1.02907126318497555956 (1 row) select avg(different_shipment_days) from (select count(distinct l_shipdate) as different_shipment_days from lineitem group by l_partkey having count(distinct l_shipdate) >= 2) as distributed_table; avg -------------------- 2.0335365853658537 (1 row) -- Check that if subquery is pulled, we don't error and run query properly. SELECT max(l_suppkey) FROM ( SELECT l_suppkey FROM ( SELECT l_suppkey, count(*) FROM lineitem WHERE l_orderkey < 20000 GROUP BY l_suppkey) z ) y; max ------ 9999 (1 row) citus-7.0.3/src/test/regress/expected/multi_size_queries.out000066400000000000000000000075111317107136600243540ustar00rootroot00000000000000-- -- MULTI_SIZE_QUERIES -- -- Test checks whether size of distributed tables can be obtained with citus_table_size. -- To find the relation size and total relation size citus_relation_size and -- citus_total_relation_size are also tested. 
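-- Illustrative usage sketch (assumes a hypothetical distributed table named
-- github_events with replication factor 1; these statements are not part of the
-- captured output below): citus_relation_size() mirrors pg_relation_size()
-- (main fork only), citus_table_size() mirrors pg_table_size() (adds TOAST,
-- excludes indexes), and citus_total_relation_size() mirrors
-- pg_total_relation_size() (adds indexes), each summed over all shards:
--   SELECT pg_size_pretty(citus_relation_size('github_events'));
--   SELECT pg_size_pretty(citus_table_size('github_events'));
--   SELECT pg_size_pretty(citus_total_relation_size('github_events'));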
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1390000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1390000; -- Tests with invalid relation IDs SELECT citus_table_size(1); ERROR: could not open relation with OID 1 SELECT citus_relation_size(1); ERROR: could not open relation with OID 1 SELECT citus_total_relation_size(1); ERROR: could not open relation with OID 1 -- Tests with non-distributed table CREATE TABLE non_distributed_table (x int); SELECT citus_table_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed SELECT citus_relation_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed SELECT citus_total_relation_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed DROP TABLE non_distributed_table; -- Tests on distributed table with replication factor > 1 SELECT citus_table_size('lineitem_hash_part'); ERROR: cannot calculate the size because replication factor is greater than 1 SELECT citus_relation_size('lineitem_hash_part'); ERROR: cannot calculate the size because replication factor is greater than 1 SELECT citus_total_relation_size('lineitem_hash_part'); ERROR: cannot calculate the size because replication factor is greater than 1 VACUUM (FULL) customer_copy_hash; -- Tests on distributed tables with streaming replication. SELECT citus_table_size('customer_copy_hash'); citus_table_size ------------------ 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); citus_relation_size --------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); citus_total_relation_size --------------------------- 1597440 (1 row) CREATE INDEX index_1 on customer_copy_hash(c_custkey); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' VACUUM (FULL) customer_copy_hash; -- Tests on distributed table with index. 
SELECT citus_table_size('customer_copy_hash'); citus_table_size ------------------ 548864 (1 row) SELECT citus_relation_size('customer_copy_hash'); citus_relation_size --------------------- 548864 (1 row) SELECT citus_total_relation_size('customer_copy_hash'); citus_total_relation_size --------------------------- 2646016 (1 row) -- Tests on reference table VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); citus_table_size ------------------ 376832 (1 row) SELECT citus_relation_size('supplier'); citus_relation_size --------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); citus_total_relation_size --------------------------- 376832 (1 row) CREATE INDEX index_2 on supplier(s_suppkey); VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); citus_table_size ------------------ 376832 (1 row) SELECT citus_relation_size('supplier'); citus_relation_size --------------------- 376832 (1 row) SELECT citus_total_relation_size('supplier'); citus_total_relation_size --------------------------- 458752 (1 row) -- Test inside the transaction BEGIN; ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL; select citus_table_size('supplier'); ERROR: citus size functions cannot be called in transaction blocks which contain multi-shard data modifications END; DROP INDEX index_1; DROP INDEX index_2; citus-7.0.3/src/test/regress/expected/multi_sql_function.out000066400000000000000000000205341317107136600243510ustar00rootroot00000000000000-- -- MULTI_SQL_FUNCTION -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000; CREATE FUNCTION sql_test_no_1() RETURNS bigint AS ' SELECT count(*) FROM orders; ' LANGUAGE SQL; CREATE FUNCTION sql_test_no_2() RETURNS bigint AS ' SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey; ' LANGUAGE SQL; CREATE FUNCTION sql_test_no_3() RETURNS bigint AS ' SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; ' LANGUAGE SQL; CREATE FUNCTION sql_test_no_4() RETURNS bigint AS ' SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; ' LANGUAGE SQL; SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- now, run plain SQL functions SELECT sql_test_no_1(); sql_test_no_1 --------------- 2984 (1 row) SELECT sql_test_no_2(); sql_test_no_2 --------------- 11998 (1 row) SELECT sql_test_no_3(); sql_test_no_3 --------------- 1955 (1 row) SELECT sql_test_no_4(); sql_test_no_4 --------------- 7804 (1 row) -- run the tests which do not require re-partition -- with real-time executor SET citus.task_executor_type TO 'real-time'; -- now, run plain SQL functions SELECT sql_test_no_1(); sql_test_no_1 --------------- 2984 (1 row) SELECT sql_test_no_2(); sql_test_no_2 --------------- 11998 (1 row) -- test router executor parameterized sql functions CREATE TABLE temp_table ( key int, value int ); SELECT master_create_distributed_table('temp_table','key','hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('temp_table',4,1); master_create_worker_shards ----------------------------- (1 row) CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$ INSERT INTO temp_table (key) VALUES (0); $$ LANGUAGE SQL; -- execute 6 times SELECT no_parameter_insert_sql(); no_parameter_insert_sql ------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql ------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql ------------------------- 
(1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql ------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql ------------------------- (1 row) SELECT no_parameter_insert_sql(); no_parameter_insert_sql ------------------------- (1 row) CREATE FUNCTION non_partition_parameter_insert_sql(int) RETURNS void AS $$ INSERT INTO temp_table (key, value) VALUES (0, $1); $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_insert_sql(10); non_partition_parameter_insert_sql ------------------------------------ (1 row) SELECT non_partition_parameter_insert_sql(20); non_partition_parameter_insert_sql ------------------------------------ (1 row) SELECT non_partition_parameter_insert_sql(30); non_partition_parameter_insert_sql ------------------------------------ (1 row) SELECT non_partition_parameter_insert_sql(40); non_partition_parameter_insert_sql ------------------------------------ (1 row) SELECT non_partition_parameter_insert_sql(50); non_partition_parameter_insert_sql ------------------------------------ (1 row) SELECT non_partition_parameter_insert_sql(60); non_partition_parameter_insert_sql ------------------------------------ (1 row) -- check inserted values SELECT * FROM temp_table ORDER BY key, value; key | value -----+------- 0 | 10 0 | 20 0 | 30 0 | 40 0 | 50 0 | 60 0 | 0 | 0 | 0 | 0 | 0 | (12 rows) -- check updates CREATE FUNCTION non_partition_parameter_update_sql(int, int) RETURNS void AS $$ UPDATE temp_table SET value = $2 WHERE key = 0 AND value = $1; $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_update_sql(10, 12); non_partition_parameter_update_sql ------------------------------------ (1 row) SELECT non_partition_parameter_update_sql(20, 22); non_partition_parameter_update_sql ------------------------------------ (1 row) SELECT non_partition_parameter_update_sql(30, 32); non_partition_parameter_update_sql ------------------------------------ (1 row) SELECT non_partition_parameter_update_sql(40, 42); non_partition_parameter_update_sql ------------------------------------ (1 row) SELECT non_partition_parameter_update_sql(50, 52); non_partition_parameter_update_sql ------------------------------------ (1 row) SELECT non_partition_parameter_update_sql(60, 62); non_partition_parameter_update_sql ------------------------------------ (1 row) -- check after updates SELECT * FROM temp_table ORDER BY key, value; key | value -----+------- 0 | 12 0 | 22 0 | 32 0 | 42 0 | 52 0 | 62 0 | 0 | 0 | 0 | 0 | 0 | (12 rows) -- check deletes CREATE FUNCTION non_partition_parameter_delete_sql(int) RETURNS void AS $$ DELETE FROM temp_table WHERE key = 0 AND value = $1; $$ LANGUAGE SQL; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete_sql(12); non_partition_parameter_delete_sql ------------------------------------ (1 row) SELECT non_partition_parameter_delete_sql(22); non_partition_parameter_delete_sql ------------------------------------ (1 row) SELECT non_partition_parameter_delete_sql(32); non_partition_parameter_delete_sql ------------------------------------ (1 row) SELECT non_partition_parameter_delete_sql(42); non_partition_parameter_delete_sql ------------------------------------ (1 row) SELECT non_partition_parameter_delete_sql(52); non_partition_parameter_delete_sql ------------------------------------ (1 row) SELECT non_partition_parameter_delete_sql(62); non_partition_parameter_delete_sql ------------------------------------ (1 row) -- check after deletes SELECT * FROM 
temp_table ORDER BY key, value; key | value -----+------- 0 | 0 | 0 | 0 | 0 | 0 | (6 rows) -- test running parameterized SQL function CREATE TABLE test_parameterized_sql(id integer, org_id integer); select create_distributed_table('test_parameterized_sql','org_id'); create_distributed_table -------------------------- (1 row) CREATE OR REPLACE FUNCTION test_parameterized_sql_function(org_id_val integer) RETURNS TABLE (a bigint) AS $$ SELECT count(*) AS count_val from test_parameterized_sql where org_id = org_id_val; $$ LANGUAGE SQL STABLE; CREATE OR REPLACE FUNCTION test_parameterized_sql_function_in_subquery_where(org_id_val integer) RETURNS TABLE (a bigint) AS $$ SELECT count(*) AS count_val from test_parameterized_sql as t1 where org_id IN (SELECT org_id FROM test_parameterized_sql as t2 WHERE t2.org_id = t1.org_id AND org_id = org_id_val); $$ LANGUAGE SQL STABLE; INSERT INTO test_parameterized_sql VALUES(1, 1); -- all of them should fail SELECT * FROM test_parameterized_sql_function(1); ERROR: cannot perform distributed planning on this query because parameterized queries for SQL functions referencing distributed tables are not supported HINT: Consider using PL/pgSQL functions instead. SELECT test_parameterized_sql_function(1); ERROR: could not create distributed plan DETAIL: Possibly this is caused by the use of parameters in SQL functions, which is not supported in Citus. HINT: Consider using PL/pgSQL functions instead. CONTEXT: SQL function "test_parameterized_sql_function" statement 1 SELECT test_parameterized_sql_function_in_subquery_where(1); ERROR: could not create distributed plan DETAIL: Possibly this is caused by the use of parameters in SQL functions, which is not supported in Citus. HINT: Consider using PL/pgSQL functions instead. CONTEXT: SQL function "test_parameterized_sql_function_in_subquery_where" statement 1 DROP TABLE temp_table; DROP TABLE test_parameterized_sql; -- clean-up functions DROP FUNCTION sql_test_no_1(); DROP FUNCTION sql_test_no_2(); DROP FUNCTION sql_test_no_3(); DROP FUNCTION sql_test_no_4(); DROP FUNCTION no_parameter_insert_sql(); DROP FUNCTION non_partition_parameter_insert_sql(int); DROP FUNCTION non_partition_parameter_update_sql(int, int); DROP FUNCTION non_partition_parameter_delete_sql(int); DROP FUNCTION test_parameterized_sql_function(int); DROP FUNCTION test_parameterized_sql_function_in_subquery_where(int); citus-7.0.3/src/test/regress/expected/multi_subquery.out000066400000000000000000000360241317107136600235250ustar00rootroot00000000000000-- -- MULTI_SUBQUERY -- -- no need to set shardid sequence given that we're not creating any shards SET citus.enable_router_execution TO FALSE; -- Check that we error out if shard min/max values are not exactly same. 
SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; ERROR: cannot push down this subquery DETAIL: Shards of relations in subquery need to have 1-to-1 shard partitioning -- Update metadata in order to make all shards equal -- note that the table is created on multi_insert_select_create_table.sql UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'orders_subquery'::regclass ORDER BY shardid DESC LIMIT 1); -- If group by is not on partition column then we error out from single table -- repartition code path SELECT avg(order_count) FROM (SELECT l_suppkey, count(*) AS order_count FROM lineitem_subquery GROUP BY l_suppkey) AS order_counts; ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". -- Check that we error out if join is not on partition columns. SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery GROUP BY l_orderkey) AS unit_prices; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_custkey GROUP BY l_orderkey) AS unit_prices; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- Check that we error out if there is non relation subqueries SELECT count(*) FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION ALL (SELECT 1::bigint) ) b; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported -- Check that we error out if queries in union do not include partition columns. SELECT count(*) FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION (SELECT l_partkey FROM lineitem_subquery) ) b; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. -- Check that we run union queries if partition column is selected. SELECT count(*) FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION (SELECT l_orderkey FROM lineitem_subquery) ) b; count ------- 2985 (1 row) -- Check that we error out if inner query has Limit but subquery_pushdown is not set SELECT avg(o_totalprice/l_quantity) FROM (SELECT l_orderkey, l_quantity FROM lineitem_subquery ORDER BY l_quantity LIMIT 10 ) lineitem_quantities JOIN LATERAL (SELECT o_totalprice FROM orders_subquery WHERE lineitem_quantities.l_orderkey = o_orderkey) orders_price ON true; ERROR: cannot push down this subquery DETAIL: Limit in subquery is currently unsupported -- Limit is only supported when subquery_pushdown is set -- Check that we error out if inner query has limit but outer query has not. 
SET citus.subquery_pushdown to ON; SELECT avg(o_totalprice/l_quantity) FROM (SELECT l_orderkey, l_quantity FROM lineitem_subquery ORDER BY l_quantity LIMIT 10 ) lineitem_quantities JOIN LATERAL (SELECT o_totalprice FROM orders_subquery WHERE lineitem_quantities.l_orderkey = o_orderkey) orders_price ON true; ERROR: cannot push down this subquery DETAIL: Limit in subquery without limit in the outermost query is unsupported -- reset the flag for next query SET citus.subquery_pushdown to OFF; -- Check that we error out if the outermost query is a distinct clause. SELECT count(DISTINCT a) FROM ( SELECT count(*) a FROM lineitem_subquery GROUP BY l_orderkey ) z; ERROR: cannot push down this subquery DETAIL: distinct in the outermost query is unsupported -- Check supported subquery types. SELECT o_custkey, sum(order_count) as total_order_count FROM (SELECT o_orderkey, o_custkey, count(*) AS order_count FROM orders_subquery WHERE o_orderkey > 0 AND o_orderkey < 12000 GROUP BY o_orderkey, o_custkey) AS order_counts GROUP BY o_custkey ORDER BY total_order_count DESC, o_custkey ASC LIMIT 10; o_custkey | total_order_count -----------+------------------- 1462 | 9 619 | 8 643 | 8 1030 | 8 1486 | 8 79 | 7 304 | 7 319 | 7 343 | 7 448 | 7 (10 rows) SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices WHERE unit_price > 1000 AND unit_price < 10000; avg ----------------------- 4968.2889885208475549 (1 row) -- Check that if subquery is pulled, we don't error and run query properly. SELECT count(*) FROM ( SELECT l_orderkey FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION (SELECT l_orderkey FROM lineitem_subquery) ) a WHERE l_orderkey = 1 ) b; count ------- 1 (1 row) SELECT count(*) FROM ( SELECT * FROM ( (SELECT * FROM lineitem_subquery) UNION (SELECT * FROM lineitem_subquery) ) a WHERE l_orderkey = 1 ) b; count ------- 6 (1 row) SELECT max(l_orderkey) FROM ( SELECT l_orderkey FROM ( SELECT l_orderkey FROM lineitem_subquery WHERE l_orderkey < 20000 GROUP BY l_orderkey ) z ) y; max ------- 14947 (1 row) -- Add one more shard to one relation, then test if we error out because of different -- shard counts for joining relations. 
SELECT master_create_empty_shard('orders_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000 WHERE shardid = :new_shard_id; SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; ERROR: cannot push down this subquery DETAIL: Shards of relations in subquery need to have 1-to-1 shard partitioning -- Check that we can prune shards in subqueries with VARCHAR partition columns CREATE TABLE subquery_pruning_varchar_test_table ( a varchar, b int ); SELECT master_create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('subquery_pruning_varchar_test_table', 4, 1); master_create_worker_shards ----------------------------- (1 row) SET client_min_messages TO DEBUG2; SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE a = 'onder' GROUP BY a) AS foo; DEBUG: Skipping the target shard interval 570033 because SELECT query is pruned away for the interval DEBUG: Skipping the target shard interval 570034 because SELECT query is pruned away for the interval DEBUG: Skipping the target shard interval 570036 because SELECT query is pruned away for the interval count ------- (0 rows) SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE 'eren' = a GROUP BY a) AS foo; DEBUG: Skipping the target shard interval 570033 because SELECT query is pruned away for the interval DEBUG: Skipping the target shard interval 570035 because SELECT query is pruned away for the interval DEBUG: Skipping the target shard interval 570036 because SELECT query is pruned away for the interval count ------- (0 rows) SET client_min_messages TO NOTICE; -- test subquery join on VARCHAR partition column SELECT * FROM (SELECT a_inner AS a FROM (SELECT subquery_pruning_varchar_test_table.a AS a_inner FROM subquery_pruning_varchar_test_table GROUP BY subquery_pruning_varchar_test_table.a HAVING count(subquery_pruning_varchar_test_table.a) < 3) AS f1, (SELECT subquery_pruning_varchar_test_table.a FROM subquery_pruning_varchar_test_table GROUP BY subquery_pruning_varchar_test_table.a HAVING sum(coalesce(subquery_pruning_varchar_test_table.b,0)) > 20.0) AS f2 WHERE f1.a_inner = f2.a GROUP BY a_inner) AS foo; a --- (0 rows) DROP TABLE subquery_pruning_varchar_test_table; -- Simple join subquery pushdown SELECT avg(array_length(events, 1)) AS event_average FROM (SELECT tenant_id, user_id, array_agg(event_type ORDER BY event_time) AS events FROM (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, event_type, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type IN ('click', 'submit', 'pay')) AS subquery GROUP BY tenant_id, user_id) AS subquery; event_average -------------------- 3.6666666666666667 (1 row) -- Union and left join subquery pushdown SELECT avg(array_length(events, 1)) AS event_average, hasdone FROM (SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(hasdone, 'Has not done paying') AS hasdone FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 
'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id, (composite_id).tenant_id, (composite_id).user_id, 'Has done paying'::TEXT AS hasdone FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay') AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, hasdone) AS subquery_top GROUP BY hasdone ORDER BY event_average DESC; event_average | hasdone --------------------+--------------------- 4.0000000000000000 | Has not done paying 2.5000000000000000 | Has done paying (2 rows) -- Union, left join and having subquery pushdown SELECT avg(array_length(events, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, COUNT(*) AS count_pay FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay' GROUP BY composite_id HAVING COUNT(*) > 2) AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events) > 0 GROUP BY count_pay ORDER BY count_pay; event_average | count_pay --------------------+----------- 3.0000000000000000 | 0 (1 row) -- Lateral join subquery pushdown -- set subquery_pushdown since there is limit in the query SET citus.subquery_pushdown to ON; SELECT tenant_id, user_id, user_lastseen, event_array FROM (SELECT tenant_id, user_id, max(lastseen) as user_lastseen, array_agg(event_type ORDER BY event_time) AS event_array FROM (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, lastseen FROM users WHERE composite_id >= '(1, 
-9223372036854775808)'::user_composite_type AND composite_id <= '(1, 9223372036854775807)'::user_composite_type ORDER BY lastseen DESC LIMIT 10 ) AS subquery_top LEFT JOIN LATERAL (SELECT event_type, event_time FROM events WHERE (composite_id) = subquery_top.composite_id ORDER BY event_time DESC LIMIT 99) AS subquery_lateral ON true GROUP BY tenant_id, user_id ) AS shard_union ORDER BY user_lastseen DESC LIMIT 10; tenant_id | user_id | user_lastseen | event_array -----------+---------+---------------+---------------------------- 1 | 1003 | 1472807315 | {click,click,click,submit} 1 | 1002 | 1472807215 | {click,click,submit,pay} 1 | 1001 | 1472807115 | {click,submit,pay} (3 rows) -- cleanup the tables and the type & functions -- also set the min messages to WARNING to skip -- CASCADE NOTICE messagez SET client_min_messages TO WARNING; DROP TABLE users, events; SELECT run_command_on_master_and_workers($f$ DROP TYPE user_composite_type CASCADE; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) -- createed in multi_behavioral_analytics_create_table DROP FUNCTION run_command_on_master_and_workers(p_sql text); SET client_min_messages TO DEFAULT; SET citus.subquery_pushdown to OFF; SET citus.enable_router_execution TO 'true'; citus-7.0.3/src/test/regress/expected/multi_subquery_behavioral_analytics.out000066400000000000000000001643301317107136600277720ustar00rootroot00000000000000-- -- multi subquery behavioral analytics queries aims to expand existing subquery pushdown -- regression tests to cover more cases -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- --- We don't need shard id sequence here given that we're not creating any shards, so not writing it at all -- The following line is intended to force Citus to NOT use router planner for the tests in this -- file. The motivation for doing this is to make sure that single-task queries can be planned -- by non-router code-paths. Thus, this flag should NOT be used in production. Otherwise, the actual -- router queries would fail. 
SET citus.enable_router_execution TO FALSE; ------------------------------------ -- Vanilla funnel query ------------------------------------ SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; user_id | array_length ---------+-------------- 13 | 172 12 | 121 23 | 115 10 | 114 20 | 90 (5 rows) ------------------------------------ -- Funnel grouped by whether or not a user has done an event -- This has multiple subqueries joinin at the top level ------------------------------------ SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; user_id | sum | length | hasdone_event ---------+-----+--------+-------------------- 10 | 1 | 18 | Has not done event 12 | 1 | 14 | Has done event 13 | 2 | 18 | Has not done event 15 | 1 | 18 | Has not done event 17 | 1 | 18 | Has not done event 19 | 1 | 14 | Has done event 20 | 2 | 18 | Has not done event 23 | 1 | 18 | Has not done event (8 rows) -- same query but multiple joins are one level below, returns count of row instead of actual rows SELECT count(*) FROM ( SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; count ------- 8 (1 row) -- Same queries written without unions SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( SELECT u.user_id, CASE WHEN e.event_type IN (100, 101, 102) THEN 'step=>1'::text else 
'step==>2'::text END AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102, 103, 104, 105) GROUP BY 1,2,3 ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; user_id | sum | length | hasdone_event ---------+-----+--------+-------------------- 10 | 1 | 18 | Has not done event 12 | 1 | 14 | Has done event 13 | 2 | 18 | Has not done event 15 | 1 | 18 | Has not done event 17 | 1 | 18 | Has not done event 19 | 1 | 14 | Has done event 20 | 2 | 18 | Has not done event 23 | 1 | 18 | Has not done event (8 rows) -- same query but multiple joins are one level below, returns count of row instead of actual rows SELECT count(*) FROM ( SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( SELECT u.user_id, CASE WHEN e.event_type in (100, 101, 102) then 'step=>1'::text else 'step==>2'::text END AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102, 103, 104, 105) GROUP BY 1,2,3 ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; count ------- 8 (1 row) ------------------------------------ -- Funnel, grouped by the number of times a user has done an event ------------------------------------ SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ---------+------------------------+----------- 69 | 1.00000000000000000000 | 0 65 | 1.00000000000000000000 | 0 58 | 1.00000000000000000000 | 0 49 | 1.00000000000000000000 | 0 40 | 1.00000000000000000000 | 0 32 | 1.00000000000000000000 | 0 29 | 1.00000000000000000000 | 0 18 | 1.00000000000000000000 | 0 (8 
rows) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ---------+------------------------+----------- 69 | 1.00000000000000000000 | 0 65 | 1.00000000000000000000 | 0 58 | 1.00000000000000000000 | 0 49 | 1.00000000000000000000 | 0 40 | 1.00000000000000000000 | 0 32 | 1.00000000000000000000 | 0 29 | 1.00000000000000000000 | 0 18 | 1.00000000000000000000 | 0 (8 rows) -- Same queries rewritten without using unions SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( SELECT users_table.user_id, CASE WHEN events_table.event_type > 10 AND events_table.event_type < 12 THEN 'action=>1' ELSE 'action=>2' END AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND (events_table.event_type > 10 AND events_table.event_type < 12 OR events_table.event_type > 12 AND events_table.event_type < 14) GROUP BY 1, 2, 3 ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ---------+------------------------+----------- 69 | 1.00000000000000000000 | 0 65 | 1.00000000000000000000 | 0 58 | 1.00000000000000000000 | 0 49 | 1.00000000000000000000 | 0 40 | 1.00000000000000000000 | 0 32 | 1.00000000000000000000 | 0 29 | 1.00000000000000000000 | 0 18 | 1.00000000000000000000 | 0 (8 rows) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( SELECT users_table.user_id, CASE WHEN events_table.event_type > 10 AND events_table.event_type < 12 THEN 'action=>1' ELSE 'action=>2' END AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = 
events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND (events_table.event_type > 10 AND events_table.event_type < 12 OR events_table.event_type > 12 AND events_table.event_type < 14) GROUP BY 1, 2, 3 ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; user_id | event_average | count_pay ---------+------------------------+----------- 69 | 1.00000000000000000000 | 0 65 | 1.00000000000000000000 | 0 58 | 1.00000000000000000000 | 0 49 | 1.00000000000000000000 | 0 40 | 1.00000000000000000000 | 0 32 | 1.00000000000000000000 | 0 29 | 1.00000000000000000000 | 0 18 | 1.00000000000000000000 | 0 (8 rows) ------------------------------------ -- Most recently seen users_table events_table ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------ SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60) GROUP BY user_id ORDER BY user_id DESC LIMIT 5; user_id --------- 93 90 88 87 84 (5 rows) ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 101 AND event_type < 110 AND value_3 > 100 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | value_2 ---------+--------- 95 | 951 4 | 934 2 | 908 90 | 900 49 | 847 (5 rows) ------------------------------------ -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND 
user_id = users_table.user_id) ORDER BY 1 DESC, 2 DESC LIMIT 3; user_id | value_2 ---------+--------- 58 | 585 51 | 1000 48 | 861 (3 rows) ------------------------------------ -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, sum(value_2) as cnt FROM users_table WHERE value_1 > 100 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type != 100 AND value_3 > 100 AND user_id = users_table.user_id) AND EXISTS (SELECT user_id FROM events_table WHERE event_type = 101 AND value_3 > 100 AND user_id = users_table.user_id) GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 5; user_id | cnt ---------+------- 49 | 48606 69 | 46524 86 | 46163 80 | 45995 35 | 45437 (5 rows) ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 4; user_id | value_2 ---------+--------- 8 | 996 96 | 995 8 | 995 96 | 989 (4 rows) ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, avg(value_2) FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id GROUP BY user_id HAVING Count(*) > 2) GROUP BY user_id ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | avg ---------+---------------------- 99 | 571.6666666666666667 98 | 758.0000000000000000 96 | 459.6666666666666667 90 | 453.3333333333333333 89 | 215.0000000000000000 (5 rows) ------------------------------------ -- Find me all users_table who logged in more than once ------------------------------------ SELECT user_id, value_1 from ( SELECT user_id, value_1 From users_table WHERE value_2 > 100 and user_id = 15 GROUP BY value_1, user_id HAVING count(*) > 1 ) AS a ORDER BY user_id ASC, value_1 ASC; user_id | value_1 ---------+--------- 15 | 212 15 | 230 15 | 417 15 | 490 15 | 529 15 | 926 (6 rows) -- same query with additional filter to make it not router plannable SELECT user_id, value_1 from ( SELECT user_id, value_1 From users_table WHERE value_2 > 100 and (user_id = 15 OR user_id = 16) GROUP BY value_1, user_id HAVING count(*) > 1 ) AS a ORDER BY user_id ASC, value_1 ASC; user_id | value_1 ---------+--------- 15 | 212 15 | 230 15 | 417 15 | 490 15 | 529 15 | 926 16 | 339 16 | 485 16 | 717 16 | 903 (10 rows) ------------------------------------ -- Find me all users_table who has done some event and has filters ------------------------------------ SELECT user_id FROM events_table WHERE event_type = 16 AND value_2 > 50 AND user_id IN (SELECT user_id FROM users_table WHERE value_1 = 15 AND value_2 > 25 ) ORDER BY 1; user_id --------- 7 53 (2 rows) ------------------------------------ -- Which events_table did people who has done some specific events_table ------------------------------------ SELECT user_id, event_type FROM events_table WHERE user_id in (SELECT user_id from events_table WHERE event_type > 500 
and event_type < 505) GROUP BY user_id, event_type ORDER BY 2 DESC, 1 LIMIT 3; user_id | event_type ---------+------------ 18 | 999 23 | 999 26 | 999 (3 rows) ------------------------------------ -- Find me all the users_table who has done some event more than three times ------------------------------------ SELECT user_id FROM ( SELECT user_id FROM events_table WHERE event_type = 901 GROUP BY user_id HAVING count(*) > 3 ) AS a ORDER BY user_id; user_id --------- 57 (1 row) ------------------------------------ -- Find my assets that have the highest probability and fetch their metadata ------------------------------------ CREATE TEMP TABLE assets AS SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM assets; count | count | avg -------+-------+--------------------- 14371 | 101 | 50.5232064574490293 (1 row) DROP TABLE assets; -- count number of distinct users who have value_1 equal to 5 or 13 but not 3 -- original query that fails SELECT count(*) FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') AND user_id NOT IN (select user_id from users_table where value_1 = '3') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as foo; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- previous push down query SELECT subquery_count FROM (SELECT count(*) as subquery_count FROM (SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2) as a LEFT JOIN (SELECT user_id FROM users_table WHERE (value_1 = '3') GROUP BY user_id) as b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id ) AS inner_subquery; subquery_count ---------------- 1 (1 row) -- new pushdown query without single range table entry at top requirement SELECT count(*) as subquery_count FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as a LEFT JOIN ( SELECT user_id FROM users_table WHERE (value_1 = '3') GROUP BY user_id) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; subquery_count ---------------- 1 (1 row) -- most queries below has limit clause -- therefore setting subquery_pushdown flag for all SET citus.subquery_pushdown to ON; -- multi-subquery-join -- The first query has filters on partion column to make it router plannable -- but it is processed by logical planner since we disabled router execution SELECT e1.user_id, sum(view_homepage) AS viewed_homepage, sum(use_demo) AS use_demo, sum(enter_credit_card) AS entered_credit_card, sum(submit_card_info) as submit_card_info, sum(see_bought_screen) as see_bought_screen FROM ( -- Get the first time each user viewed the homepage. 
SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE user_id = 1 and event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND user_id = 1 and event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time LIMIT 1 ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND user_id = 1 and event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time LIMIT 1 ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND user_id = 1 and event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time LIMIT 1 ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND user_id = 1 and event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time LIMIT 1 ) e5 ON true WHERE e1.user_id = 1 GROUP BY e1.user_id LIMIT 1; user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ---------+-----------------+----------+---------------------+------------------+------------------- 1 | 1 | | | | (1 row) -- Same query without all limitations SELECT e1.user_id, sum(view_homepage) AS viewed_homepage, sum(use_demo) AS use_demo, sum(enter_credit_card) AS entered_credit_card, sum(submit_card_info) as submit_card_info, sum(see_bought_screen) as see_bought_screen FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true GROUP BY e1.user_id ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ---------+-----------------+----------+---------------------+------------------+------------------- 72 | 36 | 36 | 36 | 36 | 36 95 | 12 | 12 | 12 | 12 | 12 82 | 4 | 4 | 4 | 4 | 4 74 | 3 | 3 | 3 | 3 | 3 83 | 3 | 3 | 3 | 3 | 3 6 | 2 | 2 | 2 | 2 | 2 42 | 1 | 1 | 1 | 1 | 1 5 | 4 | 4 | 4 | 4 | 93 | 4 | 4 | 4 | 4 | 51 | 1 | 1 | 1 | 1 | 85 | 6 | 6 | 6 | | 73 | 4 | 4 | 4 | | 0 | 3 | 3 | 3 | | 10 | 2 | 2 | 2 | | 13 | 2 | 2 | 2 | | (15 rows) -- Same query without all limitations but uses having() to show only those submitted their credit card info SELECT e1.user_id, sum(view_homepage) AS viewed_homepage, sum(use_demo) AS use_demo, sum(enter_credit_card) AS 
entered_credit_card, sum(submit_card_info) as submit_card_info, sum(see_bought_screen) as see_bought_screen FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; user_id | viewed_homepage | use_demo | entered_credit_card | submit_card_info | see_bought_screen ---------+-----------------+----------+---------------------+------------------+------------------- 72 | 36 | 36 | 36 | 36 | 36 95 | 12 | 12 | 12 | 12 | 12 82 | 4 | 4 | 4 | 4 | 4 74 | 3 | 3 | 3 | 3 | 3 83 | 3 | 3 | 3 | 3 | 3 6 | 2 | 2 | 2 | 2 | 2 42 | 1 | 1 | 1 | 1 | 1 5 | 4 | 4 | 4 | 4 | 93 | 4 | 4 | 4 | 4 | 51 | 1 | 1 | 1 | 1 | (10 rows) -- Explain analyze on this query fails due to #756 -- avg expression used on order by SELECT a.user_id, avg(b.value_2) as subquery_avg FROM ( SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN ( SELECT user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3)) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ---------+---------------------- 99 | 456.7446808510638298 83 | 469.6037735849056604 61 | 486.5869565217391304 78 | 434.9009009009009009 77 | 449.9313725490196078 (5 rows) -- add having to the same query SELECT a.user_id, avg(b.value_2) as subquery_avg FROM ( SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN ( SELECT user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3)) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id HAVING sum(b.value_3) > 50000 ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ---------+---------------------- 78 | 434.9009009009009009 29 | 505.0934579439252336 17 | 526.9633027522935780 91 | 501.4339622641509434 24 | 515.1714285714285714 (5 rows) -- avg on the value_3 is not a resjunk SELECT a.user_id, avg(b.value_2) as subquery_avg, avg(b.value_3) FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN ( SELECT user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3) ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3) DESC, 2, 1 LIMIT 5; user_id | subquery_avg | avg ---------+----------------------+------------------ 6 | 
523.8247422680412371 | 569.226804123711 62 | 497.1545454545454545 | 567.681818181818 8 | 524.5894736842105263 | 565.2 10 | 502.2017543859649123 | 561.929824561404 16 | 467.5145631067961165 | 561.73786407767 (5 rows) -- a powerful query structure that analyzes users/events -- using (relation JOIN subquery JOIN relation) SELECT u.user_id, sub.value_2, sub.value_3, COUNT(e2.user_id) counts FROM users_table u LEFT OUTER JOIN LATERAL (SELECT * FROM events_table e1 WHERE e1.user_id = u.user_id ORDER BY e1.value_3 DESC LIMIT 1 ) sub ON true LEFT OUTER JOIN events_table e2 ON e2.user_id = sub.user_id WHERE e2.value_2 > 10 AND e2.value_2 < 50 AND u.value_2 > 10 AND u.value_2 < 50 GROUP BY u.user_id, sub.value_2, sub.value_3 ORDER BY 4 DESC, 1 DESC, 2 ASC, 3 ASC LIMIT 10; user_id | value_2 | value_3 | counts ---------+---------+---------+-------- 87 | 807 | 990 | 45 25 | 613 | 992 | 40 26 | 952 | 982 | 36 17 | 277 | 993 | 36 83 | 571 | 1000 | 35 99 | 309 | 998 | 32 96 | 571 | 987 | 30 95 | 631 | 997 | 30 82 | 444 | 997 | 28 57 | 975 | 989 | 25 (10 rows) -- distinct users joined with events SELECT avg(events_table.event_type) as avg_type, count(*) as users_count FROM events_table JOIN (SELECT DISTINCT user_id FROM users_table ) as distinct_users ON distinct_users.user_id = events_table.user_id GROUP BY distinct_users.user_id ORDER BY users_count desc, avg_type DESC LIMIT 5; avg_type | users_count ----------------------+------------- 496.5748031496062992 | 127 531.1788617886178862 | 123 504.6806722689075630 | 119 503.7203389830508475 | 118 506.3793103448275862 | 116 (5 rows) -- reduce the data set, aggregate and join SELECT events_table.event_type, users_count.ct FROM events_table JOIN (SELECT distinct_users.user_id, count(1) as ct FROM (SELECT user_id FROM users_table ) as distinct_users GROUP BY distinct_users.user_id ) as users_count ON users_count.user_id = events_table.user_id ORDER BY users_count.ct desc, event_type DESC LIMIT 5; event_type | ct ------------+----- 996 | 121 986 | 121 979 | 121 975 | 121 960 | 121 (5 rows) --- now, test (subquery JOIN subquery) SELECT n1.user_id, count_1, total_count FROM (SELECT user_id, count(1) as count_1 FROM users_table GROUP BY user_id ) n1 INNER JOIN ( SELECT user_id, count(1) as total_count FROM events_table GROUP BY user_id, event_type ) n2 ON (n2.user_id = n1.user_id) ORDER BY total_count DESC, count_1 DESC, 1 DESC LIMIT 10; user_id | count_1 | total_count ---------+---------+------------- 57 | 105 | 4 78 | 112 | 3 45 | 111 | 3 40 | 107 | 3 36 | 106 | 3 25 | 105 | 3 86 | 100 | 3 80 | 100 | 3 60 | 100 | 3 35 | 100 | 3 (10 rows) SELECT a.user_id, avg(b.value_2) as subquery_avg FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN (SELECT DISTINCT ON (user_id) user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3) ORDER BY 1,2,3 ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ---------+--------------------- 10 | 5.0000000000000000 87 | 12.0000000000000000 77 | 28.0000000000000000 37 | 17.0000000000000000 11 | 3.0000000000000000 (5 rows) -- distinct clause must include partition column -- when used in target list SELECT a.user_id, avg(b.value_2) as subquery_avg FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN (SELECT DISTINCT ON (value_2) value_2 , user_id, value_3 FROM users_table WHERE (value_1 > 3) ORDER 
BY 1,2,3 ) AS b USING (user_id) GROUP BY user_id; ERROR: cannot push down this subquery DETAIL: Distinct on columns without partition column is currently unsupported SELECT a.user_id, avg(b.value_2) as subquery_avg FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN (SELECT DISTINCT ON (value_2, user_id) value_2 , user_id, value_3 FROM users_table WHERE (value_1 > 3) ORDER BY 1,2,3 ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; user_id | subquery_avg ---------+---------------------- 99 | 459.1910112359550562 83 | 458.0721649484536082 9 | 541.5217391304347826 78 | 434.2336448598130841 77 | 443.8686868686868687 (5 rows) SELECT user_id, event_type FROM (SELECT * FROM ( (SELECT event_type, user_id as a_user_id FROM events_table) AS a JOIN (SELECT ma.user_id AS user_id, ma.value_2 AS value_2, (GREATEST(coalesce((ma.value_3 * ma.value_2) / 20, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma WHERE (ma.value_2 > 100) ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS ma ON (a.a_user_id = ma.user_id) ) AS inner_sub ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS outer_sub ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; user_id | event_type ---------+------------ 10 | 813 10 | 806 10 | 805 10 | 685 10 | 591 10 | 442 10 | 333 10 | 317 10 | 244 10 | 169 (10 rows) -- very similar query but produces different result due to -- ordering difference in the previous one's inner query SELECT user_id, event_type FROM (SELECT event_type, user_id as a_user_id FROM events_table) AS a JOIN (SELECT ma.user_id AS user_id, ma.value_2 AS value_2, (GREATEST(coalesce((ma.value_3 * ma.value_2) / 20, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma WHERE (ma.value_2 > 100) ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS ma ON (a.a_user_id = ma.user_id) ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; user_id | event_type ---------+------------ 10 | 998 10 | 996 10 | 981 10 | 975 10 | 962 10 | 945 10 | 945 10 | 933 10 | 932 10 | 915 (10 rows) -- now they produce the same result when ordering fixed in 'outer_sub' SELECT user_id, event_type FROM (SELECT * FROM ( (SELECT event_type, user_id as a_user_id FROM events_table ) AS a JOIN (SELECT ma.user_id AS user_id, ma.value_2 AS value_2, (GREATEST(coalesce((ma.value_3 * ma.value_2) / 20, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma WHERE (ma.value_2 > 100) ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS ma ON (a.a_user_id = ma.user_id) ) AS inner_sub ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10 ) AS outer_sub ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; user_id | event_type ---------+------------ 10 | 998 10 | 996 10 | 981 10 | 975 10 | 962 10 | 945 10 | 945 10 | 933 10 | 932 10 | 915 (10 rows) -- this is one complex join query derived from a user's production query -- first declare the function on workers on master -- With array_index: SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i FROM (SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) WHERE $1 [i] = $2 LIMIT 1; $$ LANGUAGE sql') ORDER BY 1,2; nodename | nodeport | success | result -----------+----------+---------+----------------- localhost | 57637 | t | CREATE FUNCTION localhost | 57638 | t | CREATE FUNCTION (2 rows) CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i FROM 
(SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) WHERE $1 [i] = $2 LIMIT 1; $$ LANGUAGE sql; SELECT * FROM (SELECT * FROM ( (SELECT user_id AS user_id_e, event_type AS event_type_e FROM events_table ) AS ma_e JOIN (SELECT value_2, value_3, user_id FROM (SELECT * FROM ( (SELECT user_id_p AS user_id FROM (SELECT * FROM ( (SELECT user_id AS user_id_p FROM events_table WHERE (event_type IN (1,2,3,4,5)) ) AS ma_p JOIN (SELECT user_id AS user_id_a FROM users_table WHERE (value_2 % 5 = 1) ) AS a ON (a.user_id_a = ma_p.user_id_p) ) ) AS a_ma_p ) AS inner_filter_q JOIN (SELECT value_2, value_3, user_id AS user_id_ck FROM events_table WHERE event_type = ANY(ARRAY [10, 11, 12]) ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS ma_ck ON (ma_ck.user_id_ck = inner_filter_q.user_id) ) AS inner_sub_q ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10) AS inner_search_q ON (ma_e.user_id_e = inner_search_q.user_id) ) AS outer_inner_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10) AS outer_outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; user_id_e | event_type_e | value_2 | value_3 | user_id -----------+--------------+---------+---------+--------- 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 (10 rows) -- top level select * is removed now there is -- a join at top level. 
SELECT * FROM ( (SELECT user_id AS user_id_e, event_type as event_type_e FROM events_table ) AS ma_e JOIN (SELECT value_2, value_3, user_id FROM (SELECT * FROM ( (SELECT user_id_p AS user_id FROM (SELECT * FROM ( (SELECT user_id AS user_id_p FROM events_table WHERE (event_type IN (1, 2, 3, 4, 5)) ) AS ma_p JOIN (SELECT user_id AS user_id_a FROM users_table WHERE (value_2 % 5 = 1) ) AS a ON (a.user_id_a = ma_p.user_id_p) ) ) AS a_ma_p ) AS inner_filter_q JOIN (SELECT value_2, value_3, user_id AS user_id_ck FROM events_table WHERE event_type = ANY(ARRAY [10, 11, 12]) ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS ma_ck ON (ma_ck.user_id_ck = inner_filter_q.user_id) ) AS inner_sub_q ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10) AS inner_search_q ON (ma_e.user_id_e = inner_search_q.user_id) ) AS outer_inner_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; user_id_e | event_type_e | value_2 | value_3 | user_id -----------+--------------+---------+---------+--------- 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 65 | 991 | 167 | 108 | 65 (10 rows) -- drop created functions SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') ORDER BY 1,2; nodename | nodeport | success | result -----------+----------+---------+--------------- localhost | 57637 | t | DROP FUNCTION localhost | 57638 | t | DROP FUNCTION (2 rows) DROP FUNCTION array_index(ANYARRAY, ANYELEMENT); -- a not supported query due to constant range table entry SELECT count(*) as subquery_count FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as a LEFT JOIN ( SELECT 1 as user_id ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported -- same with INNER JOIN SELECT count(*) as subquery_count FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as a INNER JOIN ( SELECT 1 as user_id ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported -- this is slightly different, we use RTE_VALUEs here SELECT Count(*) AS subquery_count FROM (SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13' ) GROUP BY user_id HAVING Count(DISTINCT value_1) = 2) AS a INNER JOIN (SELECT * FROM (VALUES (1, 'one'), (2, 'two'), (3, 'three')) AS t (user_id, letter)) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; ERROR: cannot push down this subquery DETAIL: Table expressions other than simple relations and subqueries are currently unsupported -- same query without LIMIT/OFFSET returns 30 rows SET client_min_messages TO DEBUG1; -- now, lets use a simple expression on the LIMIT and explicit coercion on the OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, 
e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT 3+3 OFFSET 5::smallint; DEBUG: push down of limit count: 11 user_id | array_length ---------+-------------- 23 | 115 46 | 115 10 | 114 96 | 113 73 | 111 91 | 107 (6 rows) -- now, lets use implicit coersion in LIMIT and a simple expressions on OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT '3' OFFSET 27+2; DEBUG: push down of limit count: 32 user_id | array_length ---------+-------------- 0 | 54 (1 row) -- create a test function which is marked as volatile CREATE OR REPLACE FUNCTION volatile_func_test() RETURNS INT AS $$ SELECT 5; $$ LANGUAGE sql VOLATILE; -- Citus should be able to evalute functions/row comparisons on the LIMIT/OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT volatile_func_test() + (ROW(1,2,NULL) < ROW(1,3,0))::int OFFSET volatile_func_test() + volatile_func_test(); DEBUG: push down of limit count: 16 user_id | array_length ---------+-------------- 91 | 107 69 | 103 67 | 101 35 | 100 80 | 100 86 | 100 (6 rows) -- now, lets use expressions on both the LIMIT and OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT (5 > 4)::int OFFSET CASE WHEN 5 != 5 THEN 27 WHEN 1 > 5 THEN 28 ELSE 29 END; DEBUG: push down of limit count: 30 user_id | array_length ---------+-------------- 0 | 54 (1 row) -- we don't allow parameters on the LIMIT/OFFSET clauses PREPARE parametrized_limit AS SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT $1 OFFSET $2; EXECUTE parametrized_limit(3,3); DEBUG: push down of limit count: 6 user_id | array_length ---------+-------------- 13 | 172 12 | 121 23 | 115 (3 rows) PREPARE parametrized_offset AS SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT 3 OFFSET $1; EXECUTE parametrized_offset(3); DEBUG: push down of limit count: 6 user_id | array_length ---------+-------------- 13 | 172 12 | 121 23 | 115 (3 rows) SET client_min_messages TO DEFAULT; DROP FUNCTION volatile_func_test(); CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool AS 'select $1 > $2;' 
LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- we don't support joins via functions SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE test_join_function_2(u.user_id, e.user_id) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- note that the following query has joins on the partition keys -- however we fail to push down it due to the function call on the -- where clause. We probably need to relax that check SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50 AND test_join_function_2(users_table.user_id, temp.user_id); ERROR: unsupported clause type DROP FUNCTION test_join_function_2(integer, integer); SET citus.enable_router_execution TO TRUE; SET citus.subquery_pushdown to OFF; citus-7.0.3/src/test/regress/expected/multi_subquery_complex_queries.out000066400000000000000000002440711317107136600270140ustar00rootroot00000000000000-- -- multi subquery complex queries aims to expand existing subquery pushdown -- regression tests to cover more caeses -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000; SET citus.enable_router_execution TO FALSE; -- -- UNIONs and JOINs mixed -- SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 115 1 | 82 2 | 160 3 | 158 (4 rows) -- same query with target entries shuffled inside UNIONs SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT 
*, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 115 1 | 82 2 | 160 3 | 158 (4 rows) -- not supported since events_subquery_2 doesn't have partition key on the target list -- within the shuffled target list SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" * 2 FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- not supported since events_subquery_2 doesn't have partition key on the target list SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."value_2" as user_id FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- we can support arbitrary subqueries within UNIONs SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_table as "events", users_table as "users" WHERE events.user_id = users.user_id AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 115 2 | 160 3 | 158 (3 rows) -- not supported since events_subquery_5 is not joined on partition key SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE 
event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_table as "events", users_table as "users" WHERE events.user_id = users.value_2 AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- not supported since the join is not equi join SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id != q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- not supported since subquery 3 includes a JOIN with non-equi join SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events", users_table as "users" WHERE event_type IN (20, 21, 22, 23, 24, 25) AND users.user_id != events.user_id ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- similar query with more union statements (to enable UNION tree become larger) SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 4 AS event FROM events_table as "events" WHERE event_type IN (31, 32, 33, 34, 35, 36)) events_subquery_5) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 5 AS event FROM events_table as "events" WHERE event_type IN (37, 38, 39, 40, 41, 42)) events_subquery_6) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 6 AS event FROM events_table as "events" WHERE event_type IN (50, 51, 52, 53, 54, 55)) events_subquery_6) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 115 1 | 82 2 | 160 3 | 158 4 | 117 5 | 98 6 | 167 (7 rows) -- -- UNION ALL Queries -- SELECT 
("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 115 1 | 82 2 | 160 3 | 158 (4 rows) -- same query target list entries shuffled SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 115 1 | 82 2 | 160 3 | 158 (4 rows) -- not supported since subquery 3 does not have partition key SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."value_2", "events"."time", 2 AS event FROM events_table as "events" WHERE 
event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- not supported since events_subquery_4 does not have partition key on the -- target list SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" * 2 FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- union all with inner and left joins SELECT user_id, count(*) as cnt FROM (SELECT first_query.user_id, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "first_query" INNER JOIN (SELECT "t"."user_id" FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t LEFT OUTER JOIN ( SELECT DISTINCT "events"."user_id" as user_id FROM events_table as "events" WHERE event_type IN (35, 36, 37, 38) GROUP BY user_id ) as t2 ON (t2.user_id = t.user_id) WHERE t2.user_id is NULL) as second_query ON ("first_query".user_id = "second_query".user_id)) as final_query GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; user_id | cnt ---------+----- 27 | 35 87 | 27 74 | 20 72 | 16 12 | 16 66 | 15 56 | 15 40 | 15 23 | 12 59 | 10 (10 rows) -- not supported since the join between t and t2 is not equi join -- union all with inner and left joins SELECT user_id, count(*) as cnt FROM (SELECT first_query.user_id, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "first_query" INNER JOIN (SELECT "t"."user_id" FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t LEFT OUTER JOIN ( SELECT DISTINCT "events"."user_id" as user_id FROM events_table as "events" WHERE event_type IN (35, 36, 37, 38) GROUP BY user_id ) as t2 ON (t2.user_id > t.user_id) WHERE t2.user_id is NULL) as second_query ON ("first_query".user_id = "second_query".user_id)) as final_query GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- -- Union, inner join and left join -- SELECT user_id, count(*) as cnt FROM (SELECT first_query.user_id, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "first_query" INNER JOIN (SELECT "t"."user_id" FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t LEFT OUTER JOIN ( SELECT DISTINCT "events"."user_id" as user_id FROM events_table as "events" WHERE event_type IN (35, 36, 37, 38) GROUP BY user_id ) as t2 ON (t2.user_id = t.user_id) WHERE t2.user_id is NULL) as second_query ON ("first_query".user_id = "second_query".user_id)) as final_query GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; user_id | cnt ---------+----- 27 | 35 87 | 27 74 | 20 72 | 16 12 | 16 66 | 15 56 | 15 40 | 15 23 | 12 59 | 10 (10 rows) -- Simple LATERAL JOINs with GROUP BYs in each side -- need to set subquery_pushdown due to limit for next 2 queries SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(time) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY lastseen DESC LIMIT 50) "some_users" order BY user_id LIMIT 50; user_id | lastseen ---------+--------------------------------- 19 | Tue Jan 21 05:23:09.26298 2014 22 | Tue Jan 21 05:22:28.223506 2014 25 | Tue Jan 21 01:10:29.315788 2014 31 | Tue Jan 21 02:43:24.591489 2014 33 | Tue Jan 21 04:23:35.623056 2014 34 | Tue Jan 21 04:15:03.874341 2014 (6 rows) -- same query with subuqery joins in topmost select SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY user_id limit 50; user_id | lastseen ---------+--------------------------------- 19 | Tue Jan 21 05:23:09.26298 2014 22 | Tue Jan 21 05:22:28.223506 2014 25 | Tue Jan 21 01:10:29.315788 2014 31 | Tue Jan 21 
02:43:24.591489 2014 33 | Tue Jan 21 04:23:35.623056 2014 34 | Tue Jan 21 04:15:03.874341 2014 (6 rows) -- reset subquery_pushdown SET citus.subquery_pushdown to OFF; -- not supported since JOIN is not on the partition key SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."value_1" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY user_id limit 50; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- not supported since JOIN is not on the partition key -- see (2 * user_id as user_id) target list element SELECT "some_users_data".user_id, lastseen FROM (SELECT 2 * user_id as user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY user_id limit 50; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- LATERAL JOINs used with INNER JOINs SET citus.subquery_pushdown to ON; SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 AND user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON TRUE ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON TRUE ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; user_id | lastseen ---------+--------------------------------- 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 15 | Tue Jan 21 02:25:36.136461 2014 (10 rows) -- -- A similar query with topmost select is dropped -- and replaced by aggregation. 
Notice the heavy use of limit -- SELECT "some_users_data".user_id, MAX(lastseen), count(*) FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true GROUP BY 1 ORDER BY 2, 1 DESC LIMIT 10; user_id | max | count ---------+---------------------------------+------- 15 | Tue Jan 21 02:25:36.136461 2014 | 10 13 | Tue Jan 21 05:06:48.989766 2014 | 10 14 | Tue Jan 21 05:46:51.286381 2014 | 10 (3 rows) SET citus.subquery_pushdown to OFF; -- not supported since the inner JOIN is not equi join SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id != "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- not supported since the inner JOIN is not on the partition key SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_1" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".value_1)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- not supported since upper LATERAL JOIN is not equi join SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_1" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id != filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- not supported since lower LATERAL JOIN is not on the partition key SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_1" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."value_1" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- NESTED INNER JOINs SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as real_user_id FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id = "user_where_1_join_1".user_id)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; value | generated_group_field -------+----------------------- 1 | 966 1 | 917 1 | 905 1 | 868 1 | 836 1 | 791 1 | 671 1 | 642 1 | 358 1 | 317 1 | 307 1 | 302 1 | 214 1 | 166 1 | 116 1 | 1 (16 rows) -- not supported since the first inner join is not on the partition key SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as real_user_id FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_2" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id = 
"user_where_1_join_1".value_2)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- not supported since the first inner join is not an equi join SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as real_user_id FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_2" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id >= "user_where_1_join_1".user_id)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- single level inner joins SELECT "value_3", count(*) AS cnt FROM (SELECT "value_3", "user_id", random() FROM (SELECT users_in_segment_1.user_id, value_3 FROM (SELECT user_id, value_3 * 2 as value_3 FROM (SELECT user_id, value_3 FROM (SELECT "users"."user_id", value_3 FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 30 ) simple_user_where_1 ) all_buckets_1 ) users_in_segment_1 JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 60 ) some_users_data ON ("users_in_segment_1".user_id = "some_users_data".user_id) ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; value_3 | cnt ---------+----- 556 | 75 228 | 75 146 | 75 70 | 75 1442 | 79 1232 | 79 1090 | 79 1012 | 79 886 | 79 674 | 79 (10 rows) -- not supported since there is no partition column equality at all SELECT "value_3", count(*) AS cnt FROM (SELECT "value_3", "user_id", random() FROM (SELECT users_in_segment_1.user_id, value_3 FROM (SELECT user_id, value_3 * 2 as value_3 FROM (SELECT user_id, value_3 FROM (SELECT "users"."user_id", value_3 FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 30 ) simple_user_where_1 ) all_buckets_1 ) users_in_segment_1 JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 60 ) some_users_data ON (true) ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- nested LATERAL JOINs SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1) "some_users_data" ON true ORDER BY value_3 DESC LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; user_id | value_3 ---------+--------- 44 | 998 65 | 996 66 | 996 37 | 995 57 | 989 21 | 985 (6 rows) -- nested lateral join at top most level SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200 ) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1 ) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10 ) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1 ) "some_users_data" ON true ORDER BY value_3 DESC, user_id ASC LIMIT 10; user_id | value_3 ---------+--------- 44 | 998 65 | 996 66 | 996 37 | 995 57 | 989 21 | 985 (6 rows) -- longer nested lateral joins 
SELECT * FROM (SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1) "some_users_data" ON true ORDER BY value_3 DESC LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; user_id | value_3 ---------+--------- 44 | 998 65 | 996 66 | 996 37 | 995 57 | 989 21 | 985 (6 rows) -- longer nested lateral join wth top level join SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200 ) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1 ) "last_events_1" ON TRUE ORDER BY value_3 DESC LIMIT 10 ) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1 ) "some_users_data" ON TRUE ORDER BY value_3 DESC LIMIT 10; user_id | value_3 ---------+--------- 44 | 998 65 | 996 66 | 996 37 | 995 57 | 989 21 | 985 (6 rows) SET citus.subquery_pushdown to OFF; -- LEFT JOINs used with INNER JOINs SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id" as event_user_id FROM events_table as "events" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" LEFT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM users_table as "users") "left_group_by_1" ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" group BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; cnt | generated_group_field -----+----------------------- 176 | 551 176 | 569 176 | 645 176 | 713 176 | 734 88 | 3 88 | 5 88 | 15 88 | 32 88 | 68 (10 rows) -- single table subquery, no JOINS involved SELECT count(*) AS cnt, user_id FROM (SELECT "eventQuery"."user_id", random() FROM (SELECT "events"."user_id" FROM events_table "events" WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90)) "eventQuery") "pushedDownQuery" GROUP BY "user_id" ORDER BY cnt DESC, user_id DESC LIMIT 10; cnt | user_id -----+--------- 4 | 24 3 | 96 3 | 93 3 | 49 3 | 46 3 | 38 3 | 14 3 | 10 2 | 99 2 | 95 (10 rows) -- lateral joins in the nested manner SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, value_2 FROM (SELECT user_id, max(value_2) AS value_2 FROM (SELECT user_id, value_2 FROM (SELECT user_id, value_2 FROM events_table as 
"events" WHERE user_id > 10 and user_id < 20) "events_1" ORDER BY value_2 DESC LIMIT 10000) "recent_events_1" GROUP BY user_id ORDER BY max(value_2) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND value_2 > 75 LIMIT 1) "some_users_data" ON true ORDER BY value_2 DESC LIMIT 10) "some_users" ORDER BY value_2 DESC, user_id DESC LIMIT 10; user_id | value_2 ---------+--------- 13 | 998 18 | 994 16 | 993 12 | 993 11 | 993 14 | 991 17 | 976 15 | 976 19 | 966 (9 rows) SET citus.subquery_pushdown to OFF; -- not supported since join is not on the partition key SELECT * FROM (SELECT "some_users_data".user_id, value_2 FROM (SELECT user_id, max(value_2) AS value_2 FROM (SELECT user_id, value_2 FROM (SELECT user_id, value_2 FROM events_table as "events" WHERE user_id > 10 and user_id < 20) "events_1" ORDER BY value_2 DESC LIMIT 10000) "recent_events_1" GROUP BY user_id ORDER BY max(value_2) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."value_2" = "some_recent_users"."user_id" AND value_2 > 75 LIMIT 1) "some_users_data" ON true ORDER BY value_2 DESC LIMIT 10) "some_users" ORDER BY value_2 DESC, user_id DESC LIMIT 10; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- lets test some unsupported set operations -- not supported since we use INTERSECT SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) INTERSECT (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot push down this subquery DETAIL: Intersect and Except are currently unsupported -- not supported due to offset SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE 
event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) OFFSET 3) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot push down this subquery DETAIL: Offset clause is currently unsupported -- not supported due to window functions SELECT user_id, some_vals FROM ( SELECT * , Row_number() over (PARTITION BY "user_id" ORDER BY "user_id") AS "some_vals", Random() FROM users_table ) user_id ORDER BY 1, 2 limit 10; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries without group by clause are not supported yet -- not supported due to non relation rte SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT 1 as user_id, now(), 3 AS event ) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported -- similar to the above, but constant rte is on the right side of the query SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT 1 as user_id, now(), 3 AS event ) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT random()::int as user_id) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot push down this subquery DETAIL: Subqueries 
without relations are unsupported SET citus.enable_router_execution TO TRUE;
citus-7.0.3/src/test/regress/expected/multi_subquery_complex_reference_clause.out
-- -- multi subquery complex queries aims to expand existing subquery pushdown -- regression tests to cover more cases -- the tables that are used depend on multi_insert_select_behavioral_analytics_create_table.sql -- -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000; SET citus.enable_router_execution TO FALSE; CREATE TABLE user_buy_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('user_buy_test_table', 'user_id'); create_distributed_table -------------------------- (1 row) INSERT INTO user_buy_test_table VALUES(1,2,1); INSERT INTO user_buy_test_table VALUES(2,3,4); INSERT INTO user_buy_test_table VALUES(3,4,2); INSERT INTO user_buy_test_table VALUES(7,5,2); CREATE TABLE users_return_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('users_return_test_table', 'user_id'); create_distributed_table -------------------------- (1 row) INSERT INTO users_return_test_table VALUES(4,1,1); INSERT INTO users_return_test_table VALUES(1,3,1); INSERT INTO users_return_test_table VALUES(3,2,2); CREATE TABLE users_ref_test_table(id int, it_name varchar(25), k_no int); SELECT create_reference_table('users_ref_test_table'); create_reference_table ------------------------ (1 row) INSERT INTO users_ref_test_table VALUES(1,'User_1',45); INSERT INTO users_ref_test_table VALUES(2,'User_2',46); INSERT INTO users_ref_test_table VALUES(3,'User_3',47); INSERT INTO users_ref_test_table VALUES(4,'User_4',48); INSERT INTO users_ref_test_table VALUES(5,'User_5',49); INSERT INTO users_ref_test_table VALUES(6,'User_6',50); -- Simple Join test with reference table SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; count ------- 3 (1 row) -- Should work, reference table at the inner side is allowed SELECT count(*) FROM (SELECT random(), k_no FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1 WHERE k_no = 47; count ------- 1 (1 row) -- Should not work, no equality between partition column and reference table SELECT * FROM (SELECT random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- Should not work, no equality between partition column and reference table SELECT * FROM (SELECT random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id > users_ref_test_table.id) subquery_1; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator.
-- Shouldn't work, reference table at the outer side is not allowed SELECT * FROM (SELECT random() FROM users_ref_test_table LEFT JOIN user_buy_test_table ON users_ref_test_table.id = user_buy_test_table.user_id) subquery_1; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- Should work, reference table at the inner side is allowed SELECT count(*) FROM (SELECT random() FROM users_ref_test_table RIGHT JOIN user_buy_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; count ------- 4 (1 row) -- Shouldn't work, reference table at the outer side is not allowed SELECT * FROM (SELECT random() FROM user_buy_test_table RIGHT JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- Should pass since reference table locates in the inner part of each left join SELECT count(*) FROM (SELECT tt1.user_id, random() FROM user_buy_test_table AS tt1 JOIN users_return_test_table as tt2 ON tt1.user_id = tt2.user_id) subquery_1 LEFT JOIN (SELECT tt1.user_id, random() FROM user_buy_test_table as tt1 LEFT JOIN users_ref_test_table as ref ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id; count ------- 2 (1 row) -- Should not pass since reference table locates in the outer part of right join SELECT * FROM (SELECT tt1.user_id, random() FROM user_buy_test_table AS tt1 JOIN users_return_test_table as tt2 ON tt1.user_id = tt2.user_id) subquery_1 RIGHT JOIN (SELECT tt1.user_id, random() FROM user_buy_test_table as tt1 JOIN users_ref_test_table as ref ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- LATERAL JOINs used with INNER JOINs with reference tables SET citus.subquery_pushdown to ON; SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_reference_table as "events" WHERE user_id > 12 and user_id < 16 AND user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON TRUE ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_reference_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON TRUE ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; user_id | lastseen ---------+--------------------------------- 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 14 | Tue Jan 21 05:46:51.286381 2014 (10 rows) SET citus.subquery_pushdown to OFF; -- NESTED INNER JOINs 
with reference tables SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_reference_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as real_user_id FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id = "user_where_1_join_1".user_id)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; value | generated_group_field -------+----------------------- 1 | 966 1 | 917 1 | 905 1 | 868 1 | 836 1 | 791 1 | 671 1 | 642 1 | 358 1 | 317 1 | 307 1 | 302 1 | 214 1 | 166 1 | 116 1 | 1 (16 rows) -- single level inner joins with reference tables SELECT "value_3", count(*) AS cnt FROM (SELECT "value_3", "user_id", random() FROM (SELECT users_in_segment_1.user_id, value_3 FROM (SELECT user_id, value_3 * 2 as value_3 FROM (SELECT user_id, value_3 FROM (SELECT "users"."user_id", value_3 FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 30 ) simple_user_where_1 ) all_buckets_1 ) users_in_segment_1 JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 60 ) some_users_data ON ("users_in_segment_1".user_id = "some_users_data".user_id) ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; value_3 | cnt ---------+----- 556 | 75 228 | 75 146 | 75 70 | 75 1442 | 79 1232 | 79 1090 | 79 1012 | 79 886 | 79 674 | 79 (10 rows) -- nested LATERAL JOINs with reference tables SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_reference_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_reference_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1) "some_users_data" ON true ORDER BY value_3 DESC LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; user_id | value_3 ---------+--------- 44 | 998 65 | 996 66 | 996 37 | 995 57 | 989 21 | 985 (6 rows) SET citus.subquery_pushdown to OFF; -- LEFT JOINs used with INNER JOINs should error out since reference table exist in the -- left side of the LEFT JOIN. 
SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id" as event_user_id FROM events_table as "events" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" LEFT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM users_table as "users") "left_group_by_1" ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" group BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- RIGHT JOINs used with INNER JOINs should error out since reference table exist in the -- right side of the RIGHT JOIN. SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id" as event_user_id FROM events_table as "events" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" RIGHT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM users_reference_table as "users") "right_group_by_1" ON ("right_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" group BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- Outer subquery with reference table SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_reference_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" FULL JOIN (SELECT "users".user_id FROM users_table as "users" WHERE users.value_2 > 50 and users.value_2 < 55) "some_users_data" ON "some_users_data"."user_id" = "some_recent_users"."user_id" ORDER BY user_id limit 50; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- -- UNIONs and JOINs with reference tables, shoukld error out -- SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_reference_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) 
events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot push down this subquery DETAIL: Reference tables are not supported with union operator -- reference table exist in the subquery of union, should error out SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_reference_table as "events", users_table as "users" WHERE events.user_id = users.user_id AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; ERROR: cannot push down this subquery DETAIL: Reference tables are not supported with union operator -- -- Should error out with UNION ALL Queries on reference tables -- SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_reference_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; 
ERROR: cannot push down this subquery DETAIL: Reference tables are not supported with union operator DROP TABLE user_buy_test_table; DROP TABLE users_ref_test_table; DROP TABLE users_return_test_table; citus-7.0.3/src/test/regress/expected/multi_subquery_in_where_clause.out000066400000000000000000000346171317107136600267470ustar00rootroot00000000000000-- -- multi subquery in where queries aims to expand existing subquery pushdown -- regression tests to cover more cases specifically subqueries in WHERE clause -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- subqueries in WHERE with greater operator SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_table WHERE users_table.user_id = events_table.user_id AND event_type = 50 GROUP BY user_id ) GROUP BY user_id HAVING count(*) > 66 ORDER BY user_id LIMIT 5; user_id --------- 49 55 56 63 (4 rows) -- subqueries in where with ALL operator SELECT user_id FROM users_table WHERE value_2 > 545 AND value_2 < ALL (SELECT avg(value_3) FROM events_table WHERE users_table.user_id = events_table.user_id GROUP BY user_id) GROUP BY 1 ORDER BY 1 DESC LIMIT 3; user_id --------- 69 52 12 (3 rows) -- IN operator on non-partition key SELECT user_id FROM events_table as e1 WHERE event_type IN (SELECT event_type FROM events_table as e2 WHERE value_2 = 15 AND value_3 > 25 AND e1.user_id = e2.user_id ) ORDER BY 1; user_id --------- 8 17 33 47 54 54 56 71 79 86 (10 rows) -- NOT IN on non-partition key SELECT user_id FROM events_table as e1 WHERE event_type NOT IN (SELECT event_type FROM events_table as e2 WHERE value_2 = 15 AND value_3 > 25 AND e1.user_id = e2.user_id ) GROUP BY 1 HAVING count(*) > 122 ORDER BY 1; user_id --------- 23 25 (2 rows) -- non-correlated query with =ANY on partition keys SELECT user_id, count(*) FROM users_table WHERE user_id =ANY(SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) GROUP BY 1 ORDER BY 2 DESC LIMIT 5; user_id | count ---------+------- 12 | 121 87 | 117 37 | 115 23 | 115 46 | 115 (5 rows) -- users that appeared more than 118 times SELECT user_id FROM users_table WHERE 118 <= (SELECT count(*) FROM events_table WHERE users_table.user_id = events_table.user_id GROUP BY user_id) GROUP BY user_id ORDER BY user_id; user_id --------- 13 17 23 25 (4 rows) -- the following query doesn't have a meaningful result -- but it is a valid query with an arbitrary subquery in -- WHERE clause SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND user_id IN ( SELECT e1.user_id FROM ( -- Get the first time each user viewed the homepage. 
SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; user_id | value_2 ---------+--------- 5 | 884 42 | 55 42 | 471 51 | 758 72 | 897 82 | 691 95 | 951 (7 rows) -- similar to the above query -- the following query doesn't have a meaningful result -- but it is a valid query with an arbitrary subquery in -- WHERE clause SELECT user_id FROM users_table WHERE user_id IN ( SELECT user_id FROM ( SELECT subquery_1.user_id, count_pay FROM ( (SELECT users_table.user_id, 'action=>1' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top GROUP BY count_pay, user_id ) GROUP BY user_id HAVING count(*) > 3 AND sum(value_2) > 49000 ORDER BY 1; user_id --------- 18 29 40 49 58 69 (6 rows) -- the following query doesn't have a meaningful result -- but it is a valid query with an arbitrary subquery in -- FROM clause involving a complex query in WHERE clause SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id IN ( SELECT user_id FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id = users_table.user_id) ) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; user_id | array_length ---------+-------------- 96 | 12204 8 | 8170 (2 rows) -- -- below tests only aims for cases where all relations -- are not joined on partition key -- -- e4 is not joined on the partition key SELECT user_id, value_2 FROM users_table WHERE 
value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND user_id IN ( SELECT e1.user_id FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE value_2 = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ); ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- left leaf query does not return partition key SELECT user_id FROM users_table WHERE user_id IN ( SELECT user_id FROM ( SELECT subquery_1.user_id, count_pay FROM ( (SELECT 2 * users_table.user_id as user_id, 'action=>1' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top GROUP BY count_pay, user_id ) GROUP BY user_id HAVING count(*) > 3 AND sum(value_2) > 49000 ORDER BY 1; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- NOT EXISTS query has non-equi join SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id IN ( SELECT user_id FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id != users_table.user_id) ) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- subquery in where clause doesn't have a relation SELECT user_id FROM users_table WHERE value_2 > (SELECT 1); ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported -- OFFSET is not supported in the subquey SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_table WHERE users_table.user_id = events_table.user_id AND event_type = 50 GROUP BY user_id OFFSET 3 ); ERROR: cannot push down this subquery DETAIL: Offset clause is currently unsupported -- we can detect unsupported subquerues even if they appear -- in WHERE subquery -> FROM subquery -> WHERE subquery SELECT user_id FROM users_table WHERE user_id IN (SELECT f_inner.user_id FROM ( SELECT e1.user_id FROM users_table u1, events_table e1 WHERE e1.user_id = u1.user_id ) as f_inner, ( SELECT e1.user_id FROM users_table u1, events_table e1 WHERE e1.user_id = u1.user_id AND e1.user_id IN (SELECT user_id FROM users_table LIMIT 3 ) ) as f_outer WHERE f_inner.user_id = f_outer.user_id ); ERROR: cannot push down this subquery DETAIL: Limit in subquery is currently unsupported -- semi join is not on the partition key for the third subquery SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND value_2 IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. CREATE FUNCTION test_join_function(integer, integer) RETURNS bool AS 'select $1 > $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- we disallow JOINs via functions SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND test_join_function(events_table.user_id, users_table.user_id)) ORDER BY 1 DESC, 2 DESC LIMIT 3; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
DROP FUNCTION test_join_function(int,int); citus-7.0.3/src/test/regress/expected/multi_subquery_in_where_reference_clause.out000066400000000000000000000122011317107136600307460ustar00rootroot00000000000000-- -- queries to test the subquery pushdown on reference tables -- subqueries in WHERE with greater operator SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id AND event_type = 50 GROUP BY user_id ) GROUP BY user_id HAVING count(*) > 66 ORDER BY user_id LIMIT 5; user_id --------- 49 55 56 63 (4 rows) -- subqueries in WHERE with IN operator SELECT user_id FROM users_table WHERE value_2 IN (SELECT value_2 FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; user_id --------- 0 1 2 (3 rows) -- subqueries in WHERE with NOT EXISTS operator, should work since -- reference table in the inner part of the join SELECT user_id FROM users_table WHERE NOT EXISTS (SELECT value_2 FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; user_id --------- (0 rows) -- subqueries in WHERE with NOT EXISTS operator, should not work -- there is a reference table in the outer part of the join SELECT user_id FROM users_reference_table WHERE NOT EXISTS (SELECT value_2 FROM events_table WHERE users_reference_table.user_id = events_table.user_id ) LIMIT 3; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join -- subqueries in WHERE with IN operator without equality SELECT user_id FROM users_table WHERE value_2 IN (SELECT value_2 FROM events_reference_table WHERE users_table.user_id > events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- have reference table without any equality, should error out SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_reference_table WHERE event_type = 50 GROUP BY user_id ) GROUP BY user_id HAVING count(*) > 66 ORDER BY user_id LIMIT 5; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. -- users that appeared more than 118 times, should run since the reference table -- on the right side of the semi join SELECT user_id FROM users_table WHERE 118 <= (SELECT count(*) FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id GROUP BY user_id) GROUP BY user_id ORDER BY user_id; user_id --------- 13 17 23 25 (4 rows) -- should error out since reference table exist on the left side -- of the left lateral join SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND user_id IN ( SELECT e1.user_id FROM ( -- Get the first time each user viewed the homepage. 
SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_reference_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_reference_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_reference_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_reference_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_reference_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; ERROR: cannot pushdown the subquery DETAIL: There exist a reference table in the outer part of the outer join citus-7.0.3/src/test/regress/expected/multi_subquery_misc.out000066400000000000000000000231011317107136600245300ustar00rootroot00000000000000-- multi subquery pushdown misc aims to test subquery pushdown queries with -- (i) Prepared statements -- (ii) PL/PGSQL functions -- (iii) SQL functions -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests SET citus.enable_router_execution TO false; PREPARE prepared_subquery_1 AS SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; EXECUTE prepared_subquery_1; user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) PREPARE prepared_subquery_2(int, int) AS SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= $1 AND user_id <= $2 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; -- should be fine with more than five executions EXECUTE prepared_subquery_2(10, 70); user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 
2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) EXECUTE prepared_subquery_2(10, 70); user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) EXECUTE prepared_subquery_2(10, 70); user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) EXECUTE prepared_subquery_2(10, 70); user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) EXECUTE prepared_subquery_2(10, 70); user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) EXECUTE prepared_subquery_2(10, 70); user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) EXECUTE prepared_subquery_2(10, 70); user_id | user_lastseen | array_length ---------+---------------------------------+-------------- 12 | Sun Jan 19 01:49:20.372688 2014 | 1 20 | Sat Jan 18 14:25:31.817903 2014 | 1 42 | Thu Jan 16 07:08:02.651966 2014 | 1 56 | Tue Jan 14 12:11:47.27375 2014 | 1 57 | Mon Jan 13 14:53:50.494836 2014 | 1 65 | Sun Jan 12 03:14:26.810597 2014 | 1 (6 rows) -- prepared statements with subqueries in WHERE clause PREPARE prepared_subquery_3(int, int, int, int, int, int) AS SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $4 AND value_1 <= $3) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $5 AND value_1 <= $6) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $1 AND value_1 <= $2) GROUP BY user_id ORDER BY user_id DESC LIMIT 5; -- enough times (6+) to actually use prepared statements EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); user_id --------- 93 90 88 87 84 (5 rows) EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); user_id --------- 93 90 88 87 84 (5 rows) EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); user_id --------- 93 90 88 87 84 (5 rows) EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); user_id --------- 93 90 88 87 84 (5 rows) EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); user_id --------- 93 90 88 87 84 (5 rows) EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 
40); user_id --------- 93 90 88 87 84 (5 rows) CREATE FUNCTION plpgsql_subquery_test(int, int) RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < $2; END; $$ LANGUAGE plpgsql; -- enough times (6+) to actually use prepared statements SELECT plpgsql_subquery_test(10, 20); plpgsql_subquery_test ----------------------- 1500 (1 row) SELECT plpgsql_subquery_test(10, 20); plpgsql_subquery_test ----------------------- 1500 (1 row) SELECT plpgsql_subquery_test(10, 20); plpgsql_subquery_test ----------------------- 1500 (1 row) SELECT plpgsql_subquery_test(10, 20); plpgsql_subquery_test ----------------------- 1500 (1 row) SELECT plpgsql_subquery_test(10, 20); plpgsql_subquery_test ----------------------- 1500 (1 row) SELECT plpgsql_subquery_test(10, 20); plpgsql_subquery_test ----------------------- 1500 (1 row) -- this should also work, but should return 0 given that int = NULL is always returns false SELECT plpgsql_subquery_test(10, NULL); plpgsql_subquery_test ----------------------- 0 (1 row) CREATE FUNCTION sql_subquery_test(int, int) RETURNS bigint AS $$ SELECT count(*) FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < $2; $$ LANGUAGE SQL; -- should error out SELECT sql_subquery_test(5,5); ERROR: could not create distributed plan DETAIL: Possibly this is caused by the use of parameters in SQL functions, which is not supported in Citus. HINT: Consider using PL/pgSQL functions instead. 
CONTEXT: SQL function "sql_subquery_test" statement 1 DROP FUNCTION plpgsql_subquery_test(int, int); DROP FUNCTION sql_subquery_test(int, int); citus-7.0.3/src/test/regress/expected/multi_subquery_union.out000066400000000000000000000773771317107136600247550ustar00rootroot00000000000000-- -- multi subquery toplevel union queries aims to expand existing subquery pushdown -- regression tests to cover more cases -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; SET citus.enable_router_execution TO false; -- a very simple union query SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; user_id | counter ---------+--------- 7 | 9 8 | 9 15 | 9 16 | 9 20 | 9 (5 rows) -- a very simple union query with reference table SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT user_id, value_2 % 10 AS counter FROM events_reference_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; ERROR: cannot pushdown this query DETAIL: Reference tables are not allowed with set operations -- the same query with union all SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION ALL SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; user_id | counter ---------+--------- 7 | 9 7 | 9 8 | 9 15 | 9 15 | 9 (5 rows) -- the same query with union all and reference table SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION ALL SELECT user_id, value_2 % 10 AS counter FROM events_reference_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; ERROR: cannot pushdown this query DETAIL: Reference tables are not allowed with set operations -- the same query with group by SELECT user_id, sum(counter) FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; user_id | sum ---------+----- 49 | 22 15 | 19 26 | 17 48 | 17 61 | 17 (5 rows) -- the same query with UNION ALL clause SELECT user_id, sum(counter) FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION ALL SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; user_id | sum ---------+----- 48 | 35 61 | 30 15 | 28 49 | 25 80 | 24 (5 rows) -- the same query target list entries shuffled SELECT user_id, sum(counter) FROM ( SELECT value_2 % 10 AS counter, user_id FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT value_2 % 10 AS counter, user_id FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; user_id | sum ---------+----- 49 | 22 15 | 19 26 | 17 48 | 17 61 | 17 (5 rows) -- same query with 
GROUP BY SELECT user_id, sum(counter) FROM ( SELECT user_id, value_2 AS counter FROM events_table WHERE event_type IN (1, 2) UNION SELECT user_id, value_2 AS counter FROM events_table WHERE event_type IN (5, 6) ) user_id GROUP BY user_id --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; user_id | sum ---------+------ 1 | 518 2 | 637 4 | 343 6 | 354 7 | 1374 (5 rows) -- the same query target list entries shuffled but this time the subqueries target list -- is shuffled SELECT user_id, sum(counter) FROM ( SELECT value_2 AS counter, user_id FROM events_table WHERE event_type IN (1, 2) UNION SELECT value_2 AS counter, user_id FROM events_table WHERE event_type IN (5, 6) ) user_id GROUP BY user_id --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; user_id | sum ---------+------ 1 | 518 2 | 637 4 | 343 6 | 354 7 | 1374 (5 rows) -- similar query this time more subqueries and target list contains a resjunk entry SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; sum ------- 27772 25720 24993 24968 23508 (5 rows) -- similar query this time more subqueries with reference table and target list contains a resjunk entry SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_reference_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; ERROR: cannot pushdown this query DETAIL: Reference tables are not allowed with set operations -- similar query as above, with UNION ALL SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 5000 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING sum(value_2) > 500 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; 
sum ------- 27667 25080 24814 24365 23508 (5 rows) -- unions within unions SELECT * FROM ( ( SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_1 GROUP BY user_id) UNION (SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_2 GROUP BY user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | sum ---------+-------- 23 | 126017 45 | 117323 25 | 116595 17 | 116520 90 | 115843 (5 rows) -- unions within unions with reference table SELECT * FROM ( ( SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_reference_table GROUP BY user_id) user_id_1 GROUP BY user_id) UNION (SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_2 GROUP BY user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; ERROR: cannot pushdown this query DETAIL: Reference tables are not allowed with set operations -- top level unions are wrapped into top level aggregations SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" ) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 55 1 | 38 2 | 70 3 | 58 (4 rows) -- exactly the same query -- but wrapper unions are removed from the inner part of the query SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY 
"t1"."user_id") AS t) "q" ) as final_query GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 55 1 | 38 2 | 70 3 | 58 (4 rows) -- again excatly the same query with top level wrapper removed SELECT ("q"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 55 1 | 38 2 | 70 3 | 58 (4 rows) -- again same query but with only two top level empty queries (i.e., no group bys) SELECT * FROM ( SELECT * FROM ( SELECT "t1"."user_id" FROM ( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 ) AS t) "q" ORDER BY 1 LIMIT 5; user_id --------- 0 0 0 1 1 (5 rows) -- a very similar query UNION ALL SELECT ("q"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION ALL (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION ALL (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION ALL (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" GROUP BY types ORDER BY types; types | sumofeventtype -------+---------------- 0 | 55 1 | 38 2 | 70 3 | 58 (4 rows) -- some UNION ALL queries that are going to be pulled up SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT user_id FROM events_table) ) b; count ------- 20002 (1 row) -- some UNION ALL queries that are going to be pulled up with reference table SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT user_id FROM events_reference_table) ) b; ERROR: cannot pushdown this query DETAIL: Reference tables are not allowed with set operations -- similar query without top level agg SELECT user_id 
FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT user_id FROM events_table) ) b ORDER BY 1 DESC LIMIT 5; user_id --------- 100 100 100 100 100 (5 rows) -- similar query with multiple target list entries SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM users_table) UNION ALL (SELECT value_3, user_id FROM events_table) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | value_3 ---------+--------- 100 | 999 100 | 997 100 | 991 100 | 989 100 | 988 (5 rows) -- similar query group by inside the subqueries SELECT user_id, value_3_sum FROM ( (SELECT sum(value_3) as value_3_sum, user_id FROM users_table GROUP BY user_id) UNION ALL (SELECT sum(value_3) as value_3_sum, user_id FROM users_table GROUP BY user_id) ) b ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | value_3_sum ---------+------------- 10 | 64060 10 | 64060 62 | 62445 62 | 62445 26 | 60536 (5 rows) -- similar query top level group by SELECT user_id, sum(value_3) FROM ( (SELECT value_3, user_id FROM users_table) UNION ALL (SELECT value_3, user_id FROM events_table) ) b GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; user_id | sum ---------+-------- 23 | 123923 25 | 118087 69 | 115828 26 | 114705 3 | 113915 (5 rows) -- a long set operation list SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (26, 27, 28, 29, 30)) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; user_id | value_3 ---------+--------- 100 | 951 99 | 558 99 | 14 98 | 987 98 | 577 (5 rows) -- no partition key on the top SELECT max(value_3) FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (26, 27, 28, 29, 30)) ) b GROUP BY user_id ORDER BY 1 DESC LIMIT 5; max ----- 997 997 996 995 995 (5 rows) -- now lets also have some unsupported queries -- group by is not on the partition key SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id UNION SELECT value_1 as user_id, sum(value_2) AS counter FROM users_table GROUP BY value_1 ) user_id GROUP BY user_id; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. 
-- partition key is not selected SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT 2 * user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. -- excepts within unions are not supported SELECT * FROM ( ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id ) user_id_1 GROUP BY user_id ) UNION ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id EXCEPT SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id ) user_id_2 GROUP BY user_id) ) as ftop; ERROR: cannot push down this subquery DETAIL: Intersect and Except are currently unsupported -- joins inside unions are not supported SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT events_table.user_id, sum(events_table.value_2) AS counter FROM events_table, users_table WHERE users_table.user_id > events_table.user_id GROUP BY 1 ) user_id GROUP BY user_id; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. -- joins inside unions are not supported -- slightly more comlex than the above SELECT * FROM ( ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id ) user_id_1 GROUP BY user_id ) UNION ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT events_table.user_id, sum(events_table.value_2) AS counter FROM events_table, users_table WHERE (events_table.user_id = users_table.user_id) GROUP BY events_table.user_id ) user_id_2 GROUP BY user_id) ) as ftop; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. 
-- offset inside the union SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id OFFSET 4 ) user_id GROUP BY user_id; ERROR: cannot push down this subquery DETAIL: Offset clause is currently unsupported -- lower level union does not return partition key with the other relations SELECT * FROM ( ( SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_1 GROUP BY user_id) UNION (SELECT user_id, sum(counter) FROM (SELECT sum(value_2) AS counter, user_id FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_2 GROUP BY user_id)) AS ftop; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. -- some UNION all queries that are going to be pulled up SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT 2 * user_id FROM events_table) ) b; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. -- last query does not have partition key SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT value_3, value_2 FROM events_table where event_type IN (26, 27, 28, 29, 30)) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. -- we don't allow joins within unions SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT users_table.user_id FROM events_table, users_table WHERE events_table.user_id = users_table.user_id) ) b; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. 
-- we don't support subqueries without relations SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT 1) ) b; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported -- we don't support subqueries without relations SELECT * FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT (random() * 100)::int) ) b; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported -- we don't support subqueries without relations SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT 1, 2) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT 1, now(), 3 AS event) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" ) as final_query GROUP BY types ORDER BY types; ERROR: cannot push down this subquery DETAIL: Subqueries without relations are unsupported SET citus.enable_router_execution TO true; DROP TABLE events_reference_table; DROP TABLE users_reference_table; citus-7.0.3/src/test/regress/expected/multi_subtransactions.out000066400000000000000000000155131317107136600250700ustar00rootroot00000000000000CREATE TABLE artists ( id bigint NOT NULL, name text NOT NULL ); SELECT create_distributed_table('artists', 'id'); create_distributed_table -------------------------- (1 row) -- add some data INSERT INTO artists VALUES (1, 'Pablo Picasso'); INSERT INTO artists VALUES (2, 'Vincent van Gogh'); INSERT INTO artists VALUES (3, 'Claude Monet'); INSERT INTO artists VALUES (4, 'William Kurelek'); -- RELEASE SAVEPOINT BEGIN; INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; DELETE FROM artists WHERE id=5; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; id | name ----+------ (0 rows) -- ROLLBACK TO SAVEPOINT BEGIN; INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; id | name ----+----------- 5 | Asher Lev (1 row) -- Serial sub-transaction releases BEGIN; SAVEPOINT s1; DELETE FROM artists WHERE id=5; RELEASE SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (5, 'Jacob Kahn'); RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM 
artists WHERE id=5; id | name ----+------------ 5 | Jacob Kahn (1 row) -- Serial sub-transaction rollbacks BEGIN; SAVEPOINT s1; UPDATE artists SET name='A' WHERE id=5; ROLLBACK TO SAVEPOINT s1; SAVEPOINT s2; DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; id | name ----+------------ 5 | Jacob Kahn (1 row) -- Multiple sub-transaction activity before first query BEGIN; SAVEPOINT s0; SAVEPOINT s1; SAVEPOINT s2; SAVEPOINT s3; ROLLBACK TO SAVEPOINT s2; RELEASE SAVEPOINT s1; INSERT INTO artists VALUES (6, 'John J. Audubon'); ROLLBACK TO SAVEPOINT s0; INSERT INTO artists VALUES (6, 'Emily Carr'); COMMIT; SELECT * FROM artists WHERE id=6; id | name ----+------------ 6 | Emily Carr (1 row) -- Release after rollback BEGIN; SAVEPOINT s1; ROLLBACK TO s1; RELEASE SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (7, 'John J. Audubon'); ROLLBACK TO s2; RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; id | name ----+------ (0 rows) -- Recover from errors \set VERBOSITY terse BEGIN; SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (7, NULL); ERROR: null value in column "name" violates not-null constraint ROLLBACK TO SAVEPOINT s1; COMMIT; -- Don't recover from errors BEGIN; SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (7, NULL); ERROR: null value in column "name" violates not-null constraint SAVEPOINT s3; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK TO SAVEPOINT s3; ERROR: no such savepoint COMMIT; -- =================================================================== -- Tests for replication factor > 1 -- =================================================================== CREATE TABLE researchers ( id bigint NOT NULL, lab_id int NOT NULL, name text NOT NULL ); SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('researchers', 2, 2); master_create_worker_shards ----------------------------- (1 row) -- Basic rollback and release BEGIN; INSERT INTO researchers VALUES (7, 4, 'Jan Plaza'); SAVEPOINT s1; INSERT INTO researchers VALUES (8, 4, 'Alonzo Church'); ROLLBACK TO s1; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM researchers WHERE id in (7, 8); id | lab_id | name ----+--------+----------- 7 | 4 | Jan Plaza (1 row) -- Recover from failure on one of nodes BEGIN; SAVEPOINT s1; INSERT INTO researchers VALUES (11, 11, 'Dana Scott'); INSERT INTO researchers VALUES (NULL, 10, 'Stephen Kleene'); ERROR: null value in column "id" violates not-null constraint ROLLBACK TO SAVEPOINT s1; INSERT INTO researchers VALUES (12, 10, 'Stephen Kleene'); COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name ----+--------+---------------- 12 | 10 | Stephen Kleene (1 row) -- Don't recover, but rollback BEGIN; SAVEPOINT s1; INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); ERROR: null value in column "id" violates not-null constraint RELEASE SAVEPOINT s1; ERROR: current transaction is aborted, commands ignored until end of transaction block SAVEPOINT s2; ERROR: current transaction is aborted, commands ignored until end of transaction block ROLLBACK; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name ----+--------+---------------- 12 | 10 | Stephen Kleene (1 row) -- Don't recover, and commit BEGIN; SAVEPOINT s1; INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); ERROR: null value in column "id" violates not-null 
constraint RELEASE SAVEPOINT s1; ERROR: current transaction is aborted, commands ignored until end of transaction block SAVEPOINT s2; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name ----+--------+---------------- 12 | 10 | Stephen Kleene (1 row) -- Implicit savepoints via pl/pgsql exceptions BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; NOTICE: caught not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name ----+--------+---------------- 12 | 10 | Stephen Kleene (1 row) BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); RAISE EXCEPTION plpgsql_error; EXCEPTION WHEN plpgsql_error THEN RAISE NOTICE 'caught manual plpgsql_error'; END $$; NOTICE: caught manual plpgsql_error COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name ----+--------+---------------- 12 | 10 | Stephen Kleene (1 row) BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); EXCEPTION WHEN not_null_violation THEN RAISE EXCEPTION not_null_violation; -- rethrow it END $$; ERROR: not_null_violation COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name ----+--------+---------------- 12 | 10 | Stephen Kleene (1 row) -- Insert something after catching error. BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); EXCEPTION WHEN not_null_violation THEN INSERT INTO researchers VALUES (32, 10, 'Raymond Smullyan'); END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; id | lab_id | name ----+--------+------------------ 12 | 10 | Stephen Kleene 32 | 10 | Raymond Smullyan (2 rows) -- Clean-up DROP TABLE artists; DROP TABLE researchers; citus-7.0.3/src/test/regress/expected/multi_table_ddl.out000066400000000000000000000114521317107136600235560ustar00rootroot00000000000000-- -- MULTI_TABLE_DDL -- -- Tests around changing the schema and dropping of a distributed table ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 870000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); master_create_distributed_table --------------------------------- (1 row) -- verify that the citus extension can't be dropped while distributed tables exist DROP EXTENSION citus; ERROR: cannot drop extension citus because other objects depend on it DETAIL: table testtableddl depends on extension citus HINT: Use DROP ... CASCADE to drop the dependent objects too. 
-- verify that the distribution column can't have its type changed ALTER TABLE testtableddl ALTER COLUMN distributecol TYPE text; ERROR: cannot execute ALTER TABLE command involving partition column -- verify that the distribution column can't be dropped ALTER TABLE testtableddl DROP COLUMN distributecol; ERROR: cannot execute ALTER TABLE command involving partition column -- verify that the table can be dropped in a transaction block \set VERBOSITY terse BEGIN; DROP TABLE testtableddl; COMMIT; \set VERBOSITY default -- recreate testtableddl CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); master_create_distributed_table --------------------------------- (1 row) -- verify that the table can be dropped DROP TABLE testtableddl; -- verify that the table can dropped even if shards exist CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -- create table and do create empty shard test here, too SET citus.shard_replication_factor TO 1; SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT 1 FROM master_create_empty_shard('testtableddl'); ?column? ---------- 1 (1 row) -- now actually drop table and shards DROP TABLE testtableddl; RESET citus.shard_replication_factor; -- ensure no metadata of distributed tables are remaining SELECT * FROM pg_dist_partition; logicalrelid | partmethod | partkey | colocationid | repmodel --------------+------------+---------+--------------+---------- (0 rows) SELECT * FROM pg_dist_shard; logicalrelid | shardid | shardstorage | shardminvalue | shardmaxvalue --------------+---------+--------------+---------------+--------------- (0 rows) SELECT * FROM pg_dist_shard_placement; shardid | shardstate | shardlength | nodename | nodeport | placementid ---------+------------+-------------+----------+----------+------------- (0 rows) -- check that the extension now can be dropped (and recreated) DROP EXTENSION citus; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); ?column? ---------- 1 (1 row) SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? ---------- 1 (1 row) -- create a table with a SERIAL column CREATE TABLE testserialtable(id serial, group_id integer); SELECT master_create_distributed_table('testserialtable', 'group_id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('testserialtable', 2, 1); master_create_worker_shards ----------------------------- (1 row) -- should not be able to add additional serial columns ALTER TABLE testserialtable ADD COLUMN other_id serial; ERROR: cannot execute ADD COLUMN commands involving serial pseudotypes -- and we shouldn't be able to change a distributed sequence's owner ALTER SEQUENCE testserialtable_id_seq OWNED BY NONE; ERROR: cannot alter OWNED BY option of a sequence already owned by a distributed table -- or create a sequence with a distributed owner CREATE SEQUENCE standalone_sequence OWNED BY testserialtable.group_id; ERROR: cannot create sequences that specify a distributed table in their OWNED BY option HINT: Use a sequence in a distributed table by specifying a serial column type before creating any shards. 
-- or even change a manual sequence to be owned by a distributed table CREATE SEQUENCE standalone_sequence; ALTER SEQUENCE standalone_sequence OWNED BY testserialtable.group_id; ERROR: cannot associate an existing sequence with a distributed table HINT: Use a sequence in a distributed table by specifying a serial column type before creating any shards. -- an edge case, but it's OK to change an owner to the same distributed table ALTER SEQUENCE testserialtable_id_seq OWNED BY testserialtable.id; -- drop distributed table \c - - - :master_port DROP TABLE testserialtable; -- verify owned sequence is dropped \c - - - :worker_1_port \ds List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) citus-7.0.3/src/test/regress/expected/multi_task_assignment_policy.out000066400000000000000000000150041317107136600264120ustar00rootroot00000000000000-- -- MULTI_TASK_ASSIGNMENT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- t (1 row) SET citus.explain_distributed_queries TO off; -- Check that our policies for assigning tasks to worker nodes run as expected. -- To test this, we first create a shell table, and then manually insert shard -- and shard placement data into system catalogs. We next run Explain command, -- and check that tasks are assigned to worker nodes as expected. CREATE TABLE task_assignment_test_table (test_id integer); SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Create logical shards with shardids 200, 201, and 202 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) SELECT pg_class.oid, series.index, 'r', 1, 1000 FROM pg_class, generate_series(200, 202) AS series(index) WHERE pg_class.relname = 'task_assignment_test_table'; -- Create shard placements for shard 200 and 201 INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 200, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 2; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 201, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 2; -- Create shard placements for shard 202 INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 202, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport DESC LIMIT 2; -- Start transaction block to avoid auto commits. This avoids additional debug -- messages from getting printed at real transaction starts and commits. BEGIN; -- Increase log level to see which worker nodes tasks are assigned to. Note that -- the following log messages print node name and port numbers; and node numbers -- in regression tests depend upon PG_VERSION_NUM. 
SET client_min_messages TO DEBUG3; -- First test the default greedy task assignment policy SET citus.task_assignment_policy TO 'greedy'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Next test the first-replica task assignment policy SET citus.task_assignment_policy TO 'first-replica'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Finally test the round-robin task assignment policy SET citus.task_assignment_policy TO 'round-robin'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 
rows) RESET citus.task_assignment_policy; RESET client_min_messages; COMMIT; citus-7.0.3/src/test/regress/expected/multi_task_assignment_policy_0.out000066400000000000000000000170441317107136600266370ustar00rootroot00000000000000-- -- MULTI_TASK_ASSIGNMENT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; version_above_nine -------------------- f (1 row) SET citus.explain_distributed_queries TO off; -- Check that our policies for assigning tasks to worker nodes run as expected. -- To test this, we first create a shell table, and then manually insert shard -- and shard placement data into system catalogs. We next run Explain command, -- and check that tasks are assigned to worker nodes as expected. CREATE TABLE task_assignment_test_table (test_id integer); SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Create logical shards with shardids 200, 201, and 202 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) SELECT pg_class.oid, series.index, 'r', 1, 1000 FROM pg_class, generate_series(200, 202) AS series(index) WHERE pg_class.relname = 'task_assignment_test_table'; -- Create shard placements for shard 200 and 201 INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 200, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 2; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 201, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 2; -- Create shard placements for shard 202 INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 202, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport DESC LIMIT 2; -- Start transaction block to avoid auto commits. This avoids additional debug -- messages from getting printed at real transaction starts and commits. BEGIN; -- Increase log level to see which worker nodes tasks are assigned to. Note that -- the following log messages print node name and port numbers; and node numbers -- in regression tests depend upon PG_VERSION_NUM. 
SET client_min_messages TO DEBUG3; DEBUG: CommitTransactionCommand -- First test the default greedy task assignment policy SET citus.task_assignment_policy TO 'greedy'; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57637 DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Next test the first-replica task assignment policy SET citus.task_assignment_policy TO 'first-replica'; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) -- Finally test the round-robin task assignment policy SET citus.task_assignment_policy TO 'round-robin'; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57637 DEBUG: assigned task 4 to node 
localhost:57637 DEBUG: assigned task 2 to node localhost:57638 DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) EXPLAIN SELECT count(*) FROM task_assignment_test_table; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: assigned task 6 to node localhost:57638 DEBUG: assigned task 4 to node localhost:57638 DEBUG: assigned task 2 to node localhost:57637 DEBUG: CommitTransactionCommand QUERY PLAN ----------------------------------------------------------------------- Aggregate (cost=0.00..0.00 rows=0 width=0) -> Custom Scan (Citus Real-Time) (cost=0.00..0.00 rows=0 width=0) explain statements for distributed queries are not enabled (3 rows) RESET citus.task_assignment_policy; DEBUG: StartTransactionCommand DEBUG: ProcessUtility DEBUG: CommitTransactionCommand RESET client_min_messages; DEBUG: StartTransactionCommand DEBUG: ProcessUtility COMMIT; citus-7.0.3/src/test/regress/expected/multi_task_string_size.out000066400000000000000000000131241317107136600252240ustar00rootroot00000000000000-- -- MULTI_TASK_STRING_SIZE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1602000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1602000; CREATE TABLE wide_table ( long_column_001 int, long_column_002 int, long_column_003 int, long_column_004 int, long_column_005 int, long_column_006 int, long_column_007 int, long_column_008 int, long_column_009 int, long_column_010 int, long_column_011 int, long_column_012 int, long_column_013 int, long_column_014 int, long_column_015 int, long_column_016 int, long_column_017 int, long_column_018 int, long_column_019 int, long_column_020 int, long_column_021 int, long_column_022 int, long_column_023 int, long_column_024 int, long_column_025 int, long_column_026 int, long_column_027 int, long_column_028 int, long_column_029 int, long_column_030 int, long_column_031 int, long_column_032 int, long_column_033 int, long_column_034 int, long_column_035 int, long_column_036 int, long_column_037 int, long_column_038 int, long_column_039 int, long_column_040 int, long_column_041 int, long_column_042 int, long_column_043 int, long_column_044 int, long_column_045 int, long_column_046 int, long_column_047 int, long_column_048 int, long_column_049 int, long_column_050 int, long_column_051 int, long_column_052 int, long_column_053 int, long_column_054 int, long_column_055 int, long_column_056 int, long_column_057 int, long_column_058 int, long_column_059 int, long_column_060 int, long_column_061 int, long_column_062 int, long_column_063 int, long_column_064 int, long_column_065 int, long_column_066 int, long_column_067 int, long_column_068 int, long_column_069 int, long_column_070 int, long_column_071 int, long_column_072 int, long_column_073 int, long_column_074 int, long_column_075 int, long_column_076 int, long_column_077 int, long_column_078 int, long_column_079 int, long_column_080 int, long_column_081 int, long_column_082 int, long_column_083 int, long_column_084 int, long_column_085 int, long_column_086 int, long_column_087 int, long_column_088 int, long_column_089 int, long_column_090 int, long_column_091 int, long_column_092 int, long_column_093 int, long_column_094 int, long_column_095 int, long_column_096 int, long_column_097 int, long_column_098 int, long_column_099 int, long_column_100 int, long_column_101 int, 
long_column_102 int, long_column_103 int, long_column_104 int, long_column_105 int, long_column_106 int, long_column_107 int, long_column_108 int, long_column_109 int, long_column_110 int, long_column_111 int, long_column_112 int, long_column_113 int, long_column_114 int, long_column_115 int, long_column_116 int, long_column_117 int, long_column_118 int, long_column_119 int, long_column_120 int, long_column_121 int, long_column_122 int, long_column_123 int, long_column_124 int, long_column_125 int, long_column_126 int, long_column_127 int, long_column_128 int, long_column_129 int, long_column_130 int, long_column_131 int, long_column_132 int, long_column_133 int, long_column_134 int, long_column_135 int, long_column_136 int, long_column_137 int, long_column_138 int, long_column_139 int, long_column_140 int, long_column_141 int, long_column_142 int, long_column_143 int, long_column_144 int, long_column_145 int, long_column_146 int, long_column_147 int, long_column_148 int, long_column_149 int, long_column_150 int, long_column_151 int, long_column_152 int, long_column_153 int, long_column_154 int, long_column_155 int, long_column_156 int, long_column_157 int, long_column_158 int, long_column_159 int, long_column_160 int, long_column_161 int, long_column_162 int, long_column_163 int, long_column_164 int, long_column_165 int, long_column_166 int, long_column_167 int, long_column_168 int, long_column_169 int, long_column_170 int, long_column_171 int, long_column_172 int, long_column_173 int, long_column_174 int, long_column_175 int, long_column_176 int, long_column_177 int, long_column_178 int, long_column_179 int, long_column_180 int, long_column_181 int, long_column_182 int, long_column_183 int, long_column_184 int, long_column_185 int, long_column_186 int, long_column_187 int, long_column_188 int, long_column_189 int, long_column_190 int, long_column_191 int, long_column_192 int, long_column_193 int, long_column_194 int, long_column_195 int, long_column_196 int, long_column_197 int, long_column_198 int, long_column_199 int, long_column_200 int ); SELECT create_distributed_table('wide_table', 'long_column_001'); create_distributed_table -------------------------- (1 row) SET citus.task_executor_type TO 'task-tracker'; SHOW citus.max_task_string_size; citus.max_task_string_size ---------------------------- 12288 (1 row) -- setting can not be changed on runtime SET citus.max_task_string_size TO 20000; ERROR: parameter "citus.max_task_string_size" cannot be changed without restarting the server -- error message may vary between executions -- hiding warning and error message -- no output means the query has failed SET client_min_messages to FATAL; SELECT u.* FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); -- following will succeed since it fetches few columns SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); long_column_001 | long_column_002 | long_column_003 -----------------+-----------------+----------------- (0 rows) RESET client_min_messages; DROP TABLE wide_table; RESET citus.shard_count; RESET citus.task_executor_type; citus-7.0.3/src/test/regress/expected/multi_test_helpers.out000066400000000000000000000064131317107136600243460ustar00rootroot00000000000000-- File to create functions and helpers needed for subsequent tests -- create a helper function to create objects on each node CREATE FUNCTION run_command_on_master_and_workers(p_sql text) RETURNS void LANGUAGE 
plpgsql AS $$ BEGIN EXECUTE p_sql; PERFORM run_command_on_workers(p_sql); END;$$; -- The following views are intended as alternatives to \d commands, whose -- output changed in PostgreSQL 10. In particular, they must be used any time -- a test wishes to print out the structure of a relation, which previously -- was safely accomplished by a \d invocation. SELECT run_command_on_master_and_workers( $desc_views$ CREATE VIEW table_fkey_cols AS SELECT rc.constraint_name AS "name", kcu.column_name AS "column_name", uc_kcu.column_name AS "refd_column_name", format('%I.%I', kcu.table_schema, kcu.table_name)::regclass::oid AS relid, format('%I.%I', uc_kcu.table_schema, uc_kcu.table_name)::regclass::oid AS refd_relid FROM information_schema.referential_constraints rc, information_schema.key_column_usage kcu, information_schema.key_column_usage uc_kcu WHERE rc.constraint_schema = kcu.constraint_schema AND rc.constraint_name = kcu.constraint_name AND rc.unique_constraint_schema = uc_kcu.constraint_schema AND rc.unique_constraint_name = uc_kcu.constraint_name; CREATE VIEW table_fkeys AS SELECT name AS "Constraint", format('FOREIGN KEY (%s) REFERENCES %s(%s)', string_agg(DISTINCT quote_ident(column_name), ', '), string_agg(DISTINCT refd_relid::regclass::text, ', '), string_agg(DISTINCT quote_ident(refd_column_name), ', ')) AS "Definition", "relid" FROM table_fkey_cols GROUP BY (name, relid); CREATE VIEW table_attrs AS SELECT c.column_name AS "name", c.data_type AS "type", CASE WHEN character_maximum_length IS NOT NULL THEN format('(%s)', character_maximum_length) WHEN data_type = 'numeric' AND numeric_precision IS NOT NULL THEN format('(%s,%s)', numeric_precision, numeric_scale) ELSE '' END AS "modifier", c.column_default AS "default", (NOT c.is_nullable::boolean) AS "notnull", format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid" FROM information_schema.columns AS c ORDER BY ordinal_position; CREATE VIEW table_desc AS SELECT "name" AS "Column", "type" || "modifier" AS "Type", rtrim(( CASE "notnull" WHEN true THEN 'not null ' ELSE '' END ) || ( CASE WHEN "default" IS NULL THEN '' ELSE 'default ' || "default" END )) AS "Modifiers", "relid" FROM table_attrs; CREATE VIEW table_checks AS SELECT cc.constraint_name AS "Constraint", ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition", format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid FROM information_schema.check_constraints cc, information_schema.constraint_column_usage ccu WHERE cc.constraint_schema = ccu.constraint_schema AND cc.constraint_name = ccu.constraint_name ORDER BY cc.constraint_name ASC; $desc_views$ ); run_command_on_master_and_workers ----------------------------------- (1 row) citus-7.0.3/src/test/regress/expected/multi_tpch_query1.out000066400000000000000000000033471317107136600241140ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY1 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, 
l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901 (4 rows) citus-7.0.3/src/test/regress/expected/multi_tpch_query10.out000066400000000000000000000127121317107136600241700ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY10 -- -- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer, orders, lineitem, nation WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; c_custkey | c_name | revenue | c_acctbal | n_name | c_address | c_phone | c_comment -----------+--------------------+-------------+-----------+---------------------------+---------------------------------------+-----------------+--------------------------------------------------------------------------------------------------------------------- 436 | Customer#000000436 | 255187.7382 | 5896.87 | ROMANIA | 4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z | 29-927-687-6390 | olites engage carefully. slyly ironic asymptotes about the ironi 640 | Customer#000000640 | 251941.1430 | 3025.84 | BRAZIL | j3vjr0 n,pJFG4gIOtC | 12-702-315-6637 | lly. furiously quick deposits haggle quickly regular packages. pinto 361 | Customer#000000361 | 239204.0858 | 7451.84 | SAUDI ARABIA | l0F8jMJVe63cb | 30-164-267-4590 | fully busy ideas. regular foxes cajole 223 | Customer#000000223 | 218652.8040 | 7476.20 | SAUDI ARABIA | ftau6Pk,brboMyEl,,kFm | 30-193-643-1517 | al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious 613 | Customer#000000613 | 186092.2017 | 6679.75 | EGYPT | AJT,26RbanTdEHOBgTWg | 14-275-416-1669 | ironic, pending deposits: quickl 355 | Customer#000000355 | 168184.4825 | 8727.90 | KENYA | 205r3Xg9ZWjPZNX1z | 24-656-787-6091 | ly bold requests detect furiously. unusual instructions sleep aft 872 | Customer#000000872 | 166831.7294 | -858.61 | PERU | vLP7iNZBK4B,HANFTKabVI3AO Y9O8H | 27-357-139-7164 | detect. packages wake slyly express foxes. even deposits ru 805 | Customer#000000805 | 165239.8440 | 511.69 | IRAN | wCKx5zcHvwpSffyc9qfi9dvqcm9LT,cLAG | 20-732-989-5653 | busy sentiments. 
pending packages haggle among the express requests-- slyly regular excuses above the slyl 427 | Customer#000000427 | 148033.5226 | 4376.80 | BRAZIL | LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY | 12-124-309-3821 | y even multipliers according to the regu 581 | Customer#000000581 | 146369.1712 | 3242.10 | UNITED STATES | s9SoN9XeVuCri | 34-415-978-2518 | ns. quickly regular pinto beans must sleep fluffily 679 | Customer#000000679 | 145188.0664 | 1394.44 | IRAN | IJf1FlZL9I9m,rvofcoKy5pRUOjUQV | 20-146-696-9508 | ely pending frays boost carefully 160 | Customer#000000160 | 138511.7370 | 4363.17 | JORDAN | 5soVQ3dOCRBWBS | 23-428-666-4806 | olites. silently ironic accounts cajole furious 883 | Customer#000000883 | 128224.1349 | 479.96 | CANADA | qVQ8rWNU5KZYDcS | 13-526-239-6950 | uctions are carefully across the regular, regular asymptote 101 | Customer#000000101 | 124996.0120 | 7470.96 | BRAZIL | sMmL2rNeHDltovSm Y | 12-514-298-3699 | sleep. pending packages detect slyly ironic pack 671 | Customer#000000671 | 124125.2191 | 3227.87 | VIETNAM | ic6qGrt0giB,HDEiBK,,FYGHXQpc | 31-593-213-9388 | bold ideas above the ironic packages affix blithely about the furiou 526 | Customer#000000526 | 120324.0048 | 705.93 | ARGENTINA | 0oAVPhh1I4JdrDafVG2Z8 | 11-170-679-3115 | ctions cajole after the furiously unusual ideas. ironic packages among the instructions are carefully carefully iro 367 | Customer#000000367 | 118572.6180 | 9108.65 | JORDAN | yZaDoEZCqt2VMTVKoZUkf6gJ4yj | 23-939-319-4691 | eodolites under the ironic, stealthy requests affix furiously among the unusual tit 745 | Customer#000000745 | 113738.6908 | 7115.14 | CHINA | vjuHvDKdaomsivy l | 28-913-438-9403 | o beans. bold, regular theodolites haggle carefully about the quickl 118 | Customer#000000118 | 113149.7832 | 3582.37 | CHINA | OVnFuHygK9wx3xpg8 | 28-639-943-7051 | uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep 50 | Customer#000000050 | 111600.5870 | 4266.13 | FRANCE | 9SzDYlkzxByyJ1QeTI o | 16-658-112-3221 | ts. furiously ironic accounts cajole furiously slyly ironic dinos. 
(20 rows) citus-7.0.3/src/test/regress/expected/multi_tpch_query12.out000066400000000000000000000017001317107136600241650ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY12 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders, lineitem WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' year GROUP BY l_shipmode ORDER BY l_shipmode; l_shipmode | high_line_count | low_line_count ------------+-----------------+---------------- MAIL | 11 | 15 SHIP | 11 | 19 (2 rows) citus-7.0.3/src/test/regress/expected/multi_tpch_query14.out000066400000000000000000000011161317107136600241700ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY14 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem, part WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; promo_revenue --------------------- 32.1126387112005225 (1 row) citus-7.0.3/src/test/regress/expected/multi_tpch_query19.out000066400000000000000000000020461317107136600242000ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY19 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem, part WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); revenue ------------- 144747.0857 (1 row) citus-7.0.3/src/test/regress/expected/multi_tpch_query3.out000066400000000000000000000032331317107136600241100ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY3 -- -- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; l_orderkey | revenue | o_orderdate | o_shippriority ------------+-------------+-------------+---------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 450 | 221012.3165 | 03-05-1995 | 0 5347 | 198353.7942 | 02-22-1995 | 0 10691 | 112800.1020 | 03-14-1995 | 0 386 | 104975.2484 | 01-25-1995 | 0 5765 | 88222.7556 | 12-15-1994 | 0 4707 | 88143.7774 | 02-27-1995 | 0 5312 | 83750.7028 | 02-24-1995 | 0 5728 | 70101.6400 | 12-11-1994 | 0 577 | 57986.6224 | 12-19-1994 | 0 12706 | 16636.6368 | 11-21-1994 | 0 3844 | 8851.3200 | 12-29-1994 | 0 11073 | 7433.6295 | 12-02-1994 | 0 13924 | 3111.4970 | 12-20-1994 | 0 (16 rows) citus-7.0.3/src/test/regress/expected/multi_tpch_query6.out000066400000000000000000000007351317107136600241170ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY6 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue ------------- 243277.7858 (1 row) citus-7.0.3/src/test/regress/expected/multi_tpch_query7.out000066400000000000000000000022411317107136600241120ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY7 -- -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier, lineitem, orders, customer, nation n1, nation n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) citus-7.0.3/src/test/regress/expected/multi_tpch_query7_nested.out000066400000000000000000000025321317107136600254570ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY7_NESTED -- -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier, lineitem, orders, customer, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, 
n2.n_name AS cust_nation FROM nation n1, nation n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; supp_nation | cust_nation | l_year | revenue ---------------------------+---------------------------+--------+----------- GERMANY | FRANCE | 1995 | 2399.2948 (1 row) citus-7.0.3/src/test/regress/expected/multi_transaction_recovery.out000066400000000000000000000126101317107136600261040ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; -- Tests for prepared transaction recovery -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) SELECT * FROM pg_dist_transaction; groupid | gid ---------+----- (0 rows) -- Create some "fake" prepared transactions to recover \c - - - :worker_1_port BEGIN; CREATE TABLE should_abort (value int); PREPARE TRANSACTION 'citus_0_should_abort'; BEGIN; CREATE TABLE should_commit (value int); PREPARE TRANSACTION 'citus_0_should_commit'; BEGIN; CREATE TABLE should_be_sorted_into_middle (value int); PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle'; \c - - - :master_port -- Add "fake" pg_dist_transaction records and run recovery INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_commit'); INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: ROLLBACK PREPARED 'citus_0_should_abort' NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: ROLLBACK PREPARED 'citus_0_should_be_sorted_into_middle' NOTICE: recovered a prepared transaction on localhost:57637 CONTEXT: COMMIT PREPARED 'citus_0_should_commit' recover_prepared_transactions ------------------------------- 3 (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; count ------- 0 (1 row) SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; count ------- 1 (1 row) \c - - - :master_port SET citus.shard_replication_factor TO 2; SET citus.shard_count TO 2; SET citus.multi_shard_commit_protocol TO '2pc'; -- create_distributed_table should add 2 recovery records (1 connection per node) CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); create_distributed_table -------------------------- (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 2 (1 row) -- create_reference_table should add another 2 recovery records CREATE TABLE test_recovery_ref (x text); SELECT create_reference_table('test_recovery_ref'); create_reference_table ------------------------ (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) -- Committed DDL commands should write 4 
transaction recovery records BEGIN; ALTER TABLE test_recovery ADD COLUMN y text; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) ALTER TABLE test_recovery ADD COLUMN y text; SELECT count(*) FROM pg_dist_transaction; count ------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) -- Committed master_modify_multiple_shards should write 4 transaction recovery records BEGIN; SELECT master_modify_multiple_shards($$UPDATE test_recovery SET y = 'world'$$); master_modify_multiple_shards ------------------------------- 1 (1 row) ROLLBACK; SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) SELECT master_modify_multiple_shards($$UPDATE test_recovery SET y = 'world'$$); master_modify_multiple_shards ------------------------------- 1 (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) -- Committed INSERT..SELECT should write 4 transaction recovery records BEGIN; INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; count ------- 0 (1 row) INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; SELECT count(*) FROM pg_dist_transaction; count ------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; count ------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) -- Committed COPY should write 4 transaction records COPY test_recovery (x) FROM STDIN CSV; SELECT count(*) FROM pg_dist_transaction; count ------- 4 (1 row) SELECT recover_prepared_transactions(); recover_prepared_transactions ------------------------------- 0 (1 row) DROP TABLE test_recovery_ref; DROP TABLE test_recovery; citus-7.0.3/src/test/regress/expected/multi_transactional_drop_shards.out000066400000000000000000000474551317107136600271120ustar00rootroot00000000000000-- -- MULTI_TRANSACTIONAL_DROP_SHARDS -- -- Tests that check the metadata returned by the master node. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000; SET citus.shard_count TO 4; -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1'); create_distributed_table -------------------------- (1 row) BEGIN; DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid --------- 1410000 1410001 1410002 1410003 (4 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410000 | 1 | localhost | 57637 1410000 | 1 | localhost | 57638 1410001 | 1 | localhost | 57637 1410001 | 1 | localhost | 57638 1410002 | 1 | localhost | 57637 1410002 | 1 | localhost | 57638 1410003 | 1 | localhost | 57637 1410003 | 1 | localhost | 57638 (8 rows) -- verify table is not dropped \dt transactional_drop_shards List of relations Schema | Name | Type | Owner --------+---------------------------+-------+---------- public | transactional_drop_shards | table | postgres (1 row) -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+-----------------------------------+-------+---------- public | transactional_drop_shards_1410000 | table | postgres public | transactional_drop_shards_1410001 | table | postgres public | transactional_drop_shards_1410002 | table | postgres public | transactional_drop_shards_1410003 | table | postgres (4 rows) \c - - - :master_port -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT BEGIN; DROP TABLE transactional_drop_shards; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid; shardid --------- (0 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+----------+---------- (0 rows) -- verify table is dropped \dt transactional_drop_shards List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) -- verify shards are dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \c - - - :master_port -- test master_delete_protocol in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1', 'append'); create_distributed_table -------------------------- (1 row) SELECT master_create_empty_shard('transactional_drop_shards'); master_create_empty_shard --------------------------- 1410004 (1 row) BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); master_apply_delete_command ----------------------------- 1 (1 row) ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid --------- 1410004 (1 row) SELECT 
shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410004 | 1 | localhost | 57637 1410004 | 1 | localhost | 57638 (2 rows) -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+-----------------------------------+-------+---------- public | transactional_drop_shards_1410004 | table | postgres (1 row) \c - - - :master_port -- test master_delete_protocol in transaction, then COMMIT BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); master_apply_delete_command ----------------------------- 1 (1 row) COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid --------- (0 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+----------+---------- (0 rows) -- verify shards are dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \c - - - :master_port -- test DROP table in a transaction after insertion SELECT master_create_empty_shard('transactional_drop_shards'); master_create_empty_shard --------------------------- 1410005 (1 row) BEGIN; INSERT INTO transactional_drop_shards VALUES (1); DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid --------- 1410005 (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) -- verify table is not dropped \dt transactional_drop_shards List of relations Schema | Name | Type | Owner --------+---------------------------+-------+---------- public | transactional_drop_shards | table | postgres (1 row) -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+-----------------------------------+-------+---------- public | transactional_drop_shards_1410005 | table | postgres (1 row) \c - - - :master_port -- test master_apply_delete_command in a transaction after insertion BEGIN; INSERT INTO transactional_drop_shards VALUES (1); SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); master_apply_delete_command ----------------------------- 1 (1 row) ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid --------- 1410005 (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid 
IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+-----------------------------------+-------+---------- public | transactional_drop_shards_1410005 | table | postgres (1 row) -- test DROP table with failing worker CREATE FUNCTION fail_drop_table() RETURNS event_trigger AS $fdt$ BEGIN RAISE 'illegal value'; END; $fdt$ LANGUAGE plpgsql; CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table(); \c - - - :master_port \set VERBOSITY terse DROP TABLE transactional_drop_shards; ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid --------- 1410005 (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) -- verify table is not dropped \dt transactional_drop_shards List of relations Schema | Name | Type | Owner --------+---------------------------+-------+---------- public | transactional_drop_shards | table | postgres (1 row) -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+-----------------------------------+-------+---------- public | transactional_drop_shards_1410005 | table | postgres (1 row) \c - - - :master_port -- test DROP reference table with failing worker CREATE TABLE transactional_drop_reference(column1 int); SELECT create_reference_table('transactional_drop_reference'); create_reference_table ------------------------ (1 row) \set VERBOSITY terse DROP TABLE transactional_drop_reference; ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid; shardid --------- 1410006 (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410006 | 1 | localhost | 57637 1410006 | 1 | localhost | 57638 (2 rows) -- verify table is not dropped \dt transactional_drop_reference List of relations Schema | Name | Type | Owner --------+------------------------------+-------+---------- public | transactional_drop_reference | table | postgres (1 row) -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_reference* List of relations Schema | Name | Type | Owner --------+--------------------------------------+-------+---------- public | transactional_drop_reference_1410006 | table | postgres (1 row) \c - - - :master_port -- test master_apply_delete_command 
table with failing worker \set VERBOSITY terse SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); ERROR: illegal value \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; shardid --------- 1410005 (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410005 | 1 | localhost | 57637 1410005 | 1 | localhost | 57638 (2 rows) -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* List of relations Schema | Name | Type | Owner --------+-----------------------------------+-------+---------- public | transactional_drop_shards_1410005 | table | postgres (1 row) DROP EVENT TRIGGER fail_drop_table; \c - - - :master_port -- test with SERIAL column + with more shards SET citus.shard_count TO 8; CREATE TABLE transactional_drop_serial(column1 int, column2 SERIAL); SELECT create_distributed_table('transactional_drop_serial', 'column1'); create_distributed_table -------------------------- (1 row) -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK BEGIN; DROP TABLE transactional_drop_serial; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid; shardid --------- 1410007 1410008 1410009 1410010 1410011 1410012 1410013 1410014 (8 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410007 | 1 | localhost | 57637 1410007 | 1 | localhost | 57638 1410008 | 1 | localhost | 57637 1410008 | 1 | localhost | 57638 1410009 | 1 | localhost | 57637 1410009 | 1 | localhost | 57638 1410010 | 1 | localhost | 57637 1410010 | 1 | localhost | 57638 1410011 | 1 | localhost | 57637 1410011 | 1 | localhost | 57638 1410012 | 1 | localhost | 57637 1410012 | 1 | localhost | 57638 1410013 | 1 | localhost | 57637 1410013 | 1 | localhost | 57638 1410014 | 1 | localhost | 57637 1410014 | 1 | localhost | 57638 (16 rows) -- verify table is not dropped \dt transactional_drop_serial List of relations Schema | Name | Type | Owner --------+---------------------------+-------+---------- public | transactional_drop_serial | table | postgres (1 row) -- verify shards and sequence are not dropped \c - - - :worker_1_port \dt transactional_drop_serial_* List of relations Schema | Name | Type | Owner --------+-----------------------------------+-------+---------- public | transactional_drop_serial_1410007 | table | postgres public | transactional_drop_serial_1410008 | table | postgres public | transactional_drop_serial_1410009 | table | postgres public | transactional_drop_serial_1410010 | table | postgres public | transactional_drop_serial_1410011 | table | postgres public | transactional_drop_serial_1410012 | table | postgres public | transactional_drop_serial_1410013 | table | postgres public | transactional_drop_serial_1410014 | table | postgres (8 rows) \ds 
transactional_drop_serial_column2_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \c - - - :master_port -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT BEGIN; DROP TABLE transactional_drop_serial; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid; shardid --------- (0 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+----------+---------- (0 rows) -- verify table is dropped \dt transactional_drop_serial List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) -- verify shards and sequence are dropped \c - - - :worker_1_port \dt transactional_drop_serial_* List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \ds transactional_drop_serial_column2_seq List of relations Schema | Name | Type | Owner --------+------+------+------- (0 rows) \c - - - :master_port -- test with MX, DROP TABLE, then ROLLBACK SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; CREATE TABLE transactional_drop_mx(column1 int); SELECT create_distributed_table('transactional_drop_mx', 'column1'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='transactional_drop_mx'::regclass; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) -- see metadata is propogated to the worker \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; shardid --------- 1410015 1410016 1410017 1410018 (4 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 1410017 | 1 | localhost | 57637 1410018 | 1 | localhost | 57638 (4 rows) \c - - - :master_port BEGIN; DROP TABLE transactional_drop_mx; ROLLBACK; -- verify metadata is not deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; shardid --------- 1410015 1410016 1410017 1410018 (4 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 1410015 | 1 | localhost | 57637 1410016 | 1 | localhost | 57638 1410017 | 1 | localhost | 57637 1410018 | 1 | localhost | 57638 (4 rows) -- test with MX, DROP TABLE, then COMMIT \c - - - :master_port BEGIN; DROP TABLE transactional_drop_mx; COMMIT; -- verify metadata is deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid; 
shardid --------- (0 rows) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid, nodename, nodeport; shardid | shardstate | nodename | nodeport ---------+------------+----------+---------- (0 rows) \c - - - :master_port -- clean the workspace DROP TABLE transactional_drop_shards, transactional_drop_reference; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) citus-7.0.3/src/test/regress/expected/multi_truncate.out000066400000000000000000000227771317107136600235050ustar00rootroot00000000000000-- -- MULTI_TRUNCATE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000; -- -- truncate for append distribution -- expect all shards to be dropped -- CREATE TABLE test_truncate_append(a int); SELECT master_create_distributed_table('test_truncate_append', 'a', 'append'); master_create_distributed_table --------------------------------- (1 row) -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_append; SELECT master_create_empty_shard('test_truncate_append') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_append; count ------- 0 (1 row) INSERT INTO test_truncate_append values (1); SELECT count(*) FROM test_truncate_append; count ------- 1 (1 row) -- create some more shards SELECT master_create_empty_shard('test_truncate_append'); master_create_empty_shard --------------------------- 1210001 (1 row) SELECT master_create_empty_shard('test_truncate_append'); master_create_empty_shard --------------------------- 1210002 (1 row) -- verify 3 shards are presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass ORDER BY shardid; shardid --------- 1210000 1210001 1210002 (3 rows) TRUNCATE TABLE test_truncate_append; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_append; count ------- 0 (1 row) -- verify no shard exists anymore SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass; shardid --------- (0 rows) -- command can run inside transaction BEGIN; TRUNCATE TABLE test_truncate_append; COMMIT; DROP TABLE test_truncate_append; -- -- truncate for range distribution -- expect shard to be present, data to be truncated -- CREATE TABLE test_truncate_range(a int); SELECT master_create_distributed_table('test_truncate_range', 'a', 'range'); master_create_distributed_table --------------------------------- (1 row) -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_range; SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_range; count ------- 0 (1 row) INSERT INTO test_truncate_range values (1); INSERT INTO test_truncate_range values (1001); INSERT INTO test_truncate_range values (2000); INSERT INTO test_truncate_range values (100); SELECT count(*) FROM 
test_truncate_range; count ------- 4 (1 row) -- verify 3 shards are presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; shardid --------- 1210003 1210004 1210005 (3 rows) TRUNCATE TABLE test_truncate_range; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_range; count ------- 0 (1 row) -- verify 3 shards are still present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; shardid --------- 1210003 1210004 1210005 (3 rows) -- verify that truncate can be aborted INSERT INTO test_truncate_range VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_range; ROLLBACK; SELECT count(*) FROM test_truncate_range; count ------- 1 (1 row) DROP TABLE test_truncate_range; -- -- truncate for hash distribution. -- expect shard to be present, data to be truncated -- CREATE TABLE test_truncate_hash(a int); SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_hash; SELECT count(*) FROM test_truncate_hash; count ------- 0 (1 row) INSERT INTO test_truncate_hash values (1); ERROR: could not find any shards DETAIL: No shards exist for distributed table "test_truncate_hash". HINT: Run master_create_worker_shards to create shards and try again. INSERT INTO test_truncate_hash values (1001); ERROR: could not find any shards DETAIL: No shards exist for distributed table "test_truncate_hash". HINT: Run master_create_worker_shards to create shards and try again. INSERT INTO test_truncate_hash values (2000); ERROR: could not find any shards DETAIL: No shards exist for distributed table "test_truncate_hash". HINT: Run master_create_worker_shards to create shards and try again. INSERT INTO test_truncate_hash values (100); ERROR: could not find any shards DETAIL: No shards exist for distributed table "test_truncate_hash". HINT: Run master_create_worker_shards to create shards and try again. 
SELECT count(*) FROM test_truncate_hash; count ------- 0 (1 row) -- verify 4 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; shardid --------- (0 rows) TRUNCATE TABLE test_truncate_hash; SELECT master_create_worker_shards('test_truncate_hash', 4, 1); master_create_worker_shards ----------------------------- (1 row) INSERT INTO test_truncate_hash values (1); INSERT INTO test_truncate_hash values (1001); INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); SELECT count(*) FROM test_truncate_hash; count ------- 4 (1 row) TRUNCATE TABLE test_truncate_hash; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_hash; count ------- 0 (1 row) -- verify 4 shards are still presents SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; shardid --------- 1210006 1210007 1210008 1210009 (4 rows) -- verify that truncate can be aborted INSERT INTO test_truncate_hash VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_hash; ROLLBACK; SELECT count(*) FROM test_truncate_hash; count ------- 1 (1 row) DROP TABLE test_truncate_hash; -- test with table with spaces in it CREATE TABLE "a b hash" (a int, b int); SELECT master_create_distributed_table('"a b hash"', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('"a b hash"', 4, 1); master_create_worker_shards ----------------------------- (1 row) INSERT INTO "a b hash" values (1, 0); SELECT * from "a b hash"; a | b ---+--- 1 | 0 (1 row) TRUNCATE TABLE "a b hash"; SELECT * from "a b hash"; a | b ---+--- (0 rows) DROP TABLE "a b hash"; -- now with append CREATE TABLE "a b append" (a int, b int); SELECT master_create_distributed_table('"a b append"', 'a', 'append'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000 WHERE shardid = :new_shard_id; INSERT INTO "a b append" values (1, 1); INSERT INTO "a b append" values (600, 600); SELECT * FROM "a b append" ORDER BY a; a | b -----+----- 1 | 1 600 | 600 (2 rows) TRUNCATE TABLE "a b append"; -- verify all shards are dropped SELECT shardid FROM pg_dist_shard where logicalrelid = '"a b append"'::regclass; shardid --------- (0 rows) DROP TABLE "a b append"; -- Truncate local data only CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... 
create_distributed_table -------------------------- (1 row) BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; TRUNCATE test_local_truncate; COMMIT; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; x | y ---+--- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'pubic', 'test_local_truncate'); master_drop_all_shards ------------------------ 32 (1 row) DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is truncated SELECT * FROM test_local_truncate; x | y ---+--- (0 rows) DROP TABLE test_local_truncate; -- Truncate local data, but roll back CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); NOTICE: Copying data from local table... create_distributed_table -------------------------- (1 row) BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; TRUNCATE test_local_truncate; ROLLBACK; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; x | y ---+--- 1 | 2 (1 row) -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'pubic', 'test_local_truncate'); master_drop_all_shards ------------------------ 32 (1 row) DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is not truncated SELECT * FROM test_local_truncate; x | y ---+--- 1 | 2 (1 row) DROP TABLE test_local_truncate; citus-7.0.3/src/test/regress/expected/multi_unsupported_worker_operations.out000066400000000000000000000344051317107136600300730ustar00rootroot00000000000000-- -- MULTI_UNSUPPORTED_WORKER_OPERATIONS -- -- Tests for ensuring unsupported functions on workers error out. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000; -- Set the colocation id to a safe value so that -- it is not affected by future changes to colocation id sequence SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 150000; -- Prepare the environment SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SET citus.shard_count TO 5; -- Create test tables CREATE TABLE mx_table (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table', 'col_1'); create_distributed_table -------------------------- (1 row) CREATE TABLE mx_table_2 (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table_2', 'col_1'); create_distributed_table -------------------------- (1 row) CREATE TABLE mx_ref_table (col_1 int, col_2 text); SELECT create_reference_table('mx_ref_table'); create_reference_table ------------------------ (1 row) -- Check that the created tables are colocated MX tables SELECT logicalrelid, repmodel, colocationid FROM pg_dist_partition WHERE logicalrelid IN ('mx_table'::regclass, 'mx_table_2'::regclass) ORDER BY logicalrelid; logicalrelid | repmodel | colocationid --------------+----------+-------------- mx_table | s | 150000 mx_table_2 | s | 150000 (2 rows) SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) COPY mx_table (col_1, col_2) FROM STDIN WITH (FORMAT 'csv'); INSERT INTO mx_ref_table VALUES (-37, 'morbi'); INSERT INTO mx_ref_table VALUES (-78, 'sapien'); INSERT INTO mx_ref_table VALUES (-34, 'augue'); SELECT * FROM mx_table ORDER BY col_1; col_1 | col_2 | col_3 -------+----------+------- -37 | 'lorem' | 1 80 | 'dolor' | 3 7344 | 'sit' | 4 65536 | 'ipsum' | 2 65832 | 'amet' | 5 (5 rows) -- Try commands from metadata worker \c - - - :worker_1_port CREATE TABLE mx_table_worker(col_1 text); -- master_create_distributed_table SELECT master_create_distributed_table('mx_table_worker', 'col_1', 'hash'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. -- create_distributed_table SELECT create_distributed_table('mx_table_worker', 'col_1'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. -- create_reference_table SELECT create_reference_table('mx_table_worker'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM pg_dist_partition WHERE logicalrelid='mx_table_worker'::regclass; count ------- 0 (1 row) DROP TABLE mx_table_worker; -- master_create_worker_shards CREATE TEMP TABLE pg_dist_shard_temp AS SELECT * FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass; DELETE FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass; SELECT master_create_worker_shards('mx_table', 5, 1); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; count ------- 0 (1 row) INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; count ------- 5 (1 row) -- INSERT/UPDATE/DELETE/COPY on reference tables SELECT * FROM mx_ref_table ORDER BY col_1; col_1 | col_2 -------+-------- -78 | sapien -37 | morbi -34 | augue (3 rows) INSERT INTO mx_ref_table (col_1, col_2) VALUES (-6, 'vestibulum'); ERROR: cannot perform distributed planning for the given modification DETAIL: Modifications to reference tables are supported only from the coordinator. UPDATE mx_ref_table SET col_2 = 'habitant' WHERE col_1 = -37; ERROR: cannot perform distributed planning for the given modification DETAIL: Modifications to reference tables are supported only from the coordinator. DELETE FROM mx_ref_table WHERE col_1 = -78; ERROR: cannot perform distributed planning for the given modification DETAIL: Modifications to reference tables are supported only from the coordinator. COPY mx_ref_table (col_1, col_2) FROM STDIN WITH (FORMAT 'csv'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT * FROM mx_ref_table ORDER BY col_1; col_1 | col_2 -------+-------- -78 | sapien -37 | morbi -34 | augue (3 rows) \c - - - :master_port DROP TABLE mx_ref_table; CREATE UNIQUE INDEX mx_test_uniq_index ON mx_table(col_1); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' \c - - - :worker_1_port -- DDL commands SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers --------+---------+---------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) CREATE INDEX mx_test_index ON mx_table(col_2); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. ALTER TABLE mx_table ADD COLUMN col_4 int; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. ALTER TABLE mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col_1) REFERENCES mx_table(col_1); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers --------+---------+---------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) \d mx_test_index -- master_modify_multiple_shards SELECT master_modify_multiple_shards('UPDATE mx_table SET col_2=''none'''); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table WHERE col_2='none'; count ------- 0 (1 row) SELECT count(*) FROM mx_table WHERE col_2!='none'; count ------- 5 (1 row) SELECT master_modify_multiple_shards('DELETE FROM mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; count ------- 5 (1 row) -- master_drop_all_shards SELECT master_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; count ------- 5 (1 row) -- master_apply_delete_command SELECT master_apply_delete_command('DELETE FROM mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; count ------- 5 (1 row) -- master_add_node SELECT 1 FROM master_add_node('localhost', 5432); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; count ------- 0 (1 row) -- master_remove_node \c - - - :master_port DROP INDEX mx_test_uniq_index; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT 1 FROM master_add_node('localhost', 5432); ?column? ---------- 1 (1 row) \c - - - :worker_1_port SELECT master_remove_node('localhost', 5432); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; count ------- 1 (1 row) \c - - - :master_port SELECT master_remove_node('localhost', 5432); master_remove_node -------------------- (1 row) -- TRUNCATE \c - - - :worker_1_port TRUNCATE mx_table; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; count ------- 5 (1 row) -- INSERT / SELECT pulls results to worker BEGIN; SET LOCAL client_min_messages TO DEBUG; INSERT INTO mx_table_2 SELECT * FROM mx_table; DEBUG: distributed INSERT ... SELECT can only be performed from the coordinator DEBUG: Collecting INSERT ... SELECT results on coordinator END; SELECT count(*) FROM mx_table_2; count ------- 5 (1 row) -- mark_tables_colocated UPDATE pg_dist_partition SET colocationid = 0 WHERE logicalrelid='mx_table_2'::regclass; SELECT mark_tables_colocated('mx_table', ARRAY['mx_table_2']); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT colocationid FROM pg_dist_partition WHERE logicalrelid='mx_table_2'::regclass; colocationid -------------- 0 (1 row) SELECT colocationid AS old_colocation_id FROM pg_dist_partition WHERE logicalrelid='mx_table'::regclass \gset UPDATE pg_dist_partition SET colocationid = :old_colocation_id WHERE logicalrelid='mx_table_2'::regclass; -- start_metadata_sync_to_node SELECT start_metadata_sync_to_node('localhost', :worker_2_port); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata ------------- f (1 row) -- stop_metadata_sync_to_node \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); start_metadata_sync_to_node ----------------------------- (1 row) \c - - - :worker_1_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
\c - - - :master_port SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata ------------- t (1 row) SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); stop_metadata_sync_to_node ---------------------------- (1 row) SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; hasmetadata ------------- f (1 row) \c - - - :worker_2_port SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition; worker_drop_distributed_table ------------------------------- (2 rows) DELETE FROM pg_dist_node; \c - - - :worker_1_port -- DROP TABLE DROP TABLE mx_table; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. CONTEXT: SQL statement "SELECT master_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name)" PL/pgSQL function citus_drop_trigger() line 21 at PERFORM SELECT count(*) FROM mx_table; count ------- 5 (1 row) -- master_drop_distributed_table_metadata SELECT master_drop_distributed_table_metadata('mx_table'::regclass, 'public', 'mx_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT count(*) FROM mx_table; count ------- 5 (1 row) -- master_copy_shard_placement SELECT logicalrelid, shardid AS testshardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_table'::regclass AND nodeport=:worker_1_port ORDER BY shardid LIMIT 1 \gset SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset INSERT INTO pg_dist_placement (groupid, shardid, shardstate, shardlength) VALUES (:worker_2_group, :testshardid, 3, 0); SELECT master_copy_shard_placement(:testshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. SELECT shardid, nodename, nodeport, shardstate FROM pg_dist_shard_placement WHERE shardid = :testshardid ORDER BY nodeport; shardid | nodename | nodeport | shardstate ---------+-----------+----------+------------ 1270000 | localhost | 57637 | 1 1270000 | localhost | 57638 | 3 (2 rows) DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group AND shardid = :testshardid; -- master_get_new_placementid SELECT master_get_new_placementid(); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
-- Show that sequences can be created and dropped on worker nodes CREATE TABLE some_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); DROP TABLE some_table_with_sequence; CREATE SEQUENCE some_sequence; DROP SEQUENCE some_sequence; -- Show that dropping the sequence of an MX table with cascade harms the table and shards BEGIN; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers --------+---------+---------------------------------------------------------- col_1 | integer | col_2 | text | col_3 | bigint | not null default nextval('mx_table_col_3_seq'::regclass) (3 rows) DROP SEQUENCE mx_table_col_3_seq CASCADE; NOTICE: drop cascades to default for table mx_table column col_3 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; Column | Type | Modifiers --------+---------+----------- col_1 | integer | col_2 | text | col_3 | bigint | not null (3 rows) ROLLBACK; -- Cleanup \c - - - :master_port DROP TABLE mx_table; DROP TABLE mx_table_2; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) \c - - - :worker_1_port DELETE FROM pg_dist_node; SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition; worker_drop_distributed_table ------------------------------- (0 rows) \c - - - :master_port ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; RESET citus.shard_replication_factor; RESET citus.replication_model; citus-7.0.3/src/test/regress/expected/multi_upgrade_reference_table.out000066400000000000000000001136311317107136600264620ustar00rootroot00000000000000-- -- MULTI_UPGRADE_REFERENCE_TABLE -- -- Tests around upgrade_reference_table UDF -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1360000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1360000; -- test with not distributed table CREATE TABLE upgrade_reference_table_local(column1 int); SELECT upgrade_to_reference_table('upgrade_reference_table_local'); ERROR: cannot upgrade to reference table DETAIL: Relation "upgrade_reference_table_local" is not distributed. HINT: Instead, you can use; create_reference_table('upgrade_reference_table_local'); DROP TABLE upgrade_reference_table_local; -- test with table which has more than one shard SET citus.shard_count TO 4; CREATE TABLE upgrade_reference_table_multiple_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_multiple_shard', 'column1'); create_distributed_table -------------------------- (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_multiple_shard'); ERROR: cannot upgrade to reference table DETAIL: Relation "upgrade_reference_table_multiple_shard" shard count is not one. Only relations with one shard can be upgraded to reference tables. DROP TABLE upgrade_reference_table_multiple_shard; -- test with table which has no shard CREATE TABLE upgrade_reference_table_no_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_no_shard', 'column1', 'append'); create_distributed_table -------------------------- (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_no_shard'); ERROR: cannot upgrade to reference table DETAIL: Relation "upgrade_reference_table_no_shard" shard count is not one. Only relations with one shard can be upgraded to reference tables. 
DROP TABLE upgrade_reference_table_no_shard; -- test with table with foreign keys SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_referenced(column1 int PRIMARY KEY); SELECT create_distributed_table('upgrade_reference_table_referenced', 'column1'); create_distributed_table -------------------------- (1 row) CREATE TABLE upgrade_reference_table_referencing(column1 int REFERENCES upgrade_reference_table_referenced(column1)); SELECT create_distributed_table('upgrade_reference_table_referencing', 'column1'); create_distributed_table -------------------------- (1 row) -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_referenced'::regclass; UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_referencing'::regclass; SELECT upgrade_to_reference_table('upgrade_reference_table_referenced'); ERROR: cannot upgrade to reference table DETAIL: Relation "upgrade_reference_table_referenced" is part of a foreign constraint. Foreign key constraints are not allowed from or to reference tables. SELECT upgrade_to_reference_table('upgrade_reference_table_referencing'); ERROR: cannot upgrade to reference table DETAIL: Relation "upgrade_reference_table_referencing" is part of a foreign constraint. Foreign key constraints are not allowed from or to reference tables. DROP TABLE upgrade_reference_table_referencing; DROP TABLE upgrade_reference_table_referenced; -- test with no healthy placements CREATE TABLE upgrade_reference_table_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_unhealthy', 'column1'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_unhealthy'::regclass; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1360006; SELECT upgrade_to_reference_table('upgrade_reference_table_unhealthy'); ERROR: could not find any healthy placement for shard 1360006 DROP TABLE upgrade_reference_table_unhealthy; -- test with table containing composite type CREATE TYPE upgrade_test_composite_type AS (key1 text, key2 text); \c - - - :worker_1_port CREATE TYPE upgrade_test_composite_type AS (key1 text, key2 text); \c - - - :master_port SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_composite(column1 int, column2 upgrade_test_composite_type); SELECT create_distributed_table('upgrade_reference_table_composite', 'column1'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_composite'::regclass; SELECT upgrade_to_reference_table('upgrade_reference_table_composite'); NOTICE: Replicating reference table "upgrade_reference_table_composite" to the node localhost:57638 ERROR: type "public.upgrade_test_composite_type" does not exist CONTEXT: while executing command on localhost:57638 DROP TABLE upgrade_reference_table_composite; -- test with reference table CREATE TABLE upgrade_reference_table_reference(column1 int); SELECT create_reference_table('upgrade_reference_table_reference'); create_reference_table ------------------------ (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_reference'); ERROR: cannot upgrade to reference table DETAIL: Relation 
"upgrade_reference_table_reference" is already a reference table DROP TABLE upgrade_reference_table_reference; -- test valid cases, append distributed table CREATE TABLE upgrade_reference_table_append(column1 int); SELECT create_distributed_table('upgrade_reference_table_append', 'column1', 'append'); create_distributed_table -------------------------- (1 row) COPY upgrade_reference_table_append FROM STDIN; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- a | f | 0 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360009 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ (0 rows) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360009 | 1 | 8192 | localhost | 57637 (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_append'); NOTICE: Replicating reference table "upgrade_reference_table_append" to the node localhost:57638 upgrade_to_reference_table ---------------------------- (1 row) -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- n | t | 10005 | t (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360009 | t | t (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 10005 | 1 | 2 | 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360009 | 1 | 8192 | localhost | 57637 1360009 | 1 | 0 | localhost | 57638 (2 rows) DROP TABLE upgrade_reference_table_append; -- test valid cases, shard exists at one worker CREATE TABLE 
upgrade_reference_table_one_worker(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_worker', 'column1'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_one_worker'::regclass; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360000 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360010 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360000 | 1 | 1 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360010 | 1 | 0 | localhost | 57637 (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_one_worker'); NOTICE: Replicating reference table "upgrade_reference_table_one_worker" to the node localhost:57638 upgrade_to_reference_table ---------------------------- (1 row) -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- n | t | 10005 | t (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360010 | t | t (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 10005 | 1 | 2 | 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360010 | 1 | 0 | localhost | 57637 1360010 | 1 | 0 | localhost | 57638 (2 rows) DROP TABLE upgrade_reference_table_one_worker; -- test valid cases, shard exists at both workers but one is unhealthy SET citus.shard_replication_factor TO 2; CREATE TABLE 
upgrade_reference_table_one_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_unhealthy', 'column1'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1360010 AND nodeport = :worker_1_port; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360001 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360011 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360001 | 1 | 2 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360011 | 1 | 0 | localhost | 57637 1360011 | 1 | 0 | localhost | 57638 (2 rows) SELECT upgrade_to_reference_table('upgrade_reference_table_one_unhealthy'); upgrade_to_reference_table ---------------------------- (1 row) -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- n | t | 10005 | t (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360011 | t | t (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 10005 | 1 | 2 | 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360011 | 1 | 0 | localhost | 57637 1360011 | 1 | 0 | localhost | 57638 (2 rows) DROP TABLE upgrade_reference_table_one_unhealthy; -- test valid cases, shard exists at both workers and both are healthy CREATE TABLE upgrade_reference_table_both_healthy(column1 int); SELECT 
create_distributed_table('upgrade_reference_table_both_healthy', 'column1'); create_distributed_table -------------------------- (1 row) -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360002 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360012 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360002 | 1 | 2 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360012 | 1 | 0 | localhost | 57637 1360012 | 1 | 0 | localhost | 57638 (2 rows) SELECT upgrade_to_reference_table('upgrade_reference_table_both_healthy'); upgrade_to_reference_table ---------------------------- (1 row) -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- n | t | 10005 | t (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360012 | t | t (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 10005 | 1 | 2 | 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360012 | 1 | 0 | localhost | 57637 1360012 | 1 | 0 | localhost | 57638 (2 rows) DROP TABLE upgrade_reference_table_both_healthy; -- test valid cases, do it in transaction and ROLLBACK SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_rollback(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_rollback', 'column1'); create_distributed_table -------------------------- (1 row) 
UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_rollback'::regclass; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360003 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360013 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360003 | 1 | 1 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360013 | 1 | 0 | localhost | 57637 (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_rollback'); NOTICE: Replicating reference table "upgrade_reference_table_transaction_rollback" to the node localhost:57638 upgrade_to_reference_table ---------------------------- (1 row) ROLLBACK; -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360003 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360013 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360003 | 1 | 1 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360013 | 1 | 0 | localhost | 57637 (1 row) DROP TABLE upgrade_reference_table_transaction_rollback; -- test valid cases, do it in transaction and COMMIT SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_commit(column1 int); SELECT 
create_distributed_table('upgrade_reference_table_transaction_commit', 'column1'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_commit'::regclass; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360003 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360014 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360003 | 1 | 1 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360014 | 1 | 0 | localhost | 57637 (1 row) BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_commit'); NOTICE: Replicating reference table "upgrade_reference_table_transaction_commit" to the node localhost:57638 upgrade_to_reference_table ---------------------------- (1 row) COMMIT; -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- n | t | 10005 | t (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360014 | t | t (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 10005 | 1 | 2 | 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360014 | 1 | 0 | localhost | 57637 1360014 | 1 | 0 | localhost | 57638 (2 rows) -- verify that shard is replicated to other worker \c - - - :worker_2_port \dt upgrade_reference_table_transaction_commit_* 
List of relations Schema | Name | Type | Owner --------+----------------------------------------------------+-------+---------- public | upgrade_reference_table_transaction_commit_1360014 | table | postgres (1 row) \c - - - :master_port DROP TABLE upgrade_reference_table_transaction_commit; -- create an mx table SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); create_distributed_table -------------------------- (1 row) -- verify that streaming replicated tables cannot be upgraded to reference tables SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360004 | s (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360015 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360004 | 1 | 1 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360015 | 1 | 0 | localhost | 57637 (1 row) SELECT upgrade_to_reference_table('upgrade_reference_table_mx'); ERROR: cannot upgrade to reference table DETAIL: Upgrade is only supported for statement-based replicated tables but "upgrade_reference_table_mx" is streaming replicated -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360004 | s (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360015 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360004 | 1 | 1 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport 
---------+------------+-------------+-----------+---------- 1360015 | 1 | 0 | localhost | 57637 (1 row) DROP TABLE upgrade_reference_table_mx; -- test valid cases, do it with MX SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; RESET citus.replication_model; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); create_distributed_table -------------------------- (1 row) UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node ----------------------------- (1 row) -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- h | f | 1360005 | c (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360016 | f | f (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 1360005 | 1 | 2 | 23 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360016 | 1 | 0 | localhost | 57637 1360016 | 3 | 0 | localhost | 57638 (2 rows) SELECT upgrade_to_reference_table('upgrade_reference_table_mx'); NOTICE: Replicating reference table "upgrade_reference_table_mx" to the node localhost:57638 upgrade_to_reference_table ---------------------------- (1 row) -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- n | t | 10005 | t (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360016 | t | t (1 row) SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); colocationid | shardcount | replicationfactor | distributioncolumntype --------------+------------+-------------------+------------------------ 10005 | 1 | 2 | 0 (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM 
pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360016 | 1 | 0 | localhost | 57637 1360016 | 1 | 0 | localhost | 57638 (2 rows) -- situation on metadata worker \c - - - :worker_1_port SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; partmethod | partkeyisnull | colocationid | repmodel ------------+---------------+--------------+---------- n | t | 10005 | t (1 row) SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; shardid | shardminvalueisnull | shardmaxvalueisnull ---------+---------------------+--------------------- 1360016 | t | t (1 row) SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; shardid | shardstate | shardlength | nodename | nodeport ---------+------------+-------------+-----------+---------- 1360016 | 1 | 0 | localhost | 57637 1360016 | 1 | 0 | localhost | 57638 (2 rows) \c - - - :master_port DROP TABLE upgrade_reference_table_mx; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); stop_metadata_sync_to_node ---------------------------- (1 row) citus-7.0.3/src/test/regress/expected/multi_upsert.out000066400000000000000000000255011317107136600231660ustar00rootroot00000000000000-- this test file aims to test UPSERT feature on Citus ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 980000; CREATE TABLE upsert_test ( part_key int UNIQUE, other_col int, third_col int ); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('upsert_test', '4', '2'); master_create_worker_shards ----------------------------- (1 row) -- do a regular insert INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2); -- observe that there is a conflict and the following query does nothing INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING; -- same as the above with different syntax INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING; --again the same query with another syntax INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING; -- now, update the columns INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 2, third_col = 4; -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col ----------+-----------+----------- 1 | 2 | 4 2 | 2 | (2 rows) -- do a multi-row DO NOTHING insert INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2) ON CONFLICT DO NOTHING; -- do a multi-row DO UPDATE insert INSERT INTO upsert_test (part_key, other_col) VALUES (1, 10), (2, 20), (3, 30) ON CONFLICT (part_key) DO UPDATE SET other_col = EXCLUDED.other_col WHERE upsert_test.part_key != 1; -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; part_key | other_col | third_col ----------+-----------+----------- 1 | 2 
| 4 2 | 20 | 3 | 30 | (3 rows) DELETE FROM upsert_test WHERE part_key = 2; DELETE FROM upsert_test WHERE part_key = 3; -- use a WHERE clause, so that SET doesn't have an effect INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 3; -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col ----------+-----------+----------- 1 | 2 | 4 (1 row) -- use a WHERE clause that hits the row and updates it INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 2; -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col ----------+-----------+----------- 1 | 30 | 4 (1 row) -- use two elements in the WHERE clause that don't hit the row, so nothing is updated INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 2 AND upsert_test.other_col = 3; -- use EXCLUDED keyword INSERT INTO upsert_test (part_key, other_col, third_col) VALUES (1, 1, 100) ON CONFLICT (part_key) DO UPDATE SET other_col = EXCLUDED.third_col; -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col ----------+-----------+----------- 1 | 100 | 4 (1 row) -- now update multiple columns using a table alias and a reference to the row itself INSERT INTO upsert_test as ups_test (part_key) VALUES (1) ON CONFLICT (part_key) DO UPDATE SET other_col = ups_test.other_col + 50, third_col = 200; -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col ----------+-----------+----------- 1 | 150 | 200 (1 row) -- now, do some more complex assignments INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = upsert_test.other_col + 1, third_col = upsert_test.third_col + (EXCLUDED.part_key + EXCLUDED.other_col) + 670; -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col ----------+-----------+----------- 1 | 151 | 872 (1 row) -- now, WHERE clause also has table reference INSERT INTO upsert_test as ups_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = (ups_test.other_col + ups_test.third_col + (EXCLUDED.part_key + EXCLUDED.other_col)) % 15 WHERE ups_test.third_col < 1000 + ups_test.other_col; -- see the results SELECT * FROM upsert_test; part_key | other_col | third_col ----------+-----------+----------- 1 | 5 | 872 (1 row) -- Test upsert, with returning: INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; part_key | other_col | third_col ----------+-----------+----------- 2 | 2 | (1 row) INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; part_key | other_col | third_col ----------+-----------+----------- 2 | 3 | (1 row) -- create another table CREATE TABLE upsert_test_2 ( part_key int, other_col int, third_col int, PRIMARY KEY (part_key, other_col) ); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('upsert_test_2', '4', '2'); master_create_worker_shards ----------------------------- (1 row) -- now show that Citus works with multiple columns as the PRIMARY KEY, including the partition key
INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1); INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key, other_col) DO NOTHING; -- this errors out since there is no unique constraint on partition key INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING; ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification -- create another table CREATE TABLE upsert_test_3 ( part_key int, count int ); -- note that this is not a unique index CREATE INDEX idx_ups_test ON upsert_test_3(part_key); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('upsert_test_3', '4', '2'); master_create_worker_shards ----------------------------- (1 row) -- since there are no unique indexes, error-out INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1; ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification -- create another table CREATE TABLE upsert_test_4 ( part_key int UNIQUE, count int ); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('upsert_test_4', '4', '2'); master_create_worker_shards ----------------------------- (1 row) -- a single row insert INSERT INTO upsert_test_4 VALUES (1, 0); -- show a simple count example use case INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; -- now see the results SELECT * FROM upsert_test_4; part_key | count ----------+------- 1 | 6 (1 row) -- now test dropped columns CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('dropcol_distributed', 4, 1); master_create_worker_shards ----------------------------- (1 row) INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key) DO UPDATE SET keep1 = dropcol.keep1; ALTER TABLE dropcol_distributed DROP COLUMN drop2; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' INSERT INTO dropcol_distributed (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key) DO UPDATE SET keep1 = dropcol_distributed.keep1; ALTER TABLE dropcol_distributed DROP COLUMN keep2; INSERT INTO dropcol_distributed AS dropcol (key, keep1) VALUES (1, '5') ON CONFLICT(key) DO UPDATE SET keep1 = dropcol.keep1; ALTER TABLE 
dropcol_distributed DROP COLUMN drop1; INSERT INTO dropcol_distributed AS dropcol (key, keep1) VALUES (1, '5') ON CONFLICT(key) DO UPDATE SET keep1 = dropcol.keep1; -- below we test the cases that Citus does not support -- subquery in the SET clause INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = (SELECT count(*) from upsert_test); ERROR: cannot perform distributed planning for the given modifications DETAIL: Subqueries are not supported in distributed modifications. -- non mutable function call in the SET INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = random()::int; ERROR: functions used in the DO UPDATE SET clause of INSERTs on distributed tables must be marked IMMUTABLE -- non mutable function call in the WHERE INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 5 WHERE upsert_test.other_col = random()::int; ERROR: functions used in the WHERE clause of the ON CONFLICT clause of INSERTs on distributed tables must be marked IMMUTABLE -- non mutable function call in the arbiter WHERE INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) WHERE part_key = random()::int DO UPDATE SET other_col = 5; ERROR: functions used in the WHERE clause of the ON CONFLICT clause of INSERTs on distributed tables must be marked IMMUTABLE -- error out on attempt to update the partition key INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET part_key = 15; ERROR: modifying the partition value of rows is not allowed citus-7.0.3/src/test/regress/expected/multi_utilities.out000066400000000000000000000250371317107136600236630ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 990000; -- =================================================================== -- test utility statement functionality -- =================================================================== CREATE TABLE sharded_table ( name text, id bigint ); SELECT master_create_distributed_table('sharded_table', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('sharded_table', 2, 1); master_create_worker_shards ----------------------------- (1 row) -- COPY out is supported with distributed tables COPY sharded_table TO STDOUT; COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT; 0 BEGIN; SET TRANSACTION READ ONLY; COPY sharded_table TO STDOUT; COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT; 0 COMMIT; -- cursors may not involve distributed tables DECLARE all_sharded_rows CURSOR FOR SELECT * FROM sharded_table; ERROR: DECLARE CURSOR can only be used in transaction blocks -- verify PREPARE functionality PREPARE sharded_insert AS INSERT INTO sharded_table VALUES ('adam', 1); PREPARE sharded_update AS UPDATE sharded_table SET name = 'bob' WHERE id = 1; PREPARE sharded_delete AS DELETE FROM sharded_table WHERE id = 1; PREPARE sharded_query AS SELECT name FROM sharded_table WHERE id = 1; EXECUTE sharded_query; name ------ (0 rows) EXECUTE sharded_insert; EXECUTE sharded_query; name ------ adam (1 row) EXECUTE sharded_update; EXECUTE sharded_query; name ------ bob (1 row) EXECUTE sharded_delete; EXECUTE sharded_query; name ------ (0 rows) -- try to drop shards with where clause SELECT master_apply_delete_command('DELETE FROM sharded_table WHERE id > 0'); ERROR: cannot delete from hash distributed table with 
this command DETAIL: Delete statements on hash-partitioned tables are not supported with master_apply_delete_command. HINT: Use master_modify_multiple_shards command instead. -- drop all shards SELECT master_apply_delete_command('DELETE FROM sharded_table'); ERROR: cannot delete from hash distributed table with this command DETAIL: Delete statements on hash-partitioned tables are not supported with master_apply_delete_command. HINT: Use master_modify_multiple_shards command instead. -- lock shard metadata: take some share locks and exclusive locks BEGIN; SELECT lock_shard_metadata(5, ARRAY[999001, 999002, 999002]); lock_shard_metadata --------------------- (1 row) SELECT lock_shard_metadata(7, ARRAY[999001, 999003, 999004]); lock_shard_metadata --------------------- (1 row) SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; locktype | objid | mode | granted ----------+--------+---------------+--------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t advisory | 999002 | ShareLock | t advisory | 999003 | ExclusiveLock | t advisory | 999004 | ExclusiveLock | t (5 rows) END; -- lock shard metadata: unsupported lock type SELECT lock_shard_metadata(0, ARRAY[990001, 999002]); ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_metadata(5, ARRAY[0]); lock_shard_metadata --------------------- (1 row) -- lock shard metadata: lock nothing SELECT lock_shard_metadata(5, ARRAY[]::bigint[]); ERROR: no locks specified -- lock shard resources: take some share locks and exclusive locks BEGIN; SELECT lock_shard_resources(5, ARRAY[999001, 999002, 999002]); lock_shard_resources ---------------------- (1 row) SELECT lock_shard_resources(7, ARRAY[999001, 999003, 999004]); lock_shard_resources ---------------------- (1 row) SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; locktype | objid | mode | granted ----------+--------+---------------+--------- advisory | 999001 | ExclusiveLock | t advisory | 999001 | ShareLock | t advisory | 999002 | ShareLock | t advisory | 999003 | ExclusiveLock | t advisory | 999004 | ExclusiveLock | t (5 rows) END; -- lock shard metadata: unsupported lock type SELECT lock_shard_resources(0, ARRAY[990001, 999002]); ERROR: unsupported lockmode 0 -- lock shard metadata: invalid shard ID SELECT lock_shard_resources(5, ARRAY[-1]); lock_shard_resources ---------------------- (1 row) -- lock shard metadata: lock nothing SELECT lock_shard_resources(5, ARRAY[]::bigint[]); ERROR: no locks specified -- drop table DROP TABLE sharded_table; -- VACUUM tests -- create a table with a single shard (for convenience) CREATE TABLE dustbunnies (id integer, name text, age integer); SELECT master_create_distributed_table('dustbunnies', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('dustbunnies', 1, 2); master_create_worker_shards ----------------------------- (1 row) -- add some data to the distributed table \copy dustbunnies (id, name) from stdin with csv -- following approach adapted from PostgreSQL's stats.sql file -- save relevant stat counter values in refreshable view \c - - - :worker_1_port CREATE MATERIALIZED VIEW prevcounts AS SELECT analyze_count, vacuum_count FROM pg_stat_user_tables WHERE relname='dustbunnies_990002'; -- create function that sleeps until those counters increment create function wait_for_stats() returns 
void as $$ declare start_time timestamptz := clock_timestamp(); analyze_updated bool; vacuum_updated bool; begin -- we don't want to wait forever; loop will exit after 10 seconds for i in 1 .. 100 loop -- check to see if analyze has been updated SELECT (st.analyze_count >= pc.analyze_count + 1) INTO analyze_updated FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc WHERE st.relname='dustbunnies_990002' AND cl.relname='dustbunnies_990002'; -- check to see if vacuum has been updated SELECT (st.vacuum_count >= pc.vacuum_count + 1) INTO vacuum_updated FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc WHERE st.relname='dustbunnies_990002' AND cl.relname='dustbunnies_990002'; exit when analyze_updated or vacuum_updated; -- wait a little perform pg_sleep(0.1); -- reset stats snapshot so we can test again perform pg_stat_clear_snapshot(); end loop; -- report time waited in postmaster log (where it won't change test output) raise log 'wait_for_stats delayed % seconds', extract(epoch from clock_timestamp() - start_time); end $$ language plpgsql; -- run VACUUM and ANALYZE against the table on the master \c - - - :master_port VACUUM dustbunnies; ANALYZE dustbunnies; -- verify that the VACUUM and ANALYZE ran \c - - - :worker_1_port SELECT wait_for_stats(); wait_for_stats ---------------- (1 row) REFRESH MATERIALIZED VIEW prevcounts; SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); pg_stat_get_vacuum_count -------------------------- 1 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); pg_stat_get_analyze_count --------------------------- 1 (1 row) -- get file node to verify VACUUM FULL SELECT relfilenode AS oldnode FROM pg_class WHERE oid='dustbunnies_990002'::regclass \gset -- send a VACUUM FULL and a VACUUM ANALYZE \c - - - :master_port VACUUM (FULL) dustbunnies; VACUUM ANALYZE dustbunnies; -- verify that relfilenode changed \c - - - :worker_1_port SELECT relfilenode != :oldnode AS table_rewritten FROM pg_class WHERE oid='dustbunnies_990002'::regclass; table_rewritten ----------------- t (1 row) -- verify the VACUUM ANALYZE incremented both vacuum and analyze counts SELECT wait_for_stats(); wait_for_stats ---------------- (1 row) SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); pg_stat_get_vacuum_count -------------------------- 2 (1 row) SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); pg_stat_get_analyze_count --------------------------- 2 (1 row) -- disable auto-VACUUM for next test ALTER TABLE dustbunnies_990002 SET (autovacuum_enabled = false); SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::regclass \gset -- send a VACUUM FREEZE after adding a new row \c - - - :master_port INSERT INTO dustbunnies VALUES (5, 'peter'); VACUUM (FREEZE) dustbunnies; -- verify that relfrozenxid increased \c - - - :worker_1_port SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class WHERE oid='dustbunnies_990002'::regclass; frozen_performed ------------------ t (1 row) -- check there are no nulls in either column SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; attname | null_frac ---------+----------- age | 1 id | 0 name | 0 (3 rows) -- add NULL values, then perform column-specific ANALYZE \c - - - :master_port INSERT INTO dustbunnies VALUES (6, NULL, NULL); ANALYZE dustbunnies (name); -- verify that name's NULL ratio is updated but age's is not \c - - - :worker_1_port SELECT attname, null_frac FROM pg_stats WHERE 
tablename = 'dustbunnies_990002' ORDER BY attname; attname | null_frac ---------+----------- age | 1 id | 0 name | 0.166667 (3 rows) \c - - - :master_port -- verify warning for unqualified VACUUM VACUUM; WARNING: not propagating VACUUM command to worker nodes HINT: Provide a specific table in order to VACUUM distributed tables. -- and warning when using targeted VACUUM without DDL propagation SET citus.enable_ddl_propagation to false; VACUUM dustbunnies; WARNING: not propagating VACUUM command to worker nodes HINT: Set citus.enable_ddl_propagation to true in order to send targeted VACUUM commands to worker nodes. SET citus.enable_ddl_propagation to DEFAULT; -- test worker_hash SELECT worker_hash(123); worker_hash ------------- -205084363 (1 row) SELECT worker_hash('1997-08-08'::date); worker_hash ------------- -499701663 (1 row) -- test a custom type (this test should run after multi_data_types) SELECT worker_hash('(1, 2)'); ERROR: cannot find a hash function for the input type HINT: Cast input to a data type with a hash function. SELECT worker_hash('(1, 2)'::test_composite_type); worker_hash ------------- -1895345704 (1 row) SELECT citus_truncate_trigger(); ERROR: must be called as trigger -- confirm that citus_create_restore_point works SELECT 1 FROM citus_create_restore_point('regression-test'); ?column? ---------- 1 (1 row) -- TODO: support VERBOSE -- VACUUM VERBOSE dustbunnies; -- VACUUM (FULL, VERBOSE) dustbunnies; -- ANALYZE VERBOSE dustbunnies; citus-7.0.3/src/test/regress/expected/multi_utility_statements.out000066400000000000000000000263121317107136600256170ustar00rootroot00000000000000-- -- MULTI_UTILITY_STATEMENTS -- -- Check that we can run utility statements with embedded SELECT statements on -- distributed tables. Currently we only support CREATE TABLE AS (SELECT..), -- DECLARE CURSOR, and COPY ... TO statements. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1000000; CREATE TEMP TABLE lineitem_pricing_summary AS ( SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus ); SELECT * FROM lineitem_pricing_summary ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- A | F | 75465.00 | 113619873.63 | 107841287.0728 | 112171153.245923 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 2944 N | F | 2022.00 | 3102551.45 | 2952540.7118 | 3072642.770652 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 76 N | O | 149778.00 | 224706948.16 | 213634857.6854 | 222134071.929801 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 5883 R | F | 73156.00 | 108937979.73 | 103516623.6698 | 107743533.784328 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 2901 (4 rows) -- Test we can handle joins SET citus.large_table_shard_count TO 2; CREATE TABLE shipping_priority AS ( SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate ); SELECT * FROM shipping_priority; l_orderkey | revenue | o_orderdate | o_shippriority ------------+-------------+-------------+---------------- 1637 | 268170.6408 | 02-08-1995 | 0 9696 | 252014.5497 | 02-20-1995 | 0 10916 | 242749.1996 | 03-11-1995 | 0 450 | 221012.3165 | 03-05-1995 | 0 5347 | 198353.7942 | 02-22-1995 | 0 10691 | 112800.1020 | 03-14-1995 | 0 386 | 104975.2484 | 01-25-1995 | 0 5765 | 88222.7556 | 12-15-1994 | 0 4707 | 88143.7774 | 02-27-1995 | 0 5312 | 83750.7028 | 02-24-1995 | 0 5728 | 70101.6400 | 12-11-1994 | 0 577 | 57986.6224 | 12-19-1994 | 0 12706 | 16636.6368 | 11-21-1994 | 0 3844 | 8851.3200 | 12-29-1994 | 0 11073 | 7433.6295 | 12-02-1994 | 0 13924 | 3111.4970 | 12-20-1994 | 0 (16 rows) DROP TABLE shipping_priority; -- Check COPY against distributed tables works both when specifying a -- query as the source, and when directly naming a table. 
COPY ( SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate ) TO stdout; 1637 268170.6408 02-08-1995 0 9696 252014.5497 02-20-1995 0 10916 242749.1996 03-11-1995 0 450 221012.3165 03-05-1995 0 5347 198353.7942 02-22-1995 0 10691 112800.1020 03-14-1995 0 386 104975.2484 01-25-1995 0 5765 88222.7556 12-15-1994 0 4707 88143.7774 02-27-1995 0 5312 83750.7028 02-24-1995 0 5728 70101.6400 12-11-1994 0 577 57986.6224 12-19-1994 0 12706 16636.6368 11-21-1994 0 3844 8851.3200 12-29-1994 0 11073 7433.6295 12-02-1994 0 13924 3111.4970 12-20-1994 0 -- check copying to file -- (quiet off to force number of copied records to be displayed) \set QUIET off COPY nation TO '/dev/null'; COPY 25 \set QUIET on -- stdout COPY nation TO STDOUT; 0 ALGERIA 0 haggle. carefully final deposits detect slyly agai 1 ARGENTINA 1 al foxes promise slyly according to the regular accounts. bold requests alon 2 BRAZIL 1 y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3 CANADA 1 eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4 EGYPT 4 y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5 ETHIOPIA 0 ven packages wake quickly. regu 6 FRANCE 3 refully final requests. regular, ironi 7 GERMANY 3 l platelets. regular accounts x-ray: unusual, regular acco 8 INDIA 2 ss excuses cajole slyly across the packages. deposits print aroun 9 INDONESIA 2 slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull 10 IRAN 4 efully alongside of the slyly final dependencies. 11 IRAQ 4 nic deposits boost atop the quickly final requests? quickly regula 12 JAPAN 2 ously. final, express gifts cajole a 13 JORDAN 4 ic deposits are blithely about the carefully regular pa 14 KENYA 0 pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t 15 MOROCCO 0 rns. blithely bold courts among the closely regular packages use furiously bold platelets? 16 MOZAMBIQUE 0 s. ironic, unusual asymptotes wake blithely r 17 PERU 1 platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun 18 CHINA 2 c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos 19 ROMANIA 3 ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account 20 SAUDI ARABIA 4 ts. silent requests haggle. closely express packages sleep across the blithely 21 VIETNAM 2 hely enticingly express accounts. even, final 22 RUSSIA 3 requests against the platelets use never according to the quickly regular pint 23 UNITED KINGDOM 3 eans boost carefully special requests. accounts are. carefull 24 UNITED STATES 1 y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. 
unusual pinto be -- ensure individual cols can be copied out, too COPY nation(n_name) TO STDOUT; ALGERIA ARGENTINA BRAZIL CANADA EGYPT ETHIOPIA FRANCE GERMANY INDIA INDONESIA IRAN IRAQ JAPAN JORDAN KENYA MOROCCO MOZAMBIQUE PERU CHINA ROMANIA SAUDI ARABIA VIETNAM RUSSIA UNITED KINGDOM UNITED STATES -- Test that we can create on-commit drop tables, and also test creating with -- oids, along with changing column names BEGIN; CREATE TEMP TABLE customer_few (customer_key) WITH (OIDS) ON COMMIT DROP AS (SELECT * FROM customer WHERE c_nationkey = 1 ORDER BY c_custkey LIMIT 10); SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; customer_key | c_name | c_address --------------+--------------------+----------------------------------------- 3 | Customer#000000003 | MG9kdTD2WBHm 14 | Customer#000000014 | KXkletMlL2JQEA 30 | Customer#000000030 | nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY 59 | Customer#000000059 | zLOCP0wh92OtBihgspOGl4 106 | Customer#000000106 | xGCOEAUjUNG (5 rows) COMMIT; SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; ERROR: relation "customer_few" does not exist LINE 2: FROM customer_few ORDER BY customer_key LIMIT 5; ^ -- Test DECLARE CURSOR statements DECLARE holdCursor SCROLL CURSOR WITH HOLD FOR SELECT l_orderkey, l_linenumber, l_quantity, l_discount FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH NEXT FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount ------------+--------------+------------+------------ 1 | 1 | 17.00 | 0.04 (1 row) FETCH FORWARD 5 FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount ------------+--------------+------------+------------ 1 | 2 | 36.00 | 0.09 1 | 3 | 8.00 | 0.10 1 | 4 | 28.00 | 0.09 1 | 5 | 24.00 | 0.10 1 | 6 | 32.00 | 0.07 (5 rows) FETCH LAST FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount ------------+--------------+------------+------------ 14947 | 2 | 29.00 | 0.04 (1 row) FETCH BACKWARD 5 FROM holdCursor; l_orderkey | l_linenumber | l_quantity | l_discount ------------+--------------+------------+------------ 14947 | 1 | 14.00 | 0.09 14946 | 2 | 37.00 | 0.01 14946 | 1 | 38.00 | 0.00 14945 | 6 | 37.00 | 0.05 14945 | 5 | 44.00 | 0.08 (5 rows) -- Test WITHOUT HOLD cursors inside transactions BEGIN; DECLARE noHoldCursor SCROLL CURSOR FOR SELECT l_orderkey, l_linenumber, l_quantity, l_discount FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH ABSOLUTE 5 FROM noHoldCursor; l_orderkey | l_linenumber | l_quantity | l_discount ------------+--------------+------------+------------ 1 | 5 | 24.00 | 0.10 (1 row) FETCH BACKWARD noHoldCursor; l_orderkey | l_linenumber | l_quantity | l_discount ------------+--------------+------------+------------ 1 | 4 | 28.00 | 0.09 (1 row) COMMIT; FETCH ABSOLUTE 5 FROM noHoldCursor; ERROR: cursor "noholdcursor" does not exist citus-7.0.3/src/test/regress/expected/multi_utility_warnings.out000066400000000000000000000014571317107136600252630ustar00rootroot00000000000000-- -- MULTI_UTILITY_WARNINGS -- -- Tests to check if we inform the user about potential caveats of creating new -- databases, schemas, and roles. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1010000; CREATE DATABASE new_database; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to workers HINT: You can manually create a database and its extensions on workers. 
CREATE ROLE new_role; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. CREATE USER new_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. citus-7.0.3/src/test/regress/expected/multi_view.out000066400000000000000000000773031317107136600226250ustar00rootroot00000000000000-- -- MULTI_VIEW -- -- This file contains test cases for view support. It verifies various -- Citus features: simple selects, aggregates, joins, outer joins -- router queries, single row inserts, multi row inserts via insert -- into select, multi row insert via copy commands. SELECT count(*) FROM lineitem_hash_part; count ------- 12000 (1 row) SELECT count(*) FROM orders_hash_part; count ------- 2984 (1 row) -- create a view for priority orders CREATE VIEW priority_orders AS SELECT * FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM'; -- aggregate pushdown SELECT o_orderpriority, count(*) FROM priority_orders GROUP BY 1 ORDER BY 2, 1; o_orderpriority | count -----------------+------- 2-HIGH | 593 1-URGENT | 603 (2 rows) SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM' GROUP BY 1 ORDER BY 2,1; o_orderpriority | count -----------------+------- 2-HIGH | 593 1-URGENT | 603 (2 rows) -- filters SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus ='F') as fullfilled FROM priority_orders GROUP BY 1 ORDER BY 2, 1; o_orderpriority | all | fullfilled -----------------+-----+------------ 2-HIGH | 593 | 271 1-URGENT | 603 | 280 (2 rows) -- having SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; o_orderdate | count -------------+------- 08-20-1996 | 5 10-10-1994 | 4 05-05-1994 | 4 04-07-1994 | 4 03-17-1993 | 4 (5 rows) -- having with filters SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; o_orderdate | all | count -------------+-----+------- 08-20-1996 | 5 | 0 10-10-1994 | 4 | 4 05-05-1994 | 4 | 4 04-07-1994 | 4 | 4 03-17-1993 | 4 | 4 (5 rows) -- limit SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc limit 5 ; o_orderkey | o_totalprice ------------+-------------- 4421 | 401055.62 10209 | 400191.77 11142 | 395039.05 14179 | 384265.43 11296 | 378166.33 (5 rows) SELECT o_orderkey, o_totalprice from priority_orders order by 2 desc, 1 asc limit 1 ; o_orderkey | o_totalprice ------------+-------------- 14179 | 384265.43 (1 row) CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN priority_orders ON (l_orderkey = o_orderkey); SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5; l_orderkey | count ------------+------- 7 | 7 225 | 7 226 | 7 322 | 7 326 | 7 (5 rows) CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR'; -- join between view and table SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey); count ------- 1706 (1 row) -- join between views SELECT count(*) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); count ------- 700 (1 row) -- count distinct on partition column is not supported SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = 
l_orderkey); ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. -- count distinct on partition column is supported on router queries SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); count ------- 1 (1 row) -- select distinct on router joins of views also works SELECT distinct(o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); o_orderkey ------------ 231 (1 row) -- left join support depends on flattening of the query -- following query fails since the inner part is kept as subquery SELECT * FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey); ERROR: cannot perform distributed planning on this query DETAIL: Subqueries in outer joins are not supported -- however, this works SELECT count(*) FROM priority_orders left join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; count ------- 700 (1 row) -- view at the inner side of is not supported SELECT count(*) FROM priority_orders right join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries in outer joins are not supported -- but view at the outer side is. This is essentially the same as a left join with arguments reversed. SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; count ------- 700 (1 row) -- left join on router query is supported SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE o_orderkey = 2; o_orderkey | l_linenumber ------------+-------------- 2 | (1 row) -- repartition query on view join -- it passes planning, fails at execution stage SELECT * FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey); ERROR: cannot use real time executor with repartition jobs HINT: Set citus.task_executor_type to "task-tracker". SET citus.task_executor_type to "task-tracker"; SELECT count(*) FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey); count ------- 192 (1 row) SET citus.task_executor_type to DEFAULT; -- insert into... 
select works with views CREATE TABLE temp_lineitem(LIKE lineitem_hash_part); SELECT create_distributed_table('temp_lineitem', 'l_orderkey', 'hash', 'lineitem_hash_part'); create_distributed_table -------------------------- (1 row) INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT count(*) FROM temp_lineitem; count ------- 1706 (1 row) -- following is a where false query, should not be inserting anything INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems WHERE l_shipmode = 'MAIL'; SELECT count(*) FROM temp_lineitem; count ------- 1706 (1 row) SET citus.task_executor_type to "task-tracker"; -- single view repartition subqueries are not supported SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM air_shipped_lineitems GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries without group by clause are not supported yet -- logically same query without a view works fine SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM lineitem_hash_part WHERE l_shipmode = 'AIR' GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; l_suppkey | count -----------+------- 7680 | 4 160 | 3 1042 | 3 1318 | 3 5873 | 3 (5 rows) -- when a view is replaced by actual query it still fails SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM (SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR') asi GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries without group by clause are not supported yet -- repartition query on view with single table subquery CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1; SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10; l_suppkey | count -----------+------- 6104 | 8 1868 | 6 5532 | 6 5849 | 6 6169 | 6 6669 | 6 6692 | 6 7703 | 6 7869 | 6 8426 | 6 (10 rows) SET citus.task_executor_type to DEFAULT; -- create a view with aggregate CREATE VIEW lineitems_by_shipping_method AS SELECT l_shipmode, count(*) as cnt FROM lineitem_hash_part GROUP BY 1; -- following will fail due to non GROUP BY of partition key SELECT * FROM lineitems_by_shipping_method; ERROR: Unrecognized range table id 1 -- create a view with group by on partition column CREATE VIEW lineitems_by_orderkey AS SELECT l_orderkey, count(*) FROM lineitem_hash_part GROUP BY 1; -- this should work since we're able to push down this query SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; l_orderkey | count ------------+------- 7 | 7 68 | 7 129 | 7 164 | 7 194 | 7 225 | 7 226 | 7 322 | 7 326 | 7 354 | 7 (10 rows) -- it would also work since it is made router plannable SELECT * FROM lineitems_by_orderkey WHERE l_orderkey = 100; l_orderkey | count ------------+------- 100 | 5 (1 row) DROP TABLE temp_lineitem CASCADE; DROP VIEW supp_count_view; DROP VIEW lineitems_by_orderkey; DROP VIEW lineitems_by_shipping_method; DROP VIEW air_shipped_lineitems; DROP VIEW priority_lineitem; DROP VIEW priority_orders; -- new tests for real time use case including views and subqueries -- create view to display recent user who has an activity after a timestamp CREATE VIEW recent_users AS SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id HAVING max(time) > '2014-01-21 05:45:49.978738'::timestamp order by 2 
DESC; SELECT * FROM recent_users; user_id | lastseen ---------+--------------------------------- 87 | Tue Jan 21 05:53:51.866813 2014 50 | Tue Jan 21 05:53:44.251016 2014 74 | Tue Jan 21 05:54:04.837808 2014 6 | Tue Jan 21 05:57:47.118755 2014 71 | Tue Jan 21 05:55:52.018461 2014 39 | Tue Jan 21 05:55:18.875997 2014 66 | Tue Jan 21 05:51:31.681997 2014 100 | Tue Jan 21 05:49:04.953009 2014 46 | Tue Jan 21 05:49:00.229807 2014 86 | Tue Jan 21 05:48:54.381334 2014 13 | Tue Jan 21 05:48:45.418146 2014 90 | Tue Jan 21 05:48:25.027491 2014 58 | Tue Jan 21 05:47:30.418553 2014 44 | Tue Jan 21 05:47:01.104523 2014 (14 rows) -- create a view for recent_events CREATE VIEW recent_events AS SELECT user_id, time FROM events_table WHERE time > '2014-01-20 01:45:49.978738'::timestamp; SELECT count(*) FROM recent_events; count ------- 1105 (1 row) -- count number of events of recent_users SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id); count ------- 1336 (1 row) -- count number of events of per recent users order by count SELECT ru.user_id, count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1; user_id | count ---------+------- 13 | 118 44 | 109 90 | 109 87 | 105 46 | 103 86 | 100 66 | 98 39 | 96 71 | 95 74 | 93 6 | 89 58 | 87 50 | 79 100 | 55 (14 rows) -- the same query with a left join however, it would still generate the same result SELECT ru.user_id, count(*) FROM recent_users ru LEFT JOIN events_table et ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1; user_id | count ---------+------- 13 | 118 44 | 109 90 | 109 87 | 105 46 | 103 86 | 100 66 | 98 39 | 96 71 | 95 74 | 93 6 | 89 58 | 87 50 | 79 100 | 55 (14 rows) -- query wrapped inside a subquery, it needs another top level order by SELECT * FROM (SELECT ru.user_id, count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; user_id | count ---------+------- 13 | 118 44 | 109 90 | 109 87 | 105 46 | 103 86 | 100 66 | 98 39 | 96 71 | 95 74 | 93 6 | 89 58 | 87 50 | 79 100 | 55 (14 rows) -- non-partition key joins are not supported inside subquery SELECT * FROM (SELECT ru.user_id, count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.event_type) GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
-- join between views -- recent users who has an event in recent events SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROUP BY ru.user_id ORDER BY ru.user_id; user_id --------- 6 13 39 44 46 50 58 66 71 74 86 87 90 100 (14 rows) -- outer join inside a subquery -- recent_events who are not done by recent users SELECT count(*) FROM ( SELECT re.*, ru.user_id AS recent_user FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu WHERE recent_user IS NULL; count ------- 957 (1 row) -- same query with anti-join SELECT count(*) FROM recent_events re LEFT JOIN recent_users ru ON(ru.user_id = re.user_id) WHERE ru.user_id IS NULL; count ------- 957 (1 row) -- join between view and table -- users who has recent activity and they have an entry with value_1 is less than 15 SELECT ut.* FROM recent_users ru JOIN users_table ut USING (user_id) WHERE ut.value_1 < 15 ORDER BY 1,2; user_id | time | value_1 | value_2 | value_3 | value_4 ---------+---------------------------------+---------+---------+---------+--------- 6 | Mon Jan 13 05:30:08.289267 2014 | 12 | 140 | 618 | 6 | Thu Jan 16 15:17:16.779695 2014 | 6 | 978 | 430 | 6 | Sun Jan 19 06:09:39.900888 2014 | 3 | 908 | 688 | 13 | Sun Jan 19 22:09:26.256209 2014 | 2 | 755 | 584 | 39 | Wed Jan 15 05:46:51.48765 2014 | 14 | 657 | 137 | 39 | Sun Jan 19 11:26:47.45937 2014 | 12 | 118 | 165 | 44 | Wed Jan 15 14:23:52.532426 2014 | 8 | 204 | 735 | 44 | Sun Jan 19 05:53:34.829093 2014 | 4 | 758 | 205 | 46 | Mon Jan 13 20:39:11.211169 2014 | 0 | 235 | 475 | 46 | Wed Jan 15 09:14:57.471944 2014 | 2 | 407 | 664 | 50 | Sat Jan 11 11:07:13.089216 2014 | 6 | 292 | 425 | 58 | Sun Jan 19 22:36:14.795396 2014 | 2 | 86 | 311 | 66 | Tue Jan 14 20:16:31.219213 2014 | 14 | 347 | 655 | 74 | Tue Jan 21 01:38:39.570986 2014 | 9 | 334 | 642 | 86 | Sun Jan 19 06:18:51.466578 2014 | 14 | 712 | 490 | 87 | Sat Jan 11 20:46:28.439073 2014 | 2 | 528 | 311 | 90 | Sun Jan 12 21:37:30.778206 2014 | 11 | 458 | 377 | 100 | Sun Jan 19 22:32:08.284043 2014 | 2 | 384 | 149 | (18 rows) -- determine if a recent user has done a given event type or not SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event FROM recent_users ru LEFT JOIN events_table et ON(ru.user_id = et.user_id AND et.event_type = 625) ORDER BY 2 DESC, 1; user_id | done_event ---------+------------ 6 | YES 13 | NO 39 | NO 44 | NO 46 | NO 50 | NO 58 | NO 66 | NO 71 | NO 74 | NO 86 | NO 87 | NO 90 | NO 100 | NO (14 rows) -- view vs table join wrapped inside a subquery SELECT * FROM (SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event FROM recent_users ru LEFT JOIN events_table et ON(ru.user_id = et.user_id AND et.event_type = 625) ) s1 ORDER BY 2 DESC, 1; user_id | done_event ---------+------------ 6 | YES 13 | NO 39 | NO 44 | NO 46 | NO 50 | NO 58 | NO 66 | NO 71 | NO 74 | NO 86 | NO 87 | NO 90 | NO 100 | NO (14 rows) -- event vs table non-partition-key join is not supported SELECT * FROM (SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event FROM recent_users ru LEFT JOIN events_table et ON(ru.user_id = et.event_type) ) s1 ORDER BY 2 DESC, 1; ERROR: cannot pushdown the subquery since all relations are not joined using distribution keys DETAIL: Each relation should be joined with at least one another relation using distribution keys and equality operator. 
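-- The queries above that fail do so because they join recent_users to
-- events_table on et.event_type instead of et.user_id; as the error DETAIL
-- notes, such a join does not pair the relations on their distribution
-- columns, so the subquery cannot be pushed down, while the user_id = user_id
-- joins are routed to co-located shards and succeed.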
-- create a select only view CREATE VIEW selected_users AS SELECT * FROM users_table WHERE value_1 >= 120 and value_1 <150; CREATE VIEW recent_selected_users AS SELECT su.* FROM selected_users su JOIN recent_users ru USING(user_id); SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; user_id --------- 6 13 39 44 46 50 58 66 71 74 86 90 (12 rows) -- this would be supported when we implement where partition_key in (subquery) support SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users) GROUP BY 1,2 ORDER BY 1 DESC,2 DESC LIMIT 5; user_id | time ---------+--------------------------------- 90 | Tue Jan 21 02:50:05.379732 2014 90 | Tue Jan 21 00:08:33.911898 2014 90 | Mon Jan 20 22:25:39.21906 2014 90 | Mon Jan 20 21:11:10.814326 2014 90 | Mon Jan 20 19:16:33.359257 2014 (5 rows) -- it is supported when it is a router query SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users WHERE user_id = 90); count ------- 109 (1 row) -- expected this to work but it did not (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users); ERROR: could not run distributed query with UNION, INTERSECT, or EXCEPT HINT: Consider using an equality filter on the distributed table's partition column. -- wrapping it inside a SELECT * works SELECT * FROM ( (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10 ORDER BY user_id; user_id --------- 11 12 13 14 (4 rows) -- union all also works for views SELECT * FROM ( (SELECT user_id FROM recent_users) UNION ALL (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10 ORDER BY user_id; user_id --------- 11 11 11 12 12 12 12 12 12 13 13 13 13 13 14 (15 rows) SELECT count(*) FROM ( (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10; count ------- 4 (1 row) -- expected this to work but it does not SELECT count(*) FROM ( (SELECT user_id FROM recent_users) UNION ALL (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. -- expand view definitions and re-run last 2 queries SELECT count(*) FROM ( (SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id HAVING max(time) > '2014-01-21 05:45:49.978738'::timestamp order by 2 DESC) aa ) UNION (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 120 and value_1 <150) bb) ) u WHERE user_id < 15 AND user_id > 10; count ------- 4 (1 row) SELECT count(*) FROM ( (SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id HAVING max(time) > '2014-01-21 05:45:49.978738'::timestamp order by 2 DESC) aa ) UNION ALL (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 120 and value_1 <150) bb) ) u WHERE user_id < 15 AND user_id > 10; ERROR: cannot pushdown the subquery since all leaves of the UNION does not include partition key at the same position DETAIL: Each leaf query of the UNION should return partition key at the same position on its target list. 
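-- The "same position" requirement in the DETAIL above can be read literally:
-- every UNION/UNION ALL leaf must return the distribution column at the same
-- ordinal in its target list. As an illustrative sketch only (not executed by
-- this test), a pair of leaves such as
--   (SELECT user_id, value_1 FROM users_table)
--   UNION ALL
--   (SELECT value_2, user_id FROM events_table)
-- exposes user_id at positions 1 and 2 respectively and so cannot satisfy
-- that check, whereas the passing UNION queries above project user_id first
-- in both leaves.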
-- test distinct -- distinct is supported if it is on a partition key CREATE VIEW distinct_user_with_value_1_15 AS SELECT DISTINCT user_id FROM users_table WHERE value_1 = 15; SELECT * FROM distinct_user_with_value_1_15 ORDER BY user_id; user_id --------- 7 8 35 42 46 53 70 82 87 88 96 (11 rows) -- distinct is not supported if it is on a non-partition key CREATE VIEW distinct_value_1 AS SELECT DISTINCT value_1 FROM users_table WHERE value_2 = 15; SELECT * FROM distinct_value_1; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries without group by clause are not supported yet -- CTEs are not supported even if they are on views CREATE VIEW cte_view_1 AS WITH c1 AS (SELECT * FROM users_table WHERE value_1 = 15) SELECT * FROM c1 WHERE value_2 < 500; SELECT * FROM cte_view_1; ERROR: cannot push down this subquery DETAIL: Table expressions other than simple relations and subqueries are currently unsupported -- this is single shard query but still not supported since it has view + cte -- router planner can't detect it SELECT * FROM cte_view_1 WHERE user_id = 8; ERROR: cannot push down this subquery DETAIL: Table expressions other than simple relations and subqueries are currently unsupported -- if CTE itself prunes down to a single shard than the view is supported (router plannable) CREATE VIEW cte_view_2 AS WITH c1 AS (SELECT * FROM users_table WHERE user_id = 8) SELECT * FROM c1 WHERE value_1 = 15; SELECT * FROM cte_view_2; user_id | time | value_1 | value_2 | value_3 | value_4 ---------+---------------------------------+---------+---------+---------+--------- 8 | Tue Jan 21 00:52:36.967785 2014 | 15 | 10 | 868 | (1 row) CREATE VIEW router_view AS SELECT * FROM users_table WHERE user_id = 2; -- router plannable SELECT user_id FROM router_view GROUP BY 1; user_id --------- 2 (1 row) -- join a router view SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN recent_events USING (user_id) ORDER BY 2 LIMIT 3; user_id | time ---------+--------------------------------- 2 | Mon Jan 20 02:02:03.208351 2014 2 | Mon Jan 20 02:34:14.54301 2014 2 | Mon Jan 20 03:16:38.418772 2014 (3 rows) SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN (SELECT * FROM recent_events) re USING (user_id) ORDER BY 2 LIMIT 3; user_id | time ---------+--------------------------------- 2 | Mon Jan 20 02:02:03.208351 2014 2 | Mon Jan 20 02:34:14.54301 2014 2 | Mon Jan 20 03:16:38.418772 2014 (3 rows) -- views with limits CREATE VIEW recent_10_users AS SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id ORDER BY lastseen DESC LIMIT 10; -- this is not supported since it has limit in it and subquery_pushdown is not set SELECT * FROM recent_10_users; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with limit are not supported yet SET citus.subquery_pushdown to ON; -- still not supported since outer query does not have limit -- it shows a different (subquery with single relation) error message SELECT * FROM recent_10_users; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries with limit are not supported yet -- now it displays more correct error message SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id); ERROR: cannot push down this subquery DETAIL: Limit in subquery without limit in the outermost query is unsupported -- now both are supported when there is a limit on the outer most query SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; user_id | lastseen 
---------+--------------------------------- 6 | Tue Jan 21 05:57:47.118755 2014 71 | Tue Jan 21 05:55:52.018461 2014 39 | Tue Jan 21 05:55:18.875997 2014 74 | Tue Jan 21 05:54:04.837808 2014 87 | Tue Jan 21 05:53:51.866813 2014 50 | Tue Jan 21 05:53:44.251016 2014 66 | Tue Jan 21 05:51:31.681997 2014 100 | Tue Jan 21 05:49:04.953009 2014 46 | Tue Jan 21 05:49:00.229807 2014 86 | Tue Jan 21 05:48:54.381334 2014 (10 rows) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; user_id | time | event_type | value_2 | value_3 | value_4 ---------+---------------------------------+------------+---------+---------+--------- 65 | Tue Jan 21 05:56:52.624231 2014 | 241 | 30 | 543 | 42 | Tue Jan 21 05:46:35.158342 2014 | 761 | 877 | 335 | 54 | Tue Jan 21 05:46:19.103645 2014 | 595 | 477 | 996 | 44 | Tue Jan 21 05:43:00.838945 2014 | 682 | 641 | 448 | 27 | Tue Jan 21 05:34:10.935865 2014 | 912 | 605 | 989 | 61 | Tue Jan 21 05:25:27.452065 2014 | 392 | 472 | 925 | 19 | Tue Jan 21 05:23:09.26298 2014 | 202 | 888 | 640 | 65 | Tue Jan 21 05:22:56.725329 2014 | 519 | 457 | 259 | 27 | Tue Jan 21 05:19:14.38026 2014 | 19 | 19 | 205 | 11 | Tue Jan 21 05:15:14.879531 2014 | 459 | 545 | 80 | (10 rows) RESET citus.subquery_pushdown; VACUUM ANALYZE users_table; -- explain tests EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id -> HashAggregate Group Key: remote_scan.user_id -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> HashAggregate Group Key: users_table.user_id -> Hash Join Hash Cond: (users_table.user_id = ru.user_id) -> Bitmap Heap Scan on users_table_1400000 users_table Recheck Cond: ((value_1 >= 120) AND (value_1 < 150)) -> Bitmap Index Scan on is_index3_1400000 Index Cond: ((value_1 >= 120) AND (value_1 < 150)) -> Hash -> Subquery Scan on ru -> Sort Sort Key: (max(users_table_1."time")) DESC -> HashAggregate Group Key: users_table_1.user_id Filter: (max(users_table_1."time") > '2014-01-21 05:45:49.978738'::timestamp without time zone) -> Seq Scan on users_table_1400000 users_table_1 (25 rows) EXPLAIN (COSTS FALSE) SELECT * FROM ( (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10 ORDER BY user_id; QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------- Sort Sort Key: remote_scan.user_id -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Unique -> Sort Sort Key: recent_users.user_id -> Append -> Subquery Scan on recent_users -> Sort Sort Key: (max(users_table."time")) DESC -> GroupAggregate Group Key: users_table.user_id Filter: (max(users_table."time") > '2014-01-21 05:45:49.978738'::timestamp without time zone) -> Index Scan using is_index1_1400000 on users_table_1400000 users_table Index Cond: ((user_id < 15) AND (user_id > 10)) -> Index Scan using is_index1_1400000 on users_table_1400000 users_table_1 Index Cond: ((user_id < 15) AND (user_id > 10)) Filter: ((value_1 >= 120) AND (value_1 < 150)) (22 rows) EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY 
et.time DESC LIMIT 10; ERROR: cannot push down this subquery DETAIL: Limit in subquery is currently unsupported SET citus.subquery_pushdown to ON; EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; QUERY PLAN --------------------------------------------------------------------------------------------------------------------- Limit -> Sort Sort Key: remote_scan."time" DESC -> Custom Scan (Citus Real-Time) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=57637 dbname=regression -> Limit -> Sort Sort Key: et."time" DESC -> Hash Join Hash Cond: (et.user_id = recent_10_users.user_id) -> Seq Scan on events_table_1400004 et -> Hash -> Subquery Scan on recent_10_users -> Limit -> Sort Sort Key: (max(users_table."time")) DESC -> HashAggregate Group Key: users_table.user_id -> Seq Scan on users_table_1400000 users_table (22 rows) RESET citus.subquery_pushdown; DROP VIEW recent_10_users; DROP VIEW router_view; DROP VIEW cte_view_2; DROP VIEW cte_view_1; DROP VIEW distinct_value_1; DROP VIEW distinct_user_with_value_1_15; DROP VIEW recent_selected_users; DROP VIEW selected_users; DROP VIEW recent_events; DROP VIEW recent_users; citus-7.0.3/src/test/regress/expected/multi_working_columns.out000066400000000000000000000040571317107136600250670ustar00rootroot00000000000000-- -- MULTI_WORKING_COLUMNS -- -- Columns that are used in sorting and grouping but that do not appear in the -- projection order are called working (resjunk) columns. We check in here that -- these columns are pulled to the master, and are correctly used in sorting and -- grouping. SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; l_quantity ------------ 38.00 13.00 15.00 17.00 30.00 24.00 24.00 5.00 38.00 13.00 26.00 30.00 30.00 35.00 38.00 24.00 37.00 11.00 18.00 17.00 (20 rows) SELECT l_quantity, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count LIMIT 20; l_quantity | count ------------+------- 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 1.00 | 1 (20 rows) SELECT l_quantity, l_shipdate, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count, l_shipdate LIMIT 20; l_quantity | l_shipdate | count ------------+------------+------- 1.00 | 02-07-1992 | 1 1.00 | 02-23-1992 | 1 1.00 | 03-17-1992 | 1 1.00 | 04-22-1992 | 1 1.00 | 04-23-1992 | 1 1.00 | 04-30-1992 | 1 1.00 | 05-13-1992 | 1 1.00 | 05-15-1992 | 1 1.00 | 05-27-1992 | 1 1.00 | 05-29-1992 | 1 1.00 | 06-09-1992 | 1 1.00 | 06-23-1992 | 1 1.00 | 07-15-1992 | 1 1.00 | 07-18-1992 | 1 1.00 | 07-23-1992 | 1 1.00 | 08-03-1992 | 1 1.00 | 08-11-1992 | 1 1.00 | 08-29-1992 | 1 1.00 | 09-08-1992 | 1 1.00 | 09-11-1992 | 1 (20 rows) citus-7.0.3/src/test/regress/expected/task_tracker_assign_task.out000066400000000000000000000046071317107136600255010ustar00rootroot00000000000000-- -- TASK_TRACKER_ASSIGN_TASK -- \set JobId 401010 \set SimpleTaskId 101101 \set RecoverableTaskId 801102 \set SimpleTaskTable lineitem_simple_task \set BadQueryString '\'SELECT COUNT(*) FROM bad_table_name\'' \set GoodQueryString '\'SELECT COUNT(*) FROM lineitem\'' \set SelectAll 'SELECT *' -- We assign two tasks to the task tracker. The first task simply executes. The -- recoverable task on the other hand repeatedly fails, and we sleep until the -- task tracker stops retrying the recoverable task. 
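-- As the status checks below illustrate, task execution is asynchronous: the
-- assignment calls return immediately and task_tracker_task_status() is
-- polled after a pg_sleep(). In these fixtures a status of 6 is reported for
-- a task that finished successfully, 5 for the recoverable task once the
-- tracker has given up on its failing query, and 3 (in the cleanup test that
-- follows) for a task that is still running; the tests only rely on these
-- observed values rather than documenting the full set of status codes.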
SELECT task_tracker_assign_task(:JobId, :SimpleTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_101101'''); task_tracker_assign_task -------------------------- (1 row) SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :BadQueryString); task_tracker_assign_task -------------------------- (1 row) -- After assigning the two tasks, we wait for them to make progress. Note that -- these tasks get scheduled and run asynchronously, so if the sleep interval is -- not enough, the regression tests may fail on an overloaded box. SELECT pg_sleep(3.0); pg_sleep ---------- (1 row) SELECT task_tracker_task_status(:JobId, :SimpleTaskId); task_tracker_task_status -------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); task_tracker_task_status -------------------------- 5 (1 row) COPY :SimpleTaskTable FROM 'base/pgsql_job_cache/job_401010/task_101101'; SELECT COUNT(*) FROM :SimpleTaskTable; count ------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :SelectAll FROM :SimpleTaskTable EXCEPT ALL :SelectAll FROM lineitem ) diff; diff_lhs ---------- 0 (1 row) SELECT COUNT(*) As diff_rhs FROM ( :SelectAll FROM lineitem EXCEPT ALL :SelectAll FROM :SimpleTaskTable ) diff; diff_rhs ---------- 0 (1 row) -- We now reassign the recoverable task with a good query string. This updates -- the task's query string, and reschedules the updated task for execution. SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :GoodQueryString); task_tracker_assign_task -------------------------- (1 row) SELECT pg_sleep(2.0); pg_sleep ---------- (1 row) SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); task_tracker_task_status -------------------------- 6 (1 row) citus-7.0.3/src/test/regress/expected/task_tracker_cleanup_job.out000066400000000000000000000041651317107136600254530ustar00rootroot00000000000000-- -- TASK_TRACKER_CLEANUP_JOB -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1060000; \set JobId 401010 \set CompletedTaskId 801107 \set RunningTaskId 801108 -- We assign two tasks to the task tracker. The first task should complete and -- the second task should continue to keep running. SELECT task_tracker_assign_task(:JobId, :CompletedTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_801107'''); task_tracker_assign_task -------------------------- (1 row) SELECT task_tracker_assign_task(:JobId, :RunningTaskId, 'SELECT pg_sleep(100)'); task_tracker_assign_task -------------------------- (1 row) SELECT pg_sleep(2.0); pg_sleep ---------- (1 row) SELECT task_tracker_task_status(:JobId, :CompletedTaskId); task_tracker_task_status -------------------------- 6 (1 row) SELECT task_tracker_task_status(:JobId, :RunningTaskId); task_tracker_task_status -------------------------- 3 (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010/task_801107'); isdir ------- f (1 row) SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); isdir ------- t (1 row) -- We now clean up all tasks for this job id. As a result, shared hash entries, -- files, and connections associated with these tasks should all be cleaned up. 
SELECT task_tracker_cleanup_job(:JobId); task_tracker_cleanup_job -------------------------- (1 row) SELECT pg_sleep(1.0); pg_sleep ---------- (1 row) SELECT task_tracker_task_status(:JobId, :CompletedTaskId); ERROR: could not find the worker task DETAIL: Task jobId: 401010 and taskId: 801107 SELECT task_tracker_task_status(:JobId, :RunningTaskId); ERROR: could not find the worker task DETAIL: Task jobId: 401010 and taskId: 801108 SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010/task_801107'); ERROR: could not stat file "base/pgsql_job_cache/job_401010/task_801107": No such file or directory SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); ERROR: could not stat file "base/pgsql_job_cache/job_401010": No such file or directory citus-7.0.3/src/test/regress/expected/task_tracker_create_table.out000066400000000000000000000010211317107136600255700ustar00rootroot00000000000000-- -- TASK_TRACKER_CREATE_TABLE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1070000; -- New table definitions to test the task tracker process and protocol CREATE TABLE lineitem_simple_task ( LIKE lineitem ); CREATE TABLE lineitem_compute_task ( LIKE lineitem ); CREATE TABLE lineitem_compute_update_task ( LIKE lineitem ); CREATE TABLE lineitem_partition_task_part_00 ( LIKE lineitem ); CREATE TABLE lineitem_partition_task_part_01 ( LIKE lineitem ); CREATE TABLE lineitem_partition_task_part_02 ( LIKE lineitem ); citus-7.0.3/src/test/regress/expected/task_tracker_partition_task.out000066400000000000000000000056401317107136600262240ustar00rootroot00000000000000-- -- TASK_TRACKER_PARTITION_TASK -- \set JobId 401010 \set PartitionTaskId 801106 \set PartitionColumn l_orderkey \set SelectAll 'SELECT *' \set TablePart00 lineitem_partition_task_part_00 \set TablePart01 lineitem_partition_task_part_01 \set TablePart02 lineitem_partition_task_part_02 -- We assign a partition task and wait for it to complete. Note that we hardcode -- the partition function call string, including the job and task identifiers, -- into the argument in the task assignment function. This hardcoding is -- necessary as the current psql version does not perform variable interpolation -- for names inside single quotes. SELECT task_tracker_assign_task(:JobId, :PartitionTaskId, 'SELECT worker_range_partition_table(' '401010, 801106, ''SELECT * FROM lineitem'', ' '''l_orderkey'', 20, ARRAY[1000, 3000]::_int8)'); task_tracker_assign_task -------------------------- (1 row) SELECT pg_sleep(4.0); pg_sleep ---------- (1 row) SELECT task_tracker_task_status(:JobId, :PartitionTaskId); task_tracker_task_status -------------------------- 6 (1 row) COPY :TablePart00 FROM 'base/pgsql_job_cache/job_401010/task_801106/p_00000'; COPY :TablePart01 FROM 'base/pgsql_job_cache/job_401010/task_801106/p_00001'; COPY :TablePart02 FROM 'base/pgsql_job_cache/job_401010/task_801106/p_00002'; SELECT COUNT(*) FROM :TablePart00; count ------- 1004 (1 row) SELECT COUNT(*) FROM :TablePart02; count ------- 8970 (1 row) -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
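-- Both directions are needed because EXCEPT ALL is asymmetric: an empty
-- "partition EXCEPT ALL base" result only shows that every partitioned row,
-- counting duplicates, also appears among the base rows for that range, and
-- the reverse query proves containment the other way, so together the two
-- zero counts establish that the partition file and the filtered base table
-- hold exactly the same multiset of rows. A generic sketch of the pattern,
-- with part_table and base_table as placeholder names:
--   SELECT count(*) FROM (SELECT * FROM part_table
--                         EXCEPT ALL
--                         SELECT * FROM base_table WHERE <range filter>) diff;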
SELECT COUNT(*) AS diff_lhs_00 FROM ( :SelectAll FROM :TablePart00 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :SelectAll FROM :TablePart01 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :SelectAll FROM :TablePart02 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 ) diff; diff_lhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 EXCEPT ALL :SelectAll FROM :TablePart00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 EXCEPT ALL :SelectAll FROM :TablePart01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 EXCEPT ALL :SelectAll FROM :TablePart02 ) diff; diff_rhs_02 ------------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_binary_data_partition.out000066400000000000000000000066451317107136600264010ustar00rootroot00000000000000-- -- WORKER_BINARY_DATA_PARTITION -- \set JobId 201010 \set TaskId 101105 \set Partition_Column textcolumn \set Partition_Column_Text '\'textcolumn\'' \set Partition_Column_Type 25 \set Select_Query_Text '\'SELECT * FROM binary_data_table\'' \set Select_All 'SELECT *' \set Table_Name binary_data_table \set Table_Part_00 binary_data_table_part_00 \set Table_Part_01 binary_data_table_part_01 \set Table_Part_02 binary_data_table_part_02 -- Create table with special characters CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT length(binarycolumn) FROM :Table_Name; length -------- 2 4 3 2 4 14 28 16 9 11 11 24 17 12 (14 rows) -- Run select query, and apply range partitioning on query results SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); worker_range_partition_table ------------------------------ (1 row) -- Copy range partitioned files into tables CREATE TABLE :Table_Part_00 ( LIKE :Table_Name ); CREATE TABLE :Table_Part_01 ( LIKE :Table_Name ); CREATE TABLE :Table_Part_02 ( LIKE :Table_Name ); COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101105/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101105/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101105/p_00002'; -- The union of the three partitions should have as many rows as original table SELECT COUNT(*) AS total_row_count FROM ( SELECT * FROM :Table_Part_00 UNION ALL SELECT * FROM :Table_Part_01 UNION ALL SELECT * FROM :Table_Part_02 ) AS all_rows; total_row_count ----------------- 14 (1 row) -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' ) diff; diff_lhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_check_invalid_arguments.out000066400000000000000000000066631317107136600267030ustar00rootroot00000000000000-- -- WORKER_CHECK_INVALID_ARGUMENTS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1100000; \set JobId 201010 \set TaskId 101108 \set Table_Name simple_binary_data_table \set Partition_Column_Name '\'textcolumn\'' \set Partition_Column_Type 25 \set Partition_Count 2 \set Select_Query_Text '\'SELECT * FROM simple_binary_data_table\'' \set Bad_Partition_Column_Name '\'badcolumnname\'' \set Bad_Partition_Column_Type 20 \set Bad_Select_Query_Text '\'SELECT * FROM bad_table_name\'' -- Create simple table and insert a few rows into this table -- N.B. - These rows will be partitioned to files on disk then read back in the -- order the files are listed by a call to readdir; because this order is not -- predictable, the second column of these rows always has the same value, to -- avoid an error message differing based on file read order. 
CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; SELECT COUNT(*) FROM :Table_Name; count ------- 2 (1 row) -- Check that we fail with bad SQL query SELECT worker_range_partition_table(:JobId, :TaskId, :Bad_Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); ERROR: relation "bad_table_name" does not exist LINE 1: SELECT * FROM bad_table_name ^ QUERY: SELECT * FROM bad_table_name -- Check that we fail with bad partition column name SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Bad_Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); ERROR: could not find column name "badcolumnname" -- Check that we fail when partition column and split point types do not match SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Bad_Partition_Column_Type, ARRAY['aaa', 'some']::_text); ERROR: partition column type 20 and split point type 25 do not match -- Check that we fail with bad partition column type on hash partitioning SELECT worker_hash_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Bad_Partition_Column_Type, :Partition_Count); ERROR: partition column types 25 and 20 do not match -- Now, partition table data using valid arguments SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); worker_range_partition_table ------------------------------ (1 row) -- Check that we fail to merge when the number of column names and column types -- do not match SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'bytea', 'integer']); ERROR: column name array size: 2 and type array size: 3 do not match -- Check that we fail to merge when column types do not match underlying data SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'integer']); ERROR: invalid input syntax for integer: "\x0b50" CONTEXT: COPY task_101108, line 1, column binarycolumn: "\x0b50" -- Finally, merge partitioned files using valid arguments SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'bytea']); worker_merge_files_into_table ------------------------------- (1 row) citus-7.0.3/src/test/regress/expected/worker_create_table.out000066400000000000000000000061111317107136600244310ustar00rootroot00000000000000-- -- WORKER_CREATE_TABLE -- -- Create new table definitions for lineitem and supplier tables to test worker -- node execution logic. For now,the tests include range and hash partitioning -- of existing tables. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1110000; CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); CREATE TABLE lineitem_complex ( l_partkey integer not null, l_discount decimal(15, 2) not null, l_shipdate date not null, l_comment varchar(44) not null ); -- Range partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_range_part_00 ( LIKE lineitem ); CREATE TABLE lineitem_range_part_01 ( LIKE lineitem ); CREATE TABLE lineitem_range_part_02 ( LIKE lineitem ); CREATE TABLE lineitem_range_part_03 ( LIKE lineitem ); -- Complex range partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_range_complex_part_00 ( LIKE lineitem_complex ); CREATE TABLE lineitem_range_complex_part_01 ( LIKE lineitem_complex ); CREATE TABLE lineitem_range_complex_part_02 ( LIKE lineitem_complex ); CREATE TABLE lineitem_range_complex_part_03 ( LIKE lineitem_complex ); -- Hash partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_hash_part_00 ( LIKE lineitem ); CREATE TABLE lineitem_hash_part_01 ( LIKE lineitem ); CREATE TABLE lineitem_hash_part_02 ( LIKE lineitem ); CREATE TABLE lineitem_hash_part_03 ( LIKE lineitem ); -- Complex hash partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_hash_complex_part_00 ( LIKE lineitem_complex ); CREATE TABLE lineitem_hash_complex_part_01 ( LIKE lineitem_complex ); CREATE TABLE lineitem_hash_complex_part_02 ( LIKE lineitem_complex ); CREATE TABLE lineitem_hash_complex_part_03 ( LIKE lineitem_complex ); -- Now create a supplier table to test repartitioning the data on the nation key -- column, where the column's values can be null or zero. CREATE TABLE SUPPLIER ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); -- Range partitioned supplier data are inserted into three tables CREATE TABLE supplier_range_part_00 ( LIKE supplier ); CREATE TABLE supplier_range_part_01 ( LIKE supplier ); CREATE TABLE supplier_range_part_02 ( LIKE supplier ); -- Hash partitioned supplier data are inserted into three tables CREATE TABLE supplier_hash_part_00 ( LIKE supplier ); CREATE TABLE supplier_hash_part_01 ( LIKE supplier ); CREATE TABLE supplier_hash_part_02 ( LIKE supplier ); citus-7.0.3/src/test/regress/expected/worker_hash_partition.out000066400000000000000000000067641317107136600250510ustar00rootroot00000000000000-- -- WORKER_HASH_PARTITION -- \set JobId 201010 \set TaskId 101103 \set Partition_Column l_orderkey \set Partition_Column_Text '\'l_orderkey\'' \set Partition_Column_Type '\'int8\'' \set Partition_Count 4 \set Select_Query_Text '\'SELECT * FROM lineitem\'' \set Select_All 'SELECT *' -- Hash functions internally return unsigned 32-bit integers. However, when -- called externally, the return value becomes a signed 32-bit integer. 
We hack -- around this conversion issue by bitwise-anding the hash results. Note that -- this only works because we are modding with 4. The proper Hash_Mod_Function -- would be (case when hashint8(l_orderkey) >= 0 then (hashint8(l_orderkey) % 4) -- else ((hashint8(l_orderkey) + 4294967296) % 4) end). \set Hash_Mod_Function '( (hashint8(l_orderkey) & 2147483647) % 4 )' \set Table_Part_00 lineitem_hash_part_00 \set Table_Part_01 lineitem_hash_part_01 \set Table_Part_02 lineitem_hash_part_02 \set Table_Part_03 lineitem_hash_part_03 -- Run select query, and apply hash partitioning on query results SELECT worker_hash_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type::regtype, :Partition_Count); worker_hash_partition_table ----------------------------- (1 row) COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00002'; COPY :Table_Part_03 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00003'; SELECT COUNT(*) FROM :Table_Part_00; count ------- 3081 (1 row) SELECT COUNT(*) FROM :Table_Part_03; count ------- 2935 (1 row) -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) ) diff; diff_lhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) ) diff; diff_lhs_03 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) EXCEPT ALL :Select_All FROM :Table_Part_03 ) diff; diff_rhs_03 ------------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_hash_partition_complex.out000066400000000000000000000075501317107136600265720ustar00rootroot00000000000000-- -- WORKER_HASH_PARTITION_COMPLEX -- \set JobId 201010 \set TaskId 101104 \set Partition_Column l_partkey \set Partition_Column_Text '\'l_partkey\'' \set Partition_Column_Type 23 \set Partition_Count 4 \set Select_Columns 'SELECT l_partkey, l_discount, l_shipdate, l_comment' \set Select_Filters 'l_shipdate >= date \'1992-01-15\' AND l_discount between 0.02 AND 0.08' \set Hash_Mod_Function '( (hashint4(l_partkey) & 2147483647) % 4 )' \set Table_Part_00 lineitem_hash_complex_part_00 \set Table_Part_01 
lineitem_hash_complex_part_01 \set Table_Part_02 lineitem_hash_complex_part_02 \set Table_Part_03 lineitem_hash_complex_part_03 -- Run hardcoded complex select query, and apply hash partitioning on query -- results SELECT worker_hash_partition_table(:JobId, :TaskId, 'SELECT l_partkey, l_discount, l_shipdate, l_comment' ' FROM lineitem ' ' WHERE l_shipdate >= date ''1992-01-15''' ' AND l_discount between 0.02 AND 0.08', :Partition_Column_Text, :Partition_Column_Type, :Partition_Count); worker_hash_partition_table ----------------------------- (1 row) -- Copy partitioned data files into tables for testing purposes COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00002'; COPY :Table_Part_03 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00003'; SELECT COUNT(*) FROM :Table_Part_00; count ------- 1988 (1 row) SELECT COUNT(*) FROM :Table_Part_03; count ------- 1881 (1 row) -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_Columns FROM :Table_Part_00 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 0) ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_Columns FROM :Table_Part_01 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 1) ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_Columns FROM :Table_Part_02 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 2) ) diff; diff_lhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_Columns FROM :Table_Part_03 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 3) ) diff; diff_lhs_03 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 0) EXCEPT ALL :Select_Columns FROM :Table_Part_00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 1) EXCEPT ALL :Select_Columns FROM :Table_Part_01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 2) EXCEPT ALL :Select_Columns FROM :Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 3) EXCEPT ALL :Select_Columns FROM :Table_Part_03 ) diff; diff_rhs_03 ------------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_merge_hash_files.out000066400000000000000000000030721317107136600253060ustar00rootroot00000000000000-- -- WORKER_MERGE_HASH_FILES -- \set JobId 201010 \set TaskId 101103 \set Task_Table_Name public.task_101103 \set Select_All 'SELECT *' -- TaskId determines our dependency on hash partitioned files. We take these -- files, and merge them in a task table. We also pass the column names and -- column types that are used to create the task table. 
SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['orderkey', 'partkey', 'suppkey', 'linenumber', 'quantity', 'extendedprice', 'discount', 'tax', 'returnflag', 'linestatus', 'shipdate', 'commitdate', 'receiptdate', 'shipinstruct', 'shipmode', 'comment']::_text, ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); worker_merge_files_into_table ------------------------------- (1 row) -- We first count elements from the merged table and the original table we hash -- partitioned. We then compute the difference of these two tables. SELECT COUNT(*) FROM :Task_Table_Name; count ------- 12000 (1 row) SELECT COUNT(*) FROM lineitem; count ------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; diff_lhs ---------- 0 (1 row) SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; diff_rhs ---------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_merge_range_files.out000066400000000000000000000030751317107136600254620ustar00rootroot00000000000000-- -- WORKER_MERGE_RANGE_FILES -- \set JobId 201010 \set TaskId 101101 \set Task_Table_Name public.task_101101 \set Select_All 'SELECT *' -- TaskId determines our dependency on range partitioned files. We take these -- files, and merge them in a task table. We also pass the column names and -- column types that are used to create the task table. SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['orderkey', 'partkey', 'suppkey', 'linenumber', 'quantity', 'extendedprice', 'discount', 'tax', 'returnflag', 'linestatus', 'shipdate', 'commitdate', 'receiptdate', 'shipinstruct', 'shipmode', 'comment']::_text, ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); worker_merge_files_into_table ------------------------------- (1 row) -- We first count elements from the merged table and the original table we range -- partitioned. We then compute the difference of these two tables. SELECT COUNT(*) FROM :Task_Table_Name; count ------- 12000 (1 row) SELECT COUNT(*) FROM lineitem; count ------- 12000 (1 row) SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; diff_lhs ---------- 0 (1 row) SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; diff_rhs ---------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_null_data_partition.out000066400000000000000000000133411317107136600260560ustar00rootroot00000000000000-- -- WORKER_NULL_DATA_PARTITION -- \set JobId 201010 \set Range_TaskId 101106 \set Partition_Column s_nationkey \set Partition_Column_Text '\'s_nationkey\'' \set Partition_Column_Type 23 \set Select_Query_Text '\'SELECT * FROM supplier\'' \set Select_All 'SELECT *' \set Range_Table_Part_00 supplier_range_part_00 \set Range_Table_Part_01 supplier_range_part_01 \set Range_Table_Part_02 supplier_range_part_02 -- Run select query, and apply range partitioning on query results. 
Note that -- one of the split point values is 0, We are checking here that the partition -- function doesn't treat 0 as null, and that range repartitioning correctly -- puts null nation key values into the 0th repartition bucket. SELECT worker_range_partition_table(:JobId, :Range_TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[0, 10]::_int4); worker_range_partition_table ------------------------------ (1 row) -- Copy partitioned data files into tables for testing purposes COPY :Range_Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101106/p_00000'; COPY :Range_Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101106/p_00001'; COPY :Range_Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101106/p_00002'; SELECT COUNT(*) FROM :Range_Table_Part_00; count ------- 6 (1 row) SELECT COUNT(*) FROM :Range_Table_Part_02; count ------- 588 (1 row) -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Range_Table_Part_00 EXCEPT ALL (:Select_All FROM supplier WHERE :Partition_Column < 0 OR :Partition_Column IS NULL) ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Range_Table_Part_01 EXCEPT ALL :Select_All FROM supplier WHERE :Partition_Column >= 0 AND :Partition_Column < 10 ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( (:Select_All FROM supplier WHERE :Partition_Column < 0 OR :Partition_Column IS NULL) EXCEPT ALL :Select_All FROM :Range_Table_Part_00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 0 AND :Partition_Column < 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) -- Next, run select query and apply hash partitioning on query results. We are -- checking here that hash repartitioning correctly puts null nation key values -- into the 0th repartition bucket. \set Hash_TaskId 101107 \set Partition_Count 4 \set Hash_Mod_Function '( (hashint4(s_nationkey) & 2147483647) % 4 )' \set Hash_Table_Part_00 supplier_hash_part_00 \set Hash_Table_Part_01 supplier_hash_part_01 \set Hash_Table_Part_02 supplier_hash_part_02 -- Run select query, and apply hash partitioning on query results SELECT worker_hash_partition_table(:JobId, :Hash_TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, :Partition_Count); worker_hash_partition_table ----------------------------- (1 row) COPY :Hash_Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101107/p_00000'; COPY :Hash_Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101107/p_00001'; COPY :Hash_Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101107/p_00002'; SELECT COUNT(*) FROM :Hash_Table_Part_00; count ------- 298 (1 row) SELECT COUNT(*) FROM :Hash_Table_Part_02; count ------- 203 (1 row) -- We first compute the difference of partition tables against the base table. 
-- Then, we compute the difference of the base table against partitioned tables. SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Hash_Table_Part_00 EXCEPT ALL (:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR :Partition_Column IS NULL) ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Hash_Table_Part_01 EXCEPT ALL :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Hash_Table_Part_02 EXCEPT ALL :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) ) diff; diff_lhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( (:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR :Partition_Column IS NULL) EXCEPT ALL :Select_All FROM :Hash_Table_Part_00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Hash_Table_Part_01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) EXCEPT ALL :Select_All FROM :Hash_Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_range_partition.out000066400000000000000000000064721317107136600252160ustar00rootroot00000000000000-- -- WORKER_RANGE_PARTITION -- \set JobId 201010 \set TaskId 101101 \set Partition_Column l_orderkey \set Partition_Column_Text '\'l_orderkey\'' \set Partition_Column_Type 20 \set Select_Query_Text '\'SELECT * FROM lineitem\'' \set Select_All 'SELECT *' \set Table_Part_00 lineitem_range_part_00 \set Table_Part_01 lineitem_range_part_01 \set Table_Part_02 lineitem_range_part_02 \set Table_Part_03 lineitem_range_part_03 \set File_Basedir base/pgsql_job_cache \set Table_File_00 :File_Basedir/job_:JobId/task_:TaskId/p_00000 \set Table_File_01 :File_Basedir/job_:JobId/task_:TaskId/p_00001 \set Table_File_02 :File_Basedir/job_:JobId/task_:TaskId/p_00002 \set Table_File_03 :File_Basedir/job_:JobId/task_:TaskId/p_00003 -- Run select query, and apply range partitioning on query results SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[1, 3000, 12000]::_int8); worker_range_partition_table ------------------------------ (1 row) COPY :Table_Part_00 FROM :'Table_File_00'; COPY :Table_Part_01 FROM :'Table_File_01'; COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; count ------- 0 (1 row) SELECT COUNT(*) FROM :Table_Part_03; count ------- 3047 (1 row) -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
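-- The three split points ARRAY[1, 3000, 12000] used above define four
-- partitions: p_00000 receives rows with l_orderkey below the first split
-- point (empty here, hence the count of 0), p_00001 covers [1, 3000),
-- p_00002 covers [3000, 12000), and p_00003 everything at or above 12000.
-- The range filters in the difference queries below mirror exactly these
-- boundaries.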
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column < 1 ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 1 AND :Partition_Column < 3000 ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND :Partition_Column < 12000 ) diff; diff_lhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 12000 ) diff; diff_lhs_03 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE :Partition_Column < 1 EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 1 AND :Partition_Column < 3000 EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND :Partition_Column < 12000 EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 12000 EXCEPT ALL :Select_All FROM :Table_Part_03 ) diff; diff_rhs_03 ------------- 0 (1 row) citus-7.0.3/src/test/regress/expected/worker_range_partition_complex.out000066400000000000000000000076001317107136600267370ustar00rootroot00000000000000-- -- WORKER_RANGE_PARTITION_COMPLEX -- \set JobId 201010 \set TaskId 101102 \set Partition_Column l_partkey \set Partition_Column_Text '\'l_partkey\'' \set Partition_Column_Type 23 \set Select_Columns 'SELECT l_partkey, l_discount, l_shipdate, l_comment' \set Select_Filters 'l_shipdate >= date \'1992-01-15\' AND l_discount between 0.02 AND 0.08' \set Table_Part_00 lineitem_range_complex_part_00 \set Table_Part_01 lineitem_range_complex_part_01 \set Table_Part_02 lineitem_range_complex_part_02 \set Table_Part_03 lineitem_range_complex_part_03 -- Run hardcoded complex select query, and apply range partitioning on query -- results SELECT worker_range_partition_table(:JobId, :TaskId, 'SELECT l_partkey, l_discount, l_shipdate, l_comment' ' FROM lineitem ' ' WHERE l_shipdate >= date ''1992-01-15''' ' AND l_discount between 0.02 AND 0.08', :Partition_Column_Text, :Partition_Column_Type, ARRAY[101, 12000, 18000]::_int4); worker_range_partition_table ------------------------------ (1 row) -- Copy partitioned data files into tables for testing purposes COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00002'; COPY :Table_Part_03 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00003'; SELECT COUNT(*) FROM :Table_Part_00; count ------- 3 (1 row) SELECT COUNT(*) FROM :Table_Part_03; count ------- 7022 (1 row) -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_Columns FROM :Table_Part_00 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column < 101 ) diff; diff_lhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_Columns FROM :Table_Part_01 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 101 AND :Partition_Column < 12000 ) diff; diff_lhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_Columns FROM :Table_Part_02 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 12000 AND :Partition_Column < 18000 ) diff; diff_lhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_Columns FROM :Table_Part_03 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 18000 ) diff; diff_lhs_03 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column < 101 EXCEPT ALL :Select_Columns FROM :Table_Part_00 ) diff; diff_rhs_00 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 101 AND :Partition_Column < 12000 EXCEPT ALL :Select_Columns FROM :Table_Part_01 ) diff; diff_rhs_01 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 12000 AND :Partition_Column < 18000 EXCEPT ALL :Select_Columns FROM :Table_Part_02 ) diff; diff_rhs_02 ------------- 0 (1 row) SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 18000 EXCEPT ALL :Select_Columns FROM :Table_Part_03 ) diff; diff_rhs_03 ------------- 0 (1 row) citus-7.0.3/src/test/regress/input/000077500000000000000000000000001317107136600172345ustar00rootroot00000000000000citus-7.0.3/src/test/regress/input/multi_agg_distinct.source000066400000000000000000000101371317107136600243310ustar00rootroot00000000000000-- -- MULTI_AGG_DISTINCT -- -- Create a new range partitioned lineitem table and load data into it CREATE TABLE lineitem_range ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range'); SELECT master_create_empty_shard('lineitem_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('lineitem_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "500MB"; \copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' RESET citus.shard_max_size; -- Run aggregate(distinct) on partition column for range partitioned table SELECT count(distinct l_orderkey) FROM lineitem_range; SELECT avg(distinct l_orderkey) FROM lineitem_range; -- Run count(distinct) on 
join between a range partitioned table and a single -- sharded table. For this test, we also change a config setting to ensure that -- we don't repartition any of the tables during the query. SET citus.large_table_shard_count TO 2; SELECT p_partkey, count(distinct l_orderkey) FROM lineitem_range, part WHERE l_partkey = p_partkey GROUP BY p_partkey ORDER BY p_partkey LIMIT 10; RESET citus.large_table_shard_count; -- Check that we don't support count(distinct) on non-partition column, and -- complex expressions. SELECT count(distinct l_partkey) FROM lineitem_range; SELECT count(distinct (l_orderkey + 1)) FROM lineitem_range; -- Now test append partitioned tables. First run count(distinct) on a single -- sharded table. SELECT count(distinct p_mfgr) FROM part; SELECT p_mfgr, count(distinct p_partkey) FROM part GROUP BY p_mfgr ORDER BY p_mfgr; -- We don't support count(distinct) queries if table is append partitioned and -- has multiple shards SELECT count(distinct o_orderkey) FROM orders; -- Hash partitioned tables: CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); SELECT master_create_worker_shards('lineitem_hash', 4, 1); \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- aggregate(distinct) on partition column is allowed SELECT count(distinct l_orderkey) FROM lineitem_hash; SELECT avg(distinct l_orderkey) FROM lineitem_hash; -- count(distinct) on non-partition column or expression is not allowed SELECT count(distinct l_partkey) FROM lineitem_hash; SELECT count(distinct (l_orderkey + 1)) FROM lineitem_hash; -- agg(distinct) is allowed if we group by partition column SELECT l_orderkey, count(distinct l_partkey) INTO hash_results FROM lineitem_hash GROUP BY l_orderkey; SELECT l_orderkey, count(distinct l_partkey) INTO range_results FROM lineitem_range GROUP BY l_orderkey; -- they should return the same results SELECT * FROM hash_results h, range_results r WHERE h.l_orderkey = r.l_orderkey AND h.count != r.count; DROP TABLE lineitem_hash; citus-7.0.3/src/test/regress/input/multi_agg_type_conversion.source000066400000000000000000000036331317107136600257410ustar00rootroot00000000000000-- -- MULTI_AGG_TYPE_CONVERSION -- -- Test aggregate type conversions using sums of integers and division operator SELECT sum(l_suppkey) FROM lineitem; SELECT sum(l_suppkey) / 2 FROM lineitem; SELECT sum(l_suppkey) / 2::numeric FROM lineitem; SELECT sum(l_suppkey)::int8 / 2 FROM lineitem; -- Create a new table to test type conversions on different types, and load -- data into this table. Then, apply aggregate functions and divide / multiply -- the results to test type conversions. 
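-- Illustrative sketch (uses only constants, no distributed table): sum() over an
-- integer column returns bigint, so dividing it by an integer constant performs
-- truncating integer division unless one operand is cast to numeric, which is
-- what the division queries above exercise.
SELECT 7 / 2 AS int_division,
       7 / 2::numeric AS numeric_division,
       7::int8 / 2 AS bigint_division;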
CREATE TABLE aggregate_type ( float_value float(20) not null, double_value float(40) not null, interval_value interval not null); SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'); \copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' -- Test conversions using aggregates on floats and division SELECT min(float_value), max(float_value), sum(float_value), count(float_value), avg(float_value) FROM aggregate_type; SELECT min(float_value) / 2, max(float_value) / 2, sum(float_value) / 2, count(float_value) / 2, avg(float_value) / 2 FROM aggregate_type; -- Test conversions using aggregates on large floats and multiplication SELECT min(double_value), max(double_value), sum(double_value), count(double_value), avg(double_value) FROM aggregate_type; SELECT min(double_value) * 2, max(double_value) * 2, sum(double_value) * 2, count(double_value) * 2, avg(double_value) * 2 FROM aggregate_type; -- Test conversions using aggregates on intervals and division. We also use the -- default configuration value for IntervalStyle. SET IntervalStyle TO 'postgres'; SELECT min(interval_value), max(interval_value), sum(interval_value), count(interval_value), avg(interval_value) FROM aggregate_type; SELECT min(interval_value) / 2, max(interval_value) / 2, sum(interval_value) / 2, count(interval_value) / 2, avg(interval_value) / 2 FROM aggregate_type; citus-7.0.3/src/test/regress/input/multi_alter_table_statements.source000066400000000000000000000425171317107136600264260ustar00rootroot00000000000000-- -- MULTI_ALTER_TABLE_STATEMENTS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; -- Check that we can run ALTER TABLE statements on distributed tables. -- We set the shardid sequence here so that the shardids in this test -- aren't affected by changes to the previous tests. 
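-- Illustrative sketch: shard identifiers are drawn from this sequence, and the
-- shard relation names referenced later in this test (e.g. lineitem_alter_220000)
-- follow from it, so right after the RESTART above the sequence should report
-- 220000 as its next value.
SELECT last_value FROM pg_catalog.pg_dist_shardid_seq;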
CREATE TABLE lineitem_alter ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); \copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that we can add columns ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT; ALTER TABLE lineitem_alter ADD COLUMN date_column DATE; ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER DEFAULT 1; ALTER TABLE lineitem_alter ADD COLUMN int_column2 INTEGER DEFAULT 2; ALTER TABLE lineitem_alter ADD COLUMN null_column INTEGER; -- show changed schema on one worker \c - - - :worker_1_port SELECT attname, atttypid::regtype FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; \c - - - :master_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; -- Verify that SET|DROP DEFAULT works ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; -- \copy to verify that default values take effect \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; -- Verify that SET NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; -- Drop default so that NULLs will be inserted for this column ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; -- \copy should fail because it will try to insert NULLs for a NOT NULL column -- Note, this operation will create a table on the workers but it won't be in the metadata \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that DROP NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; -- \copy should succeed now \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT count(*) from 
lineitem_alter; -- Verify that SET DATA TYPE works SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; -- Verify that DROP COLUMN works ALTER TABLE lineitem_alter DROP COLUMN int_column1; ALTER TABLE lineitem_alter DROP COLUMN float_column; ALTER TABLE lineitem_alter DROP COLUMN date_column; -- Verify that RENAME COLUMN works ALTER TABLE lineitem_alter RENAME COLUMN l_orderkey TO l_orderkey_renamed; SELECT SUM(l_orderkey_renamed) FROM lineitem_alter; -- Verify that IF EXISTS works as expected ALTER TABLE non_existent_table ADD COLUMN new_column INTEGER; ALTER TABLE IF EXISTS non_existent_table ADD COLUMN new_column INTEGER; ALTER TABLE IF EXISTS lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE INTEGER; ALTER TABLE lineitem_alter DROP COLUMN non_existent_column; ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS non_existent_column; ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2; -- Verify with IF EXISTS for extant table ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey; SELECT SUM(l_orderkey) FROM lineitem_alter; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; -- Verify that we can execute commands with multiple subcommands ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER, ADD COLUMN int_column2 INTEGER; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER, ALTER COLUMN int_column1 SET STATISTICS 10; ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; -- Verify that we cannot execute alter commands on the distribution column ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey DROP NOT NULL; ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; -- Verify that we error out on unsupported statement types ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; ALTER TABLE lineitem_alter SET WITHOUT OIDS; -- Verify that we error out in case of postgres errors on supported statement -- types ALTER TABLE lineitem_alter ADD COLUMN new_column non_existent_type; ALTER TABLE lineitem_alter ALTER COLUMN null_column SET NOT NULL; ALTER TABLE lineitem_alter ALTER COLUMN l_partkey SET DEFAULT 'a'; -- Verify that we error out on non-column RENAME statements ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; ALTER TABLE lineitem_alter RENAME CONSTRAINT constraint_a TO constraint_b; -- Verify that IF EXISTS works as expected with RENAME statements ALTER TABLE non_existent_table RENAME TO non_existent_table_renamed; ALTER TABLE IF EXISTS non_existent_table RENAME TO non_existent_table_renamed; ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2; -- Verify that none of the failed alter table commands took effect on the master -- node SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; -- verify that non-propagated ddl commands are allowed inside a transaction block SET citus.enable_ddl_propagation to false; 
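-- Illustrative note: citus.enable_ddl_propagation controls whether DDL issued on
-- the coordinator is also sent to the worker shard placements; with it disabled,
-- the statements in the block below only affect the coordinator's own table.
-- The current value can be checked with:
SHOW citus.enable_ddl_propagation;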
BEGIN; CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; DROP INDEX temp_index_1; -- verify that single distributed ddl commands are allowed inside a transaction block SET citus.enable_ddl_propagation to true; BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; DROP INDEX temp_index_2; -- and so are multiple ddl statements BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ALTER TABLE lineitem_alter ADD COLUMN first integer; COMMIT; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; \d temp_index_2 ALTER TABLE lineitem_alter DROP COLUMN first; DROP INDEX temp_index_2; -- ensure that user-specified rollback causes full rollback BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -- ensure that errors cause full rollback BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ROLLBACK; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -- verify that SAVEPOINT is allowed... BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); SAVEPOINT my_savepoint; CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK; -- and also rolling back to it is also allowed BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); SAVEPOINT my_savepoint; CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK TO my_savepoint; COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; DROP INDEX temp_index_2; -- Add column on only one worker... \c - - - :worker_2_port ALTER TABLE lineitem_alter_220000 ADD COLUMN first integer; \c - - - :master_port -- and try to add it in a multi-statement block, which fails BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ALTER TABLE lineitem_alter ADD COLUMN first integer; COMMIT; -- Nothing from the block should have committed SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -- Create single-shard table (to avoid deadlocks in the upcoming test hackery) CREATE TABLE single_shard_items (id integer, name text); SELECT master_create_distributed_table('single_shard_items', 'id', 'hash'); SELECT master_create_worker_shards('single_shard_items', 1, 2); -- Drop the column from the worker... \c - - - :worker_2_port ALTER TABLE lineitem_alter_220000 DROP COLUMN first; -- Create table to trigger at-xact-end (deferred) failure CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); -- Use an event trigger to log all DDL event tags in it CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ BEGIN INSERT INTO ddl_commands VALUES (tg_tag); END; $ldt$ LANGUAGE plpgsql; CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); \c - - - :master_port -- The above trigger will cause failure at transaction end on one placement. 
-- We'll test 2PC first, as it should handle this "best" (no divergence) SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; CREATE INDEX single_index_2 ON single_shard_items(id); CREATE INDEX single_index_3 ON single_shard_items(name); COMMIT; -- Nothing from the block should have committed SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; -- Now try with 2pc off RESET citus.multi_shard_commit_protocol; BEGIN; CREATE INDEX single_index_2 ON single_shard_items(id); CREATE INDEX single_index_3 ON single_shard_items(name); COMMIT; -- The block should have committed with a warning SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; \c - - - :worker_2_port DROP EVENT TRIGGER log_ddl_tag; DROP FUNCTION log_ddl_tag(); DROP TABLE ddl_commands; \c - - - :master_port -- Distributed SELECTs cannot appear after ALTER BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); SELECT count(*) FROM lineitem_alter; COMMIT; -- but are allowed before BEGIN; SELECT count(*) FROM lineitem_alter; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; DROP INDEX temp_index_2; --- verify that distributed ddl commands can be used with 2pc SET citus.multi_shard_commit_protocol TO '2pc'; CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; DROP INDEX temp_index_3; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; RESET citus.multi_shard_commit_protocol; -- verify that not any of shard placements are marked as failed when a query failure occurs CREATE TABLE test_ab (a int, b int); SELECT master_create_distributed_table('test_ab', 'a', 'hash'); SELECT master_create_worker_shards('test_ab', 8, 2); INSERT INTO test_ab VALUES (2, 10); INSERT INTO test_ab VALUES (2, 11); CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a); SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard WHERE logicalrelid='test_ab'::regclass AND shardstate=3; -- Check that the schema on the worker still looks reasonable \c - - - :worker_1_port SELECT attname, atttypid::regtype FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; \c - - - :master_port -- verify that we don't intercept DDL commands if propagation is turned off SET citus.enable_ddl_propagation to false; -- table rename statement can be performed now ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; -- verify rename is performed SELECT relname FROM pg_class WHERE relname = 'lineitem_alter' or relname = 'lineitem_renamed'; -- revert it to original name ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; -- this column is added to master table and not workers ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int; -- verify newly added column is not present in a worker shard \c - - - :worker_1_port SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0; \c - - - :master_port -- ddl propagation flag is reset to default, disable it again SET citus.enable_ddl_propagation to false; -- following query succeeds since it accesses an previously existing column SELECT l_orderkey FROM lineitem_alter LIMIT 0; -- make master and workers have the same schema again ALTER TABLE lineitem_alter DROP COLUMN column_only_added_to_master; -- 
now this should succeed SELECT * FROM lineitem_alter LIMIT 0; -- previously unsupported statements are accepted by postgresql now ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; ALTER TABLE lineitem_alter SET WITHOUT OIDS; -- distribution column still cannot be dropped. ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; -- Even unique indexes on l_partkey (non-partition column) are allowed. -- Citus would have prevented that. CREATE UNIQUE INDEX unique_lineitem_partkey on lineitem_alter(l_partkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; -- verify index is not created on worker \c - - - :worker_1_port SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%'; \c - - - :master_port -- verify alter table and drop sequence in the same transaction does not cause deadlock CREATE TABLE sequence_deadlock_test (a serial, b serial); SELECT create_distributed_table('sequence_deadlock_test', 'a'); BEGIN; ALTER TABLE sequence_deadlock_test ADD COLUMN c int; DROP SEQUENCE sequence_deadlock_test_b_seq CASCADE; END; DROP TABLE sequence_deadlock_test; -- verify enable/disable trigger all works SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 1; CREATE TABLE trigger_table ( id int, value text ); SELECT create_distributed_table('trigger_table', 'id'); -- first set a trigger on a shard \c - - - :worker_1_port CREATE FUNCTION update_value() RETURNS trigger AS $up$ BEGIN NEW.value := 'trigger enabled'; RETURN NEW; END; $up$ LANGUAGE plpgsql; CREATE TRIGGER update_value BEFORE INSERT ON trigger_table_220056 FOR EACH ROW EXECUTE PROCEDURE update_value(); \c - - - :master_port INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; ALTER TABLE trigger_table DISABLE TRIGGER ALL; INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; ALTER TABLE trigger_table ENABLE TRIGGER ALL; INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; DROP TABLE trigger_table; -- test ALTER TABLE ALL IN TABLESPACE -- we expect that it will warn out CREATE TABLESPACE super_fast_ssd LOCATION '@abs_srcdir@/data'; ALTER TABLE ALL IN TABLESPACE pg_default SET TABLESPACE super_fast_ssd; ALTER TABLE ALL IN TABLESPACE super_fast_ssd SET TABLESPACE pg_default; DROP TABLESPACE super_fast_ssd; -- Cleanup the table and its shards SET citus.enable_ddl_propagation to true; SELECT master_apply_delete_command('DELETE FROM lineitem_alter'); DROP TABLE lineitem_alter; -- check that nothing's left over on workers, other than the leftover shard created -- during the unsuccessful COPY \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; \c - - - :master_port -- Test alter table with drop table in the same transaction BEGIN; CREATE TABLE test_table_1(id int); SELECT create_distributed_table('test_table_1','id'); ALTER TABLE test_table_1 ADD CONSTRAINT u_key UNIQUE(id); DROP TABLE test_table_1; END; -- There should be no test_table_1 shard on workers \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%'; \c - - - :master_port citus-7.0.3/src/test/regress/input/multi_append_table_to_shard.source000066400000000000000000000130121317107136600261660ustar00rootroot00000000000000-- -- 
MULTI_APPEND_TABLE_TO_SHARD -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 230000; -- Initialize tables to join CREATE TABLE multi_append_table_to_shard_right ( right_number INTEGER not null, right_text TEXT not null ); SELECT master_create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append'); CREATE TABLE multi_append_table_to_shard_left ( left_number INTEGER not null, left_text TEXT not null ); SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); CREATE TABLE multi_append_table_to_shard_right_hash ( right_number INTEGER not null, right_text TEXT not null ); SELECT master_create_distributed_table('multi_append_table_to_shard_right_hash', 'right_number', 'hash'); SELECT master_create_worker_shards('multi_append_table_to_shard_right_hash', 1, 1); -- Replicate 'left' table on both workers SELECT set_config('citus.shard_replication_factor', '2', false); \copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' \copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -- Place 'right' table only on the primary worker SELECT set_config('citus.shard_replication_factor', '1', false); \copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' -- Reset shard replication factor to ensure tasks will be assigned to both workers SELECT set_config('citus.shard_replication_factor', '2', false); -- All 8 rows in left table match a row in right table SELECT COUNT(*) FROM multi_append_table_to_shard_left, multi_append_table_to_shard_right WHERE left_number = right_number; -- Now append more data to the 'right' table CREATE TABLE multi_append_table_to_shard_stage ( number INTEGER not null, text TEXT not null ); COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data'; SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; -- Only the primary worker will see the new matches, as the secondary still uses a cached shard SELECT COUNT(*) FROM multi_append_table_to_shard_left, multi_append_table_to_shard_right WHERE left_number = right_number; -- Now add a lot of data to ensure we increase the size on disk DELETE FROM multi_append_table_to_shard_stage; COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'; SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; -- This join will refresh the shard on the secondary, all 8 rows in the left table will match twice (16) SELECT COUNT(*) FROM multi_append_table_to_shard_left, multi_append_table_to_shard_right WHERE left_number = right_number; -- Check that we error out if we try to append data to a hash partitioned table. 
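-- Illustrative sketch: the partitioning method of each distributed table is
-- recorded in pg_dist_partition ('a' for append, 'h' for hash, 'r' for range),
-- and shard appends are only meant for append-partitioned tables, which is why
-- the statements below are expected to error out.
SELECT logicalrelid, partmethod
FROM pg_dist_partition
WHERE logicalrelid IN ('multi_append_table_to_shard_right'::regclass,
                       'multi_append_table_to_shard_right_hash'::regclass)
ORDER BY logicalrelid;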
SELECT master_create_empty_shard('multi_append_table_to_shard_right_hash'); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right_hash'::regclass::oid = logicalrelid; -- Clean up after test SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right'); SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_left'); DROP TABLE multi_append_table_to_shard_stage; DROP TABLE multi_append_table_to_shard_right; DROP TABLE multi_append_table_to_shard_left; -- Check partitioning by date CREATE TABLE multi_append_table_to_shard_date ( event_date DATE, value INT ); SELECT master_create_distributed_table('multi_append_table_to_shard_date', 'event_date', 'append'); -- Create an empty shard and check that we can query the table SELECT master_create_empty_shard('multi_append_table_to_shard_date'); SELECT * FROM multi_append_table_to_shard_date; -- Create an empty distributed table and check that we can query it CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; SELECT * FROM multi_append_table_to_shard_date; -- INSERT NULL values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; SELECT * FROM multi_append_table_to_shard_date; -- INSERT regular values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; SELECT * FROM multi_append_table_to_shard_date; -- When run inside aborted transaction does not persist changes INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-02-02', 4); BEGIN; SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; ROLLBACK; SELECT * FROM multi_append_table_to_shard_date; DROP TABLE multi_append_table_to_shard_stage; DROP TABLE multi_append_table_to_shard_date; citus-7.0.3/src/test/regress/input/multi_behavioral_analytics_create_table.source000066400000000000000000000264241317107136600305550ustar00rootroot00000000000000-- -- multi behavioral analytics -- this file is intended to create the table requires for the tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000; SET citus.shard_replication_factor = 1; SET citus.shard_count = 4; CREATE TABLE users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('users_table', 'user_id'); CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('events_table', 'user_id'); CREATE TABLE agg_results (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time 
timestamp); SELECT create_distributed_table('agg_results', 'user_id'); -- we need this to improve the concurrency on the regression tests CREATE TABLE agg_results_second (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp); SELECT create_distributed_table('agg_results_second', 'user_id'); -- same as agg_results_second CREATE TABLE agg_results_third (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp); SELECT create_distributed_table('agg_results_third', 'user_id'); -- same as agg_results_second CREATE TABLE agg_results_fourth (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp); SELECT create_distributed_table('agg_results_fourth', 'user_id'); COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV; COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV; -- create indexes for CREATE INDEX is_index1 ON users_table(user_id); CREATE INDEX is_index2 ON events_table(user_id); CREATE INDEX is_index3 ON users_table(value_1); CREATE INDEX is_index4 ON events_table(event_type); CREATE INDEX is_index5 ON users_table(value_2); CREATE INDEX is_index6 ON events_table(value_2); -- Create composite type to use in subquery pushdown SELECT run_command_on_master_and_workers($f$ CREATE TYPE user_composite_type AS ( tenant_id BIGINT, user_id BIGINT ); $f$); SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION cmp_user_composite_type_function(user_composite_type, user_composite_type) RETURNS int LANGUAGE 'internal' AS 'btrecordcmp' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION gt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_gt' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION ge_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_ge' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION equal_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_eq' IMMUTABLE; $f$); SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION lt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_lt' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION le_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_lt' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR > ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = gt_user_composite_type_function ); $f$); SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR >= ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = ge_user_composite_type_function ); $f$); -- ... use that function to create a custom equality operator... SELECT run_command_on_master_and_workers($f$ -- ... use that function to create a custom equality operator... 
CREATE OPERATOR = ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = equal_user_composite_type_function, commutator = =, RESTRICT = eqsel, JOIN = eqjoinsel, merges, hashes ); $f$); SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR <= ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = le_user_composite_type_function ); $f$); SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR < ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = lt_user_composite_type_function ); $f$); -- ... and create a custom operator family for hash indexes... SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR FAMILY cats_2_op_fam USING hash; $f$); -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION test_composite_type_hash(user_composite_type) RETURNS int AS 'SELECT hashtext( ($1.tenant_id + $1.tenant_id)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR CLASS cats_2_op_fam_clas3 DEFAULT FOR TYPE user_composite_type USING BTREE AS OPERATOR 1 <= (user_composite_type, user_composite_type), OPERATOR 2 < (user_composite_type, user_composite_type), OPERATOR 3 = (user_composite_type, user_composite_type), OPERATOR 4 >= (user_composite_type, user_composite_type), OPERATOR 5 > (user_composite_type, user_composite_type), FUNCTION 1 cmp_user_composite_type_function(user_composite_type, user_composite_type); $f$); SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR CLASS cats_2_op_fam_class DEFAULT FOR TYPE user_composite_type USING HASH AS OPERATOR 1 = (user_composite_type, user_composite_type), FUNCTION 1 test_composite_type_hash(user_composite_type); $f$); CREATE TABLE events ( composite_id user_composite_type, event_id bigint, event_type character varying(255), event_time bigint ); SELECT master_create_distributed_table('events', 'composite_id', 'range'); SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)' WHERE shardid = :new_shard_id; \COPY events FROM STDIN WITH CSV "(1,1001)",20001,click,1472807012 "(1,1001)",20002,submit,1472807015 "(1,1001)",20003,pay,1472807020 "(1,1002)",20010,click,1472807022 "(1,1002)",20011,click,1472807023 "(1,1002)",20012,submit,1472807025 "(1,1002)",20013,pay,1472807030 "(1,1003)",20014,click,1472807032 "(1,1003)",20015,click,1472807033 "(1,1003)",20016,click,1472807034 "(1,1003)",20017,submit,1472807035 \. 
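-- Illustrative sketch: the composite-type shard ranges assigned above through the
-- pg_dist_shard updates can be inspected the same way, e.g.:
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'events'::regclass
ORDER BY shardid;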
CREATE TABLE users ( composite_id user_composite_type, lastseen bigint ); SELECT master_create_distributed_table('users', 'composite_id', 'range'); SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)' WHERE shardid = :new_shard_id; \COPY users FROM STDIN WITH CSV "(1,1001)",1472807115 "(1,1002)",1472807215 "(1,1003)",1472807315 \. -- Create tables for subquery tests CREATE TABLE lineitem_subquery ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range'); CREATE TABLE orders_subquery ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range'); SET citus.enable_router_execution TO 'false'; -- Check that we don't crash if there are not any shards. SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; -- Load data into tables. 
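-- Note on the psql pattern used throughout this file: SELECT ... AS new_shard_id \gset
-- stores the selected column into the psql variable :new_shard_id, which the
-- following UPDATE statements then interpolate. A minimal self-contained
-- illustration with a hypothetical variable name:
SELECT 42 AS sketch_value \gset
SELECT :sketch_value + 1 AS sketch_value_plus_one;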
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('orders_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('orders_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946 WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "1MB"; \copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' CREATE TABLE events_reference_table (like events_table including all); SELECT create_reference_table('events_reference_table'); INSERT INTO events_reference_table SELECT * FROM events_table; CREATE TABLE users_reference_table (like users_table including all); SELECT create_reference_table('users_reference_table'); INSERT INTO users_reference_table SELECT * FROM users_table; citus-7.0.3/src/test/regress/input/multi_complex_count_distinct.source000066400000000000000000000153111317107136600264510ustar00rootroot00000000000000-- -- COMPLEX_COUNT_DISTINCT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 240000; CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); SELECT master_create_worker_shards('lineitem_hash', 8, 1); \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' SET citus.task_executor_type to "task-tracker"; -- count(distinct) is supported on top level query if there -- is a grouping on the partition key SELECT l_orderkey, count(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; -- it is not supported if there is no grouping or grouping is on non-partition field SELECT count(DISTINCT l_partkey) FROM lineitem_hash ORDER BY 1 DESC LIMIT 10; SELECT l_shipmode, count(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_shipmode ORDER BY 2 DESC, 1 DESC LIMIT 10; -- count distinct is supported on single table subqueries SELECT * FROM ( SELECT l_orderkey, count(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; SELECT * FROM ( SELECT l_partkey, count(DISTINCT l_orderkey) FROM lineitem_hash GROUP BY l_partkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; -- count distinct with filters SELECT l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') FROM lineitem_hash 
GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; -- filter column already exists in target list SELECT * FROM ( SELECT l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_orderkey > 100) FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; -- filter column does not exist in target list SELECT * FROM ( SELECT l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; -- case expr in count distinct is supported. -- count orders partkeys if l_shipmode is air SELECT * FROM ( SELECT l_orderkey, count(DISTINCT CASE WHEN l_shipmode = 'AIR' THEN l_partkey ELSE NULL END) as count FROM lineitem_hash GROUP BY l_orderkey) sub WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; -- text like operator is also supported SELECT * FROM ( SELECT l_orderkey, count(DISTINCT CASE WHEN l_shipmode like '%A%' THEN l_partkey ELSE NULL END) as count FROM lineitem_hash GROUP BY l_orderkey) sub WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; -- count distinct is rejected if it does not reference any columns SELECT * FROM ( SELECT l_linenumber, count(DISTINCT 1) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; -- count distinct is rejected if it does not reference any columns SELECT * FROM ( SELECT l_linenumber, count(DISTINCT (random() * 5)::int) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; -- even non-const function calls are supported within count distinct SELECT * FROM ( SELECT l_orderkey, count(DISTINCT (random() * 5)::int = l_linenumber) FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 0; -- multiple nested subquery SELECT total, avg(avg_count) as total_avg_count FROM ( SELECT number_sum, count(DISTINCT l_suppkey) as total, avg(total_count) avg_count FROM ( SELECT l_suppkey, sum(l_linenumber) as number_sum, count(DISTINCT l_shipmode) as total_count FROM lineitem_hash WHERE l_partkey > 100 and l_quantity > 2 and l_orderkey < 10000 GROUP BY l_suppkey) as distributed_table WHERE number_sum >= 10 GROUP BY number_sum) as distributed_table_2 GROUP BY total ORDER BY total_avg_count DESC; -- multiple cases query SELECT * FROM ( SELECT count(DISTINCT CASE WHEN l_shipmode = 'TRUCK' THEN l_partkey WHEN l_shipmode = 'AIR' THEN l_quantity WHEN l_shipmode = 'SHIP' THEN l_discount ELSE l_suppkey END) as count, l_shipdate FROM lineitem_hash GROUP BY l_shipdate) sub WHERE count > 0 ORDER BY 1 DESC, 2 DESC LIMIT 10; -- count DISTINCT expression SELECT * FROM ( SELECT l_quantity, count(DISTINCT ((l_orderkey / 1000) * 1000 )) as count FROM lineitem_hash GROUP BY l_quantity) sub WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; -- count DISTINCT is part of an expression which inclues another aggregate SELECT * FROM ( SELECT sum(((l_partkey * l_tax) / 100)) / count(DISTINCT CASE WHEN l_shipmode = 'TRUCK' THEN l_partkey ELSE l_suppkey END) as avg, l_shipmode FROM lineitem_hash GROUP BY l_shipmode) sub ORDER BY 1 DESC, 2 DESC LIMIT 10; --- count DISTINCT CASE WHEN expression SELECT * FROM ( SELECT count(DISTINCT CASE WHEN l_shipmode = 'TRUCK' THEN l_linenumber WHEN l_shipmode = 'AIR' THEN l_linenumber + 10 ELSE 2 END) as avg FROM lineitem_hash GROUP BY l_shipdate) sub ORDER BY 1 DESC LIMIT 10; -- COUNT DISTINCT (c1, c2) SELECT * FROM (SELECT l_shipmode, count(DISTINCT (l_shipdate, l_tax)) FROM lineitem_hash GROUP BY l_shipmode) t ORDER BY 2 DESC,1 DESC LIMIT 10; -- other distinct aggregate are not supported SELECT * FROM ( 
SELECT l_linenumber, sum(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; SELECT * FROM ( SELECT l_linenumber, avg(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; -- whole row references, oid, and ctid are not supported in count distinct -- test table does not have oid or ctid enabled, so tests for them are skipped SELECT * FROM ( SELECT l_linenumber, count(DISTINCT lineitem_hash) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; SELECT * FROM ( SELECT l_linenumber, count(DISTINCT lineitem_hash.*) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; DROP TABLE lineitem_hash; citus-7.0.3/src/test/regress/input/multi_copy.source000066400000000000000000000574601317107136600226560ustar00rootroot00000000000000-- -- MULTI_COPY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000; -- Create a new hash-partitioned table into which to COPY CREATE TABLE customer_copy_hash ( c_custkey integer, c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117), primary key (c_custkey)); SELECT master_create_distributed_table('customer_copy_hash', 'c_custkey', 'hash'); -- Test COPY into empty hash-partitioned table COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|'); SELECT master_create_worker_shards('customer_copy_hash', 64, 1); -- Test empty copy COPY customer_copy_hash FROM STDIN; \. -- Test syntax error COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; 1,customer1 2,customer2, notinteger,customernot \. -- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; -- Test primary key violation COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); 1,customer1 2,customer2 2,customer2 \. -- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; -- Test headers option COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', HEADER true, FORCE_NULL (c_custkey)); # header 1,customer1 2,customer2 3,customer3 \. -- Confirm that only first row was skipped SELECT count(*) FROM customer_copy_hash; -- Test force_not_null option COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NOT_NULL (c_address)); "4","customer4","" \. -- Confirm that value is not null SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 4; -- Test force_null option COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NULL (c_address)); "5","customer5","" \. -- Confirm that value is null SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 5; -- Test null violation COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); 6,customer6 7,customer7 8, \. 
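-- Illustrative note: in CSV mode an unquoted empty field is read as NULL, so the
-- "8," row above violates the NOT NULL constraint on c_name, and since COPY is
-- atomic the two valid rows are rolled back along with it. A single-row INSERT
-- hits the same constraint:
INSERT INTO customer_copy_hash (c_custkey, c_name) VALUES (8, NULL);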
-- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; -- Test server-side copy from program COPY customer_copy_hash (c_custkey, c_name) FROM PROGRAM 'echo 9 customer9' WITH (DELIMITER ' '); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash WHERE c_custkey = 9; -- Test server-side copy from file COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.2.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; -- Test client-side copy from file \copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; -- Make sure that master_update_shard_statistics() only updates shard length for -- hash-partitioned tables SELECT master_update_shard_statistics(560000); SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560000; SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560000; -- Create a new hash-partitioned table with default now() function CREATE TABLE customer_with_default( c_custkey integer, c_name varchar(25) not null, c_time timestamp default now()); SELECT master_create_distributed_table('customer_with_default', 'c_custkey', 'hash'); SELECT master_create_worker_shards('customer_with_default', 64, 1); -- Test with default values for now() function COPY customer_with_default (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); 1,customer1 2,customer2 \. -- Confirm that data was copied with now() function SELECT count(*) FROM customer_with_default where c_time IS NOT NULL; -- Add columns to the table and perform a COPY ALTER TABLE customer_copy_hash ADD COLUMN extra1 INT DEFAULT 0; ALTER TABLE customer_copy_hash ADD COLUMN extra2 INT DEFAULT 0; COPY customer_copy_hash (c_custkey, c_name, extra1, extra2) FROM STDIN CSV; 10,customer10,1,5 \. SELECT * FROM customer_copy_hash WHERE extra1 = 1; -- Test dropping an intermediate column ALTER TABLE customer_copy_hash DROP COLUMN extra1; COPY customer_copy_hash (c_custkey, c_name, extra2) FROM STDIN CSV; 11,customer11,5 \. SELECT * FROM customer_copy_hash WHERE c_custkey = 11; -- Test dropping the last column ALTER TABLE customer_copy_hash DROP COLUMN extra2; COPY customer_copy_hash (c_custkey, c_name) FROM STDIN CSV; 12,customer12 \. 
SELECT * FROM customer_copy_hash WHERE c_custkey = 12; -- Create a new range-partitioned table into which to COPY CREATE TABLE customer_copy_range ( c_custkey integer, c_name varchar(25), c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117), primary key (c_custkey)); SELECT master_create_distributed_table('customer_copy_range', 'c_custkey', 'range'); -- Test COPY into empty range-partitioned table COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|'); SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000 WHERE shardid = :new_shard_id; -- Test copy into range-partitioned table COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|'); -- Check whether data went into the right shard (maybe) SELECT min(c_custkey), max(c_custkey), avg(c_custkey), count(*) FROM customer_copy_range WHERE c_custkey <= 500; -- Check whether data was copied SELECT count(*) FROM customer_copy_range; -- Manipulate min/max values and check shard statistics for new shard UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = :new_shard_id; SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id; -- Update shard statistics for range-partitioned shard and check that only the -- shard length is updated. SELECT master_update_shard_statistics(:new_shard_id); SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id; -- Revert back min/max value updates UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000 WHERE shardid = :new_shard_id; -- Create a new append-partitioned table into which to COPY CREATE TABLE customer_copy_append ( c_custkey integer, c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117)); SELECT master_create_distributed_table('customer_copy_append', 'c_custkey', 'append'); -- Test syntax error COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); 1,customer1 2,customer2 notinteger,customernot \. -- Test that no shard is created for failing copy SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; -- Test empty copy COPY customer_copy_append FROM STDIN; \. -- Test that no shard is created for copying zero rows SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; -- Test proper copy COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); 1,customer1 2,customer2 \. 
-- Check whether data was copied properly SELECT * FROM customer_copy_append; -- Manipulate and check shard statistics for append-partitioned table shard UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560131; UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560131; SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; -- Update shard statistics for append-partitioned shard SELECT master_update_shard_statistics(560131); SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; -- Create lineitem table CREATE TABLE lineitem_copy_append ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null); SELECT master_create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append'); -- Test multiple shard creation SET citus.shard_max_size TO '256kB'; COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass; -- Test round robin shard policy SET citus.shard_replication_factor TO 1; COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'; SELECT pg_dist_shard_placement.shardid, pg_dist_shard_placement.nodeport FROM pg_dist_shard, pg_dist_shard_placement WHERE pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND logicalrelid = 'lineitem_copy_append'::regclass ORDER BY pg_dist_shard.shardid DESC LIMIT 5; -- Ensure that copy from worker node of table with serial column fails CREATE TABLE customer_worker_copy_append_seq (id integer, seq serial); SELECT master_create_distributed_table('customer_worker_copy_append_seq', 'id', 'append'); -- Connect to the first worker node \c - - - 57637 -- Test copy from the worker node COPY customer_worker_copy_append_seq FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', master_host 'localhost', master_port 57636); -- Connect back to the master node \c - - - 57636 -- Create customer table for the worker copy with constraint and index CREATE TABLE customer_worker_copy_append ( c_custkey integer , c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117), primary key (c_custkey)); CREATE INDEX ON customer_worker_copy_append (c_name); SELECT master_create_distributed_table('customer_worker_copy_append', 'c_custkey', 'append'); -- Connect to the first worker node \c - - - 57637 -- Test copy from the worker node COPY customer_worker_copy_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', master_host 'localhost', master_port 57636); -- Make sure we don't use 2PC when connecting to master, even if requested BEGIN; SET LOCAL citus.multi_shard_commit_protocol TO '2pc'; COPY customer_worker_copy_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|',
master_host 'localhost', master_port 57636); COMMIT; -- Test if there is no relation to copy data with the worker copy COPY lineitem_copy_none FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', master_host 'localhost', master_port 57636); -- Connect back to the master node \c - - - 57636 -- Test the content of the table SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM customer_worker_copy_append; -- Test schema support on append partitioned tables CREATE SCHEMA append; CREATE TABLE append.customer_copy ( c_custkey integer , c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117)); SELECT master_create_distributed_table('append.customer_copy', 'c_custkey', 'append'); -- Test copy from the master node COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|'); -- Test copy from the worker node \c - - - 57637 COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', master_host 'localhost', master_port 57636); -- Connect back to the master node \c - - - 57636 -- Test the content of the table SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy; -- Test with table name which contains special character CREATE TABLE "customer_with_special_\\_character"( c_custkey integer, c_name varchar(25) not null); SELECT master_create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash'); SELECT master_create_worker_shards('"customer_with_special_\\_character"', 4, 1); COPY "customer_with_special_\\_character" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); 1,customer1 2,customer2 \. -- Confirm that data was copied SELECT count(*) FROM "customer_with_special_\\_character"; -- Test with table name which starts with number CREATE TABLE "1_customer"( c_custkey integer, c_name varchar(25) not null); SELECT master_create_distributed_table('"1_customer"', 'c_custkey', 'hash'); SELECT master_create_worker_shards('"1_customer"', 4, 1); COPY "1_customer" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); 1,customer1 2,customer2 \. 
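-- (Both table names above have to be double-quoted, since one contains
-- backslashes and the other starts with a digit; this presumably also
-- exercises identifier quoting in the per-shard commands Citus generates.
-- The count below confirms the rows were loaded.)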
-- Confirm that data was copied SELECT count(*) FROM "1_customer"; -- Test COPY with types having different Oid at master and workers CREATE TYPE number_pack AS ( number1 integer, number2 integer ); CREATE TYPE super_number_pack AS ( packed_number1 number_pack, packed_number2 number_pack ); -- Create same types in worker1 \c - - - :worker_1_port CREATE TYPE number_pack AS ( number1 integer, number2 integer ); CREATE TYPE super_number_pack AS ( packed_number1 number_pack, packed_number2 number_pack ); -- Create same types in worker2 \c - - - :worker_2_port CREATE TYPE number_pack AS ( number1 integer, number2 integer ); CREATE TYPE super_number_pack AS ( packed_number1 number_pack, packed_number2 number_pack ); -- Connect back to master \c - - - :master_port -- Test array of user-defined type with hash distribution CREATE TABLE packed_numbers_hash ( id integer, packed_numbers number_pack[] ); SELECT master_create_distributed_table('packed_numbers_hash', 'id', 'hash'); SELECT master_create_worker_shards('packed_numbers_hash', 4, 1); COPY (SELECT 1, ARRAY[ROW(42, 42), ROW(42, 42)]) TO '/tmp/copy_test_array_of_composite'; COPY packed_numbers_hash FROM '/tmp/copy_test_array_of_composite'; -- Verify data is actually copied SELECT * FROM packed_numbers_hash; -- Test composite type containing an element with different Oid with hash distribution CREATE TABLE super_packed_numbers_hash ( id integer, super_packed_number super_number_pack ); SELECT master_create_distributed_table('super_packed_numbers_hash', 'id', 'hash'); SELECT master_create_worker_shards('super_packed_numbers_hash', 4, 1); COPY (SELECT 1, ROW(ROW(42, 42), ROW(42, 42))) TO '/tmp/copy_test_composite_of_composite'; COPY super_packed_numbers_hash FROM '/tmp/copy_test_composite_of_composite'; -- Verify data is actually copied SELECT * FROM super_packed_numbers_hash; -- Test array of user-defined type with append distribution CREATE TABLE packed_numbers_append ( id integer, packed_numbers number_pack[] ); SELECT master_create_distributed_table('packed_numbers_append', 'id', 'append'); COPY packed_numbers_append FROM '/tmp/copy_test_array_of_composite'; -- Verify data is actually copied SELECT * FROM packed_numbers_append; -- Test composite type containing an element with different Oid with append distribution CREATE TABLE super_packed_numbers_append ( id integer, super_packed_number super_number_pack ); SELECT master_create_distributed_table('super_packed_numbers_append', 'id', 'append'); COPY super_packed_numbers_append FROM '/tmp/copy_test_composite_of_composite'; -- Verify data is actually copied SELECT * FROM super_packed_numbers_append; -- Test copy on append for composite type partition column CREATE TABLE composite_partition_column_table( id integer, composite_column number_pack ); SELECT master_create_distributed_table('composite_partition_column_table', 'composite_column', 'append'); \COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv'); 1,"(1,1)" 2,"(2,2)" \. -- Test copy on append distributed tables do not create shards on removed workers CREATE TABLE numbers_append (a int, b int); SELECT master_create_distributed_table('numbers_append', 'a', 'append'); -- no shards is created yet SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 \. COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); 3,5 4,6 \. 
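-- (Each COPY into an append-distributed table creates a brand new shard, and
-- with the default round-robin placement policy the new placements are spread
-- across the currently active workers; the placement listings below rely on
-- that behavior.)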
-- verify there are shards at both workers SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; -- disable the first node SELECT master_disable_node('localhost', :worker_1_port); -- set replication factor to 1 so that copy will -- succeed without replication count error SET citus.shard_replication_factor TO 1; -- add two new shards and verify they are created at the other node COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); 5,7 6,8 \. COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); 7,9 8,10 \. SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; -- add the node back SELECT 1 FROM master_activate_node('localhost', :worker_1_port); RESET citus.shard_replication_factor; -- add two new shards and verify they are created at both workers COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); 9,11 10,12 \. COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); 11,13 12,14 \. SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; DROP TABLE numbers_append; -- Test copy failures against connection failures -- create and switch to test user CREATE USER test_user; SELECT * FROM run_command_on_workers('CREATE USER test_user'); \c - test_user SET citus.shard_count to 4; CREATE TABLE numbers_hash (a int, b int); SELECT create_distributed_table('numbers_hash', 'a'); COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 \. -- verify each placement is active SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; -- create a reference table CREATE TABLE numbers_reference(a int, b int); SELECT create_reference_table('numbers_reference'); COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 \. -- create another hash distributed table CREATE TABLE numbers_hash_other(a int, b int); SELECT create_distributed_table('numbers_hash_other', 'a'); SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; -- manually corrupt pg_dist_shard such that both copies of one shard is placed in -- worker_1. This is to test the behavior when no replica of a shard is accessible. -- Whole copy operation is supposed to fail and rollback. \c - :default_user UPDATE pg_dist_shard_placement SET nodeport = :worker_1_port WHERE shardid = 560176; -- disable test_user on the first worker \c - :default_user - :worker_1_port ALTER USER test_user WITH nologin; \c - test_user - :master_port -- reissue copy COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 \. -- verify shards in the first worker as marked invalid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; -- try to insert into a reference table copy should fail COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); 3,1 4,2 \. 
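-- (numbers_reference is a reference table, so its shard is replicated to
-- every node and the COPY above must succeed on all placements; with
-- test_user unable to log in on the first worker the whole statement is
-- expected to fail and roll back, which the check below verifies.)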
-- verify shards for reference table are still valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_reference'::regclass order by placementid; -- try to insert into numbers_hash_other. copy should fail and rollback -- since it can not insert into either copies of a shard. shards are expected to -- stay valid since the operation is rolled back. COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 3,3 \. -- verify shards for numbers_hash_other are still valid -- since copy has failed altogether SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; -- re-enable test_user on the first worker \c - :default_user - :worker_1_port ALTER USER test_user WITH login; -- there is a dangling shard in worker_2, drop it \c - test_user - :worker_2_port DROP TABLE numbers_hash_other_560176; \c - test_user - :master_port DROP TABLE numbers_hash; DROP TABLE numbers_hash_other; DROP TABLE numbers_reference; \c - :default_user -- test copy failure inside the node -- it will be done by changing definition of a shard table SET citus.shard_count to 4; CREATE TABLE numbers_hash(a int, b int); SELECT create_distributed_table('numbers_hash', 'a'); \c - - - :worker_1_port ALTER TABLE numbers_hash_560180 DROP COLUMN b; \c - - - :master_port -- operation will fail to modify a shard and roll back COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 3,3 4,4 5,5 6,6 7,7 8,8 \. -- verify no row is inserted SELECT count(a) FROM numbers_hash; -- verify shard is still marked as valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; DROP TABLE numbers_hash; SELECT * FROM run_command_on_workers('DROP USER test_user'); DROP USER test_user; -- Test copy with built-in type without binary output function CREATE TABLE test_smgr ( col1 smgr NOT NULL, col2 character varying(255) NOT NULL ); SELECT create_reference_table('test_smgr'); \COPY test_smgr FROM STDIN WITH (format CSV) magnetic disk, test \. SELECT * FROM test_smgr; DROP TABLE test_smgr; -- Test drop table with copy in the same transaction BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); \copy tt1 from STDIN; 1 2 \. DROP TABLE tt1; END; -- Test dropping a column in front of the partition column CREATE TABLE drop_copy_test_table (col1 int, col2 int, col3 int, col4 int); SELECT create_distributed_table('drop_copy_test_table','col3'); ALTER TABLE drop_copy_test_table drop column col1; COPY drop_copy_test_table (col2,col3,col4) from STDIN with CSV; ,1, ,2, \. SELECT * FROM drop_copy_test_table WHERE col3 = 1; ALTER TABLE drop_copy_test_table drop column col4; COPY drop_copy_test_table (col2,col3) from STDIN with CSV; ,1 ,2 \. 
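-- (At this point col1 and col4 have been dropped, leaving col2 and the
-- distribution column col3; the SELECT below confirms rows are still routed
-- and loaded correctly after the drops.)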
SELECT * FROM drop_copy_test_table WHERE col3 = 1; DROP TABLE drop_copy_test_table; -- There should be no "tt1" shard on the worker nodes \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'tt1%'; \c - - - :master_port citus-7.0.3/src/test/regress/input/multi_create_schema.source000066400000000000000000000006331317107136600244550ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 250000; CREATE SCHEMA tpch CREATE TABLE nation ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); \copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' SELECT count(*) from tpch.nation; citus-7.0.3/src/test/regress/input/multi_large_shardid.source000066400000000000000000000026051317107136600244630ustar00rootroot00000000000000-- -- MULTI_LARGE_SHARDID -- -- Load data into distributed tables, and run TPC-H query #1 and #6. This test -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; -- Load additional data to start using large shard identifiers. \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- Query #1 from the TPC-H decision support benchmark. SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; -- Query #6 from the TPC-H decision support benchmark. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; citus-7.0.3/src/test/regress/input/multi_load_data.source000066400000000000000000000026161317107136600236050ustar00rootroot00000000000000-- -- MULTI_LOAD_DATA -- -- Tests for loading data in a distributed cluster. Please note that the number -- of shards uploaded depends on two config values: citus.shard_replication_factor and -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement -- policy is left to the default value (round-robin) to test the common install case. 
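-- For reference, the two settings mentioned above can also be set per session
-- when experimenting outside the regression harness, for example:
--   SET citus.shard_max_size TO '256kB';
--   SET citus.shard_replication_factor TO 2;
-- (the values shown are only illustrative; pg_regress_multi.pl supplies the
-- ones this schedule actually runs with).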
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' \copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' \copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' citus-7.0.3/src/test/regress/input/multi_load_large_records.source000066400000000000000000000014221317107136600255010ustar00rootroot00000000000000-- -- MULTI_STAGE_LARGE_RECORDS -- -- Tests for loading data with large records (i.e. greater than the read buffer -- size, which is 32kB) in a distributed cluster. These tests make sure that we -- are creating shards of correct size even when records are large. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000; SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); \copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class WHERE pg_class.oid=logicalrelid AND relname='large_records_table' ORDER BY shardid; RESET citus.shard_max_size; citus-7.0.3/src/test/regress/input/multi_load_more_data.source000066400000000000000000000010571317107136600246250ustar00rootroot00000000000000-- -- MULTI_STAGE_MORE_DATA -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; -- We load more data to customer and part tables to test distributed joins. The -- loading causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' citus-7.0.3/src/test/regress/input/multi_master_delete_protocol.source000066400000000000000000000050701317107136600264300ustar00rootroot00000000000000-- -- MULTI_MASTER_DELETE_PROTOCOL -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000; -- Create a new range partitioned customer_delete_protocol table and load data into it. 
CREATE TABLE customer_delete_protocol ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); \copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' \copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -- Testing master_apply_delete_command -- Check that we don't support conditions on columns other than partition key. SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_acctbal > 0.0'); -- Check that free-form deletes are not supported. DELETE FROM customer_delete_protocol WHERE c_custkey > 100; -- Check that we delete a shard if and only if all rows in the shard satisfy the condition. SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 6500'); SELECT count(*) from customer_delete_protocol; -- Delete one shard that satisfies the given conditions. SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000 AND c_custkey < 3000'); SELECT count(*) from customer_delete_protocol; -- Delete all shards if no condition is provided. SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); SELECT count(*) FROM customer_delete_protocol; -- Verify that empty shards are deleted if no condition is provided SELECT 1 AS one FROM master_create_empty_shard('customer_delete_protocol'); SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000'); SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); -- Verify that master_apply_delete_command can be called in a transaction block SELECT 1 AS one FROM master_create_empty_shard('customer_delete_protocol'); BEGIN; SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); COMMIT; citus-7.0.3/src/test/regress/input/multi_mx_copy_data.source000066400000000000000000000024371317107136600243450ustar00rootroot00000000000000-- -- MULTI_MX_COPY_DATA -- \COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; SET search_path TO citus_mx_test_schema; \COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; \COPY citus_mx_test_schema_join_1.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; \COPY citus_mx_test_schema_join_1.nation_hash_2 FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; \COPY citus_mx_test_schema_join_2.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; -- now try loading data from worker node \c - - - :worker_1_port SET search_path TO public; \COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -- and use second worker as well \c - - - :worker_2_port SET search_path TO public; \COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' \COPY nation_mx FROM '@abs_srcdir@/data/nation.data' with delimiter '|' \COPY part_mx FROM '@abs_srcdir@/data/part.data' with delimiter '|' \COPY 
supplier_mx FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' citus-7.0.3/src/test/regress/input/multi_outer_join.source000066400000000000000000000331371317107136600240540ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 310000; SET citus.large_table_shard_count TO 2; SET citus.log_multi_join_order to true; SET client_min_messages TO LOG; CREATE TABLE multi_outer_join_left ( l_custkey integer not null, l_name varchar(25) not null, l_address varchar(40) not null, l_nationkey integer not null, l_phone char(15) not null, l_acctbal decimal(15,2) not null, l_mktsegment char(10) not null, l_comment varchar(117) not null ); SELECT master_create_distributed_table('multi_outer_join_left', 'l_custkey', 'append'); CREATE TABLE multi_outer_join_right ( r_custkey integer not null, r_name varchar(25) not null, r_address varchar(40) not null, r_nationkey integer not null, r_phone char(15) not null, r_acctbal decimal(15,2) not null, r_mktsegment char(10) not null, r_comment varchar(117) not null ); SELECT master_create_distributed_table('multi_outer_join_right', 'r_custkey', 'append'); CREATE TABLE multi_outer_join_third ( t_custkey integer not null, t_name varchar(25) not null, t_address varchar(40) not null, t_nationkey integer not null, t_phone char(15) not null, t_acctbal decimal(15,2) not null, t_mktsegment char(10) not null, t_comment varchar(117) not null ); SELECT master_create_distributed_table('multi_outer_join_third', 't_custkey', 'append'); -- Make sure we do not crash if both tables have no shards SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey); -- Left table is a large table \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has no shards SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey); SELECT min(t_custkey), max(t_custkey) FROM multi_outer_join_third a LEFT JOIN multi_outer_join_right b ON (r_custkey = t_custkey); -- Third table is a single shard table with all data \copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); -- Since this is a broadcast join, we should be able to join on any key SELECT count(*) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_nationkey = r_nationkey); -- Anti-join should return customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; -- Partial anti-join with specific value SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 5; -- This query is an INNER JOIN in disguise since there cannot be NULL results -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON 
(l_custkey = r_custkey) WHERE r_custkey = 5 or r_custkey > 15; -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND r_custkey = 5); -- Apply a filter before the join (no matches right) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND r_custkey = -1 /* nonexistant */); -- Apply a filter before the join (no matches left) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND l_custkey = -1 /* nonexistant */); -- Right join should be disallowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); -- Reverse right join should be same as left join SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_right a RIGHT JOIN multi_outer_join_left b ON (l_custkey = r_custkey); -- Turn the right table into a large table \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Shards do not have 1-1 matching. We should error here. SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); -- empty tables SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left'); SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); -- reload shards with 1-1 matching \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third is a single shard table -- Regular left join should work as expected SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); -- Since we cannot broadcast or re-partition, joining on a different key should error out SELECT count(*) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_nationkey = r_nationkey); -- Anti-join should return customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; -- Partial anti-join with specific value (5, 11-15) SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 15; -- This query is an INNER JOIN in disguise since there cannot be NULL results (21) -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey = 21 or r_custkey < 10; -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND r_custkey = 21); -- Right join should be allowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); -- Reverse right join should be 
same as left join SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_right a RIGHT JOIN multi_outer_join_left b ON (l_custkey = r_custkey); -- complex query tree should error out SELECT * FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left l2 ON (r2.r_custkey = l2.l_custkey); -- add an anti-join, this should also error out SELECT * FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left l2 ON (r2.r_custkey = l2.l_custkey) WHERE r1.r_custkey is NULL; -- Three way join 2-2-1 (local + broadcast join) should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third t1 ON (r1.r_custkey = t1.t_custkey); -- Right join with single shard right most table should error out SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_third t1 ON (r1.r_custkey = t1.t_custkey); -- Right join with single shard left most table should work SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third t1 RIGHT JOIN multi_outer_join_right r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left l1 ON (r1.r_custkey = l1.l_custkey); -- Make it anti-join, should display values with l_custkey is null SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third t1 RIGHT JOIN multi_outer_join_right r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left l1 ON (r1.r_custkey = l1.l_custkey) WHERE l_custkey is NULL; -- Cascading right join with single shard left most table should error out SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third t1 RIGHT JOIN multi_outer_join_right r1 ON (t1.t_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_left l1 ON (r1.r_custkey = l1.l_custkey); -- full outer join should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); -- full outer join + anti (right) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL; -- full outer join + anti (left) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL; -- full outer join + anti (both) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL or r_custkey is NULL; -- full outer join should error out for mismatched shards SELECT l_custkey, t_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey); -- inner join + single shard left join should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third t1 ON (r1.r_custkey = t1.t_custkey); -- inner (broadcast) join + 2 shards left (local) join should work SELECT l_custkey, 
t_custkey, r_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); -- inner (local) join + 2 shards left (dual partition) join should error out SELECT t_custkey, l_custkey, r_custkey FROM multi_outer_join_third t1 INNER JOIN multi_outer_join_left l1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); -- inner (local) join + 2 shards left (dual partition) join should error out SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); -- inner (broadcast) join + 2 shards left (local) + anti join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL; -- Test joinExpr aliases by performing an outer-join. SELECT t_custkey FROM (multi_outer_join_right r1 LEFT OUTER JOIN multi_outer_join_left l1 ON (l1.l_custkey = r1.r_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN multi_outer_join_third t1 ON (test.c_custkey = t1.t_custkey); -- flattened out subqueries with outer joins are not supported SELECT l1.l_custkey, count(*) as cnt FROM ( SELECT l_custkey, l_nationkey FROM multi_outer_join_left WHERE l_comment like '%a%' ) l1 LEFT JOIN ( SELECT r_custkey, r_name FROM multi_outer_join_right WHERE r_comment like '%b%' ) l2 ON l1.l_custkey = l2.r_custkey GROUP BY l1.l_custkey ORDER BY cnt DESC, l1.l_custkey DESC LIMIT 20; -- Add a shard to the left table that overlaps with multiple shards in the right \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -- All outer joins should error out SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a FULL JOIN multi_outer_join_right b ON (l_custkey = r_custkey); SELECT t_custkey FROM (multi_outer_join_right r1 LEFT OUTER JOIN multi_outer_join_left l1 ON (l1.l_custkey = r1.r_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN multi_outer_join_third t1 ON (test.c_custkey = t1.t_custkey); -- simple test to ensure anti-joins work with hash-partitioned tables CREATE TABLE left_values(val int); SELECT master_create_distributed_table('left_values', 'val', 'hash'); SELECT master_create_worker_shards('left_values', 16, 1); \copy left_values from stdin 1 2 3 4 5 \. CREATE TABLE right_values(val int); SELECT master_create_distributed_table('right_values', 'val', 'hash'); SELECT master_create_worker_shards('right_values', 16, 1); \copy right_values from stdin 2 3 4 \. 
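-- (left_values now holds 1 through 5 and right_values holds 2 through 4, so
-- the anti-join below should return only the unmatched values 1 and 5.)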
SELECT * FROM left_values AS l LEFT JOIN right_values AS r ON l.val = r.val WHERE r.val IS NULL; citus-7.0.3/src/test/regress/input/multi_outer_join_reference.source000066400000000000000000000347761317107136600261040ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000; SET citus.log_multi_join_order to true; SET client_min_messages TO LOG; SET citus.shard_count TO 4; CREATE TABLE multi_outer_join_left_hash ( l_custkey integer not null, l_name varchar(25) not null, l_address varchar(40) not null, l_nationkey integer not null, l_phone char(15) not null, l_acctbal decimal(15,2) not null, l_mktsegment char(10) not null, l_comment varchar(117) not null ); SELECT create_distributed_table('multi_outer_join_left_hash', 'l_custkey'); CREATE TABLE multi_outer_join_right_reference ( r_custkey integer not null, r_name varchar(25) not null, r_address varchar(40) not null, r_nationkey integer not null, r_phone char(15) not null, r_acctbal decimal(15,2) not null, r_mktsegment char(10) not null, r_comment varchar(117) not null ); SELECT create_reference_table('multi_outer_join_right_reference'); CREATE TABLE multi_outer_join_third_reference ( t_custkey integer not null, t_name varchar(25) not null, t_address varchar(40) not null, t_nationkey integer not null, t_phone char(15) not null, t_acctbal decimal(15,2) not null, t_mktsegment char(10) not null, t_comment varchar(117) not null ); SELECT create_reference_table('multi_outer_join_third_reference'); CREATE TABLE multi_outer_join_right_hash ( r_custkey integer not null, r_name varchar(25) not null, r_address varchar(40) not null, r_nationkey integer not null, r_phone char(15) not null, r_acctbal decimal(15,2) not null, r_mktsegment char(10) not null, r_comment varchar(117) not null ); SELECT create_distributed_table('multi_outer_join_right_hash', 'r_custkey'); -- Make sure we do not crash if both tables are emmpty SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_third_reference b ON (l_custkey = t_custkey); -- Left table is a large table \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has data SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_third_reference b ON (l_custkey = t_custkey); SELECT min(t_custkey), max(t_custkey) FROM multi_outer_join_third_reference a LEFT JOIN multi_outer_join_right_reference b ON (r_custkey = t_custkey); -- Third table is a single shard table with all data \copy multi_outer_join_third_reference FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey); -- Since this is a broadcast join, we should be able to join on any key SELECT count(*) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_nationkey = r_nationkey); -- Anti-join should return 
customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; -- Partial anti-join with specific value SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 5; -- This query is an INNER JOIN in disguise since there cannot be NULL results -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey = 5 or r_custkey > 15; -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND r_custkey = 5); -- Apply a filter before the join (no matches right) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND r_custkey = -1 /* nonexistant */); -- Apply a filter before the join (no matches left) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND l_custkey = -1 /* nonexistant */); -- Right join should be disallowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left_hash a RIGHT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey); -- Reverse right join should be same as left join SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_right_reference a RIGHT JOIN multi_outer_join_left_hash b ON (l_custkey = r_custkey); -- load some more data \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Update shards so that they do not have 1-1 matching, triggering an error. 
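-- (With citus.shard_count set to 4 above, each hash shard covers roughly a
-- quarter of the 32-bit hash token space; the first UPDATE below pushes this
-- shard's range to the very top of that space so it no longer lines up 1-1
-- with the corresponding shard of the other hash table, and the later UPDATEs
-- restore what appears to be its original range of
-- [-2147483648, -1073741825].)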
UPDATE pg_dist_shard SET shardminvalue = '2147483646' WHERE shardid = 1260006; UPDATE pg_dist_shard SET shardmaxvalue = '2147483647' WHERE shardid = 1260006; SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_hash b ON (l_custkey = r_custkey); UPDATE pg_dist_shard SET shardminvalue = '-2147483648' WHERE shardid = 1260006; UPDATE pg_dist_shard SET shardmaxvalue = '-1073741825' WHERE shardid = 1260006; -- empty tables SELECT master_modify_multiple_shards('DELETE FROM multi_outer_join_left_hash'); SELECT master_modify_multiple_shards('DELETE FROM multi_outer_join_right_hash'); DELETE FROM multi_outer_join_right_reference; -- reload shards with 1-1 matching \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third_reference is a single shard table -- Regular left join should work as expected SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_hash b ON (l_custkey = r_custkey); -- Citus can use broadcast join here SELECT count(*) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_hash b ON (l_nationkey = r_nationkey); -- Anti-join should return customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; -- Partial anti-join with specific value (5, 11-15) SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 15; -- This query is an INNER JOIN in disguise since there cannot be NULL results (21) -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey = 21 or r_custkey < 10; -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND r_custkey = 21); -- Right join should not be allowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left_hash a RIGHT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey); -- Reverse right join should be same as left join SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_right_reference a RIGHT JOIN multi_outer_join_left_hash b ON (l_custkey = r_custkey); -- complex query tree should error out SELECT * FROM multi_outer_join_left_hash l1 LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right_reference r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left_hash l2 ON (r2.r_custkey = l2.l_custkey); -- add an anti-join, this should also error out SELECT * FROM multi_outer_join_left_hash l1 LEFT JOIN 
multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right_reference r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left_hash l2 ON (r2.r_custkey = l2.l_custkey) WHERE r1.r_custkey is NULL; -- Three way join 2-1-1 (broadcast + broadcast join) should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left_hash l1 LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey) ORDER BY 1; -- Right join with single shard right most table should error out SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left_hash l1 LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey); -- Right join with single shard left most table should work SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third_reference t1 RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey) ORDER BY 1,2,3; -- Make it anti-join, should display values with l_custkey is null SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third_reference t1 RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey) WHERE l_custkey is NULL ORDER BY 1; -- Cascading right join with single shard left most table should error out SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third_reference t1 RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey); -- full outer join should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) ORDER BY 1,2; -- full outer join + anti (right) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL ORDER BY 1; -- full outer join + anti (left) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL ORDER BY 2; -- full outer join + anti (both) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL or r_custkey is NULL ORDER BY 1,2 DESC; -- full outer join should error out for mismatched shards SELECT l_custkey, t_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey); -- inner join + single shard left join should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey) ORDER BY 1; -- inner (broadcast) join + 2 shards left (local) join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) ORDER BY 
1,2,3; -- inner (local) join + 2 shards left (dual partition) join should error out SELECT t_custkey, l_custkey, r_custkey FROM multi_outer_join_third_reference t1 INNER JOIN multi_outer_join_left_hash l1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey); -- inner (local) join + 2 shards left (dual partition) join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) ORDER BY 1,2,3; -- inner (broadcast) join + 2 shards left (local) + anti join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL ORDER BY 1; -- Test joinExpr aliases by performing an outer-join. SELECT t_custkey FROM (multi_outer_join_right_hash r1 LEFT OUTER JOIN multi_outer_join_left_hash l1 ON (l1.l_custkey = r1.r_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN multi_outer_join_third_reference t1 ON (test.c_custkey = t1.t_custkey) ORDER BY 1; -- flattened out subqueries with outer joins are not supported SELECT l1.l_custkey, count(*) as cnt FROM ( SELECT l_custkey, l_nationkey FROM multi_outer_join_left_hash WHERE l_comment like '%a%' ) l1 LEFT JOIN ( SELECT r_custkey, r_name FROM multi_outer_join_right_reference WHERE r_comment like '%b%' ) l2 ON l1.l_custkey = l2.r_custkey GROUP BY l1.l_custkey ORDER BY cnt DESC, l1.l_custkey DESC LIMIT 20; -- full join among reference tables should go thourgh router planner SELECT t_custkey, r_custkey FROM multi_outer_join_right_reference FULL JOIN multi_outer_join_third_reference ON (t_custkey = r_custkey) ORDER BY 1; -- DROP unused tables to clean up workspace DROP TABLE multi_outer_join_left_hash; DROP TABLE multi_outer_join_right_reference; DROP TABLE multi_outer_join_third_reference; DROP TABLE multi_outer_join_right_hash; citus-7.0.3/src/test/regress/input/worker_copy.source000066400000000000000000000004671317107136600230300ustar00rootroot00000000000000-- -- WORKER_COPY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 260000; COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' WITH DELIMITER '|'; COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' WITH DELIMITER '|'; COPY supplier FROM '@abs_srcdir@/data/supplier.data' WITH DELIMITER '|'; citus-7.0.3/src/test/regress/isolation_schedule000066400000000000000000000025421317107136600217000ustar00rootroot00000000000000test: isolation_add_remove_node test: isolation_add_node_vs_reference_table_operations test: isolation_create_table_vs_add_remove_node # tests that change node metadata should precede # isolation_cluster_management such that tests # that come later can be parallelized test: isolation_cluster_management test: isolation_dml_vs_repair isolation_copy_placement_vs_copy_placement test: isolation_concurrent_dml isolation_data_migration test: isolation_drop_shards isolation_copy_placement_vs_modification test: isolation_insert_vs_vacuum isolation_transaction_recovery test: isolation_distributed_transaction_id isolation_progress_monitoring test: isolation_dump_local_wait_edges test: isolation_dump_global_wait_edges test: isolation_replace_wait_function test: isolation_distributed_deadlock_detection # creating a restore point briefly blocks all # writes, run 
this test serially. test: isolation_create_restore_point test: isolation_hash_copy_vs_all test: isolation_append_copy_vs_all test: isolation_range_copy_vs_all test: isolation_reference_copy_vs_all test: isolation_partitioned_copy_vs_all test: isolation_select_vs_all test: isolation_insert_vs_all test: isolation_insert_select_vs_all test: isolation_upsert_vs_all test: isolation_update_vs_all test: isolation_delete_vs_all test: isolation_truncate_vs_all test: isolation_drop_vs_all test: isolation_ddl_vs_all citus-7.0.3/src/test/regress/multi_binary_schedule000066400000000000000000000015461317107136600224000ustar00rootroot00000000000000# ---------- # $Id$ # # Regression tests that test binary mode data transfer between workers. # No new tests are expected here unless they are specifically testing for changes # in binary mode data transfer # # ---------- # --- # Tests around schema changes, these are run first, so there's no preexisting objects. # --- test: multi_extension test: multi_cluster_management test: multi_test_helpers test: multi_table_ddl # ---------- # The following distributed tests depend on creating a partitioned table and # uploading data to it. # ---------- test: multi_create_table test: multi_load_data test: multi_basic_queries multi_complex_expressions test: multi_single_relation_subquery test: multi_binary_master_copy_format test: multi_simple_queries # --------- # multi_copy creates hash and range-partitioned tables and performs COPY # --------- test: multi_copy citus-7.0.3/src/test/regress/multi_follower_schedule000066400000000000000000000002241317107136600227350ustar00rootroot00000000000000test: multi_follower_sanity_check test: multi_follower_select_statements test: multi_follower_configure_followers test: multi_follower_task_tracker citus-7.0.3/src/test/regress/multi_mx_schedule000066400000000000000000000023721317107136600215360ustar00rootroot00000000000000# ---------- # $Id$ # # Regression tests for MX. This schedule runs tests for worker metadata # and MX tables. The tests mostly aim for running SQL queries from the worker # nodes and metadata operations from the schema node. # # Note that we use variant comparison files to test version dependent regression # test results. For more information: # http://www.postgresql.org/docs/current/static/regress-variant.html # ---------- # --- # Tests around schema changes, these are run first, so there's no preexisting objects. # --- test: multi_extension test: multi_cluster_management test: multi_test_helpers test: multi_mx_create_table test: multi_mx_copy_data multi_mx_router_planner test: multi_mx_schema_support multi_mx_tpch_query1 multi_mx_tpch_query10 test: multi_mx_tpch_query12 multi_mx_tpch_query14 multi_mx_tpch_query19 test: multi_mx_tpch_query3 multi_mx_tpch_query6 multi_mx_tpch_query7 test: multi_mx_tpch_query7_nested multi_mx_ddl test: multi_mx_repartition_udt_prepare test: multi_mx_repartition_join_w1 multi_mx_repartition_join_w2 multi_mx_repartition_udt_w1 multi_mx_repartition_udt_w2 test: multi_mx_metadata test: multi_mx_modifications multi_mx_transaction_recovery test: multi_mx_modifying_xacts test: multi_mx_explain test: multi_mx_reference_table citus-7.0.3/src/test/regress/multi_schedule000066400000000000000000000200761317107136600210330ustar00rootroot00000000000000# ---------- # $Id$ # # Regression tests that exercise distributed planning/execution functionality. # # All new regression tests are expected to be run by this schedule. 
Tests that # do not set specific task executor type should also be added to # multi_task_tracker_extra_schedule. # # Note that we use variant comparison files to test version dependent regression # test results. For more information: # http://www.postgresql.org/docs/current/static/regress-variant.html # ---------- # --- # Tests around schema changes, these are run first, so there's no preexisting objects. # --- test: multi_extension test: multi_703_upgrade test: multi_cluster_management test: multi_test_helpers test: multi_table_ddl test: multi_name_lengths test: multi_metadata_access test: multi_read_from_secondaries # ---------- # The following distributed tests depend on creating a partitioned table and # uploading data to it. # ---------- test: multi_create_table test: multi_create_table_constraints test: multi_master_protocol test: multi_load_data test: multi_behavioral_analytics_create_table test: multi_behavioral_analytics_basics multi_behavioral_analytics_single_shard_queries multi_insert_select_non_pushable_queries test: multi_insert_select # --- # Tests for partitioning support # --- test: multi_partitioning_utils test: multi_partitioning # ---------- # Miscellaneous tests to check our query planning behavior # ---------- test: multi_deparse_shard_query multi_distributed_transaction_id test: multi_basic_queries multi_complex_expressions test: multi_explain test: multi_subquery multi_subquery_complex_queries multi_subquery_behavioral_analytics test: multi_subquery_complex_reference_clause test: multi_subquery_in_where_reference_clause test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc test: multi_reference_table test: multi_outer_join_reference test: multi_single_relation_subquery test: multi_agg_distinct multi_agg_approximate_distinct multi_limit_clause multi_limit_clause_approximate test: multi_average_expression multi_working_columns test: multi_array_agg test: multi_agg_type_conversion multi_count_type_conversion test: multi_partition_pruning test: multi_join_pruning multi_hash_pruning test: multi_null_minmax_value_pruning test: multi_query_directory_cleanup test: multi_task_assignment_policy multi_cross_shard test: multi_utility_statements test: multi_dropped_column_aliases test: multi_binary_master_copy_format test: multi_prepare_sql test: multi_prepare_plsql test: multi_sql_function test: multi_view # ---------- # Parallel TPC-H tests to check our distributed execution behavior # ---------- test: multi_tpch_query1 multi_tpch_query3 multi_tpch_query6 multi_tpch_query10 test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19 test: multi_tpch_query7 multi_tpch_query7_nested # ---------- # Parallel tests to check our join order planning logic. Note that we load data # below; and therefore these tests should come after the execution tests. # ---------- test: multi_join_order_tpch_small multi_join_order_additional test: multi_load_more_data test: multi_join_order_tpch_large # ---------- # Tests for large-table join planning and execution. Be careful when creating # new shards before these tests, as they expect specific shard identifiers in # the output. 
# ---------- test: multi_large_table_join_planning test: multi_large_table_pruning test: multi_large_table_task_assignment # ---------- # Tests to check our large record loading and shard deletion behavior # ---------- test: multi_load_large_records test: multi_master_delete_protocol test: multi_shard_modify # ---------- # Tests around DDL statements run on distributed tables # ---------- test: multi_index_statements test: multi_alter_table_statements test: multi_alter_table_add_constraints # ---------- # multi_create_schema tests creation, loading, and querying of a table in a new # schema (namespace). # ---------- test: multi_create_schema # ---------- # Tests to check if we inform the user about potential caveats of creating new # databases, schemas, and roles. # ---------- test: multi_utility_warnings # --------- # multi_append_table_to_shard loads data to create shards in a way that forces # shard caching. # --------- test: multi_append_table_to_shard # --------- # multi_outer_join loads data to create shards to test outer join mappings # --------- test: multi_outer_join # --- # Tests covering mostly modification queries and required preliminary # functionality related to metadata, shard creation, shard pruning and # "hacky" copy script for hash partitioned tables. # Note that the order of the following tests is important. multi_complex_count_distinct # is independent of the rest of the group; it is added to increase parallelism. # --- test: multi_create_fdw test: multi_complex_count_distinct test: multi_distribution_metadata test: multi_generate_ddl_commands test: multi_create_shards test: multi_prune_shard_list test: multi_repair_shards test: multi_modifications test: multi_upsert test: multi_simple_queries test: multi_create_insert_proxy test: multi_data_types test: multi_utilities test: multi_repartition_udt test: multi_repartitioned_subquery_udf test: multi_modifying_xacts test: multi_transaction_recovery test: multi_subtransactions # --------- # multi_copy creates hash and range-partitioned tables and performs COPY # --------- test: multi_copy # --------- # multi_router_planner creates hash partitioned tables.
# --------- test: multi_router_planner # ---------- # multi_large_shardid loads more lineitem data using high shard identifiers # ---------- test: multi_large_shardid # ---------- # multi_size_queries tests various size commands on distributed tables # ---------- test: multi_size_queries # ---------- # multi_drop_extension makes sure we can safely drop and recreate the extension # ---------- test: multi_drop_extension # ---------- # multi_metadata_sync tests the propagation of mx-related metadata changes to metadata workers # multi_unsupported_worker_operations tests that unsupported operations error out on metadata workers # ---------- test: multi_metadata_sync test: multi_unsupported_worker_operations # ---------- # multi_schema_support makes sure we can work with tables in schemas other than public with no problem # ---------- test: multi_schema_support # ---------- # multi_function_evaluation tests edge-cases in master-side function pre-evaluation # ---------- test: multi_function_evaluation # ---------- # multi_truncate tests truncate functionality for distributed tables # ---------- test: multi_truncate # ---------- # multi_expire_table_cache tests for broadcast tables # ---------- test: multi_expire_table_cache # ---------- # multi_colocation_utils tests utility functions written for co-location feature & internal API # multi_colocated_shard_transfer tests master_copy_shard_placement with colocated tables. # ---------- test: multi_colocation_utils test: multi_colocated_shard_transfer # ---------- # multi_citus_tools tests utility functions written for citus tools # ---------- test: multi_citus_tools # ---------- # multi_foreign_key tests foreign key push down on distributed tables # ---------- test: multi_foreign_key # ---------- # multi_upgrade_reference_table tests for upgrade_reference_table UDF # multi_replicate_reference_table tests replicating reference tables to new nodes after we add new nodes # multi_remove_node_reference_table tests metadata changes after master_remove_node # ---------- test: multi_upgrade_reference_table test: multi_replicate_reference_table test: multi_remove_node_reference_table # ---------- # multi_transactional_drop_shards tests for dropping shards using connection API # ---------- test: multi_transactional_drop_shards # ---------- # multi_multiuser tests simple combinations of permission access and queries # ---------- test: multi_multiuser # --------- # multi_cache_invalidation tests for an obscure crash citus used to exhibit when shardids # changed the table they belonged to during a session # -------- test: multi_cache_invalidation # --------- # multi_task_string_size tests task string size checks # --------- test: multi_task_string_size citus-7.0.3/src/test/regress/multi_task_tracker_extra_schedule000066400000000000000000000065551317107136600250010ustar00rootroot00000000000000# ---------- # $Id$ # # Regression tests for task tracker executor. This schedule runs tests # in task tracker executor. Any test that do not explicitly set the task executor # are expected to be placed here in addition to multi_schedule. # # Note that we use variant comparison files to test version dependent regression # test results. For more information: # http://www.postgresql.org/docs/current/static/regress-variant.html # ---------- # --- # Tests around schema changes, these are run first, so there's no preexisting objects. 
# --- test: multi_extension test: multi_cluster_management test: multi_test_helpers test: multi_table_ddl # ---------- # The following distributed tests depend on creating a partitioned table and # uploading data to it. # ---------- test: multi_create_table test: multi_master_protocol test: multi_load_data # ---------- # Miscellaneous tests to check our query planning behavior # ---------- test: multi_basic_queries multi_complex_expressions test: multi_agg_distinct multi_limit_clause multi_limit_clause_approximate test: multi_average_expression multi_working_columns test: multi_array_agg test: multi_agg_type_conversion multi_count_type_conversion test: multi_hash_pruning test: multi_query_directory_cleanup test: multi_utility_statements test: multi_dropped_column_aliases # ---------- # Parallel TPC-H tests to check our distributed execution behavior # ---------- test: multi_tpch_query1 multi_tpch_query3 multi_tpch_query6 multi_tpch_query10 test: multi_tpch_query12 multi_tpch_query14 multi_tpch_query19 test: multi_tpch_query7 multi_tpch_query7_nested # ---------- # Parallel tests to check our join order planning logic. Note that we load data # below; and therefore these tests should come after the execution tests. # ---------- test: multi_load_more_data test: multi_join_order_tpch_large # ---------- # Tests to check our large record loading and shard deletion behavior # ---------- test: multi_load_large_records test: multi_master_delete_protocol test: multi_shard_modify # ---------- # multi_create_schema tests creation, loading, and querying of a table in a new # schema (namespace). # ---------- test: multi_create_schema # --------- # multi_outer_join loads data to create shards to test outer join mappings # --------- test: multi_outer_join # --- # Tests covering mostly modification queries and required preliminary # functionality related to metadata, shard creation, shard pruning and # "hacky" copy script for hash partitioned tables. # Note that the order of the following tests is important.
# --- test: multi_create_fdw test: multi_distribution_metadata test: multi_generate_ddl_commands test: multi_create_shards test: multi_prune_shard_list test: multi_repair_shards test: multi_modifications test: multi_upsert test: multi_simple_queries test: multi_create_insert_proxy test: multi_data_types test: multi_utilities # --------- # multi_copy creates hash and range-partitioned tables and performs COPY # --------- test: multi_copy # ---------- # multi_large_shardid loads more lineitem data using high shard identifiers # ---------- test: multi_large_shardid # ---------- # multi_drop_extension makes sure we can safely drop and recreate the extension # ---------- test: multi_drop_extension # ---------- # multi_schema_support makes sure we can work with tables in schemas other than public with no problem # ---------- test: multi_schema_support citus-7.0.3/src/test/regress/output/000077500000000000000000000000001317107136600174355ustar00rootroot00000000000000citus-7.0.3/src/test/regress/output/multi_agg_distinct.source000066400000000000000000000142061317107136600245330ustar00rootroot00000000000000-- -- MULTI_AGG_DISTINCT -- -- Create a new range partitioned lineitem table and load data into it CREATE TABLE lineitem_range ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_range', 'l_orderkey', 'range'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('lineitem_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('lineitem_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "500MB"; \copy lineitem_range FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_range FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' RESET citus.shard_max_size; -- Run aggregate(distinct) on partition column for range partitioned table SELECT count(distinct l_orderkey) FROM lineitem_range; count ------- 2985 (1 row) SELECT avg(distinct l_orderkey) FROM lineitem_range; avg ----------------------- 7463.9474036850921273 (1 row) -- Run count(distinct) on join between a range partitioned table and a single -- sharded table. For this test, we also change a config setting to ensure that -- we don't repartition any of the tables during the query. SET citus.large_table_shard_count TO 2; SELECT p_partkey, count(distinct l_orderkey) FROM lineitem_range, part WHERE l_partkey = p_partkey GROUP BY p_partkey ORDER BY p_partkey LIMIT 10; p_partkey | count -----------+------- 18 | 1 79 | 1 91 | 1 149 | 2 175 | 1 179 | 1 182 | 1 195 | 1 204 | 1 222 | 1 (10 rows) RESET citus.large_table_shard_count; -- Check that we don't support count(distinct) on non-partition column, and -- complex expressions. 
SELECT count(distinct l_partkey) FROM lineitem_range; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. SELECT count(distinct (l_orderkey + 1)) FROM lineitem_range; ERROR: cannot compute aggregate (distinct) DETAIL: aggregate (distinct) on complex expressions is unsupported HINT: You can load the hll extension from contrib packages and enable distinct approximations. -- Now test append partitioned tables. First run count(distinct) on a single -- sharded table. SELECT count(distinct p_mfgr) FROM part; count ------- 5 (1 row) SELECT p_mfgr, count(distinct p_partkey) FROM part GROUP BY p_mfgr ORDER BY p_mfgr; p_mfgr | count ---------------------------+------- Manufacturer#1 | 193 Manufacturer#2 | 190 Manufacturer#3 | 228 Manufacturer#4 | 204 Manufacturer#5 | 185 (5 rows) -- We don't support count(distinct) queries if table is append partitioned and -- has multiple shards SELECT count(distinct o_orderkey) FROM orders; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. -- Hash partitioned tables: CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('lineitem_hash', 4, 1); master_create_worker_shards ----------------------------- (1 row) \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- aggregate(distinct) on partition column is allowed SELECT count(distinct l_orderkey) FROM lineitem_hash; count ------- 2985 (1 row) SELECT avg(distinct l_orderkey) FROM lineitem_hash; avg ----------------------- 7463.9474036850921273 (1 row) -- count(distinct) on non-partition column or expression is not allowed SELECT count(distinct l_partkey) FROM lineitem_hash; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. SELECT count(distinct (l_orderkey + 1)) FROM lineitem_hash; ERROR: cannot compute aggregate (distinct) DETAIL: aggregate (distinct) on complex expressions is unsupported HINT: You can load the hll extension from contrib packages and enable distinct approximations. 
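-- The HINT above refers to Citus' approximate distinct counting. A minimal sketch of
-- how it is typically enabled, kept entirely in comments (it assumes the hll extension
-- is installed on the coordinator and all workers, and that the
-- citus.count_distinct_error_rate setting from the Citus documentation applies to this
-- version):
-- CREATE EXTENSION hll;
-- SET citus.count_distinct_error_rate TO 0.005;
-- SELECT count(distinct l_partkey) FROM lineitem_hash;  -- now computed approximately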
-- agg(distinct) is allowed if we group by partition column SELECT l_orderkey, count(distinct l_partkey) INTO hash_results FROM lineitem_hash GROUP BY l_orderkey; SELECT l_orderkey, count(distinct l_partkey) INTO range_results FROM lineitem_range GROUP BY l_orderkey; -- they should return the same results SELECT * FROM hash_results h, range_results r WHERE h.l_orderkey = r.l_orderkey AND h.count != r.count; l_orderkey | count | l_orderkey | count ------------+-------+------------+------- (0 rows) DROP TABLE lineitem_hash; citus-7.0.3/src/test/regress/output/multi_agg_type_conversion.source000066400000000000000000000062441317107136600261430ustar00rootroot00000000000000-- -- MULTI_AGG_TYPE_CONVERSION -- -- Test aggregate type conversions using sums of integers and division operator SELECT sum(l_suppkey) FROM lineitem; sum ---------- 60617976 (1 row) SELECT sum(l_suppkey) / 2 FROM lineitem; ?column? ---------- 30308988 (1 row) SELECT sum(l_suppkey) / 2::numeric FROM lineitem; ?column? ----------------------- 30308988.000000000000 (1 row) SELECT sum(l_suppkey)::int8 / 2 FROM lineitem; ?column? ---------- 30308988 (1 row) -- Create a new table to test type conversions on different types, and load -- data into this table. Then, apply aggregate functions and divide / multiply -- the results to test type conversions. CREATE TABLE aggregate_type ( float_value float(20) not null, double_value float(40) not null, interval_value interval not null); SELECT master_create_distributed_table('aggregate_type', 'float_value', 'append'); master_create_distributed_table --------------------------------- (1 row) \copy aggregate_type FROM '@abs_srcdir@/data/agg_type.data' -- Test conversions using aggregates on floats and division SELECT min(float_value), max(float_value), sum(float_value), count(float_value), avg(float_value) FROM aggregate_type; min | max | sum | count | avg -----+-----+------+-------+------- 1 | 4.5 | 10.5 | 4 | 2.625 (1 row) SELECT min(float_value) / 2, max(float_value) / 2, sum(float_value) / 2, count(float_value) / 2, avg(float_value) / 2 FROM aggregate_type; ?column? | ?column? | ?column? | ?column? | ?column? ----------+----------+----------+----------+---------- 0.5 | 2.25 | 5.25 | 2 | 1.3125 (1 row) -- Test conversions using aggregates on large floats and multiplication SELECT min(double_value), max(double_value), sum(double_value), count(double_value), avg(double_value) FROM aggregate_type; min | max | sum | count | avg -------+---------+----------+-------+----------- 2.343 | 6.34343 | 15.79703 | 4 | 3.9492575 (1 row) SELECT min(double_value) * 2, max(double_value) * 2, sum(double_value) * 2, count(double_value) * 2, avg(double_value) * 2 FROM aggregate_type; ?column? | ?column? | ?column? | ?column? | ?column? ----------+----------+----------+----------+---------- 4.686 | 12.68686 | 31.59406 | 8 | 7.898515 (1 row) -- Test conversions using aggregates on intervals and division. We also use the -- default configuration value for IntervalStyle. SET IntervalStyle TO 'postgres'; SELECT min(interval_value), max(interval_value), sum(interval_value), count(interval_value), avg(interval_value) FROM aggregate_type; min | max | sum | count | avg -------------+------------+-------------+-------+------------- 00:00:23.44 | 00:38:52.9 | 01:23:33.64 | 4 | 00:20:53.41 (1 row) SELECT min(interval_value) / 2, max(interval_value) / 2, sum(interval_value) / 2, count(interval_value) / 2, avg(interval_value) / 2 FROM aggregate_type; ?column? | ?column? | ?column? | ?column? | ?column? 
-------------+-------------+-------------+----------+-------------- 00:00:11.72 | 00:19:26.45 | 00:41:46.82 | 2 | 00:10:26.705 (1 row) citus-7.0.3/src/test/regress/output/multi_alter_table_statements.source000066400000000000000000001132441317107136600266230ustar00rootroot00000000000000-- -- MULTI_ALTER_TABLE_STATEMENTS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 220000; -- Check that we can run ALTER TABLE statements on distributed tables. -- We set the shardid sequence here so that the shardids in this test -- aren't affected by changes to the previous tests. CREATE TABLE lineitem_alter ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null ); SELECT master_create_distributed_table('lineitem_alter', 'l_orderkey', 'append'); master_create_distributed_table --------------------------------- (1 row) \copy lineitem_alter FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' -- Verify that we can add columns ALTER TABLE lineitem_alter ADD COLUMN float_column FLOAT; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE lineitem_alter ADD COLUMN date_column DATE; ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER DEFAULT 1; ALTER TABLE lineitem_alter ADD COLUMN int_column2 INTEGER DEFAULT 2; ALTER TABLE lineitem_alter ADD COLUMN null_column INTEGER; -- show changed schema on one worker \c - - - :worker_1_port SELECT attname, atttypid::regtype FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; attname | atttypid -----------------+------------------- tableoid | oid cmax | cid xmax | xid cmin | cid xmin | xid ctid | tid l_orderkey | bigint l_partkey | integer l_suppkey | integer l_linenumber | integer l_quantity | numeric l_extendedprice | numeric l_discount | numeric l_tax | numeric l_returnflag | character l_linestatus | character l_shipdate | date l_commitdate | date l_receiptdate | date l_shipinstruct | character l_shipmode | character l_comment | character varying float_column | double precision date_column | date int_column1 | integer int_column2 | integer null_column | integer (27 rows) \c - - - :master_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null float_column | double precision | 
date_column | date | int_column1 | integer | default 1 int_column2 | integer | default 2 null_column | integer | (21 rows) SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; float_column | count --------------+------- | 6000 (1 row) SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; int_column1 | count -------------+------- 1 | 6000 (1 row) -- Verify that SET|DROP DEFAULT works ALTER TABLE lineitem_alter ALTER COLUMN float_column SET DEFAULT 1; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE lineitem_alter ALTER COLUMN int_column1 DROP DEFAULT; -- \copy to verify that default values take effect \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT float_column, count(*) FROM lineitem_alter GROUP BY float_column; float_column | count --------------+------- | 6000 1 | 6000 (2 rows) SELECT int_column1, count(*) FROM lineitem_alter GROUP BY int_column1; int_column1 | count -------------+------- | 6000 1 | 6000 (2 rows) -- Verify that SET NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET NOT NULL; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+-------------------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null float_column | double precision | default 1 date_column | date | int_column1 | integer | int_column2 | integer | not null default 2 null_column | integer | (21 rows) -- Drop default so that NULLs will be inserted for this column ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP DEFAULT; -- \copy should fail because it will try to insert NULLs for a NOT NULL column -- Note, this operation will create a table on the workers but it won't be in the metadata \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' ERROR: null value in column "int_column2" violates not-null constraint DETAIL: Failing row contains (1, 155190, 7706, 1, 17.00, 21168.23, 0.04, 0.02, N, O, 1996-03-13, 1996-02-12, 1996-03-22, DELIVER IN PERSON , TRUCK , egular courts above the, 1, null, null, null, null). 
-- Verify that DROP NOT NULL works ALTER TABLE lineitem_alter ALTER COLUMN int_column2 DROP NOT NULL; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null float_column | double precision | default 1 date_column | date | int_column1 | integer | int_column2 | integer | null_column | integer | (21 rows) -- \copy should succeed now \copy lineitem_alter (l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' SELECT count(*) from lineitem_alter; count ------- 18000 (1 row) -- Verify that SET DATA TYPE works SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; int_column2 | pg_typeof | count -------------+-----------+------- | integer | 6000 2 | integer | 12000 (2 rows) ALTER TABLE lineitem_alter ALTER COLUMN int_column2 SET DATA TYPE FLOAT; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null float_column | double precision | default 1 date_column | date | int_column1 | integer | int_column2 | double precision | null_column | integer | (21 rows) SELECT int_column2, pg_typeof(int_column2), count(*) from lineitem_alter GROUP BY int_column2; int_column2 | pg_typeof | count -------------+------------------+------- | double precision | 6000 2 | double precision | 12000 (2 rows) -- Verify that DROP COLUMN works ALTER TABLE lineitem_alter DROP COLUMN int_column1; ALTER TABLE lineitem_alter DROP COLUMN float_column; ALTER TABLE lineitem_alter DROP COLUMN date_column; -- Verify that RENAME COLUMN works ALTER TABLE lineitem_alter RENAME COLUMN l_orderkey TO l_orderkey_renamed; SELECT SUM(l_orderkey_renamed) FROM lineitem_alter; sum ---------- 53620791 (1 row) -- Verify that IF EXISTS works as expected ALTER TABLE non_existent_table ADD COLUMN new_column INTEGER; ERROR: relation "non_existent_table" does not exist ALTER TABLE IF EXISTS non_existent_table ADD COLUMN new_column INTEGER; NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS lineitem_alter 
ALTER COLUMN int_column2 SET DATA TYPE INTEGER; ALTER TABLE lineitem_alter DROP COLUMN non_existent_column; ERROR: column "non_existent_column" of relation "lineitem_alter" does not exist ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS non_existent_column; NOTICE: column "non_existent_column" of relation "lineitem_alter" does not exist, skipping ALTER TABLE lineitem_alter DROP COLUMN IF EXISTS int_column2; -- Verify with IF EXISTS for extant table ALTER TABLE IF EXISTS lineitem_alter RENAME COLUMN l_orderkey_renamed TO l_orderkey; SELECT SUM(l_orderkey) FROM lineitem_alter; sum ---------- 53620791 (1 row) SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null null_column | integer | (17 rows) -- Verify that we can execute commands with multiple subcommands ALTER TABLE lineitem_alter ADD COLUMN int_column1 INTEGER, ADD COLUMN int_column2 INTEGER; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null null_column | integer | int_column1 | integer | int_column2 | integer | (19 rows) ALTER TABLE lineitem_alter ADD COLUMN int_column3 INTEGER, ALTER COLUMN int_column1 SET STATISTICS 10; ERROR: alter table command is currently unsupported DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP CONSTRAINT, ATTACH|DETACH PARTITION and TYPE subcommands are supported. 
ALTER TABLE lineitem_alter DROP COLUMN int_column1, DROP COLUMN int_column2; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null null_column | integer | (17 rows) -- Verify that we cannot execute alter commands on the distribution column ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey DROP NOT NULL; ERROR: cannot execute ALTER TABLE command involving partition column ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; ERROR: cannot execute ALTER TABLE command involving partition column -- Verify that we error out on unsupported statement types ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; ERROR: alter table command is currently unsupported DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP CONSTRAINT, ATTACH|DETACH PARTITION and TYPE subcommands are supported. ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; NOTICE: constraint "non_existent_contraint" of relation "lineitem_alter" does not exist, skipping ALTER TABLE lineitem_alter SET WITHOUT OIDS; ERROR: alter table command is currently unsupported DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP CONSTRAINT, ATTACH|DETACH PARTITION and TYPE subcommands are supported. -- Verify that we error out in case of postgres errors on supported statement -- types ALTER TABLE lineitem_alter ADD COLUMN new_column non_existent_type; ERROR: type "non_existent_type" does not exist LINE 1: ALTER TABLE lineitem_alter ADD COLUMN new_column non_existen... 
^ ALTER TABLE lineitem_alter ALTER COLUMN null_column SET NOT NULL; ERROR: column "null_column" contains null values CONTEXT: while executing command on localhost:57638 ALTER TABLE lineitem_alter ALTER COLUMN l_partkey SET DEFAULT 'a'; ERROR: invalid input syntax for integer: "a" -- Verify that we error out on non-column RENAME statements ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; ERROR: renaming distributed tables is currently unsupported ALTER TABLE lineitem_alter RENAME CONSTRAINT constraint_a TO constraint_b; ERROR: renaming constraints belonging to distributed tables is currently unsupported -- Verify that IF EXISTS works as expected with RENAME statements ALTER TABLE non_existent_table RENAME TO non_existent_table_renamed; ERROR: relation "non_existent_table" does not exist ALTER TABLE IF EXISTS non_existent_table RENAME TO non_existent_table_renamed; NOTICE: relation "non_existent_table" does not exist, skipping ALTER TABLE IF EXISTS non_existent_table RENAME COLUMN column1 TO column2; NOTICE: relation "non_existent_table" does not exist, skipping -- Verify that none of the failed alter table commands took effect on the master -- node SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null null_column | integer | (17 rows) -- verify that non-propagated ddl commands are allowed inside a transaction block SET citus.enable_ddl_propagation to false; BEGIN; CREATE INDEX temp_index_1 ON lineitem_alter(l_linenumber); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename --------------+---------------- temp_index_1 | lineitem_alter (1 row) DROP INDEX temp_index_1; -- verify that single distributed ddl commands are allowed inside a transaction block SET citus.enable_ddl_propagation to true; BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename --------------+---------------- temp_index_2 | lineitem_alter (1 row) DROP INDEX temp_index_2; -- and so are multiple ddl statements BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ALTER TABLE lineitem_alter ADD COLUMN first integer; COMMIT; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.lineitem_alter'::regclass; Column | Type | Modifiers -----------------+-----------------------+----------- l_orderkey | bigint | not null l_partkey | integer | not null l_suppkey | integer | not null l_linenumber | integer | not null l_quantity | numeric(15,2) | not null l_extendedprice | numeric(15,2) | not null l_discount | numeric(15,2) | not null l_tax | numeric(15,2) | not null l_returnflag | character(1) | not null l_linestatus | character(1) | not null l_shipdate | date | not null l_commitdate | date | not null l_receiptdate | date | not null 
l_shipinstruct | character(25) | not null l_shipmode | character(10) | not null l_comment | character varying(44) | not null null_column | integer | first | integer | (18 rows) \d temp_index_2 Index "public.temp_index_2" Column | Type | Definition ------------+--------+------------ l_orderkey | bigint | l_orderkey btree, for table "public.lineitem_alter" ALTER TABLE lineitem_alter DROP COLUMN first; DROP INDEX temp_index_2; -- ensure that user-specified rollback causes full rollback BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename -----------+----------- (0 rows) -- ensure that errors cause full rollback BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); ERROR: relation "temp_index_2" already exists ROLLBACK; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename -----------+----------- (0 rows) -- verify that SAVEPOINT is allowed... BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); SAVEPOINT my_savepoint; CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK; -- and also rolling back to it is also allowed BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); SAVEPOINT my_savepoint; CREATE INDEX temp_index_3 ON lineitem_alter(l_partkey); ROLLBACK TO my_savepoint; COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename --------------+---------------- temp_index_2 | lineitem_alter (1 row) DROP INDEX temp_index_2; -- Add column on only one worker... \c - - - :worker_2_port ALTER TABLE lineitem_alter_220000 ADD COLUMN first integer; \c - - - :master_port -- and try to add it in a multi-statement block, which fails BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' ALTER TABLE lineitem_alter ADD COLUMN first integer; ERROR: column "first" of relation "lineitem_alter_220000" already exists CONTEXT: while executing command on localhost:57638 COMMIT; -- Nothing from the block should have committed SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename -----------+----------- (0 rows) -- Create single-shard table (to avoid deadlocks in the upcoming test hackery) CREATE TABLE single_shard_items (id integer, name text); SELECT master_create_distributed_table('single_shard_items', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('single_shard_items', 1, 2); master_create_worker_shards ----------------------------- (1 row) -- Drop the column from the worker... 
\c - - - :worker_2_port ALTER TABLE lineitem_alter_220000 DROP COLUMN first; -- Create table to trigger at-xact-end (deferred) failure CREATE TABLE ddl_commands (command text UNIQUE DEFERRABLE INITIALLY DEFERRED); -- Use an event trigger to log all DDL event tags in it CREATE FUNCTION log_ddl_tag() RETURNS event_trigger AS $ldt$ BEGIN INSERT INTO ddl_commands VALUES (tg_tag); END; $ldt$ LANGUAGE plpgsql; CREATE EVENT TRIGGER log_ddl_tag ON ddl_command_end EXECUTE PROCEDURE log_ddl_tag(); \c - - - :master_port -- The above trigger will cause failure at transaction end on one placement. -- We'll test 2PC first, as it should handle this "best" (no divergence) SET citus.multi_shard_commit_protocol TO '2pc'; BEGIN; CREATE INDEX single_index_2 ON single_shard_items(id); CREATE INDEX single_index_3 ON single_shard_items(name); COMMIT; WARNING: duplicate key value violates unique constraint "ddl_commands_command_key" DETAIL: Key (command)=(CREATE INDEX) already exists. CONTEXT: while executing command on localhost:57638 ERROR: failure on connection marked as essential: localhost:57638 -- Nothing from the block should have committed SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; indexname | tablename -----------+----------- (0 rows) -- Now try with 2pc off RESET citus.multi_shard_commit_protocol; BEGIN; CREATE INDEX single_index_2 ON single_shard_items(id); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE INDEX single_index_3 ON single_shard_items(name); COMMIT; WARNING: duplicate key value violates unique constraint "ddl_commands_command_key" DETAIL: Key (command)=(CREATE INDEX) already exists. 
CONTEXT: while executing command on localhost:57638 WARNING: failed to commit critical transaction on localhost:57638, metadata is likely out of sync -- The block should have committed with a warning SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'single_shard_items' ORDER BY 1; indexname | tablename ----------------+-------------------- single_index_2 | single_shard_items single_index_3 | single_shard_items (2 rows) \c - - - :worker_2_port DROP EVENT TRIGGER log_ddl_tag; DROP FUNCTION log_ddl_tag(); DROP TABLE ddl_commands; \c - - - :master_port -- Distributed SELECTs cannot appear after ALTER BEGIN; CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' SELECT count(*) FROM lineitem_alter; ERROR: cannot open new connections after the first modification command within a transaction COMMIT; -- but are allowed before BEGIN; SELECT count(*) FROM lineitem_alter; count ------- 18000 (1 row) CREATE INDEX temp_index_2 ON lineitem_alter(l_orderkey); COMMIT; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename --------------+---------------- temp_index_2 | lineitem_alter (1 row) DROP INDEX temp_index_2; --- verify that distributed ddl commands can be used with 2pc SET citus.multi_shard_commit_protocol TO '2pc'; CREATE INDEX temp_index_3 ON lineitem_alter(l_orderkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename --------------+---------------- temp_index_3 | lineitem_alter (1 row) DROP INDEX temp_index_3; SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename -----------+----------- (0 rows) RESET citus.multi_shard_commit_protocol; -- verify that not any of shard placements are marked as failed when a query failure occurs CREATE TABLE test_ab (a int, b int); SELECT master_create_distributed_table('test_ab', 'a', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('test_ab', 8, 2); master_create_worker_shards ----------------------------- (1 row) INSERT INTO test_ab VALUES (2, 10); INSERT INTO test_ab VALUES (2, 11); CREATE UNIQUE INDEX temp_unique_index_1 ON test_ab(a); ERROR: could not create unique index "temp_unique_index_1_220022" DETAIL: Key (a)=(2) is duplicated. CONTEXT: while executing command on localhost:57638 SELECT shardid FROM pg_dist_shard_placement NATURAL JOIN pg_dist_shard WHERE logicalrelid='test_ab'::regclass AND shardstate=3; shardid --------- (0 rows) -- Check that the schema on the worker still looks reasonable \c - - - :worker_1_port SELECT attname, atttypid::regtype FROM (SELECT oid FROM pg_class WHERE relname LIKE 'lineitem_alter_%' ORDER BY relname LIMIT 1) pc JOIN pg_attribute ON (pc.oid = pg_attribute.attrelid) ORDER BY attnum; attname | atttypid -------------------------------+------------------- tableoid | oid cmax | cid xmax | xid cmin | cid xmin | xid ctid | tid l_orderkey | bigint l_partkey | integer l_suppkey | integer l_linenumber | integer l_quantity | numeric l_extendedprice | numeric l_discount | numeric l_tax | numeric l_returnflag | character l_linestatus | character l_shipdate | date l_commitdate | date l_receiptdate | date l_shipinstruct | character l_shipmode | character l_comment | character varying ........pg.dropped.17........ 
| - ........pg.dropped.18........ | - ........pg.dropped.19........ | - ........pg.dropped.20........ | - null_column | integer ........pg.dropped.22........ | - ........pg.dropped.23........ | - ........pg.dropped.24........ | - (30 rows) \c - - - :master_port -- verify that we don't intercept DDL commands if propagation is turned off SET citus.enable_ddl_propagation to false; -- table rename statement can be performed now ALTER TABLE lineitem_alter RENAME TO lineitem_renamed; -- verify rename is performed SELECT relname FROM pg_class WHERE relname = 'lineitem_alter' or relname = 'lineitem_renamed'; relname ------------------ lineitem_renamed (1 row) -- revert it to original name ALTER TABLE lineitem_renamed RENAME TO lineitem_alter; -- this column is added to master table and not workers ALTER TABLE lineitem_alter ADD COLUMN column_only_added_to_master int; -- verify newly added column is not present in a worker shard \c - - - :worker_1_port SELECT column_only_added_to_master FROM lineitem_alter_220000 LIMIT 0; ERROR: column "column_only_added_to_master" does not exist LINE 1: SELECT column_only_added_to_master FROM lineitem_alter_22000... ^ \c - - - :master_port -- ddl propagation flag is reset to default, disable it again SET citus.enable_ddl_propagation to false; -- following query succeeds since it accesses an previously existing column SELECT l_orderkey FROM lineitem_alter LIMIT 0; l_orderkey ------------ (0 rows) -- make master and workers have the same schema again ALTER TABLE lineitem_alter DROP COLUMN column_only_added_to_master; -- now this should succeed SELECT * FROM lineitem_alter LIMIT 0; l_orderkey | l_partkey | l_suppkey | l_linenumber | l_quantity | l_extendedprice | l_discount | l_tax | l_returnflag | l_linestatus | l_shipdate | l_commitdate | l_receiptdate | l_shipinstruct | l_shipmode | l_comment | null_column ------------+-----------+-----------+--------------+------------+-----------------+------------+-------+--------------+--------------+------------+--------------+---------------+----------------+------------+-----------+------------- (0 rows) -- previously unsupported statements are accepted by postgresql now ALTER TABLE lineitem_alter ALTER COLUMN l_orderkey SET STATISTICS 100; ALTER TABLE lineitem_alter DROP CONSTRAINT IF EXISTS non_existent_contraint; NOTICE: constraint "non_existent_contraint" of relation "lineitem_alter" does not exist, skipping ALTER TABLE lineitem_alter SET WITHOUT OIDS; -- distribution column still cannot be dropped. ALTER TABLE lineitem_alter DROP COLUMN l_orderkey; ERROR: cannot execute ALTER TABLE command dropping partition column -- Even unique indexes on l_partkey (non-partition column) are allowed. -- Citus would have prevented that. 
CREATE UNIQUE INDEX unique_lineitem_partkey on lineitem_alter(l_partkey); SELECT indexname, tablename FROM pg_indexes WHERE tablename = 'lineitem_alter'; indexname | tablename -------------------------+---------------- unique_lineitem_partkey | lineitem_alter (1 row) -- verify index is not created on worker \c - - - :worker_1_port SELECT indexname, tablename FROM pg_indexes WHERE tablename like 'lineitem_alter_%'; indexname | tablename -----------+----------- (0 rows) \c - - - :master_port -- verify alter table and drop sequence in the same transaction does not cause deadlock CREATE TABLE sequence_deadlock_test (a serial, b serial); SELECT create_distributed_table('sequence_deadlock_test', 'a'); create_distributed_table -------------------------- (1 row) BEGIN; ALTER TABLE sequence_deadlock_test ADD COLUMN c int; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' DROP SEQUENCE sequence_deadlock_test_b_seq CASCADE; NOTICE: drop cascades to default for table sequence_deadlock_test column b END; DROP TABLE sequence_deadlock_test; -- verify enable/disable trigger all works SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 1; CREATE TABLE trigger_table ( id int, value text ); SELECT create_distributed_table('trigger_table', 'id'); create_distributed_table -------------------------- (1 row) -- first set a trigger on a shard \c - - - :worker_1_port CREATE FUNCTION update_value() RETURNS trigger AS $up$ BEGIN NEW.value := 'trigger enabled'; RETURN NEW; END; $up$ LANGUAGE plpgsql; CREATE TRIGGER update_value BEFORE INSERT ON trigger_table_220056 FOR EACH ROW EXECUTE PROCEDURE update_value(); \c - - - :master_port INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; value | count -----------------+------- trigger enabled | 1 (1 row) ALTER TABLE trigger_table DISABLE TRIGGER ALL; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; value | count ------------------+------- trigger disabled | 1 trigger enabled | 1 (2 rows) ALTER TABLE trigger_table ENABLE TRIGGER ALL; INSERT INTO trigger_table VALUES (1, 'trigger disabled'); SELECT value, count(*) FROM trigger_table GROUP BY value ORDER BY value; value | count ------------------+------- trigger disabled | 1 trigger enabled | 2 (2 rows) DROP TABLE trigger_table; -- test ALTER TABLE ALL IN TABLESPACE -- we expect that it will warn out CREATE TABLESPACE super_fast_ssd LOCATION '@abs_srcdir@/data'; ALTER TABLE ALL IN TABLESPACE pg_default SET TABLESPACE super_fast_ssd; WARNING: not propagating ALTER TABLE ALL IN TABLESPACE commands to worker nodes HINT: Connect to worker nodes directly to manually move all tables. ALTER TABLE ALL IN TABLESPACE super_fast_ssd SET TABLESPACE pg_default; WARNING: not propagating ALTER TABLE ALL IN TABLESPACE commands to worker nodes HINT: Connect to worker nodes directly to manually move all tables. 
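-- A minimal sketch of the manual step the HINT above suggests, kept in comments only.
-- The shard name below is illustrative, and the tablespace would first have to be
-- created on that worker as well:
-- \c - - - :worker_1_port
-- ALTER TABLE lineitem_alter_220000 SET TABLESPACE super_fast_ssd;
-- \c - - - :master_port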
DROP TABLESPACE super_fast_ssd; -- Cleanup the table and its shards SET citus.enable_ddl_propagation to true; SELECT master_apply_delete_command('DELETE FROM lineitem_alter'); master_apply_delete_command ----------------------------- 14 (1 row) DROP TABLE lineitem_alter; -- check that nothing's left over on workers, other than the leftover shard created -- during the unsuccessful COPY \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'lineitem_alter%'; relname ----------------------- lineitem_alter_220009 (1 row) \c - - - :master_port -- Test alter table with drop table in the same transaction BEGIN; CREATE TABLE test_table_1(id int); SELECT create_distributed_table('test_table_1','id'); create_distributed_table -------------------------- (1 row) ALTER TABLE test_table_1 ADD CONSTRAINT u_key UNIQUE(id); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' DROP TABLE test_table_1; END; -- There should be no test_table_1 shard on workers \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'test_table_1%'; relname --------- (0 rows) \c - - - :master_port citus-7.0.3/src/test/regress/output/multi_append_table_to_shard.source000066400000000000000000000171711317107136600264010ustar00rootroot00000000000000-- -- MULTI_APPEND_TABLE_TO_SHARD -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 230000; -- Initialize tables to join CREATE TABLE multi_append_table_to_shard_right ( right_number INTEGER not null, right_text TEXT not null ); SELECT master_create_distributed_table('multi_append_table_to_shard_right', 'right_number', 'append'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE multi_append_table_to_shard_left ( left_number INTEGER not null, left_text TEXT not null ); SELECT master_create_distributed_table('multi_append_table_to_shard_left', 'left_number', 'append'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE multi_append_table_to_shard_right_hash ( right_number INTEGER not null, right_text TEXT not null ); SELECT master_create_distributed_table('multi_append_table_to_shard_right_hash', 'right_number', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('multi_append_table_to_shard_right_hash', 1, 1); master_create_worker_shards ----------------------------- (1 row) -- Replicate 'left' table on both workers SELECT set_config('citus.shard_replication_factor', '2', false); set_config ------------ 2 (1 row) \copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' \copy multi_append_table_to_shard_left FROM '@abs_srcdir@/data/agg.data' -- Place 'right' table only on the primary worker SELECT set_config('citus.shard_replication_factor', '1', false); set_config ------------ 1 (1 row) \copy multi_append_table_to_shard_right FROM '@abs_srcdir@/data/agg.data' -- Reset shard replication factor to ensure tasks will be assigned to both workers SELECT set_config('citus.shard_replication_factor', '2', false); set_config ------------ 2 (1 row) -- All 8 rows in left table match a row in right table SELECT COUNT(*) FROM multi_append_table_to_shard_left, multi_append_table_to_shard_right WHERE left_number = right_number; count ------- 8 (1 row) -- Now append more data to the 'right' table CREATE TABLE multi_append_table_to_shard_stage ( number INTEGER not null, text TEXT not null ); COPY 
multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/agg.data'; SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; master_append_table_to_shard ------------------------------ 0.0533333 (1 row) -- Only the primary worker will see the new matches, as the secondary still uses a cached shard SELECT COUNT(*) FROM multi_append_table_to_shard_left, multi_append_table_to_shard_right WHERE left_number = right_number; count ------- 12 (1 row) -- Now add a lot of data to ensure we increase the size on disk DELETE FROM multi_append_table_to_shard_stage; COPY multi_append_table_to_shard_stage FROM '@abs_srcdir@/data/large_records.data' with delimiter '|'; SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right'::regclass::oid = logicalrelid; master_append_table_to_shard ------------------------------ 0.106667 (1 row) -- This join will refresh the shard on the secondary, all 8 rows in the left table will match twice (16) SELECT COUNT(*) FROM multi_append_table_to_shard_left, multi_append_table_to_shard_right WHERE left_number = right_number; count ------- 16 (1 row) -- Check that we error out if we try to append data to a hash partitioned table. SELECT master_create_empty_shard('multi_append_table_to_shard_right_hash'); ERROR: relation "multi_append_table_to_shard_right_hash" is a hash partitioned table DETAIL: We currently don't support creating shards on hash-partitioned tables SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_right_hash'::regclass::oid = logicalrelid; ERROR: cannot append to shardId 230000 DETAIL: We currently don't support appending to shards in hash-partitioned or reference tables -- Clean up after test SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_right'); master_apply_delete_command ----------------------------- 1 (1 row) SELECT master_apply_delete_command('DELETE FROM multi_append_table_to_shard_left'); master_apply_delete_command ----------------------------- 2 (1 row) DROP TABLE multi_append_table_to_shard_stage; DROP TABLE multi_append_table_to_shard_right; DROP TABLE multi_append_table_to_shard_left; -- Check partitioning by date CREATE TABLE multi_append_table_to_shard_date ( event_date DATE, value INT ); SELECT master_create_distributed_table('multi_append_table_to_shard_date', 'event_date', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Create an empty shard and check that we can query the table SELECT master_create_empty_shard('multi_append_table_to_shard_date'); master_create_empty_shard --------------------------- 230004 (1 row) SELECT * FROM multi_append_table_to_shard_date; event_date | value ------------+------- (0 rows) -- Create an empty distributed table and check that we can query it CREATE TABLE multi_append_table_to_shard_stage (LIKE multi_append_table_to_shard_date); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; master_append_table_to_shard ------------------------------ 0 (1 row) SELECT * FROM multi_append_table_to_shard_date; event_date | value ------------+------- (0 rows) -- INSERT NULL 
values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES (NULL, NULL); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; master_append_table_to_shard ------------------------------ 0.0266667 (1 row) SELECT * FROM multi_append_table_to_shard_date; event_date | value ------------+------- | (1 row) -- INSERT regular values and check that we can query the table INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-01-01', 3); SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; master_append_table_to_shard ------------------------------ 0.0266667 (1 row) SELECT * FROM multi_append_table_to_shard_date; event_date | value ------------+------- | | 01-01-2016 | 3 (3 rows) -- When run inside aborted transaction does not persist changes INSERT INTO multi_append_table_to_shard_stage VALUES ('2016-02-02', 4); BEGIN; SELECT master_append_table_to_shard(shardid, 'multi_append_table_to_shard_stage', 'localhost', 57636) FROM pg_dist_shard WHERE 'multi_append_table_to_shard_date'::regclass::oid = logicalrelid; master_append_table_to_shard ------------------------------ 0.0266667 (1 row) ROLLBACK; SELECT * FROM multi_append_table_to_shard_date; event_date | value ------------+------- | | 01-01-2016 | 3 (3 rows) DROP TABLE multi_append_table_to_shard_stage; DROP TABLE multi_append_table_to_shard_date; citus-7.0.3/src/test/regress/output/multi_behavioral_analytics_create_table.source000066400000000000000000000321201317107136600307440ustar00rootroot00000000000000-- -- multi behavioral analytics -- this file is intended to create the table requires for the tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000; SET citus.shard_replication_factor = 1; SET citus.shard_count = 4; CREATE TABLE users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('users_table', 'user_id'); create_distributed_table -------------------------- (1 row) CREATE TABLE events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint); SELECT create_distributed_table('events_table', 'user_id'); create_distributed_table -------------------------- (1 row) CREATE TABLE agg_results (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp); SELECT create_distributed_table('agg_results', 'user_id'); create_distributed_table -------------------------- (1 row) -- we need this to improve the concurrency on the regression tests CREATE TABLE agg_results_second (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp); SELECT create_distributed_table('agg_results_second', 'user_id'); create_distributed_table -------------------------- (1 row) -- same as agg_results_second CREATE TABLE agg_results_third (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp); SELECT create_distributed_table('agg_results_third', 'user_id'); create_distributed_table -------------------------- (1 row) -- same as agg_results_second CREATE TABLE agg_results_fourth (user_id int, value_1_agg int, value_2_agg int, 
value_3_agg float, value_4_agg bigint, agg_time timestamp); SELECT create_distributed_table('agg_results_fourth', 'user_id'); create_distributed_table -------------------------- (1 row) COPY users_table FROM '@abs_srcdir@/data/users_table.data' WITH CSV; COPY events_table FROM '@abs_srcdir@/data/events_table.data' WITH CSV; -- create indexes for CREATE INDEX is_index1 ON users_table(user_id); NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' CREATE INDEX is_index2 ON events_table(user_id); CREATE INDEX is_index3 ON users_table(value_1); CREATE INDEX is_index4 ON events_table(event_type); CREATE INDEX is_index5 ON users_table(value_2); CREATE INDEX is_index6 ON events_table(value_2); -- Create composite type to use in subquery pushdown SELECT run_command_on_master_and_workers($f$ CREATE TYPE user_composite_type AS ( tenant_id BIGINT, user_id BIGINT ); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION cmp_user_composite_type_function(user_composite_type, user_composite_type) RETURNS int LANGUAGE 'internal' AS 'btrecordcmp' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION gt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_gt' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION ge_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_ge' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION equal_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_eq' IMMUTABLE; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION lt_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_lt' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION le_user_composite_type_function(user_composite_type, user_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_lt' IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR > ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = gt_user_composite_type_function ); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR >= ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = ge_user_composite_type_function ); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) -- ... use that function to create a custom equality operator... SELECT run_command_on_master_and_workers($f$ -- ... 
use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = equal_user_composite_type_function, commutator = =, RESTRICT = eqsel, JOIN = eqjoinsel, merges, hashes ); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR <= ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = le_user_composite_type_function ); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR < ( LEFTARG = user_composite_type, RIGHTARG = user_composite_type, PROCEDURE = lt_user_composite_type_function ); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) -- ... and create a custom operator family for hash indexes... SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR FAMILY cats_2_op_fam USING hash; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests SELECT run_command_on_master_and_workers($f$ CREATE FUNCTION test_composite_type_hash(user_composite_type) RETURNS int AS 'SELECT hashtext( ($1.tenant_id + $1.tenant_id)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; $f$); run_command_on_master_and_workers ----------------------------------- (1 row) -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR CLASS cats_2_op_fam_clas3 DEFAULT FOR TYPE user_composite_type USING BTREE AS OPERATOR 1 <= (user_composite_type, user_composite_type), OPERATOR 2 < (user_composite_type, user_composite_type), OPERATOR 3 = (user_composite_type, user_composite_type), OPERATOR 4 >= (user_composite_type, user_composite_type), OPERATOR 5 > (user_composite_type, user_composite_type), FUNCTION 1 cmp_user_composite_type_function(user_composite_type, user_composite_type); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) SELECT run_command_on_master_and_workers($f$ CREATE OPERATOR CLASS cats_2_op_fam_class DEFAULT FOR TYPE user_composite_type USING HASH AS OPERATOR 1 = (user_composite_type, user_composite_type), FUNCTION 1 test_composite_type_hash(user_composite_type); $f$); run_command_on_master_and_workers ----------------------------------- (1 row) CREATE TABLE events ( composite_id user_composite_type, event_id bigint, event_type character varying(255), event_time bigint ); SELECT master_create_distributed_table('events', 'composite_id', 'range'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('events') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)' 
WHERE shardid = :new_shard_id; \COPY events FROM STDIN WITH CSV CREATE TABLE users ( composite_id user_composite_type, lastseen bigint ); SELECT master_create_distributed_table('users', 'composite_id', 'range'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,1)', shardmaxvalue = '(1,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(1,2000000001)', shardmaxvalue = '(1,4300000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,1)', shardmaxvalue = '(2,2000000000)' WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('users') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = '(2,2000000001)', shardmaxvalue = '(2,4300000000)' WHERE shardid = :new_shard_id; \COPY users FROM STDIN WITH CSV -- Create tables for subquery tests CREATE TABLE lineitem_subquery ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT master_create_distributed_table('lineitem_subquery', 'l_orderkey', 'range'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE orders_subquery ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT master_create_distributed_table('orders_subquery', 'o_orderkey', 'range'); master_create_distributed_table --------------------------------- (1 row) SET citus.enable_router_execution TO 'false'; -- Check that we don't crash if there are not any shards. SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; avg ----- (1 row) -- Load data into tables. 
SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('lineitem_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14947 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('orders_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 5986 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('orders_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 8997, shardmaxvalue = 14946 WHERE shardid = :new_shard_id; SET citus.shard_max_size TO "1MB"; \copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_subquery FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_subquery FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_subquery FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' CREATE TABLE events_reference_table (like events_table including all); SELECT create_reference_table('events_reference_table'); create_reference_table ------------------------ (1 row) INSERT INTO events_reference_table SELECT * FROM events_table; CREATE TABLE users_reference_table (like users_table including all); SELECT create_reference_table('users_reference_table'); create_reference_table ------------------------ (1 row) INSERT INTO users_reference_table SELECT * FROM users_table; citus-7.0.3/src/test/regress/output/multi_complex_count_distinct.source000066400000000000000000000266051317107136600266620ustar00rootroot00000000000000-- -- COMPLEX_COUNT_DISTINCT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 240000; CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('lineitem_hash', 8, 1); master_create_worker_shards ----------------------------- (1 row) \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' SET citus.task_executor_type to "task-tracker"; -- count(distinct) is supported on top level query if there -- is a grouping on the partition key SELECT l_orderkey, count(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count ------------+------- 14885 | 7 14884 | 7 14821 | 7 14790 | 7 14785 | 7 14755 | 7 14725 | 7 14694 | 7 14627 | 7 14624 | 7 (10 rows) -- it is not supported if there is no grouping or grouping is on non-partition field SELECT count(DISTINCT l_partkey) FROM lineitem_hash ORDER BY 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable 
distinct approximations. SELECT l_shipmode, count(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_shipmode ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) DETAIL: table partitioning is unsuitable for aggregate (distinct) HINT: You can load the hll extension from contrib packages and enable distinct approximations. -- count distinct is supported on single table subqueries SELECT * FROM ( SELECT l_orderkey, count(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count ------------+------- 14885 | 7 14884 | 7 14821 | 7 14790 | 7 14785 | 7 14755 | 7 14725 | 7 14694 | 7 14627 | 7 14624 | 7 (10 rows) SELECT * FROM ( SELECT l_partkey, count(DISTINCT l_orderkey) FROM lineitem_hash GROUP BY l_partkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; l_partkey | count -----------+------- 199146 | 3 188804 | 3 177771 | 3 160895 | 3 149926 | 3 136884 | 3 87761 | 3 15283 | 3 6983 | 3 1927 | 3 (10 rows) -- count distinct with filters SELECT l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') FROM lineitem_hash GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count ------------+------- 12005 | 4 5409 | 4 4964 | 4 14848 | 3 14496 | 3 13473 | 3 13122 | 3 12929 | 3 12645 | 3 12417 | 3 (10 rows) -- filter column already exists in target list SELECT * FROM ( SELECT l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_orderkey > 100) FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count ------------+------- 14885 | 7 14884 | 7 14821 | 7 14790 | 7 14785 | 7 14755 | 7 14725 | 7 14694 | 7 14627 | 7 14624 | 7 (10 rows) -- filter column does not exist in target list SELECT * FROM ( SELECT l_orderkey, count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR') FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count ------------+------- 12005 | 4 5409 | 4 4964 | 4 14848 | 3 14496 | 3 13473 | 3 13122 | 3 12929 | 3 12645 | 3 12417 | 3 (10 rows) -- case expr in count distinct is supported. -- count orders partkeys if l_shipmode is air SELECT * FROM ( SELECT l_orderkey, count(DISTINCT CASE WHEN l_shipmode = 'AIR' THEN l_partkey ELSE NULL END) as count FROM lineitem_hash GROUP BY l_orderkey) sub WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count ------------+------- 12005 | 4 5409 | 4 4964 | 4 14848 | 3 14496 | 3 13473 | 3 13122 | 3 12929 | 3 12645 | 3 12417 | 3 (10 rows) -- text like operator is also supported SELECT * FROM ( SELECT l_orderkey, count(DISTINCT CASE WHEN l_shipmode like '%A%' THEN l_partkey ELSE NULL END) as count FROM lineitem_hash GROUP BY l_orderkey) sub WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; l_orderkey | count ------------+------- 14275 | 7 14181 | 7 13605 | 7 12707 | 7 12384 | 7 11746 | 7 10727 | 7 10467 | 7 5636 | 7 4614 | 7 (10 rows) -- count distinct is rejected if it does not reference any columns SELECT * FROM ( SELECT l_linenumber, count(DISTINCT 1) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) DETAIL: aggregate (distinct) with no columns is unsupported HINT: You can load the hll extension from contrib packages and enable distinct approximations. 
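-- A minimal sketch of the approximation path the HINT above suggests
-- (illustrative only, not executed here; it assumes the hll extension is
-- available on every node and that the citus.count_distinct_error_rate
-- setting exists in this Citus version):
--   CREATE EXTENSION hll;
--   SELECT run_command_on_workers('CREATE EXTENSION hll');
--   SET citus.count_distinct_error_rate TO 0.005;
--   SELECT l_shipmode, count(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_shipmode;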
-- count distinct is rejected if it does not reference any columns SELECT * FROM ( SELECT l_linenumber, count(DISTINCT (random() * 5)::int) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) DETAIL: aggregate (distinct) with no columns is unsupported HINT: You can load the hll extension from contrib packages and enable distinct approximations. -- even non-const function calls are supported within count distinct SELECT * FROM ( SELECT l_orderkey, count(DISTINCT (random() * 5)::int = l_linenumber) FROM lineitem_hash GROUP BY l_orderkey) sub ORDER BY 2 DESC, 1 DESC LIMIT 0; l_orderkey | count ------------+------- (0 rows) -- multiple nested subquery SELECT total, avg(avg_count) as total_avg_count FROM ( SELECT number_sum, count(DISTINCT l_suppkey) as total, avg(total_count) avg_count FROM ( SELECT l_suppkey, sum(l_linenumber) as number_sum, count(DISTINCT l_shipmode) as total_count FROM lineitem_hash WHERE l_partkey > 100 and l_quantity > 2 and l_orderkey < 10000 GROUP BY l_suppkey) as distributed_table WHERE number_sum >= 10 GROUP BY number_sum) as distributed_table_2 GROUP BY total ORDER BY total_avg_count DESC; total | total_avg_count -------+-------------------- 1 | 3.6000000000000000 6 | 2.8333333333333333 10 | 2.6000000000000000 27 | 2.5555555555555556 32 | 2.4687500000000000 77 | 2.1948051948051948 57 | 2.1754385964912281 (7 rows) -- multiple cases query SELECT * FROM ( SELECT count(DISTINCT CASE WHEN l_shipmode = 'TRUCK' THEN l_partkey WHEN l_shipmode = 'AIR' THEN l_quantity WHEN l_shipmode = 'SHIP' THEN l_discount ELSE l_suppkey END) as count, l_shipdate FROM lineitem_hash GROUP BY l_shipdate) sub WHERE count > 0 ORDER BY 1 DESC, 2 DESC LIMIT 10; count | l_shipdate -------+------------ 14 | 07-30-1997 13 | 05-26-1998 13 | 08-08-1997 13 | 11-17-1995 13 | 01-09-1993 12 | 01-15-1998 12 | 10-15-1997 12 | 09-07-1997 12 | 06-02-1997 12 | 03-14-1997 (10 rows) -- count DISTINCT expression SELECT * FROM ( SELECT l_quantity, count(DISTINCT ((l_orderkey / 1000) * 1000 )) as count FROM lineitem_hash GROUP BY l_quantity) sub WHERE count > 0 ORDER BY 2 DESC, 1 DESC LIMIT 10; l_quantity | count ------------+------- 48.00 | 13 47.00 | 13 37.00 | 13 33.00 | 13 26.00 | 13 25.00 | 13 23.00 | 13 21.00 | 13 15.00 | 13 12.00 | 13 (10 rows) -- count DISTINCT is part of an expression which inclues another aggregate SELECT * FROM ( SELECT sum(((l_partkey * l_tax) / 100)) / count(DISTINCT CASE WHEN l_shipmode = 'TRUCK' THEN l_partkey ELSE l_suppkey END) as avg, l_shipmode FROM lineitem_hash GROUP BY l_shipmode) sub ORDER BY 1 DESC, 2 DESC LIMIT 10; avg | l_shipmode -------------------------+------------ 44.82904609027336300064 | MAIL 44.80704536679536679537 | SHIP 44.68891732736572890026 | AIR 44.34106724470134874759 | REG AIR 43.12739987269255251432 | FOB 43.07299253636938646426 | RAIL 40.50298377916903813318 | TRUCK (7 rows) --- count DISTINCT CASE WHEN expression SELECT * FROM ( SELECT count(DISTINCT CASE WHEN l_shipmode = 'TRUCK' THEN l_linenumber WHEN l_shipmode = 'AIR' THEN l_linenumber + 10 ELSE 2 END) as avg FROM lineitem_hash GROUP BY l_shipdate) sub ORDER BY 1 DESC LIMIT 10; avg ----- 7 6 6 6 6 6 6 6 5 5 (10 rows) -- COUNT DISTINCT (c1, c2) SELECT * FROM (SELECT l_shipmode, count(DISTINCT (l_shipdate, l_tax)) FROM lineitem_hash GROUP BY l_shipmode) t ORDER BY 2 DESC,1 DESC LIMIT 10; l_shipmode | count ------------+------- TRUCK | 1689 MAIL | 1683 FOB | 1655 AIR | 1650 SHIP | 1644 RAIL | 1636 REG AIR | 1607 (7 rows) -- other 
distinct aggregate are not supported SELECT * FROM ( SELECT l_linenumber, sum(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) DETAIL: Only count(distinct) aggregate is supported in subqueries SELECT * FROM ( SELECT l_linenumber, avg(DISTINCT l_partkey) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute aggregate (distinct) DETAIL: Only count(distinct) aggregate is supported in subqueries -- whole row references, oid, and ctid are not supported in count distinct -- test table does not have oid or ctid enabled, so tests for them are skipped SELECT * FROM ( SELECT l_linenumber, count(DISTINCT lineitem_hash) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute count (distinct) DETAIL: Non-column references are not supported yet SELECT * FROM ( SELECT l_linenumber, count(DISTINCT lineitem_hash.*) FROM lineitem_hash GROUP BY l_linenumber) sub ORDER BY 2 DESC, 1 DESC LIMIT 10; ERROR: cannot compute count (distinct) DETAIL: Non-column references are not supported yet DROP TABLE lineitem_hash; citus-7.0.3/src/test/regress/output/multi_copy.source000066400000000000000000001124441317107136600230510ustar00rootroot00000000000000-- -- MULTI_COPY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 560000; -- Create a new hash-partitioned table into which to COPY CREATE TABLE customer_copy_hash ( c_custkey integer, c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117), primary key (c_custkey)); SELECT master_create_distributed_table('customer_copy_hash', 'c_custkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) -- Test COPY into empty hash-partitioned table COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|'); ERROR: could not find any shards into which to copy DETAIL: No shards exist for distributed table "customer_copy_hash". HINT: Run master_create_worker_shards to create shards and try again. SELECT master_create_worker_shards('customer_copy_hash', 64, 1); master_create_worker_shards ----------------------------- (1 row) -- Test empty copy COPY customer_copy_hash FROM STDIN; -- Test syntax error COPY customer_copy_hash (c_custkey,c_name) FROM STDIN; ERROR: invalid input syntax for integer: "1,customer1" CONTEXT: COPY customer_copy_hash, line 1, column c_custkey: "1,customer1" -- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; count ------- 0 (1 row) -- Test primary key violation COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); ERROR: duplicate key value violates unique constraint "customer_copy_hash_pkey_560048" DETAIL: Key (c_custkey)=(2) already exists. 
-- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; count ------- 0 (1 row) -- Test headers option COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv', HEADER true, FORCE_NULL (c_custkey)); -- Confirm that only first row was skipped SELECT count(*) FROM customer_copy_hash; count ------- 3 (1 row) -- Test force_not_null option COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NOT_NULL (c_address)); -- Confirm that value is not null SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 4; count ------- 1 (1 row) -- Test force_null option COPY customer_copy_hash (c_custkey, c_name, c_address) FROM STDIN WITH (FORMAT 'csv', QUOTE '"', FORCE_NULL (c_address)); -- Confirm that value is null SELECT count(c_address) FROM customer_copy_hash WHERE c_custkey = 5; count ------- 0 (1 row) -- Test null violation COPY customer_copy_hash (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); ERROR: null value in column "c_name" violates not-null constraint DETAIL: Failing row contains (8, null, null, null, null, null, null, null). -- Confirm that no data was copied SELECT count(*) FROM customer_copy_hash; count ------- 5 (1 row) -- Test server-side copy from program COPY customer_copy_hash (c_custkey, c_name) FROM PROGRAM 'echo 9 customer9' WITH (DELIMITER ' '); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash WHERE c_custkey = 9; count ------- 1 (1 row) -- Test server-side copy from file COPY customer_copy_hash FROM '@abs_srcdir@/data/customer.2.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; count ------- 1006 (1 row) -- Test client-side copy from file \copy customer_copy_hash FROM '@abs_srcdir@/data/customer.3.data' WITH (DELIMITER '|'); -- Confirm that data was copied SELECT count(*) FROM customer_copy_hash; count ------- 2006 (1 row) -- Make sure that master_update_shard_statistics() only updates shard length for -- hash-partitioned tables SELECT master_update_shard_statistics(560000); master_update_shard_statistics -------------------------------- 8192 (1 row) SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560000; shardid | shardminvalue | shardmaxvalue ---------+---------------+--------------- 560000 | -2147483648 | -2080374785 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560000; shardid | shardlength ---------+------------- 560000 | 8192 (1 row) -- Create a new hash-partitioned table with default now() function CREATE TABLE customer_with_default( c_custkey integer, c_name varchar(25) not null, c_time timestamp default now()); SELECT master_create_distributed_table('customer_with_default', 'c_custkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('customer_with_default', 64, 1); master_create_worker_shards ----------------------------- (1 row) -- Test with default values for now() function COPY customer_with_default (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -- Confirm that data was copied with now() function SELECT count(*) FROM customer_with_default where c_time IS NOT NULL; count ------- 2 (1 row) -- Add columns to the table and perform a COPY ALTER TABLE customer_copy_hash ADD COLUMN extra1 INT DEFAULT 0; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO 
'2pc' ALTER TABLE customer_copy_hash ADD COLUMN extra2 INT DEFAULT 0; COPY customer_copy_hash (c_custkey, c_name, extra1, extra2) FROM STDIN CSV; SELECT * FROM customer_copy_hash WHERE extra1 = 1; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | extra1 | extra2 -----------+------------+-----------+-------------+---------+-----------+--------------+-----------+--------+-------- 10 | customer10 | | | | | | | 1 | 5 (1 row) -- Test dropping an intermediate column ALTER TABLE customer_copy_hash DROP COLUMN extra1; COPY customer_copy_hash (c_custkey, c_name, extra2) FROM STDIN CSV; SELECT * FROM customer_copy_hash WHERE c_custkey = 11; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment | extra2 -----------+------------+-----------+-------------+---------+-----------+--------------+-----------+-------- 11 | customer11 | | | | | | | 5 (1 row) -- Test dropping the last column ALTER TABLE customer_copy_hash DROP COLUMN extra2; COPY customer_copy_hash (c_custkey, c_name) FROM STDIN CSV; SELECT * FROM customer_copy_hash WHERE c_custkey = 12; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment -----------+------------+-----------+-------------+---------+-----------+--------------+----------- 12 | customer12 | | | | | | (1 row) -- Create a new range-partitioned table into which to COPY CREATE TABLE customer_copy_range ( c_custkey integer, c_name varchar(25), c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117), primary key (c_custkey)); SELECT master_create_distributed_table('customer_copy_range', 'c_custkey', 'range'); master_create_distributed_table --------------------------------- (1 row) -- Test COPY into empty range-partitioned table COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|'); ERROR: could not find any shards into which to copy DETAIL: No shards exist for distributed table "customer_copy_range". 
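-- A minimal sketch of the two-phase commit option the HINTs above refer to
-- (illustrative only, not executed as part of this test; the added column name
-- is hypothetical):
--   SET citus.multi_shard_commit_protocol TO '2pc';
--   ALTER TABLE customer_copy_hash ADD COLUMN extra3 INT DEFAULT 0;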
SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('customer_copy_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000 WHERE shardid = :new_shard_id; -- Test copy into range-partitioned table COPY customer_copy_range FROM '@abs_srcdir@/data/customer.1.data' WITH (DELIMITER '|'); -- Check whether data went into the right shard (maybe) SELECT min(c_custkey), max(c_custkey), avg(c_custkey), count(*) FROM customer_copy_range WHERE c_custkey <= 500; min | max | avg | count -----+-----+----------------------+------- 1 | 500 | 250.5000000000000000 | 500 (1 row) -- Check whether data was copied SELECT count(*) FROM customer_copy_range; count ------- 1000 (1 row) -- Manipulate min/max values and check shard statistics for new shard UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = :new_shard_id; SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; shardid | shardminvalue | shardmaxvalue ---------+---------------+--------------- 560129 | 1501 | 2000 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id; shardid | shardlength ---------+------------- 560129 | 0 560129 | 0 (2 rows) -- Update shard statistics for range-partitioned shard and check that only the -- shard length is updated. SELECT master_update_shard_statistics(:new_shard_id); master_update_shard_statistics -------------------------------- 131072 (1 row) SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; shardid | shardminvalue | shardmaxvalue ---------+---------------+--------------- 560129 | 1501 | 2000 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = :new_shard_id; shardid | shardlength ---------+------------- 560129 | 131072 560129 | 131072 (2 rows) -- Revert back min/max value updates UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000 WHERE shardid = :new_shard_id; -- Create a new append-partitioned table into which to COPY CREATE TABLE customer_copy_append ( c_custkey integer, c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117)); SELECT master_create_distributed_table('customer_copy_append', 'c_custkey', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Test syntax error COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); ERROR: invalid input syntax for integer: "notinteger" CONTEXT: COPY customer_copy_append, line 3, column c_custkey: "notinteger" -- Test that no shard is created for failing copy SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; count ------- 0 (1 row) -- Test empty copy COPY customer_copy_append FROM STDIN; -- Test that no shard is created for copying zero rows SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'customer_copy_append'::regclass; count ------- 0 (1 row) -- Test proper copy COPY customer_copy_append(c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -- Check whether data was copied properly SELECT * FROM customer_copy_append; c_custkey | c_name | c_address | c_nationkey | c_phone | c_acctbal | c_mktsegment | c_comment 
-----------+-----------+-----------+-------------+---------+-----------+--------------+----------- 1 | customer1 | | | | | | 2 | customer2 | | | | | | (2 rows) -- Manipulate manipulate and check shard statistics for append-partitioned table shard UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2000 WHERE shardid = 560131; UPDATE pg_dist_shard_placement SET shardlength = 0 WHERE shardid = 560131; SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; shardid | shardminvalue | shardmaxvalue ---------+---------------+--------------- 560131 | 1501 | 2000 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; shardid | shardlength ---------+------------- 560131 | 0 560131 | 0 (2 rows) -- Update shard statistics for append-partitioned shard SELECT master_update_shard_statistics(560131); master_update_shard_statistics -------------------------------- 8192 (1 row) SELECT shardid, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = 560131; shardid | shardminvalue | shardmaxvalue ---------+---------------+--------------- 560131 | 1 | 2 (1 row) SELECT shardid, shardlength FROM pg_dist_shard_placement WHERE shardid = 560131; shardid | shardlength ---------+------------- 560131 | 8192 560131 | 8192 (2 rows) -- Create lineitem table CREATE TABLE lineitem_copy_append ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null); SELECT master_create_distributed_table('lineitem_copy_append', 'l_orderkey', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Test multiple shard creation SET citus.shard_max_size TO '256kB'; COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|'; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid = 'lineitem_copy_append'::regclass; count ------- 5 (1 row) -- Test round robin shard policy SET citus.shard_replication_factor TO 1; COPY lineitem_copy_append FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|'; SELECT pg_dist_shard_placement.shardid, pg_dist_shard_placement.nodeport FROM pg_dist_shard, pg_dist_shard_placement WHERE pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND logicalrelid = 'lineitem_copy_append'::regclass ORDER BY pg_dist_shard.shardid DESC LIMIT 5; shardid | nodeport ---------+---------- 560141 | 57637 560140 | 57638 560139 | 57637 560138 | 57638 560137 | 57637 (5 rows) -- Ensure that copy from worker node of table with serial column fails CREATE TABLE customer_worker_copy_append_seq (id integer, seq serial); SELECT master_create_distributed_table('customer_worker_copy_append_seq', 'id', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Connect to the first worker node \c - - - 57637 -- Test copy from the worker node COPY customer_worker_copy_append_seq FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', master_host 'localhost', master_port 57636); ERROR: relation "public.customer_worker_copy_append_seq_seq_seq" does not exist -- Connect back to the master node \c - - - 57636 -- Create customer table 
for the worker copy with constraint and index CREATE TABLE customer_worker_copy_append ( c_custkey integer , c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117), primary key (c_custkey)); CREATE INDEX ON customer_worker_copy_append (c_name); SELECT master_create_distributed_table('customer_worker_copy_append', 'c_custkey', 'append'); WARNING: table "customer_worker_copy_append" has a UNIQUE or EXCLUDE constraint DETAIL: UNIQUE constraints, EXCLUDE constraints, and PRIMARY KEYs on append-partitioned tables cannot be enforced. HINT: Consider using hash partitioning. master_create_distributed_table --------------------------------- (1 row) -- Connect to the first worker node \c - - - 57637 -- Test copy from the worker node COPY customer_worker_copy_append FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|', master_host 'localhost', master_port 57636); -- Make sure we don't use 2PC when connecting to master, even if requested BEGIN; SET LOCAL citus.multi_shard_commit_protocol TO '2pc'; COPY customer_worker_copy_append FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', master_host 'localhost', master_port 57636); COMMIT; -- Test if there is no relation to copy data with the worker copy COPY lineitem_copy_none FROM '@abs_srcdir@/data/lineitem.1.data' with (delimiter '|', master_host 'localhost', master_port 57636); WARNING: relation "lineitem_copy_none" does not exist CONTEXT: while executing command on localhost:57636 ERROR: could not run copy from the worker node -- Connect back to the master node \c - - - 57636 -- Test the content of the table SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM customer_worker_copy_append; min | max | avg | count -----+------+-----------------------+------- 1 | 7000 | 4443.8028800000000000 | 2000 (1 row) -- Test schema support on append partitioned tables CREATE SCHEMA append; CREATE TABLE append.customer_copy ( c_custkey integer , c_name varchar(25) not null, c_address varchar(40), c_nationkey integer, c_phone char(15), c_acctbal decimal(15,2), c_mktsegment char(10), c_comment varchar(117)); SELECT master_create_distributed_table('append.customer_copy', 'c_custkey', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Test copy from the master node COPY append.customer_copy FROM '@abs_srcdir@/data/customer.1.data' with (delimiter '|'); -- Test copy from the worker node \c - - - 57637 COPY append.customer_copy FROM '@abs_srcdir@/data/customer.2.data' with (delimiter '|', master_host 'localhost', master_port 57636); -- Connect back to the master node \c - - - 57636 -- Test the content of the table SELECT min(c_custkey), max(c_custkey), avg(c_acctbal), count(*) FROM append.customer_copy; min | max | avg | count -----+------+-----------------------+------- 1 | 7000 | 4443.8028800000000000 | 2000 (1 row) -- Test with table name which contains special character CREATE TABLE "customer_with_special_\\_character"( c_custkey integer, c_name varchar(25) not null); SELECT master_create_distributed_table('"customer_with_special_\\_character"', 'c_custkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('"customer_with_special_\\_character"', 4, 1); master_create_worker_shards ----------------------------- (1 row) COPY "customer_with_special_\\_character" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -- Confirm that 
data was copied SELECT count(*) FROM "customer_with_special_\\_character"; count ------- 2 (1 row) -- Test with table name which starts with number CREATE TABLE "1_customer"( c_custkey integer, c_name varchar(25) not null); SELECT master_create_distributed_table('"1_customer"', 'c_custkey', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('"1_customer"', 4, 1); master_create_worker_shards ----------------------------- (1 row) COPY "1_customer" (c_custkey, c_name) FROM STDIN WITH (FORMAT 'csv'); -- Confirm that data was copied SELECT count(*) FROM "1_customer"; count ------- 2 (1 row) -- Test COPY with types having different Oid at master and workers CREATE TYPE number_pack AS ( number1 integer, number2 integer ); CREATE TYPE super_number_pack AS ( packed_number1 number_pack, packed_number2 number_pack ); -- Create same types in worker1 \c - - - :worker_1_port CREATE TYPE number_pack AS ( number1 integer, number2 integer ); CREATE TYPE super_number_pack AS ( packed_number1 number_pack, packed_number2 number_pack ); -- Create same types in worker2 \c - - - :worker_2_port CREATE TYPE number_pack AS ( number1 integer, number2 integer ); CREATE TYPE super_number_pack AS ( packed_number1 number_pack, packed_number2 number_pack ); -- Connect back to master \c - - - :master_port -- Test array of user-defined type with hash distribution CREATE TABLE packed_numbers_hash ( id integer, packed_numbers number_pack[] ); SELECT master_create_distributed_table('packed_numbers_hash', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('packed_numbers_hash', 4, 1); master_create_worker_shards ----------------------------- (1 row) COPY (SELECT 1, ARRAY[ROW(42, 42), ROW(42, 42)]) TO '/tmp/copy_test_array_of_composite'; COPY packed_numbers_hash FROM '/tmp/copy_test_array_of_composite'; -- Verify data is actually copied SELECT * FROM packed_numbers_hash; id | packed_numbers ----+----------------------- 1 | {"(42,42)","(42,42)"} (1 row) -- Test composite type containing an element with different Oid with hash distribution CREATE TABLE super_packed_numbers_hash ( id integer, super_packed_number super_number_pack ); SELECT master_create_distributed_table('super_packed_numbers_hash', 'id', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('super_packed_numbers_hash', 4, 1); master_create_worker_shards ----------------------------- (1 row) COPY (SELECT 1, ROW(ROW(42, 42), ROW(42, 42))) TO '/tmp/copy_test_composite_of_composite'; COPY super_packed_numbers_hash FROM '/tmp/copy_test_composite_of_composite'; -- Verify data is actually copied SELECT * FROM super_packed_numbers_hash; id | super_packed_number ----+----------------------- 1 | ("(42,42)","(42,42)") (1 row) -- Test array of user-defined type with append distribution CREATE TABLE packed_numbers_append ( id integer, packed_numbers number_pack[] ); SELECT master_create_distributed_table('packed_numbers_append', 'id', 'append'); master_create_distributed_table --------------------------------- (1 row) COPY packed_numbers_append FROM '/tmp/copy_test_array_of_composite'; -- Verify data is actually copied SELECT * FROM packed_numbers_append; id | packed_numbers ----+----------------------- 1 | {"(42,42)","(42,42)"} (1 row) -- Test composite type containing an element with different Oid with append distribution CREATE TABLE super_packed_numbers_append ( id 
integer, super_packed_number super_number_pack ); SELECT master_create_distributed_table('super_packed_numbers_append', 'id', 'append'); master_create_distributed_table --------------------------------- (1 row) COPY super_packed_numbers_append FROM '/tmp/copy_test_composite_of_composite'; -- Verify data is actually copied SELECT * FROM super_packed_numbers_append; id | super_packed_number ----+----------------------- 1 | ("(42,42)","(42,42)") (1 row) -- Test copy on append for composite type partition column CREATE TABLE composite_partition_column_table( id integer, composite_column number_pack ); SELECT master_create_distributed_table('composite_partition_column_table', 'composite_column', 'append'); master_create_distributed_table --------------------------------- (1 row) \COPY composite_partition_column_table FROM STDIN WITH (FORMAT 'csv'); WARNING: function min(number_pack) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. CONTEXT: while executing command on localhost:57637 WARNING: function min(number_pack) does not exist HINT: No function matches the given name and argument types. You might need to add explicit type casts. CONTEXT: while executing command on localhost:57638 WARNING: could not get statistics for shard public.composite_partition_column_table_560164 DETAIL: Setting shard statistics to NULL ERROR: failure on connection marked as essential: localhost:57637 -- Test copy on append distributed tables do not create shards on removed workers CREATE TABLE numbers_append (a int, b int); SELECT master_create_distributed_table('numbers_append', 'a', 'append'); master_create_distributed_table --------------------------------- (1 row) -- no shards is created yet SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; shardid | nodename | nodeport ---------+----------+---------- (0 rows) COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); -- verify there are shards at both workers SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; shardid | nodename | nodeport ---------+-----------+---------- 560165 | localhost | 57637 560165 | localhost | 57638 560166 | localhost | 57638 560166 | localhost | 57637 (4 rows) -- disable the first node SELECT master_disable_node('localhost', :worker_1_port); NOTICE: Node localhost:57637 has active shard placements. Some queries may fail after this operation. Use SELECT master_activate_node('localhost', 57637) to activate this node back. 
master_disable_node --------------------- (1 row) -- set replication factor to 1 so that copy will -- succeed without replication count error SET citus.shard_replication_factor TO 1; -- add two new shards and verify they are created at the other node COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; shardid | nodename | nodeport ---------+-----------+---------- 560165 | localhost | 57637 560165 | localhost | 57638 560166 | localhost | 57638 560166 | localhost | 57637 560167 | localhost | 57638 560168 | localhost | 57638 (6 rows) -- add the node back SELECT 1 FROM master_activate_node('localhost', :worker_1_port); NOTICE: Replicating reference table "nation" to the node localhost:57637 NOTICE: Replicating reference table "supplier" to the node localhost:57637 ?column? ---------- 1 (1 row) RESET citus.shard_replication_factor; -- add two new shards and verify they are created at both workers COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); COPY numbers_append FROM STDIN WITH (FORMAT 'csv'); SELECT shardid, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_append'::regclass order by placementid; shardid | nodename | nodeport ---------+-----------+---------- 560165 | localhost | 57637 560165 | localhost | 57638 560166 | localhost | 57638 560166 | localhost | 57637 560167 | localhost | 57638 560168 | localhost | 57638 560169 | localhost | 57637 560169 | localhost | 57638 560170 | localhost | 57638 560170 | localhost | 57637 (10 rows) DROP TABLE numbers_append; -- Test copy failures against connection failures -- create and switch to test user CREATE USER test_user; NOTICE: not propagating CREATE ROLE/USER commands to worker nodes HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
SELECT * FROM run_command_on_workers('CREATE USER test_user'); nodename | nodeport | success | result -----------+----------+---------+------------- localhost | 57637 | t | CREATE ROLE localhost | 57638 | t | CREATE ROLE (2 rows) \c - test_user SET citus.shard_count to 4; CREATE TABLE numbers_hash (a int, b int); SELECT create_distributed_table('numbers_hash', 'a'); create_distributed_table -------------------------- (1 row) COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -- verify each placement is active SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 560171 | 1 | localhost | 57637 560171 | 1 | localhost | 57638 560172 | 1 | localhost | 57638 560172 | 1 | localhost | 57637 560173 | 1 | localhost | 57637 560173 | 1 | localhost | 57638 560174 | 1 | localhost | 57638 560174 | 1 | localhost | 57637 (8 rows) -- create a reference table CREATE TABLE numbers_reference(a int, b int); SELECT create_reference_table('numbers_reference'); create_reference_table ------------------------ (1 row) COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); -- create another hash distributed table CREATE TABLE numbers_hash_other(a int, b int); SELECT create_distributed_table('numbers_hash_other', 'a'); create_distributed_table -------------------------- (1 row) SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 560176 | 1 | localhost | 57638 560176 | 1 | localhost | 57637 560177 | 1 | localhost | 57637 560177 | 1 | localhost | 57638 560178 | 1 | localhost | 57638 560178 | 1 | localhost | 57637 560179 | 1 | localhost | 57637 560179 | 1 | localhost | 57638 (8 rows) -- manually corrupt pg_dist_shard such that both copies of one shard is placed in -- worker_1. This is to test the behavior when no replica of a shard is accessible. -- Whole copy operation is supposed to fail and rollback. 
\c - :default_user UPDATE pg_dist_shard_placement SET nodeport = :worker_1_port WHERE shardid = 560176; -- disable test_user on the first worker \c - :default_user - :worker_1_port ALTER USER test_user WITH nologin; \c - test_user - :master_port -- reissue copy COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); WARNING: connection error: localhost:57637 DETAIL: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash, line 1: "1,1" WARNING: connection error: localhost:57637 DETAIL: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash, line 2: "2,2" WARNING: connection error: localhost:57637 DETAIL: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash, line 3: "3,3" WARNING: connection error: localhost:57637 DETAIL: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash, line 6: "6,6" -- verify shards in the first worker as marked invalid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 560171 | 3 | localhost | 57637 560171 | 1 | localhost | 57638 560172 | 1 | localhost | 57638 560172 | 3 | localhost | 57637 560173 | 3 | localhost | 57637 560173 | 1 | localhost | 57638 560174 | 1 | localhost | 57638 560174 | 3 | localhost | 57637 (8 rows) -- try to insert into a reference table copy should fail COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); ERROR: connection error: localhost:57637 DETAIL: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_reference, line 1: "3,1" -- verify shards for reference table are still valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_reference'::regclass order by placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 560175 | 1 | localhost | 57637 560175 | 1 | localhost | 57638 (2 rows) -- try to insert into numbers_hash_other. copy should fail and rollback -- since it can not insert into either copies of a shard. shards are expected to -- stay valid since the operation is rolled back. 
COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv'); WARNING: connection error: localhost:57637 DETAIL: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash_other, line 1: "1,1" WARNING: connection error: localhost:57637 DETAIL: FATAL: role "test_user" is not permitted to log in CONTEXT: COPY numbers_hash_other, line 1: "1,1" ERROR: could not connect to any active placements CONTEXT: COPY numbers_hash_other, line 1: "1,1" -- verify shards for numbers_hash_other are still valid -- since copy has failed altogether SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash_other'::regclass order by placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 560176 | 1 | localhost | 57637 560176 | 1 | localhost | 57637 560177 | 1 | localhost | 57637 560177 | 1 | localhost | 57638 560178 | 1 | localhost | 57638 560178 | 1 | localhost | 57637 560179 | 1 | localhost | 57637 560179 | 1 | localhost | 57638 (8 rows) -- re-enable test_user on the first worker \c - :default_user - :worker_1_port ALTER USER test_user WITH login; -- there is a dangling shard in worker_2, drop it \c - test_user - :worker_2_port DROP TABLE numbers_hash_other_560176; \c - test_user - :master_port DROP TABLE numbers_hash; DROP TABLE numbers_hash_other; DROP TABLE numbers_reference; \c - :default_user -- test copy failure inside the node -- it will be done by changing definition of a shard table SET citus.shard_count to 4; CREATE TABLE numbers_hash(a int, b int); SELECT create_distributed_table('numbers_hash', 'a'); create_distributed_table -------------------------- (1 row) \c - - - :worker_1_port ALTER TABLE numbers_hash_560180 DROP COLUMN b; \c - - - :master_port -- operation will fail to modify a shard and roll back COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); ERROR: column "b" of relation "numbers_hash_560180" does not exist CONTEXT: while executing command on localhost:57637 COPY numbers_hash, line 1: "1,1" -- verify no row is inserted SELECT count(a) FROM numbers_hash; count ------- 0 (1 row) -- verify shard is still marked as valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) WHERE logicalrelid = 'numbers_hash'::regclass order by placementid; shardid | shardstate | nodename | nodeport ---------+------------+-----------+---------- 560180 | 1 | localhost | 57637 560180 | 1 | localhost | 57638 560181 | 1 | localhost | 57638 560181 | 1 | localhost | 57637 560182 | 1 | localhost | 57637 560182 | 1 | localhost | 57638 560183 | 1 | localhost | 57638 560183 | 1 | localhost | 57637 (8 rows) DROP TABLE numbers_hash; SELECT * FROM run_command_on_workers('DROP USER test_user'); nodename | nodeport | success | result -----------+----------+---------+----------- localhost | 57637 | t | DROP ROLE localhost | 57638 | t | DROP ROLE (2 rows) DROP USER test_user; -- Test copy with built-in type without binary output function CREATE TABLE test_smgr ( col1 smgr NOT NULL, col2 character varying(255) NOT NULL ); SELECT create_reference_table('test_smgr'); create_reference_table ------------------------ (1 row) \COPY test_smgr FROM STDIN WITH (format CSV) SELECT * FROM test_smgr; col1 | col2 ---------------+------- magnetic disk | test (1 row) DROP TABLE test_smgr; -- Test drop table with copy in the same transaction BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); create_distributed_table 
-------------------------- (1 row) \copy tt1 from STDIN; DROP TABLE tt1; END; -- Test dropping a column in front of the partition column CREATE TABLE drop_copy_test_table (col1 int, col2 int, col3 int, col4 int); SELECT create_distributed_table('drop_copy_test_table','col3'); create_distributed_table -------------------------- (1 row) ALTER TABLE drop_copy_test_table drop column col1; NOTICE: using one-phase commit for distributed DDL commands HINT: You can enable two-phase commit for extra safety with: SET citus.multi_shard_commit_protocol TO '2pc' COPY drop_copy_test_table (col2,col3,col4) from STDIN with CSV; SELECT * FROM drop_copy_test_table WHERE col3 = 1; col2 | col3 | col4 ------+------+------ | 1 | (1 row) ALTER TABLE drop_copy_test_table drop column col4; COPY drop_copy_test_table (col2,col3) from STDIN with CSV; SELECT * FROM drop_copy_test_table WHERE col3 = 1; col2 | col3 ------+------ | 1 | 1 (2 rows) DROP TABLE drop_copy_test_table; -- There should be no "tt1" shard on the worker nodes \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'tt1%'; relname --------- (0 rows) \c - - - :master_port citus-7.0.3/src/test/regress/output/multi_create_schema.source000066400000000000000000000010051317107136600246500ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 250000; CREATE SCHEMA tpch CREATE TABLE nation ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT master_create_distributed_table('tpch.nation', 'n_nationkey', 'append'); master_create_distributed_table --------------------------------- (1 row) \copy tpch.nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' SELECT count(*) from tpch.nation; count ------- 25 (1 row) citus-7.0.3/src/test/regress/output/multi_large_shardid.source000066400000000000000000000047311317107136600246660ustar00rootroot00000000000000-- -- MULTI_LARGE_SHARDID -- -- Load data into distributed tables, and run TPC-H query #1 and #6. This test -- differs from previous tests in that it modifies the *internal* shardId -- generator, forcing the distributed database to use 64-bit shard identifiers. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 100200300400500; -- Load additional data to start using large shard identifiers. \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' -- Query #1 from the TPC-H decision support benchmark. 
SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; l_returnflag | l_linestatus | sum_qty | sum_base_price | sum_disc_price | sum_charge | avg_qty | avg_price | avg_disc | count_order --------------+--------------+-----------+----------------+----------------+------------------+---------------------+--------------------+------------------------+------------- A | F | 150930.00 | 227239747.26 | 215682574.1456 | 224342306.491846 | 25.6334918478260870 | 38593.707075407609 | 0.05055027173913043478 | 5888 N | F | 4044.00 | 6205102.90 | 5905081.4236 | 6145285.541304 | 26.6052631578947368 | 40823.045394736842 | 0.05263157894736842105 | 152 N | O | 299556.00 | 449413896.32 | 427269715.3708 | 444268143.859602 | 25.4594594594594595 | 38195.979629440762 | 0.04939486656467788543 | 11766 R | F | 146312.00 | 217875959.46 | 207033247.3396 | 215487067.568656 | 25.2175112030334367 | 37551.871675284385 | 0.04983798690106859704 | 5802 (4 rows) -- Query #6 from the TPC-H decision support benchmark. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; revenue ------------- 486555.5716 (1 row) citus-7.0.3/src/test/regress/output/multi_load_data.source000066400000000000000000000026121317107136600240020ustar00rootroot00000000000000-- -- MULTI_LOAD_DATA -- -- Tests for loading data in a distributed cluster. Please note that the number -- of shards uploaded depends on two config values: citus.shard_replication_factor and -- citus.shard_max_size. These values are set in pg_regress_multi.pl. Shard placement -- policy is left to the default value (round-robin) to test the common install case. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 290000; \copy lineitem FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' \copy nation FROM '@abs_srcdir@/data/nation.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.data' with delimiter '|' \copy supplier FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \copy supplier_single_shard FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \copy lineitem_hash_part FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \copy orders_hash_part FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' citus-7.0.3/src/test/regress/output/multi_load_large_records.source000066400000000000000000000017111317107136600257030ustar00rootroot00000000000000-- -- MULTI_STAGE_LARGE_RECORDS -- -- Tests for loading data with large records (i.e. 
greater than the read buffer -- size, which is 32kB) in a distributed cluster. These tests make sure that we -- are creating shards of correct size even when records are large. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 300000; SET citus.shard_max_size TO "256kB"; CREATE TABLE large_records_table (data_id integer, data text); SELECT master_create_distributed_table('large_records_table', 'data_id', 'append'); master_create_distributed_table --------------------------------- (1 row) \copy large_records_table FROM '@abs_srcdir@/data/large_records.data' with delimiter '|' SELECT shardminvalue, shardmaxvalue FROM pg_dist_shard, pg_class WHERE pg_class.oid=logicalrelid AND relname='large_records_table' ORDER BY shardid; shardminvalue | shardmaxvalue ---------------+--------------- 1 | 1 2 | 2 (2 rows) RESET citus.shard_max_size; citus-7.0.3/src/test/regress/output/multi_load_more_data.source000066400000000000000000000010521317107136600250210ustar00rootroot00000000000000-- -- MULTI_STAGE_MORE_DATA -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 280000; -- We load more data to customer and part tables to test distributed joins. The -- loading causes the planner to consider customer and part tables as large, and -- evaluate plans where some of the underlying tables need to be repartitioned. \copy customer FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' \copy part FROM '@abs_srcdir@/data/part.more.data' with delimiter '|' citus-7.0.3/src/test/regress/output/multi_master_delete_protocol.source000066400000000000000000000073011317107136600266300ustar00rootroot00000000000000-- -- MULTI_MASTER_DELETE_PROTOCOL -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 320000; -- Create a new range partitioned customer_delete_protocol table and load data into it. CREATE TABLE customer_delete_protocol ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT master_create_distributed_table('customer_delete_protocol', 'c_custkey', 'append'); master_create_distributed_table --------------------------------- (1 row) \copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' \copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.2.data' with delimiter '|' \copy customer_delete_protocol FROM '@abs_srcdir@/data/customer.3.data' with delimiter '|' -- Testing master_apply_delete_command -- Check that we don't support conditions on columns other than partition key. SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_acctbal > 0.0'); ERROR: cannot delete from distributed table DETAIL: Where clause includes a column other than partition column -- Check that free-form deletes are not supported. DELETE FROM customer_delete_protocol WHERE c_custkey > 100; ERROR: cannot run DELETE command which targets multiple shards HINT: Consider using an equality filter on partition column "c_custkey" to target a single shard. If you'd like to run a multi-shard operation, use master_modify_multiple_shards(). -- Check that we delete a shard if and only if all rows in the shard satisfy the condition. 
SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 6500'); master_apply_delete_command ----------------------------- 0 (1 row) SELECT count(*) from customer_delete_protocol; count ------- 3000 (1 row) -- Delete one shard that satisfies the given conditions. SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000 AND c_custkey < 3000'); master_apply_delete_command ----------------------------- 1 (1 row) SELECT count(*) from customer_delete_protocol; count ------- 2000 (1 row) -- Delete all shards if no condition is provided. SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); master_apply_delete_command ----------------------------- 2 (1 row) SELECT count(*) FROM customer_delete_protocol; count ------- 0 (1 row) -- Verify that empty shards are deleted if no condition is provided SELECT 1 AS one FROM master_create_empty_shard('customer_delete_protocol'); one ----- 1 (1 row) SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol WHERE c_custkey > 1000'); master_apply_delete_command ----------------------------- 0 (1 row) SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); master_apply_delete_command ----------------------------- 1 (1 row) -- Verify that master_apply_delete_command can be called in a transaction block SELECT 1 AS one FROM master_create_empty_shard('customer_delete_protocol'); one ----- 1 (1 row) BEGIN; SELECT master_apply_delete_command('DELETE FROM customer_delete_protocol'); master_apply_delete_command ----------------------------- 1 (1 row) COMMIT; citus-7.0.3/src/test/regress/output/multi_mx_copy_data.source000066400000000000000000000024301317107136600245370ustar00rootroot00000000000000-- -- MULTI_MX_COPY_DATA -- \COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; SET search_path TO citus_mx_test_schema; \COPY nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; \COPY citus_mx_test_schema_join_1.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; \COPY citus_mx_test_schema_join_1.nation_hash_2 FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; \COPY citus_mx_test_schema_join_2.nation_hash FROM '@abs_srcdir@/data/nation.data' with delimiter '|'; -- now try loading data from worker node \c - - - :worker_1_port SET search_path TO public; \COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.1.data' with delimiter '|' \COPY lineitem_mx FROM '@abs_srcdir@/data/lineitem.2.data' with delimiter '|' \COPY orders_mx FROM '@abs_srcdir@/data/orders.1.data' with delimiter '|' \COPY orders_mx FROM '@abs_srcdir@/data/orders.2.data' with delimiter '|' -- and use second worker as well \c - - - :worker_2_port SET search_path TO public; \COPY customer_mx FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' \COPY nation_mx FROM '@abs_srcdir@/data/nation.data' with delimiter '|' \COPY part_mx FROM '@abs_srcdir@/data/part.data' with delimiter '|' \COPY supplier_mx FROM '@abs_srcdir@/data/supplier.data' with delimiter '|' citus-7.0.3/src/test/regress/output/multi_outer_join.source000066400000000000000000000652301317107136600242540ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 310000; SET citus.large_table_shard_count TO 2; SET citus.log_multi_join_order to true; SET client_min_messages TO LOG; CREATE TABLE multi_outer_join_left ( l_custkey integer not null, l_name varchar(25) not null, l_address varchar(40) not null, l_nationkey integer not null, 
l_phone char(15) not null, l_acctbal decimal(15,2) not null, l_mktsegment char(10) not null, l_comment varchar(117) not null ); SELECT master_create_distributed_table('multi_outer_join_left', 'l_custkey', 'append'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE multi_outer_join_right ( r_custkey integer not null, r_name varchar(25) not null, r_address varchar(40) not null, r_nationkey integer not null, r_phone char(15) not null, r_acctbal decimal(15,2) not null, r_mktsegment char(10) not null, r_comment varchar(117) not null ); SELECT master_create_distributed_table('multi_outer_join_right', 'r_custkey', 'append'); master_create_distributed_table --------------------------------- (1 row) CREATE TABLE multi_outer_join_third ( t_custkey integer not null, t_name varchar(25) not null, t_address varchar(40) not null, t_nationkey integer not null, t_phone char(15) not null, t_acctbal decimal(15,2) not null, t_mktsegment char(10) not null, t_comment varchar(117) not null ); SELECT master_create_distributed_table('multi_outer_join_third', 't_custkey', 'append'); master_create_distributed_table --------------------------------- (1 row) -- Make sure we do not crash if both tables have no shards SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- Left table is a large table \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has no shards SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_third b ON (l_custkey = t_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning SELECT min(t_custkey), max(t_custkey) FROM multi_outer_join_third a LEFT JOIN multi_outer_join_right b ON (r_custkey = t_custkey); LOG: join order: [ "multi_outer_join_third" ][ broadcast join "multi_outer_join_right" ] min | max -----+----- | (1 row) -- Third table is a single shard table with all data \copy multi_outer_join_third FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] min | max -----+----- 1 | 20 (1 row) -- Since this is a broadcast join, we should be able to join on any key SELECT count(*) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_nationkey = r_nationkey); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] count ------- 28 (1 row) -- Anti-join should return customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] min | max -----+----- 16 | 20 (1 row) 
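-- Illustrative sketch, not part of the original test and shown here without its
-- expected output: the LEFT JOIN ... WHERE r_custkey IS NULL query above is the
-- classic anti-join pattern; it is logically equivalent to the NOT EXISTS form
-- below, although this Citus version may plan (or reject) the two differently:
SELECT min(l_custkey), max(l_custkey)
FROM multi_outer_join_left a
WHERE NOT EXISTS (
	SELECT 1 FROM multi_outer_join_right b WHERE b.r_custkey = a.l_custkey);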
-- Partial anti-join with specific value SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 5; LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] min | max -----+----- 5 | 20 (1 row) -- This query is an INNER JOIN in disguise since there cannot be NULL results -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey = 5 or r_custkey > 15; LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] min | max -----+----- 5 | 5 (1 row) -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND r_custkey = 5); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] count | count -------+------- 20 | 1 (1 row) -- Apply a filter before the join (no matches right) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND r_custkey = -1 /* nonexistant */); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] count | count -------+------- 20 | 0 (1 row) -- Apply a filter before the join (no matches left) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND l_custkey = -1 /* nonexistant */); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] count | count -------+------- 20 | 0 (1 row) -- Right join should be disallowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- Reverse right join should be same as left join SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_right a RIGHT JOIN multi_outer_join_left b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_right" ] min | max -----+----- 1 | 20 (1 row) -- Turn the right table into a large table \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Shards do not have 1-1 matching. We should error here. 
SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- empty tables SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_left'); master_apply_delete_command ----------------------------- 2 (1 row) SELECT * FROM master_apply_delete_command('DELETE FROM multi_outer_join_right'); master_apply_delete_command ----------------------------- 2 (1 row) -- reload shards with 1-1 matching \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' \copy multi_outer_join_right FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third is a single shard table -- Regular left join should work as expected SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] min | max -----+----- 1 | 30 (1 row) -- Since we cannot broadcast or re-partition, joining on a different key should error out SELECT count(*) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_nationkey = r_nationkey); ERROR: cannot run outer join query if join is not on the partition column DETAIL: Outer joins requiring repartitioning are not supported. -- Anti-join should return customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] min | max -----+----- 1 | 10 (1 row) -- Partial anti-join with specific value (5, 11-15) SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 15; LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] min | max -----+----- 1 | 15 (1 row) -- This query is an INNER JOIN in disguise since there cannot be NULL results (21) -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey) WHERE r_custkey = 21 or r_custkey < 10; LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] min | max -----+----- 21 | 21 (1 row) -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey AND r_custkey = 21); LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] count | count -------+------- 25 | 1 (1 row) -- Right join should be allowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_right" ][ local partition join "multi_outer_join_left" ] min | max -----+----- 11 | 30 (1 row) -- Reverse right join should be same as left join SELECT 
min(l_custkey), max(l_custkey) FROM multi_outer_join_right a RIGHT JOIN multi_outer_join_left b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] min | max -----+----- 1 | 30 (1 row) -- complex query tree should error out SELECT * FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left l2 ON (r2.r_custkey = l2.l_custkey); ERROR: could not run distributed query with complex join orders -- add an anti-join, this should also error out SELECT * FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left l2 ON (r2.r_custkey = l2.l_custkey) WHERE r1.r_custkey is NULL; ERROR: could not run distributed query with complex join orders -- Three way join 2-2-1 (local + broadcast join) should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third t1 ON (r1.r_custkey = t1.t_custkey); LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ] l_custkey | r_custkey | t_custkey -----------+-----------+----------- 1 | | 2 | | 3 | | 4 | | 5 | | 6 | | 7 | | 8 | | 9 | | 10 | | 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (25 rows) -- Right join with single shard right most table should error out SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left l1 LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_third t1 ON (r1.r_custkey = t1.t_custkey); ERROR: could not run distributed query with complex join orders -- Right join with single shard left most table should work SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third t1 RIGHT JOIN multi_outer_join_right r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left l1 ON (r1.r_custkey = l1.l_custkey); LOG: join order: [ "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_left" ] t_custkey | r_custkey | l_custkey -----------+-----------+----------- 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 16 | 16 | 17 | 17 | 18 | 18 | 19 | 19 | 20 | 20 | 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (20 rows) -- Make it anti-join, should display values with l_custkey is null SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third t1 RIGHT JOIN multi_outer_join_right r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left l1 ON (r1.r_custkey = l1.l_custkey) WHERE l_custkey is NULL; LOG: join order: [ "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_left" ] t_custkey | r_custkey | l_custkey -----------+-----------+----------- 16 | 16 | 17 | 17 | 18 | 18 | 19 | 19 | 20 | 20 | (5 rows) -- Cascading right join with single shard left most table should error out SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third t1 RIGHT JOIN multi_outer_join_right r1 ON 
(t1.t_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_left l1 ON (r1.r_custkey = l1.l_custkey); ERROR: could not run distributed query with complex join orders -- full outer join should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] l_custkey | r_custkey -----------+----------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 11 12 | 12 13 | 13 14 | 14 15 | 15 | 20 | 17 | 18 | 19 | 16 21 | 21 22 | 22 23 | 23 24 | 24 25 | 25 26 | 26 27 | 27 28 | 28 29 | 29 30 | 30 (30 rows) -- full outer join + anti (right) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL; LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] l_custkey | r_custkey -----------+----------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | (10 rows) -- full outer join + anti (left) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL; LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] l_custkey | r_custkey -----------+----------- | 20 | 17 | 18 | 19 | 16 (5 rows) -- full outer join + anti (both) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL or r_custkey is NULL; LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ] l_custkey | r_custkey -----------+----------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | | 20 | 17 | 18 | 19 | 16 (15 rows) -- full outer join should error out for mismatched shards SELECT l_custkey, t_custkey FROM multi_outer_join_left l1 FULL JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- inner join + single shard left join should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third t1 ON (r1.r_custkey = t1.t_custkey); LOG: join order: [ "multi_outer_join_left" ][ local partition join "multi_outer_join_right" ][ broadcast join "multi_outer_join_third" ] l_custkey | r_custkey | t_custkey -----------+-----------+----------- 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (15 rows) -- inner (broadcast) join + 2 shards left (local) join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- 1 | 1 | 2 | 2 | 3 | 3 | 4 | 4 | 5 | 5 | 6 | 6 | 7 | 7 | 8 | 8 | 9 | 9 | 10 | 10 | 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 
14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (25 rows) -- inner (local) join + 2 shards left (dual partition) join should error out SELECT t_custkey, l_custkey, r_custkey FROM multi_outer_join_third t1 INNER JOIN multi_outer_join_left l1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- inner (local) join + 2 shards left (dual partition) join should error out SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey); LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- 1 | 1 | 2 | 2 | 3 | 3 | 4 | 4 | 5 | 5 | 6 | 6 | 7 | 7 | 8 | 8 | 9 | 9 | 10 | 10 | 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (25 rows) -- inner (broadcast) join + 2 shards left (local) + anti join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left l1 INNER JOIN multi_outer_join_third t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL; LOG: join order: [ "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ][ local partition join "multi_outer_join_right" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- 1 | 1 | 2 | 2 | 3 | 3 | 4 | 4 | 5 | 5 | 6 | 6 | 7 | 7 | 8 | 8 | 9 | 9 | 10 | 10 | (10 rows) -- Test joinExpr aliases by performing an outer-join. 
SELECT t_custkey FROM (multi_outer_join_right r1 LEFT OUTER JOIN multi_outer_join_left l1 ON (l1.l_custkey = r1.r_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN multi_outer_join_third t1 ON (test.c_custkey = t1.t_custkey); LOG: join order: [ "multi_outer_join_right" ][ local partition join "multi_outer_join_left" ][ broadcast join "multi_outer_join_third" ] t_custkey ----------- 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 (20 rows) -- flattened out subqueries with outer joins are not supported SELECT l1.l_custkey, count(*) as cnt FROM ( SELECT l_custkey, l_nationkey FROM multi_outer_join_left WHERE l_comment like '%a%' ) l1 LEFT JOIN ( SELECT r_custkey, r_name FROM multi_outer_join_right WHERE r_comment like '%b%' ) l2 ON l1.l_custkey = l2.r_custkey GROUP BY l1.l_custkey ORDER BY cnt DESC, l1.l_custkey DESC LIMIT 20; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries in outer joins are not supported -- Add a shard to the left table that overlaps with multiple shards in the right \copy multi_outer_join_left FROM '@abs_srcdir@/data/customer.1.data' with delimiter '|' -- All outer joins should error out SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a LEFT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a RIGHT JOIN multi_outer_join_right b ON (l_custkey = r_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left a FULL JOIN multi_outer_join_right b ON (l_custkey = r_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning SELECT t_custkey FROM (multi_outer_join_right r1 LEFT OUTER JOIN multi_outer_join_left l1 ON (l1.l_custkey = r1.r_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN multi_outer_join_third t1 ON (test.c_custkey = t1.t_custkey); ERROR: cannot perform distributed planning on this query DETAIL: Shards of relations in outer join queries must have 1-to-1 shard partitioning -- simple test to ensure anti-joins work with hash-partitioned tables CREATE TABLE left_values(val int); SELECT master_create_distributed_table('left_values', 'val', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('left_values', 16, 1); master_create_worker_shards ----------------------------- (1 row) \copy left_values from stdin CREATE TABLE right_values(val int); SELECT master_create_distributed_table('right_values', 'val', 'hash'); master_create_distributed_table --------------------------------- (1 row) SELECT master_create_worker_shards('right_values', 16, 1); master_create_worker_shards ----------------------------- (1 row) \copy right_values from stdin SELECT * FROM left_values AS l LEFT JOIN right_values AS r ON l.val = r.val WHERE r.val IS NULL; LOG: join order: [ "left_values" ][ local partition join "right_values" ] val | val -----+----- 1 | 5 | (2 rows) citus-7.0.3/src/test/regress/output/multi_outer_join_reference.source000066400000000000000000000676471317107136600263100ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1260000; SET citus.log_multi_join_order to 
true; SET client_min_messages TO LOG; SET citus.shard_count TO 4; CREATE TABLE multi_outer_join_left_hash ( l_custkey integer not null, l_name varchar(25) not null, l_address varchar(40) not null, l_nationkey integer not null, l_phone char(15) not null, l_acctbal decimal(15,2) not null, l_mktsegment char(10) not null, l_comment varchar(117) not null ); SELECT create_distributed_table('multi_outer_join_left_hash', 'l_custkey'); create_distributed_table -------------------------- (1 row) CREATE TABLE multi_outer_join_right_reference ( r_custkey integer not null, r_name varchar(25) not null, r_address varchar(40) not null, r_nationkey integer not null, r_phone char(15) not null, r_acctbal decimal(15,2) not null, r_mktsegment char(10) not null, r_comment varchar(117) not null ); SELECT create_reference_table('multi_outer_join_right_reference'); create_reference_table ------------------------ (1 row) CREATE TABLE multi_outer_join_third_reference ( t_custkey integer not null, t_name varchar(25) not null, t_address varchar(40) not null, t_nationkey integer not null, t_phone char(15) not null, t_acctbal decimal(15,2) not null, t_mktsegment char(10) not null, t_comment varchar(117) not null ); SELECT create_reference_table('multi_outer_join_third_reference'); create_reference_table ------------------------ (1 row) CREATE TABLE multi_outer_join_right_hash ( r_custkey integer not null, r_name varchar(25) not null, r_address varchar(40) not null, r_nationkey integer not null, r_phone char(15) not null, r_acctbal decimal(15,2) not null, r_mktsegment char(10) not null, r_comment varchar(117) not null ); SELECT create_distributed_table('multi_outer_join_right_hash', 'r_custkey'); create_distributed_table -------------------------- (1 row) -- Make sure we do not crash if both tables are emmpty SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_third_reference b ON (l_custkey = t_custkey); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_third_reference" ] min | max -----+----- | (1 row) -- Left table is a large table \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-10.data' with delimiter '|' \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' -- Right table is a small table \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' -- Make sure we do not crash if one table has data SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_third_reference b ON (l_custkey = t_custkey); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_third_reference" ] min | max -----+----- 1 | 20 (1 row) SELECT min(t_custkey), max(t_custkey) FROM multi_outer_join_third_reference a LEFT JOIN multi_outer_join_right_reference b ON (r_custkey = t_custkey); min | max -----+----- | (1 row) -- Third table is a single shard table with all data \copy multi_outer_join_third_reference FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-1-30.data' with delimiter '|' -- Regular outer join should return results for all rows SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey); LOG: join order: [ 
"multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 1 | 20 (1 row) -- Since this is a broadcast join, we should be able to join on any key SELECT count(*) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_nationkey = r_nationkey); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] count ------- 28 (1 row) -- Anti-join should return customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 16 | 20 (1 row) -- Partial anti-join with specific value SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 5; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 5 | 20 (1 row) -- This query is an INNER JOIN in disguise since there cannot be NULL results -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey = 5 or r_custkey > 15; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 5 | 5 (1 row) -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND r_custkey = 5); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] count | count -------+------- 20 | 1 (1 row) -- Apply a filter before the join (no matches right) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND r_custkey = -1 /* nonexistant */); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] count | count -------+------- 20 | 0 (1 row) -- Apply a filter before the join (no matches left) SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND l_custkey = -1 /* nonexistant */); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] count | count -------+------- 20 | 0 (1 row) -- Right join should be disallowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left_hash a RIGHT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey); ERROR: cannot run outer join query if join is not on the partition column DETAIL: Outer joins requiring repartitioning are not supported. 
-- Reverse right join should be same as left join SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_right_reference a RIGHT JOIN multi_outer_join_left_hash b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 1 | 20 (1 row) -- load some more data \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- Update shards so that they do not have 1-1 matching, triggering an error. UPDATE pg_dist_shard SET shardminvalue = '2147483646' WHERE shardid = 1260006; UPDATE pg_dist_shard SET shardmaxvalue = '2147483647' WHERE shardid = 1260006; SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_hash b ON (l_custkey = r_custkey); ERROR: hash partitioned table has overlapping shards UPDATE pg_dist_shard SET shardminvalue = '-2147483648' WHERE shardid = 1260006; UPDATE pg_dist_shard SET shardmaxvalue = '-1073741825' WHERE shardid = 1260006; -- empty tables SELECT master_modify_multiple_shards('DELETE FROM multi_outer_join_left_hash'); master_modify_multiple_shards ------------------------------- 20 (1 row) SELECT master_modify_multiple_shards('DELETE FROM multi_outer_join_right_hash'); master_modify_multiple_shards ------------------------------- 45 (1 row) DELETE FROM multi_outer_join_right_reference; -- reload shards with 1-1 matching \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-1-15.data' with delimiter '|' \copy multi_outer_join_left_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' \copy multi_outer_join_right_reference FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-11-20.data' with delimiter '|' \copy multi_outer_join_right_hash FROM '@abs_srcdir@/data/customer-21-30.data' with delimiter '|' -- multi_outer_join_third_reference is a single shard table -- Regular left join should work as expected SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_hash b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_left_hash" ][ local partition join "multi_outer_join_right_hash" ] min | max -----+----- 1 | 30 (1 row) -- Citus can use broadcast join here SELECT count(*) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_hash b ON (l_nationkey = r_nationkey); ERROR: cannot run outer join query if join is not on the partition column DETAIL: Outer joins requiring repartitioning are not supported. 
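-- Illustrative sketch, not part of the original test and shown here without its
-- expected output: the 1-to-1 shard matching that the manual pg_dist_shard
-- updates above briefly broke can be eyeballed by comparing the hash ranges of
-- the two hash-distributed tables side by side in the Citus metadata:
SELECT logicalrelid::regclass, shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid IN ('multi_outer_join_left_hash'::regclass,
                       'multi_outer_join_right_hash'::regclass)
ORDER BY shardminvalue::int, logicalrelid;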
-- Anti-join should return customers for which there is no row in the right table SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 1 | 10 (1 row) -- Partial anti-join with specific value (5, 11-15) SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey IS NULL OR r_custkey = 15; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 1 | 15 (1 row) -- This query is an INNER JOIN in disguise since there cannot be NULL results (21) -- Added extra filter to make query not router plannable SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey) WHERE r_custkey = 21 or r_custkey < 10; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 21 | 21 (1 row) -- Apply a filter before the join SELECT count(l_custkey), count(r_custkey) FROM multi_outer_join_left_hash a LEFT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey AND r_custkey = 21); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] count | count -------+------- 25 | 1 (1 row) -- Right join should not be allowed in this case SELECT min(r_custkey), max(r_custkey) FROM multi_outer_join_left_hash a RIGHT JOIN multi_outer_join_right_reference b ON (l_custkey = r_custkey); ERROR: cannot run outer join query if join is not on the partition column DETAIL: Outer joins requiring repartitioning are not supported. -- Reverse right join should be same as left join SELECT min(l_custkey), max(l_custkey) FROM multi_outer_join_right_reference a RIGHT JOIN multi_outer_join_left_hash b ON (l_custkey = r_custkey); LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ] min | max -----+----- 1 | 30 (1 row) -- complex query tree should error out SELECT * FROM multi_outer_join_left_hash l1 LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right_reference r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left_hash l2 ON (r2.r_custkey = l2.l_custkey); ERROR: could not run distributed query with complex join orders HINT: Consider joining tables on partition column and have equal filter on joining columns. -- add an anti-join, this should also error out SELECT * FROM multi_outer_join_left_hash l1 LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_right_reference r2 ON (l1.l_custkey = r2.r_custkey) RIGHT JOIN multi_outer_join_left_hash l2 ON (r2.r_custkey = l2.l_custkey) WHERE r1.r_custkey is NULL; ERROR: could not run distributed query with complex join orders HINT: Consider joining tables on partition column and have equal filter on joining columns. 
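-- Illustrative sketch, not part of the original test and shown here without its
-- expected output: the LOG "join order" lines throughout this file come from the
-- citus.log_multi_join_order setting enabled near the top; like any GUC it can be
-- checked or toggled per session, for example:
SHOW citus.log_multi_join_order;
SET citus.log_multi_join_order TO false;
SET citus.log_multi_join_order TO true;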
-- Three way join 2-1-1 (broadcast + broadcast join) should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left_hash l1 LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey) ORDER BY 1; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_right_reference" ][ broadcast join "multi_outer_join_third_reference" ] l_custkey | r_custkey | t_custkey -----------+-----------+----------- 1 | | 2 | | 3 | | 4 | | 5 | | 6 | | 7 | | 8 | | 9 | | 10 | | 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (25 rows) -- Right join with single shard right most table should error out SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left_hash l1 LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey); ERROR: could not run distributed query with complex join orders HINT: Consider joining tables on partition column and have equal filter on joining columns. -- Right join with single shard left most table should work SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third_reference t1 RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey) ORDER BY 1,2,3; LOG: join order: [ "multi_outer_join_right_hash" ][ broadcast join "multi_outer_join_third_reference" ][ local partition join "multi_outer_join_left_hash" ] t_custkey | r_custkey | l_custkey -----------+-----------+----------- 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 16 | 16 | 17 | 17 | 18 | 18 | 19 | 19 | 20 | 20 | 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (20 rows) -- Make it anti-join, should display values with l_custkey is null SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third_reference t1 RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey) WHERE l_custkey is NULL ORDER BY 1; LOG: join order: [ "multi_outer_join_right_hash" ][ broadcast join "multi_outer_join_third_reference" ][ local partition join "multi_outer_join_left_hash" ] t_custkey | r_custkey | l_custkey -----------+-----------+----------- 16 | 16 | 17 | 17 | 18 | 18 | 19 | 19 | 20 | 20 | (5 rows) -- Cascading right join with single shard left most table should error out SELECT t_custkey, r_custkey, l_custkey FROM multi_outer_join_third_reference t1 RIGHT JOIN multi_outer_join_right_hash r1 ON (t1.t_custkey = r1.r_custkey) RIGHT JOIN multi_outer_join_left_hash l1 ON (r1.r_custkey = l1.l_custkey); ERROR: could not run distributed query with complex join orders HINT: Consider joining tables on partition column and have equal filter on joining columns. 
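-- Illustrative sketch, not part of the original test and shown here without its
-- expected output: the full outer joins below depend on the two hash-distributed
-- tables sharing a co-location group; their grouping can be checked in the Citus
-- pg_dist_partition catalog:
SELECT logicalrelid::regclass, partmethod, colocationid
FROM pg_dist_partition
WHERE logicalrelid IN ('multi_outer_join_left_hash'::regclass,
                       'multi_outer_join_right_hash'::regclass);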
-- full outer join should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) ORDER BY 1,2; LOG: join order: [ "multi_outer_join_left_hash" ][ local partition join "multi_outer_join_right_hash" ] l_custkey | r_custkey -----------+----------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 11 12 | 12 13 | 13 14 | 14 15 | 15 21 | 21 22 | 22 23 | 23 24 | 24 25 | 25 26 | 26 27 | 27 28 | 28 29 | 29 30 | 30 | 16 | 17 | 18 | 19 | 20 (30 rows) -- full outer join + anti (right) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL ORDER BY 1; LOG: join order: [ "multi_outer_join_left_hash" ][ local partition join "multi_outer_join_right_hash" ] l_custkey | r_custkey -----------+----------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | (10 rows) -- full outer join + anti (left) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL ORDER BY 2; LOG: join order: [ "multi_outer_join_left_hash" ][ local partition join "multi_outer_join_right_hash" ] l_custkey | r_custkey -----------+----------- | 16 | 17 | 18 | 19 | 20 (5 rows) -- full outer join + anti (both) should work with 1-1 matched shards SELECT l_custkey, r_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE l_custkey is NULL or r_custkey is NULL ORDER BY 1,2 DESC; LOG: join order: [ "multi_outer_join_left_hash" ][ local partition join "multi_outer_join_right_hash" ] l_custkey | r_custkey -----------+----------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | | 20 | 19 | 18 | 17 | 16 (15 rows) -- full outer join should error out for mismatched shards SELECT l_custkey, t_custkey FROM multi_outer_join_left_hash l1 FULL JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey); ERROR: cannot run outer join query if join is not on the partition column DETAIL: Outer joins requiring repartitioning are not supported. 
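-- Editorial note (not part of the original expected output): the anti-join variants above
-- filter a FULL JOIN on NULLs from one or both sides. In standard SQL, the "both sides"
-- variant is equivalent to combining the two one-sided anti-joins, as sketched below; this
-- illustrates the semantics only and is not an additional test case, so whether the
-- distributed planner accepts this exact form is not shown here.
SELECT l_custkey, NULL::integer AS r_custkey
FROM multi_outer_join_left_hash l1
WHERE NOT EXISTS (SELECT 1 FROM multi_outer_join_right_hash r1 WHERE r1.r_custkey = l1.l_custkey)
UNION ALL
SELECT NULL::integer, r_custkey
FROM multi_outer_join_right_hash r1
WHERE NOT EXISTS (SELECT 1 FROM multi_outer_join_left_hash l1 WHERE l1.l_custkey = r1.r_custkey);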
-- inner join + single shard left join should work SELECT l_custkey, r_custkey, t_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) LEFT JOIN multi_outer_join_third_reference t1 ON (r1.r_custkey = t1.t_custkey) ORDER BY 1; LOG: join order: [ "multi_outer_join_left_hash" ][ local partition join "multi_outer_join_right_hash" ][ broadcast join "multi_outer_join_third_reference" ] l_custkey | r_custkey | t_custkey -----------+-----------+----------- 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (15 rows) -- inner (broadcast) join + 2 shards left (local) join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) ORDER BY 1,2,3; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_third_reference" ][ local partition join "multi_outer_join_right_hash" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- 1 | 1 | 2 | 2 | 3 | 3 | 4 | 4 | 5 | 5 | 6 | 6 | 7 | 7 | 8 | 8 | 9 | 9 | 10 | 10 | 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (25 rows) -- inner (local) join + 2 shards left (dual partition) join should error out SELECT t_custkey, l_custkey, r_custkey FROM multi_outer_join_third_reference t1 INNER JOIN multi_outer_join_left_hash l1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_reference r1 ON (l1.l_custkey = r1.r_custkey); ERROR: cannot run outer join query if join is not on the partition column DETAIL: Outer joins requiring repartitioning are not supported. -- inner (local) join + 2 shards left (dual partition) join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) ORDER BY 1,2,3; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_third_reference" ][ local partition join "multi_outer_join_right_hash" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- 1 | 1 | 2 | 2 | 3 | 3 | 4 | 4 | 5 | 5 | 6 | 6 | 7 | 7 | 8 | 8 | 9 | 9 | 10 | 10 | 11 | 11 | 11 12 | 12 | 12 13 | 13 | 13 14 | 14 | 14 15 | 15 | 15 21 | 21 | 21 22 | 22 | 22 23 | 23 | 23 24 | 24 | 24 25 | 25 | 25 26 | 26 | 26 27 | 27 | 27 28 | 28 | 28 29 | 29 | 29 30 | 30 | 30 (25 rows) -- inner (broadcast) join + 2 shards left (local) + anti join should work SELECT l_custkey, t_custkey, r_custkey FROM multi_outer_join_left_hash l1 INNER JOIN multi_outer_join_third_reference t1 ON (l1.l_custkey = t1.t_custkey) LEFT JOIN multi_outer_join_right_hash r1 ON (l1.l_custkey = r1.r_custkey) WHERE r_custkey is NULL ORDER BY 1; LOG: join order: [ "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_third_reference" ][ local partition join "multi_outer_join_right_hash" ] l_custkey | t_custkey | r_custkey -----------+-----------+----------- 1 | 1 | 2 | 2 | 3 | 3 | 4 | 4 | 5 | 5 | 6 | 6 | 7 | 7 | 8 | 8 | 9 | 9 | 10 | 10 | (10 rows) -- Test joinExpr aliases by performing an outer-join. 
SELECT t_custkey FROM (multi_outer_join_right_hash r1 LEFT OUTER JOIN multi_outer_join_left_hash l1 ON (l1.l_custkey = r1.r_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN multi_outer_join_third_reference t1 ON (test.c_custkey = t1.t_custkey) ORDER BY 1; LOG: join order: [ "multi_outer_join_right_hash" ][ local partition join "multi_outer_join_left_hash" ][ broadcast join "multi_outer_join_third_reference" ] t_custkey ----------- 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 (20 rows) -- flattened out subqueries with outer joins are not supported SELECT l1.l_custkey, count(*) as cnt FROM ( SELECT l_custkey, l_nationkey FROM multi_outer_join_left_hash WHERE l_comment like '%a%' ) l1 LEFT JOIN ( SELECT r_custkey, r_name FROM multi_outer_join_right_reference WHERE r_comment like '%b%' ) l2 ON l1.l_custkey = l2.r_custkey GROUP BY l1.l_custkey ORDER BY cnt DESC, l1.l_custkey DESC LIMIT 20; ERROR: cannot perform distributed planning on this query DETAIL: Subqueries in outer joins are not supported -- full join among reference tables should go thourgh router planner SELECT t_custkey, r_custkey FROM multi_outer_join_right_reference FULL JOIN multi_outer_join_third_reference ON (t_custkey = r_custkey) ORDER BY 1; t_custkey | r_custkey -----------+----------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 11 12 | 12 13 | 13 14 | 14 15 | 15 16 | 16 17 | 17 18 | 18 19 | 19 20 | 20 21 | 21 22 | 22 23 | 23 24 | 24 25 | 25 26 | 26 27 | 27 28 | 28 29 | 29 30 | 30 (30 rows) -- DROP unused tables to clean up workspace DROP TABLE multi_outer_join_left_hash; DROP TABLE multi_outer_join_right_reference; DROP TABLE multi_outer_join_third_reference; DROP TABLE multi_outer_join_right_hash; citus-7.0.3/src/test/regress/output/worker_copy.source000066400000000000000000000004621317107136600232240ustar00rootroot00000000000000-- -- WORKER_COPY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 260000; COPY lineitem FROM '@abs_srcdir@/data/lineitem.1.data' WITH DELIMITER '|'; COPY lineitem FROM '@abs_srcdir@/data/lineitem.2.data' WITH DELIMITER '|'; COPY supplier FROM '@abs_srcdir@/data/supplier.data' WITH DELIMITER '|'; citus-7.0.3/src/test/regress/pg_regress_multi.pl000077500000000000000000000500141317107136600220070ustar00rootroot00000000000000#!/usr/bin/perl -w #---------------------------------------------------------------------- # # pg_regress_multi.pl - Test runner for Citus # # Portions Copyright (c) 2012-2016, Citus Data, Inc. 
# Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # # src/test/regress/pg_regress_multi.pl # #---------------------------------------------------------------------- use strict; use warnings; use Fcntl; use Getopt::Long; sub Usage() { print "pg_regress_multi - Citus test runner\n"; print "\n"; print "Usage:\n"; print " pg_regress_multi [MULTI OPTIONS] -- [PG REGRESS OPTS]\n"; print "\n"; print "Multi Options:\n"; print " --isolationtester Run isolationtester tests instead of plain tests\n"; print " --vanillatest Run postgres tests with citus loaded as shared preload library\n"; print " --bindir Path to postgres binary directory\n"; print " --libdir Path to postgres library directory\n"; print " --postgres-builddir Path to postgres build directory\n"; print " --postgres-srcdir Path to postgres build directory\n"; print " --pgxsdir Path to the PGXS directory\n"; print " --load-extension Extensions to install in all nodes\n"; print " --server-option Config option to pass to the server\n"; print " --valgrind Run server via valgrind\n"; print " --valgrind-path Path to the valgrind executable\n"; print " --valgrind-log-file Path to the write valgrind logs\n"; print " --pg_ctl-timeout Timeout for pg_ctl\n"; print " --connection-timeout Timeout for connecting to worker nodes\n"; exit 1; } # Option parsing my $isolationtester = 0; my $vanillatest = 0; my $followercluster = 0; my $bindir = ""; my $libdir = undef; my $pgxsdir = ""; my $postgresBuilddir = ""; my $postgresSrcdir = ""; my $majorversion = ""; my @extensions = (); my @userPgOptions = (); my %dataTypes = (); my %fdws = (); my %fdwServers = (); my %functions = (); my %operators = (); my $valgrind = 0; my $valgrindPath = "valgrind"; my $valgrindLogFile = "valgrind_test_log.txt"; my $pgCtlTimeout = undef; my $connectionTimeout = 5000; my $serversAreShutdown = "TRUE"; GetOptions( 'isolationtester' => \$isolationtester, 'vanillatest' => \$vanillatest, 'follower-cluster' => \$followercluster, 'bindir=s' => \$bindir, 'libdir=s' => \$libdir, 'pgxsdir=s' => \$pgxsdir, 'postgres-builddir=s' => \$postgresBuilddir, 'postgres-srcdir=s' => \$postgresSrcdir, 'majorversion=s' => \$majorversion, 'load-extension=s' => \@extensions, 'server-option=s' => \@userPgOptions, 'valgrind' => \$valgrind, 'valgrind-path=s' => \$valgrindPath, 'valgrind-log-file=s' => \$valgrindLogFile, 'pg_ctl-timeout=s' => \$pgCtlTimeout, 'connection-timeout=s' => \$connectionTimeout, 'help' => sub { Usage() }); # Update environment to include [DY]LD_LIBRARY_PATH/LIBDIR/etc - # pointing to the libdir - that's required so the right version of # libpq, citus et al is being picked up. # # XXX: There's some issues with el capitan's SIP here, causing # DYLD_LIBRARY_PATH not being inherited if SIP is enabled. That's a # know problem, present in postgres itself as well. if (defined $libdir) { $ENV{LD_LIBRARY_PATH} = "$libdir:".($ENV{LD_LIBRARY_PATH} || ''); $ENV{DYLD_LIBRARY_PATH} = "$libdir:".($ENV{DYLD_LIBRARY_PATH} || ''); $ENV{LIBPATH} = "$libdir:".($ENV{LIBPATH} || ''); $ENV{PATH} = "$libdir:".($ENV{PATH} || ''); } # Put $bindir to the end of PATH. We want to prefer system binaries by # default (as e.g. new libpq and old psql can cause issues), but still # want to find binaries if they're not in PATH. if (defined $bindir) { $ENV{PATH} = ($ENV{PATH} || '').":$bindir"; } # Most people are used to unified diffs these days, rather than the # context diffs pg_regress defaults to. 
Change default to avoid # everyone having to (re-)learn how to change that setting. Also add # a bit more context to make it easier to locate failed test sections. $ENV{PG_REGRESS_DIFF_OPTS} = '-dU10'; my $plainRegress = "$pgxsdir/src/test/regress/pg_regress"; my $isolationRegress = "${postgresBuilddir}/src/test/isolation/pg_isolation_regress"; if ($isolationtester && ! -f "$isolationRegress") { die <<"MESSAGE"; isolationtester not found at $isolationRegress. isolationtester tests can only be run when source (detected as ${postgresSrcdir}) and build (detected as ${postgresBuilddir}) directory corresponding to $bindir are present. Additionally isolationtester in src/test/isolation needs to be built, which it is not by default if tests have not been run. If the build directory is present locally "make -C ${postgresBuilddir} all" should do the trick. MESSAGE } my $vanillaRegress = "${postgresBuilddir}/src/test/regress/pg_regress"; if ($vanillatest && ! -f "$vanillaRegress") { die <<"MESSAGE"; pg_regress (for vanilla tests) not found at $vanillaRegress. Vanilla tests can only be run when source (detected as ${postgresSrcdir}) and build (detected as ${postgresBuilddir}) directory corresponding to $bindir are present. MESSAGE } # If pgCtlTimeout is defined, we will set related environment variable. # This is generally used with valgrind because valgrind starts slow and we # need to increase timeout. if (defined $pgCtlTimeout) { $ENV{PGCTLTIMEOUT} = "$pgCtlTimeout"; } # We don't want valgrind to run pg_ctl itself, as that'd trigger a lot # of spurious OS failures, e.g. in bash. So instead we have to replace # the postgres binary with a wrapper that exec's valgrind, which in # turn then executes postgres. That's unfortunately at the moment the # only reliable way to do this. sub replace_postgres { if (-e "$bindir/postgres.orig") { print "wrapper exists\n"; } else { print "moving $bindir/postgres to $bindir/postgres.orig\n"; rename "$bindir/postgres", "$bindir/postgres.orig" or die "Could not move postgres out of the way"; } sysopen my $fh, "$bindir/postgres", O_CREAT|O_TRUNC|O_RDWR, 0700 or die "Could not create postgres wrapper at $bindir/postgres"; print $fh <<"END"; #!/bin/bash exec $valgrindPath \\ --quiet \\ --suppressions=${postgresSrcdir}/src/tools/valgrind.supp \\ --trace-children=yes --track-origins=yes --read-var-info=no \\ --leak-check=no \\ --error-markers=VALGRINDERROR-BEGIN,VALGRINDERROR-END \\ --log-file=$valgrindLogFile \\ $bindir/postgres.orig \\ "\$@" END close $fh; } # revert changes replace_postgres() performed sub revert_replace_postgres { if (-e "$bindir/postgres.orig") { print "wrapper exists, removing\n"; print "moving $bindir/postgres.orig to $bindir/postgres\n"; rename "$bindir/postgres.orig", "$bindir/postgres" or die "Could not move postgres back"; } } # always want to call initdb under normal postgres, so revert from a # partial run, even if we're now not using valgrind. 
revert_replace_postgres(); # Set some default configuration options my $masterPort = 57636; my $workerCount = 2; my @workerPorts = (); for (my $workerIndex = 1; $workerIndex <= $workerCount; $workerIndex++) { my $workerPort = $masterPort + $workerIndex; push(@workerPorts, $workerPort); } my $followerCoordPort = 9070; my @followerWorkerPorts = (); for (my $workerIndex = 1; $workerIndex <= $workerCount; $workerIndex++) { my $workerPort = $followerCoordPort + $workerIndex; push(@followerWorkerPorts, $workerPort); } my $host = "localhost"; my $user = "postgres"; my @pgOptions = (); # Postgres options set for the tests push(@pgOptions, '-c', "listen_addresses='${host}'"); # not required, and we don't necessarily have access to the default directory push(@pgOptions, '-c', "unix_socket_directories="); push(@pgOptions, '-c', "fsync=off"); push(@pgOptions, '-c', "shared_preload_libraries=citus"); push(@pgOptions, '-c', "wal_level=logical"); # Citus options set for the tests push(@pgOptions, '-c', "citus.shard_max_size=300kB"); push(@pgOptions, '-c', "citus.max_running_tasks_per_node=4"); push(@pgOptions, '-c', "citus.expire_cached_shards=on"); push(@pgOptions, '-c', "citus.task_tracker_delay=10ms"); push(@pgOptions, '-c', "citus.remote_task_check_interval=1ms"); push(@pgOptions, '-c', "citus.shard_replication_factor=2"); push(@pgOptions, '-c', "citus.node_connection_timeout=${connectionTimeout}"); if ($followercluster) { push(@pgOptions, '-c', "max_wal_senders=10"); push(@pgOptions, '-c', "hot_standby=on"); push(@pgOptions, '-c', "wal_level=replica"); } # disable automatic distributed deadlock detection during the isolation testing # to make sure that we always get consistent test outputs. If we don't manually # (i.e., calling a UDF) detect the deadlocks, some sessions that do not participate # in the deadlock may interleave with the deadlock detection, which results in non- # consistent test outputs. # since we have CREATE/DROP distributed tables very frequently, we also set # shard_count to 4 to speed up the tests. 
if($isolationtester) { push(@pgOptions, '-c', "citus.log_distributed_deadlock_detection=on"); push(@pgOptions, '-c', "citus.distributed_deadlock_detection_factor=-1"); push(@pgOptions, '-c', "citus.shard_count=4"); } # Add externally added options last, so they overwrite the default ones above for my $option (@userPgOptions) { push(@pgOptions, '-c', $option); } #define data types as a name->definition %dataTypes = ('dummy_type', '(i integer)', 'order_side', ' ENUM (\'buy\', \'sell\')', 'test_composite_type', '(i integer, i2 integer)', 'bug_status', ' ENUM (\'new\', \'open\', \'closed\')'); # define functions as signature->definition %functions = ('fake_fdw_handler()', 'fdw_handler AS \'citus\' LANGUAGE C STRICT;', 'equal_test_composite_type_function(test_composite_type, test_composite_type)', 'boolean AS \'select $1.i = $2.i AND $1.i2 = $2.i2;\' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT;'); %operators = ('=', '(LEFTARG = test_composite_type, RIGHTARG = test_composite_type, PROCEDURE = equal_test_composite_type_function, HASHES)'); #define fdws as name->handler name %fdws = ('fake_fdw', 'fake_fdw_handler'); #define server_name->fdw %fdwServers = ('fake_fdw_server', 'fake_fdw'); # Cleanup leftovers and prepare directories for the run system("rm", ('-rf', 'tmp_check/tmp-bin')) == 0 or die "Could not remove tmp-bin directory"; system("rm", ('-rf', 'tmp_check/master')) == 0 or die "Could not remove master directory"; for my $port (@workerPorts) { system("rm", ('-rf', "tmp_check/worker.$port")) == 0 or die "Could not remove worker directory"; } system("rm", ('-rf', 'tmp_check/master-follower')) == 0 or die "Could not remove master directory"; for my $port (@followerWorkerPorts) { system("rm", ('-rf', "tmp_check/follower.$port")) == 0 or die "Could not remove worker directory"; } # Prepare directory in which 'psql' has some helpful variables for locating the workers system("mkdir", ('-p', "tmp_check/tmp-bin")) == 0 or die "Could not create tmp-bin directory"; sysopen my $fh, "tmp_check/tmp-bin/psql", O_CREAT|O_TRUNC|O_RDWR, 0700 or die "Could not create psql wrapper"; print $fh "#!/bin/bash\n"; print $fh "exec psql "; print $fh "--variable=master_port=$masterPort "; print $fh "--variable=follower_master_port=$followerCoordPort "; print $fh "--variable=default_user=$user "; print $fh "--variable=SHOW_CONTEXT=always "; for my $workeroff (0 .. $#workerPorts) { my $port = $workerPorts[$workeroff]; print $fh "--variable=worker_".($workeroff+1)."_port=$port "; } for my $workeroff (0 .. 
$#followerWorkerPorts) { my $port = $followerWorkerPorts[$workeroff]; print $fh "--variable=follower_worker_".($workeroff+1)."_port=$port "; } print $fh "\"\$@\"\n"; # pass on the commandline arguments close $fh; system("mkdir", ('-p', 'tmp_check/master/log')) == 0 or die "Could not create master directory"; for my $port (@workerPorts) { system("mkdir", ('-p', "tmp_check/worker.$port/log")) == 0 or die "Could not create worker directory"; } if ($followercluster) { system("mkdir", ('-p', 'tmp_check/master-follower/log')) == 0 or die "Could not create follower directory"; for my $port (@followerWorkerPorts) { system("mkdir", ('-p', "tmp_check/follower.$port/log")) == 0 or die "Could not create worker directory"; } } # Create new data directories, copy workers for speed system("$bindir/initdb", ("--nosync", "-U", $user, "tmp_check/master/data")) == 0 or die "Could not create master data directory"; if ($followercluster) { # This is only necessary on PG 9.6 but it doesn't hurt PG 10 open(my $fd, ">>", "tmp_check/master/data/pg_hba.conf") or die "could not open pg_hba.conf"; print $fd "\nhost replication postgres 127.0.0.1/32 trust"; close $fd; } for my $port (@workerPorts) { system("cp -a tmp_check/master/data tmp_check/worker.$port/data") == 0 or die "Could not create worker data directory"; } # Routine to shutdown servers at failure/exit sub ShutdownServers() { if ($serversAreShutdown eq "FALSE") { system("$bindir/pg_ctl", ('stop', '-w', '-D', 'tmp_check/master/data')) == 0 or warn "Could not shutdown worker server"; for my $port (@workerPorts) { system("$bindir/pg_ctl", ('stop', '-w', '-D', "tmp_check/worker.$port/data")) == 0 or warn "Could not shutdown worker server"; } if ($followercluster) { system("$bindir/pg_ctl", ('stop', '-w', '-D', 'tmp_check/master-follower/data')) == 0 or warn "Could not shutdown worker server"; for my $port (@followerWorkerPorts) { system("$bindir/pg_ctl", ('stop', '-w', '-D', "tmp_check/follower.$port/data")) == 0 or warn "Could not shutdown worker server"; } } $serversAreShutdown = "TRUE"; } } # Set signals to shutdown servers $SIG{INT} = \&ShutdownServers; $SIG{QUIT} = \&ShutdownServers; $SIG{TERM} = \&ShutdownServers; $SIG{__DIE__} = \&ShutdownServers; # Shutdown servers on exit only if help option is not used END { if ($? != 1) { ShutdownServers(); } # At the end of a run, replace redirected binary with original again if ($valgrind) { revert_replace_postgres(); } } # want to use valgrind, replace binary before starting server if ($valgrind) { replace_postgres(); } # Signal that servers should be shutdown $serversAreShutdown = "FALSE"; # Start servers if(system("$bindir/pg_ctl", ('start', '-w', '-o', join(" ", @pgOptions)." -c port=$masterPort", '-D', 'tmp_check/master/data', '-l', 'tmp_check/master/log/postmaster.log')) != 0) { system("tail", ("-n20", "tmp_check/master/log/postmaster.log")); die "Could not start master server"; } for my $port (@workerPorts) { if(system("$bindir/pg_ctl", ('start', '-w', '-o', join(" ", @pgOptions)." -c port=$port", '-D', "tmp_check/worker.$port/data", '-l', "tmp_check/worker.$port/log/postmaster.log")) != 0) { system("tail", ("-n20", "tmp_check/worker.$port/log/postmaster.log")); die "Could not start worker server"; } } # Setup the follower nodes if ($followercluster) { # This test would run faster on PG10 if we could pass --no-sync here but that flag # isn't supported on PG 9.6. In a year when we drop support for PG9.6 add that flag! 
system("$bindir/pg_basebackup", ("-D", "tmp_check/master-follower/data", "--host=$host", "--port=$masterPort", "--username=$user", "-R", "-X", "stream")) == 0 or die 'could not take basebackup'; for my $offset (0 .. $#workerPorts) { my $workerPort = $workerPorts[$offset]; my $followerPort = $followerWorkerPorts[$offset]; system("$bindir/pg_basebackup", ("-D", "tmp_check/follower.$followerPort/data", "--host=$host", "--port=$workerPort", "--username=$user", "-R", "-X", "stream")) == 0 or die "Could not take basebackup"; } if(system("$bindir/pg_ctl", ('start', '-w', '-o', join(" ", @pgOptions)." -c port=$followerCoordPort", '-D', 'tmp_check/master-follower/data', '-l', 'tmp_check/master-follower/log/postmaster.log')) != 0) { system("tail", ("-n20", "tmp_check/master-follower/log/postmaster.log")); die "Could not start master follower server"; } for my $port (@followerWorkerPorts) { if(system("$bindir/pg_ctl", ('start', '-w', '-o', join(" ", @pgOptions)." -c port=$port", '-D', "tmp_check/follower.$port/data", '-l', "tmp_check/follower.$port/log/postmaster.log")) != 0) { system("tail", ("-n20", "tmp_check/follower.$port/log/postmaster.log")); die "Could not start follower server"; } } } ### # Create database, extensions, types, functions and fdws on the workers, # pg_regress won't know to create them for us. ### for my $port (@workerPorts) { system("psql", '-X', ('-h', $host, '-p', $port, '-U', $user, "postgres", '-c', "CREATE DATABASE regression;")) == 0 or die "Could not create regression database on worker"; for my $extension (@extensions) { system("psql", '-X', ('-h', $host, '-p', $port, '-U', $user, "regression", '-c', "CREATE EXTENSION IF NOT EXISTS \"$extension\";")) == 0 or die "Could not create extension on worker"; } foreach my $dataType (keys %dataTypes) { system("psql", '-X', ('-h', $host, '-p', $port, '-U', $user, "regression", '-c', "CREATE TYPE $dataType AS $dataTypes{$dataType};")) == 0 or die "Could not create TYPE $dataType on worker"; } foreach my $function (keys %functions) { system("psql", '-X', ('-h', $host, '-p', $port, '-U', $user, "regression", '-c', "CREATE FUNCTION $function RETURNS $functions{$function};")) == 0 or die "Could not create FUNCTION $function on worker"; } foreach my $operator (keys %operators) { system("psql", '-X', ('-h', $host, '-p', $port, '-U', $user, "regression", '-c', "CREATE OPERATOR $operator $operators{$operator};")) == 0 or die "Could not create OPERATOR $operator on worker"; } foreach my $fdw (keys %fdws) { system("psql", '-X', ('-h', $host, '-p', $port, '-U', $user, "regression", '-c', "CREATE FOREIGN DATA WRAPPER $fdw HANDLER $fdws{$fdw};")) == 0 or die "Could not create foreign data wrapper $fdw on worker"; } foreach my $fdwServer (keys %fdwServers) { system("psql", '-X', ('-h', $host, '-p', $port, '-U', $user, "regression", '-c', "CREATE SERVER $fdwServer FOREIGN DATA WRAPPER $fdwServers{$fdwServer};")) == 0 or die "Could not create server $fdwServer on worker"; } } # Prepare pg_regress arguments my @arguments = ( "--host", $host, '--port', $masterPort, '--user', $user, '--bindir', "tmp_check/tmp-bin" ); # Add load extension parameters to the argument list for my $extension (@extensions) { push(@arguments, "--load-extension=$extension"); } # Append remaining ARGV arguments to pg_regress arguments push(@arguments, @ARGV); my $startTime = time(); # Finally run the tests if ($vanillatest) { $ENV{PGHOST} = $host; $ENV{PGPORT} = $masterPort; $ENV{PGUSER} = $user; system("make -C $postgresBuilddir/src/test/regress installcheck-parallel") == 0 
or die "Could not run vanilla tests"; } elsif ($isolationtester) { push(@arguments, "--dbname=regression"); system("$isolationRegress", @arguments) == 0 or die "Could not run isolation tests"; } else { system("$plainRegress", @arguments) == 0 or die "Could not run regression tests"; } my $endTime = time(); print "Finished in ". ($endTime - $startTime)." seconds. \n"; exit 0; citus-7.0.3/src/test/regress/specs/000077500000000000000000000000001317107136600172125ustar00rootroot00000000000000citus-7.0.3/src/test/regress/specs/README.md000066400000000000000000000007701317107136600204750ustar00rootroot00000000000000In this folder, all tests which in the format of '*_add.spec' organized according to specific format. For isolation tests, we selected 'n' representative operations and we aimed to perform all possible pairs of 'n' operations together. So first test just runs first of these 'n' operation with remaining 'n - 1' operation. Similary, second test just runs second operation with remaining 'n - 2' operation. With this logic, we eventually run every selected operation with every other selected operation. citus-7.0.3/src/test/regress/specs/isolation_add_node_vs_reference_table_operations.spec000066400000000000000000000067651317107136600320620ustar00rootroot00000000000000# the test expects to have zero nodes in pg_dist_node at the beginning # add single one of the nodes for the purpose of the test setup { SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57637); CREATE TABLE test_reference_table (test_id integer); SELECT create_reference_table('test_reference_table'); } # ensure that both nodes exists for the remaining of the isolation tests teardown { DROP TABLE test_reference_table; SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; } session "s1" step "s1-begin" { BEGIN; } step "s1-add-second-worker" { SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); } step "s1-remove-second-worker" { SELECT master_remove_node('localhost', 57638); } step "s1-commit" { COMMIT; } session "s2" # COPY accesses all shard/placement metadata, so should be enough for # loading the cache step "s2-load-metadata-cache" { COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; } step "s2-copy-to-reference-table" { COPY test_reference_table FROM PROGRAM 'echo "1\n2\n3\n4\n5"'; } step "s2-insert-to-reference-table" { INSERT INTO test_reference_table VALUES (6); } step "s2-ddl-on-reference-table" { CREATE INDEX reference_index ON test_reference_table(test_id); } step "s2-begin" { BEGIN; } step "s2-commit" { COMMIT; } step "s2-print-content" { SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from %s') ORDER BY nodeport; } step "s2-print-index-count" { SELECT nodeport, success, result FROM run_command_on_placements('test_reference_table', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; } # verify that copy/insert gets the invalidation and re-builts its metadata cache # note that we need to run "s1-load-metadata-cache" and "s2-load-metadata-cache" # to ensure that metadata is cached otherwise the test would be useless since # the cache would be empty and the metadata data is gathered from the tables directly permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" 
"s2-print-content" permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content" permutation "s2-load-metadata-cache" "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count" permutation "s2-load-metadata-cache" "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count" # same tests without loading the cache permutation "s1-begin" "s1-add-second-worker" "s2-copy-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-begin" "s2-copy-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content" permutation "s1-begin" "s1-add-second-worker" "s2-insert-to-reference-table" "s1-commit" "s2-print-content" permutation "s2-begin" "s2-insert-to-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-content" permutation "s1-begin" "s1-add-second-worker" "s2-ddl-on-reference-table" "s1-commit" "s2-print-index-count" permutation "s2-begin" "s2-ddl-on-reference-table" "s1-add-second-worker" "s2-commit" "s2-print-index-count" citus-7.0.3/src/test/regress/specs/isolation_add_remove_node.spec000066400000000000000000000103131317107136600252570ustar00rootroot00000000000000setup { SELECT 1; } teardown { SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; } session "s1" step "s1-begin" { BEGIN; } step "s1-add-node-1" { SELECT 1 FROM master_add_node('localhost', 57637); } step "s1-add-node-2" { SELECT 1 FROM master_add_node('localhost', 57638); } step "s1-add-inactive-1" { SELECT 1 FROM master_add_inactive_node('localhost', 57637); } step "s1-activate-node-1" { SELECT 1 FROM master_activate_node('localhost', 57637); } step "s1-disable-node-1" { SELECT 1 FROM master_disable_node('localhost', 57637); } step "s1-remove-node-1" { SELECT * FROM master_remove_node('localhost', 57637); } step "s1-remove-node-2" { SELECT * FROM master_remove_node('localhost', 57638); } step "s1-abort" { ABORT; } step "s1-commit" { COMMIT; } step "s1-show-nodes" { SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; } session "s2" step "s2-begin" { BEGIN; } step "s2-add-node-1" { SELECT 1 FROM master_add_node('localhost', 57637); } step "s2-add-node-2" { SELECT 1 FROM master_add_node('localhost', 57638); } step "s2-activate-node-1" { SELECT 1 FROM master_activate_node('localhost', 57637); } step "s2-disable-node-1" { SELECT 1 FROM master_disable_node('localhost', 57637); } step "s2-remove-node-1" { SELECT * FROM master_remove_node('localhost', 57637); } step "s2-remove-node-2" { SELECT * FROM master_remove_node('localhost', 57638); } step "s2-commit" { COMMIT; } # session 1 adds a node, session 2 removes it, should be ok permutation "s1-begin" "s1-add-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes" # add a different node from 2 sessions, should be ok permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-commit" "s1-show-nodes" # add the same node from 2 sessions, should be ok (idempotent) permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-commit" "s1-show-nodes" # add a different node from 2 sessions, one aborts permutation "s1-begin" "s1-add-node-1" "s2-add-node-2" "s1-abort" "s1-show-nodes" # add the same node from 2 sessions, one aborts permutation "s1-begin" "s1-add-node-1" "s2-add-node-1" "s1-abort" "s1-show-nodes" # remove a different node from 2 transactions, 
should be ok permutation "s1-add-node-1" "s1-add-node-2" "s1-begin" "s1-remove-node-1" "s2-remove-node-2" "s1-commit" "s1-show-nodes" # remove the same node from 2 transactions, should be ok (idempotent) permutation "s1-add-node-1" "s1-begin" "s1-remove-node-1" "s2-remove-node-1" "s1-commit" "s1-show-nodes" # activate an active node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" # disable an active node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" # activate an inactive node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" # disable an inactive node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" # disable and activate an active node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" # activate and disable an active node node from 2 transactions, should be ok permutation "s1-add-node-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" # disable and activate an inactive node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-disable-node-1" "s2-activate-node-1" "s1-commit" "s1-show-nodes" # activate and disable an inactive node node from 2 transactions, should be ok permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-commit" "s1-show-nodes" # activate and disable an inactive node from 2 transactions, one aborts permutation "s1-add-inactive-1" "s1-begin" "s1-activate-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes" # disable an active node from 2 transactions, one aborts permutation "s1-add-node-1" "s1-begin" "s1-disable-node-1" "s2-disable-node-1" "s1-abort" "s1-show-nodes" citus-7.0.3/src/test/regress/specs/isolation_append_copy_vs_all.spec000066400000000000000000000224161317107136600260150ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
# # create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE append_copy(id integer, data text, int_data int); SELECT create_distributed_table('append_copy', 'id', 'append'); } # drop distributed table teardown { DROP TABLE IF EXISTS append_copy CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY append_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s1-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s1-router-select" { SELECT * FROM append_copy WHERE id = 1; } step "s1-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; } step "s1-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s1-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); } step "s1-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; } step "s1-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; } step "s1-delete" { DELETE FROM append_copy WHERE id = 1; } step "s1-truncate" { TRUNCATE append_copy; } step "s1-drop" { DROP TABLE append_copy; } step "s1-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); } step "s1-ddl-drop-index" { DROP INDEX append_copy_index; } step "s1-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; } step "s1-ddl-unique-constraint" { ALTER TABLE append_copy ADD CONSTRAINT append_copy_unique UNIQUE(id); } step "s1-table-size" { SELECT citus_total_relation_size('append_copy'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM append_copy;'); } step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); } step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); } step "s1-create-non-distributed-table" { CREATE TABLE append_copy(id integer, data text, int_data int); } step "s1-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); } step "s1-select-count" { SELECT COUNT(*) FROM append_copy; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''append_copy%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''append_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-copy" { COPY append_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY append_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s2-router-select" { SELECT * FROM append_copy WHERE id = 1; } step "s2-real-time-select" { SELECT * FROM append_copy ORDER BY 1, 2; } step "s2-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM append_copy AS t1 JOIN append_copy AS t2 ON 
t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s2-insert" { INSERT INTO append_copy VALUES(0, 'k', 0); } step "s2-insert-select" { INSERT INTO append_copy SELECT * FROM append_copy; } step "s2-update" { UPDATE append_copy SET data = 'l' WHERE id = 0; } step "s2-delete" { DELETE FROM append_copy WHERE id = 1; } step "s2-truncate" { TRUNCATE append_copy; } step "s2-drop" { DROP TABLE append_copy; } step "s2-ddl-create-index" { CREATE INDEX append_copy_index ON append_copy(id); } step "s2-ddl-drop-index" { DROP INDEX append_copy_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY append_copy_index ON append_copy(id); } step "s2-ddl-add-column" { ALTER TABLE append_copy ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE append_copy DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE append_copy RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('append_copy'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM append_copy;'); } step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM append_copy WHERE id <= 4;'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('append_copy'::regclass, 'public', 'append_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('append_copy', 'id', 'append'); } # permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" # permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-apply-delete-command" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" 
"s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-apply-delete-command" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_cancellation.spec000066400000000000000000000033101317107136600246000ustar00rootroot00000000000000# Tests around cancelling statements. As we can't trigger cancel # interrupts directly, we use statement_timeout instead, which largely # behaves the same as proper cancellation. 
setup { CREATE TABLE cancel_table (test_id integer NOT NULL, data text); SELECT create_distributed_table('cancel_table', 'test_id', 'hash'); INSERT INTO cancel_table VALUES(1); } teardown { DROP TABLE IF EXISTS cancel_table; } session "s1" step "s1-begin" { BEGIN; } step "s1-commit" { COMMIT; } step "s1-rollback" { ROLLBACK; } step "s1-sleep10000" { SELECT pg_sleep(10000) FROM cancel_table WHERE test_id = 1; } step "s1-timeout" { SET statement_timeout = '100ms'; } step "s1-update1" { UPDATE cancel_table SET data = '' WHERE test_id = 1; } step "s1-reset" { RESET ALL; } step "s1-drop" { DROP TABLE cancel_table; } session "s2" step "s2-drop" { DROP TABLE cancel_table; } # check that statement cancel works for plain selects, drop table # afterwards to make sure sleep on workers is cancelled (thereby not # preventing drop via locks) permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-sleep10000" "s1-reset" "s2-drop" # check that statement cancel works for selects in transaction permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-begin" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop" # check that statement cancel works for selects in transaction, that previously wrote permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s1-drop" permutation "s1-timeout" "s1-begin" "s1-update1" "s1-sleep10000" "s1-rollback" "s1-reset" "s2-drop" citus-7.0.3/src/test/regress/specs/isolation_cluster_management.spec000066400000000000000000000003231317107136600260220ustar00rootroot00000000000000session "s1" step "s1a" { SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57637); SELECT nodename, nodeport, isactive FROM master_add_node('localhost', 57638); } permutation "s1a" citus-7.0.3/src/test/regress/specs/isolation_concurrent_dml.spec000066400000000000000000000027611317107136600251730ustar00rootroot00000000000000setup { CREATE TABLE test_concurrent_dml (test_id integer NOT NULL, data text); SELECT master_create_distributed_table('test_concurrent_dml', 'test_id', 'hash'); SELECT master_create_worker_shards('test_concurrent_dml', 4, 2); } teardown { DROP TABLE IF EXISTS test_concurrent_dml CASCADE; } session "s1" step "s1-begin" { BEGIN; } step "s1-insert" { INSERT INTO test_concurrent_dml VALUES(1); } step "s1-multi-insert" { INSERT INTO test_concurrent_dml VALUES (1), (2); } step "s1-commit" { COMMIT; } session "s2" step "s2-begin" { BEGIN; } step "s2-update" { UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1; } step "s2-multi-insert-overlap" { INSERT INTO test_concurrent_dml VALUES (1), (4); } step "s2-multi-insert" { INSERT INTO test_concurrent_dml VALUES (3), (4); } step "s2-commit" { COMMIT; } # verify that an in-progress insert blocks concurrent updates permutation "s1-begin" "s1-insert" "s2-update" "s1-commit" # but an insert without xact will not block permutation "s1-insert" "s2-update" # verify that an in-progress multi-row insert blocks concurrent updates permutation "s1-begin" "s1-multi-insert" "s2-update" "s1-commit" # two multi-row inserts that hit same shards will block permutation "s1-begin" "s1-multi-insert" "s2-multi-insert-overlap" "s1-commit" # but concurrent multi-row inserts don't block unless shards overlap permutation "s1-begin" "s2-begin" "s1-multi-insert" "s2-multi-insert" "s1-commit" "s2-commit" 
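# Editorial note (not part of the original spec): the first permutation above
# ("s1-begin" "s1-insert" "s2-update" "s1-commit") corresponds to the following
# two-session interleaving, written out in plain SQL using the steps defined
# earlier in this spec. The blocking point is inferred from the permutation's own
# comment ("an in-progress insert blocks concurrent updates"), not from recorded
# test output.
#
#   s1: BEGIN;
#   s1: INSERT INTO test_concurrent_dml VALUES(1);
#   s2: UPDATE test_concurrent_dml SET data = 'blarg' WHERE test_id = 1;  -- expected to block
#   s1: COMMIT;                                                           -- s2's UPDATE then completes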
citus-7.0.3/src/test/regress/specs/isolation_copy_placement_vs_copy_placement.spec000066400000000000000000000040431317107136600307440ustar00rootroot00000000000000# we use 5 as the partition key value through out the test # so setting the corresponding shard here is useful setup { SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; CREATE TABLE test_hash_table (x int, y int); SELECT create_distributed_table('test_hash_table', 'x'); SELECT get_shard_id_for_distribution_column('test_hash_table', 5) INTO selected_shard_for_test_table; } teardown { DROP TABLE test_hash_table; DROP TABLE selected_shard_for_test_table; } session "s1" # since test_hash_table has rep > 1 simple select query doesn't hit all placements # hence not all placements are cached # but with copy all placements are cached step "s1-load-cache" { COPY test_hash_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; } step "s1-repair-placement" { SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); } session "s2" step "s2-begin" { BEGIN; } step "s2-set-placement-inactive" { UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard_for_test_table) AND nodeport = 57638; } step "s2-repair-placement" { SELECT master_copy_shard_placement((SELECT * FROM selected_shard_for_test_table), 'localhost', 57637, 'localhost', 57638); } # since test_hash_table has rep > 1 simple select query doesn't hit all placements # hence not all placements are cached # but with copy all placements are cached step "s2-load-cache" { COPY test_hash_table FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; } step "s2-commit" { COMMIT; } # two concurrent shard repairs on the same shard # note that "s1-repair-placement" errors out but that is expected # given that "s2-repair-placement" succeeds and the placement is # already repaired permutation "s1-load-cache" "s2-load-cache" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit" # the same test without the load caches permutation "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-repair-placement" "s2-commit" citus-7.0.3/src/test/regress/specs/isolation_copy_placement_vs_modification.spec000066400000000000000000000101311317107136600304020ustar00rootroot00000000000000# we use 5 as the partition key value through out the test # so setting the corresponding shard here is useful setup { SET citus.shard_count TO 2; SET citus.shard_replication_factor TO 2; CREATE TABLE test_copy_placement_vs_modification (x int, y int); SELECT create_distributed_table('test_copy_placement_vs_modification', 'x'); SELECT get_shard_id_for_distribution_column('test_copy_placement_vs_modification', 5) INTO selected_shard; } teardown { DROP TABLE test_copy_placement_vs_modification; DROP TABLE selected_shard; } session "s1" step "s1-begin" { BEGIN; } # since test_copy_placement_vs_modification has rep > 1 simple select query doesn't hit all placements # hence not all placements are cached step "s1-load-cache" { TRUNCATE test_copy_placement_vs_modification; } step "s1-insert" { INSERT INTO test_copy_placement_vs_modification VALUES (5, 10); } step "s1-update" { UPDATE test_copy_placement_vs_modification SET y = 5 WHERE x = 5; } step "s1-delete" { DELETE FROM test_copy_placement_vs_modification WHERE x = 5; } step "s1-select" { SELECT count(*) FROM test_copy_placement_vs_modification WHERE x = 5; } step "s1-ddl" { CREATE INDEX 
test_copy_placement_vs_modification_index ON test_copy_placement_vs_modification(x); } step "s1-copy" { COPY test_copy_placement_vs_modification FROM PROGRAM 'echo "1,1\n2,2\n3,3\n4,4\n5,5"' WITH CSV; } step "s1-commit" { COMMIT; } session "s2" step "s2-begin" { BEGIN; } step "s2-set-placement-inactive" { UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid IN (SELECT * FROM selected_shard) AND nodeport = 57638; } step "s2-repair-placement" { SELECT master_copy_shard_placement((SELECT * FROM selected_shard), 'localhost', 57637, 'localhost', 57638); } step "s2-commit" { COMMIT; } step "s2-print-content" { SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select y from %s WHERE x = 5') WHERE shardid IN (SELECT * FROM selected_shard) ORDER BY nodeport; } step "s2-print-index-count" { SELECT nodeport, success, result FROM run_command_on_placements('test_copy_placement_vs_modification', 'select count(*) from pg_indexes WHERE schemaname || ''.'' || tablename = ''%s''') ORDER BY nodeport; } # repair a placement while concurrently performing an update/delete/insert/copy # note that at some points we use "s1-select" just after "s1-begin" given that BEGIN # may invalidate cache at certain cases permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-copy" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-load-cache" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count" # the same tests without loading the cache at first permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-update" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-insert" "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-delete" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-insert" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-copy" "s2-commit" "s1-commit" "s2-print-content" permutation "s1-begin" "s1-select" "s2-set-placement-inactive" "s2-begin" "s2-repair-placement" "s1-ddl" "s2-commit" "s1-commit" "s2-print-index-count" citus-7.0.3/src/test/regress/specs/isolation_create_restore_point.spec000066400000000000000000000044341317107136600263730ustar00rootroot00000000000000setup { CREATE TABLE restore_table (test_id integer NOT NULL, data text); SELECT create_distributed_table('restore_table', 'test_id'); } teardown { DROP TABLE IF EXISTS restore_table, test_create_distributed_table; } session "s1" step "s1-begin" { BEGIN; } step "s1-create-distributed" { CREATE TABLE test_create_distributed_table (test_id integer NOT NULL, data text); SELECT create_distributed_table('test_create_distributed_table', 'test_id'); 
} step "s1-insert" { INSERT INTO restore_table VALUES (1,'hello'); } step "s1-modify-multiple" { SELECT master_modify_multiple_shards($$UPDATE restore_table SET data = 'world'$$); } step "s1-ddl" { ALTER TABLE restore_table ADD COLUMN x int; } step "s1-copy" { COPY restore_table FROM PROGRAM 'echo 1,hello' WITH CSV; } step "s1-drop" { DROP TABLE restore_table; } step "s1-add-node" { SELECT 1 FROM master_add_inactive_node('localhost', 9999); } step "s1-remove-node" { SELECT master_remove_node('localhost', 9999); } step "s1-commit" { COMMIT; } session "s2" step "s2-create-restore" { SELECT 1 FROM citus_create_restore_point('citus-test'); } # verify that citus_create_restore_point is blocked by concurrent create_distributed_table permutation "s1-begin" "s1-create-distributed" "s2-create-restore" "s1-commit" # verify that citus_create_restore_point is blocked by concurrent INSERT permutation "s1-begin" "s1-insert" "s2-create-restore" "s1-commit" # verify that citus_create_restore_point is blocked by concurrent master_modify_multiple_shards permutation "s1-begin" "s1-modify-multiple" "s2-create-restore" "s1-commit" # verify that citus_create_restore_point is blocked by concurrent DDL permutation "s1-begin" "s1-ddl" "s2-create-restore" "s1-commit" # verify that citus_create_restore_point is blocked by concurrent COPY permutation "s1-begin" "s1-copy" "s2-create-restore" "s1-commit" # verify that citus_create_restore_point is blocked by concurrent DROP TABLE permutation "s1-begin" "s1-drop" "s2-create-restore" "s1-commit" # verify that citus_create_restore_point is blocked by concurrent master_add_node permutation "s1-begin" "s1-add-node" "s2-create-restore" "s1-commit" # verify that citus_create_restore_point is blocked by concurrent master_remove_node permutation "s1-begin" "s1-remove-node" "s2-create-restore" "s1-commit" citus-7.0.3/src/test/regress/specs/isolation_create_table_vs_add_remove_node.spec000066400000000000000000000060031317107136600304620ustar00rootroot00000000000000setup { SELECT 1 FROM master_add_node('localhost', 57637); SELECT * FROM master_get_active_worker_nodes() ORDER BY node_name, node_port; } teardown { DROP TABLE IF EXISTS dist_table; SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; } session "s1" step "s1-begin" { BEGIN; } step "s1-add-node-2" { SELECT 1 FROM master_add_node('localhost', 57638); } step "s1-remove-node-2" { SELECT * FROM master_remove_node('localhost', 57638); } step "s1-abort" { ABORT; } step "s1-commit" { COMMIT; } step "s1-query-table" { SELECT * FROM dist_table; } step "s1-show-nodes" { SELECT nodename, nodeport, isactive FROM pg_dist_node ORDER BY nodename, nodeport; } step "s1-show-placements" { SELECT nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'dist_table'::regclass ORDER BY nodename, nodeport; } session "s2" step "s2-begin" { BEGIN; } step "s2-create-table-1" { SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); } step "s2-create-table-2" { SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 2; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x'); } step "s2-create-append-table" { SET citus.shard_replication_factor TO 1; CREATE TABLE dist_table (x int, y int); SELECT create_distributed_table('dist_table', 'x', 'append'); SELECT 1 FROM master_create_empty_shard('dist_table'); } step "s2-select" { SELECT * FROM 
dist_table; } step "s2-commit" { COMMIT; } # session 1 adds a node, session 2 creates a distributed table permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select" permutation "s1-begin" "s1-add-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select" permutation "s2-begin" "s2-create-table-1" "s1-add-node-2" "s2-commit" "s1-show-placements" "s2-select" # session 1 removes a node, session 2 creates a distributed table permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-commit" "s1-show-placements" "s2-select" permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-1" "s1-abort" "s1-show-placements" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-table-1" "s1-remove-node-2" "s2-commit" "s1-show-placements" "s2-select" # session 1 removes a node, session 2 creates a distributed table with replication factor 2, should throw a sane error permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-table-2" "s1-commit" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-table-2" "s1-remove-node-2" "s2-commit" "s2-select" # session 1 removes a node, session 2 creates a shard in an append-distributed table permutation "s1-add-node-2" "s1-begin" "s1-remove-node-2" "s2-create-append-table" "s1-commit" "s2-select" permutation "s1-add-node-2" "s2-begin" "s2-create-append-table" "s1-remove-node-2" "s2-commit" "s2-select" citus-7.0.3/src/test/regress/specs/isolation_data_migration.spec000066400000000000000000000031261317107136600251330ustar00rootroot00000000000000setup { CREATE TABLE migration_table (test_id integer NOT NULL, data text); } teardown { DROP TABLE migration_table; } session "s1" step "s1-begin" { BEGIN; } step "s1-begin-serializable" { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; SELECT 1; } step "s1-create_distributed_table" { SELECT create_distributed_table('migration_table', 'test_id'); } step "s1-commit" { COMMIT; } session "s2" step "s2-begin" { BEGIN; } step "s2-copy" { COPY migration_table FROM PROGRAM 'echo 1,hello' WITH CSV; } step "s2-insert" { INSERT INTO migration_table VALUES (1, 'hello'); } step "s2-commit" { COMMIT; } step "s2-select" { SELECT * FROM migration_table ORDER BY test_id; } # verify that local COPY is picked up by create_distributed_table once it commits permutation "s2-begin" "s2-copy" "s1-create_distributed_table" "s2-commit" "s2-select" # verify that COPY is distributed once create_distributed_table commits permutation "s1-begin" "s1-create_distributed_table" "s2-copy" "s1-commit" "s2-select" # verify that local INSERT is picked up by create_distributed_table once it commits permutation "s2-begin" "s2-insert" "s1-create_distributed_table" "s2-commit" "s2-select" # verify that INSERT is distributed once create_distributed_table commits permutation "s1-begin" "s1-create_distributed_table" "s2-insert" "s1-commit" "s2-select" # verify that changes are picked up even in serializable mode permutation "s1-begin-serializable" "s2-copy" "s1-create_distributed_table" "s1-commit" "s2-select" permutation "s1-begin-serializable" "s2-insert" "s1-create_distributed_table" "s1-commit" "s2-select" citus-7.0.3/src/test/regress/specs/isolation_ddl_vs_all.spec000066400000000000000000000174441317107136600242640ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
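#
# For orientation: a permutation below, e.g. (abridged from this file)
#   permutation "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-add-column" "s1-commit" "s2-commit"
# interleaves the named steps across the two sessions. Run by hand it is roughly equivalent to the
# following sketch (illustrative only; the exact lock waits depend on the server version):
#   -- session 1: BEGIN; CREATE INDEX ddl_hash_index ON ddl_hash(id);
#   -- session 2: BEGIN; ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0;  -- typically blocks on session 1's lock
#   -- session 1: COMMIT;                                                      -- session 2 is released
#   -- session 2: COMMIT;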
# # create range distributed table to test behavior of DDL in concurrent operations setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE ddl_hash(id integer, data text); SELECT create_distributed_table('ddl_hash', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS ddl_hash CASCADE; SELECT citus.restore_isolation_tester_func(); } # session 1 session "s1" step "s1-initialize" { COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-ddl-create-index" { CREATE INDEX ddl_hash_index ON ddl_hash(id); } step "s1-ddl-drop-index" { DROP INDEX ddl_hash_index; } step "s1-ddl-add-column" { ALTER TABLE ddl_hash ADD new_column_1 int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE ddl_hash DROP new_column_2; } step "s1-ddl-rename-column" { ALTER TABLE ddl_hash RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('ddl_hash'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); } step "s1-drop" { DROP TABLE ddl_hash; } step "s1-create-non-distributed-table" { CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-distribute-table" { SELECT create_distributed_table('ddl_hash', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM ddl_hash; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''ddl_hash%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-ddl-create-index" { CREATE INDEX ddl_hash_index ON ddl_hash(id); } step "s2-ddl-drop-index" { DROP INDEX ddl_hash_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY ddl_hash_index ON ddl_hash(id); } step "s2-ddl-add-column" { ALTER TABLE ddl_hash ADD new_column_2 int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE ddl_hash DROP new_column_1; } step "s2-ddl-rename-column" { ALTER TABLE ddl_hash RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('ddl_hash'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM ddl_hash;'); } step "s2-create-non-distributed-table" { CREATE TABLE ddl_hash(id integer, data text); COPY ddl_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s2-distribute-table" { SELECT create_distributed_table('ddl_hash', 'id'); } step "s2-select" { SELECT * FROM ddl_hash ORDER BY 1, 2; } step "s2-commit" { COMMIT; } # permutations - DDL vs DDL permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-indexes" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-indexes" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" 
"s1-ddl-add-column" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-columns" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-columns" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-columns" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-columns" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns" # permutations - DDL first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-table-size" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-table-size" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-table-size" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-distribute-table" "s1-commit" "s2-commit" "s1-show-columns" # permutations - DDL second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-indexes" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-ddl-create-index-concurrently" "s1-commit" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns" permutation 
"s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-show-columns" citus-7.0.3/src/test/regress/specs/isolation_delete_vs_all.spec000066400000000000000000000146041317107136600247560ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. # # create range distributed table to test behavior of DELETE in concurrent operations setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE delete_hash(id integer, data text); SELECT create_distributed_table('delete_hash', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS delete_hash CASCADE; SELECT citus.restore_isolation_tester_func(); } # session 1 session "s1" step "s1-initialize" { COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-delete" { DELETE FROM delete_hash WHERE id = 4; } step "s1-truncate" { TRUNCATE delete_hash; } step "s1-drop" { DROP TABLE delete_hash; } step "s1-ddl-create-index" { CREATE INDEX delete_hash_index ON delete_hash(id); } step "s1-ddl-drop-index" { DROP INDEX delete_hash_index; } step "s1-ddl-add-column" { ALTER TABLE delete_hash ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE delete_hash DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE delete_hash RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('delete_hash'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM delete_hash;'); } step "s1-create-non-distributed-table" { CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-distribute-table" { SELECT create_distributed_table('delete_hash', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM delete_hash; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''delete_hash%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''delete_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-delete" { DELETE FROM delete_hash WHERE id = 4; } step "s2-truncate" { TRUNCATE delete_hash; } step "s2-drop" { DROP TABLE delete_hash; } step "s2-ddl-create-index" { CREATE INDEX delete_hash_index ON delete_hash(id); } step "s2-ddl-drop-index" { DROP INDEX delete_hash_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY delete_hash_index ON delete_hash(id); } step "s2-ddl-add-column" { ALTER TABLE delete_hash ADD new_column int 
DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE delete_hash DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE delete_hash RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('delete_hash'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM delete_hash;'); } step "s2-create-non-distributed-table" { CREATE TABLE delete_hash(id integer, data text); COPY delete_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s2-distribute-table" { SELECT create_distributed_table('delete_hash', 'id'); } step "s2-select" { SELECT * FROM delete_hash ORDER BY 1, 2; } step "s2-commit" { COMMIT; } # permutations - DELETE vs DELETE permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" # permutations - DELETE first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-drop-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" # permutations - DELETE second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-ddl-drop-column" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-delete" "s1-commit" "s2-commit" 
"s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_distributed_deadlock_detection.spec000066400000000000000000000253221317107136600303610ustar00rootroot00000000000000setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); CREATE TABLE deadlock_detection_reference (user_id int UNIQUE, some_val int); SELECT create_reference_table('deadlock_detection_reference'); CREATE TABLE deadlock_detection_test (user_id int UNIQUE, some_val int); INSERT INTO deadlock_detection_test SELECT i, i FROM generate_series(1,7) i; SELECT create_distributed_table('deadlock_detection_test', 'user_id'); CREATE TABLE local_deadlock_table (user_id int UNIQUE, some_val int); CREATE TABLE deadlock_detection_test_rep_2 (user_id int UNIQUE, some_val int); SET citus.shard_replication_factor = 2; SELECT create_distributed_table('deadlock_detection_test_rep_2', 'user_id'); INSERT INTO deadlock_detection_test_rep_2 VALUES (1,1); INSERT INTO deadlock_detection_test_rep_2 VALUES (2,2); } teardown { DROP TABLE deadlock_detection_test; DROP TABLE local_deadlock_table; DROP TABLE deadlock_detection_test_rep_2; DROP TABLE deadlock_detection_reference; SELECT citus.restore_isolation_tester_func(); SET citus.shard_replication_factor = 1; } session "s1" step "s1-begin" { BEGIN; } step "s1-update-1" { UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 1; } step "s1-update-2" { UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 2; } step "s1-update-3" { UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 3; } step "s1-update-4" { UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 4; } step "s1-update-5" { UPDATE deadlock_detection_test SET some_val = 1 WHERE user_id = 5; } step "s1-insert-dist-10" { INSERT INTO deadlock_detection_test VALUES (10, 10); } step "s1-insert-local-10" { INSERT INTO local_deadlock_table VALUES (10, 10); } step "s1-set-2pc" { set citus.multi_shard_commit_protocol TO '2pc'; } step "s1-update-1-rep-2" { UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; } step "s1-update-2-rep-2" { UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; } step "s1-insert-ref-10" { INSERT INTO deadlock_detection_reference VALUES (10, 10); } step "s1-insert-ref-11" { INSERT INTO deadlock_detection_reference VALUES (11, 11); } step "s1-finish" { COMMIT; } session "s2" step "s2-begin" { BEGIN; } step "s2-update-1" { UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 1; } step "s2-update-2" { UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 2; } step "s2-update-3" { UPDATE deadlock_detection_test SET some_val = 2 WHERE user_id = 3; } step "s2-upsert-select-all" { INSERT INTO deadlock_detection_test SELECT * FROM deadlock_detection_test ON CONFLICT(user_id) DO UPDATE SET some_val = deadlock_detection_test.some_val + 5 RETURNING *; } step "s2-ddl" { ALTER TABLE deadlock_detection_test ADD COLUMN test_col INT; } step "s2-insert-dist-10" { INSERT INTO deadlock_detection_test VALUES (10, 10); } step "s2-insert-local-10" { INSERT INTO local_deadlock_table VALUES (10, 10); } step "s2-set-2pc" { set citus.multi_shard_commit_protocol TO '2pc'; } step 
"s2-update-1-rep-2" { UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 1; } step "s2-update-2-rep-2" { UPDATE deadlock_detection_test_rep_2 SET some_val = 1 WHERE user_id = 2; } step "s2-insert-ref-10" { INSERT INTO deadlock_detection_reference VALUES (10, 10); } step "s2-insert-ref-11" { INSERT INTO deadlock_detection_reference VALUES (11, 11); } step "s2-finish" { COMMIT; } session "s3" step "s3-begin" { BEGIN; } step "s3-update-1" { UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 1; } step "s3-update-2" { UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 2; } step "s3-update-3" { UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 3; } step "s3-update-4" { UPDATE deadlock_detection_test SET some_val = 3 WHERE user_id = 4; } step "s3-finish" { COMMIT; } session "s4" step "s4-begin" { BEGIN; } step "s4-update-1" { UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 1; } step "s4-update-2" { UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 2; } step "s4-update-3" { UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 3; } step "s4-update-4" { UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 4; } step "s4-update-5" { UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 5; } step "s4-update-6" { UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 6; } step "s4-update-7" { UPDATE deadlock_detection_test SET some_val = 4 WHERE user_id = 7; } step "s4-finish" { COMMIT; } session "s5" step "s5-begin" { BEGIN; } step "s5-update-1" { UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 1; } step "s5-update-2" { UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 2; } step "s5-update-3" { UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 3; } step "s5-update-4" { UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 4; } step "s5-update-5" { UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 5; } step "s5-update-6" { UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 6; } step "s5-update-7" { UPDATE deadlock_detection_test SET some_val = 5 WHERE user_id = 7; } step "s5-finish" { COMMIT; } session "s6" step "s6-begin" { BEGIN; } step "s6-update-1" { UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 1; } step "s6-update-2" { UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 2; } step "s6-update-3" { UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 3; } step "s6-update-4" { UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 4; } step "s6-update-5" { UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 5; } step "s6-update-6" { UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 6; } step "s6-update-7" { UPDATE deadlock_detection_test SET some_val = 6 WHERE user_id = 7; } step "s6-finish" { COMMIT; } # we disable the deamon during the regression tests in order to get consistent results # thus we manually issue the deadlock detection session "deadlock-checker" # we issue the checker not only when there are deadlocks to ensure that we never cancel # backend inappropriately step "deadlock-checker-call" { SELECT check_distributed_deadlocks(); } # simplest case, loop with two nodes permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-finish" "s2-finish" # simplest case with replication factor 2 permutation "s1-begin" "s2-begin" 
"s1-update-1-rep-2" "s2-update-2-rep-2" "s2-update-1-rep-2" "deadlock-checker-call" "s1-update-2-rep-2" "deadlock-checker-call" "s1-finish" "s2-finish" # simplest case with 2pc enabled permutation "s1-begin" "s2-begin" "s1-set-2pc" "s2-set-2pc" "s1-update-1" "s2-update-2" "s2-update-1" "deadlock-checker-call" "s1-update-2" "deadlock-checker-call" "s1-finish" "s2-finish" # simplest case with multi-shard query is cancelled permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-upsert-select-all" "deadlock-checker-call" "s1-finish" "s2-finish" # simplest case with DDL is cancelled permutation "s1-begin" "s2-begin" "s1-update-1" "s2-update-2" "s1-update-2" "deadlock-checker-call" "s2-ddl" "deadlock-checker-call" "s1-finish" "s2-finish" # daedlock with local table permutation "s1-begin" "s2-begin" "s1-insert-dist-10" "s2-insert-local-10" "s2-insert-dist-10" "s1-insert-local-10" "deadlock-checker-call" "s1-finish" "s2-finish" # daedlock with reference tables only permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-insert-ref-11" "s2-insert-ref-11" "s1-insert-ref-10" "deadlock-checker-call" "s1-finish" "s2-finish" # deadlock with referecen + distributed tables permutation "s1-begin" "s2-begin" "s2-insert-ref-10" "s1-update-1" "deadlock-checker-call" "s2-update-1" "s1-insert-ref-10" "deadlock-checker-call" "s1-finish" "s2-finish" # slightly more complex case, loop with three nodes permutation "s1-begin" "s2-begin" "s3-begin" "s1-update-1" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s1-update-2" "s2-update-3" "s3-update-1" "deadlock-checker-call" "s3-finish" "s2-finish" "s1-finish" # similar to the above (i.e., 3 nodes), but the cycle starts from the second node permutation "s1-begin" "s2-begin" "s3-begin" "s2-update-1" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s2-update-3" "deadlock-checker-call" "s3-finish" "s2-finish" "s1-finish" # not connected graph permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s2-update-2" "s3-update-3" "s3-update-2" "deadlock-checker-call" "s4-update-4" "s2-update-3" "deadlock-checker-call" "s3-finish" "s2-finish" "s1-finish" "s4-finish" # still a not connected graph, but each smaller graph contains dependencies, one of which is a distributed deadlock permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s4-update-1" "s1-update-1" "deadlock-checker-call" "s2-update-2" "s3-update-3" "s2-update-3" "s3-update-2" "deadlock-checker-call" "s3-finish" "s2-finish" "s4-finish" "s1-finish" # multiple deadlocks on a not connected graph permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s1-update-1" "s4-update-4" "s2-update-2" "s3-update-3" "s3-update-2" "s4-update-1" "s1-update-4" "deadlock-checker-call" "s1-finish" "s4-finish" "s2-update-3" "deadlock-checker-call" "s2-finish" "s3-finish" # a larger graph where the first node is in the distributed deadlock permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s1-update-1" "s5-update-5" "s3-update-2" "s2-update-3" "s4-update-4" "s3-update-4" "deadlock-checker-call" "s6-update-6" "s4-update-6" "s1-update-5" "s5-update-1" "deadlock-checker-call" "s1-finish" "s5-finish" "s6-finish" "s4-finish" "s3-finish" "s2-finish" # a larger graph where the deadlock starts from a middle node permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s6-update-6" "s5-update-5" "s5-update-6" "s4-update-4" "s1-update-4" "s4-update-5" "deadlock-checker-call" "s2-update-3" 
"s3-update-2" "s2-update-2" "s3-update-3" "deadlock-checker-call" "s6-finish" "s5-finish" "s4-finish" "s1-finish" "s3-finish" "s2-finish" # a larger graph where the deadlock starts from the last node permutation "s1-begin" "s2-begin" "s3-begin" "s4-begin" "s5-begin" "s6-begin" "s5-update-5" "s3-update-2" "s2-update-2" "s4-update-4" "s3-update-4" "s4-update-5" "s1-update-4" "deadlock-checker-call" "s6-update-6" "s5-update-6" "s6-update-5" "deadlock-checker-call" "s5-finish" "s6-finish" "s4-finish" "s3-finish" "s1-finish" "s2-finish" citus-7.0.3/src/test/regress/specs/isolation_distributed_transaction_id.spec000066400000000000000000000044461317107136600275620ustar00rootroot00000000000000# Tests around distributed transaction id generation setup { SET TIME ZONE 'PST8PDT'; } teardown { SET TIME ZONE DEFAULT; } session "s1" step "s1-begin" { BEGIN; } step "s1-assign-transaction-id" { SELECT assign_distributed_transaction_id(1, 1, '2015-01-01 00:00:00+0'); } step "s1-commit" { COMMIT; } step "s1-create-table" { -- some tests also use distributed table CREATE TABLE distributed_transaction_id_table(some_value int, other_value int); SET citus.shard_count TO 4; SELECT create_distributed_table('distributed_transaction_id_table', 'some_value'); } step "s1-insert" { INSERT INTO distributed_transaction_id_table VALUES (1, 1); } step "s1-get-current-transaction-id" { SELECT row(initiator_node_identifier, transaction_number) FROM get_current_transaction_id(); } session "s2" step "s2-begin" { BEGIN; } step "s2-assign-transaction-id" { SELECT assign_distributed_transaction_id(2, 2, '2015-01-02 00:00:00+0'); } step "s2-commit" { COMMIT; } # print only the necessary parts to prevent concurrent runs to print different values step "s2-get-first-worker-active-transactions" { SELECT * FROM run_command_on_workers('SELECT row(initiator_node_identifier, transaction_number) FROM get_all_active_transactions(); ') WHERE nodeport = 57637; ; } session "s3" step "s3-begin" { BEGIN; } step "s3-assign-transaction-id" { SELECT assign_distributed_transaction_id(3, 3, '2015-01-03 00:00:00+0'); } step "s3-commit" { COMMIT; } session "s4" step "s4-get-all-transactions" { SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_all_active_transactions() ORDER BY 1,2,3; } # show that we could get all distributed transaction ids from seperate sessions permutation "s1-begin" "s1-assign-transaction-id" "s4-get-all-transactions" "s2-begin" "s2-assign-transaction-id" "s4-get-all-transactions" "s3-begin" "s3-assign-transaction-id" "s4-get-all-transactions" "s1-commit" "s4-get-all-transactions" "s2-commit" "s4-get-all-transactions" "s3-commit" "s4-get-all-transactions" # now show that distributed transaction id on the coordinator # is the same with the one on the worker permutation "s1-create-table" "s1-begin" "s1-insert" "s1-get-current-transaction-id" "s2-get-first-worker-active-transactions" citus-7.0.3/src/test/regress/specs/isolation_dml_vs_repair.spec000066400000000000000000000063071317107136600250030ustar00rootroot00000000000000setup { CREATE TABLE test_dml_vs_repair (test_id integer NOT NULL, data int); SELECT master_create_distributed_table('test_dml_vs_repair', 'test_id', 'hash'); SELECT master_create_worker_shards('test_dml_vs_repair', 1, 2); } teardown { DROP TABLE IF EXISTS test_dml_vs_repair CASCADE; } session "s1" setup { DEALLOCATE all; TRUNCATE test_dml_vs_repair; PREPARE insertone AS INSERT INTO test_dml_vs_repair VALUES(1, 1); PREPARE insertall AS INSERT INTO test_dml_vs_repair SELECT test_id, 
data+1 FROM test_dml_vs_repair; } step "s1-begin" { BEGIN; } step "s1-insertone" { INSERT INTO test_dml_vs_repair VALUES(1, 1); } step "s1-prepared-insertone" { EXECUTE insertone; } step "s1-insertall" { INSERT INTO test_dml_vs_repair SELECT test_id, data+1 FROM test_dml_vs_repair; } step "s1-prepared-insertall" { EXECUTE insertall; } step "s1-display" { SELECT * FROM test_dml_vs_repair WHERE test_id = 1 ORDER BY test_id; } step "s1-commit" { COMMIT; } session "s2" step "s2-begin" { BEGIN; } step "s2-invalidate-57637" { UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; } step "s2-revalidate-57637" { UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57637; } step "s2-invalidate-57638" { UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; } step "s2-revalidate-57638" { UPDATE pg_dist_shard_placement SET shardstate = '1' WHERE shardid = (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass) AND nodeport = 57638; } step "s2-repair" { SELECT master_copy_shard_placement((SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_dml_vs_repair'::regclass), 'localhost', 57638, 'localhost', 57637); } step "s2-commit" { COMMIT; } # verify that repair is blocked by ongoing modifying simple transaction permutation "s2-invalidate-57637" "s1-begin" "s1-insertone" "s2-repair" "s1-commit" # verify that repair is blocked by ongoing modifying insert...select transaction permutation "s1-insertone" "s2-invalidate-57637" "s1-begin" "s1-insertall" "s2-repair" "s1-commit" # verify that modifications wait for shard repair permutation "s2-invalidate-57637" "s2-begin" "s2-repair" "s1-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" # verify that prepared plain modifications wait for shard repair permutation "s2-invalidate-57637" "s1-prepared-insertone" "s2-begin" "s2-repair" "s1-prepared-insertone" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" # verify that prepared INSERT ... 
SELECT waits for shard repair permutation "s2-invalidate-57637" "s1-insertone" "s1-prepared-insertall" "s2-begin" "s2-repair" "s1-prepared-insertall" "s2-commit" "s2-invalidate-57638" "s1-display" "s2-invalidate-57637" "s2-revalidate-57638" "s1-display" citus-7.0.3/src/test/regress/specs/isolation_drop_shards.spec000066400000000000000000000036131317107136600244620ustar00rootroot00000000000000setup { CREATE TABLE append_table (test_id integer NOT NULL, data text); SELECT create_distributed_table('append_table', 'test_id', 'append'); SELECT 1 FROM ( SELECT min(master_create_empty_shard('append_table')) FROM generate_series(1,16) ) a; } teardown { DROP TABLE append_table; } session "s1" step "s1-begin" { BEGIN; } step "s1-truncate" { TRUNCATE append_table; } step "s1-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM append_table'); } step "s1-drop-all-shards" { SELECT master_drop_all_shards('append_table', 'public', 'append_table'); } step "s1-commit" { COMMIT; } session "s2" step "s2-truncate" { TRUNCATE append_table; } step "s2-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM append_table'); } step "s2-drop-all-shards" { SELECT master_drop_all_shards('append_table', 'public', 'append_table'); } step "s2-select" { SELECT * FROM append_table; } permutation "s1-begin" "s1-drop-all-shards" "s2-truncate" "s1-commit" permutation "s1-begin" "s1-drop-all-shards" "s2-apply-delete-command" "s1-commit" permutation "s1-begin" "s1-drop-all-shards" "s2-drop-all-shards" "s1-commit" permutation "s1-begin" "s1-drop-all-shards" "s2-select" "s1-commit" # We can't verify master_apply_delete_command + SELECT since it blocks on the # the workers, but this is not visible on the master, meaning the isolation # test cannot proceed. permutation "s1-begin" "s1-apply-delete-command" "s2-truncate" "s1-commit" permutation "s1-begin" "s1-apply-delete-command" "s2-apply-delete-command" "s1-commit" permutation "s1-begin" "s1-apply-delete-command" "s2-drop-all-shards" "s1-commit" permutation "s1-begin" "s1-truncate" "s2-truncate" "s1-commit" permutation "s1-begin" "s1-truncate" "s2-apply-delete-command" "s1-commit" permutation "s1-begin" "s1-truncate" "s2-drop-all-shards" "s1-commit" permutation "s1-begin" "s1-truncate" "s2-select" "s1-commit" citus-7.0.3/src/test/regress/specs/isolation_drop_vs_all.spec000066400000000000000000000131521317107136600244550ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
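#
# The "s1-show-indexes" and "s1-show-columns" steps below check the end state on the workers through
# run_command_on_workers(). A hypothetical call and result shape, for illustration only:
#   SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%''');
#   -- returns one row per worker, e.g. (localhost,57637,t,0) and (localhost,57638,t,0) once the shards are gone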
# # create range distributed table to test behavior of DROP in concurrent operations setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE drop_hash(id integer, data text); SELECT create_distributed_table('drop_hash', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS drop_hash CASCADE; SELECT citus.restore_isolation_tester_func(); } # session 1 session "s1" step "s1-initialize" { COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-drop" { DROP TABLE drop_hash; } step "s1-ddl-create-index" { CREATE INDEX drop_hash_index ON drop_hash(id); } step "s1-ddl-drop-index" { DROP INDEX drop_hash_index; } step "s1-ddl-add-column" { ALTER TABLE drop_hash ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE drop_hash DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE drop_hash RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('drop_hash'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DROP FROM drop_hash;'); } step "s1-create-non-distributed-table" { CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-distribute-table" { SELECT create_distributed_table('drop_hash', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM drop_hash; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''drop_hash%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''ddl_hash%'' AND column_name = ''drop_hash'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-drop" { DROP TABLE drop_hash; } step "s2-ddl-create-index" { CREATE INDEX drop_hash_index ON drop_hash(id); } step "s2-ddl-drop-index" { DROP INDEX drop_hash_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY drop_hash_index ON drop_hash(id); } step "s2-ddl-add-column" { ALTER TABLE drop_hash ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE drop_hash DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE drop_hash RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('drop_hash'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DROP FROM drop_hash;'); } step "s2-create-non-distributed-table" { CREATE TABLE drop_hash(id integer, data text); COPY drop_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s2-distribute-table" { SELECT create_distributed_table('drop_hash', 'id'); } step "s2-select" { SELECT * FROM drop_hash ORDER BY 1, 2; } step "s2-commit" { COMMIT; } # permutations - DROP vs DROP permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" # permutations - DROP first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" 
"s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-drop-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" # permutations - DROP second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-ddl-drop-column" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_dump_global_wait_edges.spec000066400000000000000000000035541317107136600266360ustar00rootroot00000000000000setup { CREATE TABLE distributed_table (x int primary key, y int); SELECT create_distributed_table('distributed_table', 'x'); INSERT INTO distributed_table VALUES (1,0); CREATE OR REPLACE FUNCTION get_adjacency_list_wait_graph(OUT transactionNumber int, OUT waitingTransactionNumbers cstring) RETURNS SETOF RECORD LANGUAGE C STRICT AS 'citus', $$get_adjacency_list_wait_graph$$; COMMENT ON FUNCTION get_adjacency_list_wait_graph(OUT transactionNumber int, OUT waitingTransactionNumbers cstring) IS 'returns flattened wait graph'; } teardown { DROP TABLE distributed_table; } session "s1" step "s1-begin" { BEGIN; } step "s1-update" { UPDATE distributed_table SET y = 1 WHERE x = 1; } step "s1-abort" { ABORT; } session "s2" step "s2-begin" { BEGIN; } step "s2-update" { UPDATE distributed_table SET y = 2 WHERE x = 1; } step "s2-abort" { ABORT; } session "s3" step "s3-begin" { BEGIN; } step "s3-update" { UPDATE distributed_table SET y = 3 WHERE x = 1; } step "s3-abort" { ABORT; } session "detector" step "detector-dump-wait-edges" { SELECT waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting FROM dump_global_wait_edges() ORDER BY waiting_transaction_num, blocking_transaction_num, blocking_transaction_waiting; SELECT * FROM get_adjacency_list_wait_graph() ORDER BY 1; } # Distributed 
transaction blocked by another distributed transaction permutation "s1-begin" "s2-begin" "s1-update" "s2-update" "detector-dump-wait-edges" "s1-abort" "s2-abort" # Distributed transaction blocked by another distributed transaction blocked by another distributed transaction permutation "s1-begin" "s2-begin" "s3-begin" "s1-update" "s2-update" "s3-update" "detector-dump-wait-edges" "s1-abort" "s2-abort" "s3-abort" citus-7.0.3/src/test/regress/specs/isolation_dump_local_wait_edges.spec000066400000000000000000000034401317107136600264620ustar00rootroot00000000000000setup { CREATE TABLE local_table (x int primary key, y int); INSERT INTO local_table VALUES (1,0); } teardown { DROP TABLE local_table; } session "dist11" step "dist11-begin" { BEGIN; SELECT assign_distributed_transaction_id(11, 1, '2017-01-01 00:00:00+0'); } step "dist11-update" { UPDATE local_table SET y = 1 WHERE x = 1; } step "dist11-abort" { ABORT; } session "local" step "local-begin" { BEGIN; } step "local-update" { UPDATE local_table SET y = 2 WHERE x = 1; } step "local-abort" { ABORT; } session "dist13" step "dist13-begin" { BEGIN; SELECT assign_distributed_transaction_id(13, 1, '2017-01-01 00:00:00+0'); } step "dist13-update" { UPDATE local_table SET y = 3 WHERE x = 1; } step "dist13-abort" { ABORT; } session "detector" step "detector-dump-wait-edges" { SELECT waiting_node_id, waiting_transaction_num, blocking_node_id, blocking_transaction_num, blocking_transaction_waiting FROM dump_local_wait_edges() ORDER BY waiting_node_id, blocking_transaction_num, blocking_transaction_waiting; } # Distributed transaction blocked by another distributed transaction permutation "dist11-begin" "dist13-begin" "dist11-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "dist13-abort" # Distributed transaction blocked by a regular transaction permutation "local-begin" "dist13-begin" "local-update" "dist13-update" "detector-dump-wait-edges" "local-abort" "dist13-abort" # Distributed transaction blocked by a regular transaction blocked by a distributed transaction permutation "dist11-begin" "local-begin" "dist13-begin" "dist11-update" "local-update" "dist13-update" "detector-dump-wait-edges" "dist11-abort" "local-abort" "dist13-abort" citus-7.0.3/src/test/regress/specs/isolation_hash_copy_vs_all.spec000066400000000000000000000214451317107136600254720ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
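#
# The COPY steps below pipe literal CSV rows through a shell echo. As a rough, hypothetical sketch,
# a step such as
#   COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV;
# loads rows equivalent to a plain STDIN copy in psql, e.g. (first two rows shown):
#   COPY hash_copy FROM STDIN WITH CSV;
#   5,f,5
#   6,g,6
#   \.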
# # create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE hash_copy(id integer, data text, int_data int); SELECT create_distributed_table('hash_copy', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS hash_copy CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-copy" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s1-copy-additional-column" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s1-router-select" { SELECT * FROM hash_copy WHERE id = 1; } step "s1-real-time-select" { SELECT * FROM hash_copy ORDER BY 1, 2; } step "s1-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s1-insert" { INSERT INTO hash_copy VALUES(0, 'k', 0); } step "s1-insert-select" { INSERT INTO hash_copy SELECT * FROM hash_copy; } step "s1-update" { UPDATE hash_copy SET data = 'l' WHERE id = 0; } step "s1-delete" { DELETE FROM hash_copy WHERE id = 1; } step "s1-truncate" { TRUNCATE hash_copy; } step "s1-drop" { DROP TABLE hash_copy; } step "s1-ddl-create-index" { CREATE INDEX hash_copy_index ON hash_copy(id); } step "s1-ddl-drop-index" { DROP INDEX hash_copy_index; } step "s1-ddl-add-column" { ALTER TABLE hash_copy ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE hash_copy DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE hash_copy RENAME data TO new_column; } step "s1-ddl-unique-constraint" { ALTER TABLE hash_copy ADD CONSTRAINT hash_copy_unique UNIQUE(id); } step "s1-table-size" { SELECT citus_total_relation_size('hash_copy'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM hash_copy;'); } step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); } step "s1-create-non-distributed-table" { CREATE TABLE hash_copy(id integer, data text, int_data int); COPY hash_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-distribute-table" { SELECT create_distributed_table('hash_copy', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM hash_copy; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''hash_copy%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''hash_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-copy" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY hash_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s2-router-select" { SELECT * FROM hash_copy WHERE id = 1; } step "s2-real-time-select" { SELECT * FROM hash_copy ORDER BY 1, 2; } step "s2-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM hash_copy AS t1 JOIN hash_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s2-insert" { INSERT INTO hash_copy VALUES(0, 'k', 0); } step "s2-insert-select" 
{ INSERT INTO hash_copy SELECT * FROM hash_copy; } step "s2-update" { UPDATE hash_copy SET data = 'l' WHERE id = 0; } step "s2-delete" { DELETE FROM hash_copy WHERE id = 1; } step "s2-truncate" { TRUNCATE hash_copy; } step "s2-drop" { DROP TABLE hash_copy; } step "s2-ddl-create-index" { CREATE INDEX hash_copy_index ON hash_copy(id); } step "s2-ddl-drop-index" { DROP INDEX hash_copy_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY hash_copy_index ON hash_copy(id); } step "s2-ddl-add-column" { ALTER TABLE hash_copy ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE hash_copy DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE hash_copy RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('hash_copy'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM hash_copy;'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('hash_copy'::regclass, 'public', 'hash_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('hash_copy', 'id'); } # permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" # permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" 
"s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_insert_select_vs_all.spec000066400000000000000000000405231317107136600263560ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
# # create range distributed table to test behavior of INSERT/SELECT in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE insert_of_insert_select_hash(id integer, data text); SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); CREATE TABLE select_of_insert_select_hash(id integer, data text); SELECT create_distributed_table('select_of_insert_select_hash', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS insert_of_insert_select_hash CASCADE; DROP TABLE IF EXISTS select_of_insert_select_hash CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY insert_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; COPY insert_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; COPY select_of_insert_select_hash FROM PROGRAM 'echo 5, a\\n6, b\\n7, c\\n8, d\\n9, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-insert-select" { INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5;; } step "s1-update-on-inserted" { UPDATE insert_of_insert_select_hash SET data = 'l' WHERE id = 4; } step "s1-delete-on-inserted" { DELETE FROM insert_of_insert_select_hash WHERE id = 4; } step "s1-truncate-on-inserted" { TRUNCATE insert_of_insert_select_hash; } step "s1-drop-on-inserted" { DROP TABLE insert_of_insert_select_hash; } step "s1-ddl-create-index-on-inserted" { CREATE INDEX insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); } step "s1-ddl-drop-index-on-inserted" { DROP INDEX insert_of_insert_select_hash_index; } step "s1-ddl-add-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash DROP new_column; } step "s1-ddl-rename-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash RENAME data TO new_column; } step "s1-table-size-on-inserted" { SELECT citus_total_relation_size('insert_of_insert_select_hash'); } step "s1-master-modify-multiple-shards-on-inserted" { SELECT master_modify_multiple_shards('DELETE FROM insert_of_insert_select_hash;'); } step "s1-master-drop-all-shards-on-inserted" { SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); } step "s1-create-non-distributed-table-on-inserted" { CREATE TABLE insert_of_insert_select_hash(id integer, data text); } step "s1-distribute-table-on-inserted" { SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); } step "s1-show-indexes-inserted" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_of_insert_select_hash%'''); } step "s1-show-columns-inserted" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-update-on-selected" { UPDATE select_of_insert_select_hash SET data = 'l' WHERE id = 4; } step "s1-delete-on-selected" { DELETE FROM select_of_insert_select_hash WHERE id = 4; } step "s1-truncate-on-selected" { TRUNCATE select_of_insert_select_hash; } step "s1-drop-on-selected" { DROP TABLE select_of_insert_select_hash; } step "s1-ddl-create-index-on-selected" { CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); } step 
"s1-ddl-drop-index-on-selected" { DROP INDEX select_of_insert_select_hash_index; } step "s1-ddl-add-column-on-selected" { ALTER TABLE select_of_insert_select_hash ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column-on-selected" { ALTER TABLE select_of_insert_select_hash DROP new_column; } step "s1-ddl-rename-column-on-selected" { ALTER TABLE select_of_insert_select_hash RENAME data TO new_column; } step "s1-table-size-on-selected" { SELECT citus_total_relation_size('select_of_insert_select_hash'); } step "s1-master-modify-multiple-shards-on-selected" { SELECT master_modify_multiple_shards('DELETE FROM select_of_insert_select_hash;'); } step "s1-master-drop-all-shards-on-selected" { SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); } step "s1-create-non-distributed-table-on-selected" { CREATE TABLE select_of_insert_select_hash(id integer, data text); } step "s1-distribute-table-on-selected" { SELECT create_distributed_table('select_of_insert_select_hash', 'id'); } step "s1-show-indexes-selected" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_of_insert_select_hash%'''); } step "s1-show-columns-selected" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_of_insert_select_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-select-count" { SELECT COUNT(*) FROM select_of_insert_select_hash; } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-insert-select" { INSERT INTO insert_of_insert_select_hash SELECT * FROM select_of_insert_select_hash ORDER BY 1, 2 LIMIT 5; } step "s2-update-on-inserted" { UPDATE insert_of_insert_select_hash SET data = 'l' WHERE id = 4; } step "s2-delete-on-inserted" { DELETE FROM insert_of_insert_select_hash WHERE id = 4; } step "s2-truncate-on-inserted" { TRUNCATE insert_of_insert_select_hash; } step "s2-drop-on-inserted" { DROP TABLE insert_of_insert_select_hash; } step "s2-ddl-create-index-on-inserted" { CREATE INDEX insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); } step "s2-ddl-drop-index-on-inserted" { DROP INDEX insert_of_insert_select_hash_index; } step "s2-ddl-create-index-concurrently-on-inserted" { CREATE INDEX CONCURRENTLY insert_of_insert_select_hash_index ON insert_of_insert_select_hash(id); } step "s2-ddl-add-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash DROP new_column; } step "s2-ddl-rename-column-on-inserted" { ALTER TABLE insert_of_insert_select_hash RENAME data TO new_column; } step "s2-table-size-on-inserted" { SELECT citus_total_relation_size('insert_of_insert_select_hash'); } step "s2-master-modify-multiple-shards-on-inserted" { SELECT master_modify_multiple_shards('DELETE FROM insert_of_insert_select_hash;'); } step "s2-master-drop-all-shards-on-inserted" { SELECT master_drop_all_shards('insert_of_insert_select_hash'::regclass, 'public', 'insert_of_insert_select_hash'); } step "s2-create-non-distributed-table-on-inserted" { CREATE TABLE insert_of_insert_select_hash(id integer, data text); } step "s2-distribute-table-on-inserted" { SELECT create_distributed_table('insert_of_insert_select_hash', 'id'); } step "s2-update-on-selected" { UPDATE select_of_insert_select_hash SET data = 'l' WHERE id = 4; } step "s2-delete-on-selected" { DELETE FROM select_of_insert_select_hash WHERE id = 4; } 
step "s2-truncate-on-selected" { TRUNCATE select_of_insert_select_hash; } step "s2-drop-on-selected" { DROP TABLE select_of_insert_select_hash; } step "s2-ddl-create-index-on-selected" { CREATE INDEX select_of_insert_select_hash_index ON select_of_insert_select_hash(id); } step "s2-ddl-drop-index-on-selected" { DROP INDEX select_of_insert_select_hash_index; } step "s2-ddl-create-index-concurrently-on-selected" { CREATE INDEX CONCURRENTLY select_of_insert_select_hash_index ON insert_of_insert_select_hash(id); } step "s2-ddl-add-column-on-selected" { ALTER TABLE select_of_insert_select_hash ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column-on-selected" { ALTER TABLE select_of_insert_select_hash DROP new_column; } step "s2-ddl-rename-column-on-selected" { ALTER TABLE select_of_insert_select_hash RENAME data TO new_column; } step "s2-table-size-on-selected" { SELECT citus_total_relation_size('select_of_insert_select_hash'); } step "s2-master-modify-multiple-shards-on-selected" { SELECT master_modify_multiple_shards('DELETE FROM select_of_insert_select_hash;'); } step "s2-master-drop-all-shards-on-selected" { SELECT master_drop_all_shards('select_of_insert_select_hash'::regclass, 'public', 'select_of_insert_select_hash'); } step "s2-create-non-distributed-table-on-selected" { CREATE TABLE select_of_insert_select_hash(id integer, data text); } step "s2-distribute-table-on-selected" { SELECT create_distributed_table('select_of_insert_select_hash', 'id'); } # permutations - INSERT/SELECT vs INSERT/SELECT permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-select" "s1-commit" "s1-select-count" # permutations - INSERT/SELECT first operation on INSERT side permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-truncate-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-drop-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-create-index-on-inserted" "s1-commit" "s1-select-count" "s1-show-indexes-inserted" permutation "s1-initialize" "s1-ddl-create-index-on-inserted" "s1-begin" "s1-insert-select" "s2-ddl-drop-index-on-inserted" "s1-commit" "s1-select-count" "s1-show-indexes-inserted" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-create-index-concurrently-on-inserted" "s1-commit" "s1-select-count" "s1-show-indexes-inserted" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-add-column-on-inserted" "s1-commit" "s1-select-count" "s1-show-columns-inserted" permutation "s1-initialize" "s1-ddl-add-column-on-inserted" "s1-begin" "s1-insert-select" "s2-ddl-drop-column-on-inserted" "s1-commit" "s1-select-count" "s1-show-columns-inserted" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-rename-column-on-inserted" "s1-commit" "s1-select-count" "s1-show-columns-inserted" "s1-show-columns-inserted" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-table-size-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-modify-multiple-shards-on-inserted" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-inserted" "s1-commit" "s1-select-count" permutation "s1-drop-on-inserted" 
"s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-inserted" "s1-commit" "s1-select-count" # permutations - INSERT/SELECT first operation on SELECT side permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-update-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-delete-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-truncate-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-drop-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-create-index-on-selected" "s1-commit" "s1-select-count" "s1-show-indexes-selected" permutation "s1-initialize" "s1-ddl-create-index-on-selected" "s1-begin" "s1-insert-select" "s2-ddl-drop-index-on-selected" "s1-commit" "s1-select-count" "s1-show-indexes-selected" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-create-index-concurrently-on-selected" "s1-commit" "s1-select-count" "s1-show-indexes-selected" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-add-column-on-selected" "s1-commit" "s1-select-count" "s1-show-columns-selected" permutation "s1-initialize" "s1-ddl-add-column-on-selected" "s1-begin" "s1-insert-select" "s2-ddl-drop-column-on-selected" "s1-commit" "s1-select-count" "s1-show-columns-selected" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-ddl-rename-column-on-selected" "s1-commit" "s1-select-count" "s1-show-columns-selected" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-table-size-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-modify-multiple-shards-on-selected" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-master-drop-all-shards-on-selected" "s1-commit" "s1-select-count" permutation "s1-drop-on-selected" "s1-create-non-distributed-table-on-selected" "s1-initialize" "s1-begin" "s1-insert-select" "s2-distribute-table-on-selected" "s1-commit" "s1-select-count" # permutations - INSERT/SELECT second on INSERT side permutation "s1-initialize" "s1-begin" "s1-update-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-indexes-inserted" permutation "s1-initialize" "s1-ddl-create-index-on-inserted" "s1-begin" "s1-ddl-drop-index-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-indexes-inserted" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-columns-inserted" permutation "s1-initialize" "s1-ddl-add-column-on-inserted" "s1-begin" "s1-ddl-drop-column-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-columns-inserted" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-columns-inserted" permutation "s1-initialize" "s1-begin" 
"s1-table-size-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-drop-on-inserted" "s1-create-non-distributed-table-on-inserted" "s1-initialize" "s1-begin" "s1-distribute-table-on-inserted" "s2-insert-select" "s1-commit" "s1-select-count" # permutations - INSERT/SELECT second on SELECT side permutation "s1-initialize" "s1-begin" "s1-update-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-indexes-selected" permutation "s1-initialize" "s1-ddl-create-index-on-selected" "s1-begin" "s1-ddl-drop-index-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-indexes-selected" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-columns-selected" permutation "s1-initialize" "s1-ddl-add-column-on-selected" "s1-begin" "s1-ddl-drop-column-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-columns-selected" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" "s1-show-columns-selected" permutation "s1-initialize" "s1-begin" "s1-table-size-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-drop-on-selected" "s1-create-non-distributed-table-on-selected" "s1-initialize" "s1-begin" "s1-distribute-table-on-selected" "s2-insert-select" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_insert_vs_all.spec000066400000000000000000000244301317107136600250160ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
# # create range distributed table to test behavior of INSERT in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE insert_hash(id integer, data text); SELECT create_distributed_table('insert_hash', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS insert_hash CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-insert" { INSERT INTO insert_hash VALUES(7, 'k'); } step "s1-insert-multi-row" { INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); } step "s1-insert-select" { INSERT INTO insert_hash SELECT * FROM insert_hash; } step "s1-update" { UPDATE insert_hash SET data = 'l' WHERE id = 4; } step "s1-delete" { DELETE FROM insert_hash WHERE id = 4; } step "s1-truncate" { TRUNCATE insert_hash; } step "s1-drop" { DROP TABLE insert_hash; } step "s1-ddl-create-index" { CREATE INDEX insert_hash_index ON insert_hash(id); } step "s1-ddl-drop-index" { DROP INDEX insert_hash_index; } step "s1-ddl-add-column" { ALTER TABLE insert_hash ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE insert_hash DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE insert_hash RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('insert_hash'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM insert_hash;'); } step "s1-create-non-distributed-table" { CREATE TABLE insert_hash(id integer, data text); COPY insert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-distribute-table" { SELECT create_distributed_table('insert_hash', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM insert_hash; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''insert_hash%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''insert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-insert" { INSERT INTO insert_hash VALUES(7, 'k'); } step "s2-insert-multi-row" { INSERT INTO insert_hash VALUES(7, 'k'), (8, 'l'), (9, 'm'); } step "s2-insert-select" { INSERT INTO insert_hash SELECT * FROM insert_hash; } step "s2-update" { UPDATE insert_hash SET data = 'l' WHERE id = 4; } step "s2-delete" { DELETE FROM insert_hash WHERE id = 4; } step "s2-truncate" { TRUNCATE insert_hash; } step "s2-drop" { DROP TABLE insert_hash; } step "s2-ddl-create-index" { CREATE INDEX insert_hash_index ON insert_hash(id); } step "s2-ddl-drop-index" { DROP INDEX insert_hash_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY insert_hash_index ON insert_hash(id); } step "s2-ddl-add-column" { ALTER TABLE insert_hash ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE insert_hash DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE insert_hash RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('insert_hash'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM insert_hash;'); } step "s2-distribute-table" { SELECT create_distributed_table('insert_hash', 'id'); } # permutations - INSERT vs INSERT permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" 
"s1-begin" "s1-insert" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert" "s1-commit" "s1-select-count" # permutations - INSERT first permutation "s1-initialize" "s1-begin" "s1-insert" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-insert" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-insert" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - INSERT second permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-insert" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-insert" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-insert" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-insert" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-insert" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-insert" "s1-commit" "s1-select-count" # permutations - multi row INSERT first permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-insert-select" 
"s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-insert-multi-row" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-insert-multi-row" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-insert-multi-row" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - multi row INSERT second permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-insert-multi-row" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-insert-multi-row" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-insert-multi-row" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-insert-multi-row" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-insert-multi-row" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-insert-multi-row" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-insert-multi-row" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_insert_vs_vacuum.spec000066400000000000000000000014071317107136600255450ustar00rootroot00000000000000setup { SET citus.shard_replication_factor TO 1; CREATE TABLE test_insert_vacuum(column1 int, 
column2 int); SELECT create_distributed_table('test_insert_vacuum', 'column1'); } teardown { DROP TABLE test_insert_vacuum; } session "s1" step "s1-begin" { BEGIN; } step "s1-insert" { INSERT INTO test_insert_vacuum VALUES(1, 1); } step "s1-commit" { COMMIT; } session "s2" step "s2-vacuum-analyze" { VACUUM ANALYZE test_insert_vacuum; } step "s2-vacuum-full" { VACUUM FULL test_insert_vacuum; } # INSERT and VACUUM ANALYZE should not block each other. permutation "s1-begin" "s1-insert" "s2-vacuum-analyze" "s1-commit" # INSERT and VACUUM FULL should block each other. permutation "s1-begin" "s1-insert" "s2-vacuum-full" "s1-commit" citus-7.0.3/src/test/regress/specs/isolation_partitioned_copy_vs_all.spec000066400000000000000000000216101317107136600270630ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. # # create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE partitioned_copy(id integer, data text, int_data int) PARTITION BY RANGE (int_data); CREATE TABLE partitioned_copy_0_3 PARTITION OF partitioned_copy FOR VALUES FROM (0) TO (3); CREATE TABLE partitioned_copy_3_6 PARTITION OF partitioned_copy FOR VALUES FROM (3) TO (6); CREATE TABLE partitioned_copy_6_10 PARTITION OF partitioned_copy FOR VALUES FROM (6) TO (10); SELECT create_distributed_table('partitioned_copy', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS partitioned_copy CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-copy" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s1-copy-additional-column" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s1-router-select" { SELECT * FROM partitioned_copy WHERE id = 1; } step "s1-real-time-select" { SELECT * FROM partitioned_copy ORDER BY 1, 2; } step "s1-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s1-insert" { INSERT INTO partitioned_copy VALUES(0, 'k', 0); } step "s1-insert-select" { INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; } step "s1-update" { UPDATE partitioned_copy SET data = 'l' WHERE id = 0; } step "s1-delete" { DELETE FROM partitioned_copy WHERE id = 1; } step "s1-truncate" { TRUNCATE partitioned_copy; } step "s1-drop" { DROP TABLE partitioned_copy; } step "s1-ddl-create-index" { CREATE INDEX partitioned_copy_index ON partitioned_copy(id); } step "s1-ddl-drop-index" { DROP INDEX partitioned_copy_index; } step "s1-ddl-add-column" { ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE partitioned_copy DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE partitioned_copy RENAME data TO new_column; } step "s1-ddl-unique-constraint" { ALTER TABLE partitioned_copy ADD CONSTRAINT partitioned_copy_unique UNIQUE(id); } step "s1-table-size" { SELECT citus_total_relation_size('partitioned_copy'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM partitioned_copy;'); } step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 
'partitioned_copy'); } step "s1-create-non-distributed-table" { CREATE TABLE partitioned_copy(id integer, data text, int_data int); COPY partitioned_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-distribute-table" { SELECT create_distributed_table('partitioned_copy', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM partitioned_copy; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''partitioned_copy%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''partitioned_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-copy" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY partitioned_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s2-router-select" { SELECT * FROM partitioned_copy WHERE id = 1; } step "s2-real-time-select" { SELECT * FROM partitioned_copy ORDER BY 1, 2; } step "s2-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM partitioned_copy AS t1 JOIN partitioned_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s2-insert" { INSERT INTO partitioned_copy VALUES(0, 'k', 0); } step "s2-insert-select" { INSERT INTO partitioned_copy SELECT * FROM partitioned_copy; } step "s2-update" { UPDATE partitioned_copy SET data = 'l' WHERE id = 0; } step "s2-delete" { DELETE FROM partitioned_copy WHERE id = 1; } step "s2-truncate" { TRUNCATE partitioned_copy; } step "s2-drop" { DROP TABLE partitioned_copy; } step "s2-ddl-create-index" { CREATE INDEX partitioned_copy_index ON partitioned_copy(id); } step "s2-ddl-drop-index" { DROP INDEX partitioned_copy_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY partitioned_copy_index ON partitioned_copy(id); } step "s2-ddl-add-column" { ALTER TABLE partitioned_copy ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE partitioned_copy DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE partitioned_copy RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('partitioned_copy'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM partitioned_copy;'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('partitioned_copy'::regclass, 'public', 'partitioned_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('partitioned_copy', 'id'); } # permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" # permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" 
permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_progress_monitoring.spec000066400000000000000000000057161317107136600262710ustar00rootroot00000000000000# Isolation tests for checking the progress monitoring infrastructure # We create three different processes, two of the type "1337" and one of type "3778" # We utilize advisory locks to control steps of the processes # Different locks are held for each step so that the processes stop at each step and # we can see their progress. 
setup { CREATE FUNCTION create_progress(bigint, bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION update_progress(bigint, bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION finish_progress() RETURNS void AS 'citus' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION show_progress(bigint) RETURNS TABLE(step int, progress bigint) AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION sample_operation(command_type bigint, lockid bigint, progress bigint) RETURNS VOID AS $$ BEGIN PERFORM create_progress(command_type, 2); PERFORM pg_advisory_xact_lock(lockid); PERFORM update_progress(0, progress); PERFORM pg_advisory_xact_lock(lockid + 1); PERFORM update_progress(1, progress); PERFORM pg_advisory_xact_lock(lockid + 2); PERFORM finish_progress(); END; $$ LANGUAGE 'plpgsql'; } teardown { DROP FUNCTION IF EXISTS create_progress(bigint, bigint); DROP FUNCTION IF EXISTS update_progress(bigint, bigint); DROP FUNCTION IF EXISTS finish_progress(); DROP FUNCTION IF EXISTS show_progress(bigint); DROP FUNCTION IF EXISTS sample_operation(bigint, bigint, bigint); } session "s1" step "s1-start-operation" { SELECT sample_operation(1337, 10, -1); } session "s2" step "s2-start-operation" { SELECT sample_operation(1337, 20, 2); } session "s3" step "s3-start-operation" { SELECT sample_operation(3778, 30, 9); } session "lock-orchestrator" step "take-locks" { -- Locks for steps of sample operation in s1 SELECT pg_advisory_lock(10); SELECT pg_advisory_lock(11); SELECT pg_advisory_lock(12); -- Locks for steps of sample operation in s2 SELECT pg_advisory_lock(20); SELECT pg_advisory_lock(21); SELECT pg_advisory_lock(22); -- Locks for steps of sample operation in s3 SELECT pg_advisory_lock(30); SELECT pg_advisory_lock(31); SELECT pg_advisory_lock(32); } step "release-locks-1" { -- Release the locks of first steps of sample operations SELECT pg_advisory_unlock(10); SELECT pg_advisory_unlock(20); SELECT pg_advisory_unlock(30); } step "release-locks-2" { -- Release the locks of second steps of sample operations SELECT pg_advisory_unlock(11); SELECT pg_advisory_unlock(21); SELECT pg_advisory_unlock(31); } step "release-locks-3" { -- Release the locks of final steps of sample operations SELECT pg_advisory_unlock(12); SELECT pg_advisory_unlock(22); SELECT pg_advisory_unlock(32); } session "monitor" step "show-progress" { SELECT show_progress(1337); SELECT show_progress(3778); } permutation "take-locks" "s1-start-operation" "s2-start-operation" "s3-start-operation" "show-progress" "release-locks-1" "show-progress" "release-locks-2" "show-progress" "release-locks-3"citus-7.0.3/src/test/regress/specs/isolation_range_copy_vs_all.spec000066400000000000000000000223171317107136600256420ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
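#
# Hedged note (not an additional test): unlike the hash-distributed variants of this suite,
# the setup below distributes the table with the 'append' method, e.g.
#   SELECT create_distributed_table('range_copy', 'id', 'append');
# which is presumably why this spec also exercises master_apply_delete_command (to the best
# of my knowledge that UDF only targets append- and range-distributed tables) alongside the
# usual COPY, DML and DDL steps.
#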
# # create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE range_copy(id integer, data text, int_data int); SELECT create_distributed_table('range_copy', 'id', 'append'); } # drop distributed table teardown { DROP TABLE IF EXISTS range_copy CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY range_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-copy" { COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s1-copy-additional-column" { COPY range_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s1-router-select" { SELECT * FROM range_copy WHERE id = 1; } step "s1-real-time-select" { SELECT * FROM range_copy ORDER BY 1, 2; } step "s1-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s1-insert" { INSERT INTO range_copy VALUES(0, 'k', 0); } step "s1-insert-select" { INSERT INTO range_copy SELECT * FROM range_copy; } step "s1-update" { UPDATE range_copy SET data = 'l' WHERE id = 0; } step "s1-delete" { DELETE FROM range_copy WHERE id = 1; } step "s1-truncate" { TRUNCATE range_copy; } step "s1-drop" { DROP TABLE range_copy; } step "s1-ddl-create-index" { CREATE INDEX range_copy_index ON range_copy(id); } step "s1-ddl-drop-index" { DROP INDEX range_copy_index; } step "s1-ddl-add-column" { ALTER TABLE range_copy ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE range_copy DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column; } step "s1-ddl-unique-constraint" { ALTER TABLE range_copy ADD CONSTRAINT range_copy_unique UNIQUE(id); } step "s1-table-size" { SELECT citus_total_relation_size('range_copy'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM range_copy;'); } step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;'); } step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); } step "s1-create-non-distributed-table" { CREATE TABLE range_copy(id integer, data text, int_data int); } step "s1-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); } step "s1-select-count" { SELECT COUNT(*) FROM range_copy; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''range_copy%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''range_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-copy" { COPY range_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY range_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s2-router-select" { SELECT * FROM range_copy WHERE id = 1; } step "s2-real-time-select" { SELECT * FROM range_copy ORDER BY 1, 2; } step "s2-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM range_copy AS t1 JOIN range_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } 
step "s2-insert" { INSERT INTO range_copy VALUES(0, 'k', 0); } step "s2-insert-select" { INSERT INTO range_copy SELECT * FROM range_copy; } step "s2-update" { UPDATE range_copy SET data = 'l' WHERE id = 0; } step "s2-delete" { DELETE FROM range_copy WHERE id = 1; } step "s2-truncate" { TRUNCATE range_copy; } step "s2-drop" { DROP TABLE range_copy; } step "s2-ddl-create-index" { CREATE INDEX range_copy_index ON range_copy(id); } step "s2-ddl-drop-index" { DROP INDEX range_copy_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY range_copy_index ON range_copy(id); } step "s2-ddl-add-column" { ALTER TABLE range_copy ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE range_copy DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE range_copy RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('range_copy'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM range_copy;'); } step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM range_copy WHERE id <= 4;'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('range_copy'::regclass, 'public', 'range_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('range_copy', 'id', 'range'); } # permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" # permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-apply-delete-command" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" 
permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-apply-delete-command" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_reference_copy_vs_all.spec000066400000000000000000000222611317107136600265020ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
# # create append distributed table to test behavior of COPY in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE reference_copy(id integer, data text, int_data int); SELECT create_reference_table('reference_copy'); } # drop distributed table teardown { DROP TABLE IF EXISTS reference_copy CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-copy" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s1-copy-additional-column" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s1-router-select" { SELECT * FROM reference_copy WHERE id = 1; } step "s1-real-time-select" { SELECT * FROM reference_copy ORDER BY 1, 2; } step "s1-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s1-insert" { INSERT INTO reference_copy VALUES(0, 'k', 0); } step "s1-insert-select" { INSERT INTO reference_copy SELECT * FROM reference_copy; } step "s1-update" { UPDATE reference_copy SET data = 'l' WHERE id = 0; } step "s1-delete" { DELETE FROM reference_copy WHERE id = 1; } step "s1-truncate" { TRUNCATE reference_copy; } step "s1-drop" { DROP TABLE reference_copy; } step "s1-ddl-create-index" { CREATE INDEX reference_copy_index ON reference_copy(id); } step "s1-ddl-drop-index" { DROP INDEX reference_copy_index; } step "s1-ddl-add-column" { ALTER TABLE reference_copy ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE reference_copy DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE reference_copy RENAME data TO new_column; } step "s1-ddl-unique-constraint" { ALTER TABLE reference_copy ADD CONSTRAINT reference_copy_unique UNIQUE(id); } step "s1-table-size" { SELECT citus_total_relation_size('reference_copy'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM reference_copy;'); } step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM reference_copy WHERE id <= 4;'); } step "s1-create-non-distributed-table" { CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-distribute-table" { SELECT create_reference_table('reference_copy'); } step "s1-select-count" { SELECT COUNT(*) FROM reference_copy; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''reference_copy%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''reference_copy%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-copy" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5\\n6, g, 6\\n7, h, 7\\n8, i, 8\\n9, j, 9' WITH CSV; } step "s2-copy-additional-column" { COPY reference_copy FROM PROGRAM 'echo 5, f, 5, 5\\n6, g, 6, 6\\n7, h, 7, 7\\n8, i, 8, 8\\n9, j, 9, 9' WITH CSV; } step "s2-router-select" { SELECT * FROM reference_copy WHERE id = 1; } step "s2-real-time-select" { SELECT * FROM reference_copy ORDER BY 1, 2; } step "s2-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; 
SELECT * FROM reference_copy AS t1 JOIN reference_copy AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s2-insert" { INSERT INTO reference_copy VALUES(0, 'k', 0); } step "s2-insert-select" { INSERT INTO reference_copy SELECT * FROM reference_copy; } step "s2-update" { UPDATE reference_copy SET data = 'l' WHERE id = 0; } step "s2-delete" { DELETE FROM reference_copy WHERE id = 1; } step "s2-truncate" { TRUNCATE reference_copy; } step "s2-drop" { DROP TABLE reference_copy; } step "s2-ddl-create-index" { CREATE INDEX reference_copy_index ON reference_copy(id); } step "s2-ddl-drop-index" { DROP INDEX reference_copy_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY reference_copy_index ON reference_copy(id); } step "s2-ddl-add-column" { ALTER TABLE reference_copy ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE reference_copy DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE reference_copy RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('reference_copy'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM reference_copy;'); } step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM reference_copy WHERE id <= 4;'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('reference_copy'::regclass, 'public', 'reference_copy'); } step "s2-create-non-distributed-table" { CREATE TABLE reference_copy(id integer, data text, int_data int); COPY reference_copy FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s2-distribute-table" { SELECT create_reference_table('reference_copy'); } # permutations - COPY vs COPY permutation "s1-initialize" "s1-begin" "s1-copy" "s2-copy" "s1-commit" "s1-select-count" # permutations - COPY first permutation "s1-initialize" "s1-begin" "s1-copy" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-copy" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-copy-additional-column" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" 
"s1-copy" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-copy" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-copy" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - COPY second permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-copy" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-copy" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-copy" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s1-distribute-table" "s2-copy" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_replace_wait_function.spec000066400000000000000000000016411317107136600265150ustar00rootroot00000000000000# check that replace_isolation_tester_func correctly replaces the functions isolation # tester uses while searching for locks. If those functions aren't correctly replaced # this test will timeout, since isolation tester will never notice that s2 is blocked # by s1 on a lock it's taken out on one of the workers setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); CREATE TABLE test_locking (a int unique); SELECT create_distributed_table('test_locking', 'a'); } teardown { DROP TABLE test_locking; SELECT citus.restore_isolation_tester_func(); } session "s1" step "s1-insert-1" { BEGIN; INSERT INTO test_locking (a) VALUES (1); } step "s1-finish" { COMMIT; } session "s2" step "s2-insert" { BEGIN; INSERT INTO test_locking (a) VALUES (1); } step "s2-finish" { COMMIT; } permutation "s1-insert-1" "s2-insert" "s1-finish" "s2-finish" citus-7.0.3/src/test/regress/specs/isolation_select_vs_all.spec000066400000000000000000000406751317107136600250020ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
# # create range distributed table to test behavior of SELECT in concurrent operations setup { SET citus.shard_replication_factor TO 1; CREATE TABLE select_append(id integer, data text, int_data int); SELECT create_distributed_table('select_append', 'id', 'append'); } # drop distributed table teardown { DROP TABLE IF EXISTS select_append CASCADE; } # session 1 session "s1" step "s1-initialize" { COPY select_append FROM PROGRAM 'echo 0, a, 0\\n1, b, 1\\n2, c, 2\\n3, d, 3\\n4, e, 4' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-router-select" { SELECT * FROM select_append WHERE id = 1; } step "s1-real-time-select" { SELECT * FROM select_append ORDER BY 1, 2; } step "s1-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s1-insert" { INSERT INTO select_append VALUES(0, 'k', 0); } step "s1-insert-select" { INSERT INTO select_append SELECT * FROM select_append; } step "s1-update" { UPDATE select_append SET data = 'l' WHERE id = 0; } step "s1-delete" { DELETE FROM select_append WHERE id = 1; } step "s1-truncate" { TRUNCATE select_append; } step "s1-drop" { DROP TABLE select_append; } step "s1-ddl-create-index" { CREATE INDEX select_append_index ON select_append(id); } step "s1-ddl-drop-index" { DROP INDEX select_append_index; } step "s1-ddl-add-column" { ALTER TABLE select_append ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE select_append DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE select_append RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('select_append'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM select_append;'); } step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); } step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); } step "s1-create-non-distributed-table" { CREATE TABLE select_append(id integer, data text, int_data int); } step "s1-distribute-table" { SELECT create_distributed_table('select_append', 'id', 'append'); } step "s1-select-count" { SELECT COUNT(*) FROM select_append; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''select_append%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''select_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-router-select" { SELECT * FROM select_append WHERE id = 1; } step "s2-real-time-select" { SELECT * FROM select_append ORDER BY 1, 2; } step "s2-task-tracker-select" { SET citus.task_executor_type TO "task-tracker"; SELECT * FROM select_append AS t1 JOIN select_append AS t2 ON t1.id = t2.int_data ORDER BY 1, 2, 3, 4; } step "s2-insert" { INSERT INTO select_append VALUES(0, 'k', 0); } step "s2-insert-select" { INSERT INTO select_append SELECT * FROM select_append; } step "s2-update" { UPDATE select_append SET data = 'l' WHERE id = 0; } step "s2-delete" { DELETE FROM select_append WHERE id = 1; } step "s2-truncate" { TRUNCATE select_append; } step "s2-drop" { DROP TABLE select_append; } step "s2-ddl-create-index" { CREATE INDEX select_append_index ON select_append(id); } step "s2-ddl-drop-index" { DROP INDEX select_append_index; 
} step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY select_append_index ON select_append(id); } step "s2-ddl-add-column" { ALTER TABLE select_append ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE select_append DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE select_append RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('select_append'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM select_append;'); } step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM select_append WHERE id <= 4;'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('select_append'::regclass, 'public', 'append_copy'); } step "s2-distribute-table" { SELECT create_distributed_table('select_append', 'id', 'append'); } # permutations - SELECT vs SELECT permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" # permutations - router SELECT first permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-router-select" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-router-select" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-router-select" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" 
"s1-router-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-master-apply-delete-command" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-router-select" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - router SELECT second permutation "s1-initialize" "s1-begin" "s1-insert" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-router-select" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-router-select" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-router-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-router-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-router-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-router-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-apply-delete-command" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-drop-all-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-router-select" "s1-commit" "s1-select-count" # permutations - real-time SELECT first permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-real-time-select" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation 
"s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-real-time-select" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-real-time-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-real-time-select" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - real-time SELECT second permutation "s1-initialize" "s1-begin" "s1-insert" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-real-time-select" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-real-time-select" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-real-time-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-real-time-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-real-time-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-real-time-select" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-real-time-select" "s1-commit" "s1-select-count" # permutations - task-tracker SELECT first permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-insert-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-update" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-delete" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-truncate" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-drop" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-ddl-create-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-task-tracker-select" "s2-ddl-drop-index" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" 
"s1-task-tracker-select" "s2-ddl-add-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-task-tracker-select" "s2-ddl-drop-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-ddl-rename-column" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-table-size" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-task-tracker-select" "s2-master-modify-multiple-shards" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-task-tracker-select" "s2-distribute-table" "s1-commit" "s1-select-count" # permutations - task-tracker SELECT second permutation "s1-initialize" "s1-begin" "s1-insert" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-insert-select" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-update" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-delete" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-drop" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-ddl-create-index" "s2-task-tracker-select" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s1-ddl-drop-index" "s2-task-tracker-select" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-ddl-add-column" "s2-task-tracker-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s1-ddl-drop-column" "s2-task-tracker-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-ddl-rename-column" "s2-task-tracker-select" "s1-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s1-table-size" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s1-master-modify-multiple-shards" "s2-task-tracker-select" "s1-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s1-distribute-table" "s2-task-tracker-select" "s1-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_transaction_recovery.spec000066400000000000000000000010401317107136600264050ustar00rootroot00000000000000setup { CREATE TABLE test_transaction_recovery(column1 int, column2 int); SELECT create_reference_table('test_transaction_recovery'); } teardown { DROP TABLE test_transaction_recovery; } session "s1" step "s1-begin" { BEGIN; } step "s1-recover" { SELECT recover_prepared_transactions(); } step "s1-commit" { COMMIT; } session "s2" step "s2-insert" { INSERT INTO test_transaction_recovery VALUES (1,2); } # Recovery and 2PCs should block each other permutation "s1-begin" "s1-recover" "s2-insert" "s1-commit" citus-7.0.3/src/test/regress/specs/isolation_truncate_vs_all.spec000066400000000000000000000165021317107136600253400ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. 
# # create range distributed table to test behavior of TRUNCATE in concurrent operations setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE truncate_append(id integer, data text); SELECT create_distributed_table('truncate_append', 'id', 'append'); } # drop distributed table teardown { DROP TABLE IF EXISTS truncate_append CASCADE; SELECT citus.restore_isolation_tester_func(); } # session 1 session "s1" step "s1-initialize" { COPY truncate_append FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-truncate" { TRUNCATE truncate_append; } step "s1-drop" { DROP TABLE truncate_append; } step "s1-ddl-create-index" { CREATE INDEX truncate_append_index ON truncate_append(id); } step "s1-ddl-drop-index" { DROP INDEX truncate_append_index; } step "s1-ddl-add-column" { ALTER TABLE truncate_append ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE truncate_append DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE truncate_append RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('truncate_append'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM truncate_append;'); } step "s1-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); } step "s1-master-drop-all-shards" { SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); } step "s1-create-non-distributed-table" { CREATE TABLE truncate_append(id integer, data text); } step "s1-distribute-table" { SELECT create_distributed_table('truncate_append', 'id', 'append'); } step "s1-select-count" { SELECT COUNT(*) FROM truncate_append; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''truncate_append%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''truncate_append%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-truncate" { TRUNCATE truncate_append; } step "s2-drop" { DROP TABLE truncate_append; } step "s2-ddl-create-index" { CREATE INDEX truncate_append_index ON truncate_append(id); } step "s2-ddl-drop-index" { DROP INDEX truncate_append_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY truncate_append_index ON truncate_append(id); } step "s2-ddl-add-column" { ALTER TABLE truncate_append ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE truncate_append DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE truncate_append RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('truncate_append'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM truncate_append;'); } step "s2-master-apply-delete-command" { SELECT master_apply_delete_command('DELETE FROM truncate_append WHERE id <= 4;'); } step "s2-master-drop-all-shards" { SELECT master_drop_all_shards('truncate_append'::regclass, 'public', 'truncate_append'); } step "s2-create-non-distributed-table" { CREATE TABLE truncate_append(id integer, data text); } step "s2-distribute-table" { SELECT create_distributed_table('truncate_append', 'id', 'append'); } step "s2-select" { 
SELECT * FROM truncate_append ORDER BY 1, 2; } step "s2-commit" { COMMIT; } # permutations - TRUNCATE vs TRUNCATE permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" # permutations - TRUNCATE first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-truncate" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-drop-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-apply-delete-command" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-master-drop-all-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s2-begin" "s1-truncate" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" # permutations - TRUNCATE second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-ddl-drop-column" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-apply-delete-command" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation 
"s1-initialize" "s1-begin" "s2-begin" "s1-master-drop-all-shards" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-begin" "s2-begin" "s1-distribute-table" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" citus-7.0.3/src/test/regress/specs/isolation_update_vs_all.spec000066400000000000000000000150141317107136600247720ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. # # create range distributed table to test behavior of UPDATE in concurrent operations setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE update_hash(id integer, data text); SELECT create_distributed_table('update_hash', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS update_hash CASCADE; SELECT citus.restore_isolation_tester_func(); } # session 1 session "s1" step "s1-initialize" { COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-update" { UPDATE update_hash SET data = 'l' WHERE id = 4; } step "s1-delete" { DELETE FROM update_hash WHERE id = 4; } step "s1-truncate" { TRUNCATE update_hash; } step "s1-drop" { DROP TABLE update_hash; } step "s1-ddl-create-index" { CREATE INDEX update_hash_index ON update_hash(id); } step "s1-ddl-drop-index" { DROP INDEX update_hash_index; } step "s1-ddl-add-column" { ALTER TABLE update_hash ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE update_hash DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE update_hash RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('update_hash'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM update_hash;'); } step "s1-create-non-distributed-table" { CREATE TABLE update_hash(id integer, data text); COPY update_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-distribute-table" { SELECT create_distributed_table('update_hash', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM update_hash; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''update_hash%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''update_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-update" { UPDATE update_hash SET data = 'l' WHERE id = 4; } step "s2-delete" { DELETE FROM update_hash WHERE id = 4; } step "s2-truncate" { TRUNCATE update_hash; } step "s2-drop" { DROP TABLE update_hash; } step "s2-ddl-create-index" { CREATE INDEX update_hash_index ON update_hash(id); } step "s2-ddl-drop-index" { DROP INDEX update_hash_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY update_hash_index ON update_hash(id); } step "s2-ddl-add-column" { ALTER TABLE update_hash ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE update_hash DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE update_hash RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('update_hash'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM update_hash;'); } step 
"s2-distribute-table" { SELECT create_distributed_table('update_hash', 'id'); } step "s2-commit" { COMMIT; } # permutations - UPDATE vs UPDATE permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-update" "s1-commit" "s2-commit" "s1-select-count" # permutations - UPDATE first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-update" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-update" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-update" "s2-ddl-drop-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" # permutations - UPDATE second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-update" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-update" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-update" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-ddl-drop-column" "s2-update" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-update" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-distribute-table" "s2-update" "s1-commit" "s2-commit" "s1-select-count" 
citus-7.0.3/src/test/regress/specs/isolation_upsert_vs_all.spec000066400000000000000000000153361317107136600250410ustar00rootroot00000000000000# # How we organize this isolation test spec, is explained at README.md file in this directory. # # create range distributed table to test behavior of UPSERT in concurrent operations setup { SELECT citus.replace_isolation_tester_func(); SELECT citus.refresh_isolation_tester_prepared_statement(); SET citus.shard_replication_factor TO 1; CREATE TABLE upsert_hash(id integer PRIMARY KEY, data text); SELECT create_distributed_table('upsert_hash', 'id'); } # drop distributed table teardown { DROP TABLE IF EXISTS upsert_hash CASCADE; SELECT citus.restore_isolation_tester_func(); } # session 1 session "s1" step "s1-initialize" { COPY upsert_hash FROM PROGRAM 'echo 0, a\\n1, b\\n2, c\\n3, d\\n4, e' WITH CSV; } step "s1-begin" { BEGIN; } step "s1-upsert" { INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; } step "s1-update" { UPDATE upsert_hash SET data = 'l' WHERE id = 4; } step "s1-delete" { DELETE FROM upsert_hash WHERE id = 4; } step "s1-truncate" { TRUNCATE upsert_hash; } step "s1-drop" { DROP TABLE upsert_hash; } step "s1-ddl-create-index" { CREATE INDEX upsert_hash_index ON upsert_hash(id); } step "s1-ddl-drop-index" { DROP INDEX upsert_hash_index; } step "s1-ddl-add-column" { ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; } step "s1-ddl-drop-column" { ALTER TABLE upsert_hash DROP new_column; } step "s1-ddl-rename-column" { ALTER TABLE upsert_hash RENAME data TO new_column; } step "s1-table-size" { SELECT citus_total_relation_size('upsert_hash'); } step "s1-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM upsert_hash;'); } step "s1-create-non-distributed-table" { CREATE TABLE upsert_hash(id integer PRIMARY KEY, data text); } step "s1-distribute-table" { SELECT create_distributed_table('upsert_hash', 'id'); } step "s1-select-count" { SELECT COUNT(*) FROM upsert_hash; } step "s1-show-indexes" { SELECT run_command_on_workers('SELECT COUNT(*) FROM pg_indexes WHERE tablename LIKE ''upsert_hash%'''); } step "s1-show-columns" { SELECT run_command_on_workers('SELECT column_name FROM information_schema.columns WHERE table_name LIKE ''upsert_hash%'' AND column_name = ''new_column'' ORDER BY 1 LIMIT 1'); } step "s1-commit" { COMMIT; } # session 2 session "s2" step "s2-begin" { BEGIN; } step "s2-upsert" { INSERT INTO upsert_hash VALUES(4, 'k') ON CONFLICT(id) DO UPDATE SET data = 'k'; } step "s2-update" { UPDATE upsert_hash SET data = 'l' WHERE id = 4; } step "s2-delete" { DELETE FROM upsert_hash WHERE id = 4; } step "s2-truncate" { TRUNCATE upsert_hash; } step "s2-drop" { DROP TABLE upsert_hash; } step "s2-ddl-create-index" { CREATE INDEX upsert_hash_index ON upsert_hash(id); } step "s2-ddl-drop-index" { DROP INDEX upsert_hash_index; } step "s2-ddl-create-index-concurrently" { CREATE INDEX CONCURRENTLY upsert_hash_index ON upsert_hash(id); } step "s2-ddl-add-column" { ALTER TABLE upsert_hash ADD new_column int DEFAULT 0; } step "s2-ddl-drop-column" { ALTER TABLE upsert_hash DROP new_column; } step "s2-ddl-rename-column" { ALTER TABLE upsert_hash RENAME data TO new_column; } step "s2-table-size" { SELECT citus_total_relation_size('upsert_hash'); } step "s2-master-modify-multiple-shards" { SELECT master_modify_multiple_shards('DELETE FROM upsert_hash;'); } step "s2-distribute-table" { SELECT create_distributed_table('upsert_hash', 'id'); } step "s2-commit" { COMMIT; } # permutations - UPSERT vs 
UPSERT permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" # permutations - UPSERT first permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-update" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-delete" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-truncate" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-drop" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-ddl-create-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-upsert" "s2-ddl-drop-index" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s1-upsert" "s2-ddl-create-index-concurrently" "s1-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-ddl-add-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-upsert" "s2-ddl-drop-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-ddl-rename-column" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-table-size" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-master-modify-multiple-shards" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-drop" "s1-create-non-distributed-table" "s1-initialize" "s1-begin" "s2-begin" "s1-upsert" "s2-distribute-table" "s1-commit" "s2-commit" "s1-select-count" # permutations - UPSERT second permutation "s1-initialize" "s1-begin" "s2-begin" "s1-update" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-delete" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-truncate" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-drop" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-create-index" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-ddl-create-index" "s1-begin" "s2-begin" "s1-ddl-drop-index" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" "s1-show-indexes" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-add-column" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-ddl-add-column" "s1-begin" "s2-begin" "s1-ddl-drop-column" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-ddl-rename-column" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" "s1-show-columns" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-table-size" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" permutation "s1-initialize" "s1-begin" "s2-begin" "s1-master-modify-multiple-shards" "s2-upsert" "s1-commit" "s2-commit" "s1-select-count" 
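#
# Note: the "s1-upsert"/"s2-upsert" steps use INSERT ... ON CONFLICT (id) DO
# UPDATE, so the table keeps at most one row per id. When both sessions upsert
# the same key, the second one is expected to block on the first's uncommitted
# row and apply its conflict update only after "s1-commit"; the final
# "s1-select-count" step verifies the resulting row count.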
citus-7.0.3/src/test/regress/sql/000077500000000000000000000000001317107136600166745ustar00rootroot00000000000000citus-7.0.3/src/test/regress/sql/.gitignore000066400000000000000000000011011317107136600206550ustar00rootroot00000000000000/multi_agg_distinct.sql /multi_agg_type_conversion.sql /multi_alter_table_statements.sql /multi_append_table_to_shard.sql /multi_behavioral_analytics_create_table.sql /multi_copy.sql /multi_create_schema.sql /multi_large_shardid.sql /multi_master_delete_protocol.sql /multi_outer_join.sql /multi_outer_join_reference.sql /multi_load_data.sql /multi_load_large_records.sql /multi_load_more_data.sql /worker_copy.sql /multi_complex_count_distinct.sql /multi_mx_copy_data.sql /multi_behavioral_analytics_create_table.sql /multi_insert_select_behavioral_analytics_create_table.sql citus-7.0.3/src/test/regress/sql/multi_703_upgrade.sql000066400000000000000000000015241317107136600226510ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 103000; -- tests that the upgrade from 7.0-2 to 7.0-3 properly migrates shard placements DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '7.0-2'; INSERT INTO pg_dist_shard_placement (placementid, shardid, shardstate, shardlength, nodename, nodeport) VALUES (1, 1, 1, 0, 'localhost', :worker_1_port); -- if there are no worker nodes which match the shards this should fail ALTER EXTENSION citus UPDATE TO '7.0-3'; -- if you add a matching worker the upgrade should succeed INSERT INTO pg_dist_node (nodename, nodeport, groupid) VALUES ('localhost', :worker_1_port, 1); ALTER EXTENSION citus UPDATE TO '7.0-3'; SELECT * FROM pg_dist_placement; -- reset and prepare for the rest of the tests DROP EXTENSION citus; CREATE EXTENSION citus; citus-7.0.3/src/test/regress/sql/multi_agg_approximate_distinct.sql000066400000000000000000000103501317107136600256760ustar00rootroot00000000000000-- -- MULTI_AGG_APPROXIMATE_DISTINCT -- -- Create HLL extension if present, print false result otherwise SELECT CASE WHEN COUNT(*) > 0 THEN 'CREATE EXTENSION HLL' ELSE 'SELECT false AS hll_present' END AS create_cmd FROM pg_available_extensions() WHERE name = 'hll' \gset :create_cmd; \c - - - :worker_1_port :create_cmd; \c - - - :worker_2_port :create_cmd; \c - - - :master_port -- Try to execute count(distinct) when approximate distincts aren't enabled SELECT count(distinct l_orderkey) FROM lineitem; -- Check approximate count(distinct) at different precisions / error rates SET citus.count_distinct_error_rate = 0.1; SELECT count(distinct l_orderkey) FROM lineitem; SET citus.count_distinct_error_rate = 0.01; SELECT count(distinct l_orderkey) FROM lineitem; -- Check approximate count(distinct) for different data types SELECT count(distinct l_partkey) FROM lineitem; SELECT count(distinct l_extendedprice) FROM lineitem; SELECT count(distinct l_shipdate) FROM lineitem; SELECT count(distinct l_comment) FROM lineitem; -- Check that we can execute approximate count(distinct) on complex expressions SELECT count(distinct (l_orderkey * 2 + 1)) FROM lineitem; SELECT count(distinct extract(month from l_shipdate)) AS my_month FROM lineitem; SELECT count(distinct l_partkey) / count(distinct l_orderkey) FROM lineitem; -- Check that we can execute approximate count(distinct) on select queries that -- contain different filter, join, sort and limit clauses SELECT count(distinct l_orderkey) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; SELECT count(DISTINCT l_orderkey) FROM 
lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; SELECT count(DISTINCT l_orderkey) as distinct_order_count, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY distinct_order_count ASC, l_quantity ASC LIMIT 10; -- Check that approximate count(distinct) works at a table in a schema other than public -- create necessary objects CREATE SCHEMA test_count_distinct_schema; CREATE TABLE test_count_distinct_schema.nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_count_distinct_schema.nation_hash', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_count_distinct_schema.nation_hash', 4, 2); \copy test_count_distinct_schema.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. SET search_path TO public; SET citus.count_distinct_error_rate TO 0.01; SELECT COUNT (DISTINCT n_regionkey) FROM test_count_distinct_schema.nation_hash; -- test with search_path is set SET search_path TO test_count_distinct_schema; SELECT COUNT (DISTINCT n_regionkey) FROM nation_hash; SET search_path TO public; -- If we have an order by on count(distinct) that we intend to push down to -- worker nodes, we need to error out. Otherwise, we are fine. SET citus.limit_clause_row_fetch_count = 1000; SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY count_distinct LIMIT 10; SELECT l_returnflag, count(DISTINCT l_shipdate) as count_distinct, count(*) as total FROM lineitem GROUP BY l_returnflag ORDER BY total LIMIT 10; SELECT l_orderkey, count(l_partkey) FILTER (WHERE l_shipmode = 'AIR'), count(DISTINCT l_partkey) FILTER (WHERE l_shipmode = 'AIR'), count(DISTINCT CASE WHEN l_shipmode = 'AIR' THEN l_partkey ELSE NULL END) FROM lineitem GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; -- Check that we can revert config and disable count(distinct) approximations SET citus.count_distinct_error_rate = 0.0; SELECT count(distinct l_orderkey) FROM lineitem; citus-7.0.3/src/test/regress/sql/multi_alter_table_add_constraints.sql000066400000000000000000000331631317107136600263520ustar00rootroot00000000000000-- -- MULTI_ALTER_TABLE_ADD_CONSTRAINTS -- -- Test checks whether constraints of distributed tables can be adjusted using -- the ALTER TABLE ... ADD CONSTRAINT ... command. 
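--
-- The pattern exercised throughout this file is, roughly (a sketch only; the
-- concrete table and constraint names used below follow):
--
-- SELECT create_distributed_table('some_table', 'dist_column');
-- ALTER TABLE some_table ADD CONSTRAINT some_constraint PRIMARY KEY (dist_column);
--
-- For hash- and append-distributed tables, uniqueness-enforcing constraints
-- (PRIMARY KEY, UNIQUE, EXCLUDE) must include the distribution column, while
-- reference tables accept them on any column; CHECK and NOT NULL carry no such
-- restriction. Each constraint is then verified by inserting or copying rows
-- that violate it.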
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1450000; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1450000; -- Check "PRIMARY KEY CONSTRAINT" CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); -- Can only add primary key constraint on distribution column (or group of columns -- including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(name); ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); INSERT INTO products VALUES(1, 'product_1', 1); -- Should error out, since we are trying to add a new row having a value on p_key column -- conflicting with the existing row. INSERT INTO products VALUES(1, 'product_1', 1); ALTER TABLE products DROP CONSTRAINT p_key; INSERT INTO products VALUES(1, 'product_1', 1); -- Can not create constraint since it conflicts with the existing data ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); DROP TABLE products; -- Check "PRIMARY KEY CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric ); SELECT create_reference_table('products_ref'); -- Can add PRIMARY KEY to any column ALTER TABLE products_ref ADD CONSTRAINT p_key PRIMARY KEY(name); ALTER TABLE products_ref DROP CONSTRAINT p_key; ALTER TABLE products_ref ADD CONSTRAINT p_key PRIMARY KEY(product_no); INSERT INTO products_ref VALUES(1, 'product_1', 1); -- Should error out, since we are trying to add new row having a value on p_key column -- conflicting with the existing row. INSERT INTO products_ref VALUES(1, 'product_1', 1); DROP TABLE products_ref; -- Check "PRIMARY KEY CONSTRAINT" on append table CREATE TABLE products_append ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); -- Can only add primary key constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products_append ADD CONSTRAINT p_key_name PRIMARY KEY(name); ALTER TABLE products_append ADD CONSTRAINT p_key PRIMARY KEY(product_no); --- Error out since first and third rows have the same product_no \COPY products_append FROM STDIN DELIMITER AS ','; 1, Product_1, 10 2, Product_2, 15 1, Product_3, 8 \. DROP TABLE products_append; -- Check "UNIQUE CONSTRAINT" CREATE TABLE unique_test_table(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table', 'id'); -- Can only add unique constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE unique_test_table ADD CONSTRAINT unn_name UNIQUE(name); ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id); -- Error out, since table can not have two rows with same id. 
INSERT INTO unique_test_table VALUES(1, 'Ahmet'); INSERT INTO unique_test_table VALUES(1, 'Mehmet'); ALTER TABLE unique_test_table DROP CONSTRAINT unn_id; -- Insert row which will conflict with the next unique constraint command INSERT INTO unique_test_table VALUES(1, 'Mehmet'); -- Can not create constraint since it conflicts with the existing data ALTER TABLE unique_test_table ADD CONSTRAINT unn_id UNIQUE(id); -- Can create unique constraint over multiple columns which must include -- distribution column ALTER TABLE unique_test_table ADD CONSTRAINT unn_id_name UNIQUE(id, name); -- Error out, since tables can not have two rows with same id and name. INSERT INTO unique_test_table VALUES(1, 'Mehmet'); DROP TABLE unique_test_table; -- Check "UNIQUE CONSTRAINT" with reference table CREATE TABLE unique_test_table_ref(id int, name varchar(20)); SELECT create_reference_table('unique_test_table_ref'); -- We can add unique constraint on any column with reference tables ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_name UNIQUE(name); ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id UNIQUE(id); -- Error out. Since the table can not have two rows with the same id. INSERT INTO unique_test_table_ref VALUES(1, 'Ahmet'); INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet'); -- We can add unique constraint with multiple columns ALTER TABLE unique_test_table_ref DROP CONSTRAINT unn_id; ALTER TABLE unique_test_table_ref ADD CONSTRAINT unn_id_name UNIQUE(id,name); -- Error out, since two rows can not have the same id or name. INSERT INTO unique_test_table_ref VALUES(1, 'Mehmet'); DROP TABLE unique_test_table_ref; -- Check "UNIQUE CONSTRAINT" with append table CREATE TABLE unique_test_table_append(id int, name varchar(20)); SELECT create_distributed_table('unique_test_table_append', 'id', 'append'); -- Can only add unique constraint on distribution column (or group -- of columns including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_name UNIQUE(name); ALTER TABLE unique_test_table_append ADD CONSTRAINT unn_id UNIQUE(id); -- Error out. Table can not have two rows with the same id. \COPY unique_test_table_append FROM STDIN DELIMITER AS ','; 1, Product_1 2, Product_2 1, Product_3 \. DROP TABLE unique_test_table_append; -- Check "CHECK CONSTRAINT" CREATE TABLE products ( product_no integer, name text, price numeric, discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); -- Can add column and table check constraints ALTER TABLE products ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. INSERT INTO products VALUES(1, 'product_1', -1, -2); INSERT INTO products VALUES(1, 'product_1', 5, 3); INSERT INTO products VALUES(1, 'product_1', 2, 3); DROP TABLE products; -- Check "CHECK CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric, discounted_price numeric ); SELECT create_reference_table('products_ref'); -- Can add column and table check constraints ALTER TABLE products_ref ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_ref ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- First and third queries will error out, because of conflicts with p_check and -- p_multi_check, respectively. 
INSERT INTO products_ref VALUES(1, 'product_1', -1, -2); INSERT INTO products_ref VALUES(1, 'product_1', 5, 3); INSERT INTO products_ref VALUES(1, 'product_1', 2, 3); DROP TABLE products_ref; -- Check "CHECK CONSTRAINT" with append table CREATE TABLE products_append ( product_no int, name varchar(20), price int, discounted_price int ); SELECT create_distributed_table('products_append', 'product_no', 'append'); -- Can add column and table check constraints ALTER TABLE products_append ADD CONSTRAINT p_check CHECK(price > 0); ALTER TABLE products_append ADD CONSTRAINT p_multi_check CHECK(price > discounted_price); -- Error out,since the third row conflicting with the p_multi_check \COPY products_append FROM STDIN DELIMITER AS ','; 1, Product_1, 10, 5 2, Product_2, 15, 8 1, Product_3, 8, 10 \. DROP TABLE products_append; -- Check "EXCLUSION CONSTRAINT" CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); -- Can only add exclusion constraint on distribution column (or group of columns -- including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); -- We can add composite exclusion ALTER TABLE products ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- 4th command will error out since it conflicts with exc_pno_name constraint INSERT INTO products VALUES(1,'product_1', 5); INSERT INTO products VALUES(1,'product_2', 10); INSERT INTO products VALUES(2,'product_2', 5); INSERT INTO products VALUES(2,'product_2', 5); DROP TABLE products; -- Check "EXCLUSION CONSTRAINT" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric ); SELECT create_reference_table('products_ref'); -- We can add exclusion constraint on any column ALTER TABLE products_ref ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); -- We can add composite exclusion because none of pair of rows are conflicting ALTER TABLE products_ref ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- Third insertion will error out, since it has the same name with second insertion INSERT INTO products_ref VALUES(1,'product_1', 5); INSERT INTO products_ref VALUES(1,'product_2', 10); INSERT INTO products_ref VALUES(2,'product_2', 5); DROP TABLE products_ref; -- Check "EXCLUSION CONSTRAINT" with append table CREATE TABLE products_append ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products_append', 'product_no','append'); -- Can only add exclusion constraint on distribution column (or group of column -- including distribution column) -- Command below should error out since 'name' is not a distribution column ALTER TABLE products_append ADD CONSTRAINT exc_name EXCLUDE USING btree (name with =); ALTER TABLE products_append ADD CONSTRAINT exc_pno_name EXCLUDE USING btree (product_no with =, name with =); -- Error out since first and third can not pass the exclusion check. \COPY products_append FROM STDIN DELIMITER AS ','; 1, Product_1, 10 1, Product_2, 15 1, Product_1, 8 \. 
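-- (The rows listed after each \COPY ... FROM STDIN in this file are fed to the
-- command inline and terminated by the "\." line. COPY runs as a single
-- statement, so a constraint violation on any row aborts the entire load and
-- none of the rows in the failing batch end up in the table.)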
DROP TABLE products_append; -- Check "NOT NULL" CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); ALTER TABLE products ALTER COLUMN name SET NOT NULL; -- Insertions will error out since both product_no and name can not have NULL value INSERT INTO products VALUES(1,NULL,5); INSERT INTO products VALUES(NULL,'product_1', 5); DROP TABLE products; -- Check "NOT NULL" with reference table CREATE TABLE products_ref ( product_no integer, name text, price numeric ); SELECT create_reference_table('products_ref'); ALTER TABLE products_ref ALTER COLUMN name SET NOT NULL; -- Insertions will error out since both product_no and name can not have NULL value INSERT INTO products_ref VALUES(1,NULL,5); INSERT INTO products_ref VALUES(NULL,'product_1', 5); DROP TABLE products_ref; -- Check "NOT NULL" with append table CREATE TABLE products_append ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products_append', 'product_no', 'append'); ALTER TABLE products_append ALTER COLUMN name SET NOT NULL; -- Error out since name and product_no columns can not handle NULL value. \COPY products_append FROM STDIN DELIMITER AS ','; 1, \N, 10 \N, Product_2, 15 1, Product_1, 8 \. DROP TABLE products_append; -- Tests for ADD CONSTRAINT is not only subcommand CREATE TABLE products ( product_no integer, name text, price numeric ); SELECT create_distributed_table('products', 'product_no'); -- Should error out since add constraint is not the single subcommand ALTER TABLE products ADD CONSTRAINT unn_1 UNIQUE(product_no, price), ADD CONSTRAINT unn_2 UNIQUE(product_no, name); -- Tests for constraints without name -- Commands below should error out since constraints do not have the name ALTER TABLE products ADD UNIQUE(product_no); ALTER TABLE products ADD PRIMARY KEY(product_no); ALTER TABLE products ADD CHECK(product_no <> 0); ALTER TABLE products ADD EXCLUDE USING btree (product_no with =); DROP TABLE products; -- Tests with transactions CREATE TABLE products ( product_no integer, name text, price numeric, discounted_price numeric ); SELECT create_distributed_table('products', 'product_no'); BEGIN; INSERT INTO products VALUES(1,'product_1', 5); -- DDL should pick the right connections after a single INSERT ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); ROLLBACK; BEGIN; -- Add constraints ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); ALTER TABLE products ADD CONSTRAINT check_price CHECK(price > discounted_price); ALTER TABLE products ALTER COLUMN product_no SET NOT NULL; ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); INSERT INTO products VALUES(1,'product_1', 10, 8); ROLLBACK; -- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; \c - - - :master_port -- Tests to check the effect of rollback BEGIN; -- Add constraints (which will be rollbacked) ALTER TABLE products ADD CONSTRAINT unn_pno UNIQUE(product_no); ALTER TABLE products ADD CONSTRAINT check_price CHECK(price > discounted_price); ALTER TABLE products ADD CONSTRAINT p_key_product PRIMARY KEY(product_no); ROLLBACK; -- There should be no constraint on master and worker(s) SELECT "Constraint", "Definition" FROM table_checks WHERE relid='products'::regclass; \c - - - :worker_1_port SELECT 
"Constraint", "Definition" FROM table_checks WHERE relid='public.products_1450202'::regclass; \c - - - :master_port DROP TABLE products; citus-7.0.3/src/test/regress/sql/multi_array_agg.sql000066400000000000000000000050201317107136600225600ustar00rootroot00000000000000-- -- MULTI_ARRAY_AGG -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 520000; CREATE OR REPLACE FUNCTION array_sort (ANYARRAY) RETURNS ANYARRAY LANGUAGE SQL AS $$ SELECT ARRAY(SELECT unnest($1) ORDER BY 1) $$; -- Check multi_cat_agg() aggregate which is used to implement array_agg() SELECT array_cat_agg(i) FROM (VALUES (ARRAY[1,2]), (NULL), (ARRAY[3,4])) AS t(i); -- Check that we don't support distinct and order by with array_agg() SELECT array_agg(distinct l_orderkey) FROM lineitem; SELECT array_agg(l_orderkey ORDER BY l_partkey) FROM lineitem; SELECT array_agg(distinct l_orderkey ORDER BY l_orderkey) FROM lineitem; -- Check array_agg() for different data types and LIMIT clauses SELECT array_sort(array_agg(l_partkey)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; SELECT array_sort(array_agg(l_extendedprice)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; SELECT array_sort(array_agg(l_shipdate)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; SELECT array_sort(array_agg(l_shipmode)) FROM lineitem GROUP BY l_orderkey ORDER BY l_orderkey LIMIT 10; -- Check that we can execute array_agg() within other functions SELECT array_length(array_agg(l_orderkey), 1) FROM lineitem; -- Check that we can execute array_agg() on select queries that hit multiple -- shards and contain different aggregates, filter clauses and other complex -- expressions. Note that the l_orderkey ranges are such that the matching rows -- lie in different shards. SELECT l_quantity, count(*), avg(l_extendedprice), array_sort(array_agg(l_orderkey)) FROM lineitem WHERE l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; SELECT l_quantity, array_sort(array_agg(extract (month FROM o_orderdate))) AS my_month FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; SELECT l_quantity, array_sort(array_agg(l_orderkey * 2 + 1)) FROM lineitem WHERE l_quantity < 5 AND octet_length(l_comment) + octet_length('randomtext'::text) > 40 AND l_orderkey > 5500 AND l_orderkey < 9500 GROUP BY l_quantity ORDER BY l_quantity; -- Check that we can execute array_agg() with an expression containing NULL values SELECT array_agg(case when l_quantity > 20 then l_quantity else NULL end) FROM lineitem WHERE l_orderkey < 10; -- Check that we return NULL in case there are no input rows to array_agg() SELECT array_agg(l_orderkey) FROM lineitem WHERE l_quantity < 0; citus-7.0.3/src/test/regress/sql/multi_average_expression.sql000066400000000000000000000026511317107136600245240ustar00rootroot00000000000000-- -- MULTI_AVERAGE_EXPRESSION_ORDER -- -- This test checks that the group-by columns don't need to be above an average -- expression, and can be anywhere in the projection order. This is in response -- to a bug we had due to the average expression introducing new columns. 
SELECT sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order, l_returnflag, l_linestatus FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; -- These tests check that distributed averages only consider non-null input -- values. This is in response to a bug we had due to the distributed average -- using sum(expression)/count(*) to calculate avg(expression). We now use the -- correct form sum(expression)/count(expression) for average calculations. -- Run avg() on an expression that contains some null values SELECT avg(case when l_quantity > 20 then l_quantity end) FROM lineitem; -- Run avg() on an expression that contains only null values SELECT avg(case when l_quantity > 5000 then l_quantity end) FROM lineitem; citus-7.0.3/src/test/regress/sql/multi_basic_queries.sql000066400000000000000000000010441317107136600234440ustar00rootroot00000000000000-- -- MULTI_BASIC_QUERIES -- -- Execute simple sum, average, and count queries on data recently uploaded to -- our partitioned table. SELECT count(*) FROM lineitem; SELECT sum(l_extendedprice) FROM lineitem; SELECT avg(l_extendedprice) FROM lineitem; -- Verify that we can do queries in read-only mode BEGIN; SET TRANSACTION READ ONLY; SELECT count(*) FROM lineitem; COMMIT; -- Verify temp tables which are used for final result aggregation don't persist. SELECT count(*) FROM pg_class WHERE relname LIKE 'pg_merge_job_%' AND relkind = 'r'; citus-7.0.3/src/test/regress/sql/multi_behavioral_analytics_basics.sql000066400000000000000000000335271317107136600263500ustar00rootroot00000000000000------------------------------------ ------------------------------------ -- Vanilla funnel query ------------------------------------ ------------------------------------ INSERT INTO agg_results (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN 
( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Funnel, grouped by the number of times a user has done an event ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id, value_1_agg, value_2_agg) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY count_pay; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT 
user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in at least two of X and Y and Z segments ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id) SELECT user_id FROM users_table WHERE (value_1 = 10 OR value_1 = 11 OR value_1 = 12) GROUP BY user_id HAVING count(distinct value_1) >= 2; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type!=100 AND value_3 > 100 AND user_id=users_table.user_id) AND EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id=users_table.user_id) AND NOT EXISTS (SELECT 
user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id GROUP BY user_id HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Find me all users_table who logged in more than once ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, value_1 from ( SELECT user_id, value_1 From users_table WHERE value_2 > 100 and user_id = 15 GROUP BY value_1, user_id HAVING count(*) > 1 ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Find me all users_table who has done some event and has filters ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And user_id in (select user_id From users_table Where value_1 = 15 And value_2 > 25); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Which events_table did people who has done some specific events_table ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE user_id in (SELECT user_id from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Find me all the users_table who has done some event more than three times ------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id) select user_id from ( select user_id from events_table where event_type = 901 group by user_id having count(*) > 3 ) as a; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; ------------------------------------ ------------------------------------ -- Find my assets that have the highest probability and fetch their metadata 
------------------------------------ ------------------------------------ TRUNCATE agg_results; INSERT INTO agg_results(user_id, value_1_agg, value_3_agg) SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results; citus-7.0.3/src/test/regress/sql/multi_behavioral_analytics_single_shard_queries.sql000066400000000000000000000351651317107136600313030ustar00rootroot00000000000000------------------------------------ ------------------------------------ -- Vanilla funnel query -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q WHERE user_id = 20; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Vanilla funnel query -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND (u.user_id = 13 OR u.user_id = 20) AND (e.user_id = 13 OR e.user_id = 20) AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q WHERE (user_id = 13 OR user_id = 20); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event -- single shard query ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS 
hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) WHERE t1.user_id = 20 GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; ------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event -- two shards query ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND (e.user_id = 20 OR e.user_id = 17) AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND (e.user_id = 20 OR e.user_id = 17) AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE (e.user_id = 20 OR e.user_id = 17) AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) WHERE (t1.user_id = 20 OR t1.user_id = 17) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table -- single shard query ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true WHERE user_id = 65 GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table -- two shards query ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND (user_id = 65 OR user_id = 12) AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN 
LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND (user_id = 65 OR user_id = 12) AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true WHERE (user_id = 65 OR user_id = 12) GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60) AND user_id = 7; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20 AND (user_id = 7 OR user_id = 20)) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40 AND (user_id = 7 OR user_id = 20)) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60 AND (user_id = 7 OR user_id = 20)) AND (user_id = 7 OR user_id = 20); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND user_id=users_table.user_id) AND user_id = 61; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND (user_id = 61 OR user_id = 51) AND user_id=users_table.user_id) AND (user_id = 61 OR user_id = 51); -- get some 
statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND user_id = 96 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id=users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND (user_id = 96 OR user_id = 8) AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id=users_table.user_id AND (user_id = 96 OR user_id = 8)) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id AND (user_id = 96 OR user_id = 8)); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- single shard ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND user_id = 47 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id AND user_id = 47 GROUP BY user_id HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; ------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria -- two shards ------------------------------------ ------------------------------------ TRUNCATE agg_results_second; INSERT INTO agg_results_second(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND (user_id = 47 or user_id = 81) AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id AND (user_id = 47 or user_id = 81) GROUP BY user_id HAVING Count(*) > 2); -- get some statistics from the aggregated results to ensure the results are 
correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM agg_results_second; citus-7.0.3/src/test/regress/sql/multi_binary_master_copy_format.sql000066400000000000000000000010001317107136600260570ustar00rootroot00000000000000-- -- MULTI_BINARY_MASTER_COPY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 430000; -- Try binary master copy for different executors SET citus.binary_master_copy_format TO 'on'; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM lineitem; SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; SET citus.task_executor_type TO 'real-time'; SELECT count(*) FROM lineitem; SELECT l_shipmode FROM lineitem WHERE l_partkey = 67310 OR l_partkey = 155190; citus-7.0.3/src/test/regress/sql/multi_cache_invalidation.sql000066400000000000000000000010151317107136600244300ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1601000; CREATE TABLE tab9 (test_id integer NOT NULL, data int); CREATE TABLE tab10 (test_id integer NOT NULL, data int); SELECT master_create_distributed_table('tab9', 'test_id', 'hash'); SELECT master_create_distributed_table('tab10', 'test_id', 'hash'); SELECT master_create_worker_shards('tab9', 1, 1); TRUNCATE tab9; UPDATE pg_dist_shard SET logicalrelid = 'tab10'::regclass WHERE logicalrelid = 'tab9'::regclass; TRUNCATE tab10; DROP TABLE tab9; DROP TABLE tab10; citus-7.0.3/src/test/regress/sql/multi_citus_tools.sql000066400000000000000000000275551317107136600232140ustar00rootroot00000000000000-- -- MULTI CITUS TOOLS -- -- tests UDFs created for citus tools -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1240000; -- test with invalid port, prevent OS dependent warning from being displayed SET client_min_messages to ERROR; -- PG 9.5 does not show context for plpgsql raise -- message whereas PG 9.6 shows. disabling it -- for this test only to have consistent behavior -- b/w PG 9.6+ and PG 9.5. 
\set SHOW_CONTEXT never SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); SELECT * FROM master_run_on_worker(ARRAY['localhost']::text[], ARRAY['666']::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); RESET client_min_messages; -- store worker node name and port SELECT quote_literal(node_name) as node_name, node_port as node_port FROM master_get_active_worker_nodes() ORDER BY node_port LIMIT 1 \gset -- connect to the first worker and ask for shard count, should return 0 SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], false); -- connect to the first worker and ask for shards, should fail with -- expecting a single column error SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], false); -- query result may only contain a single row SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], false); -- send multiple queries SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], false); -- send multiple queries, one fails SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], false); -- send multiple queries, both fail SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], false); -- can create tables at worker SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['create table first_table(a int, b int)', 'create table second_table(a int, b int)']::text[], false); -- can insert into table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], false); SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], false); -- insert into second table twice SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], false); -- check inserted values at second table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], false); -- store worker node name and port again -- previously set variables become unusable after some number of uses SELECT quote_literal(node_name) as node_name, node_port as node_port FROM master_get_active_worker_nodes() ORDER BY node_port LIMIT 1 \gset -- create index on tables SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], false); -- drop created tables SELECT * FROM 
master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], false); SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], false); -- verify table is dropped SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], false); -- -- Run the same tests in parallel -- -- connect to the first worker and ask for shard count, should return 0 SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from pg_dist_shard']::text[], true); -- connect to the first worker and ask for shards, should fail with -- expecting a single column error SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select * from pg_dist_shard']::text[], true); -- query result may only contain a single row SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select a from generate_series(1,2) a']::text[], true); -- send multiple queries SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(2,2) a']::text[], true); -- send multiple queries, one fails SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,1) a', 'select a from generate_series(1,2) a']::text[], true); -- send multiple queries, both fail SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['select a from generate_series(1,2) a', 'select a from generate_series(1,2) a']::text[], true); -- can create tables at worker SELECT * FROM master_run_on_worker(ARRAY[:node_name, :node_name]::text[], ARRAY[:node_port, :node_port]::int[], ARRAY['create table first_table(a int, b int)', 'create table second_table(a int, b int)']::text[], true); -- store worker node name and port again -- previously set variables become unusable after some number of uses SELECT quote_literal(node_name) as node_name, node_port as node_port FROM master_get_active_worker_nodes() ORDER BY node_port LIMIT 1 \gset -- can insert into table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into first_table select a,a from generate_series(1,20) a']::text[], true); SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from first_table']::text[], true); -- insert into second table twice SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['insert into second_table select * from first_table']::text[], true); -- check inserted values at second table SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); -- create index on tables SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['create index first_table_index on first_table(a)']::text[], true); -- drop created tables SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], 
ARRAY[:node_port]::int[], ARRAY['drop table first_table']::text[], true); SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['drop table second_table']::text[], true); -- verify table is dropped SELECT * FROM master_run_on_worker(ARRAY[:node_name]::text[], ARRAY[:node_port]::int[], ARRAY['select count(*) from second_table']::text[], true); -- run_command_on_XXX tests SELECT * FROM run_command_on_workers('select 1') ORDER BY 2 ASC; SELECT * FROM run_command_on_workers('select count(*) from pg_dist_partition') ORDER BY 2 ASC; -- make sure run_on_all_placements respects shardstate CREATE TABLE check_placements (key int); SELECT master_create_distributed_table('check_placements', 'key', 'hash'); SELECT master_create_worker_shards('check_placements', 5, 2); SELECT * FROM run_command_on_placements('check_placements', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0 AND nodeport = :worker_1_port; SELECT * FROM run_command_on_placements('check_placements', 'select 1'); DROP TABLE check_placements CASCADE; -- make sure run_on_all_colocated_placements correctly detects colocation CREATE TABLE check_colocated (key int); SELECT master_create_distributed_table('check_colocated', 'key', 'hash'); SELECT master_create_worker_shards('check_colocated', 5, 2); CREATE TABLE second_table (key int); SELECT master_create_distributed_table('second_table', 'key', 'hash'); SELECT master_create_worker_shards('second_table', 4, 2); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); -- even when the difference is in replication factor, an error is thrown SELECT master_drop_all_shards('second_table'::regclass, current_schema(), 'second_table'); SELECT master_create_worker_shards('second_table', 5, 1); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); -- when everything matches, the command is run! 
SELECT master_drop_all_shards('second_table'::regclass, current_schema(), 'second_table'); SELECT master_create_worker_shards('second_table', 5, 2); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); -- when a placement is invalid considers the tables to not be colocated UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = ( SELECT shardid FROM pg_dist_shard WHERE nodeport = :worker_1_port AND logicalrelid = 'second_table'::regclass ORDER BY 1 ASC LIMIT 1 ); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); -- when matching placement is also invalid, considers the tables to be colocated UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = ( SELECT shardid FROM pg_dist_shard WHERE nodeport = :worker_1_port AND logicalrelid = 'check_colocated'::regclass ORDER BY 1 ASC LIMIT 1 ); SELECT * FROM run_command_on_colocated_placements('check_colocated', 'second_table', 'select 1'); DROP TABLE check_colocated CASCADE; DROP TABLE second_table CASCADE; -- runs on all shards CREATE TABLE check_shards (key int); SELECT master_create_distributed_table('check_shards', 'key', 'hash'); SELECT master_create_worker_shards('check_shards', 5, 2); SELECT * FROM run_command_on_shards('check_shards', 'select 1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid % 2 = 0; SELECT * FROM run_command_on_shards('check_shards', 'select 1'); DROP TABLE check_shards CASCADE; -- set SHOW_CONTEXT back to default \set SHOW_CONTEXT errors citus-7.0.3/src/test/regress/sql/multi_cluster_management.sql000066400000000000000000000247221317107136600245130ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; -- Tests functions related to cluster membership -- before starting the test, lets try to create reference table and see a -- meaningful error CREATE TABLE test_reference_table (y int primary key, name text); SELECT create_reference_table('test_reference_table'); -- add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- get the active nodes SELECT master_get_active_worker_nodes(); -- try to add a node that is already in the cluster SELECT nodeid, groupid FROM master_add_node('localhost', :worker_1_port); -- get the active nodes SELECT master_get_active_worker_nodes(); -- try to remove a node (with no placements) SELECT master_remove_node('localhost', :worker_2_port); -- verify that the node has been deleted SELECT master_get_active_worker_nodes(); -- try to disable a node with no placements see that node is removed SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT master_disable_node('localhost', :worker_2_port); SELECT master_get_active_worker_nodes(); -- add some shard placements to the cluster SELECT isactive FROM master_activate_node('localhost', :worker_2_port); CREATE TABLE cluster_management_test (col_1 text, col_2 int); SELECT master_create_distributed_table('cluster_management_test', 'col_1', 'hash'); SELECT master_create_worker_shards('cluster_management_test', 16, 1); -- see that there are some active placements in the candidate node SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; -- try to remove a node with active placements and see that node removal is failed SELECT master_remove_node('localhost', :worker_2_port); SELECT master_get_active_worker_nodes(); -- insert a row so 
that master_disable_node() exercises closing connections INSERT INTO test_reference_table VALUES (1, '1'); -- try to disable a node with active placements see that node is removed -- observe that a notification is displayed SELECT master_disable_node('localhost', :worker_2_port); SELECT master_get_active_worker_nodes(); -- try to disable a node which does not exist and see that an error is thrown SELECT master_disable_node('localhost.noexist', 2345); -- restore the node for next tests SELECT isactive FROM master_activate_node('localhost', :worker_2_port); -- try to remove a node with active placements and see that node removal is failed SELECT master_remove_node('localhost', :worker_2_port); -- mark all placements in the candidate node as inactive SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset UPDATE pg_dist_placement SET shardstate=3 WHERE groupid=:worker_2_group; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport=:worker_2_port; -- try to remove a node with only inactive placements and see that removal still fails SELECT master_remove_node('localhost', :worker_2_port); SELECT master_get_active_worker_nodes(); -- clean-up SELECT 1 FROM master_add_node('localhost', :worker_2_port); UPDATE pg_dist_placement SET shardstate=1 WHERE groupid=:worker_2_group; -- when there is no primary we should get a pretty error UPDATE pg_dist_node SET noderole = 'secondary' WHERE nodeport=:worker_2_port; SELECT * FROM cluster_management_test; -- when there is no node at all in the group we should get a different error DELETE FROM pg_dist_node WHERE nodeport=:worker_2_port; SELECT * FROM cluster_management_test; -- clean-up SELECT groupid as new_group FROM master_add_node('localhost', :worker_2_port) \gset UPDATE pg_dist_placement SET groupid = :new_group WHERE groupid = :worker_2_group; -- test that you are allowed to remove secondary nodes even if there are placements SELECT 1 FROM master_add_node('localhost', 9990, groupid => :new_group, noderole => 'secondary'); SELECT master_remove_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', 9990); -- clean-up DROP TABLE cluster_management_test; -- check that adding/removing nodes are propagated to nodes with hasmetadata=true SELECT master_remove_node('localhost', :worker_2_port); UPDATE pg_dist_node SET hasmetadata=true WHERE nodeport=:worker_1_port; SELECT 1 FROM master_add_node('localhost', :worker_2_port); \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :master_port -- check that added nodes are not propagated to nodes with hasmetadata=false UPDATE pg_dist_node SET hasmetadata=false WHERE nodeport=:worker_1_port; SELECT 1 FROM master_add_node('localhost', :worker_2_port); \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :master_port -- check that removing two nodes in the same transaction works SELECT master_remove_node('localhost', :worker_1_port), master_remove_node('localhost', :worker_2_port); SELECT count(1) FROM pg_dist_node; -- check that adding two nodes in the same transaction works SELECT master_add_node('localhost', :worker_1_port), master_add_node('localhost', :worker_2_port); SELECT 
* FROM pg_dist_node ORDER BY nodeid; -- check that mixed add/remove node commands work fine inside transaction BEGIN; SELECT master_remove_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port); COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; UPDATE pg_dist_node SET hasmetadata=true WHERE nodeport=:worker_1_port; BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); COMMIT; SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :worker_1_port SELECT nodename, nodeport FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_2_port; \c - - - :master_port SELECT master_remove_node(nodename, nodeport) FROM pg_dist_node; SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- check that a distributed table can be created after adding a node in a transaction SELECT master_remove_node('localhost', :worker_2_port); BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); CREATE TABLE temp(col1 text, col2 int); SELECT create_distributed_table('temp', 'col1'); INSERT INTO temp VALUES ('row1', 1); INSERT INTO temp VALUES ('row2', 2); COMMIT; SELECT col1, col2 FROM temp ORDER BY col1; SELECT count(*) FROM pg_dist_shard_placement, pg_dist_shard WHERE pg_dist_shard_placement.shardid = pg_dist_shard.shardid AND pg_dist_shard.logicalrelid = 'temp'::regclass AND pg_dist_shard_placement.nodeport = :worker_2_port; DROP TABLE temp; \c - - - :worker_1_port DELETE FROM pg_dist_partition; DELETE FROM pg_dist_shard; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_node; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); -- check that you can't add a primary to a non-default cluster SELECT master_add_node('localhost', 9999, nodecluster => 'olap'); -- check that you can't add more than one primary to a group SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 9999, groupid => :worker_1_group, noderole => 'primary'); -- check that you can add secondaries and unavailable nodes to a group SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT 1 FROM master_add_node('localhost', 9998, groupid => :worker_1_group, noderole => 'secondary'); SELECT 1 FROM master_add_node('localhost', 9997, groupid => :worker_1_group, noderole => 'unavailable'); -- add_inactive_node also works with secondaries SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_group, noderole => 'secondary'); -- check that you can add a seconary to a non-default cluster, and activate it, and remove it SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); SELECT master_activate_node('localhost', 9999); SELECT master_disable_node('localhost', 9999); SELECT master_remove_node('localhost', 9999); -- check that you can't manually add two primaries to a group INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole) VALUES ('localhost', 5000, :worker_1_group, 'primary'); UPDATE pg_dist_node SET noderole = 'primary' WHERE groupid 
= :worker_1_group AND nodeport = 9998; -- check that you can't manually add a primary to a non-default cluster INSERT INTO pg_dist_node (nodename, nodeport, groupid, noderole, nodecluster) VALUES ('localhost', 5000, 1000, 'primary', 'olap'); UPDATE pg_dist_node SET nodecluster = 'olap' WHERE nodeport = :worker_1_port; -- check that you /can/ add a secondary node to a non-default cluster SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'olap'); -- check that super-long cluster names are truncated SELECT master_add_node('localhost', 8887, groupid => :worker_1_group, noderole => 'secondary', nodecluster=> 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'thisisasixtyfourcharacterstringrepeatedfourtimestomake256chars.' 'overflow' ); SELECT * FROM pg_dist_node WHERE nodeport=8887; -- don't remove the secondary and unavailable nodes, check that no commands are sent to -- them in any of the remaining tests -- master_add_secondary_node lets you skip looking up the groupid SELECT master_add_secondary_node('localhost', 9995, 'localhost', :worker_1_port); SELECT master_add_secondary_node('localhost', 9994, primaryname => 'localhost', primaryport => :worker_2_port); SELECT master_add_secondary_node('localhost', 9993, 'localhost', 2000); SELECT master_add_secondary_node('localhost', 9992, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); citus-7.0.3/src/test/regress/sql/multi_colocated_shard_transfer.sql000066400000000000000000000105621317107136600256550ustar00rootroot00000000000000-- -- MULTI_COLOCATED_SHARD_TRANSFER -- -- These tables are created in multi_colocation_utils test -- test repair -- manually set shardstate as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND (shardid = 1300000 OR shardid = 1300004); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid = 1300016; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid = 1300020; -- test repairing colocated shards -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; -- repair colocated shards SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; -- test repairing NOT colocated shard -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND 
p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; -- repair NOT colocated shard SELECT master_copy_shard_placement(1300016, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table5_groupX'::regclass ORDER BY s.shardid, sp.nodeport; -- test repairing shard in append distributed table -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; -- repair shard in append distributed table SELECT master_copy_shard_placement(1300020, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND p.logicalrelid = 'table6_append'::regclass ORDER BY s.shardid, sp.nodeport; -- test repair while all placements of one shard in colocation group is unhealthy -- manually set shardstate as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1300000; -- status before shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; -- repair while all placements of one shard in colocation group is unhealthy SELECT master_copy_shard_placement(1300000, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -- status after shard repair SELECT s.shardid, s.logicalrelid::regclass, sp.nodeport, p.colocationid, sp.shardstate FROM pg_dist_partition p, pg_dist_shard s, pg_dist_shard_placement sp WHERE p.logicalrelid = s.logicalrelid AND s.shardid = sp.shardid AND colocationid = (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'table1_group1'::regclass) ORDER BY s.shardid, sp.nodeport; citus-7.0.3/src/test/regress/sql/multi_colocation_utils.sql000066400000000000000000000374611317107136600242140ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1300000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 4; -- =================================================================== -- create test utility function -- =================================================================== CREATE SEQUENCE colocation_test_seq MINVALUE 1000 NO CYCLE; /* a very simple UDF that only sets the colocation ids the same * DO NOT USE THIS FUNCTION IN PRODUCTION. It manually sets colocationid column of * pg_dist_partition and it does not check anything about pyshical state about shards. 
*/ CREATE OR REPLACE FUNCTION colocation_test_colocate_tables(source_table regclass, target_table regclass) RETURNS BOOL LANGUAGE plpgsql AS $colocate_tables$ DECLARE nextid INTEGER; BEGIN SELECT nextval('colocation_test_seq') INTO nextid; UPDATE pg_dist_partition SET colocationId = nextid WHERE logicalrelid IN ( (SELECT p1.logicalrelid FROM pg_dist_partition p1, pg_dist_partition p2 WHERE p2.logicalrelid = source_table AND (p1.logicalrelid = source_table OR (p1.colocationId = p2.colocationId AND p1.colocationId != 0))) UNION (SELECT target_table) ); RETURN TRUE; END; $colocate_tables$; -- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION get_table_colocation_id(regclass) RETURNS INTEGER AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION tables_colocated(regclass, regclass) RETURNS bool AS 'citus' LANGUAGE C; CREATE FUNCTION shards_colocated(bigint, bigint) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION find_shard_interval_index(bigint) RETURNS int AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test co-location util functions -- =================================================================== -- create distributed table observe shard pruning CREATE TABLE table1_group1 ( id int ); SELECT master_create_distributed_table('table1_group1', 'id', 'hash'); SELECT master_create_worker_shards('table1_group1', 4, 2); CREATE TABLE table2_group1 ( id int ); SELECT master_create_distributed_table('table2_group1', 'id', 'hash'); SELECT master_create_worker_shards('table2_group1', 4, 2); CREATE TABLE table3_group2 ( id int ); SELECT master_create_distributed_table('table3_group2', 'id', 'hash'); SELECT master_create_worker_shards('table3_group2', 4, 2); CREATE TABLE table4_group2 ( id int ); SELECT master_create_distributed_table('table4_group2', 'id', 'hash'); SELECT master_create_worker_shards('table4_group2', 4, 2); CREATE TABLE table5_groupX ( id int ); SELECT master_create_distributed_table('table5_groupX', 'id', 'hash'); SELECT master_create_worker_shards('table5_groupX', 4, 2); CREATE TABLE table6_append ( id int ); SELECT master_create_distributed_table('table6_append', 'id', 'append'); SELECT master_create_empty_shard('table6_append'); SELECT master_create_empty_shard('table6_append'); -- make table1_group1 and table2_group1 co-located manually SELECT colocation_test_colocate_tables('table1_group1', 'table2_group1'); -- check co-location id SELECT get_table_colocation_id('table1_group1'); SELECT get_table_colocation_id('table5_groupX'); SELECT get_table_colocation_id('table6_append'); -- check self table co-location SELECT tables_colocated('table1_group1', 'table1_group1'); SELECT tables_colocated('table5_groupX', 'table5_groupX'); SELECT tables_colocated('table6_append', 'table6_append'); -- check table co-location with same co-location group SELECT tables_colocated('table1_group1', 'table2_group1'); -- check table co-location with different co-location group SELECT tables_colocated('table1_group1', 'table3_group2'); -- check table co-location with invalid co-location group SELECT tables_colocated('table1_group1', 'table5_groupX'); SELECT tables_colocated('table1_group1', 'table6_append'); -- check self shard co-location SELECT shards_colocated(1300000, 1300000); SELECT shards_colocated(1300016, 1300016); SELECT shards_colocated(1300020, 1300020); -- check shard co-location with same co-location group 
SELECT shards_colocated(1300000, 1300004); -- check shard co-location with same table different co-location group SELECT shards_colocated(1300000, 1300001); -- check shard co-location with different co-location group SELECT shards_colocated(1300000, 1300005); -- check shard co-location with invalid co-location group SELECT shards_colocated(1300000, 1300016); SELECT shards_colocated(1300000, 1300020); -- check co-located table list SELECT UNNEST(get_colocated_table_array('table1_group1'))::regclass ORDER BY 1; SELECT UNNEST(get_colocated_table_array('table5_groupX'))::regclass ORDER BY 1; SELECT UNNEST(get_colocated_table_array('table6_append'))::regclass ORDER BY 1; -- check co-located shard list SELECT get_colocated_shard_array(1300000) ORDER BY 1; SELECT get_colocated_shard_array(1300016) ORDER BY 1; SELECT get_colocated_shard_array(1300020) ORDER BY 1; -- check FindShardIntervalIndex function SELECT find_shard_interval_index(1300000); SELECT find_shard_interval_index(1300001); SELECT find_shard_interval_index(1300002); SELECT find_shard_interval_index(1300003); SELECT find_shard_interval_index(1300016); -- check external colocation API SET citus.shard_count = 2; CREATE TABLE table1_groupA ( id int ); SELECT create_distributed_table('table1_groupA', 'id'); CREATE TABLE table2_groupA ( id int ); SELECT create_distributed_table('table2_groupA', 'id'); -- change shard replication factor SET citus.shard_replication_factor = 1; CREATE TABLE table1_groupB ( id int ); SELECT create_distributed_table('table1_groupB', 'id'); CREATE TABLE table2_groupB ( id int ); SELECT create_distributed_table('table2_groupB', 'id'); UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='table1_groupB'::regclass; UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='table2_groupB'::regclass; -- revert back to default shard replication factor SET citus.shard_replication_factor to DEFAULT; -- change partition column type CREATE TABLE table1_groupC ( id text ); SELECT create_distributed_table('table1_groupC', 'id'); CREATE TABLE table2_groupC ( id text ); SELECT create_distributed_table('table2_groupC', 'id'); -- change shard count SET citus.shard_count = 4; CREATE TABLE table1_groupD ( id int ); SELECT create_distributed_table('table1_groupD', 'id'); CREATE TABLE table2_groupD ( id int ); SELECT create_distributed_table('table2_groupD', 'id'); -- try other distribution methods CREATE TABLE table_append ( id int ); SELECT create_distributed_table('table_append', 'id', 'append'); CREATE TABLE table_range ( id int ); SELECT create_distributed_table('table_range', 'id', 'range'); -- test foreign table creation CREATE FOREIGN TABLE table3_groupD ( id int ) SERVER fake_fdw_server; SELECT create_distributed_table('table3_groupD', 'id'); -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY logicalrelid; -- check effects of dropping tables DROP TABLE table1_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; -- dropping all tables in a colocation group also deletes the colocation group DROP TABLE table2_groupA; SELECT * FROM pg_dist_colocation WHERE colocationid = 4; -- create dropped colocation group again SET citus.shard_count = 2; CREATE TABLE table1_groupE ( id int ); SELECT create_distributed_table('table1_groupE', 'id'); CREATE TABLE table2_groupE ( id int ); SELECT create_distributed_table('table2_groupE', 
'id'); -- test different table DDL CREATE TABLE table3_groupE ( dummy_column text, id int ); SELECT create_distributed_table('table3_groupE', 'id'); -- test different schema CREATE SCHEMA schema_collocation; CREATE TABLE schema_collocation.table4_groupE ( id int ); SELECT create_distributed_table('schema_collocation.table4_groupE', 'id'); -- test colocate_with option CREATE TABLE table1_group_none_1 ( id int ); SELECT create_distributed_table('table1_group_none_1', 'id', colocate_with => 'none'); CREATE TABLE table2_group_none_1 ( id int ); SELECT create_distributed_table('table2_group_none_1', 'id', colocate_with => 'table1_group_none_1'); CREATE TABLE table1_group_none_2 ( id int ); SELECT create_distributed_table('table1_group_none_2', 'id', colocate_with => 'none'); CREATE TABLE table4_groupE ( id int ); SELECT create_distributed_table('table4_groupE', 'id', colocate_with => 'default'); SET citus.shard_count = 3; -- check that this new configuration does not have a default group CREATE TABLE table1_group_none_3 ( id int ); SELECT create_distributed_table('table1_group_none_3', 'id', colocate_with => 'NONE'); -- a new table does not use a non-default group CREATE TABLE table1_group_default ( id int ); SELECT create_distributed_table('table1_group_default', 'id', colocate_with => 'DEFAULT'); -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; -- check failing colocate_with options CREATE TABLE table_postgresql( id int ); CREATE TABLE table_failing ( id int ); SELECT create_distributed_table('table_failing', 'id', colocate_with => 'table_append'); SELECT create_distributed_table('table_failing', 'id', 'append', 'table1_groupE'); SELECT create_distributed_table('table_failing', 'id', colocate_with => 'table_postgresql'); SELECT create_distributed_table('table_failing', 'id', colocate_with => 'no_table'); SELECT create_distributed_table('table_failing', 'id', colocate_with => ''); SELECT create_distributed_table('table_failing', 'id', colocate_with => NULL); -- check with different distribution column types CREATE TABLE table_bigint ( id bigint ); SELECT create_distributed_table('table_bigint', 'id', colocate_with => 'table1_groupE'); -- check worker table schemas \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.table3_groupE_1300050'::regclass; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='schema_collocation.table4_groupE_1300052'::regclass; \c - - - :master_port CREATE TABLE table1_groupF ( id int ); SELECT create_reference_table('table1_groupF'); CREATE TABLE table2_groupF ( id int ); SELECT create_reference_table('table2_groupF'); -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; -- cross check with internal colocation API SELECT p1.logicalrelid::regclass AS table1, p2.logicalrelid::regclass AS table2, tables_colocated(p1.logicalrelid , p2.logicalrelid) AS colocated FROM pg_dist_partition p1, pg_dist_partition p2 WHERE p1.logicalrelid < p2.logicalrelid AND p1.colocationid != 0 AND p2.colocationid != 0 AND tables_colocated(p1.logicalrelid , p2.logicalrelid) is TRUE ORDER BY table1, table2; -- check created shards SELECT logicalrelid, pg_dist_shard.shardid AS shardid, shardstorage, nodeport, shardminvalue, shardmaxvalue FROM pg_dist_shard, 
pg_dist_shard_placement WHERE pg_dist_shard.shardid = pg_dist_shard_placement.shardid AND pg_dist_shard.shardid >= 1300026 ORDER BY logicalrelid, shardmaxvalue::integer, shardid, placementid; -- reset colocation ids to test mark_tables_colocated ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1; DELETE FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000; UPDATE pg_dist_partition SET colocationid = 0 WHERE colocationid >= 1 AND colocationid < 1000; -- check metadata SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; -- first check failing cases SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupC']); SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupD']); SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupE']); SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupF']); SELECT mark_tables_colocated('table1_groupB', ARRAY['table2_groupB', 'table1_groupD']); -- check metadata to see failing calls didn't have any side effects SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; -- check successfully colocated tables SELECT mark_tables_colocated('table1_groupB', ARRAY['table2_groupB']); SELECT mark_tables_colocated('table1_groupC', ARRAY['table2_groupC']); SELECT mark_tables_colocated('table1_groupD', ARRAY['table2_groupD']); SELECT mark_tables_colocated('table1_groupE', ARRAY['table2_groupE', 'table3_groupE']); SELECT mark_tables_colocated('table1_groupF', ARRAY['table2_groupF']); -- check to colocate with itself SELECT mark_tables_colocated('table1_groupB', ARRAY['table1_groupB']); SET citus.shard_count = 2; CREATE TABLE table1_group_none ( id int ); SELECT create_distributed_table('table1_group_none', 'id', colocate_with => 'NONE'); CREATE TABLE table2_group_none ( id int ); SELECT create_distributed_table('table2_group_none', 'id', colocate_with => 'NONE'); -- check metadata to see colocation groups are created successfully SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; -- move all the tables in colocation group 5 to colocation group 7 SELECT mark_tables_colocated('table1_group_none', ARRAY['table1_groupE', 'table2_groupE', 'table3_groupE']); -- move a table with a colocation id which is no longer in pg_dist_colocation SELECT mark_tables_colocated('table1_group_none', ARRAY['table2_group_none']); -- check metadata to see that unused colocation group is deleted SELECT * FROM pg_dist_colocation WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid; SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE colocationid >= 1 AND colocationid < 1000 ORDER BY colocationid, logicalrelid; -- try to colocate different replication models CREATE TABLE table1_groupG ( id int ); SELECT create_distributed_table('table1_groupG', 'id'); -- update replication model UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid = 'table1_groupG'::regclass; CREATE TABLE table2_groupG ( id int ); SELECT
create_distributed_table('table2_groupG', 'id', colocate_with => 'table1_groupG'); CREATE TABLE table2_groupG ( id int ); SELECT create_distributed_table('table2_groupG', 'id', colocate_with => 'NONE'); SELECT mark_tables_colocated('table1_groupG', ARRAY['table2_groupG']); -- drop tables to clean test space DROP TABLE table1_groupb; DROP TABLE table2_groupb; DROP TABLE table1_groupc; DROP TABLE table2_groupc; DROP TABLE table1_groupd; DROP TABLE table2_groupd; DROP TABLE table1_groupf; DROP TABLE table2_groupf; DROP TABLE table1_groupe; DROP TABLE table2_groupe; DROP TABLE table3_groupe; DROP TABLE table4_groupe; DROP TABLE schema_collocation.table4_groupe; DROP TABLE table1_group_none_1; DROP TABLE table2_group_none_1; DROP TABLE table1_group_none_2; DROP TABLE table1_group_none_3; DROP TABLE table1_group_none; DROP TABLE table2_group_none; DROP TABLE table1_group_default; citus-7.0.3/src/test/regress/sql/multi_complex_expressions.sql000066400000000000000000000162251317107136600247460ustar00rootroot00000000000000-- -- MULTI_COMPLEX_EXPRESSIONS -- -- Check that we can correctly handle complex expressions and aggregates. SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; SELECT sum(l_quantity) / (10 * avg(l_quantity)) FROM lineitem; SELECT (sum(l_quantity) / (10 * avg(l_quantity))) + 11 FROM lineitem; SELECT avg(l_quantity) as average FROM lineitem; SELECT 100 * avg(l_quantity) as average_times_hundred FROM lineitem; SELECT 100 * avg(l_quantity) / 10 as average_times_ten FROM lineitem; SELECT l_quantity, 10 * count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Check that we can handle complex select clause expressions. SELECT count(*) FROM lineitem WHERE octet_length(l_comment || l_comment) > 40; SELECT count(*) FROM lineitem WHERE octet_length(concat(l_comment, l_comment)) > 40; SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + octet_length('randomtext'::text) > 40; SELECT count(*) FROM lineitem WHERE octet_length(l_comment) + 10 > 40; SELECT count(*) FROM lineitem WHERE (l_receiptdate::timestamp - l_shipdate::timestamp) > interval '5 days'; -- can push down queries where no columns present on the WHERE clause SELECT count(*) FROM lineitem WHERE random() = -0.1; -- boolean tests can be pushed down SELECT count(*) FROM lineitem WHERE (l_partkey > 10000) is true; -- scalar array operator expressions can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey = ANY(ARRAY[19353, 19354, 19355]); -- some more scalar array operator expressions SELECT count(*) FROM lineitem WHERE l_partkey = ALL(ARRAY[19353]); -- operator expressions involving arrays SELECT count(*) FROM lineitem WHERE ARRAY[19353, 19354, 19355] @> ARRAY[l_partkey]; -- coerced via io expressions can be pushed down SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool; -- case expressions can be pushed down SELECT count(*) FROM lineitem WHERE (CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END); -- coalesce expressions can be pushed down SELECT count(*) FROM lineitem WHERE COALESCE((l_partkey/50000)::bool, false); -- nullif expressions can be pushed down SELECT count(*) FROM lineitem WHERE NULLIF((l_partkey/50000)::bool, false); -- null test expressions can be pushed down SELECT count(*) FROM orders WHERE o_comment IS NOT null; -- functions can be pushed down SELECT count(*) FROM lineitem WHERE isfinite(l_shipdate); -- constant expressions can be pushed down SELECT count(*) FROM lineitem WHERE 0 != 0; -- distinct expressions 
can be pushed down SELECT count(*) FROM lineitem WHERE l_partkey IS DISTINCT FROM 50040; -- row compare expression can be pushed down SELECT count(*) FROM lineitem WHERE row(l_partkey, 2, 3) > row(2000, 2, 3); -- combination of different expressions can be pushed down SELECT count(*) FROM lineitem WHERE (l_quantity/100)::int::bool::text::bool AND CASE WHEN l_orderkey > 4000 THEN l_partkey / 100 > 1 ELSE false END AND COALESCE((l_partkey/50000)::bool, false) AND NULLIF((l_partkey/50000)::bool, false) AND isfinite(l_shipdate) AND l_partkey IS DISTINCT FROM 50040 AND row(l_partkey, 2, 3) > row(2000, 2, 3); -- constant expression in the WHERE clause with a column in the target list SELECT l_linenumber FROM lineitem WHERE 1!=0 ORDER BY l_linenumber LIMIT 1; -- constant expression in the WHERE clause with expressions and a column the target list SELECT count(*) * l_discount as total_discount, count(*), sum(l_tax), l_discount FROM lineitem WHERE 1!=0 GROUP BY l_discount ORDER BY total_discount DESC, sum(l_tax) DESC; -- distinct expressions in the WHERE clause with a column in the target list SELECT l_linenumber FROM lineitem WHERE l_linenumber IS DISTINCT FROM 1 AND l_orderkey IS DISTINCT FROM 8997 ORDER BY l_linenumber LIMIT 1; -- distinct expressions in the WHERE clause with expressions and a column the target list SELECT max(l_linenumber), min(l_discount), l_receiptdate FROM lineitem WHERE l_linenumber IS DISTINCT FROM 1 AND l_orderkey IS DISTINCT FROM 8997 GROUP BY l_receiptdate ORDER BY l_receiptdate LIMIT 1; -- Check that we can handle implicit and explicit join clause definitions. SELECT count(*) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_quantity < 5; SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5; SELECT count(*) FROM lineitem JOIN orders ON l_orderkey = o_orderkey WHERE l_quantity < 5; -- Check that we make sure local joins are between columns only. 
SELECT count(*) FROM lineitem, orders WHERE l_orderkey + 1 = o_orderkey; -- Check that we can issue limit/offset queries -- OFFSET in subqueries are not supported -- Error in the planner when single repartition subquery SELECT * FROM (SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey OFFSET 20) sq; -- Error in the optimizer when subquery pushdown is on SELECT * FROM (SELECT o_orderkey FROM orders ORDER BY o_orderkey OFFSET 20) sq; -- Simple LIMIT/OFFSET with ORDER BY SELECT o_orderkey FROM orders ORDER BY o_orderkey LIMIT 10 OFFSET 20; -- LIMIT/OFFSET with a subquery SET citus.task_executor_type TO 'task-tracker'; SELECT customer_keys.o_custkey, SUM(order_count) AS total_order_count FROM (SELECT o_custkey, o_orderstatus, COUNT(*) AS order_count FROM orders GROUP BY o_custkey, o_orderstatus ) customer_keys GROUP BY customer_keys.o_custkey ORDER BY customer_keys.o_custkey DESC LIMIT 10 OFFSET 20; SET citus.task_executor_type TO 'real-time'; SET client_min_messages TO DEBUG1; -- Ensure that we push down LIMIT and OFFSET properly -- No Group-By -> Push Down CREATE TEMP TABLE temp_limit_test_1 AS SELECT o_custkey FROM orders LIMIT 10 OFFSET 15; -- GROUP BY without ORDER BY -> No push-down CREATE TEMP TABLE temp_limit_test_2 AS SELECT o_custkey FROM orders GROUP BY o_custkey LIMIT 10 OFFSET 15; -- GROUP BY and ORDER BY non-aggregate -> push-down CREATE TEMP TABLE temp_limit_test_3 AS SELECT o_custkey FROM orders GROUP BY o_custkey ORDER BY o_custkey LIMIT 10 OFFSET 15; -- GROUP BY and ORDER BY aggregate -> No push-down CREATE TEMP TABLE temp_limit_test_4 AS SELECT o_custkey, COUNT(*) AS ccnt FROM orders GROUP BY o_custkey ORDER BY ccnt DESC LIMIT 10 OFFSET 15; -- OFFSET without LIMIT SELECT o_custkey FROM orders ORDER BY o_custkey OFFSET 2980; -- LIMIT/OFFSET with Joins SELECT li.l_partkey, o.o_custkey, li.l_quantity FROM lineitem li JOIN orders o ON li.l_orderkey = o.o_orderkey WHERE li.l_quantity > 25 ORDER BY 1, 2, 3 LIMIT 10 OFFSET 20; RESET client_min_messages; -- FILTERs SELECT l_orderkey, sum(l_extendedprice), sum(l_extendedprice) FILTER (WHERE l_shipmode = 'AIR'), count(*), count(*) FILTER (WHERE l_shipmode = 'AIR'), max(l_extendedprice), max(l_extendedprice) FILTER (WHERE l_quantity < 30) FROM lineitem GROUP BY l_orderkey ORDER BY 2 DESC, 1 DESC LIMIT 10; SELECT l_orderkey, sum(l_extendedprice), sum(l_extendedprice) FILTER (WHERE l_shipmode = 'AIR'), count(*), count(*) FILTER (WHERE l_shipmode = 'AIR'), max(l_extendedprice), max(l_extendedprice) FILTER (WHERE l_quantity < 30) FROM lineitem GROUP BY l_orderkey HAVING count(*) FILTER (WHERE l_shipmode = 'AIR') > 1 ORDER BY 2 DESC, 1 DESC LIMIT 10; citus-7.0.3/src/test/regress/sql/multi_count_type_conversion.sql000066400000000000000000000014251317107136600252670ustar00rootroot00000000000000-- -- MULTI_COUNT_TYPE_CONVERSION -- -- Verify that we can sort count(*) results correctly. We perform this check as -- our count() operations execute in two steps: worker nodes report their -- count() results, and the master node sums these counts up. During this sum(), -- the data type changes from int8 to numeric. When we sort the numeric value, -- we get erroneous results on 64-bit architectures. To fix this issue, we -- manually cast back the sum() result to an int8 data type. 
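-- Editorial illustration (not part of the original regression suite): a hedged
-- sketch of the cast described above. In PostgreSQL, sum() over bigint counts
-- yields numeric, so the master-side aggregation casts the total back to int8
-- before sorting. The query below mimics that by summing per-group counts and
-- casting; it assumes the lineitem table created earlier in this test suite,
-- and the aliases (per_group_counts, casted_count_quantity) are illustrative.
SELECT sum(count_quantity)::int8 AS casted_count_quantity, l_quantity
FROM (SELECT count(*) AS count_quantity, l_quantity
      FROM lineitem
      WHERE l_quantity < 32.0
      GROUP BY l_quantity) AS per_group_counts
GROUP BY l_quantity
ORDER BY casted_count_quantity ASC, l_quantity DESC;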
SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity DESC; SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity ASC; citus-7.0.3/src/test/regress/sql/multi_create_fdw.sql000066400000000000000000000007661317107136600227430ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 390000; -- =================================================================== -- get ready for the foreign data wrapper tests -- =================================================================== -- create fake fdw for use in tests CREATE FUNCTION fake_fdw_handler() RETURNS fdw_handler AS 'citus' LANGUAGE C STRICT; CREATE FOREIGN DATA WRAPPER fake_fdw HANDLER fake_fdw_handler; CREATE SERVER fake_fdw_server FOREIGN DATA WRAPPER fake_fdw; citus-7.0.3/src/test/regress/sql/multi_create_insert_proxy.sql000066400000000000000000000043301317107136600247170ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 380000; -- =================================================================== -- test INSERT proxy creation functionality -- =================================================================== -- use transaction to permit multiple calls to proxy function in one session BEGIN; -- use "unorthodox" object names to test quoting CREATE SCHEMA "A$AP Mob" CREATE TABLE "Dr. Bronner's ""Magic"" Soaps" ( id bigint PRIMARY KEY, data text NOT NULL DEFAULT 'lorem ipsum' ); \set insert_target '"A$AP Mob"."Dr. Bronner''s ""Magic"" Soaps"' -- create proxy and save proxy table name SELECT create_insert_proxy_for_table(:'insert_target') AS proxy_tablename \gset -- insert to proxy, relying on default value INSERT INTO pg_temp.:"proxy_tablename" (id) VALUES (1); -- copy some rows into the proxy COPY pg_temp.:"proxy_tablename" FROM stdin; 2 dolor sit amet 3 consectetur adipiscing elit 4 sed do eiusmod 5 tempor incididunt ut 6 labore et dolore \. -- verify rows were copied to target SELECT * FROM :insert_target ORDER BY id ASC; -- and not to proxy SELECT count(*) FROM pg_temp.:"proxy_tablename"; ROLLBACK; -- test behavior with distributed table, (so no transaction) CREATE TABLE insert_target ( id bigint PRIMARY KEY, data text NOT NULL DEFAULT 'lorem ipsum' ); -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; SELECT master_create_distributed_table('insert_target', 'id', 'hash'); SELECT master_create_worker_shards('insert_target', 2, 1); CREATE TEMPORARY SEQUENCE rows_inserted; SELECT create_insert_proxy_for_table('insert_target', 'rows_inserted') AS proxy_tablename \gset -- insert to proxy, again relying on default value INSERT INTO pg_temp.:"proxy_tablename" (id) VALUES (1); -- test copy with bad row in middle \set VERBOSITY terse COPY pg_temp.:"proxy_tablename" FROM stdin; 2 dolor sit amet 3 consectetur adipiscing elit 4 sed do eiusmod 5 tempor incididunt ut 6 labore et dolore 7 \N 8 magna aliqua \. 
\set VERBOSITY default -- verify rows were copied to distributed table SELECT * FROM insert_target ORDER BY id ASC; -- the counter should match the number of rows stored SELECT currval('rows_inserted'); SET client_min_messages TO DEFAULT; citus-7.0.3/src/test/regress/sql/multi_create_shards.sql000066400000000000000000000122221317107136600234350ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 370000; -- =================================================================== -- create test functions and types needed for tests -- =================================================================== CREATE FUNCTION sort_names(cstring, cstring, cstring) RETURNS cstring AS 'citus' LANGUAGE C STRICT; -- create a custom type... CREATE TYPE dummy_type AS ( i integer ); -- ... as well as a function to use as its comparator... CREATE FUNCTION dummy_type_function(dummy_type, dummy_type) RETURNS boolean AS 'SELECT TRUE;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom operator... CREATE OPERATOR = ( LEFTARG = dummy_type, RIGHTARG = dummy_type, PROCEDURE = dummy_type_function ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY dummy_op_family USING hash; -- ... finally, build an operator class, designate it as the default operator -- class for the type, but only specify an equality operator. So the type will -- have a default op class but no hash operator in that class. CREATE OPERATOR CLASS dummy_op_family_class DEFAULT FOR TYPE dummy_type USING hash FAMILY dummy_op_family AS OPERATOR 1 =; -- =================================================================== -- test shard creation functionality -- =================================================================== CREATE TABLE table_to_distribute ( name text PRIMARY KEY, id bigint, json_data json, test_type_data dummy_type ); -- use the table WITH (OIDS) set ALTER TABLE table_to_distribute SET WITH OIDS; SELECT master_create_distributed_table('table_to_distribute', 'id', 'hash'); -- revert WITH (OIDS) from above ALTER TABLE table_to_distribute SET WITHOUT OIDS; -- use an index instead of table name SELECT master_create_distributed_table('table_to_distribute_pkey', 'id', 'hash'); -- use a bad column name SELECT master_create_distributed_table('table_to_distribute', 'bad_column', 'hash'); -- use unrecognized partition type SELECT master_create_distributed_table('table_to_distribute', 'name', 'unrecognized'); -- use a partition column of a type lacking any default operator class SELECT master_create_distributed_table('table_to_distribute', 'json_data', 'hash'); -- use a partition column of type lacking the required support function (hash) SELECT master_create_distributed_table('table_to_distribute', 'test_type_data', 'hash'); -- distribute table and inspect side effects SELECT master_create_distributed_table('table_to_distribute', 'name', 'hash'); SELECT partmethod, partkey FROM pg_dist_partition WHERE logicalrelid = 'table_to_distribute'::regclass; -- use a bad shard count SELECT master_create_worker_shards('table_to_distribute', 0, 1); -- use a bad replication factor SELECT master_create_worker_shards('table_to_distribute', 16, 0); -- use a replication factor higher than shard count SELECT master_create_worker_shards('table_to_distribute', 16, 3); -- finally, create shards and inspect metadata SELECT master_create_worker_shards('table_to_distribute', 16, 1); SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE 
logicalrelid = 'table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; -- all shards should have the same size (16 divides evenly into the hash space) SELECT count(*) AS shard_count, shardmaxvalue::integer - shardminvalue::integer AS shard_size FROM pg_dist_shard WHERE logicalrelid='table_to_distribute'::regclass GROUP BY shard_size; SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'table_to_distribute%' AND relkind = 'r'; -- try to create them again SELECT master_create_worker_shards('table_to_distribute', 16, 1); -- test list sorting SELECT sort_names('sumedh', 'jason', 'ozgun'); SELECT COUNT(*) FROM pg_class WHERE relname LIKE 'throwaway%' AND relkind = 'r'; -- test foreign table creation CREATE FOREIGN TABLE foreign_table_to_distribute ( name text, id bigint ) SERVER fake_fdw_server; SELECT master_create_distributed_table('foreign_table_to_distribute', 'id', 'hash'); SELECT master_create_worker_shards('foreign_table_to_distribute', 16, 1); SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass ORDER BY (shardminvalue::integer) ASC; -- test shard creation using weird shard count CREATE TABLE weird_shard_count ( name text, id bigint ); SELECT master_create_distributed_table('weird_shard_count', 'id', 'hash'); SELECT master_create_worker_shards('weird_shard_count', 7, 1); -- Citus ensures all shards are roughly the same size SELECT shardmaxvalue::integer - shardminvalue::integer AS shard_size FROM pg_dist_shard WHERE logicalrelid = 'weird_shard_count'::regclass ORDER BY shardminvalue::integer ASC; -- cleanup foreign table, related shards and shard placements DELETE FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass); DELETE FROM pg_dist_shard WHERE logicalrelid = 'foreign_table_to_distribute'::regclass; DELETE FROM pg_dist_partition WHERE logicalrelid = 'foreign_table_to_distribute'::regclass; citus-7.0.3/src/test/regress/sql/multi_create_table.sql000066400000000000000000000420211317107136600232400ustar00rootroot00000000000000-- -- MULTI_CREATE_TABLE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 360000; -- Create new table definitions for use in testing in distributed planning and -- execution functionality. Also create indexes to boost performance. 
CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT master_create_distributed_table('lineitem', 'l_orderkey', 'append'); CREATE INDEX lineitem_time_index ON lineitem (l_shipdate); CREATE TABLE orders ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT master_create_distributed_table('orders', 'o_orderkey', 'append'); CREATE TABLE customer ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT master_create_distributed_table('customer', 'c_custkey', 'append'); CREATE TABLE nation ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_reference_table('nation'); CREATE TABLE part ( p_partkey integer not null, p_name varchar(55) not null, p_mfgr char(25) not null, p_brand char(10) not null, p_type varchar(25) not null, p_size integer not null, p_container char(10) not null, p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT master_create_distributed_table('part', 'p_partkey', 'append'); CREATE TABLE supplier ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); SELECT create_reference_table('supplier'); -- create a single shard supplier table which is not -- a reference table CREATE TABLE supplier_single_shard ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); SELECT master_create_distributed_table('supplier_single_shard', 's_suppkey', 'append'); CREATE TABLE mx_table_test (col1 int, col2 text); -- Since we're superuser, we can set the replication model to 'streaming' to -- create a one-off MX table... 
but if we forget to set the replication factor to one, -- we should see an error reminding us to fix that SET citus.replication_model TO 'streaming'; SELECT create_distributed_table('mx_table_test', 'col1'); -- ok, so now actually create the one-off MX table SET citus.shard_replication_factor TO 1; SELECT create_distributed_table('mx_table_test', 'col1'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_table_test'::regclass; DROP TABLE mx_table_test; -- Show that master_create_distributed_table ignores citus.replication_model GUC CREATE TABLE s_table(a int); SELECT master_create_distributed_table('s_table', 'a', 'hash'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='s_table'::regclass; -- Show that master_create_worker_shards complains when RF>1 and replication model is streaming UPDATE pg_dist_partition SET repmodel = 's' WHERE logicalrelid='s_table'::regclass; SELECT master_create_worker_shards('s_table', 4, 2); DROP TABLE s_table; RESET citus.replication_model; -- Show that create_distributed_table with append and range distributions ignore -- citus.replication_model GUC SET citus.shard_replication_factor TO 2; SET citus.replication_model TO streaming; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; -- Show that master_create_distributed_table created statement replicated tables no matter -- what citus.replication_model set to CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; -- Check that the replication_model overwrite behavior is the same with RF=1 SET citus.shard_replication_factor TO 1; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'append'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT create_distributed_table('repmodel_test', 'a', 'range'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'hash'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'append'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; CREATE TABLE repmodel_test (a int); SELECT master_create_distributed_table('repmodel_test', 'a', 'range'); SELECT repmodel FROM pg_dist_partition WHERE 
logicalrelid='repmodel_test'::regclass; DROP TABLE repmodel_test; RESET citus.replication_model; -- Test initial data loading CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); INSERT INTO data_load_test VALUES (243, 'world'); -- table must be empty when using append- or range-partitioning SELECT create_distributed_table('data_load_test', 'col1', 'append'); SELECT create_distributed_table('data_load_test', 'col1', 'range'); -- table must be empty when using master_create_distributed_table (no shards created) SELECT master_create_distributed_table('data_load_test', 'col1', 'hash'); -- create_distributed_table creates shards and copies data into the distributed table SELECT create_distributed_table('data_load_test', 'col1'); SELECT * FROM data_load_test ORDER BY col1; DROP TABLE data_load_test; -- test queries on distributed tables with no shards CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'append'); SELECT * FROM no_shard_test WHERE col1 > 1; DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT create_distributed_table('no_shard_test', 'col1', 'range'); SELECT * FROM no_shard_test WHERE col1 > 1; DROP TABLE no_shard_test; CREATE TABLE no_shard_test (col1 int, col2 text); SELECT master_create_distributed_table('no_shard_test', 'col1', 'hash'); SELECT * FROM no_shard_test WHERE col1 > 1; DROP TABLE no_shard_test; -- ensure writes in the same transaction as create_distributed_table are visible BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); INSERT INTO data_load_test VALUES (243, 'world'); END; SELECT * FROM data_load_test ORDER BY col1; DROP TABLE data_load_test; -- creating co-located distributed tables in the same transaction works BEGIN; CREATE TABLE data_load_test1 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test1 VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test1', 'col1'); CREATE TABLE data_load_test2 (col1 int, col2 text, col3 serial); INSERT INTO data_load_test2 VALUES (132, 'world'); SELECT create_distributed_table('data_load_test2', 'col1'); SELECT a.col2 ||' '|| b.col2 FROM data_load_test1 a JOIN data_load_test2 b USING (col1) WHERE col1 = 132; DROP TABLE data_load_test1, data_load_test2; END; -- There should be no table on the worker node \c - - - :worker_1_port SELECT relname FROM pg_class WHERE relname LIKE 'data_load_test%'; \c - - - :master_port -- creating an index after loading data works BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); CREATE INDEX data_load_test_idx ON data_load_test (col2); DROP TABLE data_load_test; END; -- popping in and out of existence in the same transaction works BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); DROP TABLE data_load_test; END; -- but dropping after a write on the distributed table is currently disallowed BEGIN; CREATE TABLE data_load_test (col1 int, col2 text, col3 serial); INSERT INTO data_load_test VALUES (132, 'hello'); SELECT create_distributed_table('data_load_test', 'col1'); INSERT INTO data_load_test VALUES (243, 'world'); DROP TABLE data_load_test; 
END; -- Test data loading after dropping a column CREATE TABLE data_load_test (col1 int, col2 text, col3 text, "CoL4"")" int); INSERT INTO data_load_test VALUES (132, 'hello', 'world'); INSERT INTO data_load_test VALUES (243, 'world', 'hello'); ALTER TABLE data_load_test DROP COLUMN col1; SELECT create_distributed_table('data_load_test', 'col3'); SELECT * FROM data_load_test ORDER BY col2; -- make sure the tuple went to the right shard SELECT * FROM data_load_test WHERE col3 = 'world'; DROP TABLE data_load_test; SET citus.shard_replication_factor TO default; SET citus.shard_count to 4; CREATE TABLE lineitem_hash_part (like lineitem); SELECT create_distributed_table('lineitem_hash_part', 'l_orderkey'); CREATE TABLE orders_hash_part (like orders); SELECT create_distributed_table('orders_hash_part', 'o_orderkey'); CREATE UNLOGGED TABLE unlogged_table ( key text, value text ); SELECT create_distributed_table('unlogged_table', 'key'); SELECT * FROM master_get_table_ddl_events('unlogged_table'); \c - - - :worker_1_port SELECT relpersistence FROM pg_class WHERE relname LIKE 'unlogged_table_%'; \c - - - :master_port -- Test rollback of create table BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); \c - - - :master_port -- Insert 3 rows to make sure that copy after shard creation touches the same -- worker node twice. BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); INSERT INTO rollback_table VALUES(1, 'Name_1'); INSERT INTO rollback_table VALUES(2, 'Name_2'); INSERT INTO rollback_table VALUES(3, 'Name_3'); SELECT create_distributed_table('rollback_table','id'); ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); \c - - - :master_port BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); \copy rollback_table from stdin delimiter ',' 1, 'name_1' 2, 'name_2' 3, 'name_3' \. CREATE INDEX rollback_index ON rollback_table(id); COMMIT; -- Check the table is created SELECT count(*) FROM rollback_table; DROP TABLE rollback_table; BEGIN; CREATE TABLE rollback_table(id int, name varchar(20)); SELECT create_distributed_table('rollback_table','id'); \copy rollback_table from stdin delimiter ',' 1, 'name_1' 2, 'name_2' 3, 'name_3' \. 
ROLLBACK; -- Table should not exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid FROM pg_class WHERE relname LIKE 'rollback_table%'); \c - - - :master_port BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); CREATE TABLE tt2(id int); SELECT create_distributed_table('tt2','id'); INSERT INTO tt1 VALUES(1); INSERT INTO tt2 SELECT * FROM tt1 WHERE id = 1; COMMIT; -- Table should exist on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360430'::regclass; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt2_360462'::regclass; \c - - - :master_port DROP TABLE tt1; DROP TABLE tt2; -- It is known that creating a table with master_create_empty_shard is not -- transactional, so the table remains on the worker node after the rollback BEGIN; CREATE TABLE append_tt1(id int); SELECT create_distributed_table('append_tt1','id','append'); SELECT master_create_empty_shard('append_tt1'); ROLLBACK; -- Table exists on the worker node. \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.append_tt1_360494'::regclass; \c - - - :master_port -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'public.tt1%'); \c - - - :master_port -- Queries executing with the router executor are allowed in the same transaction -- with create_distributed_table BEGIN; CREATE TABLE tt1(id int); INSERT INTO tt1 VALUES(1); SELECT create_distributed_table('tt1','id'); INSERT INTO tt1 VALUES(2); SELECT * FROM tt1 WHERE id = 1; COMMIT; -- Placements should be created on the worker \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = 'public.tt1_360495'::regclass; \c - - - :master_port DROP TABLE tt1; BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); DROP TABLE tt1; COMMIT; -- There should be no table on the worker node \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid = (SELECT oid from pg_class WHERE relname LIKE 'tt1%'); \c - - - :master_port -- Tests with create_distributed_table & DDL & DML commands -- Test should pass since GetPlacementListConnection can provide connections -- in this order of execution CREATE TABLE sample_table(id int); SELECT create_distributed_table('sample_table','id'); BEGIN; CREATE TABLE stage_table (LIKE sample_table); \COPY stage_table FROM stdin; -- Note that this operation is a local copy 1 2 3 4 \. SELECT create_distributed_table('stage_table', 'id'); INSERT INTO sample_table SELECT * FROM stage_table; DROP TABLE stage_table; SELECT * FROM sample_table WHERE id = 3; COMMIT; -- Show that rows of sample_table are updated SELECT count(*) FROM sample_table; DROP table sample_table; -- Test as create_distributed_table - copy - create_distributed_table - copy -- This combination is used by tests written by some ORMs. BEGIN; CREATE TABLE tt1(id int); SELECT create_distributed_table('tt1','id'); \COPY tt1 from stdin; 1 2 3 \. CREATE TABLE tt2(like tt1); SELECT create_distributed_table('tt2','id'); \COPY tt2 from stdin; 4 5 6 \.
INSERT INTO tt1 SELECT * FROM tt2; SELECT * FROM tt1 WHERE id = 3; SELECT * FROM tt2 WHERE id = 6; END; SELECT count(*) FROM tt1; -- the goal of the following test is to make sure that -- both create_reference_table and create_distributed_table -- calls create the schemas without leading to any deadlocks -- first create reference table, then hash distributed table BEGIN; CREATE SCHEMA sc; CREATE TABLE sc.ref(a int); insert into sc.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc.ref'); CREATE TABLE sc.hash(a int); insert into sc.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc.hash', 'a'); COMMIT; -- first create hash distributed table, then reference table BEGIN; CREATE SCHEMA sc2; CREATE TABLE sc2.hash(a int); insert into sc2.hash SELECT s FROM generate_series(0, 100) s; SELECT create_distributed_table('sc2.hash', 'a'); CREATE TABLE sc2.ref(a int); insert into sc2.ref SELECT s FROM generate_series(0, 100) s; SELECT create_reference_table('sc2.ref'); COMMIT; DROP TABLE tt1; DROP TABLE tt2; DROP SCHEMA sc CASCADE; DROP SCHEMA sc2 CASCADE; citus-7.0.3/src/test/regress/sql/multi_create_table_constraints.sql000066400000000000000000000241721317107136600256760ustar00rootroot00000000000000-- -- MULTI_CREATE_TABLE_CONSTRAINTS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 365000; -- test that Citus forbids unique and EXCLUDE constraints on append-partitioned tables. CREATE TABLE uniq_cns_append_tables ( partition_col integer UNIQUE, other_col integer ); SELECT master_create_distributed_table('uniq_cns_append_tables', 'partition_col', 'append'); CREATE TABLE excl_cns_append_tables ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =) ); SELECT master_create_distributed_table('excl_cns_append_tables', 'partition_col', 'append'); -- test that Citus cannot distribute unique constraints that do not include -- the partition column on hash-partitioned tables. CREATE TABLE pk_on_non_part_col ( partition_col integer, other_col integer PRIMARY KEY ); SELECT master_create_distributed_table('pk_on_non_part_col', 'partition_col', 'hash'); CREATE TABLE uq_on_non_part_col ( partition_col integer, other_col integer UNIQUE ); SELECT master_create_distributed_table('uq_on_non_part_col', 'partition_col', 'hash'); CREATE TABLE ex_on_non_part_col ( partition_col integer, other_col integer, EXCLUDE (other_col WITH =) ); SELECT master_create_distributed_table('ex_on_non_part_col', 'partition_col', 'hash'); -- now show that Citus can distribute unique and EXCLUDE constraints that -- include the partition column for hash-partitioned tables. -- However, EXCLUDE constraints must include the partition column with -- an equality operator. -- These tests are for UNNAMED constraints.
CREATE TABLE pk_on_part_col ( partition_col integer PRIMARY KEY, other_col integer ); SELECT master_create_distributed_table('pk_on_part_col', 'partition_col', 'hash'); CREATE TABLE uq_part_col ( partition_col integer UNIQUE, other_col integer ); SELECT master_create_distributed_table('uq_part_col', 'partition_col', 'hash'); CREATE TABLE uq_two_columns ( partition_col integer, other_col integer, UNIQUE (partition_col, other_col) ); SELECT master_create_distributed_table('uq_two_columns', 'partition_col', 'hash'); SELECT master_create_worker_shards('uq_two_columns', '4', '2'); INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns (partition_col, other_col) VALUES (1,1); CREATE TABLE ex_on_part_col ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =) ); SELECT master_create_distributed_table('ex_on_part_col', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_on_part_col', '4', '2'); INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col (partition_col, other_col) VALUES (1,2); CREATE TABLE ex_on_two_columns ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =, other_col WITH =) ); SELECT master_create_distributed_table('ex_on_two_columns', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_on_two_columns', '4', '2'); INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns (partition_col, other_col) VALUES (1,1); CREATE TABLE ex_on_two_columns_prt ( partition_col integer, other_col integer, EXCLUDE (partition_col WITH =, other_col WITH =) WHERE (other_col > 100) ); SELECT master_create_distributed_table('ex_on_two_columns_prt', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_on_two_columns_prt', '4', '2'); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); INSERT INTO ex_on_two_columns_prt (partition_col, other_col) VALUES (1,101); CREATE TABLE ex_wrong_operator ( partition_col tsrange, other_col tsrange, EXCLUDE USING gist (other_col WITH =, partition_col WITH &&) ); SELECT master_create_distributed_table('ex_wrong_operator', 'partition_col', 'hash'); CREATE TABLE ex_overlaps ( partition_col tsrange, other_col tsrange, EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); SELECT master_create_distributed_table('ex_overlaps', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_overlaps', '4', '2'); INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); -- now show that Citus can distribute unique and EXCLUDE constraints that -- include the partition column, for hash-partitioned tables. -- However, EXCLUDE constraints must include the partition column with -- an equality operator. -- These tests are for NAMED constraints. 
CREATE TABLE pk_on_part_col_named ( partition_col integer CONSTRAINT pk_on_part_col_named_pk PRIMARY KEY, other_col integer ); SELECT master_create_distributed_table('pk_on_part_col_named', 'partition_col', 'hash'); CREATE TABLE uq_part_col_named ( partition_col integer CONSTRAINT uq_part_col_named_uniq UNIQUE, other_col integer ); SELECT master_create_distributed_table('uq_part_col_named', 'partition_col', 'hash'); CREATE TABLE uq_two_columns_named ( partition_col integer, other_col integer, CONSTRAINT uq_two_columns_named_uniq UNIQUE (partition_col, other_col) ); SELECT master_create_distributed_table('uq_two_columns_named', 'partition_col', 'hash'); SELECT master_create_worker_shards('uq_two_columns_named', '4', '2'); INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO uq_two_columns_named (partition_col, other_col) VALUES (1,1); CREATE TABLE ex_on_part_col_named ( partition_col integer, other_col integer, CONSTRAINT ex_on_part_col_named_exclude EXCLUDE (partition_col WITH =) ); SELECT master_create_distributed_table('ex_on_part_col_named', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_on_part_col_named', '4', '2'); INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_part_col_named (partition_col, other_col) VALUES (1,2); CREATE TABLE ex_on_two_columns_named ( partition_col integer, other_col integer, CONSTRAINT ex_on_two_columns_named_exclude EXCLUDE (partition_col WITH =, other_col WITH =) ); SELECT master_create_distributed_table('ex_on_two_columns_named', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_on_two_columns_named', '4', '2'); INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); INSERT INTO ex_on_two_columns_named (partition_col, other_col) VALUES (1,1); CREATE TABLE ex_multiple_excludes ( partition_col integer, other_col integer, other_other_col integer, CONSTRAINT ex_multiple_excludes_excl1 EXCLUDE (partition_col WITH =, other_col WITH =), CONSTRAINT ex_multiple_excludes_excl2 EXCLUDE (partition_col WITH =, other_other_col WITH =) ); SELECT master_create_distributed_table('ex_multiple_excludes', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_multiple_excludes', '4', '2'); INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,1); INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,1,2); INSERT INTO ex_multiple_excludes (partition_col, other_col, other_other_col) VALUES (1,2,1); CREATE TABLE ex_wrong_operator_named ( partition_col tsrange, other_col tsrange, CONSTRAINT ex_wrong_operator_named_exclude EXCLUDE USING gist (other_col WITH =, partition_col WITH &&) ); SELECT master_create_distributed_table('ex_wrong_operator_named', 'partition_col', 'hash'); CREATE TABLE ex_overlaps_named ( partition_col tsrange, other_col tsrange, CONSTRAINT ex_overlaps_operator_named_exclude EXCLUDE USING gist (other_col WITH &&, partition_col WITH =) ); SELECT master_create_distributed_table('ex_overlaps_named', 'partition_col', 'hash'); SELECT master_create_worker_shards('ex_overlaps_named', '4', '2'); INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-01 00:00:00, 2016-02-01 00:00:00]'); INSERT INTO ex_overlaps_named (partition_col, other_col) VALUES ('[2016-01-01 00:00:00, 2016-02-01 00:00:00]', '[2016-01-15 00:00:00, 2016-02-01 00:00:00]'); -- now show that Citus allows unique constraints on 
range-partitioned tables. CREATE TABLE uq_range_tables ( partition_col integer UNIQUE, other_col integer ); SELECT master_create_distributed_table('uq_range_tables', 'partition_col', 'range'); -- show that CHECK constraints are distributed. CREATE TABLE check_example ( partition_col integer UNIQUE, other_col integer CHECK (other_col >= 100), other_other_col integer CHECK (abs(other_other_col) >= 100) ); SELECT master_create_distributed_table('check_example', 'partition_col', 'hash'); SELECT master_create_worker_shards('check_example', '2', '2'); \c - - - :worker_1_port \d check_example_partition_col_key_365040 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.check_example_365040'::regclass; \c - - - :master_port -- drop unnecessary tables DROP TABLE pk_on_non_part_col, uq_on_non_part_col CASCADE; DROP TABLE pk_on_part_col, uq_part_col, uq_two_columns CASCADE; DROP TABLE ex_on_part_col, ex_on_two_columns, ex_on_two_columns_prt, ex_multiple_excludes, ex_overlaps CASCADE; DROP TABLE ex_on_part_col_named, ex_on_two_columns_named, ex_overlaps_named CASCADE; DROP TABLE uq_range_tables, check_example CASCADE; -- test dropping table with foreign keys SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE raw_table_1 (user_id int, UNIQUE(user_id)); SELECT create_distributed_table('raw_table_1', 'user_id'); CREATE TABLE raw_table_2 (user_id int REFERENCES raw_table_1(user_id), UNIQUE(user_id)); SELECT create_distributed_table('raw_table_2', 'user_id'); -- see that the constraint exists SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; -- should be prevented by the foreign key DROP TABLE raw_table_1; -- should cleanly drop the remote shards DROP TABLE raw_table_1 CASCADE; -- see that the constraint also dropped SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='raw_table_2'::regclass; -- drop the table as well DROP TABLE raw_table_2; citus-7.0.3/src/test/regress/sql/multi_create_table_new_features.sql000066400000000000000000000013011317107136600260030ustar00rootroot00000000000000-- -- MULTI_CREATE_TABLE_NEW_FEATURES -- -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; -- Verify that the GENERATED ... AS IDENTITY feature in PostgreSQL 10 -- is forbidden in distributed tables. 
CREATE TABLE table_identity_col ( id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, payload text ); SELECT master_create_distributed_table('table_identity_col', 'id', 'append'); SELECT create_distributed_table('table_identity_col', 'id'); SELECT create_distributed_table('table_identity_col', 'text'); SELECT create_reference_table('table_identity_col'); citus-7.0.3/src/test/regress/sql/multi_cross_shard.sql000066400000000000000000000067311317107136600231500ustar00rootroot00000000000000-- -- MULTI_CROSS_SHARD -- -- Tests to log cross shard queries according to error log level -- -- Create a distributed table and add data to it CREATE TABLE multi_task_table ( id int, name varchar(20) ); SELECT create_distributed_table('multi_task_table', 'id'); INSERT INTO multi_task_table VALUES(1, 'elem_1'); INSERT INTO multi_task_table VALUES(2, 'elem_2'); INSERT INTO multi_task_table VALUES(3, 'elem_3'); -- Shouldn't log anything when the log level is 'off' SHOW citus.multi_task_query_log_level; SELECT * FROM multi_task_table; -- Get messages with the log level 'notice' SET citus.multi_task_query_log_level TO notice; SELECT * FROM multi_task_table; SELECT AVG(id) AS avg_id FROM multi_task_table; -- Get messages with the log level 'error' SET citus.multi_task_query_log_level TO error; SELECT * FROM multi_task_table; -- Check the log message with INSERT INTO ... SELECT CREATE TABLE raw_table ( id int, order_count int ); CREATE TABLE summary_table ( id int, order_sum BIGINT ); SELECT create_distributed_table('raw_table', 'id'); SELECT create_distributed_table('summary_table', 'id'); INSERT INTO raw_table VALUES(1, '15'); INSERT INTO raw_table VALUES(2, '15'); INSERT INTO raw_table VALUES(3, '15'); INSERT INTO raw_table VALUES(1, '20'); INSERT INTO raw_table VALUES(2, '25'); INSERT INTO raw_table VALUES(3, '35'); -- Should notice user that the query is multi-task one SET citus.multi_task_query_log_level TO notice; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; -- Should error out since the query is multi-task one SET citus.multi_task_query_log_level TO error; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; -- Shouldn't error out since it is a single task insert-into select query INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table WHERE id = 1 GROUP BY id; -- Should have four rows (three rows from the query without where and the one from with where) SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table; -- Set log-level to different levels inside the transaction BEGIN; -- Should notice user that the query is multi-task one SET citus.multi_task_query_log_level TO notice; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; -- Should error out since the query is multi-task one SET citus.multi_task_query_log_level TO error; INSERT INTO summary_table SELECT id, SUM(order_count) FROM raw_table GROUP BY id; ROLLBACK; -- Should have only four rows since the transaction is rollbacked. 
SET citus.multi_task_query_log_level to DEFAULT; SELECT * FROM summary_table; -- Test router-select query SET citus.multi_task_query_log_level TO notice; -- Shouldn't log since it is a router select query SELECT * FROM raw_table WHERE ID = 1; -- Task tracker query test CREATE TABLE tt1 ( id int, name varchar(20) ); CREATE TABLE tt2 ( id int, name varchar(20), count bigint ); SELECT create_distributed_table('tt1', 'id'); SELECT create_distributed_table('tt2', 'name'); INSERT INTO tt1 VALUES(1, 'Ahmet'); INSERT INTO tt1 VALUES(2, 'Mehmet'); INSERT INTO tt2 VALUES(1, 'Ahmet', 5); INSERT INTO tt2 VALUES(2, 'Mehmet', 15); -- Should notice since it is a task-tracker query SET citus.task_executor_type to "task-tracker"; SELECT tt1.id, tt2.count from tt1,tt2 where tt1.id = tt2.id; SET citus.task_executor_type to DEFAULT; DROP TABLE tt2; DROP TABLE tt1; DROP TABLE multi_task_table; DROP TABLE raw_table; DROP TABLE summary_table; citus-7.0.3/src/test/regress/sql/multi_data_types.sql000066400000000000000000000112441317107136600227660ustar00rootroot00000000000000-- =================================================================== -- test composite type, varchar and enum types -- create, distribute, INSERT, SELECT and UPDATE -- =================================================================== ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 530000; -- create a custom type... CREATE TYPE test_composite_type AS ( i integer, i2 integer ); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_composite_type_function(test_composite_type, test_composite_type) RETURNS boolean LANGUAGE 'internal' AS 'record_eq' IMMUTABLE RETURNS NULL ON NULL INPUT; CREATE FUNCTION cmp_test_composite_type_function(test_composite_type, test_composite_type) RETURNS int LANGUAGE 'internal' AS 'btrecordcmp' IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_composite_type, RIGHTARG = test_composite_type, PROCEDURE = equal_test_composite_type_function, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY cats_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_composite_type_hash(test_composite_type) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS cats_op_fam_clas3 DEFAULT FOR TYPE test_composite_type USING BTREE AS OPERATOR 3 = (test_composite_type, test_composite_type), FUNCTION 1 cmp_test_composite_type_function(test_composite_type, test_composite_type); CREATE OPERATOR CLASS cats_op_fam_class DEFAULT FOR TYPE test_composite_type USING HASH AS OPERATOR 1 = (test_composite_type, test_composite_type), FUNCTION 1 test_composite_type_hash(test_composite_type); -- create and distribute a table on composite type column CREATE TABLE composite_type_partitioned_table ( id integer, col test_composite_type ); SELECT master_create_distributed_table('composite_type_partitioned_table', 'col', 'hash'); SELECT master_create_worker_shards('composite_type_partitioned_table', 4, 1); -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO composite_type_partitioned_table VALUES (1, '(1, 2)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (2, '(3, 4)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (3, '(5, 6)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (4, '(7, 8)'::test_composite_type); INSERT INTO composite_type_partitioned_table VALUES (5, '(9, 10)'::test_composite_type); SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; UPDATE composite_type_partitioned_table SET id = 6 WHERE col = '(7, 8)'::test_composite_type; SELECT * FROM composite_type_partitioned_table WHERE col = '(7, 8)'::test_composite_type; -- create and distribute a table on enum type column CREATE TYPE bug_status AS ENUM ('new', 'open', 'closed'); CREATE TABLE bugs ( id integer, status bug_status ); SELECT master_create_distributed_table('bugs', 'status', 'hash'); SELECT master_create_worker_shards('bugs', 4, 1); -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO bugs VALUES (1, 'new'); INSERT INTO bugs VALUES (2, 'open'); INSERT INTO bugs VALUES (3, 'closed'); INSERT INTO bugs VALUES (4, 'closed'); INSERT INTO bugs VALUES (5, 'open'); SELECT * FROM bugs WHERE status = 'closed'::bug_status; UPDATE bugs SET status = 'closed'::bug_status WHERE id = 2; SELECT * FROM bugs WHERE status = 'open'::bug_status; -- create and distribute a table on varchar column CREATE TABLE varchar_hash_partitioned_table ( id int, name varchar ); SELECT master_create_distributed_table('varchar_hash_partitioned_table', 'name', 'hash'); SELECT master_create_worker_shards('varchar_hash_partitioned_table', 4, 1); -- execute INSERT, SELECT and UPDATE queries on composite_type_partitioned_table INSERT INTO varchar_hash_partitioned_table VALUES (1, 'Jason'); INSERT INTO varchar_hash_partitioned_table VALUES (2, 'Ozgun'); INSERT INTO varchar_hash_partitioned_table VALUES (3, 'Onder'); INSERT INTO varchar_hash_partitioned_table VALUES (4, 'Sumedh'); INSERT INTO varchar_hash_partitioned_table VALUES (5, 'Marco'); SELECT * FROM varchar_hash_partitioned_table WHERE id = 1; UPDATE varchar_hash_partitioned_table SET id = 6 WHERE name = 'Jason'; SELECT * FROM varchar_hash_partitioned_table WHERE id = 6; 
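-- A brief illustrative aside (not part of the original regression file): a hedged,
-- minimal sketch exercising the helper objects defined above. It assumes the
-- test_composite_type type, its custom equality operator, and the
-- test_composite_type_hash function from this file are still in place. Because the
-- hash is computed from the sum of the two fields, '(1, 2)' and '(2, 1)' hash to
-- the same value even though they are not equal; equal values always hash
-- identically, which is the property the hash operator class above relies on.
SELECT '(1, 2)'::test_composite_type = '(1, 2)'::test_composite_type AS equal_values,
       test_composite_type_hash('(1, 2)'::test_composite_type) =
       test_composite_type_hash('(1, 2)'::test_composite_type) AS equal_hashes,
       test_composite_type_hash('(1, 2)'::test_composite_type) =
       test_composite_type_hash('(2, 1)'::test_composite_type) AS colliding_hashes;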
citus-7.0.3/src/test/regress/sql/multi_deparse_shard_query.sql000066400000000000000000000155631317107136600246720ustar00rootroot00000000000000-- -- MULTI_DEPARSE_SHARD_QUERY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13100000; CREATE FUNCTION deparse_shard_query_test(text) RETURNS VOID AS 'citus' LANGUAGE C STRICT; -- create the first table CREATE TABLE raw_events_1 (tenant_id bigint, value_1 int, value_2 int, value_3 float, value_4 bigint, value_5 text, value_6 int DEfAULT 10, value_7 int, event_at date DEfAULT now() ); SELECT master_create_distributed_table('raw_events_1', 'tenant_id', 'hash'); SELECT master_create_worker_shards('raw_events_1', 4, 1); -- create the first table CREATE TABLE raw_events_2 (tenant_id bigint, value_1 int, value_2 int, value_3 float, value_4 bigint, value_5 text, value_6 float DEfAULT (random()*100)::float, value_7 int, event_at date DEfAULT now() ); SELECT master_create_distributed_table('raw_events_2', 'tenant_id', 'hash'); SELECT master_create_worker_shards('raw_events_2', 4, 1); CREATE TABLE aggregated_events (tenant_id bigint, sum_value_1 bigint, average_value_2 float, average_value_3 float, sum_value_4 bigint, sum_value_5 float, average_value_6 int, rollup_hour date); SELECT master_create_distributed_table('aggregated_events', 'tenant_id', 'hash'); SELECT master_create_worker_shards('aggregated_events', 4, 1); -- start with very simple examples on a single table SELECT deparse_shard_query_test(' INSERT INTO raw_events_1 SELECT * FROM raw_events_1; '); SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(tenant_id, value_4) SELECT tenant_id, value_4 FROM raw_events_1; '); -- now that shuffle columns a bit on a single table SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4) SELECT value_2::text, value_5::int, tenant_id, value_4 FROM raw_events_1; '); -- same test on two different tables SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(value_5, value_2, tenant_id, value_4) SELECT value_2::text, value_5::int, tenant_id, value_4 FROM raw_events_2; '); -- lets do some simple aggregations SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (tenant_id, rollup_hour, sum_value_1, average_value_3, average_value_6, sum_value_4) SELECT tenant_id, date_trunc(\'hour\', event_at) , sum(value_1), avg(value_3), avg(value_6), sum(value_4) FROM raw_events_1 GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); -- also some subqueries, JOINS with a complicated target lists -- a simple JOIN SELECT deparse_shard_query_test(' INSERT INTO raw_events_1 (value_3, tenant_id) SELECT raw_events_2.value_3, raw_events_1.tenant_id FROM raw_events_1, raw_events_2 WHERE raw_events_1.tenant_id = raw_events_2.tenant_id; '); -- join with group by SELECT deparse_shard_query_test(' INSERT INTO raw_events_1 (value_3, tenant_id) SELECT max(raw_events_2.value_3), avg(raw_events_1.value_3) FROM raw_events_1, raw_events_2 WHERE raw_events_1.tenant_id = raw_events_2.tenant_id GROUP BY raw_events_1.event_at '); -- a more complicated JOIN SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_4, tenant_id) SELECT max(r1.value_4), r3.tenant_id FROM raw_events_1 r1, raw_events_2 r2, raw_events_1 r3 WHERE r1.tenant_id = r2.tenant_id AND r2.tenant_id = r3.tenant_id GROUP BY r1.value_1, r3.tenant_id, r2.event_at ORDER BY r2.event_at DESC; '); -- queries with CTEs are supported SELECT deparse_shard_query_test(' WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1) INSERT INTO 
aggregated_events (rollup_hour, sum_value_5, tenant_id) SELECT event_at, sum(value_5::int), tenant_id FROM raw_events_1 GROUP BY event_at, tenant_id; '); SELECT deparse_shard_query_test(' WITH first_tenant AS (SELECT event_at, value_5, tenant_id FROM raw_events_1) INSERT INTO aggregated_events (sum_value_5, tenant_id) SELECT sum(value_5::int), tenant_id FROM raw_events_1 GROUP BY event_at, tenant_id; '); SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_1, sum_value_5, tenant_id) WITH RECURSIVE hierarchy as ( SELECT value_1, 1 AS LEVEL, tenant_id FROM raw_events_1 WHERE tenant_id = 1 UNION SELECT re.value_2, (h.level+1), re.tenant_id FROM hierarchy h JOIN raw_events_1 re ON (h.tenant_id = re.tenant_id AND h.value_1 = re.value_6)) SELECT * FROM hierarchy WHERE LEVEL <= 2; '); SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_1) SELECT DISTINCT value_1 FROM raw_events_1; '); -- many filters suffled SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (sum_value_5, sum_value_1, tenant_id) SELECT value_3, value_2, tenant_id FROM raw_events_1 WHERE (value_5 like \'%s\' or value_5 like \'%a\') and (tenant_id = 1) and (value_6 < 3000 or value_3 > 8000); '); SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (sum_value_5, tenant_id) SELECT rank() OVER (PARTITION BY tenant_id ORDER BY value_6), tenant_id FROM raw_events_1 WHERE event_at = now(); '); SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events (sum_value_5, tenant_id, sum_value_4) SELECT random(), int4eq(1, max(value_1))::int, value_6 FROM raw_events_1 WHERE event_at = now() GROUP BY event_at, value_7, value_6; '); SELECT deparse_shard_query_test(' INSERT INTO aggregated_events (sum_value_1, tenant_id) SELECT count(DISTINCT CASE WHEN value_1 > 100 THEN tenant_id ELSE value_6 END) as c, max(tenant_id) FROM raw_events_1; '); SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(value_7, value_1, tenant_id) SELECT value_7, value_1, tenant_id FROM (SELECT tenant_id, value_2 as value_7, value_1 FROM raw_events_2 ) as foo '); SELECT deparse_shard_query_test(E' INSERT INTO aggregated_events(sum_value_1, tenant_id, sum_value_5) SELECT sum(value_1), tenant_id, sum(value_5::bigint) FROM (SELECT raw_events_1.event_at, raw_events_2.tenant_id, raw_events_2.value_5, raw_events_1.value_1 FROM raw_events_2, raw_events_1 WHERE raw_events_1.tenant_id = raw_events_2.tenant_id ) as foo GROUP BY tenant_id, date_trunc(\'hour\', event_at) '); SELECT deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_2, value_3, value_4) SELECT tenant_id, value_1, value_2, value_3, value_4 FROM (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); SELECT deparse_shard_query_test(E' INSERT INTO raw_events_2(tenant_id, value_1, value_4, value_2, value_3) SELECT * FROM (SELECT value_2, value_4, tenant_id, value_1, value_3 FROM raw_events_1 ) as foo '); -- use a column multiple times SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(tenant_id, value_7, value_4) SELECT tenant_id, value_7, value_7 FROM raw_events_1 ORDER BY value_2, value_1; '); -- test dropped table as well ALTER TABLE raw_events_1 DROP COLUMN value_5; SELECT deparse_shard_query_test(' INSERT INTO raw_events_1(tenant_id, value_7, value_4) SELECT tenant_id, value_7, value_4 FROM raw_events_1; '); citus-7.0.3/src/test/regress/sql/multi_distributed_transaction_id.sql000066400000000000000000000061311317107136600262330ustar00rootroot00000000000000-- -- 
MULTI_DISTRIBUTED_TRANSACTION_ID -- -- Unit tests for distributed transaction id functionality -- -- get the current transaction id, which should be uninitialized -- note that we skip printing the databaseId, which might change -- per run -- set timezone to a specific value to prevent -- different values on different servers SET TIME ZONE 'PST8PDT'; -- should return uninitialized values if not in a transaction SELECT initiator_node_identifier, transaction_number, transaction_stamp FROM get_current_transaction_id(); BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); -- now assign a value SELECT assign_distributed_transaction_id(50, 50, '2016-01-01 00:00:00+0'); -- see the assigned value SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); -- a backend cannot be assigned another tx id if already assigned SELECT assign_distributed_transaction_id(51, 51, '2017-01-01 00:00:00+0'); ROLLBACK; -- since the transaction finished, we should see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); -- also see that ROLLBACK (i.e., failures in the transaction) clears the shared memory BEGIN; -- we should still see the uninitialized values SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); -- now assign a value SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); SELECT 5 / 0; COMMIT; -- since the transaction errored, we should see the uninitialized values again SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); -- we should also see that a new connection means an uninitialized transaction id BEGIN; SELECT assign_distributed_transaction_id(52, 52, '2015-01-01 00:00:00+0'); SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); \c - - - :master_port SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); -- now show that PREPARE resets the distributed transaction id BEGIN; SELECT assign_distributed_transaction_id(120, 120, '2015-01-01 00:00:00+0'); SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); PREPARE TRANSACTION 'dist_xact_id_test'; -- after the prepare we should see that transaction id is cleared SELECT initiator_node_identifier, transaction_number, transaction_stamp, (process_id = pg_backend_pid()) FROM get_current_transaction_id(); -- cleanup the transaction ROLLBACK PREPARED 'dist_xact_id_test'; -- set back to the original zone SET TIME ZONE DEFAULT; citus-7.0.3/src/test/regress/sql/multi_distribution_metadata.sql000066400000000000000000000221451317107136600252120ustar00rootroot00000000000000-- =================================================================== -- create test functions -- =================================================================== ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 540000; CREATE FUNCTION load_shard_id_array(regclass) RETURNS bigint[] AS 'citus' LANGUAGE C STRICT; 
CREATE FUNCTION load_shard_interval_array(bigint, anyelement) RETURNS anyarray AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION load_shard_placement_array(bigint, bool) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION partition_column_id(regclass) RETURNS smallint AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION partition_type(regclass) RETURNS "char" AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION is_distributed_table(regclass) RETURNS boolean AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION column_name_to_column_id(regclass, cstring) RETURNS smallint AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION create_monolithic_shard_row(regclass) RETURNS bigint AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION acquire_shared_shard_lock(bigint) RETURNS void AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test distribution metadata functionality -- =================================================================== -- create hash distributed table CREATE TABLE events_hash ( id bigint, name text ); SELECT master_create_distributed_table('events_hash', 'name', 'hash'); -- create worker shards SELECT master_create_worker_shards('events_hash', 4, 2); -- set shardstate of one replication from each shard to 0 (invalid value) UPDATE pg_dist_placement SET shardstate = 0 WHERE shardid BETWEEN 540000 AND 540003 AND groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port); -- should see above shard identifiers SELECT load_shard_id_array('events_hash'); -- should see array with first shard range SELECT load_shard_interval_array(540000, 0); -- should even work for range-partitioned shards -- create range distributed table CREATE TABLE events_range ( id bigint, name text ); SELECT master_create_distributed_table('events_range', 'name', 'range'); -- create empty shard SELECT master_create_empty_shard('events_range'); UPDATE pg_dist_shard SET shardminvalue = 'Aardvark', shardmaxvalue = 'Zebra' WHERE shardid = 540004; SELECT load_shard_interval_array(540004, ''::text); -- should see error for non-existent shard SELECT load_shard_interval_array(540005, 0); -- should see two placements SELECT load_shard_placement_array(540001, false); -- only one of which is finalized SELECT load_shard_placement_array(540001, true); -- should see error for non-existent shard SELECT load_shard_placement_array(540001, false); -- should see column id of 'name' SELECT partition_column_id('events_hash'); -- should see hash partition type and fail for non-distributed tables SELECT partition_type('events_hash'); SELECT partition_type('pg_type'); -- should see true for events_hash, false for others SELECT is_distributed_table('events_hash'); SELECT is_distributed_table('pg_type'); SELECT is_distributed_table('pg_dist_shard'); -- test underlying column name-id translation SELECT column_name_to_column_id('events_hash', 'name'); SELECT column_name_to_column_id('events_hash', 'ctid'); SELECT column_name_to_column_id('events_hash', 'non_existent'); -- drop shard rows (must drop placements first) DELETE FROM pg_dist_placement WHERE shardid BETWEEN 540000 AND 540004; DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_hash'::regclass; DELETE FROM pg_dist_shard WHERE logicalrelid = 'events_range'::regclass; -- verify that an eager load shows them missing SELECT load_shard_id_array('events_hash'); -- create second table to distribute CREATE TABLE customers ( id bigint, name text ); -- now we'll distribute using function calls but verify metadata manually... 
-- partition on id and manually inspect partition row INSERT INTO pg_dist_partition (logicalrelid, partmethod, partkey) VALUES ('customers'::regclass, 'h', column_name_to_column('customers'::regclass, 'id')); SELECT partmethod, column_to_column_name(logicalrelid, partkey) FROM pg_dist_partition WHERE logicalrelid = 'customers'::regclass; -- make one huge shard and manually inspect shard row SELECT create_monolithic_shard_row('customers') AS new_shard_id \gset SELECT shardstorage, shardminvalue, shardmaxvalue FROM pg_dist_shard WHERE shardid = :new_shard_id; -- now we'll even test our lock methods... -- use transaction to bound how long we hold the lock BEGIN; -- pick up a shard lock and look for it in pg_locks SELECT acquire_shared_shard_lock(5); SELECT objid, mode FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; -- commit should drop the lock COMMIT; -- lock should be gone now SELECT COUNT(*) FROM pg_locks WHERE locktype = 'advisory' AND objid = 5; -- test get_shard_id_for_distribution_column SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table1(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table1', 'column1'); \COPY get_shardid_test_table1 FROM STDIN with delimiter '|'; 1|1 2|2 3|3 \. SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 1); SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 2); SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 3); -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table1_540006; SELECT * FROM get_shardid_test_table1_540009; SELECT * FROM get_shardid_test_table1_540007; \c - - - :master_port -- test non-existing value SELECT get_shard_id_for_distribution_column('get_shardid_test_table1', 4); -- test array type SET citus.shard_count TO 4; CREATE TABLE get_shardid_test_table2(column1 text[], column2 int); SELECT create_distributed_table('get_shardid_test_table2', 'column1'); \COPY get_shardid_test_table2 FROM STDIN with delimiter '|'; {a, b, c}|1 {d, e, f}|2 \. 
SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{a, b, c}'); SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', '{d, e, f}'); -- verify result of the get_shard_id_for_distribution_column \c - - - :worker_1_port SELECT * FROM get_shardid_test_table2_540013; SELECT * FROM get_shardid_test_table2_540011; \c - - - :master_port -- test mismatching data type SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', 'a'); -- test NULL distribution column value for hash distributed table SELECT get_shard_id_for_distribution_column('get_shardid_test_table2'); SELECT get_shard_id_for_distribution_column('get_shardid_test_table2', NULL); -- test non-distributed table CREATE TABLE get_shardid_test_table3(column1 int, column2 int); SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); -- test append distributed table SELECT create_distributed_table('get_shardid_test_table3', 'column1', 'append'); SELECT get_shard_id_for_distribution_column('get_shardid_test_table3', 1); -- test reference table; CREATE TABLE get_shardid_test_table4(column1 int, column2 int); SELECT create_reference_table('get_shardid_test_table4'); -- test NULL distribution column value for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4'); SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', NULL); -- test different data types for reference table SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 1); SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', 'a'); SELECT get_shard_id_for_distribution_column('get_shardid_test_table4', '{a, b, c}'); -- test range distributed table CREATE TABLE get_shardid_test_table5(column1 int, column2 int); SELECT create_distributed_table('get_shardid_test_table5', 'column1', 'range'); -- create worker shards SELECT master_create_empty_shard('get_shardid_test_table5'); SELECT master_create_empty_shard('get_shardid_test_table5'); SELECT master_create_empty_shard('get_shardid_test_table5'); SELECT master_create_empty_shard('get_shardid_test_table5'); -- now the comparison is done via the partition column type, which is text UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 1000 WHERE shardid = 540015; UPDATE pg_dist_shard SET shardminvalue = 1001, shardmaxvalue = 2000 WHERE shardid = 540016; UPDATE pg_dist_shard SET shardminvalue = 2001, shardmaxvalue = 3000 WHERE shardid = 540017; UPDATE pg_dist_shard SET shardminvalue = 3001, shardmaxvalue = 4000 WHERE shardid = 540018; SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 5); SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 1111); SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 2689); SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 3248); -- test non-existing value for range distributed tables SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', 4001); SELECT get_shard_id_for_distribution_column('get_shardid_test_table5', -999); -- clear unnecessary tables; DROP TABLE get_shardid_test_table1, get_shardid_test_table2, get_shardid_test_table3, get_shardid_test_table4, get_shardid_test_table5; citus-7.0.3/src/test/regress/sql/multi_drop_extension.sql000066400000000000000000000022631317107136600236720ustar00rootroot00000000000000-- -- MULTI_DROP_EXTENSION -- -- Tests around dropping and recreating the extension ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 550000; CREATE 
TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); -- this emits a NOTICE message for every table we are dropping with our CASCADE. It would -- be nice to check that we get those NOTICE messages, but it's nicer to not have to -- change this test every time the previous tests change the set of tables they leave -- around. SET client_min_messages TO 'WARNING'; DROP EXTENSION citus CASCADE; RESET client_min_messages; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- verify that a table can be created after the extension has been dropped and recreated CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); SELECT 1 FROM master_create_empty_shard('testtableddl'); SELECT * FROM testtableddl; DROP TABLE testtableddl; citus-7.0.3/src/test/regress/sql/multi_dropped_column_aliases.sql000066400000000000000000000020661317107136600253460ustar00rootroot00000000000000 -- Tests that check that our query functionality behaves as expected when the -- table schema is modified via ALTER statements. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 620000; SELECT count(*) FROM customer; SELECT * FROM customer LIMIT 2; ALTER TABLE customer ADD COLUMN new_column1 INTEGER; ALTER TABLE customer ADD COLUMN new_column2 INTEGER; SELECT count(*) FROM customer; SELECT * FROM customer LIMIT 2; ALTER TABLE customer DROP COLUMN new_column1; ALTER TABLE customer DROP COLUMN new_column2; SELECT count(*) FROM customer; SELECT * FROM customer LIMIT 2; -- Verify joins work with dropped columns. SELECT count(*) FROM customer, orders WHERE c_custkey = o_custkey; -- Test joinExpr aliases by performing an outer-join. This code path is -- currently not exercised, but we are adding this test to catch this bug when -- we start supporting outer joins. 
SELECT c_custkey FROM (customer LEFT OUTER JOIN orders ON (c_custkey = o_custkey)) AS test(c_custkey, c_nationkey) INNER JOIN lineitem ON (test.c_custkey = l_orderkey) LIMIT 10; citus-7.0.3/src/test/regress/sql/multi_expire_table_cache.sql000066400000000000000000000034251317107136600244210ustar00rootroot00000000000000--- --- MULTI_EXPIRE_TABLE_CACHE --- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; -- create test table CREATE TABLE large_table(a int, b int); SELECT master_create_distributed_table('large_table', 'a', 'hash'); SELECT master_create_worker_shards('large_table', 8, 1); CREATE TABLE broadcast_table(a int, b int); SELECT master_create_distributed_table('broadcast_table', 'a', 'hash'); SELECT master_create_worker_shards('broadcast_table', 2, 1); -- verify only small tables are supported SELECT master_expire_table_cache('large_table'); SELECT master_expire_table_cache('broadcast_table'); -- run a join so that broadcast tables are cached on other workers SELECT * from large_table l, broadcast_table b where l.a = b.b; -- insert some data INSERT INTO large_table VALUES(1, 1); INSERT INTO large_table VALUES(1, 2); INSERT INTO large_table VALUES(2, 1); INSERT INTO large_table VALUES(2, 2); INSERT INTO large_table VALUES(3, 1); INSERT INTO large_table VALUES(3, 2); INSERT INTO broadcast_table VALUES(1, 1); -- verify returned results are wrong SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; -- expire cache and re-run, results should be correct this time SELECT master_expire_table_cache('broadcast_table'); SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; -- insert some more data into broadcast table INSERT INTO broadcast_table VALUES(2, 2); -- run the same query, get wrong results SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; -- expire cache and re-run, results should be correct this time SELECT master_expire_table_cache('broadcast_table'); SELECT * from large_table l, broadcast_table b WHERE l.b = b.b ORDER BY l.a, l.b; DROP TABLE large_table, broadcast_table; citus-7.0.3/src/test/regress/sql/multi_explain.sql000066400000000000000000000320131317107136600222660ustar00rootroot00000000000000-- -- MULTI_EXPLAIN -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 570000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- VACUMM related tables to ensure test outputs are stable VACUUM ANALYZE lineitem; VACUUM ANALYZE orders; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Validate 
JSON format SELECT true AS valid FROM explain_json($$ SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); -- Test XML format EXPLAIN (COSTS FALSE, FORMAT XML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Validate XML format SELECT true AS valid FROM explain_xml($$ SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); -- Test YAML format EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem; -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem JOIN orders ON l_orderkey = o_orderkey AND l_quantity < 5.0 ORDER BY l_quantity LIMIT 10; -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem VALUES (1,0), (2, 0), (3, 0), (4, 0); -- Test update EXPLAIN (COSTS FALSE) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem WHERE l_orderkey = 1 AND l_partkey = 0; -- Test zero-shard update EXPLAIN (COSTS FALSE) UPDATE lineitem SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_orderkey = 0; -- Test zero-shard delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem WHERE l_orderkey = 1 AND l_orderkey = 0; -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); SELECT true AS valid FROM explain_json($$ SELECT l_quantity FROM lineitem WHERE l_orderkey = 5$$); -- Test CREATE TABLE ... 
AS EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem; -- Test having EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem HAVING sum(l_quantity) > 100; -- Test having without aggregate EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT l_quantity FROM lineitem GROUP BY l_quantity HAVING l_quantity > (100 * random()); -- Subquery pushdown tests with explain EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average FROM (SELECT tenant_id, user_id, array_agg(event_type ORDER BY event_time) AS events FROM (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, event_type, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type IN ('click', 'submit', 'pay')) AS subquery GROUP BY tenant_id, user_id) AS subquery; -- Union and left join subquery pushdown EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average, hasdone FROM (SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(hasdone, 'Has not done paying') AS hasdone FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id, (composite_id).tenant_id, (composite_id).user_id, 'Has done paying'::TEXT AS hasdone FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay') AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, hasdone) AS subquery_top GROUP BY hasdone; -- Union, left join and having subquery pushdown EXPLAIN (COSTS OFF) SELECT avg(array_length(events, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, 
-9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, COUNT(*) AS count_pay FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay' GROUP BY composite_id HAVING COUNT(*) > 2) AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events) > 0 GROUP BY count_pay ORDER BY count_pay; -- Lateral join subquery pushdown -- set subquery_pushdown due to limit in the query SET citus.subquery_pushdown to ON; EXPLAIN (COSTS OFF) SELECT tenant_id, user_id, user_lastseen, event_array FROM (SELECT tenant_id, user_id, max(lastseen) as user_lastseen, array_agg(event_type ORDER BY event_time) AS event_array FROM (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, lastseen FROM users WHERE composite_id >= '(1, -9223372036854775808)'::user_composite_type AND composite_id <= '(1, 9223372036854775807)'::user_composite_type ORDER BY lastseen DESC LIMIT 10 ) AS subquery_top LEFT JOIN LATERAL (SELECT event_type, event_time FROM events WHERE (composite_id) = subquery_top.composite_id ORDER BY event_time DESC LIMIT 99) AS subquery_lateral ON true GROUP BY tenant_id, user_id ) AS shard_union ORDER BY user_lastseen DESC LIMIT 10; -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); SELECT true AS valid FROM explain_json($$ SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030$$); -- Test track tracker SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; SELECT true AS valid FROM explain_json($$ SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); EXPLAIN (COSTS FALSE, FORMAT XML) SELECT count(*) FROM lineitem, orders, customer, supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM lineitem, orders, customer, supplier WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); -- make sure that EXPLAIN works without -- problems for queries that inlvolves only -- reference tables SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM nation WHERE n_name = 'CHINA'$$); SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM nation, supplier WHERE nation.n_nationkey = supplier.s_nationkey$$); EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT count(*) FROM lineitem, orders, customer, 
supplier_single_shard WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; -- test parallel aggregates SET parallel_setup_cost=0; SET parallel_tuple_cost=0; SET min_parallel_relation_size=0; SET min_parallel_table_scan_size=0; SET max_parallel_workers_per_gather=4; -- ensure local plans display correctly CREATE TABLE lineitem_clone (LIKE lineitem); EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_clone; -- ensure distributed plans don't break EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem; -- ensure EXPLAIN EXECUTE doesn't crash PREPARE task_tracker_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE task_tracker_query; SET citus.task_executor_type TO 'real-time'; PREPARE router_executor_query AS SELECT l_quantity FROM lineitem WHERE l_orderkey = 5; EXPLAIN EXECUTE router_executor_query; PREPARE real_time_executor_query AS SELECT avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; EXPLAIN (COSTS FALSE) EXECUTE real_time_executor_query; -- EXPLAIN EXECUTE of parametrized prepared statements is broken, but -- at least make sure to fail without crashing PREPARE router_executor_query_param(int) AS SELECT l_quantity FROM lineitem WHERE l_orderkey = $1; EXPLAIN EXECUTE router_executor_query_param(5); -- test explain in a transaction with alter table to test we use right connections BEGIN; CREATE TABLE explain_table(id int); SELECT create_distributed_table('explain_table', 'id'); ALTER TABLE explain_table ADD COLUMN value int; ROLLBACK; -- test explain with local INSERT ... SELECT EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part SELECT o_orderkey FROM orders_hash_part LIMIT 3; SELECT true AS valid FROM explain_json($$ INSERT INTO lineitem_hash_part (l_orderkey) SELECT o_orderkey FROM orders_hash_part LIMIT 3; $$); EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part (l_orderkey, l_quantity) SELECT o_orderkey, 5 FROM orders_hash_part LIMIT 3; EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part (l_orderkey) SELECT s FROM generate_series(1,5) s; EXPLAIN (COSTS OFF) WITH cte1 AS (SELECT s FROM generate_series(1,10) s) INSERT INTO lineitem_hash_part WITH cte1 AS (SELECT * FROM cte1 LIMIT 5) SELECT s FROM cte1; EXPLAIN (COSTS OFF) INSERT INTO lineitem_hash_part ( SELECT s FROM generate_series(1,5) s) UNION ( SELECT s FROM generate_series(5,10) s); citus-7.0.3/src/test/regress/sql/multi_extension.sql000066400000000000000000000242121317107136600226440ustar00rootroot00000000000000-- -- MULTI_EXTENSION -- -- Tests around extension creation / upgrades -- -- It'd be nice to script generation of this file, but alas, that's -- not done yet. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 580000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 580000; CREATE SCHEMA test; CREATE OR REPLACE FUNCTION test.maintenance_worker(p_dbname text DEFAULT current_database()) RETURNS pg_stat_activity LANGUAGE plpgsql AS $$ DECLARE activity record; BEGIN LOOP SELECT * INTO activity FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; IF activity.pid IS NOT NULL THEN RETURN activity; ELSE PERFORM pg_sleep(0.1); PERFORM pg_stat_clear_snapshot(); END IF ; END LOOP; END; $$; -- check maintenance daemon is started SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); -- ensure no objects were created outside pg_catalog SELECT COUNT(*) FROM pg_depend AS pgd, pg_extension AS pge, LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'test'); -- DROP EXTENSION pre-created by the regression suite DROP EXTENSION citus; \c SET citus.enable_version_checks TO 'false'; -- Create extension in oldest version CREATE EXTENSION citus VERSION '5.0'; ALTER EXTENSION citus UPDATE TO '5.0-1'; ALTER EXTENSION citus UPDATE TO '5.0-2'; ALTER EXTENSION citus UPDATE TO '5.1-1'; ALTER EXTENSION citus UPDATE TO '5.1-2'; ALTER EXTENSION citus UPDATE TO '5.1-3'; ALTER EXTENSION citus UPDATE TO '5.1-4'; ALTER EXTENSION citus UPDATE TO '5.1-5'; ALTER EXTENSION citus UPDATE TO '5.1-6'; ALTER EXTENSION citus UPDATE TO '5.1-7'; ALTER EXTENSION citus UPDATE TO '5.1-8'; ALTER EXTENSION citus UPDATE TO '5.2-1'; ALTER EXTENSION citus UPDATE TO '5.2-2'; ALTER EXTENSION citus UPDATE TO '5.2-3'; ALTER EXTENSION citus UPDATE TO '5.2-4'; ALTER EXTENSION citus UPDATE TO '6.0-1'; ALTER EXTENSION citus UPDATE TO '6.0-2'; ALTER EXTENSION citus UPDATE TO '6.0-3'; ALTER EXTENSION citus UPDATE TO '6.0-4'; ALTER EXTENSION citus UPDATE TO '6.0-5'; ALTER EXTENSION citus UPDATE TO '6.0-6'; ALTER EXTENSION citus UPDATE TO '6.0-7'; ALTER EXTENSION citus UPDATE TO '6.0-8'; ALTER EXTENSION citus UPDATE TO '6.0-9'; ALTER EXTENSION citus UPDATE TO '6.0-10'; ALTER EXTENSION citus UPDATE TO '6.0-11'; ALTER EXTENSION citus UPDATE TO '6.0-12'; ALTER EXTENSION citus UPDATE TO '6.0-13'; ALTER EXTENSION citus UPDATE TO '6.0-14'; ALTER EXTENSION citus UPDATE TO '6.0-15'; ALTER EXTENSION citus UPDATE TO '6.0-16'; ALTER EXTENSION citus UPDATE TO '6.0-17'; ALTER EXTENSION citus UPDATE TO '6.0-18'; ALTER EXTENSION citus UPDATE TO '6.1-1'; ALTER EXTENSION citus UPDATE TO '6.1-2'; ALTER EXTENSION citus UPDATE TO '6.1-3'; ALTER EXTENSION citus UPDATE TO '6.1-4'; ALTER EXTENSION citus UPDATE TO '6.1-5'; ALTER EXTENSION citus UPDATE TO '6.1-6'; ALTER EXTENSION citus UPDATE TO '6.1-7'; ALTER EXTENSION citus UPDATE TO '6.1-8'; ALTER EXTENSION citus UPDATE TO '6.1-9'; ALTER EXTENSION citus UPDATE TO '6.1-10'; ALTER EXTENSION citus UPDATE TO '6.1-11'; ALTER EXTENSION citus UPDATE TO '6.1-12'; ALTER EXTENSION citus UPDATE TO '6.1-13'; ALTER EXTENSION citus UPDATE TO '6.1-14'; ALTER EXTENSION citus UPDATE TO '6.1-15'; ALTER EXTENSION citus UPDATE TO '6.1-16'; ALTER EXTENSION citus UPDATE TO '6.1-17'; ALTER EXTENSION citus UPDATE TO '6.2-1'; ALTER EXTENSION citus UPDATE TO '6.2-2'; ALTER EXTENSION citus UPDATE TO '6.2-3'; ALTER EXTENSION citus UPDATE TO '6.2-4'; ALTER EXTENSION citus UPDATE TO '7.0-1'; 
ALTER EXTENSION citus UPDATE TO '7.0-2'; ALTER EXTENSION citus UPDATE TO '7.0-3'; ALTER EXTENSION citus UPDATE TO '7.0-4'; ALTER EXTENSION citus UPDATE TO '7.0-5'; ALTER EXTENSION citus UPDATE TO '7.0-6'; ALTER EXTENSION citus UPDATE TO '7.0-7'; ALTER EXTENSION citus UPDATE TO '7.0-8'; ALTER EXTENSION citus UPDATE TO '7.0-9'; ALTER EXTENSION citus UPDATE TO '7.0-10'; ALTER EXTENSION citus UPDATE TO '7.0-11'; ALTER EXTENSION citus UPDATE TO '7.0-12'; ALTER EXTENSION citus UPDATE TO '7.0-13'; ALTER EXTENSION citus UPDATE TO '7.0-14'; ALTER EXTENSION citus UPDATE TO '7.0-15'; -- show running version SHOW citus.version; -- ensure no objects were created outside pg_catalog SELECT COUNT(*) FROM pg_depend AS pgd, pg_extension AS pge, LATERAL pg_identify_object(pgd.classid, pgd.objid, pgd.objsubid) AS pgio WHERE pgd.refclassid = 'pg_extension'::regclass AND pgd.refobjid = pge.oid AND pge.extname = 'citus' AND pgio.schema NOT IN ('pg_catalog', 'citus', 'test'); -- see incompatible version errors out RESET citus.enable_version_checks; DROP EXTENSION citus; CREATE EXTENSION citus VERSION '5.0'; -- Test non-distributed queries work even in version mismatch SET citus.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '6.2-1'; SET citus.enable_version_checks TO 'true'; -- Test CREATE TABLE CREATE TABLE version_mismatch_table(column1 int); -- Test COPY \copy version_mismatch_table FROM STDIN; 0 1 2 3 4 \. -- Test INSERT INSERT INTO version_mismatch_table(column1) VALUES(5); -- Test SELECT SELECT * FROM version_mismatch_table ORDER BY column1; -- Test SELECT from pg_catalog SELECT d.datname as "Name", pg_catalog.pg_get_userbyid(d.datdba) as "Owner", pg_catalog.array_to_string(d.datacl, E'\n') AS "Access privileges" FROM pg_catalog.pg_database d ORDER BY 1; -- We should not distribute table in version mistmatch SELECT create_distributed_table('version_mismatch_table', 'column1'); -- This function will cause fail in next ALTER EXTENSION CREATE OR REPLACE FUNCTION pg_catalog.citus_table_size(table_name regclass) RETURNS bigint LANGUAGE plpgsql AS $function$ BEGIN END; $function$; SET citus.enable_version_checks TO 'false'; -- This will fail because of previous function declaration ALTER EXTENSION citus UPDATE TO '6.2-2'; -- We can DROP problematic function and continue ALTER EXTENSION even when version checks are on SET citus.enable_version_checks TO 'true'; DROP FUNCTION citus_table_size(regclass); SET citus.enable_version_checks TO 'false'; ALTER EXTENSION citus UPDATE TO '6.2-2'; -- Test updating to the latest version without specifying the version number ALTER EXTENSION citus UPDATE; -- re-create in newest version DROP EXTENSION citus; \c CREATE EXTENSION citus; -- test cache invalidation in workers \c - - - :worker_1_port DROP EXTENSION citus; SET citus.enable_version_checks TO 'false'; CREATE EXTENSION citus VERSION '5.2-4'; SET citus.enable_version_checks TO 'true'; -- during ALTER EXTENSION, we should invalidate the cache ALTER EXTENSION citus UPDATE; -- if cache is invalidated succesfull, this \d should work without any problem \d \c - - - :master_port -- check that maintenance daemon gets (re-)started for the right user DROP EXTENSION citus; CREATE USER testuser SUPERUSER; SET ROLE testuser; CREATE EXTENSION citus; SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); -- and recreate as the right owner RESET ROLE; DROP EXTENSION citus; CREATE EXTENSION citus; -- Check that 
maintenance daemon can also be started in another database CREATE DATABASE another; \c another CREATE EXTENSION citus; CREATE SCHEMA test; CREATE OR REPLACE FUNCTION test.maintenance_worker(p_dbname text DEFAULT current_database()) RETURNS pg_stat_activity LANGUAGE plpgsql AS $$ DECLARE activity record; BEGIN LOOP SELECT * INTO activity FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; IF activity.pid IS NOT NULL THEN RETURN activity; ELSE PERFORM pg_sleep(0.1); PERFORM pg_stat_clear_snapshot(); END IF ; END LOOP; END; $$; -- see that the deamon started SELECT datname, datname = current_database(), usename = (SELECT extowner::regrole::text FROM pg_extension WHERE extname = 'citus') FROM test.maintenance_worker(); -- Test that database with active worker can be dropped. \c regression CREATE SCHEMA test_deamon; -- we create a similar function on the regression database -- note that this function checks for the existence of the daemon -- when not found, returns true else tries for 5 times and -- returns false CREATE OR REPLACE FUNCTION test_deamon.maintenance_deamon_died(p_dbname text) RETURNS boolean LANGUAGE plpgsql AS $$ DECLARE activity record; BEGIN PERFORM pg_stat_clear_snapshot(); LOOP SELECT * INTO activity FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname = p_dbname; IF activity.pid IS NULL THEN RETURN true; ELSE RETURN false; END IF; END LOOP; END; $$; -- drop the database and see that the deamon is dead DROP DATABASE another; SELECT * FROM test_deamon.maintenance_deamon_died('another'); -- we don't need the schema and the function anymore DROP SCHEMA test_deamon CASCADE; -- verify citus does not crash while creating a table when run against an older worker -- create_distributed_table piggybacks multiple commands into single one, if one worker -- did not have the required UDF it should fail instead of crash. 
-- create a test database, configure citus with single node CREATE DATABASE another; \c - - - :worker_1_port CREATE DATABASE another; \c - - - :master_port \c another CREATE EXTENSION citus; SELECT FROM master_add_node('localhost', :worker_1_port); \c - - - :worker_1_port CREATE EXTENSION citus; ALTER FUNCTION assign_distributed_transaction_id(initiator_node_identifier integer, transaction_number bigint, transaction_stamp timestamp with time zone) RENAME TO dummy_assign_function; \c - - - :master_port SET citus.shard_replication_factor to 1; -- create_distributed_table command should fail CREATE TABLE t1(a int, b int); SELECT create_distributed_table('t1', 'a'); \c regression \c - - - :worker_1_port DROP DATABASE another; \c - - - :master_port DROP DATABASE another; citus-7.0.3/src/test/regress/sql/multi_follower_configure_followers.sql000066400000000000000000000012131317107136600266120ustar00rootroot00000000000000-- prepare for future tests by configuring all the follower nodes \c - - - :follower_master_port ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); -- also configure the workers, they'll run queries when MX is enabled \c - - - :follower_worker_1_port ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); \c - - - :follower_worker_2_port ALTER SYSTEM SET citus.use_secondary_nodes TO 'always'; ALTER SYSTEM SET citus.cluster_name TO 'second-cluster'; SELECT pg_reload_conf(); citus-7.0.3/src/test/regress/sql/multi_follower_sanity_check.sql000066400000000000000000000003761317107136600252120ustar00rootroot00000000000000-- check that the nodes are all in read-only mode and rejecting write queries \c - - - :follower_master_port CREATE TABLE tab (a int); \c - - - :follower_worker_1_port CREATE TABLE tab (a int); \c - - - :follower_worker_2_port CREATE TABLE tab (a int); citus-7.0.3/src/test/regress/sql/multi_follower_select_statements.sql000066400000000000000000000046341317107136600262750ustar00rootroot00000000000000\c - - - :master_port -- do some setup SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); CREATE TABLE the_table (a int, b int); SELECT create_distributed_table('the_table', 'a'); INSERT INTO the_table (a, b) VALUES (1, 1); INSERT INTO the_table (a, b) VALUES (1, 2); -- connect to the follower and check that a simple select query works, the follower -- is still in the default cluster and will send queries to the primary nodes \c - - - :follower_master_port SELECT * FROM the_table; -- now, connect to the follower but tell it to use secondary nodes. There are no -- secondary nodes so this should fail. 
-- (this is :follower_master_port but substitution doesn't work here)
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'"
SELECT * FROM the_table;
-- add the secondary nodes and try again, the SELECT statement should work this time
\c - - - :master_port
SELECT 1 FROM master_add_node('localhost', :follower_worker_1_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_1_port), noderole => 'secondary');
SELECT 1 FROM master_add_node('localhost', :follower_worker_2_port, groupid => (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port), noderole => 'secondary');
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always'"
-- now that we've added secondaries this should work
SELECT * FROM the_table;
SELECT node_name, node_port FROM master_get_active_worker_nodes() ORDER BY node_name, node_port;
-- okay, now let's play with nodecluster. If we change the cluster of our follower node
-- queries should start failing again, since there are no worker nodes in the new cluster
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'"
-- there are no secondary nodes in this cluster, so this should fail!
SELECT * FROM the_table;
-- now move the secondary nodes into the new cluster and see that the follower, finally
-- correctly configured, can run select queries involving them
\c - - - :master_port
UPDATE pg_dist_node SET nodecluster = 'second-cluster' WHERE noderole = 'secondary';
\c "port=9070 dbname=regression options='-c\ citus.use_secondary_nodes=always\ -c\ citus.cluster_name=second-cluster'"
SELECT * FROM the_table;
-- clean up after ourselves
\c - - - :master_port
DROP TABLE the_table;
citus-7.0.3/src/test/regress/sql/multi_follower_task_tracker.sql000066400000000000000000000006341317107136600252200ustar00rootroot00000000000000\c - - - :master_port
-- do some setup
CREATE TABLE tab(a int, b int);
SELECT create_distributed_table('tab', 'a');
INSERT INTO tab (a, b) VALUES (1, 1);
INSERT INTO tab (a, b) VALUES (1, 2);
\c - - - :follower_master_port
SET citus.task_executor_type TO 'real-time';
SELECT * FROM tab;
SET citus.task_executor_type TO 'task-tracker';
SELECT * FROM tab;
-- clean up
\c - - - :master_port
DROP TABLE tab;
citus-7.0.3/src/test/regress/sql/multi_foreign_key.sql000066400000000000000000000611341317107136600231350ustar00rootroot00000000000000--
-- MULTI_FOREIGN_KEY
--
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1350000;
-- set shard_count to 4 for faster tests, because we create/drop lots of shards in this test.
SET citus.shard_count TO 4; -- create tables CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); -- test foreign constraint creation with not supported parameters CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET NULL); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE CASCADE); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; -- test foreign constraint creation on NOT co-located tables SET citus.shard_count TO 8; CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; SET citus.shard_count TO 4; -- test foreign constraint creation on non-partition columns CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; -- test foreign constraint creation while column list are in incorrect order CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; -- test foreign constraint with replication factor > 1 CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); DROP TABLE referencing_table; DROP TABLE referenced_table; -- test foreign constraint with correct conditions SET citus.shard_replication_factor TO 1; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id)); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); -- test inserts -- test insert to referencing table while there is NO corresponding value in referenced table INSERT INTO referencing_table VALUES(1, 1); -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); -- test deletes -- test delete from referenced table while there is corresponding value in referencing table DELETE FROM referenced_table WHERE id = 1; -- test delete from referenced table while there is NO 
corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; -- test cascading truncate INSERT INTO referenced_table VALUES(2, 2); INSERT INTO referencing_table VALUES(2, 2); TRUNCATE referenced_table CASCADE; SELECT * FROM referencing_table; -- drop table for next tests DROP TABLE referencing_table; DROP TABLE referenced_table; -- test foreign constraint options -- test ON DELETE CASCADE CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE CASCADE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); -- single shard cascading delete INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; SELECT * FROM referenced_table; -- multi shard cascading delete INSERT INTO referenced_table VALUES(2, 2); INSERT INTO referencing_table VALUES(2, 2); SELECT master_modify_multiple_shards('DELETE FROM referenced_table'); SELECT * FROM referencing_table; -- multi shard cascading delete with alter table INSERT INTO referenced_table VALUES(3, 3); INSERT INTO referencing_table VALUES(3, 3); BEGIN; ALTER TABLE referencing_table ADD COLUMN x int DEFAULT 0; SELECT master_modify_multiple_shards('DELETE FROM referenced_table'); COMMIT; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON DELETE NO ACTION + DEFERABLE + INITIALLY DEFERRED CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON DELETE RESTRICT CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON UPDATE NO ACTION + DEFERABLE + INITIALLY DEFERRED CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT 
create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); UPDATE referenced_table SET test_column = 10 WHERE id = 1; BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test ON UPDATE RESTRICT CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; UPDATE referencing_table SET id = 20 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test MATCH SIMPLE CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; DELETE FROM referencing_table WHERE ref_id = 2; DROP TABLE referencing_table; DROP TABLE referenced_table; -- test MATCH FULL CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int, FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; DROP TABLE referencing_table; DROP TABLE referenced_table; -- Similar tests, but this time we push foreign key constraints created by ALTER TABLE queries -- create tables CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); SELECT master_create_distributed_table('referenced_table', 'id', 'hash'); SELECT master_create_worker_shards('referenced_table', 4, 1); CREATE TABLE referencing_table(id int, ref_id int); SELECT master_create_distributed_table('referencing_table', 'ref_id', 'hash'); SELECT master_create_worker_shards('referencing_table', 4, 1); -- verify that we skip foreign key validation when propagation is turned off -- not skipping validation would result in a distributed query, which emits debug messages BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; SET LOCAL client_min_messages TO DEBUG2; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY (ref_id) REFERENCES referenced_table (id); ABORT; -- test foreign constraint creation -- test foreign constraint creation with not supported parameters ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE SET NULL; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES 
referenced_table(id) ON DELETE SET DEFAULT; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET NULL; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE SET DEFAULT; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON UPDATE CASCADE; -- test foreign constraint creation with multiple subcommands ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id), ADD CONSTRAINT test_constraint FOREIGN KEY(id) REFERENCES referenced_table(test_column); -- test foreign constraint creation without giving explicit name ALTER TABLE referencing_table ADD FOREIGN KEY(ref_id) REFERENCES referenced_table(id); -- test foreign constraint creation on NOT co-located tables ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); -- create co-located tables DROP TABLE referencing_table; DROP TABLE referenced_table; CREATE TABLE referenced_table(id int UNIQUE, test_column int, PRIMARY KEY(id, test_column)); CREATE TABLE referencing_table(id int, ref_id int); SELECT create_distributed_table('referenced_table', 'id', 'hash'); SELECT create_distributed_table('referencing_table', 'ref_id', 'hash'); -- columns for the referenced table is empty ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table ON DELETE CASCADE; -- test foreign constraint creation on non-partition columns ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(id) REFERENCES referenced_table(id); -- test foreign constraint creation while column list are in incorrect order ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(id, ref_id) REFERENCES referenced_table(id, test_column); -- test foreign constraint creation while column list are not in same length ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id, test_column); -- test foreign constraint creation while existing tables does not satisfy the constraint INSERT INTO referencing_table VALUES(1, 1); ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); -- test foreign constraint with correct conditions DELETE FROM referencing_table WHERE ref_id = 1; ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id); -- test inserts -- test insert to referencing table while there is NO corresponding value in referenced table INSERT INTO referencing_table VALUES(1, 1); -- test insert to referencing while there is corresponding value in referenced table INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); -- test deletes -- test delete from referenced table while there is corresponding value in referencing table DELETE FROM referenced_table WHERE id = 1; -- test delete from referenced table while there is NO corresponding value in referencing table DELETE FROM referencing_table WHERE ref_id = 1; DELETE FROM referenced_table WHERE id = 1; -- test DROP CONSTRAINT ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test foreign constraint options -- test ON DELETE CASCADE ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON 
DELETE CASCADE; INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; SELECT * FROM referencing_table; SELECT * FROM referenced_table; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON DELETE NO ACTION + DEFERABLE + INITIALLY DEFERRED ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE NO ACTION DEFERRABLE INITIALLY DEFERRED; INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); DELETE FROM referenced_table WHERE id = 1; BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON DELETE RESTRICT ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id) REFERENCES referenced_table(id) ON DELETE RESTRICT; INSERT INTO referenced_table VALUES(1, 1); INSERT INTO referencing_table VALUES(1, 1); BEGIN; DELETE FROM referenced_table WHERE id = 1; DELETE FROM referencing_table WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON UPDATE NO ACTION + DEFERABLE + INITIALLY DEFERRED ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; UPDATE referenced_table SET test_column = 10 WHERE id = 1; BEGIN; UPDATE referenced_table SET test_column = 10 WHERE id = 1; UPDATE referencing_table SET id = 10 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test ON UPDATE RESTRICT ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) ON UPDATE RESTRICT; BEGIN; UPDATE referenced_table SET test_column = 20 WHERE id = 1; UPDATE referencing_table SET id = 20 WHERE ref_id = 1; COMMIT; SELECT * FROM referencing_table; SELECT * FROM referenced_table; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test MATCH SIMPLE ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH SIMPLE; INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; DELETE FROM referencing_table WHERE ref_id = 2; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- test MATCH FULL ALTER TABLE referencing_table ADD CONSTRAINT test_constraint FOREIGN KEY(ref_id, id) REFERENCES referenced_table(id, test_column) MATCH FULL; INSERT INTO referencing_table VALUES(null, 2); SELECT * FROM referencing_table; ALTER TABLE referencing_table DROP CONSTRAINT test_constraint; -- we no longer need those tables DROP TABLE referencing_table; DROP TABLE referenced_table; -- test cyclical foreign keys CREATE TABLE cyclic_reference_table1(id int, table2_id int, PRIMARY KEY(id, table2_id)); CREATE TABLE cyclic_reference_table2(id int, table1_id int, PRIMARY KEY(id, table1_id)); SELECT create_distributed_table('cyclic_reference_table1', 'id', 'hash'); SELECT create_distributed_table('cyclic_reference_table2', 'table1_id', 'hash'); ALTER TABLE cyclic_reference_table1 ADD CONSTRAINT cyclic_constraint1 FOREIGN KEY(id, table2_id) 
REFERENCES cyclic_reference_table2(table1_id, id) DEFERRABLE INITIALLY DEFERRED; ALTER TABLE cyclic_reference_table2 ADD CONSTRAINT cyclic_constraint2 FOREIGN KEY(id, table1_id) REFERENCES cyclic_reference_table1(table2_id, id) DEFERRABLE INITIALLY DEFERRED; -- test insertion to a table which has cyclic foreign constraints, we expect that to fail INSERT INTO cyclic_reference_table1 VALUES(1, 1); -- proper insertion to table with cyclic dependency BEGIN; INSERT INTO cyclic_reference_table1 VALUES(1, 1); INSERT INTO cyclic_reference_table2 VALUES(1, 1); COMMIT; -- verify that rows are actually inserted SELECT * FROM cyclic_reference_table1; SELECT * FROM cyclic_reference_table2; -- test dropping cyclic referenced tables -- we expect those two queries to fail DROP TABLE cyclic_reference_table1; DROP TABLE cyclic_reference_table2; -- proper way of DROP with CASCADE option DROP TABLE cyclic_reference_table1 CASCADE; DROP TABLE cyclic_reference_table2 CASCADE; -- test creation of foreign keys in a transaction CREATE TABLE transaction_referenced_table(id int PRIMARY KEY); CREATE TABLE transaction_referencing_table(id int, ref_id int); BEGIN; ALTER TABLE transaction_referencing_table ADD CONSTRAINT transaction_fk_constraint FOREIGN KEY(ref_id) REFERENCES transaction_referenced_table(id); COMMIT; -- test insertion to referencing table, we expect that to fail INSERT INTO transaction_referencing_table VALUES(1, 1); -- proper insertion to both referenced and referencing tables INSERT INTO transaction_referenced_table VALUES(1); INSERT INTO transaction_referencing_table VALUES(1, 1); -- verify that rows are actually inserted SELECT * FROM transaction_referenced_table; SELECT * FROM transaction_referencing_table; -- we no longer need those tables DROP TABLE transaction_referencing_table; DROP TABLE transaction_referenced_table; -- test self referencing foreign key CREATE TABLE self_referencing_table1( id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column), FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table1(id, other_column) ); SELECT create_distributed_table('self_referencing_table1', 'id', 'hash'); -- test insertion to self referencing table INSERT INTO self_referencing_table1 VALUES(1, 1, 1); -- we expect this query to fail INSERT INTO self_referencing_table1 VALUES(1, 2, 3); -- verify that rows are actually inserted SELECT * FROM self_referencing_table1; -- we no longer need those tables DROP TABLE self_referencing_table1; -- test self referencing foreign key with ALTER TABLE CREATE TABLE self_referencing_table2(id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column)); SELECT create_distributed_table('self_referencing_table2', 'id', 'hash'); ALTER TABLE self_referencing_table2 ADD CONSTRAINT self_referencing_fk_constraint FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_table2(id, other_column); -- test insertion to self referencing table INSERT INTO self_referencing_table2 VALUES(1, 1, 1); -- we expect this query to fail INSERT INTO self_referencing_table2 VALUES(1, 2, 3); -- verify that rows are actually inserted SELECT * FROM self_referencing_table2; -- we no longer need those tables DROP TABLE self_referencing_table2; -- test reference tables -- test foreign key creation on CREATE TABLE from reference table CREATE TABLE referenced_by_reference_table(id int PRIMARY KEY, other_column int); SELECT create_distributed_table('referenced_by_reference_table', 'id'); CREATE TABLE reference_table(id int, referencing_column int 
REFERENCES referenced_by_reference_table(id)); SELECT create_reference_table('reference_table'); -- test foreign key creation on CREATE TABLE to reference table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); CREATE TABLE references_to_reference_table(id int, referencing_column int REFERENCES reference_table(id)); SELECT create_distributed_table('references_to_reference_table', 'referencing_column'); -- test foreign key creation on CREATE TABLE from + to reference table CREATE TABLE reference_table_second(id int, referencing_column int REFERENCES reference_table(id)); SELECT create_reference_table('reference_table_second'); -- test foreign key creation on CREATE TABLE from reference table to local table CREATE TABLE referenced_local_table(id int PRIMARY KEY, other_column int); DROP TABLE reference_table CASCADE; CREATE TABLE reference_table(id int, referencing_column int REFERENCES referenced_local_table(id)); SELECT create_reference_table('reference_table'); -- test foreign key creation on CREATE TABLE on self referencing reference table CREATE TABLE self_referencing_reference_table( id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column), FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); -- test foreign key creation on ALTER TABLE from reference table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_by_reference_table(id); -- test foreign key creation on ALTER TABLE to reference table DROP TABLE references_to_reference_table; CREATE TABLE references_to_reference_table(id int, referencing_column int); SELECT create_distributed_table('references_to_reference_table', 'referencing_column'); ALTER TABLE references_to_reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); -- test foreign key creation on ALTER TABLE from + to reference table DROP TABLE reference_table_second; CREATE TABLE reference_table_second(id int, referencing_column int); SELECT create_reference_table('reference_table_second'); ALTER TABLE reference_table_second ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES reference_table(id); -- test foreign key creation on ALTER TABLE from reference table to local table DROP TABLE reference_table; CREATE TABLE reference_table(id int PRIMARY KEY, referencing_column int); SELECT create_reference_table('reference_table'); ALTER TABLE reference_table ADD CONSTRAINT fk FOREIGN KEY(referencing_column) REFERENCES referenced_local_table(id); -- test foreign key creation on ALTER TABLE on self referencing reference table DROP TABLE self_referencing_reference_table; CREATE TABLE self_referencing_reference_table( id int, other_column int, other_column_ref int, PRIMARY KEY(id, other_column) ); SELECT create_reference_table('self_referencing_reference_table'); ALTER TABLE self_referencing_reference_table ADD CONSTRAINT fk FOREIGN KEY(id, other_column_ref) REFERENCES self_referencing_reference_table(id, other_column); -- we no longer need those tables DROP TABLE referenced_by_reference_table, references_to_reference_table, reference_table, reference_table_second, referenced_local_table, 
self_referencing_reference_table; citus-7.0.3/src/test/regress/sql/multi_function_evaluation.sql000066400000000000000000000111531317107136600247040ustar00rootroot00000000000000-- -- MULTI_FUNCTION_EVALUATION -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1200000; -- nextval() works (no good way to test DEFAULT, or, by extension, SERIAL) CREATE TABLE example (key INT, value INT); SELECT master_create_distributed_table('example', 'key', 'hash'); CREATE SEQUENCE example_value_seq; SELECT master_create_worker_shards('example', 1, 2); INSERT INTO example VALUES (1, nextval('example_value_seq')); SELECT * FROM example; -- functions called by prepared statements are also evaluated PREPARE stmt AS INSERT INTO example VALUES (2); EXECUTE stmt; EXECUTE stmt; SELECT * FROM example; -- non-immutable functions inside CASE/COALESCE aren't allowed ALTER TABLE example DROP value; ALTER TABLE example ADD value timestamp; -- this is allowed because there are no mutable funcs in the CASE UPDATE example SET value = (CASE WHEN value > timestamp '12-12-1991' THEN timestamp '12-12-1991' ELSE value + interval '1 hour' END) WHERE key = 1; -- this is allowed because the planner strips away the CASE during constant evaluation UPDATE example SET value = CASE WHEN true THEN now() ELSE now() + interval '1 hour' END WHERE key = 1; -- this is not allowed because there're mutable functions in a CaseWhen clause -- (which we can't easily evaluate on the master) UPDATE example SET value = (CASE WHEN now() > timestamp '12-12-1991' THEN now() ELSE timestamp '10-24-1190' END) WHERE key = 1; -- make sure we also check defresult (the ELSE clause) UPDATE example SET value = (CASE WHEN now() > timestamp '12-12-1991' THEN timestamp '12-12-1191' ELSE now() END) WHERE key = 1; -- COALESCE is allowed UPDATE example SET value = COALESCE(null, null, timestamp '10-10-1000') WHERE key = 1; -- COALESCE is not allowed if there are any mutable functions UPDATE example SET value = COALESCE(now(), timestamp '10-10-1000') WHERE key = 1; UPDATE example SET value = COALESCE(timestamp '10-10-1000', now()) WHERE key = 1; -- RowCompareExpr's are checked for mutability. These are allowed: ALTER TABLE example DROP value; ALTER TABLE example ADD value boolean; ALTER TABLE example ADD time_col timestamptz; UPDATE example SET value = NULLIF(ROW(1, 2) < ROW(2, 3), true) WHERE key = 1; UPDATE example SET value = NULLIF(ROW(true, 2) < ROW(value, 3), true) WHERE key = 1; -- But this RowCompareExpr is not (it passes Var into STABLE) UPDATE example SET value = NULLIF( ROW(date '10-10-1000', 2) < ROW(time_col, 3), true ) WHERE key = 1; -- DistinctExpr's are also checked for mutability. 
These are allowed: UPDATE example SET value = 1 IS DISTINCT FROM 2 WHERE key = 1; UPDATE example SET value = date '10-10-1000' IS DISTINCT FROM timestamptz '10-10-1000' WHERE key = 1; -- But this RowCompare references the STABLE = (date, timestamptz) operator UPDATE example SET value = date '10-10-1000' IS DISTINCT FROM time_col WHERE key = 1; -- this ScalarArrayOpExpr ("scalar op ANY/ALL (array)") is allowed UPDATE example SET value = date '10-10-1000' = ANY ('{10-10-1000}'::date[]) WHERE key = 1; -- this ScalarArrayOpExpr is not, it invokes the STABLE = (timestamptz, date) operator UPDATE example SET value = time_col = ANY ('{10-10-1000}'::date[]) WHERE key = 1; -- CoerceViaIO (typoutput -> typinput, a type coercion) ALTER TABLE example DROP value; ALTER TABLE example ADD value date; -- this one is allowed UPDATE example SET value = (timestamp '10-19-2000 13:29')::date WHERE key = 1; -- this one is not UPDATE example SET value = time_col::date WHERE key = 1; -- ArrayCoerceExpr (applies elemfuncid to each elem) ALTER TABLE example DROP value; ALTER TABLE example ADD value date[]; -- this one is allowed UPDATE example SET value = array[timestamptz '10-20-2013 10:20']::date[] WHERE key = 1; -- this one is not UPDATE example SET value = array[time_col]::date[] WHERE key = 1; -- test that UPDATE and DELETE also have the functions in WHERE evaluated ALTER TABLE example DROP time_col; ALTER TABLE example DROP value; ALTER TABLE example ADD value timestamptz; INSERT INTO example VALUES (3, now()); UPDATE example SET value = timestamp '10-10-2000 00:00' WHERE key = 3 AND value > now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; DELETE FROM example WHERE key = 3 AND value < now() - interval '1 hour'; SELECT * FROM example WHERE key = 3; -- test that function evaluation descends into expressions CREATE OR REPLACE FUNCTION stable_fn() RETURNS timestamptz STABLE LANGUAGE plpgsql AS $function$ BEGIN RAISE NOTICE 'stable_fn called'; RETURN timestamp '10-10-2000 00:00'; END; $function$; INSERT INTO example VALUES (44, (ARRAY[stable_fn(),stable_fn()])[1]); SELECT * FROM example WHERE key = 44; DROP FUNCTION stable_fn(); DROP TABLE example; citus-7.0.3/src/test/regress/sql/multi_generate_ddl_commands.sql000066400000000000000000000065751317107136600251420ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 610000; -- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION table_ddl_command_array(regclass) RETURNS text[] AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test ddl command generation functionality -- =================================================================== -- first make sure a simple table works CREATE TABLE simple_table ( first_name text, last_name text, id bigint ); SELECT table_ddl_command_array('simple_table'); -- ensure not-null constraints are propagated CREATE TABLE not_null_table ( city text, id bigint not null ); SELECT table_ddl_command_array('not_null_table'); -- ensure tables not in search path are schema-prefixed CREATE SCHEMA not_in_path CREATE TABLE simple_table (id bigint); SELECT table_ddl_command_array('not_in_path.simple_table'); -- even more complex constraints should be preserved... 
CREATE TABLE column_constraint_table ( first_name text, last_name text, age int CONSTRAINT non_negative_age CHECK (age >= 0) ); SELECT table_ddl_command_array('column_constraint_table'); -- including table constraints CREATE TABLE table_constraint_table ( bid_item_id bigint, min_bid decimal not null, max_bid decimal not null, CONSTRAINT bids_ordered CHECK (min_bid > max_bid) ); SELECT table_ddl_command_array('table_constraint_table'); -- default values are supported CREATE TABLE default_value_table ( name text, price decimal default 0.00 ); SELECT table_ddl_command_array('default_value_table'); -- of course primary keys work... CREATE TABLE pkey_table ( first_name text, last_name text, id bigint PRIMARY KEY ); SELECT table_ddl_command_array('pkey_table'); -- as do unique indexes... CREATE TABLE unique_table ( user_id bigint not null, username text UNIQUE not null ); SELECT table_ddl_command_array('unique_table'); -- and indexes used for clustering CREATE TABLE clustered_table ( data json not null, received_at timestamp not null ); CREATE INDEX clustered_time_idx ON clustered_table (received_at); CLUSTER clustered_table USING clustered_time_idx; SELECT table_ddl_command_array('clustered_table'); -- fiddly things like storage type and statistics also work CREATE TABLE fiddly_table ( hostname char(255) not null, os char(255) not null, ip_addr inet not null, traceroute text not null ); ALTER TABLE fiddly_table ALTER hostname SET STORAGE PLAIN, ALTER os SET STORAGE MAIN, ALTER ip_addr SET STORAGE EXTENDED, ALTER traceroute SET STORAGE EXTERNAL, ALTER ip_addr SET STATISTICS 500; SELECT table_ddl_command_array('fiddly_table'); -- test foreign tables using fake FDW CREATE FOREIGN TABLE foreign_table ( id bigint not null, full_name text not null default '' ) SERVER fake_fdw_server OPTIONS (encoding 'utf-8', compression 'true'); SELECT table_ddl_command_array('foreign_table'); -- propagating views is not supported CREATE VIEW local_view AS SELECT * FROM simple_table; SELECT table_ddl_command_array('local_view'); -- clean up DROP VIEW IF EXISTS local_view; DROP FOREIGN TABLE IF EXISTS foreign_table; DROP TABLE IF EXISTS simple_table, not_null_table, column_constraint_table, table_constraint_table, default_value_table, pkey_table, unique_table, clustered_table, fiddly_table; citus-7.0.3/src/test/regress/sql/multi_hash_pruning.sql000066400000000000000000000121231317107136600233130ustar00rootroot00000000000000-- -- MULTI_HASH_PRUNING -- -- Tests for shard and join pruning logic on hash partitioned tables. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 630000; -- Create a table partitioned on integer column and update partition type to -- hash. Then load data into this table and update shard min max values with -- hashed ones. Hash value of 1, 2, 3 and 4 are consecutively -1905060026, -- 1134484726, -28094569 and -1011077333. CREATE TABLE orders_hash_partitioned ( o_orderkey integer, o_custkey integer, o_orderstatus char(1), o_totalprice decimal(15,2), o_orderdate date, o_orderpriority char(15), o_clerk char(15), o_shippriority integer, o_comment varchar(79) ); SELECT master_create_distributed_table('orders_hash_partitioned', 'o_orderkey', 'hash'); SELECT master_create_worker_shards('orders_hash_partitioned', 4, 1); SET client_min_messages TO DEBUG2; -- Check that we can prune shards for simple cases, boolean expressions and -- immutable functions. 
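-- (Illustrative aside, not part of the original regression test: the pruning checks below
-- depend on the hash values quoted in the header comment above. Assuming Citus uses
-- PostgreSQL's default int4 hash function for integer partition columns, they can be
-- spot-checked with a query along the lines of:
--   SELECT v, hashint4(v) FROM (VALUES (1), (2), (3), (4)) AS t(v);
-- which should report -1905060026, -1134484726, -28094569 and -1011077333.)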
SELECT count(*) FROM orders_hash_partitioned;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa';
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
-- disable router planning
SET citus.enable_router_execution TO 'false';
SELECT count(*) FROM orders_hash_partitioned;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 2;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 3;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 4;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 AND o_clerk = 'aaa';
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = abs(-1);
SET citus.enable_router_execution TO DEFAULT;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is NULL;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey is not NULL;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey > 2;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey = 2;
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_clerk = 'aaa';
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR (o_orderkey = 3 AND o_clerk = 'aaa');
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_orderkey is NULL;
SELECT count(*) FROM (SELECT o_orderkey FROM orders_hash_partitioned WHERE o_orderkey = 1) AS orderkeys;
SET client_min_messages TO DEFAULT;
-- Check that we support pruning for ANY/IN with literal.
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY ('{1,2,3}');
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (1,2,3);
-- Check whether we can deal with null arrays
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL);
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL);
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (NULL) OR TRUE;
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (NULL) OR TRUE;
-- Check whether we support IN/ANY in subquery
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey IN (SELECT l_orderkey FROM lineitem_hash_part);
SELECT count(*) FROM lineitem_hash_part WHERE l_orderkey = ANY (SELECT l_orderkey FROM lineitem_hash_part);
-- Check whether we support IN/ANY in subquery with append and range distributed table
SELECT count(*) FROM lineitem WHERE l_orderkey = ANY ('{1,2,3}');
SELECT count(*) FROM lineitem WHERE l_orderkey IN (1,2,3);
SELECT count(*) FROM lineitem WHERE l_orderkey = ANY(NULL) OR TRUE;
SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY ('{1,2,3}');
SELECT count(*) FROM lineitem_range WHERE l_orderkey IN (1,2,3);
SELECT count(*) FROM lineitem_range WHERE l_orderkey = ANY(NULL) OR TRUE;
SET client_min_messages TO DEBUG2;
-- Check that we don't show the message if the operator is not
-- the equality operator
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey < ALL ('{1,2,3}');
-- Check that we don't give a spurious hint message when non-partition
-- columns are used with ANY/IN/ALL
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = 1 OR o_totalprice IN (2, 5);
-- Check that we cannot prune for mutable functions.
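-- (Descriptive note, not part of the original regression test: a volatile expression such
-- as random() cannot be folded to a constant at planning time, so the value of the
-- partition column is unknown when shards are pruned and no single shard can be picked.)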
SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random(); SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() OR o_orderkey = 1; SELECT count(*) FROM orders_hash_partitioned WHERE o_orderkey = random() AND o_orderkey = 1; -- Check that we can do join pruning. SELECT count(*) FROM orders_hash_partitioned orders1, orders_hash_partitioned orders2 WHERE orders1.o_orderkey = orders2.o_orderkey; SELECT count(*) FROM orders_hash_partitioned orders1, orders_hash_partitioned orders2 WHERE orders1.o_orderkey = orders2.o_orderkey AND orders1.o_orderkey = 1 AND orders2.o_orderkey is NULL; citus-7.0.3/src/test/regress/sql/multi_index_statements.sql000066400000000000000000000166121317107136600242130ustar00rootroot00000000000000-- -- MULTI_INDEX_STATEMENTS -- -- Check that we can run CREATE INDEX and DROP INDEX statements on distributed -- tables. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 640000; -- -- CREATE TEST TABLES -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102080; CREATE TABLE index_test_range(a int, b int, c int); SELECT master_create_distributed_table('index_test_range', 'a', 'range'); SELECT master_create_empty_shard('index_test_range'); SELECT master_create_empty_shard('index_test_range'); CREATE TABLE index_test_hash(a int, b int, c int); SELECT master_create_distributed_table('index_test_hash', 'a', 'hash'); SELECT master_create_worker_shards('index_test_hash', 8, 2); CREATE TABLE index_test_append(a int, b int, c int); SELECT master_create_distributed_table('index_test_append', 'a', 'append'); SELECT master_create_empty_shard('index_test_append'); SELECT master_create_empty_shard('index_test_append'); -- -- CREATE INDEX -- -- Verify that we can create different types of indexes CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey); CREATE INDEX lineitem_partkey_desc_index ON lineitem (l_partkey DESC); CREATE INDEX lineitem_partial_index ON lineitem (l_shipdate) WHERE l_shipdate < '1995-01-01'; CREATE INDEX lineitem_colref_index ON lineitem (record_ne(lineitem.*, NULL)); SET client_min_messages = ERROR; -- avoid version dependant warning about WAL CREATE INDEX lineitem_orderkey_hash_index ON lineitem USING hash (l_partkey); CREATE UNIQUE INDEX index_test_range_index_a ON index_test_range(a); CREATE UNIQUE INDEX index_test_range_index_a_b ON index_test_range(a,b); CREATE UNIQUE INDEX index_test_hash_index_a ON index_test_hash(a); CREATE UNIQUE INDEX index_test_hash_index_a_b ON index_test_hash(a,b); CREATE UNIQUE INDEX index_test_hash_index_a_b_partial ON index_test_hash(a,b) WHERE c IS NOT NULL; CREATE UNIQUE INDEX index_test_range_index_a_b_partial ON index_test_range(a,b) WHERE c IS NOT NULL; RESET client_min_messages; -- Verify that we handle if not exists statements correctly CREATE INDEX lineitem_orderkey_index on lineitem(l_orderkey); CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on lineitem(l_orderkey); CREATE INDEX IF NOT EXISTS lineitem_orderkey_index_new on lineitem(l_orderkey); -- Verify if not exists behavior with an index with same name on a different table CREATE INDEX lineitem_orderkey_index on index_test_hash(a); CREATE INDEX IF NOT EXISTS lineitem_orderkey_index on index_test_hash(a); -- Verify that we can create indexes concurrently CREATE INDEX CONCURRENTLY lineitem_concurrently_index ON lineitem (l_orderkey); -- Verify that no-name local CREATE INDEX CONCURRENTLY works CREATE TABLE local_table (id integer, name text); CREATE INDEX CONCURRENTLY ON local_table(id); DROP TABLE local_table; -- Verify 
-- that all indexes got created on the master node and one of the workers
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
\c - - - :worker_1_port
SELECT count(*) FROM pg_indexes WHERE tablename = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1);
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_hash%';
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_range%';
SELECT count(*) FROM pg_indexes WHERE tablename LIKE 'index_test_append%';
\c - - - :master_port
-- Verify that we error out on unsupported statement types
CREATE UNIQUE INDEX try_index ON lineitem (l_orderkey);
CREATE INDEX try_index ON lineitem (l_orderkey) TABLESPACE newtablespace;
CREATE UNIQUE INDEX try_unique_range_index ON index_test_range(b);
CREATE UNIQUE INDEX try_unique_range_index_partial ON index_test_range(b) WHERE c IS NOT NULL;
CREATE UNIQUE INDEX try_unique_hash_index ON index_test_hash(b);
CREATE UNIQUE INDEX try_unique_hash_index_partial ON index_test_hash(b) WHERE c IS NOT NULL;
CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(b);
CREATE UNIQUE INDEX try_unique_append_index ON index_test_append(a);
CREATE UNIQUE INDEX try_unique_append_index_a_b ON index_test_append(a,b);
-- Verify that we error out in case of postgres errors on supported statement
-- types.
CREATE INDEX lineitem_orderkey_index ON lineitem (l_orderkey);
CREATE INDEX try_index ON lineitem USING gist (l_orderkey);
CREATE INDEX try_index ON lineitem (non_existent_column);
CREATE INDEX ON lineitem (l_orderkey);
-- Verify that none of the failed indexes got created on the master node
SELECT * FROM pg_indexes WHERE tablename = 'lineitem' or tablename like 'index_test_%' ORDER BY indexname;
--
-- DROP INDEX
--
-- Verify that we can't drop multiple indexes in a single command
DROP INDEX lineitem_orderkey_index, lineitem_partial_index;
-- Verify that we can successfully drop indexes
DROP INDEX lineitem_orderkey_index;
DROP INDEX lineitem_orderkey_index_new;
DROP INDEX lineitem_partkey_desc_index;
DROP INDEX lineitem_partial_index;
DROP INDEX lineitem_colref_index;
-- Verify that we handle if exists statements correctly
DROP INDEX non_existent_index;
DROP INDEX IF EXISTS non_existent_index;
DROP INDEX IF EXISTS lineitem_orderkey_hash_index;
DROP INDEX lineitem_orderkey_hash_index;
DROP INDEX index_test_range_index_a;
DROP INDEX index_test_range_index_a_b;
DROP INDEX index_test_range_index_a_b_partial;
DROP INDEX index_test_hash_index_a;
DROP INDEX index_test_hash_index_a_b;
DROP INDEX index_test_hash_index_a_b_partial;
-- Verify that we can drop indexes concurrently
DROP INDEX CONCURRENTLY lineitem_concurrently_index;
-- Verify that all the indexes are dropped from the master and one worker node.
-- As there's a primary key, exclude those from this check.
SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; \c - - - :worker_1_port SELECT indrelid::regclass, indexrelid::regclass FROM pg_index WHERE indrelid = (SELECT relname FROM pg_class WHERE relname LIKE 'lineitem%' ORDER BY relname LIMIT 1)::regclass AND NOT indisprimary AND indexrelid::regclass::text NOT LIKE 'lineitem_time_index%'; SELECT * FROM pg_indexes WHERE tablename LIKE 'index_test_%' ORDER BY indexname; -- create index that will conflict with master operations CREATE INDEX CONCURRENTLY ith_b_idx_102089 ON index_test_hash_102089(b); \c - - - :master_port -- should fail because worker index already exists CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; -- we can clean it up and recreate with an DROP IF EXISTS DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; CREATE INDEX CONCURRENTLY ith_b_idx ON index_test_hash(b); SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; \c - - - :worker_1_port -- now drop shard index to test partial master DROP failure DROP INDEX CONCURRENTLY ith_b_idx_102089; \c - - - :master_port DROP INDEX CONCURRENTLY ith_b_idx; -- the failure results in an INVALID index SELECT indisvalid AS "Index Valid?" FROM pg_index WHERE indexrelid='ith_b_idx'::regclass; -- final clean up DROP INDEX CONCURRENTLY IF EXISTS ith_b_idx; -- Drop created tables DROP TABLE index_test_range; DROP TABLE index_test_hash; DROP TABLE index_test_append; citus-7.0.3/src/test/regress/sql/multi_insert_select.sql000066400000000000000000001626541317107136600235100ustar00rootroot00000000000000-- -- MULTI_INSERT_SELECT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 13300000; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 13300000; -- create co-located tables SET citus.shard_count = 4; SET citus.shard_replication_factor = 2; CREATE TABLE raw_events_first (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_first', 'user_id'); CREATE TABLE raw_events_second (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint, UNIQUE(user_id, value_1)); SELECT create_distributed_table('raw_events_second', 'user_id'); CREATE TABLE agg_events (user_id int, value_1_agg int, value_2_agg int, value_3_agg float, value_4_agg bigint, agg_time timestamp, UNIQUE(user_id, value_1_agg)); SELECT create_distributed_table('agg_events', 'user_id');; -- create the reference table as well CREATE TABLE reference_table (user_id int); SELECT create_reference_table('reference_table'); CREATE TABLE insert_select_varchar_test (key varchar, value int); SELECT create_distributed_table('insert_select_varchar_test', 'key', 'hash'); -- set back to the defaults SET citus.shard_count = DEFAULT; SET citus.shard_replication_factor = DEFAULT; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (1, now(), 10, 100, 1000.1, 10000); INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (2, now(), 20, 200, 2000.1, 20000); INSERT INTO raw_events_first 
(user_id, time, value_1, value_2, value_3, value_4) VALUES (3, now(), 30, 300, 3000.1, 30000);
INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (4, now(), 40, 400, 4000.1, 40000);
INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (5, now(), 50, 500, 5000.1, 50000);
INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (6, now(), 60, 600, 6000.1, 60000);
SET client_min_messages TO DEBUG2;
-- raw table to raw table
INSERT INTO raw_events_second SELECT * FROM raw_events_first;
-- see that our first multi shard INSERT...SELECT works as expected
SET client_min_messages TO INFO;
SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id;
-- see that we get unique violations
INSERT INTO raw_events_second SELECT * FROM raw_events_first;
-- stable functions should be allowed
INSERT INTO raw_events_second (user_id, time) SELECT user_id, now() FROM raw_events_first WHERE user_id < 0;
INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE time > now() + interval '1 day';
-- hide version-dependent PL/pgSQL context messages
\set VERBOSITY terse
-- make sure we evaluate stable functions on the master, once
CREATE OR REPLACE FUNCTION evaluate_on_master()
RETURNS int LANGUAGE plpgsql STABLE
AS $function$
BEGIN
    RAISE NOTICE 'evaluating on master';
    RETURN 0;
END;
$function$;
INSERT INTO raw_events_second (user_id, value_1) SELECT user_id, evaluate_on_master() FROM raw_events_first WHERE user_id < 0;
-- make sure we don't evaluate stable functions with column arguments
CREATE OR REPLACE FUNCTION evaluate_on_master(x int)
RETURNS int LANGUAGE plpgsql STABLE
AS $function$
BEGIN
    RAISE NOTICE 'evaluating on master';
    RETURN x;
END;
$function$;
INSERT INTO raw_events_second (user_id, value_1) SELECT user_id, evaluate_on_master(value_1) FROM raw_events_first WHERE user_id = 0;
\set VERBOSITY default
-- add one more row
INSERT INTO raw_events_first (user_id, time) VALUES (7, now());
-- try a single shard query
SET client_min_messages TO DEBUG2;
INSERT INTO raw_events_second (user_id, time) SELECT user_id, time FROM raw_events_first WHERE user_id = 7;
SET client_min_messages TO INFO;
-- add one more row
INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (8, now(), 80, 800, 8000, 80000);
-- reorder columns
SET client_min_messages TO DEBUG2;
INSERT INTO raw_events_second (value_2, value_1, value_3, value_4, user_id, time) SELECT value_2, value_1, value_3, value_4, user_id, time FROM raw_events_first WHERE user_id = 8;
-- a zero shard select
INSERT INTO raw_events_second (value_2, value_1, value_3, value_4, user_id, time) SELECT value_2, value_1, value_3, value_4, user_id, time FROM raw_events_first WHERE false;
-- another zero shard select
INSERT INTO raw_events_second (value_2, value_1, value_3, value_4, user_id, time) SELECT value_2, value_1, value_3, value_4, user_id, time FROM raw_events_first WHERE 0 != 0;
-- add one more row
SET client_min_messages TO INFO;
INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (9, now(), 90, 900, 9000, 90000);
-- show that RETURNING also works
SET client_min_messages TO DEBUG2;
INSERT INTO raw_events_second (user_id, value_1, value_3) SELECT user_id, value_1, value_3 FROM raw_events_first WHERE value_3 = 9000 RETURNING *;
-- hits two shards
INSERT INTO raw_events_second (user_id, value_1, value_3) SELECT
user_id, value_1, value_3 FROM raw_events_first WHERE user_id = 9 OR user_id = 16 RETURNING *; -- now do some aggregations INSERT INTO agg_events SELECT user_id, sum(value_1), avg(value_2), sum(value_3), count(value_4) FROM raw_events_first GROUP BY user_id; -- group by column not exists on the SELECT target list INSERT INTO agg_events (value_3_agg, value_4_agg, value_1_agg, user_id) SELECT sum(value_3), count(value_4), sum(value_1), user_id FROM raw_events_first GROUP BY value_2, user_id RETURNING *; -- some subquery tests INSERT INTO agg_events (value_1_agg, user_id) SELECT SUM(value_1), id FROM (SELECT raw_events_second.user_id AS id, raw_events_second.value_1 FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id) AS foo GROUP BY id ORDER BY id; -- subquery one more level depth INSERT INTO agg_events (value_4_agg, value_1_agg, user_id) SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id) AS foo ORDER BY id; -- join between subqueries INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id); -- add one more level subqueris on top of subquery JOINs INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; -- subqueries in WHERE clause INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE user_id = 2); INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE user_id != 2 AND value_1 = 2000) ON conflict (user_id, value_1) DO NOTHING; INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE false); INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT user_id FROM raw_events_second WHERE value_1 = 1000 OR value_1 = 2000 OR value_1 = 3000); -- lets mix subqueries in FROM clause and subqueries in WHERE INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = 
reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 1000) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second); -- some UPSERTS INSERT INTO agg_events AS ae ( user_id, value_1_agg, agg_time ) SELECT user_id, value_1, time FROM raw_events_first ON conflict (user_id, value_1_agg) DO UPDATE SET agg_time = EXCLUDED.agg_time WHERE ae.agg_time < EXCLUDED.agg_time; -- upserts with returning INSERT INTO agg_events AS ae ( user_id, value_1_agg, agg_time ) SELECT user_id, value_1, time FROM raw_events_first ON conflict (user_id, value_1_agg) DO UPDATE SET agg_time = EXCLUDED.agg_time WHERE ae.agg_time < EXCLUDED.agg_time RETURNING user_id, value_1_agg; INSERT INTO agg_events (user_id, value_1_agg) SELECT user_id, sum(value_1 + value_2) FROM raw_events_first GROUP BY user_id; -- FILTER CLAUSE INSERT INTO agg_events (user_id, value_1_agg) SELECT user_id, sum(value_1 + value_2) FILTER (where value_3 = 15) FROM raw_events_first GROUP BY user_id; -- a test with reference table JOINs INSERT INTO agg_events (user_id, value_1_agg) SELECT raw_events_first.user_id, sum(value_1) FROM reference_table, raw_events_first WHERE raw_events_first.user_id = reference_table.user_id GROUP BY raw_events_first.user_id; -- a note on the outer joins is that -- we filter out outer join results -- where the partition column returns -- NULL. Thus, we could INSERT fewer rows -- than we expect from the subquery result. -- see the following tests SET client_min_messages TO INFO; -- we don't want to see constraint violations, so truncate first TRUNCATE agg_events; -- add a row to first table to make table contents different INSERT INTO raw_events_second (user_id, time, value_1, value_2, value_3, value_4) VALUES (10, now(), 100, 10000, 10000, 100000); DELETE FROM raw_events_second WHERE user_id = 2; -- we select 11 rows SELECT t1.user_id AS col1, t2.user_id AS col2 FROM raw_events_first t1 FULL JOIN raw_events_second t2 ON t1.user_id = t2.user_id ORDER BY t1.user_id, t2.user_id; SET client_min_messages TO DEBUG2; -- we insert 10 rows since we filtered out -- NULL partition column values INSERT INTO agg_events (user_id, value_1_agg) SELECT t1.user_id AS col1, t2.user_id AS col2 FROM raw_events_first t1 FULL JOIN raw_events_second t2 ON t1.user_id = t2.user_id; SET client_min_messages TO INFO; -- see that the results are different from the SELECT query SELECT user_id, value_1_agg FROM agg_events ORDER BY user_id, value_1_agg; -- we don't want to see constraint violations, so truncate first SET client_min_messages TO INFO; TRUNCATE agg_events; SET client_min_messages TO DEBUG2; -- DISTINCT clause INSERT INTO agg_events (value_1_agg, user_id) SELECT DISTINCT value_1, user_id FROM raw_events_first; -- we don't want to see constraint violations, so truncate first SET client_min_messages TO INFO; truncate agg_events; SET client_min_messages TO DEBUG2; -- we do not support DISTINCT ON clauses INSERT INTO agg_events (value_1_agg, user_id) SELECT DISTINCT ON (value_1) value_1, user_id FROM raw_events_first; -- We do not support some CTEs WITH fist_table_agg AS (SELECT sum(value_1) as v1_agg, user_id FROM raw_events_first GROUP BY user_id) INSERT INTO agg_events (value_1_agg, user_id) SELECT v1_agg, user_id
FROM fist_table_agg; -- We don't support CTEs that consist of const values as well INSERT INTO agg_events WITH sub_cte AS (SELECT 1) SELECT raw_events_first.user_id, (SELECT * FROM sub_cte) FROM raw_events_first; -- We support set operations via the coordinator BEGIN; INSERT INTO raw_events_first(user_id) SELECT user_id FROM ((SELECT user_id FROM raw_events_first) UNION (SELECT user_id FROM raw_events_second)) as foo; ROLLBACK; -- We do not support any set operations INSERT INTO raw_events_first(user_id) (SELECT user_id FROM raw_events_first) INTERSECT (SELECT user_id FROM raw_events_first); -- If the query is router plannable then it is executed via the coordinator INSERT INTO raw_events_first(user_id) SELECT user_id FROM ((SELECT user_id FROM raw_events_first WHERE user_id = 15) EXCEPT (SELECT user_id FROM raw_events_second where user_id = 17)) as foo; -- some supported LEFT joins INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id; INSERT INTO agg_events (user_id) SELECT raw_events_second.user_id FROM reference_table LEFT JOIN raw_events_second ON reference_table.user_id = raw_events_second.user_id; INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id = 10; INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_second.user_id = 10 OR raw_events_second.user_id = 11; INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id = 10 AND raw_events_first.user_id = 20; INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id = 10 AND raw_events_second.user_id = 20; INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_first.user_id IN (19, 20, 21); INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.user_id WHERE raw_events_second.user_id IN (19, 20, 21); -- the following is a very tricky query for Citus -- although we do not support pushing down JOINs on non-partition -- columns here it is safe to push it down given that we're looking for -- a specific value (i.e., value_1 = 12) on the joining column. 
-- Note that the query always hits the same shard on raw_events_second -- and this query wouldn't have worked if we're to use different worker -- count or shard replication factor INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1 AND raw_events_first.value_1 = 12; -- some unsupported LEFT/INNER JOINs -- JOIN on one table with partition column other is not INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1; -- same as the above with INNER JOIN INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1; -- a not meaningful query INSERT INTO agg_events (user_id) SELECT raw_events_second.user_id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_first.value_1; -- both tables joined on non-partition columns INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.value_1 = raw_events_second.value_1; -- same as the above with INNER JOIN INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.value_1 = raw_events_second.value_1; -- even if there is a filter on the partition key, since the join is not on the partition key we reject -- this query INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first LEFT JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1 WHERE raw_events_first.user_id = 10; -- same as the above with INNER JOIN INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1 WHERE raw_events_first.user_id = 10; -- make things a bit more complicate with IN clauses INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first INNER JOIN raw_events_second ON raw_events_first.user_id = raw_events_second.value_1 WHERE raw_events_first.value_1 IN (10, 11,12) OR raw_events_second.user_id IN (1,2,3,4); -- implicit join on non partition column should also not be pushed down INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1; -- the following is again a tricky query for Citus -- if the given filter was on value_1 as shown in the above, Citus could -- push it down. 
But here the query is refused INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1 AND raw_events_first.value_2 = 12; -- lets do some unsupported query tests with subqueries -- foo is not joined on the partition key so the query is not -- pushed down INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first LEFT JOIN reference_table ON (raw_events_first.value_1 = reference_table.user_id)) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; -- if the given filter was on value_1 as shown in the above, Citus could -- push it down. But here the query is refused INSERT INTO agg_events (user_id) SELECT raw_events_first.user_id FROM raw_events_first, raw_events_second WHERE raw_events_second.user_id = raw_events_first.value_1 AND raw_events_first.value_2 = 12; -- lets do some unsupported query tests with subqueries -- foo is not joined on the partition key so the query is not -- pushed down INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first LEFT JOIN reference_table ON (raw_events_first.value_1 = reference_table.user_id)) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; INSERT INTO agg_events (value_4_agg, value_1_agg, user_id) SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id != raw_events_second.user_id GROUP BY raw_events_second.user_id) AS foo; -- INSERT partition column does not match with SELECT partition column INSERT INTO agg_events (value_4_agg, value_1_agg, user_id) SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.value_3 AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.value_3) AS foo; -- error cases -- no part column at all INSERT INTO raw_events_second (value_1) SELECT value_1 FROM raw_events_first; INSERT INTO raw_events_second (value_1) SELECT user_id FROM raw_events_first; INSERT INTO raw_events_second (user_id) SELECT value_1 FROM raw_events_first; INSERT INTO raw_events_second (user_id) SELECT user_id * 2 FROM raw_events_first; INSERT INTO raw_events_second (user_id) SELECT user_id :: bigint FROM raw_events_first; INSERT INTO agg_events (value_3_agg, value_4_agg, value_1_agg, value_2_agg, user_id) SELECT SUM(value_3), 
Count(value_4), user_id, SUM(value_1), Avg(value_2) FROM raw_events_first GROUP BY user_id; INSERT INTO agg_events (value_3_agg, value_4_agg, value_1_agg, value_2_agg, user_id) SELECT SUM(value_3), Count(value_4), user_id, SUM(value_1), value_2 FROM raw_events_first GROUP BY user_id, value_2; -- tables should be co-located INSERT INTO agg_events (user_id) SELECT user_id FROM reference_table; -- unsupported joins between subqueries -- we do not return bare partition column on the inner query INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, SUM(raw_events_second.user_id) AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.value_1 HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id); -- the second part of the query is not routable since -- GROUP BY not on the partition column (i.e., value_1) and thus join -- on f.id = f2.id is not on the partition key (instead on the sum of partition key) INSERT INTO agg_events (user_id) SELECT f.id FROM (SELECT id FROM (SELECT raw_events_first.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, raw_events_second.value_1 AS v1, SUM(raw_events_second.user_id) AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.value_1 HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id); -- cannot pushdown the query since the JOIN is not equi JOIN INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id != f2.id)) as outer_most GROUP BY outer_most.id; -- cannot pushdown since foo2 is not join on partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.value_1 GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; -- cannot push down since foo doesn't have en equi join INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( 
SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id != reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; -- some unsupported LATERAL JOINs -- join on averages is not on the partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT averages.user_id, avg(averages.value_4) FROM (SELECT raw_events_second.user_id FROM reference_table JOIN raw_events_second on (reference_table.user_id = raw_events_second.user_id) ) reference_ids JOIN LATERAL (SELECT user_id, value_4 FROM raw_events_first WHERE value_4 = reference_ids.user_id) as averages ON true GROUP BY averages.user_id; -- join among reference_ids and averages is not on the partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT averages.user_id, avg(averages.value_4) FROM (SELECT raw_events_second.user_id FROM reference_table JOIN raw_events_second on (reference_table.user_id = raw_events_second.user_id) ) reference_ids JOIN LATERAL (SELECT user_id, value_4 FROM raw_events_first) as averages ON averages.value_4 = reference_ids.user_id GROUP BY averages.user_id; -- join among the agg_ids and averages is not on the partition key INSERT INTO agg_events (user_id, value_4_agg) SELECT averages.user_id, avg(averages.value_4) FROM (SELECT raw_events_second.user_id FROM reference_table JOIN raw_events_second on (reference_table.user_id = raw_events_second.user_id) ) reference_ids JOIN LATERAL (SELECT user_id, value_4 FROM raw_events_first) as averages ON averages.user_id = reference_ids.user_id JOIN LATERAL (SELECT user_id, value_4 FROM agg_events) as agg_ids ON (agg_ids.value_4 = averages.user_id) GROUP BY averages.user_id; -- not supported subqueries in WHERE clause -- since the selected value in the WHERE is not -- partition key INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT value_1 FROM raw_events_second); -- same as above but slightly more complex -- since it also includes subquery in FROM as well INSERT INTO agg_events (user_id) SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT value_1 FROM raw_events_second); -- some more semi-anti join tests -- join in where INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN (SELECT raw_events_second.user_id FROM raw_events_second, raw_events_first WHERE raw_events_second.user_id = raw_events_first.user_id AND raw_events_first.user_id = 200); -- we cannot push this down since it is NOT IN INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id NOT IN (SELECT 
raw_events_second.user_id FROM raw_events_second, raw_events_first WHERE raw_events_second.user_id = raw_events_first.user_id AND raw_events_first.user_id = 200); -- safe to push down INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE EXISTS (SELECT 1 FROM raw_events_second WHERE raw_events_second.user_id =raw_events_first.user_id); -- we cannot push down INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE NOT EXISTS (SELECT 1 FROM raw_events_second WHERE raw_events_second.user_id =raw_events_first.user_id); -- more complex LEFT JOINs INSERT INTO agg_events (user_id, value_4_agg) SELECT outer_most.id, max(outer_most.value) FROM ( SELECT f2.id as id, f2.v4 as value FROM (SELECT id FROM (SELECT raw_events_first.user_id AS id FROM raw_events_first LEFT JOIN reference_table ON (raw_events_first.user_id = reference_table.user_id)) AS foo) as f LEFT JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id)) as outer_most GROUP BY outer_most.id; -- cannot push down since the f.id IN is matched with value_1 INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT value_1 FROM raw_events_second)); -- same as above, but this time is it safe to push down since -- f.id IN is matched with user_id INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second)); -- cannot push down since top level user_id is matched with NOT IN INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id NOT IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS 
foo2 ) as f2 ON (f.id = f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second)); -- cannot push down since the join is not an equi join (f.id > f2.id) INSERT INTO raw_events_second (user_id) SELECT user_id FROM raw_events_first WHERE user_id IN ( SELECT f2.id FROM (SELECT id FROM (SELECT reference_table.user_id AS id FROM raw_events_first, reference_table WHERE raw_events_first.user_id = reference_table.user_id ) AS foo) as f INNER JOIN (SELECT v4, v1, id FROM (SELECT SUM(raw_events_second.value_4) AS v4, SUM(raw_events_first.value_1) AS v1, raw_events_second.user_id AS id FROM raw_events_first, raw_events_second WHERE raw_events_first.user_id = raw_events_second.user_id GROUP BY raw_events_second.user_id HAVING SUM(raw_events_second.value_4) > 10) AS foo2 ) as f2 ON (f.id > f2.id) WHERE f.id IN (SELECT user_id FROM raw_events_second)); -- we currently do not support grouping sets INSERT INTO agg_events (user_id, value_1_agg, value_2_agg) SELECT user_id, Sum(value_1) AS sum_val1, Sum(value_2) AS sum_val2 FROM raw_events_second GROUP BY grouping sets ( ( user_id ), ( value_1 ), ( user_id, value_1 ), ( ) ); -- set back to INFO SET client_min_messages TO INFO; -- avoid constraint violations TRUNCATE raw_events_first; -- we don't support LIMIT even if it exists in the subqueries -- in where clause INSERT INTO agg_events(user_id) SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM ( ( SELECT user_id FROM ( SELECT e1.user_id FROM users_table u1, events_table e1 WHERE e1.user_id = u1.user_id LIMIT 3 ) as f_inner ) ) AS f2); -- Altering a table and selecting from it using a multi-shard statement -- in the same transaction is allowed because we will use the same -- connections for all co-located placements. BEGIN; ALTER TABLE raw_events_second DROP COLUMN value_4; INSERT INTO raw_events_first SELECT * FROM raw_events_second; ROLLBACK; -- Altering a table and selecting from it using a single-shard statement -- in the same transaction is disallowed because we will use a different -- connection. BEGIN; ALTER TABLE raw_events_second DROP COLUMN value_4; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 100; ROLLBACK; -- Altering a reference table and then performing an INSERT ... SELECT which -- joins with the reference table is not allowed, since the INSERT ... SELECT -- would read from the reference table over other connections than the ones -- that performed the DDL. BEGIN; ALTER TABLE reference_table ADD COLUMN z int; INSERT INTO raw_events_first (user_id) SELECT user_id FROM raw_events_second JOIN reference_table USING (user_id); ROLLBACK; -- Insert after copy is allowed BEGIN; COPY raw_events_second (user_id, value_1) FROM STDIN DELIMITER ','; 100,100 \. INSERT INTO raw_events_first SELECT * FROM raw_events_second; ROLLBACK; -- Insert after copy is currently allowed for single-shard operation. -- Both insert and copy are rolled back successfully. BEGIN; COPY raw_events_second (user_id, value_1) FROM STDIN DELIMITER ','; 101,101 \. INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 101; SELECT user_id FROM raw_events_first WHERE user_id = 101; ROLLBACK; BEGIN; INSERT INTO raw_events_first SELECT * FROM raw_events_second; COPY raw_events_first (user_id, value_1) FROM STDIN DELIMITER ','; 102,102 \. ROLLBACK; BEGIN; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 100; COPY raw_events_first (user_id, value_1) FROM STDIN DELIMITER ','; 103,103 \.
ROLLBACK; -- Similarly, multi-row INSERTs will take part in transactions and reuse connections... BEGIN; INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 100; COPY raw_events_first (user_id, value_1) FROM STDIN DELIMITER ','; 104,104 \. INSERT INTO raw_events_first (user_id, value_1) VALUES (105, 105), (106, 106); ROLLBACK; -- selecting from views works CREATE VIEW test_view AS SELECT * FROM raw_events_first; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (16, now(), 60, 600, 6000.1, 60000); SELECT count(*) FROM raw_events_second; INSERT INTO raw_events_second SELECT * FROM test_view; INSERT INTO raw_events_first (user_id, time, value_1, value_2, value_3, value_4) VALUES (17, now(), 60, 600, 6000.1, 60000); INSERT INTO raw_events_second SELECT * FROM test_view WHERE user_id = 17 GROUP BY 1,2,3,4,5,6; SELECT count(*) FROM raw_events_second; -- we need this in our next test truncate raw_events_first; SET client_min_messages TO DEBUG2; -- first show that the query works now INSERT INTO raw_events_first SELECT * FROM raw_events_second; SET client_min_messages TO INFO; truncate raw_events_first; SET client_min_messages TO DEBUG2; -- now show that it works for a single shard query as well INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5; SET client_min_messages TO INFO; -- if a single shard of the SELECT is unhealthy, the query should fail UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 13300004 AND nodeport = :worker_1_port; truncate raw_events_first; SET client_min_messages TO DEBUG2; -- this should fail INSERT INTO raw_events_first SELECT * FROM raw_events_second; -- this should also fail INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5; -- but this should work given that it hits a different shard INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 6; SET client_min_messages TO INFO; -- mark the unhealthy placement as healthy again for the next tests UPDATE pg_dist_shard_placement SET shardstate = 1 WHERE shardid = 13300004 AND nodeport = :worker_1_port; -- now we should show that it works if one of the target shard intervals is not healthy UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 13300000 AND nodeport = :worker_1_port; truncate raw_events_first; SET client_min_messages TO DEBUG2; -- this should work INSERT INTO raw_events_first SELECT * FROM raw_events_second; SET client_min_messages TO INFO; truncate raw_events_first; SET client_min_messages TO DEBUG2; -- this should also work INSERT INTO raw_events_first SELECT * FROM raw_events_second WHERE user_id = 5; SET client_min_messages TO INFO; -- now do some tests with varchars INSERT INTO insert_select_varchar_test VALUES ('test_1', 10); INSERT INTO insert_select_varchar_test VALUES ('test_2', 30); INSERT INTO insert_select_varchar_test (key, value) SELECT *, 100 FROM (SELECT f1.key FROM (SELECT key FROM insert_select_varchar_test GROUP BY 1 HAVING Count(key) < 3) AS f1, (SELECT key FROM insert_select_varchar_test GROUP BY 1 HAVING Sum(COALESCE(insert_select_varchar_test.value, 0)) > 20.0) AS f2 WHERE f1.key = f2.key GROUP BY 1) AS foo; SELECT * FROM insert_select_varchar_test; -- some tests with DEFAULT columns and constant values -- this test is mainly intended to check that the query is deparsed correctly -- but it is still preferable to have this test here instead of multi_deparse_shard_query CREATE TABLE table_with_defaults ( store_id int,
first_name text, default_1 int DEFAULT 1, last_name text, default_2 text DEFAULT '2' ); -- we don't need many shards SET citus.shard_count = 2; SELECT create_distributed_table('table_with_defaults', 'store_id'); -- let's see the queries SET client_min_messages TO DEBUG2; -- a very simple query INSERT INTO table_with_defaults SELECT * FROM table_with_defaults; -- see that defaults are filled INSERT INTO table_with_defaults (store_id, first_name) SELECT store_id, first_name FROM table_with_defaults; -- shuffle one of the defaults and skip the other INSERT INTO table_with_defaults (default_2, store_id, first_name) SELECT default_2, store_id, first_name FROM table_with_defaults; -- shuffle both defaults INSERT INTO table_with_defaults (default_2, store_id, default_1, first_name) SELECT default_2, store_id, default_1, first_name FROM table_with_defaults; -- use constants instead of a non-default column INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name) SELECT default_2, 'Freund', store_id, 'Andres' FROM table_with_defaults; -- use constants instead of a non-default column and skip both defaults INSERT INTO table_with_defaults (last_name, store_id, first_name) SELECT 'Freund', store_id, 'Andres' FROM table_with_defaults; -- use constants instead of default columns INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name, default_1) SELECT 20, last_name, store_id, first_name, 10 FROM table_with_defaults; -- use constants instead of both default columns and non-default columns INSERT INTO table_with_defaults (default_2, last_name, store_id, first_name, default_1) SELECT 20, 'Freund', store_id, 'Andres', 10 FROM table_with_defaults; -- some of the ultimate queries where we have constants, -- defaults and a group by entry that is not on the target list INSERT INTO table_with_defaults (default_2, store_id, first_name) SELECT '2000', store_id, 'Andres' FROM table_with_defaults GROUP BY last_name, store_id; INSERT INTO table_with_defaults (default_1, store_id, first_name, default_2) SELECT 1000, store_id, 'Andres', '2000' FROM table_with_defaults GROUP BY last_name, store_id, first_name; INSERT INTO table_with_defaults (default_1, store_id, first_name, default_2) SELECT 1000, store_id, 'Andres', '2000' FROM table_with_defaults GROUP BY last_name, store_id, first_name, default_2; INSERT INTO table_with_defaults (default_1, store_id, first_name) SELECT 1000, store_id, 'Andres' FROM table_with_defaults GROUP BY last_name, store_id, first_name, default_2; RESET client_min_messages; -- Stable function in default should be allowed ALTER TABLE table_with_defaults ADD COLUMN t timestamptz DEFAULT now(); INSERT INTO table_with_defaults (store_id, first_name, last_name) SELECT store_id, 'first '||store_id, 'last '||store_id FROM table_with_defaults GROUP BY store_id, first_name, last_name; -- Volatile function in default should be disallowed CREATE TABLE table_with_serial ( store_id int, s bigserial ); SELECT create_distributed_table('table_with_serial', 'store_id'); INSERT INTO table_with_serial (store_id) SELECT store_id FROM table_with_defaults GROUP BY store_id; -- do some more error/error message checks SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; CREATE TABLE text_table (part_col text, val int); CREATE TABLE char_table (part_col char[], val int); create table table_with_starts_with_defaults (a int DEFAULT 5, b int, c int); SELECT create_distributed_table('text_table', 'part_col'); SELECT create_distributed_table('char_table','part_col'); SELECT
create_distributed_table('table_with_starts_with_defaults', 'c'); SET client_min_messages TO DEBUG; INSERT INTO text_table (part_col) SELECT CASE WHEN part_col = 'onder' THEN 'marco' END FROM text_table ; INSERT INTO text_table (part_col) SELECT COALESCE(part_col, 'onder') FROM text_table; INSERT INTO text_table (part_col) SELECT GREATEST(part_col, 'jason') FROM text_table; INSERT INTO text_table (part_col) SELECT LEAST(part_col, 'andres') FROM text_table; INSERT INTO text_table (part_col) SELECT NULLIF(part_col, 'metin') FROM text_table; INSERT INTO text_table (part_col) SELECT part_col isnull FROM text_table; INSERT INTO text_table (part_col) SELECT part_col::text from char_table; INSERT INTO text_table (part_col) SELECT (part_col = 'burak') is true FROM text_table; INSERT INTO text_table (part_col) SELECT val FROM text_table; INSERT INTO text_table (part_col) SELECT val::text FROM text_table; RESET client_min_messages; insert into table_with_starts_with_defaults (b,c) select b,c FROM table_with_starts_with_defaults; -- Test on partition column without native hash function CREATE TABLE raw_table ( id BIGINT, time DATE ); CREATE TABLE summary_table ( time DATE, count BIGINT ); SELECT create_distributed_table('raw_table', 'time'); SELECT create_distributed_table('summary_table', 'time'); INSERT INTO raw_table VALUES(1, '11-11-1980'); INSERT INTO summary_table SELECT time, COUNT(*) FROM raw_table GROUP BY time; SELECT * FROM summary_table; -- Test INSERT ... SELECT via coordinator -- Select from constants TRUNCATE raw_events_first; INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM (VALUES (1,2), (3,4), (5,6)) AS v(int,int); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id; -- Select from local functions TRUNCATE raw_events_first; CREATE SEQUENCE insert_select_test_seq; SET client_min_messages TO DEBUG; INSERT INTO raw_events_first (user_id, value_1, value_2) SELECT s, nextval('insert_select_test_seq'), (random()*10)::int FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- ON CONFLICT is unsupported INSERT INTO raw_events_first (user_id, value_1) SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s ON CONFLICT DO NOTHING; -- RETURNING is unsupported INSERT INTO raw_events_first (user_id, value_1) SELECT s, nextval('insert_select_test_seq') FROM generate_series(1, 5) s RETURNING *; RESET client_min_messages; -- INSERT ... SELECT and multi-shard SELECT in the same transaction is unsupported TRUNCATE raw_events_first; BEGIN; INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first; ROLLBACK; -- INSERT ... 
SELECT and single-shard SELECT in the same transaction is supported TRUNCATE raw_events_first; BEGIN; INSERT INTO raw_events_first (user_id, value_1) SELECT s, s FROM generate_series(1, 5) s; SELECT user_id, value_1 FROM raw_events_first WHERE user_id = 1; COMMIT; -- Select from local table TRUNCATE raw_events_first; CREATE TEMPORARY TABLE raw_events_first_local AS SELECT s AS u, 2*s AS v FROM generate_series(1, 5) s; INSERT INTO raw_events_first (user_id, value_1) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- Use columns in opposite order TRUNCATE raw_events_first; INSERT INTO raw_events_first (value_1, user_id) SELECT u, v FROM raw_events_first_local; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- Set operations can work with opposite column order TRUNCATE raw_events_first; INSERT INTO raw_events_first (value_3, user_id) ( SELECT v, u::bigint FROM raw_events_first_local ) UNION ALL ( SELECT v, u FROM raw_events_first_local ); SELECT user_id, value_3 FROM raw_events_first ORDER BY user_id, value_3; -- Select from other distributed table with limit TRUNCATE raw_events_first; TRUNCATE raw_events_second; INSERT INTO raw_events_second (user_id, value_4) SELECT s, 3*s FROM generate_series (1,5) s; INSERT INTO raw_events_first (user_id, value_1) SELECT user_id, value_4 FROM raw_events_second LIMIT 5; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- CTEs are supported in local queries TRUNCATE raw_events_first; WITH removed_rows AS ( DELETE FROM raw_events_first_local RETURNING u ) INSERT INTO raw_events_first (user_id, value_1) WITH value AS (SELECT 1) SELECT * FROM removed_rows, value; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- nested CTEs are also supported TRUNCATE raw_events_first; INSERT INTO raw_events_first_local SELECT s, 2*s FROM generate_series(0, 10) s; WITH rows_to_remove AS ( SELECT u FROM raw_events_first_local WHERE u > 0 ), removed_rows AS ( DELETE FROM raw_events_first_local WHERE u IN (SELECT * FROM rows_to_remove) RETURNING u, v ) INSERT INTO raw_events_first (user_id, value_1) WITH ultra_rows AS ( WITH numbers AS ( SELECT s FROM generate_series(1,10) s ), super_rows AS ( SELECT u, v FROM removed_rows JOIN numbers ON (u = s) ) SELECT * FROM super_rows LIMIT 5 ) SELECT u, v FROM ultra_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- CTEs with duplicate names are also supported TRUNCATE raw_events_first; WITH super_rows AS ( SELECT u FROM raw_events_first_local ) INSERT INTO raw_events_first (user_id, value_1) WITH super_rows AS ( SELECT * FROM super_rows GROUP BY u ) SELECT u, 5 FROM super_rows; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- CTEs are supported in router queries TRUNCATE raw_events_first; WITH user_two AS ( SELECT user_id, value_4 FROM raw_events_second WHERE user_id = 2 ) INSERT INTO raw_events_first (user_id, value_1) SELECT * FROM user_two; SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- CTEs are supported when there are name collisions WITH numbers AS ( SELECT s FROM generate_series(1,10) s ) INSERT INTO raw_events_first(user_id, value_1) WITH numbers AS ( SELECT s, s FROM generate_series(1,5) s ) SELECT * FROM numbers; -- Select into distributed table with a sequence CREATE TABLE "CaseSensitiveTable" ("UserID" int, "Value1" int); SELECT create_distributed_table('"CaseSensitiveTable"', 'UserID'); INSERT INTO 
"CaseSensitiveTable" SELECT s, s FROM generate_series(1,10) s; SELECT * FROM "CaseSensitiveTable" ORDER BY "UserID"; DROP TABLE "CaseSensitiveTable"; -- Select into distributed table with a sequence CREATE TABLE dist_table_with_sequence (user_id serial, value_1 serial); SELECT create_distributed_table('dist_table_with_sequence', 'user_id'); -- from local query INSERT INTO dist_table_with_sequence (value_1) SELECT s FROM generate_series(1,5) s; SELECT * FROM dist_table_with_sequence ORDER BY user_id; -- from a distributed query INSERT INTO dist_table_with_sequence (value_1) SELECT value_1 FROM dist_table_with_sequence; SELECT * FROM dist_table_with_sequence ORDER BY user_id; -- Select from distributed table into reference table CREATE TABLE ref_table (user_id int, value_1 int); SELECT create_reference_table('ref_table'); INSERT INTO ref_table SELECT user_id, value_1 FROM raw_events_second; SELECT * FROM ref_table ORDER BY user_id, value_1; DROP TABLE ref_table; -- Select into an append-partitioned table is not supported CREATE TABLE insert_append_table (user_id int, value_4 bigint); SELECT create_distributed_table('insert_append_table', 'user_id', 'append'); INSERT INTO insert_append_table (user_id, value_4) SELECT user_id, 1 FROM raw_events_second LIMIT 5; DROP TABLE insert_append_table; -- Insert from other distributed table as prepared statement TRUNCATE raw_events_first; PREPARE insert_prep(int) AS INSERT INTO raw_events_first (user_id, value_1) SELECT $1, value_4 FROM raw_events_second ORDER BY value_4 LIMIT 1; EXECUTE insert_prep(1); EXECUTE insert_prep(2); EXECUTE insert_prep(3); EXECUTE insert_prep(4); EXECUTE insert_prep(5); EXECUTE insert_prep(6); SELECT user_id, value_1 FROM raw_events_first ORDER BY user_id, value_1; -- Inserting into views is handled via coordinator TRUNCATE raw_events_first; INSERT INTO test_view SELECT * FROM raw_events_second; SELECT user_id, value_4 FROM test_view ORDER BY user_id, value_4; -- Drop the view now, because the column we are about to drop depends on it DROP VIEW test_view; -- Make sure we handle dropped columns correctly CREATE TABLE drop_col_table (col1 text, col2 text, col3 text); SELECT create_distributed_table('drop_col_table', 'col2'); ALTER TABLE drop_col_table DROP COLUMN col1; INSERT INTO drop_col_table (col3, col2) SELECT value_4, user_id FROM raw_events_second LIMIT 5; SELECT * FROM drop_col_table ORDER BY col2, col3; -- make sure the tuple went to the right shard SELECT * FROM drop_col_table WHERE col2 = '1'; RESET client_min_messages; DROP TABLE drop_col_table; DROP TABLE raw_table; DROP TABLE summary_table; DROP TABLE raw_events_first CASCADE; DROP TABLE raw_events_second; DROP TABLE reference_table; DROP TABLE agg_events; DROP TABLE table_with_defaults; DROP TABLE table_with_serial; DROP TABLE text_table; DROP TABLE char_table; DROP TABLE table_with_starts_with_defaults; citus-7.0.3/src/test/regress/sql/multi_insert_select_non_pushable_queries.sql000066400000000000000000000527271317107136600300010ustar00rootroot00000000000000------------------------------------ ------------------------------------ -- Vanilla funnel query ------------------------------------ ------------------------------------ -- not pushable since the JOIN is not an equi join INSERT INTO agg_results_third (user_id, value_1_agg) SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id != 
e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q; ------------------------------------ ------------------------------------ -- Funnel grouped by whether or not a user has done an event ------------------------------------ ------------------------------------ -- not pushable since the JOIN is not an equi join left part of the UNION -- is not equi join INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id != e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; -- not pushable since the JOIN is not an equi join right part of the UNION -- is not joined on the partition key INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.event_type AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event; -- the LEFT JOIN conditon is not on the partition column (i.e., is it part_key divided by 2) INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg ) SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event) FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = (t2.user_id)/2) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, 
hasdone_event; ------------------------------------ ------------------------------------ -- Funnel, grouped by the number of times a user has done an event ------------------------------------ ------------------------------------ -- not pushable since the right of the UNION query is not joined on -- the partition key INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id != events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY count_pay; -- not pushable since the JOIN condition is not equi JOIN -- (subquery_1 JOIN subquery_2) INSERT INTO agg_results_third (user_id, value_1_agg, value_2_agg) SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id > subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY count_pay; ------------------------------------ ------------------------------------ -- Most recently seen users_table events_table ------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ ------------------------------------ -- not pushable since lateral join is not an equi join INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND 
users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id != u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- not pushable since lateral join is not on the partition key INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE event_type = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; -- not pushable since lateral join is not on the partition key INSERT INTO agg_results_third (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time, value_3 as val_3 FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE event_type = u.val_3 AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; ------------------------------------ ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------ ------------------------------------ -- not pushable since partition key is NOT IN INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id NOT IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); -- not pushable since partition key is not selected from the second subquery INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT value_1 FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); -- not pushable since second subquery does not return bare partition key INSERT INTO agg_results_third (user_id) SELECT DISTINCT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT 3 * user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); ------------------------------------ ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since join is not an euqi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT 
user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND user_id!=users_table.user_id); -- not pushable since the join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type>101 AND event_type < 110 AND value_3 > 100 AND event_type = users_table.user_id); ------------------------------------ ------------------------------------ -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id!=users_table.user_id); -- not pushable since the join is not the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND event_type=users_table.user_id); ------------------------------------ ------------------------------------ -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type!=100 AND value_3 > 100 AND user_id=users_table.user_id) AND EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id!=users_table.user_id); ------------------------------------ ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the first join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id!=users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id=users_table.user_id); ------------------------------------ ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------ ------------------------------------ -- not pushable since the second join is not an equi join INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id != users_table.user_id GROUP BY user_id HAVING Count(*) > 2); -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id 
FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND event_type = users_table.user_id GROUP BY user_id HAVING Count(*) > 2); -- not pushable since the second join is not on the partition key INSERT INTO agg_results_third(user_id, value_2_agg) SELECT user_id, value_2 FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.value_1 GROUP BY user_id HAVING Count(*) > 2); ------------------------------------ ------------------------------------ -- Find me all users_table who has done some event and has filters ------------------------------------ ------------------------------------ -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And user_id NOT in (select user_id From users_table Where value_1 = 15 And value_2 > 25); -- not pushable since we're not selecting the partition key INSERT INTO agg_results_third(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And user_id in (select value_3 From users_table Where value_1 = 15 And value_2 > 25); -- not pushable since we're not selecting the partition key -- from the events table INSERT INTO agg_results_third(user_id) Select user_id From events_table Where event_type = 16 And value_2 > 50 And event_type in (select user_id From users_table Where value_1 = 15 And value_2 > 25); ------------------------------------ ------------------------------------ -- Which events_table did people who has done some specific events_table ------------------------------------ ------------------------------------ -- not pushable due to NOT IN INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE user_id NOT IN (SELECT user_id from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; -- not pushable due to not selecting the partition key INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE user_id IN (SELECT value_2 from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; -- not pushable due to not comparing user id from the events table INSERT INTO agg_results_third(user_id, value_1_agg) SELECT user_id, event_type FROM events_table WHERE event_type IN (SELECT user_id from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type; ------------------------------------ ------------------------------------ -- Find my assets that have the highest probability and fetch their metadata ------------------------------------ ------------------------------------ -- not pushable since the join is not an equi join INSERT INTO agg_results_third(user_id, value_1_agg, value_3_agg) SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id != ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; -- not pushable since the join is not on the partition key INSERT INTO agg_results_third(user_id, value_1_agg, value_3_agg) SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, 
(GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.value_2 and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; -- not supported since one of the queries doesn't have a relation INSERT INTO agg_results (user_id, agg_time, value_2_agg) SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time, value_3 as val_3 FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table, (SELECT 1 as x) as f WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC; citus-7.0.3/src/test/regress/sql/multi_join_order_additional.sql000066400000000000000000000110231317107136600251460ustar00rootroot00000000000000-- -- MULTI_JOIN_ORDER_ADDITIONAL -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 650000; -- Set configuration to print table join order and pruned shards SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise SET client_min_messages TO DEBUG2; -- Create new table definitions for use in testing in distributed planning and -- execution functionality. Also create indexes to boost performance. CREATE TABLE lineitem_hash ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SELECT master_create_distributed_table('lineitem_hash', 'l_orderkey', 'hash'); SELECT master_create_worker_shards('lineitem_hash', 2, 1); CREATE INDEX lineitem_hash_time_index ON lineitem_hash (l_shipdate); CREATE TABLE orders_hash ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT master_create_distributed_table('orders_hash', 'o_orderkey', 'hash'); SELECT master_create_worker_shards('orders_hash', 2, 1); CREATE TABLE customer_hash ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SELECT master_create_distributed_table('customer_hash', 'c_custkey', 'hash'); SELECT master_create_worker_shards('customer_hash', 2, 1); -- The following query checks that we can correctly handle self-joins EXPLAIN SELECT l1.l_quantity FROM lineitem l1, lineitem l2 WHERE l1.l_orderkey = l2.l_orderkey AND l1.l_quantity > 5; -- Update configuration to treat lineitem and orders 
tables as large SET citus.large_table_shard_count TO 2; SET client_min_messages TO LOG; -- The following queries check that we correctly handle joins and OR clauses. In -- particular, these queries check that we factorize out OR clauses if possible, -- and that we default to a cartesian product otherwise. EXPLAIN SELECT count(*) FROM lineitem, orders WHERE (l_orderkey = o_orderkey AND l_quantity > 5) OR (l_orderkey = o_orderkey AND l_quantity < 10); EXPLAIN SELECT l_quantity FROM lineitem, orders WHERE (l_orderkey = o_orderkey OR l_quantity > 5); -- The below queries modify the partition method in pg_dist_partition. We thus -- begin a transaction here so the changes don't impact any other parallel -- running tests. BEGIN; -- Validate that we take into account the partition method when building the -- join-order plan. EXPLAIN SELECT count(*) FROM orders, lineitem_hash WHERE o_orderkey = l_orderkey; -- Verify we handle local joins between two hash-partitioned tables. EXPLAIN SELECT count(*) FROM orders_hash, lineitem_hash WHERE o_orderkey = l_orderkey; -- Validate that we can handle broadcast joins with hash-partitioned tables. EXPLAIN SELECT count(*) FROM customer_hash, nation WHERE c_nationkey = n_nationkey; -- Update the large table shard count for all the following tests. SET citus.large_table_shard_count TO 1; -- Validate that we don't use a single-partition join method for a hash -- re-partitioned table, thus preventing a partition of just the customer table. EXPLAIN SELECT count(*) FROM orders, lineitem, customer WHERE o_custkey = l_partkey AND o_custkey = c_nationkey; -- Validate that we don't chose a single-partition join method with a -- hash-partitioned base table EXPLAIN SELECT count(*) FROM orders, customer_hash WHERE c_custkey = o_custkey; -- Validate that we can re-partition a hash partitioned table to join with a -- range partitioned one. EXPLAIN SELECT count(*) FROM orders_hash, customer WHERE c_custkey = o_custkey; COMMIT; -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; DROP TABLE lineitem_hash; DROP TABLE orders_hash; DROP TABLE customer_hash; citus-7.0.3/src/test/regress/sql/multi_join_order_tpch_large.sql000066400000000000000000000061511317107136600251540ustar00rootroot00000000000000-- -- MULTI_JOIN_ORDER_TPCH_LARGE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 660000; -- Enable configuration to print table join order SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET citus.task_executor_type = 'task-tracker'; -- can't explain all queries otherwise SET client_min_messages TO LOG; -- Change configuration to treat lineitem, orders, customer, and part tables as -- large. The following queries are basically the same as the ones in tpch_small -- except that more data has been loaded into customer and part tables. Therefore, -- we will apply different distributed join strategies for these queries. 
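-- (Editorial addition, not part of the original test file.) Since citus.log_multi_join_order
-- is already enabled above, the join order the planner picks can be observed directly; this
-- read-only sketch reuses the suite's lineitem and orders tables and, if kept, would require
-- regenerating the expected output. The exact strategy printed in the LOG line depends on
-- citus.large_table_shard_count, which the next statement lowers so that these multi-shard
-- regression tables are all treated as large.
EXPLAIN SELECT count(*)
FROM lineitem, orders
WHERE l_orderkey = o_orderkey;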
SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark EXPLAIN SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; -- Query #3 from the TPC-H decision support benchmark EXPLAIN SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; -- Query #10 from the TPC-H decision support benchmark EXPLAIN SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer, orders, lineitem, nation WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC; -- Query #19 from the TPC-H decision support benchmark (modified) EXPLAIN SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem, part WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); -- Query to test multiple re-partition jobs in a single query EXPLAIN SELECT l_partkey, count(*) FROM lineitem, part, orders, customer WHERE l_orderkey = o_orderkey AND l_partkey = p_partkey AND c_custkey = o_custkey GROUP BY l_partkey; -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/sql/multi_join_order_tpch_small.sql000066400000000000000000000047371317107136600252020ustar00rootroot00000000000000-- -- MULTI_JOIN_ORDER_TPCH_SMALL -- -- Enable configuration to print table join order SET citus.explain_distributed_queries TO off; SET citus.log_multi_join_order TO TRUE; SET client_min_messages TO LOG; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark EXPLAIN SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; -- Query #3 from the TPC-H decision support benchmark EXPLAIN SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue 
DESC, o_orderdate; -- Query #10 from the TPC-H decision support benchmark EXPLAIN SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer, orders, lineitem, nation WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC; -- Query #19 from the TPC-H decision support benchmark (modified) EXPLAIN SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem, part WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/sql/multi_join_pruning.sql000066400000000000000000000045031317107136600233320ustar00rootroot00000000000000-- -- MULTI_JOIN_PRUNING -- -- Check that join-pruning works for joins between two large relations. For now -- we only check for join-pruning between locally partitioned relations. In the -- future we want to check for pruning between re-partitioned relations as well. SET citus.explain_distributed_queries TO off; SET client_min_messages TO DEBUG2; -- Change configuration to treat all tables as large SET citus.large_table_shard_count TO 2; SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 9030; -- Shards for the lineitem table have been pruned away. Check that join pruning -- works as expected in this case. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 20000; -- Partition pruning left three shards for the lineitem and one shard for the -- orders table. These shard sets don't overlap, so join pruning should prune -- out all the shards, and leave us with an empty task list. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 6000 AND o_orderkey < 6000; -- Make sure that we can handle filters without a column SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND false; SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem INNER JOIN orders ON (l_orderkey = o_orderkey) WHERE false; -- These tests check that we can do join pruning for tables partitioned over -- different type of columns including varchar, array types, composite types -- etc. This is in response to a bug we had where we were not able to resolve -- correct operator types for some kind of column types. 
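-- (Editorial addition, not part of the original test file.) Partition and join pruning are
-- driven by the shard interval metadata kept in pg_dist_shard; this read-only sketch shows
-- the intervals for the suite's lineitem table. Shards whose [shardminvalue, shardmaxvalue]
-- ranges cannot overlap across the two sides of an equi-join are dropped from the task list.
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem'::regclass
ORDER BY shardid;
-- The type-specific checks that follow exercise the same pruning logic for varchar, array
-- and composite partition columns.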
EXPLAIN SELECT count(*) FROM array_partitioned_table table1, array_partitioned_table table2 WHERE table1.array_column = table2.array_column; EXPLAIN SELECT count(*) FROM composite_partitioned_table table1, composite_partitioned_table table2 WHERE table1.composite_column = table2.composite_column; -- Test that large table joins on partition varchar columns work EXPLAIN SELECT count(*) FROM varchar_partitioned_table table1, varchar_partitioned_table table2 WHERE table1.varchar_column = table2.varchar_column; citus-7.0.3/src/test/regress/sql/multi_large_table_join_planning.sql000066400000000000000000000036041317107136600260000ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_PLANNING -- -- Tests that cover large table join planning. Note that we explicitly start a -- transaction block here so that we don't emit debug messages with changing -- transaction ids in them. Also, we set the executor type to task tracker -- executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 690000; SET citus.enable_unique_job_ids TO off; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; BEGIN; SET client_min_messages TO DEBUG4; SET citus.large_table_shard_count TO 2; SET citus.task_executor_type TO 'task-tracker'; -- Debug4 log messages display jobIds within them. We explicitly set the jobId -- sequence here so that the regression output becomes independent of the number -- of jobs executed prior to running this test. -- Multi-level repartition join to verify our projection columns are correctly -- referenced and propagated across multiple repartition jobs. The test also -- validates that only the minimal necessary projection columns are transferred -- between jobs. SELECT l_partkey, o_orderkey, count(*) FROM lineitem, part, orders, customer WHERE l_orderkey = o_orderkey AND l_partkey = p_partkey AND c_custkey = o_custkey AND (l_quantity > 5.0 OR l_extendedprice > 1200.0) AND p_size > 8 AND o_totalprice > 10.0 AND c_acctbal < 5000.0 AND l_partkey < 1000 GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; SELECT l_partkey, o_orderkey, count(*) FROM lineitem, orders WHERE l_suppkey = o_shippriority AND l_quantity < 5.0 AND o_totalprice <> 4.0 GROUP BY l_partkey, o_orderkey ORDER BY l_partkey, o_orderkey; -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; COMMIT; citus-7.0.3/src/test/regress/sql/multi_large_table_pruning.sql000066400000000000000000000051301317107136600246310ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_PRUNING -- -- Tests covering partition and join-pruning for large table joins. Note that we -- set executor type to task tracker executor here, as we cannot run repartition -- jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 700000; SET citus.large_table_shard_count TO 2; SET client_min_messages TO DEBUG2; SET citus.task_executor_type TO 'task-tracker'; -- Single range-repartition join to test join-pruning behaviour. EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; -- Single range-repartition join with a selection clause on the partitioned -- table to test the case when all map tasks are pruned away. 
EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND o_orderkey < 0; SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND o_orderkey < 0; -- Single range-repartition join with a selection clause on the base table to -- test the case when all sql tasks are pruned away. EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND c_custkey < 0; SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey AND c_custkey < 0; -- Dual hash-repartition join test case. Note that this query doesn't produce -- meaningful results and is only to test hash-partitioning of two large tables -- on non-partition columns. EXPLAIN (COSTS OFF) SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; -- Dual hash-repartition join with a selection clause on one of the tables to -- test the case when all map tasks are pruned away. EXPLAIN (COSTS OFF) SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey AND l_orderkey < 0; SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey AND l_orderkey < 0; -- Test cases with false in the WHERE clause EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey) WHERE false; -- execute once, to verify that's handled SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey) WHERE false; EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey) WHERE 1=0 AND c_custkey < 0; EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders INNER JOIN customer ON (o_custkey = c_custkey AND false); EXPLAIN (COSTS OFF) SELECT o_orderkey FROM orders, customer WHERE o_custkey = c_custkey AND false; citus-7.0.3/src/test/regress/sql/multi_large_table_task_assignment.sql000066400000000000000000000033201317107136600263400ustar00rootroot00000000000000-- -- MULTI_LARGE_TABLE_TASK_ASSIGNMENT -- -- Tests which cover task assignment for MapMerge jobs for single range repartition -- and dual hash repartition joins. The tests also cover task assignment propagation -- from a sql task to its depended tasks. Note that we set the executor type to task -- tracker executor here, as we cannot run repartition jobs with real time executor. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 710000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; BEGIN; SET client_min_messages TO DEBUG3; SET citus.large_table_shard_count TO 2; SET citus.task_executor_type TO 'task-tracker'; -- Single range repartition join to test anchor-shard based task assignment and -- assignment propagation to merge and data-fetch tasks. SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; -- Single range repartition join, along with a join with a small table containing -- more than one shard. This situation results in multiple sql tasks depending on -- the same merge task, and tests our constraint group creation and assignment -- propagation. Here 'orders' is considered the small table. SET citus.large_table_shard_count TO 3; SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; SET citus.large_table_shard_count TO 2; -- Dual hash repartition join which tests the separate hash repartition join -- task assignment algorithm. 
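-- (Editorial addition, not part of the original test file.) "Dual hash repartition" means
-- neither input is partitioned on the join column (l_partkey / c_nationkey here), so both
-- sides are re-partitioned by hashing that column before the join. A rough picture of the
-- resulting map/merge job structure can be obtained without executing the join; this EXPLAIN
-- is illustrative only and duplicates the query that the original test runs for real next.
EXPLAIN (COSTS OFF)
SELECT count(*)
FROM lineitem, customer
WHERE l_partkey = c_nationkey;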
SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; -- Reset client logging level to its previous value SET client_min_messages TO NOTICE; COMMIT; citus-7.0.3/src/test/regress/sql/multi_limit_clause.sql000066400000000000000000000043661317107136600233120ustar00rootroot00000000000000-- -- MULTI_LIMIT_CLAUSE -- -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- Check that we can correctly handle the Limit clause in distributed queries. -- Note that we don't have the limit optimization enabled for these queries, and -- will end up fetching all rows to the master database. SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC; SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 5; SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC LIMIT 10; SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 32.0 GROUP BY l_quantity ORDER BY count_quantity DESC, l_quantity DESC LIMIT 10; -- Check that we can handle limits for simple sort clauses. We order by columns -- in the first two tests, and then by a simple expression in the last test. SELECT min(l_orderkey) FROM lineitem; SELECT l_orderkey FROM lineitem ORDER BY l_orderkey ASC LIMIT 1; SELECT max(l_orderkey) FROM lineitem; SELECT l_orderkey FROM lineitem ORDER BY l_orderkey DESC LIMIT 1; SELECT * FROM lineitem ORDER BY l_orderkey DESC, l_linenumber DESC LIMIT 3; SELECT max(extract(epoch from l_shipdate)) FROM lineitem; SELECT * FROM lineitem ORDER BY extract(epoch from l_shipdate) DESC, l_orderkey DESC LIMIT 3; -- Exercise the scenario where order by clauses don't have any aggregates, and -- that we can push down the limit as a result. Check that when this happens, we -- also sort on all group by clauses behind the covers. SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity LIMIT 1; -- Results from the previous query should match this query's results. SELECT l_quantity, l_discount, avg(l_partkey) FROM lineitem GROUP BY l_quantity, l_discount ORDER BY l_quantity, l_discount LIMIT 1; SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/sql/multi_limit_clause_approximate.sql000066400000000000000000000047331317107136600257210ustar00rootroot00000000000000-- -- MULTI_LIMIT_CLAUSE_APPROXIMATE -- -- Display debug messages on limit clause push down. SET client_min_messages TO DEBUG1; -- We first look at results with limit optimization disabled. This first query -- has a group and an order by. The order by clause is a commutative aggregate -- function. SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; -- Enable limit optimization to fetch one third of each shard's data SET citus.limit_clause_row_fetch_count TO 600; SELECT l_partkey, sum(l_partkey * (1 + l_suppkey)) AS aggregate FROM lineitem GROUP BY l_partkey ORDER BY aggregate DESC LIMIT 10; -- Disable limit optimization for our second test. This time, we have a query -- that joins several tables, and that groups and orders the results. 
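-- (Editorial addition, not part of the original test file.) With
-- citus.limit_clause_row_fetch_count still at 600 from the statement above, each per-shard
-- task is asked for at most roughly that many rows before the coordinator merges them, so
-- ORDER BY ... LIMIT results become approximate rather than exact. A minimal read-only
-- illustration on the same lineitem table (if kept in the suite, the expected output would
-- need regenerating):
SELECT l_partkey, sum(l_suppkey) AS supplier_total
FROM lineitem
GROUP BY l_partkey
ORDER BY supplier_total DESC
LIMIT 5;
-- The RESET that follows disables the optimization again before the multi-table join is
-- re-run for the second test.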
RESET citus.limit_clause_row_fetch_count; SELECT c_custkey, c_name, count(*) as lineitem_count FROM customer, orders, lineitem WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; -- Now, enable limit optimization to fetch half of each task's results. For this -- test, we also change a config setting to ensure that we don't repartition any -- of the tables during the query. SET citus.limit_clause_row_fetch_count TO 150; SET citus.large_table_shard_count TO 2; SELECT c_custkey, c_name, count(*) as lineitem_count FROM customer, orders, lineitem WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey GROUP BY c_custkey, c_name ORDER BY lineitem_count DESC, c_custkey LIMIT 10; RESET citus.large_table_shard_count; -- We now test scenarios where applying the limit optimization wouldn't produce -- meaningful results. First, we check that we don't push down the limit clause -- for non-commutative aggregates. SELECT l_partkey, avg(l_suppkey) AS average FROM lineitem GROUP BY l_partkey ORDER BY average DESC, l_partkey LIMIT 10; -- Next, check that we don't apply the limit optimization for expressions that -- have aggregates within them SELECT l_partkey, round(sum(l_suppkey)) AS complex_expression FROM lineitem GROUP BY l_partkey ORDER BY complex_expression DESC LIMIT 10; -- Check that query execution works as expected for other queries without limits SELECT count(*) count_quantity, l_quantity FROM lineitem WHERE l_quantity < 10.0 GROUP BY l_quantity ORDER BY count_quantity ASC, l_quantity ASC; RESET citus.limit_clause_row_fetch_count; RESET client_min_messages; citus-7.0.3/src/test/regress/sql/multi_master_protocol.sql000066400000000000000000000006761317107136600240540ustar00rootroot00000000000000-- -- MULTI_MASTER_PROTOCOL -- -- Tests that check the metadata returned by the master node. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 740000; SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('lineitem'); SELECT * FROM master_get_table_ddl_events('lineitem'); SELECT * FROM master_get_new_shardid(); SELECT * FROM master_get_active_worker_nodes(); citus-7.0.3/src/test/regress/sql/multi_metadata_access.sql000066400000000000000000000012271317107136600237320ustar00rootroot00000000000000-- -- MULTI_METADATA_ACCESS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1360000; CREATE USER no_access; SET ROLE no_access; -- list relations in the citus extension without sufficient privileges SELECT pg_class.oid::regclass FROM pg_class JOIN pg_namespace nsp ON (pg_class.relnamespace = nsp.oid) JOIN pg_depend dep ON(objid = pg_class.oid) JOIN pg_extension ext ON (ext.oid = dep.refobjid) WHERE refclassid = 'pg_extension'::regclass AND classid ='pg_class'::regclass AND ext.extname = 'citus' AND nsp.nspname = 'pg_catalog' AND NOT has_table_privilege(pg_class.oid, 'select'); RESET role; DROP USER no_access; citus-7.0.3/src/test/regress/sql/multi_metadata_sync.sql000066400000000000000000000600311317107136600234430ustar00rootroot00000000000000-- -- MULTI_METADATA_SYNC -- -- Tests for metadata snapshot functions, metadata syncing functions and propagation of -- metadata changes to MX tables. 
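-- (Editorial addition, not part of the original test file.) As a quick orientation for the
-- tests below: metadata sync is driven by two UDFs that this file exercises at length,
--   start_metadata_sync_to_node(nodename, nodeport)  -- copies pg_dist_* metadata to the
--                                                    -- worker and sets its hasmetadata flag
--   stop_metadata_sync_to_node(nodename, nodeport)   -- marks the node as no longer having
--                                                    -- metadata
-- and the current sync state can always be inspected read-only from the coordinator:
SELECT nodename, nodeport, hasmetadata
FROM pg_dist_node
ORDER BY nodeport;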
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1310000; SELECT nextval('pg_catalog.pg_dist_placement_placementid_seq') AS last_placement_id \gset ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 100000; SELECT nextval('pg_catalog.pg_dist_groupid_seq') AS last_group_id \gset SELECT nextval('pg_catalog.pg_dist_node_nodeid_seq') AS last_node_id \gset -- Create the necessary test utility function CREATE FUNCTION master_metadata_snapshot() RETURNS text[] LANGUAGE C STRICT AS 'citus'; COMMENT ON FUNCTION master_metadata_snapshot() IS 'commands to create the metadata snapshot'; -- Show that none of the existing tables are qualified to be MX tables SELECT * FROM pg_dist_partition WHERE partmethod='h' AND repmodel='s'; -- Show that, with no MX tables, metadata snapshot contains only the delete commands, -- pg_dist_node entries and reference tables SELECT unnest(master_metadata_snapshot()); -- Create a test table with constraints and SERIAL CREATE TABLE mx_test_table (col_1 int UNIQUE, col_2 text NOT NULL, col_3 BIGSERIAL); SELECT master_create_distributed_table('mx_test_table', 'col_1', 'hash'); SELECT master_create_worker_shards('mx_test_table', 8, 1); -- Set the replication model of the test table to streaming replication so that it is -- considered as an MX table UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='mx_test_table'::regclass; -- Show that the created MX table is included in the metadata snapshot SELECT unnest(master_metadata_snapshot()); -- Show that CREATE INDEX commands are included in the metadata snapshot CREATE INDEX mx_index ON mx_test_table(col_2); SELECT unnest(master_metadata_snapshot()); -- Show that schema changes are included in the metadata snapshot CREATE SCHEMA mx_testing_schema; ALTER TABLE mx_test_table SET SCHEMA mx_testing_schema; SELECT unnest(master_metadata_snapshot()); -- Show that append distributed tables are not included in the metadata snapshot CREATE TABLE non_mx_test_table (col_1 int, col_2 text); SELECT master_create_distributed_table('non_mx_test_table', 'col_1', 'append'); UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()); -- Show that range distributed tables are not included in the metadata snapshot UPDATE pg_dist_partition SET partmethod='r' WHERE logicalrelid='non_mx_test_table'::regclass; SELECT unnest(master_metadata_snapshot()); -- Test start_metadata_sync_to_node UDF -- Ensure that hasmetadata=false for all nodes SELECT count(*) FROM pg_dist_node WHERE hasmetadata=true; -- Ensure it works when run on a secondary node SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset SELECT master_add_node('localhost', 8888, groupid => :worker_1_group, noderole => 'secondary'); SELECT start_metadata_sync_to_node('localhost', 8888); SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; SELECT stop_metadata_sync_to_node('localhost', 8888); SELECT hasmetadata FROM pg_dist_node WHERE nodeport = 8888; -- Add a node to another cluster to make sure it's also synced SELECT master_add_secondary_node('localhost', 8889, 'localhost', :worker_1_port, nodecluster => 'second-cluster'); -- Run start_metadata_sync_to_node and check that it marked hasmetadata for that worker SELECT start_metadata_sync_to_node('localhost', :worker_1_port); SELECT nodeid, hasmetadata FROM pg_dist_node WHERE nodename='localhost' AND nodeport=:worker_1_port; -- Check that the metadata has been copied to the worker \c - - - 
:worker_1_port SELECT * FROM pg_dist_local_group; SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_partition ORDER BY logicalrelid; SELECT * FROM pg_dist_shard ORDER BY shardid; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; \d mx_testing_schema.mx_test_table_col_1_key \d mx_testing_schema.mx_index -- Check that pg_dist_colocation is not synced SELECT * FROM pg_dist_colocation ORDER BY colocationid; -- Make sure that truncate trigger has been set for the MX table on worker SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; -- Make sure that start_metadata_sync_to_node considers foreign key constraints \c - - - :master_port -- Since we're superuser, we can set the replication model to 'streaming' to -- create some MX tables SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE SCHEMA mx_testing_schema_2; CREATE TABLE mx_testing_schema.fk_test_1 (col1 int, col2 text, col3 int, UNIQUE(col1, col3)); CREATE TABLE mx_testing_schema_2.fk_test_2 (col1 int, col2 int, col3 text, FOREIGN KEY (col1, col2) REFERENCES mx_testing_schema.fk_test_1 (col1, col3)); SELECT create_distributed_table('mx_testing_schema.fk_test_1', 'col1'); SELECT create_distributed_table('mx_testing_schema_2.fk_test_2', 'col1'); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -- Check that foreign key metadata exists on the worker \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_testing_schema_2.fk_test_2'::regclass; \c - - - :master_port DROP TABLE mx_testing_schema_2.fk_test_2; DROP TABLE mx_testing_schema.fk_test_1; RESET citus.shard_replication_factor; RESET citus.replication_model; -- Check that repeated calls to start_metadata_sync_to_node has no side effects \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); \c - - - :worker_1_port SELECT * FROM pg_dist_local_group; SELECT * FROM pg_dist_node ORDER BY nodeid; SELECT * FROM pg_dist_partition ORDER BY logicalrelid; SELECT * FROM pg_dist_shard ORDER BY shardid; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_testing_schema.mx_test_table'::regclass; \d mx_testing_schema.mx_test_table_col_1_key \d mx_testing_schema.mx_index SELECT count(*) FROM pg_trigger WHERE tgrelid='mx_testing_schema.mx_test_table'::regclass; -- Make sure that start_metadata_sync_to_node cannot be called inside a transaction \c - - - :master_port BEGIN; SELECT start_metadata_sync_to_node('localhost', :worker_2_port); ROLLBACK; SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; -- Check that the distributed table can be queried from the worker \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); CREATE TABLE mx_query_test (a int, b text, c int); SELECT create_distributed_table('mx_query_test', 'a'); SELECT repmodel FROM pg_dist_partition WHERE logicalrelid='mx_query_test'::regclass; INSERT INTO mx_query_test VALUES (1, 'one', 1); INSERT INTO mx_query_test VALUES (2, 'two', 4); INSERT INTO mx_query_test VALUES (3, 'three', 9); INSERT INTO mx_query_test VALUES (4, 'four', 16); INSERT INTO 
mx_query_test VALUES (5, 'five', 24); \c - - - :worker_1_port SELECT * FROM mx_query_test ORDER BY a; INSERT INTO mx_query_test VALUES (6, 'six', 36); UPDATE mx_query_test SET c = 25 WHERE a = 5; \c - - - :master_port SELECT * FROM mx_query_test ORDER BY a; \c - - - :master_port DROP TABLE mx_query_test; -- Check that stop_metadata_sync_to_node function sets hasmetadata of the node to false \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_1_port); SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_1_port; -- Test DDL propagation in MX tables SELECT start_metadata_sync_to_node('localhost', :worker_1_port); SET citus.shard_count = 5; SET citus.multi_shard_commit_protocol TO '2pc'; CREATE SCHEMA mx_test_schema_1; CREATE SCHEMA mx_test_schema_2; -- Create MX tables SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_test_schema_1.mx_table_1 (col1 int UNIQUE, col2 text); CREATE INDEX mx_index_1 ON mx_test_schema_1.mx_table_1 (col1); CREATE TABLE mx_test_schema_2.mx_table_2 (col1 int, col2 text); CREATE INDEX mx_index_2 ON mx_test_schema_2.mx_table_2 (col2); ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col1) REFERENCES mx_test_schema_1.mx_table_1(col1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; \d mx_test_schema_1.mx_table_1_col1_key \d mx_test_schema_1.mx_index_1 SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_2.mx_table_2'::regclass; \d mx_test_schema_2.mx_index_2 SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_2.mx_table_2'::regclass; SELECT create_distributed_table('mx_test_schema_1.mx_table_1', 'col1'); SELECT create_distributed_table('mx_test_schema_2.mx_table_2', 'col1'); -- Check that created tables are marked as streaming replicated tables SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid; -- See the shards and placements of the mx tables SELECT logicalrelid, shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; -- Check that metadata of MX tables exist on the metadata worker \c - - - :worker_1_port -- Check that tables are created \dt mx_test_schema_?.mx_table_? 
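-- (Editorial addition, not part of the original test file.) Besides the per-row catalog dumps
-- below, a compact read-only way to confirm how much metadata reached this worker is to count
-- the synced shards for the MX test tables created above.
SELECT logicalrelid, count(*) AS shard_count
FROM pg_dist_shard
WHERE logicalrelid IN ('mx_test_schema_1.mx_table_1'::regclass,
                       'mx_test_schema_2.mx_table_2'::regclass)
GROUP BY logicalrelid
ORDER BY logicalrelid;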
-- Check that table metadata are created SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass; -- Check that shard and placement data are created SELECT logicalrelid, shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_test_schema_1.mx_table_1'::regclass OR logicalrelid = 'mx_test_schema_2.mx_table_2'::regclass ORDER BY logicalrelid, shardid; -- Check that metadata of MX tables don't exist on the non-metadata worker \c - - - :worker_2_port \d mx_test_schema_1.mx_table_1 \d mx_test_schema_2.mx_table_2 SELECT * FROM pg_dist_partition; SELECT * FROM pg_dist_shard; SELECT * FROM pg_dist_shard_placement ORDER BY shardid, nodename, nodeport; -- Check that CREATE INDEX statement is propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; SET client_min_messages TO 'ERROR'; CREATE INDEX mx_index_3 ON mx_test_schema_2.mx_table_2 USING hash (col1); ALTER TABLE mx_test_schema_2.mx_table_2 ADD CONSTRAINT mx_table_2_col1_key UNIQUE (col1); \c - - - :worker_1_port \d mx_test_schema_2.mx_index_3 \d mx_test_schema_2.mx_table_2_col1_key -- Check that DROP INDEX statement is propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; DROP INDEX mx_test_schema_2.mx_index_3; \c - - - :worker_1_port \d mx_test_schema_2.mx_index_3 -- Check that ALTER TABLE statements are propagated \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; ALTER TABLE mx_test_schema_1.mx_table_1 ADD COLUMN col3 NUMERIC; ALTER TABLE mx_test_schema_1.mx_table_1 ALTER COLUMN col3 SET DATA TYPE INT; ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1); \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_test_schema_1.mx_table_1'::regclass; SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; -- Check that foreign key constraint with NOT VALID works as well \c - - - :master_port SET citus.multi_shard_commit_protocol TO '2pc'; ALTER TABLE mx_test_schema_1.mx_table_1 DROP CONSTRAINT mx_fk_constraint; ALTER TABLE mx_test_schema_1.mx_table_1 ADD CONSTRAINT mx_fk_constraint_2 FOREIGN KEY (col1) REFERENCES mx_test_schema_2.mx_table_2(col1) NOT VALID; \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_fkeys WHERE relid='mx_test_schema_1.mx_table_1'::regclass; -- Check that mark_tables_colocated call propagates the changes to the workers \c - - - :master_port SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 10000; SET citus.shard_count TO 7; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_colocation_test_1 (a int); SELECT create_distributed_table('mx_colocation_test_1', 'a'); CREATE TABLE mx_colocation_test_2 (a int); SELECT create_distributed_table('mx_colocation_test_2', 'a'); -- Check the colocation IDs of the created tables SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass ORDER BY logicalrelid; -- Reset the colocation IDs of the test tables DELETE FROM pg_dist_colocation WHERE EXISTS ( SELECT 1 FROM pg_dist_partition WHERE colocationid = 
pg_dist_partition.colocationid AND pg_dist_partition.logicalrelid = 'mx_colocation_test_1'::regclass); UPDATE pg_dist_partition SET colocationid = 0 WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; -- Mark tables colocated and see the changes on the master and the worker SELECT mark_tables_colocated('mx_colocation_test_1', ARRAY['mx_colocation_test_2']); SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; \c - - - :worker_1_port SELECT logicalrelid, colocationid FROM pg_dist_partition WHERE logicalrelid = 'mx_colocation_test_1'::regclass OR logicalrelid = 'mx_colocation_test_2'::regclass; \c - - - :master_port -- Check that DROP TABLE on MX tables works DROP TABLE mx_colocation_test_1; DROP TABLE mx_colocation_test_2; \d mx_colocation_test_1 \d mx_colocation_test_2 \c - - - :worker_1_port \d mx_colocation_test_1 \d mx_colocation_test_2 -- Check that dropped MX table can be recreated again \c - - - :master_port SET citus.shard_count TO 7; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; DROP TABLE mx_temp_drop_test; CREATE TABLE mx_temp_drop_test (a int); SELECT create_distributed_table('mx_temp_drop_test', 'a'); SELECT logicalrelid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'mx_temp_drop_test'::regclass; DROP TABLE mx_temp_drop_test; -- Check that MX tables can be created with SERIAL columns, but error out on metadata sync \c - - - :master_port SET citus.shard_count TO 3; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); DROP TABLE mx_table_with_small_sequence; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -- Show that create_distributed_table errors out if the table has a SERIAL column and -- there are metadata workers CREATE TABLE mx_table_with_small_sequence(a int, b SERIAL); SELECT create_distributed_table('mx_table_with_small_sequence', 'a'); DROP TABLE mx_table_with_small_sequence; -- Create an MX table with (BIGSERIAL) sequences CREATE TABLE mx_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); SELECT create_distributed_table('mx_table_with_sequence', 'a'); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; \ds mx_table_with_sequence_b_seq \ds mx_table_with_sequence_c_seq -- Check that the sequences created on the metadata worker as well \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; \ds mx_table_with_sequence_b_seq \ds mx_table_with_sequence_c_seq -- Check that the sequences on the worker have their own space SELECT nextval('mx_table_with_sequence_b_seq'); SELECT nextval('mx_table_with_sequence_c_seq'); -- Check that adding a new metadata node sets the sequence space correctly \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); \c - - - 
:worker_2_port SELECT groupid FROM pg_dist_local_group; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_table_with_sequence'::regclass; \ds mx_table_with_sequence_b_seq \ds mx_table_with_sequence_c_seq SELECT nextval('mx_table_with_sequence_b_seq'); SELECT nextval('mx_table_with_sequence_c_seq'); -- Check that dropping the mx table with sequences works as expected, even the metadata -- syncing is stopped to one of the workers \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); DROP TABLE mx_table_with_sequence; \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq \ds mx_table_with_sequence_c_seq -- Check that the sequences are dropped from the workers \c - - - :worker_1_port \d mx_table_with_sequence \ds mx_table_with_sequence_b_seq \ds mx_table_with_sequence_c_seq -- Check that the sequences are dropped from the workers \c - - - :worker_2_port \ds mx_table_with_sequence_b_seq \ds mx_table_with_sequence_c_seq -- Check that MX sequences play well with non-super users \c - - - :master_port -- Remove a node so that shards and sequences won't be created on table creation. Therefore, -- we can test that start_metadata_sync_to_node can actually create the sequence with proper -- owner CREATE TABLE pg_dist_placement_temp AS SELECT * FROM pg_dist_placement; CREATE TABLE pg_dist_partition_temp AS SELECT * FROM pg_dist_partition; DELETE FROM pg_dist_placement; DELETE FROM pg_dist_partition; SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset SELECT master_remove_node('localhost', :worker_2_port); -- the master user needs superuser permissions to change the replication model CREATE USER mx_user WITH SUPERUSER; \c - - - :worker_1_port CREATE USER mx_user; \c - - - :worker_2_port CREATE USER mx_user; \c - mx_user - :master_port -- Create an mx table as a different user CREATE TABLE mx_table (a int, b BIGSERIAL); SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SELECT create_distributed_table('mx_table', 'a'); \c - postgres - :master_port SELECT master_add_node('localhost', :worker_2_port); SELECT start_metadata_sync_to_node('localhost', :worker_2_port); \c - mx_user - :worker_1_port SELECT nextval('mx_table_b_seq'); INSERT INTO mx_table (a) VALUES (37); INSERT INTO mx_table (a) VALUES (38); SELECT * FROM mx_table ORDER BY a; \c - mx_user - :worker_2_port SELECT nextval('mx_table_b_seq'); INSERT INTO mx_table (a) VALUES (39); INSERT INTO mx_table (a) VALUES (40); SELECT * FROM mx_table ORDER BY a; \c - mx_user - :master_port DROP TABLE mx_table; -- put the metadata back into a consistent state \c - postgres - :master_port INSERT INTO pg_dist_placement SELECT * FROM pg_dist_placement_temp; INSERT INTO pg_dist_partition SELECT * FROM pg_dist_partition_temp; DROP TABLE pg_dist_placement_temp; DROP TABLE pg_dist_partition_temp; UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :worker_1_port UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :worker_2_port UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :master_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); DROP USER mx_user; \c - - - :worker_1_port DROP USER mx_user; \c - - - :worker_2_port DROP 
USER mx_user; -- Check that create_reference_table creates the metadata on workers \c - - - :master_port CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); \dt mx_ref \c - - - :worker_1_port \dt mx_ref SELECT logicalrelid, partmethod, repmodel, shardid, placementid, nodename, nodeport FROM pg_dist_partition NATURAL JOIN pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_ref'::regclass ORDER BY nodeport; SELECT shardid AS ref_table_shardid FROM pg_dist_shard WHERE logicalrelid='mx_ref'::regclass \gset -- Check that DDL commands are propagated to reference tables on workers \c - - - :master_port ALTER TABLE mx_ref ADD COLUMN col_3 NUMERIC DEFAULT 0; CREATE INDEX mx_ref_index ON mx_ref(col_1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; \d mx_ref_index \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ref'::regclass; \d mx_ref_index -- Check that metada is cleaned successfully upon drop table \c - - - :master_port DROP TABLE mx_ref; \d mx_ref \c - - - :worker_1_port \d mx_ref SELECT * FROM pg_dist_shard WHERE shardid=:ref_table_shardid; SELECT * FROM pg_dist_shard_placement WHERE shardid=:ref_table_shardid; -- Check that master_add_node propagates the metadata about new placements of a reference table \c - - - :master_port SELECT groupid AS old_worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset CREATE TABLE tmp_placement AS SELECT * FROM pg_dist_placement WHERE groupid = :old_worker_2_group; DELETE FROM pg_dist_placement WHERE groupid = :old_worker_2_group; SELECT master_remove_node('localhost', :worker_2_port); CREATE TABLE mx_ref (col_1 int, col_2 text); SELECT create_reference_table('mx_ref'); SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; \c - - - :worker_1_port SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass; \c - - - :master_port SELECT master_add_node('localhost', :worker_2_port); SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; \c - - - :worker_1_port SELECT shardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_ref'::regclass ORDER BY shardid, nodeport; -- Get the metadata back into a consistent state \c - - - :master_port INSERT INTO pg_dist_placement (SELECT * FROM tmp_placement); DROP TABLE tmp_placement; UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; \c - - - :worker_1_port UPDATE pg_dist_placement SET groupid = (SELECT groupid FROM pg_dist_node WHERE nodeport = :worker_2_port) WHERE groupid = :old_worker_2_group; -- Cleanup \c - - - :master_port DROP TABLE mx_test_schema_2.mx_table_2 CASCADE; DROP TABLE mx_test_schema_1.mx_table_1 CASCADE; DROP TABLE mx_testing_schema.mx_test_table; DROP TABLE mx_ref; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); RESET citus.shard_count; RESET citus.shard_replication_factor; RESET citus.replication_model; RESET citus.multi_shard_commit_protocol; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART :last_group_id; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART :last_node_id; ALTER 
SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; citus-7.0.3/src/test/regress/sql/multi_modifications.sql000066400000000000000000000705671317107136600234760ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 750000; -- =================================================================== -- test end-to-end modification functionality -- =================================================================== CREATE TYPE order_side AS ENUM ('buy', 'sell'); CREATE TABLE limit_orders ( id bigint PRIMARY KEY, symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, kind order_side NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); CREATE TABLE multiple_hash ( category text NOT NULL, data text NOT NULL ); CREATE TABLE insufficient_shards ( LIKE limit_orders ); CREATE TABLE range_partitioned ( LIKE limit_orders ); CREATE TABLE append_partitioned ( LIKE limit_orders ); SELECT master_create_distributed_table('limit_orders', 'id', 'hash'); SELECT master_create_distributed_table('multiple_hash', 'category', 'hash'); SELECT master_create_distributed_table('insufficient_shards', 'id', 'hash'); SELECT master_create_distributed_table('range_partitioned', 'id', 'range'); SELECT master_create_distributed_table('append_partitioned', 'id', 'append'); SELECT master_create_worker_shards('limit_orders', 2, 2); SELECT master_create_worker_shards('multiple_hash', 2, 2); -- make a single shard that covers no partition values SELECT master_create_worker_shards('insufficient_shards', 1, 1); UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 0 WHERE logicalrelid = 'insufficient_shards'::regclass; -- create range-partitioned shards SELECT master_create_empty_shard('range_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 49999 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('range_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 50000, shardmaxvalue = 99999 WHERE shardid = :new_shard_id; -- create append-partitioned shards SELECT master_create_empty_shard('append_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('append_partitioned') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000 WHERE shardid = :new_shard_id; -- basic single-row INSERT INSERT INTO limit_orders VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 32743; -- basic single-row INSERT with RETURNING INSERT INTO limit_orders VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; -- try a single-row INSERT with no shard to receive it INSERT INTO insufficient_shards VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); -- try an insert to a range-partitioned table INSERT INTO range_partitioned VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); -- also insert to an append-partitioned table INSERT INTO append_partitioned VALUES (414123, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); -- ensure the values are where we put them and query to ensure they are properly pruned SET client_min_messages TO 'DEBUG2'; SET citus.task_executor_type TO 'real-time'; SELECT * FROM range_partitioned WHERE id = 32743; 
SELECT * FROM append_partitioned WHERE id = 414123; SET client_min_messages TO DEFAULT; SET citus.task_executor_type TO DEFAULT; -- try inserting without a range-partitioned shard to receive the value INSERT INTO range_partitioned VALUES (999999, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); -- and insert into an append-partitioned table with a value that spans shards: INSERT INTO append_partitioned VALUES (500000, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); -- INSERT with DEFAULT in the target list INSERT INTO limit_orders VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 'sell', DEFAULT); SELECT COUNT(*) FROM limit_orders WHERE id = 12756; -- INSERT with expressions in target list INSERT INTO limit_orders VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT COUNT(*) FROM limit_orders WHERE id = 430; -- INSERT without partition key INSERT INTO limit_orders DEFAULT VALUES; -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; -- INSERT violating NOT NULL constraint INSERT INTO limit_orders VALUES (NULL, 'T', 975234, DEFAULT); -- INSERT violating column constraint INSERT INTO limit_orders VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'sell', -5.00); -- INSERT violating primary key constraint INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58); -- INSERT violating primary key constraint, with RETURNING specified. INSERT INTO limit_orders VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *; -- INSERT, with RETURNING specified, failing with a non-constraint error INSERT INTO limit_orders VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0; SET client_min_messages TO DEFAULT; -- commands with non-constant partition values are supported INSERT INTO limit_orders VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45', 'sell', 0.58); -- values for other columns are totally fine INSERT INTO limit_orders VALUES (2036, 'GOOG', 5634, now(), 'buy', random()); -- commands with mutable functions in their quals DELETE FROM limit_orders WHERE id = 246 AND bidder_id = (random() * 1000); -- commands with mutable but non-volatile functions(ie: stable func.) in their quals -- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable) DELETE FROM limit_orders WHERE id = 246 AND placed_at = current_timestamp::timestamp; -- multi-row inserts are supported INSERT INTO limit_orders VALUES (12037, 'GOOG', 5634, '2001-04-16 03:37:28', 'buy', 0.50), (12038, 'GOOG', 5634, '2001-04-17 03:37:28', 'buy', 2.50), (12039, 'GOOG', 5634, '2001-04-18 03:37:28', 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 12037 AND 12039; -- even those with functions and returning INSERT INTO limit_orders VALUES (22037, 'GOOG', 5634, now(), 'buy', 0.50), (22038, 'GOOG', 5634, now(), 'buy', 2.50), (22039, 'GOOG', 5634, now(), 'buy', 1.50) RETURNING id; SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 22037 AND 22039; -- even those with functions in their partition columns INSERT INTO limit_orders VALUES (random() * 10 + 70000, 'GOOG', 5634, now(), 'buy', 0.50), (random() * 10 + 80000, 'GOOG', 5634, now(), 'buy', 2.50), (random() * 10 + 80090, 'GOOG', 5634, now(), 'buy', 1.50); SELECT COUNT(*) FROM limit_orders WHERE id BETWEEN 70000 AND 90000; -- Who says that? :) -- INSERT ... SELECT ... 
FROM commands are unsupported -- INSERT INTO limit_orders SELECT * FROM limit_orders; -- commands containing a CTE are unsupported WITH deleted_orders AS (DELETE FROM limit_orders RETURNING *) INSERT INTO limit_orders DEFAULT VALUES; -- test simple DELETE INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; DELETE FROM limit_orders WHERE id = 246; SELECT COUNT(*) FROM limit_orders WHERE id = 246; -- test simple DELETE with RETURNING DELETE FROM limit_orders WHERE id = 430 RETURNING *; SELECT COUNT(*) FROM limit_orders WHERE id = 430; -- DELETE with expression in WHERE clause INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders WHERE id = 246; DELETE FROM limit_orders WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders WHERE id = 246; -- commands with no constraints on the partition key are not supported DELETE FROM limit_orders WHERE bidder_id = 162; -- commands with a USING clause are unsupported CREATE TABLE bidders ( name text, id bigint ); DELETE FROM limit_orders USING bidders WHERE limit_orders.id = 246 AND limit_orders.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders DEFAULT VALUES RETURNING *) DELETE FROM limit_orders; -- cursors are not supported DELETE FROM limit_orders WHERE CURRENT OF cursor_name; INSERT INTO limit_orders VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); -- simple UPDATE UPDATE limit_orders SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders WHERE id = 246; -- simple UPDATE with RETURNING UPDATE limit_orders SET symbol = 'GM' WHERE id = 246 RETURNING *; -- expression UPDATE UPDATE limit_orders SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders WHERE id = 246; -- expression UPDATE with RETURNING UPDATE limit_orders SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; -- multi-column UPDATE UPDATE limit_orders SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders WHERE id = 246; -- multi-column UPDATE with RETURNING UPDATE limit_orders SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; -- Test that on unique contraint violations, we fail fast INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); INSERT INTO limit_orders VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); -- Test that shards which miss a modification are marked unhealthy -- First: Connect to the second worker node \c - - - :worker_2_port -- Second: Move aside limit_orders shard on the second worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port -- Fourth: Perform an INSERT on the remaining node INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); -- Last: Verify the insert worked but the deleted placement is now unhealthy SELECT count(*) FROM limit_orders WHERE id = 276; SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'limit_orders'::regclass; -- Test that if all shards miss a modification, no state change occurs -- First: Connect to the first worker node \c - - - :worker_1_port -- Second: Move aside limit_orders 
shard on the first worker node ALTER TABLE limit_orders_750000 RENAME TO renamed_orders; -- Third: Connect back to master node \c - - - :master_port -- Fourth: Perform an INSERT on the remaining node INSERT INTO limit_orders VALUES (276, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); -- Last: Verify worker is still healthy SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_1_port AND sp.shardstate = 1 AND s.logicalrelid = 'limit_orders'::regclass; -- Undo our change... -- First: Connect to the first worker node \c - - - :worker_1_port -- Second: Restore the limit_orders shard on the first worker node ALTER TABLE renamed_orders RENAME TO limit_orders_750000; -- Third: Connect back to master node \c - - - :master_port -- commands with no constraints on the partition key are not supported UPDATE limit_orders SET limit_price = 0.00; -- attempting to change the partition key is unsupported UPDATE limit_orders SET id = 0 WHERE id = 246; UPDATE limit_orders SET id = 0 WHERE id = 0 OR id = 246; -- setting the partition column value to itself is allowed UPDATE limit_orders SET id = 246 WHERE id = 246; UPDATE limit_orders SET id = 246 WHERE id = 246 AND symbol = 'GM'; UPDATE limit_orders SET id = limit_orders.id WHERE id = 246; -- UPDATEs with a FROM clause are unsupported UPDATE limit_orders SET limit_price = 0.00 FROM bidders WHERE limit_orders.id = 246 AND limit_orders.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders DEFAULT VALUES RETURNING *) UPDATE limit_orders SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; -- updates referencing just a var are supported UPDATE limit_orders SET bidder_id = id WHERE id = 246; -- updates referencing a column are supported UPDATE limit_orders SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders WHERE id = 246; -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; ALTER TABLE limit_orders ADD COLUMN array_of_values integer[]; -- updates referencing STABLE functions are allowed UPDATE limit_orders SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246; -- so are binary operators UPDATE limit_orders SET array_of_values = 1 || array_of_values WHERE id = 246; CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; \c - - - :worker_1_port CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; \c - - - :worker_2_port CREATE FUNCTION immutable_append(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; \c - - - :master_port -- immutable function calls with vars are also allowed UPDATE limit_orders SET array_of_values = immutable_append(array_of_values, 2) WHERE id = 246; CREATE FUNCTION stable_append(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; -- but STABLE function calls with vars are not allowed UPDATE limit_orders SET array_of_values = stable_append(array_of_values, 3) WHERE id
= 246; SELECT array_of_values FROM limit_orders WHERE id = 246; -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 2) + COALESCE($1, 3);' LANGUAGE SQL STABLE STRICT; UPDATE limit_orders SET bidder_id = temp_strict_func(1, null) WHERE id = 246; SELECT array_of_values FROM limit_orders WHERE id = 246; ALTER TABLE limit_orders DROP array_of_values; -- even in RETURNING UPDATE limit_orders SET placed_at = placed_at WHERE id = 246 RETURNING NOW(); -- cursors are not supported UPDATE limit_orders SET symbol = 'GM' WHERE CURRENT OF cursor_name; -- check that multi-row UPDATE/DELETEs with RETURNING work INSERT INTO multiple_hash VALUES ('0', '1'); INSERT INTO multiple_hash VALUES ('0', '2'); INSERT INTO multiple_hash VALUES ('0', '3'); INSERT INTO multiple_hash VALUES ('0', '4'); INSERT INTO multiple_hash VALUES ('0', '5'); INSERT INTO multiple_hash VALUES ('0', '6'); UPDATE multiple_hash SET data = data ||'-1' WHERE category = '0' RETURNING *; DELETE FROM multiple_hash WHERE category = '0' RETURNING *; -- ensure returned row counters are correct \set QUIET off INSERT INTO multiple_hash VALUES ('1', '1'); INSERT INTO multiple_hash VALUES ('1', '2'); INSERT INTO multiple_hash VALUES ('1', '3'); INSERT INTO multiple_hash VALUES ('2', '1'); INSERT INTO multiple_hash VALUES ('2', '2'); INSERT INTO multiple_hash VALUES ('2', '3'); INSERT INTO multiple_hash VALUES ('2', '3') RETURNING *; -- check that update return the right number of rows -- one row UPDATE multiple_hash SET data = data ||'-1' WHERE category = '1' AND data = '1'; -- three rows UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1'; -- three rows, with RETURNING UPDATE multiple_hash SET data = data ||'-2' WHERE category = '1' RETURNING category; -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; -- check that deletes return the right number of rows -- one row DELETE FROM multiple_hash WHERE category = '2' AND data = '1'; -- two rows DELETE FROM multiple_hash WHERE category = '2'; -- three rows, with RETURNING DELETE FROM multiple_hash WHERE category = '1' RETURNING category; -- check SELECT * FROM multiple_hash WHERE category = '1' ORDER BY category, data; SELECT * FROM multiple_hash WHERE category = '2' ORDER BY category, data; -- verify interaction of default values, SERIAL, and RETURNING \set QUIET on CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT master_create_distributed_table('app_analytics_events', 'app_id', 'hash'); SELECT master_create_worker_shards('app_analytics_events', 4, 1); INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; DROP TABLE app_analytics_events; -- again with serial in the partition column CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'id'); INSERT INTO app_analytics_events VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; INSERT INTO app_analytics_events (app_id, name) VALUES (102, 'Wayz') RETURNING id; INSERT INTO app_analytics_events (app_id, name) VALUES (103, 'Mynt') RETURNING *; -- Test multi-row insert with serial in the partition column INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; INSERT INTO app_analytics_events (id, 
name) VALUES (DEFAULT, 'Foo'), (300, 'Wah') RETURNING *; PREPARE prep(varchar) AS INSERT INTO app_analytics_events (id, name) VALUES (DEFAULT, $1 || '.1'), (400 , $1 || '.2') RETURNING *; EXECUTE prep('version-1'); EXECUTE prep('version-2'); EXECUTE prep('version-3'); EXECUTE prep('version-4'); EXECUTE prep('version-5'); EXECUTE prep('version-6'); SELECT * FROM app_analytics_events ORDER BY id, name; TRUNCATE app_analytics_events; -- Test multi-row insert with a dropped column ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; SELECT * FROM app_analytics_events ORDER BY id; DROP TABLE app_analytics_events; -- Test multi-row insert with a dropped column before the partition column CREATE TABLE app_analytics_events (id int default 3, app_id integer, name text); SELECT create_distributed_table('app_analytics_events', 'name'); ALTER TABLE app_analytics_events DROP COLUMN app_id; INSERT INTO app_analytics_events (name) VALUES ('Wayz'), ('Mynt') RETURNING *; SELECT * FROM app_analytics_events WHERE name = 'Wayz'; DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a reference table CREATE TABLE app_analytics_events (id serial, app_id integer, name text); SELECT create_reference_table('app_analytics_events'); INSERT INTO app_analytics_events (app_id, name) VALUES (104, 'Wayz'), (105, 'Mynt') RETURNING *; SELECT * FROM app_analytics_events ORDER BY id; DROP TABLE app_analytics_events; -- Test multi-row insert with serial in a non-partition column CREATE TABLE app_analytics_events (id int, app_id serial, name text); SELECT create_distributed_table('app_analytics_events', 'id'); INSERT INTO app_analytics_events (id, name) VALUES (99, 'Wayz'), (98, 'Mynt') RETURNING name, app_id; SELECT * FROM app_analytics_events ORDER BY id; DROP TABLE app_analytics_events; -- test UPDATE with subqueries CREATE TABLE raw_table (id bigint, value bigint); CREATE TABLE summary_table ( id bigint, min_value numeric, average_value numeric, count int, uniques int); SELECT create_distributed_table('raw_table', 'id'); SELECT create_distributed_table('summary_table', 'id'); INSERT INTO raw_table VALUES (1, 100); INSERT INTO raw_table VALUES (1, 200); INSERT INTO raw_table VALUES (1, 200); INSERT INTO raw_table VALUES (1, 300); INSERT INTO raw_table VALUES (2, 400); INSERT INTO raw_table VALUES (2, 500); INSERT INTO summary_table VALUES (1); INSERT INTO summary_table VALUES (2); SELECT * FROM summary_table ORDER BY id; UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; -- try different syntax UPDATE summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM summary_table ORDER BY id; UPDATE summary_table SET min_value = 100 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value > 100) AND id = 1; SELECT * FROM summary_table ORDER BY id; -- indeed, we don't need filter on UPDATE explicitly if SELECT already prunes to one shard UPDATE summary_table SET uniques = 2 WHERE id IN (SELECT id FROM raw_table WHERE id = 1 and value IN (100, 200)); SELECT * FROM summary_table ORDER BY id; -- use inner results for non-partition column UPDATE summary_table SET uniques = NULL WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; -- these should not update 
anything UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 4 ) average_query WHERE id = 1 AND id = 4; UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1 AND id = 4; SELECT * FROM summary_table ORDER BY id; -- update with NULL value UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 4 ) average_query WHERE id = 1; SELECT * FROM summary_table ORDER BY id; -- unsupported multi-shard updates UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table) average_query; UPDATE summary_table SET average_value = average_value + 1 WHERE id = (SELECT id FROM raw_table WHERE value > 100); -- test complex queries UPDATE summary_table SET uniques = metrics.expensive_uniques, count = metrics.total_count FROM (SELECT id, count(DISTINCT (CASE WHEN value > 100 then value end)) AS expensive_uniques, count(value) AS total_count FROM raw_table WHERE id = 1 GROUP BY id) metrics WHERE summary_table.id = metrics.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; -- test joins UPDATE summary_table SET count = count + 1 FROM raw_table WHERE raw_table.id = summary_table.id AND summary_table.id = 1; SELECT * FROM summary_table ORDER BY id; -- test with prepared statements PREPARE prepared_update_with_subquery(int, int) AS UPDATE summary_table SET count = count + $1 FROM raw_table WHERE raw_table.id = summary_table.id AND summary_table.id = $2; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); EXECUTE prepared_update_with_subquery(10, 1); SELECT * FROM summary_table ORDER BY id; -- test with reference tables CREATE TABLE reference_raw_table (id bigint, value bigint); CREATE TABLE reference_summary_table ( id bigint, min_value numeric, average_value numeric, count int, uniques int); SELECT create_reference_table('reference_raw_table'); SELECT create_reference_table('reference_summary_table'); INSERT INTO reference_raw_table VALUES (1, 100); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1, 200); INSERT INTO reference_raw_table VALUES (1,300), (2, 400), (2,500) RETURNING *; INSERT INTO reference_summary_table VALUES (1); INSERT INTO reference_summary_table VALUES (2); SELECT * FROM reference_summary_table ORDER BY id; UPDATE reference_summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM reference_raw_table WHERE id = 1 ) average_query WHERE id = 1; UPDATE reference_summary_table SET (min_value, average_value) = (SELECT min(value), avg(value) FROM reference_raw_table WHERE id = 2) WHERE id = 2; SELECT * FROM reference_summary_table ORDER BY id; -- no need partition colum equalities on reference tables UPDATE reference_summary_table SET (count) = (SELECT id AS inner_id FROM reference_raw_table WHERE value = 500) WHERE min_value = 400; SELECT * FROM reference_summary_table ORDER BY id; -- can read from a reference table and update a distributed table UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM 
reference_raw_table WHERE id = 1 ) average_query WHERE id = 1; -- cannot read from a distributed table and update a reference table UPDATE reference_summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1; UPDATE reference_summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 AND id = 2 ) average_query WHERE id = 1; -- test master_modify_multiple_shards() with subqueries and expect to fail SELECT master_modify_multiple_shards(' UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 1 ) average_query WHERE id = 1'); -- test connection API via using COPY -- COPY on SELECT part BEGIN; \COPY raw_table FROM STDIN WITH CSV 3, 100 3, 200 \. INSERT INTO summary_table VALUES (3); UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 3 ) average_query WHERE id = 3; COMMIT; SELECT * FROM summary_table ORDER BY id; -- COPY on UPDATE part BEGIN; INSERT INTO raw_table VALUES (4, 100); INSERT INTO raw_table VALUES (4, 200); \COPY summary_table FROM STDIN WITH CSV 4,,,, \. UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 4 ) average_query WHERE id = 4; COMMIT; SELECT * FROM summary_table ORDER BY id; -- COPY on both part BEGIN; \COPY raw_table FROM STDIN WITH CSV 5, 100 5, 200 \. \COPY summary_table FROM STDIN WITH CSV 5,,,, \. UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM raw_table WHERE id = 5 ) average_query WHERE id = 5; COMMIT; SELECT * FROM summary_table ORDER BY id; -- COPY on reference tables BEGIN; \COPY reference_raw_table FROM STDIN WITH CSV 6, 100 6, 200 \. \COPY summary_table FROM STDIN WITH CSV 6,,,, \. UPDATE summary_table SET average_value = average_query.average FROM ( SELECT avg(value) AS average FROM reference_raw_table WHERE id = 6 ) average_query WHERE id = 6; COMMIT; SELECT * FROM summary_table ORDER BY id; -- test DELETE queries SELECT * FROM raw_table ORDER BY id, value; DELETE FROM summary_table WHERE min_value IN (SELECT value FROM raw_table WHERE id = 1) AND id = 1; SELECT * FROM summary_table ORDER BY id; -- test with different syntax DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = 2; SELECT * FROM summary_table ORDER BY id; -- cannot read from a distributed table and delete from a reference table DELETE FROM reference_summary_table USING raw_table WHERE reference_summary_table.id = raw_table.id AND raw_table.id = 3; SELECT * FROM summary_table ORDER BY id; -- test connection API via using COPY with DELETEs BEGIN; \COPY summary_table FROM STDIN WITH CSV 1,,,, 2,,,, \. 
DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = 1; DELETE FROM summary_table USING reference_raw_table WHERE summary_table.id = reference_raw_table.id AND reference_raw_table.id = 2; COMMIT; SELECT * FROM summary_table ORDER BY id; -- test DELETEs with prepared statements PREPARE prepared_delete_with_join(int) AS DELETE FROM summary_table USING raw_table WHERE summary_table.id = raw_table.id AND raw_table.id = $1; INSERT INTO raw_table VALUES (6, 100); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_delete_with_join(1); EXECUTE prepared_delete_with_join(2); EXECUTE prepared_delete_with_join(3); EXECUTE prepared_delete_with_join(4); EXECUTE prepared_delete_with_join(5); EXECUTE prepared_delete_with_join(6); SELECT * FROM summary_table ORDER BY id; DROP TABLE raw_table; DROP TABLE summary_table; DROP TABLE reference_raw_table; DROP TABLE reference_summary_table; citus-7.0.3/src/test/regress/sql/multi_modifying_xacts.sql000066400000000000000000000757031317107136600240320ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1200000; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART 1200000; -- =================================================================== -- test end-to-end modification functionality -- =================================================================== CREATE TABLE researchers ( id bigint NOT NULL, lab_id int NOT NULL, name text NOT NULL ); CREATE TABLE labs ( id bigint NOT NULL, name text NOT NULL ); SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); SELECT master_create_worker_shards('researchers', 2, 2); SELECT master_create_distributed_table('labs', 'id', 'hash'); SELECT master_create_worker_shards('labs', 1, 1); -- might be confusing to have two people in the same lab with the same name CREATE UNIQUE INDEX avoid_name_confusion_idx ON researchers (lab_id, name); -- add some data INSERT INTO researchers VALUES (1, 1, 'Donald Knuth'); INSERT INTO researchers VALUES (2, 1, 'Niklaus Wirth'); INSERT INTO researchers VALUES (3, 2, 'Tony Hoare'); INSERT INTO researchers VALUES (4, 2, 'Kenneth Iverson'); -- replace a researcher, reusing their id in a multi-row INSERT BEGIN; DELETE FROM researchers WHERE lab_id = 1 AND id = 2; INSERT INTO researchers VALUES (2, 1, 'John Backus'), (12, 1, 'Frances E. Allen'); COMMIT; SELECT name FROM researchers WHERE lab_id = 1 AND id % 10 = 2; -- and the other way around BEGIN; INSERT INTO researchers VALUES (14, 2, 'Alan Kay'), (15, 2, 'Barbara Liskov'); DELETE FROM researchers WHERE id = 14 AND lab_id = 2; ROLLBACK; -- should have rolled everything back SELECT * FROM researchers WHERE id = 15 AND lab_id = 2; -- abort a modification BEGIN; DELETE FROM researchers WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers WHERE lab_id = 1 AND id = 1; -- trigger a unique constraint violation BEGIN; UPDATE researchers SET name = 'John Backus' WHERE id = 1 AND lab_id = 1; ABORT; -- creating savepoints should work... BEGIN; INSERT INTO researchers VALUES (5, 3, 'Dennis Ritchie'); SAVEPOINT hire_thompson; INSERT INTO researchers VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers WHERE lab_id = 3 AND id = 6; -- even if created by PL/pgSQL... 
\set VERBOSITY terse BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (10, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; COMMIT; -- rollback should also work BEGIN; INSERT INTO researchers VALUES (7, 4, 'Jim Gray'); SAVEPOINT hire_engelbart; INSERT INTO researchers VALUES (8, 4, 'Douglas Engelbart'); ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers WHERE lab_id = 4; BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (11, 11, 'Whitfield Diffie'); INSERT INTO researchers VALUES (NULL, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; COMMIT; \set VERBOSITY default -- should be valid to edit labs after researchers... BEGIN; INSERT INTO researchers VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers, labs WHERE labs.id = researchers.lab_id; -- and the other way around is also allowed BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); INSERT INTO researchers VALUES (9, 6, 'Leslie Lamport'); COMMIT; -- we should be able to expand the transaction participants BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); INSERT INTO researchers VALUES (9, 6, 'Leslie Lamport'); ABORT; -- SELECTs may occur after a modification: First check that selecting -- from the modified node works. BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; ABORT; -- then check that SELECT going to new node still is fine BEGIN; UPDATE pg_dist_shard_placement AS sp SET shardstate = 3 FROM pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_1_port AND s.logicalrelid = 'researchers'::regclass; INSERT INTO labs VALUES (6, 'Bell Labs'); SELECT count(*) FROM researchers WHERE lab_id = 6; ABORT; -- we can mix DDL and INSERT BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); ALTER TABLE labs ADD COLUMN motto text; ABORT; -- whether it occurs first or second BEGIN; ALTER TABLE labs ADD COLUMN motto text; INSERT INTO labs VALUES (6, 'Bell Labs'); ABORT; -- but the DDL should correctly roll back SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.labs'::regclass; SELECT * FROM labs WHERE id = 6; -- COPY can happen after single row INSERT BEGIN; INSERT INTO labs VALUES (6, 'Bell Labs'); \copy labs from stdin delimiter ',' 10,Weyland-Yutani \. COMMIT; -- COPY cannot be performed if multiple shards were modified over the same connection BEGIN; INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'); INSERT INTO researchers VALUES (10, 6, 'Lamport Leslie'); \copy researchers from stdin delimiter ',' 3,1,Duth Knonald 10,6,Lesport Lampie \. ROLLBACK; -- COPY cannot be performed after a multi-row INSERT that uses one connection BEGIN; INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'), (10, 6, 'Lamport Leslie'); \copy researchers from stdin delimiter ',' 3,1,Duth Knonald 10,6,Lesport Lampie \. ROLLBACK; -- after a COPY you can modify multiple shards, since they'll use different connections BEGIN; \copy researchers from stdin delimiter ',' 3,1,Duth Knonald 10,6,Lesport Lampie \. INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'); INSERT INTO researchers VALUES (10, 6, 'Lamport Leslie'); ROLLBACK; -- after a COPY you can perform a multi-row INSERT BEGIN; \copy researchers from stdin delimiter ',' 3,1,Duth Knonald 10,6,Lesport Lampie \. 
INSERT INTO researchers VALUES (2, 1, 'Knuth Donald'), (10, 6, 'Lamport Leslie'); ROLLBACK; -- COPY can happen before single row INSERT BEGIN; \copy labs from stdin delimiter ',' 10,Weyland-Yutani \. SELECT name FROM labs WHERE id = 10; INSERT INTO labs VALUES (6, 'Bell Labs'); COMMIT; -- two consecutive COPYs in a transaction are allowed BEGIN; \copy labs from stdin delimiter ',' 11,Planet Express \. \copy labs from stdin delimiter ',' 12,fsociety \. COMMIT; SELECT name FROM labs WHERE id = 11 OR id = 12 ORDER BY id; -- 1pc failure test SELECT recover_prepared_transactions(); -- copy with unique index violation BEGIN; \copy researchers FROM STDIN delimiter ',' 17, 6, 'Bjarne Stroustrup' \. \copy researchers FROM STDIN delimiter ',' 18, 6, 'Bjarne Stroustrup' \. COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; SELECT count(*) FROM pg_dist_transaction; -- 2pc failure and success tests SET citus.multi_shard_commit_protocol TO '2pc'; SELECT recover_prepared_transactions(); -- copy with unique index violation BEGIN; \copy researchers FROM STDIN delimiter ',' 17, 6, 'Bjarne Stroustrup' \. \copy researchers FROM STDIN delimiter ',' 18, 6, 'Bjarne Stroustrup' \. COMMIT; -- verify rollback SELECT * FROM researchers WHERE lab_id = 6; SELECT count(*) FROM pg_dist_transaction; BEGIN; \copy researchers FROM STDIN delimiter ',' 17, 6, 'Bjarne Stroustrup' \. \copy researchers FROM STDIN delimiter ',' 18, 6, 'Dennis Ritchie' \. COMMIT; -- verify success SELECT * FROM researchers WHERE lab_id = 6; -- verify 2pc SELECT count(*) FROM pg_dist_transaction; RESET citus.multi_shard_commit_protocol; -- create a check function SELECT * from run_command_on_workers('CREATE FUNCTION reject_large_id() RETURNS trigger AS $rli$ BEGIN IF (NEW.id > 30) THEN RAISE ''illegal value''; END IF; RETURN NEW; END; $rli$ LANGUAGE plpgsql;') ORDER BY nodeport; -- register after insert trigger SELECT * FROM run_command_on_placements('researchers', 'CREATE CONSTRAINT TRIGGER reject_large_researcher_id AFTER INSERT ON %s DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_large_id()') ORDER BY nodeport, shardid; -- hide postgresql version-dependent messages for next test only \set VERBOSITY terse -- deferred check should abort the transaction BEGIN; DELETE FROM researchers WHERE lab_id = 6; \copy researchers FROM STDIN delimiter ',' 31, 6, 'Bjarne Stroustrup' \. \copy researchers FROM STDIN delimiter ',' 30, 6, 'Dennis Ritchie' \. COMMIT; \unset VERBOSITY -- verify everything including delete is rolled back SELECT * FROM researchers WHERE lab_id = 6; -- cleanup triggers and the function SELECT * from run_command_on_placements('researchers', 'drop trigger reject_large_researcher_id on %s') ORDER BY nodeport, shardid; SELECT * FROM run_command_on_workers('drop function reject_large_id()') ORDER BY nodeport; -- ALTER and copy are compatible BEGIN; ALTER TABLE labs ADD COLUMN motto text; \copy labs from stdin delimiter ',' 12,fsociety,lol \. ROLLBACK; BEGIN; \copy labs from stdin delimiter ',' 12,fsociety \.
ALTER TABLE labs ADD COLUMN motto text; ABORT; -- cannot perform DDL once a connection is used for multiple shards BEGIN; SELECT lab_id FROM researchers WHERE lab_id = 1 AND id = 0; SELECT lab_id FROM researchers WHERE lab_id = 2 AND id = 0; ALTER TABLE researchers ADD COLUMN motto text; ROLLBACK; -- multi-shard operations can co-exist with DDL in a transactional way BEGIN; ALTER TABLE labs ADD COLUMN motto text; SELECT master_modify_multiple_shards('DELETE FROM labs'); ALTER TABLE labs ADD COLUMN score float; ROLLBACK; -- should have rolled everything back SELECT * FROM labs WHERE id = 12; -- now, for some special failures... CREATE TABLE objects ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT master_create_distributed_table('objects', 'id', 'hash'); SELECT master_create_worker_shards('objects', 1, 2); -- test primary key violations BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (1, 'orange'); COMMIT; -- data shouldn't have persisted... SELECT * FROM objects WHERE id = 1; -- and placements should still be healthy... SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND s.logicalrelid = 'objects'::regclass; -- create trigger on one worker to reject certain values \c - - - :worker_2_port CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port -- test partial failure; worker_1 succeeds, 2 fails \set VERBOSITY terse BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (7, 'E Corp'); COMMIT; -- data should be persisted SELECT * FROM objects WHERE id = 2; SELECT * FROM labs WHERE id = 7; -- but one placement should be bad SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; DELETE FROM objects; -- mark shards as healthy again; delete all data UPDATE pg_dist_shard_placement AS sp SET shardstate = 1 FROM pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'objects'::regclass; -- what if there are errors on different shards at different times? \c - - - :worker_1_port CREATE FUNCTION reject_bad() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; SELECT * FROM labs WHERE id = 8; -- all placements should remain healthy SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); -- what if the failures happen at COMMIT time? 
\c - - - :worker_2_port DROP TRIGGER reject_bad ON objects_1200003; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON objects_1200003 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port -- should be the same story as before, just at COMMIT time BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (9, 'Umbrella Corporation'); COMMIT; -- data should be persisted SELECT * FROM objects WHERE id = 2; SELECT * FROM labs WHERE id = 7; -- but one placement should be bad SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.nodename = 'localhost' AND sp.nodeport = :worker_2_port AND sp.shardstate = 3 AND s.logicalrelid = 'objects'::regclass; DELETE FROM objects; -- mark shards as healthy again; delete all data UPDATE pg_dist_shard_placement AS sp SET shardstate = 1 FROM pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'objects'::regclass; -- what if all nodes have failures at COMMIT time? \c - - - :worker_1_port DROP TRIGGER reject_bad ON labs_1200002; CREATE CONSTRAINT TRIGGER reject_bad AFTER INSERT ON labs_1200002 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad(); \c - - - :master_port BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO objects VALUES (2, 'BAD'); INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; -- data should NOT be persisted SELECT * FROM objects WHERE id = 1; SELECT * FROM labs WHERE id = 8; -- all placements should remain healthy SELECT count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND sp.shardstate = 1 AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass); -- what if one shard (objects) succeeds but another (labs) completely fails? \c - - - :worker_2_port DROP TRIGGER reject_bad ON objects_1200003; \c - - - :master_port BEGIN; INSERT INTO objects VALUES (1, 'apple'); INSERT INTO labs VALUES (8, 'Aperture Science'); INSERT INTO labs VALUES (9, 'BAD'); COMMIT; \set VERBOSITY default -- data to objects should be persisted, but labs should not... 
SELECT * FROM objects WHERE id = 1; SELECT * FROM labs WHERE id = 8; -- labs should be healthy, but one object placement shouldn't be SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND (s.logicalrelid = 'objects'::regclass OR s.logicalrelid = 'labs'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; -- some append-partitioned tests for good measure CREATE TABLE append_researchers ( LIKE researchers ); SELECT master_create_distributed_table('append_researchers', 'id', 'append'); SET citus.shard_replication_factor TO 1; SELECT master_create_empty_shard('append_researchers') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 0, shardmaxvalue = 500000 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('append_researchers') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 500000, shardmaxvalue = 1000000 WHERE shardid = :new_shard_id; SET citus.shard_replication_factor TO DEFAULT; -- try single-shard INSERT BEGIN; INSERT INTO append_researchers VALUES (0, 0, 'John Backus'); COMMIT; SELECT * FROM append_researchers WHERE id = 0; -- try rollback BEGIN; DELETE FROM append_researchers WHERE id = 0; ROLLBACK; SELECT * FROM append_researchers WHERE id = 0; -- try hitting shard on other node BEGIN; INSERT INTO append_researchers VALUES (1, 1, 'John McCarthy'); INSERT INTO append_researchers VALUES (500000, 500000, 'Tony Hoare'); ROLLBACK; SELECT * FROM append_researchers; -- we use 2PC for reference tables by default -- let's add some tests for them CREATE TABLE reference_modifying_xacts (key int, value int); SELECT create_reference_table('reference_modifying_xacts'); -- very basic test, ensure that INSERTs work INSERT INTO reference_modifying_xacts VALUES (1, 1); SELECT * FROM reference_modifying_xacts; -- now ensure that it works in a transaction as well BEGIN; INSERT INTO reference_modifying_xacts VALUES (2, 2); SELECT * FROM reference_modifying_xacts; COMMIT; -- we should be able to see the insert outside of the transaction as well SELECT * FROM reference_modifying_xacts; -- rollback should also work BEGIN; INSERT INTO reference_modifying_xacts VALUES (3, 3); SELECT * FROM reference_modifying_xacts; ROLLBACK; -- see that we've not inserted SELECT * FROM reference_modifying_xacts; -- let's fail one of the workers before the commit time \c - - - :worker_1_port CREATE FUNCTION reject_bad_reference() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 999) THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference(); \c - - - :master_port \set VERBOSITY terse -- try without wrapping inside a transaction INSERT INTO reference_modifying_xacts VALUES (999, 3); -- same test within a transaction BEGIN; INSERT INTO reference_modifying_xacts VALUES (999, 3); COMMIT; -- let's fail one of the workers at COMMIT time \c - - - :worker_1_port DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006; CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference(); \c - - - :master_port \set VERBOSITY terse -- try without wrapping inside a transaction INSERT INTO reference_modifying_xacts VALUES (999, 3); -- same test within a
transaction BEGIN; INSERT INTO reference_modifying_xacts VALUES (999, 3); COMMIT; -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_modifying_xacts'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; -- for the time-being drop the constraint \c - - - :worker_1_port DROP TRIGGER reject_bad_reference ON reference_modifying_xacts_1200006; \c - - - :master_port -- now create a hash distributed table and run tests -- including both the reference table and the hash -- distributed table SET citus.shard_count = 4; SET citus.shard_replication_factor = 1; CREATE TABLE hash_modifying_xacts (key int, value int); SELECT create_distributed_table('hash_modifying_xacts', 'key'); -- let's try to expand the xact participants BEGIN; INSERT INTO hash_modifying_xacts VALUES (1, 1); INSERT INTO reference_modifying_xacts VALUES (10, 10); COMMIT; -- it is allowed when turning off deadlock prevention BEGIN; INSERT INTO hash_modifying_xacts VALUES (1, 1); INSERT INTO reference_modifying_xacts VALUES (10, 10); ABORT; BEGIN; INSERT INTO hash_modifying_xacts VALUES (1, 1); INSERT INTO hash_modifying_xacts VALUES (2, 2); ABORT; -- let's fail one of the workers before COMMIT time for the hash table \c - - - :worker_1_port CREATE FUNCTION reject_bad_hash() RETURNS trigger AS $rb$ BEGIN IF (NEW.key = 997) THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_hash(); \c - - - :master_port \set VERBOSITY terse -- the transaction as a whole should fail BEGIN; INSERT INTO reference_modifying_xacts VALUES (55, 10); INSERT INTO hash_modifying_xacts VALUES (997, 1); COMMIT; -- ensure that the value didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 55; -- now let's fail one of the workers for the hash distributed table -- when there is a reference table involved \c - - - :worker_1_port DROP TRIGGER reject_bad_hash ON hash_modifying_xacts_1200007; -- the trigger is on execution time CREATE CONSTRAINT TRIGGER reject_bad_hash AFTER INSERT ON hash_modifying_xacts_1200007 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_hash(); \c - - - :master_port \set VERBOSITY terse -- the transaction as a whole should fail BEGIN; INSERT INTO reference_modifying_xacts VALUES (12, 12); INSERT INTO hash_modifying_xacts VALUES (997, 1); COMMIT; -- ensure that the values didn't go into the reference table SELECT * FROM reference_modifying_xacts WHERE key = 12; -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; -- now, fail the insert on reference table -- and ensure that hash distributed table's -- change is rolled back as well \c - - - :worker_1_port CREATE CONSTRAINT TRIGGER reject_bad_reference AFTER INSERT ON reference_modifying_xacts_1200006 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_reference(); \c - - - :master_port \set VERBOSITY terse BEGIN; -- to expand
participant to include all worker nodes INSERT INTO reference_modifying_xacts VALUES (66, 3); INSERT INTO hash_modifying_xacts VALUES (80, 1); INSERT INTO reference_modifying_xacts VALUES (999, 3); COMMIT; SELECT * FROM hash_modifying_xacts WHERE key = 80; SELECT * FROM reference_modifying_xacts WHERE key = 66; SELECT * FROM reference_modifying_xacts WHERE key = 999; -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND (s.logicalrelid = 'reference_modifying_xacts'::regclass OR s.logicalrelid = 'hash_modifying_xacts'::regclass) GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; -- now show that all modifications to reference -- tables are done in 2PC SELECT recover_prepared_transactions(); INSERT INTO reference_modifying_xacts VALUES (70, 70); SELECT count(*) FROM pg_dist_transaction; -- reset the transactions table SELECT recover_prepared_transactions(); BEGIN; INSERT INTO reference_modifying_xacts VALUES (71, 71); COMMIT; SELECT count(*) FROM pg_dist_transaction; -- create a hash distributed table which spans all nodes SET citus.shard_count = 4; SET citus.shard_replication_factor = 2; CREATE TABLE hash_modifying_xacts_second (key int, value int); SELECT create_distributed_table('hash_modifying_xacts_second', 'key'); -- reset the transactions table SELECT recover_prepared_transactions(); BEGIN; INSERT INTO hash_modifying_xacts_second VALUES (72, 1); INSERT INTO reference_modifying_xacts VALUES (72, 3); COMMIT; SELECT count(*) FROM pg_dist_transaction; -- reset the transactions table SELECT recover_prepared_transactions(); DELETE FROM reference_modifying_xacts; SELECT count(*) FROM pg_dist_transaction; -- reset the transactions table SELECT recover_prepared_transactions(); UPDATE reference_modifying_xacts SET key = 10; SELECT count(*) FROM pg_dist_transaction; -- now on to one more type of failure testing -- in which we'll make the remote host unavailable -- first create the new user on all nodes CREATE USER test_user; \c - - - :worker_1_port CREATE USER test_user; \c - - - :worker_2_port CREATE USER test_user; -- now connect back to the master with the new user \c - test_user - :master_port CREATE TABLE reference_failure_test (key int, value int); SELECT create_reference_table('reference_failure_test'); -- create a hash distributed table SET citus.shard_count TO 4; CREATE TABLE numbers_hash_failure_test(key int, value int); SELECT create_distributed_table('numbers_hash_failure_test', 'key'); -- ensure that the shard is created for this user \c - test_user - :worker_1_port \dt reference_failure_test_1200015 -- now connect with the default user, -- and rename the existing user \c - :default_user - :worker_1_port ALTER USER test_user RENAME TO test_user_new; -- connect back to master and query the reference table \c - test_user - :master_port -- should fail since the worker doesn't have test_user anymore INSERT INTO reference_failure_test VALUES (1, '1'); -- the same as the above, but wrapped within a transaction BEGIN; INSERT INTO reference_failure_test VALUES (1, '1'); COMMIT; BEGIN; COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv'); 2,2 \.
COMMIT; -- show that no data goes through the table and shard states are good SELECT * FROM reference_failure_test; -- all placements should be healthy SELECT s.logicalrelid::regclass::text, sp.shardstate, count(*) FROM pg_dist_shard_placement AS sp, pg_dist_shard AS s WHERE sp.shardid = s.shardid AND s.logicalrelid = 'reference_failure_test'::regclass GROUP BY s.logicalrelid, sp.shardstate ORDER BY s.logicalrelid, sp.shardstate; BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 \. -- some placements are invalid before abort SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; ABORT; -- verify nothing is inserted SELECT count(*) FROM numbers_hash_failure_test; -- all placements should be marked valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); 1,1 2,2 \. -- check shard states before commit SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; COMMIT; -- expect some placements to be marked invalid after commit SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement JOIN pg_dist_shard USING (shardid) WHERE logicalrelid = 'numbers_hash_failure_test'::regclass ORDER BY shardid, nodeport; -- verify data is inserted SELECT count(*) FROM numbers_hash_failure_test; -- connect back to the worker and rename test_user back \c - :default_user - :worker_1_port ALTER USER test_user_new RENAME TO test_user; -- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port DROP TABLE reference_modifying_xacts, hash_modifying_xacts, hash_modifying_xacts_second, reference_failure_test, numbers_hash_failure_test; SELECT * FROM run_command_on_workers('DROP USER test_user'); DROP USER test_user; -- set up foreign keys to test transactions with co-located and reference tables BEGIN; SET LOCAL citus.shard_replication_factor TO 1; SET LOCAL citus.shard_count TO 4; CREATE TABLE usergroups ( gid int PRIMARY KEY, name text ); SELECT create_reference_table('usergroups'); CREATE TABLE itemgroups ( gid int PRIMARY KEY, name text ); SELECT create_reference_table('itemgroups'); CREATE TABLE users ( id int PRIMARY KEY, name text, user_group int ); SELECT create_distributed_table('users', 'id'); CREATE TABLE items ( user_id int REFERENCES users (id) ON DELETE CASCADE, item_name text, item_group int ); SELECT create_distributed_table('items', 'user_id'); -- Table to find values that live in different shards on the same node SELECT id, shard_name('users', shardid), nodename, nodeport FROM pg_dist_shard_placement JOIN ( SELECT id, get_shard_id_for_distribution_column('users', id) shardid FROM generate_series(1,10) id ) ids USING (shardid) ORDER BY id; END; -- the INSERTs into items should see the users BEGIN; \COPY users FROM STDIN WITH CSV 1,brian,0 6,metin,0 \.
INSERT INTO items VALUES (1, 'item-1'); INSERT INTO items VALUES (6, 'item-6'); END; SELECT user_id FROM items ORDER BY user_id; -- should not be able to open multiple connections per node after INSERTing over one connection BEGIN; INSERT INTO users VALUES (2, 'burak'); INSERT INTO users VALUES (3, 'burak'); \COPY items FROM STDIN WITH CSV 2,item-2,0 3,item-3,0 \. END; -- cannot perform DDL after a co-located table has been read over 1 connection BEGIN; SELECT id FROM users WHERE id = 1; SELECT id FROM users WHERE id = 6; ALTER TABLE items ADD COLUMN last_update timestamptz; END; -- but the other way around is fine BEGIN; ALTER TABLE items ADD COLUMN last_update timestamptz; SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 1; SELECT id FROM users JOIN items ON (id = user_id) WHERE id = 6; END; BEGIN; -- establish multiple connections to a node \COPY users FROM STDIN WITH CSV 2,burak,0 3,burak,0 \. -- now read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; -- perform a DDL command on the reference table ALTER TABLE itemgroups ADD COLUMN last_update timestamptz; END; BEGIN; -- establish multiple connections to a node \COPY users FROM STDIN WITH CSV 2,burak,0 3,burak,0 \. -- read from the reference table over each connection SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 2; SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 3; -- perform a DDL command on a co-located reference table ALTER TABLE usergroups ADD COLUMN last_update timestamptz; END; BEGIN; -- make a modification over connection 1 INSERT INTO usergroups VALUES (0,'istanbul'); -- copy over connections 1 and 2 \COPY users FROM STDIN WITH CSV 2,burak,0 3,burak,0 \. -- cannot read modifications made over different connections SELECT id FROM users JOIN usergroups ON (gid = user_group) WHERE id = 3; END; -- make sure we can see cascading deletes BEGIN; SELECT master_modify_multiple_shards('DELETE FROM users'); SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 1; SELECT user_id FROM items JOIN itemgroups ON (item_group = gid) WHERE user_id = 6; END; -- test visibility after COPY INSERT INTO usergroups VALUES (2,'group'); BEGIN; -- opens two separate connections to node \COPY users FROM STDIN WITH CSV 2,onder,2 4,murat,2 \. -- Uses first connection, which wrote the row with id = 2 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 2; -- Should use second connection, which wrote the row with id = 4 SELECT * FROM users JOIN usergroups ON (user_group = gid) WHERE id = 4; END; DROP TABLE items, users, itemgroups, usergroups, researchers, labs; citus-7.0.3/src/test/regress/sql/multi_multiuser.sql000066400000000000000000000062171317107136600226660ustar00rootroot00000000000000-- -- MULTI_MULTIUSERS -- -- Test user permissions. 
-- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1420000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1420000; SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 2; CREATE TABLE test (id integer, val integer); SELECT create_distributed_table('test', 'id'); -- turn off propagation to avoid Enterprise processing the following section SET citus.enable_ddl_propagation TO off; CREATE USER full_access; CREATE USER read_access; CREATE USER no_access; GRANT ALL ON TABLE test TO full_access; GRANT SELECT ON TABLE test TO read_access; SET citus.enable_ddl_propagation TO DEFAULT; \c - - - :worker_1_port CREATE USER full_access; CREATE USER read_access; CREATE USER no_access; GRANT ALL ON TABLE test_1420000 TO full_access; GRANT SELECT ON TABLE test_1420000 TO read_access; \c - - - :worker_2_port CREATE USER full_access; CREATE USER read_access; CREATE USER no_access; GRANT ALL ON TABLE test_1420001 TO full_access; GRANT SELECT ON TABLE test_1420001 TO read_access; \c - - - :master_port -- create prepare tests PREPARE prepare_insert AS INSERT INTO test VALUES ($1); PREPARE prepare_select AS SELECT count(*) FROM test; -- not allowed to read absolute paths, even as superuser COPY "/etc/passwd" TO STDOUT WITH (format transmit); -- check full permission SET ROLE full_access; EXECUTE prepare_insert(1); EXECUTE prepare_select; INSERT INTO test VALUES (2); SELECT count(*) FROM test; SELECT count(*) FROM test WHERE id = 1; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM test; -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); SET citus.task_executor_type TO 'real-time'; -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); -- check read permission SET ROLE read_access; EXECUTE prepare_insert(1); EXECUTE prepare_select; INSERT INTO test VALUES (2); SELECT count(*) FROM test; SELECT count(*) FROM test WHERE id = 1; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM test; -- test re-partition query (needs to transmit intermediate results) SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); SET citus.task_executor_type TO 'real-time'; -- check no permission SET ROLE no_access; EXECUTE prepare_insert(1); EXECUTE prepare_select; INSERT INTO test VALUES (2); SELECT count(*) FROM test; SELECT count(*) FROM test WHERE id = 1; SET citus.task_executor_type TO 'task-tracker'; SELECT count(*) FROM test; -- test re-partition query SELECT count(*) FROM test a JOIN test b ON (a.val = b.val) WHERE a.id = 1 AND b.id = 2; -- should not be able to transmit directly COPY "postgresql.conf" TO STDOUT WITH (format transmit); SET citus.task_executor_type TO 'real-time'; RESET ROLE; DROP TABLE test; DROP USER full_access; DROP USER read_access; DROP USER no_access; citus-7.0.3/src/test/regress/sql/multi_mx_create_table.sql000066400000000000000000000311241317107136600237460ustar00rootroot00000000000000-- -- MULTI_MX_CREATE_TABLE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); SELECT start_metadata_sync_to_node('localhost', :worker_2_port); -- create schema to test schema support CREATE SCHEMA 
citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDFs that we're going to use in our tests SET search_path TO public; CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); SET search_path TO public; CREATE COLLATION citus_mx_test_schema.english FROM "en_US"; CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDFs in worker node CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); SET search_path TO public; CREATE COLLATION citus_mx_test_schema.english FROM "en_US"; SET search_path TO public; CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 2 \c - - - :worker_2_port -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; CREATE SCHEMA citus_mx_test_schema_join_2; -- create UDF CREATE OR REPLACE FUNCTION simpleTestFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; SET search_path TO citus_mx_test_schema; CREATE OR REPLACE FUNCTION simpleTestFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); SET search_path TO public; CREATE COLLATION citus_mx_test_schema.english FROM "en_US"; SET search_path TO public; CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); -- connect back 
to the master, and do some more tests \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET search_path TO public; CREATE TABLE nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SET citus.shard_count TO 16; SELECT create_distributed_table('nation_hash', 'n_nationkey'); SET search_path TO citus_mx_test_schema; -- create mx tables that we're going to use for our tests CREATE TABLE citus_mx_test_schema.nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT create_distributed_table('nation_hash', 'n_nationkey'); CREATE TABLE citus_mx_test_schema_join_1.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SET citus.shard_count TO 4; SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash', 'n_nationkey'); CREATE TABLE citus_mx_test_schema_join_1.nation_hash_2 ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('citus_mx_test_schema_join_1.nation_hash_2', 'n_nationkey'); SET search_path TO citus_mx_test_schema_join_2; CREATE TABLE nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('nation_hash', 'n_nationkey'); SET search_path TO citus_mx_test_schema; CREATE TABLE nation_hash_collation_search_path( n_nationkey integer not null, n_name char(25) not null COLLATE english, n_regionkey integer not null, n_comment varchar(152) ); SELECT create_distributed_table('nation_hash_collation_search_path', 'n_nationkey'); \COPY nation_hash_collation_search_path FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. CREATE TABLE citus_mx_test_schema.nation_hash_composite_types( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152), test_col citus_mx_test_schema.new_composite_type ); SELECT create_distributed_table('citus_mx_test_schema.nation_hash_composite_types', 'n_nationkey'); -- insert some data to verify composite type queries \COPY citus_mx_test_schema.nation_hash_composite_types FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai|(a,a) 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon|(a,b) 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special |(a,c) 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold|(a,d) 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d|(a,e) 5|ETHIOPIA|0|ven packages wake quickly. regu|(a,f) \. 
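-- Illustrative check, not part of the original test schedule: the standard Citus
-- catalog pg_dist_partition can confirm the distribution method and replication
-- model recorded for the schema-qualified tables created above.
SELECT logicalrelid, partmethod, repmodel
FROM pg_dist_partition
WHERE logicalrelid IN ('citus_mx_test_schema.nation_hash'::regclass,
                       'citus_mx_test_schema.nation_hash_composite_types'::regclass)
ORDER BY logicalrelid;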
-- now create tpch tables -- Create new table definitions for use in testing in distributed planning and -- execution functionality. Also create indexes to boost performance. SET search_path TO public; CREATE TABLE lineitem_mx ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); SET citus.shard_count TO 16; SELECT create_distributed_table('lineitem_mx', 'l_orderkey'); CREATE INDEX lineitem_mx_time_index ON lineitem_mx (l_shipdate); CREATE TABLE orders_mx ( o_orderkey bigint not null, o_custkey integer not null, o_orderstatus char(1) not null, o_totalprice decimal(15,2) not null, o_orderdate date not null, o_orderpriority char(15) not null, o_clerk char(15) not null, o_shippriority integer not null, o_comment varchar(79) not null, PRIMARY KEY(o_orderkey) ); SELECT create_distributed_table('orders_mx', 'o_orderkey'); CREATE TABLE customer_mx ( c_custkey integer not null, c_name varchar(25) not null, c_address varchar(40) not null, c_nationkey integer not null, c_phone char(15) not null, c_acctbal decimal(15,2) not null, c_mktsegment char(10) not null, c_comment varchar(117) not null); SET citus.shard_count TO 1; SELECT create_distributed_table('customer_mx', 'c_custkey'); CREATE TABLE nation_mx ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT create_distributed_table('nation_mx', 'n_nationkey'); CREATE TABLE part_mx ( p_partkey integer not null, p_name varchar(55) not null, p_mfgr char(25) not null, p_brand char(10) not null, p_type varchar(25) not null, p_size integer not null, p_container char(10) not null, p_retailprice decimal(15,2) not null, p_comment varchar(23) not null); SELECT create_distributed_table('part_mx', 'p_partkey'); CREATE TABLE supplier_mx ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); SELECT create_distributed_table('supplier_mx', 's_suppkey'); -- Create test table for ddl CREATE TABLE mx_ddl_table ( key int primary key, value int ); SET citus.shard_count TO 4; SELECT create_distributed_table('mx_ddl_table', 'key', 'hash'); -- Load some test data COPY mx_ddl_table (key, value) FROM STDIN WITH (FORMAT 'csv'); 1,10 2,11 3,21 4,37 5,60 6,100 10,200 11,230 \. 
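-- Illustrative sanity checks, not part of the original test schedule: the COPY
-- above loaded eight rows into mx_ddl_table, and filtering on the distribution
-- column key should let the planner prune the query to a single shard.
SELECT count(*) FROM mx_ddl_table;
EXPLAIN (COSTS FALSE) SELECT value FROM mx_ddl_table WHERE key = 1;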
-- test table for modifications CREATE TABLE limit_orders_mx ( id bigint PRIMARY KEY, symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, kind order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); SET citus.shard_count TO 2; SELECT create_distributed_table('limit_orders_mx', 'id'); -- test table for modifications CREATE TABLE multiple_hash_mx ( category text NOT NULL, data text NOT NULL ); SELECT create_distributed_table('multiple_hash_mx', 'category'); SET citus.shard_count TO 4; CREATE TABLE app_analytics_events_mx (id bigserial, app_id integer, name text); SELECT create_distributed_table('app_analytics_events_mx', 'app_id'); CREATE TABLE researchers_mx ( id bigint NOT NULL, lab_id int NOT NULL, name text NOT NULL ); SET citus.shard_count TO 2; SELECT create_distributed_table('researchers_mx', 'lab_id'); CREATE TABLE labs_mx ( id bigint NOT NULL, name text NOT NULL ); SET citus.shard_count TO 1; SELECT create_distributed_table('labs_mx', 'id'); -- now, for some special failures... CREATE TABLE objects_mx ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT create_distributed_table('objects_mx', 'id', 'hash'); CREATE TABLE articles_hash_mx ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); -- this table is used in router executor tests CREATE TABLE articles_single_shard_hash_mx (LIKE articles_hash_mx); SET citus.shard_count TO 2; SELECT create_distributed_table('articles_hash_mx', 'author_id'); SET citus.shard_count TO 1; SELECT create_distributed_table('articles_single_shard_hash_mx', 'author_id'); SET citus.shard_count TO 4; CREATE TABLE company_employees_mx (company_id int, employee_id int, manager_id int); SELECT create_distributed_table('company_employees_mx', 'company_id'); WITH shard_counts AS ( SELECT logicalrelid, count(*) AS shard_count FROM pg_dist_shard GROUP BY logicalrelid ) SELECT logicalrelid, colocationid, shard_count, partmethod, repmodel FROM pg_dist_partition NATURAL JOIN shard_counts ORDER BY colocationid, logicalrelid; citus-7.0.3/src/test/regress/sql/multi_mx_ddl.sql000066400000000000000000000073311317107136600221020ustar00rootroot00000000000000-- Tests related to distributed DDL commands on mx cluster SELECT * FROM mx_ddl_table ORDER BY key; -- CREATE INDEX CREATE INDEX ddl_test_index ON mx_ddl_table(value); CREATE INDEX CONCURRENTLY ddl_test_concurrent_index ON mx_ddl_table(value); -- ADD COLUMN ALTER TABLE mx_ddl_table ADD COLUMN version INTEGER; -- SET DEFAULT ALTER TABLE mx_ddl_table ALTER COLUMN version SET DEFAULT 1; SELECT master_modify_multiple_shards('UPDATE mx_ddl_table SET version=0.1 WHERE version IS NULL'); -- SET NOT NULL ALTER TABLE mx_ddl_table ALTER COLUMN version SET NOT NULL; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; \d ddl_test*_index \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; \d ddl_test*_index SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; \d ddl_test*_index_1220088 \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; \d ddl_test*_index SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; \d ddl_test*_index_1220089 INSERT INTO mx_ddl_table VALUES (37, 78, 2); INSERT INTO 
mx_ddl_table VALUES (38, 78); -- Switch to the coordinator \c - - - :master_port -- SET DATA TYPE ALTER TABLE mx_ddl_table ALTER COLUMN version SET DATA TYPE double precision; INSERT INTO mx_ddl_table VALUES (78, 83, 2.1); \c - - - :worker_1_port SELECT * FROM mx_ddl_table ORDER BY key; -- Switch to the coordinator \c - - - :master_port -- DROP INDEX DROP INDEX ddl_test_index; DROP INDEX CONCURRENTLY ddl_test_concurrent_index; -- DROP DEFAULT ALTER TABLE mx_ddl_table ALTER COLUMN version DROP DEFAULT; -- DROP NOT NULL ALTER TABLE mx_ddl_table ALTER COLUMN version DROP NOT NULL; -- DROP COLUMN ALTER TABLE mx_ddl_table DROP COLUMN version; -- See that the changes are applied on coordinator, worker tables and shards SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; \di ddl_test*_index \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; \di ddl_test*_index SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220088'::regclass; \di ddl_test*_index_1220088 \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table'::regclass; \di ddl_test*_index SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='mx_ddl_table_1220089'::regclass; \di ddl_test*_index_1220089 -- Show that DDL commands are done within a two-phase commit transaction \c - - - :master_port SET client_min_messages TO debug2; CREATE INDEX ddl_test_index ON mx_ddl_table(value); RESET client_min_messages; DROP INDEX ddl_test_index; -- show that sequences owned by mx tables result in unique values SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; SET citus.replication_model TO streaming; CREATE TABLE mx_sequence(key INT, value BIGSERIAL); SELECT create_distributed_table('mx_sequence', 'key'); \c - - - :worker_1_port SELECT last_value AS worker_1_lastval FROM mx_sequence_value_seq \gset \c - - - :worker_2_port SELECT last_value AS worker_2_lastval FROM mx_sequence_value_seq \gset \c - - - :master_port -- don't look at the actual values because they rely on the groupids of the nodes -- which can change depending on the tests which have run before this one SELECT :worker_1_lastval = :worker_2_lastval; -- the type of sequences can't be changed ALTER TABLE mx_sequence ALTER value TYPE BIGINT; ALTER TABLE mx_sequence ALTER value TYPE INT; citus-7.0.3/src/test/regress/sql/multi_mx_explain.sql000066400000000000000000000125561317107136600230040ustar00rootroot00000000000000-- -- MULTI_MX_EXPLAIN -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1320000; \c - - - :worker_1_port \c - - - :worker_2_port \c - - - :master_port \a\t SET citus.task_executor_type TO 'real-time'; SET citus.explain_distributed_queries TO on; \c - - - :worker_1_port -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; \c - - - :worker_2_port -- Function that parses explain output as JSON CREATE FUNCTION explain_json(query text) RETURNS jsonb AS $BODY$ DECLARE result jsonb; BEGIN EXECUTE format('EXPLAIN (FORMAT JSON) %s', query) INTO 
result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Function that parses explain output as XML CREATE FUNCTION explain_xml(query text) RETURNS xml AS $BODY$ DECLARE result xml; BEGIN EXECUTE format('EXPLAIN (FORMAT XML) %s', query) INTO result; RETURN result; END; $BODY$ LANGUAGE plpgsql; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Test JSON format EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Validate JSON format SELECT true AS valid FROM explain_json($$ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); \c - - - :worker_1_port -- Test XML format EXPLAIN (COSTS FALSE, FORMAT XML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Validate XML format SELECT true AS valid FROM explain_xml($$ SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity$$); -- Test YAML format EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; -- Test Text format EXPLAIN (COSTS FALSE, FORMAT TEXT) SELECT l_quantity, count(*) count_quantity FROM lineitem_mx GROUP BY l_quantity ORDER BY count_quantity, l_quantity; \c - - - :worker_2_port -- Test verbose EXPLAIN (COSTS FALSE, VERBOSE TRUE) SELECT sum(l_quantity) / avg(l_quantity) FROM lineitem_mx; -- Test join EXPLAIN (COSTS FALSE) SELECT * FROM lineitem_mx JOIN orders_mx ON l_orderkey = o_orderkey AND l_quantity < 5.0 ORDER BY l_quantity LIMIT 10; -- Test insert EXPLAIN (COSTS FALSE) INSERT INTO lineitem_mx VALUES(1,0); -- Test update EXPLAIN (COSTS FALSE) UPDATE lineitem_mx SET l_suppkey = 12 WHERE l_orderkey = 1 AND l_partkey = 0; -- Test delete EXPLAIN (COSTS FALSE) DELETE FROM lineitem_mx WHERE l_orderkey = 1 AND l_partkey = 0; -- make the outputs more consistent VACUUM ANALYZE lineitem_mx; -- Test single-shard SELECT EXPLAIN (COSTS FALSE) SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5; SELECT true AS valid FROM explain_xml($$ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$); SELECT true AS valid FROM explain_json($$ SELECT l_quantity FROM lineitem_mx WHERE l_orderkey = 5$$); -- Test CREATE TABLE ... 
AS EXPLAIN (COSTS FALSE) CREATE TABLE explain_result AS SELECT * FROM lineitem_mx; -- Test all tasks output SET citus.explain_all_tasks TO on; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030; SELECT true AS valid FROM explain_xml($$ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$); SELECT true AS valid FROM explain_json($$ SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030$$); -- Test track tracker SET citus.task_executor_type TO 'task-tracker'; SET citus.explain_all_tasks TO off; EXPLAIN (COSTS FALSE) SELECT avg(l_linenumber) FROM lineitem_mx WHERE l_orderkey > 9030; -- Test re-partition join SET citus.large_table_shard_count TO 1; EXPLAIN (COSTS FALSE) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; EXPLAIN (COSTS FALSE, FORMAT JSON) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; SELECT true AS valid FROM explain_json($$ SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); EXPLAIN (COSTS FALSE, FORMAT XML) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; SELECT true AS valid FROM explain_xml($$ SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey$$); EXPLAIN (COSTS FALSE, FORMAT YAML) SELECT count(*) FROM lineitem_mx, orders_mx, customer_mx, supplier_mx WHERE l_orderkey = o_orderkey AND o_custkey = c_custkey AND l_suppkey = s_suppkey; citus-7.0.3/src/test/regress/sql/multi_mx_metadata.sql000066400000000000000000000130301317107136600231100ustar00rootroot00000000000000-- Test creation of mx tables and metadata syncing -- get rid of the previously created entries in pg_dist_transaction -- for the sake of getting consistent results in this test file SELECT recover_prepared_transactions(); CREATE TABLE distributed_mx_table ( key text primary key, value jsonb ); CREATE INDEX ON distributed_mx_table USING GIN (value); SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 4; SELECT create_distributed_table('distributed_mx_table', 'key'); -- Verify that we've logged commit records SELECT count(*) FROM pg_dist_transaction; -- Confirm that the metadata transactions have been committed SELECT recover_prepared_transactions(); -- Verify that the commit records have been removed SELECT count(*) FROM pg_dist_transaction; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; \d distributed_mx_table_pkey \d distributed_mx_table_value_idx SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; \c - - - :worker_2_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='distributed_mx_table'::regclass; \d distributed_mx_table_pkey \d distributed_mx_table_value_idx SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'distributed_mx_table'::regclass; SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement 
USING (shardid) WHERE logicalrelid = 'distributed_mx_table'::regclass; -- Create a table and then roll back the transaction \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; BEGIN; CREATE TABLE should_not_exist ( key text primary key, value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); ABORT; -- Verify that the table does not exist on the worker \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_not_exist'; -- Ensure that we don't allow prepare on a metadata transaction \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; BEGIN; CREATE TABLE should_not_exist ( key text primary key, value jsonb ); SELECT create_distributed_table('should_not_exist', 'key'); PREPARE TRANSACTION 'this_should_fail'; -- now show that we can create tables and schemas withing a single transaction BEGIN; CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts; SET search_path TO citus_mx_schema_for_xacts; SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 1; CREATE TABLE objects_for_xacts ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts', 'id'); COMMIT; -- see that the table actually created and distributed \c - - - :worker_1_port SELECT repmodel FROM pg_dist_partition WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; SELECT count(*) FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) WHERE logicalrelid = 'citus_mx_schema_for_xacts.objects_for_xacts'::regclass; \c - - - :master_port SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; -- now show that we can rollback on creating mx table, but shards remain.... 
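-- Illustrative aside, not part of the original file: if a two-phase commit is
-- interrupted, the dangling prepared transactions it leaves behind can be listed
-- on any node with the stock PostgreSQL view pg_prepared_xacts before calling
-- recover_prepared_transactions() to resolve them.
SELECT gid, prepared, owner FROM pg_prepared_xacts ORDER BY prepared;
-- Back to the rollback scenario described above.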
BEGIN; CREATE SCHEMA IF NOT EXISTS citus_mx_schema_for_xacts; SET search_path TO citus_mx_schema_for_xacts; SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 2; CREATE TABLE objects_for_xacts2 ( id bigint PRIMARY KEY, name text NOT NULL ); SELECT create_distributed_table('objects_for_xacts2', 'id'); ROLLBACK; -- show that the table not exists on the coordinator SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; \c - - - :worker_1_port -- the distributed table not exists on the worker node SELECT count(*) FROM pg_tables WHERE tablename = 'objects_for_xacts2' and schemaname = 'citus_mx_schema_for_xacts'; -- shard also does not exist since we create shards in a transaction SELECT count(*) FROM pg_tables WHERE tablename LIKE 'objects_for_xacts2_%' and schemaname = 'citus_mx_schema_for_xacts'; -- make sure that master_drop_all_shards does not work from the worker nodes SELECT master_drop_all_shards('citus_mx_schema_for_xacts.objects_for_xacts'::regclass, 'citus_mx_schema_for_xacts', 'objects_for_xacts'); -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); -- Create some "fake" prepared transactions to recover \c - - - :worker_1_port BEGIN; CREATE TABLE should_abort (value int); PREPARE TRANSACTION 'citus_0_should_abort'; BEGIN; CREATE TABLE should_commit (value int); PREPARE TRANSACTION 'citus_0_should_commit'; BEGIN; CREATE TABLE should_be_sorted_into_middle (value int); PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle'; \c - - - :master_port -- Add "fake" pg_dist_transaction records and run recovery SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport = :worker_1_port \gset INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_commit'); INSERT INTO pg_dist_transaction VALUES (:worker_1_group, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); SELECT count(*) FROM pg_dist_transaction; -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; citus-7.0.3/src/test/regress/sql/multi_mx_modifications.sql000066400000000000000000000276171317107136600242000ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1330000; -- =================================================================== -- test end-to-end modification functionality for mx tables -- =================================================================== -- basic single-row INSERT INSERT INTO limit_orders_mx VALUES (32743, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32743; -- now singe-row INSERT from a worker \c - - - :worker_1_port INSERT INTO limit_orders_mx VALUES (32744, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32744; -- now singe-row INSERT to the other worker \c - - - :worker_2_port INSERT INTO limit_orders_mx VALUES (32745, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 32745; -- and see all the inserted rows SELECT * FROM limit_orders_mx; -- basic single-row INSERT with RETURNING INSERT INTO limit_orders_mx VALUES (32746, 'AAPL', 9580, '2004-10-19 10:23:54', 'buy', 20.69) RETURNING *; -- INSERT with DEFAULT in the target list INSERT INTO limit_orders_mx VALUES (12756, 'MSFT', 10959, '2013-05-08 07:29:23', 
'sell', DEFAULT); SELECT * FROM limit_orders_mx WHERE id = 12756; -- INSERT with expressions in target list INSERT INTO limit_orders_mx VALUES (430, upper('ibm'), 214, timestamp '2003-01-28 10:31:17' + interval '5 hours', 'buy', sqrt(2)); SELECT * FROM limit_orders_mx WHERE id = 430; -- INSERT without partition key INSERT INTO limit_orders_mx DEFAULT VALUES; -- squelch WARNINGs that contain worker_port SET client_min_messages TO ERROR; -- INSERT violating NOT NULL constraint INSERT INTO limit_orders_mx VALUES (NULL, 'T', 975234, DEFAULT); -- INSERT violating column constraint INSERT INTO limit_orders_mx VALUES (18811, 'BUD', 14962, '2014-04-05 08:32:16', 'sell', -5.00); -- INSERT violating primary key constraint INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58); -- INSERT violating primary key constraint, with RETURNING specified. INSERT INTO limit_orders_mx VALUES (32743, 'LUV', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING *; -- INSERT, with RETURNING specified, failing with a non-constraint error INSERT INTO limit_orders_mx VALUES (34153, 'LEE', 5994, '2001-04-16 03:37:28', 'buy', 0.58) RETURNING id / 0; SET client_min_messages TO DEFAULT; -- commands with non-constant partition values are unsupported INSERT INTO limit_orders_mx VALUES (random() * 100, 'ORCL', 152, '2011-08-25 11:50:45', 'sell', 0.58); -- values for other columns are totally fine INSERT INTO limit_orders_mx VALUES (2036, 'GOOG', 5634, now(), 'buy', random()); -- commands with mutable functions in their quals DELETE FROM limit_orders_mx WHERE id = 246 AND bidder_id = (random() * 1000); -- commands with mutable but non-volatile functions(ie: stable func.) in their quals -- (the cast to timestamp is because the timestamp_eq_timestamptz operator is stable) DELETE FROM limit_orders_mx WHERE id = 246 AND placed_at = current_timestamp::timestamp; -- commands with multiple rows are supported INSERT INTO limit_orders_mx VALUES (2037, 'GOOG', 5634, now(), 'buy', random()), (2038, 'GOOG', 5634, now(), 'buy', random()), (2039, 'GOOG', 5634, now(), 'buy', random()); -- connect back to the other node \c - - - :worker_1_port -- commands containing a CTE are unsupported WITH deleted_orders AS (DELETE FROM limit_orders_mx RETURNING *) INSERT INTO limit_orders_mx DEFAULT VALUES; -- test simple DELETE INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; DELETE FROM limit_orders_mx WHERE id = 246; SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; -- test simple DELETE with RETURNING DELETE FROM limit_orders_mx WHERE id = 430 RETURNING *; SELECT COUNT(*) FROM limit_orders_mx WHERE id = 430; -- DELETE with expression in WHERE clause INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; DELETE FROM limit_orders_mx WHERE id = (2 * 123); SELECT COUNT(*) FROM limit_orders_mx WHERE id = 246; -- commands with no constraints on the partition key are not supported DELETE FROM limit_orders_mx WHERE bidder_id = 162; -- commands with a USING clause are unsupported CREATE TABLE bidders ( name text, id bigint ); DELETE FROM limit_orders_mx USING bidders WHERE limit_orders_mx.id = 246 AND limit_orders_mx.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *) DELETE FROM limit_orders_mx; -- cursors 
are not supported DELETE FROM limit_orders_mx WHERE CURRENT OF cursor_name; INSERT INTO limit_orders_mx VALUES (246, 'TSLA', 162, '2007-07-02 16:32:15', 'sell', 20.69); -- simple UPDATE UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246; SELECT symbol FROM limit_orders_mx WHERE id = 246; -- simple UPDATE with RETURNING UPDATE limit_orders_mx SET symbol = 'GM' WHERE id = 246 RETURNING *; -- expression UPDATE UPDATE limit_orders_mx SET bidder_id = 6 * 3 WHERE id = 246; SELECT bidder_id FROM limit_orders_mx WHERE id = 246; -- expression UPDATE with RETURNING UPDATE limit_orders_mx SET bidder_id = 6 * 5 WHERE id = 246 RETURNING *; -- multi-column UPDATE UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', DEFAULT) WHERE id = 246; SELECT kind, limit_price FROM limit_orders_mx WHERE id = 246; -- multi-column UPDATE with RETURNING UPDATE limit_orders_mx SET (kind, limit_price) = ('buy', 999) WHERE id = 246 RETURNING *; -- Test that on unique contraint violations, we fail fast INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); INSERT INTO limit_orders_mx VALUES (275, 'ADR', 140, '2007-07-02 16:32:15', 'sell', 43.67); -- commands with no constraints on the partition key are not supported UPDATE limit_orders_mx SET limit_price = 0.00; -- attempting to change the partition key is unsupported UPDATE limit_orders_mx SET id = 0 WHERE id = 246; -- UPDATEs with a FROM clause are unsupported UPDATE limit_orders_mx SET limit_price = 0.00 FROM bidders WHERE limit_orders_mx.id = 246 AND limit_orders_mx.bidder_id = bidders.id AND bidders.name = 'Bernie Madoff'; -- commands containing a CTE are unsupported WITH deleted_orders AS (INSERT INTO limit_orders_mx DEFAULT VALUES RETURNING *) UPDATE limit_orders_mx SET symbol = 'GM'; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; -- updates referencing just a var are supported UPDATE limit_orders_mx SET bidder_id = id WHERE id = 246; -- updates referencing a column are supported UPDATE limit_orders_mx SET bidder_id = bidder_id + 1 WHERE id = 246; -- IMMUTABLE functions are allowed UPDATE limit_orders_mx SET symbol = LOWER(symbol) WHERE id = 246; SELECT symbol, bidder_id FROM limit_orders_mx WHERE id = 246; -- IMMUTABLE functions are allowed -- even in returning UPDATE limit_orders_mx SET symbol = UPPER(symbol) WHERE id = 246 RETURNING id, LOWER(symbol), symbol; -- connect coordinator to run the DDL \c - - - :master_port ALTER TABLE limit_orders_mx ADD COLUMN array_of_values integer[]; -- connect back to the other node \c - - - :worker_2_port -- updates referencing STABLE functions are allowed UPDATE limit_orders_mx SET placed_at = LEAST(placed_at, now()::timestamp) WHERE id = 246; -- so are binary operators UPDATE limit_orders_mx SET array_of_values = 1 || array_of_values WHERE id = 246; -- connect back to the other node \c - - - :worker_2_port -- immutable function calls with vars are also allowed UPDATE limit_orders_mx SET array_of_values = immutable_append_mx(array_of_values, 2) WHERE id = 246; CREATE FUNCTION stable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ BEGIN RETURN old_values || new_value; END; $$ LANGUAGE plpgsql STABLE; -- but STABLE function calls with vars are not allowed UPDATE limit_orders_mx SET array_of_values = stable_append_mx(array_of_values, 3) WHERE id = 246; SELECT array_of_values FROM limit_orders_mx WHERE id = 246; -- STRICT functions work as expected CREATE FUNCTION temp_strict_func(integer,integer) RETURNS integer AS 'SELECT COALESCE($1, 2) + COALESCE($1, 
3);' LANGUAGE SQL STABLE STRICT; UPDATE limit_orders_mx SET bidder_id = temp_strict_func(1, null) WHERE id = 246; SELECT array_of_values FROM limit_orders_mx WHERE id = 246; -- connect coordinator to run the DDL \c - - - :master_port ALTER TABLE limit_orders_mx DROP array_of_values; -- connect back to the other node \c - - - :worker_2_port -- even in RETURNING UPDATE limit_orders_mx SET placed_at = placed_at WHERE id = 246 RETURNING NOW(); -- cursors are not supported UPDATE limit_orders_mx SET symbol = 'GM' WHERE CURRENT OF cursor_name; -- check that multi-row UPDATE/DELETEs with RETURNING work INSERT INTO multiple_hash_mx VALUES ('0', '1'); INSERT INTO multiple_hash_mx VALUES ('0', '2'); INSERT INTO multiple_hash_mx VALUES ('0', '3'); INSERT INTO multiple_hash_mx VALUES ('0', '4'); INSERT INTO multiple_hash_mx VALUES ('0', '5'); INSERT INTO multiple_hash_mx VALUES ('0', '6'); UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '0' RETURNING *; DELETE FROM multiple_hash_mx WHERE category = '0' RETURNING *; -- ensure returned row counters are correct \set QUIET off INSERT INTO multiple_hash_mx VALUES ('1', '1'); INSERT INTO multiple_hash_mx VALUES ('1', '2'); INSERT INTO multiple_hash_mx VALUES ('1', '3'); INSERT INTO multiple_hash_mx VALUES ('2', '1'); INSERT INTO multiple_hash_mx VALUES ('2', '2'); INSERT INTO multiple_hash_mx VALUES ('2', '3'); INSERT INTO multiple_hash_mx VALUES ('2', '3') RETURNING *; -- check that update return the right number of rows -- one row UPDATE multiple_hash_mx SET data = data ||'-1' WHERE category = '1' AND data = '1'; -- three rows UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1'; -- three rows, with RETURNING UPDATE multiple_hash_mx SET data = data ||'-2' WHERE category = '1' RETURNING category; -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; -- check that deletes return the right number of rows -- one row DELETE FROM multiple_hash_mx WHERE category = '2' AND data = '1'; -- two rows DELETE FROM multiple_hash_mx WHERE category = '2'; -- three rows, with RETURNING DELETE FROM multiple_hash_mx WHERE category = '1' RETURNING category; -- check SELECT * FROM multiple_hash_mx WHERE category = '1' ORDER BY category, data; SELECT * FROM multiple_hash_mx WHERE category = '2' ORDER BY category, data; --- INSERT ... SELECT ... 
FROM commands are supported from workers INSERT INTO multiple_hash_mx SELECT s, s*2 FROM generate_series(1,10) s; -- but are never distributed BEGIN; SET LOCAL client_min_messages TO DEBUG1; INSERT INTO multiple_hash_mx SELECT * FROM multiple_hash_mx; END; -- verify interaction of default values, SERIAL, and RETURNING \set QUIET on -- make sure this test always returns the same output no matter which tests have run SELECT minimum_value::bigint AS min_value, maximum_value::bigint AS max_value FROM information_schema.sequences WHERE sequence_name = 'app_analytics_events_mx_id_seq' \gset SELECT last_value FROM app_analytics_events_mx_id_seq \gset ALTER SEQUENCE app_analytics_events_mx_id_seq NO MINVALUE NO MAXVALUE; SELECT setval('app_analytics_events_mx_id_seq'::regclass, 3940649673949184); INSERT INTO app_analytics_events_mx VALUES (DEFAULT, 101, 'Fauxkemon Geaux') RETURNING id; INSERT INTO app_analytics_events_mx (app_id, name) VALUES (102, 'Wayz') RETURNING id; INSERT INTO app_analytics_events_mx (app_id, name) VALUES (103, 'Mynt') RETURNING *; -- clean up SELECT setval('app_analytics_events_mx_id_seq'::regclass, :last_value); ALTER SEQUENCE app_analytics_events_mx_id_seq MINVALUE :min_value MAXVALUE :max_value; citus-7.0.3/src/test/regress/sql/multi_mx_modifying_xacts.sql000066400000000000000000000206601317107136600245260ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1340000; -- =================================================================== -- test end-to-end modification functionality for mx tables in transactions -- =================================================================== -- add some data INSERT INTO researchers_mx VALUES (1, 1, 'Donald Knuth'); INSERT INTO researchers_mx VALUES (2, 1, 'Niklaus Wirth'); INSERT INTO researchers_mx VALUES (3, 2, 'Tony Hoare'); INSERT INTO researchers_mx VALUES (4, 2, 'Kenneth Iverson'); -- replace a researcher, reusing their id on the coordinator BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; -- do it on the worker node as well \c - - - :worker_1_port BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 1'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; -- do it on the worker other node as well \c - - - :worker_2_port BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 2; INSERT INTO researchers_mx VALUES (2, 1, 'John Backus Worker 2'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 2; \c - - - :master_port -- abort a modification BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; \c - - - :worker_1_port -- abort a modification on the worker node BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; \c - - - :worker_2_port -- abort a modification on the other worker node BEGIN; DELETE FROM researchers_mx WHERE lab_id = 1 AND id = 1; ABORT; SELECT name FROM researchers_mx WHERE lab_id = 1 AND id = 1; -- switch back to the first worker node \c - - - :worker_1_port -- creating savepoints should work... 
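-- Illustrative aside, not part of the original file: as in the metadata tests
-- earlier in this suite, the pg_dist_transaction catalog keeps commit records for
-- transactions that required two-phase commit until recover_prepared_transactions()
-- removes them, so its size is a rough indicator of whether 2PC was involved.
SELECT count(*) FROM pg_dist_transaction;
-- Back to the savepoint test described above.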
BEGIN; INSERT INTO researchers_mx VALUES (5, 3, 'Dennis Ritchie'); SAVEPOINT hire_thompson; INSERT INTO researchers_mx VALUES (6, 3, 'Ken Thompson'); COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 3 AND id = 6; -- even if created by PL/pgSQL... \set VERBOSITY terse BEGIN; DO $$ BEGIN INSERT INTO researchers_mx VALUES (10, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; COMMIT; -- rollback should also work BEGIN; INSERT INTO researchers_mx VALUES (7, 4, 'Jim Gray'); SAVEPOINT hire_engelbart; INSERT INTO researchers_mx VALUES (8, 4, 'Douglas Engelbart'); ROLLBACK TO hire_engelbart; COMMIT; SELECT name FROM researchers_mx WHERE lab_id = 4; BEGIN; DO $$ BEGIN INSERT INTO researchers_mx VALUES (NULL, 10, 'Edsger Dijkstra'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; COMMIT; \set VERBOSITY default -- should be valid to edit labs_mx after researchers_mx... BEGIN; INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id; -- and the other way around is also allowed BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport'); COMMIT; -- have the same test on the other worker node \c - - - :worker_2_port -- should be valid to edit labs_mx after researchers_mx... BEGIN; INSERT INTO researchers_mx VALUES (8, 5, 'Douglas Engelbart'); INSERT INTO labs_mx VALUES (5, 'Los Alamos'); COMMIT; SELECT * FROM researchers_mx, labs_mx WHERE labs_mx.id = researchers_mx.lab_id; -- and the other way around is also allowed BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); INSERT INTO researchers_mx VALUES (9, 6, 'Leslie Lamport'); COMMIT; -- switch back to the worker node \c - - - :worker_1_port -- this logic doesn't apply to router SELECTs occurring after a modification: -- selecting from the modified node is fine... BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); SELECT count(*) FROM researchers_mx WHERE lab_id = 6; ABORT; -- doesn't apply to COPY after modifications BEGIN; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); \copy labs_mx from stdin delimiter ',' 10,Weyland-Yutani-1 \. COMMIT; -- copy will also work if before any modifications BEGIN; \copy labs_mx from stdin delimiter ',' 10,Weyland-Yutani-2 \. SELECT name FROM labs_mx WHERE id = 10; INSERT INTO labs_mx VALUES (6, 'Bell labs_mx'); COMMIT; \c - - - :worker_1_port -- test primary key violations BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); COMMIT; -- data shouldn't have persisted... SELECT * FROM objects_mx WHERE id = 1; -- same test on the second worker node \c - - - :worker_2_port -- test primary key violations BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (1, 'orange'); COMMIT; -- data shouldn't have persisted... 
SELECT * FROM objects_mx WHERE id = 1; -- create trigger on one worker to reject certain values \c - - - :worker_1_port CREATE FUNCTION reject_bad_mx() RETURNS trigger AS $rb$ BEGIN IF (NEW.name = 'BAD') THEN RAISE 'illegal value'; END IF; RETURN NEW; END; $rb$ LANGUAGE plpgsql; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); -- test partial failure; statement 1 successed, statement 2 fails \set VERBOSITY terse BEGIN; INSERT INTO labs_mx VALUES (7, 'E Corp'); INSERT INTO objects_mx VALUES (2, 'BAD'); COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; SELECT * FROM labs_mx WHERE id = 7; -- same failure test from worker 2 \c - - - :worker_2_port -- test partial failure; statement 1 successed, statement 2 fails BEGIN; INSERT INTO labs_mx VALUES (7, 'E Corp'); INSERT INTO objects_mx VALUES (2, 'BAD'); COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; SELECT * FROM labs_mx WHERE id = 7; \c - - - :worker_1_port -- what if there are errors on different shards at different times? \c - - - :worker_1_port CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON labs_mx_1220102 DEFERRABLE INITIALLY IMMEDIATE FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; SELECT * FROM labs_mx WHERE id = 8; -- same test from the other worker \c - - - :worker_2_port BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; SELECT * FROM labs_mx WHERE id = 8; -- what if the failures happen at COMMIT time? \c - - - :worker_1_port DROP TRIGGER reject_bad_mx ON objects_mx_1220103; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON objects_mx_1220103 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); -- should be the same story as before, just at COMMIT time BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (9, 'Umbrella Corporation'); COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 2; SELECT * FROM labs_mx WHERE id = 7; DROP TRIGGER reject_bad_mx ON labs_mx_1220102; CREATE CONSTRAINT TRIGGER reject_bad_mx AFTER INSERT ON labs_mx_1220102 DEFERRABLE INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE reject_bad_mx(); BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO objects_mx VALUES (2, 'BAD'); INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; -- data should NOT be persisted SELECT * FROM objects_mx WHERE id = 1; SELECT * FROM labs_mx WHERE id = 8; -- what if one shard (objects_mx) succeeds but another (labs_mx) completely fails? 
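-- Illustrative aside, not part of the original file: the shard-level constraint
-- triggers installed above can be listed on the worker through the standard
-- pg_trigger catalog, which helps when reasoning about which shard will reject
-- a row at COMMIT time.
SELECT tgname, tgrelid::regclass AS shard_table
FROM pg_trigger
WHERE tgname LIKE 'reject_bad_mx%'
ORDER BY 2;
-- Back to the scenario asked about above.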
\c - - - :worker_1_port DROP TRIGGER reject_bad_mx ON objects_mx_1220103; BEGIN; INSERT INTO objects_mx VALUES (1, 'apple'); INSERT INTO labs_mx VALUES (8, 'Aperture Science'); INSERT INTO labs_mx VALUES (9, 'BAD'); COMMIT; -- no data should persists SELECT * FROM objects_mx WHERE id = 1; SELECT * FROM labs_mx WHERE id = 8; citus-7.0.3/src/test/regress/sql/multi_mx_reference_table.sql000066400000000000000000000257651317107136600244570ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; \c - - - :master_port CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test'); INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); \c - - - :worker_1_port -- run some queries on top of the data SELECT * FROM reference_table_test; SELECT * FROM reference_table_test WHERE value_1 = 1; SELECT value_1, value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 3; SELECT value_1, value_3 FROM reference_table_test WHERE value_2 >= 4 ORDER BY 2 LIMIT 3; SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2; SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; SELECT value_2, value_4 FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; SELECT value_2, value_4 FROM reference_table_test WHERE ( value_3 = '2' OR value_1 = 3 ) AND FALSE; SELECT * FROM reference_table_test WHERE value_2 IN ( SELECT value_3::FLOAT FROM reference_table_test ) AND value_1 < 3; SELECT value_4 FROM reference_table_test WHERE value_3 IN ( '1', '2' ); SELECT date_part('day', value_4) FROM reference_table_test WHERE value_3 IN ( '5', '2' ); SELECT value_4 FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; SELECT value_4 FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; SELECT value_4 FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); SELECT value_1 FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; SELECT value_1 FROM reference_table_test WHERE FALSE; SELECT value_1 FROM reference_table_test WHERE int4eq(1, 2); -- rename output name and do some operations SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; -- queries with CTEs are supported WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM some_data; -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; -- queries which involve functions in FROM clause are supported if it goes to a single worker. 
SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; -- set operations are supported SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 UNION SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 EXCEPT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 INTERSECT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; -- to make the tests more interested for aggregation tests, ingest some more data \c - - - :master_port INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); \c - - - :worker_1_port -- some aggregations SELECT value_4, SUM(value_2) FROM reference_table_test GROUP BY value_4 HAVING SUM(value_2) > 3 ORDER BY 1; SELECT value_4, value_3, SUM(value_2) FROM reference_table_test GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; -- distinct clauses also work fine SELECT DISTINCT value_4 FROM reference_table_test ORDER BY 1; -- window functions are also supported SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; -- window functions are also supported SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; SELECT count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test; SELECT value_1, count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test GROUP BY value_1 ORDER BY 1; -- selects inside a transaction works fine as well BEGIN; SELECT * FROM reference_table_test; SELECT * FROM reference_table_test WHERE value_1 = 1; END; -- cursor queries also works fine BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM reference_table_test WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; FETCH ALL test_cursor; FETCH test_cursor; -- fetch one row after the last FETCH BACKWARD test_cursor; END; -- table creation queries inside can be router plannable CREATE TEMP TABLE temp_reference_test as SELECT * FROM reference_table_test WHERE value_1 = 1; \c - - - :master_port -- all kinds of joins are supported among reference tables -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); -- ingest some data to both tables INSERT INTO reference_table_test_second VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test_second VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test_second VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test_third VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05'); \c - - - :worker_2_port -- some very basic tests SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_2 ORDER BY 1; SELECT DISTINCT 
t1.value_1 FROM reference_table_test t1, reference_table_test_third t3 WHERE t1.value_2 = t3.value_2 ORDER BY 1; SELECT DISTINCT t2.value_1 FROM reference_table_test_second t2, reference_table_test_third t3 WHERE t2.value_2 = t3.value_2 ORDER BY 1; -- join on different columns and different data types via casts SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_1 ORDER BY 1; SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_3::int ORDER BY 1; SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; -- ingest a common row to see more meaningful results with joins involving 3 tables \c - - - :master_port INSERT INTO reference_table_test_third VALUES (3, 3.0, '3', '2016-12-03'); \c - - - :worker_1_port SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; -- same query on different columns SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; -- with the JOIN syntax SELECT DISTINCT t1.value_1 FROM reference_table_test t1 JOIN reference_table_test_second t2 USING (value_1) JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; -- and left/right joins SELECT DISTINCT t1.value_1 FROM reference_table_test t1 LEFT JOIN reference_table_test_second t2 USING (value_1) LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; SELECT DISTINCT t1.value_1 FROM reference_table_test t1 RIGHT JOIN reference_table_test_second t2 USING (value_1) RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; \c - - - :master_port SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); DELETE FROM reference_table_test; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test_2 VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test_2 VALUES (2, 2.0, '2', '2016-12-02'); \c - - - :worker_1_port SET client_min_messages TO DEBUG1; SET citus.log_multi_join_order TO TRUE; SELECT reference_table_test.value_1 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_1 = reference_table_test.value_1; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_2 = reference_table_test.value_2; SELECT colocated_table_test.value_2 FROM colocated_table_test, reference_table_test WHERE reference_table_test.value_1 = colocated_table_test.value_1; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE 
colocated_table_test.value_2 = reference_table_test.value_2; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2; SET citus.task_executor_type to "task-tracker"; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; SELECT reference_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; SET client_min_messages TO NOTICE; SET citus.log_multi_join_order TO FALSE; -- clean up tables \c - - - :master_port DROP TABLE reference_table_test, reference_table_test_second, reference_table_test_third; citus-7.0.3/src/test/regress/sql/multi_mx_repartition_join_w1.sql000066400000000000000000000007311317107136600253220ustar00rootroot00000000000000-- Test two concurrent repartition joins from two different workers -- This test runs the below query from the :worker_1_port and the -- concurrent test runs the same query on :worker_2_port. Note that both -- tests use the same sequence ids but the queries should not fail. \c - - - :worker_1_port SET citus.task_executor_type TO "task-tracker"; CREATE TEMP TABLE t1 AS SELECT l1.l_comment FROM lineitem_mx l1, orders_mx l2 WHERE l1.l_comment = l2.o_comment; citus-7.0.3/src/test/regress/sql/multi_mx_repartition_join_w2.sql000066400000000000000000000007341317107136600253260ustar00rootroot00000000000000-- Test two concurrent repartition joins from two different workers -- This test runs the below query from the :worker_2_port and the -- concurrent test runs the same query on :worker_1_port. Note that both -- tests use the same sequence ids but the queries should not fail. \c - - - :worker_2_port SET citus.task_executor_type TO "task-tracker"; CREATE TEMP TABLE t1 AS SELECT l1.l_comment FROM lineitem_mx l1, orders_mx l2 WHERE l1.l_comment = l2.o_comment; citus-7.0.3/src/test/regress/sql/multi_mx_repartition_udt_prepare.sql000066400000000000000000000144451317107136600262730ustar00rootroot00000000000000-- -- MULTI_MX_REPARTITION_UDT_PREPARE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function.
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation CREATE TABLE repartition_udt ( pk integer not null, udtcol test_udt, txtcol text ); CREATE TABLE repartition_udt_other ( pk integer not null, udtcol test_udt, txtcol text ); -- Connect directly to a worker, create and drop the type, then -- proceed with type creation as above; thus the OIDs will be different. -- so that the OID is off. \c - - - :worker_1_port CREATE TYPE test_udt AS (i integer, i2 integer); DROP TYPE test_udt CASCADE; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - - :worker_2_port -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation -- Connect to master \c - - - :master_port -- Distribute and populate the two tables. SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; SET citus.shard_count TO 3; SELECT create_distributed_table('repartition_udt', 'pk'); SET citus.shard_count TO 5; SELECT create_distributed_table('repartition_udt_other', 'pk'); INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (3, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt values (4, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (5, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (6, '(2,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (7, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (8, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (9, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo'); SET client_min_messages = LOG; -- Query that should result in a repartition join on int column, and be empty. SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; -- Query that should result in a repartition join on UDT column. SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; \c - - - :worker_1_port \c - - - :worker_2_port citus-7.0.3/src/test/regress/sql/multi_mx_repartition_udt_w1.sql000066400000000000000000000012531317107136600251570ustar00rootroot00000000000000-- -- MULTI_MX_REPARTITION_W1_UDT -- \c - - - :worker_1_port SET client_min_messages = LOG; -- Query that should result in a repartition join on UDT column. SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; -- Query that should result in a repartition join on int column, and be empty. 
SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; citus-7.0.3/src/test/regress/sql/multi_mx_repartition_udt_w2.sql000066400000000000000000000012531317107136600251600ustar00rootroot00000000000000-- -- MULTI_MX_REPARTITION_W2_UDT -- \c - - - :worker_2_port SET client_min_messages = LOG; -- Query that should result in a repartition join on UDT column. SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; -- Query that should result in a repartition join on int column, and be empty. SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; citus-7.0.3/src/test/regress/sql/multi_mx_router_planner.sql000066400000000000000000000505001317107136600243720ustar00rootroot00000000000000 -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== -- run all the router queries from the one of the workers \c - - - :worker_1_port -- this table is used in a CTE test CREATE TABLE authors_hash_mx ( name text, id bigint ); -- create a bunch of test data INSERT INTO articles_hash_mx VALUES ( 1, 1, 'arsenous', 9572); INSERT INTO articles_hash_mx VALUES ( 2, 2, 'abducing', 13642); INSERT INTO articles_hash_mx VALUES ( 3, 3, 'asternal', 10480); INSERT INTO articles_hash_mx VALUES ( 4, 4, 'altdorfer', 14551); INSERT INTO articles_hash_mx VALUES ( 5, 5, 'aruru', 11389); INSERT INTO articles_hash_mx VALUES ( 6, 6, 'atlases', 15459); INSERT INTO articles_hash_mx VALUES ( 7, 7, 'aseptic', 12298); INSERT INTO articles_hash_mx VALUES ( 8, 8, 'agatized', 16368); INSERT INTO articles_hash_mx VALUES ( 9, 9, 'alligate', 438); INSERT INTO articles_hash_mx VALUES (10, 10, 'aggrandize', 17277); INSERT INTO articles_hash_mx VALUES (11, 1, 'alamo', 1347); INSERT INTO articles_hash_mx VALUES (12, 2, 'archiblast', 18185); INSERT INTO articles_hash_mx VALUES (13, 3, 'aseyev', 2255); INSERT INTO articles_hash_mx VALUES (14, 4, 'andesite', 19094); INSERT INTO articles_hash_mx VALUES (15, 5, 'adversa', 3164); INSERT INTO articles_hash_mx VALUES (16, 6, 'allonym', 2); INSERT INTO articles_hash_mx VALUES (17, 7, 'auriga', 4073); INSERT INTO articles_hash_mx VALUES (18, 8, 'assembly', 911); INSERT INTO articles_hash_mx VALUES (19, 9, 'aubergiste', 4981); INSERT INTO articles_hash_mx VALUES (20, 10, 'absentness', 1820); INSERT INTO articles_hash_mx VALUES (21, 1, 'arcading', 5890); INSERT INTO articles_hash_mx VALUES (22, 2, 'antipope', 2728); INSERT INTO articles_hash_mx VALUES (23, 3, 'abhorring', 6799); INSERT INTO articles_hash_mx VALUES (24, 4, 'audacious', 3637); INSERT INTO articles_hash_mx VALUES (25, 5, 'antehall', 7707); INSERT INTO articles_hash_mx VALUES (26, 6, 'abington', 4545); INSERT INTO articles_hash_mx VALUES (27, 7, 'arsenous', 8616); INSERT INTO articles_hash_mx VALUES (28, 8, 'aerophyte', 5454); INSERT INTO articles_hash_mx VALUES (29, 9, 'amateur', 9524); INSERT INTO articles_hash_mx VALUES (30, 10, 
'andelee', 6363); INSERT INTO articles_hash_mx VALUES (31, 1, 'athwartships', 7271); INSERT INTO articles_hash_mx VALUES (32, 2, 'amazon', 11342); INSERT INTO articles_hash_mx VALUES (33, 3, 'autochrome', 8180); INSERT INTO articles_hash_mx VALUES (34, 4, 'amnestied', 12250); INSERT INTO articles_hash_mx VALUES (35, 5, 'aminate', 9089); INSERT INTO articles_hash_mx VALUES (36, 6, 'ablation', 13159); INSERT INTO articles_hash_mx VALUES (37, 7, 'archduchies', 9997); INSERT INTO articles_hash_mx VALUES (38, 8, 'anatine', 14067); INSERT INTO articles_hash_mx VALUES (39, 9, 'anchises', 10906); INSERT INTO articles_hash_mx VALUES (40, 10, 'attemper', 14976); INSERT INTO articles_hash_mx VALUES (41, 1, 'aznavour', 11814); INSERT INTO articles_hash_mx VALUES (42, 2, 'ausable', 15885); INSERT INTO articles_hash_mx VALUES (43, 3, 'affixal', 12723); INSERT INTO articles_hash_mx VALUES (44, 4, 'anteport', 16793); INSERT INTO articles_hash_mx VALUES (45, 5, 'afrasia', 864); INSERT INTO articles_hash_mx VALUES (46, 6, 'atlanta', 17702); INSERT INTO articles_hash_mx VALUES (47, 7, 'abeyance', 1772); INSERT INTO articles_hash_mx VALUES (48, 8, 'alkylic', 18610); INSERT INTO articles_hash_mx VALUES (49, 9, 'anyone', 2681); INSERT INTO articles_hash_mx VALUES (50, 10, 'anjanette', 19519); SET citus.task_executor_type TO 'real-time'; SET citus.large_table_shard_count TO 2; SET client_min_messages TO 'DEBUG2'; -- insert a single row for the test INSERT INTO articles_single_shard_hash_mx VALUES (50, 10, 'anjanette', 19519); -- single-shard tests -- test simple select for a single row SELECT * FROM articles_hash_mx WHERE author_id = 10 AND id = 50; -- get all titles by a single author SELECT title FROM articles_hash_mx WHERE author_id = 10; -- try ordering them by word count SELECT title, word_count FROM articles_hash_mx WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; -- look at last two articles by an author SELECT title, id FROM articles_hash_mx WHERE author_id = 5 ORDER BY id LIMIT 2; -- find all articles by two authors in same shard -- but plan is not router executable due to order by SELECT title, author_id FROM articles_hash_mx WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; -- same query is router executable with no order by SELECT title, author_id FROM articles_hash_mx WHERE author_id = 7 OR author_id = 8; -- add in some grouping expressions, still on same shard -- having queries unsupported in Citus SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10 GROUP BY author_id HAVING sum(word_count) > 1000 ORDER BY sum(word_count) DESC; -- however having clause is supported if it goes to a single shard SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash_mx WHERE author_id = 1 GROUP BY author_id HAVING sum(word_count) > 1000 ORDER BY sum(word_count) DESC; -- query is a single shard query but can't do shard pruning, -- not router-plannable due to <= and IN SELECT * FROM articles_hash_mx WHERE author_id <= 1; SELECT * FROM articles_hash_mx WHERE author_id IN (1, 3); -- queries with CTEs are supported WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) SELECT * FROM first_author; -- queries with CTEs are supported even if CTE is not referenced inside query WITH first_author AS ( SELECT id FROM articles_hash_mx WHERE author_id = 1) SELECT title FROM articles_hash_mx WHERE author_id = 1; -- two CTE joins are supported if they go to the same worker WITH 
id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 3) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; -- CTE joins are not supported if table shards are at different workers WITH id_author AS ( SELECT id, author_id FROM articles_hash_mx WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash_mx WHERE author_id = 2) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; -- recursive CTEs are supported when filtered on partition column INSERT INTO company_employees_mx values(1, 1, 0); INSERT INTO company_employees_mx values(1, 2, 1); INSERT INTO company_employees_mx values(1, 3, 1); INSERT INTO company_employees_mx values(1, 4, 2); INSERT INTO company_employees_mx values(1, 5, 4); INSERT INTO company_employees_mx values(3, 1, 0); INSERT INTO company_employees_mx values(3, 15, 1); INSERT INTO company_employees_mx values(3, 3, 1); -- find employees at top 2 levels within company hierarchy WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees_mx WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees_mx ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2; -- query becomes not router plannable and gets rejected -- if filter on company is dropped WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees_mx WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees_mx ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2; -- logically wrong query, query involves different shards -- from the same table, but still router plannable due to -- shards being placed on the same worker. WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees_mx WHERE company_id = 3 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees_mx ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2; -- grouping sets are supported on single shard SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash_mx WHERE author_id = 1 or author_id = 3 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle; -- grouping sets are not supported on multiple shards SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle; -- queries which involve functions in FROM clause are supported if they go to a single worker.
SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1; SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; -- they are not supported if multiple workers are involved SELECT * FROM articles_hash_mx, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2; -- subqueries are supported in FROM clause but they are not router plannable SELECT articles_hash_mx.id,test.word_count FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id ORDER BY articles_hash_mx.id; SELECT articles_hash_mx.id,test.word_count FROM articles_hash_mx, (SELECT id, word_count FROM articles_hash_mx) AS test WHERE test.id = articles_hash_mx.id and articles_hash_mx.author_id = 1 ORDER BY articles_hash_mx.id; -- subqueries are not supported in SELECT clause SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash_mx a2 WHERE a.id = a2.id LIMIT 1) AS special_price FROM articles_hash_mx a; -- simple lookup query SELECT * FROM articles_hash_mx WHERE author_id = 1; -- below query hits a single shard, router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 OR author_id = 17; -- below query hits two shards, not router plannable + not router executable -- handled by real-time executor SELECT * FROM articles_hash_mx WHERE author_id = 1 OR author_id = 18; -- rename the output columns SELECT id as article_id, word_count * id as random_value FROM articles_hash_mx WHERE author_id = 1; -- we can push down co-located joins to a single worker SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash_mx a, articles_hash_mx b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; -- following join is router plannable since the same worker -- has both shards SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash_mx a, articles_single_shard_hash_mx b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; -- following join is not router plannable since there are no -- workers containing both shards, added a CTE to make this fail -- at logical planner WITH single_shard as (SELECT * FROM articles_single_shard_hash_mx) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash_mx a, single_shard b WHERE a.author_id = 2 and a.author_id = b.author_id LIMIT 3; -- single shard select with limit is router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 LIMIT 3; -- single shard select with limit + offset is router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 LIMIT 2 OFFSET 1; -- single shard select with limit + offset + order by is router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id desc LIMIT 2 OFFSET 1; -- single shard select with group by on non-partition column is router plannable SELECT id FROM articles_hash_mx WHERE author_id = 1 GROUP BY id ORDER BY id; -- single shard select with distinct is router plannable SELECT distinct id FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; -- single shard aggregate is router plannable SELECT avg(word_count) FROM articles_hash_mx WHERE author_id = 2; -- max, min, sum, count are router plannable on single shard SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles_hash_mx WHERE author_id = 2; -- queries with aggregates and group by supported on single shard SELECT max(word_count) FROM articles_hash_mx WHERE author_id = 1 
GROUP BY author_id; -- router plannable union queries are supported SELECT * FROM ( SELECT * FROM articles_hash_mx WHERE author_id = 1 UNION SELECT * FROM articles_hash_mx WHERE author_id = 3 ) AS combination ORDER BY id; (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) UNION (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 1) INTERSECT (SELECT LEFT(title, 1) FROM articles_hash_mx WHERE author_id = 3); SELECT * FROM ( SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 1 EXCEPT SELECT LEFT(title, 2) FROM articles_hash_mx WHERE author_id = 3 ) AS combination ORDER BY 1; -- union queries are not supported if not router plannable -- there is an inconsistency in shard pruning between -- ubuntu/mac, so we disable log messages for these queries only SET client_min_messages to 'NOTICE'; (SELECT * FROM articles_hash_mx WHERE author_id = 1) UNION (SELECT * FROM articles_hash_mx WHERE author_id = 2); SELECT * FROM ( (SELECT * FROM articles_hash_mx WHERE author_id = 1) UNION (SELECT * FROM articles_hash_mx WHERE author_id = 2)) uu; -- error out for queries with repartition jobs SELECT * FROM articles_hash_mx a, articles_hash_mx b WHERE a.id = b.id AND a.author_id = 1; -- queries which hit more than one shard are not router plannable or executable -- handled by real-time executor SELECT * FROM articles_hash_mx WHERE author_id >= 1 AND author_id <= 3; SET citus.task_executor_type TO 'real-time'; -- Test various filtering options for router plannable check SET client_min_messages to 'DEBUG2'; -- this is definitely single shard -- and router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 and author_id >= 1; -- not router plannable due to or SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; -- router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 and (id = 1 or id = 41); -- router plannable SELECT * FROM articles_hash_mx WHERE author_id = 1 and (id = random()::int * 0); -- not router plannable due to function call on the right side SELECT * FROM articles_hash_mx WHERE author_id = (random()::int * 0 + 1); -- not router plannable due to or SELECT * FROM articles_hash_mx WHERE author_id = 1 or id = 1; -- router plannable due to abs(-1) getting converted to 1 by postgresql SELECT * FROM articles_hash_mx WHERE author_id = abs(-1); -- not router plannable due to abs() function SELECT * FROM articles_hash_mx WHERE 1 = abs(author_id); -- not router plannable due to abs() function SELECT * FROM articles_hash_mx WHERE author_id = abs(author_id - 2); -- router plannable, function on different field SELECT * FROM articles_hash_mx WHERE author_id = 1 and (id = abs(id - 2)); -- not router plannable due to is true SELECT * FROM articles_hash_mx WHERE (author_id = 1) is true; -- router plannable, (boolean expression) = true is collapsed to (boolean expression) SELECT * FROM articles_hash_mx WHERE (author_id = 1) = true; -- router plannable, between operator is on another column SELECT * FROM articles_hash_mx WHERE (author_id = 1) and id between 0 and 20; -- router plannable, partition column expression is and'ed to rest SELECT * FROM articles_hash_mx WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s'; -- router plannable, order is changed SELECT * FROM articles_hash_mx WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1); -- router plannable SELECT * FROM articles_hash_mx WHERE (title like '%s' or title like 'a%') and (author_id = 1); -- router plannable
SELECT * FROM articles_hash_mx WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000); -- window functions are supported if the query is router plannable SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash_mx WHERE author_id = 5; SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash_mx WHERE author_id = 5 ORDER BY word_count DESC; SELECT id, MIN(id) over (order by word_count) FROM articles_hash_mx WHERE author_id = 1; SELECT id, word_count, AVG(word_count) over (order by word_count) FROM articles_hash_mx WHERE author_id = 1; SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) FROM articles_hash_mx WHERE author_id = 1; -- window functions are not supported for non-router plannable queries SELECT id, MIN(id) over (order by word_count) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2; SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash_mx WHERE author_id = 5 or author_id = 2; -- complex query hitting a single shard SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash_mx WHERE author_id = 5; -- same query is not router plannable if it hits multiple shards SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash_mx GROUP BY author_id; -- queries inside transactions can be router plannable BEGIN; SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; END; -- cursor queries are router plannable BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; FETCH test_cursor; FETCH test_cursor; FETCH BACKWARD test_cursor; END; -- queries inside COPY can be router plannable COPY ( SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id) TO STDOUT; -- table creation queries inside can be router plannable CREATE TEMP TABLE temp_articles_hash_mx as SELECT * FROM articles_hash_mx WHERE author_id = 1 ORDER BY id; -- router plannable queries may include filters for aggregates SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash_mx WHERE author_id = 1; -- non-router plannable queries support filters as well SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash_mx WHERE author_id = 1 or author_id = 2; -- prepare queries can be router plannable PREPARE author_1_articles as SELECT * FROM articles_hash_mx WHERE author_id = 1; EXECUTE author_1_articles; -- parametric prepare queries can be router plannable PREPARE author_articles(int) as SELECT * FROM articles_hash_mx WHERE author_id = $1; EXECUTE author_articles(1); -- queries inside plpgsql functions could be router plannable CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; BEGIN SELECT MAX(id) FROM articles_hash_mx ah WHERE author_id = 1 into max_id; return max_id; END; $$ LANGUAGE plpgsql; SELECT author_articles_max_id(); -- plpgsql functions that return query results are not router plannable CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$ DECLARE BEGIN RETURN QUERY SELECT ah.id, ah.word_count FROM articles_hash_mx ah WHERE author_id = 1; END; $$ LANGUAGE plpgsql; SELECT * FROM author_articles_id_word_count(); -- materialized views can be created for router plannable queries CREATE MATERIALIZED VIEW mv_articles_hash_mx AS SELECT * FROM articles_hash_mx WHERE author_id = 1; SELECT * FROM
mv_articles_hash_mx; SET client_min_messages to 'INFO'; DROP MATERIALIZED VIEW mv_articles_hash_mx; SET client_min_messages to 'DEBUG2'; CREATE MATERIALIZED VIEW mv_articles_hash_mx_error AS SELECT * FROM articles_hash_mx WHERE author_id in (1,2); -- router planner/executor is disabled for task-tracker executor -- following query is router plannable, but router planner is disabled -- TODO: Uncomment once we fix task-tracker issue --SET citus.task_executor_type to 'task-tracker'; --SELECT id -- FROM articles_hash_mx -- WHERE author_id = 1; -- insert query is router plannable even under task-tracker INSERT INTO articles_hash_mx VALUES (51, 1, 'amateus', 1814); -- verify insert is successful (not router plannable and executable) SELECT id FROM articles_hash_mx WHERE author_id = 1; citus-7.0.3/src/test/regress/sql/multi_mx_schema_support.sql000066400000000000000000000147531317107136600243770ustar00rootroot00000000000000-- -- MULTI_MX_SCHEMA_SUPPORT -- -- connect to a worker node and run some queries \c - - - :worker_1_port -- test very basic queries SELECT * FROM nation_hash ORDER BY n_nationkey LIMIT 4; SELECT * FROM citus_mx_test_schema.nation_hash ORDER BY n_nationkey LIMIT 4; -- test cursors SET search_path TO public; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; FETCH test_cursor; FETCH BACKWARD test_cursor; END; -- test with search_path is set SET search_path TO citus_mx_test_schema; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM nation_hash WHERE n_nationkey = 1; FETCH test_cursor; FETCH test_cursor; FETCH BACKWARD test_cursor; END; -- test inserting into a table in a different schema SET search_path TO public; INSERT INTO citus_mx_test_schema.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (100, 'TURKEY', 3); -- verify insertion SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey = 100; -- test with search_path is set SET search_path TO citus_mx_test_schema; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (101, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 101; -- TODO: add UPDATE/DELETE/UPSERT -- test UDFs with schemas SET search_path TO public; -- UDF in public, table in a schema other than public, search_path is not set SELECT simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; -- UDF in public, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT public.simpleTestFunction(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT citus_mx_test_schema.simpleTestFunction2(n_nationkey)::int FROM citus_mx_test_schema.nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO citus_mx_test_schema; SELECT simpleTestFunction2(n_nationkey)::int FROM nation_hash GROUP BY 1 ORDER BY 1 DESC LIMIT 5; -- test operators with schema SET search_path TO public; -- test with search_path is not set SELECT * FROM citus_mx_test_schema.nation_hash WHERE n_nationkey OPERATOR(citus_mx_test_schema.===) 1; -- test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; SELECT * FROM citus_mx_test_schema.nation_hash_collation_search_path; SELECT n_comment FROM
citus_mx_test_schema.nation_hash_collation_search_path ORDER BY n_comment COLLATE citus_mx_test_schema.english; SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_collation_search_path ORDER BY 1 DESC; SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; SELECT * FROM citus_mx_test_schema.nation_hash_composite_types WHERE test_col = '(a,a)'::citus_mx_test_schema.new_composite_type ORDER BY 1::int DESC; --test with search_path is set SET search_path TO citus_mx_test_schema; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type ORDER BY 1::int DESC; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; -- check when search_path is public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; -- single repartition joins SET citus.task_executor_type TO "task-tracker"; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column --SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column and non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; -- hash repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO public; SELECT count (*) FROM citus_mx_test_schema_join_1.nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, citus_mx_test_schema_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in same schemas, -- 
join on non-partition column SET search_path TO citus_mx_test_schema_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; -- set task_executor back to real-time SET citus.task_executor_type TO "real-time"; citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query1.sql000066400000000000000000000042661317107136600236070ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY1 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem_mx WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem_mx WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; -- connect to the other node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem_mx WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus;citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query10.sql000066400000000000000000000036731317107136600236700ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY10 -- -- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
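-- Illustrative aside (added for clarity, not part of the original test file): the
-- other TPC-H test files in this suite pin this setting explicitly before running
-- their queries, e.g.:
--
--     SET citus.large_table_shard_count TO 2;
--
-- Leaving it unset here is deliberate, so the value configured in postgresql.conf
-- or multi_task_tracker_executor.conf is the one being exercised.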
-- connect to master \c - - - :master_port SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer_mx, orders_mx, lineitem_mx, nation_mx WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; -- connect one of the workers \c - - - :worker_1_port SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer_mx, orders_mx, lineitem_mx, nation_mx WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; -- connect to the other worker \c - - - :worker_2_port SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer_mx, orders_mx, lineitem_mx, nation_mx WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query12.sql000066400000000000000000000045731317107136600236720ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY12 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders_mx, lineitem_mx WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' year GROUP BY l_shipmode ORDER BY l_shipmode; -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders_mx, lineitem_mx WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' year GROUP BY l_shipmode ORDER BY l_shipmode; -- connect to the other worker node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, 
sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders_mx, lineitem_mx WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' year GROUP BY l_shipmode ORDER BY l_shipmode; citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query14.sql000066400000000000000000000032301317107136600236610ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY14 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem_mx, part_mx WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem_mx, part_mx WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; -- connect to the other node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem_mx, part_mx WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year;citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query19.sql000066400000000000000000000061501317107136600236720ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY19 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. 
SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem_mx, part_mx WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem_mx, part_mx WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); -- connect to the other node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem_mx, part_mx WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query3.sql000066400000000000000000000031351317107136600236030ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY3 -- -- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
-- connect to the coordinator \c - - - :master_port SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer_mx, orders_mx, lineitem_mx WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; -- connect one of the workers \c - - - :worker_1_port SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer_mx, orders_mx, lineitem_mx WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; -- connect to the other node \c - - - :worker_2_port SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer_mx, orders_mx, lineitem_mx WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query6.sql000066400000000000000000000026251317107136600236110ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY6 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem_mx WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; -- connect to one of the worker nodes \c - - - :worker_1_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem_mx WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; -- connect to the other worker node \c - - - :worker_2_port -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem_mx WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query7.sql000066400000000000000000000057171317107136600236170ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY7 -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, nation_mx n1, nation_mx n2 WHERE s_suppkey = l_suppkey AND 
o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; -- connect one of the workers \c - - - :worker_1_port -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, nation_mx n1, nation_mx n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; -- connect to the other worker node \c - - - :worker_2_port -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, nation_mx n1, nation_mx n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; citus-7.0.3/src/test/regress/sql/multi_mx_tpch_query7_nested.sql000066400000000000000000000067511317107136600251600ustar00rootroot00000000000000-- -- MULTI_MX_TPCH_QUERY7_NESTED -- -- connect to the coordinator \c - - - :master_port -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation_mx n1, nation_mx n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; -- connect to one of the workers \c - - - 
:worker_1_port -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation_mx n1, nation_mx n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; -- connect to the other worker \c - - - :worker_2_port -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier_mx, lineitem_mx, orders_mx, customer_mx, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation_mx n1, nation_mx n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; citus-7.0.3/src/test/regress/sql/multi_mx_transaction_recovery.sql000066400000000000000000000045431317107136600256030ustar00rootroot00000000000000-- Tests for running transaction recovery from a worker node SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO streaming; CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); \c - - - :worker_1_port SET citus.multi_shard_commit_protocol TO '2pc'; -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); SELECT count(*) FROM pg_dist_transaction; -- If the groupid of the worker changes this query will produce a -- different result and the prepared transaction names should be adapted -- accordingly.
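-- Illustrative sketch (added for clarity, not part of the original test): the
-- transactions prepared below follow Citus' 'citus_<groupid>_...' naming style,
-- and the <groupid> part has to match the groupid returned by the
-- pg_dist_local_group query that follows. Assuming that groupid is 12 (as this
-- test does), a manually prepared transaction in the same style would look like
-- the commented example below; the table and transaction names here are made up
-- for illustration only.
--
--     BEGIN;
--     CREATE TABLE example_prepared_tx (value int);
--     PREPARE TRANSACTION 'citus_12_example';
--
-- If the worker's groupid ever changes, this prefix and the PREPARE TRANSACTION
-- names used in the test below would need to be adapted in the same way.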
SELECT * FROM pg_dist_local_group; BEGIN; CREATE TABLE table_should_abort (value int); PREPARE TRANSACTION 'citus_12_should_abort'; BEGIN; CREATE TABLE table_should_commit (value int); PREPARE TRANSACTION 'citus_12_should_commit'; BEGIN; CREATE TABLE should_be_sorted_into_middle (value int); PREPARE TRANSACTION 'citus_12_should_be_sorted_into_middle'; -- Add "fake" pg_dist_transaction records and run recovery INSERT INTO pg_dist_transaction VALUES (12, 'citus_12_should_commit'); INSERT INTO pg_dist_transaction VALUES (12, 'citus_12_should_be_forgotten'); SELECT recover_prepared_transactions(); SELECT count(*) FROM pg_dist_transaction; SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_abort'; SELECT count(*) FROM pg_tables WHERE tablename = 'table_should_commit'; -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; -- Multi-statement transactions should write 2 transaction recovery records BEGIN; INSERT INTO test_recovery VALUES ('hello'); INSERT INTO test_recovery VALUES ('world'); COMMIT; SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); -- Committed COPY should write 3 transaction records (2 fall into the same shard) COPY test_recovery (x) FROM STDIN CSV; hello-0 hello-1 world-0 world-1 \. SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); DROP TABLE table_should_commit; \c - - - :master_port DROP TABLE test_recovery_ref; DROP TABLE test_recovery; citus-7.0.3/src/test/regress/sql/multi_name_lengths.sql000066400000000000000000000216011317107136600232730ustar00rootroot00000000000000-- -- MULTI_NAME_LENGTHS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 225000; SET citus.multi_shard_commit_protocol = '2pc'; -- Verify that a table name > 56 characters gets hashed properly. CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); \c - - - :worker_1_port \dt too_long_* \c - - - :master_port -- Verify that the UDF works and rejects bad arguments. SELECT shard_name(NULL, 666666); SELECT shard_name(0, 666666); SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, 666666); SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, NULL); SELECT shard_name('too_long_12345678901234567890123456789012345678901234567890'::regclass, -21); DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE; -- Table to use for rename checks. CREATE TABLE name_lengths ( col1 integer not null, col2 integer not null, constraint constraint_a UNIQUE (col1) ); SELECT master_create_distributed_table('name_lengths', 'col1', 'hash'); SELECT master_create_worker_shards('name_lengths', '2', '2'); -- Verify that we CAN add columns with "too-long names", because -- the columns' names are not extended in the corresponding shard tables. 
ALTER TABLE name_lengths ADD COLUMN float_col_12345678901234567890123456789012345678901234567890 FLOAT; ALTER TABLE name_lengths ADD COLUMN date_col_12345678901234567890123456789012345678901234567890 DATE; ALTER TABLE name_lengths ADD COLUMN int_col_12345678901234567890123456789012345678901234567890 INTEGER DEFAULT 1; -- Placeholders for unsupported ALTER TABLE to add constraints with implicit names that are likely too long ALTER TABLE name_lengths ADD UNIQUE (float_col_12345678901234567890123456789012345678901234567890); ALTER TABLE name_lengths ADD EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =); ALTER TABLE name_lengths ADD CHECK (date_col_12345678901234567890123456789012345678901234567890 > '2014-01-01'::date); \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.name_lengths_225002'::regclass; \c - - - :master_port -- Placeholders for unsupported add constraints with EXPLICIT names that are too long ALTER TABLE name_lengths ADD CONSTRAINT nl_unique_12345678901234567890123456789012345678901234567890 UNIQUE (float_col_12345678901234567890123456789012345678901234567890); ALTER TABLE name_lengths ADD CONSTRAINT nl_exclude_12345678901234567890123456789012345678901234567890 EXCLUDE (int_col_12345678901234567890123456789012345678901234567890 WITH =); ALTER TABLE name_lengths ADD CONSTRAINT nl_checky_12345678901234567890123456789012345678901234567890 CHECK (date_col_12345678901234567890123456789012345678901234567890 >= '2014-01-01'::date); \c - - - :worker_1_port SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.name_lengths_225002'::regclass; \c - - - :master_port -- Placeholders for RENAME operations ALTER TABLE name_lengths RENAME TO name_len_12345678901234567890123456789012345678901234567890; ALTER TABLE name_lengths RENAME CONSTRAINT unique_12345678901234567890123456789012345678901234567890 TO unique2_12345678901234567890123456789012345678901234567890; -- Verify that CREATE INDEX on already distributed table has proper shard names. CREATE INDEX tmp_idx_12345678901234567890123456789012345678901234567890 ON name_lengths(col2); \c - - - :worker_1_port \d tmp_idx_* \c - - - :master_port -- Verify that a new index name > 63 characters is auto-truncated -- by the parser/rewriter before further processing, just as in Postgres. CREATE INDEX tmp_idx_123456789012345678901234567890123456789012345678901234567890 ON name_lengths(col2); \c - - - :worker_1_port \d tmp_idx_* \c - - - :master_port -- Verify that distributed tables with too-long names -- for CHECK constraints are no trouble. 
CREATE TABLE sneaky_name_lengths ( col1 integer not null, col2 integer not null, int_col_12345678901234567890123456789012345678901234567890 integer not null, CHECK (int_col_12345678901234567890123456789012345678901234567890 > 100) ); SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); DROP TABLE sneaky_name_lengths CASCADE; CREATE TABLE sneaky_name_lengths ( int_col_123456789012345678901234567890123456789012345678901234 integer UNIQUE not null, col2 integer not null, CONSTRAINT checky_12345678901234567890123456789012345678901234567890 CHECK (int_col_123456789012345678901234567890123456789012345678901234 > 100) ); \di public.sneaky_name_lengths* SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths'::regclass; SELECT master_create_distributed_table('sneaky_name_lengths', 'int_col_123456789012345678901234567890123456789012345678901234', 'hash'); SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); \c - - - :worker_1_port \di public.sneaky*225006 SELECT "Constraint", "Definition" FROM table_checks WHERE relid='public.sneaky_name_lengths_225006'::regclass; \c - - - :master_port DROP TABLE sneaky_name_lengths CASCADE; -- verify that named constraint with too-long name gets hashed properly CREATE TABLE sneaky_name_lengths ( col1 integer not null, col2 integer not null, int_col_12345678901234567890123456789012345678901234567890 integer not null, constraint unique_12345678901234567890123456789012345678901234567890 UNIQUE (col1) ); SELECT master_create_distributed_table('sneaky_name_lengths', 'col1', 'hash'); SELECT master_create_worker_shards('sneaky_name_lengths', '2', '2'); \c - - - :worker_1_port \di unique*225008 \c - - - :master_port DROP TABLE sneaky_name_lengths CASCADE; -- Verify that much larger shardIds are handled properly ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 2250000000000; CREATE TABLE too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); SELECT master_create_worker_shards('too_long_12345678901234567890123456789012345678901234567890', '2', '2'); \c - - - :worker_1_port \dt *225000000000* \c - - - :master_port DROP TABLE too_long_12345678901234567890123456789012345678901234567890 CASCADE; -- Verify that multi-byte boundaries are respected for databases with UTF8 encoding. CREATE TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!' 
( col1 integer not null PRIMARY KEY, col2 integer not null); SELECT master_create_distributed_table(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', 'col1', 'hash'); SELECT master_create_worker_shards(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!', '2', '2'); -- Verify that quoting is used in shard_name SELECT shard_name(U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = U&'elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D' UESCAPE '!'::regclass; \c - - - :worker_1_port \dt public.elephant_* \di public.elephant_* \c - - - :master_port -- Verify that shard_name UDF supports schemas CREATE SCHEMA multi_name_lengths; CREATE TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890 ( col1 integer not null, col2 integer not null); SELECT master_create_distributed_table('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 'col1', 'hash'); SELECT master_create_worker_shards('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890', 2, 1); SELECT shard_name('multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass, min(shardid)) FROM pg_dist_shard WHERE logicalrelid = 'multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890'::regclass; DROP TABLE multi_name_lengths.too_long_12345678901234567890123456789012345678901234567890; -- Clean up. DROP TABLE name_lengths CASCADE; DROP TABLE U&"elephant_!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D!0441!043B!043E!043D" UESCAPE '!' CASCADE; citus-7.0.3/src/test/regress/sql/multi_null_minmax_value_pruning.sql000066400000000000000000000055251317107136600261170ustar00rootroot00000000000000-- -- MULTI_NULL_MINMAX_VALUE_PRUNING -- -- This test checks that we can handle null min/max values in shard statistics -- and that we don't partition or join prune shards that have null values. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 760000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; SET client_min_messages TO DEBUG2; SET citus.explain_all_tasks TO on; -- to avoid differing explain output - executor doesn't matter, -- because were testing pruning here. SET citus.task_executor_type TO 'real-time'; -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290000; SELECT shardminvalue, shardmaxvalue from pg_dist_shard WHERE shardid = 290001; -- Check that partition and join pruning works when min/max values exist -- Adding l_orderkey = 1 to make the query not router executable EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -- Now set the minimum value for a shard to null. 
Then check that we don't apply -- partition or join pruning for the shard with null min value. UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 290000; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -- Next, set the maximum value for another shard to null. Then check that we -- don't apply partition or join pruning for this other shard either. UPDATE pg_dist_shard SET shardmaxvalue = NULL WHERE shardid = 290001; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -- Last, set the minimum value to 0 and check that we don't treat it as null. We -- should apply partition and join pruning for this shard now. UPDATE pg_dist_shard SET shardminvalue = '0' WHERE shardid = 290000; EXPLAIN (COSTS FALSE) SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030; EXPLAIN (COSTS FALSE) SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey; -- Set minimum and maximum values for two shards back to their original values UPDATE pg_dist_shard SET shardminvalue = '1' WHERE shardid = 290000; UPDATE pg_dist_shard SET shardmaxvalue = '4964' WHERE shardid = 290001; SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/sql/multi_partition_pruning.sql000066400000000000000000000126461317107136600244130ustar00rootroot00000000000000-- -- MULTI_PARTITION_PRUNING -- -- Tests to verify that we correctly prune unreferenced shards. For this, we -- need to increase the logging verbosity of messages displayed on the client. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 770000; -- Adding additional l_orderkey = 1 to make this query not router executable SELECT l_orderkey, l_linenumber, l_shipdate FROM lineitem WHERE l_orderkey = 9030 or l_orderkey = 1; -- We use the l_linenumber field for the following aggregations. We need to use -- an integer type, as aggregations on numerics or big integers return numerics -- of unknown length. When the numerics are read into our temporary table, they -- trigger the the creation of toasted tables and indexes. This in turn prints -- non-deterministic debug messages. To avoid this chain, we use l_linenumber. SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 9030; SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE (l_orderkey < 4000 OR l_orderkey > 9030); -- The following query should prune out all shards and return empty results SELECT sum(l_linenumber), avg(l_linenumber) FROM lineitem WHERE l_orderkey > 20000; -- The tests below verify that we can prune shards partitioned over different -- types of columns including varchar, array types, composite types etc. This is -- in response to a bug we had where we were not able to resolve correct operator -- types for some kind of column types. First we create tables partitioned on -- these types and the logical shards and placements for them. 
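-- (Illustrative aside, not part of the original test file: the pruning decisions
-- exercised in this file, both for the lineitem queries above and for the
-- custom-typed tables created below, are driven by the per-shard min/max values
-- stored in pg_dist_shard, which can be inspected directly, e.g.:)
SELECT shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid = 'lineitem'::regclass
ORDER BY shardid;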
-- Create varchar partitioned table CREATE TABLE varchar_partitioned_table ( varchar_column varchar(100) ); SELECT master_create_distributed_table('varchar_partitioned_table', 'varchar_column', 'append'); -- Create logical shards and shard placements with shardid 100,101 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES('varchar_partitioned_table'::regclass, 100, 't', 'AA1000U2AMO4ZGX', 'AZZXSP27F21T6'), ('varchar_partitioned_table'::regclass, 101, 't', 'BA1000U2AMO4ZGX', 'BZZXSP27F21T6'); INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 100, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 101, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; -- Create array partitioned table RESET client_min_messages; -- avoid debug messages about toast index creation CREATE TABLE array_partitioned_table ( array_column text[] ); SELECT master_create_distributed_table('array_partitioned_table', 'array_column', 'append'); SET client_min_messages TO DEBUG2; -- Create logical shard with shardid 102, 103 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES('array_partitioned_table'::regclass, 102, 't', '{}', '{AZZXSP27F21T6, AZZXSP27F21T6}'), ('array_partitioned_table'::regclass, 103, 't', '{BA1000U2AMO4ZGX, BZZXSP27F21T6}', '{CA1000U2AMO4ZGX, CZZXSP27F21T6}'); INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 102, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 103, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; -- Create composite type partitioned table CREATE TYPE composite_type AS ( text_column text, double_column decimal, varchar_column varchar(50) ); RESET client_min_messages; -- avoid debug messages about toast index creation CREATE TABLE composite_partitioned_table ( composite_column composite_type ); SELECT master_create_distributed_table('composite_partitioned_table', 'composite_column', 'append'); SET client_min_messages TO DEBUG2; -- Create logical shard with shardid 104, 105 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) VALUES('composite_partitioned_table'::regclass, 104, 't', '(a,3,b)', '(b,4,c)'), ('composite_partitioned_table'::regclass, 105, 't', '(c,5,d)', '(d,6,e)'); INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 104, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 105, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 1; -- Verify that shard pruning works. Note that these queries should all -- prune one shard (see task count). As these tables don't exist -- remotely, temporarily disable WARNING messages. 
SET client_min_messages TO ERROR; EXPLAIN (COSTS OFF) SELECT count(*) FROM varchar_partitioned_table WHERE varchar_column = 'BA2'; EXPLAIN (COSTS OFF) SELECT count(*) FROM array_partitioned_table WHERE array_column > '{BA1000U2AMO4ZGX, BZZXSP27F21T6}'; EXPLAIN (COSTS OFF) SELECT count(*) FROM composite_partitioned_table WHERE composite_column < '(b,5,c)'::composite_type; SET client_min_messages TO NOTICE; citus-7.0.3/src/test/regress/sql/multi_partitioning.sql000066400000000000000000000712121317107136600233410ustar00rootroot00000000000000-- -- Distributed Partitioned Table Tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1660000; SET citus.shard_count TO 4; SET citus.shard_replication_factor TO 1; -- -- Distributed Partitioned Table Creation Tests -- -- 1-) Distributing partitioned table -- create partitioned table CREATE TABLE partitioning_test(id int, time date) PARTITION BY RANGE (time); -- create its partitions CREATE TABLE partitioning_test_2009 PARTITION OF partitioning_test FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); CREATE TABLE partitioning_test_2010 PARTITION OF partitioning_test FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -- load some data and distribute tables INSERT INTO partitioning_test VALUES (1, '2009-06-06'); INSERT INTO partitioning_test VALUES (2, '2010-07-07'); INSERT INTO partitioning_test_2009 VALUES (3, '2009-09-09'); INSERT INTO partitioning_test_2010 VALUES (4, '2010-03-03'); -- distribute partitioned table SELECT create_distributed_table('partitioning_test', 'id'); -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; -- see partitioned table and its partitions are distributed SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') ORDER BY 1; SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2009', 'partitioning_test_2010') GROUP BY logicalrelid ORDER BY 1,2; -- 2-) Creating partition of a distributed table CREATE TABLE partitioning_test_2011 PARTITION OF partitioning_test FOR VALUES FROM ('2011-01-01') TO ('2012-01-01'); -- new partition is automatically distributed as well SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') ORDER BY 1; SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2011') GROUP BY logicalrelid ORDER BY 1,2; -- 3-) Attaching non distributed table to a distributed table CREATE TABLE partitioning_test_2012(id int, time date); -- load some data INSERT INTO partitioning_test_2012 VALUES (5, '2012-06-06'); INSERT INTO partitioning_test_2012 VALUES (6, '2012-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2012 FOR VALUES FROM ('2012-01-01') TO ('2013-01-01'); -- attached partition is distributed as well SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') ORDER BY 1; SELECT logicalrelid, count(*) FROM pg_dist_shard WHERE logicalrelid IN ('partitioning_test', 'partitioning_test_2012') GROUP BY logicalrelid ORDER BY 1,2; -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; -- 4-) Attaching distributed table to distributed table CREATE TABLE partitioning_test_2013(id int, time date); SELECT create_distributed_table('partitioning_test_2013', 'id'); -- load some data INSERT INTO partitioning_test_2013 VALUES (7, '2013-06-06'); 
INSERT INTO partitioning_test_2013 VALUES (8, '2013-07-07'); ALTER TABLE partitioning_test ATTACH PARTITION partitioning_test_2013 FOR VALUES FROM ('2013-01-01') TO ('2014-01-01'); -- see the data is loaded to shards SELECT * FROM partitioning_test ORDER BY 1; -- 5-) Failure cases while creating distributed partitioned tables -- cannot distribute a partition if its parent is not distributed CREATE TABLE partitioning_test_failure(id int, time date) PARTITION BY RANGE (time); CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); -- only hash distributed tables can have partitions SELECT create_distributed_table('partitioning_test_failure', 'id', 'append'); SELECT create_distributed_table('partitioning_test_failure', 'id', 'range'); SELECT create_reference_table('partitioning_test_failure'); -- replication factor > 1 is not allowed in distributed partitioned tables SET citus.shard_replication_factor TO 2; SELECT create_distributed_table('partitioning_test_failure', 'id'); SET citus.shard_replication_factor TO 1; -- non-distributed tables cannot have distributed partitions; DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009(id int, time date); SELECT create_distributed_table('partitioning_test_failure_2009', 'id'); ALTER TABLE partitioning_test_failure ATTACH PARTITION partitioning_test_failure_2009 FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); -- multi-level partitioning is not allowed DROP TABLE partitioning_test_failure_2009; CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); SELECT create_distributed_table('partitioning_test_failure', 'id'); -- multi-level partitioning is not allowed in different order DROP TABLE partitioning_test_failure_2009; SELECT create_distributed_table('partitioning_test_failure', 'id'); CREATE TABLE partitioning_test_failure_2009 PARTITION OF partitioning_test_failure FOR VALUES FROM ('2009-01-01') TO ('2010-01-01') PARTITION BY RANGE (time); -- -- DMLs in distributed partitioned tables -- -- test COPY -- COPY data to partitioned table COPY partitioning_test FROM STDIN WITH CSV; 9,2009-01-01 10,2010-01-01 11,2011-01-01 12,2012-01-01 \. -- COPY data to partition directly COPY partitioning_test_2009 FROM STDIN WITH CSV; 13,2009-01-02 14,2009-01-03 \. 
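-- (Illustrative aside, not part of the original test file: a row COPYed into the
-- parent is routed to the matching partition, and rows COPYed into a partition
-- stay visible through the parent, which the two counts below demonstrate.)
SELECT count(*) FROM partitioning_test_2009 WHERE id = 9;
SELECT count(*) FROM partitioning_test WHERE id IN (13, 14);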
-- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 9 ORDER BY 1; -- test INSERT -- INSERT INTO the partitioned table INSERT INTO partitioning_test VALUES(15, '2009-02-01'); INSERT INTO partitioning_test VALUES(16, '2010-02-01'); INSERT INTO partitioning_test VALUES(17, '2011-02-01'); INSERT INTO partitioning_test VALUES(18, '2012-02-01'); -- INSERT INTO the partitions directly table INSERT INTO partitioning_test VALUES(19, '2009-02-02'); INSERT INTO partitioning_test VALUES(20, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id >= 15 ORDER BY 1; -- test INSERT/SELECT -- INSERT/SELECT from partition to partitioned table INSERT INTO partitioning_test SELECT * FROM partitioning_test_2011; -- INSERT/SELECT from partitioned table to partition INSERT INTO partitioning_test_2012 SELECT * FROM partitioning_test WHERE time >= '2012-01-01' AND time < '2013-01-01'; -- see the data is loaded to shards (rows in the given range should be duplicated) SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2013-01-01' ORDER BY 1; -- test UPDATE -- UPDATE partitioned table UPDATE partitioning_test SET time = '2013-07-07' WHERE id = 7; -- UPDATE partition directly UPDATE partitioning_test_2013 SET time = '2013-08-08' WHERE id = 8; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 7 OR id = 8 ORDER BY 1; -- UPDATE that tries to move a row to a non-existing partition (this should fail) UPDATE partitioning_test SET time = '2020-07-07' WHERE id = 7; -- UPDATE with subqueries on partitioned table UPDATE partitioning_test SET time = time + INTERVAL '1 day' WHERE id IN (SELECT id FROM partitioning_test WHERE id = 1); -- UPDATE with subqueries on partition UPDATE partitioning_test_2009 SET time = time + INTERVAL '1 month' WHERE id IN (SELECT id FROM partitioning_test WHERE id = 2); -- see the data is updated SELECT * FROM partitioning_test WHERE id = 1 OR id = 2 ORDER BY 1; -- test DELETE -- DELETE from partitioned table DELETE FROM partitioning_test WHERE id = 9; -- DELETE from partition directly DELETE FROM partitioning_test_2010 WHERE id = 10; -- see the data is deleted SELECT * FROM partitioning_test WHERE id = 9 OR id = 10 ORDER BY 1; -- test master_modify_multiple_shards -- master_modify_multiple_shards on partitioned table SELECT master_modify_multiple_shards('UPDATE partitioning_test SET time = time + INTERVAL ''1 day'''); -- see rows are UPDATED SELECT * FROM partitioning_test ORDER BY 1; -- master_modify_multiple_shards on partition directly SELECT master_modify_multiple_shards('UPDATE partitioning_test_2009 SET time = time + INTERVAL ''1 day'''); -- see rows are UPDATED SELECT * FROM partitioning_test_2009 ORDER BY 1; -- test master_modify_multiple_shards which fails in workers (updated value is outside of partition bounds) SELECT master_modify_multiple_shards('UPDATE partitioning_test_2009 SET time = time + INTERVAL ''6 month'''); -- -- DDL in distributed partitioned tables -- -- test CREATE INDEX -- CREATE INDEX on partitioned table - this will error out CREATE INDEX partitioning_index ON partitioning_test(id); -- CREATE INDEX on partition CREATE INDEX partitioning_2009_index ON partitioning_test_2009(id); -- CREATE INDEX CONCURRENTLY on partition CREATE INDEX CONCURRENTLY partitioned_2010_index ON partitioning_test_2010(id); -- see index is created SELECT tablename, indexname FROM pg_indexes WHERE tablename LIKE 'partitioning_test%' ORDER BY indexname; -- test add COLUMN -- add COLUMN to 
partitioned table ALTER TABLE partitioning_test ADD new_column int; -- add COLUMN to partition - this will error out ALTER TABLE partitioning_test_2010 ADD new_column_2 int; -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test_2010'::regclass ORDER BY 1; -- test add PRIMARY KEY -- add PRIMARY KEY to partitioned table - this will error out ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_primary PRIMARY KEY (id); -- ADD PRIMARY KEY to partition ALTER TABLE partitioning_test_2009 ADD CONSTRAINT partitioning_2009_primary PRIMARY KEY (id); -- see PRIMARY KEY is created SELECT table_name, constraint_name, constraint_type FROM information_schema.table_constraints WHERE table_name = 'partitioning_test_2009' AND constraint_name = 'partitioning_2009_primary'; -- test ADD FOREIGN CONSTRAINT -- add FOREIGN CONSTRAINT to partitioned table -- this will error out ALTER TABLE partitioning_test ADD CONSTRAINT partitioning_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id); -- add FOREIGN CONSTRAINT to partition INSERT INTO partitioning_test_2009 VALUES (5, '2009-06-06'); INSERT INTO partitioning_test_2009 VALUES (6, '2009-07-07'); INSERT INTO partitioning_test_2009 VALUES(12, '2009-02-01'); INSERT INTO partitioning_test_2009 VALUES(18, '2009-02-01'); ALTER TABLE partitioning_test_2012 ADD CONSTRAINT partitioning_2012_foreign FOREIGN KEY (id) REFERENCES partitioning_test_2009 (id) ON DELETE CASCADE; -- see FOREIGN KEY is created SELECT "Constraint" FROM table_fkeys WHERE relid = 'partitioning_test_2012'::regclass ORDER BY 1; -- test ON DELETE CASCADE works DELETE FROM partitioning_test_2009 WHERE id = 5; -- see that element is deleted from both partitions SELECT * FROM partitioning_test_2009 WHERE id = 5 ORDER BY 1; SELECT * FROM partitioning_test_2012 WHERE id = 5 ORDER BY 1; -- test DETACH partition ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2009; -- see DETACHed partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2009-01-01' AND time < '2010-01-01' ORDER BY 1; -- -- Transaction tests -- -- DDL in transaction BEGIN; ALTER TABLE partitioning_test ADD newer_column int; -- see additional column is created SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; ROLLBACK; -- see rollback is successful SELECT name, type FROM table_attrs WHERE relid = 'partitioning_test'::regclass ORDER BY 1; -- COPY in transaction BEGIN; COPY partitioning_test FROM STDIN WITH CSV; 22,2010-01-01,22 23,2011-01-01,23 24,2013-01-01,24 \. 
-- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 22 ORDER BY 1; SELECT * FROM partitioning_test WHERE id = 23 ORDER BY 1; SELECT * FROM partitioning_test WHERE id = 24 ORDER BY 1; ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id >= 22 ORDER BY 1; -- DML in transaction BEGIN; -- INSERT in transaction INSERT INTO partitioning_test VALUES(25, '2010-02-02'); -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; -- INSERT/SELECT in transaction INSERT INTO partitioning_test SELECT * FROM partitioning_test WHERE id = 25; -- see the data is loaded to shards SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; -- UPDATE in transaction UPDATE partitioning_test SET time = '2010-10-10' WHERE id = 25; -- see the data is updated SELECT * FROM partitioning_test WHERE id = 25 ORDER BY 1; -- perform operations on partition and partioned tables together INSERT INTO partitioning_test VALUES(26, '2010-02-02', 26); INSERT INTO partitioning_test_2010 VALUES(26, '2010-02-02', 26); COPY partitioning_test FROM STDIN WITH CSV; 26,2010-02-02,26 \. COPY partitioning_test_2010 FROM STDIN WITH CSV; 26,2010-02-02,26 \. -- see the data is loaded to shards (we should see 4 rows with same content) SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; ROLLBACK; -- see rollback is successful SELECT * FROM partitioning_test WHERE id = 26 ORDER BY 1; -- DETACH and DROP in a transaction BEGIN; ALTER TABLE partitioning_test DETACH PARTITION partitioning_test_2011; DROP TABLE partitioning_test_2011; COMMIT; -- see DROPed partitions content is not accessible SELECT * FROM partitioning_test WHERE time >= '2011-01-01' AND time < '2012-01-01' ORDER BY 1; -- -- Misc tests -- -- test TRUNCATE -- test TRUNCATE partition TRUNCATE partitioning_test_2012; -- see partition is TRUNCATEd SELECT * FROM partitioning_test_2012 ORDER BY 1; -- test TRUNCATE partitioned table TRUNCATE partitioning_test; -- see partitioned table is TRUNCATEd SELECT * FROM partitioning_test ORDER BY 1; -- test DROP -- test DROP partition INSERT INTO partitioning_test_2010 VALUES(27, '2010-02-01'); DROP TABLE partitioning_test_2010; -- see DROPped partitions content is not accessible from partitioning_test; SELECT * FROM partitioning_test WHERE time >= '2010-01-01' AND time < '2011-01-01' ORDER BY 1; -- test DROP partitioned table DROP TABLE partitioning_test; -- dropping the parent should CASCADE to the children as well SELECT table_name FROM information_schema.tables WHERE table_name LIKE 'partitioning_test%' ORDER BY 1; -- test distributing partitioned table colocated with non-partitioned table CREATE TABLE partitioned_users_table (user_id int, time timestamp, value_1 int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); CREATE TABLE partitioned_events_table (user_id int, time timestamp, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY RANGE (time); SELECT create_distributed_table('partitioned_users_table', 'user_id', colocate_with => 'users_table'); SELECT create_distributed_table('partitioned_events_table', 'user_id', colocate_with => 'events_table'); -- INSERT/SELECT from regular table to partitioned table CREATE TABLE partitioned_users_table_2009 PARTITION OF partitioned_users_table FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); CREATE TABLE partitioned_events_table_2009 PARTITION OF partitioned_events_table FOR VALUES FROM ('2014-01-01') TO ('2015-01-01'); INSERT INTO partitioned_events_table SELECT * 
FROM events_table; INSERT INTO partitioned_users_table_2009 SELECT * FROM users_table; -- -- Complex JOINs, subqueries, UNIONs etc... -- -- subquery with UNIONs on partitioned table SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM( (SELECT "events"."user_id", "events"."time", 0 AS event FROM partitioned_events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM partitioned_events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM partitioned_events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM partitioned_events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" ) AS final_query GROUP BY types ORDER BY types; -- UNION and JOIN on both partitioned and regular tables SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM partitioned_events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_table as "events", users_table as "users" WHERE events.user_id = users.user_id AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM partitioned_events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM partitioned_users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- test LIST partitioning CREATE TABLE list_partitioned_events_table (user_id int, time date, event_type int, value_2 int, value_3 float, value_4 bigint) PARTITION BY LIST (time); CREATE TABLE list_partitioned_events_table_2014_01_01_05 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-01', '2014-01-02', '2014-01-03', '2014-01-04', '2014-01-05'); CREATE TABLE list_partitioned_events_table_2014_01_06_10 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-06', '2014-01-07', '2014-01-08', '2014-01-09', '2014-01-10'); CREATE TABLE list_partitioned_events_table_2014_01_11_15 PARTITION OF list_partitioned_events_table FOR VALUES IN ('2014-01-11', '2014-01-12', '2014-01-13', '2014-01-14', '2014-01-15'); -- test distributing partitioned table colocated with another partitioned table SELECT create_distributed_table('list_partitioned_events_table', 'user_id', colocate_with 
=> 'partitioned_events_table'); -- INSERT/SELECT from partitioned table to partitioned table INSERT INTO list_partitioned_events_table SELECT user_id, date_trunc('day', time) as time, event_type, value_2, value_3, value_4 FROM events_table WHERE time >= '2014-01-01' AND time <= '2014-01-15'; -- LEFT JOINs used with INNER JOINs on range partitioned table, list partitioned table and non-partitioned table SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "list_partitioned_events_table"."time", "list_partitioned_events_table"."user_id" as event_user_id FROM list_partitioned_events_table as "list_partitioned_events_table" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM partitioned_users_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" LEFT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM partitioned_users_table as "users") "left_group_by_1" ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; -- -- Additional partitioning features -- -- test multi column partitioning CREATE TABLE multi_column_partitioning(c1 int, c2 int) PARTITION BY RANGE (c1, c2); CREATE TABLE multi_column_partitioning_0_0_10_0 PARTITION OF multi_column_partitioning FOR VALUES FROM (0, 0) TO (10, 0); SELECT create_distributed_table('multi_column_partitioning', 'c1'); -- test INSERT to multi-column partitioned table INSERT INTO multi_column_partitioning VALUES(1, 1); INSERT INTO multi_column_partitioning_0_0_10_0 VALUES(5, -5); -- test INSERT to multi-column partitioned table where no suitable partition exists INSERT INTO multi_column_partitioning VALUES(10, 1); -- test with MINVALUE/MAXVALUE CREATE TABLE multi_column_partitioning_10_max_20_min PARTITION OF multi_column_partitioning FOR VALUES FROM (10, MAXVALUE) TO (20, MINVALUE); -- test INSERT to partition with MINVALUE/MAXVALUE bounds INSERT INTO multi_column_partitioning VALUES(11, -11); INSERT INTO multi_column_partitioning_10_max_20_min VALUES(19, -19); -- test INSERT to multi-column partitioned table where no suitable partition exists INSERT INTO multi_column_partitioning VALUES(20, -20); -- see data is loaded to multi-column partitioned table SELECT * FROM multi_column_partitioning ORDER BY 1, 2; -- -- Tests for locks on partitioned tables -- CREATE TABLE partitioning_locks(id int, ref_id int, time date) PARTITION BY RANGE (time); -- create its partitions CREATE TABLE partitioning_locks_2009 PARTITION OF partitioning_locks FOR VALUES FROM ('2009-01-01') TO ('2010-01-01'); CREATE TABLE partitioning_locks_2010 PARTITION OF partitioning_locks FOR VALUES FROM ('2010-01-01') TO ('2011-01-01'); -- distribute partitioned table SELECT create_distributed_table('partitioning_locks', 'id'); -- test locks on router SELECT BEGIN; SELECT * FROM partitioning_locks WHERE id = 1 ORDER BY 1, 2; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on real-time SELECT BEGIN; SELECT * FROM partitioning_locks ORDER BY 1, 2; SELECT relation::regclass, locktype, mode FROM pg_locks 
WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on task-tracker SELECT SET citus.task_executor_type TO 'task-tracker'; BEGIN; SELECT * FROM partitioning_locks AS pl1 JOIN partitioning_locks AS pl2 ON pl1.id = pl2.ref_id ORDER BY 1, 2; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; SET citus.task_executor_type TO 'real-time'; -- test locks on INSERT BEGIN; INSERT INTO partitioning_locks VALUES(1, 1, '2009-01-01'); SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on UPDATE BEGIN; UPDATE partitioning_locks SET time = '2009-02-01' WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on DELETE BEGIN; DELETE FROM partitioning_locks WHERE id = 1; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on INSERT/SELECT CREATE TABLE partitioning_locks_for_select(id int, ref_id int, time date); SELECT create_distributed_table('partitioning_locks_for_select', 'id'); BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on coordinator INSERT/SELECT BEGIN; INSERT INTO partitioning_locks SELECT * FROM partitioning_locks_for_select LIMIT 5; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on master_modify_multiple_shards BEGIN; SELECT master_modify_multiple_shards('UPDATE partitioning_locks SET time = ''2009-03-01'''); SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on DDL BEGIN; ALTER TABLE partitioning_locks ADD COLUMN new_column int; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test locks on TRUNCATE BEGIN; TRUNCATE partitioning_locks; SELECT relation::regclass, locktype, mode FROM pg_locks WHERE relation::regclass::text LIKE 'partitioning_locks%' AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test shard resource locks with master_modify_multiple_shards BEGIN; SELECT master_modify_multiple_shards('UPDATE partitioning_locks_2009 SET time = ''2009-03-01'''); -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test shard resource locks with TRUNCATE BEGIN; TRUNCATE partitioning_locks_2009; -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 
'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; -- test shard resource locks with INSERT/SELECT BEGIN; INSERT INTO partitioning_locks_2009 SELECT * FROM partitioning_locks WHERE time >= '2009-01-01' AND time < '2010-01-01'; -- see the locks on parent table SELECT logicalrelid, locktype, mode FROM pg_locks AS l JOIN pg_dist_shard AS s ON l.objid = s.shardid WHERE logicalrelid IN ('partitioning_locks', 'partitioning_locks_2009', 'partitioning_locks_2010') AND pid = pg_backend_pid() ORDER BY 1, 2, 3; COMMIT; DROP TABLE IF EXISTS partitioning_test_2012, partitioning_test_2013, partitioned_events_table, partitioned_users_table, list_partitioned_events_table, multi_column_partitioning, partitioning_locks, partitioning_locks_for_select; citus-7.0.3/src/test/regress/sql/multi_partitioning_utils.sql000066400000000000000000000233331317107136600245620ustar00rootroot00000000000000-- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION generate_alter_table_detach_partition_command(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION generate_alter_table_attach_partition_command(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION generate_partition_information(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION print_partitions(regclass) RETURNS text AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION table_inherits(regclass) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION table_inherited(regclass) RETURNS bool AS 'citus' LANGUAGE C STRICT; CREATE OR REPLACE FUNCTION detach_and_attach_partition(partition_name regclass, parent_table_name regclass) RETURNS void LANGUAGE plpgsql VOLATILE AS $function$ DECLARE detach_partition_command text := ''; attach_partition_command text := ''; command_result text := ''; BEGIN -- first generate the attach command SELECT public.generate_alter_table_attach_partition_command(partition_name) INTO attach_partition_command; -- now generate the detach command SELECT public.generate_alter_table_detach_partition_command(partition_name) INTO detach_partition_command; -- detach the partition EXECUTE detach_partition_command; -- then attach it again EXECUTE attach_partition_command; END; $function$; CREATE OR REPLACE FUNCTION drop_and_recreate_partitioned_table(parent_table_name regclass) RETURNS void LANGUAGE plpgsql VOLATILE AS $function$ DECLARE command text := ''; BEGIN -- first capture the DDL commands for the table CREATE TABLE partitioned_table_create_commands AS SELECT master_get_table_ddl_events(parent_table_name::text); -- now drop the existing table EXECUTE 'DROP TABLE ' || parent_table_name::text || ';'; FOR command IN SELECT * FROM partitioned_table_create_commands LOOP -- can do some processing here EXECUTE command; END LOOP; DROP TABLE partitioned_table_create_commands; END; $function$; -- create a partitioned table CREATE TABLE date_partitioned_table(id int, time date) PARTITION BY RANGE (time); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('date_partitioned_table'); -- we should be able to drop and re-create the partitioned table using the commands that Citus generates SELECT drop_and_recreate_partitioned_table('date_partitioned_table'); -- we should also be able to see the PARTITION BY ... 
for the parent table SELECT master_get_table_ddl_events('date_partitioned_table'); -- now create the partitions CREATE TABLE date_partition_2006 PARTITION OF date_partitioned_table FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); CREATE TABLE date_partition_2007 PARTITION OF date_partitioned_table FOR VALUES FROM ('2007-01-01') TO ('2008-01-01'); -- we should be able to get the partitioning information after the partitions are created SELECT generate_partition_information('date_partitioned_table'); -- let's get the attach partition commands SELECT generate_alter_table_attach_partition_command('date_partition_2006'); SELECT generate_alter_table_attach_partition_command('date_partition_2007'); -- detach and attach the partition using the generated commands \d+ date_partitioned_table SELECT detach_and_attach_partition('date_partition_2007', 'date_partitioned_table'); -- check that both partitions are visible \d+ date_partitioned_table -- make sure that inter-shard commands work as expected -- assume that the shardId is 100 CREATE TABLE date_partitioned_table_100 (id int, time date) PARTITION BY RANGE (time); CREATE TABLE date_partition_2007_100 (id int, time date ); -- now create the partitioning hierarchy SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table ATTACH PARTITION date_partition_2007 FOR VALUES FROM (''2007-01-01'') TO (''2008-01-02'')' ); -- the hierarchy is successfully created \d+ date_partitioned_table_100 -- Citus can also get the DDL events for the partitions as regular tables SELECT master_get_table_ddl_events('date_partition_2007_100'); -- now break the partitioning hierarchy SELECT worker_apply_inter_shard_ddl_command(referencing_shard:=100, referencing_schema_name:='public', referenced_shard:=100, referenced_schema_name:='public', command:='ALTER TABLE date_partitioned_table DETACH PARTITION date_partition_2007' ); -- the hierarchy is successfully broken \d+ date_partitioned_table_100 -- now let's have some more complex partitioning hierarchies with -- tables on different schemas and constraints on the tables CREATE SCHEMA partition_parent_schema; CREATE TABLE partition_parent_schema.parent_table (id int NOT NULL, time date DEFAULT now()) PARTITION BY RANGE (time); CREATE SCHEMA partition_child_1_schema; CREATE TABLE partition_child_1_schema.child_1 (id int NOT NULL, time date ); CREATE SCHEMA partition_child_2_schema; CREATE TABLE partition_child_2_schema.child_2 (id int NOT NULL, time date ); -- we should be able to get the partitioning information even if there are no partitions SELECT generate_partition_information('partition_parent_schema.parent_table'); -- we should be able to drop and re-create the partitioned table using the commands that Citus generates SELECT drop_and_recreate_partitioned_table('partition_parent_schema.parent_table'); ALTER TABLE partition_parent_schema.parent_table ATTACH PARTITION partition_child_1_schema.child_1 FOR VALUES FROM ('2009-01-01') TO ('2010-01-02'); SET search_path = 'partition_parent_schema'; ALTER TABLE parent_table ATTACH PARTITION partition_child_2_schema.child_2 FOR VALUES FROM ('2006-01-01') TO ('2007-01-01'); SELECT public.generate_partition_information('parent_table'); -- let's get the attach partition commands SELECT public.generate_alter_table_attach_partition_command('partition_child_1_schema.child_1'); SET search_path = 'partition_child_2_schema'; SELECT 
public.generate_alter_table_attach_partition_command('child_2'); SET search_path = 'partition_parent_schema'; -- detach and attach the partition using the generated commands \d+ parent_table SELECT public.detach_and_attach_partition('partition_child_1_schema.child_1', 'parent_table'); -- check that both partitions are visible \d+ parent_table -- some very simple checks that should error out SELECT public.generate_alter_table_attach_partition_command('parent_table'); SELECT public.generate_partition_information('partition_child_1_schema.child_1'); SELECT public.print_partitions('partition_child_1_schema.child_1'); -- now print the partitions SELECT public.print_partitions('parent_table'); SET search_path = 'public'; -- test multi column / expression partitioning with UNBOUNDED ranges CREATE OR REPLACE FUNCTION some_function(input_val text) RETURNS text LANGUAGE plpgsql IMMUTABLE AS $function$ BEGIN return reverse(input_val); END; $function$; CREATE TABLE multi_column_partitioned ( a int, b int, c text ) PARTITION BY RANGE (a, (a+b+1), some_function(upper(c))); CREATE TABLE multi_column_partition_1( a int, b int, c text ); CREATE TABLE multi_column_partition_2( a int, b int, c text ); -- partitioning information SELECT generate_partition_information('multi_column_partitioned'); SELECT master_get_table_ddl_events('multi_column_partitioned'); SELECT drop_and_recreate_partitioned_table('multi_column_partitioned'); -- partitions and their ranges ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_1 FOR VALUES FROM (1, 10, '250') TO (1, 20, '250'); SELECT generate_alter_table_attach_partition_command('multi_column_partition_1'); ALTER TABLE multi_column_partitioned ATTACH PARTITION multi_column_partition_2 FOR VALUES FROM (10, 1000, '2500') TO (MAXVALUE, MAXVALUE, MAXVALUE); SELECT generate_alter_table_attach_partition_command('multi_column_partition_2'); SELECT generate_alter_table_detach_partition_command('multi_column_partition_2'); -- finally a test with LIST partitioning CREATE TABLE list_partitioned (col1 NUMERIC, col2 NUMERIC, col3 VARCHAR(10)) PARTITION BY LIST (col1); SELECT generate_partition_information('list_partitioned'); SELECT master_get_table_ddl_events('list_partitioned'); SELECT drop_and_recreate_partitioned_table('list_partitioned'); CREATE TABLE list_partitioned_1 PARTITION OF list_partitioned FOR VALUES IN (100, 101, 102, 103, 104); SELECT generate_alter_table_attach_partition_command('list_partitioned_1'); -- also differentiate partitions and inherited tables CREATE TABLE cities ( name text, population float, altitude int -- in feet ); CREATE TABLE capitals ( state char(2) ) INHERITS (cities); -- returns true since capitals inherits from cities SELECT table_inherits('capitals'); -- although date_partition_2006 inherits from its parent -- returns false since the hierarchy is formed via partitioning SELECT table_inherits('date_partition_2006'); -- returns true since cities is inherited by capitals SELECT table_inherited('cities'); -- although date_partitioned_table is inherited by its partitions -- returns false since the hierarchy is formed via partitioning SELECT table_inherited('date_partitioned_table'); -- also these are not supported SELECT master_get_table_ddl_events('capitals'); SELECT master_get_table_ddl_events('cities'); -- dropping the parents also drops the partitions DROP TABLE date_partitioned_table, multi_column_partitioned, list_partitioned, partition_parent_schema.parent_table, cities, capitals;
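-- (Illustrative sketch, not part of the original test file: the helper UDFs defined
-- at the top of this file can be exercised on a throwaway hierarchy; the sketch_*
-- names below are hypothetical and the objects are dropped again immediately.)
CREATE TABLE sketch_parent(id int, time date) PARTITION BY RANGE (time);
CREATE TABLE sketch_2017 PARTITION OF sketch_parent FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');
SELECT generate_partition_information('sketch_parent');
SELECT generate_alter_table_detach_partition_command('sketch_2017');
SELECT generate_alter_table_attach_partition_command('sketch_2017');
DROP TABLE sketch_parent;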
citus-7.0.3/src/test/regress/sql/multi_prepare_plsql.sql000066400000000000000000000357761317107136600235220ustar00rootroot00000000000000-- -- MULTI_PREPARE_PLSQL -- -- Many of the queries are taken from other regression test files -- and converted into both plain SQL and PL/pgsql functions, which -- use prepared statements internally. CREATE FUNCTION plpgsql_test_1() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_2() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_3() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_4() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_5() RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_6(int) RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey AND l_suppkey > $1; END; $$ LANGUAGE plpgsql; CREATE FUNCTION plpgsql_test_7(text, text) RETURNS TABLE(supp_natadsion text, cusasdt_nation text, l_yeasdar int, sasdaum double precision) AS $$ DECLARE BEGIN RETURN QUERY SELECT supp_nation::text, cust_nation::text, l_year::int, sum(volume)::double precision AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier, lineitem, orders, customer, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation n1, nation n2 WHERE ( (n1.n_name = $1 AND n2.n_name = $2) OR (n1.n_name = $2 AND n2.n_name = $1) ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; END; $$ LANGUAGE plpgsql; SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); SELECT plpgsql_test_2(); SELECT plpgsql_test_3(); SELECT plpgsql_test_4(); SELECT plpgsql_test_5(); -- run PL/pgsql functions with different parameters SELECT plpgsql_test_6(155); SELECT plpgsql_test_6(1555); SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); SELECT plpgsql_test_7('FRANCE', 'GERMANY'); -- now, PL/pgsql functions with random order SELECT plpgsql_test_6(155); SELECT plpgsql_test_3(); SELECT plpgsql_test_7('FRANCE', 'GERMANY'); SELECT plpgsql_test_5(); SELECT plpgsql_test_1(); SELECT plpgsql_test_6(1555); SELECT plpgsql_test_4(); SELECT plpgsql_test_7('UNITED KINGDOM', 'CHINA'); SELECT plpgsql_test_2(); -- run the tests which do not require re-partition -- with real-time executor SET citus.task_executor_type TO 'real-time'; -- now, run PL/pgsql functions SELECT plpgsql_test_1(); SELECT plpgsql_test_2(); -- run PL/pgsql functions with different parameters SELECT 
plpgsql_test_6(155); SELECT plpgsql_test_6(1555); -- test router executor parameterized PL/pgsql functions CREATE TABLE plpgsql_table ( key int, value int ); SELECT master_create_distributed_table('plpgsql_table','key','hash'); SELECT master_create_worker_shards('plpgsql_table',4,1); CREATE FUNCTION no_parameter_insert() RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key) VALUES (0); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT no_parameter_insert(); SELECT no_parameter_insert(); SELECT no_parameter_insert(); SELECT no_parameter_insert(); SELECT no_parameter_insert(); SELECT no_parameter_insert(); CREATE FUNCTION single_parameter_insert(key_arg int) RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key) VALUES (key_arg); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT single_parameter_insert(1); SELECT single_parameter_insert(2); SELECT single_parameter_insert(3); SELECT single_parameter_insert(4); SELECT single_parameter_insert(5); SELECT single_parameter_insert(6); CREATE FUNCTION double_parameter_insert(key_arg int, value_arg int) RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key, value) VALUES (key_arg, value_arg); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT double_parameter_insert(1, 10); SELECT double_parameter_insert(2, 20); SELECT double_parameter_insert(3, 30); SELECT double_parameter_insert(4, 40); SELECT double_parameter_insert(5, 50); SELECT double_parameter_insert(6, 60); CREATE FUNCTION non_partition_parameter_insert(value_arg int) RETURNS void as $$ BEGIN INSERT INTO plpgsql_table (key, value) VALUES (0, value_arg); END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_insert(10); SELECT non_partition_parameter_insert(20); SELECT non_partition_parameter_insert(30); SELECT non_partition_parameter_insert(40); SELECT non_partition_parameter_insert(50); SELECT non_partition_parameter_insert(60); -- check inserted values SELECT * FROM plpgsql_table ORDER BY key, value; -- check router executor select CREATE FUNCTION router_partition_column_select(key_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = key_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_partition_column_select(1); SELECT router_partition_column_select(2); SELECT router_partition_column_select(3); SELECT router_partition_column_select(4); SELECT router_partition_column_select(5); SELECT router_partition_column_select(6); CREATE FUNCTION router_non_partition_column_select(value_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = 0 AND plpgsql_table.value = value_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT router_non_partition_column_select(10); SELECT router_non_partition_column_select(20); SELECT router_non_partition_column_select(30); SELECT router_non_partition_column_select(40); SELECT router_non_partition_column_select(50); SELECT router_non_partition_column_select(60); -- check real-time executor CREATE FUNCTION real_time_non_partition_column_select(value_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT 
plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.value = value_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_non_partition_column_select(10); SELECT real_time_non_partition_column_select(20); SELECT real_time_non_partition_column_select(30); SELECT real_time_non_partition_column_select(40); SELECT real_time_non_partition_column_select(50); SELECT real_time_non_partition_column_select(60); CREATE FUNCTION real_time_partition_column_select(key_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = key_arg OR plpgsql_table.value = 10 ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT real_time_partition_column_select(1); SELECT real_time_partition_column_select(2); SELECT real_time_partition_column_select(3); SELECT real_time_partition_column_select(4); SELECT real_time_partition_column_select(5); SELECT real_time_partition_column_select(6); -- check task-tracker executor SET citus.task_executor_type TO 'task-tracker'; CREATE FUNCTION task_tracker_non_partition_column_select(value_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.value = value_arg ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_non_partition_column_select(10); SELECT task_tracker_non_partition_column_select(20); SELECT task_tracker_non_partition_column_select(30); SELECT task_tracker_non_partition_column_select(40); SELECT task_tracker_non_partition_column_select(50); SELECT real_time_non_partition_column_select(60); CREATE FUNCTION task_tracker_partition_column_select(key_arg int) RETURNS TABLE(key int, value int) AS $$ DECLARE BEGIN RETURN QUERY SELECT plpgsql_table.key, plpgsql_table.value FROM plpgsql_table WHERE plpgsql_table.key = key_arg OR plpgsql_table.value = 10 ORDER BY key, value; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT task_tracker_partition_column_select(1); SELECT task_tracker_partition_column_select(2); SELECT task_tracker_partition_column_select(3); SELECT task_tracker_partition_column_select(4); SELECT task_tracker_partition_column_select(5); SELECT task_tracker_partition_column_select(6); SET citus.task_executor_type TO 'real-time'; -- check updates CREATE FUNCTION partition_parameter_update(int, int) RETURNS void as $$ BEGIN UPDATE plpgsql_table SET value = $2 WHERE key = $1; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_update(1, 11); SELECT partition_parameter_update(2, 21); SELECT partition_parameter_update(3, 31); SELECT partition_parameter_update(4, 41); SELECT partition_parameter_update(5, 51); SELECT partition_parameter_update(6, 61); CREATE FUNCTION non_partition_parameter_update(int, int) RETURNS void as $$ BEGIN UPDATE plpgsql_table SET value = $2 WHERE key = 0 AND value = $1; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_update(10, 12); SELECT non_partition_parameter_update(20, 22); SELECT non_partition_parameter_update(30, 32); SELECT non_partition_parameter_update(40, 42); SELECT non_partition_parameter_update(50, 52); SELECT non_partition_parameter_update(60, 62); -- 
check table after updates SELECT * FROM plpgsql_table ORDER BY key, value; -- check deletes CREATE FUNCTION partition_parameter_delete(int, int) RETURNS void as $$ BEGIN DELETE FROM plpgsql_table WHERE key = $1 AND value = $2; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT partition_parameter_delete(1, 11); SELECT partition_parameter_delete(2, 21); SELECT partition_parameter_delete(3, 31); SELECT partition_parameter_delete(4, 41); SELECT partition_parameter_delete(5, 51); SELECT partition_parameter_delete(6, 61); CREATE FUNCTION non_partition_parameter_delete(int) RETURNS void as $$ BEGIN DELETE FROM plpgsql_table WHERE key = 0 AND value = $1; END; $$ LANGUAGE plpgsql; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete(12); SELECT non_partition_parameter_delete(22); SELECT non_partition_parameter_delete(32); SELECT non_partition_parameter_delete(42); SELECT non_partition_parameter_delete(52); SELECT non_partition_parameter_delete(62); -- check table after deletes SELECT * FROM plpgsql_table ORDER BY key, value; -- check whether we can handle execute parameters CREATE TABLE execute_parameter_test (key int, val date); SELECT create_distributed_table('execute_parameter_test', 'key'); DO $$ BEGIN EXECUTE 'INSERT INTO execute_parameter_test VALUES (3, $1)' USING date '2000-01-01'; EXECUTE 'INSERT INTO execute_parameter_test VALUES (3, $1)' USING NULL::date; END; $$; DROP TABLE execute_parameter_test; -- check whether we can handle parameters + default CREATE TABLE func_parameter_test ( key text NOT NULL, seq int4 NOT NULL, created_at timestamptz NOT NULL DEFAULT now(), updated_at timestamptz NOT NULL DEFAULT now(), PRIMARY KEY (key, seq) ); SELECT create_distributed_table('func_parameter_test', 'key'); CREATE OR REPLACE FUNCTION insert_with_max(pkey text) RETURNS VOID AS $BODY$ DECLARE max_seq int4; BEGIN SELECT MAX(seq) INTO max_seq FROM func_parameter_test WHERE func_parameter_test.key = pkey; IF max_seq IS NULL THEN max_seq := 0; END IF; INSERT INTO func_parameter_test(key, seq) VALUES (pkey, max_seq + 1); END; $BODY$ LANGUAGE plpgsql; SELECT insert_with_max('key'); SELECT insert_with_max('key'); SELECT insert_with_max('key'); SELECT insert_with_max('key'); SELECT insert_with_max('key'); SELECT insert_with_max('key'); SELECT key, seq FROM func_parameter_test ORDER BY seq; DROP FUNCTION insert_with_max(text); DROP TABLE func_parameter_test; -- test prepared DDL, mainly to verify we don't mess up the query tree SET citus.multi_shard_commit_protocol TO '2pc'; CREATE TABLE prepare_ddl (x int, y int); SELECT create_distributed_table('prepare_ddl', 'x'); CREATE OR REPLACE FUNCTION ddl_in_plpgsql() RETURNS VOID AS $BODY$ BEGIN CREATE INDEX prepared_index ON public.prepare_ddl(x); DROP INDEX prepared_index; END; $BODY$ LANGUAGE plpgsql; SELECT ddl_in_plpgsql(); SELECT ddl_in_plpgsql(); -- test prepared COPY CREATE OR REPLACE FUNCTION copy_in_plpgsql() RETURNS VOID AS $BODY$ BEGIN COPY prepare_ddl (x) FROM PROGRAM 'echo 1' WITH CSV; END; $BODY$ LANGUAGE plpgsql; SELECT copy_in_plpgsql(); SELECT copy_in_plpgsql(); DROP FUNCTION ddl_in_plpgsql(); DROP FUNCTION copy_in_plpgsql(); DROP TABLE prepare_ddl; -- clean-up functions DROP FUNCTION plpgsql_test_1(); DROP FUNCTION plpgsql_test_2(); DROP FUNCTION plpgsql_test_3(); DROP FUNCTION plpgsql_test_4(); DROP FUNCTION plpgsql_test_5(); DROP FUNCTION plpgsql_test_6(int); DROP FUNCTION plpgsql_test_7(text, text); DROP FUNCTION no_parameter_insert(); DROP FUNCTION 
single_parameter_insert(int); DROP FUNCTION double_parameter_insert(int, int); DROP FUNCTION non_partition_parameter_insert(int); DROP FUNCTION router_partition_column_select(int); DROP FUNCTION router_non_partition_column_select(int); DROP FUNCTION real_time_non_partition_column_select(int); DROP FUNCTION real_time_partition_column_select(int); DROP FUNCTION task_tracker_non_partition_column_select(int); DROP FUNCTION task_tracker_partition_column_select(int); DROP FUNCTION partition_parameter_update(int, int); DROP FUNCTION non_partition_parameter_update(int, int); DROP FUNCTION partition_parameter_delete(int, int); DROP FUNCTION non_partition_parameter_delete(int); citus-7.0.3/src/test/regress/sql/multi_prepare_sql.sql000066400000000000000000000452671317107136600231620ustar00rootroot00000000000000-- -- MULTI_PREPARE_SQL -- -- Tests covering PREPARE statements. Many of the queries are -- taken from other regression test files and converted into -- prepared statements. PREPARE prepared_test_1 AS SELECT count(*) FROM orders; PREPARE prepared_test_2 AS SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey; PREPARE prepared_test_3 AS SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; PREPARE prepared_test_4 AS SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; PREPARE prepared_test_5 AS SELECT count(*) FROM lineitem, customer WHERE l_partkey = c_nationkey; PREPARE prepared_test_6(int) AS SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey AND l_suppkey > $1; PREPARE prepared_test_7(text, text) AS SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier, lineitem, orders, customer, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation n1, nation n2 WHERE ( (n1.n_name = $1 AND n2.n_name = $2) OR (n1.n_name = $2 AND n2.n_name = $1) ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- execute prepared statements EXECUTE prepared_test_1; EXECUTE prepared_test_2; EXECUTE prepared_test_3; EXECUTE prepared_test_4; EXECUTE prepared_test_5; -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); EXECUTE prepared_test_6(1555); EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); EXECUTE prepared_test_7('FRANCE', 'GERMANY'); -- now, execute prepared statements with random order EXECUTE prepared_test_6(155); EXECUTE prepared_test_3; EXECUTE prepared_test_7('FRANCE', 'GERMANY'); EXECUTE prepared_test_5; EXECUTE prepared_test_1; EXECUTE prepared_test_6(1555); EXECUTE prepared_test_4; EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); EXECUTE prepared_test_2; -- CREATE TABLE ... 
AS EXECUTE prepared_statement tests CREATE TEMP TABLE prepared_sql_test_7 AS EXECUTE prepared_test_7('UNITED KINGDOM', 'CHINA'); SELECT * from prepared_sql_test_7; -- now, run some of the tests with real-time executor SET citus.task_executor_type TO 'real-time'; -- execute prepared statements EXECUTE prepared_test_1; EXECUTE prepared_test_2; -- execute prepared statements with different parameters EXECUTE prepared_test_6(155); EXECUTE prepared_test_6(1555); -- test router executor with parameterized non-partition columns -- create a custom type which also exists on worker nodes CREATE TYPE test_composite_type AS ( i integer, i2 integer ); CREATE TABLE router_executor_table ( id bigint NOT NULL, comment varchar(20), stats test_composite_type ); SELECT master_create_distributed_table('router_executor_table', 'id', 'hash'); SELECT master_create_worker_shards('router_executor_table', 2, 2); -- test parameterized inserts PREPARE prepared_insert(varchar(20)) AS INSERT INTO router_executor_table VALUES (1, $1, $2); EXECUTE prepared_insert('comment-1', '(1, 10)'); EXECUTE prepared_insert('comment-2', '(2, 20)'); EXECUTE prepared_insert('comment-3', '(3, 30)'); EXECUTE prepared_insert('comment-4', '(4, 40)'); EXECUTE prepared_insert('comment-5', '(5, 50)'); EXECUTE prepared_insert('comment-6', '(6, 60)'); SELECT * FROM router_executor_table ORDER BY comment; -- test parameterized selects PREPARE prepared_select(integer, integer) AS SELECT count(*) FROM router_executor_table WHERE id = 1 AND stats = ROW($1, $2)::test_composite_type; EXECUTE prepared_select(1, 10); EXECUTE prepared_select(2, 20); EXECUTE prepared_select(3, 30); EXECUTE prepared_select(4, 40); EXECUTE prepared_select(5, 50); EXECUTE prepared_select(6, 60); -- Test that parameterized partition column for an insert is supported PREPARE prepared_partition_column_insert(bigint) AS INSERT INTO router_executor_table VALUES ($1, 'arsenous', '(1,10)'); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_partition_column_insert(1); EXECUTE prepared_partition_column_insert(2); EXECUTE prepared_partition_column_insert(3); EXECUTE prepared_partition_column_insert(4); EXECUTE prepared_partition_column_insert(5); EXECUTE prepared_partition_column_insert(6); DROP TYPE test_composite_type CASCADE; -- test router executor with prepare statements CREATE TABLE prepare_table ( key int, value int ); SELECT master_create_distributed_table('prepare_table','key','hash'); SELECT master_create_worker_shards('prepare_table',4,1); PREPARE prepared_no_parameter_insert AS INSERT INTO prepare_table (key) VALUES (0); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; EXECUTE prepared_no_parameter_insert; PREPARE prepared_single_parameter_insert(int) AS INSERT INTO prepare_table (key) VALUES ($1); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_single_parameter_insert(1); EXECUTE prepared_single_parameter_insert(2); EXECUTE prepared_single_parameter_insert(3); EXECUTE prepared_single_parameter_insert(4); EXECUTE prepared_single_parameter_insert(5); EXECUTE prepared_single_parameter_insert(6); PREPARE prepared_double_parameter_insert(int, int) AS INSERT INTO prepare_table (key, value) VALUES ($1, $2); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_double_parameter_insert(1, 10); EXECUTE 
prepared_double_parameter_insert(2, 20); EXECUTE prepared_double_parameter_insert(3, 30); EXECUTE prepared_double_parameter_insert(4, 40); EXECUTE prepared_double_parameter_insert(5, 50); EXECUTE prepared_double_parameter_insert(6, 60); PREPARE prepared_multi_insert(int, int) AS INSERT INTO prepare_table (key, value) VALUES ($1, $2), ($1 + 1, $2 + 10); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_multi_insert( 7, 70); EXECUTE prepared_multi_insert( 9, 90); EXECUTE prepared_multi_insert(11, 110); EXECUTE prepared_multi_insert(13, 130); EXECUTE prepared_multi_insert(15, 150); EXECUTE prepared_multi_insert(17, 170); PREPARE prepared_non_partition_parameter_insert(int) AS INSERT INTO prepare_table (key, value) VALUES (0, $1); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_non_partition_parameter_insert(10); EXECUTE prepared_non_partition_parameter_insert(20); EXECUTE prepared_non_partition_parameter_insert(30); EXECUTE prepared_non_partition_parameter_insert(40); EXECUTE prepared_non_partition_parameter_insert(50); EXECUTE prepared_non_partition_parameter_insert(60); -- check inserted values SELECT * FROM prepare_table ORDER BY key, value; SELECT master_modify_multiple_shards('DELETE FROM prepare_table WHERE value >= 70'); -- check router executor select PREPARE prepared_router_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = $1 ORDER BY key, value; EXECUTE prepared_router_partition_column_select(1); EXECUTE prepared_router_partition_column_select(2); EXECUTE prepared_router_partition_column_select(3); EXECUTE prepared_router_partition_column_select(4); EXECUTE prepared_router_partition_column_select(5); EXECUTE prepared_router_partition_column_select(6); PREPARE prepared_router_non_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = 0 AND prepare_table.value = $1 ORDER BY key, value; EXECUTE prepared_router_non_partition_column_select(10); EXECUTE prepared_router_non_partition_column_select(20); EXECUTE prepared_router_non_partition_column_select(30); EXECUTE prepared_router_non_partition_column_select(40); EXECUTE prepared_router_non_partition_column_select(50); EXECUTE prepared_router_non_partition_column_select(60); -- check real-time executor PREPARE prepared_real_time_non_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.value = $1 ORDER BY key, value; EXECUTE prepared_real_time_non_partition_column_select(10); EXECUTE prepared_real_time_non_partition_column_select(20); EXECUTE prepared_real_time_non_partition_column_select(30); EXECUTE prepared_real_time_non_partition_column_select(40); EXECUTE prepared_real_time_non_partition_column_select(50); EXECUTE prepared_real_time_non_partition_column_select(60); PREPARE prepared_real_time_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = $1 OR prepare_table.value = 10 ORDER BY key, value; EXECUTE prepared_real_time_partition_column_select(1); EXECUTE prepared_real_time_partition_column_select(2); EXECUTE prepared_real_time_partition_column_select(3); EXECUTE prepared_real_time_partition_column_select(4); EXECUTE prepared_real_time_partition_column_select(5); EXECUTE prepared_real_time_partition_column_select(6); -- check task-tracker executor SET citus.task_executor_type TO 'task-tracker'; PREPARE 
prepared_task_tracker_non_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.value = $1 ORDER BY key, value; EXECUTE prepared_task_tracker_non_partition_column_select(10); EXECUTE prepared_task_tracker_non_partition_column_select(20); EXECUTE prepared_task_tracker_non_partition_column_select(30); EXECUTE prepared_task_tracker_non_partition_column_select(40); EXECUTE prepared_task_tracker_non_partition_column_select(50); EXECUTE prepared_task_tracker_non_partition_column_select(60); PREPARE prepared_task_tracker_partition_column_select(int) AS SELECT prepare_table.key, prepare_table.value FROM prepare_table WHERE prepare_table.key = $1 OR prepare_table.value = 10 ORDER BY key, value; EXECUTE prepared_task_tracker_partition_column_select(1); EXECUTE prepared_task_tracker_partition_column_select(2); EXECUTE prepared_task_tracker_partition_column_select(3); EXECUTE prepared_task_tracker_partition_column_select(4); EXECUTE prepared_task_tracker_partition_column_select(5); EXECUTE prepared_task_tracker_partition_column_select(6); SET citus.task_executor_type TO 'real-time'; -- check updates PREPARE prepared_partition_parameter_update(int, int) AS UPDATE prepare_table SET value = $2 WHERE key = $1; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_partition_parameter_update(1, 11); EXECUTE prepared_partition_parameter_update(2, 21); EXECUTE prepared_partition_parameter_update(3, 31); EXECUTE prepared_partition_parameter_update(4, 41); EXECUTE prepared_partition_parameter_update(5, 51); EXECUTE prepared_partition_parameter_update(6, 61); PREPARE prepared_non_partition_parameter_update(int, int) AS UPDATE prepare_table SET value = $2 WHERE key = 0 AND value = $1; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_non_partition_parameter_update(10, 12); EXECUTE prepared_non_partition_parameter_update(20, 22); EXECUTE prepared_non_partition_parameter_update(30, 32); EXECUTE prepared_non_partition_parameter_update(40, 42); EXECUTE prepared_non_partition_parameter_update(50, 52); EXECUTE prepared_non_partition_parameter_update(60, 62); -- check after updates SELECT * FROM prepare_table ORDER BY key, value; -- check deletes PREPARE prepared_partition_parameter_delete(int, int) AS DELETE FROM prepare_table WHERE key = $1 AND value = $2; EXECUTE prepared_partition_parameter_delete(1, 11); EXECUTE prepared_partition_parameter_delete(2, 21); EXECUTE prepared_partition_parameter_delete(3, 31); EXECUTE prepared_partition_parameter_delete(4, 41); EXECUTE prepared_partition_parameter_delete(5, 51); EXECUTE prepared_partition_parameter_delete(6, 61); PREPARE prepared_non_partition_parameter_delete(int) AS DELETE FROM prepare_table WHERE key = 0 AND value = $1; -- execute 6 times to trigger prepared statement usage EXECUTE prepared_non_partition_parameter_delete(12); EXECUTE prepared_non_partition_parameter_delete(22); EXECUTE prepared_non_partition_parameter_delete(32); EXECUTE prepared_non_partition_parameter_delete(42); EXECUTE prepared_non_partition_parameter_delete(52); EXECUTE prepared_non_partition_parameter_delete(62); -- check after deletes SELECT * FROM prepare_table ORDER BY key, value; -- Testing parameters + function evaluation CREATE TABLE prepare_func_table ( key text, value1 int, value2 text, value3 timestamptz DEFAULT now() ); SELECT create_distributed_table('prepare_func_table', 'key'); -- test function evaluation with parameters in an expression PREPARE 
prepared_function_evaluation_insert(int) AS INSERT INTO prepare_func_table (key, value1) VALUES ($1+1, 0*random()); -- execute 6 times to trigger prepared statement usage EXECUTE prepared_function_evaluation_insert(1); EXECUTE prepared_function_evaluation_insert(2); EXECUTE prepared_function_evaluation_insert(3); EXECUTE prepared_function_evaluation_insert(4); EXECUTE prepared_function_evaluation_insert(5); EXECUTE prepared_function_evaluation_insert(6); SELECT key, value1 FROM prepare_func_table ORDER BY key; TRUNCATE prepare_func_table; -- make it a bit harder: parameter wrapped in a function call PREPARE wrapped_parameter_evaluation(text,text[]) AS INSERT INTO prepare_func_table (key,value2) VALUES ($1,array_to_string($2,'')); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); EXECUTE wrapped_parameter_evaluation('key', ARRAY['value']); SELECT key, value2 FROM prepare_func_table; DROP TABLE prepare_func_table; -- Text columns can give issues when there is an implicit cast from varchar CREATE TABLE text_partition_column_table ( key text NOT NULL, value int ); SELECT create_distributed_table('text_partition_column_table', 'key'); PREPARE prepared_relabel_insert(varchar) AS INSERT INTO text_partition_column_table VALUES ($1, 1); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); EXECUTE prepared_relabel_insert('test'); SELECT key, value FROM text_partition_column_table ORDER BY key; DROP TABLE text_partition_column_table; -- Domain type columns can give issues CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$'); SELECT run_command_on_workers($$ CREATE DOMAIN test_key AS text CHECK(VALUE ~ '^test-\d$') $$); CREATE TABLE domain_partition_column_table ( key test_key NOT NULL, value int ); SELECT create_distributed_table('domain_partition_column_table', 'key'); PREPARE prepared_coercion_to_domain_insert(text) AS INSERT INTO domain_partition_column_table VALUES ($1, 1); EXECUTE prepared_coercion_to_domain_insert('test-1'); EXECUTE prepared_coercion_to_domain_insert('test-2'); EXECUTE prepared_coercion_to_domain_insert('test-3'); EXECUTE prepared_coercion_to_domain_insert('test-4'); EXECUTE prepared_coercion_to_domain_insert('test-5'); EXECUTE prepared_coercion_to_domain_insert('test-6'); SELECT key, value FROM domain_partition_column_table ORDER BY key; DROP TABLE domain_partition_column_table; -- verify we re-evaluate volatile functions every time CREATE TABLE http_request ( site_id INT, ingest_time TIMESTAMPTZ DEFAULT now(), url TEXT, request_country TEXT, ip_address TEXT, status_code INT, response_time_msec INT ); SELECT create_distributed_table('http_request', 'site_id'); PREPARE FOO AS INSERT INTO http_request ( site_id, ingest_time, url, request_country, ip_address, status_code, response_time_msec ) VALUES ( 1, clock_timestamp(), 'http://example.com/path', 'USA', inet '88.250.10.123', 200, 10 ); EXECUTE foo; EXECUTE foo; EXECUTE foo; EXECUTE foo; EXECUTE foo; EXECUTE foo; SELECT count(distinct ingest_time) FROM http_request WHERE site_id = 1; DROP TABLE http_request; -- verify placement state updates invalidate shard state -- -- We use an immutable function to check for that. 
The planner will -- evaluate it once during planning, during execution it should never -- be reached (no rows). That way we'll see a NOTICE when -- (re-)planning, but not when executing. -- first create helper function CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :worker_1_port CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :worker_2_port CREATE OR REPLACE FUNCTION immutable_bleat(text) RETURNS int LANGUAGE plpgsql IMMUTABLE AS $$BEGIN RAISE NOTICE '%', $1;RETURN 1;END$$; \c - - - :master_port -- test table CREATE TABLE test_table (test_id integer NOT NULL, data text); SELECT master_create_distributed_table('test_table', 'test_id', 'hash'); SELECT master_create_worker_shards('test_table', 2, 2); -- avoid 9.6+ only context messages \set VERBOSITY terse --plain statement, needs planning SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); --prepared statement PREPARE countsome AS SELECT count(*) FROM test_table HAVING COUNT(*) = immutable_bleat('replanning'); EXECUTE countsome; -- should indicate planning EXECUTE countsome; -- no replanning -- invalidate half of the placements using SQL, should invalidate via trigger UPDATE pg_dist_shard_placement SET shardstate = '3' WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) AND nodeport = :worker_1_port; EXECUTE countsome; -- should indicate replanning EXECUTE countsome; -- no replanning -- repair shards, should invalidate via master_metadata_utility.c SELECT master_copy_shard_placement(shardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port) FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'test_table'::regclass) AND nodeport = :worker_1_port; EXECUTE countsome; -- should indicate replanning EXECUTE countsome; -- no replanning -- reset \set VERBOSITY default -- clean-up prepared statements DEALLOCATE ALL; DROP TABLE prepare_table; citus-7.0.3/src/test/regress/sql/multi_prune_shard_list.sql000066400000000000000000000111621317107136600241750ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 800000; -- =================================================================== -- create test functions -- =================================================================== CREATE FUNCTION prune_using_no_values(regclass) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION prune_using_single_value(regclass, text) RETURNS text[] AS 'citus' LANGUAGE C; CREATE FUNCTION prune_using_either_value(regclass, text, text) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION prune_using_both_values(regclass, text, text) RETURNS text[] AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION debug_equality_expression(regclass) RETURNS cstring AS 'citus' LANGUAGE C STRICT; CREATE FUNCTION print_sorted_shard_intervals(regclass) RETURNS text[] AS 'citus' LANGUAGE C STRICT; -- =================================================================== -- test shard pruning functionality -- =================================================================== -- create distributed table observe shard pruning CREATE TABLE pruning ( species text, last_pruned date, plant_id integer ); SELECT master_create_distributed_table('pruning', 'species', 'hash'); -- create worker shards SELECT master_create_worker_shards('pruning', 
4, 1); -- with no values, expect all shards SELECT prune_using_no_values('pruning'); -- with a single value, expect a single shard SELECT prune_using_single_value('pruning', 'tomato'); -- null values should result in no pruning SELECT prune_using_single_value('pruning', NULL); -- build an OR clause and expect more than one shard SELECT prune_using_either_value('pruning', 'tomato', 'petunia'); -- an AND clause with values on different shards returns no shards SELECT prune_using_both_values('pruning', 'tomato', 'petunia'); -- even if both values are on the same shard, a value can't be equal to two others SELECT prune_using_both_values('pruning', 'tomato', 'rose'); -- unit test of the equality expression generation code SELECT debug_equality_expression('pruning'); -- print the initial ordering of shard intervals SELECT print_sorted_shard_intervals('pruning'); -- update only min value for one shard UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103071; SELECT print_sorted_shard_intervals('pruning'); -- now let's have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103072; SELECT print_sorted_shard_intervals('pruning'); -- now let's have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103070; SELECT print_sorted_shard_intervals('pruning'); -- all shard placements are uninitialized UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103073; SELECT print_sorted_shard_intervals('pruning'); -- create range distributed table observe shard pruning CREATE TABLE pruning_range ( species text, last_pruned date, plant_id integer ); SELECT master_create_distributed_table('pruning_range', 'species', 'range'); -- create worker shards SELECT master_create_empty_shard('pruning_range'); SELECT master_create_empty_shard('pruning_range'); SELECT master_create_empty_shard('pruning_range'); SELECT master_create_empty_shard('pruning_range'); -- now the comparison is done via the partition column type, which is text UPDATE pg_dist_shard SET shardminvalue = 'a', shardmaxvalue = 'b' WHERE shardid = 103074; UPDATE pg_dist_shard SET shardminvalue = 'c', shardmaxvalue = 'd' WHERE shardid = 103075; UPDATE pg_dist_shard SET shardminvalue = 'e', shardmaxvalue = 'f' WHERE shardid = 103076; UPDATE pg_dist_shard SET shardminvalue = 'g', shardmaxvalue = 'h' WHERE shardid = 103077; -- print the ordering of shard intervals with range partitioning as well SELECT print_sorted_shard_intervals('pruning_range'); -- update only min value for one shard UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103075; SELECT print_sorted_shard_intervals('pruning_range'); -- now let's have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103076; SELECT print_sorted_shard_intervals('pruning_range'); -- now let's have one more shard without min/max values UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103074; SELECT print_sorted_shard_intervals('pruning_range'); -- all shard placements are uninitialized UPDATE pg_dist_shard set shardminvalue = NULL, shardmaxvalue = NULL WHERE shardid = 103077; SELECT print_sorted_shard_intervals('pruning_range'); citus-7.0.3/src/test/regress/sql/multi_query_directory_cleanup.sql000066400000000000000000000073731317107136600255760ustar00rootroot00000000000000-- -- 
MULTI_QUERY_DIRECTORY_CLEANUP -- -- We execute sub-queries on worker nodes, and copy query results to a directory -- on the master node for final processing. When the query completes or fails, -- the resource owner should automatically clean up these intermediate query -- result files. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 810000; SET citus.enable_unique_job_ids TO off; BEGIN; -- pg_ls_dir() displays jobids. We explicitly set the jobId sequence -- here so that the regression output becomes independent of the -- number of jobs executed prior to running this test. SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT pg_ls_dir('base/pgsql_job_cache'); COMMIT; SELECT pg_ls_dir('base/pgsql_job_cache'); BEGIN; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; SELECT pg_ls_dir('base/pgsql_job_cache'); ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); -- Test that multiple job directories are all cleaned up correctly, -- both individually (by closing a cursor) and in bulk when ending a -- transaction. BEGIN; DECLARE c_00 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_00; DECLARE c_01 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_01; DECLARE c_02 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_02; DECLARE c_03 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_03; DECLARE c_04 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_04; DECLARE c_05 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_05; DECLARE c_06 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_06; DECLARE c_07 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_07; DECLARE c_08 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_08; DECLARE c_09 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_09; DECLARE c_10 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_10; DECLARE c_11 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_11; DECLARE c_12 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_12; DECLARE c_13 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_13; DECLARE c_14 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_14; DECLARE c_15 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_15; DECLARE c_16 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_16; DECLARE c_17 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_17; DECLARE c_18 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_18; DECLARE c_19 CURSOR FOR SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem; FETCH 1 FROM c_19; SELECT * FROM 
pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; -- close first, 17th (first after re-alloc) and last cursor. CLOSE c_00; CLOSE c_16; CLOSE c_19; SELECT * FROM pg_ls_dir('base/pgsql_job_cache') f ORDER BY f; ROLLBACK; SELECT pg_ls_dir('base/pgsql_job_cache'); citus-7.0.3/src/test/regress/sql/multi_read_from_secondaries.sql000066400000000000000000000024301317107136600251430ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1600000; \c "dbname=regression options='-c\ citus.use_secondary_nodes=always'" CREATE TABLE dest_table (a int, b int); CREATE TABLE source_table (a int, b int); -- attempts to change metadata should fail while reading from secondaries SELECT create_distributed_table('dest_table', 'a'); \c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" SELECT create_distributed_table('dest_table', 'a'); SELECT create_distributed_table('source_table', 'a'); INSERT INTO dest_table (a, b) VALUES (1, 1); INSERT INTO dest_table (a, b) VALUES (2, 1); INSERT INTO source_table (a, b) VALUES (10, 10); -- simluate actually having secondary nodes SELECT * FROM pg_dist_node; UPDATE pg_dist_node SET noderole = 'secondary'; \c "dbname=regression options='-c\ citus.use_secondary_nodes=always'" -- inserts are disallowed INSERT INTO dest_table (a, b) VALUES (1, 2); -- router selects are allowed SELECT a FROM dest_table WHERE a = 1; -- real-time selects are also allowed SELECT a FROM dest_table; -- insert into is definitely not allowed INSERT INTO dest_table (a, b) SELECT a, b FROM source_table; \c "dbname=regression options='-c\ citus.use_secondary_nodes=never'" UPDATE pg_dist_node SET noderole = 'primary'; DROP TABLE dest_table; citus-7.0.3/src/test/regress/sql/multi_reference_table.sql000066400000000000000000000660371317107136600237500ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1250000; CREATE TABLE reference_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); -- insert some data, and make sure that cannot be create_distributed_table INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); -- create the reference table SELECT create_reference_table('reference_table_test'); -- see that partkey is NULL SELECT partmethod, (partkey IS NULL) as partkeyisnull, repmodel FROM pg_dist_partition WHERE logicalrelid = 'reference_table_test'::regclass; -- now see that shard min/max values are NULL SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test'::regclass) ORDER BY placementid; -- check whether data was copied into distributed table SELECT * FROM reference_table_test; -- now, execute some modification queries INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); -- most of the queries in this file are already tested on multi_router_planner.sql -- However, for the sake of completeness we need to run similar tests with -- reference tables as well -- run some queries on top of the data SELECT * FROM reference_table_test; SELECT * FROM reference_table_test WHERE value_1 = 1; 
SELECT value_1, value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 3; SELECT value_1, value_3 FROM reference_table_test WHERE value_2 >= 4 ORDER BY 2 LIMIT 3; SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2; SELECT value_1, 15 * value_2 FROM reference_table_test ORDER BY 2 ASC LIMIT 2 OFFSET 2; SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 OR value_2 = 3; SELECT value_2, value_4 FROM reference_table_test WHERE value_2 = 2 AND value_2 = 3; SELECT value_2, value_4 FROM reference_table_test WHERE value_3 = '2' OR value_1 = 3; SELECT value_2, value_4 FROM reference_table_test WHERE ( value_3 = '2' OR value_1 = 3 ) AND FALSE; SELECT * FROM reference_table_test WHERE value_2 IN ( SELECT value_3::FLOAT FROM reference_table_test ) AND value_1 < 3; SELECT value_4 FROM reference_table_test WHERE value_3 IN ( '1', '2' ); SELECT date_part('day', value_4) FROM reference_table_test WHERE value_3 IN ( '5', '2' ); SELECT value_4 FROM reference_table_test WHERE value_2 <= 2 AND value_2 >= 4; SELECT value_4 FROM reference_table_test WHERE value_2 <= 20 AND value_2 >= 4; SELECT value_4 FROM reference_table_test WHERE value_2 >= 5 AND value_2 <= random(); SELECT value_1 FROM reference_table_test WHERE value_4 BETWEEN '2016-12-01' AND '2016-12-03'; SELECT value_1 FROM reference_table_test WHERE FALSE; SELECT value_1 FROM reference_table_test WHERE int4eq(1, 2); -- rename output columns and do some operations SELECT value_1 as id, value_2 * 15 as age FROM reference_table_test; -- queries with CTEs are supported WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM some_data; -- queries with CTEs are supported even if CTE is not referenced inside query WITH some_data AS ( SELECT value_2, value_4 FROM reference_table_test WHERE value_2 >=3) SELECT * FROM reference_table_test ORDER BY 1 LIMIT 1; -- queries which involve functions in FROM clause are supported if they go to a single worker. 
SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1; SELECT * FROM reference_table_test, position('om' in 'Thomas') WHERE value_1 = 1 OR value_1 = 2; -- set operations are supported SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 UNION SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 EXCEPT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; SELECT * FROM ( SELECT * FROM reference_table_test WHERE value_1 = 1 INTERSECT SELECT * FROM reference_table_test WHERE value_1 = 3 ) AS combination ORDER BY value_1; -- to make the tests more interesting for aggregation tests, ingest some more data INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); -- some aggregations SELECT value_4, SUM(value_2) FROM reference_table_test GROUP BY value_4 HAVING SUM(value_2) > 3 ORDER BY 1; SELECT value_4, value_3, SUM(value_2) FROM reference_table_test GROUP BY GROUPING sets ((value_4), (value_3)) ORDER BY 1, 2, 3; -- distinct clauses also work fine SELECT DISTINCT value_4 FROM reference_table_test ORDER BY 1; -- window functions are also supported SELECT value_4, RANK() OVER (PARTITION BY value_1 ORDER BY value_4) FROM reference_table_test; -- window functions are also supported SELECT value_4, AVG(value_1) OVER (PARTITION BY value_4 ORDER BY value_4) FROM reference_table_test; SELECT count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test; SELECT value_1, count(DISTINCT CASE WHEN value_2 >= 3 THEN value_2 ELSE NULL END) as c FROM reference_table_test GROUP BY value_1 ORDER BY 1; -- selects inside a transaction work fine as well BEGIN; SELECT * FROM reference_table_test; SELECT * FROM reference_table_test WHERE value_1 = 1; END; -- cursor queries also work fine BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM reference_table_test WHERE value_1 = 1 OR value_1 = 2 ORDER BY value_1; FETCH test_cursor; FETCH ALL test_cursor; FETCH test_cursor; -- fetch one row after the last FETCH BACKWARD test_cursor; END; -- table creation queries inside can be router plannable CREATE TEMP TABLE temp_reference_test as SELECT * FROM reference_table_test WHERE value_1 = 1; -- all kinds of joins are supported among reference tables -- first create two more tables CREATE TABLE reference_table_test_second (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_second'); CREATE TABLE reference_table_test_third (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_third'); -- ingest some data to both tables INSERT INTO reference_table_test_second VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test_second VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test_second VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test_third VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test_third VALUES (5, 5.0, '5', '2016-12-05'); -- some very basic tests SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_2 ORDER BY 1; SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_third t3 WHERE t1.value_2 = 
t3.value_2 ORDER BY 1; SELECT DISTINCT t2.value_1 FROM reference_table_test_second t2, reference_table_test_third t3 WHERE t2.value_2 = t3.value_2 ORDER BY 1; -- join on different columns and different data types via casts SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_1 ORDER BY 1; SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = t2.value_3::int ORDER BY 1; SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2 WHERE t1.value_2 = date_part('day', t2.value_4) ORDER BY 1; -- ingest a common row to see more meaningful results with joins involving 3 tables INSERT INTO reference_table_test_third VALUES (3, 3.0, '3', '2016-12-03'); SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_2 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_2 ORDER BY 1; -- same query on different columns SELECT DISTINCT t1.value_1 FROM reference_table_test t1, reference_table_test_second t2, reference_table_test_third t3 WHERE t1.value_1 = date_part('day', t2.value_4) AND t3.value_2 = t1.value_1 ORDER BY 1; -- with the JOIN syntax SELECT DISTINCT t1.value_1 FROM reference_table_test t1 JOIN reference_table_test_second t2 USING (value_1) JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; -- and left/right joins SELECT DISTINCT t1.value_1 FROM reference_table_test t1 LEFT JOIN reference_table_test_second t2 USING (value_1) LEFT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; SELECT DISTINCT t1.value_1 FROM reference_table_test t1 RIGHT JOIN reference_table_test_second t2 USING (value_1) RIGHT JOIN reference_table_test_third t3 USING (value_1) ORDER BY 1; -- now, let's have some tests on UPSERTs and uniqueness CREATE TABLE reference_table_test_fourth (value_1 int, value_2 float PRIMARY KEY, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fourth'); -- insert a row INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01'); -- now get the unique key violation INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01'); -- now get null constraint violation due to primary key INSERT INTO reference_table_test_fourth (value_1, value_3, value_4) VALUES (1, '1.0', '2016-12-01'); -- let's run some upserts INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '1', '2016-12-01') ON CONFLICT DO NOTHING RETURNING *; INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3, value_2 = EXCLUDED.value_2 RETURNING *; -- update all columns INSERT INTO reference_table_test_fourth VALUES (1, 1.0, '10', '2016-12-01') ON CONFLICT (value_2) DO UPDATE SET value_3 = EXCLUDED.value_3 || '+10', value_2 = EXCLUDED.value_2 + 10, value_1 = EXCLUDED.value_1 + 10, value_4 = '2016-12-10' RETURNING *; -- finally see that shard healths are OK SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'reference_table_test_fourth'::regclass) ORDER BY placementid; -- let's now run some update/delete queries on arbitrary columns DELETE FROM reference_table_test WHERE value_1 = 1 RETURNING *; DELETE FROM reference_table_test WHERE value_4 = '2016-12-05' RETURNING *; UPDATE reference_table_test SET value_2 = 15 WHERE value_2 = 2 RETURNING *; -- and some 
queries without any filters UPDATE reference_table_test SET value_2 = 15, value_1 = 45 RETURNING *; DELETE FROM reference_table_test RETURNING *; -- some tests with function evaluation and sequences CREATE TABLE reference_table_test_fifth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_fifth'); CREATE SEQUENCE example_ref_value_seq; -- see that sequences work as expected INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; INSERT INTO reference_table_test_fifth (value_2) VALUES (2) RETURNING value_1, value_2; INSERT INTO reference_table_test_fifth (value_2, value_3) VALUES (nextval('example_ref_value_seq'), nextval('example_ref_value_seq')::text) RETURNING value_1, value_2, value_3; UPDATE reference_table_test_fifth SET value_4 = now() WHERE value_1 = 1 RETURNING value_1, value_2, value_4 > '2000-01-01'; -- test copying FROM / TO -- first delete all the data DELETE FROM reference_table_test; COPY reference_table_test FROM STDIN WITH CSV; 1,1.0,1,2016-01-01 \. COPY reference_table_test (value_2, value_3, value_4) FROM STDIN WITH CSV; 2.0,2,2016-01-02 \. COPY reference_table_test (value_3) FROM STDIN WITH CSV; 3 \. COPY reference_table_test FROM STDIN WITH CSV; ,,, \. COPY reference_table_test TO STDOUT WITH CSV; -- INSERT INTO SELECT among reference tables DELETE FROM reference_table_test_second; INSERT INTO reference_table_test_second SELECT * FROM reference_table_test RETURNING *; INSERT INTO reference_table_test_second (value_2) SELECT reference_table_test.value_2 FROM reference_table_test JOIN reference_table_test_second USING (value_1) RETURNING *; SET citus.shard_count TO 6; SET citus.shard_replication_factor TO 2; CREATE TABLE colocated_table_test (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test', 'value_1'); CREATE TABLE colocated_table_test_2 (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_distributed_table('colocated_table_test_2', 'value_1'); DELETE FROM reference_table_test; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO colocated_table_test_2 VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO colocated_table_test_2 VALUES (2, 2.0, '2', '2016-12-02'); SET client_min_messages TO DEBUG1; SET citus.log_multi_join_order TO TRUE; SELECT reference_table_test.value_1 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_1 = reference_table_test.value_1; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test WHERE colocated_table_test.value_2 = reference_table_test.value_2; SELECT colocated_table_test.value_2 FROM colocated_table_test, reference_table_test WHERE reference_table_test.value_1 = colocated_table_test.value_1; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_2 = reference_table_test.value_2; SELECT colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1 AND colocated_table_test.value_2 = reference_table_test.value_2; SET citus.task_executor_type to "task-tracker"; SELECT 
colocated_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_2 = colocated_table_test_2.value_2 AND colocated_table_test.value_2 = reference_table_test.value_2; SELECT reference_table_test.value_2 FROM reference_table_test, colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = reference_table_test.value_1 AND colocated_table_test_2.value_1 = reference_table_test.value_1; SET citus.log_multi_join_order TO FALSE; SET citus.shard_count TO DEFAULT; SET citus.task_executor_type to "real-time"; -- some INSERT .. SELECT queries that involve both hash distributed and reference tables -- should go via coordinator since we're inserting into reference table where -- not all the participants are reference tables INSERT INTO reference_table_test (value_1) SELECT colocated_table_test.value_1 FROM colocated_table_test, colocated_table_test_2 WHERE colocated_table_test.value_1 = colocated_table_test_2.value_1; -- should go via coordinator, same as the above INSERT INTO reference_table_test (value_1) SELECT colocated_table_test.value_1 FROM colocated_table_test, reference_table_test WHERE colocated_table_test.value_1 = reference_table_test.value_1; -- not pushable due to lack of equality between partition column and column of reference table INSERT INTO colocated_table_test (value_1, value_2) SELECT colocated_table_test_2.value_1, reference_table_test.value_2 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_4 = reference_table_test.value_4 RETURNING value_1, value_2; -- some more complex queries (Note that there are more complex queries in multi_insert_select.sql) INSERT INTO colocated_table_test (value_1, value_2) SELECT colocated_table_test_2.value_1, reference_table_test.value_2 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_2 = reference_table_test.value_2 RETURNING value_1, value_2; -- partition column value comes from reference table, goes via coordinator INSERT INTO colocated_table_test (value_1, value_2) SELECT reference_table_test.value_2, colocated_table_test_2.value_1 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_4 = reference_table_test.value_4; INSERT INTO colocated_table_test (value_1, value_2) SELECT reference_table_test.value_1, colocated_table_test_2.value_1 FROM colocated_table_test_2, reference_table_test WHERE colocated_table_test_2.value_4 = reference_table_test.value_4; RESET client_min_messages; -- some tests for mark_tables_colocated -- should error out SELECT mark_tables_colocated('colocated_table_test_2', ARRAY['reference_table_test']); -- should work silently SELECT mark_tables_colocated('reference_table_test', ARRAY['reference_table_test_fifth']); -- ensure that reference tables on -- different schemas work as expected CREATE SCHEMA reference_schema; -- create with schema prefix CREATE TABLE reference_schema.reference_table_test_sixth (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_schema.reference_table_test_sixth'); SET search_path TO 'reference_schema'; -- create in the schema CREATE TABLE reference_table_test_seventh (value_1 serial PRIMARY KEY, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_test_seventh'); -- ingest some data INSERT INTO reference_table_test_sixth VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test_seventh 
VALUES (1, 1.0, '1', '2016-12-01'); SET search_path TO 'public'; -- ingest some data INSERT INTO reference_schema.reference_table_test_sixth VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_schema.reference_table_test_seventh VALUES (2, 2.0, '2', '2016-12-02'); -- some basic queries SELECT value_1 FROM reference_schema.reference_table_test_sixth; SET search_path TO 'reference_schema'; SELECT reference_table_test_sixth.value_1 FROM reference_table_test_sixth, reference_table_test_seventh WHERE reference_table_test_sixth.value_4 = reference_table_test_seventh.value_4; -- last test with cross schemas SET search_path TO 'public'; SELECT reftable.value_2, colocated_table_test_2.value_1 FROM colocated_table_test_2, reference_schema.reference_table_test_sixth as reftable WHERE colocated_table_test_2.value_4 = reftable.value_4; -- let's now test TRUNCATE and DROP TABLE -- delete all rows and ingest some data DELETE FROM reference_table_test; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); INSERT INTO reference_table_test VALUES (3, 3.0, '3', '2016-12-03'); INSERT INTO reference_table_test VALUES (4, 4.0, '4', '2016-12-04'); INSERT INTO reference_table_test VALUES (5, 5.0, '5', '2016-12-05'); SELECT count(*) FROM reference_table_test; -- truncate it and get the result back TRUNCATE reference_table_test; SELECT count(*) FROM reference_table_test; -- now try dropping one of the existing reference tables -- and check the metadata SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; DROP TABLE reference_table_test_fifth; SELECT logicalrelid FROM pg_dist_partition WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; SELECT logicalrelid FROM pg_dist_shard WHERE logicalrelid::regclass::text LIKE '%reference_table_test_fifth%'; -- now test DDL changes CREATE TABLE reference_table_ddl (value_1 int, value_2 float, value_3 text, value_4 timestamp); SELECT create_reference_table('reference_table_ddl'); -- CREATE & DROP index and check the workers CREATE INDEX reference_index_1 ON reference_table_ddl(value_1); CREATE INDEX reference_index_2 ON reference_table_ddl(value_2, value_3); -- should be able to create/drop UNIQUE index on a reference table CREATE UNIQUE INDEX reference_index_3 ON reference_table_ddl(value_1); -- should be able to add a column ALTER TABLE reference_table_ddl ADD COLUMN value_5 INTEGER; ALTER TABLE reference_table_ddl ALTER COLUMN value_5 SET DATA TYPE FLOAT; ALTER TABLE reference_table_ddl DROP COLUMN value_1; ALTER TABLE reference_table_ddl ALTER COLUMN value_2 SET DEFAULT 25.0; ALTER TABLE reference_table_ddl ALTER COLUMN value_3 SET NOT NULL; -- see that Citus applied all DDLs to the table SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl'::regclass; \d reference_index_2 -- also to the shard placements \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass; \d reference_index_2_1250019 \c - - - :master_port DROP INDEX reference_index_2; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.reference_table_ddl_1250019'::regclass; \di reference_index_2* \c - - - :master_port -- as we expect, renaming and setting WITH OIDS does not work 
for reference tables ALTER TABLE reference_table_ddl RENAME TO reference_table_ddl_test; ALTER TABLE reference_table_ddl SET WITH OIDS; -- now test reference tables against some helper UDFs that Citus provides -- cannot delete / drop shards from a reference table SELECT master_apply_delete_command('DELETE FROM reference_table_ddl'); -- cannot add shards SELECT master_create_empty_shard('reference_table_ddl'); -- master_modify_multiple_shards works, but, does it make sense to use at all? INSERT INTO reference_table_ddl (value_2, value_3) VALUES (7, 'aa'); SELECT master_modify_multiple_shards('DELETE FROM reference_table_ddl WHERE value_2 = 7'); INSERT INTO reference_table_ddl (value_2, value_3) VALUES (7, 'bb'); SELECT master_modify_multiple_shards('DELETE FROM reference_table_ddl'); -- get/update the statistics SELECT part_storage_type, part_key, part_replica_count, part_max_size, part_placement_policy FROM master_get_table_metadata('reference_table_ddl'); SELECT shardid AS a_shard_id FROM pg_dist_shard WHERE logicalrelid = 'reference_table_ddl'::regclass \gset SELECT master_update_shard_statistics(:a_shard_id); CREATE TABLE append_reference_tmp_table (id INT); SELECT master_append_table_to_shard(:a_shard_id, 'append_reference_tmp_table', 'localhost', :master_port); SELECT master_get_table_ddl_events('reference_table_ddl'); -- in reality, we wouldn't need to repair any reference table shard placements -- however, the test could be relevant for other purposes SELECT placementid AS a_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_1_port \gset SELECT placementid AS b_placement_id FROM pg_dist_shard_placement WHERE shardid = :a_shard_id AND nodeport = :worker_2_port \gset UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE placementid = :a_placement_id; SELECT master_copy_shard_placement(:a_shard_id, 'localhost', :worker_2_port, 'localhost', :worker_1_port); SELECT shardid, shardstate FROM pg_dist_shard_placement WHERE placementid = :a_placement_id; -- some queries that are captured in functions CREATE FUNCTION select_count_all() RETURNS bigint AS ' SELECT count(*) FROM reference_table_test; ' LANGUAGE SQL; CREATE FUNCTION insert_into_ref_table(value_1 int, value_2 float, value_3 text, value_4 timestamp) RETURNS void AS ' INSERT INTO reference_table_test VALUES ($1, $2, $3, $4); ' LANGUAGE SQL; TRUNCATE reference_table_test; SELECT select_count_all(); SELECT insert_into_ref_table(1, 1.0, '1', '2016-12-01'); SELECT insert_into_ref_table(2, 2.0, '2', '2016-12-02'); SELECT insert_into_ref_table(3, 3.0, '3', '2016-12-03'); SELECT insert_into_ref_table(4, 4.0, '4', '2016-12-04'); SELECT insert_into_ref_table(5, 5.0, '5', '2016-12-05'); SELECT insert_into_ref_table(6, 6.0, '6', '2016-12-06'); SELECT select_count_all(); TRUNCATE reference_table_test; -- some prepared queries and pl/pgsql functions PREPARE insert_into_ref_table_pr (int, float, text, timestamp) AS INSERT INTO reference_table_test VALUES ($1, $2, $3, $4); -- reference tables do not have up-to-five execution limit as other tables EXECUTE insert_into_ref_table_pr(1, 1.0, '1', '2016-12-01'); EXECUTE insert_into_ref_table_pr(2, 2.0, '2', '2016-12-02'); EXECUTE insert_into_ref_table_pr(3, 3.0, '3', '2016-12-03'); EXECUTE insert_into_ref_table_pr(4, 4.0, '4', '2016-12-04'); EXECUTE insert_into_ref_table_pr(5, 5.0, '5', '2016-12-05'); EXECUTE insert_into_ref_table_pr(6, 6.0, '6', '2016-12-06'); -- see the count, then truncate the table SELECT select_count_all(); TRUNCATE reference_table_test; 
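-- The tests above exercise SQL-language functions and a prepared statement;
-- the short block below is an illustrative pl/pgsql sketch and is NOT part of
-- the original test file. It assumes the reference_table_test definition and
-- the select_count_all() helper used throughout this file; the function name
-- insert_into_ref_table_plpgsql is hypothetical.
CREATE FUNCTION insert_into_ref_table_plpgsql(p_key int, p_value float)
RETURNS void AS $$
BEGIN
	-- the INSERT is routed to the reference table's single shard
	INSERT INTO reference_table_test (value_1, value_2) VALUES (p_key, p_value);
END;
$$ LANGUAGE plpgsql;
SELECT insert_into_ref_table_plpgsql(1, 1.0);
SELECT select_count_all();
DROP FUNCTION insert_into_ref_table_plpgsql(int, float);
TRUNCATE reference_table_test;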
-- reference tables work with composite key -- and we do not even need to create a hash -- function etc. -- first create the type on all nodes CREATE TYPE reference_comp_key as (key text, value text); \c - - - :worker_1_port CREATE TYPE reference_comp_key as (key text, value text); \c - - - :worker_2_port CREATE TYPE reference_comp_key as (key text, value text); \c - - - :master_port CREATE TABLE reference_table_composite (id int PRIMARY KEY, data reference_comp_key); SELECT create_reference_table('reference_table_composite'); -- insert and query some data INSERT INTO reference_table_composite (id, data) VALUES (1, ('key_1', 'value_1')::reference_comp_key); INSERT INTO reference_table_composite (id, data) VALUES (2, ('key_2', 'value_2')::reference_comp_key); SELECT * FROM reference_table_composite; SELECT (data).key FROM reference_table_composite; -- make sure that reference tables obey single shard transactions TRUNCATE reference_table_test; BEGIN; INSERT INTO reference_table_test VALUES (1, 1.0, '1', '2016-12-01'); SELECT * FROM reference_table_test; ROLLBACK; SELECT * FROM reference_table_test; -- now insert a row and commit BEGIN; INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); COMMIT; SELECT * FROM reference_table_test; -- one basic UPDATE test BEGIN; UPDATE reference_table_test SET value_1 = 10 WHERE value_1 = 2; COMMIT; SELECT * FROM reference_table_test; -- DML+master_modify_multiple_shards is allowed BEGIN; INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); SELECT master_modify_multiple_shards('DELETE FROM colocated_table_test'); ROLLBACK; -- DDL+DML is allowed BEGIN; ALTER TABLE reference_table_test ADD COLUMN value_dummy INT; INSERT INTO reference_table_test VALUES (2, 2.0, '2', '2016-12-02'); ROLLBACK; -- clean up tables DROP TABLE reference_table_test, reference_table_test_second, reference_table_test_third, reference_table_test_fourth, reference_table_ddl, reference_table_composite; DROP SCHEMA reference_schema CASCADE; citus-7.0.3/src/test/regress/sql/multi_remove_node_reference_table.sql000066400000000000000000000367501317107136600263270ustar00rootroot00000000000000-- -- MULTI_REMOVE_NODE_REFERENCE_TABLE -- -- Tests that check the metadata after master_remove_node.
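-- A minimal usage sketch of the API exercised below (illustrative only, not
-- part of the original test file): a worker is registered with
-- master_add_node('localhost', :worker_2_port) and deregistered with
-- master_remove_node('localhost', :worker_2_port). After a successful removal
-- the node no longer shows up in pg_dist_node and its reference table
-- placements disappear from pg_dist_shard_placement, which is what the
-- COUNT(*) checks in this file verify on both the master and worker 1.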
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1380000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1380000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1380000; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1380000; -- create copy of pg_dist_shard_placement to reload after the test CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -- remove non-existing node SELECT master_remove_node('localhost', 55555); -- remove a node with no reference tables -- verify node exist before removal SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT master_remove_node('localhost', :worker_2_port); -- verify node is removed SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; -- re-add the node for next tests SELECT groupid AS worker_2_group FROM master_add_node('localhost', :worker_2_port) \gset -- add a secondary to check we don't attempt to replicate the table to it SELECT isactive FROM master_add_node('localhost', 9000, groupid=>:worker_2_group, noderole=>'secondary'); -- remove a node with reference table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); -- make sure when we add a secondary we don't attempt to add placements to it SELECT isactive FROM master_add_node('localhost', 9001, groupid=>:worker_2_group, noderole=>'secondary'); SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; -- make sure when we disable a secondary we don't remove any placements SELECT master_disable_node('localhost', 9001); SELECT isactive FROM pg_dist_node WHERE nodeport = 9001; SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; -- make sure when we activate a secondary we don't add any placements SELECT 1 FROM master_activate_node('localhost', 9001); SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; -- make sure when we remove a secondary we don't remove any placements SELECT master_remove_node('localhost', 9001); SELECT count(*) FROM pg_dist_placement WHERE groupid = :worker_2_group; -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM 
pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port -- remove same node twice SELECT master_remove_node('localhost', :worker_2_port); -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- try to disable the node before removing it (this used to crash) SELECT master_disable_node('localhost', :worker_2_port); SELECT master_remove_node('localhost', :worker_2_port); -- re-add the node for the next test SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- remove node in a transaction and ROLLBACK -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); ROLLBACK; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port -- remove node in a transaction and COMMIT -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port BEGIN; SELECT master_remove_node('localhost', :worker_2_port); COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- test inserting a value then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, 
nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port BEGIN; INSERT INTO remove_node_reference_table VALUES(1); SELECT master_remove_node('localhost', :worker_2_port); COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); --verify the data is inserted SELECT * FROM remove_node_reference_table; \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM remove_node_reference_table; \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- test executing DDL command then removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port BEGIN; ALTER TABLE remove_node_reference_table ADD column2 int; SELECT master_remove_node('localhost', :worker_2_port); COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port -- verify table structure is changed SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.remove_node_reference_table'::regclass; -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- test DROP table after removing a node in a transaction -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 
'remove_node_reference_table'::regclass); BEGIN; SELECT master_remove_node('localhost', :worker_2_port); DROP TABLE remove_node_reference_table; COMMIT; -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid = 1380000; -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- re-create remove_node_reference_table CREATE TABLE remove_node_reference_table(column1 int); SELECT create_reference_table('remove_node_reference_table'); -- test removing a node while there is a reference table at another schema CREATE SCHEMA remove_node_reference_table_schema; CREATE TABLE remove_node_reference_table_schema.table1(column1 int); SELECT create_reference_table('remove_node_reference_table_schema.table1'); -- status before master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; \c - - - :master_port SELECT master_remove_node('localhost', :worker_2_port); -- status after master_remove_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table_schema.table1'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- test with master_disable_node -- status before master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); \c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid ASC; \c - - - :master_port SELECT master_disable_node('localhost', :worker_2_port); -- status after master_disable_node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'remove_node_reference_table'::regclass); 
\c - - - :worker_1_port SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; \c - - - :master_port -- re-add the node for next tests SELECT 1 FROM master_activate_node('localhost', :worker_2_port); -- DROP tables to clean workspace DROP TABLE remove_node_reference_table; DROP TABLE remove_node_reference_table_schema.table1; DROP SCHEMA remove_node_reference_table_schema CASCADE; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); -- reload pg_dist_shard_placement table INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement); DROP TABLE tmp_shard_placement; citus-7.0.3/src/test/regress/sql/multi_repair_shards.sql000066400000000000000000000111231317107136600234530ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 820000; SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport=:worker_2_port \gset SELECT groupid AS worker_1_group FROM pg_dist_node WHERE nodeport=:worker_1_port \gset -- =================================================================== -- test shard repair functionality -- =================================================================== -- create a table and create its distribution metadata CREATE TABLE customer_engagements ( id integer, created_at date, event_data text ); -- add some indexes CREATE INDEX ON customer_engagements (id); CREATE INDEX ON customer_engagements (created_at); CREATE INDEX ON customer_engagements (event_data); -- distribute the table SELECT master_create_distributed_table('customer_engagements', 'id', 'hash'); -- create a single shard on the first worker SELECT master_create_worker_shards('customer_engagements', 1, 2); -- ingest some data for the tests INSERT INTO customer_engagements VALUES (1, '01-01-2015', 'first event'); INSERT INTO customer_engagements VALUES (2, '02-01-2015', 'second event'); INSERT INTO customer_engagements VALUES (1, '03-01-2015', 'third event'); -- the following queries does the following: -- (i) create a new shard -- (ii) mark the second shard placements as unhealthy -- (iii) do basic checks i.e., only allow copy from healthy placement to unhealthy ones -- (iv) do a successful master_copy_shard_placement from the first placement to the second -- (v) mark the first placement as unhealthy and execute a query that is routed to the second placement -- get the newshardid SELECT shardid as newshardid FROM pg_dist_shard WHERE logicalrelid = 'customer_engagements'::regclass \gset -- now, update the second placement as unhealthy UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND groupid = :worker_2_group; -- cannot repair a shard after a modification (transaction still open during repair) BEGIN; ALTER TABLE customer_engagements ADD COLUMN value float; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ROLLBACK; BEGIN; INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ROLLBACK; -- modifications after reparing a shard are fine (will use new metadata) BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); ALTER TABLE customer_engagements ADD COLUMN value float; ROLLBACK; BEGIN; SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 
'localhost', :worker_2_port); INSERT INTO customer_engagements VALUES (4, '04-01-2015', 'fourth event'); ROLLBACK; -- add a fake healthy placement for the tests INSERT INTO pg_dist_placement (groupid, shardid, shardstate, shardlength) VALUES (:worker_2_group, :newshardid, 1, 0); SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group AND shardid = :newshardid AND shardstate = 1; -- also try to copy from an inactive placement SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_2_port, 'localhost', :worker_1_port); -- "copy" this shard from the first placement to the second one SELECT master_copy_shard_placement(:newshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); -- now, update first placement as unhealthy (and raise a notice) so that queries are not routed to there UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :newshardid AND groupid = :worker_1_group; -- get the data from the second placement SELECT * FROM customer_engagements; -- now do the same test over again with a foreign table CREATE FOREIGN TABLE remote_engagements ( id integer, created_at date, event_data text ) SERVER fake_fdw_server; -- distribute the table SELECT master_create_distributed_table('remote_engagements', 'id', 'hash'); -- create a single shard on the first worker SELECT master_create_worker_shards('remote_engagements', 1, 2); -- get the newshardid SELECT shardid as remotenewshardid FROM pg_dist_shard WHERE logicalrelid = 'remote_engagements'::regclass \gset -- now, update the second placement as unhealthy UPDATE pg_dist_placement SET shardstate = 3 WHERE shardid = :remotenewshardid AND groupid = :worker_2_group; -- oops! we don't support repairing shards backed by foreign tables SELECT master_copy_shard_placement(:remotenewshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); citus-7.0.3/src/test/regress/sql/multi_repartition_udt.sql000066400000000000000000000143721317107136600240520ustar00rootroot00000000000000-- -- MULTI_REPARTITION_UDT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 535000; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation CREATE TABLE repartition_udt ( pk integer not null, udtcol test_udt, txtcol text ); CREATE TABLE repartition_udt_other ( pk integer not null, udtcol test_udt, txtcol text ); -- Connect directly to a worker, create and drop the type, then -- proceed with type creation as above; thus the OIDs will be different. -- so that the OID is off. \c - - - :worker_1_port CREATE TYPE test_udt AS (i integer, i2 integer); DROP TYPE test_udt CASCADE; -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation \c - - - :worker_2_port -- START type creation CREATE TYPE test_udt AS (i integer, i2 integer); -- ... as well as a function to use as its comparator... CREATE FUNCTION equal_test_udt_function(test_udt, test_udt) RETURNS boolean AS 'select $1.i = $2.i AND $1.i2 = $2.i2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- ... use that function to create a custom equality operator... CREATE OPERATOR = ( LEFTARG = test_udt, RIGHTARG = test_udt, PROCEDURE = equal_test_udt_function, COMMUTATOR = =, HASHES ); -- ... and create a custom operator family for hash indexes... CREATE OPERATOR FAMILY tudt_op_fam USING hash; -- ... create a test HASH function. 
Though it is a poor hash function, -- it is acceptable for our tests CREATE FUNCTION test_udt_hash(test_udt) RETURNS int AS 'SELECT hashtext( ($1.i + $1.i2)::text);' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- We need to define two different operator classes for the composite types -- One uses BTREE the other uses HASH CREATE OPERATOR CLASS tudt_op_fam_clas3 DEFAULT FOR TYPE test_udt USING BTREE AS OPERATOR 3 = (test_udt, test_udt); CREATE OPERATOR CLASS tudt_op_fam_class DEFAULT FOR TYPE test_udt USING HASH AS OPERATOR 1 = (test_udt, test_udt), FUNCTION 1 test_udt_hash(test_udt); -- END type creation -- Connect to master \c - - - :master_port -- Distribute and populate the two tables. SELECT master_create_distributed_table('repartition_udt', 'pk', 'hash'); SELECT master_create_worker_shards('repartition_udt', 3, 1); SELECT master_create_distributed_table('repartition_udt_other', 'pk', 'hash'); SELECT master_create_worker_shards('repartition_udt_other', 5, 1); INSERT INTO repartition_udt values (1, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (2, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (3, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt values (4, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt values (5, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt values (6, '(2,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (7, '(1,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (8, '(1,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (9, '(1,3)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (10, '(2,1)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (11, '(2,2)'::test_udt, 'foo'); INSERT INTO repartition_udt_other values (12, '(2,3)'::test_udt, 'foo'); SET client_min_messages = LOG; -- Query that should result in a repartition join on int column, and be empty. SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.pk = repartition_udt_other.pk WHERE repartition_udt.pk > 1; -- Query that should result in a repartition join on UDT column. 
SET citus.large_table_shard_count = 1; SET citus.task_executor_type = 'task-tracker'; SET citus.log_multi_join_order = true; EXPLAIN SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1; SELECT * FROM repartition_udt JOIN repartition_udt_other ON repartition_udt.udtcol = repartition_udt_other.udtcol WHERE repartition_udt.pk > 1 ORDER BY repartition_udt.pk; citus-7.0.3/src/test/regress/sql/multi_repartitioned_subquery_udf.sql000066400000000000000000000026221317107136600262770ustar00rootroot00000000000000-- -- MULTI_REPARTITIONED_SUBQUERY_UDF -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 830000; -- Create UDF in master and workers \c - - - :master_port DROP FUNCTION IF EXISTS median(double precision[]); CREATE FUNCTION median(double precision[]) RETURNS double precision LANGUAGE sql IMMUTABLE AS $_$ SELECT AVG(val) FROM (SELECT val FROM unnest($1) val ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_1_port DROP FUNCTION IF EXISTS median(double precision[]); CREATE FUNCTION median(double precision[]) RETURNS double precision LANGUAGE sql IMMUTABLE AS $_$ SELECT AVG(val) FROM (SELECT val FROM unnest($1) val ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; \c - - - :worker_2_port DROP FUNCTION IF EXISTS median(double precision[]); CREATE FUNCTION median(double precision[]) RETURNS double precision LANGUAGE sql IMMUTABLE AS $_$ SELECT AVG(val) FROM (SELECT val FROM unnest($1) val ORDER BY 1 LIMIT 2 - MOD(array_upper($1, 1), 2) OFFSET CEIL(array_upper($1, 1) / 2.0) - 1) sub; $_$; -- Run query on master \c - - - :master_port SET citus.task_executor_type TO 'task-tracker'; SELECT * FROM (SELECT median(ARRAY[1,2,sum(l_suppkey)]) as median, count(*) FROM lineitem GROUP BY l_partkey) AS a WHERE median > 2; citus-7.0.3/src/test/regress/sql/multi_replicate_reference_table.sql000066400000000000000000000317441317107136600257750ustar00rootroot00000000000000-- -- MULTI_REPLICATE_REFERENCE_TABLE -- -- Tests that check the metadata returned by the master node. 
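-- An illustrative summary of the behaviour exercised below (not part of the
-- original test file): adding a worker with master_add_node replicates every
-- existing reference table shard to the new node, so an extra placement row
-- appears in pg_dist_shard_placement for that node; master_add_inactive_node
-- registers the node without replicating, and a later master_activate_node
-- performs the replication. The SELECTs against pg_dist_shard_placement and
-- pg_dist_colocation below check exactly that before and after each call.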
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000; -- remove a node for testing purposes CREATE TABLE tmp_shard_placement AS SELECT * FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DELETE FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT master_remove_node('localhost', :worker_2_port); -- test adding new node with no reference tables -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- verify node is added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; -- verify nothing is replicated to the new node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; -- test adding new node with a reference table which does not have any healthy placement SELECT master_remove_node('localhost', :worker_2_port); -- verify there is no node with nodeport = :worker_2_port before adding the node SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; CREATE TABLE replicate_reference_table_unhealthy(column1 int); SELECT create_reference_table('replicate_reference_table_unhealthy'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1370000; SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- verify node is not added SELECT COUNT(*) FROM pg_dist_node WHERE nodeport = :worker_2_port; -- verify nothing is replicated to the new node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; DROP TABLE replicate_reference_table_unhealthy; -- test replicating a reference table when a new node added CREATE TABLE replicate_reference_table_valid(column1 int); SELECT create_reference_table('replicate_reference_table_valid'); -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); -- test add same node twice -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_valid'::regclass); DROP TABLE 
replicate_reference_table_valid; -- test replicating a reference table when a new node added in TRANSACTION + ROLLBACK SELECT master_remove_node('localhost', :worker_2_port); CREATE TABLE replicate_reference_table_rollback(column1 int); SELECT create_reference_table('replicate_reference_table_rollback'); -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ROLLBACK; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_rollback'::regclass); DROP TABLE replicate_reference_table_rollback; -- test replicating a reference table when a new node added in TRANSACTION + COMMIT CREATE TABLE replicate_reference_table_commit(column1 int); SELECT create_reference_table('replicate_reference_table_commit'); -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); COMMIT; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_commit'::regclass); DROP TABLE replicate_reference_table_commit; -- test adding new node + upgrading another hash distributed table to reference table + creating new reference table in TRANSACTION SELECT master_remove_node('localhost', :worker_2_port); CREATE TABLE replicate_reference_table_reference_one(column1 int); SELECT create_reference_table('replicate_reference_table_reference_one'); SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE replicate_reference_table_hash(column1 int); SELECT create_distributed_table('replicate_reference_table_hash', 'column1'); -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='replicate_reference_table_hash'::regclass; CREATE TABLE replicate_reference_table_reference_two(column1 int); -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); SELECT logicalrelid, partmethod, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash', 'replicate_reference_table_reference_two') ORDER BY logicalrelid; BEGIN; SELECT 1 FROM 
master_add_node('localhost', :worker_2_port); SELECT upgrade_to_reference_table('replicate_reference_table_hash'); SELECT create_reference_table('replicate_reference_table_reference_two'); COMMIT; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port ORDER BY shardid; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_reference_one'::regclass); SELECT logicalrelid, partmethod, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid IN ('replicate_reference_table_reference_one', 'replicate_reference_table_hash', 'replicate_reference_table_reference_two') ORDER BY logicalrelid; DROP TABLE replicate_reference_table_reference_one; DROP TABLE replicate_reference_table_hash; DROP TABLE replicate_reference_table_reference_two; -- test inserting a value then adding a new node in a transaction SELECT master_remove_node('localhost', :worker_2_port); CREATE TABLE replicate_reference_table_insert(column1 int); SELECT create_reference_table('replicate_reference_table_insert'); BEGIN; INSERT INTO replicate_reference_table_insert VALUES(1); SELECT 1 FROM master_add_node('localhost', :worker_2_port); ROLLBACK; DROP TABLE replicate_reference_table_insert; -- test COPY then adding a new node in a transaction CREATE TABLE replicate_reference_table_copy(column1 int); SELECT create_reference_table('replicate_reference_table_copy'); BEGIN; COPY replicate_reference_table_copy FROM STDIN; 1 2 3 4 5 \. SELECT 1 FROM master_add_node('localhost', :worker_2_port); ROLLBACK; DROP TABLE replicate_reference_table_copy; -- test executing DDL command then adding a new node in a transaction CREATE TABLE replicate_reference_table_ddl(column1 int); SELECT create_reference_table('replicate_reference_table_ddl'); BEGIN; ALTER TABLE replicate_reference_table_ddl ADD column2 int; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ROLLBACK; DROP TABLE replicate_reference_table_ddl; -- test DROP table after adding new node in a transaction CREATE TABLE replicate_reference_table_drop(column1 int); SELECT create_reference_table('replicate_reference_table_drop'); -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_drop'::regclass); BEGIN; SELECT 1 FROM master_add_node('localhost', :worker_2_port); DROP TABLE replicate_reference_table_drop; COMMIT; -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid = 1370009; -- test adding a node while there is a reference table at another schema SELECT master_remove_node('localhost', :worker_2_port); CREATE SCHEMA replicate_reference_table_schema; CREATE TABLE replicate_reference_table_schema.table1(column1 int); SELECT create_reference_table('replicate_reference_table_schema.table1'); -- status before master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 
'replicate_reference_table_schema.table1'::regclass); SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- status after master_add_node SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE nodeport = :worker_2_port; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'replicate_reference_table_schema.table1'::regclass); DROP TABLE replicate_reference_table_schema.table1; DROP SCHEMA replicate_reference_table_schema CASCADE; -- do some tests with inactive node SELECT master_remove_node('localhost', :worker_2_port); CREATE TABLE initially_not_replicated_reference_table (key int); SELECT create_reference_table('initially_not_replicated_reference_table'); SELECT 1 FROM master_add_inactive_node('localhost', :worker_2_port); -- we should see only one shard placement SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'initially_not_replicated_reference_table'::regclass) ORDER BY 1,4,5; -- we should see the two shard placements after activation SELECT 1 FROM master_activate_node('localhost', :worker_2_port); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'initially_not_replicated_reference_table'::regclass) ORDER BY 1,4,5; -- this should have no effect SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- drop unnecessary tables DROP TABLE initially_not_replicated_reference_table; -- reload pg_dist_shard_placement table INSERT INTO pg_dist_shard_placement (SELECT * FROM tmp_shard_placement); DROP TABLE tmp_shard_placement; RESET citus.shard_replication_factor; RESET citus.replication_model; citus-7.0.3/src/test/regress/sql/multi_router_planner.sql000066400000000000000000001030241317107136600236660ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 840000; -- =================================================================== -- test router planner functionality for single shard select queries -- =================================================================== CREATE TABLE articles_hash ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); CREATE TABLE articles_range ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); CREATE TABLE articles_append ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer ); -- Check for the existence of line 'DEBUG: Creating router plan' -- to determine if router planner is used.
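-- A hypothetical illustration of that pattern (not part of the original test
-- file); the DEBUG line only becomes visible once client_min_messages is
-- lowered, as the tests further below do:
--   SET client_min_messages TO 'DEBUG2';
--   SELECT * FROM articles_hash WHERE author_id = 1;
--   -- expected in the output: DEBUG: Creating router plan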
-- this table is used in a CTE test CREATE TABLE authors_hash ( name varchar(20), id bigint ); CREATE TABLE authors_range ( name varchar(20), id bigint ); CREATE TABLE authors_reference ( name varchar(20), id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard_hash (LIKE articles_hash); SELECT master_create_distributed_table('articles_hash', 'author_id', 'hash'); SELECT master_create_distributed_table('articles_single_shard_hash', 'author_id', 'hash'); -- test when a table is distributed but no shards created yet SELECT count(*) from articles_hash; SELECT master_create_worker_shards('articles_hash', 2, 1); SELECT master_create_worker_shards('articles_single_shard_hash', 1, 1); SELECT create_reference_table('authors_reference'); -- create a bunch of test data INSERT INTO articles_hash VALUES ( 1, 1, 'arsenous', 9572); INSERT INTO articles_hash VALUES ( 2, 2, 'abducing', 13642); INSERT INTO articles_hash VALUES ( 3, 3, 'asternal', 10480); INSERT INTO articles_hash VALUES ( 4, 4, 'altdorfer', 14551); INSERT INTO articles_hash VALUES ( 5, 5, 'aruru', 11389); INSERT INTO articles_hash VALUES ( 6, 6, 'atlases', 15459); INSERT INTO articles_hash VALUES ( 7, 7, 'aseptic', 12298); INSERT INTO articles_hash VALUES ( 8, 8, 'agatized', 16368); INSERT INTO articles_hash VALUES ( 9, 9, 'alligate', 438); INSERT INTO articles_hash VALUES (10, 10, 'aggrandize', 17277); INSERT INTO articles_hash VALUES (11, 1, 'alamo', 1347); INSERT INTO articles_hash VALUES (12, 2, 'archiblast', 18185); INSERT INTO articles_hash VALUES (13, 3, 'aseyev', 2255); INSERT INTO articles_hash VALUES (14, 4, 'andesite', 19094); INSERT INTO articles_hash VALUES (15, 5, 'adversa', 3164); INSERT INTO articles_hash VALUES (16, 6, 'allonym', 2); INSERT INTO articles_hash VALUES (17, 7, 'auriga', 4073); INSERT INTO articles_hash VALUES (18, 8, 'assembly', 911); INSERT INTO articles_hash VALUES (19, 9, 'aubergiste', 4981); INSERT INTO articles_hash VALUES (20, 10, 'absentness', 1820); INSERT INTO articles_hash VALUES (21, 1, 'arcading', 5890); INSERT INTO articles_hash VALUES (22, 2, 'antipope', 2728); INSERT INTO articles_hash VALUES (23, 3, 'abhorring', 6799); INSERT INTO articles_hash VALUES (24, 4, 'audacious', 3637); INSERT INTO articles_hash VALUES (25, 5, 'antehall', 7707); INSERT INTO articles_hash VALUES (26, 6, 'abington', 4545); INSERT INTO articles_hash VALUES (27, 7, 'arsenous', 8616); INSERT INTO articles_hash VALUES (28, 8, 'aerophyte', 5454); INSERT INTO articles_hash VALUES (29, 9, 'amateur', 9524); INSERT INTO articles_hash VALUES (30, 10, 'andelee', 6363); INSERT INTO articles_hash VALUES (31, 1, 'athwartships', 7271); INSERT INTO articles_hash VALUES (32, 2, 'amazon', 11342); INSERT INTO articles_hash VALUES (33, 3, 'autochrome', 8180); INSERT INTO articles_hash VALUES (34, 4, 'amnestied', 12250); INSERT INTO articles_hash VALUES (35, 5, 'aminate', 9089); INSERT INTO articles_hash VALUES (36, 6, 'ablation', 13159); INSERT INTO articles_hash VALUES (37, 7, 'archduchies', 9997); INSERT INTO articles_hash VALUES (38, 8, 'anatine', 14067); INSERT INTO articles_hash VALUES (39, 9, 'anchises', 10906); INSERT INTO articles_hash VALUES (40, 10, 'attemper', 14976); INSERT INTO articles_hash VALUES (41, 1, 'aznavour', 11814); INSERT INTO articles_hash VALUES (42, 2, 'ausable', 15885); INSERT INTO articles_hash VALUES (43, 3, 'affixal', 12723); INSERT INTO articles_hash VALUES (44, 4, 'anteport', 16793); INSERT INTO articles_hash VALUES (45, 5, 'afrasia', 864); INSERT INTO articles_hash 
VALUES (46, 6, 'atlanta', 17702); INSERT INTO articles_hash VALUES (47, 7, 'abeyance', 1772); INSERT INTO articles_hash VALUES (48, 8, 'alkylic', 18610); INSERT INTO articles_hash VALUES (49, 9, 'anyone', 2681); INSERT INTO articles_hash VALUES (50, 10, 'anjanette', 19519); SET citus.task_executor_type TO 'real-time'; SET citus.large_table_shard_count TO 2; SET client_min_messages TO 'DEBUG2'; -- insert a single row for the test INSERT INTO articles_single_shard_hash VALUES (50, 10, 'anjanette', 19519); -- single-shard tests -- test simple select for a single row SELECT * FROM articles_hash WHERE author_id = 10 AND id = 50; -- get all titles by a single author SELECT title FROM articles_hash WHERE author_id = 10; -- try ordering them by word count SELECT title, word_count FROM articles_hash WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; -- look at last two articles by an author SELECT title, id FROM articles_hash WHERE author_id = 5 ORDER BY id LIMIT 2; -- find all articles by two authors in same shard -- but plan is not router executable due to order by SELECT title, author_id FROM articles_hash WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; -- same query is router executable with no order by SELECT title, author_id FROM articles_hash WHERE author_id = 7 OR author_id = 8; -- add in some grouping expressions, still on same shard -- having queries unsupported in Citus SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash WHERE author_id = 1 OR author_id = 7 OR author_id = 8 OR author_id = 10 GROUP BY author_id HAVING sum(word_count) > 1000 ORDER BY sum(word_count) DESC; -- however having clause is supported if it goes to a single shard SELECT author_id, sum(word_count) AS corpus_size FROM articles_hash WHERE author_id = 1 GROUP BY author_id HAVING sum(word_count) > 1000 ORDER BY sum(word_count) DESC; -- query is a single shard query but can't do shard pruning, -- not router-plannable due to <= and IN SELECT * FROM articles_hash WHERE author_id <= 1; SELECT * FROM articles_hash WHERE author_id IN (1, 3); -- queries with CTEs are supported WITH first_author AS ( SELECT id FROM articles_hash WHERE author_id = 1) SELECT * FROM first_author; -- queries with CTEs are supported even if CTE is not referenced inside query WITH first_author AS ( SELECT id FROM articles_hash WHERE author_id = 1) SELECT title FROM articles_hash WHERE author_id = 1; -- two CTE joins are supported if they go to the same worker WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 3) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; -- CTE joins are not supported if table shards are at different workers WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 2) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id; -- recursive CTEs are supported when filtered on partition column CREATE TABLE company_employees (company_id int, employee_id int, manager_id int); SELECT master_create_distributed_table('company_employees', 'company_id', 'hash'); SELECT master_create_worker_shards('company_employees', 4, 1); INSERT INTO company_employees values(1, 1, 0); 
INSERT INTO company_employees values(1, 2, 1);
INSERT INTO company_employees values(1, 3, 1);
INSERT INTO company_employees values(1, 4, 2);
INSERT INTO company_employees values(1, 5, 4);
INSERT INTO company_employees values(3, 1, 0);
INSERT INTO company_employees values(3, 15, 1);
INSERT INTO company_employees values(3, 3, 1);
-- find employees at top 2 levels within company hierarchy
WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2;
-- query becomes not router plannable and gets rejected
-- if filter on company is dropped
WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2;
-- logically wrong query, query involves different shards
-- from the same table
WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 3 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2;
-- CTE with queries other than SELECT is not supported
WITH new_article AS ( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9572) RETURNING * ) SELECT * FROM new_article;
-- Modifying statement in nested CTE case is covered by PostgreSQL itself
WITH new_article AS ( WITH nested_cte AS ( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9572) RETURNING * ) SELECT * FROM nested_cte ) SELECT * FROM new_article;
-- Modifying statement in a CTE in subquery is also covered by PostgreSQL
SELECT * FROM ( WITH new_article AS ( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9572) RETURNING * ) SELECT * FROM new_article ) AS subquery_cte;
-- grouping sets are supported on single shard
SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash WHERE author_id = 1 or author_id = 3 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle;
-- grouping sets are not supported on multiple shards
SELECT id, substring(title, 2, 1) AS subtitle, count(*) FROM articles_hash WHERE author_id = 1 or author_id = 2 GROUP BY GROUPING SETS ((id),(subtitle)) ORDER BY id, subtitle;
-- queries which involve functions in FROM clause are supported if they go to a single worker.
SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1; SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 3; -- they are not supported if multiple workers are involved SELECT * FROM articles_hash, position('om' in 'Thomas') WHERE author_id = 1 or author_id = 2; -- unless the query can be transformed into a join SELECT * FROM articles_hash WHERE author_id IN (SELECT author_id FROM articles_hash WHERE author_id = 2) ORDER BY articles_hash.id; -- subqueries are supported in FROM clause but they are not router plannable SELECT articles_hash.id,test.word_count FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE test.id = articles_hash.id ORDER BY articles_hash.id; SELECT articles_hash.id,test.word_count FROM articles_hash, (SELECT id, word_count FROM articles_hash) AS test WHERE test.id = articles_hash.id and articles_hash.author_id = 1 ORDER BY articles_hash.id; -- subqueries are not supported in SELECT clause SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard_hash a2 WHERE a.id = a2.id LIMIT 1) AS special_price FROM articles_hash a; -- simple lookup query SELECT * FROM articles_hash WHERE author_id = 1; -- below query hits a single shard, router plannable SELECT * FROM articles_hash WHERE author_id = 1 OR author_id = 17; -- below query hits two shards, not router plannable + not router executable -- handled by real-time executor SELECT * FROM articles_hash WHERE author_id = 1 OR author_id = 18; -- rename the output columns SELECT id as article_id, word_count * id as random_value FROM articles_hash WHERE author_id = 1; -- we can push down co-located joins to a single worker SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_hash b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; -- following join is router plannable since the same worker -- has both shards SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; -- following join is not router plannable since there are no -- workers containing both shards, added a CTE to make this fail -- at logical planner WITH single_shard as (SELECT * FROM articles_single_shard_hash) SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, single_shard b WHERE a.author_id = 2 and a.author_id = b.author_id LIMIT 3; -- single shard select with limit is router plannable SELECT * FROM articles_hash WHERE author_id = 1 LIMIT 3; -- single shard select with limit + offset is router plannable SELECT * FROM articles_hash WHERE author_id = 1 LIMIT 2 OFFSET 1; -- single shard select with limit + offset + order by is router plannable SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id desc LIMIT 2 OFFSET 1; -- single shard select with group by on non-partition column is router plannable SELECT id FROM articles_hash WHERE author_id = 1 GROUP BY id ORDER BY id; -- single shard select with distinct is router plannable SELECT DISTINCT id FROM articles_hash WHERE author_id = 1 ORDER BY id; -- single shard aggregate is router plannable SELECT avg(word_count) FROM articles_hash WHERE author_id = 2; -- max, min, sum, count are router plannable on single shard SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles_hash WHERE author_id = 2; -- queries with aggregates and group by supported 
on single shard SELECT max(word_count) FROM articles_hash WHERE author_id = 1 GROUP BY author_id; -- router plannable union queries are supported SELECT * FROM ( SELECT * FROM articles_hash WHERE author_id = 1 UNION SELECT * FROM articles_hash WHERE author_id = 3 ) AS combination ORDER BY id; (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 1) UNION (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3); (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 1) INTERSECT (SELECT LEFT(title, 1) FROM articles_hash WHERE author_id = 3); SELECT * FROM ( SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 1 EXCEPT SELECT LEFT(title, 2) FROM articles_hash WHERE author_id = 3 ) AS combination ORDER BY 1; -- union queries are not supported if not router plannable -- there is an inconsistency on shard pruning between -- ubuntu/mac disabling log messages for this queries only SET client_min_messages to 'NOTICE'; (SELECT * FROM articles_hash WHERE author_id = 1) UNION (SELECT * FROM articles_hash WHERE author_id = 2); SELECT * FROM ( (SELECT * FROM articles_hash WHERE author_id = 1) UNION (SELECT * FROM articles_hash WHERE author_id = 2)) uu; -- error out for queries with repartition jobs SELECT * FROM articles_hash a, articles_hash b WHERE a.id = b.id AND a.author_id = 1; -- queries which hit more than 1 shards are not router plannable or executable -- handled by real-time executor SELECT * FROM articles_hash WHERE author_id >= 1 AND author_id <= 3; SET citus.task_executor_type TO 'real-time'; -- Test various filtering options for router plannable check SET client_min_messages to 'DEBUG2'; -- this is definitely single shard -- and router plannable SELECT * FROM articles_hash WHERE author_id = 1 and author_id >= 1; -- not router plannable due to or SELECT * FROM articles_hash WHERE author_id = 1 or id = 1; -- router plannable SELECT * FROM articles_hash WHERE author_id = 1 and (id = 1 or id = 41); -- router plannable SELECT * FROM articles_hash WHERE author_id = 1 and (id = random()::int * 0); -- not router plannable due to function call on the right side SELECT * FROM articles_hash WHERE author_id = (random()::int * 0 + 1); -- not router plannable due to or SELECT * FROM articles_hash WHERE author_id = 1 or id = 1; -- router plannable due to abs(-1) getting converted to 1 by postgresql SELECT * FROM articles_hash WHERE author_id = abs(-1); -- not router plannable due to abs() function SELECT * FROM articles_hash WHERE 1 = abs(author_id); -- not router plannable due to abs() function SELECT * FROM articles_hash WHERE author_id = abs(author_id - 2); -- router plannable, function on different field SELECT * FROM articles_hash WHERE author_id = 1 and (id = abs(id - 2)); -- not router plannable due to is true SELECT * FROM articles_hash WHERE (author_id = 1) is true; -- router plannable, (boolean expression) = true is collapsed to (boolean expression) SELECT * FROM articles_hash WHERE (author_id = 1) = true; -- router plannable, between operator is on another column SELECT * FROM articles_hash WHERE (author_id = 1) and id between 0 and 20; -- router plannable, partition column expression is and'ed to rest SELECT * FROM articles_hash WHERE (author_id = 1) and (id = 1 or id = 31) and title like '%s'; -- router plannable, order is changed SELECT * FROM articles_hash WHERE (id = 1 or id = 31) and title like '%s' and (author_id = 1); -- router plannable SELECT * FROM articles_hash WHERE (title like '%s' or title like 'a%') and (author_id = 1); -- router plannable SELECT * FROM 
articles_hash WHERE (title like '%s' or title like 'a%') and (author_id = 1) and (word_count < 3000 or word_count > 8000); -- window functions are supported if query is router plannable SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash WHERE author_id = 5; SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash WHERE author_id = 5 ORDER BY word_count DESC; SELECT id, MIN(id) over (order by word_count) FROM articles_hash WHERE author_id = 1; SELECT id, word_count, AVG(word_count) over (order by word_count) FROM articles_hash WHERE author_id = 1; SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) FROM articles_hash WHERE author_id = 1; -- window functions are not supported for not router plannable queries SELECT id, MIN(id) over (order by word_count) FROM articles_hash WHERE author_id = 1 or author_id = 2; SELECT LAG(title, 1) over (ORDER BY word_count) prev, title, word_count FROM articles_hash WHERE author_id = 5 or author_id = 2; -- where false queries are router plannable SELECT * FROM articles_hash WHERE false; SELECT * FROM articles_hash WHERE author_id = 1 and false; SELECT * FROM articles_hash WHERE author_id = 1 and 1=0; SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and false; SELECT * FROM articles_hash WHERE null; -- where false with immutable function returning false SELECT * FROM articles_hash a WHERE a.author_id = 10 and int4eq(1, 2); SELECT * FROM articles_hash a WHERE int4eq(1, 2); SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and int4eq(1, 1); SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and int4eq(1, 2); -- partition_column is null clause does not prune out any shards, -- all shards remain after shard pruning, not router plannable SELECT * FROM articles_hash a WHERE a.author_id is null; -- partition_column equals to null clause prunes out all shards -- no shards after shard pruning, router plannable SELECT * FROM articles_hash a WHERE a.author_id = null; -- stable function returning bool SELECT * FROM articles_hash a WHERE date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles_hash a, articles_single_shard_hash b WHERE a.author_id = 10 and a.author_id = b.author_id and date_ne_timestamp('1954-04-11', '1954-04-11'::timestamp); -- union/difference /intersection with where false -- this query was not originally router plannable, addition of 1=0 -- makes it router plannable SELECT * FROM ( SELECT * FROM articles_hash WHERE author_id = 1 UNION SELECT * FROM articles_hash WHERE author_id = 2 and 1=0 ) AS combination ORDER BY id; SELECT * FROM ( SELECT * FROM articles_hash WHERE author_id = 1 EXCEPT SELECT * FROM articles_hash WHERE author_id = 2 and 1=0 ) AS combination ORDER BY id; (SELECT * FROM articles_hash WHERE author_id = 1) INTERSECT (SELECT * FROM articles_hash WHERE author_id = 2 and 1=0); -- CTEs with where false WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 1 and 1=0) SELECT * FROM id_author, id_title 
WHERE id_author.id = id_title.id; WITH id_author AS ( SELECT id, author_id FROM articles_hash WHERE author_id = 1), id_title AS (SELECT id, title from articles_hash WHERE author_id = 1) SELECT * FROM id_author, id_title WHERE id_author.id = id_title.id and 1=0; WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2 and 1=0; WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1 AND 1=0)) SELECT * FROM hierarchy WHERE LEVEL <= 2; WITH RECURSIVE hierarchy as ( SELECT *, 1 AS level FROM company_employees WHERE company_id = 1 and manager_id = 0 AND 1=0 UNION SELECT ce.*, (h.level+1) FROM hierarchy h JOIN company_employees ce ON (h.employee_id = ce.manager_id AND h.company_id = ce.company_id AND ce.company_id = 1)) SELECT * FROM hierarchy WHERE LEVEL <= 2; -- window functions with where false SELECT word_count, rank() OVER (PARTITION BY author_id ORDER BY word_count) FROM articles_hash WHERE author_id = 1 and 1=0; -- function calls in WHERE clause with non-relational arguments SELECT author_id FROM articles_hash WHERE substring('hello world', 1, 5) = 'hello' ORDER BY author_id LIMIT 1; -- when expression evaluates to false SELECT author_id FROM articles_hash WHERE substring('hello world', 1, 4) = 'hello' ORDER BY author_id LIMIT 1; -- verify range partitioned tables can be used in router plannable queries -- just 4 shards to be created for each table to make sure -- they are 'co-located' pairwise SET citus.shard_replication_factor TO 1; SELECT master_create_distributed_table('authors_range', 'id', 'range'); SELECT master_create_distributed_table('articles_range', 'author_id', 'range'); SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue=10 WHERE shardid = :shard_id; SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 11, shardmaxvalue=30 WHERE shardid = :shard_id; SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 21, shardmaxvalue=40 WHERE shardid = :shard_id; SELECT master_create_empty_shard('authors_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 31, shardmaxvalue=40 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue=10 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 11, shardmaxvalue=30 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 21, shardmaxvalue=40 WHERE shardid = :shard_id; SELECT master_create_empty_shard('articles_range') as shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 31, shardmaxvalue=40 WHERE shardid = :shard_id; -- single shard select queries are router plannable SELECT * FROM articles_range where author_id = 1; SELECT * FROM articles_range where author_id = 1 or author_id = 5; -- zero shard select query is 
router plannable SELECT * FROM articles_range where author_id = 1 and author_id = 2; -- single shard joins on range partitioned table are router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1; -- zero shard join is router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1 and au.id = 2; -- multi-shard join is not router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 35; -- this is a bug, it is a single shard join query but not router plannable SELECT * FROM articles_range ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 1 or au.id = 5; -- bogus query, join on non-partition column, but router plannable due to filters SELECT * FROM articles_range ar join authors_range au on (ar.id = au.id) WHERE ar.author_id = 1 and au.id < 10; -- join between hash and range partition tables are router plannable -- only if both tables pruned down to single shard and co-located on the same -- node. -- router plannable SELECT * FROM articles_hash ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 2; -- not router plannable SELECT * FROM articles_hash ar join authors_range au on (ar.author_id = au.id) WHERE ar.author_id = 3; -- join between a range partitioned table and reference table is router plannable SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id) WHERE ar.author_id = 1; -- still hits a single shard and router plannable SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id) WHERE ar.author_id = 1 or ar.author_id = 5; -- it is not router plannable if hit multiple shards SELECT * FROM articles_range ar join authors_reference au on (ar.author_id = au.id) WHERE ar.author_id = 1 or ar.author_id = 15; -- following is a bug, function should have been -- evaluated at master before going to worker -- need to use a append distributed table here SELECT master_create_distributed_table('articles_append', 'author_id', 'append'); SET citus.shard_replication_factor TO 1; SELECT master_create_empty_shard('articles_append') AS shard_id \gset UPDATE pg_dist_shard SET shardmaxvalue = 100, shardminvalue=1 WHERE shardid = :shard_id; SELECT author_id FROM articles_append WHERE substring('articles_append'::regclass::text, 1, 5) = 'hello' ORDER BY author_id LIMIT 1; -- same query with where false but evaluation left to worker SELECT author_id FROM articles_append WHERE substring('articles_append'::regclass::text, 1, 4) = 'hello' ORDER BY author_id LIMIT 1; -- same query on router planner with where false but evaluation left to worker SELECT author_id FROM articles_single_shard_hash WHERE substring('articles_single_shard_hash'::regclass::text, 1, 4) = 'hello' ORDER BY author_id LIMIT 1; SELECT author_id FROM articles_hash WHERE author_id = 1 AND substring('articles_hash'::regclass::text, 1, 5) = 'hello' ORDER BY author_id LIMIT 1; -- create a dummy function to be used in filtering CREATE OR REPLACE FUNCTION someDummyFunction(regclass) RETURNS text AS $$ BEGIN RETURN md5($1::text); END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- not router plannable, returns all rows SELECT * FROM articles_hash WHERE someDummyFunction('articles_hash') = md5('articles_hash') ORDER BY author_id, id LIMIT 5; -- router plannable, errors SELECT * FROM articles_hash WHERE someDummyFunction('articles_hash') = md5('articles_hash') AND author_id = 1 ORDER BY author_id, 
id LIMIT 5;
-- temporarily turn off debug messages before dropping the function
SET client_min_messages TO 'NOTICE';
DROP FUNCTION someDummyFunction(regclass);
SET client_min_messages TO 'DEBUG2';
-- complex query hitting a single shard
SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash WHERE author_id = 5;
-- same query is not router plannable if it hits multiple shards
SELECT count(DISTINCT CASE WHEN word_count > 100 THEN id ELSE NULL END) as c FROM articles_hash GROUP BY author_id;
-- queries inside transactions can be router plannable
BEGIN;
SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id;
END;
-- queries inside read-only transactions can be router plannable
BEGIN;
SET TRANSACTION READ ONLY;
SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id;
END;
-- cursor queries are router plannable
BEGIN;
DECLARE test_cursor CURSOR FOR SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id;
FETCH test_cursor;
FETCH ALL test_cursor;
FETCH test_cursor; -- fetch one row after the last
FETCH BACKWARD test_cursor;
END;
-- queries inside copy can be router plannable
COPY ( SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id) TO STDOUT;
-- table creation queries inside can be router plannable
CREATE TEMP TABLE temp_articles_hash as SELECT * FROM articles_hash WHERE author_id = 1 ORDER BY id;
-- router plannable queries may include filter for aggregates
SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash WHERE author_id = 1;
-- non-router plannable queries also support filters
SELECT count(*), count(*) FILTER (WHERE id < 3) FROM articles_hash WHERE author_id = 1 or author_id = 2;
-- prepare queries can be router plannable
PREPARE author_1_articles as SELECT * FROM articles_hash WHERE author_id = 1;
EXECUTE author_1_articles;
-- parametric prepare queries can be router plannable
PREPARE author_articles(int) as SELECT * FROM articles_hash WHERE author_id = $1;
EXECUTE author_articles(1);
-- queries inside plpgsql functions could be router plannable
CREATE OR REPLACE FUNCTION author_articles_max_id() RETURNS int AS $$ DECLARE max_id integer; BEGIN SELECT MAX(id) FROM articles_hash ah WHERE author_id = 1 into max_id; return max_id; END; $$ LANGUAGE plpgsql;
SELECT author_articles_max_id();
-- check that a function returning a setof query is router plannable
CREATE OR REPLACE FUNCTION author_articles_id_word_count() RETURNS TABLE(id bigint, word_count int) AS $$ DECLARE BEGIN RETURN QUERY SELECT ah.id, ah.word_count FROM articles_hash ah WHERE author_id = 1; END; $$ LANGUAGE plpgsql;
SELECT * FROM author_articles_id_word_count();
-- materialized views can be created for router plannable queries
CREATE MATERIALIZED VIEW mv_articles_hash_empty AS SELECT * FROM articles_hash WHERE author_id = 1;
SELECT * FROM mv_articles_hash_empty;
CREATE MATERIALIZED VIEW mv_articles_hash_data AS SELECT * FROM articles_hash WHERE author_id in (1,2);
SELECT * FROM mv_articles_hash_data;
-- router planner/executor is now enabled for task-tracker executor
SET citus.task_executor_type to 'task-tracker';
SELECT id FROM articles_hash WHERE author_id = 1;
-- insert query is router plannable even under task-tracker
INSERT INTO articles_hash VALUES (51, 1, 'amateus', 1814);
-- verify insert is successful (not router plannable and executable)
SELECT id FROM articles_hash WHERE author_id = 1;
SET client_min_messages to 'NOTICE';
-- test that a connection failure marks placements invalid
SET citus.shard_replication_factor TO 2;
CREATE TABLE failure_test
(a int, b int); SELECT master_create_distributed_table('failure_test', 'a', 'hash'); SELECT master_create_worker_shards('failure_test', 2); CREATE USER router_user; GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - - - :worker_1_port CREATE USER router_user; GRANT INSERT ON ALL TABLES IN SCHEMA public TO router_user; \c - router_user - :master_port -- first test that it is marked invalid inside a transaction block -- we will fail to connect to worker 2, since the user does not exist BEGIN; INSERT INTO failure_test VALUES (1, 1); SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'failure_test'::regclass ) ORDER BY placementid; ROLLBACK; INSERT INTO failure_test VALUES (2, 1); SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'failure_test'::regclass ) ORDER BY placementid; \c - postgres - :worker_1_port DROP OWNED BY router_user; DROP USER router_user; \c - - - :master_port DROP OWNED BY router_user; DROP USER router_user; DROP TABLE failure_test; DROP FUNCTION author_articles_max_id(); DROP FUNCTION author_articles_id_word_count(); DROP MATERIALIZED VIEW mv_articles_hash_empty; DROP MATERIALIZED VIEW mv_articles_hash_data; DROP TABLE articles_hash; DROP TABLE articles_single_shard_hash; DROP TABLE authors_hash; DROP TABLE authors_range; DROP TABLE authors_reference; DROP TABLE company_employees; DROP TABLE articles_range; DROP TABLE articles_append; citus-7.0.3/src/test/regress/sql/multi_schema_support.sql000066400000000000000000000665221317107136600236760ustar00rootroot00000000000000-- -- MULTI_SCHEMA_SUPPORT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1190000; -- create schema to test schema support CREATE SCHEMA test_schema_support; -- test master_append_table_to_shard with schema -- create local table to append CREATE TABLE public.nation_local( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); \copy public.nation_local FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. 
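-- (illustrative addition, not part of the original regression test) nation_local is a
-- plain local table at this point, so an ordinary count confirms the six staging rows
-- loaded by the \copy above before they are appended into a distributed shard below
SELECT count(*) FROM public.nation_local;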
CREATE TABLE test_schema_support.nation_append( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_append', 'n_nationkey', 'append'); SELECT master_create_empty_shard('test_schema_support.nation_append'); -- append table to shard SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support.nation_append; -- test with shard name contains special characters CREATE TABLE test_schema_support."nation._'append" ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT master_create_distributed_table('test_schema_support."nation._''append"', 'n_nationkey', 'append'); SELECT master_create_empty_shard('test_schema_support."nation._''append"'); SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); -- verify table actually appended to shard SELECT COUNT(*) FROM test_schema_support."nation._'append"; -- test master_append_table_to_shard with schema with search_path is set SET search_path TO test_schema_support; SELECT master_append_table_to_shard(1190000, 'public.nation_local', 'localhost', :master_port); -- verify table actually appended to shard SELECT COUNT(*) FROM nation_append; -- test with search_path is set and shard name contains special characters SELECT master_append_table_to_shard(1190001, 'nation_local', 'localhost', :master_port); -- verify table actually appended to shard SELECT COUNT(*) FROM "nation._'append"; -- test shard creation on append(by data loading) and hash distributed(with UDF) tables -- when search_path is set SET search_path TO test_schema_support; -- create shard with COPY on append distributed table CREATE TABLE nation_append_search_path( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('nation_append_search_path', 'n_nationkey', 'append'); \copy nation_append_search_path FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. 
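-- (illustrative addition, not part of the original regression test) the \copy above runs
-- with search_path set to test_schema_support; counting the six loaded rows is a simple
-- way to confirm that an append shard was created and populated by the data load
SELECT count(*) FROM nation_append_search_path;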
-- create shard with master_create_worker_shards CREATE TABLE test_schema_support.nation_hash( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_hash', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support.nation_hash', 4, 2); -- test cursors SET search_path TO public; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM test_schema_support.nation_append WHERE n_nationkey = 1; FETCH test_cursor; FETCH test_cursor; FETCH BACKWARD test_cursor; END; -- test with search_path is set SET search_path TO test_schema_support; BEGIN; DECLARE test_cursor CURSOR FOR SELECT * FROM nation_append WHERE n_nationkey = 1; FETCH test_cursor; FETCH test_cursor; FETCH BACKWARD test_cursor; END; -- test inserting to table in different schema SET search_path TO public; INSERT INTO test_schema_support.nation_hash(n_nationkey, n_name, n_regionkey) VALUES (6, 'FRANCE', 3); -- verify insertion SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey = 6; -- test with search_path is set SET search_path TO test_schema_support; INSERT INTO nation_hash(n_nationkey, n_name, n_regionkey) VALUES (7, 'GERMANY', 3); -- verify insertion SELECT * FROM nation_hash WHERE n_nationkey = 7; -- test UDFs with schemas SET search_path TO public; \copy test_schema_support.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. 
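-- (illustrative addition, not part of the original regression test) after the two
-- single-row INSERTs earlier and the six rows copied above, the hash-distributed table
-- should hold eight rows; a schema-qualified count verifies the load while search_path
-- is still set to public
SELECT count(*) FROM test_schema_support.nation_hash;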
-- create UDF in master node CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 1 \c - - - :worker_1_port CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 2 \c - - - :worker_2_port CREATE OR REPLACE FUNCTION dummyFunction(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; \c - - - :master_port -- UDF in public, table in a schema other than public, search_path is not set SELECT dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; -- UDF in public, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT public.dummyFunction(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; -- create UDF in master node in schema SET search_path TO test_schema_support; CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 1 in schema \c - - - :worker_1_port SET search_path TO test_schema_support; CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -- create UDF in worker node 2 in schema \c - - - :worker_2_port SET search_path TO test_schema_support; CREATE OR REPLACE FUNCTION dummyFunction2(theValue integer) RETURNS text AS $$ DECLARE strresult text; BEGIN RETURN theValue * 3 / 2 + 1; END; $$ LANGUAGE 'plpgsql' IMMUTABLE; \c - - - :master_port -- UDF in schema, table in a schema other than public, search_path is not set SET search_path TO public; SELECT test_schema_support.dummyFunction2(n_nationkey) FROM test_schema_support.nation_hash GROUP BY 1 ORDER BY 1; -- UDF in schema, table in a schema other than public, search_path is set SET search_path TO test_schema_support; SELECT dummyFunction2(n_nationkey) FROM nation_hash GROUP BY 1 ORDER BY 1; -- test operators with schema SET search_path TO public; -- create operator in master CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); -- create operator in worker node 1 \c - - - :worker_1_port CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); -- create operator in worker node 2 \c - - - :worker_2_port CREATE OPERATOR test_schema_support.=== ( LEFTARG = int, RIGHTARG = int, PROCEDURE = int4eq, COMMUTATOR = ===, NEGATOR = !==, HASHES, MERGES ); \c - - - :master_port -- test with search_path is not set SELECT * FROM test_schema_support.nation_hash WHERE n_nationkey OPERATOR(test_schema_support.===) 1; -- test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash WHERE n_nationkey OPERATOR(===) 1; -- test with master_modify_multiple_shards SET search_path TO public; SELECT master_modify_multiple_shards('UPDATE test_schema_support.nation_hash SET n_regionkey = n_regionkey + 1'); --verify master_modify_multiple_shards SELECT * FROM test_schema_support.nation_hash; --test with search_path is set 
SET search_path TO test_schema_support; SELECT master_modify_multiple_shards('UPDATE nation_hash SET n_regionkey = n_regionkey + 1'); --verify master_modify_multiple_shards SELECT * FROM nation_hash; --test COLLATION with schema SET search_path TO public; CREATE COLLATION test_schema_support.english FROM "en_US"; -- create COLLATION in worker node 1 in schema \c - - - :worker_1_port CREATE COLLATION test_schema_support.english FROM "en_US"; -- create COLLATION in worker node 2 in schema \c - - - :worker_2_port CREATE COLLATION test_schema_support.english FROM "en_US"; \c - - - :master_port CREATE TABLE test_schema_support.nation_hash_collation( n_nationkey integer not null, n_name char(25) not null COLLATE test_schema_support.english, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('test_schema_support.nation_hash_collation', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support.nation_hash_collation', 4, 2); \copy test_schema_support.nation_hash_collation FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. SELECT * FROM test_schema_support.nation_hash_collation; SELECT n_comment FROM test_schema_support.nation_hash_collation ORDER BY n_comment COLLATE test_schema_support.english; --test with search_path is set SET search_path TO test_schema_support; CREATE TABLE nation_hash_collation_search_path( n_nationkey integer not null, n_name char(25) not null COLLATE english, n_regionkey integer not null, n_comment varchar(152) ); SELECT master_create_distributed_table('nation_hash_collation_search_path', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('nation_hash_collation_search_path', 4, 2); \copy nation_hash_collation_search_path FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. 
SELECT * FROM nation_hash_collation_search_path; SELECT n_comment FROM nation_hash_collation_search_path ORDER BY n_comment COLLATE english; --test composite types with schema SET search_path TO public; CREATE TYPE test_schema_support.new_composite_type as (key1 text, key2 text); -- create type in worker node 1 in schema \c - - - :worker_1_port CREATE TYPE test_schema_support.new_composite_type as (key1 text, key2 text); -- create type in worker node 2 in schema \c - - - :worker_2_port CREATE TYPE test_schema_support.new_composite_type as (key1 text, key2 text); \c - - - :master_port CREATE TABLE test_schema_support.nation_hash_composite_types( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152), test_col test_schema_support.new_composite_type ); SELECT master_create_distributed_table('test_schema_support.nation_hash_composite_types', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support.nation_hash_composite_types', 4, 2); -- insert some data to verify composite type queries \copy test_schema_support.nation_hash_composite_types FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai|(a,a) 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon|(a,b) 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special |(a,c) 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold|(a,d) 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d|(a,e) 5|ETHIOPIA|0|ven packages wake quickly. regu|(a,f) \. SELECT * FROM test_schema_support.nation_hash_composite_types WHERE test_col = '(a,a)'::test_schema_support.new_composite_type; --test with search_path is set SET search_path TO test_schema_support; SELECT * FROM nation_hash_composite_types WHERE test_col = '(a,a)'::new_composite_type; -- test ALTER TABLE ADD/DROP queries with schemas SET search_path TO public; ALTER TABLE test_schema_support.nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; \c - - - :master_port ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS non_existent_column; ALTER TABLE test_schema_support.nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is dropped SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash'::regclass; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; \c - - - :master_port --test with search_path is set SET search_path TO test_schema_support; ALTER TABLE nation_hash ADD COLUMN new_col INT; -- verify column is added SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; \c - - - :master_port SET search_path TO test_schema_support; ALTER TABLE nation_hash DROP COLUMN IF EXISTS non_existent_column; ALTER TABLE nation_hash DROP COLUMN IF EXISTS new_col; -- verify column is 
dropped SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash'::regclass; \c - - - :worker_1_port SELECT "Column", "Type", "Modifiers" FROM public.table_desc WHERE relid='test_schema_support.nation_hash_1190003'::regclass; \c - - - :master_port -- test CREATE/DROP INDEX with schemas SET search_path TO public; -- CREATE index CREATE INDEX index1 ON test_schema_support.nation_hash(n_name); --verify INDEX is created \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port -- DROP index DROP INDEX test_schema_support.index1; --verify INDEX is dropped \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port --test with search_path is set SET search_path TO test_schema_support; -- CREATE index CREATE INDEX index1 ON nation_hash(n_name); --verify INDEX is created \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port -- DROP index SET search_path TO test_schema_support; DROP INDEX index1; --verify INDEX is dropped \d test_schema_support.index1 \c - - - :worker_1_port \d test_schema_support.index1_1190003 \c - - - :master_port -- test master_copy_shard_placement with schemas SET search_path TO public; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; --test with search_path is set SET search_path TO test_schema_support; -- mark shard as inactive UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1190000 and nodeport = :worker_1_port; SELECT master_copy_shard_placement(1190000, 'localhost', :worker_2_port, 'localhost', :worker_1_port); -- verify shardstate SELECT shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid = 1190000 ORDER BY nodeport; -- test master_apply_delete_command with schemas SET search_path TO public; SELECT master_apply_delete_command('DELETE FROM test_schema_support.nation_append') ; -- verify shard is dropped \c - - - :worker_1_port \d test_schema_support.nation_append_119* \c - - - :master_port -- test with search_path is set SET search_path TO test_schema_support; \copy nation_append FROM STDIN with delimiter '|'; 0|ALGERIA|0| haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. 
SELECT master_apply_delete_command('DELETE FROM nation_append') ; -- verify shard is dropped \c - - - :worker_1_port \d test_schema_support.nation_append_119* \c - - - :master_port -- check joins of tables which are in schemas other than public -- we create new tables with replication factor of 1 -- so that we guarantee to have repartitions when necessary -- create necessary objects and load data to them CREATE SCHEMA test_schema_support_join_1; CREATE SCHEMA test_schema_support_join_2; CREATE TABLE test_schema_support_join_1.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); CREATE TABLE test_schema_support_join_1.nation_hash_2 ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); CREATE TABLE test_schema_support_join_2.nation_hash ( n_nationkey integer not null, n_name char(25) not null, n_regionkey integer not null, n_comment varchar(152)); SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash', 4, 1); \copy test_schema_support_join_1.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. SELECT master_create_distributed_table('test_schema_support_join_1.nation_hash_2', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support_join_1.nation_hash_2', 4, 1); \copy test_schema_support_join_1.nation_hash_2 FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. SELECT master_create_distributed_table('test_schema_support_join_2.nation_hash', 'n_nationkey', 'hash'); SELECT master_create_worker_shards('test_schema_support_join_2.nation_hash', 4, 1); \copy test_schema_support_join_2.nation_hash FROM STDIN with delimiter '|'; 0|ALGERIA|0|haggle. carefully final deposits detect slyly agai 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 5|ETHIOPIA|0|ven packages wake quickly. regu \. 
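-- (illustrative addition, not part of the original regression test) each of the three
-- join tables above was loaded with the same six nation rows at replication factor 1,
-- so the partition-column joins checked below should each match six row pairs
SELECT count(*) FROM test_schema_support_join_1.nation_hash;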
-- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_nationkey; -- check when search_path is public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_1.nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_nationkey; -- single repartition joins SET citus.task_executor_type TO "task-tracker"; -- check when search_path is public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on partition column and non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_nationkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on partition column and non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_nationkey = n2.n_regionkey; -- hash repartition joins -- check when search_path is public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO public; SELECT count (*) FROM test_schema_support_join_1.nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in different schemas, -- join on non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, test_schema_support_join_2.nation_hash n2 WHERE n1.n_regionkey = n2.n_regionkey; -- check when search_path is different than public, -- join of two tables which are in same schemas, -- join on non-partition column SET search_path TO test_schema_support_join_1; SELECT count (*) FROM nation_hash n1, nation_hash_2 n2 WHERE n1.n_regionkey = n2.n_regionkey; -- set task_executor back to real-time SET citus.task_executor_type TO "real-time"; -- test ALTER TABLE SET SCHEMA -- we expect that it will warn out SET search_path TO public; ALTER TABLE test_schema_support.nation_hash SET SCHEMA public; -- we will use this function in next test CREATE FUNCTION run_command_on_coordinator_and_workers(p_sql text) RETURNS void LANGUAGE plpgsql AS $$ BEGIN EXECUTE p_sql; PERFORM run_command_on_workers(p_sql); END;$$; -- test schema propagation with user 
other than current user SELECT run_command_on_coordinator_and_workers('CREATE USER "test-user"'); SELECT run_command_on_coordinator_and_workers('GRANT ALL ON DATABASE postgres to "test-user"'); CREATE SCHEMA schema_with_user AUTHORIZATION "test-user"; CREATE TABLE schema_with_user.test_table(column1 int); SELECT create_reference_table('schema_with_user.test_table'); -- verify that owner of the created schema is test-user \c - - - :worker_1_port \dn schema_with_user \c - - - :master_port -- we do not use run_command_on_coordinator_and_workers here because when there is CASCADE, it causes deadlock DROP OWNED BY "test-user" CASCADE; SELECT run_command_on_workers('DROP OWNED BY "test-user" CASCADE'); SELECT run_command_on_coordinator_and_workers('DROP USER "test-user"'); DROP FUNCTION run_command_on_coordinator_and_workers(p_sql text); -- test run_command_on_* UDFs with schema CREATE SCHEMA run_test_schema; CREATE TABLE run_test_schema.test_table(id int); SELECT create_distributed_table('run_test_schema.test_table','id'); -- randomly insert data to evaluate below UDFs better INSERT INTO run_test_schema.test_table VALUES(1); INSERT INTO run_test_schema.test_table VALUES(7); INSERT INTO run_test_schema.test_table VALUES(9); -- try UDFs which call shard_name as a subroutine SELECT sum(result::int) FROM run_command_on_placements('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); SELECT sum(result::int) FROM run_command_on_shards('run_test_schema.test_table','SELECT pg_table_size(''%s'')'); -- Clean up the created schema DROP SCHEMA run_test_schema CASCADE; citus-7.0.3/src/test/regress/sql/multi_shard_modify.sql000066400000000000000000000171161317107136600233050ustar00rootroot00000000000000-- -- MULTI_SHARD_MODIFY -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 350000; -- Create a new hash partitioned multi_shard_modify_test table and load data into it. CREATE TABLE multi_shard_modify_test ( t_key integer not null, t_name varchar(25) not null, t_value integer not null); SELECT master_create_distributed_table('multi_shard_modify_test', 't_key', 'hash'); SELECT master_create_worker_shards('multi_shard_modify_test', 4, 2); COPY multi_shard_modify_test (t_key, t_name, t_value) FROM STDIN WITH (FORMAT 'csv'); 1,san francisco,99 2,istanbul,34 3,paris,46 4,london,91 5,toronto,98 6,london,44 7,stockholm,21 8,tallinn,33 9,helsinki,21 10,ankara,6 11,karabuk,78 12,kastamonu,37 13,samsun,55 14,rome,13 15,madrid,1 16,barcelona,8 17,poznan,12 31,kabul,4 32,dhaka,62 33,iamey,121 34,muscat,77 41,uppsala,-1 42,malmo,-2 101,tokyo,106 102,new delhi,978 201,taipei,556 202,beijing,754 \. 
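-- (illustrative addition, not part of the original regression test) the CSV COPY above
-- loads 27 rows; counting them before the modification tests start gives a known
-- baseline for the DELETE and UPDATE commands exercised below
SELECT count(*) FROM multi_shard_modify_test;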
-- Testing master_modify_multiple_shards -- Verify that master_modify_multiple_shards can be rolled back BEGIN; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 10 AND t_key <= 13'); SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 202'); ROLLBACK; SELECT count(*) FROM multi_shard_modify_test; -- Check that master_modify_multiple_shards cannot be called with non-distributed tables CREATE TEMPORARY TABLE temporary_nondistributed_table (col_1 integer,col_2 text); INSERT INTO temporary_nondistributed_table VALUES (37, 'eren'), (31, 'onder'); SELECT master_modify_multiple_shards('DELETE FROM temporary_nondistributed_table WHERE col_1 = 37'); -- commands with volatile functions in their quals SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (random() * 1000)'); SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_value = (random() * 1000)'); -- commands with immutable functions in their quals SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = abs(-3)'); -- DELETE with expression in WHERE clause SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = (3*18-40)'); -- commands with a USING a non distributed table error out CREATE TABLE temp_nations(name text, key integer); SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' '); -- commands with a USING clause are unsupported SELECT master_create_distributed_table('temp_nations', 'name', 'hash'); SELECT master_create_worker_shards('temp_nations', 4, 2); SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test USING temp_nations WHERE multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''foobar'' '); -- commands with a RETURNING clause are unsupported SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 3 RETURNING *'); -- commands containing a CTE are unsupported SELECT master_modify_multiple_shards('WITH deleted_stuff AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) DELETE FROM multi_shard_modify_test'); -- Check that we can successfully delete from multiple shards with 1PC SET citus.multi_shard_commit_protocol TO '1pc'; SELECT count(*) FROM multi_shard_modify_test; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 200'); SELECT count(*) FROM multi_shard_modify_test; -- Check that we can successfully delete from multiple shards with 2PC SET citus.multi_shard_commit_protocol TO '2pc'; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key > 100'); SELECT count(*) FROM multi_shard_modify_test; -- Check that shard pruning works SET client_min_messages TO DEBUG2; SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = 15'); SET client_min_messages TO NOTICE; -- Check that master_modify_multiple_shards works without partition keys SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_name LIKE ''barce%'' '); -- Simple, Single Shard Update SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''warsaw'' WHERE t_key=17'); SELECT t_name FROM multi_shard_modify_test WHERE t_key=17; -- Simple, Multi Shard Update SELECT master_modify_multiple_shards('UPDATE 
multi_shard_modify_test SET t_name=''???'' WHERE t_key>30 AND t_key<35'); SELECT t_name FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; -- expression UPDATE SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=8*37 WHERE t_key>30 AND t_key<35'); SELECT t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; -- multi-column UPDATE SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''somename'', t_value=333 WHERE t_key>30 AND t_key<35'); SELECT t_name, t_value FROM multi_shard_modify_test WHERE t_key>30 AND t_key<35; -- commands with no constraints on the partition key are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''nice city'' WHERE t_value < 0'); SELECT t_name FROM multi_shard_modify_test WHERE t_value < 0; -- attempting to change the partition key is unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_key=3000 WHERE t_key < 10 '); -- UPDATEs with a FROM clause are unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL'' FROM temp_nations WHERE multi_shard_modify_test.t_key = 3 AND multi_shard_modify_test.t_value = temp_nations.key AND temp_nations.name = ''dummy'' '); -- commands with a RETURNING clause are unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name=''FAIL'' WHERE t_key=4 RETURNING *'); -- commands containing a CTE are unsupported SELECT master_modify_multiple_shards('WITH t AS (INSERT INTO multi_shard_modify_test DEFAULT VALUES RETURNING *) UPDATE multi_shard_modify_test SET t_name = ''FAIL'' '); -- updates referencing just a var are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value=t_key WHERE t_key = 10'); SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; -- updates referencing a column are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = t_value + 37 WHERE t_key = 10'); SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; CREATE FUNCTION temp_stable_func() RETURNS integer AS 'SELECT 10;' LANGUAGE SQL STABLE; -- updates referencing non-IMMUTABLE functions are unsupported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_name = ''FAIL!'' WHERE t_key = temp_stable_func()'); -- updates referencing IMMUTABLE functions in SET section are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = abs(-78) WHERE t_key = 10'); SELECT t_value FROM multi_shard_modify_test WHERE t_key=10; -- updates referencing STABLE functions in SET section are supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = temp_stable_func() * 2 WHERE t_key = 10'); -- updates referencing VOLATILE functions in SET section are not supported SELECT master_modify_multiple_shards('UPDATE multi_shard_modify_test SET t_value = random() WHERE t_key = 10'); -- commands with stable functions in their quals are allowed SELECT master_modify_multiple_shards('DELETE FROM multi_shard_modify_test WHERE t_key = temp_stable_func()'); ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 102046; citus-7.0.3/src/test/regress/sql/multi_simple_queries.sql000066400000000000000000000242351317107136600236630ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 850000; -- =================================================================== -- test end-to-end query 
functionality -- =================================================================== CREATE TABLE articles ( id bigint NOT NULL, author_id bigint NOT NULL, title varchar(20) NOT NULL, word_count integer NOT NULL CHECK (word_count > 0) ); -- this table is used in a CTE test CREATE TABLE authors ( name text, id bigint ); -- this table is used in router executor tests CREATE TABLE articles_single_shard (LIKE articles); SELECT master_create_distributed_table('articles', 'author_id', 'hash'); SELECT master_create_distributed_table('articles_single_shard', 'author_id', 'hash'); SELECT master_create_worker_shards('articles', 2, 1); SELECT master_create_worker_shards('articles_single_shard', 1, 1); -- create a bunch of test data INSERT INTO articles VALUES ( 1, 1, 'arsenous', 9572); INSERT INTO articles VALUES ( 2, 2, 'abducing', 13642); INSERT INTO articles VALUES ( 3, 3, 'asternal', 10480); INSERT INTO articles VALUES ( 4, 4, 'altdorfer', 14551); INSERT INTO articles VALUES ( 5, 5, 'aruru', 11389); INSERT INTO articles VALUES ( 6, 6, 'atlases', 15459); INSERT INTO articles VALUES ( 7, 7, 'aseptic', 12298); INSERT INTO articles VALUES ( 8, 8, 'agatized', 16368); INSERT INTO articles VALUES ( 9, 9, 'alligate', 438); INSERT INTO articles VALUES (10, 10, 'aggrandize', 17277); INSERT INTO articles VALUES (11, 1, 'alamo', 1347); INSERT INTO articles VALUES (12, 2, 'archiblast', 18185); INSERT INTO articles VALUES (13, 3, 'aseyev', 2255); INSERT INTO articles VALUES (14, 4, 'andesite', 19094); INSERT INTO articles VALUES (15, 5, 'adversa', 3164); INSERT INTO articles VALUES (16, 6, 'allonym', 2); INSERT INTO articles VALUES (17, 7, 'auriga', 4073); INSERT INTO articles VALUES (18, 8, 'assembly', 911); INSERT INTO articles VALUES (19, 9, 'aubergiste', 4981); INSERT INTO articles VALUES (20, 10, 'absentness', 1820); INSERT INTO articles VALUES (21, 1, 'arcading', 5890); INSERT INTO articles VALUES (22, 2, 'antipope', 2728); INSERT INTO articles VALUES (23, 3, 'abhorring', 6799); INSERT INTO articles VALUES (24, 4, 'audacious', 3637); INSERT INTO articles VALUES (25, 5, 'antehall', 7707); INSERT INTO articles VALUES (26, 6, 'abington', 4545); INSERT INTO articles VALUES (27, 7, 'arsenous', 8616); INSERT INTO articles VALUES (28, 8, 'aerophyte', 5454); INSERT INTO articles VALUES (29, 9, 'amateur', 9524); INSERT INTO articles VALUES (30, 10, 'andelee', 6363); INSERT INTO articles VALUES (31, 1, 'athwartships', 7271); INSERT INTO articles VALUES (32, 2, 'amazon', 11342); INSERT INTO articles VALUES (33, 3, 'autochrome', 8180); INSERT INTO articles VALUES (34, 4, 'amnestied', 12250); INSERT INTO articles VALUES (35, 5, 'aminate', 9089); INSERT INTO articles VALUES (36, 6, 'ablation', 13159); INSERT INTO articles VALUES (37, 7, 'archduchies', 9997); INSERT INTO articles VALUES (38, 8, 'anatine', 14067); INSERT INTO articles VALUES (39, 9, 'anchises', 10906); INSERT INTO articles VALUES (40, 10, 'attemper', 14976); INSERT INTO articles VALUES (41, 1, 'aznavour', 11814); INSERT INTO articles VALUES (42, 2, 'ausable', 15885); INSERT INTO articles VALUES (43, 3, 'affixal', 12723); INSERT INTO articles VALUES (44, 4, 'anteport', 16793); INSERT INTO articles VALUES (45, 5, 'afrasia', 864); INSERT INTO articles VALUES (46, 6, 'atlanta', 17702); INSERT INTO articles VALUES (47, 7, 'abeyance', 1772); INSERT INTO articles VALUES (48, 8, 'alkylic', 18610); INSERT INTO articles VALUES (49, 9, 'anyone', 2681); INSERT INTO articles VALUES (50, 10, 'anjanette', 19519); -- insert a single row for the test INSERT INTO 
articles_single_shard VALUES (50, 10, 'anjanette', 19519); -- zero-shard modifications should succeed UPDATE articles SET title = '' WHERE author_id = 1 AND author_id = 2; UPDATE articles SET title = '' WHERE 0 = 1; DELETE FROM articles WHERE author_id = 1 AND author_id = 2; -- single-shard tests -- test simple select for a single row SELECT * FROM articles WHERE author_id = 10 AND id = 50; -- get all titles by a single author SELECT title FROM articles WHERE author_id = 10; -- try ordering them by word count SELECT title, word_count FROM articles WHERE author_id = 10 ORDER BY word_count DESC NULLS LAST; -- look at last two articles by an author SELECT title, id FROM articles WHERE author_id = 5 ORDER BY id LIMIT 2; -- find all articles by two authors in same shard SELECT title, author_id FROM articles WHERE author_id = 7 OR author_id = 8 ORDER BY author_id ASC, id; -- add in some grouping expressions SELECT author_id, sum(word_count) AS corpus_size FROM articles WHERE author_id = 1 OR author_id = 2 OR author_id = 8 OR author_id = 10 GROUP BY author_id HAVING sum(word_count) > 40000 ORDER BY sum(word_count) DESC; -- UNION/INTERSECT queries are unsupported if on multiple shards SELECT * FROM articles WHERE author_id = 10 UNION SELECT * FROM articles WHERE author_id = 2; -- queries using CTEs are unsupported WITH long_names AS ( SELECT id FROM authors WHERE char_length(name) > 15 ) SELECT title FROM articles; -- queries which involve functions in FROM clause are unsupported. SELECT * FROM articles, position('om' in 'Thomas'); -- subqueries are not supported in WHERE clause in Citus SELECT * FROM articles WHERE author_id IN (SELECT id FROM authors WHERE name LIKE '%a'); -- subqueries are supported in FROM clause SELECT articles.id,test.word_count FROM articles, (SELECT id, word_count FROM articles) AS test WHERE test.id = articles.id ORDER BY articles.id; -- subqueries are not supported in SELECT clause SELECT a.title AS name, (SELECT a2.id FROM articles_single_shard a2 WHERE a.id = a2.id LIMIT 1) AS special_price FROM articles a; -- joins are not supported between local and distributed tables SELECT title, authors.name FROM authors, articles WHERE authors.id = articles.author_id; -- inner joins are not supported (I think) SELECT * FROM (articles INNER JOIN authors ON articles.id = authors.id); -- test use of EXECUTE statements within plpgsql DO $sharded_execute$ BEGIN EXECUTE 'SELECT COUNT(*) FROM articles ' || 'WHERE author_id = $1 AND author_id = $2' USING 1, 2; END $sharded_execute$; -- test use of bare SQL within plpgsql DO $sharded_sql$ BEGIN SELECT COUNT(*) FROM articles WHERE author_id = 1 AND author_id = 2; END $sharded_sql$; -- test cross-shard queries SELECT COUNT(*) FROM articles; -- test with empty target list SELECT FROM articles; SELECT FROM articles WHERE author_id = 3737; SELECT FROM articles WHERE word_count = 65500; -- having queries supported in Citus SELECT author_id, sum(word_count) AS corpus_size FROM articles GROUP BY author_id HAVING sum(word_count) > 25000 ORDER BY sum(word_count) DESC LIMIT 5; SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 ORDER BY author_id; SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 AND author_id < 5 ORDER BY author_id; SELECT author_id FROM articles GROUP BY author_id HAVING sum(word_count) > 50000 OR author_id < 5 ORDER BY author_id; SELECT author_id FROM articles GROUP BY author_id HAVING author_id <= 2 OR author_id = 8 ORDER BY author_id; SELECT o_orderstatus, count(*), 
avg(o_totalprice) FROM orders GROUP BY o_orderstatus HAVING count(*) > 1450 OR avg(o_totalprice) > 150000 ORDER BY o_orderstatus; SELECT o_orderstatus, sum(l_linenumber), avg(l_linenumber) FROM lineitem, orders WHERE l_orderkey = o_orderkey AND l_orderkey > 9030 GROUP BY o_orderstatus HAVING sum(l_linenumber) > 1000 ORDER BY o_orderstatus; -- now, test the cases where Citus do or do not need to create -- the master queries SET citus.large_table_shard_count TO 2; SET client_min_messages TO 'DEBUG2'; SET citus.task_executor_type TO 'real-time'; -- start with the simple lookup query SELECT * FROM articles WHERE author_id = 1; -- below query hits a single shard, so no need to create the master query SELECT * FROM articles WHERE author_id = 1 OR author_id = 17; -- below query hits two shards, so needs to create the master query SELECT * FROM articles WHERE author_id = 1 OR author_id = 18; -- rename the output columns on a no master query case SELECT id as article_id, word_count * id as random_value FROM articles WHERE author_id = 1; -- we can push down co-located joins to a single worker without the -- master query being required for only the same tables SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles a, articles b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; -- now show that JOINs with multiple tables are not router executable -- they are executed by real-time executor SELECT a.author_id as first_author, b.word_count as second_word_count FROM articles a, articles_single_shard b WHERE a.author_id = 10 and a.author_id = b.author_id LIMIT 3; -- do not create the master query for LIMIT on a single shard SELECT SELECT * FROM articles WHERE author_id = 1 LIMIT 2; -- This query hits a single shard. So GROUP BY can be -- pushed down to the workers directly. This query is -- equivalent to SELECT DISTINCT on a single shard. SELECT id FROM articles WHERE author_id = 1 GROUP BY id ORDER BY id; -- copying from a single shard table does not require the master query COPY articles_single_shard TO stdout; -- error out for queries with aggregates SELECT avg(word_count) FROM articles WHERE author_id = 2; -- max, min, sum, count is somehow implemented -- differently in distributed planning SELECT max(word_count) as max, min(word_count) as min, sum(word_count) as sum, count(word_count) as cnt FROM articles WHERE author_id = 2; -- error out for queries with repartition jobs SELECT * FROM articles a, articles b WHERE a.id = b.id AND a.author_id = 1; -- system columns from shard tables can be queried and retrieved SELECT count(*) FROM ( SELECT tableoid, ctid, cmin, cmax, xmin, xmax FROM articles WHERE tableoid IS NOT NULL OR ctid IS NOT NULL OR cmin IS NOT NULL OR cmax IS NOT NULL OR xmin IS NOT NULL OR xmax IS NOT NULL ) x; SET client_min_messages to 'NOTICE'; citus-7.0.3/src/test/regress/sql/multi_single_relation_subquery.sql000066400000000000000000000074741317107136600257600ustar00rootroot00000000000000-- -- MULTI_SINGLE_RELATION_SUBQUERY -- -- This test checks that we are able to run selected set of distributed SQL subqueries. 
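-- Editor's note: the query below is an illustrative sketch added by the editor,
-- not part of the original file. It shows the basic shape exercised here (an
-- outer aggregate over a grouped subquery on a single distributed relation),
-- using the lineitem table loaded earlier in the schedule.
select avg(number_sum)
from (select l_suppkey, sum(l_linenumber) as number_sum
      from lineitem
      group by l_suppkey) as per_supplier;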
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 860000; SET citus.task_executor_type TO 'task-tracker'; select number_sum, count(*) as total, avg(total_count) avg_count from (select l_suppkey, l_linestatus, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem group by l_suppkey, l_linestatus) as distributed_table where number_sum >= 10 group by number_sum order by total desc, number_sum desc limit 10; -- same query above, just replace outer where clause with inner having clause select number_sum, count(*) as total, avg(total_count) avg_count from (select l_suppkey, l_linestatus, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem group by l_suppkey, l_linestatus having sum(l_linenumber) >= 10) as distributed_table group by number_sum order by total desc, number_sum desc limit 10; select (l_suppkey / 100) as suppkey_bin, avg(total_count) avg_count from (select l_suppkey, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem group by l_suppkey, l_linestatus) as distributed_table group by suppkey_bin order by avg_count desc limit 20; select total, avg(avg_count) as total_avg_count from (select number_sum, count(*) as total, avg(total_count) avg_count from (select l_suppkey, sum(l_linenumber) as number_sum, count(*) as total_count from lineitem where l_partkey > 100 and l_quantity > 2 and l_orderkey < 10000 group by l_suppkey) as distributed_table where number_sum >= 10 group by number_sum) as distributed_table_2 group by total order by total; -- Check that we support subquery even though group by clause is an expression -- and it is not referred in the target list. select avg(count) from (select l_suppkey, count(*) as count from lineitem group by (l_orderkey/4)::int, l_suppkey ) as distributed_table; -- Check that we don't support subqueries with limit. select l_suppkey, sum(suppkey_count) as total_suppkey_count from (select l_suppkey, count(*) as suppkey_count from lineitem group by l_suppkey order by l_suppkey limit 100) as distributed_table group by l_suppkey; -- Check that we don't support subqueries without aggregates. select rounded_tax from (select round(l_tax) as rounded_tax from lineitem group by l_tax) as distributed_table; -- Check that we support subqueries with count(distinct). select avg(different_shipment_days) from (select count(distinct l_shipdate) as different_shipment_days from lineitem group by l_partkey) as distributed_table; select avg(different_shipment_days) from (select count(distinct l_shipdate) as different_shipment_days from lineitem group by l_partkey having count(distinct l_shipdate) >= 2) as distributed_table; -- Check that if subquery is pulled, we don't error and run query properly. SELECT max(l_suppkey) FROM ( SELECT l_suppkey FROM ( SELECT l_suppkey, count(*) FROM lineitem WHERE l_orderkey < 20000 GROUP BY l_suppkey) z ) y; citus-7.0.3/src/test/regress/sql/multi_size_queries.sql000066400000000000000000000040301317107136600233330ustar00rootroot00000000000000-- -- MULTI_SIZE_QUERIES -- -- Test checks whether size of distributed tables can be obtained with citus_table_size. -- To find the relation size and total relation size citus_relation_size and -- citus_total_relation_size are also tested. 
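-- Editor's note: illustrative sketch, not part of the original file. The mapping
-- below to the built-in PostgreSQL size functions is the editor's assumption about
-- what each UDF reports, summed over all shards of the distributed table:
--   citus_relation_size()       is assumed to parallel pg_relation_size()
--   citus_table_size()          is assumed to parallel pg_table_size()
--   citus_total_relation_size() is assumed to parallel pg_total_relation_size()
SELECT citus_relation_size('lineitem_hash_part'),
       citus_table_size('lineitem_hash_part'),
       citus_total_relation_size('lineitem_hash_part');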
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1390000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1390000; -- Tests with invalid relation IDs SELECT citus_table_size(1); SELECT citus_relation_size(1); SELECT citus_total_relation_size(1); -- Tests with non-distributed table CREATE TABLE non_distributed_table (x int); SELECT citus_table_size('non_distributed_table'); SELECT citus_relation_size('non_distributed_table'); SELECT citus_total_relation_size('non_distributed_table'); DROP TABLE non_distributed_table; -- Tests on distributed table with replication factor > 1 SELECT citus_table_size('lineitem_hash_part'); SELECT citus_relation_size('lineitem_hash_part'); SELECT citus_total_relation_size('lineitem_hash_part'); VACUUM (FULL) customer_copy_hash; -- Tests on distributed tables with streaming replication. SELECT citus_table_size('customer_copy_hash'); SELECT citus_relation_size('customer_copy_hash'); SELECT citus_total_relation_size('customer_copy_hash'); CREATE INDEX index_1 on customer_copy_hash(c_custkey); VACUUM (FULL) customer_copy_hash; -- Tests on distributed table with index. SELECT citus_table_size('customer_copy_hash'); SELECT citus_relation_size('customer_copy_hash'); SELECT citus_total_relation_size('customer_copy_hash'); -- Tests on reference table VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); SELECT citus_relation_size('supplier'); SELECT citus_total_relation_size('supplier'); CREATE INDEX index_2 on supplier(s_suppkey); VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); SELECT citus_relation_size('supplier'); SELECT citus_total_relation_size('supplier'); -- Test inside the transaction BEGIN; ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL; select citus_table_size('supplier'); END; DROP INDEX index_1; DROP INDEX index_2; citus-7.0.3/src/test/regress/sql/multi_sql_function.sql000066400000000000000000000113521317107136600233350ustar00rootroot00000000000000-- -- MULTI_SQL_FUNCTION -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1230000; CREATE FUNCTION sql_test_no_1() RETURNS bigint AS ' SELECT count(*) FROM orders; ' LANGUAGE SQL; CREATE FUNCTION sql_test_no_2() RETURNS bigint AS ' SELECT count(*) FROM orders, lineitem WHERE o_orderkey = l_orderkey; ' LANGUAGE SQL; CREATE FUNCTION sql_test_no_3() RETURNS bigint AS ' SELECT count(*) FROM orders, customer WHERE o_custkey = c_custkey; ' LANGUAGE SQL; CREATE FUNCTION sql_test_no_4() RETURNS bigint AS ' SELECT count(*) FROM orders, customer, lineitem WHERE o_custkey = c_custkey AND o_orderkey = l_orderkey; ' LANGUAGE SQL; SET citus.task_executor_type TO 'task-tracker'; SET client_min_messages TO INFO; -- now, run plain SQL functions SELECT sql_test_no_1(); SELECT sql_test_no_2(); SELECT sql_test_no_3(); SELECT sql_test_no_4(); -- run the tests which do not require re-partition -- with real-time executor SET citus.task_executor_type TO 'real-time'; -- now, run plain SQL functions SELECT sql_test_no_1(); SELECT sql_test_no_2(); -- test router executor parameterized sql functions CREATE TABLE temp_table ( key int, value int ); SELECT master_create_distributed_table('temp_table','key','hash'); SELECT master_create_worker_shards('temp_table',4,1); CREATE FUNCTION no_parameter_insert_sql() RETURNS void AS $$ INSERT INTO temp_table (key) VALUES (0); $$ LANGUAGE SQL; -- execute 6 times SELECT no_parameter_insert_sql(); SELECT no_parameter_insert_sql(); SELECT no_parameter_insert_sql(); SELECT no_parameter_insert_sql(); SELECT no_parameter_insert_sql(); SELECT 
no_parameter_insert_sql(); CREATE FUNCTION non_partition_parameter_insert_sql(int) RETURNS void AS $$ INSERT INTO temp_table (key, value) VALUES (0, $1); $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_insert_sql(10); SELECT non_partition_parameter_insert_sql(20); SELECT non_partition_parameter_insert_sql(30); SELECT non_partition_parameter_insert_sql(40); SELECT non_partition_parameter_insert_sql(50); SELECT non_partition_parameter_insert_sql(60); -- check inserted values SELECT * FROM temp_table ORDER BY key, value; -- check updates CREATE FUNCTION non_partition_parameter_update_sql(int, int) RETURNS void AS $$ UPDATE temp_table SET value = $2 WHERE key = 0 AND value = $1; $$ LANGUAGE SQL; -- execute 6 times SELECT non_partition_parameter_update_sql(10, 12); SELECT non_partition_parameter_update_sql(20, 22); SELECT non_partition_parameter_update_sql(30, 32); SELECT non_partition_parameter_update_sql(40, 42); SELECT non_partition_parameter_update_sql(50, 52); SELECT non_partition_parameter_update_sql(60, 62); -- check after updates SELECT * FROM temp_table ORDER BY key, value; -- check deletes CREATE FUNCTION non_partition_parameter_delete_sql(int) RETURNS void AS $$ DELETE FROM temp_table WHERE key = 0 AND value = $1; $$ LANGUAGE SQL; -- execute 6 times to trigger prepared statement usage SELECT non_partition_parameter_delete_sql(12); SELECT non_partition_parameter_delete_sql(22); SELECT non_partition_parameter_delete_sql(32); SELECT non_partition_parameter_delete_sql(42); SELECT non_partition_parameter_delete_sql(52); SELECT non_partition_parameter_delete_sql(62); -- check after deletes SELECT * FROM temp_table ORDER BY key, value; -- test running parameterized SQL function CREATE TABLE test_parameterized_sql(id integer, org_id integer); select create_distributed_table('test_parameterized_sql','org_id'); CREATE OR REPLACE FUNCTION test_parameterized_sql_function(org_id_val integer) RETURNS TABLE (a bigint) AS $$ SELECT count(*) AS count_val from test_parameterized_sql where org_id = org_id_val; $$ LANGUAGE SQL STABLE; CREATE OR REPLACE FUNCTION test_parameterized_sql_function_in_subquery_where(org_id_val integer) RETURNS TABLE (a bigint) AS $$ SELECT count(*) AS count_val from test_parameterized_sql as t1 where org_id IN (SELECT org_id FROM test_parameterized_sql as t2 WHERE t2.org_id = t1.org_id AND org_id = org_id_val); $$ LANGUAGE SQL STABLE; INSERT INTO test_parameterized_sql VALUES(1, 1); -- all of them should fail SELECT * FROM test_parameterized_sql_function(1); SELECT test_parameterized_sql_function(1); SELECT test_parameterized_sql_function_in_subquery_where(1); DROP TABLE temp_table; DROP TABLE test_parameterized_sql; -- clean-up functions DROP FUNCTION sql_test_no_1(); DROP FUNCTION sql_test_no_2(); DROP FUNCTION sql_test_no_3(); DROP FUNCTION sql_test_no_4(); DROP FUNCTION no_parameter_insert_sql(); DROP FUNCTION non_partition_parameter_insert_sql(int); DROP FUNCTION non_partition_parameter_update_sql(int, int); DROP FUNCTION non_partition_parameter_delete_sql(int); DROP FUNCTION test_parameterized_sql_function(int); DROP FUNCTION test_parameterized_sql_function_in_subquery_where(int); citus-7.0.3/src/test/regress/sql/multi_subquery.sql000066400000000000000000000270441317107136600225150ustar00rootroot00000000000000-- -- MULTI_SUBQUERY -- -- no need to set shardid sequence given that we're not creating any shards SET citus.enable_router_execution TO FALSE; -- Check that we error out if shard min/max values are not exactly same. 
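-- Editor's note: illustrative addition, not part of the original file. The
-- min/max mismatch being tested can be inspected directly in the shard metadata,
-- via the same pg_dist_shard columns that the UPDATE further below modifies:
SELECT logicalrelid, shardid, shardminvalue, shardmaxvalue
FROM pg_dist_shard
WHERE logicalrelid IN ('lineitem_subquery'::regclass, 'orders_subquery'::regclass)
ORDER BY shardid;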
SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; -- Update metadata in order to make all shards equal -- note that the table is created on multi_insert_select_create_table.sql UPDATE pg_dist_shard SET shardmaxvalue = '14947' WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'orders_subquery'::regclass ORDER BY shardid DESC LIMIT 1); -- If group by is not on partition column then we error out from single table -- repartition code path SELECT avg(order_count) FROM (SELECT l_suppkey, count(*) AS order_count FROM lineitem_subquery GROUP BY l_suppkey) AS order_counts; -- Check that we error out if join is not on partition columns. SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery GROUP BY l_orderkey) AS unit_prices; SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_custkey GROUP BY l_orderkey) AS unit_prices; -- Check that we error out if there is non relation subqueries SELECT count(*) FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION ALL (SELECT 1::bigint) ) b; -- Check that we error out if queries in union do not include partition columns. SELECT count(*) FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION (SELECT l_partkey FROM lineitem_subquery) ) b; -- Check that we run union queries if partition column is selected. SELECT count(*) FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION (SELECT l_orderkey FROM lineitem_subquery) ) b; -- Check that we error out if inner query has Limit but subquery_pushdown is not set SELECT avg(o_totalprice/l_quantity) FROM (SELECT l_orderkey, l_quantity FROM lineitem_subquery ORDER BY l_quantity LIMIT 10 ) lineitem_quantities JOIN LATERAL (SELECT o_totalprice FROM orders_subquery WHERE lineitem_quantities.l_orderkey = o_orderkey) orders_price ON true; -- Limit is only supported when subquery_pushdown is set -- Check that we error out if inner query has limit but outer query has not. SET citus.subquery_pushdown to ON; SELECT avg(o_totalprice/l_quantity) FROM (SELECT l_orderkey, l_quantity FROM lineitem_subquery ORDER BY l_quantity LIMIT 10 ) lineitem_quantities JOIN LATERAL (SELECT o_totalprice FROM orders_subquery WHERE lineitem_quantities.l_orderkey = o_orderkey) orders_price ON true; -- reset the flag for next query SET citus.subquery_pushdown to OFF; -- Check that we error out if the outermost query is a distinct clause. SELECT count(DISTINCT a) FROM ( SELECT count(*) a FROM lineitem_subquery GROUP BY l_orderkey ) z; -- Check supported subquery types. SELECT o_custkey, sum(order_count) as total_order_count FROM (SELECT o_orderkey, o_custkey, count(*) AS order_count FROM orders_subquery WHERE o_orderkey > 0 AND o_orderkey < 12000 GROUP BY o_orderkey, o_custkey) AS order_counts GROUP BY o_custkey ORDER BY total_order_count DESC, o_custkey ASC LIMIT 10; SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices WHERE unit_price > 1000 AND unit_price < 10000; -- Check that if subquery is pulled, we don't error and run query properly. 
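-- Editor's note: illustrative addition, not part of the original file. "Pulled"
-- is read here as the standard planner flattening a simple sub-select into the
-- outer query before distributed planning; a minimal query of that flattenable
-- shape, on the same table, would be:
SELECT count(*) FROM (SELECT l_orderkey FROM lineitem_subquery) pulled_up WHERE l_orderkey = 1;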
SELECT count(*) FROM ( SELECT l_orderkey FROM ( (SELECT l_orderkey FROM lineitem_subquery) UNION (SELECT l_orderkey FROM lineitem_subquery) ) a WHERE l_orderkey = 1 ) b; SELECT count(*) FROM ( SELECT * FROM ( (SELECT * FROM lineitem_subquery) UNION (SELECT * FROM lineitem_subquery) ) a WHERE l_orderkey = 1 ) b; SELECT max(l_orderkey) FROM ( SELECT l_orderkey FROM ( SELECT l_orderkey FROM lineitem_subquery WHERE l_orderkey < 20000 GROUP BY l_orderkey ) z ) y; -- Add one more shard to one relation, then test if we error out because of different -- shard counts for joining relations. SELECT master_create_empty_shard('orders_subquery') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 15000, shardmaxvalue = 20000 WHERE shardid = :new_shard_id; SELECT avg(unit_price) FROM (SELECT l_orderkey, avg(o_totalprice / l_quantity) AS unit_price FROM lineitem_subquery, orders_subquery WHERE l_orderkey = o_orderkey GROUP BY l_orderkey) AS unit_prices; -- Check that we can prune shards in subqueries with VARCHAR partition columns CREATE TABLE subquery_pruning_varchar_test_table ( a varchar, b int ); SELECT master_create_distributed_table('subquery_pruning_varchar_test_table', 'a', 'hash'); SELECT master_create_worker_shards('subquery_pruning_varchar_test_table', 4, 1); SET client_min_messages TO DEBUG2; SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE a = 'onder' GROUP BY a) AS foo; SELECT * FROM (SELECT count(*) FROM subquery_pruning_varchar_test_table WHERE 'eren' = a GROUP BY a) AS foo; SET client_min_messages TO NOTICE; -- test subquery join on VARCHAR partition column SELECT * FROM (SELECT a_inner AS a FROM (SELECT subquery_pruning_varchar_test_table.a AS a_inner FROM subquery_pruning_varchar_test_table GROUP BY subquery_pruning_varchar_test_table.a HAVING count(subquery_pruning_varchar_test_table.a) < 3) AS f1, (SELECT subquery_pruning_varchar_test_table.a FROM subquery_pruning_varchar_test_table GROUP BY subquery_pruning_varchar_test_table.a HAVING sum(coalesce(subquery_pruning_varchar_test_table.b,0)) > 20.0) AS f2 WHERE f1.a_inner = f2.a GROUP BY a_inner) AS foo; DROP TABLE subquery_pruning_varchar_test_table; -- Simple join subquery pushdown SELECT avg(array_length(events, 1)) AS event_average FROM (SELECT tenant_id, user_id, array_agg(event_type ORDER BY event_time) AS events FROM (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, event_type, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type IN ('click', 'submit', 'pay')) AS subquery GROUP BY tenant_id, user_id) AS subquery; -- Union and left join subquery pushdown SELECT avg(array_length(events, 1)) AS event_average, hasdone FROM (SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(hasdone, 'Has not done paying') AS hasdone FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id) as composite_id, 
'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT DISTINCT ON ((composite_id).tenant_id, (composite_id).user_id) composite_id, (composite_id).tenant_id, (composite_id).user_id, 'Has done paying'::TEXT AS hasdone FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay') AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, hasdone) AS subquery_top GROUP BY hasdone ORDER BY event_average DESC; -- Union, left join and having subquery pushdown SELECT avg(array_length(events, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.tenant_id, subquery_1.user_id, array_agg(event ORDER BY event_time) AS events, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>1'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'click') UNION (SELECT (users.composite_id).tenant_id, (users.composite_id).user_id, (users.composite_id), 'action=>2'AS event, events.event_time FROM users, events WHERE (users.composite_id) = (events.composite_id) AND users.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND users.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'submit') ) AS subquery_1 LEFT JOIN (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, COUNT(*) AS count_pay FROM events WHERE events.composite_id >= '(1, -9223372036854775808)'::user_composite_type AND events.composite_id <= '(1, 9223372036854775807)'::user_composite_type AND event_type = 'pay' GROUP BY composite_id HAVING COUNT(*) > 2) AS subquery_2 ON subquery_1.composite_id = subquery_2.composite_id GROUP BY subquery_1.tenant_id, subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events) > 0 GROUP BY count_pay ORDER BY count_pay; -- Lateral join subquery pushdown -- set subquery_pushdown since there is limit in the query SET citus.subquery_pushdown to ON; SELECT tenant_id, user_id, user_lastseen, event_array FROM (SELECT tenant_id, user_id, max(lastseen) as user_lastseen, array_agg(event_type ORDER BY event_time) AS event_array FROM (SELECT (composite_id).tenant_id, (composite_id).user_id, composite_id, lastseen FROM users WHERE composite_id >= '(1, -9223372036854775808)'::user_composite_type AND composite_id <= '(1, 9223372036854775807)'::user_composite_type ORDER BY lastseen DESC LIMIT 10 ) AS subquery_top LEFT JOIN LATERAL (SELECT event_type, event_time FROM events WHERE (composite_id) = subquery_top.composite_id ORDER BY event_time DESC LIMIT 99) AS subquery_lateral ON true GROUP BY tenant_id, user_id ) AS shard_union ORDER BY user_lastseen DESC LIMIT 10; -- cleanup the tables and the type & functions -- also set the min messages to WARNING to skip -- CASCADE NOTICE messagez SET client_min_messages TO WARNING; DROP TABLE users, events; SELECT run_command_on_master_and_workers($f$ DROP TYPE user_composite_type 
CASCADE; $f$); -- createed in multi_behavioral_analytics_create_table DROP FUNCTION run_command_on_master_and_workers(p_sql text); SET client_min_messages TO DEFAULT; SET citus.subquery_pushdown to OFF; SET citus.enable_router_execution TO 'true'; citus-7.0.3/src/test/regress/sql/multi_subquery_behavioral_analytics.sql000066400000000000000000001261271317107136600267620ustar00rootroot00000000000000-- -- multi subquery behavioral analytics queries aims to expand existing subquery pushdown -- regression tests to cover more cases -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- --- We don't need shard id sequence here given that we're not creating any shards, so not writing it at all -- The following line is intended to force Citus to NOT use router planner for the tests in this -- file. The motivation for doing this is to make sure that single-task queries can be planned -- by non-router code-paths. Thus, this flag should NOT be used in production. Otherwise, the actual -- router queries would fail. SET citus.enable_router_execution TO FALSE; ------------------------------------ -- Vanilla funnel query ------------------------------------ SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; ------------------------------------ -- Funnel grouped by whether or not a user has done an event -- This has multiple subqueries joinin at the top level ------------------------------------ SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; -- same query but multiple joins are one level below, returns count of row instead of actual rows SELECT count(*) FROM ( SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( ( SELECT u.user_id, 'step=>1'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102) ) UNION ( SELECT u.user_id, 'step=>2'::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (103, 104, 105) ) ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM 
events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; -- Same queries written without unions SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( SELECT u.user_id, CASE WHEN e.event_type IN (100, 101, 102) THEN 'step=>1'::text else 'step==>2'::text END AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102, 103, 104, 105) GROUP BY 1,2,3 ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id; -- same query but multiple joins are one level below, returns count of row instead of actual rows SELECT count(*) FROM ( SELECT user_id, sum(array_length(events_table, 1)), length(hasdone_event), hasdone_event FROM ( SELECT t1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(hasdone_event, 'Has not done event') AS hasdone_event FROM ( SELECT u.user_id, CASE WHEN e.event_type in (100, 101, 102) then 'step=>1'::text else 'step==>2'::text END AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id >= 10 AND u.user_id <= 25 AND e.event_type IN (100, 101, 102, 103, 104, 105) GROUP BY 1,2,3 ) t1 LEFT JOIN ( SELECT DISTINCT user_id, 'Has done event'::TEXT AS hasdone_event FROM events_table AS e WHERE e.user_id >= 10 AND e.user_id <= 25 AND e.event_type IN (106, 107, 108) ) t2 ON (t1.user_id = t2.user_id) GROUP BY t1.user_id, hasdone_event ) t GROUP BY user_id, hasdone_event ORDER BY user_id) u; ------------------------------------ -- Funnel, grouped by the number of times a user has done an event ------------------------------------ SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY event_average DESC, count_pay DESC, user_id DESC; SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY 
time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( (SELECT users_table.user_id, 'action=>1'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2'AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; -- Same queries rewritten without using unions SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( SELECT users_table.user_id, CASE WHEN events_table.event_type > 10 AND events_table.event_type < 12 THEN 'action=>1' ELSE 'action=>2' END AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND (events_table.event_type > 10 AND events_table.event_type < 12 OR events_table.event_type > 12 AND events_table.event_type < 14) GROUP BY 1, 2, 3 ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id ORDER BY event_average DESC, count_pay DESC, user_id DESC; SELECT user_id, avg(array_length(events_table, 1)) AS event_average, count_pay FROM ( SELECT subquery_1.user_id, array_agg(event ORDER BY time) AS events_table, COALESCE(count_pay, 0) AS count_pay FROM ( SELECT users_table.user_id, CASE WHEN events_table.event_type > 10 AND events_table.event_type < 12 THEN 'action=>1' ELSE 'action=>2' END AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND (events_table.event_type > 10 AND events_table.event_type < 12 OR events_table.event_type > 12 AND events_table.event_type < 14) GROUP BY 1, 2, 3 ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top WHERE array_ndims(events_table) > 0 GROUP BY count_pay, user_id HAVING avg(array_length(events_table, 1)) > 0 ORDER BY event_average DESC, count_pay DESC, user_id DESC; ------------------------------------ -- Most recently seen users_table events_table 
------------------------------------ -- Note that we don't use ORDER BY/LIMIT yet ------------------------------------ SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; ------------------------------------ -- Count the number of distinct users_table who are in segment X and Y and Z ------------------------------------ SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60) GROUP BY user_id ORDER BY user_id DESC LIMIT 5; ------------------------------------ -- Find customers who have done X, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 101 AND event_type < 110 AND value_3 > 100 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 5; ------------------------------------ -- Customers who haven’t done X, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND user_id = users_table.user_id) ORDER BY 1 DESC, 2 DESC LIMIT 3; ------------------------------------ -- Customers who have done X and Y, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, sum(value_2) as cnt FROM users_table WHERE value_1 > 100 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type != 100 AND value_3 > 100 AND user_id = users_table.user_id) AND EXISTS (SELECT user_id FROM events_table WHERE event_type = 101 AND value_3 > 100 AND user_id = users_table.user_id) GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 5; ------------------------------------ -- Customers who have done X and haven’t done Y, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, value_2 FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id = users_table.user_id) ORDER BY 2 DESC, 1 DESC LIMIT 4; ------------------------------------ -- Customers who have done X more than 2 times, and satisfy other customer specific criteria ------------------------------------ SELECT user_id, avg(value_2) FROM users_table WHERE value_1 > 100 AND value_1 < 124 AND value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type < 124 AND value_3 > 100 AND user_id = users_table.user_id GROUP BY user_id HAVING Count(*) > 2) GROUP BY user_id ORDER BY 1 DESC, 2 
DESC LIMIT 5; ------------------------------------ -- Find me all users_table who logged in more than once ------------------------------------ SELECT user_id, value_1 from ( SELECT user_id, value_1 From users_table WHERE value_2 > 100 and user_id = 15 GROUP BY value_1, user_id HAVING count(*) > 1 ) AS a ORDER BY user_id ASC, value_1 ASC; -- same query with additional filter to make it not router plannable SELECT user_id, value_1 from ( SELECT user_id, value_1 From users_table WHERE value_2 > 100 and (user_id = 15 OR user_id = 16) GROUP BY value_1, user_id HAVING count(*) > 1 ) AS a ORDER BY user_id ASC, value_1 ASC; ------------------------------------ -- Find me all users_table who has done some event and has filters ------------------------------------ SELECT user_id FROM events_table WHERE event_type = 16 AND value_2 > 50 AND user_id IN (SELECT user_id FROM users_table WHERE value_1 = 15 AND value_2 > 25 ) ORDER BY 1; ------------------------------------ -- Which events_table did people who has done some specific events_table ------------------------------------ SELECT user_id, event_type FROM events_table WHERE user_id in (SELECT user_id from events_table WHERE event_type > 500 and event_type < 505) GROUP BY user_id, event_type ORDER BY 2 DESC, 1 LIMIT 3; ------------------------------------ -- Find me all the users_table who has done some event more than three times ------------------------------------ SELECT user_id FROM ( SELECT user_id FROM events_table WHERE event_type = 901 GROUP BY user_id HAVING count(*) > 3 ) AS a ORDER BY user_id; ------------------------------------ -- Find my assets that have the highest probability and fetch their metadata ------------------------------------ CREATE TEMP TABLE assets AS SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50; -- get some statistics from the aggregated results to ensure the results are correct SELECT count(*), count(DISTINCT user_id), avg(user_id) FROM assets; DROP TABLE assets; -- count number of distinct users who have value_1 equal to 5 or 13 but not 3 -- original query that fails SELECT count(*) FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') AND user_id NOT IN (select user_id from users_table where value_1 = '3') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as foo; -- previous push down query SELECT subquery_count FROM (SELECT count(*) as subquery_count FROM (SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2) as a LEFT JOIN (SELECT user_id FROM users_table WHERE (value_1 = '3') GROUP BY user_id) as b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id ) AS inner_subquery; -- new pushdown query without single range table entry at top requirement SELECT count(*) as subquery_count FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as a LEFT JOIN ( SELECT user_id FROM users_table WHERE (value_1 = '3') GROUP BY user_id) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; -- most queries below has limit clause -- therefore setting subquery_pushdown flag for all SET 
citus.subquery_pushdown to ON; -- multi-subquery-join -- The first query has filters on partion column to make it router plannable -- but it is processed by logical planner since we disabled router execution SELECT e1.user_id, sum(view_homepage) AS viewed_homepage, sum(use_demo) AS use_demo, sum(enter_credit_card) AS entered_credit_card, sum(submit_card_info) as submit_card_info, sum(see_bought_screen) as see_bought_screen FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE user_id = 1 and event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND user_id = 1 and event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time LIMIT 1 ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND user_id = 1 and event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time LIMIT 1 ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND user_id = 1 and event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time LIMIT 1 ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND user_id = 1 and event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time LIMIT 1 ) e5 ON true WHERE e1.user_id = 1 GROUP BY e1.user_id LIMIT 1; -- Same query without all limitations SELECT e1.user_id, sum(view_homepage) AS viewed_homepage, sum(use_demo) AS use_demo, sum(enter_credit_card) AS entered_credit_card, sum(submit_card_info) as submit_card_info, sum(see_bought_screen) as see_bought_screen FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true GROUP BY e1.user_id ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; -- Same query without all limitations but uses having() to show only those submitted their credit card info SELECT e1.user_id, sum(view_homepage) AS viewed_homepage, sum(use_demo) AS use_demo, sum(enter_credit_card) AS entered_credit_card, sum(submit_card_info) as submit_card_info, sum(see_bought_screen) as see_bought_screen FROM ( -- Get the first time each user viewed the homepage. 
SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ORDER BY 6 DESC NULLS LAST, 5 DESC NULLS LAST, 4 DESC NULLS LAST, 3 DESC NULLS LAST, 2 DESC NULLS LAST, 1 LIMIT 15; -- Explain analyze on this query fails due to #756 -- avg expression used on order by SELECT a.user_id, avg(b.value_2) as subquery_avg FROM ( SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN ( SELECT user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3)) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; -- add having to the same query SELECT a.user_id, avg(b.value_2) as subquery_avg FROM ( SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN ( SELECT user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3)) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id HAVING sum(b.value_3) > 50000 ORDER BY avg(b.value_3), 2, 1 LIMIT 5; -- avg on the value_3 is not a resjunk SELECT a.user_id, avg(b.value_2) as subquery_avg, avg(b.value_3) FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN ( SELECT user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3) ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3) DESC, 2, 1 LIMIT 5; -- a powerful query structure that analyzes users/events -- using (relation JOIN subquery JOIN relation) SELECT u.user_id, sub.value_2, sub.value_3, COUNT(e2.user_id) counts FROM users_table u LEFT OUTER JOIN LATERAL (SELECT * FROM events_table e1 WHERE e1.user_id = u.user_id ORDER BY e1.value_3 DESC LIMIT 1 ) sub ON true LEFT OUTER JOIN events_table e2 ON e2.user_id = sub.user_id WHERE e2.value_2 > 10 AND e2.value_2 < 50 AND u.value_2 > 10 AND u.value_2 < 50 GROUP BY u.user_id, sub.value_2, sub.value_3 ORDER BY 4 DESC, 1 DESC, 2 ASC, 3 ASC LIMIT 10; -- distinct users joined with events SELECT avg(events_table.event_type) as avg_type, count(*) as users_count FROM events_table JOIN (SELECT DISTINCT user_id FROM users_table ) as distinct_users ON distinct_users.user_id = events_table.user_id GROUP BY distinct_users.user_id ORDER BY users_count desc, avg_type DESC LIMIT 5; -- reduce the data set, aggregate and join SELECT events_table.event_type, users_count.ct FROM events_table JOIN (SELECT distinct_users.user_id, count(1) as ct FROM (SELECT user_id FROM users_table ) as 
distinct_users GROUP BY distinct_users.user_id ) as users_count ON users_count.user_id = events_table.user_id ORDER BY users_count.ct desc, event_type DESC LIMIT 5; --- now, test (subquery JOIN subquery) SELECT n1.user_id, count_1, total_count FROM (SELECT user_id, count(1) as count_1 FROM users_table GROUP BY user_id ) n1 INNER JOIN ( SELECT user_id, count(1) as total_count FROM events_table GROUP BY user_id, event_type ) n2 ON (n2.user_id = n1.user_id) ORDER BY total_count DESC, count_1 DESC, 1 DESC LIMIT 10; SELECT a.user_id, avg(b.value_2) as subquery_avg FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN (SELECT DISTINCT ON (user_id) user_id, value_2, value_3 FROM users_table WHERE (value_1 > 3) ORDER BY 1,2,3 ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; -- distinct clause must include partition column -- when used in target list SELECT a.user_id, avg(b.value_2) as subquery_avg FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN (SELECT DISTINCT ON (value_2) value_2 , user_id, value_3 FROM users_table WHERE (value_1 > 3) ORDER BY 1,2,3 ) AS b USING (user_id) GROUP BY user_id; SELECT a.user_id, avg(b.value_2) as subquery_avg FROM (SELECT user_id FROM users_table WHERE (value_1 > 5) GROUP BY user_id HAVING count(distinct value_1) > 88 ) as a LEFT JOIN (SELECT DISTINCT ON (value_2, user_id) value_2 , user_id, value_3 FROM users_table WHERE (value_1 > 3) ORDER BY 1,2,3 ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NOT NULL GROUP BY a.user_id ORDER BY avg(b.value_3), 2, 1 LIMIT 5; SELECT user_id, event_type FROM (SELECT * FROM ( (SELECT event_type, user_id as a_user_id FROM events_table) AS a JOIN (SELECT ma.user_id AS user_id, ma.value_2 AS value_2, (GREATEST(coalesce((ma.value_3 * ma.value_2) / 20, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma WHERE (ma.value_2 > 100) ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS ma ON (a.a_user_id = ma.user_id) ) AS inner_sub ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS outer_sub ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; -- very similar query but produces different result due to -- ordering difference in the previous one's inner query SELECT user_id, event_type FROM (SELECT event_type, user_id as a_user_id FROM events_table) AS a JOIN (SELECT ma.user_id AS user_id, ma.value_2 AS value_2, (GREATEST(coalesce((ma.value_3 * ma.value_2) / 20, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma WHERE (ma.value_2 > 100) ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS ma ON (a.a_user_id = ma.user_id) ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; -- now they produce the same result when ordering fixed in 'outer_sub' SELECT user_id, event_type FROM (SELECT * FROM ( (SELECT event_type, user_id as a_user_id FROM events_table ) AS a JOIN (SELECT ma.user_id AS user_id, ma.value_2 AS value_2, (GREATEST(coalesce((ma.value_3 * ma.value_2) / 20, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma WHERE (ma.value_2 > 100) ORDER BY prob DESC, user_id DESC LIMIT 10 ) AS ma ON (a.a_user_id = ma.user_id) ) AS inner_sub ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10 ) AS outer_sub ORDER BY prob DESC, event_type DESC, user_id DESC LIMIT 10; -- this is one complex join query derived from a user's production query -- first declare the function on workers on master -- With array_index: 
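-- (functions are not propagated to worker nodes automatically in this Citus version, so the
-- helper is first created on every worker via run_command_on_workers() and then on the
-- coordinator with a plain CREATE FUNCTION, letting both the pushed-down shard queries and the
-- coordinator-side query call it; it is dropped the same way at the end of this block)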
SELECT * FROM run_command_on_workers('CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i FROM (SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) WHERE $1 [i] = $2 LIMIT 1; $$ LANGUAGE sql') ORDER BY 1,2; CREATE OR REPLACE FUNCTION array_index(ANYARRAY, ANYELEMENT) RETURNS INT AS $$ SELECT i FROM (SELECT generate_series(array_lower($1, 1), array_upper($1, 1))) g(i) WHERE $1 [i] = $2 LIMIT 1; $$ LANGUAGE sql; SELECT * FROM (SELECT * FROM ( (SELECT user_id AS user_id_e, event_type AS event_type_e FROM events_table ) AS ma_e JOIN (SELECT value_2, value_3, user_id FROM (SELECT * FROM ( (SELECT user_id_p AS user_id FROM (SELECT * FROM ( (SELECT user_id AS user_id_p FROM events_table WHERE (event_type IN (1,2,3,4,5)) ) AS ma_p JOIN (SELECT user_id AS user_id_a FROM users_table WHERE (value_2 % 5 = 1) ) AS a ON (a.user_id_a = ma_p.user_id_p) ) ) AS a_ma_p ) AS inner_filter_q JOIN (SELECT value_2, value_3, user_id AS user_id_ck FROM events_table WHERE event_type = ANY(ARRAY [10, 11, 12]) ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS ma_ck ON (ma_ck.user_id_ck = inner_filter_q.user_id) ) AS inner_sub_q ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10) AS inner_search_q ON (ma_e.user_id_e = inner_search_q.user_id) ) AS outer_inner_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10) AS outer_outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; -- top level select * is removed now there is -- a join at top level. 
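-- (without the wrapping SELECT *, the outermost query is itself a join of two subqueries rather
-- than a single subquery range table entry, which presumably exercises a slightly different path
-- in the subquery pushdown planner)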
SELECT * FROM ( (SELECT user_id AS user_id_e, event_type as event_type_e FROM events_table ) AS ma_e JOIN (SELECT value_2, value_3, user_id FROM (SELECT * FROM ( (SELECT user_id_p AS user_id FROM (SELECT * FROM ( (SELECT user_id AS user_id_p FROM events_table WHERE (event_type IN (1, 2, 3, 4, 5)) ) AS ma_p JOIN (SELECT user_id AS user_id_a FROM users_table WHERE (value_2 % 5 = 1) ) AS a ON (a.user_id_a = ma_p.user_id_p) ) ) AS a_ma_p ) AS inner_filter_q JOIN (SELECT value_2, value_3, user_id AS user_id_ck FROM events_table WHERE event_type = ANY(ARRAY [10, 11, 12]) ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS ma_ck ON (ma_ck.user_id_ck = inner_filter_q.user_id) ) AS inner_sub_q ORDER BY value_3 ASC, user_id_ck DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10 ) AS outer_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC LIMIT 10) AS inner_search_q ON (ma_e.user_id_e = inner_search_q.user_id) ) AS outer_inner_sub_q ORDER BY value_3 ASC, user_id DESC, array_index(ARRAY [1, 2, 3], (value_2 % 3)) ASC, event_type_e DESC LIMIT 10; -- drop created functions SELECT * FROM run_command_on_workers('DROP FUNCTION array_index(ANYARRAY, ANYELEMENT)') ORDER BY 1,2; DROP FUNCTION array_index(ANYARRAY, ANYELEMENT); -- a not supported query due to constant range table entry SELECT count(*) as subquery_count FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as a LEFT JOIN ( SELECT 1 as user_id ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; -- same with INNER JOIN SELECT count(*) as subquery_count FROM ( SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13') GROUP BY user_id HAVING count(distinct value_1) = 2 ) as a INNER JOIN ( SELECT 1 as user_id ) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; -- this is slightly different, we use RTE_VALUEs here SELECT Count(*) AS subquery_count FROM (SELECT user_id FROM users_table WHERE (value_1 = '5' OR value_1 = '13' ) GROUP BY user_id HAVING Count(DISTINCT value_1) = 2) AS a INNER JOIN (SELECT * FROM (VALUES (1, 'one'), (2, 'two'), (3, 'three')) AS t (user_id, letter)) AS b ON a.user_id = b.user_id WHERE b.user_id IS NULL GROUP BY a.user_id; -- same query without LIMIT/OFFSET returns 30 rows SET client_min_messages TO DEBUG1; -- now, lets use a simple expression on the LIMIT and explicit coercion on the OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT 3+3 OFFSET 5::smallint; -- now, lets use implicit coersion in LIMIT and a simple expressions on OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT '3' OFFSET 27+2; -- create a test function which is marked as volatile CREATE OR REPLACE FUNCTION volatile_func_test() RETURNS INT AS $$ SELECT 5; $$ LANGUAGE sql VOLATILE; -- Citus should be able to evalute functions/row comparisons on the 
LIMIT/OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT volatile_func_test() + (ROW(1,2,NULL) < ROW(1,3,0))::int OFFSET volatile_func_test() + volatile_func_test(); -- now, lets use expressions on both the LIMIT and OFFSET SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT (5 > 4)::int OFFSET CASE WHEN 5 != 5 THEN 27 WHEN 1 > 5 THEN 28 ELSE 29 END; -- we don't allow parameters on the LIMIT/OFFSET clauses PREPARE parametrized_limit AS SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT $1 OFFSET $2; EXECUTE parametrized_limit(3,3); PREPARE parametrized_offset AS SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND e.event_type IN (100, 101, 102) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1 LIMIT 3 OFFSET $1; EXECUTE parametrized_offset(3); SET client_min_messages TO DEFAULT; DROP FUNCTION volatile_func_test(); CREATE FUNCTION test_join_function_2(integer, integer) RETURNS bool AS 'select $1 > $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- we don't support joins via functions SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE test_join_function_2(u.user_id, e.user_id) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; -- note that the following query has joins on the partition keys -- however we fail to push down it due to the function call on the -- where clause. 
We probably need to relax that check SELECT users_table.user_id, users_table.value_1, prob FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < 50 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < 50 AND test_join_function_2(users_table.user_id, temp.user_id); DROP FUNCTION test_join_function_2(integer, integer); SET citus.enable_router_execution TO TRUE; SET citus.subquery_pushdown to OFF; citus-7.0.3/src/test/regress/sql/multi_subquery_complex_queries.sql000066400000000000000000002215411317107136600257770ustar00rootroot00000000000000-- -- multi subquery complex queries aims to expand existing subquery pushdown -- regression tests to cover more caeses -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000; SET citus.enable_router_execution TO FALSE; -- -- UNIONs and JOINs mixed -- SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- same query with target entries shuffled inside UNIONs SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 
29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported since events_subquery_2 doesn't have partition key on the target list -- within the shuffled target list SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" * 2 FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported since events_subquery_2 doesn't have partition key on the target list SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."value_2" as user_id FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- we can support arbitrary subqueries within UNIONs SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 
0 AS event, "events"."user_id" FROM events_table as "events", users_table as "users" WHERE events.user_id = users.user_id AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported since events_subquery_5 is not joined on partition key SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_table as "events", users_table as "users" WHERE events.user_id = users.value_2 AND event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported since the join is not equi join SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id != q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported since subquery 
3 includes a JOIN with non-equi join SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events", users_table as "users" WHERE event_type IN (20, 21, 22, 23, 24, 25) AND users.user_id != events.user_id ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- similar query with more union statements (to enable UNION tree become larger) SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 4 AS event FROM events_table as "events" WHERE event_type IN (31, 32, 33, 34, 35, 36)) events_subquery_5) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 5 AS event FROM events_table as "events" WHERE event_type IN (37, 38, 39, 40, 41, 42)) events_subquery_6) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 6 AS event FROM events_table as "events" WHERE event_type IN (50, 51, 52, 53, 54, 55)) events_subquery_6) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- -- UNION ALL Queries -- SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT 
"events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- same query target list entries shuffled SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported since subquery 3 does not have partition key SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."value_2", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported since events_subquery_4 does not have partition key on 
the -- target list SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."time", 1 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" * 2 FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- union all with inner and left joins SELECT user_id, count(*) as cnt FROM (SELECT first_query.user_id, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "first_query" INNER JOIN (SELECT "t"."user_id" FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t LEFT OUTER JOIN ( SELECT DISTINCT "events"."user_id" as user_id FROM events_table as "events" WHERE event_type IN (35, 36, 37, 38) GROUP BY user_id ) as t2 ON (t2.user_id = t.user_id) WHERE t2.user_id is NULL) as second_query ON ("first_query".user_id = "second_query".user_id)) as final_query GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; -- not supported since the join between t and t2 is not equi join -- union all with inner and left joins SELECT user_id, count(*) as cnt FROM (SELECT first_query.user_id, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 
16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "first_query" INNER JOIN (SELECT "t"."user_id" FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t LEFT OUTER JOIN ( SELECT DISTINCT "events"."user_id" as user_id FROM events_table as "events" WHERE event_type IN (35, 36, 37, 38) GROUP BY user_id ) as t2 ON (t2.user_id > t.user_id) WHERE t2.user_id is NULL) as second_query ON ("first_query".user_id = "second_query".user_id)) as final_query GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; -- -- Union, inner join and left join -- SELECT user_id, count(*) as cnt FROM (SELECT first_query.user_id, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "first_query" INNER JOIN (SELECT "t"."user_id" FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t LEFT OUTER JOIN ( SELECT DISTINCT "events"."user_id" as user_id FROM events_table as "events" WHERE event_type IN (35, 36, 37, 38) GROUP BY user_id ) as t2 ON (t2.user_id = t.user_id) WHERE t2.user_id is NULL) as second_query ON ("first_query".user_id = "second_query".user_id)) as final_query GROUP BY user_id ORDER BY cnt DESC, user_id DESC LIMIT 10; -- Simple LATERAL JOINs with GROUP BYs in each side -- need to set subquery_pushdown due to limit for next 2 queries SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(time) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY lastseen DESC LIMIT 50) "some_users" order BY user_id LIMIT 50; -- same query with subuqery joins in topmost select SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC 
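-- (the LIMIT just below, inside a subquery, is what makes citus.subquery_pushdown necessary for
-- these two queries: with the GUC enabled, Citus is allowed to push the whole query, inner LIMIT
-- included, down to each shard and merge the per-shard results on the coordinator)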
LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY user_id limit 50; -- reset subquery_pushdown SET citus.subquery_pushdown to OFF; -- not supported since JOIN is not on the partition key SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."value_1" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY user_id limit 50; -- not supported since JOIN is not on the partition key -- see (2 * user_id as user_id) target list element SELECT "some_users_data".user_id, lastseen FROM (SELECT 2 * user_id as user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC LIMIT 1000) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 50 and users.value_2 < 55 LIMIT 1) "some_users_data" ON TRUE ORDER BY user_id limit 50; -- LATERAL JOINs used with INNER JOINs SET citus.subquery_pushdown to ON; SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 AND user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON TRUE ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON TRUE ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; -- -- A similar query with topmost select is dropped -- and replaced by aggregation. 
Notice the heavy use of limit -- SELECT "some_users_data".user_id, MAX(lastseen), count(*) FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true GROUP BY 1 ORDER BY 2, 1 DESC LIMIT 10; SET citus.subquery_pushdown to OFF; -- not supported since the inner JOIN is not equi join SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id != "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; -- not supported since the inner JOIN is not on the partition key SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_1" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".value_1)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; -- not supported since upper LATERAL JOIN is not equi join SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT 
"users"."user_id", "users"."value_1" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id != filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; -- not supported since lower LATERAL JOIN is not on the partition key SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_1" FROM users_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_table as "events" WHERE user_id > 12 and user_id < 16 and user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON true ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."value_1" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON true ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; -- NESTED INNER JOINs SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as real_user_id FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id = "user_where_1_join_1".user_id)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; -- not supported since the first inner join is not on the partition key SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as 
real_user_id FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_2" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id = "user_where_1_join_1".value_2)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; -- not supported since the first inner join is not an equi join SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as real_user_id FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id", "users"."value_2" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id >= "user_where_1_join_1".user_id)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; -- single level inner joins SELECT "value_3", count(*) AS cnt FROM (SELECT "value_3", "user_id", random() FROM (SELECT users_in_segment_1.user_id, value_3 FROM (SELECT user_id, value_3 * 2 as value_3 FROM (SELECT user_id, value_3 FROM (SELECT "users"."user_id", value_3 FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 30 ) simple_user_where_1 ) all_buckets_1 ) users_in_segment_1 JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 60 ) some_users_data ON ("users_in_segment_1".user_id = "some_users_data".user_id) ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; -- not supported since there is no partition column equality at all SELECT "value_3", count(*) AS cnt FROM (SELECT "value_3", "user_id", random() FROM (SELECT users_in_segment_1.user_id, value_3 FROM (SELECT user_id, value_3 * 2 as value_3 FROM (SELECT user_id, value_3 FROM (SELECT "users"."user_id", value_3 FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 30 ) simple_user_where_1 ) all_buckets_1 ) users_in_segment_1 JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 60 ) some_users_data ON (true) ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; -- nested LATERAL JOINs SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = 
"filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1) "some_users_data" ON true ORDER BY value_3 DESC LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; -- nested lateral join at top most level SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200 ) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1 ) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10 ) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1 ) "some_users_data" ON true ORDER BY value_3 DESC, user_id ASC LIMIT 10; -- longer nested lateral joins SELECT * FROM (SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1) "some_users_data" ON true ORDER BY value_3 DESC LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; -- longer nested lateral join wth top level join SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200 ) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1 ) "last_events_1" ON TRUE ORDER BY value_3 DESC LIMIT 10 ) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1 ) "some_users_data" ON TRUE ORDER BY value_3 DESC LIMIT 10; SET citus.subquery_pushdown to OFF; -- LEFT JOINs used with INNER JOINs SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id" as event_user_id FROM events_table as "events" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" LEFT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM users_table as "users") "left_group_by_1" ON ("left_group_by_1".user_id = 
"multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" group BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; -- single table subquery, no JOINS involved SELECT count(*) AS cnt, user_id FROM (SELECT "eventQuery"."user_id", random() FROM (SELECT "events"."user_id" FROM events_table "events" WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90)) "eventQuery") "pushedDownQuery" GROUP BY "user_id" ORDER BY cnt DESC, user_id DESC LIMIT 10; -- lateral joins in the nested manner SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, value_2 FROM (SELECT user_id, max(value_2) AS value_2 FROM (SELECT user_id, value_2 FROM (SELECT user_id, value_2 FROM events_table as "events" WHERE user_id > 10 and user_id < 20) "events_1" ORDER BY value_2 DESC LIMIT 10000) "recent_events_1" GROUP BY user_id ORDER BY max(value_2) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND value_2 > 75 LIMIT 1) "some_users_data" ON true ORDER BY value_2 DESC LIMIT 10) "some_users" ORDER BY value_2 DESC, user_id DESC LIMIT 10; SET citus.subquery_pushdown to OFF; -- not supported since join is not on the partition key SELECT * FROM (SELECT "some_users_data".user_id, value_2 FROM (SELECT user_id, max(value_2) AS value_2 FROM (SELECT user_id, value_2 FROM (SELECT user_id, value_2 FROM events_table as "events" WHERE user_id > 10 and user_id < 20) "events_1" ORDER BY value_2 DESC LIMIT 10000) "recent_events_1" GROUP BY user_id ORDER BY max(value_2) DESC) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_table as "users" WHERE "users"."value_2" = "some_recent_users"."user_id" AND value_2 > 75 LIMIT 1) "some_users_data" ON true ORDER BY value_2 DESC LIMIT 10) "some_users" ORDER BY value_2 DESC, user_id DESC LIMIT 10; -- lets test some unsupported set operations -- not supported since we use INTERSECT SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) INTERSECT (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported due to offset SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event 
DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) OFFSET 3) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- not supported due to window functions SELECT user_id, some_vals FROM ( SELECT * , Row_number() over (PARTITION BY "user_id" ORDER BY "user_id") AS "some_vals", Random() FROM users_table ) user_id ORDER BY 1, 2 limit 10; -- not supported due to non relation rte SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT 1 as user_id, now(), 3 AS event ) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- similar to the above, but constant rte is on the right side of the query SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT 1 as user_id, now(), 3 AS event ) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT random()::int as user_id) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; SET citus.enable_router_execution TO 
TRUE;citus-7.0.3/src/test/regress/sql/multi_subquery_complex_reference_clause.sql000066400000000000000000000472731317107136600276240ustar00rootroot00000000000000-- -- multi subquery complex queries aims to expand existing subquery pushdown -- regression tests to cover more caeses -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1400000; SET citus.enable_router_execution TO FALSE; CREATE TABLE user_buy_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('user_buy_test_table', 'user_id'); INSERT INTO user_buy_test_table VALUES(1,2,1); INSERT INTO user_buy_test_table VALUES(2,3,4); INSERT INTO user_buy_test_table VALUES(3,4,2); INSERT INTO user_buy_test_table VALUES(7,5,2); CREATE TABLE users_return_test_table(user_id int, item_id int, buy_count int); SELECT create_distributed_table('users_return_test_table', 'user_id'); INSERT INTO users_return_test_table VALUES(4,1,1); INSERT INTO users_return_test_table VALUES(1,3,1); INSERT INTO users_return_test_table VALUES(3,2,2); CREATE TABLE users_ref_test_table(id int, it_name varchar(25), k_no int); SELECT create_reference_table('users_ref_test_table'); INSERT INTO users_ref_test_table VALUES(1,'User_1',45); INSERT INTO users_ref_test_table VALUES(2,'User_2',46); INSERT INTO users_ref_test_table VALUES(3,'User_3',47); INSERT INTO users_ref_test_table VALUES(4,'User_4',48); INSERT INTO users_ref_test_table VALUES(5,'User_5',49); INSERT INTO users_ref_test_table VALUES(6,'User_6',50); -- Simple Join test with reference table SELECT count(*) FROM (SELECT random() FROM user_buy_test_table JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; -- Should work, reference table at the inner side is allowed SELECT count(*) FROM (SELECT random(), k_no FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1 WHERE k_no = 47; -- Should not work, no equality between partition column and reference table SELECT * FROM (SELECT random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.item_id = users_ref_test_table.id) subquery_1; -- Should not work, no equality between partition column and reference table SELECT * FROM (SELECT random() FROM user_buy_test_table LEFT JOIN users_ref_test_table ON user_buy_test_table.user_id > users_ref_test_table.id) subquery_1; -- Shouldn't work, reference table at the outer side is not allowed SELECT * FROM (SELECT random() FROM users_ref_test_table LEFT JOIN user_buy_test_table ON users_ref_test_table.id = user_buy_test_table.user_id) subquery_1; -- Should work, reference table at the inner side is allowed SELECT count(*) FROM (SELECT random() FROM users_ref_test_table RIGHT JOIN user_buy_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; -- Shouldn't work, reference table at the outer side is not allowed SELECT * FROM (SELECT random() FROM user_buy_test_table RIGHT JOIN users_ref_test_table ON user_buy_test_table.user_id = users_ref_test_table.id) subquery_1; -- Should pass since reference table locates in the inner part of each left join SELECT count(*) FROM (SELECT tt1.user_id, random() FROM user_buy_test_table AS tt1 JOIN users_return_test_table as tt2 ON 
tt1.user_id = tt2.user_id) subquery_1 LEFT JOIN (SELECT tt1.user_id, random() FROM user_buy_test_table as tt1 LEFT JOIN users_ref_test_table as ref ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id; -- Should not pass since reference table locates in the outer part of right join SELECT * FROM (SELECT tt1.user_id, random() FROM user_buy_test_table AS tt1 JOIN users_return_test_table as tt2 ON tt1.user_id = tt2.user_id) subquery_1 RIGHT JOIN (SELECT tt1.user_id, random() FROM user_buy_test_table as tt1 JOIN users_ref_test_table as ref ON tt1.user_id = ref.id) subquery_2 ON subquery_1.user_id = subquery_2.user_id; -- LATERAL JOINs used with INNER JOINs with reference tables SET citus.subquery_pushdown to ON; SELECT user_id, lastseen FROM (SELECT "some_users_data".user_id, lastseen FROM (SELECT filter_users_1.user_id, time AS lastseen FROM (SELECT user_where_1_1.user_id FROM (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 12 and user_id < 16 and value_1 > 20) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 12 and user_id < 16 and value_2 > 60) user_where_1_join_1 ON ("user_where_1_1".user_id = "user_where_1_join_1".user_id)) filter_users_1 JOIN LATERAL (SELECT user_id, time FROM events_reference_table as "events" WHERE user_id > 12 and user_id < 16 AND user_id = filter_users_1.user_id ORDER BY time DESC LIMIT 1) "last_events_1" ON TRUE ORDER BY time DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_reference_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND "users"."value_2" > 70 LIMIT 1) "some_users_data" ON TRUE ORDER BY lastseen DESC LIMIT 10) "some_users" ORDER BY user_id DESC LIMIT 10; SET citus.subquery_pushdown to OFF; -- NESTED INNER JOINs with reference tables SELECT count(*) AS value, "generated_group_field" FROM (SELECT DISTINCT "pushedDownQuery"."real_user_id", "generated_group_field" FROM (SELECT "eventQuery"."real_user_id", "eventQuery"."time", random(), ("eventQuery"."value_2") AS "generated_group_field" FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id", "events"."value_2" FROM events_reference_table as "events" WHERE user_id > 10 and user_id < 40 AND event_type IN (40, 41, 42, 43, 44, 45) ) "temp_data_queries" INNER JOIN (SELECT user_where_1_1.real_user_id FROM (SELECT "users"."user_id" as real_user_id FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 50 ) user_where_1_1 INNER JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_3 > 50 ) user_where_1_join_1 ON ("user_where_1_1".real_user_id = "user_where_1_join_1".user_id)) "user_filters_1" ON ("temp_data_queries".user_id = "user_filters_1".real_user_id)) "eventQuery") "pushedDownQuery") "pushedDownQuery" GROUP BY "generated_group_field" ORDER BY generated_group_field DESC, value DESC; -- single level inner joins with reference tables SELECT "value_3", count(*) AS cnt FROM (SELECT "value_3", "user_id", random() FROM (SELECT users_in_segment_1.user_id, value_3 FROM (SELECT user_id, value_3 * 2 as value_3 FROM (SELECT user_id, value_3 FROM (SELECT "users"."user_id", value_3 FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 30 ) simple_user_where_1 ) all_buckets_1 ) users_in_segment_1 JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 10 and user_id < 40 and value_2 > 60 ) 
some_users_data ON ("users_in_segment_1".user_id = "some_users_data".user_id) ) segmentalias_1) "tempQuery" GROUP BY "value_3" ORDER BY cnt, value_3 DESC LIMIT 10; -- nested LATERAL JOINs with reference tables SET citus.subquery_pushdown to ON; SELECT * FROM (SELECT "some_users_data".user_id, "some_recent_users".value_3 FROM (SELECT filter_users_1.user_id, value_3 FROM (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 20 and user_id < 70 and users.value_2 = 200) filter_users_1 JOIN LATERAL (SELECT user_id, value_3 FROM events_reference_table as "events" WHERE user_id > 20 and user_id < 70 AND ("events".user_id = "filter_users_1".user_id) ORDER BY value_3 DESC LIMIT 1) "last_events_1" ON true ORDER BY value_3 DESC LIMIT 10) "some_recent_users" JOIN LATERAL (SELECT "users".user_id FROM users_reference_table as "users" WHERE "users"."user_id" = "some_recent_users"."user_id" AND users.value_2 > 200 LIMIT 1) "some_users_data" ON true ORDER BY value_3 DESC LIMIT 10) "some_users" ORDER BY value_3 DESC LIMIT 10; SET citus.subquery_pushdown to OFF; -- LEFT JOINs used with INNER JOINs should error out since reference table exist in the -- left side of the LEFT JOIN. SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id" as event_user_id FROM events_table as "events" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM users_reference_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" LEFT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM users_table as "users") "left_group_by_1" ON ("left_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" group BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; -- RIGHT JOINs used with INNER JOINs should error out since reference table exist in the -- right side of the RIGHT JOIN. 
SELECT count(*) AS cnt, "generated_group_field" FROM (SELECT "eventQuery"."user_id", random(), generated_group_field FROM (SELECT "multi_group_wrapper_1".*, generated_group_field, random() FROM (SELECT * FROM (SELECT "events"."time", "events"."user_id" as event_user_id FROM events_table as "events" WHERE user_id > 80) "temp_data_queries" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE user_id > 80 and value_2 = 5) "user_filters_1" ON ("temp_data_queries".event_user_id = "user_filters_1".user_id)) AS "multi_group_wrapper_1" RIGHT JOIN (SELECT "users"."user_id" AS "user_id", value_2 AS "generated_group_field" FROM users_reference_table as "users") "right_group_by_1" ON ("right_group_by_1".user_id = "multi_group_wrapper_1".event_user_id)) "eventQuery") "pushedDownQuery" group BY "generated_group_field" ORDER BY cnt DESC, generated_group_field ASC LIMIT 10; -- Outer subquery with reference table SELECT "some_users_data".user_id, lastseen FROM (SELECT user_id, max(time) AS lastseen FROM (SELECT user_id, time FROM (SELECT user_id, time FROM events_reference_table as "events" WHERE user_id > 10 and user_id < 40) "events_1" ORDER BY time DESC) "recent_events_1" GROUP BY user_id ORDER BY max(TIME) DESC) "some_recent_users" FULL JOIN (SELECT "users".user_id FROM users_table as "users" WHERE users.value_2 > 50 and users.value_2 < 55) "some_users_data" ON "some_users_data"."user_id" = "some_recent_users"."user_id" ORDER BY user_id limit 50; -- -- UNIONs and JOINs with reference tables, shoukld error out -- SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_reference_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- reference table exist in the subquery of union, should error out SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."time", 0 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION (SELECT * FROM ( SELECT * FROM ( SELECT max("events"."time"), 0 AS event, "events"."user_id" FROM events_reference_table as "events", users_table as "users" WHERE events.user_id = users.user_id AND 
event_type IN (10, 11, 12, 13, 14, 15) GROUP BY "events"."user_id" ) as events_subquery_5 ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."time", 2 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT "events"."time", 3 AS event, "events"."user_id" FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4) ) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; -- -- Should error out with UNION ALL Queries on reference tables -- SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15) ) events_subquery_1) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_reference_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION ALL (SELECT * FROM (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" INNER JOIN (SELECT "users"."user_id" FROM users_table as "users" WHERE value_1 > 50 and value_1 < 70) AS t ON (t.user_id = q.user_id)) as final_query GROUP BY types ORDER BY types; DROP TABLE user_buy_test_table; DROP TABLE users_ref_test_table; DROP TABLE users_return_test_table; citus-7.0.3/src/test/regress/sql/multi_subquery_in_where_clause.sql000066400000000000000000000303671317107136600257330ustar00rootroot00000000000000-- -- multi subquery in where queries aims to expand existing subquery pushdown -- regression tests to cover more cases specifically subqueries in WHERE clause -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- subqueries in WHERE with greater operator SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_table WHERE users_table.user_id = events_table.user_id AND event_type = 50 GROUP BY user_id ) GROUP BY user_id HAVING count(*) > 66 ORDER BY user_id LIMIT 5; -- subqueries in where with ALL operator SELECT user_id FROM users_table WHERE value_2 > 545 AND value_2 < ALL (SELECT avg(value_3) FROM events_table WHERE users_table.user_id = events_table.user_id GROUP BY user_id) GROUP BY 1 ORDER BY 1 DESC LIMIT 3; -- IN operator on non-partition key SELECT user_id FROM events_table as e1 WHERE event_type IN (SELECT event_type FROM events_table as e2 WHERE value_2 = 15 AND value_3 > 25 AND e1.user_id = e2.user_id ) ORDER BY 1; -- NOT IN on non-partition key SELECT user_id FROM events_table as e1 WHERE event_type NOT IN (SELECT event_type FROM events_table as e2 WHERE value_2 = 15 AND value_3 > 25 AND e1.user_id = 
e2.user_id ) GROUP BY 1 HAVING count(*) > 122 ORDER BY 1; -- non-correlated query with =ANY on partition keys SELECT user_id, count(*) FROM users_table WHERE user_id =ANY(SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) GROUP BY 1 ORDER BY 2 DESC LIMIT 5; -- users that appeared more than 118 times SELECT user_id FROM users_table WHERE 118 <= (SELECT count(*) FROM events_table WHERE users_table.user_id = events_table.user_id GROUP BY user_id) GROUP BY user_id ORDER BY user_id; -- the following query doesn't have a meaningful result -- but it is a valid query with an arbitrary subquery in -- WHERE clause SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND user_id IN ( SELECT e1.user_id FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; -- similar to the above query -- the following query doesn't have a meaningful result -- but it is a valid query with an arbitrary subquery in -- WHERE clause SELECT user_id FROM users_table WHERE user_id IN ( SELECT user_id FROM ( SELECT subquery_1.user_id, count_pay FROM ( (SELECT users_table.user_id, 'action=>1' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top GROUP BY count_pay, user_id ) GROUP BY user_id HAVING count(*) > 3 AND sum(value_2) > 49000 ORDER BY 1; -- the following query doesn't have a meaningful result -- but it is a valid query with an arbitrary subquery in -- FROM clause involving a complex query in WHERE clause SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id 
AND u.user_id IN ( SELECT user_id FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND user_id = users_table.user_id) ) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; -- -- below tests only aims for cases where all relations -- are not joined on partition key -- -- e4 is not joined on the partition key SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND user_id IN ( SELECT e1.user_id FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_table WHERE value_2 = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ); -- left leaf query does not return partition key SELECT user_id FROM users_table WHERE user_id IN ( SELECT user_id FROM ( SELECT subquery_1.user_id, count_pay FROM ( (SELECT 2 * users_table.user_id as user_id, 'action=>1' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 10 AND events_table.event_type < 12 ) UNION (SELECT users_table.user_id, 'action=>2' AS event, events_table.time FROM users_table, events_table WHERE users_table.user_id = events_table.user_id AND users_table.user_id >= 10 AND users_table.user_id <= 70 AND events_table.event_type > 12 AND events_table.event_type < 14 ) ) AS subquery_1 LEFT JOIN (SELECT user_id, COUNT(*) AS count_pay FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 15 AND users_table.value_1 < 17 GROUP BY user_id HAVING COUNT(*) > 1) AS subquery_2 ON subquery_1.user_id = subquery_2.user_id GROUP BY subquery_1.user_id, count_pay) AS subquery_top GROUP BY count_pay, user_id ) GROUP BY user_id HAVING count(*) > 3 AND sum(value_2) > 49000 ORDER BY 1; -- NOT EXISTS query has non-equi join SELECT user_id, array_length(events_table, 1) FROM ( SELECT user_id, array_agg(event ORDER BY time) AS events_table FROM ( SELECT u.user_id, e.event_type::text AS event, e.time FROM users_table AS u, events_table AS e WHERE u.user_id = e.user_id AND u.user_id IN ( SELECT user_id FROM users_table WHERE value_2 >= 5 AND EXISTS (SELECT user_id FROM events_table WHERE event_type > 100 AND event_type <= 300 AND value_3 > 100 AND user_id = users_table.user_id) AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type > 300 AND event_type <= 350 AND value_3 > 100 AND 
user_id != users_table.user_id) ) ) t GROUP BY user_id ) q ORDER BY 2 DESC, 1; -- subquery in where clause doesn't have a relation SELECT user_id FROM users_table WHERE value_2 > (SELECT 1); -- OFFSET is not supported in the subquey SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_table WHERE users_table.user_id = events_table.user_id AND event_type = 50 GROUP BY user_id OFFSET 3 ); -- we can detect unsupported subquerues even if they appear -- in WHERE subquery -> FROM subquery -> WHERE subquery SELECT user_id FROM users_table WHERE user_id IN (SELECT f_inner.user_id FROM ( SELECT e1.user_id FROM users_table u1, events_table e1 WHERE e1.user_id = u1.user_id ) as f_inner, ( SELECT e1.user_id FROM users_table u1, events_table e1 WHERE e1.user_id = u1.user_id AND e1.user_id IN (SELECT user_id FROM users_table LIMIT 3 ) ) as f_outer WHERE f_inner.user_id = f_outer.user_id ); -- semi join is not on the partition key for the third subquery SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 10 AND value_1 <= 20) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= 30 AND value_1 <= 40) AND value_2 IN (SELECT user_id FROM users_table WHERE value_1 >= 50 AND value_1 <= 60); CREATE FUNCTION test_join_function(integer, integer) RETURNS bool AS 'select $1 > $2;' LANGUAGE SQL IMMUTABLE RETURNS NULL ON NULL INPUT; -- we disallow JOINs via functions SELECT user_id, value_2 FROM users_table WHERE value_1 = 101 AND value_2 >= 5 AND NOT EXISTS (SELECT user_id FROM events_table WHERE event_type=101 AND value_3 > 100 AND test_join_function(events_table.user_id, users_table.user_id)) ORDER BY 1 DESC, 2 DESC LIMIT 3; DROP FUNCTION test_join_function(int,int); citus-7.0.3/src/test/regress/sql/multi_subquery_in_where_reference_clause.sql000066400000000000000000000104321317107136600277400ustar00rootroot00000000000000-- -- queries to test the subquery pushdown on reference tables -- subqueries in WHERE with greater operator SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id AND event_type = 50 GROUP BY user_id ) GROUP BY user_id HAVING count(*) > 66 ORDER BY user_id LIMIT 5; -- subqueries in WHERE with IN operator SELECT user_id FROM users_table WHERE value_2 IN (SELECT value_2 FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; -- subqueries in WHERE with NOT EXISTS operator, should work since -- reference table in the inner part of the join SELECT user_id FROM users_table WHERE NOT EXISTS (SELECT value_2 FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; -- subqueries in WHERE with NOT EXISTS operator, should not work -- there is a reference table in the outer part of the join SELECT user_id FROM users_reference_table WHERE NOT EXISTS (SELECT value_2 FROM events_table WHERE users_reference_table.user_id = events_table.user_id ) LIMIT 3; -- subqueries in WHERE with IN operator without equality SELECT user_id FROM users_table WHERE value_2 IN (SELECT value_2 FROM events_reference_table WHERE users_table.user_id > events_reference_table.user_id ) GROUP BY user_id ORDER BY user_id LIMIT 3; -- have reference table without any equality, should error out SELECT user_id FROM users_table WHERE value_2 > (SELECT max(value_2) FROM events_reference_table WHERE event_type = 
50 GROUP BY user_id ) GROUP BY user_id HAVING count(*) > 66 ORDER BY user_id LIMIT 5; -- users that appeared more than 118 times, should run since the reference table -- on the right side of the semi join SELECT user_id FROM users_table WHERE 118 <= (SELECT count(*) FROM events_reference_table WHERE users_table.user_id = events_reference_table.user_id GROUP BY user_id) GROUP BY user_id ORDER BY user_id; -- should error out since reference table exist on the left side -- of the left lateral join SELECT user_id, value_2 FROM users_table WHERE value_1 > 101 AND value_1 < 110 AND value_2 >= 5 AND user_id IN ( SELECT e1.user_id FROM ( -- Get the first time each user viewed the homepage. SELECT user_id, 1 AS view_homepage, min(time) AS view_homepage_time FROM events_reference_table WHERE event_type IN (10, 20, 30, 40, 50, 60, 70, 80, 90) GROUP BY user_id ) e1 LEFT JOIN LATERAL ( SELECT user_id, 1 AS use_demo, time AS use_demo_time FROM events_reference_table WHERE user_id = e1.user_id AND event_type IN (11, 21, 31, 41, 51, 61, 71, 81, 91) ORDER BY time ) e2 ON true LEFT JOIN LATERAL ( SELECT user_id, 1 AS enter_credit_card, time AS enter_credit_card_time FROM events_reference_table WHERE user_id = e2.user_id AND event_type IN (12, 22, 32, 42, 52, 62, 72, 82, 92) ORDER BY time ) e3 ON true LEFT JOIN LATERAL ( SELECT 1 AS submit_card_info, user_id, time AS enter_credit_card_time FROM events_reference_table WHERE user_id = e3.user_id AND event_type IN (13, 23, 33, 43, 53, 63, 73, 83, 93) ORDER BY time ) e4 ON true LEFT JOIN LATERAL ( SELECT 1 AS see_bought_screen FROM events_reference_table WHERE user_id = e4.user_id AND event_type IN (14, 24, 34, 44, 54, 64, 74, 84, 94) ORDER BY time ) e5 ON true group by e1.user_id HAVING sum(submit_card_info) > 0 ) ORDER BY 1, 2; citus-7.0.3/src/test/regress/sql/multi_subquery_misc.sql000066400000000000000000000111771317107136600235300ustar00rootroot00000000000000-- multi subquery pushdown misc aims to test subquery pushdown queries with -- (i) Prepared statements -- (ii) PL/PGSQL functions -- (iii) SQL functions -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests SET citus.enable_router_execution TO false; PREPARE prepared_subquery_1 AS SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= 10 AND user_id <= 70 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; EXECUTE prepared_subquery_1; PREPARE prepared_subquery_2(int, int) AS SELECT user_id, user_lastseen, array_length(event_array, 1) FROM ( SELECT user_id, max(u.time) as user_lastseen, array_agg(event_type ORDER BY u.time) AS event_array FROM ( SELECT user_id, time FROM users_table WHERE user_id >= $1 AND user_id <= $2 AND users_table.value_1 > 10 AND users_table.value_1 < 12 ) u LEFT JOIN LATERAL ( SELECT event_type, time FROM events_table WHERE user_id = u.user_id AND events_table.event_type > 10 AND events_table.event_type < 12 ) t ON true GROUP BY user_id ) AS shard_union ORDER BY user_lastseen DESC, user_id; -- should be fine with more than 
five executions EXECUTE prepared_subquery_2(10, 70); EXECUTE prepared_subquery_2(10, 70); EXECUTE prepared_subquery_2(10, 70); EXECUTE prepared_subquery_2(10, 70); EXECUTE prepared_subquery_2(10, 70); EXECUTE prepared_subquery_2(10, 70); EXECUTE prepared_subquery_2(10, 70); -- prepared statements with subqueries in WHERE clause PREPARE prepared_subquery_3(int, int, int, int, int, int) AS SELECT user_id FROM users_table WHERE user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $4 AND value_1 <= $3) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $5 AND value_1 <= $6) AND user_id IN (SELECT user_id FROM users_table WHERE value_1 >= $1 AND value_1 <= $2) GROUP BY user_id ORDER BY user_id DESC LIMIT 5; -- enough times (6+) to actually use prepared statements EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); EXECUTE prepared_subquery_3(50, 60, 20, 10, 30, 40); CREATE FUNCTION plpgsql_subquery_test(int, int) RETURNS TABLE(count bigint) AS $$ DECLARE BEGIN RETURN QUERY SELECT count(*) FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < $2; END; $$ LANGUAGE plpgsql; -- enough times (6+) to actually use prepared statements SELECT plpgsql_subquery_test(10, 20); SELECT plpgsql_subquery_test(10, 20); SELECT plpgsql_subquery_test(10, 20); SELECT plpgsql_subquery_test(10, 20); SELECT plpgsql_subquery_test(10, 20); SELECT plpgsql_subquery_test(10, 20); -- this should also work, but should return 0 given that int = NULL is always returns false SELECT plpgsql_subquery_test(10, NULL); CREATE FUNCTION sql_subquery_test(int, int) RETURNS bigint AS $$ SELECT count(*) FROM users_table JOIN (SELECT ma.user_id, (GREATEST(coalesce(ma.value_4 / 250, 0.0) + GREATEST(1.0))) / 2 AS prob FROM users_table AS ma, events_table as short_list WHERE short_list.user_id = ma.user_id and ma.value_1 < $1 and short_list.event_type < 50 ) temp ON users_table.user_id = temp.user_id WHERE users_table.value_1 < $2; $$ LANGUAGE SQL; -- should error out SELECT sql_subquery_test(5,5); DROP FUNCTION plpgsql_subquery_test(int, int); DROP FUNCTION sql_subquery_test(int, int); citus-7.0.3/src/test/regress/sql/multi_subquery_union.sql000066400000000000000000000647461317107136600237370ustar00rootroot00000000000000-- -- multi subquery toplevel union queries aims to expand existing subquery pushdown -- regression tests to cover more cases -- the tables that are used depends to multi_insert_select_behavioral_analytics_create_table.sql -- We don't need shard id sequence here, so commented out to prevent conflicts with concurrent tests -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1400000; SET citus.enable_router_execution TO false; -- a very simple union query SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; -- a very simple union query with reference table SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 
AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT user_id, value_2 % 10 AS counter FROM events_reference_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; -- the same query with union all SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION ALL SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; -- the same query with union all and reference table SELECT user_id, counter FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION ALL SELECT user_id, value_2 % 10 AS counter FROM events_reference_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id ORDER BY 2 DESC,1 LIMIT 5; -- the same query with group by SELECT user_id, sum(counter) FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; -- the same query with UNION ALL clause SELECT user_id, sum(counter) FROM ( SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION ALL SELECT user_id, value_2 % 10 AS counter FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; -- the same query target list entries shuffled SELECT user_id, sum(counter) FROM ( SELECT value_2 % 10 AS counter, user_id FROM events_table WHERE event_type IN (1, 2, 3, 4, 5) UNION SELECT value_2 % 10 AS counter, user_id FROM events_table WHERE event_type IN (5, 6, 7, 8, 9, 10) ) user_id GROUP BY 1 ORDER BY 2 DESC,1 LIMIT 5; -- same query with GROUP BY SELECT user_id, sum(counter) FROM ( SELECT user_id, value_2 AS counter FROM events_table WHERE event_type IN (1, 2) UNION SELECT user_id, value_2 AS counter FROM events_table WHERE event_type IN (5, 6) ) user_id GROUP BY user_id --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; -- the same query target list entries shuffled but this time the subqueries target list -- is shuffled SELECT user_id, sum(counter) FROM ( SELECT value_2 AS counter, user_id FROM events_table WHERE event_type IN (1, 2) UNION SELECT value_2 AS counter, user_id FROM events_table WHERE event_type IN (5, 6) ) user_id GROUP BY user_id --HAVING sum(counter) > 900 ORDER BY 1,2 DESC LIMIT 5; -- similar query this time more subqueries and target list contains a resjunk entry SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; -- similar query this time more subqueries with reference table and target list contains a resjunk entry SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM 
users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_reference_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; -- similar query as above, with UNION ALL SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 5000 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING sum(value_2) > 500 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION ALL SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; -- unions within unions SELECT * FROM ( ( SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_1 GROUP BY user_id) UNION (SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_2 GROUP BY user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; -- unions within unions with reference table SELECT * FROM ( ( SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_reference_table GROUP BY user_id) user_id_1 GROUP BY user_id) UNION (SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_2 GROUP BY user_id)) AS ftop ORDER BY 2 DESC, 1 DESC LIMIT 5; -- top level unions are wrapped into top level aggregations SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT 
"events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13)) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" ) as final_query GROUP BY types ORDER BY types; -- exactly the same query -- but wrapper unions are removed from the inner part of the query SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM (SELECT *, random() FROM (SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM (SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" ) as final_query GROUP BY types ORDER BY types; -- again excatly the same query with top level wrapper removed SELECT ("q"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" GROUP BY types ORDER BY types; -- again same query but with only two top level empty queries (i.e., no group bys) SELECT * FROM ( SELECT * FROM ( SELECT "t1"."user_id" FROM ( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) UNION (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 ) AS t) "q" ORDER BY 1 LIMIT 5; -- a very similar query UNION ALL SELECT ("q"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) UNION ALL (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN 
(15, 16, 17, 18, 19) ) UNION ALL (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) UNION ALL (SELECT "events"."user_id", "events"."time", 3 AS event FROM events_table as "events" WHERE event_type IN (26, 27, 28, 29, 30, 13))) t1 GROUP BY "t1"."user_id") AS t) "q" GROUP BY types ORDER BY types; -- some UNION ALL queries that are going to be pulled up SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT user_id FROM events_table) ) b; -- some UNION ALL queries that are going to be pulled up with reference table SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT user_id FROM events_reference_table) ) b; -- similar query without top level agg SELECT user_id FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT user_id FROM events_table) ) b ORDER BY 1 DESC LIMIT 5; -- similar query with multiple target list entries SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM users_table) UNION ALL (SELECT value_3, user_id FROM events_table) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; -- similar query group by inside the subqueries SELECT user_id, value_3_sum FROM ( (SELECT sum(value_3) as value_3_sum, user_id FROM users_table GROUP BY user_id) UNION ALL (SELECT sum(value_3) as value_3_sum, user_id FROM users_table GROUP BY user_id) ) b ORDER BY 2 DESC, 1 DESC LIMIT 5; -- similar query top level group by SELECT user_id, sum(value_3) FROM ( (SELECT value_3, user_id FROM users_table) UNION ALL (SELECT value_3, user_id FROM events_table) ) b GROUP BY 1 ORDER BY 2 DESC, 1 DESC LIMIT 5; -- a long set operation list SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (26, 27, 28, 29, 30)) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; -- no partition key on the top SELECT max(value_3) FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (26, 27, 28, 29, 30)) ) b GROUP BY user_id ORDER BY 1 DESC LIMIT 5; -- now lets also have some unsupported queries -- group by is not on the partition key SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id UNION SELECT value_1 as user_id, sum(value_2) AS counter FROM users_table GROUP BY value_1 ) user_id GROUP BY user_id; -- partition key is not selected SELECT sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 20 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 40 and value_1 < 60 GROUP BY user_id HAVING 
sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 60 and value_1 < 80 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT user_id, sum(value_2) AS counter FROM users_table where value_1 < 80 and value_1 < 100 GROUP BY user_id HAVING sum(value_2) > 500 UNION SELECT 2 * user_id, sum(value_2) AS counter FROM users_table where value_1 < 100 and value_1 < 120 GROUP BY user_id HAVING sum(value_2) > 500 ) user_id GROUP BY user_id ORDER BY 1 DESC LIMIT 5; -- excepts within unions are not supported SELECT * FROM ( ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id ) user_id_1 GROUP BY user_id ) UNION ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id EXCEPT SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id ) user_id_2 GROUP BY user_id) ) as ftop; -- joins inside unions are not supported SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT events_table.user_id, sum(events_table.value_2) AS counter FROM events_table, users_table WHERE users_table.user_id > events_table.user_id GROUP BY 1 ) user_id GROUP BY user_id; -- joins inside unions are not supported -- slightly more comlex than the above SELECT * FROM ( ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id ) user_id_1 GROUP BY user_id ) UNION ( SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT events_table.user_id, sum(events_table.value_2) AS counter FROM events_table, users_table WHERE (events_table.user_id = users_table.user_id) GROUP BY events_table.user_id ) user_id_2 GROUP BY user_id) ) as ftop; -- offset inside the union SELECT user_id, sum(counter) FROM ( SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id OFFSET 4 ) user_id GROUP BY user_id; -- lower level union does not return partition key with the other relations SELECT * FROM ( ( SELECT user_id, sum(counter) FROM (SELECT user_id, sum(value_2) AS counter FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_1 GROUP BY user_id) UNION (SELECT user_id, sum(counter) FROM (SELECT sum(value_2) AS counter, user_id FROM users_table GROUP BY user_id UNION SELECT user_id, sum(value_2) AS counter FROM events_table GROUP BY user_id) user_id_2 GROUP BY user_id)) AS ftop; -- some UNION all queries that are going to be pulled up SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT 2 * user_id FROM events_table) ) b; -- last query does not have partition key SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT 
value_3, value_2 FROM events_table where event_type IN (26, 27, 28, 29, 30)) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; -- we don't allow joins within unions SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT users_table.user_id FROM events_table, users_table WHERE events_table.user_id = users_table.user_id) ) b; -- we don't support subqueries without relations SELECT count(*) FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT 1) ) b; -- we don't support subqueries without relations SELECT * FROM ( (SELECT user_id FROM users_table) UNION ALL (SELECT (random() * 100)::int) ) b; -- we don't support subqueries without relations SELECT user_id, value_3 FROM ( (SELECT value_3, user_id FROM events_table where event_type IN (1, 2, 3, 4, 5)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (6, 7, 8, 9, 10)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (11, 12, 13, 14, 15)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (16, 17, 18, 19, 20)) UNION ALL (SELECT value_3, user_id FROM events_table where event_type IN (21, 22, 23, 24, 25)) UNION ALL (SELECT 1, 2) ) b ORDER BY 1 DESC, 2 DESC LIMIT 5; SELECT ("final_query"."event_types") as types, count(*) AS sumOfEventType FROM ( SELECT *, random() FROM ( SELECT "t"."user_id", "t"."time", unnest("t"."collected_events") AS "event_types" FROM ( SELECT "t1"."user_id", min("t1"."time") AS "time", array_agg(("t1"."event") ORDER BY TIME ASC, event DESC) AS collected_events FROM ( (SELECT * FROM (SELECT "events"."user_id", "events"."time", 0 AS event FROM events_table as "events" WHERE event_type IN (10, 11, 12, 13, 14, 15)) events_subquery_1) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 1 AS event FROM events_table as "events" WHERE event_type IN (15, 16, 17, 18, 19) ) events_subquery_2) UNION (SELECT * FROM (SELECT "events"."user_id", "events"."time", 2 AS event FROM events_table as "events" WHERE event_type IN (20, 21, 22, 23, 24, 25) ) events_subquery_3) UNION (SELECT * FROM (SELECT 1, now(), 3 AS event) events_subquery_4)) t1 GROUP BY "t1"."user_id") AS t) "q" ) as final_query GROUP BY types ORDER BY types; SET citus.enable_router_execution TO true; DROP TABLE events_reference_table; DROP TABLE users_reference_table; citus-7.0.3/src/test/regress/sql/multi_subtransactions.sql000066400000000000000000000112551317107136600240550ustar00rootroot00000000000000 CREATE TABLE artists ( id bigint NOT NULL, name text NOT NULL ); SELECT create_distributed_table('artists', 'id'); -- add some data INSERT INTO artists VALUES (1, 'Pablo Picasso'); INSERT INTO artists VALUES (2, 'Vincent van Gogh'); INSERT INTO artists VALUES (3, 'Claude Monet'); INSERT INTO artists VALUES (4, 'William Kurelek'); -- RELEASE SAVEPOINT BEGIN; INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; DELETE FROM artists WHERE id=5; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; -- ROLLBACK TO SAVEPOINT BEGIN; INSERT INTO artists VALUES (5, 'Asher Lev'); SAVEPOINT s1; DELETE FROM artists WHERE id=5; ROLLBACK TO SAVEPOINT s1; COMMIT; SELECT * FROM artists WHERE id=5; -- Serial sub-transaction releases BEGIN; SAVEPOINT s1; DELETE FROM artists WHERE id=5; RELEASE SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (5, 'Jacob Kahn'); RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; -- Serial sub-transaction rollbacks BEGIN; SAVEPOINT s1; UPDATE artists SET name='A' WHERE id=5; ROLLBACK TO SAVEPOINT s1; SAVEPOINT s2; DELETE FROM artists 
WHERE id=5; ROLLBACK TO SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=5; -- Multiple sub-transaction activity before first query BEGIN; SAVEPOINT s0; SAVEPOINT s1; SAVEPOINT s2; SAVEPOINT s3; ROLLBACK TO SAVEPOINT s2; RELEASE SAVEPOINT s1; INSERT INTO artists VALUES (6, 'John J. Audubon'); ROLLBACK TO SAVEPOINT s0; INSERT INTO artists VALUES (6, 'Emily Carr'); COMMIT; SELECT * FROM artists WHERE id=6; -- Release after rollback BEGIN; SAVEPOINT s1; ROLLBACK TO s1; RELEASE SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (7, 'John J. Audubon'); ROLLBACK TO s2; RELEASE SAVEPOINT s2; COMMIT; SELECT * FROM artists WHERE id=7; -- Recover from errors \set VERBOSITY terse BEGIN; SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (7, NULL); ROLLBACK TO SAVEPOINT s1; COMMIT; -- Don't recover from errors BEGIN; SAVEPOINT s1; SAVEPOINT s2; INSERT INTO artists VALUES (7, NULL); SAVEPOINT s3; ROLLBACK TO SAVEPOINT s3; COMMIT; -- =================================================================== -- Tests for replication factor > 1 -- =================================================================== CREATE TABLE researchers ( id bigint NOT NULL, lab_id int NOT NULL, name text NOT NULL ); SELECT master_create_distributed_table('researchers', 'lab_id', 'hash'); SELECT master_create_worker_shards('researchers', 2, 2); -- Basic rollback and release BEGIN; INSERT INTO researchers VALUES (7, 4, 'Jan Plaza'); SAVEPOINT s1; INSERT INTO researchers VALUES (8, 4, 'Alonzo Church'); ROLLBACK TO s1; RELEASE SAVEPOINT s1; COMMIT; SELECT * FROM researchers WHERE id in (7, 8); -- Recover from failure on one of nodes BEGIN; SAVEPOINT s1; INSERT INTO researchers VALUES (11, 11, 'Dana Scott'); INSERT INTO researchers VALUES (NULL, 10, 'Stephen Kleene'); ROLLBACK TO SAVEPOINT s1; INSERT INTO researchers VALUES (12, 10, 'Stephen Kleene'); COMMIT; SELECT * FROM researchers WHERE lab_id=10; -- Don't recover, but rollback BEGIN; SAVEPOINT s1; INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); RELEASE SAVEPOINT s1; SAVEPOINT s2; ROLLBACK; SELECT * FROM researchers WHERE lab_id=10; -- Don't recover, and commit BEGIN; SAVEPOINT s1; INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); RELEASE SAVEPOINT s1; SAVEPOINT s2; COMMIT; SELECT * FROM researchers WHERE lab_id=10; -- Implicit savepoints via pl/pgsql exceptions BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); EXCEPTION WHEN not_null_violation THEN RAISE NOTICE 'caught not_null_violation'; END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); RAISE EXCEPTION plpgsql_error; EXCEPTION WHEN plpgsql_error THEN RAISE NOTICE 'caught manual plpgsql_error'; END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); EXCEPTION WHEN not_null_violation THEN RAISE EXCEPTION not_null_violation; -- rethrow it END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; -- Insert something after catching error. 
BEGIN; DO $$ BEGIN INSERT INTO researchers VALUES (15, 10, 'Melvin Fitting'); INSERT INTO researchers VALUES (NULL, 10, 'Raymond Smullyan'); EXCEPTION WHEN not_null_violation THEN INSERT INTO researchers VALUES (32, 10, 'Raymond Smullyan'); END $$; COMMIT; SELECT * FROM researchers WHERE lab_id=10; -- Clean-up DROP TABLE artists; DROP TABLE researchers; citus-7.0.3/src/test/regress/sql/multi_table_ddl.sql000066400000000000000000000057031317107136600225460ustar00rootroot00000000000000-- -- MULTI_TABLE_DDL -- -- Tests around changing the schema and dropping of a distributed table ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 870000; CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); -- verify that the citus extension can't be dropped while distributed tables exist DROP EXTENSION citus; -- verify that the distribution column can't have its type changed ALTER TABLE testtableddl ALTER COLUMN distributecol TYPE text; -- verify that the distribution column can't be dropped ALTER TABLE testtableddl DROP COLUMN distributecol; -- verify that the table can be dropped in a transaction block \set VERBOSITY terse BEGIN; DROP TABLE testtableddl; COMMIT; \set VERBOSITY default -- recreate testtableddl CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); -- verify that the table can be dropped DROP TABLE testtableddl; -- verify that the table can be dropped even if shards exist CREATE TABLE testtableddl(somecol int, distributecol text NOT NULL); -- create table and do create empty shard test here, too SET citus.shard_replication_factor TO 1; SELECT master_create_distributed_table('testtableddl', 'distributecol', 'append'); SELECT 1 FROM master_create_empty_shard('testtableddl'); -- now actually drop table and shards DROP TABLE testtableddl; RESET citus.shard_replication_factor; -- ensure no metadata of distributed tables remains SELECT * FROM pg_dist_partition; SELECT * FROM pg_dist_shard; SELECT * FROM pg_dist_shard_placement; -- check that the extension now can be dropped (and recreated) DROP EXTENSION citus; CREATE EXTENSION citus; -- re-add the nodes to the cluster SELECT 1 FROM master_add_node('localhost', :worker_1_port); SELECT 1 FROM master_add_node('localhost', :worker_2_port); -- create a table with a SERIAL column CREATE TABLE testserialtable(id serial, group_id integer); SELECT master_create_distributed_table('testserialtable', 'group_id', 'hash'); SELECT master_create_worker_shards('testserialtable', 2, 1); -- should not be able to add additional serial columns ALTER TABLE testserialtable ADD COLUMN other_id serial; -- and we shouldn't be able to change a distributed sequence's owner ALTER SEQUENCE testserialtable_id_seq OWNED BY NONE; -- or create a sequence with a distributed owner CREATE SEQUENCE standalone_sequence OWNED BY testserialtable.group_id; -- or even change a manual sequence to be owned by a distributed table CREATE SEQUENCE standalone_sequence; ALTER SEQUENCE standalone_sequence OWNED BY testserialtable.group_id; -- an edge case, but it's OK to change an owner to the same distributed table ALTER SEQUENCE testserialtable_id_seq OWNED BY testserialtable.id; -- drop distributed table \c - - - :master_port DROP TABLE testserialtable; -- verify owned sequence is dropped \c - - - :worker_1_port \ds
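-- As an illustrative sketch (an added example, not one of the checks above): sequence
-- ownership is recorded in pg_depend as an 'a' (auto) dependency from the sequence to
-- its owning table column, so any owned sequences left behind can be listed like this.
SELECT objid::regclass AS owned_sequence,
       refobjid::regclass AS owning_table,
       refobjsubid AS owning_column
FROM pg_depend
WHERE deptype = 'a'
  AND classid = 'pg_class'::regclass
  AND refclassid = 'pg_class'::regclass
  AND objid IN (SELECT oid FROM pg_class WHERE relkind = 'S');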
citus-7.0.3/src/test/regress/sql/multi_task_assignment_policy.sql000066400000000000000000000061631317107136600254060ustar00rootroot00000000000000-- -- MULTI_TASK_ASSIGNMENT -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 880000; -- print whether we're using version > 9 to make version-specific tests clear SHOW server_version \gset SELECT substring(:'server_version', '\d+')::int > 9 AS version_above_nine; SET citus.explain_distributed_queries TO off; -- Check that our policies for assigning tasks to worker nodes run as expected. -- To test this, we first create a shell table, and then manually insert shard -- and shard placement data into system catalogs. We next run Explain command, -- and check that tasks are assigned to worker nodes as expected. CREATE TABLE task_assignment_test_table (test_id integer); SELECT master_create_distributed_table('task_assignment_test_table', 'test_id', 'append'); -- Create logical shards with shardids 200, 201, and 202 INSERT INTO pg_dist_shard (logicalrelid, shardid, shardstorage, shardminvalue, shardmaxvalue) SELECT pg_class.oid, series.index, 'r', 1, 1000 FROM pg_class, generate_series(200, 202) AS series(index) WHERE pg_class.relname = 'task_assignment_test_table'; -- Create shard placements for shard 200 and 201 INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 200, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 2; INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 201, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport ASC LIMIT 2; -- Create shard placements for shard 202 INSERT INTO pg_dist_shard_placement (shardid, shardstate, shardlength, nodename, nodeport) SELECT 202, 1, 1, nodename, nodeport FROM pg_dist_shard_placement GROUP BY nodename, nodeport ORDER BY nodename, nodeport DESC LIMIT 2; -- Start transaction block to avoid auto commits. This avoids additional debug -- messages from getting printed at real transaction starts and commits. BEGIN; -- Increase log level to see which worker nodes tasks are assigned to. Note that -- the following log messages print node name and port numbers; and node numbers -- in regression tests depend upon PG_VERSION_NUM. 
SET client_min_messages TO DEBUG3; -- First test the default greedy task assignment policy SET citus.task_assignment_policy TO 'greedy'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; EXPLAIN SELECT count(*) FROM task_assignment_test_table; -- Next test the first-replica task assignment policy SET citus.task_assignment_policy TO 'first-replica'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; EXPLAIN SELECT count(*) FROM task_assignment_test_table; -- Finally test the round-robin task assignment policy SET citus.task_assignment_policy TO 'round-robin'; EXPLAIN SELECT count(*) FROM task_assignment_test_table; EXPLAIN SELECT count(*) FROM task_assignment_test_table; EXPLAIN SELECT count(*) FROM task_assignment_test_table; RESET citus.task_assignment_policy; RESET client_min_messages; COMMIT; citus-7.0.3/src/test/regress/sql/multi_task_string_size.sql000066400000000000000000000124001317107136600242060ustar00rootroot00000000000000-- -- MULTI_TASK_STRING_SIZE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1602000; ALTER SEQUENCE pg_catalog.pg_dist_jobid_seq RESTART 1602000; CREATE TABLE wide_table ( long_column_001 int, long_column_002 int, long_column_003 int, long_column_004 int, long_column_005 int, long_column_006 int, long_column_007 int, long_column_008 int, long_column_009 int, long_column_010 int, long_column_011 int, long_column_012 int, long_column_013 int, long_column_014 int, long_column_015 int, long_column_016 int, long_column_017 int, long_column_018 int, long_column_019 int, long_column_020 int, long_column_021 int, long_column_022 int, long_column_023 int, long_column_024 int, long_column_025 int, long_column_026 int, long_column_027 int, long_column_028 int, long_column_029 int, long_column_030 int, long_column_031 int, long_column_032 int, long_column_033 int, long_column_034 int, long_column_035 int, long_column_036 int, long_column_037 int, long_column_038 int, long_column_039 int, long_column_040 int, long_column_041 int, long_column_042 int, long_column_043 int, long_column_044 int, long_column_045 int, long_column_046 int, long_column_047 int, long_column_048 int, long_column_049 int, long_column_050 int, long_column_051 int, long_column_052 int, long_column_053 int, long_column_054 int, long_column_055 int, long_column_056 int, long_column_057 int, long_column_058 int, long_column_059 int, long_column_060 int, long_column_061 int, long_column_062 int, long_column_063 int, long_column_064 int, long_column_065 int, long_column_066 int, long_column_067 int, long_column_068 int, long_column_069 int, long_column_070 int, long_column_071 int, long_column_072 int, long_column_073 int, long_column_074 int, long_column_075 int, long_column_076 int, long_column_077 int, long_column_078 int, long_column_079 int, long_column_080 int, long_column_081 int, long_column_082 int, long_column_083 int, long_column_084 int, long_column_085 int, long_column_086 int, long_column_087 int, long_column_088 int, long_column_089 int, long_column_090 int, long_column_091 int, long_column_092 int, long_column_093 int, long_column_094 int, long_column_095 int, long_column_096 int, long_column_097 int, long_column_098 int, long_column_099 int, long_column_100 int, long_column_101 int, long_column_102 int, long_column_103 int, long_column_104 int, long_column_105 int, long_column_106 int, long_column_107 int, long_column_108 int, long_column_109 int, long_column_110 int, long_column_111 int, long_column_112 int, long_column_113 int, long_column_114 int, long_column_115 
int, long_column_116 int, long_column_117 int, long_column_118 int, long_column_119 int, long_column_120 int, long_column_121 int, long_column_122 int, long_column_123 int, long_column_124 int, long_column_125 int, long_column_126 int, long_column_127 int, long_column_128 int, long_column_129 int, long_column_130 int, long_column_131 int, long_column_132 int, long_column_133 int, long_column_134 int, long_column_135 int, long_column_136 int, long_column_137 int, long_column_138 int, long_column_139 int, long_column_140 int, long_column_141 int, long_column_142 int, long_column_143 int, long_column_144 int, long_column_145 int, long_column_146 int, long_column_147 int, long_column_148 int, long_column_149 int, long_column_150 int, long_column_151 int, long_column_152 int, long_column_153 int, long_column_154 int, long_column_155 int, long_column_156 int, long_column_157 int, long_column_158 int, long_column_159 int, long_column_160 int, long_column_161 int, long_column_162 int, long_column_163 int, long_column_164 int, long_column_165 int, long_column_166 int, long_column_167 int, long_column_168 int, long_column_169 int, long_column_170 int, long_column_171 int, long_column_172 int, long_column_173 int, long_column_174 int, long_column_175 int, long_column_176 int, long_column_177 int, long_column_178 int, long_column_179 int, long_column_180 int, long_column_181 int, long_column_182 int, long_column_183 int, long_column_184 int, long_column_185 int, long_column_186 int, long_column_187 int, long_column_188 int, long_column_189 int, long_column_190 int, long_column_191 int, long_column_192 int, long_column_193 int, long_column_194 int, long_column_195 int, long_column_196 int, long_column_197 int, long_column_198 int, long_column_199 int, long_column_200 int ); SELECT create_distributed_table('wide_table', 'long_column_001'); SET citus.task_executor_type TO 'task-tracker'; SHOW citus.max_task_string_size; -- setting can not be changed on runtime SET citus.max_task_string_size TO 20000; -- error message may vary between executions -- hiding warning and error message -- no output means the query has failed SET client_min_messages to FATAL; SELECT u.* FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); -- following will succeed since it fetches few columns SELECT u.long_column_001, u.long_column_002, u.long_column_003 FROM wide_table u JOIN wide_table v ON (u.long_column_002 = v.long_column_003); RESET client_min_messages; DROP TABLE wide_table; RESET citus.shard_count; RESET citus.task_executor_type; citus-7.0.3/src/test/regress/sql/multi_test_helpers.sql000066400000000000000000000062721317107136600233370ustar00rootroot00000000000000-- File to create functions and helpers needed for subsequent tests -- create a helper function to create objects on each node CREATE FUNCTION run_command_on_master_and_workers(p_sql text) RETURNS void LANGUAGE plpgsql AS $$ BEGIN EXECUTE p_sql; PERFORM run_command_on_workers(p_sql); END;$$; -- The following views are intended as alternatives to \d commands, whose -- output changed in PostgreSQL 10. In particular, they must be used any time -- a test wishes to print out the structure of a relation, which previously -- was safely accomplished by a \d invocation. 
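-- As an illustrative sketch (an added example; it assumes the setting is fixed at server
-- start): pg_settings reports the context of citus.max_task_string_size, and a
-- 'postmaster' context would explain why the runtime SET above is rejected.
SELECT name, setting, context
FROM pg_settings
WHERE name = 'citus.max_task_string_size';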
SELECT run_command_on_master_and_workers( $desc_views$ CREATE VIEW table_fkey_cols AS SELECT rc.constraint_name AS "name", kcu.column_name AS "column_name", uc_kcu.column_name AS "refd_column_name", format('%I.%I', kcu.table_schema, kcu.table_name)::regclass::oid AS relid, format('%I.%I', uc_kcu.table_schema, uc_kcu.table_name)::regclass::oid AS refd_relid FROM information_schema.referential_constraints rc, information_schema.key_column_usage kcu, information_schema.key_column_usage uc_kcu WHERE rc.constraint_schema = kcu.constraint_schema AND rc.constraint_name = kcu.constraint_name AND rc.unique_constraint_schema = uc_kcu.constraint_schema AND rc.unique_constraint_name = uc_kcu.constraint_name; CREATE VIEW table_fkeys AS SELECT name AS "Constraint", format('FOREIGN KEY (%s) REFERENCES %s(%s)', string_agg(DISTINCT quote_ident(column_name), ', '), string_agg(DISTINCT refd_relid::regclass::text, ', '), string_agg(DISTINCT quote_ident(refd_column_name), ', ')) AS "Definition", "relid" FROM table_fkey_cols GROUP BY (name, relid); CREATE VIEW table_attrs AS SELECT c.column_name AS "name", c.data_type AS "type", CASE WHEN character_maximum_length IS NOT NULL THEN format('(%s)', character_maximum_length) WHEN data_type = 'numeric' AND numeric_precision IS NOT NULL THEN format('(%s,%s)', numeric_precision, numeric_scale) ELSE '' END AS "modifier", c.column_default AS "default", (NOT c.is_nullable::boolean) AS "notnull", format('%I.%I', c.table_schema, c.table_name)::regclass::oid AS "relid" FROM information_schema.columns AS c ORDER BY ordinal_position; CREATE VIEW table_desc AS SELECT "name" AS "Column", "type" || "modifier" AS "Type", rtrim(( CASE "notnull" WHEN true THEN 'not null ' ELSE '' END ) || ( CASE WHEN "default" IS NULL THEN '' ELSE 'default ' || "default" END )) AS "Modifiers", "relid" FROM table_attrs; CREATE VIEW table_checks AS SELECT cc.constraint_name AS "Constraint", ('CHECK ' || regexp_replace(check_clause, '^\((.*)\)$', '\1')) AS "Definition", format('%I.%I', ccu.table_schema, ccu.table_name)::regclass::oid AS relid FROM information_schema.check_constraints cc, information_schema.constraint_column_usage ccu WHERE cc.constraint_schema = ccu.constraint_schema AND cc.constraint_name = ccu.constraint_name ORDER BY cc.constraint_name ASC; $desc_views$ ); citus-7.0.3/src/test/regress/sql/multi_tpch_query1.sql000066400000000000000000000013001317107136600230650ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY1 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #1 from the TPC-H decision support benchmark SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus; citus-7.0.3/src/test/regress/sql/multi_tpch_query10.sql000066400000000000000000000014041317107136600231520ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY10 -- -- Query #10 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
SELECT c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment FROM customer, orders, lineitem, nation WHERE c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate >= date '1993-10-01' AND o_orderdate < date '1993-10-01' + interval '3' month AND l_returnflag = 'R' AND c_nationkey = n_nationkey GROUP BY c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment ORDER BY revenue DESC LIMIT 20; citus-7.0.3/src/test/regress/sql/multi_tpch_query12.sql000066400000000000000000000013751317107136600231630ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY12 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #12 from the TPC-H decision support benchmark SELECT l_shipmode, sum(case when o_orderpriority = '1-URGENT' OR o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' AND o_orderpriority <> '2-HIGH' then 1 else 0 end) AS low_line_count FROM orders, lineitem WHERE o_orderkey = l_orderkey AND l_shipmode in ('MAIL', 'SHIP') AND l_commitdate < l_receiptdate AND l_shipdate < l_commitdate AND l_receiptdate >= date '1994-01-01' AND l_receiptdate < date '1994-01-01' + interval '1' year GROUP BY l_shipmode ORDER BY l_shipmode; citus-7.0.3/src/test/regress/sql/multi_tpch_query14.sql000066400000000000000000000010111317107136600231500ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY14 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #14 from the TPC-H decision support benchmark SELECT 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue FROM lineitem, part WHERE l_partkey = p_partkey AND l_shipdate >= date '1995-09-01' AND l_shipdate < date '1995-09-01' + interval '1' year; citus-7.0.3/src/test/regress/sql/multi_tpch_query19.sql000066400000000000000000000017711317107136600231720ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY19 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #19 from the TPC-H decision support benchmark. Note that we modified -- the query from its original to make it work on smaller data sets. SELECT sum(l_extendedprice* (1 - l_discount)) as revenue FROM lineitem, part WHERE ( p_partkey = l_partkey AND (p_brand = 'Brand#12' OR p_brand= 'Brand#14' OR p_brand='Brand#15') AND l_quantity >= 10 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#23' OR p_brand='Brand#24') AND l_quantity >= 20 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ) OR ( p_partkey = l_partkey AND (p_brand = 'Brand#33' OR p_brand = 'Brand#34' OR p_brand = 'Brand#35') AND l_quantity >= 1 AND l_shipmode in ('AIR', 'AIR REG', 'TRUCK') AND l_shipinstruct = 'DELIVER IN PERSON' ); citus-7.0.3/src/test/regress/sql/multi_tpch_query3.sql000066400000000000000000000012171317107136600230760ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY3 -- -- Query #3 from the TPC-H decision support benchmark. Unlike other TPC-H tests, -- we don't set citus.large_table_shard_count here, and instead use the default value -- coming from postgresql.conf or multi_task_tracker_executor.conf. 
SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate; citus-7.0.3/src/test/regress/sql/multi_tpch_query6.sql000066400000000000000000000006601317107136600231020ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY6 -- -- Change configuration to treat lineitem and orders tables as large SET citus.large_table_shard_count TO 2; -- Query #6 from the TPC-H decision support benchmark SELECT sum(l_extendedprice * l_discount) as revenue FROM lineitem WHERE l_shipdate >= date '1994-01-01' and l_shipdate < date '1994-01-01' + interval '1 year' and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; citus-7.0.3/src/test/regress/sql/multi_tpch_query7.sql000066400000000000000000000016671317107136600231130ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY7 -- -- Change configuration to treat lineitem AND orders tables as large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H decision support benchmark SELECT supp_nation, cust_nation, l_year, sum(volume) as revenue FROM ( SELECT n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year FROM l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume FROM supplier, lineitem, orders, customer, nation n1, nation n2 WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = n1.n_nationkey AND c_nationkey = n2.n_nationkey AND ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) as shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; citus-7.0.3/src/test/regress/sql/multi_tpch_query7_nested.sql000066400000000000000000000021601317107136600244420ustar00rootroot00000000000000-- -- MULTI_TPCH_QUERY7_NESTED -- -- Change configuration to treat lineitem AND orders tables AS large SET citus.large_table_shard_count TO 2; -- Query #7 from the TPC-H benchmark; modified to include sub-selects SELECT supp_nation, cust_nation, l_year, sum(volume) AS revenue FROM ( SELECT supp_nation, cust_nation, extract(year FROM l_shipdate) AS l_year, l_extendedprice * (1 - l_discount) AS volume FROM supplier, lineitem, orders, customer, ( SELECT n1.n_nationkey AS supp_nation_key, n2.n_nationkey AS cust_nation_key, n1.n_name AS supp_nation, n2.n_name AS cust_nation FROM nation n1, nation n2 WHERE ( (n1.n_name = 'FRANCE' AND n2.n_name = 'GERMANY') OR (n1.n_name = 'GERMANY' AND n2.n_name = 'FRANCE') ) ) AS temp WHERE s_suppkey = l_suppkey AND o_orderkey = l_orderkey AND c_custkey = o_custkey AND s_nationkey = supp_nation_key AND c_nationkey = cust_nation_key AND l_shipdate between date '1995-01-01' AND date '1996-12-31' ) AS shipping GROUP BY supp_nation, cust_nation, l_year ORDER BY supp_nation, cust_nation, l_year; citus-7.0.3/src/test/regress/sql/multi_transaction_recovery.sql000066400000000000000000000066411317107136600251010ustar00rootroot00000000000000ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; -- Tests for prepared transaction recovery -- Ensure pg_dist_transaction is empty for test SELECT recover_prepared_transactions(); SELECT * FROM pg_dist_transaction; -- Create some "fake" prepared transactions to recover 
\c - - - :worker_1_port BEGIN; CREATE TABLE should_abort (value int); PREPARE TRANSACTION 'citus_0_should_abort'; BEGIN; CREATE TABLE should_commit (value int); PREPARE TRANSACTION 'citus_0_should_commit'; BEGIN; CREATE TABLE should_be_sorted_into_middle (value int); PREPARE TRANSACTION 'citus_0_should_be_sorted_into_middle'; \c - - - :master_port -- Add "fake" pg_dist_transaction records and run recovery INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_commit'); INSERT INTO pg_dist_transaction VALUES (1, 'citus_0_should_be_forgotten'); SELECT recover_prepared_transactions(); SELECT count(*) FROM pg_dist_transaction; -- Confirm that transactions were correctly rolled forward \c - - - :worker_1_port SELECT count(*) FROM pg_tables WHERE tablename = 'should_abort'; SELECT count(*) FROM pg_tables WHERE tablename = 'should_commit'; \c - - - :master_port SET citus.shard_replication_factor TO 2; SET citus.shard_count TO 2; SET citus.multi_shard_commit_protocol TO '2pc'; -- create_distributed_table should add 2 recovery records (1 connection per node) CREATE TABLE test_recovery (x text); SELECT create_distributed_table('test_recovery', 'x'); SELECT count(*) FROM pg_dist_transaction; -- create_reference_table should add another 2 recovery records CREATE TABLE test_recovery_ref (x text); SELECT create_reference_table('test_recovery_ref'); SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); -- plain INSERT does not use 2PC INSERT INTO test_recovery VALUES ('hello'); SELECT count(*) FROM pg_dist_transaction; -- Committed DDL commands should write 4 transaction recovery records BEGIN; ALTER TABLE test_recovery ADD COLUMN y text; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; ALTER TABLE test_recovery ADD COLUMN y text; SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); SELECT count(*) FROM pg_dist_transaction; -- Committed master_modify_multiple_shards should write 4 transaction recovery records BEGIN; SELECT master_modify_multiple_shards($$UPDATE test_recovery SET y = 'world'$$); ROLLBACK; SELECT count(*) FROM pg_dist_transaction; SELECT master_modify_multiple_shards($$UPDATE test_recovery SET y = 'world'$$); SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); SELECT count(*) FROM pg_dist_transaction; -- Committed INSERT..SELECT should write 4 transaction recovery records BEGIN; INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; ROLLBACK; SELECT count(*) FROM pg_dist_transaction; INSERT INTO test_recovery SELECT x, 'earth' FROM test_recovery; SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); -- Committed INSERT..SELECT via coordinator should write 4 transaction recovery records INSERT INTO test_recovery (x) SELECT 'hello-'||s FROM generate_series(1,100) s; SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); -- Committed COPY should write 4 transaction records COPY test_recovery (x) FROM STDIN CSV; hello-0 hello-1 \. SELECT count(*) FROM pg_dist_transaction; SELECT recover_prepared_transactions(); DROP TABLE test_recovery_ref; DROP TABLE test_recovery; citus-7.0.3/src/test/regress/sql/multi_transactional_drop_shards.sql000066400000000000000000000253331317107136600260670ustar00rootroot00000000000000-- -- MULTI_TRANSACTIONAL_DROP_SHARDS -- -- Tests that check the metadata returned by the master node. 
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1410000; SET citus.shard_count TO 4; -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1'); BEGIN; DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify table is not dropped \dt transactional_drop_shards -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* \c - - - :master_port -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT BEGIN; DROP TABLE transactional_drop_shards; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (1410000, 1410001, 1410002, 1410003) ORDER BY shardid, nodename, nodeport; -- verify table is dropped \dt transactional_drop_shards -- verify shards are dropped \c - - - :worker_1_port \dt transactional_drop_shards_* \c - - - :master_port -- test master_delete_protocol in transaction, then ROLLBACK CREATE TABLE transactional_drop_shards(column1 int); SELECT create_distributed_table('transactional_drop_shards', 'column1', 'append'); SELECT master_create_empty_shard('transactional_drop_shards'); BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* \c - - - :master_port -- test master_delete_protocol in transaction, then COMMIT BEGIN; SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify shards are dropped \c - - - :worker_1_port \dt transactional_drop_shards_* \c - - - :master_port -- test DROP table in a transaction after insertion SELECT master_create_empty_shard('transactional_drop_shards'); BEGIN; INSERT INTO transactional_drop_shards VALUES (1); DROP TABLE transactional_drop_shards; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass 
ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify table is not dropped \dt transactional_drop_shards -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* \c - - - :master_port -- test master_apply_delete_command in a transaction after insertion BEGIN; INSERT INTO transactional_drop_shards VALUES (1); SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* -- test DROP table with failing worker CREATE FUNCTION fail_drop_table() RETURNS event_trigger AS $fdt$ BEGIN RAISE 'illegal value'; END; $fdt$ LANGUAGE plpgsql; CREATE EVENT TRIGGER fail_drop_table ON sql_drop EXECUTE PROCEDURE fail_drop_table(); \c - - - :master_port \set VERBOSITY terse DROP TABLE transactional_drop_shards; \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify table is not dropped \dt transactional_drop_shards -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* \c - - - :master_port -- test DROP reference table with failing worker CREATE TABLE transactional_drop_reference(column1 int); SELECT create_reference_table('transactional_drop_reference'); \set VERBOSITY terse DROP TABLE transactional_drop_reference; \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_reference'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify table is not dropped \dt transactional_drop_reference -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_reference* \c - - - :master_port -- test master_apply_delete_command table with failing worker \set VERBOSITY terse SELECT master_apply_delete_command('DELETE FROM transactional_drop_shards'); \set VERBOSITY default -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_shards'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify shards are not dropped \c - - - :worker_1_port \dt transactional_drop_shards_* DROP EVENT TRIGGER fail_drop_table; \c - - - :master_port -- test with SERIAL column + with more shards SET citus.shard_count TO 8; CREATE TABLE transactional_drop_serial(column1 int, column2 SERIAL); SELECT 
create_distributed_table('transactional_drop_serial', 'column1'); -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then ROLLBACK BEGIN; DROP TABLE transactional_drop_serial; ROLLBACK; -- verify metadata is not deleted SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_serial'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- verify table is not dropped \dt transactional_drop_serial -- verify shards and sequence are not dropped \c - - - :worker_1_port \dt transactional_drop_serial_* \ds transactional_drop_serial_column2_seq \c - - - :master_port -- test DROP TABLE(ergo master_drop_all_shards) in transaction, then COMMIT BEGIN; DROP TABLE transactional_drop_serial; COMMIT; -- verify metadata is deleted SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (1410007, 1410008, 1410009, 1410010, 1410011, 1410012, 1410013, 1410014) ORDER BY shardid, nodename, nodeport; -- verify table is dropped \dt transactional_drop_serial -- verify shards and sequence are dropped \c - - - :worker_1_port \dt transactional_drop_serial_* \ds transactional_drop_serial_column2_seq \c - - - :master_port -- test with MX, DROP TABLE, then ROLLBACK SET citus.shard_replication_factor TO 1; SET citus.shard_count TO 4; CREATE TABLE transactional_drop_mx(column1 int); SELECT create_distributed_table('transactional_drop_mx', 'column1'); UPDATE pg_dist_partition SET repmodel='s' WHERE logicalrelid='transactional_drop_mx'::regclass; -- make worker 1 receive metadata changes SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -- see metadata is propagated to the worker \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; \c - - - :master_port BEGIN; DROP TABLE transactional_drop_mx; ROLLBACK; -- verify metadata is not deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'transactional_drop_mx'::regclass ORDER BY shardid) ORDER BY shardid, nodename, nodeport; -- test with MX, DROP TABLE, then COMMIT \c - - - :master_port BEGIN; DROP TABLE transactional_drop_mx; COMMIT; -- verify metadata is deleted \c - - - :worker_1_port SELECT shardid FROM pg_dist_shard WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid; SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (1410015, 1410016, 1410017, 1410018) ORDER BY shardid, nodename, nodeport; \c - - - :master_port -- clean the workspace DROP TABLE transactional_drop_shards, transactional_drop_reference; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port);
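-- As an illustrative sketch (an added example, not one of the checks above):
-- run_command_on_workers can confirm directly on the workers that the dropped tables
-- left no shard relations behind.
SELECT * FROM run_command_on_workers($$
  SELECT count(*) FROM pg_tables WHERE tablename LIKE 'transactional_drop_%'
$$);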
citus-7.0.3/src/test/regress/sql/multi_truncate.sql000066400000000000000000000156121317107136600224610ustar00rootroot00000000000000-- -- MULTI_TRUNCATE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1210000; -- -- truncate for append distribution -- expect all shards to be dropped -- CREATE TABLE test_truncate_append(a int); SELECT master_create_distributed_table('test_truncate_append', 'a', 'append'); -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_append; SELECT master_create_empty_shard('test_truncate_append') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_append; INSERT INTO test_truncate_append values (1); SELECT count(*) FROM test_truncate_append; -- create some more shards SELECT master_create_empty_shard('test_truncate_append'); SELECT master_create_empty_shard('test_truncate_append'); -- verify 3 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass ORDER BY shardid; TRUNCATE TABLE test_truncate_append; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_append; -- verify no shard exists anymore SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_append'::regclass; -- command can run inside transaction BEGIN; TRUNCATE TABLE test_truncate_append; COMMIT; DROP TABLE test_truncate_append; -- -- truncate for range distribution -- expect shard to be present, data to be truncated -- CREATE TABLE test_truncate_range(a int); SELECT master_create_distributed_table('test_truncate_range', 'a', 'range'); -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_range; SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('test_truncate_range') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1501, shardmaxvalue = 2500 WHERE shardid = :new_shard_id; SELECT count(*) FROM test_truncate_range; INSERT INTO test_truncate_range values (1); INSERT INTO test_truncate_range values (1001); INSERT INTO test_truncate_range values (2000); INSERT INTO test_truncate_range values (100); SELECT count(*) FROM test_truncate_range; -- verify 3 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; TRUNCATE TABLE test_truncate_range; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_range; -- verify 3 shards are still present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_range'::regclass ORDER BY shardid; -- verify that truncate can be aborted INSERT INTO test_truncate_range VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_range; ROLLBACK; SELECT count(*) FROM test_truncate_range; DROP TABLE test_truncate_range; -- -- truncate for hash distribution.
-- expect shard to be present, data to be truncated -- CREATE TABLE test_truncate_hash(a int); SELECT master_create_distributed_table('test_truncate_hash', 'a', 'hash'); -- verify no error is thrown when no shards are present TRUNCATE TABLE test_truncate_hash; SELECT count(*) FROM test_truncate_hash; INSERT INTO test_truncate_hash values (1); INSERT INTO test_truncate_hash values (1001); INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); SELECT count(*) FROM test_truncate_hash; -- verify 4 shards are present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; TRUNCATE TABLE test_truncate_hash; SELECT master_create_worker_shards('test_truncate_hash', 4, 1); INSERT INTO test_truncate_hash values (1); INSERT INTO test_truncate_hash values (1001); INSERT INTO test_truncate_hash values (2000); INSERT INTO test_truncate_hash values (100); SELECT count(*) FROM test_truncate_hash; TRUNCATE TABLE test_truncate_hash; -- verify data is truncated from the table SELECT count(*) FROM test_truncate_hash; -- verify 4 shards are still present SELECT shardid FROM pg_dist_shard where logicalrelid = 'test_truncate_hash'::regclass ORDER BY shardid; -- verify that truncate can be aborted INSERT INTO test_truncate_hash VALUES (1); BEGIN; TRUNCATE TABLE test_truncate_hash; ROLLBACK; SELECT count(*) FROM test_truncate_hash; DROP TABLE test_truncate_hash; -- test with table with spaces in it CREATE TABLE "a b hash" (a int, b int); SELECT master_create_distributed_table('"a b hash"', 'a', 'hash'); SELECT master_create_worker_shards('"a b hash"', 4, 1); INSERT INTO "a b hash" values (1, 0); SELECT * from "a b hash"; TRUNCATE TABLE "a b hash"; SELECT * from "a b hash"; DROP TABLE "a b hash"; -- now with append CREATE TABLE "a b append" (a int, b int); SELECT master_create_distributed_table('"a b append"', 'a', 'append'); SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 1, shardmaxvalue = 500 WHERE shardid = :new_shard_id; SELECT master_create_empty_shard('"a b append"') AS new_shard_id \gset UPDATE pg_dist_shard SET shardminvalue = 501, shardmaxvalue = 1000 WHERE shardid = :new_shard_id; INSERT INTO "a b append" values (1, 1); INSERT INTO "a b append" values (600, 600); SELECT * FROM "a b append" ORDER BY a; TRUNCATE TABLE "a b append"; -- verify all shards are dropped SELECT shardid FROM pg_dist_shard where logicalrelid = '"a b append"'::regclass; DROP TABLE "a b append"; -- Truncate local data only CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; TRUNCATE test_local_truncate; COMMIT; -- Ensure distributed data is not truncated SELECT * FROM test_local_truncate; -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate'); DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is truncated SELECT * FROM test_local_truncate; DROP TABLE test_local_truncate; -- Truncate local data, but roll back CREATE TABLE test_local_truncate (x int, y int); INSERT INTO test_local_truncate VALUES (1,2); SELECT create_distributed_table('test_local_truncate', 'x', colocate_with => 'none'); BEGIN; SET LOCAL citus.enable_ddl_propagation TO off; TRUNCATE test_local_truncate; ROLLBACK; -- Ensure distributed
data is not truncated SELECT * FROM test_local_truncate; -- Undistribute table SELECT master_drop_all_shards('test_local_truncate', 'public', 'test_local_truncate'); DELETE FROM pg_dist_partition WHERE logicalrelid = 'test_local_truncate'::regclass; -- Ensure local data is not truncated SELECT * FROM test_local_truncate; DROP TABLE test_local_truncate; citus-7.0.3/src/test/regress/sql/multi_unsupported_worker_operations.sql000066400000000000000000000201741317107136600270570ustar00rootroot00000000000000-- -- MULTI_UNSUPPORTED_WORKER_OPERATIONS -- -- Tests for ensuring unsupported functions on workers error out. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1270000; ALTER SEQUENCE pg_catalog.pg_dist_groupid_seq RESTART 1370000; ALTER SEQUENCE pg_catalog.pg_dist_node_nodeid_seq RESTART 1370000; -- Set the colocation id to a safe value so that -- it is not affected by future changes to colocation id sequence SELECT nextval('pg_catalog.pg_dist_colocationid_seq') AS last_colocation_id \gset ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 150000; -- Prepare the environment SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; SET citus.shard_count TO 5; -- Create test tables CREATE TABLE mx_table (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table', 'col_1'); CREATE TABLE mx_table_2 (col_1 int, col_2 text, col_3 BIGSERIAL); SELECT create_distributed_table('mx_table_2', 'col_1'); CREATE TABLE mx_ref_table (col_1 int, col_2 text); SELECT create_reference_table('mx_ref_table'); -- Check that the created tables are colocated MX tables SELECT logicalrelid, repmodel, colocationid FROM pg_dist_partition WHERE logicalrelid IN ('mx_table'::regclass, 'mx_table_2'::regclass) ORDER BY logicalrelid; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); COPY mx_table (col_1, col_2) FROM STDIN WITH (FORMAT 'csv'); -37, 'lorem' 65536, 'ipsum' 80, 'dolor' 7344, 'sit' 65832, 'amet' \.
INSERT INTO mx_ref_table VALUES (-37, 'morbi'); INSERT INTO mx_ref_table VALUES (-78, 'sapien'); INSERT INTO mx_ref_table VALUES (-34, 'augue'); SELECT * FROM mx_table ORDER BY col_1; -- Try commands from metadata worker \c - - - :worker_1_port CREATE TABLE mx_table_worker(col_1 text); -- master_create_distributed_table SELECT master_create_distributed_table('mx_table_worker', 'col_1', 'hash'); -- create_distributed_table SELECT create_distributed_table('mx_table_worker', 'col_1'); -- create_reference_table SELECT create_reference_table('mx_table_worker'); SELECT count(*) FROM pg_dist_partition WHERE logicalrelid='mx_table_worker'::regclass; DROP TABLE mx_table_worker; -- master_create_worker_shards CREATE TEMP TABLE pg_dist_shard_temp AS SELECT * FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass; DELETE FROM pg_dist_shard WHERE logicalrelid = 'mx_table'::regclass; SELECT master_create_worker_shards('mx_table', 5, 1); SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; INSERT INTO pg_dist_shard SELECT * FROM pg_dist_shard_temp; SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='mx_table'::regclass; -- INSERT/UPDATE/DELETE/COPY on reference tables SELECT * FROM mx_ref_table ORDER BY col_1; INSERT INTO mx_ref_table (col_1, col_2) VALUES (-6, 'vestibulum'); UPDATE mx_ref_table SET col_2 = 'habitant' WHERE col_1 = -37; DELETE FROM mx_ref_table WHERE col_1 = -78; COPY mx_ref_table (col_1, col_2) FROM STDIN WITH (FORMAT 'csv'); SELECT * FROM mx_ref_table ORDER BY col_1; \c - - - :master_port DROP TABLE mx_ref_table; CREATE UNIQUE INDEX mx_test_uniq_index ON mx_table(col_1); \c - - - :worker_1_port -- DDL commands SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; CREATE INDEX mx_test_index ON mx_table(col_2); ALTER TABLE mx_table ADD COLUMN col_4 int; ALTER TABLE mx_table_2 ADD CONSTRAINT mx_fk_constraint FOREIGN KEY(col_1) REFERENCES mx_table(col_1); SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; \d mx_test_index -- master_modify_multiple_shards SELECT master_modify_multiple_shards('UPDATE mx_table SET col_2=''none'''); SELECT count(*) FROM mx_table WHERE col_2='none'; SELECT count(*) FROM mx_table WHERE col_2!='none'; SELECT master_modify_multiple_shards('DELETE FROM mx_table'); SELECT count(*) FROM mx_table; -- master_drop_all_shards SELECT master_drop_all_shards('mx_table'::regclass, 'public', 'mx_table'); SELECT count(*) FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid='mx_table'::regclass; -- master_apply_delete_command SELECT master_apply_delete_command('DELETE FROM mx_table'); SELECT count(*) FROM mx_table; -- master_add_node SELECT 1 FROM master_add_node('localhost', 5432); SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; -- master_remove_node \c - - - :master_port DROP INDEX mx_test_uniq_index; SELECT 1 FROM master_add_node('localhost', 5432); \c - - - :worker_1_port SELECT master_remove_node('localhost', 5432); SELECT count(1) FROM pg_dist_node WHERE nodename='localhost' AND nodeport=5432; \c - - - :master_port SELECT master_remove_node('localhost', 5432); -- TRUNCATE \c - - - :worker_1_port TRUNCATE mx_table; SELECT count(*) FROM mx_table; -- INSERT / SELECT pulls results to worker BEGIN; SET LOCAL client_min_messages TO DEBUG; INSERT INTO mx_table_2 SELECT * FROM mx_table; END; SELECT count(*) FROM mx_table_2; -- mark_tables_colocated UPDATE pg_dist_partition SET colocationid = 0 WHERE 
logicalrelid='mx_table_2'::regclass; SELECT mark_tables_colocated('mx_table', ARRAY['mx_table_2']); SELECT colocationid FROM pg_dist_partition WHERE logicalrelid='mx_table_2'::regclass; SELECT colocationid AS old_colocation_id FROM pg_dist_partition WHERE logicalrelid='mx_table'::regclass \gset UPDATE pg_dist_partition SET colocationid = :old_colocation_id WHERE logicalrelid='mx_table_2'::regclass; -- start_metadata_sync_to_node SELECT start_metadata_sync_to_node('localhost', :worker_2_port); SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; -- stop_metadata_sync_to_node \c - - - :master_port SELECT start_metadata_sync_to_node('localhost', :worker_2_port); \c - - - :worker_1_port SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); \c - - - :master_port SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; SELECT stop_metadata_sync_to_node('localhost', :worker_2_port); SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_port; \c - - - :worker_2_port SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition; DELETE FROM pg_dist_node; \c - - - :worker_1_port -- DROP TABLE DROP TABLE mx_table; SELECT count(*) FROM mx_table; -- master_drop_distributed_table_metadata SELECT master_drop_distributed_table_metadata('mx_table'::regclass, 'public', 'mx_table'); SELECT count(*) FROM mx_table; -- master_copy_shard_placement SELECT logicalrelid, shardid AS testshardid, nodename, nodeport FROM pg_dist_shard NATURAL JOIN pg_dist_shard_placement WHERE logicalrelid = 'mx_table'::regclass AND nodeport=:worker_1_port ORDER BY shardid LIMIT 1 \gset SELECT groupid AS worker_2_group FROM pg_dist_node WHERE nodeport = :worker_2_port \gset INSERT INTO pg_dist_placement (groupid, shardid, shardstate, shardlength) VALUES (:worker_2_group, :testshardid, 3, 0); SELECT master_copy_shard_placement(:testshardid, 'localhost', :worker_1_port, 'localhost', :worker_2_port); SELECT shardid, nodename, nodeport, shardstate FROM pg_dist_shard_placement WHERE shardid = :testshardid ORDER BY nodeport; DELETE FROM pg_dist_placement WHERE groupid = :worker_2_group AND shardid = :testshardid; -- master_get_new_placementid SELECT master_get_new_placementid(); -- Show that sequences can be created and dropped on worker nodes CREATE TABLE some_table_with_sequence(a int, b BIGSERIAL, c BIGSERIAL); DROP TABLE some_table_with_sequence; CREATE SEQUENCE some_sequence; DROP SEQUENCE some_sequence; -- Show that dropping the sequence of an MX table with cascade harms the table and shards BEGIN; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; DROP SEQUENCE mx_table_col_3_seq CASCADE; SELECT "Column", "Type", "Modifiers" FROM table_desc WHERE relid='public.mx_table'::regclass; ROLLBACK; -- Cleanup \c - - - :master_port DROP TABLE mx_table; DROP TABLE mx_table_2; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); \c - - - :worker_1_port DELETE FROM pg_dist_node; SELECT worker_drop_distributed_table(logicalrelid) FROM pg_dist_partition; \c - - - :master_port ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART :last_colocation_id; RESET citus.shard_replication_factor; RESET citus.replication_model; citus-7.0.3/src/test/regress/sql/multi_upgrade_reference_table.sql000066400000000000000000000532631317107136600254540ustar00rootroot00000000000000-- -- MULTI_UPGRADE_REFERENCE_TABLE -- -- Tests around upgrade_reference_table UDF -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1360000; ALTER SEQUENCE 
pg_catalog.pg_dist_colocationid_seq RESTART 1360000; -- test with not distributed table CREATE TABLE upgrade_reference_table_local(column1 int); SELECT upgrade_to_reference_table('upgrade_reference_table_local'); DROP TABLE upgrade_reference_table_local; -- test with table which has more than one shard SET citus.shard_count TO 4; CREATE TABLE upgrade_reference_table_multiple_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_multiple_shard', 'column1'); SELECT upgrade_to_reference_table('upgrade_reference_table_multiple_shard'); DROP TABLE upgrade_reference_table_multiple_shard; -- test with table which has no shard CREATE TABLE upgrade_reference_table_no_shard(column1 int); SELECT create_distributed_table('upgrade_reference_table_no_shard', 'column1', 'append'); SELECT upgrade_to_reference_table('upgrade_reference_table_no_shard'); DROP TABLE upgrade_reference_table_no_shard; -- test with table with foreign keys SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_referenced(column1 int PRIMARY KEY); SELECT create_distributed_table('upgrade_reference_table_referenced', 'column1'); CREATE TABLE upgrade_reference_table_referencing(column1 int REFERENCES upgrade_reference_table_referenced(column1)); SELECT create_distributed_table('upgrade_reference_table_referencing', 'column1'); -- update replication model to statement-based replication since streaming replicated tables cannot be upgraded to reference tables UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_referenced'::regclass; UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_referencing'::regclass; SELECT upgrade_to_reference_table('upgrade_reference_table_referenced'); SELECT upgrade_to_reference_table('upgrade_reference_table_referencing'); DROP TABLE upgrade_reference_table_referencing; DROP TABLE upgrade_reference_table_referenced; -- test with no healthy placements CREATE TABLE upgrade_reference_table_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_unhealthy', 'column1'); UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_unhealthy'::regclass; UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1360006; SELECT upgrade_to_reference_table('upgrade_reference_table_unhealthy'); DROP TABLE upgrade_reference_table_unhealthy; -- test with table containing composite type CREATE TYPE upgrade_test_composite_type AS (key1 text, key2 text); \c - - - :worker_1_port CREATE TYPE upgrade_test_composite_type AS (key1 text, key2 text); \c - - - :master_port SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_composite(column1 int, column2 upgrade_test_composite_type); SELECT create_distributed_table('upgrade_reference_table_composite', 'column1'); UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_composite'::regclass; SELECT upgrade_to_reference_table('upgrade_reference_table_composite'); DROP TABLE upgrade_reference_table_composite; -- test with reference table CREATE TABLE upgrade_reference_table_reference(column1 int); SELECT create_reference_table('upgrade_reference_table_reference'); SELECT upgrade_to_reference_table('upgrade_reference_table_reference'); DROP TABLE upgrade_reference_table_reference; -- test valid cases, append distributed table CREATE TABLE upgrade_reference_table_append(column1 int); SELECT 
create_distributed_table('upgrade_reference_table_append', 'column1', 'append'); COPY upgrade_reference_table_append FROM STDIN; 1 2 3 4 5 \. -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); SELECT upgrade_to_reference_table('upgrade_reference_table_append'); -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_append'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_append'::regclass) ORDER BY nodeport; DROP TABLE upgrade_reference_table_append; -- test valid cases, shard exists at one worker CREATE TABLE upgrade_reference_table_one_worker(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_worker', 'column1'); UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_one_worker'::regclass; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass); SELECT upgrade_to_reference_table('upgrade_reference_table_one_worker'); -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 
'upgrade_reference_table_one_worker'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_worker'::regclass) ORDER BY nodeport; DROP TABLE upgrade_reference_table_one_worker; -- test valid cases, shard exists at both workers but one is unhealthy SET citus.shard_replication_factor TO 2; CREATE TABLE upgrade_reference_table_one_unhealthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_one_unhealthy', 'column1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE shardid = 1360010 AND nodeport = :worker_1_port; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass) ORDER BY nodeport; SELECT upgrade_to_reference_table('upgrade_reference_table_one_unhealthy'); -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_one_unhealthy'::regclass) ORDER BY nodeport; DROP TABLE upgrade_reference_table_one_unhealthy; -- test valid cases, shard exists at both workers and both are healthy CREATE TABLE upgrade_reference_table_both_healthy(column1 int); SELECT create_distributed_table('upgrade_reference_table_both_healthy', 'column1'); -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) ORDER BY nodeport; SELECT 
upgrade_to_reference_table('upgrade_reference_table_both_healthy'); -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_both_healthy'::regclass) ORDER BY nodeport; DROP TABLE upgrade_reference_table_both_healthy; -- test valid cases, do it in transaction and ROLLBACK SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_rollback(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_rollback', 'column1'); UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_rollback'::regclass; -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_rollback'); ROLLBACK; -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_rollback'::regclass); DROP TABLE upgrade_reference_table_transaction_rollback; -- test valid cases, do it in transaction and COMMIT SET citus.shard_replication_factor TO 1; CREATE TABLE upgrade_reference_table_transaction_commit(column1 int); SELECT create_distributed_table('upgrade_reference_table_transaction_commit', 'column1'); UPDATE pg_dist_partition SET repmodel='c' WHERE logicalrelid='upgrade_reference_table_transaction_commit'::regclass; -- situation before upgrade_reference_table 
SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); BEGIN; SELECT upgrade_to_reference_table('upgrade_reference_table_transaction_commit'); COMMIT; -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_transaction_commit'::regclass) ORDER BY nodeport; -- verify that shard is replicated to other worker \c - - - :worker_2_port \dt upgrade_reference_table_transaction_commit_* \c - - - :master_port DROP TABLE upgrade_reference_table_transaction_commit; -- create an mx table SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; SET citus.replication_model TO 'streaming'; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); -- verify that streaming replicated tables cannot be upgraded to reference tables SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; SELECT upgrade_to_reference_table('upgrade_reference_table_mx'); -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM 
pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; DROP TABLE upgrade_reference_table_mx; -- test valid cases, do it with MX SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 2; RESET citus.replication_model; CREATE TABLE upgrade_reference_table_mx(column1 int); SELECT create_distributed_table('upgrade_reference_table_mx', 'column1'); UPDATE pg_dist_shard_placement SET shardstate = 3 WHERE nodeport = :worker_2_port AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='upgrade_reference_table_mx'::regclass); SELECT start_metadata_sync_to_node('localhost', :worker_1_port); -- situation before upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; SELECT upgrade_to_reference_table('upgrade_reference_table_mx'); -- situation after upgrade_reference_table SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT * FROM pg_dist_colocation WHERE colocationid IN (SELECT colocationid FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass); SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; -- situation on metadata worker \c - - - :worker_1_port SELECT partmethod, (partkey IS NULL) as partkeyisnull, colocationid, repmodel FROM pg_dist_partition WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT shardid, (shardminvalue IS NULL) as shardminvalueisnull, (shardmaxvalue IS NULL) as shardmaxvalueisnull FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass; SELECT shardid, shardstate, shardlength, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid = 'upgrade_reference_table_mx'::regclass) ORDER BY nodeport; \c - - - :master_port DROP TABLE upgrade_reference_table_mx; SELECT stop_metadata_sync_to_node('localhost', :worker_1_port); citus-7.0.3/src/test/regress/sql/multi_upsert.sql000066400000000000000000000177461317107136600221700ustar00rootroot00000000000000-- this test file aims to test UPSERT feature on Citus ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 980000; CREATE TABLE upsert_test ( part_key int UNIQUE, other_col int, 
third_col int ); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test', 'part_key', 'hash'); SELECT master_create_worker_shards('upsert_test', '4', '2'); -- do a regular insert INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2); -- observe that there is a conflict and the following query does nothing INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT DO NOTHING; -- same as the above with different syntax INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING; -- again, the same query with another syntax INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT ON CONSTRAINT upsert_test_part_key_key DO NOTHING; -- now, update the columns INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 2, third_col = 4; -- see the results SELECT * FROM upsert_test; -- do a multi-row DO NOTHING insert INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1), (2, 2) ON CONFLICT DO NOTHING; -- do a multi-row DO UPDATE insert INSERT INTO upsert_test (part_key, other_col) VALUES (1, 10), (2, 20), (3, 30) ON CONFLICT (part_key) DO UPDATE SET other_col = EXCLUDED.other_col WHERE upsert_test.part_key != 1; -- see the results SELECT * FROM upsert_test ORDER BY part_key ASC; DELETE FROM upsert_test WHERE part_key = 2; DELETE FROM upsert_test WHERE part_key = 3; -- use a WHERE clause, so that SET doesn't have an effect INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 3; -- see the results SELECT * FROM upsert_test; -- use a WHERE clause, that hits the row and updates it INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 2; -- see the results SELECT * FROM upsert_test; -- use two elements in the WHERE that don't hit the row, so no update happens INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 30 WHERE upsert_test.other_col = 2 AND upsert_test.other_col = 3; -- use EXCLUDED keyword INSERT INTO upsert_test (part_key, other_col, third_col) VALUES (1, 1, 100) ON CONFLICT (part_key) DO UPDATE SET other_col = EXCLUDED.third_col; -- see the results SELECT * FROM upsert_test; -- now update multiple columns with ALIAS table and reference to the row itself INSERT INTO upsert_test as ups_test (part_key) VALUES (1) ON CONFLICT (part_key) DO UPDATE SET other_col = ups_test.other_col + 50, third_col = 200; -- see the results SELECT * FROM upsert_test; -- now, do some more complex assignments INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = upsert_test.other_col + 1, third_col = upsert_test.third_col + (EXCLUDED.part_key + EXCLUDED.other_col) + 670; -- see the results SELECT * FROM upsert_test; -- now, WHERE clause also has table reference INSERT INTO upsert_test as ups_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = (ups_test.other_col + ups_test.third_col + (EXCLUDED.part_key + EXCLUDED.other_col)) % 15 WHERE ups_test.third_col < 1000 + ups_test.other_col; -- see the results SELECT * FROM upsert_test; -- Test upsert, with returning: INSERT INTO upsert_test (part_key, other_col) VALUES (2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; INSERT INTO upsert_test (part_key, other_col) VALUES 
(2, 2) ON CONFLICT (part_key) DO UPDATE SET other_col = 3 RETURNING *; -- create another table CREATE TABLE upsert_test_2 ( part_key int, other_col int, third_col int, PRIMARY KEY (part_key, other_col) ); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test_2', 'part_key', 'hash'); SELECT master_create_worker_shards('upsert_test_2', '4', '2'); -- now show that Citus works with multiple columns as the PRIMARY KEY, including the partition key INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1); INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key, other_col) DO NOTHING; -- this errors out since there is no unique constraint on partition key INSERT INTO upsert_test_2 (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO NOTHING; -- create another table CREATE TABLE upsert_test_3 ( part_key int, count int ); -- note that this is not a unique index CREATE INDEX idx_ups_test ON upsert_test_3(part_key); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test_3', 'part_key', 'hash'); SELECT master_create_worker_shards('upsert_test_3', '4', '2'); -- since there are no unique indexes, error-out INSERT INTO upsert_test_3 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_3.count + 1; -- create another table CREATE TABLE upsert_test_4 ( part_key int UNIQUE, count int ); -- distribute the table and create shards SELECT master_create_distributed_table('upsert_test_4', 'part_key', 'hash'); SELECT master_create_worker_shards('upsert_test_4', '4', '2'); -- a single row insert INSERT INTO upsert_test_4 VALUES (1, 0); -- show a simple count example use case INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; INSERT INTO upsert_test_4 VALUES (1, 0) ON CONFLICT(part_key) DO UPDATE SET count = upsert_test_4.count + 1; -- now see the results SELECT * FROM upsert_test_4; -- now test dropped columns CREATE TABLE dropcol_distributed(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); SELECT master_create_distributed_table('dropcol_distributed', 'key', 'hash'); SELECT master_create_worker_shards('dropcol_distributed', 4, 1); INSERT INTO dropcol_distributed AS dropcol (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key) DO UPDATE SET keep1 = dropcol.keep1; ALTER TABLE dropcol_distributed DROP COLUMN drop2; INSERT INTO dropcol_distributed (key, keep1, keep2) VALUES (1, '5', 5) ON CONFLICT(key) DO UPDATE SET keep1 = dropcol_distributed.keep1; ALTER TABLE dropcol_distributed DROP COLUMN keep2; INSERT INTO dropcol_distributed AS dropcol (key, keep1) VALUES (1, '5') ON CONFLICT(key) DO UPDATE SET keep1 = dropcol.keep1; ALTER TABLE dropcol_distributed DROP COLUMN drop1; INSERT INTO dropcol_distributed AS dropcol (key, keep1) VALUES (1, '5') ON CONFLICT(key) DO UPDATE SET keep1 = dropcol.keep1; -- below we test the cases that Citus does not support -- subquery in the SET clause INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET 
other_col = (SELECT count(*) from upsert_test); -- non mutable function call in the SET INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = random()::int; -- non mutable function call in the WHERE INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET other_col = 5 WHERE upsert_test.other_col = random()::int; -- non mutable function call in the arbiter WHERE INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) WHERE part_key = random()::int DO UPDATE SET other_col = 5; -- error out on attempt to update the partition key INSERT INTO upsert_test (part_key, other_col) VALUES (1, 1) ON CONFLICT (part_key) DO UPDATE SET part_key = 15; citus-7.0.3/src/test/regress/sql/multi_utilities.sql000066400000000000000000000164601317107136600226510ustar00rootroot00000000000000 ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 990000; -- =================================================================== -- test utility statement functionality -- =================================================================== CREATE TABLE sharded_table ( name text, id bigint ); SELECT master_create_distributed_table('sharded_table', 'id', 'hash'); SELECT master_create_worker_shards('sharded_table', 2, 1); -- COPY out is supported with distributed tables COPY sharded_table TO STDOUT; COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT; BEGIN; SET TRANSACTION READ ONLY; COPY sharded_table TO STDOUT; COPY (SELECT COUNT(*) FROM sharded_table) TO STDOUT; COMMIT; -- cursors may not involve distributed tables DECLARE all_sharded_rows CURSOR FOR SELECT * FROM sharded_table; -- verify PREPARE functionality PREPARE sharded_insert AS INSERT INTO sharded_table VALUES ('adam', 1); PREPARE sharded_update AS UPDATE sharded_table SET name = 'bob' WHERE id = 1; PREPARE sharded_delete AS DELETE FROM sharded_table WHERE id = 1; PREPARE sharded_query AS SELECT name FROM sharded_table WHERE id = 1; EXECUTE sharded_query; EXECUTE sharded_insert; EXECUTE sharded_query; EXECUTE sharded_update; EXECUTE sharded_query; EXECUTE sharded_delete; EXECUTE sharded_query; -- try to drop shards with where clause SELECT master_apply_delete_command('DELETE FROM sharded_table WHERE id > 0'); -- drop all shards SELECT master_apply_delete_command('DELETE FROM sharded_table'); -- lock shard metadata: take some share locks and exclusive locks BEGIN; SELECT lock_shard_metadata(5, ARRAY[999001, 999002, 999002]); SELECT lock_shard_metadata(7, ARRAY[999001, 999003, 999004]); SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; END; -- lock shard metadata: unsupported lock type SELECT lock_shard_metadata(0, ARRAY[990001, 999002]); -- lock shard metadata: invalid shard ID SELECT lock_shard_metadata(5, ARRAY[0]); -- lock shard metadata: lock nothing SELECT lock_shard_metadata(5, ARRAY[]::bigint[]); -- lock shard resources: take some share locks and exclusive locks BEGIN; SELECT lock_shard_resources(5, ARRAY[999001, 999002, 999002]); SELECT lock_shard_resources(7, ARRAY[999001, 999003, 999004]); SELECT locktype, objid, mode, granted FROM pg_locks WHERE objid IN (999001, 999002, 999003, 999004) ORDER BY objid, mode; END; -- lock shard resources: unsupported lock type SELECT lock_shard_resources(0, ARRAY[990001, 999002]); -- lock shard resources: invalid shard ID SELECT lock_shard_resources(5, ARRAY[-1]); -- lock shard resources: lock nothing SELECT lock_shard_resources(5, 
ARRAY[]::bigint[]); -- drop table DROP TABLE sharded_table; -- VACUUM tests -- create a table with a single shard (for convenience) CREATE TABLE dustbunnies (id integer, name text, age integer); SELECT master_create_distributed_table('dustbunnies', 'id', 'hash'); SELECT master_create_worker_shards('dustbunnies', 1, 2); -- add some data to the distributed table \copy dustbunnies (id, name) from stdin with csv 1,bugs 2,babs 3,buster 4,roger \. -- following approach adapted from PostgreSQL's stats.sql file -- save relevant stat counter values in refreshable view \c - - - :worker_1_port CREATE MATERIALIZED VIEW prevcounts AS SELECT analyze_count, vacuum_count FROM pg_stat_user_tables WHERE relname='dustbunnies_990002'; -- create function that sleeps until those counters increment create function wait_for_stats() returns void as $$ declare start_time timestamptz := clock_timestamp(); analyze_updated bool; vacuum_updated bool; begin -- we don't want to wait forever; loop will exit after 10 seconds for i in 1 .. 100 loop -- check to see if analyze has been updated SELECT (st.analyze_count >= pc.analyze_count + 1) INTO analyze_updated FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc WHERE st.relname='dustbunnies_990002' AND cl.relname='dustbunnies_990002'; -- check to see if vacuum has been updated SELECT (st.vacuum_count >= pc.vacuum_count + 1) INTO vacuum_updated FROM pg_stat_user_tables AS st, pg_class AS cl, prevcounts AS pc WHERE st.relname='dustbunnies_990002' AND cl.relname='dustbunnies_990002'; exit when analyze_updated or vacuum_updated; -- wait a little perform pg_sleep(0.1); -- reset stats snapshot so we can test again perform pg_stat_clear_snapshot(); end loop; -- report time waited in postmaster log (where it won't change test output) raise log 'wait_for_stats delayed % seconds', extract(epoch from clock_timestamp() - start_time); end $$ language plpgsql; -- run VACUUM and ANALYZE against the table on the master \c - - - :master_port VACUUM dustbunnies; ANALYZE dustbunnies; -- verify that the VACUUM and ANALYZE ran \c - - - :worker_1_port SELECT wait_for_stats(); REFRESH MATERIALIZED VIEW prevcounts; SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); -- get file node to verify VACUUM FULL SELECT relfilenode AS oldnode FROM pg_class WHERE oid='dustbunnies_990002'::regclass \gset -- send a VACUUM FULL and a VACUUM ANALYZE \c - - - :master_port VACUUM (FULL) dustbunnies; VACUUM ANALYZE dustbunnies; -- verify that relfilenode changed \c - - - :worker_1_port SELECT relfilenode != :oldnode AS table_rewritten FROM pg_class WHERE oid='dustbunnies_990002'::regclass; -- verify the VACUUM ANALYZE incremented both vacuum and analyze counts SELECT wait_for_stats(); SELECT pg_stat_get_vacuum_count('dustbunnies_990002'::regclass); SELECT pg_stat_get_analyze_count('dustbunnies_990002'::regclass); -- disable auto-VACUUM for next test ALTER TABLE dustbunnies_990002 SET (autovacuum_enabled = false); SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid='dustbunnies_990002'::regclass \gset -- send a VACUUM FREEZE after adding a new row \c - - - :master_port INSERT INTO dustbunnies VALUES (5, 'peter'); VACUUM (FREEZE) dustbunnies; -- verify that relfrozenxid increased \c - - - :worker_1_port SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class WHERE oid='dustbunnies_990002'::regclass; -- check there are no nulls in either column SELECT attname, null_frac FROM pg_stats WHERE 
tablename = 'dustbunnies_990002' ORDER BY attname; -- add NULL values, then perform column-specific ANALYZE \c - - - :master_port INSERT INTO dustbunnies VALUES (6, NULL, NULL); ANALYZE dustbunnies (name); -- verify that name's NULL ratio is updated but age's is not \c - - - :worker_1_port SELECT attname, null_frac FROM pg_stats WHERE tablename = 'dustbunnies_990002' ORDER BY attname; \c - - - :master_port -- verify warning for unqualified VACUUM VACUUM; -- and warning when using targeted VACUUM without DDL propagation SET citus.enable_ddl_propagation to false; VACUUM dustbunnies; SET citus.enable_ddl_propagation to DEFAULT; -- test worker_hash SELECT worker_hash(123); SELECT worker_hash('1997-08-08'::date); -- test a custom type (this test should run after multi_data_types) SELECT worker_hash('(1, 2)'); SELECT worker_hash('(1, 2)'::test_composite_type); SELECT citus_truncate_trigger(); -- confirm that citus_create_restore_point works SELECT 1 FROM citus_create_restore_point('regression-test'); -- TODO: support VERBOSE -- VACUUM VERBOSE dustbunnies; -- VACUUM (FULL, VERBOSE) dustbunnies; -- ANALYZE VERBOSE dustbunnies; citus-7.0.3/src/test/regress/sql/multi_utility_statements.sql000066400000000000000000000067121317107136600246070ustar00rootroot00000000000000-- -- MULTI_UTILITY_STATEMENTS -- -- Check that we can run utility statements with embedded SELECT statements on -- distributed tables. Currently we only support CREATE TABLE AS (SELECT..), -- DECLARE CURSOR, and COPY ... TO statements. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1000000; CREATE TEMP TABLE lineitem_pricing_summary AS ( SELECT l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order FROM lineitem WHERE l_shipdate <= date '1998-12-01' - interval '90 days' GROUP BY l_returnflag, l_linestatus ORDER BY l_returnflag, l_linestatus ); SELECT * FROM lineitem_pricing_summary ORDER BY l_returnflag, l_linestatus; -- Test we can handle joins SET citus.large_table_shard_count TO 2; CREATE TABLE shipping_priority AS ( SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate ); SELECT * FROM shipping_priority; DROP TABLE shipping_priority; -- Check COPY against distributed tables works both when specifying a -- query as the source, and when directly naming a table. 
COPY ( SELECT l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority FROM customer, orders, lineitem WHERE c_mktsegment = 'BUILDING' AND c_custkey = o_custkey AND l_orderkey = o_orderkey AND o_orderdate < date '1995-03-15' AND l_shipdate > date '1995-03-15' GROUP BY l_orderkey, o_orderdate, o_shippriority ORDER BY revenue DESC, o_orderdate ) TO stdout; -- check copying to file -- (quiet off to force number of copied records to be displayed) \set QUIET off COPY nation TO '/dev/null'; \set QUIET on -- stdout COPY nation TO STDOUT; -- ensure individual cols can be copied out, too COPY nation(n_name) TO STDOUT; -- Test that we can create on-commit drop tables, and also test creating with -- oids, along with changing column names BEGIN; CREATE TEMP TABLE customer_few (customer_key) WITH (OIDS) ON COMMIT DROP AS (SELECT * FROM customer WHERE c_nationkey = 1 ORDER BY c_custkey LIMIT 10); SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; COMMIT; SELECT customer_key, c_name, c_address FROM customer_few ORDER BY customer_key LIMIT 5; -- Test DECLARE CURSOR statements DECLARE holdCursor SCROLL CURSOR WITH HOLD FOR SELECT l_orderkey, l_linenumber, l_quantity, l_discount FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH NEXT FROM holdCursor; FETCH FORWARD 5 FROM holdCursor; FETCH LAST FROM holdCursor; FETCH BACKWARD 5 FROM holdCursor; -- Test WITHOUT HOLD cursors inside transactions BEGIN; DECLARE noHoldCursor SCROLL CURSOR FOR SELECT l_orderkey, l_linenumber, l_quantity, l_discount FROM lineitem ORDER BY l_orderkey, l_linenumber; FETCH ABSOLUTE 5 FROM noHoldCursor; FETCH BACKWARD noHoldCursor; COMMIT; FETCH ABSOLUTE 5 FROM noHoldCursor; citus-7.0.3/src/test/regress/sql/multi_utility_warnings.sql000066400000000000000000000004421317107136600242420ustar00rootroot00000000000000-- -- MULTI_UTILITY_WARNINGS -- -- Tests to check if we inform the user about potential caveats of creating new -- databases, schemas, and roles. ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1010000; CREATE DATABASE new_database; CREATE ROLE new_role; CREATE USER new_user; citus-7.0.3/src/test/regress/sql/multi_view.sql000066400000000000000000000354021317107136600216050ustar00rootroot00000000000000-- -- MULTI_VIEW -- -- This file contains test cases for view support. It verifies various -- Citus features: simple selects, aggregates, joins, outer joins -- router queries, single row inserts, multi row inserts via insert -- into select, multi row insert via copy commands. 
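-- As an orientation for the tests below: a view over a distributed table is an
-- ordinary coordinator-side CREATE VIEW, and Citus plans queries against the
-- expanded view definition. A minimal sketch of that pattern, using a
-- hypothetical table that is not part of this test schedule:
--
--   CREATE TABLE sketch_events (user_id int, payload text);
--   SELECT create_distributed_table('sketch_events', 'user_id');
--   CREATE VIEW sketch_recent_events AS
--       SELECT * FROM sketch_events WHERE user_id > 0;
--   SELECT count(*) FROM sketch_recent_events;
--   DROP VIEW sketch_recent_events;
--   DROP TABLE sketch_events;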
SELECT count(*) FROM lineitem_hash_part; SELECT count(*) FROM orders_hash_part; -- create a view for priority orders CREATE VIEW priority_orders AS SELECT * FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM'; -- aggregate pushdown SELECT o_orderpriority, count(*) FROM priority_orders GROUP BY 1 ORDER BY 2, 1; SELECT o_orderpriority, count(*) FROM orders_hash_part WHERE o_orderpriority < '3-MEDIUM' GROUP BY 1 ORDER BY 2,1; -- filters SELECT o_orderpriority, count(*) as all, count(*) FILTER (WHERE o_orderstatus ='F') as fullfilled FROM priority_orders GROUP BY 1 ORDER BY 2, 1; -- having SELECT o_orderdate, count(*) from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; -- having with filters SELECT o_orderdate, count(*) as all, count(*) FILTER(WHERE o_orderstatus = 'F') from priority_orders group by 1 having (count(*) > 3) order by 2 desc, 1 desc; -- limit SELECT o_orderkey, o_totalprice from orders_hash_part order by 2 desc, 1 asc limit 5 ; SELECT o_orderkey, o_totalprice from priority_orders order by 2 desc, 1 asc limit 1 ; CREATE VIEW priority_lineitem AS SELECT li.* FROM lineitem_hash_part li JOIN priority_orders ON (l_orderkey = o_orderkey); SELECT l_orderkey, count(*) FROM priority_lineitem GROUP BY 1 ORDER BY 2 DESC, 1 LIMIT 5; CREATE VIEW air_shipped_lineitems AS SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR'; -- join between view and table SELECT count(*) FROM orders_hash_part join air_shipped_lineitems ON (o_orderkey = l_orderkey); -- join between views SELECT count(*) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); -- count distinct on partition column is not supported SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey); -- count distinct on partition column is supported on router queries SELECT count(distinct o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); -- select distinct on router joins of views also works SELECT distinct(o_orderkey) FROM priority_orders join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE (o_orderkey = 231); -- left join support depends on flattening of the query -- following query fails since the inner part is kept as subquery SELECT * FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey); -- however, this works SELECT count(*) FROM priority_orders left join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; -- a view at the inner side of the join is not supported SELECT count(*) FROM priority_orders right join lineitem_hash_part ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; -- but view at the outer side is. This is essentially the same as a left join with arguments reversed. SELECT count(*) FROM lineitem_hash_part right join priority_orders ON (o_orderkey = l_orderkey) WHERE l_shipmode ='AIR'; -- left join on router query is supported SELECT o_orderkey, l_linenumber FROM priority_orders left join air_shipped_lineitems ON (o_orderkey = l_orderkey) WHERE o_orderkey = 2; -- repartition query on view join -- it passes planning, fails at execution stage SELECT * FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey); SET citus.task_executor_type to "task-tracker"; SELECT count(*) FROM priority_orders JOIN air_shipped_lineitems ON (o_custkey = l_suppkey); SET citus.task_executor_type to DEFAULT; -- insert into... 
select works with views CREATE TABLE temp_lineitem(LIKE lineitem_hash_part); SELECT create_distributed_table('temp_lineitem', 'l_orderkey', 'hash', 'lineitem_hash_part'); INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems; SELECT count(*) FROM temp_lineitem; -- following is a where false query, should not be inserting anything INSERT INTO temp_lineitem SELECT * FROM air_shipped_lineitems WHERE l_shipmode = 'MAIL'; SELECT count(*) FROM temp_lineitem; SET citus.task_executor_type to "task-tracker"; -- single view repartition subqueries are not supported SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM air_shipped_lineitems GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; -- logically same query without a view works fine SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM lineitem_hash_part WHERE l_shipmode = 'AIR' GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; -- when a view is replaced by actual query it still fails SELECT l_suppkey, count(*) FROM (SELECT l_suppkey, l_shipdate, count(*) FROM (SELECT * FROM lineitem_hash_part WHERE l_shipmode = 'AIR') asi GROUP BY l_suppkey, l_shipdate) supps GROUP BY l_suppkey ORDER BY 2 DESC, 1 LIMIT 5; -- repartition query on view with single table subquery CREATE VIEW supp_count_view AS SELECT * FROM (SELECT l_suppkey, count(*) FROM lineitem_hash_part GROUP BY 1) s1; SELECT * FROM supp_count_view ORDER BY 2 DESC, 1 LIMIT 10; SET citus.task_executor_type to DEFAULT; -- create a view with aggregate CREATE VIEW lineitems_by_shipping_method AS SELECT l_shipmode, count(*) as cnt FROM lineitem_hash_part GROUP BY 1; -- following will fail due to non GROUP BY of partition key SELECT * FROM lineitems_by_shipping_method; -- create a view with group by on partition column CREATE VIEW lineitems_by_orderkey AS SELECT l_orderkey, count(*) FROM lineitem_hash_part GROUP BY 1; -- this should work since we're able to push down this query SELECT * FROM lineitems_by_orderkey ORDER BY 2 DESC, 1 ASC LIMIT 10; -- it would also work since it is made router plannable SELECT * FROM lineitems_by_orderkey WHERE l_orderkey = 100; DROP TABLE temp_lineitem CASCADE; DROP VIEW supp_count_view; DROP VIEW lineitems_by_orderkey; DROP VIEW lineitems_by_shipping_method; DROP VIEW air_shipped_lineitems; DROP VIEW priority_lineitem; DROP VIEW priority_orders; -- new tests for real time use case including views and subqueries -- create view to display recent user who has an activity after a timestamp CREATE VIEW recent_users AS SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id HAVING max(time) > '2014-01-21 05:45:49.978738'::timestamp order by 2 DESC; SELECT * FROM recent_users; -- create a view for recent_events CREATE VIEW recent_events AS SELECT user_id, time FROM events_table WHERE time > '2014-01-20 01:45:49.978738'::timestamp; SELECT count(*) FROM recent_events; -- count number of events of recent_users SELECT count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id); -- count number of events of per recent users order by count SELECT ru.user_id, count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1; -- the same query with a left join however, it would still generate the same result SELECT ru.user_id, count(*) FROM recent_users ru LEFT JOIN events_table et ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1; -- query wrapped 
inside a subquery, it needs another top level order by SELECT * FROM (SELECT ru.user_id, count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.user_id) GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; -- non-partition key joins are not supported inside subquery SELECT * FROM (SELECT ru.user_id, count(*) FROM recent_users ru JOIN events_table et ON (ru.user_id = et.event_type) GROUP BY ru.user_id ORDER BY 2 DESC, 1) s1 ORDER BY 2 DESC, 1; -- join between views -- recent users who has an event in recent events SELECT ru.user_id FROM recent_users ru JOIN recent_events re USING(user_id) GROUP BY ru.user_id ORDER BY ru.user_id; -- outer join inside a subquery -- recent_events who are not done by recent users SELECT count(*) FROM ( SELECT re.*, ru.user_id AS recent_user FROM recent_events re LEFT JOIN recent_users ru USING(user_id)) reu WHERE recent_user IS NULL; -- same query with anti-join SELECT count(*) FROM recent_events re LEFT JOIN recent_users ru ON(ru.user_id = re.user_id) WHERE ru.user_id IS NULL; -- join between view and table -- users who has recent activity and they have an entry with value_1 is less than 15 SELECT ut.* FROM recent_users ru JOIN users_table ut USING (user_id) WHERE ut.value_1 < 15 ORDER BY 1,2; -- determine if a recent user has done a given event type or not SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event FROM recent_users ru LEFT JOIN events_table et ON(ru.user_id = et.user_id AND et.event_type = 625) ORDER BY 2 DESC, 1; -- view vs table join wrapped inside a subquery SELECT * FROM (SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event FROM recent_users ru LEFT JOIN events_table et ON(ru.user_id = et.user_id AND et.event_type = 625) ) s1 ORDER BY 2 DESC, 1; -- event vs table non-partition-key join is not supported SELECT * FROM (SELECT ru.user_id, CASE WHEN et.user_id IS NULL THEN 'NO' ELSE 'YES' END as done_event FROM recent_users ru LEFT JOIN events_table et ON(ru.user_id = et.event_type) ) s1 ORDER BY 2 DESC, 1; -- create a select only view CREATE VIEW selected_users AS SELECT * FROM users_table WHERE value_1 >= 120 and value_1 <150; CREATE VIEW recent_selected_users AS SELECT su.* FROM selected_users su JOIN recent_users ru USING(user_id); SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; -- this would be supported when we implement where partition_key in (subquery) support SELECT et.user_id, et.time FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users) GROUP BY 1,2 ORDER BY 1 DESC,2 DESC LIMIT 5; -- it is supported when it is a router query SELECT count(*) FROM events_table et WHERE et.user_id IN (SELECT user_id FROM recent_selected_users WHERE user_id = 90); -- expected this to work but it did not (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users); -- wrapping it inside a SELECT * works SELECT * FROM ( (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10 ORDER BY user_id; -- union all also works for views SELECT * FROM ( (SELECT user_id FROM recent_users) UNION ALL (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10 ORDER BY user_id; SELECT count(*) FROM ( (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10; -- expected this to work but it does not SELECT count(*) FROM ( (SELECT user_id FROM recent_users) UNION ALL (SELECT user_id 
FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10; -- expand view definitions and re-run last 2 queries SELECT count(*) FROM ( (SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id HAVING max(time) > '2014-01-21 05:45:49.978738'::timestamp order by 2 DESC) aa ) UNION (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 120 and value_1 <150) bb) ) u WHERE user_id < 15 AND user_id > 10; SELECT count(*) FROM ( (SELECT user_id FROM (SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id HAVING max(time) > '2014-01-21 05:45:49.978738'::timestamp order by 2 DESC) aa ) UNION ALL (SELECT user_id FROM (SELECT * FROM users_table WHERE value_1 >= 120 and value_1 <150) bb) ) u WHERE user_id < 15 AND user_id > 10; -- test distinct -- distinct is supported if it is on a partition key CREATE VIEW distinct_user_with_value_1_15 AS SELECT DISTINCT user_id FROM users_table WHERE value_1 = 15; SELECT * FROM distinct_user_with_value_1_15 ORDER BY user_id; -- distinct is not supported if it is on a non-partition key CREATE VIEW distinct_value_1 AS SELECT DISTINCT value_1 FROM users_table WHERE value_2 = 15; SELECT * FROM distinct_value_1; -- CTEs are not supported even if they are on views CREATE VIEW cte_view_1 AS WITH c1 AS (SELECT * FROM users_table WHERE value_1 = 15) SELECT * FROM c1 WHERE value_2 < 500; SELECT * FROM cte_view_1; -- this is a single shard query but still not supported since it has view + cte -- router planner can't detect it SELECT * FROM cte_view_1 WHERE user_id = 8; -- if CTE itself prunes down to a single shard then the view is supported (router plannable) CREATE VIEW cte_view_2 AS WITH c1 AS (SELECT * FROM users_table WHERE user_id = 8) SELECT * FROM c1 WHERE value_1 = 15; SELECT * FROM cte_view_2; CREATE VIEW router_view AS SELECT * FROM users_table WHERE user_id = 2; -- router plannable SELECT user_id FROM router_view GROUP BY 1; -- join a router view SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN recent_events USING (user_id) ORDER BY 2 LIMIT 3; SELECT * FROM (SELECT user_id FROM router_view GROUP BY 1) rv JOIN (SELECT * FROM recent_events) re USING (user_id) ORDER BY 2 LIMIT 3; -- views with limits CREATE VIEW recent_10_users AS SELECT user_id, max(time) as lastseen FROM users_table GROUP BY user_id ORDER BY lastseen DESC LIMIT 10; -- this is not supported since it has limit in it and subquery_pushdown is not set SELECT * FROM recent_10_users; SET citus.subquery_pushdown to ON; -- still not supported since outer query does not have limit -- it shows a different (subquery with single relation) error message SELECT * FROM recent_10_users; -- now it displays a more correct error message SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id); -- now both are supported when there is a limit on the outer most query SELECT * FROM recent_10_users ORDER BY lastseen DESC LIMIT 10; SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; RESET citus.subquery_pushdown; VACUUM ANALYZE users_table; -- explain tests EXPLAIN (COSTS FALSE) SELECT user_id FROM recent_selected_users GROUP BY 1 ORDER BY 1; EXPLAIN (COSTS FALSE) SELECT * FROM ( (SELECT user_id FROM recent_users) UNION (SELECT user_id FROM selected_users) ) u WHERE user_id < 15 AND user_id > 10 ORDER BY user_id; EXPLAIN (COSTS FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; SET citus.subquery_pushdown to ON; EXPLAIN (COSTS 
FALSE) SELECT et.* FROM recent_10_users JOIN events_table et USING(user_id) ORDER BY et.time DESC LIMIT 10; RESET citus.subquery_pushdown; DROP VIEW recent_10_users; DROP VIEW router_view; DROP VIEW cte_view_2; DROP VIEW cte_view_1; DROP VIEW distinct_value_1; DROP VIEW distinct_user_with_value_1_15; DROP VIEW recent_selected_users; DROP VIEW selected_users; DROP VIEW recent_events; DROP VIEW recent_users; citus-7.0.3/src/test/regress/sql/multi_working_columns.sql000066400000000000000000000011671317107136600240540ustar00rootroot00000000000000-- -- MULTI_WORKING_COLUMNS -- -- Columns that are used in sorting and grouping but that do not appear in the -- projection order are called working (resjunk) columns. We check in here that -- these columns are pulled to the master, and are correctly used in sorting and -- grouping. SELECT l_quantity FROM lineitem ORDER BY l_shipdate, l_quantity LIMIT 20; SELECT l_quantity, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count LIMIT 20; SELECT l_quantity, l_shipdate, count(*) as count FROM lineitem GROUP BY l_quantity, l_shipdate ORDER BY l_quantity, count, l_shipdate LIMIT 20; citus-7.0.3/src/test/regress/sql/task_tracker_assign_task.sql000066400000000000000000000034261317107136600244650ustar00rootroot00000000000000-- -- TASK_TRACKER_ASSIGN_TASK -- \set JobId 401010 \set SimpleTaskId 101101 \set RecoverableTaskId 801102 \set SimpleTaskTable lineitem_simple_task \set BadQueryString '\'SELECT COUNT(*) FROM bad_table_name\'' \set GoodQueryString '\'SELECT COUNT(*) FROM lineitem\'' \set SelectAll 'SELECT *' -- We assign two tasks to the task tracker. The first task simply executes. The -- recoverable task on the other hand repeatedly fails, and we sleep until the -- task tracker stops retrying the recoverable task. SELECT task_tracker_assign_task(:JobId, :SimpleTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_101101'''); SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :BadQueryString); -- After assigning the two tasks, we wait for them to make progress. Note that -- these tasks get scheduled and run asynchronously, so if the sleep interval is -- not enough, the regression tests may fail on an overloaded box. SELECT pg_sleep(3.0); SELECT task_tracker_task_status(:JobId, :SimpleTaskId); SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); COPY :SimpleTaskTable FROM 'base/pgsql_job_cache/job_401010/task_101101'; SELECT COUNT(*) FROM :SimpleTaskTable; SELECT COUNT(*) AS diff_lhs FROM ( :SelectAll FROM :SimpleTaskTable EXCEPT ALL :SelectAll FROM lineitem ) diff; SELECT COUNT(*) As diff_rhs FROM ( :SelectAll FROM lineitem EXCEPT ALL :SelectAll FROM :SimpleTaskTable ) diff; -- We now reassign the recoverable task with a good query string. This updates -- the task's query string, and reschedules the updated task for execution. SELECT task_tracker_assign_task(:JobId, :RecoverableTaskId, :GoodQueryString); SELECT pg_sleep(2.0); SELECT task_tracker_task_status(:JobId, :RecoverableTaskId); citus-7.0.3/src/test/regress/sql/task_tracker_cleanup_job.sql000066400000000000000000000024531317107136600244370ustar00rootroot00000000000000-- -- TASK_TRACKER_CLEANUP_JOB -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1060000; \set JobId 401010 \set CompletedTaskId 801107 \set RunningTaskId 801108 -- We assign two tasks to the task tracker. The first task should complete and -- the second task should continue to keep running. 
SELECT task_tracker_assign_task(:JobId, :CompletedTaskId, 'COPY (SELECT * FROM lineitem) TO ' '''base/pgsql_job_cache/job_401010/task_801107'''); SELECT task_tracker_assign_task(:JobId, :RunningTaskId, 'SELECT pg_sleep(100)'); SELECT pg_sleep(2.0); SELECT task_tracker_task_status(:JobId, :CompletedTaskId); SELECT task_tracker_task_status(:JobId, :RunningTaskId); SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010/task_801107'); SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); -- We now clean up all tasks for this job id. As a result, shared hash entries, -- files, and connections associated with these tasks should all be cleaned up. SELECT task_tracker_cleanup_job(:JobId); SELECT pg_sleep(1.0); SELECT task_tracker_task_status(:JobId, :CompletedTaskId); SELECT task_tracker_task_status(:JobId, :RunningTaskId); SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010/task_801107'); SELECT isdir FROM pg_stat_file('base/pgsql_job_cache/job_401010'); citus-7.0.3/src/test/regress/sql/task_tracker_create_table.sql000066400000000000000000000010271317107136600245640ustar00rootroot00000000000000-- -- TASK_TRACKER_CREATE_TABLE -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1070000; -- New table definitions to test the task tracker process and protocol CREATE TABLE lineitem_simple_task ( LIKE lineitem ); CREATE TABLE lineitem_compute_task ( LIKE lineitem ); CREATE TABLE lineitem_compute_update_task ( LIKE lineitem ); CREATE TABLE lineitem_partition_task_part_00 ( LIKE lineitem ); CREATE TABLE lineitem_partition_task_part_01 ( LIKE lineitem ); CREATE TABLE lineitem_partition_task_part_02 ( LIKE lineitem ); citus-7.0.3/src/test/regress/sql/task_tracker_partition_task.sql000066400000000000000000000046071317107136600252140ustar00rootroot00000000000000-- -- TASK_TRACKER_PARTITION_TASK -- \set JobId 401010 \set PartitionTaskId 801106 \set PartitionColumn l_orderkey \set SelectAll 'SELECT *' \set TablePart00 lineitem_partition_task_part_00 \set TablePart01 lineitem_partition_task_part_01 \set TablePart02 lineitem_partition_task_part_02 -- We assign a partition task and wait for it to complete. Note that we hardcode -- the partition function call string, including the job and task identifiers, -- into the argument in the task assignment function. This hardcoding is -- necessary as the current psql version does not perform variable interpolation -- for names inside single quotes. SELECT task_tracker_assign_task(:JobId, :PartitionTaskId, 'SELECT worker_range_partition_table(' '401010, 801106, ''SELECT * FROM lineitem'', ' '''l_orderkey'', 20, ARRAY[1000, 3000]::_int8)'); SELECT pg_sleep(4.0); SELECT task_tracker_task_status(:JobId, :PartitionTaskId); COPY :TablePart00 FROM 'base/pgsql_job_cache/job_401010/task_801106/p_00000'; COPY :TablePart01 FROM 'base/pgsql_job_cache/job_401010/task_801106/p_00001'; COPY :TablePart02 FROM 'base/pgsql_job_cache/job_401010/task_801106/p_00002'; SELECT COUNT(*) FROM :TablePart00; SELECT COUNT(*) FROM :TablePart02; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
SELECT COUNT(*) AS diff_lhs_00 FROM ( :SelectAll FROM :TablePart00 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :SelectAll FROM :TablePart01 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 ) diff; SELECT COUNT(*) AS diff_lhs_02 FROM ( :SelectAll FROM :TablePart02 EXCEPT ALL :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn < 1000 EXCEPT ALL :SelectAll FROM :TablePart00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 1000 AND :PartitionColumn < 3000 EXCEPT ALL :SelectAll FROM :TablePart01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :SelectAll FROM lineitem WHERE :PartitionColumn >= 3000 EXCEPT ALL :SelectAll FROM :TablePart02 ) diff; citus-7.0.3/src/test/regress/sql/worker_binary_data_partition.sql000066400000000000000000000064071317107136600253630ustar00rootroot00000000000000-- -- WORKER_BINARY_DATA_PARTITION -- \set JobId 201010 \set TaskId 101105 \set Partition_Column textcolumn \set Partition_Column_Text '\'textcolumn\'' \set Partition_Column_Type 25 \set Select_Query_Text '\'SELECT * FROM binary_data_table\'' \set Select_All 'SELECT *' \set Table_Name binary_data_table \set Table_Part_00 binary_data_table_part_00 \set Table_Part_01 binary_data_table_part_01 \set Table_Part_02 binary_data_table_part_02 -- Create table with special characters CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; aaa \013\120 binary data first \012\120\20\21 binary data second \21\120\130 binary data hex \x1E\x0D binary data with tabs \012\t\120\v some\t tabs\t with \t spaces text with tabs some\\ special\n characters \b text with special characters some ' and " and '' characters text with quotes \N null text \N null text 2 \N null text 3 \\N actual backslash N value \NN null string and N empty string \. SELECT length(binarycolumn) FROM :Table_Name; -- Run select query, and apply range partitioning on query results SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); -- Copy range partitioned files into tables CREATE TABLE :Table_Part_00 ( LIKE :Table_Name ); CREATE TABLE :Table_Part_01 ( LIKE :Table_Name ); CREATE TABLE :Table_Part_02 ( LIKE :Table_Name ); COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101105/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101105/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101105/p_00002'; -- The union of the three partitions should have as many rows as original table SELECT COUNT(*) AS total_row_count FROM ( SELECT * FROM :Table_Part_00 UNION ALL SELECT * FROM :Table_Part_01 UNION ALL SELECT * FROM :Table_Part_02 ) AS all_rows; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' ) diff; SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column IS NULL OR :Partition_Column < 'aaa' EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'aaa' AND :Partition_Column < 'some' EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM :Table_Name WHERE :Partition_Column >= 'some' EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; citus-7.0.3/src/test/regress/sql/worker_check_invalid_arguments.sql000066400000000000000000000055201317107136600256600ustar00rootroot00000000000000-- -- WORKER_CHECK_INVALID_ARGUMENTS -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1100000; \set JobId 201010 \set TaskId 101108 \set Table_Name simple_binary_data_table \set Partition_Column_Name '\'textcolumn\'' \set Partition_Column_Type 25 \set Partition_Count 2 \set Select_Query_Text '\'SELECT * FROM simple_binary_data_table\'' \set Bad_Partition_Column_Name '\'badcolumnname\'' \set Bad_Partition_Column_Type 20 \set Bad_Select_Query_Text '\'SELECT * FROM bad_table_name\'' -- Create simple table and insert a few rows into this table -- N.B. - These rows will be partitioned to files on disk then read back in the -- order the files are listed by a call to readdir; because this order is not -- predictable, the second column of these rows always has the same value, to -- avoid an error message differing based on file read order. CREATE TABLE :Table_Name(textcolumn text, binarycolumn bytea); COPY :Table_Name FROM stdin; aaa \013\120 some\t tabs\t with \t spaces \013\120 \. 
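-- Each negative check below passes a deliberately invalid argument and is
-- expected to raise an ERROR in the regression output rather than crash;
-- the calls with valid arguments at the end of the file should succeed.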
SELECT COUNT(*) FROM :Table_Name; -- Check that we fail with bad SQL query SELECT worker_range_partition_table(:JobId, :TaskId, :Bad_Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); -- Check that we fail with bad partition column name SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Bad_Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); -- Check that we fail when partition column and split point types do not match SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Bad_Partition_Column_Type, ARRAY['aaa', 'some']::_text); -- Check that we fail with bad partition column type on hash partitioning SELECT worker_hash_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Bad_Partition_Column_Type, :Partition_Count); -- Now, partition table data using valid arguments SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Name, :Partition_Column_Type, ARRAY['aaa', 'some']::_text); -- Check that we fail to merge when the number of column names and column types -- do not match SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'bytea', 'integer']); -- Check that we fail to merge when column types do not match underlying data SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'integer']); -- Finally, merge partitioned files using valid arguments SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['textcolumn', 'binarycolumn'], ARRAY['text', 'bytea']); citus-7.0.3/src/test/regress/sql/worker_create_table.sql000066400000000000000000000061361317107136600234260ustar00rootroot00000000000000-- -- WORKER_CREATE_TABLE -- -- Create new table definitions for lineitem and supplier tables to test worker -- node execution logic. For now, the tests include range and hash partitioning -- of existing tables.
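-- Most of the partition target tables below are created with
-- CREATE TABLE ... (LIKE source_table), which copies column names, types, and
-- NOT NULL constraints from the source table but, without an INCLUDING
-- clause, does not copy defaults, indexes, or other constraints.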
ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1110000; CREATE TABLE lineitem ( l_orderkey bigint not null, l_partkey integer not null, l_suppkey integer not null, l_linenumber integer not null, l_quantity decimal(15, 2) not null, l_extendedprice decimal(15, 2) not null, l_discount decimal(15, 2) not null, l_tax decimal(15, 2) not null, l_returnflag char(1) not null, l_linestatus char(1) not null, l_shipdate date not null, l_commitdate date not null, l_receiptdate date not null, l_shipinstruct char(25) not null, l_shipmode char(10) not null, l_comment varchar(44) not null, PRIMARY KEY(l_orderkey, l_linenumber) ); CREATE TABLE lineitem_complex ( l_partkey integer not null, l_discount decimal(15, 2) not null, l_shipdate date not null, l_comment varchar(44) not null ); -- Range partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_range_part_00 ( LIKE lineitem ); CREATE TABLE lineitem_range_part_01 ( LIKE lineitem ); CREATE TABLE lineitem_range_part_02 ( LIKE lineitem ); CREATE TABLE lineitem_range_part_03 ( LIKE lineitem ); -- Complex range partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_range_complex_part_00 ( LIKE lineitem_complex ); CREATE TABLE lineitem_range_complex_part_01 ( LIKE lineitem_complex ); CREATE TABLE lineitem_range_complex_part_02 ( LIKE lineitem_complex ); CREATE TABLE lineitem_range_complex_part_03 ( LIKE lineitem_complex ); -- Hash partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_hash_part_00 ( LIKE lineitem ); CREATE TABLE lineitem_hash_part_01 ( LIKE lineitem ); CREATE TABLE lineitem_hash_part_02 ( LIKE lineitem ); CREATE TABLE lineitem_hash_part_03 ( LIKE lineitem ); -- Complex hash partitioned lineitem data are inserted into these four tables CREATE TABLE lineitem_hash_complex_part_00 ( LIKE lineitem_complex ); CREATE TABLE lineitem_hash_complex_part_01 ( LIKE lineitem_complex ); CREATE TABLE lineitem_hash_complex_part_02 ( LIKE lineitem_complex ); CREATE TABLE lineitem_hash_complex_part_03 ( LIKE lineitem_complex ); -- Now create a supplier table to test repartitioning the data on the nation key -- column, where the column's values can be null or zero. CREATE TABLE SUPPLIER ( s_suppkey integer not null, s_name char(25) not null, s_address varchar(40) not null, s_nationkey integer, s_phone char(15) not null, s_acctbal decimal(15,2) not null, s_comment varchar(101) not null ); -- Range partitioned supplier data are inserted into three tables CREATE TABLE supplier_range_part_00 ( LIKE supplier ); CREATE TABLE supplier_range_part_01 ( LIKE supplier ); CREATE TABLE supplier_range_part_02 ( LIKE supplier ); -- Hash partitioned supplier data are inserted into three tables CREATE TABLE supplier_hash_part_00 ( LIKE supplier ); CREATE TABLE supplier_hash_part_01 ( LIKE supplier ); CREATE TABLE supplier_hash_part_02 ( LIKE supplier ); citus-7.0.3/src/test/regress/sql/worker_hash_partition.sql000066400000000000000000000057531317107136600240340ustar00rootroot00000000000000-- -- WORKER_HASH_PARTITION -- \set JobId 201010 \set TaskId 101103 \set Partition_Column l_orderkey \set Partition_Column_Text '\'l_orderkey\'' \set Partition_Column_Type '\'int8\'' \set Partition_Count 4 \set Select_Query_Text '\'SELECT * FROM lineitem\'' \set Select_All 'SELECT *' -- Hash functions internally return unsigned 32-bit integers. However, when -- called externally, the return value becomes a signed 32-bit integer. We hack -- around this conversion issue by bitwise-anding the hash results. 
Note that -- this only works because we are modding with 4. The proper Hash_Mod_Function -- would be (case when hashint8(l_orderkey) >= 0 then (hashint8(l_orderkey) % 4) -- else ((hashint8(l_orderkey) + 4294967296) % 4) end). \set Hash_Mod_Function '( (hashint8(l_orderkey) & 2147483647) % 4 )' \set Table_Part_00 lineitem_hash_part_00 \set Table_Part_01 lineitem_hash_part_01 \set Table_Part_02 lineitem_hash_part_02 \set Table_Part_03 lineitem_hash_part_03 -- Run select query, and apply hash partitioning on query results SELECT worker_hash_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type::regtype, :Partition_Count); COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00002'; COPY :Table_Part_03 FROM 'base/pgsql_job_cache/job_201010/task_101103/p_00003'; SELECT COUNT(*) FROM :Table_Part_00; SELECT COUNT(*) FROM :Table_Part_03; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) ) diff; SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) ) diff; SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 0) EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 2) EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_All FROM lineitem WHERE (:Hash_Mod_Function = 3) EXCEPT ALL :Select_All FROM :Table_Part_03 ) diff; citus-7.0.3/src/test/regress/sql/worker_hash_partition_complex.sql000066400000000000000000000065371317107136600255640ustar00rootroot00000000000000-- -- WORKER_HASH_PARTITION_COMPLEX -- \set JobId 201010 \set TaskId 101104 \set Partition_Column l_partkey \set Partition_Column_Text '\'l_partkey\'' \set Partition_Column_Type 23 \set Partition_Count 4 \set Select_Columns 'SELECT l_partkey, l_discount, l_shipdate, l_comment' \set Select_Filters 'l_shipdate >= date \'1992-01-15\' AND l_discount between 0.02 AND 0.08' \set Hash_Mod_Function '( (hashint4(l_partkey) & 2147483647) % 4 )' \set Table_Part_00 lineitem_hash_complex_part_00 \set Table_Part_01 lineitem_hash_complex_part_01 \set Table_Part_02 lineitem_hash_complex_part_02 \set Table_Part_03 lineitem_hash_complex_part_03 -- Run hardcoded complex select query, and apply hash partitioning on query -- results SELECT worker_hash_partition_table(:JobId, :TaskId, 'SELECT l_partkey, l_discount, l_shipdate, l_comment' ' FROM lineitem ' ' WHERE l_shipdate >= date ''1992-01-15''' ' AND l_discount between 0.02 AND 0.08', :Partition_Column_Text, :Partition_Column_Type, :Partition_Count); -- Copy 
partitioned data files into tables for testing purposes COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00002'; COPY :Table_Part_03 FROM 'base/pgsql_job_cache/job_201010/task_101104/p_00003'; SELECT COUNT(*) FROM :Table_Part_00; SELECT COUNT(*) FROM :Table_Part_03; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_Columns FROM :Table_Part_00 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 0) ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_Columns FROM :Table_Part_01 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 1) ) diff; SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_Columns FROM :Table_Part_02 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 2) ) diff; SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_Columns FROM :Table_Part_03 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 3) ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 0) EXCEPT ALL :Select_Columns FROM :Table_Part_00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 1) EXCEPT ALL :Select_Columns FROM :Table_Part_01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 2) EXCEPT ALL :Select_Columns FROM :Table_Part_02 ) diff; SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND (:Hash_Mod_Function = 3) EXCEPT ALL :Select_Columns FROM :Table_Part_03 ) diff; citus-7.0.3/src/test/regress/sql/worker_merge_hash_files.sql000066400000000000000000000025471317107136600243020ustar00rootroot00000000000000-- -- WORKER_MERGE_HASH_FILES -- \set JobId 201010 \set TaskId 101103 \set Task_Table_Name public.task_101103 \set Select_All 'SELECT *' -- TaskId determines our dependency on hash partitioned files. We take these -- files, and merge them in a task table. We also pass the column names and -- column types that are used to create the task table. SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['orderkey', 'partkey', 'suppkey', 'linenumber', 'quantity', 'extendedprice', 'discount', 'tax', 'returnflag', 'linestatus', 'shipdate', 'commitdate', 'receiptdate', 'shipinstruct', 'shipmode', 'comment']::_text, ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); -- We first count elements from the merged table and the original table we hash -- partitioned. We then compute the difference of these two tables. 
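-- The merge call above is expected to create public.task_101103 from the
-- column name and type arrays and to load every partition file written for
-- task 101103, so the merged table should contain exactly the rows of
-- lineitem; the counts and two-way EXCEPT ALL differences verify this.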
SELECT COUNT(*) FROM :Task_Table_Name; SELECT COUNT(*) FROM lineitem; SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; citus-7.0.3/src/test/regress/sql/worker_merge_range_files.sql000066400000000000000000000025521317107136600244470ustar00rootroot00000000000000-- -- WORKER_MERGE_RANGE_FILES -- \set JobId 201010 \set TaskId 101101 \set Task_Table_Name public.task_101101 \set Select_All 'SELECT *' -- TaskId determines our dependency on range partitioned files. We take these -- files, and merge them in a task table. We also pass the column names and -- column types that are used to create the task table. SELECT worker_merge_files_into_table(:JobId, :TaskId, ARRAY['orderkey', 'partkey', 'suppkey', 'linenumber', 'quantity', 'extendedprice', 'discount', 'tax', 'returnflag', 'linestatus', 'shipdate', 'commitdate', 'receiptdate', 'shipinstruct', 'shipmode', 'comment']::_text, ARRAY['bigint', 'integer', 'integer', 'integer', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'decimal(15, 2)', 'char(1)', 'char(1)', 'date', 'date', 'date', 'char(25)', 'char(10)', 'varchar(44)']::_text); -- We first count elements from the merged table and the original table we range -- partitioned. We then compute the difference of these two tables. SELECT COUNT(*) FROM :Task_Table_Name; SELECT COUNT(*) FROM lineitem; SELECT COUNT(*) AS diff_lhs FROM ( :Select_All FROM :Task_Table_Name EXCEPT ALL :Select_All FROM lineitem ) diff; SELECT COUNT(*) AS diff_rhs FROM ( :Select_All FROM lineitem EXCEPT ALL :Select_All FROM :Task_Table_Name ) diff; citus-7.0.3/src/test/regress/sql/worker_null_data_partition.sql000066400000000000000000000116211317107136600250430ustar00rootroot00000000000000-- -- WORKER_NULL_DATA_PARTITION -- \set JobId 201010 \set Range_TaskId 101106 \set Partition_Column s_nationkey \set Partition_Column_Text '\'s_nationkey\'' \set Partition_Column_Type 23 \set Select_Query_Text '\'SELECT * FROM supplier\'' \set Select_All 'SELECT *' \set Range_Table_Part_00 supplier_range_part_00 \set Range_Table_Part_01 supplier_range_part_01 \set Range_Table_Part_02 supplier_range_part_02 -- Run select query, and apply range partitioning on query results. Note that -- one of the split point values is 0. We are checking here that the partition -- function doesn't treat 0 as null, and that range repartitioning correctly -- puts null nation key values into the 0th repartition bucket. SELECT worker_range_partition_table(:JobId, :Range_TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[0, 10]::_int4); -- Copy partitioned data files into tables for testing purposes COPY :Range_Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101106/p_00000'; COPY :Range_Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101106/p_00001'; COPY :Range_Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101106/p_00002'; SELECT COUNT(*) FROM :Range_Table_Part_00; SELECT COUNT(*) FROM :Range_Table_Part_02; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables.
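-- Because the split points are 0 and 10, the first range partition is
-- expected to hold the NULL nation keys together with any negative nation
-- keys, which is why the first comparison below filters on
-- "s_nationkey < 0 OR s_nationkey IS NULL".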
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Range_Table_Part_00 EXCEPT ALL (:Select_All FROM supplier WHERE :Partition_Column < 0 OR :Partition_Column IS NULL) ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Range_Table_Part_01 EXCEPT ALL :Select_All FROM supplier WHERE :Partition_Column >= 0 AND :Partition_Column < 10 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_02 ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( (:Select_All FROM supplier WHERE :Partition_Column < 0 OR :Partition_Column IS NULL) EXCEPT ALL :Select_All FROM :Range_Table_Part_00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 0 AND :Partition_Column < 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE :Partition_Column >= 10 EXCEPT ALL :Select_All FROM :Range_Table_Part_02 ) diff; -- Next, run select query and apply hash partitioning on query results. We are -- checking here that hash repartitioning correctly puts null nation key values -- into the 0th repartition bucket. \set Hash_TaskId 101107 \set Partition_Count 4 \set Hash_Mod_Function '( (hashint4(s_nationkey) & 2147483647) % 4 )' \set Hash_Table_Part_00 supplier_hash_part_00 \set Hash_Table_Part_01 supplier_hash_part_01 \set Hash_Table_Part_02 supplier_hash_part_02 -- Run select query, and apply hash partitioning on query results SELECT worker_hash_partition_table(:JobId, :Hash_TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, :Partition_Count); COPY :Hash_Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101107/p_00000'; COPY :Hash_Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101107/p_00001'; COPY :Hash_Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101107/p_00002'; SELECT COUNT(*) FROM :Hash_Table_Part_00; SELECT COUNT(*) FROM :Hash_Table_Part_02; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
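-- Hash_Mod_Function above masks off the sign bit instead of adding 2^32 to
-- negative hash values; the two forms agree modulo 4 because 2^31 and 2^32
-- are both divisible by 4. As an illustration only (not executed as part of
-- this test), the equivalence could be spot-checked with:
--   SELECT ((hashint4(s_nationkey) & 2147483647) % 4) AS masked,
--          (CASE WHEN hashint4(s_nationkey) >= 0 THEN hashint4(s_nationkey) % 4
--                ELSE (hashint4(s_nationkey) + 4294967296) % 4 END) AS unsigned_mod
--   FROM supplier;
-- For every non-null s_nationkey the two columns are expected to match.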
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Hash_Table_Part_00 EXCEPT ALL (:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR :Partition_Column IS NULL) ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Hash_Table_Part_01 EXCEPT ALL :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) ) diff; SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Hash_Table_Part_02 EXCEPT ALL :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( (:Select_All FROM supplier WHERE (:Hash_Mod_Function = 0) OR :Partition_Column IS NULL) EXCEPT ALL :Select_All FROM :Hash_Table_Part_00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM supplier WHERE (:Hash_Mod_Function = 1) EXCEPT ALL :Select_All FROM :Hash_Table_Part_01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM supplier WHERE (:Hash_Mod_Function = 2) EXCEPT ALL :Select_All FROM :Hash_Table_Part_02 ) diff; citus-7.0.3/src/test/regress/sql/worker_range_partition.sql000066400000000000000000000054561317107136600242050ustar00rootroot00000000000000-- -- WORKER_RANGE_PARTITION -- \set JobId 201010 \set TaskId 101101 \set Partition_Column l_orderkey \set Partition_Column_Text '\'l_orderkey\'' \set Partition_Column_Type 20 \set Select_Query_Text '\'SELECT * FROM lineitem\'' \set Select_All 'SELECT *' \set Table_Part_00 lineitem_range_part_00 \set Table_Part_01 lineitem_range_part_01 \set Table_Part_02 lineitem_range_part_02 \set Table_Part_03 lineitem_range_part_03 \set File_Basedir base/pgsql_job_cache \set Table_File_00 :File_Basedir/job_:JobId/task_:TaskId/p_00000 \set Table_File_01 :File_Basedir/job_:JobId/task_:TaskId/p_00001 \set Table_File_02 :File_Basedir/job_:JobId/task_:TaskId/p_00002 \set Table_File_03 :File_Basedir/job_:JobId/task_:TaskId/p_00003 -- Run select query, and apply range partitioning on query results SELECT worker_range_partition_table(:JobId, :TaskId, :Select_Query_Text, :Partition_Column_Text, :Partition_Column_Type, ARRAY[1, 3000, 12000]::_int8); COPY :Table_Part_00 FROM :'Table_File_00'; COPY :Table_Part_01 FROM :'Table_File_01'; COPY :Table_Part_02 FROM :'Table_File_02'; COPY :Table_Part_03 FROM :'Table_File_03'; SELECT COUNT(*) FROM :Table_Part_00; SELECT COUNT(*) FROM :Table_Part_03; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
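-- Unlike the other partition tests, the COPY commands above build their file
-- paths from psql variables; the :'Table_File_00' form interpolates the
-- variable's value as a single-quoted SQL literal, so the paths do not need
-- to be hardcoded here.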
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_All FROM :Table_Part_00 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column < 1 ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_All FROM :Table_Part_01 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 1 AND :Partition_Column < 3000 ) diff; SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_All FROM :Table_Part_02 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND :Partition_Column < 12000 ) diff; SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_All FROM :Table_Part_03 EXCEPT ALL :Select_All FROM lineitem WHERE :Partition_Column >= 12000 ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_All FROM lineitem WHERE :Partition_Column < 1 EXCEPT ALL :Select_All FROM :Table_Part_00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 1 AND :Partition_Column < 3000 EXCEPT ALL :Select_All FROM :Table_Part_01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 3000 AND :Partition_Column < 12000 EXCEPT ALL :Select_All FROM :Table_Part_02 ) diff; SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_All FROM lineitem WHERE :Partition_Column >= 12000 EXCEPT ALL :Select_All FROM :Table_Part_03 ) diff; citus-7.0.3/src/test/regress/sql/worker_range_partition_complex.sql000066400000000000000000000065651317107136600257360ustar00rootroot00000000000000-- -- WORKER_RANGE_PARTITION_COMPLEX -- \set JobId 201010 \set TaskId 101102 \set Partition_Column l_partkey \set Partition_Column_Text '\'l_partkey\'' \set Partition_Column_Type 23 \set Select_Columns 'SELECT l_partkey, l_discount, l_shipdate, l_comment' \set Select_Filters 'l_shipdate >= date \'1992-01-15\' AND l_discount between 0.02 AND 0.08' \set Table_Part_00 lineitem_range_complex_part_00 \set Table_Part_01 lineitem_range_complex_part_01 \set Table_Part_02 lineitem_range_complex_part_02 \set Table_Part_03 lineitem_range_complex_part_03 -- Run hardcoded complex select query, and apply range partitioning on query -- results SELECT worker_range_partition_table(:JobId, :TaskId, 'SELECT l_partkey, l_discount, l_shipdate, l_comment' ' FROM lineitem ' ' WHERE l_shipdate >= date ''1992-01-15''' ' AND l_discount between 0.02 AND 0.08', :Partition_Column_Text, :Partition_Column_Type, ARRAY[101, 12000, 18000]::_int4); -- Copy partitioned data files into tables for testing purposes COPY :Table_Part_00 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00000'; COPY :Table_Part_01 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00001'; COPY :Table_Part_02 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00002'; COPY :Table_Part_03 FROM 'base/pgsql_job_cache/job_201010/task_101102/p_00003'; SELECT COUNT(*) FROM :Table_Part_00; SELECT COUNT(*) FROM :Table_Part_03; -- We first compute the difference of partition tables against the base table. -- Then, we compute the difference of the base table against partitioned tables. 
SELECT COUNT(*) AS diff_lhs_00 FROM ( :Select_Columns FROM :Table_Part_00 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column < 101 ) diff; SELECT COUNT(*) AS diff_lhs_01 FROM ( :Select_Columns FROM :Table_Part_01 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 101 AND :Partition_Column < 12000 ) diff; SELECT COUNT(*) AS diff_lhs_02 FROM ( :Select_Columns FROM :Table_Part_02 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 12000 AND :Partition_Column < 18000 ) diff; SELECT COUNT(*) AS diff_lhs_03 FROM ( :Select_Columns FROM :Table_Part_03 EXCEPT ALL :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 18000 ) diff; SELECT COUNT(*) AS diff_rhs_00 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column < 101 EXCEPT ALL :Select_Columns FROM :Table_Part_00 ) diff; SELECT COUNT(*) AS diff_rhs_01 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 101 AND :Partition_Column < 12000 EXCEPT ALL :Select_Columns FROM :Table_Part_01 ) diff; SELECT COUNT(*) AS diff_rhs_02 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 12000 AND :Partition_Column < 18000 EXCEPT ALL :Select_Columns FROM :Table_Part_02 ) diff; SELECT COUNT(*) AS diff_rhs_03 FROM ( :Select_Columns FROM lineitem WHERE :Select_Filters AND :Partition_Column >= 18000 EXCEPT ALL :Select_Columns FROM :Table_Part_03 ) diff; citus-7.0.3/src/test/regress/worker_schedule000066400000000000000000000014421317107136600212060ustar00rootroot00000000000000# ---------- # $Id$ # # Regression tests that exercise worker node related distributed execution # logic. # ---------- # ---------- # All worker tests use the following table and its data # ---------- test: worker_create_table test: worker_copy # ---------- # Range and hash re-partitioning related regression tests # ---------- test: worker_range_partition worker_range_partition_complex test: worker_hash_partition worker_hash_partition_complex test: worker_merge_range_files worker_merge_hash_files test: worker_binary_data_partition worker_null_data_partition test: worker_check_invalid_arguments # ---------- # All task tracker tests use the following tables # ---------- test: task_tracker_create_table test: task_tracker_assign_task task_tracker_partition_task test: task_tracker_cleanup_job
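# The schedule is consumed by pg_regress: each "test:" line forms one group,
# groups run in the listed order, and tests named on the same line are started
# in parallel. As an illustration only, a hypothetical new worker test that
# depends on the tables created above would get its own line, for example:
#   test: worker_new_feature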